Update dependencies

bluepython508
2024-11-01 17:33:34 +00:00
parent 033ac0b400
commit 5cdfab398d
3596 changed files with 1033483 additions and 259 deletions

vendor/tailscale.com/tstime/jitter.go generated vendored Normal file

@@ -0,0 +1,19 @@
// Copyright (c) Tailscale Inc & AUTHORS
// SPDX-License-Identifier: BSD-3-Clause
package tstime
import (
"math/rand/v2"
"time"
)
// RandomDurationBetween returns a random duration in the range [min,max).
// It panics if max < min.
func RandomDurationBetween(min, max time.Duration) time.Duration {
diff := max - min
if diff == 0 {
return min
}
return min + rand.N(diff)
}
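
For orientation, a minimal usage sketch of RandomDurationBetween (not part of the vendored file; the retry scenario and durations are illustrative):

package main

import (
	"fmt"
	"time"

	"tailscale.com/tstime"
)

func main() {
	// Pick a delay in [1s,2s) so that many clients restarted at the
	// same moment do not retry in lockstep.
	delay := tstime.RandomDurationBetween(1*time.Second, 2*time.Second)
	fmt.Println("retrying in", delay)
	time.Sleep(delay)
}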

vendor/tailscale.com/tstime/mono/mono.go generated vendored Normal file

@@ -0,0 +1,127 @@
// Copyright (c) Tailscale Inc & AUTHORS
// SPDX-License-Identifier: BSD-3-Clause
// Package mono provides fast monotonic time.
// On most platforms, mono.Now is about 2x faster than time.Now.
// However, time.Now is really fast, and nicer to use.
//
// For almost all purposes, you should use time.Now.
//
// Package mono exists because we get the current time multiple
// times per network packet, at which point it makes a
// measurable difference.
package mono
import (
"fmt"
"sync/atomic"
"time"
)
// Time is the number of nanoseconds elapsed since an unspecified reference start time.
type Time int64
// Now returns the current monotonic time.
func Now() Time {
// On a newly started machine, the monotonic clock might be very near zero.
// Thus mono.Time(0).Before(mono.Now.Add(-time.Minute)) might yield true.
// The corresponding package time expression never does, if the wall clock is correct.
// Preserve this correspondence by increasing the "base" monotonic clock by a fair amount.
const baseOffset int64 = 1 << 55 // approximately 10,000 hours in nanoseconds
return Time(int64(time.Since(baseWall)) + baseOffset)
}
// Since returns the time elapsed since t.
func Since(t Time) time.Duration {
return time.Duration(Now() - t)
}
// Sub returns t-n, the duration from n to t.
func (t Time) Sub(n Time) time.Duration {
return time.Duration(t - n)
}
// Add returns t+d.
func (t Time) Add(d time.Duration) Time {
return t + Time(d)
}
// After reports t > n, whether t is after n.
func (t Time) After(n Time) bool {
return t > n
}
// Before reports t < n, whether t is before n.
func (t Time) Before(n Time) bool {
return t < n
}
// IsZero reports whether t == 0.
func (t Time) IsZero() bool {
return t == 0
}
// StoreAtomic does an atomic store *t = new.
func (t *Time) StoreAtomic(new Time) {
atomic.StoreInt64((*int64)(t), int64(new))
}
// LoadAtomic does an atomic load *t.
func (t *Time) LoadAtomic() Time {
return Time(atomic.LoadInt64((*int64)(t)))
}
// baseWall and baseMono are a pair of almost-identical times used to correlate a Time with a wall time.
var (
baseWall time.Time
baseMono Time
)
func init() {
baseWall = time.Now()
baseMono = Now()
}
// String prints t, including an estimated equivalent wall clock.
// This is best-effort only, for rough debugging purposes only.
// Since t is a monotonic time, it can vary from the actual wall clock by arbitrary amounts.
// Even in the best of circumstances, it may vary by a few milliseconds.
func (t Time) String() string {
return fmt.Sprintf("mono.Time(ns=%d, estimated wall=%v)", int64(t), baseWall.Add(t.Sub(baseMono)).Truncate(0))
}
// WallTime returns an approximate wall time that corresponded to t.
func (t Time) WallTime() time.Time {
if !t.IsZero() {
return baseWall.Add(t.Sub(baseMono)).Truncate(0)
}
return time.Time{}
}
// MarshalJSON formats t for JSON as if it were a time.Time.
// We format Time this way for backwards-compatibility.
// Time does not survive a MarshalJSON/UnmarshalJSON round trip unchanged
// across different invocations of the Go process. This is best-effort only.
// Since t is a monotonic time, it can vary from the actual wall clock by arbitrary amounts.
// Even in the best of circumstances, it may vary by a few milliseconds.
func (t Time) MarshalJSON() ([]byte, error) {
tt := t.WallTime()
return tt.MarshalJSON()
}
// UnmarshalJSON sets t according to data.
// Time does not survive a MarshalJSON/UnmarshalJSON round trip unchanged
// across different invocations of the Go process. This is best-effort only.
func (t *Time) UnmarshalJSON(data []byte) error {
var tt time.Time
err := tt.UnmarshalJSON(data)
if err != nil {
return err
}
if tt.IsZero() {
*t = 0
return nil
}
*t = baseMono.Add(tt.Sub(baseWall))
return nil
}
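
A small sketch of how this package is typically consumed (illustrative only, not part of the commit): timestamp a hot-path event with mono.Now and recover an approximate wall time for logging.

package main

import (
	"fmt"
	"time"

	"tailscale.com/tstime/mono"
)

func main() {
	start := mono.Now()

	// ... process a packet, serve a request, etc. ...
	time.Sleep(10 * time.Millisecond)

	elapsed := mono.Since(start) // equivalent to mono.Now().Sub(start)
	fmt.Printf("handled in %v (started around %v)\n", elapsed, start.WallTime())
}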

vendor/tailscale.com/tstime/rate/rate.go generated vendored Normal file

@@ -0,0 +1,90 @@
// Copyright (c) Tailscale Inc & AUTHORS
// SPDX-License-Identifier: BSD-3-Clause
// This is a modified, simplified version of code from golang.org/x/time/rate.
// Copyright 2015 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// Package rate provides a rate limiter.
package rate
import (
"sync"
"time"
"tailscale.com/tstime/mono"
)
// Limit defines the maximum frequency of some events.
// Limit is represented as number of events per second.
// A zero Limit is invalid.
type Limit float64
// Every converts a minimum time interval between events to a Limit.
func Every(interval time.Duration) Limit {
if interval <= 0 {
panic("invalid interval")
}
return 1 / Limit(interval.Seconds())
}
// A Limiter controls how frequently events are allowed to happen.
// It implements a [token bucket] of a particular size b,
// initially full and refilled at rate r tokens per second.
// Informally, in any large enough time interval,
// the Limiter limits the rate to r tokens per second,
// with a maximum burst size of b events.
// Use NewLimiter to create non-zero Limiters.
//
// [token bucket]: https://en.wikipedia.org/wiki/Token_bucket
type Limiter struct {
limit Limit
burst float64
mu sync.Mutex // protects following fields
tokens float64 // number of tokens currently in bucket
last mono.Time // the last time the limiter's tokens field was updated
}
// NewLimiter returns a new Limiter that allows events up to rate r and permits
// bursts of at most b tokens.
func NewLimiter(r Limit, b int) *Limiter {
if b < 1 {
panic("bad burst, must be at least 1")
}
return &Limiter{limit: r, burst: float64(b)}
}
// Allow reports whether an event may happen now.
func (lim *Limiter) Allow() bool {
return lim.allow(mono.Now())
}
func (lim *Limiter) allow(now mono.Time) bool {
lim.mu.Lock()
defer lim.mu.Unlock()
// If time has moved backwards, look around awkwardly and pretend nothing happened.
if now.Before(lim.last) {
lim.last = now
}
// Calculate the new number of tokens available due to the passage of time.
elapsed := now.Sub(lim.last)
tokens := lim.tokens + float64(lim.limit)*elapsed.Seconds()
if tokens > lim.burst {
tokens = lim.burst
}
// Consume a token.
tokens--
// Update state.
ok := tokens >= 0
if ok {
lim.last = now
lim.tokens = tokens
}
return ok
}
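
A short illustrative sketch of the Limiter API above (not part of the vendored file): allow a burst of 5 events, refilled at 10 events per second.

package main

import (
	"fmt"

	"tailscale.com/tstime/rate"
)

func main() {
	lim := rate.NewLimiter(10, 5) // 10 tokens/sec, bucket size 5

	for i := 0; i < 20; i++ {
		if lim.Allow() {
			fmt.Println("event", i, "allowed")
		} else {
			fmt.Println("event", i, "dropped")
		}
	}
}

When the configuration is more naturally expressed as a minimum interval between events, rate.Every can be used to build the Limit, e.g. rate.NewLimiter(rate.Every(100*time.Millisecond), 5).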

vendor/tailscale.com/tstime/rate/value.go generated vendored Normal file

@@ -0,0 +1,222 @@
// Copyright (c) Tailscale Inc & AUTHORS
// SPDX-License-Identifier: BSD-3-Clause
package rate
import (
"encoding/json"
"fmt"
"math"
"sync"
"time"
"tailscale.com/tstime/mono"
)
// Value measures the rate at which events occur,
// exponentially weighted towards recent activity.
// It is guaranteed to occupy O(1) memory, operate in O(1) runtime,
// and is safe for concurrent use.
// The zero value is safe for immediate use.
//
// The algorithm is based on and semantically equivalent to
// [exponentially weighted moving averages (EWMAs)],
// but modified to avoid assuming that event samples are gathered
// at fixed and discrete time-step intervals.
//
// In EWMA literature, the average is typically tuned with a λ parameter
// that determines how much weight to give to recent event samples.
// A high λ value reacts quickly to new events favoring recent history,
// while a low λ value reacts more slowly to new events.
// The EWMA is computed as:
//
// zᵢ = λxᵢ + (1-λ)zᵢ₋₁
//
// where:
// - λ is the weight parameter, where 0 ≤ λ ≤ 1
// - xᵢ is the number of events that has since occurred
// - zᵢ is the newly computed moving average
// - zᵢ₋₁ is the previous moving average one time-step ago
//
// As mentioned, this implementation does not assume that the average
// is updated periodically on a fixed time-step interval,
// but allows the application to indicate that events occurred
// at any point in time by simply calling Value.Add.
// Thus, for every time Value.Add is called, it takes into consideration
// the amount of time elapsed since the last call to Value.Add as
// opposed to assuming that every call to Value.Add is evenly spaced
// some fixed time-step interval apart.
//
// Since time is critical to this measurement, we tune the metric not
// with the weight parameter λ (a unit-less constant between 0 and 1),
// but rather as a half-life period t½. The half-life period is
// mathematically equivalent but easier for humans to reason about.
// The parameters λ and t½ are directly related in the following way:
//
// t½ = -(ln(2) · ΔT) / ln(1 - λ)
//
// λ = 1 - 2^-(ΔT / t½)
//
// where:
// - t½ is the half-life commonly used with exponential decay
// - λ is the unit-less weight parameter commonly used with EWMAs
// - ΔT is the discrete time-step interval used with EWMAs
//
// The internal algorithm does not use the EWMA formula,
// but is rather based on [half-life decay].
// The formula for half-life decay is mathematically related
// to the formula for computing the EWMA.
// The calculation of an EWMA is a geometric progression [[1]] and
// is essentially a discrete version of an exponential function [[2]],
// for which half-life decay is one particular expression.
// Given sufficiently small time-steps, the EWMA and half-life
// algorithms provide equivalent results.
//
// The Value type does not take ΔT as a parameter since it relies
// on a timer with nanosecond resolution. In a way, one could treat
// this algorithm as operating on a ΔT of 1ns. Practically speaking,
// the computation operates on non-discrete time intervals.
//
// [exponentially weighted moving averages (EWMAs)]: https://en.wikipedia.org/wiki/EWMA_chart
// [half-life decay]: https://en.wikipedia.org/wiki/Half-life
// [1]: https://en.wikipedia.org/wiki/Exponential_smoothing#%22Exponential%22_naming
// [2]: https://en.wikipedia.org/wiki/Exponential_decay
type Value struct {
// HalfLife specifies how quickly the rate reacts to rate changes.
//
// Specifically, if there is currently a steady-state rate of
// 0 events per second, and then immediately the rate jumped to
// N events per second, then it will take HalfLife seconds until
// the Value represents a rate of N/2 events per second and
// 2*HalfLife seconds until the Value represents a rate of 3*N/4
// events per second, and so forth. The rate represented by Value
// will asymptotically approach N events per second over time.
//
// In order for Value to stably represent a steady-state rate,
// the HalfLife should be larger than the average period between
// calls to Value.Add.
//
// A zero or negative HalfLife defaults to 1 second.
HalfLife time.Duration
mu sync.Mutex
updated mono.Time
value float64 // adjusted count of events
}
// halfLife returns the half-life period in seconds.
func (r *Value) halfLife() float64 {
if r.HalfLife <= 0 {
return time.Second.Seconds()
}
return r.HalfLife.Seconds()
}
// Add records that n events just occurred.
// n must be a finite and non-negative number.
func (r *Value) Add(n float64) {
r.mu.Lock()
defer r.mu.Unlock()
r.addNow(mono.Now(), n)
}
func (r *Value) addNow(now mono.Time, n float64) {
if n < 0 || math.IsInf(n, 0) || math.IsNaN(n) {
panic(fmt.Sprintf("invalid count %f; must be a finite, non-negative number", n))
}
r.value = r.valueNow(now) + n
r.updated = now
}
// valueNow computes the number of events after some elapsed time.
// The total count of events decays exponentially so that
// the computed rate is biased towards recent history.
func (r *Value) valueNow(now mono.Time) float64 {
// This uses the half-life formula:
// N(t) = N₀ · 2^-(t / t½)
// where:
// N(t) is the amount remaining after time t,
// N₀ is the initial quantity, and
// t½ is the half-life of the decaying quantity.
//
// See https://en.wikipedia.org/wiki/Half-life
age := now.Sub(r.updated).Seconds()
return r.value * math.Exp2(-age/r.halfLife())
}
// Rate computes the rate as events per second.
func (r *Value) Rate() float64 {
r.mu.Lock()
defer r.mu.Unlock()
return r.rateNow(mono.Now())
}
func (r *Value) rateNow(now mono.Time) float64 {
// The stored value carries the units "events"
// while we want to compute "events / second".
//
// In the trivial case where the events never decay,
// the average rate can be computed by dividing the total events
// by the total elapsed time since the start of the Value.
// This works because the weight distribution is uniform such that
// the weight of an event in the distant past is equal to
// the weight of a recent event. This is not the case with
// exponentially decaying weights, which complicates computation.
//
// Since our events are decaying, we can divide the number of events
// by the total possible accumulated value, which we determine
// by integrating the half-life formula from t=0 until t=∞,
// assuming that N₀ is 1:
// ∫ N(t) dt = t½ / ln(2)
//
// Recall that the integral of a curve is the area under a curve,
// which carries the units of the X-axis multiplied by the Y-axis.
// In our case this would be the units "events · seconds".
// By normalizing N₀ to 1, the Y-axis becomes a unit-less quantity,
// resulting in an integral unit of just "seconds".
// Dividing the events by the integral quantity correctly produces
// the units of "events / second".
return r.valueNow(now) / r.normalizedIntegral()
}
// normalizedIntegral computes the quantity t½ / ln(2).
// It carries the units of "seconds".
func (r *Value) normalizedIntegral() float64 {
return r.halfLife() / math.Ln2
}
type jsonValue struct {
// TODO: Use v2 "encoding/json" for native time.Duration formatting.
HalfLife string `json:"halfLife,omitempty,omitzero"`
Value float64 `json:"value,omitempty,omitzero"`
Updated mono.Time `json:"updated,omitempty,omitzero"`
}
func (r *Value) MarshalJSON() ([]byte, error) {
if r == nil {
return []byte("null"), nil
}
r.mu.Lock()
defer r.mu.Unlock()
v := jsonValue{Value: r.value, Updated: r.updated}
if r.HalfLife > 0 {
v.HalfLife = r.HalfLife.String()
}
return json.Marshal(v)
}
func (r *Value) UnmarshalJSON(b []byte) error {
var v jsonValue
if err := json.Unmarshal(b, &v); err != nil {
return err
}
halfLife, err := time.ParseDuration(v.HalfLife)
if err != nil && v.HalfLife != "" {
return fmt.Errorf("invalid halfLife: %w", err)
}
r.mu.Lock()
defer r.mu.Unlock()
r.HalfLife = halfLife
r.value = v.Value
r.updated = v.Updated
return nil
}
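
An illustrative sketch of Value (not part of the vendored file; the HalfLife and the synthetic traffic are arbitrary choices): record events with Add and read the exponentially weighted rate with Rate.

package main

import (
	"fmt"
	"time"

	"tailscale.com/tstime/rate"
)

func main() {
	var reqRate rate.Value         // zero value is ready to use
	reqRate.HalfLife = time.Second // react to rate changes with a 1s half-life

	// Simulate roughly one second of traffic at ~50 events/sec.
	for i := 0; i < 50; i++ {
		reqRate.Add(1)
		time.Sleep(20 * time.Millisecond)
	}

	// Per the package docs, after about one HalfLife of sustained traffic
	// the reported rate is roughly half of the true rate and still
	// converging towards it.
	fmt.Printf("current rate: %.1f events/sec\n", reqRate.Rate())
}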

vendor/tailscale.com/tstime/tstime.go generated vendored Normal file

@@ -0,0 +1,185 @@
// Copyright (c) Tailscale Inc & AUTHORS
// SPDX-License-Identifier: BSD-3-Clause
// Package tstime defines Tailscale-specific time utilities.
package tstime
import (
"context"
"strconv"
"strings"
"time"
)
// Parse3339 is a wrapper around time.Parse(time.RFC3339, s).
func Parse3339(s string) (time.Time, error) {
return time.Parse(time.RFC3339, s)
}
// Parse3339B is Parse3339 but for byte slices.
func Parse3339B(b []byte) (time.Time, error) {
var t time.Time
if err := t.UnmarshalText(b); err != nil {
return Parse3339(string(b)) // reproduce same error message
}
return t, nil
}
// ParseDuration is more expressive than [time.ParseDuration],
// also accepting 'd' (days) and 'w' (weeks) literals.
func ParseDuration(s string) (time.Duration, error) {
for {
end := strings.IndexAny(s, "dw")
if end < 0 {
break
}
// start is the index of the first digit of the number immediately preceding the unit letter.
start := end - (len(s[:end]) - len(strings.TrimRight(s[:end], "0123456789")))
n, err := strconv.Atoi(s[start:end])
if err != nil {
return 0, err
}
hours := 24
if s[end] == 'w' {
hours *= 7
}
s = s[:start] + s[end+1:] + strconv.Itoa(n*hours) + "h"
}
return time.ParseDuration(s)
}
// Sleep is like [time.Sleep] but returns early upon context cancelation.
// It reports whether the full sleep duration was achieved.
func Sleep(ctx context.Context, d time.Duration) bool {
timer := time.NewTimer(d)
defer timer.Stop()
select {
case <-ctx.Done():
return false
case <-timer.C:
return true
}
}
// DefaultClock is a wrapper around a Clock.
// It uses StdClock by default if Clock is nil.
type DefaultClock struct{ Clock }
// TODO: We should make the methods of DefaultClock inlineable
// so that we can optimize for the common case where c.Clock == nil.
func (c DefaultClock) Now() time.Time {
if c.Clock == nil {
return time.Now()
}
return c.Clock.Now()
}
func (c DefaultClock) NewTimer(d time.Duration) (TimerController, <-chan time.Time) {
if c.Clock == nil {
t := time.NewTimer(d)
return t, t.C
}
return c.Clock.NewTimer(d)
}
func (c DefaultClock) NewTicker(d time.Duration) (TickerController, <-chan time.Time) {
if c.Clock == nil {
t := time.NewTicker(d)
return t, t.C
}
return c.Clock.NewTicker(d)
}
func (c DefaultClock) AfterFunc(d time.Duration, f func()) TimerController {
if c.Clock == nil {
return time.AfterFunc(d, f)
}
return c.Clock.AfterFunc(d, f)
}
func (c DefaultClock) Since(t time.Time) time.Duration {
if c.Clock == nil {
return time.Since(t)
}
return c.Clock.Since(t)
}
// Clock offers a subset of the functionality from the std/time package.
// Normally, applications will use the StdClock implementation, which calls the
// appropriate std/time exported funcs. The advantage of using Clock is that
// tests can substitute a different implementation, allowing a test to control
// time precisely. That is required for certain types of tests to be possible
// at all, speeds up execution by removing the need to sleep, and can dramatically
// reduce the risk of flakes caused by tests executing too slowly or too quickly.
type Clock interface {
// Now returns the current time, as in time.Now.
Now() time.Time
// NewTimer returns a timer whose notion of the current time is controlled
// by this Clock. It follows the semantics of time.NewTimer as closely as
// possible but is adapted to return an interface, so the channel needs to
// be returned as well.
NewTimer(d time.Duration) (TimerController, <-chan time.Time)
// NewTicker returns a ticker whose notion of the current time is controlled
// by this Clock. It follows the semantics of time.NewTicker as closely as
// possible but is adapted to return an interface, so the channel needs to
// be returned as well.
NewTicker(d time.Duration) (TickerController, <-chan time.Time)
// AfterFunc returns a timer whose notion of the current time is controlled
// by this Clock. When the timer expires, it will call the provided func.
// It follows the semantics of time.AfterFunc.
AfterFunc(d time.Duration, f func()) TimerController
// Since returns the time elapsed since t.
// It follows the semantics of time.Since.
Since(t time.Time) time.Duration
}
// TickerController offers the methods of a time.Ticker to ensure
// compatibility with standard tickers, while allowing a standard ticker to be
// substituted with something else for testing purposes.
type TickerController interface {
// Reset follows the same semantics as with time.Ticker.Reset.
Reset(d time.Duration)
// Stop follows the same semantics as with time.Ticker.Stop.
Stop()
}
// TimerController offers the methods of a time.Timer to ensure
// compatibility with standard timers, while allowing a standard timer to be
// substituted with something else for testing purposes.
type TimerController interface {
// Reset follows the same semantics as with time.Timer.Reset.
Reset(d time.Duration) bool
// Stop follows the same semantics as with time.Timer.Stop.
Stop() bool
}
// StdClock is a simple implementation of Clock using the relevant funcs in the
// std/time package.
type StdClock struct{}
// Now calls time.Now.
func (StdClock) Now() time.Time {
return time.Now()
}
// NewTimer calls time.NewTimer. Because an interface cannot expose struct
// fields and other packages cannot add methods to time.Timer, the
// channel is also returned, as it would otherwise be inaccessible.
func (StdClock) NewTimer(d time.Duration) (TimerController, <-chan time.Time) {
t := time.NewTimer(d)
return t, t.C
}
// NewTicker calls time.NewTicker. Because an interface cannot expose struct
// fields and other packages cannot add methods to time.Ticker, the
// channel is also returned, as it would otherwise be inaccessible.
func (StdClock) NewTicker(d time.Duration) (TickerController, <-chan time.Time) {
t := time.NewTicker(d)
return t, t.C
}
// AfterFunc calls time.AfterFunc.
func (StdClock) AfterFunc(d time.Duration, f func()) TimerController {
return time.AfterFunc(d, f)
}
// Since calls time.Since.
func (StdClock) Since(t time.Time) time.Duration {
return time.Since(t)
}