Update dependencies

bluepython508
2024-11-01 17:33:34 +00:00
parent 033ac0b400
commit 5cdfab398d
3596 changed files with 1033483 additions and 259 deletions

vendor/tailscale.com/util/cibuild/cibuild.go generated vendored Normal file

@@ -0,0 +1,14 @@
// Copyright (c) Tailscale Inc & AUTHORS
// SPDX-License-Identifier: BSD-3-Clause
// Package cibuild reports runtime CI information.
package cibuild
import "os"
// On reports whether the current binary is executing on a CI system.
func On() bool {
// CI env variable is set by GitHub.
// https://docs.github.com/en/actions/learn-github-actions/environment-variables#default-environment-variables
return os.Getenv("GITHUB_ACTIONS") != "" || os.Getenv("CI") == "true"
}
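
A minimal usage sketch, not part of the vendored file: skipping a test when running under CI. The test and package names are illustrative.

package example

import (
	"testing"

	"tailscale.com/util/cibuild"
)

// TestNeedsRealNetwork is a hypothetical test that only runs outside CI.
func TestNeedsRealNetwork(t *testing.T) {
	if cibuild.On() {
		t.Skip("skipping test that needs outbound network access on CI")
	}
	// ... exercise behavior that CI runners can't provide
}
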

vendor/tailscale.com/util/clientmetric/clientmetric.go generated vendored Normal file

@@ -0,0 +1,385 @@
// Copyright (c) Tailscale Inc & AUTHORS
// SPDX-License-Identifier: BSD-3-Clause
// Package clientmetric provides client-side metrics whose values
// get occasionally logged.
package clientmetric
import (
"bytes"
"encoding/binary"
"encoding/hex"
"fmt"
"io"
"sort"
"strings"
"sync"
"sync/atomic"
"time"
)
var (
mu sync.Mutex // guards vars in this block
metrics = map[string]*Metric{}
numWireID int // how many wireIDs have been allocated
lastDelta time.Time // time of last call to EncodeLogTailMetricsDelta
sortedDirty bool // whether sorted needs to be rebuilt
sorted []*Metric // by name
lastLogVal []scanEntry // by Metric.regIdx
unsorted []*Metric // by Metric.regIdx
// valFreeList is a set of free contiguous int64s whose
// element addresses get assigned to Metric.v.
// Any element still in valFreeList is free for use.
// They're contiguous to reduce cache churn during diff scans.
// When the free list is exhausted, a new backing array is allocated.
valFreeList []int64
)
// scanEntry contains the minimal data needed for quickly scanning
// memory for changed values. It's small to reduce memory pressure.
type scanEntry struct {
v *int64 // Metric.v
f func() int64 // Metric.f
lastLogged int64 // last logged value
}
// Type is a metric type: counter or gauge.
type Type uint8
const (
TypeGauge Type = iota
TypeCounter
)
// Metric is an integer metric value that's tracked over time.
//
// It's safe for concurrent use.
type Metric struct {
v *int64 // atomic; the metric value
f func() int64 // value function (v is ignored if f is non-nil)
regIdx int // index into lastLogVal and unsorted
name string
typ Type
deltasDisabled bool
// The following fields are owned by the package-level 'mu':
// wireID is the lazily-allocated "wire ID". Until a metric is encoded
// in the logs (by EncodeLogTailMetricsDelta), it has no wireID. This
// ensures that unused metrics don't waste valuable low numbers, which
// encode with varints with fewer bytes.
wireID int
// lastNamed is the last time the name of this metric was
// written on the wire.
lastNamed time.Time
}
func (m *Metric) Name() string { return m.name }
func (m *Metric) Value() int64 {
if m.f != nil {
return m.f()
}
return atomic.LoadInt64(m.v)
}
func (m *Metric) Type() Type { return m.typ }
// DisableDeltas disables uploading of deltas for this metric (absolute values
// are always uploaded).
func (m *Metric) DisableDeltas() {
m.deltasDisabled = true
}
// Add increments m's value by n.
//
// If m is of type counter, n should not be negative.
func (m *Metric) Add(n int64) {
if m.f != nil {
panic("Add() called on metric with value function")
}
atomic.AddInt64(m.v, n)
}
// Set sets m's value to v.
//
// If m is of type counter, Set should not be used.
func (m *Metric) Set(v int64) {
if m.f != nil {
panic("Set() called on metric with value function")
}
atomic.StoreInt64(m.v, v)
}
// Publish registers a metric in the global map.
// It panics if the name is a duplicate anywhere in the process.
func (m *Metric) Publish() {
mu.Lock()
defer mu.Unlock()
if m.name == "" {
panic("unnamed Metric")
}
if _, dup := metrics[m.name]; dup {
panic("duplicate metric " + m.name)
}
metrics[m.name] = m
sortedDirty = true
if m.f != nil {
lastLogVal = append(lastLogVal, scanEntry{f: m.f})
} else {
if len(valFreeList) == 0 {
valFreeList = make([]int64, 256)
}
m.v = &valFreeList[0]
valFreeList = valFreeList[1:]
lastLogVal = append(lastLogVal, scanEntry{v: m.v})
}
m.regIdx = len(unsorted)
unsorted = append(unsorted, m)
}
// Metrics returns the sorted list of metrics.
//
// The returned slice should not be mutated.
func Metrics() []*Metric {
mu.Lock()
defer mu.Unlock()
if sortedDirty {
sortedDirty = false
sorted = make([]*Metric, 0, len(metrics))
for _, m := range metrics {
sorted = append(sorted, m)
}
sort.Slice(sorted, func(i, j int) bool {
return sorted[i].name < sorted[j].name
})
}
return sorted
}
// HasPublished reports whether a metric with the given name has already been
// published.
func HasPublished(name string) bool {
mu.Lock()
defer mu.Unlock()
_, ok := metrics[name]
return ok
}
// NewUnpublished initializes a new Metric without calling Publish on
// it.
func NewUnpublished(name string, typ Type) *Metric {
if i := strings.IndexFunc(name, isIllegalMetricRune); name == "" || i != -1 {
panic(fmt.Sprintf("illegal metric name %q (index %v)", name, i))
}
return &Metric{
name: name,
typ: typ,
}
}
func isIllegalMetricRune(r rune) bool {
return !(r >= 'a' && r <= 'z' ||
r >= 'A' && r <= 'Z' ||
r >= '0' && r <= '9' ||
r == '_')
}
// NewCounter returns a new metric that can only increment.
func NewCounter(name string) *Metric {
m := NewUnpublished(name, TypeCounter)
m.Publish()
return m
}
// NewGauge returns a new metric that can both increment and decrement.
func NewGauge(name string) *Metric {
m := NewUnpublished(name, TypeGauge)
m.Publish()
return m
}
// NewCounterFunc returns a counter metric that has its value determined by
// calling the provided function (calling Add() and Set() will panic). No
// locking guarantees are made for the invocation.
func NewCounterFunc(name string, f func() int64) *Metric {
m := NewUnpublished(name, TypeCounter)
m.f = f
m.Publish()
return m
}
// NewGaugeFunc returns a gauge metric that has its value determined by
// calling the provided function (calling Add() and Set() will panic). No
// locking guarantees are made for the invocation.
func NewGaugeFunc(name string, f func() int64) *Metric {
m := NewUnpublished(name, TypeGauge)
m.f = f
m.Publish()
return m
}
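
A brief usage sketch, not part of the vendored file, showing how callers typically declare and update metrics; the metric names and the recordDrop helper are illustrative.

package example

import (
	"runtime"

	"tailscale.com/util/clientmetric"
)

var (
	droppedPackets = clientmetric.NewCounter("example_dropped_packets")
	queueDepth     = clientmetric.NewGauge("example_queue_depth")
	// goroutines is sampled lazily via a value function; Add/Set would panic on it.
	goroutines = clientmetric.NewGaugeFunc("example_goroutines", func() int64 {
		return int64(runtime.NumGoroutine())
	})
)

func recordDrop(queueLen int) {
	droppedPackets.Add(1)           // counters should only increase
	queueDepth.Set(int64(queueLen)) // gauges may move in either direction
}
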
// WritePrometheusExpositionFormat writes all client metrics to w in
// the Prometheus text-based exposition format.
//
// See https://github.com/prometheus/docs/blob/main/content/docs/instrumenting/exposition_formats.md
func WritePrometheusExpositionFormat(w io.Writer) {
for _, m := range Metrics() {
switch m.Type() {
case TypeGauge:
fmt.Fprintf(w, "# TYPE %s gauge\n", m.Name())
case TypeCounter:
fmt.Fprintf(w, "# TYPE %s counter\n", m.Name())
}
fmt.Fprintf(w, "%s %v\n", m.Name(), m.Value())
}
}
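
A sketch, not part of the package, of exposing these metrics over HTTP in the Prometheus text format; the /debug/clientmetrics path is an arbitrary choice.

package example

import (
	"net/http"

	"tailscale.com/util/clientmetric"
)

func registerMetricsHandler(mux *http.ServeMux) {
	mux.HandleFunc("/debug/clientmetrics", func(w http.ResponseWriter, r *http.Request) {
		clientmetric.WritePrometheusExpositionFormat(w)
	})
}
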
const (
// metricLogNameFrequency is how often a metric's name=>id
// mapping is redundantly put in the logs. In other words,
// this is how far in the logs you need to fetch from a
// given point in time to recompute the metrics at that point
// in time.
metricLogNameFrequency = 4 * time.Hour
// minMetricEncodeInterval is the minimum interval that the
// metrics will be scanned for changes before being encoded
// for logtail.
minMetricEncodeInterval = 15 * time.Second
)
// EncodeLogTailMetricsDelta returns an encoded string representing the metrics
// differences since the previous call.
//
// It implements the requirements of a logtail.Config.MetricsDelta
// func. Notably, its output is safe to embed in a JSON string literal
// without further escaping.
//
// The current encoding is:
// - name immediately following metric:
// 'N' + hex(varint(len(name))) + name
// - set value of a metric:
// 'S' + hex(varint(wireid)) + hex(varint(value))
// - increment a metric: (decrements if negative)
// 'I' + hex(varint(wireid)) + hex(varint(value))
func EncodeLogTailMetricsDelta() string {
mu.Lock()
defer mu.Unlock()
now := time.Now()
if !lastDelta.IsZero() && now.Sub(lastDelta) < minMetricEncodeInterval {
return ""
}
lastDelta = now
var enc *deltaEncBuf // lazy
for i, ent := range lastLogVal {
var val int64
if ent.f != nil {
val = ent.f()
} else {
val = atomic.LoadInt64(ent.v)
}
delta := val - ent.lastLogged
if delta == 0 {
continue
}
lastLogVal[i].lastLogged = val
m := unsorted[i]
if enc == nil {
enc = deltaPool.Get().(*deltaEncBuf)
enc.buf.Reset()
}
if m.wireID == 0 {
numWireID++
m.wireID = numWireID
}
writeValue := m.deltasDisabled
if m.lastNamed.IsZero() || now.Sub(m.lastNamed) > metricLogNameFrequency {
enc.writeName(m.Name(), m.Type())
m.lastNamed = now
writeValue = true
}
if writeValue {
enc.writeValue(m.wireID, val)
} else {
enc.writeDelta(m.wireID, delta)
}
}
if enc == nil {
return ""
}
defer deltaPool.Put(enc)
return enc.buf.String()
}
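
A standalone sketch of the hex-varint record encoding described above, not part of the package; encodeRecord is a hypothetical helper that mirrors what writeValue/writeDelta emit.

package main

import (
	"encoding/binary"
	"encoding/hex"
	"fmt"
)

// encodeRecord builds one 'S' or 'I' record: the kind byte, then the wireID
// and value as zigzag varints, each hex-encoded.
func encodeRecord(kind byte, wireID int, v int64) string {
	out := []byte{kind}
	var scratch [binary.MaxVarintLen64]byte
	for _, x := range []int64{int64(wireID), v} {
		n := binary.PutVarint(scratch[:], x)
		out = append(out, hex.EncodeToString(scratch[:n])...)
	}
	return string(out)
}

func main() {
	// varint(1) = 0x02 and varint(5) = 0x0a, so this prints "I020a":
	// increment the metric with wireID 1 by 5.
	fmt.Println(encodeRecord('I', 1, 5))
}
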
var deltaPool = &sync.Pool{
New: func() any {
return new(deltaEncBuf)
},
}
// deltaEncBuf encodes metrics per the format described
// on EncodeLogTailMetricsDelta above.
type deltaEncBuf struct {
buf bytes.Buffer
scratch [binary.MaxVarintLen64]byte
}
// writeName writes a "name" (N) record to the buffer, which notes
// that the immediately following record's wireID has the provided
// name.
func (b *deltaEncBuf) writeName(name string, typ Type) {
var namePrefix string
if typ == TypeGauge {
// Add the gauge_ prefix so that tsweb knows that this is a gauge metric
// when generating the Prometheus version.
namePrefix = "gauge_"
}
b.buf.WriteByte('N')
b.writeHexVarint(int64(len(namePrefix) + len(name)))
b.buf.WriteString(namePrefix)
b.buf.WriteString(name)
}
// writeValue writes a "set" (S) record to the buffer, noting that the
// metric with the given wireID now has value v.
func (b *deltaEncBuf) writeValue(wireID int, v int64) {
b.buf.WriteByte('S')
b.writeHexVarint(int64(wireID))
b.writeHexVarint(v)
}
// writeDelta writes an "increment" (I) delta value record to the
// buffer, noting that the metric with the given wireID now has a
// value that's v larger (or smaller if v is negative).
func (b *deltaEncBuf) writeDelta(wireID int, v int64) {
b.buf.WriteByte('I')
b.writeHexVarint(int64(wireID))
b.writeHexVarint(v)
}
// writeHexVarint writes v to the buffer as a hex-encoded varint.
func (b *deltaEncBuf) writeHexVarint(v int64) {
n := binary.PutVarint(b.scratch[:], v)
hexLen := n * 2
oldLen := b.buf.Len()
b.buf.Grow(hexLen)
hexBuf := b.buf.Bytes()[oldLen : oldLen+hexLen]
hex.Encode(hexBuf, b.scratch[:n])
b.buf.Write(hexBuf)
}
var TestHooks testHooks
type testHooks struct{}
func (testHooks) ResetLastDelta() {
lastDelta = time.Time{}
}

vendor/tailscale.com/util/cloudenv/cloudenv.go generated vendored Normal file

@@ -0,0 +1,209 @@
// Copyright (c) Tailscale Inc & AUTHORS
// SPDX-License-Identifier: BSD-3-Clause
// Package cloudenv reports which known cloud environment we're running in.
package cloudenv
import (
"context"
"encoding/json"
"log"
"math/rand/v2"
"net"
"net/http"
"os"
"runtime"
"strings"
"time"
"tailscale.com/syncs"
"tailscale.com/types/lazy"
)
// CommonNonRoutableMetadataIP is the IP address of the metadata server
// on Amazon EC2, Google Compute Engine, and Azure. It's not routable.
// (169.254.0.0/16 is a Link Local range: RFC 3927)
const CommonNonRoutableMetadataIP = "169.254.169.254"
// GoogleMetadataAndDNSIP is the metadata IP used by Google Cloud.
// It's also the *.internal DNS server, and proxies to 8.8.8.8.
const GoogleMetadataAndDNSIP = "169.254.169.254"
// AWSResolverIP is the IP address of the AWS DNS server.
// See https://docs.aws.amazon.com/vpc/latest/userguide/vpc-dns.html
const AWSResolverIP = "169.254.169.253"
// AzureResolverIP is Azure's DNS resolver IP.
// See https://docs.microsoft.com/en-us/azure/virtual-network/what-is-ip-address-168-63-129-16
const AzureResolverIP = "168.63.129.16"
// Cloud is a recognized cloud environment with properties that
// Tailscale can specialize for in places.
type Cloud string
const (
AWS = Cloud("aws") // Amazon Web Services (EC2 in particular)
Azure = Cloud("azure") // Microsoft Azure
GCP = Cloud("gcp") // Google Cloud
DigitalOcean = Cloud("digitalocean") // DigitalOcean
)
// ResolverIP returns the cloud host's recursive DNS server or the
// empty string if not available.
func (c Cloud) ResolverIP() string {
switch c {
case GCP:
return GoogleMetadataAndDNSIP
case AWS:
return AWSResolverIP
case Azure:
return AzureResolverIP
case DigitalOcean:
return getDigitalOceanResolver()
}
return ""
}
var (
// https://docs.digitalocean.com/support/check-your-droplets-network-configuration/
digitalOceanResolvers = []string{"67.207.67.2", "67.207.67.3"}
digitalOceanResolver lazy.SyncValue[string]
)
func getDigitalOceanResolver() string {
// Randomly select one of the available resolvers so we don't overload
// one of them by sending all traffic there.
return digitalOceanResolver.Get(func() string {
return digitalOceanResolvers[rand.IntN(len(digitalOceanResolvers))]
})
}
// HasInternalTLD reports whether c is a cloud environment
// whose ResolverIP serves *.internal records.
func (c Cloud) HasInternalTLD() bool {
switch c {
case GCP, AWS:
return true
}
return false
}
var cloudAtomic syncs.AtomicValue[Cloud]
// Get returns the current cloud, or the empty string if unknown.
func Get() Cloud {
if c, ok := cloudAtomic.LoadOk(); ok {
return c
}
c := getCloud()
cloudAtomic.Store(c) // even if empty
return c
}
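
A small sketch, not part of the vendored file, of how a caller might pick a DNS resolver based on the detected cloud; resolverForCloud is an illustrative name.

package example

import (
	"tailscale.com/util/cloudenv"
)

// resolverForCloud returns the cloud's recursive DNS server,
// or the empty string when no known cloud is detected.
func resolverForCloud() string {
	c := cloudenv.Get()
	if c == "" {
		return "" // not a recognized cloud (or detection was skipped)
	}
	return c.ResolverIP()
}
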
func readFileTrimmed(name string) string {
v, _ := os.ReadFile(name)
return strings.TrimSpace(string(v))
}
func getCloud() Cloud {
var hitMetadata bool
switch runtime.GOOS {
case "android", "ios", "darwin":
// Assume these aren't running on a cloud.
return ""
case "linux":
biosVendor := readFileTrimmed("/sys/class/dmi/id/bios_vendor")
if biosVendor == "Amazon EC2" || strings.HasSuffix(biosVendor, ".amazon") {
return AWS
}
sysVendor := readFileTrimmed("/sys/class/dmi/id/sys_vendor")
if sysVendor == "DigitalOcean" {
return DigitalOcean
}
// TODO(andrew): "Vultr" is also valid if we need it
prod := readFileTrimmed("/sys/class/dmi/id/product_name")
if prod == "Google Compute Engine" {
return GCP
}
if prod == "Google" { // old GCP VMs, it seems
hitMetadata = true
}
if prod == "Virtual Machine" || biosVendor == "Microsoft Corporation" {
// Azure, or maybe all Hyper-V?
hitMetadata = true
}
default:
// TODO(bradfitz): use Win32_SystemEnclosure from WMI or something on
// Windows to see if it's a physical machine and skip the cloud check
// early. Otherwise use similar clues as Linux about whether we should
// burn up to 2 seconds waiting for a metadata server that might not be
// there. And for BSDs, look where the /sys stuff is.
return ""
}
if !hitMetadata {
return ""
}
const maxWait = 2 * time.Second
tr := &http.Transport{
DisableKeepAlives: true,
Dial: (&net.Dialer{
Timeout: maxWait,
}).Dial,
}
ctx, cancel := context.WithTimeout(context.Background(), maxWait)
defer cancel()
// We want to hit CommonNonRoutableMetadataIP to see if we're on AWS, GCP,
// or Azure. All three (and many others) use the same metadata IP.
//
// But to avoid triggering the AWS CloudWatch "MetadataNoToken" metric (for which
// there might be an alert registered?), make our initial request be a token
// request. This only works on AWS, but the failing HTTP response on other clouds gives
// us enough clues about which cloud we're on.
req, err := http.NewRequestWithContext(ctx, "PUT", "http://"+CommonNonRoutableMetadataIP+"/latest/api/token", strings.NewReader(""))
if err != nil {
log.Printf("cloudenv: [unexpected] error creating request: %v", err)
return ""
}
req.Header.Set("X-Aws-Ec2-Metadata-Token-Ttl-Seconds", "5")
res, err := tr.RoundTrip(req)
if err != nil {
return ""
}
res.Body.Close()
if res.Header.Get("Metadata-Flavor") == "Google" {
return GCP
}
server := res.Header.Get("Server")
if server == "EC2ws" {
return AWS
}
if strings.HasPrefix(server, "Microsoft") {
// e.g. "Microsoft-IIS/10.0"
req, _ := http.NewRequestWithContext(ctx, "GET", "http://"+CommonNonRoutableMetadataIP+"/metadata/instance/compute?api-version=2021-02-01", nil)
req.Header.Set("Metadata", "true")
res, err := tr.RoundTrip(req)
if err != nil {
return ""
}
defer res.Body.Close()
var meta struct {
AzEnvironment string `json:"azEnvironment"`
}
if err := json.NewDecoder(res.Body).Decode(&meta); err != nil {
return ""
}
if strings.HasPrefix(meta.AzEnvironment, "Azure") {
return Azure
}
return ""
}
// TODO: more, as needed.
return ""
}

vendor/tailscale.com/util/cmpver/version.go generated vendored Normal file

@@ -0,0 +1,117 @@
// Copyright (c) Tailscale Inc & AUTHORS
// SPDX-License-Identifier: BSD-3-Clause
// Package cmpver implements a variant of debian version number
// comparison.
//
// A version is a string consisting of alternating non-numeric and
// numeric fields. When comparing two versions, each one is broken
// down into its respective fields, and the fields are compared
// pairwise. The comparison is lexicographic for non-numeric fields,
// numeric for numeric fields. The first non-equal field pair
// determines the ordering of the two versions.
//
// This comparison scheme is a simplified version of Debian's version
// number comparisons. Debian differs in a few details of
// lexicographical field comparison, where certain characters have
// special meaning and ordering. We don't need that, because Tailscale
// version numbers don't need it.
package cmpver
import (
"fmt"
"strconv"
"strings"
)
// Less reports whether v1 is less than v2.
//
// Note that "12" is less than "12.0".
func Less(v1, v2 string) bool {
return Compare(v1, v2) < 0
}
// LessEq reports whether v1 is less than or equal to v2.
//
// Note that "12" is less than "12.0".
func LessEq(v1, v2 string) bool {
return Compare(v1, v2) <= 0
}
func isnum(r rune) bool {
return r >= '0' && r <= '9'
}
func notnum(r rune) bool {
return !isnum(r)
}
// Compare returns an integer comparing two strings as version numbers.
// The result will be -1, 0, or 1 representing the sign of v1 - v2:
//
// Compare(v1, v2) < 0 if v1 < v2
// == 0 if v1 == v2
// > 0 if v1 > v2
func Compare(v1, v2 string) int {
var (
f1, f2 string
n1, n2 uint64
err error
)
for v1 != "" || v2 != "" {
// Compare the non-numeric character run lexicographically.
f1, v1 = splitPrefixFunc(v1, notnum)
f2, v2 = splitPrefixFunc(v2, notnum)
if res := strings.Compare(f1, f2); res != 0 {
return res
}
// Compare the numeric character run numerically.
f1, v1 = splitPrefixFunc(v1, isnum)
f2, v2 = splitPrefixFunc(v2, isnum)
// ParseUint refuses to parse empty strings, which would only
// happen if we reached end-of-string. We follow the Debian
// convention that empty strings mean zero, because
// empirically that produces reasonable-feeling comparison
// behavior.
n1 = 0
if f1 != "" {
n1, err = strconv.ParseUint(f1, 10, 64)
if err != nil {
panic(fmt.Sprintf("all-number string %q didn't parse as string: %s", f1, err))
}
}
n2 = 0
if f2 != "" {
n2, err = strconv.ParseUint(f2, 10, 64)
if err != nil {
panic(fmt.Sprintf("all-number string %q didn't parse as string: %s", f2, err))
}
}
switch {
case n1 == n2:
case n1 < n2:
return -1
case n1 > n2:
return 1
}
}
// Only way to reach here is if v1 and v2 run out of fields
// simultaneously - i.e. exactly equal versions.
return 0
}
// splitPrefixFunc splits s at the first rune where f(rune) is false.
func splitPrefixFunc(s string, f func(rune) bool) (string, string) {
for i, r := range s {
if !f(r) {
return s[:i], s[i:]
}
}
return s, s[:0]
}
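
A worked example, not part of the package, illustrating the field-wise comparison described in the package comment.

package main

import (
	"fmt"

	"tailscale.com/util/cmpver"
)

func main() {
	fmt.Println(cmpver.Compare("1.2", "1.10"))    // -1: numeric fields compare numerically, 2 < 10
	fmt.Println(cmpver.Less("12", "12.0"))        // true: once "12" is exhausted, "" sorts before "."
	fmt.Println(cmpver.Compare("1.2.3", "1.2.3")) // 0: every field pair is equal
	fmt.Println(cmpver.Less("Ubuntu 20.04", "Ubuntu 20.10")) // true: "Ubuntu " ties, then 4 < 10
}
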

vendor/tailscale.com/util/ctxkey/key.go generated vendored Normal file

@@ -0,0 +1,135 @@
// Copyright (c) Tailscale Inc & AUTHORS
// SPDX-License-Identifier: BSD-3-Clause
// Package ctxkey provides type-safe key-value pairs for use with [context.Context].
//
// Example usage:
//
// // Create a context key.
// var TimeoutKey = ctxkey.New("mapreduce.Timeout", 5*time.Second)
//
// // Store a context value.
// ctx = mapreduce.TimeoutKey.WithValue(ctx, 10*time.Second)
//
// // Load a context value.
// timeout := mapreduce.TimeoutKey.Value(ctx)
// ... // use timeout of type time.Duration
//
// This is inspired by https://go.dev/issue/49189.
package ctxkey
import (
"context"
"fmt"
"reflect"
)
// Key is a generic key type associated with a specific value type.
//
// A zero Key is valid where the Value type itself is used as the context key.
// This pattern should only be used with locally declared Go types,
// otherwise different packages risk producing key conflicts.
//
// Example usage:
//
// type peerInfo struct { ... } // peerInfo is a locally declared type
// var peerInfoKey ctxkey.Key[peerInfo]
// ctx = peerInfoKey.WithValue(ctx, info) // store a context value
// info = peerInfoKey.Value(ctx) // load a context value
type Key[Value any] struct {
name *stringer[string]
defVal *Value
}
// New constructs a new context key with an associated value type
// where the default value for an unpopulated value is the provided value.
//
// The provided name is an arbitrary name only used for human debugging.
// As a convention, it is recommended that the name be the dot-delimited
// combination of the package name of the caller with the variable name.
// If the name is not provided, then the name of the Value type is used.
// Every key is unique, even if provided the same name.
//
// Example usage:
//
// package mapreduce
// var NumWorkersKey = ctxkey.New("mapreduce.NumWorkers", runtime.NumCPU())
func New[Value any](name string, defaultValue Value) Key[Value] {
// Allocate a new stringer to ensure that every invocation of New
// creates a universally unique context key even for the same name
// since newly allocated pointers are globally unique within a process.
key := Key[Value]{name: new(stringer[string])}
if name == "" {
name = reflect.TypeFor[Value]().String()
}
key.name.v = name
if v := reflect.ValueOf(defaultValue); v.IsValid() && !v.IsZero() {
key.defVal = &defaultValue
}
return key
}
// contextKey returns the context key to use.
func (key Key[Value]) contextKey() any {
if key.name == nil {
// Use the reflect.Type of the Value (implies key not created by New).
return reflect.TypeFor[Value]()
} else {
// Use the name pointer directly (implies key created by New).
return key.name
}
}
// WithValue returns a copy of parent in which the value associated with key is val.
//
// It is a type-safe equivalent of [context.WithValue].
func (key Key[Value]) WithValue(parent context.Context, val Value) context.Context {
return context.WithValue(parent, key.contextKey(), stringer[Value]{val})
}
// ValueOk returns the value in the context associated with this key
// and also reports whether it was present.
// If the value is not present, it returns the default value.
func (key Key[Value]) ValueOk(ctx context.Context) (v Value, ok bool) {
vv, ok := ctx.Value(key.contextKey()).(stringer[Value])
if !ok && key.defVal != nil {
vv.v = *key.defVal
}
return vv.v, ok
}
// Value returns the value in the context associated with this key.
// If the value is not present, it returns the default value.
func (key Key[Value]) Value(ctx context.Context) (v Value) {
v, _ = key.ValueOk(ctx)
return v
}
// Has reports whether the context has a value for this key.
func (key Key[Value]) Has(ctx context.Context) (ok bool) {
_, ok = key.ValueOk(ctx)
return ok
}
// String returns the name of the key.
func (key Key[Value]) String() string {
if key.name == nil {
return reflect.TypeFor[Value]().String()
}
return key.name.String()
}
// stringer implements [fmt.Stringer] on a generic T.
//
// This assists in debugging such that printing a context prints key and value.
// Note that the [context] package lacks a dependency on [reflect],
// so it cannot print arbitrary values. By implementing [fmt.Stringer],
// we functionally teach a context how to print itself.
//
// Wrapping values within a struct has an added bonus that interface kinds
// are properly handled. Without wrapping, we would be unable to distinguish
// between a nil value that was explicitly set or not.
// However, the presence of a stringer indicates an explicit nil value.
type stringer[T any] struct{ v T }
func (v stringer[T]) String() string { return fmt.Sprint(v.v) }
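
A runnable sketch, not part of the package, showing the default-value behavior; the key name "example.Timeout" follows the suggested package.Variable convention but is otherwise arbitrary.

package main

import (
	"context"
	"fmt"
	"time"

	"tailscale.com/util/ctxkey"
)

var timeoutKey = ctxkey.New("example.Timeout", 5*time.Second)

func main() {
	ctx := context.Background()
	fmt.Println(timeoutKey.Value(ctx)) // 5s: key unset, default returned
	ctx = timeoutKey.WithValue(ctx, 10*time.Second)
	v, ok := timeoutKey.ValueOk(ctx)
	fmt.Println(v, ok)               // 10s true
	fmt.Println(timeoutKey.Has(ctx)) // true
}
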

vendor/tailscale.com/util/deephash/debug.go generated vendored Normal file

@@ -0,0 +1,37 @@
// Copyright (c) Tailscale Inc & AUTHORS
// SPDX-License-Identifier: BSD-3-Clause
//go:build deephash_debug
package deephash
import "fmt"
func (h *hasher) HashBytes(b []byte) {
fmt.Printf("B(%q)+", b)
h.Block512.HashBytes(b)
}
func (h *hasher) HashString(s string) {
fmt.Printf("S(%q)+", s)
h.Block512.HashString(s)
}
func (h *hasher) HashUint8(n uint8) {
fmt.Printf("U8(%d)+", n)
h.Block512.HashUint8(n)
}
func (h *hasher) HashUint16(n uint16) {
fmt.Printf("U16(%d)+", n)
h.Block512.HashUint16(n)
}
func (h *hasher) HashUint32(n uint32) {
fmt.Printf("U32(%d)+", n)
h.Block512.HashUint32(n)
}
func (h *hasher) HashUint64(n uint64) {
fmt.Printf("U64(%d)+", n)
h.Block512.HashUint64(n)
}
func (h *hasher) Sum(b []byte) []byte {
fmt.Println("FIN")
return h.Block512.Sum(b)
}

vendor/tailscale.com/util/deephash/deephash.go generated vendored Normal file

@@ -0,0 +1,732 @@
// Copyright (c) Tailscale Inc & AUTHORS
// SPDX-License-Identifier: BSD-3-Clause
// Package deephash hashes a Go value recursively, in a predictable order,
// without looping. The hash is only valid within the lifetime of a program.
// Users should not store the hash on disk or send it over the network.
// The hash is sufficiently strong and unique such that
// Hash(&x) == Hash(&y) is an appropriate replacement for x == y.
//
// The definition of equality is identical to reflect.DeepEqual except:
// - Floating-point values are compared based on the raw bits,
// which means that NaNs (with the same bit pattern) are treated as equal.
// - time.Time are compared based on whether they are the same instant in time
// and also in the same zone offset. Monotonic measurements and zone names
// are ignored as part of the hash.
// - netip.Addr are compared based on a shallow comparison of the struct.
//
// WARNING: This package, like most of the tailscale.com Go module,
// should be considered Tailscale-internal; we make no API promises.
//
// # Cycle detection
//
// This package correctly handles cycles in the value graph,
// but in a way that is potentially pathological in some situations.
//
// The algorithm for cycle detection operates by
// pushing a pointer onto a stack whenever deephash is visiting a pointer and
// popping the pointer from the stack after deephash is leaving the pointer.
// Before visiting a new pointer, deephash checks whether it has already been
// visited on the pointer stack. If so, it hashes the index of the pointer
// on the stack and avoids visiting the pointer.
//
// This algorithm is guaranteed to detect cycles, but may expand pointers
// more often than a potential alternate algorithm that remembers all pointers
// ever visited in a map. The current algorithm uses O(D) memory, where D
// is the maximum depth of the recursion, while the alternate algorithm
// would use O(P) memory where P is all pointers ever seen, which can be a lot,
// and most of which may have nothing to do with cycles.
// Also, the alternate algorithm has to deal with challenges of producing
// deterministic results when pointers are visited in non-deterministic ways
// such as when iterating through a Go map. The stack-based algorithm avoids
// this challenge since the stack is always deterministic regardless of
// non-deterministic iteration order of Go maps.
//
// To concretely see how this algorithm can be pathological,
// consider the following data structure:
//
// var big *Item = ... // some large data structure that is slow to hash
// var manyBig []*Item
// for i := range 1000 {
// manyBig = append(manyBig, &big)
// }
// deephash.Hash(manyBig)
//
// Here, the manyBig data structure is not even cyclic.
// We have the same big *Item being stored multiple times in a []*Item.
// When deephash hashes []*Item, it hashes each individual *Item
// not realizing that it had just done the computation earlier.
// To avoid the pathological situation, Item should implement [SelfHasher] and
// memoize attempts to hash itself.
package deephash
// TODO: Add option to teach deephash to memoize the Hash result of particular types?
import (
"crypto/sha256"
"encoding/binary"
"encoding/hex"
"fmt"
"reflect"
"sync"
"time"
"tailscale.com/util/hashx"
"tailscale.com/util/set"
)
// There is much overlap between the theory of serialization and hashing.
// A hash (useful for determining equality) can be produced by printing a value
// and hashing the output. The format must:
// * be deterministic such that the same value hashes to the same output, and
// * be parsable such that the same value can be reproduced by the output.
//
// The logic below hashes a value by printing it to a hash.Hash.
// To be parsable, it assumes that we know the Go type of each value:
// * scalar types (e.g., bool or int32) are directly printed as their
// underlying memory representation.
// * list types (e.g., strings and slices) are prefixed by a
// fixed-width length field, followed by the contents of the list.
// * slices, arrays, and structs print each element/field consecutively.
// * interfaces print with a 1-byte prefix indicating whether it is nil.
// If non-nil, it is followed by a fixed-width field of the type index,
// followed by the format of the underlying value.
// * pointers print with a 1-byte prefix indicating whether the pointer is
// 1) nil, 2) previously seen, or 3) newly seen. Previously seen pointers are
// followed by a fixed-width field with the index of the previous pointer.
// Newly seen pointers are followed by the format of the underlying value.
// * maps print with a 1-byte prefix indicating whether the map pointer is
// 1) nil, 2) previously seen, or 3) newly seen. Previously seen pointers
// are followed by a fixed-width field of the index of the previous pointer.
// Newly seen maps are printed with a fixed-width length field, followed by
// a fixed-width field with the XOR of the hash of every map entry.
// With a sufficiently strong hash, this value is theoretically "parsable"
// by looking up the hash in a magical map that returns the set of entries
// for that given hash.
// SelfHasher is implemented by types that can compute their own hash
// by writing values through the provided [Hasher] parameter.
// Implementations must not leak the provided [Hasher].
//
// If the implementation of SelfHasher recursively calls [deephash.Hash],
// then infinite recursion is quite likely to occur.
// To avoid this, use a type definition to drop methods before calling [deephash.Hash]:
//
// func (v *MyType) Hash(h deephash.Hasher) {
// v.hashMu.Lock()
// defer v.hashMu.Unlock()
// if v.dirtyHash {
// type MyTypeWithoutMethods MyType // type define MyType to drop Hash method
// v.dirtyHash = false // clear out dirty bit to avoid hashing over it
// v.hashSum = deephash.Sum{} // clear out hashSum to avoid hashing over it
// v.hashSum = deephash.Hash((*MyTypeWithoutMethods)(v))
// }
// h.HashSum(v.hashSum)
// }
//
// In the above example, we acquire a lock since it is possible that deephash
// is called in a concurrent manner, which implies that MyType.Hash may also
// be called in a concurrent manner. Whether this lock is necessary is
// application-dependent and left as an exercise to the reader.
// Also, the example assumes that dirtyHash is set elsewhere by application
// logic whenever a mutation is made to MyType that would alter the hash.
type SelfHasher interface {
Hash(Hasher)
}
// Hasher is a value passed to [SelfHasher.Hash] that allow implementations
// to hash themselves in a structured manner.
type Hasher struct{ h *hashx.Block512 }
// HashBytes hashes a sequence of bytes b.
// The length of b is not explicitly hashed.
func (h Hasher) HashBytes(b []byte) { h.h.HashBytes(b) }
// HashString hashes the string data of s
// The length of s is not explicitly hashed.
func (h Hasher) HashString(s string) { h.h.HashString(s) }
// HashUint8 hashes a uint8.
func (h Hasher) HashUint8(n uint8) { h.h.HashUint8(n) }
// HashUint16 hashes a uint16.
func (h Hasher) HashUint16(n uint16) { h.h.HashUint16(n) }
// HashUint32 hashes a uint32.
func (h Hasher) HashUint32(n uint32) { h.h.HashUint32(n) }
// HashUint64 hashes a uint64.
func (h Hasher) HashUint64(n uint64) { h.h.HashUint64(n) }
// HashSum hashes a [Sum].
func (h Hasher) HashSum(s Sum) {
// NOTE: Avoid calling h.HashBytes since it escapes b,
// which would force s to be heap allocated.
h.h.HashUint64(binary.LittleEndian.Uint64(s.sum[0:8]))
h.h.HashUint64(binary.LittleEndian.Uint64(s.sum[8:16]))
h.h.HashUint64(binary.LittleEndian.Uint64(s.sum[16:24]))
h.h.HashUint64(binary.LittleEndian.Uint64(s.sum[24:32]))
}
// hasher is reusable state for hashing a value.
// Get one via hasherPool.
type hasher struct {
hashx.Block512
visitStack visitStack
}
var hasherPool = &sync.Pool{
New: func() any { return new(hasher) },
}
func (h *hasher) reset() {
if h.Block512.Hash == nil {
h.Block512.Hash = sha256.New()
}
h.Block512.Reset()
}
// hashType hashes a reflect.Type.
// The hash is only consistent within the lifetime of a program.
func (h *hasher) hashType(t reflect.Type) {
// This approach relies on reflect.Type always being backed by a unique
// *reflect.rtype pointer. A safer approach is to use a global sync.Map
// that maps reflect.Type to some arbitrary and unique index.
// While safer, it requires global state with memory that can never be GC'd.
rtypeAddr := reflect.ValueOf(t).Pointer() // address of *reflect.rtype
h.HashUint64(uint64(rtypeAddr))
}
func (h *hasher) sum() (s Sum) {
h.Sum(s.sum[:0])
return s
}
// Sum is an opaque checksum type that is comparable.
type Sum struct {
sum [sha256.Size]byte
}
func (s1 *Sum) xor(s2 Sum) {
for i := range sha256.Size {
s1.sum[i] ^= s2.sum[i]
}
}
func (s Sum) String() string {
// Note: if we change this, keep in sync with AppendTo
return hex.EncodeToString(s.sum[:])
}
// AppendTo appends the string encoding of this sum (as returned by the String
// method) to the provided byte slice and returns the extended buffer.
func (s Sum) AppendTo(b []byte) []byte {
// TODO: switch to upstream implementation if accepted:
// https://github.com/golang/go/issues/53693
var lb [len(s.sum) * 2]byte
hex.Encode(lb[:], s.sum[:])
return append(b, lb[:]...)
}
var (
seedOnce sync.Once
seed uint64
)
func initSeed() {
seed = uint64(time.Now().UnixNano())
}
// Hash returns the hash of v.
func Hash[T any](v *T) Sum {
h := hasherPool.Get().(*hasher)
defer hasherPool.Put(h)
h.reset()
seedOnce.Do(initSeed)
h.HashUint64(seed)
// Always treat the Hash input as if it were an interface by including
// a hash of the type. This ensures that hashing of two different types
// but with the same value structure produces different hashes.
t := reflect.TypeFor[T]()
h.hashType(t)
if v == nil {
h.HashUint8(0) // indicates nil
} else {
h.HashUint8(1) // indicates visiting pointer element
p := pointerOf(reflect.ValueOf(v))
hash := lookupTypeHasher(t)
hash(h, p)
}
return h.sum()
}
// Option is an optional argument to HasherForType.
type Option interface {
isOption()
}
type fieldFilterOpt struct {
t reflect.Type
fields set.Set[string]
includeOnMatch bool // true to include fields, false to exclude them
}
func (fieldFilterOpt) isOption() {}
func (f fieldFilterOpt) filterStructField(sf reflect.StructField) (include bool) {
if f.fields.Contains(sf.Name) {
return f.includeOnMatch
}
return !f.includeOnMatch
}
// IncludeFields returns an option that modifies the hashing for T to only
// include the named struct fields.
//
// T must be a struct type, and must match the type of the value passed to
// HasherForType.
func IncludeFields[T any](fields ...string) Option {
return newFieldFilter[T](true, fields)
}
// ExcludeFields returns an option that modifies the hashing for T to include
// all struct fields of T except those provided in fields.
//
// T must be a struct type, and must match the type of the value passed to
// HasherForType.
func ExcludeFields[T any](fields ...string) Option {
return newFieldFilter[T](false, fields)
}
func newFieldFilter[T any](include bool, fields []string) Option {
t := reflect.TypeFor[T]()
fieldSet := set.Set[string]{}
for _, f := range fields {
if _, ok := t.FieldByName(f); !ok {
panic(fmt.Sprintf("unknown field %q for type %v", f, t))
}
fieldSet.Add(f)
}
return fieldFilterOpt{t, fieldSet, include}
}
// HasherForType returns a hash that is specialized for the provided type.
//
// HasherForType panics if the opts are invalid for the provided type.
//
// Currently, at most one option can be provided (IncludeFields or
// ExcludeFields) and its type must match the type of T. Those restrictions may
// be removed in the future, along with documentation about their precedence
// when combined.
func HasherForType[T any](opts ...Option) func(*T) Sum {
seedOnce.Do(initSeed)
if len(opts) > 1 {
panic("HasherForType only accepts one optional argument") // for now
}
t := reflect.TypeFor[T]()
var hash typeHasherFunc
for _, o := range opts {
switch o := o.(type) {
default:
panic(fmt.Sprintf("unknown HasherOpt %T", o))
case fieldFilterOpt:
if t.Kind() != reflect.Struct {
panic("HasherForStructTypeWithFieldFilter requires T of kind struct")
}
if t != o.t {
panic(fmt.Sprintf("field filter for type %v does not match HasherForType type %v", o.t, t))
}
hash = makeStructHasher(t, o.filterStructField)
}
}
if hash == nil {
hash = lookupTypeHasher(t)
}
return func(v *T) (s Sum) {
// This logic is identical to Hash, but with a few statements hoisted out of the returned closure.
h := hasherPool.Get().(*hasher)
defer hasherPool.Put(h)
h.reset()
h.HashUint64(seed)
h.hashType(t)
if v == nil {
h.HashUint8(0) // indicates nil
} else {
h.HashUint8(1) // indicates visiting pointer element
p := pointerOf(reflect.ValueOf(v))
hash(h, p)
}
return h.sum()
}
}
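
A sketch, not part of the package, of a field-filtered hasher; the Node type and its fields are made up for illustration.

package main

import (
	"fmt"
	"time"

	"tailscale.com/util/deephash"
)

type Node struct {
	Name     string
	Addr     string
	lastSeen time.Time // not part of the node's identity
}

// hashNodeIdentity hashes only the Name and Addr fields of a Node.
var hashNodeIdentity = deephash.HasherForType[Node](deephash.IncludeFields[Node]("Name", "Addr"))

func main() {
	n := Node{Name: "n1", Addr: "100.64.0.1", lastSeen: time.Now()}
	a := hashNodeIdentity(&n)
	n.lastSeen = n.lastSeen.Add(time.Hour) // excluded field: sum unchanged
	fmt.Println(a == hashNodeIdentity(&n)) // true
	n.Name = "n2"                          // included field: sum changes
	fmt.Println(a == hashNodeIdentity(&n)) // false
}
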
// Update sets last to the hash of v and reports whether its value changed.
func Update[T any](last *Sum, v *T) (changed bool) {
sum := Hash(v)
changed = sum != *last
if changed {
*last = sum
}
return changed
}
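
A short sketch, not part of the package, of the common change-detection pattern built on Update; Config is a made-up type.

package main

import (
	"fmt"

	"tailscale.com/util/deephash"
)

type Config struct {
	Name  string
	Ports []int
}

func main() {
	c := Config{Name: "a", Ports: []int{80, 443}}
	var last deephash.Sum
	fmt.Println(deephash.Update(&last, &c)) // true: first hash differs from the zero Sum
	fmt.Println(deephash.Update(&last, &c)) // false: value unchanged
	c.Ports = append(c.Ports, 8080)
	fmt.Println(deephash.Update(&last, &c)) // true: value changed, last is updated
}
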
// typeHasherFunc hashes the value pointed at by p for a given type.
// For example, if t is a bool, then p is a *bool.
// The provided pointer must always be non-nil.
type typeHasherFunc func(h *hasher, p pointer)
var typeHasherCache sync.Map // map[reflect.Type]typeHasherFunc
func lookupTypeHasher(t reflect.Type) typeHasherFunc {
if v, ok := typeHasherCache.Load(t); ok {
return v.(typeHasherFunc)
}
hash := makeTypeHasher(t)
v, _ := typeHasherCache.LoadOrStore(t, hash)
return v.(typeHasherFunc)
}
func makeTypeHasher(t reflect.Type) typeHasherFunc {
// Types with specific hashing.
switch t {
case timeTimeType:
return hashTime
case netipAddrType:
return hashAddr
}
// Types that implement their own hashing.
if t.Kind() != reflect.Pointer && t.Kind() != reflect.Interface {
// A method can be implemented on either the value receiver or pointer receiver.
if t.Implements(selfHasherType) || reflect.PointerTo(t).Implements(selfHasherType) {
return makeSelfHasher(t)
}
}
// Types that can have their memory representation directly hashed.
if typeIsMemHashable(t) {
return makeMemHasher(t.Size())
}
switch t.Kind() {
case reflect.String:
return hashString
case reflect.Array:
return makeArrayHasher(t)
case reflect.Slice:
return makeSliceHasher(t)
case reflect.Struct:
return makeStructHasher(t, keepAllStructFields)
case reflect.Map:
return makeMapHasher(t)
case reflect.Pointer:
return makePointerHasher(t)
case reflect.Interface:
return makeInterfaceHasher(t)
default: // Func, Chan, UnsafePointer
return func(*hasher, pointer) {}
}
}
func hashTime(h *hasher, p pointer) {
// Include the zone offset (but not the name) to keep
// Hash(t1) == Hash(t2) being semantically equivalent to
// t1.Format(time.RFC3339Nano) == t2.Format(time.RFC3339Nano).
t := *p.asTime()
_, offset := t.Zone()
h.HashUint64(uint64(t.Unix()))
h.HashUint32(uint32(t.Nanosecond()))
h.HashUint32(uint32(offset))
}
func hashAddr(h *hasher, p pointer) {
// The formatting of netip.Addr covers the
// IP version, the address, and the optional zone name (for v6).
// This is equivalent to a1.MarshalBinary() == a2.MarshalBinary().
ip := *p.asAddr()
switch {
case !ip.IsValid():
h.HashUint64(0)
case ip.Is4():
b := ip.As4()
h.HashUint64(4)
h.HashUint32(binary.LittleEndian.Uint32(b[:]))
case ip.Is6():
b := ip.As16()
z := ip.Zone()
h.HashUint64(16 + uint64(len(z)))
h.HashUint64(binary.LittleEndian.Uint64(b[:8]))
h.HashUint64(binary.LittleEndian.Uint64(b[8:]))
h.HashString(z)
}
}
func makeSelfHasher(t reflect.Type) typeHasherFunc {
return func(h *hasher, p pointer) {
p.asValue(t).Interface().(SelfHasher).Hash(Hasher{&h.Block512})
}
}
func hashString(h *hasher, p pointer) {
s := *p.asString()
h.HashUint64(uint64(len(s)))
h.HashString(s)
}
func makeMemHasher(n uintptr) typeHasherFunc {
return func(h *hasher, p pointer) {
h.HashBytes(p.asMemory(n))
}
}
func makeArrayHasher(t reflect.Type) typeHasherFunc {
var once sync.Once
var hashElem typeHasherFunc
init := func() {
hashElem = lookupTypeHasher(t.Elem())
}
n := t.Len() // number of array elements
nb := t.Elem().Size() // byte size of each array element
return func(h *hasher, p pointer) {
once.Do(init)
for i := range n {
hashElem(h, p.arrayIndex(i, nb))
}
}
}
func makeSliceHasher(t reflect.Type) typeHasherFunc {
nb := t.Elem().Size() // byte size of each slice element
if typeIsMemHashable(t.Elem()) {
return func(h *hasher, p pointer) {
pa := p.sliceArray()
if pa.isNil() {
h.HashUint8(0) // indicates nil
return
}
h.HashUint8(1) // indicates visiting slice
n := p.sliceLen()
b := pa.asMemory(uintptr(n) * nb)
h.HashUint64(uint64(n))
h.HashBytes(b)
}
}
var once sync.Once
var hashElem typeHasherFunc
init := func() {
hashElem = lookupTypeHasher(t.Elem())
if typeIsRecursive(t) {
hashElemDefault := hashElem
hashElem = func(h *hasher, p pointer) {
if idx, ok := h.visitStack.seen(p.p); ok {
h.HashUint8(2) // indicates cycle
h.HashUint64(uint64(idx))
return
}
h.HashUint8(1) // indicates visiting slice element
h.visitStack.push(p.p)
defer h.visitStack.pop(p.p)
hashElemDefault(h, p)
}
}
}
return func(h *hasher, p pointer) {
pa := p.sliceArray()
if pa.isNil() {
h.HashUint8(0) // indicates nil
return
}
once.Do(init)
h.HashUint8(1) // indicates visiting slice
n := p.sliceLen()
h.HashUint64(uint64(n))
for i := range n {
pe := pa.arrayIndex(i, nb)
hashElem(h, pe)
}
}
}
func keepAllStructFields(keepField reflect.StructField) bool { return true }
func makeStructHasher(t reflect.Type, keepField func(reflect.StructField) bool) typeHasherFunc {
type fieldHasher struct {
idx int // index of field for reflect.Type.Field(n); negative if memory is directly hashable
keep bool
hash typeHasherFunc // only valid if idx is not negative
offset uintptr
size uintptr
}
var once sync.Once
var fields []fieldHasher
init := func() {
for i, numField := 0, t.NumField(); i < numField; i++ {
sf := t.Field(i)
f := fieldHasher{i, keepField(sf), nil, sf.Offset, sf.Type.Size()}
if f.keep && typeIsMemHashable(sf.Type) {
f.idx = -1
}
// Combine with previous field if both contiguous and mem-hashable.
if f.idx < 0 && len(fields) > 0 {
if last := &fields[len(fields)-1]; last.idx < 0 && last.offset+last.size == f.offset {
last.size += f.size
continue
}
}
fields = append(fields, f)
}
for i, f := range fields {
if f.idx >= 0 {
fields[i].hash = lookupTypeHasher(t.Field(f.idx).Type)
}
}
}
return func(h *hasher, p pointer) {
once.Do(init)
for _, field := range fields {
if !field.keep {
continue
}
pf := p.structField(field.idx, field.offset, field.size)
if field.idx < 0 {
h.HashBytes(pf.asMemory(field.size))
} else {
field.hash(h, pf)
}
}
}
}
func makeMapHasher(t reflect.Type) typeHasherFunc {
var once sync.Once
var hashKey, hashValue typeHasherFunc
var isRecursive bool
init := func() {
hashKey = lookupTypeHasher(t.Key())
hashValue = lookupTypeHasher(t.Elem())
isRecursive = typeIsRecursive(t)
}
return func(h *hasher, p pointer) {
v := p.asValue(t).Elem() // reflect.Map kind
if v.IsNil() {
h.HashUint8(0) // indicates nil
return
}
once.Do(init)
if isRecursive {
pm := v.UnsafePointer() // underlying pointer of map
if idx, ok := h.visitStack.seen(pm); ok {
h.HashUint8(2) // indicates cycle
h.HashUint64(uint64(idx))
return
}
h.visitStack.push(pm)
defer h.visitStack.pop(pm)
}
h.HashUint8(1) // indicates visiting map entries
h.HashUint64(uint64(v.Len()))
mh := mapHasherPool.Get().(*mapHasher)
defer mapHasherPool.Put(mh)
// Hash a map in a sort-free manner.
// It relies on a map being an unordered set of KV entries.
// So long as we hash each KV entry together, we can XOR all the
// individual hashes to produce a unique hash for the entire map.
k := mh.valKey.get(v.Type().Key())
e := mh.valElem.get(v.Type().Elem())
mh.sum = Sum{}
mh.h.visitStack = h.visitStack // always use the parent's visit stack to avoid cycles
for iter := v.MapRange(); iter.Next(); {
k.SetIterKey(iter)
e.SetIterValue(iter)
mh.h.reset()
hashKey(&mh.h, pointerOf(k.Addr()))
hashValue(&mh.h, pointerOf(e.Addr()))
mh.sum.xor(mh.h.sum())
}
h.HashBytes(mh.sum.sum[:])
}
}
func makePointerHasher(t reflect.Type) typeHasherFunc {
var once sync.Once
var hashElem typeHasherFunc
var isRecursive bool
init := func() {
hashElem = lookupTypeHasher(t.Elem())
isRecursive = typeIsRecursive(t)
}
return func(h *hasher, p pointer) {
pe := p.pointerElem()
if pe.isNil() {
h.HashUint8(0) // indicates nil
return
}
once.Do(init)
if isRecursive {
if idx, ok := h.visitStack.seen(pe.p); ok {
h.HashUint8(2) // indicates cycle
h.HashUint64(uint64(idx))
return
}
h.visitStack.push(pe.p)
defer h.visitStack.pop(pe.p)
}
h.HashUint8(1) // indicates visiting a pointer element
hashElem(h, pe)
}
}
func makeInterfaceHasher(t reflect.Type) typeHasherFunc {
return func(h *hasher, p pointer) {
v := p.asValue(t).Elem() // reflect.Interface kind
if v.IsNil() {
h.HashUint8(0) // indicates nil
return
}
h.HashUint8(1) // indicates visiting an interface value
v = v.Elem()
t := v.Type()
h.hashType(t)
va := reflect.New(t).Elem()
va.Set(v)
hashElem := lookupTypeHasher(t)
hashElem(h, pointerOf(va.Addr()))
}
}
type mapHasher struct {
h hasher
valKey valueCache
valElem valueCache
sum Sum
}
var mapHasherPool = &sync.Pool{
New: func() any { return new(mapHasher) },
}
type valueCache map[reflect.Type]reflect.Value
// get returns an addressable reflect.Value for the given type.
func (c *valueCache) get(t reflect.Type) reflect.Value {
v, ok := (*c)[t]
if !ok {
v = reflect.New(t).Elem()
if *c == nil {
*c = make(valueCache)
}
(*c)[t] = v
}
return v
}

vendor/tailscale.com/util/deephash/pointer.go generated vendored Normal file

@@ -0,0 +1,114 @@
// Copyright (c) Tailscale Inc & AUTHORS
// SPDX-License-Identifier: BSD-3-Clause
package deephash
import (
"net/netip"
"reflect"
"time"
"unsafe"
)
// unsafePointer is an untyped pointer.
// It is the caller's responsibility to call operations on the correct type.
//
// This pointer only ever points to a small set of kinds or types:
// time.Time, netip.Addr, string, array, slice, struct, map, pointer, interface,
// or a pointer to memory that is directly hashable.
//
// Arrays are represented as pointers to the first element.
// Structs are represented as pointers to the first field.
// Slices are represented as pointers to a slice header.
// Pointers are represented as pointers to a pointer.
//
// We do not support direct operations on maps and interfaces, and instead
// rely on pointer.asValue to convert the pointer back to a reflect.Value.
// Conversion of an unsafe.Pointer to reflect.Value guarantees that the
// read-only flag in the reflect.Value is unpopulated, avoiding panics that may
// otherwise have occurred since the value was obtained from an unexported field.
type unsafePointer struct{ p unsafe.Pointer }
func unsafePointerOf(v reflect.Value) unsafePointer {
return unsafePointer{v.UnsafePointer()}
}
func (p unsafePointer) isNil() bool {
return p.p == nil
}
// pointerElem dereferences a pointer.
// p must point to a pointer.
func (p unsafePointer) pointerElem() unsafePointer {
return unsafePointer{*(*unsafe.Pointer)(p.p)}
}
// sliceLen returns the slice length.
// p must point to a slice.
func (p unsafePointer) sliceLen() int {
return (*reflect.SliceHeader)(p.p).Len
}
// sliceArray returns a pointer to the underlying slice array.
// p must point to a slice.
func (p unsafePointer) sliceArray() unsafePointer {
return unsafePointer{unsafe.Pointer((*reflect.SliceHeader)(p.p).Data)}
}
// arrayIndex returns a pointer to an element in the array.
// p must point to an array.
func (p unsafePointer) arrayIndex(index int, size uintptr) unsafePointer {
return unsafePointer{unsafe.Add(p.p, uintptr(index)*size)}
}
// structField returns a pointer to a field in a struct.
// p must point to a struct.
func (p unsafePointer) structField(index int, offset, size uintptr) unsafePointer {
return unsafePointer{unsafe.Add(p.p, offset)}
}
// asString casts p as a *string.
func (p unsafePointer) asString() *string {
return (*string)(p.p)
}
// asTime casts p as a *time.Time.
func (p unsafePointer) asTime() *time.Time {
return (*time.Time)(p.p)
}
// asAddr casts p as a *netip.Addr.
func (p unsafePointer) asAddr() *netip.Addr {
return (*netip.Addr)(p.p)
}
// asValue casts p as a reflect.Value containing a pointer to value of t.
func (p unsafePointer) asValue(typ reflect.Type) reflect.Value {
return reflect.NewAt(typ, p.p)
}
// asMemory returns the memory pointed at by p for a specified size.
func (p unsafePointer) asMemory(size uintptr) []byte {
return unsafe.Slice((*byte)(p.p), size)
}
// visitStack is a stack of pointers visited.
// Pointers are pushed onto the stack when visited, and popped when leaving.
// The integer value is the depth at which the pointer was visited.
// The length of this stack should be zero after every hashing operation.
type visitStack map[unsafe.Pointer]int
func (v visitStack) seen(p unsafe.Pointer) (int, bool) {
idx, ok := v[p]
return idx, ok
}
func (v *visitStack) push(p unsafe.Pointer) {
if *v == nil {
*v = make(map[unsafe.Pointer]int)
}
(*v)[p] = len(*v)
}
func (v visitStack) pop(p unsafe.Pointer) {
delete(v, p)
}

vendor/tailscale.com/util/deephash/pointer_norace.go generated vendored Normal file

@@ -0,0 +1,13 @@
// Copyright (c) Tailscale Inc & AUTHORS
// SPDX-License-Identifier: BSD-3-Clause
//go:build !race
package deephash
import "reflect"
type pointer = unsafePointer
// pointerOf returns a pointer from v, which must be a reflect.Pointer.
func pointerOf(v reflect.Value) pointer { return unsafePointerOf(v) }

vendor/tailscale.com/util/deephash/pointer_race.go generated vendored Normal file

@@ -0,0 +1,99 @@
// Copyright (c) Tailscale Inc & AUTHORS
// SPDX-License-Identifier: BSD-3-Clause
//go:build race
package deephash
import (
"fmt"
"net/netip"
"reflect"
"time"
)
// pointer is a typed pointer that performs safety checks for every operation.
type pointer struct {
unsafePointer
t reflect.Type // type of pointed-at value; may be nil
n uintptr // size of valid memory after p
}
// pointerOf returns a pointer from v, which must be a reflect.Pointer.
func pointerOf(v reflect.Value) pointer {
assert(v.Kind() == reflect.Pointer, "got %v, want pointer", v.Kind())
te := v.Type().Elem()
return pointer{unsafePointerOf(v), te, te.Size()}
}
func (p pointer) pointerElem() pointer {
assert(p.t.Kind() == reflect.Pointer, "got %v, want pointer", p.t.Kind())
te := p.t.Elem()
return pointer{p.unsafePointer.pointerElem(), te, te.Size()}
}
func (p pointer) sliceLen() int {
assert(p.t.Kind() == reflect.Slice, "got %v, want slice", p.t.Kind())
return p.unsafePointer.sliceLen()
}
func (p pointer) sliceArray() pointer {
assert(p.t.Kind() == reflect.Slice, "got %v, want slice", p.t.Kind())
n := p.sliceLen()
assert(n >= 0, "got negative slice length %d", n)
ta := reflect.ArrayOf(n, p.t.Elem())
return pointer{p.unsafePointer.sliceArray(), ta, ta.Size()}
}
func (p pointer) arrayIndex(index int, size uintptr) pointer {
assert(p.t.Kind() == reflect.Array, "got %v, want array", p.t.Kind())
assert(0 <= index && index < p.t.Len(), "got array of size %d, want to access element %d", p.t.Len(), index)
assert(p.t.Elem().Size() == size, "got element size of %d, want %d", p.t.Elem().Size(), size)
te := p.t.Elem()
return pointer{p.unsafePointer.arrayIndex(index, size), te, te.Size()}
}
func (p pointer) structField(index int, offset, size uintptr) pointer {
assert(p.t.Kind() == reflect.Struct, "got %v, want struct", p.t.Kind())
assert(p.n >= offset, "got size of %d, want excessive start offset of %d", p.n, offset)
assert(p.n >= offset+size, "got size of %d, want excessive end offset of %d", p.n, offset+size)
if index < 0 {
return pointer{p.unsafePointer.structField(index, offset, size), nil, size}
}
sf := p.t.Field(index)
t := sf.Type
assert(sf.Offset == offset, "got offset of %d, want offset %d", sf.Offset, offset)
assert(t.Size() == size, "got size of %d, want size %d", t.Size(), size)
return pointer{p.unsafePointer.structField(index, offset, size), t, t.Size()}
}
func (p pointer) asString() *string {
assert(p.t.Kind() == reflect.String, "got %v, want string", p.t)
return p.unsafePointer.asString()
}
func (p pointer) asTime() *time.Time {
assert(p.t == timeTimeType, "got %v, want %v", p.t, timeTimeType)
return p.unsafePointer.asTime()
}
func (p pointer) asAddr() *netip.Addr {
assert(p.t == netipAddrType, "got %v, want %v", p.t, netipAddrType)
return p.unsafePointer.asAddr()
}
func (p pointer) asValue(typ reflect.Type) reflect.Value {
assert(p.t == typ, "got %v, want %v", p.t, typ)
return p.unsafePointer.asValue(typ)
}
func (p pointer) asMemory(size uintptr) []byte {
assert(p.n >= size, "got size of %d, want excessive size of %d", p.n, size)
return p.unsafePointer.asMemory(size)
}
func assert(b bool, f string, a ...any) {
if !b {
panic(fmt.Sprintf(f, a...))
}
}

vendor/tailscale.com/util/deephash/types.go generated vendored Normal file

@@ -0,0 +1,117 @@
// Copyright (c) Tailscale Inc & AUTHORS
// SPDX-License-Identifier: BSD-3-Clause
package deephash
import (
"net/netip"
"reflect"
"time"
)
var (
timeTimeType = reflect.TypeFor[time.Time]()
netipAddrType = reflect.TypeFor[netip.Addr]()
selfHasherType = reflect.TypeFor[SelfHasher]()
)
// typeIsSpecialized reports whether this type has specialized hashing.
// These are never memory hashable and never considered recursive.
func typeIsSpecialized(t reflect.Type) bool {
switch t {
case timeTimeType, netipAddrType:
return true
default:
if t.Kind() != reflect.Pointer && t.Kind() != reflect.Interface {
if t.Implements(selfHasherType) || reflect.PointerTo(t).Implements(selfHasherType) {
return true
}
}
return false
}
}
// typeIsMemHashable reports whether t can be hashed by directly hashing its
// contiguous bytes in memory (e.g. structs with gaps are not mem-hashable).
func typeIsMemHashable(t reflect.Type) bool {
if typeIsSpecialized(t) {
return false
}
if t.Size() == 0 {
return true
}
switch t.Kind() {
case reflect.Bool,
reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64,
reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr,
reflect.Float32, reflect.Float64,
reflect.Complex64, reflect.Complex128:
return true
case reflect.Array:
return typeIsMemHashable(t.Elem())
case reflect.Struct:
var sumFieldSize uintptr
for i, numField := 0, t.NumField(); i < numField; i++ {
sf := t.Field(i)
if !typeIsMemHashable(sf.Type) {
return false
}
sumFieldSize += sf.Type.Size()
}
return sumFieldSize == t.Size() // ensure no gaps
}
return false
}
// typeIsRecursive reports whether t has a path back to itself.
// For interfaces, it currently always reports true.
func typeIsRecursive(t reflect.Type) bool {
inStack := map[reflect.Type]bool{}
var visitType func(t reflect.Type) (isRecursiveSoFar bool)
visitType = func(t reflect.Type) (isRecursiveSoFar bool) {
// Check whether we have seen this type before.
if inStack[t] {
return true
}
inStack[t] = true
defer func() {
delete(inStack, t)
}()
// Types with specialized hashing are never considered recursive.
if typeIsSpecialized(t) {
return false
}
// Any type that is memory hashable must not be recursive since
// cycles can only occur if pointers are involved.
if typeIsMemHashable(t) {
return false
}
// Recursively check types that may contain pointers.
switch t.Kind() {
default:
panic("unhandled kind " + t.Kind().String())
case reflect.String, reflect.UnsafePointer, reflect.Func:
return false
case reflect.Interface:
// Assume the worst for now. TODO(bradfitz): in some cases
// we should be able to prove that it's not recursive. Not worth
// it for now.
return true
case reflect.Array, reflect.Chan, reflect.Pointer, reflect.Slice:
return visitType(t.Elem())
case reflect.Map:
return visitType(t.Key()) || visitType(t.Elem())
case reflect.Struct:
for i, numField := 0, t.NumField(); i < numField; i++ {
if visitType(t.Field(i).Type) {
return true
}
}
return false
}
}
return visitType(t)
}
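
To make the mem-hashable gap check concrete, here is a standalone sketch (not part of the vendored package; the struct types are invented for illustration) that reproduces the sum-of-field-sizes comparison used above:

package main

import (
	"fmt"
	"reflect"
)

// padded has an alignment gap between A and B (7 bytes on 64-bit),
// so hashing its raw memory would include undefined padding bytes.
type padded struct {
	A byte
	B int64
}

// packed has no gaps; its contiguous bytes fully describe its value.
type packed struct {
	A int64
	B int64
}

// hasGaps mirrors the struct case of typeIsMemHashable: a struct is only
// mem-hashable if the sum of its field sizes equals its total size.
func hasGaps(t reflect.Type) bool {
	var sum uintptr
	for i := 0; i < t.NumField(); i++ {
		sum += t.Field(i).Type.Size()
	}
	return sum != t.Size()
}

func main() {
	fmt.Println(hasGaps(reflect.TypeOf(padded{}))) // true: 1+8 < 16
	fmt.Println(hasGaps(reflect.TypeOf(packed{}))) // false: 8+8 == 16
}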

53
vendor/tailscale.com/util/dirwalk/dirwalk.go generated vendored Normal file
View File

@@ -0,0 +1,53 @@
// Copyright (c) Tailscale Inc & AUTHORS
// SPDX-License-Identifier: BSD-3-Clause
// Package dirwalk contains code to walk a directory.
package dirwalk
import (
"io"
"io/fs"
"os"
"go4.org/mem"
)
var osWalkShallow func(name mem.RO, fn WalkFunc) error
// WalkFunc is the callback type used with WalkShallow.
//
// The name and de are only valid for the duration of func's call
// and should not be retained.
type WalkFunc func(name mem.RO, de fs.DirEntry) error
// WalkShallow reads the entries in the named directory and calls fn for each.
// It does not recurse into subdirectories.
//
// If fn returns an error, iteration stops and WalkShallow returns that value.
//
// On Linux, WalkShallow does not allocate, so long as certain methods on the
// WalkFunc's DirEntry are not called which necessarily allocate.
func WalkShallow(dirName mem.RO, fn WalkFunc) error {
if f := osWalkShallow; f != nil {
return f(dirName, fn)
}
of, err := os.Open(dirName.StringCopy())
if err != nil {
return err
}
defer of.Close()
for {
fis, err := of.ReadDir(100)
for _, de := range fis {
if err := fn(mem.S(de.Name()), de); err != nil {
return err
}
}
if err != nil {
if err == io.EOF {
return nil
}
return err
}
}
}
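
A minimal caller sketch for WalkShallow; the ./testdata path is illustrative:

package main

import (
	"fmt"
	"io/fs"
	"log"

	"go4.org/mem"
	"tailscale.com/util/dirwalk"
)

func main() {
	// Count the entries of one directory without recursing.
	// name and de must not be retained past the callback.
	var n int
	err := dirwalk.WalkShallow(mem.S("./testdata"), func(name mem.RO, de fs.DirEntry) error {
		n++
		fmt.Printf("%s dir=%v\n", name.StringCopy(), de.IsDir())
		return nil
	})
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println("entries:", n)
}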

167
vendor/tailscale.com/util/dirwalk/dirwalk_linux.go generated vendored Normal file
View File

@@ -0,0 +1,167 @@
// Copyright (c) Tailscale Inc & AUTHORS
// SPDX-License-Identifier: BSD-3-Clause
package dirwalk
import (
"fmt"
"io/fs"
"os"
"path/filepath"
"sync"
"syscall"
"unsafe"
"go4.org/mem"
"golang.org/x/sys/unix"
)
func init() {
osWalkShallow = linuxWalkShallow
}
var dirEntPool = &sync.Pool{New: func() any { return new(linuxDirEnt) }}
func linuxWalkShallow(dirName mem.RO, fn WalkFunc) error {
const blockSize = 8 << 10
buf := make([]byte, blockSize) // stack-allocated; doesn't escape
nameb := mem.Append(buf[:0], dirName)
nameb = append(nameb, 0)
fd, err := sysOpen(nameb)
if err != nil {
return err
}
defer syscall.Close(fd)
bufp := 0 // starting read position in buf
nbuf := 0 // end valid data in buf
de := dirEntPool.Get().(*linuxDirEnt)
defer de.cleanAndPutInPool()
de.root = dirName
for {
if bufp >= nbuf {
bufp = 0
nbuf, err = readDirent(fd, buf)
if err != nil {
return err
}
if nbuf <= 0 {
return nil
}
}
consumed, name := parseDirEnt(&de.d, buf[bufp:nbuf])
bufp += consumed
if len(name) == 0 || string(name) == "." || string(name) == ".." {
continue
}
de.name = mem.B(name)
if err := fn(de.name, de); err != nil {
return err
}
}
}
type linuxDirEnt struct {
root mem.RO
d syscall.Dirent
name mem.RO
}
func (de *linuxDirEnt) cleanAndPutInPool() {
de.root = mem.RO{}
de.name = mem.RO{}
dirEntPool.Put(de)
}
func (de *linuxDirEnt) Name() string { return de.name.StringCopy() }
func (de *linuxDirEnt) Info() (fs.FileInfo, error) {
return os.Lstat(filepath.Join(de.root.StringCopy(), de.name.StringCopy()))
}
func (de *linuxDirEnt) IsDir() bool {
return de.d.Type == syscall.DT_DIR
}
func (de *linuxDirEnt) Type() fs.FileMode {
switch de.d.Type {
case syscall.DT_BLK:
return fs.ModeDevice // shrug
case syscall.DT_CHR:
return fs.ModeCharDevice
case syscall.DT_DIR:
return fs.ModeDir
case syscall.DT_FIFO:
return fs.ModeNamedPipe
case syscall.DT_LNK:
return fs.ModeSymlink
case syscall.DT_REG:
return 0
case syscall.DT_SOCK:
return fs.ModeSocket
default:
return fs.ModeIrregular // shrug
}
}
func direntNamlen(dirent *syscall.Dirent) int {
const fixedHdr = uint16(unsafe.Offsetof(syscall.Dirent{}.Name))
limit := dirent.Reclen - fixedHdr
const dirNameLen = 256 // sizeof syscall.Dirent.Name
if limit > dirNameLen {
limit = dirNameLen
}
for i := uint16(0); i < limit; i++ {
if dirent.Name[i] == 0 {
return int(i)
}
}
panic("failed to find terminating 0 byte in dirent")
}
func parseDirEnt(dirent *syscall.Dirent, buf []byte) (consumed int, name []byte) {
// golang.org/issue/37269
copy(unsafe.Slice((*byte)(unsafe.Pointer(dirent)), unsafe.Sizeof(syscall.Dirent{})), buf)
if v := unsafe.Offsetof(dirent.Reclen) + unsafe.Sizeof(dirent.Reclen); uintptr(len(buf)) < v {
panic(fmt.Sprintf("buf size of %d smaller than dirent header size %d", len(buf), v))
}
if len(buf) < int(dirent.Reclen) {
panic(fmt.Sprintf("buf size %d < record length %d", len(buf), dirent.Reclen))
}
consumed = int(dirent.Reclen)
if dirent.Ino == 0 { // File absent in directory.
return
}
name = unsafe.Slice((*byte)(unsafe.Pointer(&dirent.Name[0])), direntNamlen(dirent))
return
}
func sysOpen(name []byte) (fd int, err error) {
if len(name) == 0 || name[len(name)-1] != 0 {
return 0, syscall.EINVAL
}
var dirfd int = unix.AT_FDCWD
for {
r0, _, e1 := syscall.Syscall(unix.SYS_OPENAT, uintptr(dirfd),
uintptr(unsafe.Pointer(&name[0])), 0)
if e1 == 0 {
return int(r0), nil
}
if e1 == syscall.EINTR {
// Since https://golang.org/doc/go1.14#runtime we
// need to loop on EINTR in more places.
continue
}
return 0, syscall.Errno(e1)
}
}
func readDirent(fd int, buf []byte) (n int, err error) {
for {
nbuf, err := syscall.ReadDirent(fd, buf)
if err != syscall.EINTR {
return nbuf, err
}
}
}

265
vendor/tailscale.com/util/dnsname/dnsname.go generated vendored Normal file
View File

@@ -0,0 +1,265 @@
// Copyright (c) Tailscale Inc & AUTHORS
// SPDX-License-Identifier: BSD-3-Clause
// Package dnsname contains string functions for working with DNS names.
package dnsname
import (
"errors"
"fmt"
"strings"
)
const (
// maxLabelLength is the maximum length of a label permitted by RFC 1035.
maxLabelLength = 63
// maxNameLength is the maximum length of a DNS name.
maxNameLength = 253
)
// A FQDN is a fully-qualified DNS name or name suffix.
type FQDN string
func ToFQDN(s string) (FQDN, error) {
if len(s) == 0 || s == "." {
return FQDN("."), nil
}
if s[0] == '.' {
s = s[1:]
}
raw := s
totalLen := len(s)
if s[len(s)-1] == '.' {
s = s[:len(s)-1]
} else {
totalLen += 1 // account for missing dot
}
if totalLen > maxNameLength {
return "", fmt.Errorf("%q is too long to be a DNS name", s)
}
st := 0
for i := range len(s) {
if s[i] != '.' {
continue
}
label := s[st:i]
// You might be tempted to do further validation of the
// contents of labels here, based on the hostname rules in RFC
// 1123. However, DNS labels are not always subject to
// hostname rules. In general, they can contain any non-zero
// byte sequence, even though in practice a more restricted
// set is used.
//
// See https://github.com/tailscale/tailscale/issues/2024 for more.
if len(label) == 0 || len(label) > maxLabelLength {
return "", fmt.Errorf("%q is not a valid DNS label", label)
}
st = i + 1
}
if raw[len(raw)-1] != '.' {
raw = raw + "."
}
return FQDN(raw), nil
}
// WithTrailingDot returns f as a string, with a trailing dot.
func (f FQDN) WithTrailingDot() string {
return string(f)
}
// WithoutTrailingDot returns f as a string, with the trailing dot
// removed.
func (f FQDN) WithoutTrailingDot() string {
return string(f[:len(f)-1])
}
func (f FQDN) NumLabels() int {
if f == "." {
return 0
}
return strings.Count(f.WithTrailingDot(), ".")
}
func (f FQDN) Contains(other FQDN) bool {
if f == other {
return true
}
cmp := f.WithTrailingDot()
if cmp != "." {
cmp = "." + cmp
}
return strings.HasSuffix(other.WithTrailingDot(), cmp)
}
// ValidLabel reports whether label is a valid DNS label.
func ValidLabel(label string) error {
if len(label) == 0 {
return errors.New("empty DNS label")
}
if len(label) > maxLabelLength {
return fmt.Errorf("%q is too long, max length is %d bytes", label, maxLabelLength)
}
if !isalphanum(label[0]) {
return fmt.Errorf("%q is not a valid DNS label: must start with a letter or number", label)
}
if !isalphanum(label[len(label)-1]) {
return fmt.Errorf("%q is not a valid DNS label: must end with a letter or number", label)
}
if len(label) < 2 {
return nil
}
for i := 1; i < len(label)-1; i++ {
if !isdnschar(label[i]) {
return fmt.Errorf("%q is not a valid DNS label: contains invalid character %q", label, label[i])
}
}
return nil
}
// SanitizeLabel takes a string intended to be a DNS name label
// and turns it into a valid name label according to RFC 1035.
func SanitizeLabel(label string) string {
var sb strings.Builder // TODO: don't allocate in common case where label is already fine
start, end := 0, len(label)
// This is technically stricter than necessary as some characters may be dropped,
// but labels have no business being anywhere near this long in any case.
if end > maxLabelLength {
end = maxLabelLength
}
// A label must start with a letter or number...
for ; start < end; start++ {
if isalphanum(label[start]) {
break
}
}
// ...and end with a letter or number.
for ; start < end; end-- {
// This is safe because (start < end) implies (end >= 1).
if isalphanum(label[end-1]) {
break
}
}
for i := start; i < end; i++ {
// Consume a separator only if we are not at a boundary:
// then we can turn it into a hyphen without breaking the rules.
boundary := (i == start) || (i == end-1)
if !boundary && separators[label[i]] {
sb.WriteByte('-')
} else if isdnschar(label[i]) {
sb.WriteByte(tolower(label[i]))
}
}
return sb.String()
}
// HasSuffix reports whether the provided name ends with the
// component(s) in suffix, ignoring any trailing or leading dots.
//
// If suffix is the empty string, HasSuffix always reports false.
func HasSuffix(name, suffix string) bool {
name = strings.TrimSuffix(name, ".")
suffix = strings.TrimSuffix(suffix, ".")
suffix = strings.TrimPrefix(suffix, ".")
nameBase := strings.TrimSuffix(name, suffix)
return len(nameBase) < len(name) && strings.HasSuffix(nameBase, ".")
}
// TrimSuffix trims any trailing dots from a name and removes the
// suffix ending if present. The name will never be returned with
// a trailing dot, even after trimming.
func TrimSuffix(name, suffix string) string {
if HasSuffix(name, suffix) {
name = strings.TrimSuffix(name, ".")
suffix = strings.Trim(suffix, ".")
name = strings.TrimSuffix(name, suffix)
}
return strings.TrimSuffix(name, ".")
}
// TrimCommonSuffixes returns hostname with some common suffixes removed.
func TrimCommonSuffixes(hostname string) string {
hostname = strings.TrimSuffix(hostname, ".local")
hostname = strings.TrimSuffix(hostname, ".localdomain")
hostname = strings.TrimSuffix(hostname, ".lan")
return hostname
}
// SanitizeHostname turns hostname into a valid name label according
// to RFC 1035.
func SanitizeHostname(hostname string) string {
hostname = TrimCommonSuffixes(hostname)
return SanitizeLabel(hostname)
}
// NumLabels returns the number of DNS labels in hostname.
// If hostname is empty or the top-level name ".", returns 0.
func NumLabels(hostname string) int {
if hostname == "" || hostname == "." {
return 0
}
return strings.Count(hostname, ".")
}
// FirstLabel returns the first DNS label of hostname.
func FirstLabel(hostname string) string {
first, _, _ := strings.Cut(hostname, ".")
return first
}
// ValidHostname checks if a string is a valid hostname.
func ValidHostname(hostname string) error {
fqdn, err := ToFQDN(hostname)
if err != nil {
return err
}
for _, label := range strings.Split(fqdn.WithoutTrailingDot(), ".") {
if err := ValidLabel(label); err != nil {
return err
}
}
return nil
}
var separators = map[byte]bool{
' ': true,
'.': true,
'@': true,
'_': true,
}
func islower(c byte) bool {
return 'a' <= c && c <= 'z'
}
func isupper(c byte) bool {
return 'A' <= c && c <= 'Z'
}
func isalpha(c byte) bool {
return islower(c) || isupper(c)
}
func isalphanum(c byte) bool {
return isalpha(c) || ('0' <= c && c <= '9')
}
func isdnschar(c byte) bool {
return isalphanum(c) || c == '-'
}
func tolower(c byte) byte {
if isupper(c) {
return c + 'a' - 'A'
} else {
return c
}
}
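
A short sketch exercising ToFQDN, SanitizeLabel, and HasSuffix; the inputs are illustrative:

package main

import (
	"fmt"
	"log"

	"tailscale.com/util/dnsname"
)

func main() {
	fqdn, err := dnsname.ToFQDN("host.example.com")
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(fqdn.WithTrailingDot())    // "host.example.com."
	fmt.Println(fqdn.WithoutTrailingDot()) // "host.example.com"
	fmt.Println(fqdn.NumLabels())          // 3

	// Separators become hyphens, invalid bytes are dropped, case is lowered.
	fmt.Println(dnsname.SanitizeLabel("My Laptop (2)")) // "my-laptop-2"

	fmt.Println(dnsname.HasSuffix("host.example.com", "example.com")) // true
}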

104
vendor/tailscale.com/util/execqueue/execqueue.go generated vendored Normal file
View File

@@ -0,0 +1,104 @@
// Copyright (c) Tailscale Inc & AUTHORS
// SPDX-License-Identifier: BSD-3-Clause
// Package execqueue implements an ordered asynchronous queue for executing functions.
package execqueue
import (
"context"
"errors"
"sync"
)
type ExecQueue struct {
mu sync.Mutex
closed bool
inFlight bool // whether a goroutine is running q.run
doneWaiter chan struct{} // non-nil if waiter is waiting, then closed
queue []func()
}
func (q *ExecQueue) Add(f func()) {
q.mu.Lock()
defer q.mu.Unlock()
if q.closed {
return
}
if q.inFlight {
q.queue = append(q.queue, f)
} else {
q.inFlight = true
go q.run(f)
}
}
// RunSync waits for the queue to be drained and then synchronously runs f.
// It returns an error if the queue is closed before f is run or ctx expires.
func (q *ExecQueue) RunSync(ctx context.Context, f func()) error {
for {
if err := q.Wait(ctx); err != nil {
return err
}
q.mu.Lock()
if q.inFlight {
q.mu.Unlock()
continue
}
defer q.mu.Unlock()
if q.closed {
return errors.New("closed")
}
f()
return nil
}
}
func (q *ExecQueue) run(f func()) {
f()
q.mu.Lock()
for len(q.queue) > 0 && !q.closed {
f := q.queue[0]
q.queue[0] = nil
q.queue = q.queue[1:]
q.mu.Unlock()
f()
q.mu.Lock()
}
q.inFlight = false
q.queue = nil
if q.doneWaiter != nil {
close(q.doneWaiter)
q.doneWaiter = nil
}
q.mu.Unlock()
}
// Shutdown asynchronously signals the queue to stop.
func (q *ExecQueue) Shutdown() {
q.mu.Lock()
defer q.mu.Unlock()
q.closed = true
}
// Wait waits for the queue to be empty.
func (q *ExecQueue) Wait(ctx context.Context) error {
q.mu.Lock()
waitCh := q.doneWaiter
if q.inFlight && waitCh == nil {
waitCh = make(chan struct{})
q.doneWaiter = waitCh
}
q.mu.Unlock()
if waitCh == nil {
return nil
}
select {
case <-waitCh:
return nil
case <-ctx.Done():
return ctx.Err()
}
}
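
A minimal usage sketch; the queued work is a placeholder:

package main

import (
	"context"
	"fmt"
	"time"

	"tailscale.com/util/execqueue"
)

func main() {
	var q execqueue.ExecQueue // zero value is ready to use

	// Added functions run one at a time, in order, on a background goroutine.
	for i := range 3 {
		q.Add(func() { fmt.Println("job", i) })
	}

	// Wait for the queue to drain, giving up after a second.
	ctx, cancel := context.WithTimeout(context.Background(), time.Second)
	defer cancel()
	if err := q.Wait(ctx); err != nil {
		fmt.Println("wait:", err)
	}
	q.Shutdown()
}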

93
vendor/tailscale.com/util/goroutines/goroutines.go generated vendored Normal file
View File

@@ -0,0 +1,93 @@
// Copyright (c) Tailscale Inc & AUTHORS
// SPDX-License-Identifier: BSD-3-Clause
// The goroutines package contains utilities for getting active goroutines.
package goroutines
import (
"bytes"
"fmt"
"runtime"
"strconv"
)
// ScrubbedGoroutineDump returns either the current goroutine's stack or all
// goroutines' stacks, but with the actual values of arguments scrubbed out,
// lest it contain some private key material.
func ScrubbedGoroutineDump(all bool) []byte {
var buf []byte
// Grab stacks multiple times into increasingly larger buffer sizes
// to minimize the risk that we blow past our iOS memory limit.
for size := 1 << 10; size <= 1<<20; size += 1 << 10 {
buf = make([]byte, size)
buf = buf[:runtime.Stack(buf, all)]
if len(buf) < size {
// It fit.
break
}
}
return scrubHex(buf)
}
func scrubHex(buf []byte) []byte {
saw := map[string][]byte{} // "0x123" => "v1%3" (unique value 1 and its value mod 8)
foreachHexAddress(buf, func(in []byte) {
if string(in) == "0x0" {
return
}
if v, ok := saw[string(in)]; ok {
for i := range in {
in[i] = '_'
}
copy(in, v)
return
}
inStr := string(in)
u64, err := strconv.ParseUint(string(in[2:]), 16, 64)
for i := range in {
in[i] = '_'
}
if err != nil {
in[0] = '?'
return
}
v := []byte(fmt.Sprintf("v%d%%%d", len(saw)+1, u64%8))
saw[inStr] = v
copy(in, v)
})
return buf
}
var ohx = []byte("0x")
// foreachHexAddress calls f with each subslice of b that matches
// regexp `0x[0-9a-f]*`.
func foreachHexAddress(b []byte, f func([]byte)) {
for len(b) > 0 {
i := bytes.Index(b, ohx)
if i == -1 {
return
}
b = b[i:]
hx := hexPrefix(b)
f(hx)
b = b[len(hx):]
}
}
func hexPrefix(b []byte) []byte {
for i, c := range b {
if i < 2 {
continue
}
if !isHexByte(c) {
return b[:i]
}
}
return b
}
func isHexByte(b byte) bool {
return '0' <= b && b <= '9' || 'a' <= b && b <= 'f' || 'A' <= b && b <= 'F'
}
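
A one-call sketch showing how the scrubbed dump is used:

package main

import (
	"fmt"

	"tailscale.com/util/goroutines"
)

func main() {
	// Hex values that look like pointers are replaced with stable
	// placeholders (v1%3, v2%7, ...), so the dump is safer to log.
	dump := goroutines.ScrubbedGoroutineDump(true)
	fmt.Printf("%s\n", dump)
}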

29
vendor/tailscale.com/util/groupmember/groupmember.go generated vendored Normal file
View File

@@ -0,0 +1,29 @@
// Copyright (c) Tailscale Inc & AUTHORS
// SPDX-License-Identifier: BSD-3-Clause
// Package groupmember verifies group membership of the provided user on the
// local system.
package groupmember
import (
"os/user"
"slices"
)
// IsMemberOfGroup reports whether the provided user is a member of
// the provided system group.
func IsMemberOfGroup(group, userName string) (bool, error) {
u, err := user.Lookup(userName)
if err != nil {
return false, err
}
g, err := user.LookupGroup(group)
if err != nil {
return false, err
}
ugids, err := u.GroupIds()
if err != nil {
return false, err
}
return slices.Contains(ugids, g.Gid), nil
}
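
A minimal check sketch; the group and user names are hypothetical:

package main

import (
	"fmt"
	"log"

	"tailscale.com/util/groupmember"
)

func main() {
	ok, err := groupmember.IsMemberOfGroup("docker", "alice")
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println("alice is in docker:", ok)
}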

197
vendor/tailscale.com/util/hashx/block512.go generated vendored Normal file
View File

@@ -0,0 +1,197 @@
// Copyright (c) Tailscale Inc & AUTHORS
// SPDX-License-Identifier: BSD-3-Clause
// Package hashx provides a concrete implementation of [hash.Hash]
// that operates on a particular block size.
package hashx
import (
"encoding/binary"
"fmt"
"hash"
"unsafe"
)
var _ hash.Hash = (*Block512)(nil)
// Block512 wraps a [hash.Hash] for functions that operate on 512-bit block sizes.
// It has efficient methods for hashing fixed-width integers.
//
// A hashing algorithm that operates on 512-bit block sizes should be used.
// The hash still operates correctly even with misaligned block sizes,
// but operates less efficiently.
//
// Example algorithms with 512-bit block sizes include:
// - MD4 (https://golang.org/x/crypto/md4)
// - MD5 (https://golang.org/pkg/crypto/md5)
// - BLAKE2s (https://golang.org/x/crypto/blake2s)
// - BLAKE3
// - RIPEMD (https://golang.org/x/crypto/ripemd160)
// - SHA-0
// - SHA-1 (https://golang.org/pkg/crypto/sha1)
// - SHA-2 (https://golang.org/pkg/crypto/sha256)
// - Whirlpool
//
// See https://en.wikipedia.org/wiki/Comparison_of_cryptographic_hash_functions#Parameters
// for a list of hash functions and their block sizes.
//
// Block512 assumes that [hash.Hash.Write] never fails and
// never allows the provided buffer to escape.
type Block512 struct {
hash.Hash
x [512 / 8]byte
nx int
}
// New512 constructs a new Block512 that wraps h.
//
// It reports an error if the block sizes do not match.
// Misaligned block sizes perform poorly, but execute correctly.
// The error may be ignored if performance is not a concern.
func New512(h hash.Hash) (*Block512, error) {
b := &Block512{Hash: h}
if len(b.x)%h.BlockSize() != 0 {
return b, fmt.Errorf("hashx.Block512: inefficient use of hash.Hash with %d-bit block size", 8*h.BlockSize())
}
return b, nil
}
// Write hashes the contents of b.
func (h *Block512) Write(b []byte) (int, error) {
h.HashBytes(b)
return len(b), nil
}
// Sum appends the current hash to b and returns the resulting slice.
//
// It flushes any partially completed blocks to the underlying [hash.Hash],
// which may cause future operations to be misaligned and less efficient
// until [Block512.Reset] is called.
func (h *Block512) Sum(b []byte) []byte {
if h.nx > 0 {
h.Hash.Write(h.x[:h.nx])
h.nx = 0
}
// Unfortunately hash.Hash.Sum always causes the input to escape since
// escape analysis cannot prove anything past an interface method call.
// Assuming h already escapes, we call Sum with h.x first,
// and then copy the result to b.
sum := h.Hash.Sum(h.x[:0])
return append(b, sum...)
}
// Reset resets Block512 to its initial state.
// It recursively resets the underlying [hash.Hash].
func (h *Block512) Reset() {
h.Hash.Reset()
h.nx = 0
}
// HashUint8 hashes n as a 1-byte integer.
func (h *Block512) HashUint8(n uint8) {
// NOTE: This method is carefully written to be inlineable.
if h.nx <= len(h.x)-1 {
h.x[h.nx] = n
h.nx += 1
} else {
h.hashUint8Slow(n) // mark "noinline" to keep this within inline budget
}
}
//go:noinline
func (h *Block512) hashUint8Slow(n uint8) { h.hashUint(uint64(n), 1) }
// HashUint16 hashes n as a 2-byte little-endian integer.
func (h *Block512) HashUint16(n uint16) {
// NOTE: This method is carefully written to be inlineable.
if h.nx <= len(h.x)-2 {
binary.LittleEndian.PutUint16(h.x[h.nx:], n)
h.nx += 2
} else {
h.hashUint16Slow(n) // mark "noinline" to keep this within inline budget
}
}
//go:noinline
func (h *Block512) hashUint16Slow(n uint16) { h.hashUint(uint64(n), 2) }
// HashUint32 hashes n as a 4-byte little-endian integer.
func (h *Block512) HashUint32(n uint32) {
// NOTE: This method is carefully written to be inlineable.
if h.nx <= len(h.x)-4 {
binary.LittleEndian.PutUint32(h.x[h.nx:], n)
h.nx += 4
} else {
h.hashUint32Slow(n) // mark "noinline" to keep this within inline budget
}
}
//go:noinline
func (h *Block512) hashUint32Slow(n uint32) { h.hashUint(uint64(n), 4) }
// HashUint64 hashes n as a 8-byte little-endian integer.
func (h *Block512) HashUint64(n uint64) {
// NOTE: This method is carefully written to be inlineable.
if h.nx <= len(h.x)-8 {
binary.LittleEndian.PutUint64(h.x[h.nx:], n)
h.nx += 8
} else {
h.hashUint64Slow(n) // mark "noinline" to keep this within inline budget
}
}
//go:noinline
func (h *Block512) hashUint64Slow(n uint64) { h.hashUint(uint64(n), 8) }
func (h *Block512) hashUint(n uint64, i int) {
for ; i > 0; i-- {
if h.nx == len(h.x) {
h.Hash.Write(h.x[:])
h.nx = 0
}
h.x[h.nx] = byte(n)
h.nx += 1
n >>= 8
}
}
// HashBytes hashes the contents of b.
// It does not explicitly hash the length separately.
func (h *Block512) HashBytes(b []byte) {
// Nearly identical to sha256.digest.Write.
if h.nx > 0 {
n := copy(h.x[h.nx:], b)
h.nx += n
if h.nx == len(h.x) {
h.Hash.Write(h.x[:])
h.nx = 0
}
b = b[n:]
}
if len(b) >= len(h.x) {
n := len(b) &^ (len(h.x) - 1) // n is a multiple of len(h.x)
h.Hash.Write(b[:n])
b = b[n:]
}
if len(b) > 0 {
h.nx = copy(h.x[:], b)
}
}
// HashString hashes the contents of s.
// It does not explicitly hash the length separately.
func (h *Block512) HashString(s string) {
// TODO: Avoid unsafe when standard hashers implement io.StringWriter.
// See https://go.dev/issue/38776.
type stringHeader struct {
p unsafe.Pointer
n int
}
p := (*stringHeader)(unsafe.Pointer(&s))
b := unsafe.Slice((*byte)(p.p), p.n)
h.HashBytes(b)
}
// TODO: Add Hash.MarshalBinary and Hash.UnmarshalBinary?
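
A usage sketch wrapping SHA-256, whose 64-byte block size matches Block512:

package main

import (
	"crypto/sha256"
	"fmt"
	"log"

	"tailscale.com/util/hashx"
)

func main() {
	h, err := hashx.New512(sha256.New())
	if err != nil {
		log.Fatal(err) // only for hashes whose block size is not 512 bits
	}
	h.HashUint64(42)          // fixed-width integers are buffered...
	h.HashString("tailscale") // ...and flushed in 64-byte blocks
	fmt.Printf("%x\n", h.Sum(nil))
}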

197
vendor/tailscale.com/util/httphdr/httphdr.go generated vendored Normal file
View File

@@ -0,0 +1,197 @@
// Copyright (c) Tailscale Inc & AUTHORS
// SPDX-License-Identifier: BSD-3-Clause
// Package httphdr implements functionality for parsing and formatting
// standard HTTP headers.
package httphdr
import (
"bytes"
"strconv"
"strings"
)
// Range is a range of bytes within some content.
type Range struct {
// Start is the starting offset.
// It is zero if Length is negative; it must not be negative.
Start int64
// Length is the length of the content.
// It is zero if the length extends to the end of the content.
// It is negative if the length is relative to the end (e.g., last 5 bytes).
Length int64
}
// ows is optional whitespace.
const ows = " \t" // per RFC 7230, section 3.2.3
// ParseRange parses a "Range" header per RFC 7233, section 3.
// It only handles "Range" headers where the units is "bytes".
// The "Range" header is usually only specified in GET requests.
func ParseRange(hdr string) (ranges []Range, ok bool) {
// Grammar per RFC 7233, appendix D:
// Range = byte-ranges-specifier | other-ranges-specifier
// byte-ranges-specifier = bytes-unit "=" byte-range-set
// bytes-unit = "bytes"
// byte-range-set =
// *("," OWS)
// (byte-range-spec | suffix-byte-range-spec)
// *(OWS "," [OWS ( byte-range-spec | suffix-byte-range-spec )])
// byte-range-spec = first-byte-pos "-" [last-byte-pos]
// suffix-byte-range-spec = "-" suffix-length
// We do not support other-ranges-specifier.
// All other identifiers are 1*DIGIT.
hdr = strings.Trim(hdr, ows) // per RFC 7230, section 3.2
units, elems, hasUnits := strings.Cut(hdr, "=")
elems = strings.TrimLeft(elems, ","+ows)
for _, elem := range strings.Split(elems, ",") {
elem = strings.Trim(elem, ows) // per RFC 7230, section 7
switch {
case strings.HasPrefix(elem, "-"): // i.e., "-" suffix-length
n, ok := parseNumber(strings.TrimPrefix(elem, "-"))
if !ok {
return ranges, false
}
ranges = append(ranges, Range{0, -n})
case strings.HasSuffix(elem, "-"): // i.e., first-byte-pos "-"
n, ok := parseNumber(strings.TrimSuffix(elem, "-"))
if !ok {
return ranges, false
}
ranges = append(ranges, Range{n, 0})
default: // i.e., first-byte-pos "-" last-byte-pos
prefix, suffix, hasDash := strings.Cut(elem, "-")
n, ok2 := parseNumber(prefix)
m, ok3 := parseNumber(suffix)
if !hasDash || !ok2 || !ok3 || m < n {
return ranges, false
}
ranges = append(ranges, Range{n, m - n + 1})
}
}
return ranges, units == "bytes" && hasUnits && len(ranges) > 0 // must see at least one element per RFC 7233, section 2.1
}
// FormatRange formats a "Range" header per RFC 7233, section 3.
// It only handles "Range" headers where the units is "bytes".
// The "Range" header is usually only specified in GET requests.
func FormatRange(ranges []Range) (hdr string, ok bool) {
b := []byte("bytes=")
for _, r := range ranges {
switch {
case r.Length > 0: // i.e., first-byte-pos "-" last-byte-pos
if r.Start < 0 {
return string(b), false
}
b = strconv.AppendUint(b, uint64(r.Start), 10)
b = append(b, '-')
b = strconv.AppendUint(b, uint64(r.Start+r.Length-1), 10)
b = append(b, ',')
case r.Length == 0: // i.e., first-byte-pos "-"
if r.Start < 0 {
return string(b), false
}
b = strconv.AppendUint(b, uint64(r.Start), 10)
b = append(b, '-')
b = append(b, ',')
case r.Length < 0: // i.e., "-" suffix-length
if r.Start != 0 {
return string(b), false
}
b = append(b, '-')
b = strconv.AppendUint(b, uint64(-r.Length), 10)
b = append(b, ',')
default:
return string(b), false
}
}
return string(bytes.TrimRight(b, ",")), len(ranges) > 0
}
// ParseContentRange parses a "Content-Range" header per RFC 7233, section 4.2.
// It only handles "Content-Range" headers where the units is "bytes".
// The "Content-Range" header is usually only specified in HTTP responses.
//
// If only the completeLength is specified, then start and length are both zero.
//
// Otherwise, it parses the start and length and the optional completeLength,
// which is -1 if unspecified. The start is non-negative and the length is positive.
func ParseContentRange(hdr string) (start, length, completeLength int64, ok bool) {
// Grammar per RFC 7233, appendix D:
// Content-Range = byte-content-range | other-content-range
// byte-content-range = bytes-unit SP (byte-range-resp | unsatisfied-range)
// bytes-unit = "bytes"
// byte-range-resp = byte-range "/" (complete-length | "*")
// unsatisfied-range = "*/" complete-length
// byte-range = first-byte-pos "-" last-byte-pos
// We do not support other-content-range.
// All other identifiers are 1*DIGIT.
hdr = strings.Trim(hdr, ows) // per RFC 7230, section 3.2
suffix, hasUnits := strings.CutPrefix(hdr, "bytes ")
suffix, unsatisfied := strings.CutPrefix(suffix, "*/")
if unsatisfied { // i.e., unsatisfied-range
n, ok := parseNumber(suffix)
if !ok {
return start, length, completeLength, false
}
completeLength = n
} else { // i.e., byte-range "/" (complete-length | "*")
prefix, suffix, hasDash := strings.Cut(suffix, "-")
middle, suffix, hasSlash := strings.Cut(suffix, "/")
n, ok0 := parseNumber(prefix)
m, ok1 := parseNumber(middle)
o, ok2 := parseNumber(suffix)
if suffix == "*" {
o, ok2 = -1, true
}
if !hasDash || !hasSlash || !ok0 || !ok1 || !ok2 || m < n || (o >= 0 && o <= m) {
return start, length, completeLength, false
}
start = n
length = m - n + 1
completeLength = o
}
return start, length, completeLength, hasUnits
}
// FormatContentRange formats a "Content-Range" header per RFC 7233, section 4.2.
// It only handles "Content-Range" headers where the units is "bytes".
// The "Content-Range" header is usually only specified in HTTP responses.
//
// If start and length are non-positive, then it encodes just the completeLength,
// which must be a non-negative value.
//
// Otherwise, it encodes the start and length as a byte-range,
// and optionally emits the complete length if it is non-negative.
// The length must be positive (as RFC 7233 uses inclusive end offsets).
func FormatContentRange(start, length, completeLength int64) (hdr string, ok bool) {
b := []byte("bytes ")
switch {
case start <= 0 && length <= 0 && completeLength >= 0: // i.e., unsatisfied-range
b = append(b, "*/"...)
b = strconv.AppendUint(b, uint64(completeLength), 10)
ok = true
case start >= 0 && length > 0: // i.e., byte-range "/" (complete-length | "*")
b = strconv.AppendUint(b, uint64(start), 10)
b = append(b, '-')
b = strconv.AppendUint(b, uint64(start+length-1), 10)
b = append(b, '/')
if completeLength >= 0 {
b = strconv.AppendUint(b, uint64(completeLength), 10)
ok = completeLength >= start+length && start+length > 0
} else {
b = append(b, '*')
ok = true
}
}
return string(b), ok
}
// parseNumber parses s as an unsigned decimal integer.
// It parses according to the 1*DIGIT grammar, which allows leading zeros.
func parseNumber(s string) (int64, bool) {
suffix := strings.TrimLeft(s, "0123456789")
prefix := s[:len(s)-len(suffix)]
n, err := strconv.ParseInt(prefix, 10, 64)
return n, suffix == "" && err == nil
}
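
A round-trip sketch with illustrative byte offsets:

package main

import (
	"fmt"

	"tailscale.com/util/httphdr"
)

func main() {
	// "0-499" is the first 500 bytes; "-500" is the last 500 bytes.
	ranges, ok := httphdr.ParseRange("bytes=0-499, -500")
	fmt.Println(ranges, ok) // [{0 500} {0 -500}] true

	hdr, ok := httphdr.FormatRange([]httphdr.Range{{Start: 0, Length: 500}})
	fmt.Println(hdr, ok) // bytes=0-499 true

	start, length, complete, ok := httphdr.ParseContentRange("bytes 0-499/1000")
	fmt.Println(start, length, complete, ok) // 0 500 1000 true
}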

36
vendor/tailscale.com/util/httpm/httpm.go generated vendored Normal file
View File

@@ -0,0 +1,36 @@
// Copyright (c) Tailscale Inc & AUTHORS
// SPDX-License-Identifier: BSD-3-Clause
// Package httpm has shorter names for HTTP method constants.
//
// Some background: originally Go didn't have http.MethodGet, http.MethodPost
// and life was good and people just wrote readable "GET" and "POST". But then
// in a moment of weakness Brad and others maintaining net/http caved and let
// the http.MethodFoo constants be added and code's been less readable since.
// Now the substance of the method name is hidden away at the end after
// "http.Method" and they all blend together and it's hard to read code using
// them.
//
// This package is a compromise. It provides constants, but shorter and closer
// to how it used to look. It does violate Go style
// (https://github.com/golang/go/wiki/CodeReviewComments#mixed-caps) that says
// constants shouldn't be SCREAM_CASE. But this isn't INT_MAX; it's GET and
// POST, which are already defined as all caps.
//
// It would be tempting to make these constants be typed but then they wouldn't
// be assignable to things in net/http that just want string. Oh well.
package httpm
const (
GET = "GET"
HEAD = "HEAD"
POST = "POST"
PUT = "PUT"
PATCH = "PATCH"
DELETE = "DELETE"
CONNECT = "CONNECT"
OPTIONS = "OPTIONS"
TRACE = "TRACE"
SPACEJUMP = "SPACEJUMP" // https://www.w3.org/Protocols/HTTP/Methods/SpaceJump.html
BREW = "BREW" // https://datatracker.ietf.org/doc/html/rfc2324#section-2.1.1
)

37
vendor/tailscale.com/util/lineread/lineread.go generated vendored Normal file
View File

@@ -0,0 +1,37 @@
// Copyright (c) Tailscale Inc & AUTHORS
// SPDX-License-Identifier: BSD-3-Clause
// Package lineread reads lines from files. It's not fancy, but it got repetitive.
package lineread
import (
"bufio"
"io"
"os"
)
// File opens name and calls fn for each line. It returns an error if the Open failed
// or once fn returns an error.
func File(name string, fn func(line []byte) error) error {
f, err := os.Open(name)
if err != nil {
return err
}
defer f.Close()
return Reader(f, fn)
}
// Reader calls fn for each line.
// If fn returns an error, Reader stops reading and returns that error.
// Reader may also return errors encountered reading and parsing from r.
// To stop reading early, use a sentinel "stop" error value and ignore
// it when returned from Reader.
func Reader(r io.Reader, fn func(line []byte) error) error {
bs := bufio.NewScanner(r)
for bs.Scan() {
if err := fn(bs.Bytes()); err != nil {
return err
}
}
return bs.Err()
}
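
A minimal sketch counting lines in /etc/hosts; the path is illustrative:

package main

import (
	"fmt"
	"log"

	"tailscale.com/util/lineread"
)

func main() {
	var n int
	err := lineread.File("/etc/hosts", func(line []byte) error {
		n++ // line is only valid during this callback
		return nil
	})
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println("lines:", n)
}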

130
vendor/tailscale.com/util/linuxfw/detector.go generated vendored Normal file
View File

@@ -0,0 +1,130 @@
// Copyright (c) Tailscale Inc & AUTHORS
// SPDX-License-Identifier: BSD-3-Clause
//go:build linux
package linuxfw
import (
"errors"
"os/exec"
"tailscale.com/envknob"
"tailscale.com/hostinfo"
"tailscale.com/types/logger"
"tailscale.com/version/distro"
)
func detectFirewallMode(logf logger.Logf, prefHint string) FirewallMode {
if distro.Get() == distro.Gokrazy {
// Reduce startup logging on gokrazy. There's no way to do iptables on
// gokrazy anyway.
logf("GoKrazy should use nftables.")
hostinfo.SetFirewallMode("nft-gokrazy")
return FirewallModeNfTables
}
mode := envknob.String("TS_DEBUG_FIREWALL_MODE")
// If the envknob isn't set, fall back to the pref suggested by c2n or
// nodeattrs.
if mode == "" {
mode = prefHint
logf("using firewall mode pref %s", prefHint)
} else if prefHint != "" {
logf("TS_DEBUG_FIREWALL_MODE set, overriding firewall mode from %s to %s", prefHint, mode)
}
var det linuxFWDetector
if mode == "" {
// We have no preference, so check if `iptables` is even available.
_, err := det.iptDetect()
if err != nil && errors.Is(err, exec.ErrNotFound) {
logf("iptables not found: %v; falling back to nftables", err)
mode = "nftables"
}
}
// We now use iptables as default and have "auto" and "nftables" as
// options for people to test further.
switch mode {
case "auto":
return pickFirewallModeFromInstalledRules(logf, det)
case "nftables":
hostinfo.SetFirewallMode("nft-forced")
return FirewallModeNfTables
case "iptables":
hostinfo.SetFirewallMode("ipt-forced")
default:
logf("default choosing iptables")
hostinfo.SetFirewallMode("ipt-default")
}
return FirewallModeIPTables
}
// tableDetector abstracts helpers to detect the firewall mode.
// It is implemented for testing purposes.
type tableDetector interface {
iptDetect() (int, error)
nftDetect() (int, error)
}
type linuxFWDetector struct{}
// iptDetect returns the number of iptables rules in the current namespace.
func (l linuxFWDetector) iptDetect() (int, error) {
return detectIptables()
}
// nftDetect returns the number of nftables rules in the current namespace.
func (l linuxFWDetector) nftDetect() (int, error) {
return detectNetfilter()
}
// pickFirewallModeFromInstalledRules returns the firewall mode to use based on
// the environment and the system's capabilities.
func pickFirewallModeFromInstalledRules(logf logger.Logf, det tableDetector) FirewallMode {
if distro.Get() == distro.Gokrazy {
// Reduce startup logging on gokrazy. There's no way to do iptables on
// gokrazy anyway.
return FirewallModeNfTables
}
iptAva, nftAva := true, true
iptRuleCount, err := det.iptDetect()
if err != nil {
logf("detect iptables rule: %v", err)
iptAva = false
}
nftRuleCount, err := det.nftDetect()
if err != nil {
logf("detect nftables rule: %v", err)
nftAva = false
}
logf("nftables rule count: %d, iptables rule count: %d", nftRuleCount, iptRuleCount)
switch {
case nftRuleCount > 0 && iptRuleCount == 0:
logf("nftables is currently in use")
hostinfo.SetFirewallMode("nft-inuse")
return FirewallModeNfTables
case iptRuleCount > 0 && nftRuleCount == 0:
logf("iptables is currently in use")
hostinfo.SetFirewallMode("ipt-inuse")
return FirewallModeIPTables
case nftAva:
// if both iptables and nftables are available but
// neither/both are currently used, use nftables.
logf("nftables is available")
hostinfo.SetFirewallMode("nft")
return FirewallModeNfTables
case iptAva:
logf("iptables is available")
hostinfo.SetFirewallMode("ipt")
return FirewallModeIPTables
default:
// if neither iptables nor nftables are available, use iptablesRunner as a dummy
// runner which exists but won't do anything. Creating an iptablesRunner only
// errors if the iptables command is missing or doesn't support "--version";
// as long as it can determine a version, it'll carry on.
hostinfo.SetFirewallMode("ipt-fb")
return FirewallModeIPTables
}
}
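
Since tableDetector is unexported, the rule-count heuristic is easiest to exercise from an in-package test. A minimal sketch with a hypothetical fakeDetector, assuming the test host is not gokrazy:

//go:build linux

package linuxfw

import "testing"

// fakeDetector is a hypothetical tableDetector used only for illustration.
type fakeDetector struct {
	ipt, nft       int
	iptErr, nftErr error
}

func (d fakeDetector) iptDetect() (int, error) { return d.ipt, d.iptErr }
func (d fakeDetector) nftDetect() (int, error) { return d.nft, d.nftErr }

func TestPickFirewallModeFromInstalledRules(t *testing.T) {
	// Only nftables rules installed: nftables wins.
	if got := pickFirewallModeFromInstalledRules(t.Logf, fakeDetector{nft: 3}); got != FirewallModeNfTables {
		t.Errorf("got %v, want nftables", got)
	}
	// Only iptables rules installed: iptables wins.
	if got := pickFirewallModeFromInstalledRules(t.Logf, fakeDetector{ipt: 3}); got != FirewallModeIPTables {
		t.Errorf("got %v, want iptables", got)
	}
}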

142
vendor/tailscale.com/util/linuxfw/fake.go generated vendored Normal file
View File

@@ -0,0 +1,142 @@
// Copyright (c) Tailscale Inc & AUTHORS
// SPDX-License-Identifier: BSD-3-Clause
//go:build linux
package linuxfw
import (
"errors"
"fmt"
"os"
"strconv"
"strings"
)
type fakeIPTables struct {
n map[string][]string
}
type fakeRule struct {
table, chain string
args []string
}
func newFakeIPTables() *fakeIPTables {
return &fakeIPTables{
n: map[string][]string{
"filter/INPUT": nil,
"filter/OUTPUT": nil,
"filter/FORWARD": nil,
"nat/PREROUTING": nil,
"nat/OUTPUT": nil,
"nat/POSTROUTING": nil,
"mangle/FORWARD": nil,
},
}
}
func (n *fakeIPTables) Insert(table, chain string, pos int, args ...string) error {
k := table + "/" + chain
if rules, ok := n.n[k]; ok {
if pos > len(rules)+1 {
return fmt.Errorf("bad position %d in %s", pos, k)
}
rules = append(rules, "")
copy(rules[pos:], rules[pos-1:])
rules[pos-1] = strings.Join(args, " ")
n.n[k] = rules
} else {
return fmt.Errorf("unknown table/chain %s", k)
}
return nil
}
func (n *fakeIPTables) Append(table, chain string, args ...string) error {
k := table + "/" + chain
return n.Insert(table, chain, len(n.n[k])+1, args...)
}
func (n *fakeIPTables) Exists(table, chain string, args ...string) (bool, error) {
k := table + "/" + chain
if rules, ok := n.n[k]; ok {
for _, rule := range rules {
if rule == strings.Join(args, " ") {
return true, nil
}
}
return false, nil
} else {
return false, fmt.Errorf("unknown table/chain %s", k)
}
}
func (n *fakeIPTables) Delete(table, chain string, args ...string) error {
k := table + "/" + chain
if rules, ok := n.n[k]; ok {
for i, rule := range rules {
if rule == strings.Join(args, " ") {
rules = append(rules[:i], rules[i+1:]...)
n.n[k] = rules
return nil
}
}
return fmt.Errorf("delete of unknown rule %q from %s", strings.Join(args, " "), k)
} else {
return fmt.Errorf("unknown table/chain %s", k)
}
}
func (n *fakeIPTables) List(table, chain string) ([]string, error) {
k := table + "/" + chain
if rules, ok := n.n[k]; ok {
return rules, nil
} else {
return nil, fmt.Errorf("unknown table/chain %s", k)
}
}
func (n *fakeIPTables) ClearChain(table, chain string) error {
k := table + "/" + chain
if _, ok := n.n[k]; ok {
n.n[k] = nil
return nil
} else {
return errors.New("exitcode:1")
}
}
func (n *fakeIPTables) NewChain(table, chain string) error {
k := table + "/" + chain
if _, ok := n.n[k]; ok {
return fmt.Errorf("table/chain %s already exists", k)
}
n.n[k] = nil
return nil
}
func (n *fakeIPTables) DeleteChain(table, chain string) error {
k := table + "/" + chain
if rules, ok := n.n[k]; ok {
if len(rules) != 0 {
return fmt.Errorf("table/chain %s is not empty", k)
}
delete(n.n, k)
return nil
} else {
return fmt.Errorf("unknown table/chain %s", k)
}
}
func NewFakeIPTablesRunner() *iptablesRunner {
ipt4 := newFakeIPTables()
v6Available := false
var ipt6 iptablesInterface
if use6, err := strconv.ParseBool(os.Getenv("TS_TEST_FAKE_NETFILTER_6")); use6 || err != nil {
ipt6 = newFakeIPTables()
v6Available = true
}
iptr := &iptablesRunner{ipt4, ipt6, v6Available, v6Available, v6Available}
return iptr
}

39
vendor/tailscale.com/util/linuxfw/helpers.go generated vendored Normal file
View File

@@ -0,0 +1,39 @@
// Copyright (c) Tailscale Inc & AUTHORS
// SPDX-License-Identifier: BSD-3-Clause
//go:build linux
package linuxfw
import (
"encoding/hex"
"fmt"
"strings"
"unicode"
"tailscale.com/util/slicesx"
)
func formatMaybePrintable(b []byte) string {
// Remove a single trailing null, if any.
if slicesx.LastEqual(b, 0) {
b = b[:len(b)-1]
}
nonprintable := strings.IndexFunc(string(b), func(r rune) bool {
return r > unicode.MaxASCII || !unicode.IsPrint(r)
})
if nonprintable >= 0 {
return "<hex>" + hex.EncodeToString(b)
}
return string(b)
}
func formatPortRange(r [2]uint16) string {
if r == [2]uint16{0, 65535} {
return "any"
} else if r[0] == r[1] {
return fmt.Sprintf(`%d`, r[0])
}
return fmt.Sprintf(`%d-%d`, r[0], r[1])
}

73
vendor/tailscale.com/util/linuxfw/iptables.go generated vendored Normal file
View File

@@ -0,0 +1,73 @@
// Copyright (c) Tailscale Inc & AUTHORS
// SPDX-License-Identifier: BSD-3-Clause
// TODO(#8502): add support for more architectures
//go:build linux && (arm64 || amd64)
package linuxfw
import (
"fmt"
"os/exec"
"strings"
"unicode"
"tailscale.com/types/logger"
"tailscale.com/util/multierr"
)
// DebugIptables prints debug information about iptables rules to the
// provided log function.
func DebugIptables(logf logger.Logf) error {
// unused.
return nil
}
// detectIptables returns the number of iptables rules that are present in the
// system, ignoring the default "ACCEPT" rule present in the standard iptables
// chains.
//
// It only returns an error when there is no iptables binary, or when iptables -S
// fails. In all other cases, it returns the number of non-default rules.
//
// If the iptables binary is not found, it returns an underlying exec.ErrNotFound
// error.
func detectIptables() (int, error) {
// run "iptables -S" to get the list of rules using iptables
// exec.Command returns an error if the binary is not found
cmd := exec.Command("iptables", "-S")
output, err := cmd.Output()
ip6cmd := exec.Command("ip6tables", "-S")
ip6output, ip6err := ip6cmd.Output()
var allLines []string
outputStr := string(output)
lines := strings.Split(outputStr, "\n")
ip6outputStr := string(ip6output)
ip6lines := strings.Split(ip6outputStr, "\n")
switch {
case err == nil && ip6err == nil:
allLines = append(lines, ip6lines...)
case err == nil && ip6err != nil:
allLines = lines
case err != nil && ip6err == nil:
allLines = ip6lines
default:
return 0, FWModeNotSupportedError{
Mode: FirewallModeIPTables,
Err: fmt.Errorf("iptables command run fail: %w", multierr.New(err, ip6err)),
}
}
// count the number of non-default rules
count := 0
for _, line := range allLines {
trimmedLine := strings.TrimLeftFunc(line, unicode.IsSpace)
if line != "" && strings.HasPrefix(trimmedLine, "-A") {
// if the line is not empty and starts with "-A", it is an appended rule, not a default one
count++
}
}
// return the count of non-default rules
return count, nil
}

79
vendor/tailscale.com/util/linuxfw/iptables_for_svcs.go generated vendored Normal file
View File

@@ -0,0 +1,79 @@
// Copyright (c) Tailscale Inc & AUTHORS
// SPDX-License-Identifier: BSD-3-Clause
//go:build linux
package linuxfw
import (
"fmt"
"net/netip"
)
// This file contains functionality to insert portmapping rules for a 'service'.
// These are currently only used by the Kubernetes operator proxies.
// An iptables rule for such a service contains a comment with the service name.
// EnsurePortMapRuleForSvc adds a prerouting rule that forwards traffic received
// on match port and NOT on the provided interface to target IP and target port.
// The rule is only added if it does not already exist.
func (i *iptablesRunner) EnsurePortMapRuleForSvc(svc, tun string, targetIP netip.Addr, pm PortMap) error {
table := i.getIPTByAddr(targetIP)
args := argsForPortMapRule(svc, tun, targetIP, pm)
exists, err := table.Exists("nat", "PREROUTING", args...)
if err != nil {
return fmt.Errorf("error checking if rule exists: %w", err)
}
if !exists {
return table.Append("nat", "PREROUTING", args...)
}
return nil
}
// DeletePortMapRuleForSvc constructs a prerouting rule as would be created by
// EnsurePortMapRuleForSvc with the provided args and, if such a rule exists,
// deletes it.
func (i *iptablesRunner) DeletePortMapRuleForSvc(svc, excludeI string, targetIP netip.Addr, pm PortMap) error {
table := i.getIPTByAddr(targetIP)
args := argsForPortMapRule(svc, excludeI, targetIP, pm)
exists, err := table.Exists("nat", "PREROUTING", args...)
if err != nil {
return fmt.Errorf("error checking if rule exists: %w", err)
}
if exists {
return table.Delete("nat", "PREROUTING", args...)
}
return nil
}
// DeleteSvc constructs all possible rules that would have been created by
// EnsurePortMapRuleForSvc from the provided args and ensures that each one that
// exists is deleted.
func (i *iptablesRunner) DeleteSvc(svc, tun string, targetIPs []netip.Addr, pms []PortMap) error {
for _, tip := range targetIPs {
for _, pm := range pms {
if err := i.DeletePortMapRuleForSvc(svc, tun, tip, pm); err != nil {
return fmt.Errorf("error deleting rule: %w", err)
}
}
}
return nil
}
func argsForPortMapRule(svc, excludeI string, targetIP netip.Addr, pm PortMap) []string {
c := commentForSvc(svc, pm)
return []string{
"!", "-i", excludeI,
"-p", pm.Protocol,
"--dport", fmt.Sprintf("%d", pm.MatchPort),
"-m", "comment", "--comment", c,
"-j", "DNAT",
"--to-destination", fmt.Sprintf("%v:%v", targetIP, pm.TargetPort),
}
}
// commentForSvc generates a comment to be added to an iptables DNAT rule for a
// service. This is for iptables debugging/readability purposes only.
func commentForSvc(svc string, pm PortMap) string {
return fmt.Sprintf("%s:%s:%d -> %s:%d", svc, pm.Protocol, pm.MatchPort, pm.Protocol, pm.TargetPort)
}

774
vendor/tailscale.com/util/linuxfw/iptables_runner.go generated vendored Normal file
View File

@@ -0,0 +1,774 @@
// Copyright (c) Tailscale Inc & AUTHORS
// SPDX-License-Identifier: BSD-3-Clause
//go:build linux
package linuxfw
import (
"bytes"
"errors"
"fmt"
"log"
"net/netip"
"os"
"os/exec"
"slices"
"strconv"
"strings"
"github.com/coreos/go-iptables/iptables"
"tailscale.com/net/tsaddr"
"tailscale.com/types/logger"
"tailscale.com/util/multierr"
"tailscale.com/version/distro"
)
// isNotExistError needs to be overridden in tests that rely on distinguishing
// this error, because we don't have a good way to create a new
// iptables.Error of that type.
var isNotExistError = func(err error) bool {
var e *iptables.Error
return errors.As(err, &e) && e.IsNotExist()
}
type iptablesInterface interface {
// Adding this interface for testing purposes so we can mock out
// the iptables library, in reality this is a wrapper to *iptables.IPTables.
Insert(table, chain string, pos int, args ...string) error
Append(table, chain string, args ...string) error
Exists(table, chain string, args ...string) (bool, error)
Delete(table, chain string, args ...string) error
List(table, chain string) ([]string, error)
ClearChain(table, chain string) error
NewChain(table, chain string) error
DeleteChain(table, chain string) error
}
type iptablesRunner struct {
ipt4 iptablesInterface
ipt6 iptablesInterface
v6Available bool
v6NATAvailable bool
v6FilterAvailable bool
}
func checkIP6TablesExists() error {
// Some distros ship ip6tables separately from iptables.
if _, err := exec.LookPath("ip6tables"); err != nil {
return fmt.Errorf("path not found: %w", err)
}
return nil
}
// newIPTablesRunner constructs a NetfilterRunner that programs iptables rules.
// If the underlying iptables library fails to initialize, that error is
// returned. The runner probes for IPv6 support once at initialization time and
// if not found, no IPv6 rules will be modified for the lifetime of the runner.
func newIPTablesRunner(logf logger.Logf) (*iptablesRunner, error) {
ipt4, err := iptables.NewWithProtocol(iptables.ProtocolIPv4)
if err != nil {
return nil, err
}
supportsV6, supportsV6NAT, supportsV6Filter := false, false, false
v6err := CheckIPv6(logf)
ip6terr := checkIP6TablesExists()
var ipt6 *iptables.IPTables
switch {
case v6err != nil:
logf("disabling tunneled IPv6 due to system IPv6 config: %v", v6err)
case ip6terr != nil:
logf("disabling tunneled IPv6 due to missing ip6tables: %v", ip6terr)
default:
supportsV6 = true
ipt6, err = iptables.NewWithProtocol(iptables.ProtocolIPv6)
if err != nil {
return nil, err
}
supportsV6Filter = checkSupportsV6Filter(ipt6, logf)
supportsV6NAT = checkSupportsV6NAT(ipt6, logf)
logf("netfilter running in iptables mode v6 = %v, v6filter = %v, v6nat = %v", supportsV6, supportsV6Filter, supportsV6NAT)
}
return &iptablesRunner{
ipt4: ipt4,
ipt6: ipt6,
v6Available: supportsV6,
v6NATAvailable: supportsV6NAT,
v6FilterAvailable: supportsV6Filter}, nil
}
// checkSupportsV6Filter returns whether the system has a "filter" table in the
// IPv6 tables. Some container environments, such as GitHub Codespaces, have
// limited local IPv6 support and ship ip6tables inside the container, but
// lack kernel support for IPv6 filtering.
// We will not set ip6tables rules in these instances.
func checkSupportsV6Filter(ipt *iptables.IPTables, logf logger.Logf) bool {
if ipt == nil {
return false
}
_, filterListErr := ipt.ListChains("filter")
if filterListErr == nil {
return true
}
logf("ip6tables filtering is not supported on this host: %v", filterListErr)
return false
}
// checkSupportsV6NAT returns whether the system has a "nat" table in the
// IPv6 netfilter stack.
//
// The nat table was added after the initial release of ipv6
// netfilter, so some older distros ship a kernel that can't NAT IPv6
// traffic.
// ipt must be initialized for IPv6.
func checkSupportsV6NAT(ipt *iptables.IPTables, logf logger.Logf) bool {
if ipt == nil || ipt.Proto() != iptables.ProtocolIPv6 {
return false
}
_, natListErr := ipt.ListChains("nat")
if natListErr == nil {
return true
}
// TODO (irbekrm): the following two checks were added before the check
// above that verifies that nat chains can be listed. It is a
// container-friendly check (see
// https://github.com/tailscale/tailscale/issues/11344), but also should
// be good enough on its own in other environments. If we never observe
// it falsely succeed, let's remove the other two checks.
bs, err := os.ReadFile("/proc/net/ip6_tables_names")
if err != nil {
return false
}
if bytes.Contains(bs, []byte("nat\n")) {
logf("[unexpected] listing nat chains failed, but /proc/net/ip6_tables_name reports a nat table existing")
return true
}
if exec.Command("modprobe", "ip6table_nat").Run() == nil {
logf("[unexpected] listing nat chains failed, but modprobe ip6table_nat succeeded")
return true
}
return false
}
// HasIPV6 reports true if the system supports IPv6.
func (i *iptablesRunner) HasIPV6() bool {
return i.v6Available
}
// HasIPV6Filter reports true if the system supports ip6tables filter table.
func (i *iptablesRunner) HasIPV6Filter() bool {
return i.v6FilterAvailable
}
// HasIPV6NAT reports true if the system supports IPv6 NAT.
func (i *iptablesRunner) HasIPV6NAT() bool {
return i.v6NATAvailable
}
// getIPTByAddr returns the iptablesInterface with correct IP family
// that we will be using for the given address.
func (i *iptablesRunner) getIPTByAddr(addr netip.Addr) iptablesInterface {
nf := i.ipt4
if addr.Is6() {
nf = i.ipt6
}
return nf
}
// AddLoopbackRule adds an iptables rule to permit loopback traffic to
// a local Tailscale IP.
func (i *iptablesRunner) AddLoopbackRule(addr netip.Addr) error {
if err := i.getIPTByAddr(addr).Insert("filter", "ts-input", 1, "-i", "lo", "-s", addr.String(), "-j", "ACCEPT"); err != nil {
return fmt.Errorf("adding loopback allow rule for %q: %w", addr, err)
}
return nil
}
// tsChain returns the name of the tailscale sub-chain corresponding
// to the given "parent" chain (e.g. INPUT, FORWARD, ...).
func tsChain(chain string) string {
return "ts-" + strings.ToLower(chain)
}
// DelLoopbackRule removes the iptables rule permitting loopback
// traffic to a Tailscale IP.
func (i *iptablesRunner) DelLoopbackRule(addr netip.Addr) error {
if err := i.getIPTByAddr(addr).Delete("filter", "ts-input", "-i", "lo", "-s", addr.String(), "-j", "ACCEPT"); err != nil {
return fmt.Errorf("deleting loopback allow rule for %q: %w", addr, err)
}
return nil
}
// getTables gets the available iptablesInterface in iptables runner.
func (i *iptablesRunner) getTables() []iptablesInterface {
if i.HasIPV6Filter() {
return []iptablesInterface{i.ipt4, i.ipt6}
}
return []iptablesInterface{i.ipt4}
}
// getNATTables gets the available iptablesInterface in iptables runner.
// If the system does not support IPv6 NAT, only the IPv4 iptablesInterface
// is returned.
func (i *iptablesRunner) getNATTables() []iptablesInterface {
if i.HasIPV6NAT() {
return i.getTables()
}
return []iptablesInterface{i.ipt4}
}
// AddHooks inserts calls to tailscale's netfilter chains in
// the relevant main netfilter chains. The tailscale chains must
// already exist. If they do not, an error is returned.
func (i *iptablesRunner) AddHooks() error {
// divert inserts a jump to the tailscale chain in the given table/chain.
// If the jump already exists, it is a no-op.
divert := func(ipt iptablesInterface, table, chain string) error {
tsChain := tsChain(chain)
args := []string{"-j", tsChain}
exists, err := ipt.Exists(table, chain, args...)
if err != nil {
return fmt.Errorf("checking for %v in %s/%s: %w", args, table, chain, err)
}
if exists {
return nil
}
if err := ipt.Insert(table, chain, 1, args...); err != nil {
return fmt.Errorf("adding %v in %s/%s: %w", args, table, chain, err)
}
return nil
}
for _, ipt := range i.getTables() {
if err := divert(ipt, "filter", "INPUT"); err != nil {
return err
}
if err := divert(ipt, "filter", "FORWARD"); err != nil {
return err
}
}
for _, ipt := range i.getNATTables() {
if err := divert(ipt, "nat", "POSTROUTING"); err != nil {
return err
}
}
return nil
}
// AddChains creates custom Tailscale chains in netfilter via iptables
// if the ts-chain doesn't already exist.
func (i *iptablesRunner) AddChains() error {
// create creates a chain in the given table if it doesn't already exist.
// If the chain already exists, it is a no-op.
create := func(ipt iptablesInterface, table, chain string) error {
err := ipt.ClearChain(table, chain)
if isNotExistError(err) {
// nonexistent chain. let's create it!
return ipt.NewChain(table, chain)
}
if err != nil {
return fmt.Errorf("setting up %s/%s: %w", table, chain, err)
}
return nil
}
for _, ipt := range i.getTables() {
if err := create(ipt, "filter", "ts-input"); err != nil {
return err
}
if err := create(ipt, "filter", "ts-forward"); err != nil {
return err
}
}
for _, ipt := range i.getNATTables() {
if err := create(ipt, "nat", "ts-postrouting"); err != nil {
return err
}
}
return nil
}
// AddBase adds some basic processing rules to be supplemented by
// later calls to other helpers.
func (i *iptablesRunner) AddBase(tunname string) error {
if err := i.addBase4(tunname); err != nil {
return err
}
if i.HasIPV6Filter() {
if err := i.addBase6(tunname); err != nil {
return err
}
}
return nil
}
// addBase4 adds some basic IPv4 processing rules to be
// supplemented by later calls to other helpers.
func (i *iptablesRunner) addBase4(tunname string) error {
// Only allow CGNAT range traffic to come from tailscale0. There
// is an exception carved out for ranges used by ChromeOS, for
// which we fall out of the Tailscale chain.
//
// Note, this will definitely break nodes that end up using the
// CGNAT range for other purposes :(.
args := []string{"!", "-i", tunname, "-s", tsaddr.ChromeOSVMRange().String(), "-j", "RETURN"}
if err := i.ipt4.Append("filter", "ts-input", args...); err != nil {
return fmt.Errorf("adding %v in v4/filter/ts-input: %w", args, err)
}
args = []string{"!", "-i", tunname, "-s", tsaddr.CGNATRange().String(), "-j", "DROP"}
if err := i.ipt4.Append("filter", "ts-input", args...); err != nil {
return fmt.Errorf("adding %v in v4/filter/ts-input: %w", args, err)
}
// Explicitly allow all other inbound traffic to the tun interface
args = []string{"-i", tunname, "-j", "ACCEPT"}
if err := i.ipt4.Append("filter", "ts-input", args...); err != nil {
return fmt.Errorf("adding %v in v4/filter/ts-input: %w", args, err)
}
// Forward all traffic from the Tailscale interface, and drop
// traffic to the tailscale interface by default. We use packet
// marks here so both filter/FORWARD and nat/POSTROUTING can match
// on these packets of interest.
//
// In particular, we only want to apply SNAT rules in
// nat/POSTROUTING to packets that originated from the Tailscale
// interface, but we can't match on the inbound interface in
// POSTROUTING. So instead, we match on the inbound interface in
// filter/FORWARD, and set a packet mark that nat/POSTROUTING can
// use to effectively run that same test again.
args = []string{"-i", tunname, "-j", "MARK", "--set-mark", TailscaleSubnetRouteMark + "/" + TailscaleFwmarkMask}
if err := i.ipt4.Append("filter", "ts-forward", args...); err != nil {
return fmt.Errorf("adding %v in v4/filter/ts-forward: %w", args, err)
}
args = []string{"-m", "mark", "--mark", TailscaleSubnetRouteMark + "/" + TailscaleFwmarkMask, "-j", "ACCEPT"}
if err := i.ipt4.Append("filter", "ts-forward", args...); err != nil {
return fmt.Errorf("adding %v in v4/filter/ts-forward: %w", args, err)
}
args = []string{"-o", tunname, "-s", tsaddr.CGNATRange().String(), "-j", "DROP"}
if err := i.ipt4.Append("filter", "ts-forward", args...); err != nil {
return fmt.Errorf("adding %v in v4/filter/ts-forward: %w", args, err)
}
args = []string{"-o", tunname, "-j", "ACCEPT"}
if err := i.ipt4.Append("filter", "ts-forward", args...); err != nil {
return fmt.Errorf("adding %v in v4/filter/ts-forward: %w", args, err)
}
return nil
}
func (i *iptablesRunner) AddDNATRule(origDst, dst netip.Addr) error {
table := i.getIPTByAddr(dst)
return table.Insert("nat", "PREROUTING", 1, "--destination", origDst.String(), "-j", "DNAT", "--to-destination", dst.String())
}
// EnsureSNATForDst sets up firewall to ensure that all traffic aimed for dst, has its source ip set to src:
// - creates a SNAT rule if not already present
// - ensures that any no longer valid SNAT rules for the same dst are removed
func (i *iptablesRunner) EnsureSNATForDst(src, dst netip.Addr) error {
table := i.getIPTByAddr(dst)
rules, err := table.List("nat", "POSTROUTING")
if err != nil {
return fmt.Errorf("error listing rules: %v", err)
}
// iptables accepts either an address or a CIDR value for the --destination flag, but converts an address
// to a /32 CIDR. Explicitly passing a /32 CIDR makes it possible to test this rule.
dstPrefix, err := dst.Prefix(32)
if err != nil {
return fmt.Errorf("error calculating prefix of dst %v: %v", dst, err)
}
// wantsArgsPrefix is the prefix of the SNAT rule for the provided destination.
// We should only have one POSTROUTING rule with this prefix.
wantsArgsPrefix := fmt.Sprintf("-d %s -j SNAT --to-source", dstPrefix.String())
// wantsArgs is the actual SNAT rule that we want.
wantsArgs := fmt.Sprintf("%s %s", wantsArgsPrefix, src.String())
for _, r := range rules {
args := argsFromPostRoutingRule(r)
if strings.HasPrefix(args, wantsArgsPrefix) {
if strings.HasPrefix(args, wantsArgs) {
return nil
}
// An SNAT rule matching the destination but with a different source - delete it.
if err := table.Delete("nat", "POSTROUTING", strings.Split(args, " ")...); err != nil {
// If we failed to delete, don't crash the node - the proxy should still be functioning.
log.Printf("[unexpected] error deleting rule %s: %v, please report it.", r, err)
}
break
}
}
return table.Insert("nat", "POSTROUTING", 1, "-d", dstPrefix.String(), "-j", "SNAT", "--to-source", src.String())
}
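// Illustrative sketch (addresses are made up): for dst 100.64.0.5 and src
// 10.0.0.2, the rule we want is listed by iptables as
//
//	-A POSTROUTING -d 100.64.0.5/32 -j SNAT --to-source 10.0.0.2
//
// so wantsArgsPrefix is "-d 100.64.0.5/32 -j SNAT --to-source", and any
// existing POSTROUTING rule matching that prefix but naming a different source
// is deleted before the desired rule is (re)inserted.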
func (i *iptablesRunner) DNATNonTailscaleTraffic(tun string, dst netip.Addr) error {
table := i.getIPTByAddr(dst)
return table.Insert("nat", "PREROUTING", 1, "!", "-i", tun, "-j", "DNAT", "--to-destination", dst.String())
}
// DNATWithLoadBalancer adds iptables rules to forward all traffic received for
// originDst to the backend dsts. Traffic will be load balanced using round robin.
func (i *iptablesRunner) DNATWithLoadBalancer(origDst netip.Addr, dsts []netip.Addr) error {
table := i.getIPTByAddr(dsts[0])
if err := table.ClearChain("nat", "PREROUTING"); err != nil && !isNotExistError(err) {
// If clearing the PREROUTING chain fails, fail the whole operation. This
// rule is currently only used in Kubernetes containers where a
// failed container gets restarted which should hopefully fix things.
return fmt.Errorf("error clearing nat PREROUTING chain: %w", err)
}
// If dsts contains more than one address, then for each n from len(dsts) down to 2, route every nth new connection to dsts[n-1].
for i := len(dsts); i >= 2; i-- {
dst := dsts[i-1] // the order in which rules for addrs are installed does not matter
if err := table.Append("nat", "PREROUTING", "--destination", origDst.String(), "-m", "statistic", "--mode", "nth", "--every", fmt.Sprint(i), "--packet", "0", "-j", "DNAT", "--to-destination", dst.String()); err != nil {
return fmt.Errorf("error adding DNAT rule for %s: %w", dst.String(), err)
}
}
// If the packet falls through to this rule, we route to the first destination in the list unconditionally.
return table.Append("nat", "PREROUTING", "--destination", origDst.String(), "-j", "DNAT", "--to-destination", dsts[0].String())
}
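// Illustrative sketch (addresses are made up): for origDst 100.64.0.10 and
// three backends 10.0.0.1-10.0.0.3, the loop plus the final unconditional rule
// yield roughly:
//
//	-A PREROUTING -d 100.64.0.10 -m statistic --mode nth --every 3 --packet 0 -j DNAT --to-destination 10.0.0.3
//	-A PREROUTING -d 100.64.0.10 -m statistic --mode nth --every 2 --packet 0 -j DNAT --to-destination 10.0.0.2
//	-A PREROUTING -d 100.64.0.10 -j DNAT --to-destination 10.0.0.1
//
// The first rule claims 1/3 of the matching packets, the second claims 1/2 of
// the remaining 2/3, and the rest fall through to the last rule, so each
// backend receives roughly a third of new connections.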
func (i *iptablesRunner) ClampMSSToPMTU(tun string, addr netip.Addr) error {
table := i.getIPTByAddr(addr)
return table.Append("mangle", "FORWARD", "-o", tun, "-p", "tcp", "--tcp-flags", "SYN,RST", "SYN", "-j", "TCPMSS", "--clamp-mss-to-pmtu")
}
// addBase6 adds some basic IPv6 processing rules to be
// supplemented by later calls to other helpers.
func (i *iptablesRunner) addBase6(tunname string) error {
// TODO: only allow traffic from Tailscale's ULA range to come
// from tailscale0.
// Explicitly allow all other inbound traffic to the tun interface
args := []string{"-i", tunname, "-j", "ACCEPT"}
if err := i.ipt6.Append("filter", "ts-input", args...); err != nil {
return fmt.Errorf("adding %v in v6/filter/ts-input: %w", args, err)
}
args = []string{"-i", tunname, "-j", "MARK", "--set-mark", TailscaleSubnetRouteMark + "/" + TailscaleFwmarkMask}
if err := i.ipt6.Append("filter", "ts-forward", args...); err != nil {
return fmt.Errorf("adding %v in v6/filter/ts-forward: %w", args, err)
}
args = []string{"-m", "mark", "--mark", TailscaleSubnetRouteMark + "/" + TailscaleFwmarkMask, "-j", "ACCEPT"}
if err := i.ipt6.Append("filter", "ts-forward", args...); err != nil {
return fmt.Errorf("adding %v in v6/filter/ts-forward: %w", args, err)
}
// TODO: drop forwarded traffic to tailscale0 from tailscale's ULA
// (see corresponding IPv4 CGNAT rule).
args = []string{"-o", tunname, "-j", "ACCEPT"}
if err := i.ipt6.Append("filter", "ts-forward", args...); err != nil {
return fmt.Errorf("adding %v in v6/filter/ts-forward: %w", args, err)
}
return nil
}
// DelChains removes the custom Tailscale chains from netfilter via iptables.
func (i *iptablesRunner) DelChains() error {
for _, ipt := range i.getTables() {
if err := delChain(ipt, "filter", "ts-input"); err != nil {
return err
}
if err := delChain(ipt, "filter", "ts-forward"); err != nil {
return err
}
}
for _, ipt := range i.getNATTables() {
if err := delChain(ipt, "nat", "ts-postrouting"); err != nil {
return err
}
}
return nil
}
// DelBase empties but does not remove custom Tailscale chains from
// netfilter via iptables.
func (i *iptablesRunner) DelBase() error {
del := func(ipt iptablesInterface, table, chain string) error {
if err := ipt.ClearChain(table, chain); err != nil {
if isNotExistError(err) {
// nonexistent chain. That's fine, since it's
// the desired state anyway.
return nil
}
return fmt.Errorf("flushing %s/%s: %w", table, chain, err)
}
return nil
}
for _, ipt := range i.getTables() {
if err := del(ipt, "filter", "ts-input"); err != nil {
return err
}
if err := del(ipt, "filter", "ts-forward"); err != nil {
return err
}
}
for _, ipt := range i.getNATTables() {
if err := del(ipt, "nat", "ts-postrouting"); err != nil {
return err
}
}
return nil
}
// DelHooks deletes the calls to tailscale's netfilter chains
// in the relevant main netfilter chains.
func (i *iptablesRunner) DelHooks(logf logger.Logf) error {
for _, ipt := range i.getTables() {
if err := delTSHook(ipt, "filter", "INPUT", logf); err != nil {
return err
}
if err := delTSHook(ipt, "filter", "FORWARD", logf); err != nil {
return err
}
}
for _, ipt := range i.getNATTables() {
if err := delTSHook(ipt, "nat", "POSTROUTING", logf); err != nil {
return err
}
}
return nil
}
// AddSNATRule adds a netfilter rule to SNAT traffic destined for
// local subnets.
func (i *iptablesRunner) AddSNATRule() error {
args := []string{"-m", "mark", "--mark", TailscaleSubnetRouteMark + "/" + TailscaleFwmarkMask, "-j", "MASQUERADE"}
for _, ipt := range i.getNATTables() {
if err := ipt.Append("nat", "ts-postrouting", args...); err != nil {
return fmt.Errorf("adding %v in nat/ts-postrouting: %w", args, err)
}
}
return nil
}
// DelSNATRule removes the netfilter rule to SNAT traffic destined for
// local subnets. An error is returned if the rule does not exist.
func (i *iptablesRunner) DelSNATRule() error {
args := []string{"-m", "mark", "--mark", TailscaleSubnetRouteMark + "/" + TailscaleFwmarkMask, "-j", "MASQUERADE"}
for _, ipt := range i.getNATTables() {
if err := ipt.Delete("nat", "ts-postrouting", args...); err != nil {
return fmt.Errorf("deleting %v in nat/ts-postrouting: %w", args, err)
}
}
return nil
}
func statefulRuleArgs(tunname string) []string {
return []string{"-o", tunname, "-m", "conntrack", "!", "--ctstate", "ESTABLISHED,RELATED", "-j", "DROP"}
}
// AddStatefulRule adds a netfilter rule for stateful packet filtering using
// conntrack.
func (i *iptablesRunner) AddStatefulRule(tunname string) error {
// Drop packets that are destined for the tailscale interface if
// they're a new connection, per conntrack, to prevent hosts on the
// same subnet from being able to use this device as a way to forward
// packets on to the Tailscale network.
//
// The conntrack states are:
// NEW A packet which creates a new connection.
// ESTABLISHED A packet which belongs to an existing connection
// (i.e., a reply packet, or outgoing packet on a
// connection which has seen replies).
// RELATED A packet which is related to, but not part of, an
// existing connection, such as an ICMP error.
// INVALID A packet which could not be identified for some
// reason: this includes running out of memory and ICMP
// errors which don't correspond to any known
// connection. Generally these packets should be
// dropped.
//
// We drop NEW packets to prevent connections from coming "into"
// Tailscale from other hosts on the same network segment; we drop
// INVALID packets as well.
args := statefulRuleArgs(tunname)
for _, ipt := range i.getTables() {
// First, find the final "accept" rule.
rules, err := ipt.List("filter", "ts-forward")
if err != nil {
return fmt.Errorf("listing rules in filter/ts-forward: %w", err)
}
want := fmt.Sprintf("-A %s -o %s -j ACCEPT", "ts-forward", tunname)
pos := slices.Index(rules, want)
if pos < 0 {
return fmt.Errorf("couldn't find final ACCEPT rule in filter/ts-forward")
}
if err := ipt.Insert("filter", "ts-forward", pos, args...); err != nil {
return fmt.Errorf("adding %v in filter/ts-forward: %w", args, err)
}
}
return nil
}
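// Illustrative sketch (interface name assumed): with tunname "tailscale0", the
// v4 ts-forward chain ends up roughly as below, with the conntrack DROP
// inserted immediately before the final ACCEPT that addBase4 appended:
//
//	-A ts-forward -i tailscale0 -j MARK --set-mark 0x40000/0xff0000
//	-A ts-forward -m mark --mark 0x40000/0xff0000 -j ACCEPT
//	-A ts-forward -o tailscale0 -s 100.64.0.0/10 -j DROP
//	-A ts-forward -o tailscale0 -m conntrack ! --ctstate ESTABLISHED,RELATED -j DROP
//	-A ts-forward -o tailscale0 -j ACCEPT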
// DelStatefulRule removes the netfilter rule for stateful packet filtering
// using conntrack.
func (i *iptablesRunner) DelStatefulRule(tunname string) error {
args := statefulRuleArgs(tunname)
for _, ipt := range i.getTables() {
if err := ipt.Delete("filter", "ts-forward", args...); err != nil {
return fmt.Errorf("deleting %v in filter/ts-forward: %w", args, err)
}
}
return nil
}
// buildMagicsockPortRule generates the string slice containing the arguments
// to describe a rule accepting traffic on a particular port to iptables. It is
// separated out here to avoid repetition in AddMagicsockPortRule and
// RemoveMagicsockPortRule, since it is important that the same rule is passed
// to Append() and Delete().
func buildMagicsockPortRule(port uint16) []string {
return []string{"-p", "udp", "--dport", strconv.FormatUint(uint64(port), 10), "-j", "ACCEPT"}
}
// AddMagicsockPortRule adds a rule to iptables to allow incoming traffic on
// the specified UDP port, so magicsock can accept incoming connections.
// network must be either "udp4" or "udp6" - this determines whether the rule
// is added for IPv4 or IPv6.
func (i *iptablesRunner) AddMagicsockPortRule(port uint16, network string) error {
var ipt iptablesInterface
switch network {
case "udp4":
ipt = i.ipt4
case "udp6":
ipt = i.ipt6
default:
return fmt.Errorf("unsupported network %s", network)
}
args := buildMagicsockPortRule(port)
if err := ipt.Append("filter", "ts-input", args...); err != nil {
return fmt.Errorf("adding %v in filter/ts-input: %w", args, err)
}
return nil
}
// DelMagicsockPortRule removes a rule added by AddMagicsockPortRule to accept
// incoming traffic on a particular UDP port.
// network must be either "udp4" or "udp6" - this determines whether the rule
// is removed for IPv4 or IPv6.
func (i *iptablesRunner) DelMagicsockPortRule(port uint16, network string) error {
var ipt iptablesInterface
switch network {
case "udp4":
ipt = i.ipt4
case "udp6":
ipt = i.ipt6
default:
return fmt.Errorf("unsupported network %s", network)
}
args := buildMagicsockPortRule(port)
if err := ipt.Delete("filter", "ts-input", args...); err != nil {
return fmt.Errorf("removing %v in filter/ts-input: %w", args, err)
}
return nil
}
// IPTablesCleanUp removes all Tailscale added iptables rules.
// Any errors that occur are logged to the provided logf.
func IPTablesCleanUp(logf logger.Logf) {
if distro.Get() == distro.Gokrazy {
// Gokrazy uses nftables and doesn't have the "iptables" command.
// Avoid log spam on cleanup. (#12277)
return
}
err := clearRules(iptables.ProtocolIPv4, logf)
if err != nil {
logf("linuxfw: clear iptables: %v", err)
}
err = clearRules(iptables.ProtocolIPv6, logf)
if err != nil {
logf("linuxfw: clear ip6tables: %v", err)
}
}
// delTSHook deletes the hook in a chain that jumps to a ts-chain. If the hook does
// not exist, it's a no-op since the desired state is already achieved; errors that
// merely indicate a missing rule are ignored rather than returned.
func delTSHook(ipt iptablesInterface, table, chain string, logf logger.Logf) error {
tsChain := tsChain(chain)
args := []string{"-j", tsChain}
if err := ipt.Delete(table, chain, args...); err != nil && !isNotExistError(err) {
return fmt.Errorf("deleting %v in %s/%s: %v", args, table, chain, err)
}
return nil
}
// delChain flushes and deletes a chain. If the chain does not exist, it's a no-op
// since the desired state is already achieved. Otherwise, it returns an error.
func delChain(ipt iptablesInterface, table, chain string) error {
if err := ipt.ClearChain(table, chain); err != nil {
if isNotExistError(err) {
// nonexistent chain. nothing to do.
return nil
}
return fmt.Errorf("flushing %s/%s: %w", table, chain, err)
}
if err := ipt.DeleteChain(table, chain); err != nil {
return fmt.Errorf("deleting %s/%s: %w", table, chain, err)
}
return nil
}
// clearRules clears all the iptables rules created by Tailscale
// for the given protocol. Any errors are accumulated and returned so the caller can log them.
func clearRules(proto iptables.Protocol, logf logger.Logf) error {
ipt, err := iptables.NewWithProtocol(proto)
if err != nil {
return err
}
var errs []error
if err := delTSHook(ipt, "filter", "INPUT", logf); err != nil {
errs = append(errs, err)
}
if err := delTSHook(ipt, "filter", "FORWARD", logf); err != nil {
errs = append(errs, err)
}
if err := delTSHook(ipt, "nat", "POSTROUTING", logf); err != nil {
errs = append(errs, err)
}
if err := delChain(ipt, "filter", "ts-input"); err != nil {
errs = append(errs, err)
}
if err := delChain(ipt, "filter", "ts-forward"); err != nil {
errs = append(errs, err)
}
if err := delChain(ipt, "nat", "ts-postrouting"); err != nil {
errs = append(errs, err)
}
return multierr.New(errs...)
}
// argsFromPostRoutingRule accepts a rule as returned by iptables.List and, if it is a rule from the POSTROUTING chain,
// returns the args part, else returns the original rule.
func argsFromPostRoutingRule(r string) string {
args, _ := strings.CutPrefix(r, "-A POSTROUTING ")
return args
}

182
vendor/tailscale.com/util/linuxfw/linuxfw.go generated vendored Normal file
View File

@@ -0,0 +1,182 @@
// Copyright (c) Tailscale Inc & AUTHORS
// SPDX-License-Identifier: BSD-3-Clause
//go:build linux
// Package linuxfw returns the kind of firewall being used by the kernel.
package linuxfw
import (
"errors"
"fmt"
"os"
"strconv"
"strings"
"github.com/tailscale/netlink"
"tailscale.com/types/logger"
)
// MatchDecision is the decision made by the firewall for a packet matched by a rule.
// It is used to decide whether to accept or masquerade a packet in addMatchSubnetRouteMarkRule.
type MatchDecision int
const (
Accept MatchDecision = iota
Masq
)
type FWModeNotSupportedError struct {
Mode FirewallMode
Err error
}
func (e FWModeNotSupportedError) Error() string {
return fmt.Sprintf("firewall mode %q not supported: %v", e.Mode, e.Err)
}
func (e FWModeNotSupportedError) Is(target error) bool {
_, ok := target.(FWModeNotSupportedError)
return ok
}
func (e FWModeNotSupportedError) Unwrap() error {
return e.Err
}
type FirewallMode string
const (
FirewallModeIPTables FirewallMode = "iptables"
FirewallModeNfTables FirewallMode = "nftables"
)
// The following bits are added to packet marks for Tailscale use.
//
// We tried to pick bits sufficiently out of the way that it's
// unlikely to collide with existing uses. We have 4 bytes of mark
// bits to play with. We leave the lower byte alone on the assumption
// that sysadmins would use those. Kubernetes uses a few bits in the
// second byte, so we steer clear of that too.
//
// Empirically, most of the documentation on packet marks on the
// internet gives the impression that the marks are 16 bits
// wide. Based on this, we theorize that the upper two bytes are
// relatively unused in the wild, and so we consume bits 16:23 (the
// third byte).
//
// The constants are in the iptables/iproute2 string format for
// matching and setting the bits, so they can be directly embedded in
// commands.
const (
// The mask for reading/writing the 'firewall mask' bits on a packet.
// See the comment on the const block on why we only use the third byte.
//
// We claim bits 16:23 entirely. For now we only use the lower four
// bits, leaving the higher 4 bits for future use.
TailscaleFwmarkMask = "0xff0000"
TailscaleFwmarkMaskNum = 0xff0000
// Packet is from Tailscale and to a subnet route destination, so
// is allowed to be routed through this machine.
TailscaleSubnetRouteMark = "0x40000"
TailscaleSubnetRouteMarkNum = 0x40000
// Packet was originated by tailscaled itself, and must not be
// routed over the Tailscale network.
TailscaleBypassMark = "0x80000"
TailscaleBypassMarkNum = 0x80000
)
// getTailscaleFwmarkMaskNeg returns the negation of TailscaleFwmarkMask in bytes.
func getTailscaleFwmarkMaskNeg() []byte {
return []byte{0xff, 0x00, 0xff, 0xff}
}
// getTailscaleFwmarkMask returns the TailscaleFwmarkMask in bytes.
func getTailscaleFwmarkMask() []byte {
return []byte{0x00, 0xff, 0x00, 0x00}
}
// getTailscaleSubnetRouteMark returns the TailscaleSubnetRouteMark in bytes.
func getTailscaleSubnetRouteMark() []byte {
return []byte{0x00, 0x04, 0x00, 0x00}
}
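// The byte slices returned above are the big-endian 32-bit encodings of the
// numeric constants (0x00ff0000 -> {0x00, 0xff, 0x00, 0x00}, 0x00040000 ->
// {0x00, 0x04, 0x00, 0x00}), with getTailscaleFwmarkMaskNeg being the bitwise
// complement of the mask. They are presumably used where a mark must be
// supplied as raw bytes, e.g. by the nftables-based implementation, rather
// than as an iptables-style "value/mask" string.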
// checkIPv6ForTest can be set in tests.
var checkIPv6ForTest func(logger.Logf) error
// CheckIPv6 checks whether the system appears to have a working IPv6
// network stack. It returns an error explaining what looks wrong or
// missing. It does not check that IPv6 is currently functional or
// that there's a global address, just that the system would support
// IPv6 if it were on an IPv6 network.
func CheckIPv6(logf logger.Logf) error {
if f := checkIPv6ForTest; f != nil {
return f(logf)
}
_, err := os.Stat("/proc/sys/net/ipv6")
if os.IsNotExist(err) {
return err
}
bs, err := os.ReadFile("/proc/sys/net/ipv6/conf/all/disable_ipv6")
if err != nil {
// Be conservative if we can't find the IPv6 configuration knob.
return err
}
disabled, err := strconv.ParseBool(strings.TrimSpace(string(bs)))
if err != nil {
return errors.New("disable_ipv6 has invalid bool")
}
if disabled {
return errors.New("disable_ipv6 is set")
}
// Older kernels don't support IPv6 policy routing. Some kernels
// support policy routing but don't have this knob, so absence of
// the knob is not fatal.
bs, err = os.ReadFile("/proc/sys/net/ipv6/conf/all/disable_policy")
if err == nil {
disabled, err = strconv.ParseBool(strings.TrimSpace(string(bs)))
if err != nil {
return errors.New("disable_policy has invalid bool")
}
if disabled {
return errors.New("disable_policy is set")
}
}
if err := CheckIPRuleSupportsV6(logf); err != nil {
return fmt.Errorf("kernel doesn't support IPv6 policy routing: %w", err)
}
return nil
}
func CheckIPRuleSupportsV6(logf logger.Logf) error {
// First try just a read-only operation to ideally avoid
// having to modify any state.
if rules, err := netlink.RuleList(netlink.FAMILY_V6); err != nil {
return fmt.Errorf("querying IPv6 policy routing rules: %w", err)
} else {
if len(rules) > 0 {
logf("[v1] kernel supports IPv6 policy routing (found %d rules)", len(rules))
return nil
}
}
// Try to actually create & delete one as a test.
rule := netlink.NewRule()
rule.Priority = 1234
rule.Mark = TailscaleBypassMarkNum
rule.Table = 52
rule.Family = netlink.FAMILY_V6
// First delete the rule unconditionally, and don't check for
// errors. This is just cleaning up anything that might be already
// there.
netlink.RuleDel(rule)
// And clean up on exit.
defer netlink.RuleDel(rule)
return netlink.RuleAdd(rule)
}
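// Illustrative sketch: the probe above is roughly equivalent to running
//
//	ip -6 rule add pref 1234 fwmark 0x80000 table 52
//	ip -6 rule del pref 1234 fwmark 0x80000 table 52
//
// and succeeds only if the kernel accepts the add.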

View File

@@ -0,0 +1,40 @@
// Copyright (c) Tailscale Inc & AUTHORS
// SPDX-License-Identifier: BSD-3-Clause
// NOTE: linux_{arm64, amd64} are the only two currently supported archs due to missing
// support in upstream dependencies.
// TODO(#8502): add support for more architectures
//go:build linux && !(arm64 || amd64)
package linuxfw
import (
"errors"
"tailscale.com/types/logger"
)
// ErrUnsupported is the error returned from all functions on unsupported
// platforms.
var ErrUnsupported = errors.New("linuxfw:unsupported")
// DebugNetfilter is not supported on this platform.
func DebugNetfilter(logf logger.Logf) error {
return ErrUnsupported
}
// detectNetfilter is not supported on this platform.
func detectNetfilter() (int, error) {
return 0, ErrUnsupported
}
// debugIptables is not supported on this platform.
func debugIptables(logf logger.Logf) error {
return ErrUnsupported
}
// detectIptables is not supported on this platform.
func detectIptables() (int, error) {
return 0, ErrUnsupported
}

292
vendor/tailscale.com/util/linuxfw/nftables.go generated vendored Normal file
View File

@@ -0,0 +1,292 @@
// Copyright (c) Tailscale Inc & AUTHORS
// SPDX-License-Identifier: BSD-3-Clause
// TODO(#8502): add support for more architectures
//go:build linux && (arm64 || amd64)
package linuxfw
import (
"cmp"
"fmt"
"sort"
"strings"
"github.com/google/nftables"
"github.com/google/nftables/expr"
"github.com/google/nftables/xt"
"github.com/josharian/native"
"golang.org/x/sys/unix"
"tailscale.com/types/logger"
)
// DebugNetfilter prints debug information about netfilter rules to the
// provided log function.
func DebugNetfilter(logf logger.Logf) error {
conn, err := nftables.New()
if err != nil {
return err
}
chains, err := conn.ListChains()
if err != nil {
return fmt.Errorf("cannot list chains: %w", err)
}
if len(chains) == 0 {
logf("netfilter: no chains")
return nil
}
for _, chain := range chains {
logf("netfilter: table=%s chain=%s", chain.Table.Name, chain.Name)
rules, err := conn.GetRules(chain.Table, chain)
if err != nil {
continue
}
sort.Slice(rules, func(i, j int) bool {
return rules[i].Position < rules[j].Position
})
for i, rule := range rules {
logf("netfilter: rule[%d]: pos=%d flags=%d", i, rule.Position, rule.Flags)
for _, ex := range rule.Exprs {
switch v := ex.(type) {
case *expr.Meta:
key := cmp.Or(metaKeyNames[v.Key], "UNKNOWN")
logf("netfilter: Meta: key=%s source_register=%v register=%d", key, v.SourceRegister, v.Register)
case *expr.Cmp:
op := cmp.Or(cmpOpNames[v.Op], "UNKNOWN")
logf("netfilter: Cmp: op=%s register=%d data=%s", op, v.Register, formatMaybePrintable(v.Data))
case *expr.Counter:
// don't print
case *expr.Verdict:
kind := cmp.Or(verdictNames[v.Kind], "UNKNOWN")
logf("netfilter: Verdict: kind=%s data=%s", kind, v.Chain)
case *expr.Target:
logf("netfilter: Target: name=%s info=%s", v.Name, printTargetInfo(v.Name, v.Info))
case *expr.Match:
logf("netfilter: Match: name=%s info=%+v", v.Name, printMatchInfo(v.Name, v.Info))
case *expr.Payload:
logf("netfilter: Payload: op=%s src=%d dst=%d base=%s offset=%d len=%d",
payloadOperationTypeNames[v.OperationType],
v.SourceRegister, v.DestRegister,
payloadBaseNames[v.Base],
v.Offset, v.Len)
// TODO(andrew): csum
case *expr.Bitwise:
var xor string
for _, b := range v.Xor {
if b != 0 {
xor = fmt.Sprintf(" xor=%v", v.Xor)
break
}
}
logf("netfilter: Bitwise: src=%d dst=%d len=%d mask=%v%s",
v.SourceRegister, v.DestRegister, v.Len, v.Mask, xor)
default:
logf("netfilter: unknown %T: %+v", v, v)
}
}
}
}
return nil
}
// detectNetfilter returns the number of nftables rules present in the system.
func detectNetfilter() (int, error) {
// First try creating a dummy postrouting chain. Empirically, we have
// noticed that on some devices there is partial nftables support and the
// kernel rejects some chains that are valid on other devices. This is a
// workaround to detect that case.
//
// This specifically allows us to run on GKE nodes using COS images, which
// have partial nftables support (as of 2023-10-18). When we try to create a
// dummy postrouting chain, we get an error like:
// add chain: conn.Receive: netlink receive: no such file or directory
nft, err := newNfTablesRunner(logger.Discard)
if err != nil {
return 0, FWModeNotSupportedError{
Mode: FirewallModeNfTables,
Err: fmt.Errorf("cannot create nftables runner: %w", err),
}
}
if err := nft.createDummyPostroutingChains(); err != nil {
return 0, FWModeNotSupportedError{
Mode: FirewallModeNfTables,
Err: err,
}
}
conn, err := nftables.New()
if err != nil {
return 0, FWModeNotSupportedError{
Mode: FirewallModeNfTables,
Err: err,
}
}
chains, err := conn.ListChains()
if err != nil {
return 0, FWModeNotSupportedError{
Mode: FirewallModeNfTables,
Err: fmt.Errorf("cannot list chains: %w", err),
}
}
var validRules int
for _, chain := range chains {
rules, err := conn.GetRules(chain.Table, chain)
if err != nil {
continue
}
validRules += len(rules)
}
return validRules, nil
}
func printMatchInfo(name string, info xt.InfoAny) string {
var sb strings.Builder
sb.WriteString(`{`)
var handled bool = true
switch v := info.(type) {
// TODO(andrew): we should support these common types
//case *xt.ConntrackMtinfo3:
//case *xt.ConntrackMtinfo2:
case *xt.Tcp:
fmt.Fprintf(&sb, "Src:%s Dst:%s", formatPortRange(v.SrcPorts), formatPortRange(v.DstPorts))
if v.Option != 0 {
fmt.Fprintf(&sb, " Option:%d", v.Option)
}
if v.FlagsMask != 0 {
fmt.Fprintf(&sb, " FlagsMask:%d", v.FlagsMask)
}
if v.FlagsCmp != 0 {
fmt.Fprintf(&sb, " FlagsCmp:%d", v.FlagsCmp)
}
if v.InvFlags != 0 {
fmt.Fprintf(&sb, " InvFlags:%d", v.InvFlags)
}
case *xt.Udp:
fmt.Fprintf(&sb, "Src:%s Dst:%s", formatPortRange(v.SrcPorts), formatPortRange(v.DstPorts))
if v.InvFlags != 0 {
fmt.Fprintf(&sb, " InvFlags:%d", v.InvFlags)
}
case *xt.AddrType:
var sprefix, dprefix string
if v.InvertSource {
sprefix = "!"
}
if v.InvertDest {
dprefix = "!"
}
// TODO(andrew): translate source/dest
fmt.Fprintf(&sb, "Source:%s%d Dest:%s%d", sprefix, v.Source, dprefix, v.Dest)
case *xt.AddrTypeV1:
// TODO(andrew): translate source/dest
fmt.Fprintf(&sb, "Source:%d Dest:%d", v.Source, v.Dest)
var flags []string
for flag, name := range addrTypeFlagNames {
if v.Flags&flag != 0 {
flags = append(flags, name)
}
}
if len(flags) > 0 {
sort.Strings(flags)
fmt.Fprintf(&sb, "Flags:%s", strings.Join(flags, ","))
}
default:
handled = false
}
if handled {
sb.WriteString(`}`)
return sb.String()
}
unknown, ok := info.(*xt.Unknown)
if !ok {
return fmt.Sprintf("(%T)%+v", info, info)
}
data := []byte(*unknown)
// Things where upstream has no type
handled = true
switch name {
case "pkttype":
if len(data) != 8 {
handled = false
break
}
pkttype := int(native.Endian.Uint32(data[0:4]))
invert := int(native.Endian.Uint32(data[4:8]))
var invertPrefix string
if invert != 0 {
invertPrefix = "!"
}
pkttypeName := packetTypeNames[pkttype]
if pkttypeName != "" {
fmt.Fprintf(&sb, "PktType:%s%s", invertPrefix, pkttypeName)
} else {
fmt.Fprintf(&sb, "PktType:%s%d", invertPrefix, pkttype)
}
default:
handled = false
}
if !handled {
return fmt.Sprintf("(%T)%+v", info, info)
}
sb.WriteString(`}`)
return sb.String()
}
func printTargetInfo(name string, info xt.InfoAny) string {
var sb strings.Builder
sb.WriteString(`{`)
unknown, ok := info.(*xt.Unknown)
if !ok {
return fmt.Sprintf("(%T)%+v", info, info)
}
data := []byte(*unknown)
// Things where upstream has no type
switch name {
case "LOG":
if len(data) != 32 {
fmt.Fprintf(&sb, `Error:"bad size; want 32, got %d"`, len(data))
break
}
level := data[0]
logflags := data[1]
prefix := unix.ByteSliceToString(data[2:])
fmt.Fprintf(&sb, "Level:%d LogFlags:%d Prefix:%q", level, logflags, prefix)
default:
return fmt.Sprintf("(%T)%+v", info, info)
}
sb.WriteString(`}`)
return sb.String()
}

245
vendor/tailscale.com/util/linuxfw/nftables_for_svcs.go generated vendored Normal file
View File

@@ -0,0 +1,245 @@
// Copyright (c) Tailscale Inc & AUTHORS
// SPDX-License-Identifier: BSD-3-Clause
//go:build linux
package linuxfw
import (
"errors"
"fmt"
"net/netip"
"reflect"
"strings"
"github.com/google/nftables"
"github.com/google/nftables/binaryutil"
"github.com/google/nftables/expr"
"golang.org/x/sys/unix"
)
// This file contains functionality that is currently (09/2024) used to set up
// routing for the Tailscale Kubernetes operator egress proxies. A tailnet
// service (identified by tailnet IP or FQDN) that gets exposed to cluster
// workloads gets a separate prerouting chain created for it for each IP family
// of the chain's target addresses. Each service's prerouting chain contains one
// or more portmapping rules. A portmapping rule DNATs traffic received on a
// particular port to a port of the tailnet service. Creating a chain per
// service makes it easier to delete a service when no longer needed and helps
// with readability.
// EnsurePortMapRuleForSvc:
// - ensures that nat table exists
// - ensures that there is a prerouting chain for the given service and IP family of the target address in the nat table
// - ensures that there is a portmapping rule matching the given portmap (only creates the rule if it does not already exist)
func (n *nftablesRunner) EnsurePortMapRuleForSvc(svc, tun string, targetIP netip.Addr, pm PortMap) error {
t, ch, err := n.ensureChainForSvc(svc, targetIP)
if err != nil {
return fmt.Errorf("error ensuring chain for %s: %w", svc, err)
}
meta := svcPortMapRuleMeta(svc, targetIP, pm)
rule, err := n.findRuleByMetadata(t, ch, meta)
if err != nil {
return fmt.Errorf("error looking up rule: %w", err)
}
if rule != nil {
return nil
}
p, err := protoFromString(pm.Protocol)
if err != nil {
return fmt.Errorf("error converting protocol %s: %w", pm.Protocol, err)
}
rule = portMapRule(t, ch, tun, targetIP, pm.MatchPort, pm.TargetPort, p, meta)
n.conn.InsertRule(rule)
return n.conn.Flush()
}
// DeletePortMapRuleForSvc deletes a portmapping rule in the given service/IP family chain.
// It finds the matching rule using metadata attached to the rule.
// The caller is expected to call DeleteSvc if the whole service (the chain)
// needs to be deleted, so we don't deal with the case where this is the only
// rule in the chain here.
func (n *nftablesRunner) DeletePortMapRuleForSvc(svc, tun string, targetIP netip.Addr, pm PortMap) error {
table, err := n.getNFTByAddr(targetIP)
if err != nil {
return fmt.Errorf("error setting up nftables for IP family of %s: %w", targetIP, err)
}
t, err := getTableIfExists(n.conn, table.Proto, "nat")
if err != nil {
return fmt.Errorf("error checking if nat table exists: %w", err)
}
if t == nil {
return nil
}
ch, err := getChainFromTable(n.conn, t, svc)
if err != nil && !errors.Is(err, errorChainNotFound{t.Name, svc}) {
return fmt.Errorf("error checking if chain %s exists: %w", svc, err)
}
if errors.Is(err, errorChainNotFound{t.Name, svc}) {
return nil // service chain does not exist, so neither does the portmapping rule
}
meta := svcPortMapRuleMeta(svc, targetIP, pm)
rule, err := n.findRuleByMetadata(t, ch, meta)
if err != nil {
return fmt.Errorf("error checking if rule exists: %w", err)
}
if rule == nil {
return nil
}
if err := n.conn.DelRule(rule); err != nil {
return fmt.Errorf("error deleting rule: %w", err)
}
return n.conn.Flush()
}
// DeleteSvc deletes the chains for the given service if any exist.
func (n *nftablesRunner) DeleteSvc(svc, tun string, targetIPs []netip.Addr, pm []PortMap) error {
for _, tip := range targetIPs {
table, err := n.getNFTByAddr(tip)
if err != nil {
return fmt.Errorf("error setting up nftables for IP family of %s: %w", tip, err)
}
t, err := getTableIfExists(n.conn, table.Proto, "nat")
if err != nil {
return fmt.Errorf("error checking if nat table exists: %w", err)
}
if t == nil {
return nil
}
ch, err := getChainFromTable(n.conn, t, svc)
if err != nil && !errors.Is(err, errorChainNotFound{t.Name, svc}) {
return fmt.Errorf("error checking if chain %s exists: %w", svc, err)
}
if errors.Is(err, errorChainNotFound{t.Name, svc}) {
return nil
}
n.conn.DelChain(ch)
}
return n.conn.Flush()
}
func portMapRule(t *nftables.Table, ch *nftables.Chain, tun string, targetIP netip.Addr, matchPort, targetPort uint16, proto uint8, meta []byte) *nftables.Rule {
var fam uint32
if targetIP.Is4() {
fam = unix.NFPROTO_IPV4
} else {
fam = unix.NFPROTO_IPV6
}
rule := &nftables.Rule{
Table: t,
Chain: ch,
UserData: meta,
Exprs: []expr.Any{
&expr.Meta{Key: expr.MetaKeyOIFNAME, Register: 1},
&expr.Cmp{
Op: expr.CmpOpNeq,
Register: 1,
Data: []byte(tun),
},
&expr.Meta{Key: expr.MetaKeyL4PROTO, Register: 1},
&expr.Cmp{
Op: expr.CmpOpEq,
Register: 1,
Data: []byte{proto},
},
&expr.Payload{
DestRegister: 1,
Base: expr.PayloadBaseTransportHeader,
Offset: 2,
Len: 2,
},
&expr.Cmp{
Op: expr.CmpOpEq,
Register: 1,
Data: binaryutil.BigEndian.PutUint16(matchPort),
},
&expr.Immediate{
Register: 1,
Data: targetIP.AsSlice(),
},
&expr.Immediate{
Register: 2,
Data: binaryutil.BigEndian.PutUint16(targetPort),
},
&expr.NAT{
Type: expr.NATTypeDestNAT,
Family: fam,
RegAddrMin: 1,
RegAddrMax: 1,
RegProtoMin: 2,
RegProtoMax: 2,
},
},
}
return rule
}
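// Illustrative sketch (names and addresses are made up): for tun "tailscale0",
// proto TCP, matchPort 80, targetPort 8080 and targetIP 10.0.0.5, the
// expression list above corresponds roughly to the nft rule
//
//	oifname != "tailscale0" meta l4proto tcp tcp dport 80 dnat to 10.0.0.5:8080
//
// The Payload expression reads bytes 2-3 of the transport header, which is the
// destination port for both TCP and UDP.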
// svcPortMapRuleMeta generates metadata for a rule.
// This metadata can then be used to find the rule.
// https://github.com/google/nftables/issues/48
func svcPortMapRuleMeta(svcName string, targetIP netip.Addr, pm PortMap) []byte {
return []byte(fmt.Sprintf("svc:%s,targetIP:%s:matchPort:%v,targetPort:%v,proto:%v", svcName, targetIP.String(), pm.MatchPort, pm.TargetPort, pm.Protocol))
}
func (n *nftablesRunner) findRuleByMetadata(t *nftables.Table, ch *nftables.Chain, meta []byte) (*nftables.Rule, error) {
if n.conn == nil || t == nil || ch == nil || len(meta) == 0 {
return nil, nil
}
rules, err := n.conn.GetRules(t, ch)
if err != nil {
return nil, fmt.Errorf("error listing rules: %w", err)
}
for _, rule := range rules {
if reflect.DeepEqual(rule.UserData, meta) {
return rule, nil
}
}
return nil, nil
}
func (n *nftablesRunner) ensureChainForSvc(svc string, targetIP netip.Addr) (*nftables.Table, *nftables.Chain, error) {
polAccept := nftables.ChainPolicyAccept
table, err := n.getNFTByAddr(targetIP)
if err != nil {
return nil, nil, fmt.Errorf("error setting up nftables for IP family of %v: %w", targetIP, err)
}
nat, err := createTableIfNotExist(n.conn, table.Proto, "nat")
if err != nil {
return nil, nil, fmt.Errorf("error ensuring nat table: %w", err)
}
svcCh, err := getOrCreateChain(n.conn, chainInfo{
table: nat,
name: svc,
chainType: nftables.ChainTypeNAT,
chainHook: nftables.ChainHookPrerouting,
chainPriority: nftables.ChainPriorityNATDest,
chainPolicy: &polAccept,
})
if err != nil {
return nil, nil, fmt.Errorf("error ensuring prerouting chain: %w", err)
}
return nat, svcCh, nil
}
// PortMap is the port mapping for a service rule.
type PortMap struct {
// MatchPort is the local port to which the rule should apply.
MatchPort uint16
// TargetPort is the port to which the traffic should be forwarded.
TargetPort uint16
// Protocol is the protocol to match packets on. Only TCP and UDP are
// supported.
Protocol string
}
func protoFromString(s string) (uint8, error) {
switch strings.ToLower(s) {
case "tcp":
return unix.IPPROTO_TCP, nil
case "udp":
return unix.IPPROTO_UDP, nil
default:
return 0, fmt.Errorf("unrecognized protocol: %q", s)
}
}

2055
vendor/tailscale.com/util/linuxfw/nftables_runner.go generated vendored Normal file

File diff suppressed because it is too large Load Diff

95
vendor/tailscale.com/util/linuxfw/nftables_types.go generated vendored Normal file
View File

@@ -0,0 +1,95 @@
// Copyright (c) Tailscale Inc & AUTHORS
// SPDX-License-Identifier: BSD-3-Clause
// TODO(#8502): add support for more architectures
//go:build linux && (arm64 || amd64)
package linuxfw
import (
"github.com/google/nftables/expr"
"github.com/google/nftables/xt"
)
var metaKeyNames = map[expr.MetaKey]string{
expr.MetaKeyLEN: "LEN",
expr.MetaKeyPROTOCOL: "PROTOCOL",
expr.MetaKeyPRIORITY: "PRIORITY",
expr.MetaKeyMARK: "MARK",
expr.MetaKeyIIF: "IIF",
expr.MetaKeyOIF: "OIF",
expr.MetaKeyIIFNAME: "IIFNAME",
expr.MetaKeyOIFNAME: "OIFNAME",
expr.MetaKeyIIFTYPE: "IIFTYPE",
expr.MetaKeyOIFTYPE: "OIFTYPE",
expr.MetaKeySKUID: "SKUID",
expr.MetaKeySKGID: "SKGID",
expr.MetaKeyNFTRACE: "NFTRACE",
expr.MetaKeyRTCLASSID: "RTCLASSID",
expr.MetaKeySECMARK: "SECMARK",
expr.MetaKeyNFPROTO: "NFPROTO",
expr.MetaKeyL4PROTO: "L4PROTO",
expr.MetaKeyBRIIIFNAME: "BRIIIFNAME",
expr.MetaKeyBRIOIFNAME: "BRIOIFNAME",
expr.MetaKeyPKTTYPE: "PKTTYPE",
expr.MetaKeyCPU: "CPU",
expr.MetaKeyIIFGROUP: "IIFGROUP",
expr.MetaKeyOIFGROUP: "OIFGROUP",
expr.MetaKeyCGROUP: "CGROUP",
expr.MetaKeyPRANDOM: "PRANDOM",
}
var cmpOpNames = map[expr.CmpOp]string{
expr.CmpOpEq: "EQ",
expr.CmpOpNeq: "NEQ",
expr.CmpOpLt: "LT",
expr.CmpOpLte: "LTE",
expr.CmpOpGt: "GT",
expr.CmpOpGte: "GTE",
}
var verdictNames = map[expr.VerdictKind]string{
expr.VerdictReturn: "RETURN",
expr.VerdictGoto: "GOTO",
expr.VerdictJump: "JUMP",
expr.VerdictBreak: "BREAK",
expr.VerdictContinue: "CONTINUE",
expr.VerdictDrop: "DROP",
expr.VerdictAccept: "ACCEPT",
expr.VerdictStolen: "STOLEN",
expr.VerdictQueue: "QUEUE",
expr.VerdictRepeat: "REPEAT",
expr.VerdictStop: "STOP",
}
var payloadOperationTypeNames = map[expr.PayloadOperationType]string{
expr.PayloadLoad: "LOAD",
expr.PayloadWrite: "WRITE",
}
var payloadBaseNames = map[expr.PayloadBase]string{
expr.PayloadBaseLLHeader: "ll-header",
expr.PayloadBaseNetworkHeader: "network-header",
expr.PayloadBaseTransportHeader: "transport-header",
}
var packetTypeNames = map[int]string{
0 /* PACKET_HOST */ : "unicast",
1 /* PACKET_BROADCAST */ : "broadcast",
2 /* PACKET_MULTICAST */ : "multicast",
}
var addrTypeFlagNames = map[xt.AddrTypeFlags]string{
xt.AddrTypeUnspec: "unspec",
xt.AddrTypeUnicast: "unicast",
xt.AddrTypeLocal: "local",
xt.AddrTypeBroadcast: "broadcast",
xt.AddrTypeAnycast: "anycast",
xt.AddrTypeMulticast: "multicast",
xt.AddrTypeBlackhole: "blackhole",
xt.AddrTypeUnreachable: "unreachable",
xt.AddrTypeProhibit: "prohibit",
xt.AddrTypeThrow: "throw",
xt.AddrTypeNat: "nat",
xt.AddrTypeXresolve: "xresolve",
}

70
vendor/tailscale.com/util/mak/mak.go generated vendored Normal file
View File

@@ -0,0 +1,70 @@
// Copyright (c) Tailscale Inc & AUTHORS
// SPDX-License-Identifier: BSD-3-Clause
// Package mak helps make maps. It contains generic helpers to make/assign
// things, notably to maps, but also slices.
package mak
import (
"fmt"
"reflect"
)
// Set populates an entry in a map, making the map if necessary.
//
// That is, it assigns (*m)[k] = v, making *m if it was nil.
func Set[K comparable, V any, T ~map[K]V](m *T, k K, v V) {
if *m == nil {
*m = make(map[K]V)
}
(*m)[k] = v
}
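// Illustrative usage (from a caller's point of view; keys and values are made up):
//
//	var m map[string]int // nil until the first Set
//	mak.Set(&m, "foo", 1)
//	mak.Set(&m, "bar", 2)
//	// m == map[string]int{"foo": 1, "bar": 2}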
// NonNil takes a pointer to a Go data structure
// (currently only a slice or a map) and makes sure it's non-nil for
// JSON serialization. (In particular, JavaScript clients usually want
// the field to be defined after they decode the JSON.)
//
// Deprecated: use NonNilSliceForJSON or NonNilMapForJSON instead.
func NonNil(ptr any) {
if ptr == nil {
panic("nil interface")
}
rv := reflect.ValueOf(ptr)
if rv.Kind() != reflect.Ptr {
panic(fmt.Sprintf("kind %v, not Ptr", rv.Kind()))
}
if rv.Pointer() == 0 {
panic("nil pointer")
}
rv = rv.Elem()
if rv.Pointer() != 0 {
return
}
switch rv.Type().Kind() {
case reflect.Slice:
rv.Set(reflect.MakeSlice(rv.Type(), 0, 0))
case reflect.Map:
rv.Set(reflect.MakeMap(rv.Type()))
}
}
// NonNilSliceForJSON makes sure that *slicePtr is non-nil so it
// won't be omitted from JSON serialization and possibly confuse JavaScript
// clients expecting it to be present.
func NonNilSliceForJSON[T any, S ~[]T](slicePtr *S) {
if *slicePtr != nil {
return
}
*slicePtr = make([]T, 0)
}
// NonNilMapForJSON makes sure that *mapPtr is non-nil so it
// won't be omitted from JSON serialization and possibly confuse JavaScript
// clients expecting it to be present.
func NonNilMapForJSON[K comparable, V any, M ~map[K]V](mapPtr *M) {
if *mapPtr != nil {
return
}
*mapPtr = make(M)
}

136
vendor/tailscale.com/util/multierr/multierr.go generated vendored Normal file
View File

@@ -0,0 +1,136 @@
// Copyright (c) Tailscale Inc & AUTHORS
// SPDX-License-Identifier: BSD-3-Clause
// Package multierr provides a simple multiple-error type.
// It was inspired by github.com/go-multierror/multierror.
package multierr
import (
"errors"
"slices"
"strings"
)
// An Error represents multiple errors.
type Error struct {
errs []error
}
// Error implements the error interface.
func (e Error) Error() string {
s := new(strings.Builder)
s.WriteString("multiple errors:")
for _, err := range e.errs {
s.WriteString("\n\t")
s.WriteString(err.Error())
}
return s.String()
}
// Errors returns a slice containing all errors in e.
func (e Error) Errors() []error {
return slices.Clone(e.errs)
}
// Unwrap returns the underlying errors as-is.
func (e Error) Unwrap() []error {
// Do not clone since Unwrap requires callers to not mutate the slice.
// See the documentation in the Go "errors" package.
return e.errs
}
// New returns an error composed from errs.
// Some errors in errs get special treatment:
// - nil errors are discarded
// - errors of type Error are expanded into the top level
//
// If the resulting slice has length 0, New returns nil.
// If the resulting slice has length 1, New returns that error.
// If the resulting slice has length > 1, New returns that slice as an Error.
func New(errs ...error) error {
// First count the number of errors to avoid allocating.
var n int
var errFirst error
for _, e := range errs {
switch e := e.(type) {
case nil:
continue
case Error:
n += len(e.errs)
if errFirst == nil && len(e.errs) > 0 {
errFirst = e.errs[0]
}
default:
n++
if errFirst == nil {
errFirst = e
}
}
}
if n <= 1 {
return errFirst // nil if n == 0
}
// More than one error, allocate slice and construct the multi-error.
dst := make([]error, 0, n)
for _, e := range errs {
switch e := e.(type) {
case nil:
continue
case Error:
dst = append(dst, e.errs...)
default:
dst = append(dst, e)
}
}
return Error{errs: dst}
}
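// Illustrative usage (error texts are made up):
//
//	errA := errors.New("a")
//	nested := multierr.New(errors.New("b"), errors.New("c"))
//	err := multierr.New(errA, nil, nested)
//	// err is an Error whose Errors() are [a, b, c]: the nil was discarded and
//	// the nested Error was flattened into the top level.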
// Is reports whether any error in e matches target.
func (e Error) Is(target error) bool {
for _, err := range e.errs {
if errors.Is(err, target) {
return true
}
}
return false
}
// As finds the first error in e that matches target, and if any is found,
// sets target to that error value and returns true. Otherwise, it returns false.
func (e Error) As(target any) bool {
for _, err := range e.errs {
if ok := errors.As(err, target); ok {
return true
}
}
return false
}
// Range performs a pre-order, depth-first iteration of the error tree
// by successively unwrapping all error values.
// For each iteration it calls fn with the current error value and
// stops iteration if it ever reports false.
func Range(err error, fn func(error) bool) bool {
if err == nil {
return true
}
if !fn(err) {
return false
}
switch err := err.(type) {
case interface{ Unwrap() error }:
if err := err.Unwrap(); err != nil {
if !Range(err, fn) {
return false
}
}
case interface{ Unwrap() []error }:
for _, err := range err.Unwrap() {
if !Range(err, fn) {
return false
}
}
}
return true
}
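// Illustrative usage (the predicate is made up): scanning an error tree for a
// particular condition and stopping early once it is found:
//
//	var found bool
//	multierr.Range(err, func(e error) bool {
//		if strings.Contains(e.Error(), "permission denied") {
//			found = true
//			return false // stop iterating
//		}
//		return true
//	})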

25
vendor/tailscale.com/util/must/must.go generated vendored Normal file
View File

@@ -0,0 +1,25 @@
// Copyright (c) Tailscale Inc & AUTHORS
// SPDX-License-Identifier: BSD-3-Clause
// Package must assists in calling functions that must succeed.
//
// Example usage:
//
// var target = must.Get(url.Parse(...))
// must.Do(close())
package must
// Do panics if err is non-nil.
func Do(err error) {
if err != nil {
panic(err)
}
}
// Get returns v as is. It panics if err is non-nil.
func Get[T any](v T, err error) T {
if err != nil {
panic(err)
}
return v
}

116
vendor/tailscale.com/util/nocasemaps/nocase.go generated vendored Normal file
View File

@@ -0,0 +1,116 @@
// Copyright (c) Tailscale Inc & AUTHORS
// SPDX-License-Identifier: BSD-3-Clause
// Package nocasemaps provides efficient functions to set and get entries in Go maps
// keyed by a string, where the string is always lower-case.
package nocasemaps
import (
"unicode"
"unicode/utf8"
)
// TODO(https://github.com/golang/go/discussions/54245):
// Define a generic Map type instead. The main reason to avoid that is because
// there is currently no convenient API for iteration.
// An opaque Map type would force callers to interact with the map through
// the methods, preventing accidental interactions with the underlying map
// without using functions in this package.
const stackArraySize = 32
// Get is equivalent to:
//
// v := m[strings.ToLower(k)]
func Get[K ~string, V any](m map[K]V, k K) V {
if isLowerASCII(string(k)) {
return m[k]
}
var a [stackArraySize]byte
return m[K(appendToLower(a[:0], string(k)))]
}
// GetOk is equivalent to:
//
// v, ok := m[strings.ToLower(k)]
func GetOk[K ~string, V any](m map[K]V, k K) (V, bool) {
if isLowerASCII(string(k)) {
v, ok := m[k]
return v, ok
}
var a [stackArraySize]byte
v, ok := m[K(appendToLower(a[:0], string(k)))]
return v, ok
}
// Set is equivalent to:
//
// m[strings.ToLower(k)] = v
func Set[K ~string, V any](m map[K]V, k K, v V) {
if isLowerASCII(string(k)) {
m[k] = v
return
}
// TODO(https://go.dev/issues/55930): This currently always allocates.
// An optimization to the compiler and runtime could make this allocate-free
// in the event that we are overwriting a map entry.
//
// Alternatively, we could use string interning.
// See an example intern data structure, see:
// https://github.com/go-json-experiment/json/blob/master/intern.go
var a [stackArraySize]byte
m[K(appendToLower(a[:0], string(k)))] = v
}
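// Illustrative usage (keys and values are made up):
//
//	m := map[string]int{}
//	nocasemaps.Set(m, "Foo.Example.COM", 1)
//	v := nocasemaps.Get(m, "foo.example.com") // v == 1; the stored key is all lower-case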
// Delete is equivalent to:
//
// delete(m, strings.ToLower(k))
func Delete[K ~string, V any](m map[K]V, k K) {
if isLowerASCII(string(k)) {
delete(m, k)
return
}
var a [stackArraySize]byte
delete(m, K(appendToLower(a[:0], string(k))))
}
// AppendSliceElem is equivalent to:
//
// m[strings.ToLower(k)] = append(m[strings.ToLower(k)], vs...)
func AppendSliceElem[K ~string, S []E, E any](m map[K]S, k K, vs ...E) {
// if the key is already lowercased
if isLowerASCII(string(k)) {
m[k] = append(m[k], vs...)
return
}
// if the key needs to become lowercase, lowercase it via appendToLower
var a [stackArraySize]byte
s := appendToLower(a[:0], string(k))
m[K(s)] = append(m[K(s)], vs...)
}
func isLowerASCII(s string) bool {
for i := range len(s) {
if c := s[i]; c >= utf8.RuneSelf || ('A' <= c && c <= 'Z') {
return false
}
}
return true
}
func appendToLower(b []byte, s string) []byte {
for i := 0; i < len(s); i++ {
switch c := s[i]; {
case 'A' <= c && c <= 'Z':
b = append(b, c+('a'-'A'))
case c < utf8.RuneSelf:
b = append(b, c)
default:
r, n := utf8.DecodeRuneInString(s[i:])
b = utf8.AppendRune(b, unicode.ToLower(r))
i += n - 1 // -1 to compensate for i++ in loop advancement
}
}
return b
}

View File

@@ -0,0 +1,314 @@
// Copyright (c) Tailscale Inc & AUTHORS
// SPDX-License-Identifier: BSD-3-Clause
// Code generated by 'go generate'; DO NOT EDIT.
// Package wsc provides access to the Windows Security Center API.
package wsc
import (
"runtime"
"syscall"
"unsafe"
"github.com/dblohm7/wingoes"
"github.com/dblohm7/wingoes/com"
"github.com/dblohm7/wingoes/com/automation"
)
var (
CLSID_WSCProductList = &com.CLSID{0x17072F7B, 0x9ABE, 0x4A74, [8]byte{0xA2, 0x61, 0x1E, 0xB7, 0x6B, 0x55, 0x10, 0x7A}}
)
var (
IID_IWSCProductList = &com.IID{0x722A338C, 0x6E8E, 0x4E72, [8]byte{0xAC, 0x27, 0x14, 0x17, 0xFB, 0x0C, 0x81, 0xC2}}
IID_IWscProduct = &com.IID{0x8C38232E, 0x3A45, 0x4A27, [8]byte{0x92, 0xB0, 0x1A, 0x16, 0xA9, 0x75, 0xF6, 0x69}}
)
type WSC_SECURITY_PRODUCT_STATE int32
const (
WSC_SECURITY_PRODUCT_STATE_ON = WSC_SECURITY_PRODUCT_STATE(0)
WSC_SECURITY_PRODUCT_STATE_OFF = WSC_SECURITY_PRODUCT_STATE(1)
WSC_SECURITY_PRODUCT_STATE_SNOOZED = WSC_SECURITY_PRODUCT_STATE(2)
WSC_SECURITY_PRODUCT_STATE_EXPIRED = WSC_SECURITY_PRODUCT_STATE(3)
)
type WSC_SECURITY_SIGNATURE_STATUS int32
const (
WSC_SECURITY_PRODUCT_OUT_OF_DATE = WSC_SECURITY_SIGNATURE_STATUS(0)
WSC_SECURITY_PRODUCT_UP_TO_DATE = WSC_SECURITY_SIGNATURE_STATUS(1)
)
type WSC_SECURITY_PROVIDER int32
const (
WSC_SECURITY_PROVIDER_FIREWALL = WSC_SECURITY_PROVIDER(1)
WSC_SECURITY_PROVIDER_AUTOUPDATE_SETTINGS = WSC_SECURITY_PROVIDER(2)
WSC_SECURITY_PROVIDER_ANTIVIRUS = WSC_SECURITY_PROVIDER(4)
WSC_SECURITY_PROVIDER_ANTISPYWARE = WSC_SECURITY_PROVIDER(8)
WSC_SECURITY_PROVIDER_INTERNET_SETTINGS = WSC_SECURITY_PROVIDER(16)
WSC_SECURITY_PROVIDER_USER_ACCOUNT_CONTROL = WSC_SECURITY_PROVIDER(32)
WSC_SECURITY_PROVIDER_SERVICE = WSC_SECURITY_PROVIDER(64)
WSC_SECURITY_PROVIDER_NONE = WSC_SECURITY_PROVIDER(0)
WSC_SECURITY_PROVIDER_ALL = WSC_SECURITY_PROVIDER(127)
)
type SECURITY_PRODUCT_TYPE int32
const (
SECURITY_PRODUCT_TYPE_ANTIVIRUS = SECURITY_PRODUCT_TYPE(0)
SECURITY_PRODUCT_TYPE_FIREWALL = SECURITY_PRODUCT_TYPE(1)
SECURITY_PRODUCT_TYPE_ANTISPYWARE = SECURITY_PRODUCT_TYPE(2)
)
type IWscProductABI struct {
com.IUnknownABI // Technically IDispatch, but we're bypassing all of that atm
}
func (abi *IWscProductABI) GetProductName() (pVal string, err error) {
var t0 automation.BSTR
method := unsafe.Slice(abi.Vtbl, 14)[7]
hr, _, _ := syscall.SyscallN(method, uintptr(unsafe.Pointer(abi)), uintptr(unsafe.Pointer(&t0)))
if e := wingoes.ErrorFromHRESULT(wingoes.HRESULT(hr)); !e.IsOK() {
err = e
if e.Failed() {
return
}
}
pVal = t0.String()
t0.Close()
return
}
func (abi *IWscProductABI) GetProductState() (val WSC_SECURITY_PRODUCT_STATE, err error) {
method := unsafe.Slice(abi.Vtbl, 14)[8]
hr, _, _ := syscall.SyscallN(method, uintptr(unsafe.Pointer(abi)), uintptr(unsafe.Pointer(&val)))
if e := wingoes.ErrorFromHRESULT(wingoes.HRESULT(hr)); !e.IsOK() {
err = e
}
return
}
func (abi *IWscProductABI) GetSignatureStatus() (val WSC_SECURITY_SIGNATURE_STATUS, err error) {
method := unsafe.Slice(abi.Vtbl, 14)[9]
hr, _, _ := syscall.SyscallN(method, uintptr(unsafe.Pointer(abi)), uintptr(unsafe.Pointer(&val)))
if e := wingoes.ErrorFromHRESULT(wingoes.HRESULT(hr)); !e.IsOK() {
err = e
}
return
}
func (abi *IWscProductABI) GetRemediationPath() (pVal string, err error) {
var t0 automation.BSTR
method := unsafe.Slice(abi.Vtbl, 14)[10]
hr, _, _ := syscall.SyscallN(method, uintptr(unsafe.Pointer(abi)), uintptr(unsafe.Pointer(&t0)))
if e := wingoes.ErrorFromHRESULT(wingoes.HRESULT(hr)); !e.IsOK() {
err = e
if e.Failed() {
return
}
}
pVal = t0.String()
t0.Close()
return
}
func (abi *IWscProductABI) GetProductStateTimestamp() (pVal string, err error) {
var t0 automation.BSTR
method := unsafe.Slice(abi.Vtbl, 14)[11]
hr, _, _ := syscall.SyscallN(method, uintptr(unsafe.Pointer(abi)), uintptr(unsafe.Pointer(&t0)))
if e := wingoes.ErrorFromHRESULT(wingoes.HRESULT(hr)); !e.IsOK() {
err = e
if e.Failed() {
return
}
}
pVal = t0.String()
t0.Close()
return
}
func (abi *IWscProductABI) GetProductGuid() (pVal string, err error) {
var t0 automation.BSTR
method := unsafe.Slice(abi.Vtbl, 14)[12]
hr, _, _ := syscall.SyscallN(method, uintptr(unsafe.Pointer(abi)), uintptr(unsafe.Pointer(&t0)))
if e := wingoes.ErrorFromHRESULT(wingoes.HRESULT(hr)); !e.IsOK() {
err = e
if e.Failed() {
return
}
}
pVal = t0.String()
t0.Close()
return
}
func (abi *IWscProductABI) GetProductIsDefault() (pVal bool, err error) {
var t0 int32
method := unsafe.Slice(abi.Vtbl, 14)[13]
hr, _, _ := syscall.SyscallN(method, uintptr(unsafe.Pointer(abi)), uintptr(unsafe.Pointer(&t0)))
if e := wingoes.ErrorFromHRESULT(wingoes.HRESULT(hr)); !e.IsOK() {
err = e
if e.Failed() {
return
}
}
pVal = t0 != 0
return
}
type WscProduct struct {
com.GenericObject[IWscProductABI]
}
func (o WscProduct) GetProductName() (pVal string, err error) {
p := *(o.Pp)
return p.GetProductName()
}
func (o WscProduct) GetProductState() (val WSC_SECURITY_PRODUCT_STATE, err error) {
p := *(o.Pp)
return p.GetProductState()
}
func (o WscProduct) GetSignatureStatus() (val WSC_SECURITY_SIGNATURE_STATUS, err error) {
p := *(o.Pp)
return p.GetSignatureStatus()
}
func (o WscProduct) GetRemediationPath() (pVal string, err error) {
p := *(o.Pp)
return p.GetRemediationPath()
}
func (o WscProduct) GetProductStateTimestamp() (pVal string, err error) {
p := *(o.Pp)
return p.GetProductStateTimestamp()
}
func (o WscProduct) GetProductGuid() (pVal string, err error) {
p := *(o.Pp)
return p.GetProductGuid()
}
func (o WscProduct) GetProductIsDefault() (pVal bool, err error) {
p := *(o.Pp)
return p.GetProductIsDefault()
}
func (o WscProduct) IID() *com.IID {
return IID_IWscProduct
}
func (o WscProduct) Make(r com.ABIReceiver) any {
if r == nil {
return WscProduct{}
}
runtime.SetFinalizer(r, com.ReleaseABI)
pp := (**IWscProductABI)(unsafe.Pointer(r))
return WscProduct{com.GenericObject[IWscProductABI]{Pp: pp}}
}
func (o WscProduct) MakeFromKnownABI(r **IWscProductABI) WscProduct {
if r == nil {
return WscProduct{}
}
runtime.SetFinalizer(r, func(r **IWscProductABI) { (*r).Release() })
return WscProduct{com.GenericObject[IWscProductABI]{Pp: r}}
}
func (o WscProduct) UnsafeUnwrap() *IWscProductABI {
return *(o.Pp)
}
type IWSCProductListABI struct {
com.IUnknownABI // Technically IDispatch, but we're bypassing all of that atm
}
func (abi *IWSCProductListABI) Initialize(provider WSC_SECURITY_PROVIDER) (err error) {
method := unsafe.Slice(abi.Vtbl, 10)[7]
hr, _, _ := syscall.SyscallN(method, uintptr(unsafe.Pointer(abi)), uintptr(provider))
if e := wingoes.ErrorFromHRESULT(wingoes.HRESULT(hr)); !e.IsOK() {
err = e
}
return
}
func (abi *IWSCProductListABI) GetCount() (val int32, err error) {
method := unsafe.Slice(abi.Vtbl, 10)[8]
hr, _, _ := syscall.SyscallN(method, uintptr(unsafe.Pointer(abi)), uintptr(unsafe.Pointer(&val)))
if e := wingoes.ErrorFromHRESULT(wingoes.HRESULT(hr)); !e.IsOK() {
err = e
}
return
}
func (abi *IWSCProductListABI) GetItem(index uint32) (val WscProduct, err error) {
var t0 *IWscProductABI
method := unsafe.Slice(abi.Vtbl, 10)[9]
hr, _, _ := syscall.SyscallN(method, uintptr(unsafe.Pointer(abi)), uintptr(index), uintptr(unsafe.Pointer(&t0)))
if e := wingoes.ErrorFromHRESULT(wingoes.HRESULT(hr)); !e.IsOK() {
err = e
if e.Failed() {
return
}
}
var r0 WscProduct
val = r0.MakeFromKnownABI(&t0)
return
}
type WSCProductList struct {
com.GenericObject[IWSCProductListABI]
}
func (o WSCProductList) Initialize(provider WSC_SECURITY_PROVIDER) (err error) {
p := *(o.Pp)
return p.Initialize(provider)
}
func (o WSCProductList) GetCount() (val int32, err error) {
p := *(o.Pp)
return p.GetCount()
}
func (o WSCProductList) GetItem(index uint32) (val WscProduct, err error) {
p := *(o.Pp)
return p.GetItem(index)
}
func (o WSCProductList) IID() *com.IID {
return IID_IWSCProductList
}
func (o WSCProductList) Make(r com.ABIReceiver) any {
if r == nil {
return WSCProductList{}
}
runtime.SetFinalizer(r, com.ReleaseABI)
pp := (**IWSCProductListABI)(unsafe.Pointer(r))
return WSCProductList{com.GenericObject[IWSCProductListABI]{Pp: pp}}
}
func (o WSCProductList) UnsafeUnwrap() *IWSCProductListABI {
return *(o.Pp)
}
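// Illustrative sketch (how the product list object is created via COM is not
// shown and is assumed to happen elsewhere): enumerating registered antivirus
// products with a WSCProductList looks roughly like:
//
//	if err := list.Initialize(WSC_SECURITY_PROVIDER_ANTIVIRUS); err != nil {
//		return err
//	}
//	n, err := list.GetCount()
//	if err != nil {
//		return err
//	}
//	for i := int32(0); i < n; i++ {
//		p, err := list.GetItem(uint32(i))
//		if err != nil {
//			continue
//		}
//		name, _ := p.GetProductName()
//		state, _ := p.GetProductState()
//		_, _ = name, state // e.g. log or aggregate these
//	}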

13
vendor/tailscale.com/util/osdiag/mksyscall.go generated vendored Normal file
View File

@@ -0,0 +1,13 @@
// Copyright (c) Tailscale Inc & AUTHORS
// SPDX-License-Identifier: BSD-3-Clause
package osdiag
//go:generate go run golang.org/x/sys/windows/mkwinsyscall -output zsyscall_windows.go mksyscall.go
//go:generate go run golang.org/x/tools/cmd/goimports -w zsyscall_windows.go
//sys globalMemoryStatusEx(memStatus *_MEMORYSTATUSEX) (err error) [int32(failretval)==0] = kernel32.GlobalMemoryStatusEx
//sys regEnumValue(key registry.Key, index uint32, valueName *uint16, valueNameLen *uint32, reserved *uint32, valueType *uint32, pData *byte, cbData *uint32) (ret error) [failretval!=0] = advapi32.RegEnumValueW
//sys wscEnumProtocols(iProtocols *int32, protocolBuffer *wsaProtocolInfo, bufLen *uint32, errno *int32) (ret int32) = ws2_32.WSCEnumProtocols
//sys wscGetProviderInfo(providerId *windows.GUID, infoType _WSC_PROVIDER_INFO_TYPE, info unsafe.Pointer, infoSize *uintptr, flags uint32, errno *int32) (ret int32) = ws2_32.WSCGetProviderInfo
//sys wscGetProviderPath(providerId *windows.GUID, providerDllPath *uint16, providerDllPathLen *int32, errno *int32) (ret int32) = ws2_32.WSCGetProviderPath

20
vendor/tailscale.com/util/osdiag/osdiag.go generated vendored Normal file
View File

@@ -0,0 +1,20 @@
// Copyright (c) Tailscale Inc & AUTHORS
// SPDX-License-Identifier: BSD-3-Clause
// Package osdiag provides loggers for OS-specific diagnostic information.
package osdiag
// LogSupportInfoReason is an enumeration indicating the reason for logging
// support info.
type LogSupportInfoReason int
const (
LogSupportInfoReasonStartup LogSupportInfoReason = iota + 1 // tailscaled is starting up.
LogSupportInfoReasonBugReport // a bugreport is in the process of being gathered.
)
// SupportInfo obtains OS-specific diagnostic information for troubleshooting
// and support. The reason governs the verbosity of the output.
func SupportInfo(reason LogSupportInfoReason) map[string]any {
return supportInfo(reason)
}

10
vendor/tailscale.com/util/osdiag/osdiag_notwindows.go generated vendored Normal file
View File

@@ -0,0 +1,10 @@
// Copyright (c) Tailscale Inc & AUTHORS
// SPDX-License-Identifier: BSD-3-Clause
//go:build !windows
package osdiag
func supportInfo(LogSupportInfoReason) map[string]any {
return nil
}

655
vendor/tailscale.com/util/osdiag/osdiag_windows.go generated vendored Normal file
View File

@@ -0,0 +1,655 @@
// Copyright (c) Tailscale Inc & AUTHORS
// SPDX-License-Identifier: BSD-3-Clause
package osdiag
import (
"encoding/binary"
"errors"
"fmt"
"path/filepath"
"strings"
"unicode/utf16"
"unsafe"
"github.com/dblohm7/wingoes/com"
"github.com/dblohm7/wingoes/pe"
"golang.org/x/sys/windows"
"golang.org/x/sys/windows/registry"
"tailscale.com/util/osdiag/internal/wsc"
"tailscale.com/util/winutil"
"tailscale.com/util/winutil/authenticode"
)
var (
errUnexpectedResult = errors.New("API call returned an unexpected value")
)
const (
maxBinaryValueLen = 128 // we'll truncate any binary values longer than this
maxRegValueNameLen = 16384 // maximum length supported by Windows + 1
initialValueBufLen = 80 // large enough to contain a stringified GUID encoded as UTF-16
)
const (
supportInfoKeyModules = "modules"
supportInfoKeyPageFile = "pageFile"
supportInfoKeyRegistry = "registry"
supportInfoKeySecurity = "securitySoftware"
supportInfoKeyWinsockLSP = "winsockLSP"
)
func supportInfo(reason LogSupportInfoReason) map[string]any {
output := make(map[string]any)
regInfo, err := getRegistrySupportInfo(registry.LOCAL_MACHINE, []string{winutil.RegPolicyBase, winutil.RegBase})
if err == nil {
output[supportInfoKeyRegistry] = regInfo
} else {
output[supportInfoKeyRegistry] = err
}
pageFileInfo, err := getPageFileInfo()
if err == nil {
output[supportInfoKeyPageFile] = pageFileInfo
} else {
output[supportInfoKeyPageFile] = err
}
if reason == LogSupportInfoReasonBugReport {
modInfo, err := getModuleInfo()
if err == nil {
output[supportInfoKeyModules] = modInfo
} else {
output[supportInfoKeyModules] = err
}
output[supportInfoKeySecurity] = getSecurityInfo()
lspInfo, err := getWinsockLSPInfo()
if err == nil {
output[supportInfoKeyWinsockLSP] = lspInfo
} else {
output[supportInfoKeyWinsockLSP] = err
}
}
return output
}
type getRegistrySupportInfoBufs struct {
nameBuf []uint16
valueBuf []byte
}
func getRegistrySupportInfo(root registry.Key, subKeys []string) (map[string]any, error) {
bufs := getRegistrySupportInfoBufs{
nameBuf: make([]uint16, maxRegValueNameLen),
valueBuf: make([]byte, initialValueBufLen),
}
output := make(map[string]any)
for _, subKey := range subKeys {
if err := getRegSubKey(root, subKey, 5, &bufs, output); err != nil && !errors.Is(err, registry.ErrNotExist) {
return nil, fmt.Errorf("getRegistrySupportInfo: %w", err)
}
}
return output, nil
}
func keyString(key registry.Key, subKey string) string {
var keyStr string
switch key {
case registry.CLASSES_ROOT:
keyStr = `HKCR\`
case registry.CURRENT_USER:
keyStr = `HKCU\`
case registry.LOCAL_MACHINE:
keyStr = `HKLM\`
case registry.USERS:
keyStr = `HKU\`
case registry.CURRENT_CONFIG:
keyStr = `HKCC\`
case registry.PERFORMANCE_DATA:
keyStr = `HKPD\`
default:
}
return keyStr + subKey
}
func getRegSubKey(key registry.Key, subKey string, recursionLimit int, bufs *getRegistrySupportInfoBufs, output map[string]any) error {
keyStr := keyString(key, subKey)
k, err := registry.OpenKey(key, subKey, registry.READ)
if err != nil {
return fmt.Errorf("opening %q: %w", keyStr, err)
}
defer k.Close()
kv := make(map[string]any)
index := uint32(0)
loopValues:
for {
nbuf := bufs.nameBuf
nameLen := uint32(len(nbuf))
valueType := uint32(0)
vbuf := bufs.valueBuf
valueLen := uint32(len(vbuf))
err := regEnumValue(k, index, &nbuf[0], &nameLen, nil, &valueType, &vbuf[0], &valueLen)
switch err {
case windows.ERROR_NO_MORE_ITEMS:
break loopValues
case windows.ERROR_MORE_DATA:
bufs.valueBuf = make([]byte, valueLen)
continue
case nil:
default:
return fmt.Errorf("regEnumValue: %w", err)
}
var value any
switch valueType {
case registry.SZ, registry.EXPAND_SZ:
value = windows.UTF16PtrToString((*uint16)(unsafe.Pointer(&vbuf[0])))
case registry.BINARY:
if valueLen > maxBinaryValueLen {
valueLen = maxBinaryValueLen
}
value = append([]byte{}, vbuf[:valueLen]...)
case registry.DWORD:
value = binary.LittleEndian.Uint32(vbuf[:4])
case registry.MULTI_SZ:
// Adapted from x/sys/windows/registry/(Key).GetStringsValue
p := (*[1 << 29]uint16)(unsafe.Pointer(&vbuf[0]))[: valueLen/2 : valueLen/2]
var strs []string
if len(p) > 0 {
if p[len(p)-1] == 0 {
p = p[:len(p)-1]
}
strs = make([]string, 0, 5)
from := 0
for i, c := range p {
if c == 0 {
strs = append(strs, string(utf16.Decode(p[from:i])))
from = i + 1
}
}
}
value = strs
case registry.QWORD:
value = binary.LittleEndian.Uint64(vbuf[:8])
default:
value = fmt.Sprintf("<unsupported value type %d>", valueType)
}
kv[windows.UTF16PtrToString(&nbuf[0])] = value
index++
}
if recursionLimit > 0 {
if sks, err := k.ReadSubKeyNames(0); err == nil {
for _, sk := range sks {
if err := getRegSubKey(k, sk, recursionLimit-1, bufs, kv); err != nil {
return err
}
}
}
}
output[keyStr] = kv
return nil
}
type moduleInfo struct {
path string `json:"-"` // internal use only
BaseAddress uintptr `json:"baseAddress"`
Size uint32 `json:"size"`
DebugInfo map[string]string `json:"debugInfo,omitempty"` // map for JSON marshaling purposes
DebugInfoErr error `json:"debugInfoErr,omitempty"`
Signature map[string]string `json:"signature,omitempty"` // map for JSON marshaling purposes
SignatureErr error `json:"signatureErr,omitempty"`
VersionInfo map[string]string `json:"versionInfo,omitempty"` // map for JSON marshaling purposes
VersionErr error `json:"versionErr,omitempty"`
}
func (mi *moduleInfo) setVersionInfo() {
vi, err := pe.NewVersionInfo(mi.path)
if err != nil {
if !errors.Is(err, pe.ErrNotPresent) {
mi.VersionErr = err
}
return
}
info := map[string]string{
"": vi.VersionNumber().String(),
}
ci, err := vi.Field("CompanyName")
if err == nil {
info["companyName"] = ci
}
mi.VersionInfo = info
}
var errAssertingType = errors.New("asserting DataDirectory type")
func (mi *moduleInfo) setDebugInfo() {
pem, err := pe.NewPEFromBaseAddressAndSize(mi.BaseAddress, mi.Size)
if err != nil {
mi.DebugInfoErr = err
return
}
defer pem.Close()
debugDirAny, err := pem.DataDirectoryEntry(pe.IMAGE_DIRECTORY_ENTRY_DEBUG)
if err != nil {
if !errors.Is(err, pe.ErrNotPresent) {
mi.DebugInfoErr = err
}
return
}
debugDir, ok := debugDirAny.([]pe.IMAGE_DEBUG_DIRECTORY)
if !ok {
mi.DebugInfoErr = errAssertingType
return
}
for _, dde := range debugDir {
if dde.Type != pe.IMAGE_DEBUG_TYPE_CODEVIEW {
continue
}
cv, err := pem.ExtractCodeViewInfo(dde)
if err == nil {
mi.DebugInfo = map[string]string{
"id": cv.String(),
"pdb": strings.ToLower(filepath.Base(cv.PDBPath)),
}
} else {
mi.DebugInfoErr = err
}
return
}
}
func (mi *moduleInfo) setAuthenticodeInfo() {
certSubject, provenance, err := authenticode.QueryCertSubject(mi.path)
if err != nil {
if !errors.Is(err, authenticode.ErrSigNotFound) {
mi.SignatureErr = err
}
return
}
sigInfo := map[string]string{
"subject": certSubject,
}
switch provenance {
case authenticode.SigProvEmbedded:
sigInfo["provenance"] = "embedded"
case authenticode.SigProvCatalog:
sigInfo["provenance"] = "catalog"
default:
}
mi.Signature = sigInfo
}
func getModuleInfo() (map[string]moduleInfo, error) {
// Take a snapshot of all modules currently loaded into the current process
snap, err := windows.CreateToolhelp32Snapshot(windows.TH32CS_SNAPMODULE, 0)
if err != nil {
return nil, err
}
defer windows.CloseHandle(snap)
result := make(map[string]moduleInfo)
me := windows.ModuleEntry32{
Size: uint32(unsafe.Sizeof(windows.ModuleEntry32{})),
}
// Now walk the list
for merr := windows.Module32First(snap, &me); merr == nil; merr = windows.Module32Next(snap, &me) {
name := strings.ToLower(windows.UTF16ToString(me.Module[:]))
path := windows.UTF16ToString(me.ExePath[:])
base := me.ModBaseAddr
size := me.ModBaseSize
entry := moduleInfo{
path: path,
BaseAddress: base,
Size: size,
}
entry.setVersionInfo()
entry.setDebugInfo()
entry.setAuthenticodeInfo()
result[name] = entry
}
return result, nil
}
type _WSC_PROVIDER_INFO_TYPE int32
const (
providerInfoLspCategories _WSC_PROVIDER_INFO_TYPE = 0
)
const (
_SOCKET_ERROR = -1
)
// Note that wsaProtocolInfo needs to be identical to windows.WSAProtocolInfo;
// the purpose of this type is to have the ability to use it as a receiver in
// the path and categoryFlags funcs defined below.
type wsaProtocolInfo windows.WSAProtocolInfo
func (pi *wsaProtocolInfo) path() (string, error) {
var errno int32
var buf [windows.MAX_PATH]uint16
bufCount := int32(len(buf))
ret := wscGetProviderPath(&pi.ProviderId, &buf[0], &bufCount, &errno)
if ret == _SOCKET_ERROR {
return "", windows.Errno(errno)
}
if ret != 0 {
return "", errUnexpectedResult
}
return windows.UTF16ToString(buf[:bufCount]), nil
}
func (pi *wsaProtocolInfo) categoryFlags() (uint32, error) {
var errno int32
var result uint32
bufLen := uintptr(unsafe.Sizeof(result))
ret := wscGetProviderInfo(&pi.ProviderId, providerInfoLspCategories, unsafe.Pointer(&result), &bufLen, 0, &errno)
if ret == _SOCKET_ERROR {
return 0, windows.Errno(errno)
}
if ret != 0 {
return 0, errUnexpectedResult
}
return result, nil
}
type wsaProtocolInfoOutput struct {
Description string `json:"description,omitempty"`
Version int32 `json:"version"`
AddressFamily int32 `json:"addressFamily"`
SocketType int32 `json:"socketType"`
Protocol int32 `json:"protocol"`
ServiceFlags1 string `json:"serviceFlags1"`
ProviderFlags string `json:"providerFlags"`
Path string `json:"path,omitempty"`
PathErr error `json:"pathErr,omitempty"`
Category string `json:"category,omitempty"`
CategoryErr error `json:"categoryErr,omitempty"`
BaseProviderID string `json:"baseProviderID,omitempty"`
LayerProviderID string `json:"layerProviderID,omitempty"`
Chain []uint32 `json:"chain,omitempty"`
}
func getWinsockLSPInfo() (map[uint32]wsaProtocolInfoOutput, error) {
protocols, err := enumWinsockProtocols()
if err != nil {
return nil, err
}
result := make(map[uint32]wsaProtocolInfoOutput, len(protocols))
for _, p := range protocols {
v := wsaProtocolInfoOutput{
Description: windows.UTF16ToString(p.ProtocolName[:]),
Version: p.Version,
AddressFamily: p.AddressFamily,
SocketType: p.SocketType,
Protocol: p.Protocol,
ServiceFlags1: fmt.Sprintf("0x%08X", p.ServiceFlags1), // Serializing as hex string to make the flags easier to decode by human inspection
ProviderFlags: fmt.Sprintf("0x%08X", p.ProviderFlags),
}
switch p.ProtocolChain.ChainLen {
case windows.BASE_PROTOCOL:
v.BaseProviderID = p.ProviderId.String()
case windows.LAYERED_PROTOCOL:
v.LayerProviderID = p.ProviderId.String()
default:
v.Chain = p.ProtocolChain.ChainEntries[:p.ProtocolChain.ChainLen]
}
// Queries that are only valid for base and layered protocols (not chains)
if v.Chain == nil {
path, err := p.path()
if err == nil {
v.Path = strings.ToLower(path)
} else {
v.PathErr = err
}
category, err := p.categoryFlags()
if err == nil {
v.Category = fmt.Sprintf("0x%08X", category)
} else if !errors.Is(err, windows.WSAEINVALIDPROVIDER) {
// WSAEINVALIDPROVIDER == "no category info found", so we only log
// errors other than that one.
v.CategoryErr = err
}
}
// Chains reference other providers using catalog entry IDs, so we use that
// value as the key in our map.
result[p.CatalogEntryId] = v
}
return result, nil
}
func enumWinsockProtocols() ([]wsaProtocolInfo, error) {
// Get the required size
var errno int32
var bytesReqd uint32
ret := wscEnumProtocols(nil, nil, &bytesReqd, &errno)
if ret != _SOCKET_ERROR {
return nil, errUnexpectedResult
}
if e := windows.Errno(errno); e != windows.WSAENOBUFS {
return nil, e
}
// Allocate
szEntry := uint32(unsafe.Sizeof(wsaProtocolInfo{}))
buf := make([]wsaProtocolInfo, bytesReqd/szEntry)
// Now do the query for real
bufLen := uint32(len(buf)) * szEntry
ret = wscEnumProtocols(nil, &buf[0], &bufLen, &errno)
if ret == _SOCKET_ERROR {
return nil, windows.Errno(errno)
}
return buf, nil
}
type providerKey struct {
provType wsc.WSC_SECURITY_PROVIDER
provKey string
}
var providerKeys = []providerKey{
providerKey{
wsc.WSC_SECURITY_PROVIDER_ANTIVIRUS,
"av",
},
providerKey{
wsc.WSC_SECURITY_PROVIDER_ANTISPYWARE,
"antispy",
},
providerKey{
wsc.WSC_SECURITY_PROVIDER_FIREWALL,
"firewall",
},
}
const (
maxProvCount = 100
)
type secProductInfo struct {
Name string `json:"name,omitempty"`
NameErr error `json:"nameErr,omitempty"`
State string `json:"state,omitempty"`
StateErr error `json:"stateErr,omitempty"`
}
func getSecurityInfo() map[string]any {
result := make(map[string]any)
for _, prov := range providerKeys {
// Note that we need to obtain a new product list for each provider type;
// the docs clearly state that we cannot reuse objects.
productList, err := com.CreateInstance[wsc.WSCProductList](wsc.CLSID_WSCProductList)
if err != nil {
result[prov.provKey] = err
continue
}
err = productList.Initialize(prov.provType)
if err != nil {
result[prov.provKey] = err
continue
}
n, err := productList.GetCount()
if err != nil {
result[prov.provKey] = err
continue
}
if n == 0 {
continue
}
n = min(n, maxProvCount)
values := make([]any, 0, n)
for i := int32(0); i < n; i++ {
product, err := productList.GetItem(uint32(i))
if err != nil {
values = append(values, err)
continue
}
var value secProductInfo
value.Name, err = product.GetProductName()
if err != nil {
value.NameErr = err
}
state, err := product.GetProductState()
if err == nil {
switch state {
case wsc.WSC_SECURITY_PRODUCT_STATE_ON:
value.State = "on"
case wsc.WSC_SECURITY_PRODUCT_STATE_OFF:
value.State = "off"
case wsc.WSC_SECURITY_PRODUCT_STATE_SNOOZED:
value.State = "snoozed"
case wsc.WSC_SECURITY_PRODUCT_STATE_EXPIRED:
value.State = "expired"
default:
value.State = fmt.Sprintf("<unknown state value %d>", state)
}
} else {
value.StateErr = err
}
values = append(values, value)
}
result[prov.provKey] = values
}
return result
}
type _MEMORYSTATUSEX struct {
Length uint32
MemoryLoad uint32
TotalPhys uint64
AvailPhys uint64
TotalPageFile uint64
AvailPageFile uint64
TotalVirtual uint64
AvailVirtual uint64
AvailExtendedVirtual uint64
}
func getPageFileInfo() (map[string]any, error) {
memStatus := _MEMORYSTATUSEX{
Length: uint32(unsafe.Sizeof(_MEMORYSTATUSEX{})),
}
if err := globalMemoryStatusEx(&memStatus); err != nil {
return nil, err
}
result := map[string]any{
"bytesAvailable": memStatus.AvailPageFile,
"bytesTotal": memStatus.TotalPageFile,
}
if entries, err := getEffectivePageFileValue(); err == nil {
// autoManaged is set to true when there is at least one page file that
// is automatically managed.
autoManaged := false
// If there is only one entry that consists of only one part, then
// the page files are 100% managed by the system.
// If there are multiple entries, then each one must be checked.
// Each entry then consists of three components, delimited by spaces.
// If the latter two components are both "0", then that entry is auto-managed.
for _, entry := range entries {
if parts := strings.Split(entry, " "); (len(parts) == 1 && len(entries) == 1) ||
(len(parts) == 3 && parts[1] == "0" && parts[2] == "0") {
autoManaged = true
break
}
}
result["autoManaged"] = autoManaged
}
return result, nil
}
func getEffectivePageFileValue() ([]string, error) {
const subKey = `SYSTEM\CurrentControlSet\Control\Session Manager\Memory Management`
key, err := registry.OpenKey(registry.LOCAL_MACHINE, subKey, registry.QUERY_VALUE)
if err != nil {
return nil, err
}
defer key.Close()
// Rare but possible case: the user has updated their page file config but
// they haven't yet rebooted for the change to take effect. This is the
// current setting that the machine is still operating with.
if entries, _, err := key.GetStringsValue("ExistingPageFiles"); err == nil {
return entries, nil
}
// Otherwise we use this value (yes, the above value uses "Page" and this one uses "Paging").
entries, _, err := key.GetStringsValue("PagingFiles")
return entries, err
}

85
vendor/tailscale.com/util/osdiag/zsyscall_windows.go generated vendored Normal file
View File

@@ -0,0 +1,85 @@
// Code generated by 'go generate'; DO NOT EDIT.
package osdiag
import (
"syscall"
"unsafe"
"golang.org/x/sys/windows"
"golang.org/x/sys/windows/registry"
)
var _ unsafe.Pointer
// Do the interface allocations only once for common
// Errno values.
const (
errnoERROR_IO_PENDING = 997
)
var (
errERROR_IO_PENDING error = syscall.Errno(errnoERROR_IO_PENDING)
errERROR_EINVAL error = syscall.EINVAL
)
// errnoErr returns common boxed Errno values, to prevent
// allocations at runtime.
func errnoErr(e syscall.Errno) error {
switch e {
case 0:
return errERROR_EINVAL
case errnoERROR_IO_PENDING:
return errERROR_IO_PENDING
}
// TODO: add more here, after collecting data on the common
// error values see on Windows. (perhaps when running
// all.bat?)
return e
}
var (
modadvapi32 = windows.NewLazySystemDLL("advapi32.dll")
modkernel32 = windows.NewLazySystemDLL("kernel32.dll")
modws2_32 = windows.NewLazySystemDLL("ws2_32.dll")
procRegEnumValueW = modadvapi32.NewProc("RegEnumValueW")
procGlobalMemoryStatusEx = modkernel32.NewProc("GlobalMemoryStatusEx")
procWSCEnumProtocols = modws2_32.NewProc("WSCEnumProtocols")
procWSCGetProviderInfo = modws2_32.NewProc("WSCGetProviderInfo")
procWSCGetProviderPath = modws2_32.NewProc("WSCGetProviderPath")
)
func regEnumValue(key registry.Key, index uint32, valueName *uint16, valueNameLen *uint32, reserved *uint32, valueType *uint32, pData *byte, cbData *uint32) (ret error) {
r0, _, _ := syscall.Syscall9(procRegEnumValueW.Addr(), 8, uintptr(key), uintptr(index), uintptr(unsafe.Pointer(valueName)), uintptr(unsafe.Pointer(valueNameLen)), uintptr(unsafe.Pointer(reserved)), uintptr(unsafe.Pointer(valueType)), uintptr(unsafe.Pointer(pData)), uintptr(unsafe.Pointer(cbData)), 0)
if r0 != 0 {
ret = syscall.Errno(r0)
}
return
}
func globalMemoryStatusEx(memStatus *_MEMORYSTATUSEX) (err error) {
r1, _, e1 := syscall.Syscall(procGlobalMemoryStatusEx.Addr(), 1, uintptr(unsafe.Pointer(memStatus)), 0, 0)
if int32(r1) == 0 {
err = errnoErr(e1)
}
return
}
func wscEnumProtocols(iProtocols *int32, protocolBuffer *wsaProtocolInfo, bufLen *uint32, errno *int32) (ret int32) {
r0, _, _ := syscall.Syscall6(procWSCEnumProtocols.Addr(), 4, uintptr(unsafe.Pointer(iProtocols)), uintptr(unsafe.Pointer(protocolBuffer)), uintptr(unsafe.Pointer(bufLen)), uintptr(unsafe.Pointer(errno)), 0, 0)
ret = int32(r0)
return
}
func wscGetProviderInfo(providerId *windows.GUID, infoType _WSC_PROVIDER_INFO_TYPE, info unsafe.Pointer, infoSize *uintptr, flags uint32, errno *int32) (ret int32) {
r0, _, _ := syscall.Syscall6(procWSCGetProviderInfo.Addr(), 6, uintptr(unsafe.Pointer(providerId)), uintptr(infoType), uintptr(info), uintptr(unsafe.Pointer(infoSize)), uintptr(flags), uintptr(unsafe.Pointer(errno)))
ret = int32(r0)
return
}
func wscGetProviderPath(providerId *windows.GUID, providerDllPath *uint16, providerDllPathLen *int32, errno *int32) (ret int32) {
r0, _, _ := syscall.Syscall6(procWSCGetProviderPath.Addr(), 4, uintptr(unsafe.Pointer(providerId)), uintptr(unsafe.Pointer(providerDllPath)), uintptr(unsafe.Pointer(providerDllPathLen)), uintptr(unsafe.Pointer(errno)), 0, 0)
ret = int32(r0)
return
}

View File

@@ -0,0 +1,12 @@
// Copyright (c) Tailscale Inc & AUTHORS
// SPDX-License-Identifier: BSD-3-Clause
//go:build !windows
package osshare
import (
"tailscale.com/types/logger"
)
func SetFileSharingEnabled(enabled bool, logf logger.Logf) {}

View File

@@ -0,0 +1,106 @@
// Copyright (c) Tailscale Inc & AUTHORS
// SPDX-License-Identifier: BSD-3-Clause
// Package osshare provides utilities for enabling/disabling Taildrop file
// sharing on Windows.
package osshare
import (
"fmt"
"os"
"path/filepath"
"sync"
"golang.org/x/sys/windows/registry"
"tailscale.com/types/logger"
)
const (
sendFileShellKey = `*\shell\tailscale`
)
var ipnExePath struct {
sync.Mutex
cache string // absolute path of tailscale-ipn.exe, populated lazily on first use
}
func getIpnExePath(logf logger.Logf) string {
ipnExePath.Lock()
defer ipnExePath.Unlock()
if ipnExePath.cache != "" {
return ipnExePath.cache
}
// Find the absolute path of tailscale-ipn.exe assuming that it's in the same
// directory as this executable (tailscaled.exe).
p, err := os.Executable()
if err != nil {
logf("os.Executable error: %v", err)
return ""
}
if p, err = filepath.EvalSymlinks(p); err != nil {
logf("filepath.EvalSymlinks error: %v", err)
return ""
}
p = filepath.Join(filepath.Dir(p), "tailscale-ipn.exe")
if p, err = filepath.Abs(p); err != nil {
logf("filepath.Abs error: %v", err)
return ""
}
ipnExePath.cache = p
return p
}
// SetFileSharingEnabled adds/removes "Send with Tailscale" from the Windows shell menu.
func SetFileSharingEnabled(enabled bool, logf logger.Logf) {
logf = logger.WithPrefix(logf, fmt.Sprintf("SetFileSharingEnabled(%v) error: ", enabled))
if enabled {
enableFileSharing(logf)
} else {
disableFileSharing(logf)
}
}
func enableFileSharing(logf logger.Logf) {
path := getIpnExePath(logf)
if path == "" {
return
}
k, _, err := registry.CreateKey(registry.CLASSES_ROOT, sendFileShellKey, registry.WRITE)
if err != nil {
logf("failed to create HKEY_CLASSES_ROOT\\%s reg key: %v", sendFileShellKey, err)
return
}
defer k.Close()
if err := k.SetStringValue("", "Send with Tailscale..."); err != nil {
logf("k.SetStringValue error: %v", err)
return
}
if err := k.SetStringValue("Icon", path+",0"); err != nil {
logf("k.SetStringValue error: %v", err)
return
}
c, _, err := registry.CreateKey(k, "command", registry.WRITE)
if err != nil {
logf("failed to create HKEY_CLASSES_ROOT\\%s\\command reg key: %v", sendFileShellKey, err)
return
}
defer c.Close()
if err := c.SetStringValue("", "\""+path+"\" /push \"%1\""); err != nil {
logf("c.SetStringValue error: %v", err)
}
}
func disableFileSharing(logf logger.Logf) {
if err := registry.DeleteKey(registry.CLASSES_ROOT, sendFileShellKey+"\\command"); err != nil &&
err != registry.ErrNotExist {
logf("registry.DeleteKey error: %v\n", err)
return
}
if err := registry.DeleteKey(registry.CLASSES_ROOT, sendFileShellKey); err != nil && err != registry.ErrNotExist {
logf("registry.DeleteKey error: %v\n", err)
}
}

54
vendor/tailscale.com/util/osuser/group_ids.go generated vendored Normal file
View File

@@ -0,0 +1,54 @@
// Copyright (c) Tailscale Inc & AUTHORS
// SPDX-License-Identifier: BSD-3-Clause
package osuser
import (
"context"
"fmt"
"os/exec"
"os/user"
"runtime"
"strings"
"time"
"tailscale.com/version/distro"
)
// GetGroupIds returns the list of group IDs that the user is a member of, or
// an error. It will first try to use the 'id' command to get the group IDs,
// and if that fails, it will fall back to the user.GroupIds method.
func GetGroupIds(user *user.User) ([]string, error) {
if runtime.GOOS != "linux" {
return user.GroupIds()
}
if distro.Get() == distro.Gokrazy {
// Gokrazy is a single-user appliance with ~no userspace.
// There aren't users to look up (no /etc/passwd, etc)
// so rather than fail below, just hardcode root.
// TODO(bradfitz): fix os/user upstream instead?
return []string{"0"}, nil
}
if ids, err := getGroupIdsWithId(user.Username); err == nil {
return ids, nil
}
return user.GroupIds()
}
func getGroupIdsWithId(usernameOrUID string) ([]string, error) {
ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
defer cancel()
cmd := exec.CommandContext(ctx, "id", "-Gz", usernameOrUID)
out, err := cmd.Output()
if err != nil {
return nil, fmt.Errorf("running 'id' command: %w", err)
}
return parseGroupIds(out), nil
}
func parseGroupIds(cmdOutput []byte) []string {
return strings.Split(strings.Trim(string(cmdOutput), "\n\x00"), "\x00")
}
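
A minimal caller sketch, not part of the vendored tree; the current-user lookup and the plain print are illustrative only:

package main

import (
	"fmt"
	"log"
	"os/user"

	"tailscale.com/util/osuser"
)

func main() {
	u, err := user.Current()
	if err != nil {
		log.Fatal(err)
	}
	// On Linux this prefers `id -Gz` (which sees NSS/PAM groups) and falls
	// back to user.GroupIds; elsewhere it calls user.GroupIds directly.
	gids, err := osuser.GetGroupIds(u)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(gids)
}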

149
vendor/tailscale.com/util/osuser/user.go generated vendored Normal file
View File

@@ -0,0 +1,149 @@
// Copyright (c) Tailscale Inc & AUTHORS
// SPDX-License-Identifier: BSD-3-Clause
// Package osuser implements OS user lookup. It's a wrapper around os/user that
// works on non-cgo builds.
package osuser
import (
"context"
"errors"
"log"
"os/exec"
"os/user"
"runtime"
"strings"
"time"
"unicode/utf8"
"tailscale.com/version/distro"
)
// LookupByUIDWithShell is like os/user.LookupId but handles a few edge cases
// like gokrazy and non-cgo lookups, and returns the user shell. The user shell
// lookup is best-effort and may be empty.
func LookupByUIDWithShell(uid string) (u *user.User, shell string, err error) {
return lookup(uid, user.LookupId, true)
}
// LookupByUsernameWithShell is like os/user.Lookup but handles a few edge
// cases like gokrazy and non-cgo lookups, and returns the user shell. The user
// shell lookup is best-effort and may be empty.
func LookupByUsernameWithShell(username string) (u *user.User, shell string, err error) {
return lookup(username, user.Lookup, true)
}
// LookupByUID is like os/user.LookupId but handles a few edge cases like
// gokrazy and non-cgo lookups.
func LookupByUID(uid string) (*user.User, error) {
u, _, err := lookup(uid, user.LookupId, false)
return u, err
}
// LookupByUsername is like os/user.Lookup but handles a few edge cases like
// gokrazy and non-cgo lookups.
func LookupByUsername(username string) (*user.User, error) {
u, _, err := lookup(username, user.Lookup, false)
return u, err
}
// lookupStd is either user.Lookup or user.LookupId.
type lookupStd func(string) (*user.User, error)
func lookup(usernameOrUID string, std lookupStd, wantShell bool) (*user.User, string, error) {
// Skip getent entirely on Non-Unix platforms that won't ever have it.
// (Checking GOARCH rather than GOOS covers js, wasip1, and any later
// WASI targets, since they all report GOARCH == "wasm".)
if runtime.GOOS == "windows" || runtime.GOOS == "js" || runtime.GOARCH == "wasm" {
u, err := std(usernameOrUID)
return u, "", err
}
// No getent on Gokrazy. So hard-code the login shell.
if distro.Get() == distro.Gokrazy {
var shell string
if wantShell {
shell = "/tmp/serial-busybox/ash"
}
u, err := std(usernameOrUID)
if err != nil {
return &user.User{
Uid: "0",
Gid: "0",
Username: "root",
Name: "Gokrazy",
HomeDir: "/",
}, shell, nil
}
return u, shell, nil
}
// Start with getent if caller wants to get the user shell.
if wantShell {
return userLookupGetent(usernameOrUID, std)
}
// If shell is not required, try os/user.Lookup* first and only use getent
// if that fails. This avoids spawning a child process when os/user lookup
// succeeds.
if u, err := std(usernameOrUID); err == nil {
return u, "", nil
}
return userLookupGetent(usernameOrUID, std)
}
func checkGetentInput(usernameOrUID string) bool {
maxUid := 32
if runtime.GOOS == "linux" {
maxUid = 256
}
if len(usernameOrUID) > maxUid || len(usernameOrUID) == 0 {
return false
}
for _, r := range usernameOrUID {
if r < ' ' || r == 0x7f || r == utf8.RuneError { // TODO(bradfitz): more?
return false
}
}
return true
}
// userLookupGetent uses "getent" to look up users so that even with static
// tailscaled binaries without cgo (as we distribute), we can still look up
// PAM/NSS users which the standard library's os/user without cgo won't get
// (because of no libc hooks). If "getent" fails, userLookupGetent falls back
// to the standard library.
func userLookupGetent(usernameOrUID string, std lookupStd) (*user.User, string, error) {
// Do some basic validation before passing this string to "getent", even though
// getent should do its own validation.
if !checkGetentInput(usernameOrUID) {
return nil, "", errors.New("invalid username or UID")
}
ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
defer cancel()
out, err := exec.CommandContext(ctx, "getent", "passwd", usernameOrUID).Output()
if err != nil {
log.Printf("error calling getent for user %q: %v", usernameOrUID, err)
u, err := std(usernameOrUID)
return u, "", err
}
// output is "alice:x:1001:1001:Alice Smith,,,:/home/alice:/bin/bash"
f := strings.SplitN(strings.TrimSpace(string(out)), ":", 10)
for len(f) < 7 {
f = append(f, "")
}
var mandatoryFields = []int{0, 2, 3, 5}
for _, v := range mandatoryFields {
if f[v] == "" {
log.Printf("getent for user %q returned invalid output: %q", usernameOrUID, out)
u, err := std(usernameOrUID)
return u, "", err
}
}
return &user.User{
Username: f[0],
Uid: f[2],
Gid: f[3],
Name: f[4],
HomeDir: f[5],
}, f[6], nil
}
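
A usage sketch of the shell-returning variant, not code from the diff; "alice" is a placeholder account name:

package main

import (
	"fmt"
	"log"

	"tailscale.com/util/osuser"
)

func main() {
	u, shell, err := osuser.LookupByUsernameWithShell("alice") // placeholder user
	if err != nil {
		log.Fatal(err)
	}
	// shell may be empty: the shell lookup is best-effort.
	fmt.Println(u.Uid, u.HomeDir, shell)
}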

View File

@@ -0,0 +1,39 @@
// Copyright (c) Tailscale Inc & AUTHORS
// SPDX-License-Identifier: BSD-3-Clause
// Package progresstracking provides wrappers around io.Reader and io.Writer
// that track progress.
package progresstracking
import (
"io"
"time"
)
// NewReader wraps the given Reader with a progress tracking Reader that
// reports progress at the following points:
//
// - First read
// - Every read spaced at least interval since the prior read
// - Last read
func NewReader(r io.Reader, interval time.Duration, onProgress func(totalRead int, err error)) io.Reader {
return &reader{Reader: r, interval: interval, onProgress: onProgress}
}
type reader struct {
io.Reader
interval time.Duration
onProgress func(int, error)
lastTracked time.Time
totalRead int
}
func (r *reader) Read(p []byte) (int, error) {
n, err := r.Reader.Read(p)
r.totalRead += n
if time.Since(r.lastTracked) > r.interval || err != nil {
r.onProgress(r.totalRead, err)
r.lastTracked = time.Now()
}
return n, err
}
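
A usage sketch assuming a local file named big-file.bin (a placeholder); it copies the file to io.Discard and logs progress roughly once per second:

package main

import (
	"io"
	"log"
	"os"
	"time"

	"tailscale.com/util/progresstracking"
)

func main() {
	f, err := os.Open("big-file.bin") // placeholder path
	if err != nil {
		log.Fatal(err)
	}
	defer f.Close()

	// The callback fires on the first read, at most once per interval
	// afterwards, and again on the final (error or EOF) read.
	r := progresstracking.NewReader(f, time.Second, func(totalRead int, err error) {
		log.Printf("read %d bytes so far (err=%v)", totalRead, err)
	})
	if _, err := io.Copy(io.Discard, r); err != nil {
		log.Fatal(err)
	}
}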

115
vendor/tailscale.com/util/race/race.go generated vendored Normal file
View File

@@ -0,0 +1,115 @@
// Copyright (c) Tailscale Inc & AUTHORS
// SPDX-License-Identifier: BSD-3-Clause
// Package race contains a helper to "race" two functions, returning the first
// successful result. It also allows explicitly triggering the
// (possibly-waiting) second function when the first function returns an error
// or indicates that it should be retried.
package race
import (
"context"
"errors"
"time"
)
type resultType int
const (
first resultType = iota
second
)
// queryResult is an internal type for storing the result of a function call
type queryResult[T any] struct {
ty resultType
res T
err error
}
// Func is the signature of a function to be called.
type Func[T any] func(context.Context) (T, error)
// Race allows running two functions concurrently and returning the first
// non-error result returned.
type Race[T any] struct {
func1, func2 Func[T]
d time.Duration
results chan queryResult[T]
startFallback chan struct{}
}
// New creates a new Race that, when Start is called, will immediately call
// func1 to obtain a result. After the timeout d or if triggered by an error
// response from func1, func2 will be called.
func New[T any](d time.Duration, func1, func2 Func[T]) *Race[T] {
ret := &Race[T]{
func1: func1,
func2: func2,
d: d,
results: make(chan queryResult[T], 2),
startFallback: make(chan struct{}),
}
return ret
}
// Start will start the "race" process, returning the first non-error result or
// the errors that occurred when calling func1 and/or func2.
func (rh *Race[T]) Start(ctx context.Context) (T, error) {
ctx, cancel := context.WithCancel(ctx)
defer cancel()
// func1 is started immediately
go func() {
ret, err := rh.func1(ctx)
rh.results <- queryResult[T]{first, ret, err}
}()
// func2 is started after a timeout
go func() {
wait := time.NewTimer(rh.d)
defer wait.Stop()
// Wait for our timeout, trigger, or context to finish.
select {
case <-ctx.Done():
// Nothing to do; we're done
var zero T
rh.results <- queryResult[T]{second, zero, ctx.Err()}
return
case <-rh.startFallback:
case <-wait.C:
}
ret, err := rh.func2(ctx)
rh.results <- queryResult[T]{second, ret, err}
}()
// For each possible result, get it off the channel.
var errs []error
for range 2 {
res := <-rh.results
// If this was an error, store it and hope that the other
// result gives us something.
if res.err != nil {
errs = append(errs, res.err)
// Start the fallback function immediately if this is
// the first function's error, to avoid having
// to wait.
if res.ty == first {
close(rh.startFallback)
}
continue
}
// Got a valid response! Return it.
return res.res, nil
}
// If we get here, both raced functions failed. Return whatever errors
// we have, joined together.
var zero T
return zero, errors.Join(errs...)
}
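
A self-contained sketch of how the race is wired up; the slow and fast closures are stand-ins for two real transports:

package main

import (
	"context"
	"fmt"
	"time"

	"tailscale.com/util/race"
)

func main() {
	slow := func(ctx context.Context) (string, error) {
		select {
		case <-time.After(2 * time.Second):
			return "slow path", nil
		case <-ctx.Done():
			return "", ctx.Err()
		}
	}
	fast := func(ctx context.Context) (string, error) {
		return "fast path", nil
	}
	// fast starts only after 100ms, or sooner if slow errors first.
	r := race.New[string](100*time.Millisecond, slow, fast)
	res, err := r.Start(context.Background())
	fmt.Println(res, err) // typically "fast path" <nil>
}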

8
vendor/tailscale.com/util/racebuild/off.go generated vendored Normal file
View File

@@ -0,0 +1,8 @@
// Copyright (c) Tailscale Inc & AUTHORS
// SPDX-License-Identifier: BSD-3-Clause
//go:build !race
package racebuild
const On = false

8
vendor/tailscale.com/util/racebuild/on.go generated vendored Normal file
View File

@@ -0,0 +1,8 @@
// Copyright (c) Tailscale Inc & AUTHORS
// SPDX-License-Identifier: BSD-3-Clause
//go:build race
package racebuild
const On = true

6
vendor/tailscale.com/util/racebuild/racebuild.go generated vendored Normal file
View File

@@ -0,0 +1,6 @@
// Copyright (c) Tailscale Inc & AUTHORS
// SPDX-License-Identifier: BSD-3-Clause
// Package racebuild exports a constant about whether the current binary
// was built with the race detector.
package racebuild

90
vendor/tailscale.com/util/rands/cheap.go generated vendored Normal file
View File

@@ -0,0 +1,90 @@
// Copyright (c) Tailscale Inc & AUTHORS
// SPDX-License-Identifier: BSD-3-Clause
// Copyright 2009 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package rands
import (
"math/bits"
randv2 "math/rand/v2"
)
// Shuffle is like rand.Shuffle, but it does not allocate or lock any RNG state.
func Shuffle[T any](seed uint64, data []T) {
var pcg randv2.PCG
pcg.Seed(seed, seed)
for i := len(data) - 1; i > 0; i-- {
j := int(uint64n(&pcg, uint64(i+1)))
data[i], data[j] = data[j], data[i]
}
}
// IntN is like rand.IntN, but it is seeded on the stack and does not allocate
// or lock any RNG state.
func IntN(seed uint64, n int) int {
var pcg randv2.PCG
pcg.Seed(seed, seed)
return int(uint64n(&pcg, uint64(n)))
}
// Perm is like rand.Perm, but it is seeded on the stack and does not allocate
// or lock any RNG state.
func Perm(seed uint64, n int) []int {
p := make([]int, n)
for i := range p {
p[i] = i
}
Shuffle(seed, p)
return p
}
// uint64n is the no-bounds-checks version of rand.Uint64N from the standard
// library. 32-bit optimizations have been elided.
func uint64n(pcg *randv2.PCG, n uint64) uint64 {
if n&(n-1) == 0 { // n is power of two, can mask
return pcg.Uint64() & (n - 1)
}
// Suppose we have a uint64 x uniform in the range [0,2⁶⁴)
// and want to reduce it to the range [0,n) preserving exact uniformity.
// We can simulate a scaling arbitrary precision x * (n/2⁶⁴) by
// the high bits of a double-width multiply of x*n, meaning (x*n)/2⁶⁴.
// Since there are 2⁶⁴ possible inputs x and only n possible outputs,
// the output is necessarily biased if n does not divide 2⁶⁴.
// In general (x*n)/2⁶⁴ = k for x*n in [k*2⁶⁴,(k+1)*2⁶⁴).
// There are either floor(2⁶⁴/n) or ceil(2⁶⁴/n) possible products
// in that range, depending on k.
// But suppose we reject the sample and try again when
// x*n is in [k*2⁶⁴, k*2⁶⁴+(2⁶⁴%n)), meaning rejecting fewer than n possible
// outcomes out of the 2⁶⁴.
// Now there are exactly floor(2⁶⁴/n) possible ways to produce
// each output value k, so we've restored uniformity.
// To get valid uint64 math, 2⁶⁴ % n = (2⁶⁴ - n) % n = -n % n,
// so the direct implementation of this algorithm would be:
//
// hi, lo := bits.Mul64(r.Uint64(), n)
// thresh := -n % n
// for lo < thresh {
// hi, lo = bits.Mul64(r.Uint64(), n)
// }
//
// That still leaves an expensive 64-bit division that we would rather avoid.
// We know that thresh < n, and n is usually much less than 2⁶⁴, so we can
// avoid the last four lines unless lo < n.
//
// See also:
// https://lemire.me/blog/2016/06/27/a-fast-alternative-to-the-modulo-reduction
// https://lemire.me/blog/2016/06/30/fast-random-shuffling
hi, lo := bits.Mul64(pcg.Uint64(), n)
if lo < n {
thresh := -n % n
for lo < thresh {
hi, lo = bits.Mul64(pcg.Uint64(), n)
}
}
return hi
}
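
A small sketch of the seeded helpers; the seed and the peers slice are arbitrary placeholders, and the point is that the same seed yields the same order without any shared RNG state:

package main

import (
	"fmt"

	"tailscale.com/util/rands"
)

func main() {
	const seed = 1234567 // any uint64; same seed, same result
	peers := []string{"a", "b", "c", "d"}
	rands.Shuffle(seed, peers)
	fmt.Println(peers)
	fmt.Println(rands.IntN(seed, 10)) // deterministic value in [0,10)
	fmt.Println(rands.Perm(seed, 4))  // deterministic permutation of 0..3
}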

25
vendor/tailscale.com/util/rands/rands.go generated vendored Normal file
View File

@@ -0,0 +1,25 @@
// Copyright (c) Tailscale Inc & AUTHORS
// SPDX-License-Identifier: BSD-3-Clause
// Package rands contains utility functions for randomness.
package rands
import (
crand "crypto/rand"
"encoding/hex"
)
// HexString returns a string of n cryptographically random lowercase
// hex characters.
//
// That is, HexString(3) returns something like "0fc", containing 12
// bits of randomness.
func HexString(n int) string {
nb := n / 2
if n%2 == 1 {
nb++
}
b := make([]byte, nb)
crand.Read(b)
return hex.EncodeToString(b)[:n]
}

79
vendor/tailscale.com/util/ringbuffer/ringbuffer.go generated vendored Normal file
View File

@@ -0,0 +1,79 @@
// Copyright (c) Tailscale Inc & AUTHORS
// SPDX-License-Identifier: BSD-3-Clause
// Package ringbuffer contains a fixed-size concurrency-safe generic ring
// buffer.
package ringbuffer
import "sync"
// New creates a new RingBuffer containing at most max items.
func New[T any](max int) *RingBuffer[T] {
return &RingBuffer[T]{
max: max,
}
}
// RingBuffer is a concurrency-safe ring buffer.
type RingBuffer[T any] struct {
mu sync.Mutex
pos int
buf []T
max int
}
// Add appends a new item to the RingBuffer, possibly overwriting the oldest
// item in the buffer if it is already full.
//
// It does nothing if rb is nil.
func (rb *RingBuffer[T]) Add(t T) {
if rb == nil {
return
}
rb.mu.Lock()
defer rb.mu.Unlock()
if len(rb.buf) < rb.max {
rb.buf = append(rb.buf, t)
} else {
rb.buf[rb.pos] = t
rb.pos = (rb.pos + 1) % rb.max
}
}
// GetAll returns a copy of all the entries in the ring buffer in the order they
// were added.
//
// It returns nil if rb is nil.
func (rb *RingBuffer[T]) GetAll() []T {
if rb == nil {
return nil
}
rb.mu.Lock()
defer rb.mu.Unlock()
out := make([]T, len(rb.buf))
for i := range len(rb.buf) {
x := (rb.pos + i) % rb.max
out[i] = rb.buf[x]
}
return out
}
// Len returns the number of elements in the ring buffer. Note that this value
// could change immediately after being returned if a concurrent caller
// modifies the buffer.
func (rb *RingBuffer[T]) Len() int {
if rb == nil {
return 0
}
rb.mu.Lock()
defer rb.mu.Unlock()
return len(rb.buf)
}
// Clear will empty the ring buffer.
func (rb *RingBuffer[T]) Clear() {
rb.mu.Lock()
defer rb.mu.Unlock()
rb.pos = 0
rb.buf = nil
}
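
A short usage sketch illustrating the overwrite behavior once the buffer is full:

package main

import (
	"fmt"

	"tailscale.com/util/ringbuffer"
)

func main() {
	rb := ringbuffer.New[int](3) // keep at most the 3 newest items
	for i := 1; i <= 5; i++ {
		rb.Add(i)
	}
	// The two oldest entries were overwritten; GetAll preserves insertion order.
	fmt.Println(rb.GetAll(), rb.Len()) // [3 4 5] 3
}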

28
vendor/tailscale.com/util/set/handle.go generated vendored Normal file
View File

@@ -0,0 +1,28 @@
// Copyright (c) Tailscale Inc & AUTHORS
// SPDX-License-Identifier: BSD-3-Clause
package set
// HandleSet is a set of T.
//
// It is not safe for concurrent use.
type HandleSet[T any] map[Handle]T
// Handle is an opaque comparable value that's used as the map key in a
// HandleSet. The only way to get one is to call HandleSet.Add.
type Handle struct {
v *byte
}
// Add adds the element (map value) e to the set.
//
// It returns the handle (map key) with which e can be removed, using a map
// delete.
func (s *HandleSet[T]) Add(e T) Handle {
h := Handle{new(byte)}
if *s == nil {
*s = make(HandleSet[T])
}
(*s)[h] = e
return h
}
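
A usage sketch; the watcher callbacks are placeholders, and the point is that removal is an ordinary map delete keyed by the returned Handle:

package main

import (
	"fmt"

	"tailscale.com/util/set"
)

func main() {
	var watchers set.HandleSet[func(string)] // zero value is usable; Add allocates lazily
	h := watchers.Add(func(msg string) { fmt.Println("got:", msg) })
	watchers.Add(func(msg string) { fmt.Println("also got:", msg) })

	for _, f := range watchers {
		f("hello")
	}
	delete(watchers, h) // remove the first watcher by its handle
	fmt.Println(len(watchers)) // 1
}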

94
vendor/tailscale.com/util/set/set.go generated vendored Normal file
View File

@@ -0,0 +1,94 @@
// Copyright (c) Tailscale Inc & AUTHORS
// SPDX-License-Identifier: BSD-3-Clause
// Package set contains set types.
package set
import (
"encoding/json"
"maps"
)
// Set is a set of T.
type Set[T comparable] map[T]struct{}
// SetOf returns a new set constructed from the elements in slice.
func SetOf[T comparable](slice []T) Set[T] {
return Of(slice...)
}
// Of returns a new set constructed from the elements in slice.
func Of[T comparable](slice ...T) Set[T] {
s := make(Set[T])
s.AddSlice(slice)
return s
}
// Clone returns a new set cloned from the elements in s.
func (s Set[T]) Clone() Set[T] {
return maps.Clone(s)
}
// Add adds e to s.
func (s Set[T]) Add(e T) { s[e] = struct{}{} }
// AddSlice adds each element of es to s.
func (s Set[T]) AddSlice(es []T) {
for _, e := range es {
s.Add(e)
}
}
// AddSet adds each element of es to s.
func (s Set[T]) AddSet(es Set[T]) {
for e := range es {
s.Add(e)
}
}
// Make lazily initializes the map pointed to by s to be non-nil.
func (s *Set[T]) Make() {
if *s == nil {
*s = make(Set[T])
}
}
// Slice returns the elements of the set as a slice. The elements will not be
// in any particular order.
func (s Set[T]) Slice() []T {
es := make([]T, 0, s.Len())
for k := range s {
es = append(es, k)
}
return es
}
// Delete removes e from the set.
func (s Set[T]) Delete(e T) { delete(s, e) }
// Contains reports whether s contains e.
func (s Set[T]) Contains(e T) bool {
_, ok := s[e]
return ok
}
// Len reports the number of items in s.
func (s Set[T]) Len() int { return len(s) }
// Equal reports whether s is equal to other.
func (s Set[T]) Equal(other Set[T]) bool {
return maps.Equal(s, other)
}
func (s Set[T]) MarshalJSON() ([]byte, error) {
return json.Marshal(s.Slice())
}
func (s *Set[T]) UnmarshalJSON(buf []byte) error {
var ss []T
if err := json.Unmarshal(buf, &ss); err != nil {
return err
}
*s = SetOf(ss)
return nil
}
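
A usage sketch showing the basic operations and the JSON round-trip provided by the Marshal/Unmarshal methods above:

package main

import (
	"encoding/json"
	"fmt"

	"tailscale.com/util/set"
)

func main() {
	s := set.Of("a", "b", "c")
	s.Add("d")
	s.Delete("b")
	fmt.Println(s.Contains("a"), s.Len()) // true 3

	// Sets round-trip through JSON as arrays, in no particular order.
	b, _ := json.Marshal(s)
	var s2 set.Set[string]
	_ = json.Unmarshal(b, &s2)
	fmt.Println(s.Equal(s2)) // true
}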

73
vendor/tailscale.com/util/set/slice.go generated vendored Normal file
View File

@@ -0,0 +1,73 @@
// Copyright (c) Tailscale Inc & AUTHORS
// SPDX-License-Identifier: BSD-3-Clause
package set
import (
"slices"
"tailscale.com/types/views"
)
// Slice is a set of elements tracked in a slice of unique elements.
type Slice[T comparable] struct {
slice []T
set map[T]bool // nil until/unless slice is large enough
}
// Slice returns a view of the underlying slice.
// The elements are in order of insertion.
// The returned value is only valid until ss is modified again.
func (ss *Slice[T]) Slice() views.Slice[T] { return views.SliceOf(ss.slice) }
// Len returns the number of elements in the set.
func (ss *Slice[T]) Len() int { return len(ss.slice) }
// Contains reports whether v is in the set.
// The amortized cost is O(1).
func (ss *Slice[T]) Contains(v T) bool {
if ss.set != nil {
return ss.set[v]
}
return slices.Index(ss.slice, v) != -1
}
// Remove removes v from the set.
// The cost is O(n).
func (ss *Slice[T]) Remove(v T) {
if ss.set != nil {
if !ss.set[v] {
return
}
delete(ss.set, v)
}
if ix := slices.Index(ss.slice, v); ix != -1 {
ss.slice = append(ss.slice[:ix], ss.slice[ix+1:]...)
}
}
// Add adds each element in vs to the set.
// The amortized cost is O(1) per element.
func (ss *Slice[T]) Add(vs ...T) {
for _, v := range vs {
if ss.Contains(v) {
continue
}
ss.slice = append(ss.slice, v)
if ss.set != nil {
ss.set[v] = true
} else if len(ss.slice) > 8 {
ss.set = make(map[T]bool, len(ss.slice))
for _, v := range ss.slice {
ss.set[v] = true
}
}
}
}
// AddSlice adds all elements in vs to the set.
func (ss *Slice[T]) AddSlice(vs views.Slice[T]) {
for i := range vs.Len() {
ss.Add(vs.At(i))
}
}
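
A short sketch; it relies only on the views.Slice methods already used above (Len and At):

package main

import (
	"fmt"

	"tailscale.com/util/set"
)

func main() {
	var ss set.Slice[string]
	ss.Add("a", "b", "a") // the duplicate "a" is ignored
	ss.Remove("b")

	v := ss.Slice() // insertion-ordered view, valid until the next mutation
	for i := range v.Len() {
		fmt.Println(v.At(i))
	}
}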

311
vendor/tailscale.com/util/singleflight/singleflight.go generated vendored Normal file
View File

@@ -0,0 +1,311 @@
// Copyright (c) Tailscale Inc & AUTHORS
// SPDX-License-Identifier: BSD-3-Clause
// Copyright 2013 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// Package singleflight provides a duplicate function call suppression
// mechanism.
//
// This is a Tailscale fork of Go's singleflight package which has had several
// homes in the past:
//
// - https://github.com/golang/go/commit/61d3b2db6292581fc07a3767ec23ec94ad6100d1
// - https://github.com/golang/groupcache/tree/master/singleflight
// - https://pkg.go.dev/golang.org/x/sync/singleflight
//
// This fork adds generics.
package singleflight // import "tailscale.com/util/singleflight"
import (
"bytes"
"context"
"errors"
"fmt"
"runtime"
"runtime/debug"
"sync"
"sync/atomic"
)
// errGoexit indicates the runtime.Goexit was called in
// the user given function.
var errGoexit = errors.New("runtime.Goexit was called")
// A panicError is an arbitrary value recovered from a panic
// with the stack trace during the execution of given function.
type panicError struct {
value interface{}
stack []byte
}
// Error implements error interface.
func (p *panicError) Error() string {
return fmt.Sprintf("%v\n\n%s", p.value, p.stack)
}
func newPanicError(v interface{}) error {
stack := debug.Stack()
// The first line of the stack trace is of the form "goroutine N [status]:"
// but by the time the panic reaches Do the goroutine may no longer exist
// and its status will have changed. Trim out the misleading line.
if line := bytes.IndexByte(stack[:], '\n'); line >= 0 {
stack = stack[line+1:]
}
return &panicError{value: v, stack: stack}
}
// call is an in-flight or completed singleflight.Do call
type call[V any] struct {
wg sync.WaitGroup
// These fields are written once before the WaitGroup is done
// and are only read after the WaitGroup is done.
val V
err error
// These fields are read and written with the singleflight
// mutex held before the WaitGroup is done, and are read but
// not written after the WaitGroup is done.
dups int
chans []chan<- Result[V]
// These fields are only written when the call is being created, and
// only in the DoChanContext method.
cancel context.CancelFunc
ctxWaiters atomic.Int64
}
// Group represents a class of work and forms a namespace in
// which units of work can be executed with duplicate suppression.
type Group[K comparable, V any] struct {
mu sync.Mutex // protects m
m map[K]*call[V] // lazily initialized
}
// Result holds the results of Do, so they can be passed
// on a channel.
type Result[V any] struct {
Val V
Err error
Shared bool
}
// Do executes and returns the results of the given function, making
// sure that only one execution is in-flight for a given key at a
// time. If a duplicate comes in, the duplicate caller waits for the
// original to complete and receives the same results.
// The return value shared indicates whether v was given to multiple callers.
func (g *Group[K, V]) Do(key K, fn func() (V, error)) (v V, err error, shared bool) {
g.mu.Lock()
if g.m == nil {
g.m = make(map[K]*call[V])
}
if c, ok := g.m[key]; ok {
c.dups++
g.mu.Unlock()
c.wg.Wait()
if e, ok := c.err.(*panicError); ok {
panic(e)
} else if c.err == errGoexit {
runtime.Goexit()
}
return c.val, c.err, true
}
c := new(call[V])
c.wg.Add(1)
g.m[key] = c
g.mu.Unlock()
g.doCall(c, key, fn)
return c.val, c.err, c.dups > 0
}
// DoChan is like Do but returns a channel that will receive the
// results when they are ready.
//
// The returned channel will not be closed.
func (g *Group[K, V]) DoChan(key K, fn func() (V, error)) <-chan Result[V] {
ch := make(chan Result[V], 1)
g.mu.Lock()
if g.m == nil {
g.m = make(map[K]*call[V])
}
if c, ok := g.m[key]; ok {
c.dups++
c.chans = append(c.chans, ch)
g.mu.Unlock()
return ch
}
c := &call[V]{chans: []chan<- Result[V]{ch}}
c.wg.Add(1)
g.m[key] = c
g.mu.Unlock()
go g.doCall(c, key, fn)
return ch
}
// DoChanContext is like [Group.DoChan], but supports context cancelation. The
// context passed to the fn function is a context that is canceled only when
// there are no callers waiting on a result (i.e. all callers have canceled
// their contexts).
//
// The context that is passed to the fn function is not derived from any of the
// input contexts, so context values will not be propagated. If context values
// are needed, they must be propagated explicitly.
//
// The returned channel will not be closed. The Result.Err field is set to the
// context error if the context is canceled.
func (g *Group[K, V]) DoChanContext(ctx context.Context, key K, fn func(context.Context) (V, error)) <-chan Result[V] {
ch := make(chan Result[V], 1)
g.mu.Lock()
if g.m == nil {
g.m = make(map[K]*call[V])
}
c, ok := g.m[key]
if ok {
// Call already in progress; add to the waiters list and then
// release the mutex.
c.dups++
c.ctxWaiters.Add(1)
c.chans = append(c.chans, ch)
g.mu.Unlock()
} else {
// The call hasn't been started yet; we need to start it.
//
// Create a fresh context that is not canceled when any caller's context
// is canceled; note that it is derived from context.Background, so it
// does not carry values from the callers' contexts.
callCtx, callCancel := context.WithCancel(context.Background())
c = &call[V]{
chans: []chan<- Result[V]{ch},
cancel: callCancel,
}
c.wg.Add(1)
c.ctxWaiters.Add(1) // one caller waiting
g.m[key] = c
g.mu.Unlock()
// Wrap our function to provide the context.
go g.doCall(c, key, func() (V, error) {
return fn(callCtx)
})
}
// Instead of returning the channel directly, we need to track
// when the call finishes so we can handle context cancelation.
// Do so by creating a final channel that gets the
// result and hooking that up to the wait function.
final := make(chan Result[V], 1)
go g.waitCtx(ctx, c, ch, final)
return final
}
// waitCtx will wait on the provided call to finish, or the context to be done.
// If the context is done, and this is the last waiter, then the context
// provided to the underlying function will be canceled.
func (g *Group[K, V]) waitCtx(ctx context.Context, c *call[V], result <-chan Result[V], output chan<- Result[V]) {
var res Result[V]
select {
case <-ctx.Done():
case res = <-result:
}
// Decrement the caller count, and if we're the last one, cancel the
// context we created. Do this in all cases, error and otherwise, so we
// don't leak goroutines.
//
// Also wait on the call to finish, so we know that the call has
// finished executing after the last caller has returned.
if c.ctxWaiters.Add(-1) == 0 {
c.cancel()
c.wg.Wait()
}
// Ensure that context cancelation takes precedence over a value being
// available by checking ctx.Err() before sending the result to the
// caller. The select above will nondeterministically pick a case if a
// result is available and the ctx.Done channel is closed, so we check
// again here.
if err := ctx.Err(); err != nil {
res = Result[V]{Err: err}
}
output <- res
}
// doCall handles the single call for a key.
func (g *Group[K, V]) doCall(c *call[V], key K, fn func() (V, error)) {
normalReturn := false
recovered := false
// use double-defer to distinguish panic from runtime.Goexit,
// more details see https://golang.org/cl/134395
defer func() {
// the given function invoked runtime.Goexit
if !normalReturn && !recovered {
c.err = errGoexit
}
g.mu.Lock()
defer g.mu.Unlock()
c.wg.Done()
if g.m[key] == c {
delete(g.m, key)
}
if e, ok := c.err.(*panicError); ok {
// In order to prevent the waiting channels from being blocked forever,
// we need to ensure that this panic cannot be recovered.
if len(c.chans) > 0 {
go panic(e)
select {} // Keep this goroutine around so that it will appear in the crash dump.
} else {
panic(e)
}
} else if c.err == errGoexit {
// Already in the process of goexit, no need to call again
} else {
// Normal return
for _, ch := range c.chans {
ch <- Result[V]{c.val, c.err, c.dups > 0}
}
}
}()
func() {
defer func() {
if !normalReturn {
// Ideally, we would wait to take a stack trace until we've determined
// whether this is a panic or a runtime.Goexit.
//
// Unfortunately, the only way we can distinguish the two is to see
// whether the recover stopped the goroutine from terminating, and by
// the time we know that, the part of the stack trace relevant to the
// panic has been discarded.
if r := recover(); r != nil {
c.err = newPanicError(r)
}
}
}()
c.val, c.err = fn()
normalReturn = true
}()
if !normalReturn {
recovered = true
}
}
// Forget tells the singleflight to forget about a key. Future calls
// to Do for this key will call the function rather than waiting for
// an earlier call to complete.
func (g *Group[K, V]) Forget(key K) {
g.mu.Lock()
delete(g.m, key)
g.mu.Unlock()
}
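
A usage sketch of Do with duplicate callers; the sleep stands in for an expensive fetch, and the exact interleaving of the printed shared flags depends on timing:

package main

import (
	"fmt"
	"sync"
	"time"

	"tailscale.com/util/singleflight"
)

func main() {
	var g singleflight.Group[string, string]
	var wg sync.WaitGroup
	for range 3 {
		wg.Add(1)
		go func() {
			defer wg.Done()
			// All three goroutines share one execution of fn for key "cfg".
			v, err, shared := g.Do("cfg", func() (string, error) {
				time.Sleep(50 * time.Millisecond) // stand-in for an expensive fetch
				return "value", nil
			})
			fmt.Println(v, err, shared)
		}()
	}
	wg.Wait()
}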

150
vendor/tailscale.com/util/slicesx/slicesx.go generated vendored Normal file
View File

@@ -0,0 +1,150 @@
// Copyright (c) Tailscale Inc & AUTHORS
// SPDX-License-Identifier: BSD-3-Clause
// Package slicesx contains some helpful generic slice functions.
package slicesx
import (
"math/rand/v2"
"slices"
)
// Interleave combines two slices of the form [a, b, c] and [x, y, z] into a
// slice with elements interleaved; i.e. [a, x, b, y, c, z].
func Interleave[S ~[]T, T any](a, b S) S {
// Avoid allocating an empty slice.
if a == nil && b == nil {
return nil
}
var (
i int
ret = make([]T, 0, len(a)+len(b))
)
for i = 0; i < len(a) && i < len(b); i++ {
ret = append(ret, a[i], b[i])
}
ret = append(ret, a[i:]...)
ret = append(ret, b[i:]...)
return ret
}
// Shuffle randomly shuffles a slice in-place, similar to rand.Shuffle.
func Shuffle[S ~[]T, T any](s S) {
// TODO(andrew): use a pooled Rand?
// This is the same Fisher-Yates shuffle implementation as rand.Shuffle
n := len(s)
i := n - 1
for ; i > 1<<31-1-1; i-- {
j := int(rand.N(int64(i + 1)))
s[i], s[j] = s[j], s[i]
}
for ; i > 0; i-- {
j := int(rand.N(int32(i + 1)))
s[i], s[j] = s[j], s[i]
}
}
// Partition returns two slices, the first containing the elements of the input
// slice for which the callback evaluates to true, the second containing the rest.
//
// This function does not mutate s.
func Partition[S ~[]T, T any](s S, cb func(T) bool) (trues, falses S) {
for _, elem := range s {
if cb(elem) {
trues = append(trues, elem)
} else {
falses = append(falses, elem)
}
}
return
}
// EqualSameNil reports whether two slices are equal: the same length, same
// nilness (notably when length zero), and all elements equal. If the lengths
// are different or their nilness differs, EqualSameNil returns false. Otherwise, the
// elements are compared in increasing index order, and the comparison stops at
// the first unequal pair. Floating point NaNs are not considered equal.
//
// It is identical to the standard library's slices.Equal but adds the matching
// nilness check.
func EqualSameNil[S ~[]E, E comparable](s1, s2 S) bool {
if len(s1) != len(s2) || (s1 == nil) != (s2 == nil) {
return false
}
for i := range s1 {
if s1[i] != s2[i] {
return false
}
}
return true
}
// Filter calls fn with each element of the provided src slice, and appends the
// element to dst if fn returns true.
//
// dst can be nil to allocate a new slice, or set to src[:0] to filter in-place
// without allocating.
func Filter[S ~[]T, T any](dst, src S, fn func(T) bool) S {
for _, x := range src {
if fn(x) {
dst = append(dst, x)
}
}
return dst
}
// AppendMatching appends elements in ps to dst if f(x) is true.
func AppendMatching[T any](dst, ps []T, f func(T) bool) []T {
for _, p := range ps {
if f(p) {
dst = append(dst, p)
}
}
return dst
}
// HasPrefix reports whether the byte slice s begins with prefix.
func HasPrefix[E comparable](s, prefix []E) bool {
return len(s) >= len(prefix) && slices.Equal(s[0:len(prefix)], prefix)
}
// HasSuffix reports whether the slice s ends with suffix.
func HasSuffix[E comparable](s, suffix []E) bool {
return len(s) >= len(suffix) && slices.Equal(s[len(s)-len(suffix):], suffix)
}
// CutPrefix returns s without the provided leading prefix slice and reports
// whether it found the prefix. If s doesn't start with prefix, CutPrefix
// returns s, false. If prefix is the empty slice, CutPrefix returns s, true.
// CutPrefix returns slices of the original slice s, not copies.
func CutPrefix[E comparable](s, prefix []E) (after []E, found bool) {
if !HasPrefix(s, prefix) {
return s, false
}
return s[len(prefix):], true
}
// CutSuffix returns s without the provided ending suffix slice and reports
// whether it found the suffix. If s doesn't end with suffix, CutSuffix returns
// s, false. If suffix is the empty slice, CutSuffix returns s, true.
// CutSuffix returns slices of the original slice s, not copies.
func CutSuffix[E comparable](s, suffix []E) (after []E, found bool) {
if !HasSuffix(s, suffix) {
return s, false
}
return s[:len(s)-len(suffix)], true
}
// FirstEqual reports whether len(s) > 0 and
// its first element == v.
func FirstEqual[T comparable](s []T, v T) bool {
return len(s) > 0 && s[0] == v
}
// LastEqual reports whether len(s) > 0 and
// its last element == v.
func LastEqual[T comparable](s []T, v T) bool {
return len(s) > 0 && s[len(s)-1] == v
}
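
A few illustrative calls, not from the diff, showing the helpers above on toy inputs:

package main

import (
	"fmt"

	"tailscale.com/util/slicesx"
)

func main() {
	evens, odds := slicesx.Partition([]int{1, 2, 3, 4, 5}, func(n int) bool { return n%2 == 0 })
	fmt.Println(evens, odds) // [2 4] [1 3 5]

	fmt.Println(slicesx.Interleave([]string{"a", "b", "c"}, []string{"x", "y"})) // [a x b y c]

	if rest, ok := slicesx.CutPrefix([]byte("key=value"), []byte("key=")); ok {
		fmt.Println(string(rest)) // value
	}
}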

122
vendor/tailscale.com/util/syspolicy/caching_handler.go generated vendored Normal file
View File

@@ -0,0 +1,122 @@
// Copyright (c) Tailscale Inc & AUTHORS
// SPDX-License-Identifier: BSD-3-Clause
package syspolicy
import (
"errors"
"sync"
)
// CachingHandler is a handler that reads policies from an underlying handler the first time each key is requested
// and caches the result permanently. An ErrNoSuchKey result is cached as well; any other error is returned without
// caching, so the next read for that key retries the underlying handler.
type CachingHandler struct {
mu sync.Mutex
strings map[string]string
uint64s map[string]uint64
bools map[string]bool
strArrs map[string][]string
notFound map[string]bool
handler Handler
}
// NewCachingHandler creates a CachingHandler given a handler.
func NewCachingHandler(handler Handler) *CachingHandler {
return &CachingHandler{
handler: handler,
strings: make(map[string]string),
uint64s: make(map[string]uint64),
bools: make(map[string]bool),
strArrs: make(map[string][]string),
notFound: make(map[string]bool),
}
}
// ReadString reads the policy settings value string given the key.
// ReadString first reads from the handler's cache before resorting to using the handler.
func (ch *CachingHandler) ReadString(key string) (string, error) {
ch.mu.Lock()
defer ch.mu.Unlock()
if val, ok := ch.strings[key]; ok {
return val, nil
}
if notFound := ch.notFound[key]; notFound {
return "", ErrNoSuchKey
}
val, err := ch.handler.ReadString(key)
if errors.Is(err, ErrNoSuchKey) {
ch.notFound[key] = true
return "", err
} else if err != nil {
return "", err
}
ch.strings[key] = val
return val, nil
}
// ReadUInt64 reads the policy settings uint64 value given the key.
// ReadUInt64 first reads from the handler's cache before resorting to using the handler.
func (ch *CachingHandler) ReadUInt64(key string) (uint64, error) {
ch.mu.Lock()
defer ch.mu.Unlock()
if val, ok := ch.uint64s[key]; ok {
return val, nil
}
if notFound := ch.notFound[key]; notFound {
return 0, ErrNoSuchKey
}
val, err := ch.handler.ReadUInt64(key)
if errors.Is(err, ErrNoSuchKey) {
ch.notFound[key] = true
return 0, err
} else if err != nil {
return 0, err
}
ch.uint64s[key] = val
return val, nil
}
// ReadBoolean reads the policy settings boolean value given the key.
// ReadBoolean first reads from the handler's cache before resorting to using the handler.
func (ch *CachingHandler) ReadBoolean(key string) (bool, error) {
ch.mu.Lock()
defer ch.mu.Unlock()
if val, ok := ch.bools[key]; ok {
return val, nil
}
if notFound := ch.notFound[key]; notFound {
return false, ErrNoSuchKey
}
val, err := ch.handler.ReadBoolean(key)
if errors.Is(err, ErrNoSuchKey) {
ch.notFound[key] = true
return false, err
} else if err != nil {
return false, err
}
ch.bools[key] = val
return val, nil
}
// ReadStringArray reads the policy settings string array value given the key.
// ReadStringArray first reads from the handler's cache before resorting to using the handler.
func (ch *CachingHandler) ReadStringArray(key string) ([]string, error) {
ch.mu.Lock()
defer ch.mu.Unlock()
if val, ok := ch.strArrs[key]; ok {
return val, nil
}
if notFound := ch.notFound[key]; notFound {
return nil, ErrNoSuchKey
}
val, err := ch.handler.ReadStringArray(key)
if errors.Is(err, ErrNoSuchKey) {
ch.notFound[key] = true
return nil, err
} else if err != nil {
return nil, err
}
ch.strArrs[key] = val
return val, nil
}
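A hedged sketch of wrapping a handler with NewCachingHandler; mapHandler is a hypothetical in-memory Handler used only for illustration, and the key values are made up.

package main

import (
	"fmt"

	"tailscale.com/util/syspolicy"
)

// mapHandler is a hypothetical Handler backed by a plain map; every other
// read reports ErrNoSuchKey.
type mapHandler struct{ strings map[string]string }

func (h mapHandler) ReadString(key string) (string, error) {
	if v, ok := h.strings[key]; ok {
		return v, nil
	}
	return "", syspolicy.ErrNoSuchKey
}
func (mapHandler) ReadUInt64(string) (uint64, error)        { return 0, syspolicy.ErrNoSuchKey }
func (mapHandler) ReadBoolean(string) (bool, error)         { return false, syspolicy.ErrNoSuchKey }
func (mapHandler) ReadStringArray(string) ([]string, error) { return nil, syspolicy.ErrNoSuchKey }

func main() {
	ch := syspolicy.NewCachingHandler(mapHandler{strings: map[string]string{
		"LoginURL": "https://controlplane.example.com",
	}})

	v, _ := ch.ReadString("LoginURL") // first read consults mapHandler, then is cached
	fmt.Println(v)

	_, err := ch.ReadString("Tailnet") // ErrNoSuchKey; the not-found result is cached too
	fmt.Println(err)
}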

83
vendor/tailscale.com/util/syspolicy/handler.go generated vendored Normal file
View File

@@ -0,0 +1,83 @@
// Copyright (c) Tailscale Inc & AUTHORS
// SPDX-License-Identifier: BSD-3-Clause
package syspolicy
import (
"errors"
"sync/atomic"
)
var (
handlerUsed atomic.Bool
handler Handler = defaultHandler{}
)
// Handler reads system policies from OS-specific storage.
type Handler interface {
// ReadString reads the policy setting's string value for the given key.
// It should return ErrNoSuchKey if the key does not have a value set.
ReadString(key string) (string, error)
// ReadUInt64 reads the policy setting's uint64 value for the given key.
// It should return ErrNoSuchKey if the key does not have a value set.
ReadUInt64(key string) (uint64, error)
// ReadBoolean reads the policy setting's boolean value for the given key.
// It should return ErrNoSuchKey if the key does not have a value set.
ReadBoolean(key string) (bool, error)
// ReadStringArray reads the policy setting's string array value for the given key.
// It should return ErrNoSuchKey if the key does not have a value set.
ReadStringArray(key string) ([]string, error)
}
// ErrNoSuchKey is returned by a Handler when the specified key does not have a
// value set.
var ErrNoSuchKey = errors.New("no such key")
// defaultHandler is the catch-all syspolicy handler for any platform that isn't Windows or Apple.
type defaultHandler struct{}
func (defaultHandler) ReadString(_ string) (string, error) {
return "", ErrNoSuchKey
}
func (defaultHandler) ReadUInt64(_ string) (uint64, error) {
return 0, ErrNoSuchKey
}
func (defaultHandler) ReadBoolean(_ string) (bool, error) {
return false, ErrNoSuchKey
}
func (defaultHandler) ReadStringArray(_ string) ([]string, error) {
return nil, ErrNoSuchKey
}
// markHandlerInUse is called before handler methods are called.
func markHandlerInUse() {
handlerUsed.Store(true)
}
// RegisterHandler initializes the policy handler and ensures registration will happen once.
func RegisterHandler(h Handler) {
// Technically this assignment is not concurrency-safe, but if the handler
// has already been used (or is registered twice), the CompareAndSwap below
// fails and we panic.
handler = h
if !handlerUsed.CompareAndSwap(false, true) {
panic("handler was already used before registration")
}
}
// TB is a subset of testing.TB that we use to set up test helpers.
// It's defined here to avoid pulling in the testing package.
type TB interface {
Helper()
Cleanup(func())
}
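// SetHandlerForTest sets h as the policy handler for the duration of the test
// and restores the previous handler when the test and its subtests finish.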
func SetHandlerForTest(tb TB, h Handler) {
tb.Helper()
oldHandler := handler
handler = h
tb.Cleanup(func() { handler = oldHandler })
}
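A sketch of how a test might swap in a Handler via SetHandlerForTest; fixedHandler, the key, and the URL are illustrative, and GetString is the helper defined later in this diff (syspolicy.go).

package syspolicy_test

import (
	"testing"

	"tailscale.com/util/syspolicy"
)

// fixedHandler is a hypothetical test double. The embedded Handler stays nil;
// only ReadString is exercised by this test.
type fixedHandler struct{ syspolicy.Handler }

func (fixedHandler) ReadString(key string) (string, error) {
	if key == string(syspolicy.ManagedByURL) {
		return "https://support.example.com", nil
	}
	return "", syspolicy.ErrNoSuchKey
}

func TestManagedByURL(t *testing.T) {
	// *testing.T satisfies the TB subset above; the previous handler is
	// restored by t.Cleanup when the test finishes.
	syspolicy.SetHandlerForTest(t, fixedHandler{})

	got, err := syspolicy.GetString(syspolicy.ManagedByURL, "")
	if err != nil || got != "https://support.example.com" {
		t.Fatalf("GetString(ManagedByURL) = %q, %v", got, err)
	}
}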

105
vendor/tailscale.com/util/syspolicy/handler_windows.go generated vendored Normal file
View File

@@ -0,0 +1,105 @@
// Copyright (c) Tailscale Inc & AUTHORS
// SPDX-License-Identifier: BSD-3-Clause
package syspolicy
import (
"errors"
"fmt"
"tailscale.com/util/clientmetric"
"tailscale.com/util/winutil"
)
var (
windowsErrors = clientmetric.NewCounter("windows_syspolicy_errors")
windowsAny = clientmetric.NewGauge("windows_syspolicy_any")
)
type windowsHandler struct{}
func init() {
RegisterHandler(NewCachingHandler(windowsHandler{}))
keyList := []struct {
isSet func(Key) bool
keys []Key
}{
{
isSet: func(k Key) bool {
_, err := handler.ReadString(string(k))
return err == nil
},
keys: stringKeys,
},
{
isSet: func(k Key) bool {
_, err := handler.ReadBoolean(string(k))
return err == nil
},
keys: boolKeys,
},
{
isSet: func(k Key) bool {
_, err := handler.ReadUInt64(string(k))
return err == nil
},
keys: uint64Keys,
},
}
var anySet bool
for _, l := range keyList {
for _, k := range l.keys {
if !l.isSet(k) {
continue
}
clientmetric.NewGauge(fmt.Sprintf("windows_syspolicy_%s", k)).Set(1)
anySet = true
}
}
if anySet {
windowsAny.Set(1)
}
}
func (windowsHandler) ReadString(key string) (string, error) {
s, err := winutil.GetPolicyString(key)
if errors.Is(err, winutil.ErrNoValue) {
err = ErrNoSuchKey
} else if err != nil {
windowsErrors.Add(1)
}
return s, err
}
func (windowsHandler) ReadUInt64(key string) (uint64, error) {
value, err := winutil.GetPolicyInteger(key)
if errors.Is(err, winutil.ErrNoValue) {
err = ErrNoSuchKey
} else if err != nil {
windowsErrors.Add(1)
}
return value, err
}
func (windowsHandler) ReadBoolean(key string) (bool, error) {
value, err := winutil.GetPolicyInteger(key)
if errors.Is(err, winutil.ErrNoValue) {
err = ErrNoSuchKey
} else if err != nil {
windowsErrors.Add(1)
}
return value != 0, err
}
func (windowsHandler) ReadStringArray(key string) ([]string, error) {
value, err := winutil.GetPolicyStringArray(key)
if errors.Is(err, winutil.ErrNoValue) {
err = ErrNoSuchKey
} else if err != nil {
windowsErrors.Add(1)
}
return value, err
}

View File

@@ -0,0 +1,63 @@
// Copyright (c) Tailscale Inc & AUTHORS
// SPDX-License-Identifier: BSD-3-Clause
// Package internal contains miscellaneous functions and types
// that are internal to the syspolicy packages.
package internal
import (
"bytes"
"github.com/go-json-experiment/json/jsontext"
"tailscale.com/types/lazy"
"tailscale.com/version"
)
// OSForTesting is the operating system override used for testing.
// It follows the same naming convention as [version.OS].
var OSForTesting lazy.SyncValue[string]
// OS is like [version.OS], but supports a test hook.
func OS() string {
return OSForTesting.Get(version.OS)
}
// TB is a subset of testing.TB that we use to set up test helpers.
// It's defined here to avoid pulling in the testing package.
type TB interface {
Helper()
Cleanup(func())
Logf(format string, args ...any)
Error(args ...any)
Errorf(format string, args ...any)
Fatal(args ...any)
Fatalf(format string, args ...any)
}
// EqualJSONForTest compares the JSON in j1 and j2 for semantic equality.
// It returns "", "", true if j1 and j2 are equal. Otherwise, it returns
// indented versions of j1 and j2 and false.
func EqualJSONForTest(tb TB, j1, j2 jsontext.Value) (s1, s2 string, equal bool) {
tb.Helper()
j1 = j1.Clone()
j2 = j2.Clone()
// Canonicalize JSON values for comparison.
if err := j1.Canonicalize(); err != nil {
tb.Error(err)
}
if err := j2.Canonicalize(); err != nil {
tb.Error(err)
}
// Check and return true if the two values are structurally equal.
if bytes.Equal(j1, j2) {
return "", "", true
}
// Otherwise, format the values for display and return false.
if err := j1.Indent("", "\t"); err != nil {
tb.Fatal(err)
}
if err := j2.Indent("", "\t"); err != nil {
tb.Fatal(err)
}
return j1.String(), j2.String(), false
}
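A sketch of how a test alongside this package might use EqualJSONForTest (the JSON payloads are arbitrary examples):

package internal_test

import (
	"testing"

	"github.com/go-json-experiment/json/jsontext"
	"tailscale.com/util/syspolicy/internal"
)

func TestCanonicalComparison(t *testing.T) {
	j1 := jsontext.Value(`{"Scope":"Device","Name":"mdm"}`)
	j2 := jsontext.Value(`{"Name":"mdm","Scope":"Device"}`)

	// Canonicalization sorts object members, so member order does not matter.
	if s1, s2, equal := internal.EqualJSONForTest(t, j1, j2); !equal {
		t.Errorf("JSON values differ:\n%s\n%s", s1, s2)
	}
}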

View File

@@ -0,0 +1,64 @@
// Copyright (c) Tailscale Inc & AUTHORS
// SPDX-License-Identifier: BSD-3-Clause
// Package loggerx provides logging functions to the rest of the syspolicy packages.
package loggerx
import (
"log"
"sync/atomic"
"tailscale.com/types/lazy"
"tailscale.com/types/logger"
"tailscale.com/util/syspolicy/internal"
)
const (
normalPrefix = "syspolicy: "
verbosePrefix = "syspolicy: [v2] "
)
var (
debugLogging atomic.Bool // whether debugging logging is enabled
lazyPrintf lazy.SyncValue[logger.Logf]
lazyVerbosef lazy.SyncValue[logger.Logf]
)
// SetDebugLoggingEnabled controls whether spammy debug logging is enabled.
func SetDebugLoggingEnabled(v bool) {
debugLogging.Store(v)
}
// Errorf formats and writes an error message to the log.
func Errorf(format string, args ...any) {
printf(format, args...)
}
// Verbosef formats and writes an optional, verbose message to the log.
func Verbosef(format string, args ...any) {
if debugLogging.Load() {
printf(format, args...)
} else {
verbosef(format, args...)
}
}
func printf(format string, args ...any) {
lazyPrintf.Get(func() logger.Logf {
return logger.WithPrefix(log.Printf, normalPrefix)
})(format, args...)
}
func verbosef(format string, args ...any) {
lazyVerbosef.Get(func() logger.Logf {
return logger.WithPrefix(log.Printf, verbosePrefix)
})(format, args...)
}
// SetForTest sets the specified printf and verbosef functions for the duration
// of tb and its subtests.
func SetForTest(tb internal.TB, printf, verbosef logger.Logf) {
lazyPrintf.SetForTest(tb, printf, nil)
lazyVerbosef.SetForTest(tb, verbosef, nil)
}
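A sketch of a call site within the syspolicy tree (loggerx is internal, so modules outside it cannot import the package; logPolicyRead is hypothetical):

package syspolicy

import "tailscale.com/util/syspolicy/internal/loggerx"

// logPolicyRead is a hypothetical helper showing the intended split between
// Errorf and Verbosef.
func logPolicyRead(key, val string, err error) {
	if err != nil {
		loggerx.Errorf("failed to read %q: %v", key, err) // always logged with the "syspolicy: " prefix
		return
	}
	// With SetDebugLoggingEnabled(true) this is written with the normal prefix;
	// otherwise it goes out with the "syspolicy: [v2] " prefix.
	loggerx.Verbosef("read %q = %q", key, val)
}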

112
vendor/tailscale.com/util/syspolicy/policy_keys.go generated vendored Normal file
View File

@@ -0,0 +1,112 @@
// Copyright (c) Tailscale Inc & AUTHORS
// SPDX-License-Identifier: BSD-3-Clause
package syspolicy
import "tailscale.com/util/syspolicy/setting"
type Key = setting.Key
const (
// Keys with a string value
ControlURL Key = "LoginURL" // default ""; if blank, ipn uses ipn.DefaultControlURL.
LogTarget Key = "LogTarget" // default ""; if blank logging uses logtail.DefaultHost.
Tailnet Key = "Tailnet" // default ""; if blank, no tailnet name is sent to the server.
// ExitNodeID is the exit node's node id. default ""; if blank, no exit node is forced.
// Exit node ID takes precedence over exit node IP.
// To find the node ID, go to /api.md#device.
ExitNodeID Key = "ExitNodeID"
ExitNodeIP Key = "ExitNodeIP" // default ""; if blank, no exit node is forced. Value is exit node IP.
// Keys with a string value that specifies an option: "always", "never", "user-decides".
// The default is "user-decides" unless otherwise stated. Enforcement of
// these policies is typically performed in ipnlocal.applySysPolicy(). GUIs
// typically hide menu items related to policies that are enforced.
EnableIncomingConnections Key = "AllowIncomingConnections"
EnableServerMode Key = "UnattendedMode"
ExitNodeAllowLANAccess Key = "ExitNodeAllowLANAccess"
EnableTailscaleDNS Key = "UseTailscaleDNSSettings"
EnableTailscaleSubnets Key = "UseTailscaleSubnets"
// CheckUpdates is the key to signal if the updater should periodically
// check for updates.
CheckUpdates Key = "CheckUpdates"
// ApplyUpdates is the key to signal if updates should be automatically
// installed. Its value is "InstallUpdates" because of an awkwardly-named
// visibility option "ApplyUpdates" on MacOS.
ApplyUpdates Key = "InstallUpdates"
// EnableRunExitNode controls if the device acts as an exit node. Even when
// running as an exit node, the device must be approved by a tailnet
// administrator. Its name is slightly awkward because RunExitNodeVisibility
// predates this option but is preserved for backwards compatibility.
EnableRunExitNode Key = "AdvertiseExitNode"
// Keys with a string value that controls visibility: "show", "hide".
// The default is "show" unless otherwise stated. Enforcement of these
// policies is typically performed by the UI code for the relevant operating
// system.
AdminConsoleVisibility Key = "AdminConsole"
NetworkDevicesVisibility Key = "NetworkDevices"
TestMenuVisibility Key = "TestMenu"
UpdateMenuVisibility Key = "UpdateMenu"
ResetToDefaultsVisibility Key = "ResetToDefaults"
// RunExitNodeVisibility controls if the "run as exit node" menu item is
// visible, without controlling the setting itself. This is preserved for
// backwards compatibility but prefer EnableRunExitNode in new deployments.
RunExitNodeVisibility Key = "RunExitNode"
PreferencesMenuVisibility Key = "PreferencesMenu"
ExitNodeMenuVisibility Key = "ExitNodesPicker"
// AutoUpdateVisibility is the key to signal if the menu item for automatic
// installation of updates should be visible. It is only used by macsys
// installations and uses the Sparkle naming convention, even though it does
// not actually control updates, merely the UI for that setting.
AutoUpdateVisibility Key = "ApplyUpdates"
// SuggestedExitNodeVisibility controls the visibility of suggested exit nodes in the client GUI.
// When this system policy is set to 'hide', an exit node suggestion won't be presented to the user as part of the exit nodes picker.
SuggestedExitNodeVisibility Key = "SuggestedExitNode"
// Keys with a string value formatted for use with time.ParseDuration().
KeyExpirationNoticeTime Key = "KeyExpirationNotice" // default 24 hours
// Boolean Keys that are only applicable on Windows. Booleans are stored in the registry as
// DWORD or QWORD (either is acceptable). 0 means false, and anything else means true.
// The default is 0 unless otherwise stated.
LogSCMInteractions Key = "LogSCMInteractions"
FlushDNSOnSessionUnlock Key = "FlushDNSOnSessionUnlock"
// PostureChecking indicates if posture checking is enabled and the client shall gather
// posture data.
// Key is a string value that specifies an option: "always", "never", "user-decides".
// The default is "user-decides" unless otherwise stated.
PostureChecking Key = "PostureChecking"
// DeviceSerialNumber is the serial number of the device that is running Tailscale.
// This is used on iOS/tvOS to allow IT administrators to manually give us a serial number via MDM.
// We are unable to programmatically get the serial number from IOKit due to sandboxing restrictions.
DeviceSerialNumber Key = "DeviceSerialNumber"
// ManagedByOrganizationName indicates the name of the organization managing the Tailscale
// install. It is displayed inside the client UI in a prominent location.
ManagedByOrganizationName Key = "ManagedByOrganizationName"
// ManagedByCaption is an info message displayed inside the client UI as a caption when
// ManagedByOrganizationName is set. It can be used to provide a pointer to support resources
// for Tailscale within the organization.
ManagedByCaption Key = "ManagedByCaption"
// ManagedByURL is a valid URL pointing to a support help desk for Tailscale within the
// organization. A button in the client UI provides easy access to this URL.
ManagedByURL Key = "ManagedByURL"
// AuthKey is an auth key that will be used to login whenever the backend starts. This can be used to
// automatically authenticate managed devices, without requiring user interaction.
AuthKey Key = "AuthKey"
// MachineCertificateSubject is the exact name of a Subject that needs
// to be present in an identity's certificate chain to sign a RegisterRequest,
// formatted as per pkix.Name.String(). The Subject may be that of the identity
// itself, an intermediate CA or the root CA.
//
// Example: "CN=Tailscale Inc Test Root CA,OU=Tailscale Inc Test Certificate Authority,O=Tailscale Inc,ST=ON,C=CA"
MachineCertificateSubject Key = "MachineCertificateSubject"
// Keys with a string array value.
// AllowedSuggestedExitNodes's string array value is a list of exit node IDs that restricts which exit nodes are considered when generating suggestions for exit nodes.
AllowedSuggestedExitNodes Key = "AllowedSuggestedExitNodes"
)
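The Get* helpers that consume these keys appear later in this diff (syspolicy.go); a hedged usage sketch, with defaults chosen only for illustration:

package main

import (
	"fmt"
	"time"

	"tailscale.com/util/syspolicy"
)

func main() {
	// Each helper falls back to the supplied default when the key is unset.
	url, _ := syspolicy.GetString(syspolicy.ControlURL, "")
	incoming, _ := syspolicy.GetPreferenceOption(syspolicy.EnableIncomingConnections)
	admin, _ := syspolicy.GetVisibility(syspolicy.AdminConsoleVisibility)
	notice, _ := syspolicy.GetDuration(syspolicy.KeyExpirationNoticeTime, 24*time.Hour)

	fmt.Println(url, incoming, admin, notice)
}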

View File

@@ -0,0 +1,38 @@
// Copyright (c) Tailscale Inc & AUTHORS
// SPDX-License-Identifier: BSD-3-Clause
package syspolicy
var stringKeys = []Key{
ControlURL,
LogTarget,
Tailnet,
ExitNodeID,
ExitNodeIP,
EnableIncomingConnections,
EnableServerMode,
ExitNodeAllowLANAccess,
EnableTailscaleDNS,
EnableTailscaleSubnets,
AdminConsoleVisibility,
NetworkDevicesVisibility,
TestMenuVisibility,
UpdateMenuVisibility,
RunExitNodeVisibility,
PreferencesMenuVisibility,
ExitNodeMenuVisibility,
AutoUpdateVisibility,
ResetToDefaultsVisibility,
KeyExpirationNoticeTime,
PostureChecking,
ManagedByOrganizationName,
ManagedByCaption,
ManagedByURL,
}
var boolKeys = []Key{
LogSCMInteractions,
FlushDNSOnSessionUnlock,
}
var uint64Keys = []Key{}

71
vendor/tailscale.com/util/syspolicy/setting/errors.go generated vendored Normal file
View File

@@ -0,0 +1,71 @@
// Copyright (c) Tailscale Inc & AUTHORS
// SPDX-License-Identifier: BSD-3-Clause
package setting
import (
"errors"
"tailscale.com/types/ptr"
)
var (
// ErrNotConfigured is returned when the requested policy setting is not configured.
ErrNotConfigured = errors.New("not configured")
// ErrTypeMismatch is returned when there's a type mismatch between the actual type
// of the setting value and the expected type.
ErrTypeMismatch = errors.New("type mismatch")
// ErrNoSuchKey is returned by [DefinitionOf] when no policy setting
// has been registered with the specified key.
//
// Until 2024-08-02, this error was also returned by a [Handler] when the specified
// key did not have a value set. While the package maintains compatibility with this
// usage of ErrNoSuchKey, it is recommended to return [ErrNotConfigured] from newer
// [source.Store] implementations.
ErrNoSuchKey = errors.New("no such key")
)
// ErrorText represents an error that occurs when reading or parsing a policy setting.
// This includes errors due to permissions issues, value type and format mismatches,
// and other platform- or source-specific errors. It does not include
// [ErrNotConfigured] and [ErrNoSuchKey], as those correspond to unconfigured
// policy settings rather than settings that cannot be read or parsed
// due to an error.
//
// ErrorText is used to marshal errors when a policy setting is sent over the wire,
// allowing the error to be logged or displayed. It does not preserve the
// type information of the underlying error.
type ErrorText string
// NewErrorText returns a [ErrorText] with the specified error message.
func NewErrorText(text string) *ErrorText {
return ptr.To(ErrorText(text))
}
// MaybeErrorText returns an [ErrorText] with the text of the specified error,
// or nil if err is nil, [ErrNotConfigured], or [ErrNoSuchKey].
func MaybeErrorText(err error) *ErrorText {
if err == nil || errors.Is(err, ErrNotConfigured) || errors.Is(err, ErrNoSuchKey) {
return nil
}
if err, ok := err.(*ErrorText); ok {
return err
}
return ptr.To(ErrorText(err.Error()))
}
// Error implements error.
func (e ErrorText) Error() string {
return string(e)
}
// MarshalText implements [encoding.TextMarshaler].
func (e ErrorText) MarshalText() (text []byte, err error) {
return []byte(e.Error()), nil
}
// UnmarshalText implements [encoding.TextUnmarshaler].
func (e *ErrorText) UnmarshalText(text []byte) error {
*e = ErrorText(text)
return nil
}
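A brief sketch of how MaybeErrorText collapses "not configured" conditions to nil while preserving other errors (os.ErrPermission stands in for a real store error):

package main

import (
	"fmt"
	"os"

	"tailscale.com/util/syspolicy/setting"
)

func main() {
	// Unconfigured settings are not reportable errors.
	fmt.Println(setting.MaybeErrorText(setting.ErrNotConfigured)) // <nil>
	fmt.Println(setting.MaybeErrorText(setting.ErrNoSuchKey))     // <nil>

	// Anything else is preserved as a marshallable *ErrorText.
	fmt.Println(setting.MaybeErrorText(os.ErrPermission)) // permission denied
}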

13
vendor/tailscale.com/util/syspolicy/setting/key.go generated vendored Normal file
View File

@@ -0,0 +1,13 @@
// Copyright (c) Tailscale Inc & AUTHORS
// SPDX-License-Identifier: BSD-3-Clause
package setting
// Key is a string that uniquely identifies a policy and must remain unchanged
// once established and documented for a given policy setting. It may contain
// alphanumeric characters and zero or more [KeyPathSeparator]s to group
// individual policy settings into categories.
type Key string
// KeyPathSeparator allows logical grouping of policy settings into categories.
const KeyPathSeparator = "/"

71
vendor/tailscale.com/util/syspolicy/setting/origin.go generated vendored Normal file
View File

@@ -0,0 +1,71 @@
// Copyright (c) Tailscale Inc & AUTHORS
// SPDX-License-Identifier: BSD-3-Clause
package setting
import (
"fmt"
jsonv2 "github.com/go-json-experiment/json"
"github.com/go-json-experiment/json/jsontext"
)
// Origin describes where a policy or a policy setting is configured.
type Origin struct {
data settingOrigin
}
// settingOrigin is the marshallable data of an [Origin].
type settingOrigin struct {
Name string `json:",omitzero"`
Scope PolicyScope
}
// NewOrigin returns a new [Origin] with the specified scope.
func NewOrigin(scope PolicyScope) *Origin {
return NewNamedOrigin("", scope)
}
// NewNamedOrigin returns a new [Origin] with the specified scope and name.
func NewNamedOrigin(name string, scope PolicyScope) *Origin {
return &Origin{settingOrigin{name, scope}}
}
// Scope reports the policy [PolicyScope] where the setting is configured.
func (s Origin) Scope() PolicyScope {
return s.data.Scope
}
// Name returns the name of the policy source where the setting is configured,
// or "" if not available.
func (s Origin) Name() string {
return s.data.Name
}
// String implements [fmt.Stringer].
func (s Origin) String() string {
if s.Name() != "" {
return fmt.Sprintf("%s (%v)", s.Name(), s.Scope())
}
return s.Scope().String()
}
// MarshalJSONV2 implements [jsonv2.MarshalerV2].
func (s Origin) MarshalJSONV2(out *jsontext.Encoder, opts jsonv2.Options) error {
return jsonv2.MarshalEncode(out, &s.data, opts)
}
// UnmarshalJSONV2 implements [jsonv2.UnmarshalerV2].
func (s *Origin) UnmarshalJSONV2(in *jsontext.Decoder, opts jsonv2.Options) error {
return jsonv2.UnmarshalDecode(in, &s.data, opts)
}
// MarshalJSON implements [json.Marshaler].
func (s Origin) MarshalJSON() ([]byte, error) {
return jsonv2.Marshal(s) // uses MarshalJSONV2
}
// UnmarshalJSON implements [json.Unmarshaler].
func (s *Origin) UnmarshalJSON(b []byte) error {
return jsonv2.Unmarshal(b, s) // uses UnmarshalJSONV2
}

View File

@@ -0,0 +1,189 @@
// Copyright (c) Tailscale Inc & AUTHORS
// SPDX-License-Identifier: BSD-3-Clause
package setting
import (
"fmt"
"strings"
"tailscale.com/types/lazy"
)
var (
lazyDefaultScope lazy.SyncValue[PolicyScope]
// DeviceScope indicates a scope containing device-global policies.
DeviceScope = PolicyScope{kind: DeviceSetting}
// CurrentProfileScope indicates a scope containing policies that apply to the
// currently active Tailscale profile.
CurrentProfileScope = PolicyScope{kind: ProfileSetting}
// CurrentUserScope indicates a scope containing policies that apply to the
// current user, for whatever that means on the current platform and
// in the current application context.
CurrentUserScope = PolicyScope{kind: UserSetting}
)
// PolicyScope is a management scope.
type PolicyScope struct {
kind Scope
userID string
profileID string
}
// DefaultScope returns the default [PolicyScope] to be used by a program
// when querying policy settings.
// It returns [DeviceScope], unless explicitly changed with [SetDefaultScope].
func DefaultScope() PolicyScope {
return lazyDefaultScope.Get(func() PolicyScope { return DeviceScope })
}
// SetDefaultScope attempts to set the specified scope as the default scope
// to be used by a program when querying policy settings.
// It fails and returns false if called more than once, or if the [DefaultScope]
// has already been used.
func SetDefaultScope(scope PolicyScope) bool {
return lazyDefaultScope.Set(scope)
}
// UserScopeOf returns a policy [PolicyScope] of the user with the specified id.
func UserScopeOf(uid string) PolicyScope {
return PolicyScope{kind: UserSetting, userID: uid}
}
// Kind reports the scope kind of s.
func (s PolicyScope) Kind() Scope {
return s.kind
}
// IsApplicableSetting reports whether the specified setting applies to
// and can be retrieved for this scope. Policy settings are applicable
// to their own scopes as well as more specific scopes. For example,
// device settings are applicable to device, profile and user scopes,
// but user settings are only applicable to user scopes.
// For instance, a menu visibility setting is inherently a user setting
// and only makes sense in the context of a specific user.
func (s PolicyScope) IsApplicableSetting(setting *Definition) bool {
return setting != nil && setting.Scope() <= s.Kind()
}
// IsConfigurableSetting reports whether the specified setting can be configured
// by a policy at this scope. Policy settings are configurable at their own scopes
// as well as broader scopes. For example, [UserSetting]s are configurable in
// user, profile, and device scopes, but [DeviceSetting]s are only configurable
// in the [DeviceScope]. For instance, the InstallUpdates policy setting
// can only be configured in the device scope, as it controls whether updates
// will be installed automatically on the device, rather than for specific users.
func (s PolicyScope) IsConfigurableSetting(setting *Definition) bool {
return setting != nil && setting.Scope() >= s.Kind()
}
// Contains reports whether policy settings that apply to s also apply to s2.
// For example, policy settings that apply to the [DeviceScope] also apply to
// the [CurrentUserScope].
func (s PolicyScope) Contains(s2 PolicyScope) bool {
if s.Kind() > s2.Kind() {
return false
}
switch s.Kind() {
case DeviceSetting:
return true
case ProfileSetting:
return s.profileID == s2.profileID
case UserSetting:
return s.userID == s2.userID
default:
panic("unreachable")
}
}
// StrictlyContains is like [PolicyScope.Contains], but returns false
// when s and s2 are the same scope.
func (s PolicyScope) StrictlyContains(s2 PolicyScope) bool {
return s != s2 && s.Contains(s2)
}
// String implements [fmt.Stringer].
func (s PolicyScope) String() string {
if s.profileID == "" && s.userID == "" {
return s.kind.String()
}
return s.stringSlow()
}
// MarshalText implements [encoding.TextMarshaler].
func (s PolicyScope) MarshalText() ([]byte, error) {
return []byte(s.String()), nil
}
// UnmarshalText implements [encoding.TextUnmarshaler].
func (s *PolicyScope) UnmarshalText(b []byte) error {
*s = PolicyScope{}
parts := strings.SplitN(string(b), "/", 2)
for i, part := range parts {
kind, id, err := parseScopeAndID(part)
if err != nil {
return err
}
if i > 0 && kind <= s.kind {
return fmt.Errorf("invalid scope hierarchy: %s", b)
}
s.kind = kind
switch kind {
case DeviceSetting:
if id != "" {
return fmt.Errorf("the device scope must not have an ID: %s", b)
}
case ProfileSetting:
s.profileID = id
case UserSetting:
s.userID = id
}
}
return nil
}
func (s PolicyScope) stringSlow() string {
var sb strings.Builder
writeScopeWithID := func(s Scope, id string) {
sb.WriteString(s.String())
if id != "" {
sb.WriteRune('(')
sb.WriteString(id)
sb.WriteRune(')')
}
}
if s.kind == ProfileSetting || s.profileID != "" {
writeScopeWithID(ProfileSetting, s.profileID)
if s.kind != ProfileSetting {
sb.WriteRune('/')
}
}
if s.kind == UserSetting {
writeScopeWithID(UserSetting, s.userID)
}
return sb.String()
}
func parseScopeAndID(s string) (scope Scope, id string, err error) {
name, params, ok := extractScopeAndParams(s)
if !ok {
return 0, "", fmt.Errorf("%q is not a valid scope string", s)
}
if err := scope.UnmarshalText([]byte(name)); err != nil {
return 0, "", err
}
return scope, params, nil
}
func extractScopeAndParams(s string) (name, params string, ok bool) {
paramsStart := strings.Index(s, "(")
if paramsStart == -1 {
return s, "", true
}
paramsEnd := strings.LastIndex(s, ")")
if paramsEnd < paramsStart {
return "", "", false
}
return s[0:paramsStart], s[paramsStart+1 : paramsEnd], true
}
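A short sketch of the scope hierarchy and the text round-trip (the user ID "alice" is illustrative):

package main

import (
	"fmt"

	"tailscale.com/util/syspolicy/setting"
)

func main() {
	alice := setting.UserScopeOf("alice")

	fmt.Println(setting.DeviceScope.Contains(alice))         // true: device policies apply to every user
	fmt.Println(alice.StrictlyContains(setting.DeviceScope)) // false: a user scope is narrower
	fmt.Println(alice)                                       // User(alice)

	var parsed setting.PolicyScope
	_ = parsed.UnmarshalText([]byte("user(alice)"))
	fmt.Println(parsed == alice) // true
}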

View File

@@ -0,0 +1,67 @@
// Copyright (c) Tailscale Inc & AUTHORS
// SPDX-License-Identifier: BSD-3-Clause
package setting
import (
"fmt"
"tailscale.com/types/structs"
)
// RawItem contains a raw policy setting value as read from a policy store, or an
// error if the requested setting could not be read from the store. As a special
// case, it may also hold a value of the [Visibility], [PreferenceOption],
// or [time.Duration] types. While the policy store interface does not support
// these types natively, and the values of these types have to be unmarshalled
// or converted from strings, these setting types predate the typed policy
// hierarchies, and must be supported at this layer.
type RawItem struct {
_ structs.Incomparable
value any
err *ErrorText
origin *Origin // or nil
}
// RawItemOf returns a [RawItem] with the specified value.
func RawItemOf(value any) RawItem {
return RawItemWith(value, nil, nil)
}
// RawItemWith returns a [RawItem] with the specified value, error and origin.
func RawItemWith(value any, err *ErrorText, origin *Origin) RawItem {
return RawItem{value: value, err: err, origin: origin}
}
// Value returns the value of the policy setting, or nil if the policy setting
// is not configured, or an error occurred while reading it.
func (i RawItem) Value() any {
return i.value
}
// Error returns the error that occurred when reading the policy setting,
// or nil if no error occurred.
func (i RawItem) Error() error {
if i.err != nil {
return i.err
}
return nil
}
// Origin returns an optional [Origin] indicating where the policy setting is
// configured.
func (i RawItem) Origin() *Origin {
return i.origin
}
// String implements [fmt.Stringer].
func (i RawItem) String() string {
var suffix string
if i.origin != nil {
suffix = fmt.Sprintf(" - {%v}", i.origin)
}
if i.err != nil {
return fmt.Sprintf("Error{%q}%s", i.err.Error(), suffix)
}
return fmt.Sprintf("%v%s", i.value, suffix)
}

348
vendor/tailscale.com/util/syspolicy/setting/setting.go generated vendored Normal file
View File

@@ -0,0 +1,348 @@
// Copyright (c) Tailscale Inc & AUTHORS
// SPDX-License-Identifier: BSD-3-Clause
// Package setting contains types for defining and representing policy settings.
// It facilitates the registration of setting definitions using [Register] and [RegisterDefinition],
// and the retrieval of registered setting definitions via [Definitions] and [DefinitionOf].
// This package is intended for use primarily within the syspolicy package hierarchy.
package setting
import (
"fmt"
"slices"
"strings"
"sync"
"time"
"tailscale.com/types/lazy"
"tailscale.com/util/syspolicy/internal"
)
// Scope indicates the broadest scope at which a policy setting may apply,
// and the narrowest scope at which it may be configured.
type Scope int8
const (
// DeviceSetting indicates a policy setting that applies to a device, regardless of
// which OS user or Tailscale profile is currently active, if any.
// It can only be configured at a [DeviceScope].
DeviceSetting Scope = iota
// ProfileSetting indicates a policy setting that applies to a Tailscale profile.
// It can only be configured for a specific profile or at a [DeviceScope],
// in which case it applies to all profiles on the device.
ProfileSetting
// UserSetting indicates a policy setting that applies to users.
// It can be configured for a user, profile, or the entire device.
UserSetting
// NumScopes is the number of possible [Scope] values.
NumScopes int = iota // must be the last value in the const block.
)
// String implements [fmt.Stringer].
func (s Scope) String() string {
switch s {
case DeviceSetting:
return "Device"
case ProfileSetting:
return "Profile"
case UserSetting:
return "User"
default:
panic("unreachable")
}
}
// MarshalText implements [encoding.TextMarshaler].
func (s Scope) MarshalText() (text []byte, err error) {
return []byte(s.String()), nil
}
// UnmarshalText implements [encoding.TextUnmarshaler].
func (s *Scope) UnmarshalText(text []byte) error {
switch strings.ToLower(string(text)) {
case "device":
*s = DeviceSetting
case "profile":
*s = ProfileSetting
case "user":
*s = UserSetting
default:
return fmt.Errorf("%q is not a valid scope", string(text))
}
return nil
}
// Type is a policy setting value type.
// Except for [InvalidValue], which represents an invalid policy setting type,
// and [PreferenceOptionValue], [VisibilityValue], and [DurationValue],
// which have special handling due to their legacy status in the package,
// Type values represent the raw value types readable from policy stores.
type Type int
const (
// InvalidValue indicates an invalid policy setting value type.
InvalidValue Type = iota
// BooleanValue indicates a policy setting whose underlying type is a bool.
BooleanValue
// IntegerValue indicates a policy setting whose underlying type is a uint64.
IntegerValue
// StringValue indicates a policy setting whose underlying type is a string.
StringValue
// StringListValue indicates a policy setting whose underlying type is a []string.
StringListValue
// PreferenceOptionValue indicates a three-state policy setting whose
// underlying type is a string, but the actual value is a [PreferenceOption].
PreferenceOptionValue
// VisibilityValue indicates a two-state boolean-like policy setting whose
// underlying type is a string, but the actual value is a [Visibility].
VisibilityValue
// DurationValue indicates an interval/period/duration policy setting whose
// underlying type is a string, but the actual value is a [time.Duration].
DurationValue
)
// String returns a string representation of t.
func (t Type) String() string {
switch t {
case InvalidValue:
return "Invalid"
case BooleanValue:
return "Boolean"
case IntegerValue:
return "Integer"
case StringValue:
return "String"
case StringListValue:
return "StringList"
case PreferenceOptionValue:
return "PreferenceOption"
case VisibilityValue:
return "Visibility"
case DurationValue:
return "Duration"
default:
panic("unreachable")
}
}
// ValueType is a constraint that allows Go types corresponding to [Type].
type ValueType interface {
bool | uint64 | string | []string | Visibility | PreferenceOption | time.Duration
}
// Definition defines policy key, scope and value type.
type Definition struct {
key Key
scope Scope
typ Type
platforms PlatformList
}
// NewDefinition returns a new [Definition] with the specified
// key, scope, type and supported platforms (see [PlatformList]).
func NewDefinition(k Key, s Scope, t Type, platforms ...string) *Definition {
return &Definition{key: k, scope: s, typ: t, platforms: platforms}
}
// Key returns a policy setting's identifier.
func (d *Definition) Key() Key {
if d == nil {
return ""
}
return d.key
}
// Scope reports the broadest [Scope] the policy setting may apply to.
func (d *Definition) Scope() Scope {
if d == nil {
return 0
}
return d.scope
}
// Type reports the underlying value type of the policy setting.
func (d *Definition) Type() Type {
if d == nil {
return InvalidValue
}
return d.typ
}
// IsSupported reports whether the policy setting is supported on the current OS.
func (d *Definition) IsSupported() bool {
if d == nil {
return false
}
return d.platforms.HasCurrent()
}
// SupportedPlatforms reports platforms on which the policy setting is supported.
// An empty [PlatformList] indicates that the policy setting is available on all platforms.
func (d *Definition) SupportedPlatforms() PlatformList {
if d == nil {
return nil
}
return d.platforms
}
// String implements [fmt.Stringer].
func (d *Definition) String() string {
if d == nil {
return "(nil)"
}
return fmt.Sprintf("%v(%q, %v)", d.scope, d.key, d.typ)
}
// Equal reports whether d and d2 have the same key, type and scope.
// It does not check whether both d and d2 are supported on the same platforms.
func (d *Definition) Equal(d2 *Definition) bool {
if d == d2 {
return true
}
if d == nil || d2 == nil {
return false
}
return d.key == d2.key && d.typ == d2.typ && d.scope == d2.scope
}
// DefinitionMap is a map of setting [Definition] by [Key].
type DefinitionMap map[Key]*Definition
var (
definitions lazy.SyncValue[DefinitionMap]
definitionsMu sync.Mutex
definitionsList []*Definition
definitionsUsed bool
)
// Register registers a policy setting with the specified key, scope, value type,
// and an optional list of supported platforms. All policy settings must be
// registered before any of them can be used. Register panics if called after
// invoking any functions that use the registered policy definitions. This
// includes calling [Definitions] or [DefinitionOf] directly, or reading any
// policy settings via syspolicy.
func Register(k Key, s Scope, t Type, platforms ...string) {
RegisterDefinition(NewDefinition(k, s, t, platforms...))
}
// RegisterDefinition is like [Register], but accepts a [Definition].
func RegisterDefinition(d *Definition) {
definitionsMu.Lock()
defer definitionsMu.Unlock()
registerLocked(d)
}
func registerLocked(d *Definition) {
if definitionsUsed {
panic("policy definitions are already in use")
}
definitionsList = append(definitionsList, d)
}
func settingDefinitions() (DefinitionMap, error) {
return definitions.GetErr(func() (DefinitionMap, error) {
definitionsMu.Lock()
defer definitionsMu.Unlock()
definitionsUsed = true
return DefinitionMapOf(definitionsList)
})
}
// DefinitionMapOf returns a [DefinitionMap] with the specified settings,
// or an error if any settings have the same key but different type or scope.
func DefinitionMapOf(settings []*Definition) (DefinitionMap, error) {
m := make(DefinitionMap, len(settings))
for _, s := range settings {
if existing, exists := m[s.key]; exists {
if existing.Equal(s) {
// Ignore duplicate setting definitions if they match. It is acceptable
// if the same policy setting was registered more than once
// (e.g. by the syspolicy package itself and by iOS/Android code).
existing.platforms.mergeFrom(s.platforms)
continue
}
return nil, fmt.Errorf("duplicate policy definition: %q", s.key)
}
m[s.key] = s
}
return m, nil
}
// SetDefinitionsForTest registers the specified setting definitions
// for the duration of the test. It is not concurrency-safe, but unlike [Register],
// it does not panic and can be called anytime.
// It returns an error if ds contains two different settings with the same [Key].
func SetDefinitionsForTest(tb lazy.TB, ds ...*Definition) error {
m, err := DefinitionMapOf(ds)
if err != nil {
return err
}
definitions.SetForTest(tb, m, err)
return nil
}
// DefinitionOf returns a setting definition by key,
// or [ErrNoSuchKey] if the specified key does not exist,
// or an error if there are conflicting policy definitions.
func DefinitionOf(k Key) (*Definition, error) {
ds, err := settingDefinitions()
if err != nil {
return nil, err
}
if d, ok := ds[k]; ok {
return d, nil
}
return nil, ErrNoSuchKey
}
// Definitions returns all registered setting definitions,
// or an error if different policies were registered under the same name.
func Definitions() ([]*Definition, error) {
ds, err := settingDefinitions()
if err != nil {
return nil, err
}
res := make([]*Definition, 0, len(ds))
for _, d := range ds {
res = append(res, d)
}
return res, nil
}
// PlatformList is a list of OSes.
// An empty list indicates that all possible platforms are supported.
type PlatformList []string
// Has reports whether l contains the target platform.
func (l PlatformList) Has(target string) bool {
if len(l) == 0 {
return true
}
return slices.ContainsFunc(l, func(os string) bool {
return strings.EqualFold(os, target)
})
}
// HasCurrent is like Has, but for the current platform.
func (l PlatformList) HasCurrent() bool {
return l.Has(internal.OS())
}
// mergeFrom merges l2 into l. Since an empty list indicates no platform restrictions,
// if either l or l2 is empty, the merged result in l will also be empty.
func (l *PlatformList) mergeFrom(l2 PlatformList) {
switch {
case len(*l) == 0:
// No-op. An empty list indicates no platform restrictions.
case len(l2) == 0:
// Merging with an empty list results in an empty list.
*l = l2
default:
// Append, sort and dedup.
*l = append(*l, l2...)
slices.Sort(*l)
*l = slices.Compact(*l)
}
}
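A hedged sketch of registration and lookup; "ExampleSetting" and the platform names are illustrative, and registration must complete (typically in an init function) before any definitions are read:

package main

import (
	"fmt"

	"tailscale.com/util/syspolicy/setting"
)

func init() {
	// Register must run before DefinitionOf or Definitions is first used.
	setting.Register("ExampleSetting", setting.DeviceSetting, setting.BooleanValue, "windows", "macos")
}

func main() {
	d, err := setting.DefinitionOf("ExampleSetting")
	if err != nil {
		panic(err)
	}
	fmt.Println(d)                      // Device("ExampleSetting", Boolean)
	fmt.Println(d.SupportedPlatforms()) // [windows macos]
}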

170
vendor/tailscale.com/util/syspolicy/setting/snapshot.go generated vendored Normal file
View File

@@ -0,0 +1,170 @@
// Copyright (c) Tailscale Inc & AUTHORS
// SPDX-License-Identifier: BSD-3-Clause
package setting
import (
"iter"
"maps"
"slices"
"strings"
xmaps "golang.org/x/exp/maps"
"tailscale.com/util/deephash"
)
// Snapshot is an immutable collection of ([Key], [RawItem]) pairs, representing
// a set of policy settings applied at a specific moment in time.
// A nil pointer to [Snapshot] is valid.
type Snapshot struct {
m map[Key]RawItem
sig deephash.Sum // of m
summary Summary
}
// NewSnapshot returns a new [Snapshot] with the specified items and options.
func NewSnapshot(items map[Key]RawItem, opts ...SummaryOption) *Snapshot {
return &Snapshot{m: xmaps.Clone(items), sig: deephash.Hash(&items), summary: SummaryWith(opts...)}
}
// All returns an iterator over policy settings in s. The iteration order is not
// specified and is not guaranteed to be the same from one call to the next.
func (s *Snapshot) All() iter.Seq2[Key, RawItem] {
if s == nil {
return func(yield func(Key, RawItem) bool) {}
}
return maps.All(s.m)
}
// Get returns the value of the policy setting with the specified key
// or nil if it is not configured or has an error.
func (s *Snapshot) Get(k Key) any {
v, _ := s.GetErr(k)
return v
}
// GetErr returns the value of the policy setting with the specified key,
// [ErrNotConfigured] if it is not configured, or an error returned by
// the policy Store if the policy setting could not be read.
func (s *Snapshot) GetErr(k Key) (any, error) {
if s != nil {
if s, ok := s.m[k]; ok {
return s.Value(), s.Error()
}
}
return nil, ErrNotConfigured
}
// GetSetting returns the untyped policy setting with the specified key and true
// if a policy setting with such key has been configured;
// otherwise, it returns zero, false.
func (s *Snapshot) GetSetting(k Key) (setting RawItem, ok bool) {
setting, ok = s.m[k]
return setting, ok
}
// Equal reports whether s and s2 are equal.
func (s *Snapshot) Equal(s2 *Snapshot) bool {
if !s.EqualItems(s2) {
return false
}
return s.Summary() == s2.Summary()
}
// EqualItems reports whether items in s and s2 are equal.
func (s *Snapshot) EqualItems(s2 *Snapshot) bool {
if s == s2 {
return true
}
if s.Len() != s2.Len() {
return false
}
if s.Len() == 0 {
return true
}
return s.sig == s2.sig
}
// Keys returns an iterator over keys in s. The iteration order is not specified
// and is not guaranteed to be the same from one call to the next.
func (s *Snapshot) Keys() iter.Seq[Key] {
	if s == nil || s.m == nil {
return func(yield func(Key) bool) {}
}
return maps.Keys(s.m)
}
// Len reports the number of [RawItem]s in s.
func (s *Snapshot) Len() int {
if s == nil {
return 0
}
return len(s.m)
}
// Summary returns information about s as a whole rather than about specific [RawItem]s in it.
func (s *Snapshot) Summary() Summary {
if s == nil {
return Summary{}
}
return s.summary
}
// String implements [fmt.Stringer]
func (s *Snapshot) String() string {
if s.Len() == 0 && s.Summary().IsEmpty() {
return "{Empty}"
}
var sb strings.Builder
if !s.summary.IsEmpty() {
sb.WriteRune('{')
if s.Len() == 0 {
sb.WriteString("Empty, ")
}
sb.WriteString(s.summary.String())
sb.WriteRune('}')
}
for _, k := range slices.Sorted(s.Keys()) {
if sb.Len() != 0 {
sb.WriteRune('\n')
}
sb.WriteString(string(k))
sb.WriteString(" = ")
sb.WriteString(s.m[k].String())
}
return sb.String()
}
// MergeSnapshots returns a [Snapshot] that contains all [RawItem]s
// from snapshot1 and snapshot2 and the [Summary] with the narrower [PolicyScope].
// If there's a conflict between policy settings in the two snapshots,
// the policy settings from the snapshot with the broader scope take precedence.
// In other words, policy settings configured for the [DeviceScope] win
// over policy settings configured for a user scope.
func MergeSnapshots(snapshot1, snapshot2 *Snapshot) *Snapshot {
scope1, ok1 := snapshot1.Summary().Scope().GetOk()
scope2, ok2 := snapshot2.Summary().Scope().GetOk()
if ok1 && ok2 && scope1.StrictlyContains(scope2) {
// Swap snapshots if snapshot1 has higher precedence than snapshot2.
snapshot1, snapshot2 = snapshot2, snapshot1
}
if snapshot2.Len() == 0 {
return snapshot1
}
summaryOpts := make([]SummaryOption, 0, 2)
if scope, ok := snapshot1.Summary().Scope().GetOk(); ok {
// Use the scope from snapshot1, if present, which is the more specific snapshot.
summaryOpts = append(summaryOpts, scope)
}
if snapshot1.Len() == 0 {
if origin, ok := snapshot2.Summary().Origin().GetOk(); ok {
// Use the origin from snapshot2 if snapshot1 is empty.
summaryOpts = append(summaryOpts, origin)
}
return &Snapshot{snapshot2.m, snapshot2.sig, SummaryWith(summaryOpts...)}
}
m := make(map[Key]RawItem, snapshot1.Len()+snapshot2.Len())
xmaps.Copy(m, snapshot1.m)
xmaps.Copy(m, snapshot2.m) // snapshot2 has higher precedence
return &Snapshot{m, deephash.Hash(&m), SummaryWith(summaryOpts...)}
}
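A sketch of merge precedence; the keys and values are illustrative strings rather than fully parsed policy values:

package main

import (
	"fmt"

	"tailscale.com/util/syspolicy/setting"
)

func main() {
	device := setting.NewSnapshot(map[setting.Key]setting.RawItem{
		"ExitNodeAllowLANAccess": setting.RawItemOf("never"),
	}, setting.DeviceScope)

	user := setting.NewSnapshot(map[setting.Key]setting.RawItem{
		"ExitNodeAllowLANAccess": setting.RawItemOf("always"),
		"AdminConsole":           setting.RawItemOf("hide"),
	}, setting.CurrentUserScope)

	// Device-scope settings win on conflict; the merged snapshot keeps the
	// narrower (user) scope in its summary.
	merged := setting.MergeSnapshots(device, user)
	fmt.Println(merged.Get("ExitNodeAllowLANAccess")) // never
	fmt.Println(merged.Get("AdminConsole"))           // hide
}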

100
vendor/tailscale.com/util/syspolicy/setting/summary.go generated vendored Normal file
View File

@@ -0,0 +1,100 @@
// Copyright (c) Tailscale Inc & AUTHORS
// SPDX-License-Identifier: BSD-3-Clause
package setting
import (
jsonv2 "github.com/go-json-experiment/json"
"github.com/go-json-experiment/json/jsontext"
"tailscale.com/types/opt"
)
// Summary is an immutable [PolicyScope] and [Origin].
type Summary struct {
data summary
}
type summary struct {
Scope opt.Value[PolicyScope] `json:",omitzero"`
Origin opt.Value[Origin] `json:",omitzero"`
}
// SummaryWith returns a [Summary] with the specified options.
func SummaryWith(opts ...SummaryOption) Summary {
var summary Summary
for _, o := range opts {
o.applySummaryOption(&summary)
}
return summary
}
// IsEmpty reports whether s is empty.
func (s Summary) IsEmpty() bool {
return s == Summary{}
}
// Scope reports the [PolicyScope] in s.
func (s Summary) Scope() opt.Value[PolicyScope] {
return s.data.Scope
}
// Origin reports the [Origin] in s.
func (s Summary) Origin() opt.Value[Origin] {
return s.data.Origin
}
// String implements [fmt.Stringer].
func (s Summary) String() string {
if s.IsEmpty() {
return "{Empty}"
}
if origin, ok := s.data.Origin.GetOk(); ok {
return origin.String()
}
return s.data.Scope.String()
}
// MarshalJSONV2 implements [jsonv2.MarshalerV2].
func (s Summary) MarshalJSONV2(out *jsontext.Encoder, opts jsonv2.Options) error {
return jsonv2.MarshalEncode(out, &s.data, opts)
}
// UnmarshalJSONV2 implements [jsonv2.UnmarshalerV2].
func (s *Summary) UnmarshalJSONV2(in *jsontext.Decoder, opts jsonv2.Options) error {
return jsonv2.UnmarshalDecode(in, &s.data, opts)
}
// MarshalJSON implements [json.Marshaler].
func (s Summary) MarshalJSON() ([]byte, error) {
return jsonv2.Marshal(s) // uses MarshalJSONV2
}
// UnmarshalJSON implements [json.Unmarshaler].
func (s *Summary) UnmarshalJSON(b []byte) error {
return jsonv2.Unmarshal(b, s) // uses UnmarshalJSONV2
}
// SummaryOption is an option that configures a [Summary].
// The following are allowed options:
//
// - [Summary]
// - [PolicyScope]
// - [Origin]
type SummaryOption interface {
applySummaryOption(summary *Summary)
}
func (s PolicyScope) applySummaryOption(summary *Summary) {
summary.data.Scope.Set(s)
}
func (o Origin) applySummaryOption(summary *Summary) {
summary.data.Origin.Set(o)
if !summary.data.Scope.IsSet() {
summary.data.Scope.Set(o.Scope())
}
}
func (s Summary) applySummaryOption(summary *Summary) {
*summary = s
}

136
vendor/tailscale.com/util/syspolicy/setting/types.go generated vendored Normal file
View File

@@ -0,0 +1,136 @@
// Copyright (c) Tailscale Inc & AUTHORS
// SPDX-License-Identifier: BSD-3-Clause
package setting
import (
"encoding"
)
// PreferenceOption is a policy that governs whether a boolean variable
// is forcibly assigned an administrator-defined value, or allowed to receive
// a user-defined value.
type PreferenceOption byte
const (
ShowChoiceByPolicy PreferenceOption = iota
NeverByPolicy
AlwaysByPolicy
)
// Show reports whether the UI option that controls the choice administered by this
// policy should be shown. Currently this is true if and only if the policy is
// [ShowChoiceByPolicy].
func (p PreferenceOption) Show() bool {
return p == ShowChoiceByPolicy
}
// ShouldEnable checks if the choice administered by this policy should be
// enabled. If the administrator has chosen a setting, the administrator's
// setting is returned, otherwise userChoice is returned.
func (p PreferenceOption) ShouldEnable(userChoice bool) bool {
switch p {
case NeverByPolicy:
return false
case AlwaysByPolicy:
return true
default:
return userChoice
}
}
// IsAlways reports whether the preference should always be enabled.
func (p PreferenceOption) IsAlways() bool {
return p == AlwaysByPolicy
}
// IsNever reports whether the preference should always be disabled.
func (p PreferenceOption) IsNever() bool {
return p == NeverByPolicy
}
// WillOverride checks if the choice administered by the policy is different
// from the user's choice.
func (p PreferenceOption) WillOverride(userChoice bool) bool {
return p.ShouldEnable(userChoice) != userChoice
}
// String returns a string representation of p.
func (p PreferenceOption) String() string {
switch p {
case AlwaysByPolicy:
return "always"
case NeverByPolicy:
return "never"
default:
return "user-decides"
}
}
// MarshalText implements [encoding.TextMarshaler].
func (p *PreferenceOption) MarshalText() (text []byte, err error) {
return []byte(p.String()), nil
}
// UnmarshalText implements [encoding.TextUnmarshaler].
// It never fails and sets p to [ShowChoiceByPolicy] if the specified text
// does not represent a valid [PreferenceOption].
func (p *PreferenceOption) UnmarshalText(text []byte) error {
switch string(text) {
case "always":
*p = AlwaysByPolicy
case "never":
*p = NeverByPolicy
default:
*p = ShowChoiceByPolicy
}
return nil
}
// Visibility is a policy that controls whether or not a particular
// component of a user interface is to be shown.
type Visibility byte
var (
_ encoding.TextMarshaler = (*Visibility)(nil)
_ encoding.TextUnmarshaler = (*Visibility)(nil)
)
const (
VisibleByPolicy Visibility = 'v'
HiddenByPolicy Visibility = 'h'
)
// Show reports whether the UI option administered by this policy should be shown.
// Currently this is true if the policy is not [HiddenByPolicy].
func (v Visibility) Show() bool {
return v != HiddenByPolicy
}
// String returns a string representation of v.
func (v Visibility) String() string {
switch v {
case 'h':
return "hide"
default:
return "show"
}
}
// MarshalText implements [encoding.TextMarshaler].
func (v Visibility) MarshalText() (text []byte, err error) {
return []byte(v.String()), nil
}
// UnmarshalText implements [encoding.TextUnmarshaler].
// It never fails and sets v to [VisibleByPolicy] if the specified text
// does not represent a valid [Visibility].
func (v *Visibility) UnmarshalText(text []byte) error {
switch string(text) {
case "hide":
*v = HiddenByPolicy
default:
*v = VisibleByPolicy
}
return nil
}
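A quick sketch of how these two types gate user choices and UI visibility:

package main

import (
	"fmt"

	"tailscale.com/util/syspolicy/setting"
)

func main() {
	var p setting.PreferenceOption
	_ = p.UnmarshalText([]byte("never")) // unknown values fall back to "user-decides"

	userWantsIt := true
	fmt.Println(p.ShouldEnable(userWantsIt)) // false: the policy overrides the user
	fmt.Println(p.WillOverride(userWantsIt)) // true
	fmt.Println(p.Show())                    // false: hide the toggle in the UI

	var v setting.Visibility
	_ = v.UnmarshalText([]byte("hide"))
	fmt.Println(v.Show()) // false
}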

143
vendor/tailscale.com/util/syspolicy/syspolicy.go generated vendored Normal file
View File

@@ -0,0 +1,143 @@
// Copyright (c) Tailscale Inc & AUTHORS
// SPDX-License-Identifier: BSD-3-Clause
// Package syspolicy provides functions to retrieve system settings of a device.
package syspolicy
import (
"errors"
"time"
"tailscale.com/util/syspolicy/internal/loggerx"
"tailscale.com/util/syspolicy/setting"
)
func GetString(key Key, defaultValue string) (string, error) {
markHandlerInUse()
v, err := handler.ReadString(string(key))
if errors.Is(err, ErrNoSuchKey) {
return defaultValue, nil
}
return v, err
}
func GetUint64(key Key, defaultValue uint64) (uint64, error) {
markHandlerInUse()
v, err := handler.ReadUInt64(string(key))
if errors.Is(err, ErrNoSuchKey) {
return defaultValue, nil
}
return v, err
}
func GetBoolean(key Key, defaultValue bool) (bool, error) {
markHandlerInUse()
v, err := handler.ReadBoolean(string(key))
if errors.Is(err, ErrNoSuchKey) {
return defaultValue, nil
}
return v, err
}
func GetStringArray(key Key, defaultValue []string) ([]string, error) {
markHandlerInUse()
v, err := handler.ReadStringArray(string(key))
if errors.Is(err, ErrNoSuchKey) {
return defaultValue, nil
}
return v, err
}
// GetPreferenceOption loads a policy from the registry that can be
// managed by an enterprise policy management system and allows administrative
// overrides of users' choices in a way that we do not want tailcontrol to have
// the authority to set. It describes user-decides/always/never options, where
// "always" and "never" remove the user's ability to make a selection. If not
// present or set to a different value, "user-decides" is the default.
func GetPreferenceOption(name Key) (setting.PreferenceOption, error) {
s, err := GetString(name, "user-decides")
if err != nil {
return setting.ShowChoiceByPolicy, err
}
var opt setting.PreferenceOption
err = opt.UnmarshalText([]byte(s))
return opt, err
}
// GetVisibility loads a policy from the registry that can be managed
// by an enterprise policy management system and describes show/hide decisions
// for UI elements. The registry value should be a string set to "show" (return
// true) or "hide" (return true). If not present or set to a different value,
// "show" (return false) is the default.
func GetVisibility(name Key) (setting.Visibility, error) {
s, err := GetString(name, "show")
if err != nil {
return setting.VisibleByPolicy, err
}
var visibility setting.Visibility
visibility.UnmarshalText([]byte(s))
return visibility, nil
}
// GetDuration loads a policy from the registry that can be managed
// by an enterprise policy management system and describes a duration for some
// action. The registry value should be a string that time.ParseDuration
// understands. If the registry value is "" or cannot be processed,
// defaultValue is returned instead.
func GetDuration(name Key, defaultValue time.Duration) (time.Duration, error) {
opt, err := GetString(name, "")
if opt == "" || err != nil {
return defaultValue, err
}
v, err := time.ParseDuration(opt)
if err != nil || v < 0 {
return defaultValue, nil
}
return v, nil
}
// SelectControlURL returns the ControlURL to use based on a value in
// the registry (LoginURL) and the one on disk (in the GUI's
// prefs.conf). If both are empty, it returns a default value. (It
// always returns a non-empty value.)
//
// See https://github.com/tailscale/tailscale/issues/2798 for some background.
func SelectControlURL(reg, disk string) string {
const def = "https://controlplane.tailscale.com"
// Prior to Dec 2020's commit 739b02e6, the installer
// wrote a LoginURL value of https://login.tailscale.com to the registry.
const oldRegDef = "https://login.tailscale.com"
// If they have an explicit value in the registry, use it,
// unless it's an old default value from an old installer.
// Then we have to see which is better.
if reg != "" {
if reg != oldRegDef {
// Something explicit in the registry that we didn't
// set ourselves by the installer.
return reg
}
if disk == "" {
// Something in the registry is better than nothing on disk.
return reg
}
if disk != def && disk != oldRegDef {
// The value in the registry is the old
// default (login.tailscale.com) but the value
// on disk is neither our old nor new default
// value, so it must be some custom thing that
// the user cares about. Prefer the disk value.
return disk
}
}
if disk != "" {
return disk
}
return def
}
// SetDebugLoggingEnabled controls whether spammy debug logging is enabled.
func SetDebugLoggingEnabled(v bool) {
loggerx.SetDebugLoggingEnabled(v)
}
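A worked sketch of SelectControlURL's precedence rules (the example.com URLs are illustrative):

package main

import (
	"fmt"

	"tailscale.com/util/syspolicy"
)

func main() {
	// Nothing configured anywhere: fall back to the default control plane.
	fmt.Println(syspolicy.SelectControlURL("", ""))

	// The old installer default in the registry loses to a custom URL on disk.
	fmt.Println(syspolicy.SelectControlURL("https://login.tailscale.com", "https://control.example.com"))

	// Anything else explicitly set in the registry wins over the disk value.
	fmt.Println(syspolicy.SelectControlURL("https://custom.example.com", "https://control.example.com"))
}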

10
vendor/tailscale.com/util/sysresources/memory.go generated vendored Normal file
View File

@@ -0,0 +1,10 @@
// Copyright (c) Tailscale Inc & AUTHORS
// SPDX-License-Identifier: BSD-3-Clause
package sysresources
// TotalMemory returns the total accessible system memory, in bytes. If the
// value cannot be determined, then 0 will be returned.
func TotalMemory() uint64 {
return totalMemoryImpl()
}
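A minimal usage sketch; the MiB conversion is only for display:

package main

import (
	"fmt"

	"tailscale.com/util/sysresources"
)

func main() {
	if mem := sysresources.TotalMemory(); mem == 0 {
		fmt.Println("total memory could not be determined on this platform")
	} else {
		fmt.Printf("total memory: %d MiB\n", mem/(1<<20))
	}
}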

16
vendor/tailscale.com/util/sysresources/memory_bsd.go generated vendored Normal file
View File

@@ -0,0 +1,16 @@
// Copyright (c) Tailscale Inc & AUTHORS
// SPDX-License-Identifier: BSD-3-Clause
//go:build freebsd || openbsd || dragonfly || netbsd
package sysresources
import "golang.org/x/sys/unix"
func totalMemoryImpl() uint64 {
val, err := unix.SysctlUint64("hw.physmem")
if err != nil {
return 0
}
return val
}

View File

@@ -0,0 +1,16 @@
// Copyright (c) Tailscale Inc & AUTHORS
// SPDX-License-Identifier: BSD-3-Clause
//go:build darwin
package sysresources
import "golang.org/x/sys/unix"
func totalMemoryImpl() uint64 {
val, err := unix.SysctlUint64("hw.memsize")
if err != nil {
return 0
}
return val
}

19
vendor/tailscale.com/util/sysresources/memory_linux.go generated vendored Normal file
View File

@@ -0,0 +1,19 @@
// Copyright (c) Tailscale Inc & AUTHORS
// SPDX-License-Identifier: BSD-3-Clause
//go:build linux
package sysresources
import "golang.org/x/sys/unix"
func totalMemoryImpl() uint64 {
var info unix.Sysinfo_t
if err := unix.Sysinfo(&info); err != nil {
return 0
}
// uint64 casts are required since these might be uint32s
return uint64(info.Totalram) * uint64(info.Unit)
}

View File

@@ -0,0 +1,8 @@
// Copyright (c) Tailscale Inc & AUTHORS
// SPDX-License-Identifier: BSD-3-Clause
//go:build !(linux || darwin || freebsd || openbsd || dragonfly || netbsd)
package sysresources
func totalMemoryImpl() uint64 { return 0 }

View File

@@ -0,0 +1,6 @@
// Copyright (c) Tailscale Inc & AUTHORS
// SPDX-License-Identifier: BSD-3-Clause
// Package sysresources provides OS-independent methods of determining the
// resources available to the current system.
package sysresources

13
vendor/tailscale.com/util/systemd/doc.go generated vendored Normal file
View File

@@ -0,0 +1,13 @@
// Copyright (c) Tailscale Inc & AUTHORS
// SPDX-License-Identifier: BSD-3-Clause
/*
Package systemd contains a minimal wrapper around systemd-notify to enable
applications to signal readiness and status to systemd.
This package only has an effect on Linux systems running Tailscale in a
systemd unit with the Type=notify flag set. On other operating systems, or
when running on Linux outside of a systemd unit, this package is a no-op.
*/
package systemd

77
vendor/tailscale.com/util/systemd/systemd_linux.go generated vendored Normal file
View File

@@ -0,0 +1,77 @@
// Copyright (c) Tailscale Inc & AUTHORS
// SPDX-License-Identifier: BSD-3-Clause
//go:build linux
package systemd
import (
"errors"
"log"
"os"
"sync"
"github.com/mdlayher/sdnotify"
)
var getNotifyOnce struct {
sync.Once
v *sdnotify.Notifier
}
type logOnce struct {
sync.Once
}
func (l *logOnce) logf(format string, args ...any) {
l.Once.Do(func() {
log.Printf(format, args...)
})
}
var (
readyOnce = &logOnce{}
statusOnce = &logOnce{}
)
func notifier() *sdnotify.Notifier {
getNotifyOnce.Do(func() {
var err error
getNotifyOnce.v, err = sdnotify.New()
// Not exist means probably not running under systemd, so don't log.
if err != nil && !errors.Is(err, os.ErrNotExist) {
log.Printf("systemd: systemd-notifier error: %v", err)
}
})
return getNotifyOnce.v
}
// Ready signals readiness to systemd. This will unblock service dependents from starting.
func Ready() {
err := notifier().Notify(sdnotify.Ready)
if err != nil {
readyOnce.logf("systemd: error notifying: %v", err)
}
}
// Status sends a single line status update to systemd so that information shows up
// in systemctl output. For example:
//
// $ systemctl status tailscale
// ● tailscale.service - Tailscale client daemon
// Loaded: loaded (/nix/store/qc312qcy907wz80fqrgbbm8a9djafmlg-unit-tailscale.service/tailscale.service; enabled; vendor preset: enabled)
// Active: active (running) since Tue 2020-11-24 17:54:07 EST; 13h ago
// Main PID: 26741 (.tailscaled-wra)
// Status: "Connected; user@host.domain.tld; 100.101.102.103"
// IP: 0B in, 0B out
// Tasks: 22 (limit: 4915)
// Memory: 30.9M
// CPU: 2min 38.469s
// CGroup: /system.slice/tailscale.service
// └─26741 /nix/store/sv6cj4mw2jajm9xkbwj07k29dj30lh0n-tailscale-date.20200727/bin/tailscaled --port 41641
func Status(format string, args ...any) {
err := notifier().Notify(sdnotify.Statusf(format, args...))
if err != nil {
statusOnce.logf("systemd: error notifying: %v", err)
}
}
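A minimal sketch of how a daemon might call this package; both calls are no-ops unless the process runs on Linux inside a systemd unit with Type=notify, and the host and IP below are placeholders:

package main

import "tailscale.com/util/systemd"

func main() {
	systemd.Ready() // unblock dependent units
	systemd.Status("Connected; %s; %s", "user@host.example.com", "100.101.102.103")
}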

View File

@@ -0,0 +1,9 @@
// Copyright (c) Tailscale Inc & AUTHORS
// SPDX-License-Identifier: BSD-3-Clause
//go:build !linux
package systemd
func Ready() {}
func Status(string, ...any) {}

21
vendor/tailscale.com/util/testenv/testenv.go generated vendored Normal file
View File

@@ -0,0 +1,21 @@
// Copyright (c) Tailscale Inc & AUTHORS
// SPDX-License-Identifier: BSD-3-Clause
// Package testenv provides utility functions for tests. It does not depend on
// the `testing` package to allow usage in non-test code.
package testenv
import (
"flag"
"tailscale.com/types/lazy"
)
var lazyInTest lazy.SyncValue[bool]
// InTest reports whether the current binary is a test binary.
func InTest() bool {
return lazyInTest.Get(func() bool {
return flag.Lookup("test.v") != nil
})
}

31
vendor/tailscale.com/util/truncate/truncate.go generated vendored Normal file
View File

@@ -0,0 +1,31 @@
// Copyright (c) Tailscale Inc & AUTHORS
// SPDX-License-Identifier: BSD-3-Clause
// Package truncate provides a utility function for safely truncating UTF-8
// strings to a fixed length, respecting multi-byte codepoints.
package truncate
// String returns a prefix of a UTF-8 string s, having length no greater than n
// bytes. If s exceeds this length, it is truncated at a point ≤ n so that the
// result does not end in a partial UTF-8 encoding. If s is less than or equal
// to this length, it is returned unmodified.
func String[String ~string | ~[]byte](s String, n int) String {
if n >= len(s) {
return s
}
// Back up until we find the beginning of a UTF-8 encoding.
for n > 0 && s[n-1]&0xc0 == 0x80 { // 0b10... is a continuation byte
n--
}
// If we're at the beginning of a multi-byte encoding, back up one more to
// skip it. It's possible the value was already complete, but it's simpler
// if we only have to check in one direction.
//
// Otherwise, we have a single-byte code (0b00... or 0b01...).
if n > 0 && s[n-1]&0xc0 == 0xc0 { // 0b11... starts a multibyte encoding
n--
}
return s[:n]
}
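A small sketch showing the boundary behavior described above, using a string with a two-byte codepoint:

package main

import (
	"fmt"

	"tailscale.com/util/truncate"
)

func main() {
	s := "héllo" // 'é' encodes as two bytes (0xC3 0xA9)
	fmt.Println(truncate.String(s, 6)) // "héllo": already within the limit
	fmt.Println(truncate.String(s, 2)) // "h": a cut at 2 would split 'é', so it backs up past it
	fmt.Println(truncate.String(s, 4)) // "hél": byte 4 is a clean boundary
}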

62
vendor/tailscale.com/util/uniq/slice.go generated vendored Normal file
View File

@@ -0,0 +1,62 @@
// Copyright (c) Tailscale Inc & AUTHORS
// SPDX-License-Identifier: BSD-3-Clause
// Package uniq provides removal of adjacent duplicate elements in slices.
// It is similar to the unix command uniq.
package uniq
// ModifySlice removes adjacent duplicate elements from the given slice. It
// adjusts the length of the slice appropriately and zeros the tail.
//
// ModifySlice does O(len(*slice)) operations.
func ModifySlice[E comparable](slice *[]E) {
// Remove duplicates
dst := 0
for i := 1; i < len(*slice); i++ {
if (*slice)[i] == (*slice)[dst] {
continue
}
dst++
(*slice)[dst] = (*slice)[i]
}
// Zero out the elements we removed at the end of the slice
end := dst + 1
var zero E
for i := end; i < len(*slice); i++ {
(*slice)[i] = zero
}
// Truncate the slice
if end < len(*slice) {
*slice = (*slice)[:end]
}
}
// ModifySliceFunc is the same as ModifySlice except that it allows using a
// custom comparison function.
//
// eq should report whether the two provided elements are equal.
func ModifySliceFunc[E any](slice *[]E, eq func(i, j E) bool) {
// Remove duplicates
dst := 0
for i := 1; i < len(*slice); i++ {
if eq((*slice)[dst], (*slice)[i]) {
continue
}
dst++
(*slice)[dst] = (*slice)[i]
}
// Zero out the elements we removed at the end of the slice
end := dst + 1
var zero E
for i := end; i < len(*slice); i++ {
(*slice)[i] = zero
}
// Truncate the slice
if end < len(*slice) {
*slice = (*slice)[:end]
}
}
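A quick sketch of both helpers; note that, like the unix uniq command, only adjacent duplicates are collapsed:

package main

import (
	"fmt"
	"strings"

	"tailscale.com/util/uniq"
)

func main() {
	nums := []int{1, 1, 2, 2, 2, 3}
	uniq.ModifySlice(&nums)
	fmt.Println(nums) // [1 2 3]

	words := []string{"go", "GO", "gopher", "Gopher", "go"}
	uniq.ModifySliceFunc(&words, func(a, b string) bool { return strings.EqualFold(a, b) })
	fmt.Println(words) // [go gopher go]: the trailing "go" is not adjacent to the first
}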

105
vendor/tailscale.com/util/usermetric/usermetric.go generated vendored Normal file
View File

@@ -0,0 +1,105 @@
// Copyright (c) Tailscale Inc & AUTHORS
// SPDX-License-Identifier: BSD-3-Clause
// Package usermetric provides a container and handler
// for user-facing metrics.
package usermetric
import (
"expvar"
"fmt"
"io"
"net/http"
"strings"
"tailscale.com/metrics"
"tailscale.com/tsweb/varz"
)
// Registry tracks user-facing metrics of various Tailscale subsystems.
type Registry struct {
vars expvar.Map
}
// NewMultiLabelMapWithRegistry creates and registers a new
// MultiLabelMap[T] variable with the given name in the given registry
// and returns it.
//
// Note that user metrics are not protected against duplicate metric
// names; it is the caller's responsibility to ensure that each name
// is unique.
func NewMultiLabelMapWithRegistry[T comparable](m *Registry, name string, promType, helpText string) *metrics.MultiLabelMap[T] {
ml := &metrics.MultiLabelMap[T]{
Type: promType,
Help: helpText,
}
var zero T
_ = metrics.LabelString(zero) // panic early if T is invalid
m.vars.Set(name, ml)
return ml
}
// Gauge is a gauge metric with no labels.
type Gauge struct {
m *expvar.Float
help string
}
// NewGauge creates and registers a new gauge metric with the given name and help text.
func (r *Registry) NewGauge(name, help string) *Gauge {
g := &Gauge{&expvar.Float{}, help}
r.vars.Set(name, g)
return g
}
// Set sets the gauge to the given value.
func (g *Gauge) Set(v float64) {
if g == nil {
return
}
g.m.Set(v)
}
// String returns the string representation of the underlying expvar.Float.
// This satisfies the expvar.Var interface.
func (g *Gauge) String() string {
if g == nil {
return ""
}
return g.m.String()
}
// WritePrometheus writes the gauge metric in Prometheus format to the given writer.
// This satisfies the varz.PrometheusWriter interface.
func (g *Gauge) WritePrometheus(w io.Writer, name string) {
io.WriteString(w, "# TYPE ")
io.WriteString(w, name)
io.WriteString(w, " gauge\n")
if g.help != "" {
io.WriteString(w, "# HELP ")
io.WriteString(w, name)
io.WriteString(w, " ")
io.WriteString(w, g.help)
io.WriteString(w, "\n")
}
io.WriteString(w, name)
fmt.Fprintf(w, " %v\n", g.m.Value())
}
// Handler serves the user-facing expvar metrics contained in this
// registry in varz format.
func (r *Registry) Handler(w http.ResponseWriter, req *http.Request) {
varz.ExpvarDoHandler(r.vars.Do)(w, req)
}
// String returns the string representation of all the metrics and their
// values in the registry. It is useful for debugging.
func (r *Registry) String() string {
var sb strings.Builder
r.vars.Do(func(kv expvar.KeyValue) {
fmt.Fprintf(&sb, "%s: %v\n", kv.Key, kv.Value)
})
return sb.String()
}
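A minimal sketch of registering and exporting a gauge; the metric name and help text are hypothetical:

package main

import (
	"os"

	"tailscale.com/util/usermetric"
)

func main() {
	var reg usermetric.Registry

	g := reg.NewGauge("example_queue_depth", "Current depth of an example queue")
	g.Set(42)

	// Writes the Prometheus exposition for this one gauge:
	//   # TYPE example_queue_depth gauge
	//   # HELP example_queue_depth Current depth of an example queue
	//   example_queue_depth 42
	g.WritePrometheus(os.Stdout, "example_queue_depth")
}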

82
vendor/tailscale.com/util/vizerror/vizerror.go generated vendored Normal file
View File

@@ -0,0 +1,82 @@
// Copyright (c) Tailscale Inc & AUTHORS
// SPDX-License-Identifier: BSD-3-Clause
// Package vizerror provides types and utility funcs for handling visible errors
// that are safe to display to end users.
package vizerror
import (
"errors"
"fmt"
)
// Error is an error that is safe to display to end users.
type Error struct {
publicErr error // visible to end users
wrapped error // internal
}
// Error implements the error interface. The returned string is safe to display
// to end users.
func (e Error) Error() string {
return e.publicErr.Error()
}
// New returns an error that formats as the given text. It always returns a vizerror.Error.
func New(publicMsg string) error {
err := errors.New(publicMsg)
return Error{
publicErr: err,
wrapped: err,
}
}
// Errorf returns an Error with the specified publicMsgFormat and values. It always returns a vizerror.Error.
//
// Warning: avoid using an error as one of the format arguments, as this will cause the text
// of that error to be displayed to the end user (which is probably not what you want).
func Errorf(publicMsgFormat string, a ...any) error {
err := fmt.Errorf(publicMsgFormat, a...)
return Error{
publicErr: err,
wrapped: err,
}
}
// Unwrap returns the underlying error.
//
// If the Error was constructed using [WrapWithMessage], this is the wrapped (internal) error
// and not the user-visible error message.
func (e Error) Unwrap() error {
return e.wrapped
}
// Wrap wraps publicErr with a vizerror.Error.
//
// Deprecated: this is almost always the wrong thing to do. Are you really sure
// you know exactly what err.Error() will stringify to and be safe to show to
// users? [WrapWithMessage] is probably what you want.
func Wrap(publicErr error) error {
if publicErr == nil {
return nil
}
return Error{publicErr: publicErr, wrapped: publicErr}
}
// WrapWithMessage wraps the given error with a message that's safe to display
// to end users. The text of the wrapped error will not be displayed to end
// users.
//
// WrapWithMessage should almost always be preferred to [Wrap].
func WrapWithMessage(wrapped error, publicMsg string) error {
return Error{
publicErr: errors.New(publicMsg),
wrapped: wrapped,
}
}
// As returns the first vizerror.Error in err's chain.
func As(err error) (e Error, ok bool) {
ok = errors.As(err, &e)
return
}
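A short sketch of the intended split between the user-visible message and the internal cause; the path below is made up:

package main

import (
	"errors"
	"fmt"
	"io/fs"

	"tailscale.com/util/vizerror"
)

func main() {
	internal := fmt.Errorf("open /var/lib/example/secret.conf: %w", fs.ErrPermission)
	err := vizerror.WrapWithMessage(internal, "could not load configuration")

	if ve, ok := vizerror.As(err); ok {
		fmt.Println(ve.Error()) // "could not load configuration" (safe to show)
	}
	fmt.Println(errors.Is(err, fs.ErrPermission)) // true: the internal cause is still reachable for logging
}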

View File

@@ -0,0 +1,519 @@
// Copyright (c) Tailscale Inc & AUTHORS
// SPDX-License-Identifier: BSD-3-Clause
// Package authenticode contains Windows Authenticode signature verification code.
package authenticode
import (
"encoding/hex"
"errors"
"fmt"
"path/filepath"
"strings"
"unsafe"
"github.com/dblohm7/wingoes"
"github.com/dblohm7/wingoes/pe"
"golang.org/x/sys/windows"
)
var (
// ErrSigNotFound is returned if no authenticode signature could be found.
ErrSigNotFound = errors.New("authenticode signature not found")
// ErrUnexpectedCertSubject is wrapped with the actual cert subject and
// returned when the binary is signed by a different subject than expected.
ErrUnexpectedCertSubject = errors.New("unexpected cert subject")
errCertSubjectNotFound = errors.New("cert subject not found")
errCertSubjectDecodeLenMismatch = errors.New("length mismatch while decoding cert subject")
)
const (
_CERT_STRONG_SIGN_OID_INFO_CHOICE = 2
_CMSG_SIGNER_CERT_INFO_PARAM = 7
_MSI_INVALID_HASH_IS_FATAL = 1
_TRUST_E_NOSIGNATURE = wingoes.HRESULT(-((0x800B0100 ^ 0xFFFFFFFF) + 1))
)
// Verify performs authenticode verification on the file at path, and also
// ensures that expectedCertSubject matches the actual cert subject. path may
// point to either a PE binary or an MSI package. ErrSigNotFound is returned if
// no signature is found.
func Verify(path string, expectedCertSubject string) error {
path16, err := windows.UTF16PtrFromString(path)
if err != nil {
return err
}
var subject string
if strings.EqualFold(filepath.Ext(path), ".msi") {
subject, err = verifyMSI(path16)
} else {
subject, _, err = queryPE(path16, true)
}
if err != nil {
return err
}
if subject != expectedCertSubject {
return fmt.Errorf("%w %q", ErrUnexpectedCertSubject, subject)
}
return nil
}
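A usage sketch for Verify on Windows; the binary path and expected certificate subject are hypothetical, and the import path is assumed from the vendor layout:

package main

import (
	"errors"
	"fmt"

	"tailscale.com/util/winutil/authenticode" // assumed import path; Windows-only
)

func main() {
	const path = `C:\Program Files\Tailscale\tailscaled.exe` // hypothetical
	err := authenticode.Verify(path, "Tailscale Inc.")       // hypothetical subject
	switch {
	case err == nil:
		fmt.Println("signature verified")
	case errors.Is(err, authenticode.ErrSigNotFound):
		fmt.Println("no Authenticode signature present")
	default:
		fmt.Println("verification failed:", err)
	}
}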
// SigProvenance indicates whether an authenticode signature was embedded within
// the file itself, or the signature applies to an associated catalog file.
type SigProvenance int
const (
SigProvUnknown = SigProvenance(iota)
SigProvEmbedded
SigProvCatalog
)
// QueryCertSubject obtains the subject associated with the certificate used to
// sign the PE binary located at path. When err == nil, it also returns the
// provenance of that signature. ErrSigNotFound is returned if no signature
// is found. Note that this function does *not* validate the chain of trust; use
// Verify for that purpose!
func QueryCertSubject(path string) (certSubject string, provenance SigProvenance, err error) {
path16, err := windows.UTF16PtrFromString(path)
if err != nil {
return "", SigProvUnknown, err
}
return queryPE(path16, false)
}
func queryPE(utf16Path *uint16, verify bool) (string, SigProvenance, error) {
certSubject, err := queryEmbeddedCertSubject(utf16Path, verify)
switch {
case err == ErrSigNotFound:
// Try looking for the signature in a catalog file.
default:
return certSubject, SigProvEmbedded, err
}
certSubject, err = queryCatalogCertSubject(utf16Path, verify)
switch {
case err == ErrSigNotFound:
return "", SigProvUnknown, err
default:
return certSubject, SigProvCatalog, err
}
}
// CertSubjectError is returned if a cert subject was successfully resolved but
// there was a problem encountered during its extraction. The Subject is
// provided for informational purposes but is not presumed to be accurate.
type CertSubjectError struct {
Err error // The error that occurred while extracting the cert subject.
Subject string // The (possibly invalid) cert subject that was extracted.
}
func (e *CertSubjectError) Error() string {
if e == nil {
return "<nil>"
}
if e.Subject == "" {
return e.Err.Error()
}
return fmt.Sprintf("cert subject %q: %v", e.Subject, e.Err)
}
func (e *CertSubjectError) Unwrap() error {
return e.Err
}
func verifyMSI(path *uint16) (string, error) {
var certCtx *windows.CertContext
hr := msiGetFileSignatureInformation(path, _MSI_INVALID_HASH_IS_FATAL, &certCtx, nil, nil)
if e := wingoes.ErrorFromHRESULT(hr); e.Failed() {
if e == wingoes.ErrorFromHRESULT(_TRUST_E_NOSIGNATURE) {
return "", ErrSigNotFound
}
return "", e
}
defer windows.CertFreeCertificateContext(certCtx)
return certSubjectFromCertContext(certCtx)
}
func certSubjectFromCertContext(certCtx *windows.CertContext) (string, error) {
desiredLen := windows.CertGetNameString(
certCtx,
windows.CERT_NAME_SIMPLE_DISPLAY_TYPE,
0,
nil,
nil,
0,
)
if desiredLen <= 1 {
return "", errCertSubjectNotFound
}
buf := make([]uint16, desiredLen)
actualLen := windows.CertGetNameString(
certCtx,
windows.CERT_NAME_SIMPLE_DISPLAY_TYPE,
0,
nil,
&buf[0],
desiredLen,
)
if actualLen != desiredLen {
return "", errCertSubjectDecodeLenMismatch
}
return windows.UTF16ToString(buf), nil
}
type objectQuery struct {
certStore windows.Handle
cryptMsg windows.Handle
encodingType uint32
}
func newObjectQuery(utf16Path *uint16) (*objectQuery, error) {
var oq objectQuery
if err := windows.CryptQueryObject(
windows.CERT_QUERY_OBJECT_FILE,
unsafe.Pointer(utf16Path),
windows.CERT_QUERY_CONTENT_FLAG_PKCS7_SIGNED_EMBED,
windows.CERT_QUERY_FORMAT_FLAG_BINARY,
0,
&oq.encodingType,
nil,
nil,
&oq.certStore,
&oq.cryptMsg,
nil,
); err != nil {
return nil, err
}
return &oq, nil
}
func (oq *objectQuery) Close() error {
if oq.certStore != 0 {
if err := windows.CertCloseStore(oq.certStore, 0); err != nil {
return err
}
oq.certStore = 0
}
if oq.cryptMsg != 0 {
if err := cryptMsgClose(oq.cryptMsg); err != nil {
return err
}
oq.cryptMsg = 0
}
return nil
}
func (oq *objectQuery) certSubject() (string, error) {
var certInfoLen uint32
if err := cryptMsgGetParam(
oq.cryptMsg,
_CMSG_SIGNER_CERT_INFO_PARAM,
0,
unsafe.Pointer(nil),
&certInfoLen,
); err != nil {
return "", err
}
buf := make([]byte, certInfoLen)
if err := cryptMsgGetParam(
oq.cryptMsg,
_CMSG_SIGNER_CERT_INFO_PARAM,
0,
unsafe.Pointer(&buf[0]),
&certInfoLen,
); err != nil {
return "", err
}
certInfo := (*windows.CertInfo)(unsafe.Pointer(&buf[0]))
certCtx, err := windows.CertFindCertificateInStore(
oq.certStore,
oq.encodingType,
0,
windows.CERT_FIND_SUBJECT_CERT,
unsafe.Pointer(certInfo),
nil,
)
if err != nil {
return "", err
}
defer windows.CertFreeCertificateContext(certCtx)
return certSubjectFromCertContext(certCtx)
}
func extractCertBlob(hfile windows.Handle) ([]byte, error) {
pef, err := pe.NewPEFromFileHandle(hfile)
if err != nil {
return nil, err
}
defer pef.Close()
certsAny, err := pef.DataDirectoryEntry(pe.IMAGE_DIRECTORY_ENTRY_SECURITY)
if err != nil {
if errors.Is(err, pe.ErrNotPresent) {
err = ErrSigNotFound
}
return nil, err
}
certs, ok := certsAny.([]pe.AuthenticodeCert)
if !ok || len(certs) == 0 {
return nil, ErrSigNotFound
}
for _, cert := range certs {
if cert.Revision() != pe.WIN_CERT_REVISION_2_0 || cert.Type() != pe.WIN_CERT_TYPE_PKCS_SIGNED_DATA {
continue
}
return cert.Data(), nil
}
return nil, ErrSigNotFound
}
type _HCRYPTPROV windows.Handle
type _CRYPT_VERIFY_MESSAGE_PARA struct {
CBSize uint32
MsgAndCertEncodingType uint32
HCryptProv _HCRYPTPROV
FNGetSignerCertificate uintptr
GetArg uintptr
StrongSignPara *windows.CertStrongSignPara
}
func querySubjectFromBlob(blob []byte) (string, error) {
para := _CRYPT_VERIFY_MESSAGE_PARA{
CBSize: uint32(unsafe.Sizeof(_CRYPT_VERIFY_MESSAGE_PARA{})),
MsgAndCertEncodingType: windows.X509_ASN_ENCODING | windows.PKCS_7_ASN_ENCODING,
}
var certCtx *windows.CertContext
if err := cryptVerifyMessageSignature(&para, 0, &blob[0], uint32(len(blob)), nil, nil, &certCtx); err != nil {
return "", err
}
defer windows.CertFreeCertificateContext(certCtx)
return certSubjectFromCertContext(certCtx)
}
func queryEmbeddedCertSubject(utf16Path *uint16, verify bool) (string, error) {
peBinary, err := windows.CreateFile(
utf16Path,
windows.GENERIC_READ,
windows.FILE_SHARE_READ,
nil,
windows.OPEN_EXISTING,
0,
0,
)
if err != nil {
return "", err
}
defer windows.CloseHandle(peBinary)
blob, err := extractCertBlob(peBinary)
if err != nil {
return "", err
}
certSubj, err := querySubjectFromBlob(blob)
if err != nil {
return "", err
}
if !verify {
return certSubj, nil
}
wintrustArg := unsafe.Pointer(&windows.WinTrustFileInfo{
Size: uint32(unsafe.Sizeof(windows.WinTrustFileInfo{})),
FilePath: utf16Path,
File: peBinary,
})
if err := verifyTrust(windows.WTD_CHOICE_FILE, wintrustArg); err != nil {
// We might still want to know who the cert subject claims to be
// even if the validation has failed (eg for troubleshooting purposes),
// so we return a CertSubjectError.
return "", &CertSubjectError{Err: err, Subject: certSubj}
}
return certSubj, nil
}
var (
_BCRYPT_SHA256_ALGORITHM = &([]uint16{'S', 'H', 'A', '2', '5', '6', 0})[0]
_OID_CERT_STRONG_SIGN_OS_1 = &([]byte("1.3.6.1.4.1.311.72.1.1\x00"))[0]
)
type _HCATADMIN windows.Handle
type _HCATINFO windows.Handle
type _CATALOG_INFO struct {
size uint32
catalogFile [windows.MAX_PATH]uint16
}
type _WINTRUST_CATALOG_INFO struct {
size uint32
catalogVersion uint32
catalogFilePath *uint16
memberTag *uint16
memberFilePath *uint16
memberFile windows.Handle
pCalculatedFileHash *byte
cbCalculatedFileHash uint32
catalogContext uintptr
catAdmin _HCATADMIN
}
func queryCatalogCertSubject(utf16Path *uint16, verify bool) (string, error) {
var catAdmin _HCATADMIN
policy := windows.CertStrongSignPara{
Size: uint32(unsafe.Sizeof(windows.CertStrongSignPara{})),
InfoChoice: _CERT_STRONG_SIGN_OID_INFO_CHOICE,
InfoOrSerializedInfoOrOID: unsafe.Pointer(_OID_CERT_STRONG_SIGN_OS_1),
}
if err := cryptCATAdminAcquireContext2(
&catAdmin,
nil,
_BCRYPT_SHA256_ALGORITHM,
&policy,
0,
); err != nil {
return "", err
}
defer cryptCATAdminReleaseContext(catAdmin, 0)
// We use windows.CreateFile instead of standard library facilities because:
// 1. Subsequent API calls directly utilize the file's Win32 HANDLE;
// 2. We're going to be hashing the contents of this file, so we want to
// provide a sequential-scan hint to the kernel.
memberFile, err := windows.CreateFile(
utf16Path,
windows.GENERIC_READ,
windows.FILE_SHARE_READ,
nil,
windows.OPEN_EXISTING,
windows.FILE_FLAG_SEQUENTIAL_SCAN,
0,
)
if err != nil {
return "", err
}
defer windows.CloseHandle(memberFile)
var hashLen uint32
if err := cryptCATAdminCalcHashFromFileHandle2(
catAdmin,
memberFile,
&hashLen,
nil,
0,
); err != nil {
return "", err
}
hashBuf := make([]byte, hashLen)
if err := cryptCATAdminCalcHashFromFileHandle2(
catAdmin,
memberFile,
&hashLen,
&hashBuf[0],
0,
); err != nil {
return "", err
}
catInfoCtx, err := cryptCATAdminEnumCatalogFromHash(
catAdmin,
&hashBuf[0],
hashLen,
0,
nil,
)
if err != nil {
if err == windows.ERROR_NOT_FOUND {
err = ErrSigNotFound
}
return "", err
}
defer cryptCATAdminReleaseCatalogContext(catAdmin, catInfoCtx, 0)
catInfo := _CATALOG_INFO{
size: uint32(unsafe.Sizeof(_CATALOG_INFO{})),
}
if err := cryptCATAdminCatalogInfoFromContext(catInfoCtx, &catInfo, 0); err != nil {
return "", err
}
oq, err := newObjectQuery(&catInfo.catalogFile[0])
if err != nil {
return "", err
}
defer oq.Close()
certSubj, err := oq.certSubject()
if err != nil {
return "", err
}
if !verify {
return certSubj, nil
}
// memberTag is required to be the file hash, hex-encoded in upper case.
hbh := strings.ToUpper(hex.EncodeToString(hashBuf))
memberTag, err := windows.UTF16PtrFromString(hbh)
if err != nil {
return "", err
}
wintrustArg := unsafe.Pointer(&_WINTRUST_CATALOG_INFO{
size: uint32(unsafe.Sizeof(_WINTRUST_CATALOG_INFO{})),
catalogFilePath: &catInfo.catalogFile[0],
memberTag: memberTag,
memberFilePath: utf16Path,
memberFile: memberFile,
catAdmin: catAdmin,
})
if err := verifyTrust(windows.WTD_CHOICE_CATALOG, wintrustArg); err != nil {
// We might still want to know who the cert subject claims to be
// even if the validation has failed (eg for troubleshooting purposes),
// so we return a CertSubjectError.
return "", &CertSubjectError{Err: err, Subject: certSubj}
}
return certSubj, nil
}
func verifyTrust(infoType uint32, info unsafe.Pointer) error {
data := &windows.WinTrustData{
Size: uint32(unsafe.Sizeof(windows.WinTrustData{})),
UIChoice: windows.WTD_UI_NONE,
RevocationChecks: windows.WTD_REVOKE_WHOLECHAIN, // Full revocation checking, as this is called with network connectivity.
UnionChoice: infoType,
StateAction: windows.WTD_STATEACTION_VERIFY,
FileOrCatalogOrBlobOrSgnrOrCert: info,
}
err := windows.WinVerifyTrustEx(windows.InvalidHWND, &windows.WINTRUST_ACTION_GENERIC_VERIFY_V2, data)
data.StateAction = windows.WTD_STATEACTION_CLOSE
windows.WinVerifyTrustEx(windows.InvalidHWND, &windows.WINTRUST_ACTION_GENERIC_VERIFY_V2, data)
return err
}

View File

@@ -0,0 +1,18 @@
// Copyright (c) Tailscale Inc & AUTHORS
// SPDX-License-Identifier: BSD-3-Clause
package authenticode
//go:generate go run golang.org/x/sys/windows/mkwinsyscall -output zsyscall_windows.go mksyscall.go
//go:generate go run golang.org/x/tools/cmd/goimports -w zsyscall_windows.go
//sys cryptCATAdminAcquireContext2(hCatAdmin *_HCATADMIN, pgSubsystem *windows.GUID, hashAlgorithm *uint16, strongHashPolicy *windows.CertStrongSignPara, flags uint32) (err error) [int32(failretval)==0] = wintrust.CryptCATAdminAcquireContext2
//sys cryptCATAdminCalcHashFromFileHandle2(hCatAdmin _HCATADMIN, file windows.Handle, pcbHash *uint32, pbHash *byte, flags uint32) (err error) [int32(failretval)==0] = wintrust.CryptCATAdminCalcHashFromFileHandle2
//sys cryptCATAdminCatalogInfoFromContext(hCatInfo _HCATINFO, catInfo *_CATALOG_INFO, flags uint32) (err error) [int32(failretval)==0] = wintrust.CryptCATCatalogInfoFromContext
//sys cryptCATAdminEnumCatalogFromHash(hCatAdmin _HCATADMIN, pbHash *byte, cbHash uint32, flags uint32, prevCatInfo *_HCATINFO) (ret _HCATINFO, err error) [ret==0] = wintrust.CryptCATAdminEnumCatalogFromHash
//sys cryptCATAdminReleaseCatalogContext(hCatAdmin _HCATADMIN, hCatInfo _HCATINFO, flags uint32) (err error) [int32(failretval)==0] = wintrust.CryptCATAdminReleaseCatalogContext
//sys cryptCATAdminReleaseContext(hCatAdmin _HCATADMIN, flags uint32) (err error) [int32(failretval)==0] = wintrust.CryptCATAdminReleaseContext
//sys cryptMsgClose(cryptMsg windows.Handle) (err error) [int32(failretval)==0] = crypt32.CryptMsgClose
//sys cryptMsgGetParam(cryptMsg windows.Handle, paramType uint32, index uint32, data unsafe.Pointer, dataLen *uint32) (err error) [int32(failretval)==0] = crypt32.CryptMsgGetParam
//sys cryptVerifyMessageSignature(pVerifyPara *_CRYPT_VERIFY_MESSAGE_PARA, signerIndex uint32, pbSignedBlob *byte, cbSignedBlob uint32, pbDecoded *byte, pdbDecoded *uint32, ppSignerCert **windows.CertContext) (err error) [int32(failretval)==0] = crypt32.CryptVerifyMessageSignature
//sys msiGetFileSignatureInformation(signedObjectPath *uint16, flags uint32, certCtx **windows.CertContext, pbHashData *byte, cbHashData *uint32) (ret wingoes.HRESULT) = msi.MsiGetFileSignatureInformationW

View File

@@ -0,0 +1,135 @@
// Code generated by 'go generate'; DO NOT EDIT.
package authenticode
import (
"syscall"
"unsafe"
"github.com/dblohm7/wingoes"
"golang.org/x/sys/windows"
)
var _ unsafe.Pointer
// Do the interface allocations only once for common
// Errno values.
const (
errnoERROR_IO_PENDING = 997
)
var (
errERROR_IO_PENDING error = syscall.Errno(errnoERROR_IO_PENDING)
errERROR_EINVAL error = syscall.EINVAL
)
// errnoErr returns common boxed Errno values, to prevent
// allocations at runtime.
func errnoErr(e syscall.Errno) error {
switch e {
case 0:
return errERROR_EINVAL
case errnoERROR_IO_PENDING:
return errERROR_IO_PENDING
}
// TODO: add more here, after collecting data on the common
// error values seen on Windows. (perhaps when running
// all.bat?)
return e
}
var (
modcrypt32 = windows.NewLazySystemDLL("crypt32.dll")
modmsi = windows.NewLazySystemDLL("msi.dll")
modwintrust = windows.NewLazySystemDLL("wintrust.dll")
procCryptMsgClose = modcrypt32.NewProc("CryptMsgClose")
procCryptMsgGetParam = modcrypt32.NewProc("CryptMsgGetParam")
procCryptVerifyMessageSignature = modcrypt32.NewProc("CryptVerifyMessageSignature")
procMsiGetFileSignatureInformationW = modmsi.NewProc("MsiGetFileSignatureInformationW")
procCryptCATAdminAcquireContext2 = modwintrust.NewProc("CryptCATAdminAcquireContext2")
procCryptCATAdminCalcHashFromFileHandle2 = modwintrust.NewProc("CryptCATAdminCalcHashFromFileHandle2")
procCryptCATAdminEnumCatalogFromHash = modwintrust.NewProc("CryptCATAdminEnumCatalogFromHash")
procCryptCATAdminReleaseCatalogContext = modwintrust.NewProc("CryptCATAdminReleaseCatalogContext")
procCryptCATAdminReleaseContext = modwintrust.NewProc("CryptCATAdminReleaseContext")
procCryptCATCatalogInfoFromContext = modwintrust.NewProc("CryptCATCatalogInfoFromContext")
)
func cryptMsgClose(cryptMsg windows.Handle) (err error) {
r1, _, e1 := syscall.Syscall(procCryptMsgClose.Addr(), 1, uintptr(cryptMsg), 0, 0)
if int32(r1) == 0 {
err = errnoErr(e1)
}
return
}
func cryptMsgGetParam(cryptMsg windows.Handle, paramType uint32, index uint32, data unsafe.Pointer, dataLen *uint32) (err error) {
r1, _, e1 := syscall.Syscall6(procCryptMsgGetParam.Addr(), 5, uintptr(cryptMsg), uintptr(paramType), uintptr(index), uintptr(data), uintptr(unsafe.Pointer(dataLen)), 0)
if int32(r1) == 0 {
err = errnoErr(e1)
}
return
}
func cryptVerifyMessageSignature(pVerifyPara *_CRYPT_VERIFY_MESSAGE_PARA, signerIndex uint32, pbSignedBlob *byte, cbSignedBlob uint32, pbDecoded *byte, pdbDecoded *uint32, ppSignerCert **windows.CertContext) (err error) {
r1, _, e1 := syscall.Syscall9(procCryptVerifyMessageSignature.Addr(), 7, uintptr(unsafe.Pointer(pVerifyPara)), uintptr(signerIndex), uintptr(unsafe.Pointer(pbSignedBlob)), uintptr(cbSignedBlob), uintptr(unsafe.Pointer(pbDecoded)), uintptr(unsafe.Pointer(pdbDecoded)), uintptr(unsafe.Pointer(ppSignerCert)), 0, 0)
if int32(r1) == 0 {
err = errnoErr(e1)
}
return
}
func msiGetFileSignatureInformation(signedObjectPath *uint16, flags uint32, certCtx **windows.CertContext, pbHashData *byte, cbHashData *uint32) (ret wingoes.HRESULT) {
r0, _, _ := syscall.Syscall6(procMsiGetFileSignatureInformationW.Addr(), 5, uintptr(unsafe.Pointer(signedObjectPath)), uintptr(flags), uintptr(unsafe.Pointer(certCtx)), uintptr(unsafe.Pointer(pbHashData)), uintptr(unsafe.Pointer(cbHashData)), 0)
ret = wingoes.HRESULT(r0)
return
}
func cryptCATAdminAcquireContext2(hCatAdmin *_HCATADMIN, pgSubsystem *windows.GUID, hashAlgorithm *uint16, strongHashPolicy *windows.CertStrongSignPara, flags uint32) (err error) {
r1, _, e1 := syscall.Syscall6(procCryptCATAdminAcquireContext2.Addr(), 5, uintptr(unsafe.Pointer(hCatAdmin)), uintptr(unsafe.Pointer(pgSubsystem)), uintptr(unsafe.Pointer(hashAlgorithm)), uintptr(unsafe.Pointer(strongHashPolicy)), uintptr(flags), 0)
if int32(r1) == 0 {
err = errnoErr(e1)
}
return
}
func cryptCATAdminCalcHashFromFileHandle2(hCatAdmin _HCATADMIN, file windows.Handle, pcbHash *uint32, pbHash *byte, flags uint32) (err error) {
r1, _, e1 := syscall.Syscall6(procCryptCATAdminCalcHashFromFileHandle2.Addr(), 5, uintptr(hCatAdmin), uintptr(file), uintptr(unsafe.Pointer(pcbHash)), uintptr(unsafe.Pointer(pbHash)), uintptr(flags), 0)
if int32(r1) == 0 {
err = errnoErr(e1)
}
return
}
func cryptCATAdminEnumCatalogFromHash(hCatAdmin _HCATADMIN, pbHash *byte, cbHash uint32, flags uint32, prevCatInfo *_HCATINFO) (ret _HCATINFO, err error) {
r0, _, e1 := syscall.Syscall6(procCryptCATAdminEnumCatalogFromHash.Addr(), 5, uintptr(hCatAdmin), uintptr(unsafe.Pointer(pbHash)), uintptr(cbHash), uintptr(flags), uintptr(unsafe.Pointer(prevCatInfo)), 0)
ret = _HCATINFO(r0)
if ret == 0 {
err = errnoErr(e1)
}
return
}
func cryptCATAdminReleaseCatalogContext(hCatAdmin _HCATADMIN, hCatInfo _HCATINFO, flags uint32) (err error) {
r1, _, e1 := syscall.Syscall(procCryptCATAdminReleaseCatalogContext.Addr(), 3, uintptr(hCatAdmin), uintptr(hCatInfo), uintptr(flags))
if int32(r1) == 0 {
err = errnoErr(e1)
}
return
}
func cryptCATAdminReleaseContext(hCatAdmin _HCATADMIN, flags uint32) (err error) {
r1, _, e1 := syscall.Syscall(procCryptCATAdminReleaseContext.Addr(), 2, uintptr(hCatAdmin), uintptr(flags), 0)
if int32(r1) == 0 {
err = errnoErr(e1)
}
return
}
func cryptCATAdminCatalogInfoFromContext(hCatInfo _HCATINFO, catInfo *_CATALOG_INFO, flags uint32) (err error) {
r1, _, e1 := syscall.Syscall(procCryptCATCatalogInfoFromContext.Addr(), 3, uintptr(hCatInfo), uintptr(unsafe.Pointer(catInfo)), uintptr(flags))
if int32(r1) == 0 {
err = errnoErr(e1)
}
return
}

79
vendor/tailscale.com/util/winutil/gp/gp_windows.go generated vendored Normal file
View File

@@ -0,0 +1,79 @@
// Copyright (c) Tailscale Inc & AUTHORS
// SPDX-License-Identifier: BSD-3-Clause
// Package gp contains [Group Policy]-related functions and types.
//
// [Group Policy]: https://web.archive.org/web/20240630210707/https://learn.microsoft.com/en-us/previous-versions/windows/desktop/policy/group-policy-start-page
package gp
import (
"fmt"
"runtime"
"golang.org/x/sys/windows"
)
// Scope is a user or machine policy scope.
type Scope int
const (
// MachinePolicy indicates a machine policy.
// Registry-based machine policies reside in HKEY_LOCAL_MACHINE.
MachinePolicy Scope = iota
// UserPolicy indicates a user policy.
// Registry-based user policies reside in HKEY_CURRENT_USER of the corresponding user.
UserPolicy
)
// _RP_FORCE causes RefreshPolicyEx to reapply policy even if no policy change was detected.
// See [RP_FORCE] for details.
//
// [RP_FORCE]: https://web.archive.org/save/https://learn.microsoft.com/en-us/windows/win32/api/userenv/nf-userenv-refreshpolicyex
const _RP_FORCE = 0x1
// RefreshMachinePolicy triggers a machine policy refresh, but does not wait for it to complete.
// When the force parameter is true, it causes Group Policy to reapply policies even
// if no policy change was detected.
func RefreshMachinePolicy(force bool) error {
return refreshPolicyEx(true, toRefreshPolicyFlags(force))
}
// RefreshUserPolicy triggers a user policy refresh, but does not wait for it to complete.
// When the force parameter is true, it causes Group Policy to reapply policies even
// if no policy change was detected.
//
// The token indicates the user whose policy should be refreshed.
// If specified, the token must be either a primary token with TOKEN_QUERY and TOKEN_DUPLICATE
// access, or an impersonation token with TOKEN_QUERY and TOKEN_IMPERSONATE access,
// and the specified user must be logged in interactively.
//
// Otherwise, a zero token value indicates the current user. It should not
// be used by services or other applications running under system identities.
//
// The function fails with windows.ERROR_ACCESS_DENIED if the user represented by the token
// is not logged in interactively at the time of the call.
func RefreshUserPolicy(token windows.Token, force bool) error {
if token != 0 {
// Impersonate the user whose policy we need to refresh.
runtime.LockOSThread()
defer runtime.UnlockOSThread()
if err := impersonateLoggedOnUser(token); err != nil {
return err
}
defer func() {
if err := windows.RevertToSelf(); err != nil {
// RevertToSelf errors are non-recoverable.
panic(fmt.Errorf("could not revert impersonation: %w", err))
}
}()
}
return refreshPolicyEx(true, toRefreshPolicyFlags(force))
}
func toRefreshPolicyFlags(force bool) uint32 {
if force {
return _RP_FORCE
}
return 0
}
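A minimal sketch of triggering policy refreshes from a Windows process; error handling is simplified:

package main

import (
	"log"

	"golang.org/x/sys/windows"

	"tailscale.com/util/winutil/gp"
)

func main() {
	// Force machine policy to be reapplied even if nothing changed.
	if err := gp.RefreshMachinePolicy(true); err != nil {
		log.Fatalf("machine policy refresh: %v", err)
	}
	// A zero token refreshes the current user's policy; services running
	// under a system identity should pass an interactive user's token instead.
	if err := gp.RefreshUserPolicy(windows.Token(0), false); err != nil {
		log.Fatalf("user policy refresh: %v", err)
	}
}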

13
vendor/tailscale.com/util/winutil/gp/mksyscall.go generated vendored Normal file
View File

@@ -0,0 +1,13 @@
// Copyright (c) Tailscale Inc & AUTHORS
// SPDX-License-Identifier: BSD-3-Clause
package gp
//go:generate go run golang.org/x/sys/windows/mkwinsyscall -output zsyscall_windows.go mksyscall.go
//sys enterCriticalPolicySection(machine bool) (handle policyLockHandle, err error) [int32(failretval)==0] = userenv.EnterCriticalPolicySection
//sys impersonateLoggedOnUser(token windows.Token) (err error) [int32(failretval)==0] = advapi32.ImpersonateLoggedOnUser
//sys leaveCriticalPolicySection(handle policyLockHandle) (err error) [int32(failretval)==0] = userenv.LeaveCriticalPolicySection
//sys registerGPNotification(event windows.Handle, machine bool) (err error) [int32(failretval)==0] = userenv.RegisterGPNotification
//sys refreshPolicyEx(machine bool, flags uint32) (err error) [int32(failretval)==0] = userenv.RefreshPolicyEx
//sys unregisterGPNotification(event windows.Handle) (err error) [int32(failretval)==0] = userenv.UnregisterGPNotification

View File

@@ -0,0 +1,292 @@
// Copyright (c) Tailscale Inc & AUTHORS
// SPDX-License-Identifier: BSD-3-Clause
package gp
import (
"errors"
"fmt"
"runtime"
"sync"
"sync/atomic"
"golang.org/x/sys/windows"
)
// PolicyLock allows pausing the application of policy to safely read Group Policy
// settings. A PolicyLock is an R-lock that can be held by multiple readers simultaneously,
// preventing the Group Policy Client service (which maintains its W-counterpart) from
// modifying policies while they are being read.
//
// It is not possible to pause group policy processing for longer than 10 minutes.
// If the system needs to apply policies and the lock is being held for more than that,
// the Group Policy Client service will release the lock and continue policy processing.
//
// To avoid deadlocks when acquiring both machine and user locks, acquire the
// user lock before the machine lock.
type PolicyLock struct {
scope Scope
token windows.Token
// hooks for testing
enterFn func(bool) (policyLockHandle, error)
leaveFn func(policyLockHandle) error
closing chan struct{} // closing is closed when the Close method is called.
mu sync.Mutex
handle policyLockHandle
lockCnt atomic.Int32 // A non-zero LSB indicates that the lock can be acquired.
}
// policyLockHandle is the underlying lock handle returned by enterCriticalPolicySection.
type policyLockHandle uintptr
type policyLockResult struct {
handle policyLockHandle
err error
}
var (
// ErrInvalidLockState is returned by (*PolicyLock).Lock if the lock has a zero value or has already been closed.
ErrInvalidLockState = errors.New("the lock has not been created or has already been closed")
)
// NewMachinePolicyLock creates a PolicyLock that facilitates pausing the
// application of computer policy. To avoid deadlocks when acquiring both
// machine and user locks, acquire the user lock before the machine lock.
func NewMachinePolicyLock() *PolicyLock {
lock := &PolicyLock{
scope: MachinePolicy,
closing: make(chan struct{}),
enterFn: enterCriticalPolicySection,
leaveFn: leaveCriticalPolicySection,
}
lock.lockCnt.Store(1) // mark as initialized
return lock
}
// NewUserPolicyLock creates a PolicyLock that facilitates pausing the
// application of the user policy for the specified user. To avoid deadlocks
// when acquiring both machine and user locks, acquire the user lock before the
// machine lock.
//
// The token indicates which user's policy should be locked for reading.
// If specified, the token must have TOKEN_DUPLICATE access,
// the specified user must be logged in interactively,
// and the caller retains ownership of the token.
//
// Otherwise, a zero token value indicates the current user. It should not
// be used by services or other applications running under system identities.
func NewUserPolicyLock(token windows.Token) (*PolicyLock, error) {
lock := &PolicyLock{
scope: UserPolicy,
closing: make(chan struct{}),
enterFn: enterCriticalPolicySection,
leaveFn: leaveCriticalPolicySection,
}
if token != 0 {
err := windows.DuplicateHandle(
windows.CurrentProcess(),
windows.Handle(token),
windows.CurrentProcess(),
(*windows.Handle)(&lock.token),
windows.TOKEN_QUERY|windows.TOKEN_DUPLICATE|windows.TOKEN_IMPERSONATE,
false,
0)
if err != nil {
return nil, err
}
}
lock.lockCnt.Store(1) // mark as initialized
return lock, nil
}
// Lock locks l.
// It returns ErrInvalidLockState if l has a zero value or has already been closed,
// or an Errno if the underlying Group Policy lock cannot be acquired.
//
// As a special case, it fails with windows.ERROR_ACCESS_DENIED
// if l is a user policy lock, and the corresponding user is not logged in
// interactively at the time of the call.
func (l *PolicyLock) Lock() error {
l.mu.Lock()
defer l.mu.Unlock()
if l.lockCnt.Add(2)&1 == 0 {
// The lock cannot be acquired because it has either never been properly
// created or its Close method has already been called. However, we need
// to call Unlock to both decrement lockCnt and leave the underlying
// CriticalPolicySection if we won the race with another goroutine and
// now own the lock.
l.Unlock()
return ErrInvalidLockState
}
if l.handle != 0 {
// The underlying CriticalPolicySection is already acquired.
// It is an R-Lock (with the W-counterpart owned by the Group Policy service),
// meaning that it can be acquired by multiple readers simultaneously.
// So we can just return.
return nil
}
return l.lockSlow()
}
// lockSlow calls enterCriticalPolicySection to acquire the underlying GP read lock.
// It waits for either the lock to be acquired, or for the Close method to be called.
//
// l.mu must be held.
func (l *PolicyLock) lockSlow() (err error) {
defer func() {
if err != nil {
// Decrement the counter if the lock cannot be acquired,
// and complete the pending close request if we're the last owner.
if l.lockCnt.Add(-2) == 0 {
l.closeInternal()
}
}
}()
// In some cases in production environments, the Group Policy service may
// hold the corresponding W-Lock for extended periods of time (minutes
// rather than seconds or milliseconds). We need to make our wait operation
// cancellable. So, if one goroutine invokes (*PolicyLock).Close while another
// initiates (*PolicyLock).Lock and waits for the underlying R-lock to be
// acquired by enterCriticalPolicySection, the Close method should cancel
// the wait.
initCh := make(chan error)
resultCh := make(chan policyLockResult)
go func() {
closing := l.closing
if l.scope == UserPolicy && l.token != 0 {
// Impersonate the user whose critical policy section we want to acquire.
runtime.LockOSThread()
defer runtime.UnlockOSThread()
if err := impersonateLoggedOnUser(l.token); err != nil {
initCh <- err
return
}
defer func() {
if err := windows.RevertToSelf(); err != nil {
// RevertToSelf errors are non-recoverable.
panic(fmt.Errorf("could not revert impersonation: %w", err))
}
}()
}
close(initCh)
var machine bool
if l.scope == MachinePolicy {
machine = true
}
handle, err := l.enterFn(machine)
send_result:
for {
select {
case resultCh <- policyLockResult{handle, err}:
// lockSlow has received the result.
break send_result
default:
select {
case <-closing:
// The lock is being closed and the calling goroutine is no
// longer waiting for the result, so release the underlying
// critical policy section if we acquired it.
if err == nil {
l.leaveFn(handle)
}
break send_result
default:
// The calling goroutine did not enter the select block yet.
runtime.Gosched() // allow other routines to run
continue send_result
}
}
}
}()
// lockSlow should not return until the goroutine above has been fully initialized,
// even if the lock is being closed.
if err = <-initCh; err != nil {
return err
}
select {
case result := <-resultCh:
if result.err == nil {
l.handle = result.handle
}
return result.err
case <-l.closing:
return ErrInvalidLockState
}
}
// Unlock unlocks l.
// It panics if l is not locked on entry to Unlock.
func (l *PolicyLock) Unlock() {
l.mu.Lock()
defer l.mu.Unlock()
lockCnt := l.lockCnt.Add(-2)
if lockCnt < 0 {
panic("negative lockCnt")
}
if lockCnt > 1 {
// The lock is still being used by other readers.
// We compare against 1 rather than 0 because the least significant bit
// of lockCnt indicates that l has been initialized and a close
// has not been requested yet.
return
}
if l.handle != 0 {
// Impersonation is not required to unlock a critical policy section.
// The handle we pass determines which mutex will be unlocked.
leaveCriticalPolicySection(l.handle)
l.handle = 0
}
if lockCnt == 0 {
// Complete the pending close request if there's no more readers.
l.closeInternal()
}
}
// Close releases resources associated with l.
// It is a no-op for the machine policy lock.
func (l *PolicyLock) Close() error {
lockCnt := l.lockCnt.Load()
if lockCnt&1 == 0 {
// The lock has never been initialized, or close has already been called.
return nil
}
close(l.closing)
// Unset the LSB to indicate a pending close request.
for !l.lockCnt.CompareAndSwap(lockCnt, lockCnt&^int32(1)) {
lockCnt = l.lockCnt.Load()
}
if lockCnt != 0 {
// The lock is still being used and will be closed upon the final Unlock call.
return nil
}
return l.closeInternal()
}
func (l *PolicyLock) closeInternal() error {
if l.token != 0 {
if err := l.token.Close(); err != nil {
return err
}
l.token = 0
}
l.closing = nil
return nil
}
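A sketch of the intended lock pattern: hold the R-lock only for the duration of a policy read so the Group Policy service cannot rewrite settings mid-read:

package main

import (
	"log"

	"tailscale.com/util/winutil/gp"
)

func main() {
	lock := gp.NewMachinePolicyLock()
	defer lock.Close()

	if err := lock.Lock(); err != nil {
		log.Fatalf("acquiring policy lock: %v", err)
	}
	defer lock.Unlock()

	// ... read registry-based policy values here ...
}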

107
vendor/tailscale.com/util/winutil/gp/watcher_windows.go generated vendored Normal file
View File

@@ -0,0 +1,107 @@
// Copyright (c) Tailscale Inc & AUTHORS
// SPDX-License-Identifier: BSD-3-Clause
package gp
import (
"golang.org/x/sys/windows"
)
// ChangeWatcher calls the handler whenever a policy in the specified scope changes.
type ChangeWatcher struct {
gpWaitEvents [2]windows.Handle
handler func()
done chan struct{}
}
// NewChangeWatcher creates an instance of ChangeWatcher that invokes handler
// every time Windows notifies it of a group policy change in the specified scope.
func NewChangeWatcher(scope Scope, handler func()) (*ChangeWatcher, error) {
var err error
// evtDone is signaled by (*ChangeWatcher).Close() to indicate that
// the doWatch goroutine should exit.
evtDone, err := windows.CreateEvent(nil, 0, 0, nil)
if err != nil {
return nil, err
}
defer func() {
if err != nil {
windows.CloseHandle(evtDone)
}
}()
// evtChanged is registered with the Windows policy engine to become
// signalled any time group policy has been refreshed.
evtChanged, err := windows.CreateEvent(nil, 0, 0, nil)
if err != nil {
return nil, err
}
defer func() {
if err != nil {
windows.CloseHandle(evtChanged)
}
}()
// Tell Windows to signal evtChanged whenever group policies are refreshed.
if err := registerGPNotification(evtChanged, scope == MachinePolicy); err != nil {
return nil, err
}
result := &ChangeWatcher{
// Ordering of the event handles in gpWaitEvents is important:
// When calling windows.WaitForMultipleObjects and multiple objects are
// signalled simultaneously, it always returns the wait code for the
// lowest-indexed handle in its input array. evtDone is higher priority for
// us than evtChanged, so the former must be placed into the array ahead of
// the latter.
gpWaitEvents: [2]windows.Handle{
evtDone,
evtChanged,
},
handler: handler,
done: make(chan struct{}),
}
go result.doWatch()
return result, nil
}
func (w *ChangeWatcher) doWatch() {
// The wait code corresponding to the event that is signalled when a group
// policy change occurs. That is, w.gpWaitEvents[1] aka evtChanged.
const expectedWaitCode = windows.WAIT_OBJECT_0 + 1
for {
if waitCode, _ := windows.WaitForMultipleObjects(w.gpWaitEvents[:], false, windows.INFINITE); waitCode != expectedWaitCode {
break
}
w.handler()
}
close(w.done)
}
// Close unsubscribes from further Group Policy notifications,
// waits for any running handlers to complete, and releases any remaining resources
// associated with w.
func (w *ChangeWatcher) Close() error {
// Notify doWatch that we're done and it should exit.
if err := windows.SetEvent(w.gpWaitEvents[0]); err != nil {
return err
}
unregisterGPNotification(w.gpWaitEvents[1])
// Wait for doWatch to complete.
<-w.done
// Now we may safely clean up all the things.
for i, evt := range w.gpWaitEvents {
windows.CloseHandle(evt)
w.gpWaitEvents[i] = 0
}
w.handler = nil
return nil
}
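A sketch of watching for machine policy changes; a real service would keep the watcher alive until shutdown rather than sleeping:

package main

import (
	"log"
	"time"

	"tailscale.com/util/winutil/gp"
)

func main() {
	w, err := gp.NewChangeWatcher(gp.MachinePolicy, func() {
		log.Println("machine policy changed; re-reading settings")
	})
	if err != nil {
		log.Fatalf("registering policy watcher: %v", err)
	}
	defer w.Close()

	time.Sleep(time.Minute)
}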

View File

@@ -0,0 +1,111 @@
// Code generated by 'go generate'; DO NOT EDIT.
package gp
import (
"syscall"
"unsafe"
"golang.org/x/sys/windows"
)
var _ unsafe.Pointer
// Do the interface allocations only once for common
// Errno values.
const (
errnoERROR_IO_PENDING = 997
)
var (
errERROR_IO_PENDING error = syscall.Errno(errnoERROR_IO_PENDING)
errERROR_EINVAL error = syscall.EINVAL
)
// errnoErr returns common boxed Errno values, to prevent
// allocations at runtime.
func errnoErr(e syscall.Errno) error {
switch e {
case 0:
return errERROR_EINVAL
case errnoERROR_IO_PENDING:
return errERROR_IO_PENDING
}
// TODO: add more here, after collecting data on the common
// error values seen on Windows. (perhaps when running
// all.bat?)
return e
}
var (
modadvapi32 = windows.NewLazySystemDLL("advapi32.dll")
moduserenv = windows.NewLazySystemDLL("userenv.dll")
procImpersonateLoggedOnUser = modadvapi32.NewProc("ImpersonateLoggedOnUser")
procEnterCriticalPolicySection = moduserenv.NewProc("EnterCriticalPolicySection")
procLeaveCriticalPolicySection = moduserenv.NewProc("LeaveCriticalPolicySection")
procRefreshPolicyEx = moduserenv.NewProc("RefreshPolicyEx")
procRegisterGPNotification = moduserenv.NewProc("RegisterGPNotification")
procUnregisterGPNotification = moduserenv.NewProc("UnregisterGPNotification")
)
func impersonateLoggedOnUser(token windows.Token) (err error) {
r1, _, e1 := syscall.Syscall(procImpersonateLoggedOnUser.Addr(), 1, uintptr(token), 0, 0)
if int32(r1) == 0 {
err = errnoErr(e1)
}
return
}
func enterCriticalPolicySection(machine bool) (handle policyLockHandle, err error) {
var _p0 uint32
if machine {
_p0 = 1
}
r0, _, e1 := syscall.Syscall(procEnterCriticalPolicySection.Addr(), 1, uintptr(_p0), 0, 0)
handle = policyLockHandle(r0)
if int32(handle) == 0 {
err = errnoErr(e1)
}
return
}
func leaveCriticalPolicySection(handle policyLockHandle) (err error) {
r1, _, e1 := syscall.Syscall(procLeaveCriticalPolicySection.Addr(), 1, uintptr(handle), 0, 0)
if int32(r1) == 0 {
err = errnoErr(e1)
}
return
}
func refreshPolicyEx(machine bool, flags uint32) (err error) {
var _p0 uint32
if machine {
_p0 = 1
}
r1, _, e1 := syscall.Syscall(procRefreshPolicyEx.Addr(), 2, uintptr(_p0), uintptr(flags), 0)
if int32(r1) == 0 {
err = errnoErr(e1)
}
return
}
func registerGPNotification(event windows.Handle, machine bool) (err error) {
var _p0 uint32
if machine {
_p0 = 1
}
r1, _, e1 := syscall.Syscall(procRegisterGPNotification.Addr(), 2, uintptr(event), uintptr(_p0), 0)
if int32(r1) == 0 {
err = errnoErr(e1)
}
return
}
func unregisterGPNotification(event windows.Handle) (err error) {
r1, _, e1 := syscall.Syscall(procUnregisterGPNotification.Addr(), 1, uintptr(event), 0, 0)
if int32(r1) == 0 {
err = errnoErr(e1)
}
return
}

21
vendor/tailscale.com/util/winutil/mksyscall.go generated vendored Normal file
View File

@@ -0,0 +1,21 @@
// Copyright (c) Tailscale Inc & AUTHORS
// SPDX-License-Identifier: BSD-3-Clause
package winutil
//go:generate go run golang.org/x/sys/windows/mkwinsyscall -output zsyscall_windows.go mksyscall.go
//go:generate go run golang.org/x/tools/cmd/goimports -w zsyscall_windows.go
//sys dsGetDcName(computerName *uint16, domainName *uint16, domainGuid *windows.GUID, siteName *uint16, flags dsGetDcNameFlag, dcInfo **_DOMAIN_CONTROLLER_INFO) (ret error) = netapi32.DsGetDcNameW
//sys expandEnvironmentStringsForUser(token windows.Token, src *uint16, dst *uint16, dstLen uint32) (err error) [int32(failretval)==0] = userenv.ExpandEnvironmentStringsForUserW
//sys getApplicationRestartSettings(process windows.Handle, commandLine *uint16, commandLineLen *uint32, flags *uint32) (ret wingoes.HRESULT) = kernel32.GetApplicationRestartSettings
//sys loadUserProfile(token windows.Token, profileInfo *_PROFILEINFO) (err error) [int32(failretval)==0] = userenv.LoadUserProfileW
//sys netValidateName(server *uint16, name *uint16, account *uint16, password *uint16, nameType _NETSETUP_NAME_TYPE) (ret error) = netapi32.NetValidateName
//sys queryServiceConfig2(hService windows.Handle, infoLevel uint32, buf *byte, bufLen uint32, bytesNeeded *uint32) (err error) [failretval==0] = advapi32.QueryServiceConfig2W
//sys registerApplicationRestart(cmdLineExclExeName *uint16, flags uint32) (ret wingoes.HRESULT) = kernel32.RegisterApplicationRestart
//sys rmEndSession(session _RMHANDLE) (ret error) = rstrtmgr.RmEndSession
//sys rmGetList(session _RMHANDLE, nProcInfoNeeded *uint32, nProcInfo *uint32, rgAffectedApps *_RM_PROCESS_INFO, pRebootReasons *uint32) (ret error) = rstrtmgr.RmGetList
//sys rmJoinSession(pSession *_RMHANDLE, sessionKey *uint16) (ret error) = rstrtmgr.RmJoinSession
//sys rmRegisterResources(session _RMHANDLE, nFiles uint32, rgsFileNames **uint16, nApplications uint32, rgApplications *_RM_UNIQUE_PROCESS, nServices uint32, rgsServiceNames **uint16) (ret error) = rstrtmgr.RmRegisterResources
//sys rmStartSession(pSession *_RMHANDLE, flags uint32, sessionKey *uint16) (ret error) = rstrtmgr.RmStartSession
//sys unloadUserProfile(token windows.Token, profile registry.Key) (err error) [int32(failretval)==0] = userenv.UnloadUserProfile

Some files were not shown because too many files have changed in this diff