Update dependencies
37 vendor/tailscale.com/util/deephash/debug.go generated vendored Normal file
@@ -0,0 +1,37 @@
// Copyright (c) Tailscale Inc & AUTHORS
// SPDX-License-Identifier: BSD-3-Clause

//go:build deephash_debug

package deephash

import "fmt"

func (h *hasher) HashBytes(b []byte) {
	fmt.Printf("B(%q)+", b)
	h.Block512.HashBytes(b)
}
func (h *hasher) HashString(s string) {
	fmt.Printf("S(%q)+", s)
	h.Block512.HashString(s)
}
func (h *hasher) HashUint8(n uint8) {
	fmt.Printf("U8(%d)+", n)
	h.Block512.HashUint8(n)
}
func (h *hasher) HashUint16(n uint16) {
	fmt.Printf("U16(%d)+", n)
	h.Block512.HashUint16(n)
}
func (h *hasher) HashUint32(n uint32) {
	fmt.Printf("U32(%d)+", n)
	h.Block512.HashUint32(n)
}
func (h *hasher) HashUint64(n uint64) {
	fmt.Printf("U64(%d)+", n)
	h.Block512.HashUint64(n)
}
func (h *hasher) Sum(b []byte) []byte {
	fmt.Println("FIN")
	return h.Block512.Sum(b)
}
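
// Illustrative note (editor's sketch, not part of the upstream file): these
// wrappers shadow the promoted hashx.Block512 methods only when the build tag
// above is set, e.g.:
//
//	go test -tags deephash_debug tailscale.com/util/deephash
//
// Each Hash* call then prints what was hashed, yielding a trace such as
// U64(123)+S("foo")+FIN, where FIN marks the final Sum call.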
732 vendor/tailscale.com/util/deephash/deephash.go generated vendored Normal file
@@ -0,0 +1,732 @@
// Copyright (c) Tailscale Inc & AUTHORS
// SPDX-License-Identifier: BSD-3-Clause

// Package deephash hashes a Go value recursively, in a predictable order,
// without looping. The hash is only valid within the lifetime of a program.
// Users should not store the hash on disk or send it over the network.
// The hash is sufficiently strong and unique such that
// Hash(&x) == Hash(&y) is an appropriate replacement for x == y.
//
// The definition of equality is identical to reflect.DeepEqual except:
//   - Floating-point values are compared based on the raw bits,
//     which means that NaNs (with the same bit pattern) are treated as equal.
//   - time.Time are compared based on whether they are the same instant in time
//     and also in the same zone offset. Monotonic measurements and zone names
//     are ignored as part of the hash.
//   - netip.Addr are compared based on a shallow comparison of the struct.
//
// WARNING: This package, like most of the tailscale.com Go module,
// should be considered Tailscale-internal; we make no API promises.
//
// # Cycle detection
//
// This package correctly handles cycles in the value graph,
// but in a way that is potentially pathological in some situations.
//
// The algorithm for cycle detection operates by
// pushing a pointer onto a stack whenever deephash is visiting a pointer and
// popping the pointer from the stack after deephash is leaving the pointer.
// Before visiting a new pointer, deephash checks whether it has already been
// visited on the pointer stack. If so, it hashes the index of the pointer
// on the stack and avoids visiting the pointer.
//
// This algorithm is guaranteed to detect cycles, but may expand pointers
// more often than a potential alternate algorithm that remembers all pointers
// ever visited in a map. The current algorithm uses O(D) memory, where D
// is the maximum depth of the recursion, while the alternate algorithm
// would use O(P) memory where P is all pointers ever seen, which can be a lot,
// and most of which may have nothing to do with cycles.
// Also, the alternate algorithm has to deal with challenges of producing
// deterministic results when pointers are visited in non-deterministic ways
// such as when iterating through a Go map. The stack-based algorithm avoids
// this challenge since the stack is always deterministic regardless of
// non-deterministic iteration order of Go maps.
//
// To concretely see how this algorithm can be pathological,
// consider the following data structure:
//
//	var big *Item = ... // some large data structure that is slow to hash
//	var manyBig []*Item
//	for i := range 1000 {
//		manyBig = append(manyBig, big)
//	}
//	deephash.Hash(&manyBig)
//
// Here, the manyBig data structure is not even cyclic.
// We have the same big *Item being stored multiple times in a []*Item.
// When deephash hashes []*Item, it hashes each individual *Item
// not realizing that it had just done the computation earlier.
// To avoid the pathological situation, Item should implement [SelfHasher] and
// memoize attempts to hash itself.
package deephash

// TODO: Add option to teach deephash to memoize the Hash result of particular types?

import (
	"crypto/sha256"
	"encoding/binary"
	"encoding/hex"
	"fmt"
	"reflect"
	"sync"
	"time"

	"tailscale.com/util/hashx"
	"tailscale.com/util/set"
)

// There is much overlap between the theory of serialization and hashing.
// A hash (useful for determining equality) can be produced by printing a value
// and hashing the output. The format must:
//   - be deterministic such that the same value hashes to the same output, and
//   - be parsable such that the same value can be reproduced by the output.
//
// The logic below hashes a value by printing it to a hash.Hash.
// To be parsable, it assumes that we know the Go type of each value:
//   - scalar types (e.g., bool or int32) are directly printed as their
//     underlying memory representation.
//   - list types (e.g., strings and slices) are prefixed by a
//     fixed-width length field, followed by the contents of the list.
//   - slices, arrays, and structs print each element/field consecutively.
//   - interfaces print with a 1-byte prefix indicating whether it is nil.
//     If non-nil, it is followed by a fixed-width field of the type index,
//     followed by the format of the underlying value.
//   - pointers print with a 1-byte prefix indicating whether the pointer is
//     1) nil, 2) previously seen, or 3) newly seen. Previously seen pointers are
//     followed by a fixed-width field with the index of the previous pointer.
//     Newly seen pointers are followed by the format of the underlying value.
//   - maps print with a 1-byte prefix indicating whether the map pointer is
//     1) nil, 2) previously seen, or 3) newly seen. Previously seen pointers
//     are followed by a fixed-width field of the index of the previous pointer.
//     Newly seen maps are printed with a fixed-width length field, followed by
//     a fixed-width field with the XOR of the hash of every map entry.
//     With a sufficiently strong hash, this value is theoretically "parsable"
//     by looking up the hash in a magical map that returns the set of entries
//     for that given hash.
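
// Illustrative sketch of the format (an editor's assumption drawn from the
// rules above, not upstream text): hashing v := &[]string{"hi"} with Hash
// would roughly emit
//
//	seed || typeHash([]string) || 01 (non-nil *T) || 01 (non-nil slice) ||
//	uint64 len=1 || uint64 len=2 || "hi"
//
// where seed is the per-process random seed and typeHash is the address-based
// type hash produced by hasher.hashType below.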

// SelfHasher is implemented by types that can compute their own hash
// by writing values through the provided [Hasher] parameter.
// Implementations must not leak the provided [Hasher].
//
// If the implementation of SelfHasher recursively calls [deephash.Hash],
// then infinite recursion is quite likely to occur.
// To avoid this, use a type definition to drop methods before calling [deephash.Hash]:
//
//	func (v *MyType) Hash(h deephash.Hasher) {
//		v.hashMu.Lock()
//		defer v.hashMu.Unlock()
//		if v.dirtyHash {
//			type MyTypeWithoutMethods MyType // type define MyType to drop Hash method
//			v.dirtyHash = false              // clear out dirty bit to avoid hashing over it
//			v.hashSum = deephash.Sum{}       // clear out hashSum to avoid hashing over it
//			v.hashSum = deephash.Hash((*MyTypeWithoutMethods)(v))
//		}
//		h.HashSum(v.hashSum)
//	}
//
// In the above example, we acquire a lock since it is possible that deephash
// is called in a concurrent manner, which implies that MyType.Hash may also
// be called in a concurrent manner. Whether this lock is necessary is
// application-dependent and left as an exercise to the reader.
// Also, the example assumes that dirtyHash is set elsewhere by application
// logic whenever a mutation is made to MyType that would alter the hash.
type SelfHasher interface {
	Hash(Hasher)
}

// Hasher is a value passed to [SelfHasher.Hash] that allows implementations
// to hash themselves in a structured manner.
type Hasher struct{ h *hashx.Block512 }

// HashBytes hashes a sequence of bytes b.
// The length of b is not explicitly hashed.
func (h Hasher) HashBytes(b []byte) { h.h.HashBytes(b) }

// HashString hashes the string data of s.
// The length of s is not explicitly hashed.
func (h Hasher) HashString(s string) { h.h.HashString(s) }

// HashUint8 hashes a uint8.
func (h Hasher) HashUint8(n uint8) { h.h.HashUint8(n) }

// HashUint16 hashes a uint16.
func (h Hasher) HashUint16(n uint16) { h.h.HashUint16(n) }

// HashUint32 hashes a uint32.
func (h Hasher) HashUint32(n uint32) { h.h.HashUint32(n) }

// HashUint64 hashes a uint64.
func (h Hasher) HashUint64(n uint64) { h.h.HashUint64(n) }

// HashSum hashes a [Sum].
func (h Hasher) HashSum(s Sum) {
	// NOTE: Avoid calling h.HashBytes since it escapes b,
	// which would force s to be heap allocated.
	h.h.HashUint64(binary.LittleEndian.Uint64(s.sum[0:8]))
	h.h.HashUint64(binary.LittleEndian.Uint64(s.sum[8:16]))
	h.h.HashUint64(binary.LittleEndian.Uint64(s.sum[16:24]))
	h.h.HashUint64(binary.LittleEndian.Uint64(s.sum[24:32]))
}

// hasher is reusable state for hashing a value.
// Get one via hasherPool.
type hasher struct {
	hashx.Block512
	visitStack visitStack
}

var hasherPool = &sync.Pool{
	New: func() any { return new(hasher) },
}

func (h *hasher) reset() {
	if h.Block512.Hash == nil {
		h.Block512.Hash = sha256.New()
	}
	h.Block512.Reset()
}

// hashType hashes a reflect.Type.
// The hash is only consistent within the lifetime of a program.
func (h *hasher) hashType(t reflect.Type) {
	// This approach relies on reflect.Type always being backed by a unique
	// *reflect.rtype pointer. A safer approach is to use a global sync.Map
	// that maps reflect.Type to some arbitrary and unique index.
	// While safer, it requires global state with memory that can never be GC'd.
	rtypeAddr := reflect.ValueOf(t).Pointer() // address of *reflect.rtype
	h.HashUint64(uint64(rtypeAddr))
}

func (h *hasher) sum() (s Sum) {
	h.Sum(s.sum[:0])
	return s
}

// Sum is an opaque checksum type that is comparable.
type Sum struct {
	sum [sha256.Size]byte
}

func (s1 *Sum) xor(s2 Sum) {
	for i := range sha256.Size {
		s1.sum[i] ^= s2.sum[i]
	}
}

func (s Sum) String() string {
	// Note: if we change this, keep in sync with AppendTo
	return hex.EncodeToString(s.sum[:])
}

// AppendTo appends the string encoding of this sum (as returned by the String
// method) to the provided byte slice and returns the extended buffer.
func (s Sum) AppendTo(b []byte) []byte {
	// TODO: switch to upstream implementation if accepted:
	// https://github.com/golang/go/issues/53693
	var lb [len(s.sum) * 2]byte
	hex.Encode(lb[:], s.sum[:])
	return append(b, lb[:]...)
}

var (
	seedOnce sync.Once
	seed     uint64
)

func initSeed() {
	seed = uint64(time.Now().UnixNano())
}

// Hash returns the hash of v.
func Hash[T any](v *T) Sum {
	h := hasherPool.Get().(*hasher)
	defer hasherPool.Put(h)
	h.reset()
	seedOnce.Do(initSeed)
	h.HashUint64(seed)

	// Always treat the Hash input as if it were an interface by including
	// a hash of the type. This ensures that hashing of two different types
	// but with the same value structure produces different hashes.
	t := reflect.TypeFor[T]()
	h.hashType(t)
	if v == nil {
		h.HashUint8(0) // indicates nil
	} else {
		h.HashUint8(1) // indicates visiting pointer element
		p := pointerOf(reflect.ValueOf(v))
		hash := lookupTypeHasher(t)
		hash(h, p)
	}
	return h.sum()
}
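
// Illustrative usage of Hash (an editor's sketch, not part of the upstream
// file; Config is a hypothetical caller-defined type):
//
//	type Config struct {
//		Name string
//		Port uint16
//	}
//	c := &Config{Name: "node0", Port: 80}
//	sum := c and deephash.Hash(c) // Sum over the type and value of *c
//	_ = sum == deephash.Hash(c)   // true: deterministic within one process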

// Option is an optional argument to HasherForType.
type Option interface {
	isOption()
}

type fieldFilterOpt struct {
	t              reflect.Type
	fields         set.Set[string]
	includeOnMatch bool // true to include fields, false to exclude them
}

func (fieldFilterOpt) isOption() {}

func (f fieldFilterOpt) filterStructField(sf reflect.StructField) (include bool) {
	if f.fields.Contains(sf.Name) {
		return f.includeOnMatch
	}
	return !f.includeOnMatch
}

// IncludeFields returns an option that modifies the hashing for T to only
// include the named struct fields.
//
// T must be a struct type, and must match the type of the value passed to
// HasherForType.
func IncludeFields[T any](fields ...string) Option {
	return newFieldFilter[T](true, fields)
}

// ExcludeFields returns an option that modifies the hashing for T to include
// all struct fields of T except those provided in fields.
//
// T must be a struct type, and must match the type of the value passed to
// HasherForType.
func ExcludeFields[T any](fields ...string) Option {
	return newFieldFilter[T](false, fields)
}

func newFieldFilter[T any](include bool, fields []string) Option {
	t := reflect.TypeFor[T]()
	fieldSet := set.Set[string]{}
	for _, f := range fields {
		if _, ok := t.FieldByName(f); !ok {
			panic(fmt.Sprintf("unknown field %q for type %v", f, t))
		}
		fieldSet.Add(f)
	}
	return fieldFilterOpt{t, fieldSet, include}
}

// HasherForType returns a hash that is specialized for the provided type.
//
// HasherForType panics if the opts are invalid for the provided type.
//
// Currently, at most one option can be provided (IncludeFields or
// ExcludeFields) and its type must match the type of T. Those restrictions may
// be removed in the future, along with documentation about their precedence
// when combined.
func HasherForType[T any](opts ...Option) func(*T) Sum {
	seedOnce.Do(initSeed)
	if len(opts) > 1 {
		panic("HasherForType only accepts one optional argument") // for now
	}
	t := reflect.TypeFor[T]()
	var hash typeHasherFunc
	for _, o := range opts {
		switch o := o.(type) {
		default:
			panic(fmt.Sprintf("unknown HasherOpt %T", o))
		case fieldFilterOpt:
			if t.Kind() != reflect.Struct {
				panic("HasherForStructTypeWithFieldFilter requires T of kind struct")
			}
			if t != o.t {
				panic(fmt.Sprintf("field filter for type %v does not match HasherForType type %v", o.t, t))
			}
			hash = makeStructHasher(t, o.filterStructField)
		}
	}
	if hash == nil {
		hash = lookupTypeHasher(t)
	}
	return func(v *T) (s Sum) {
		// This logic is identical to Hash, but pulls a few statements
		// out of the returned closure.
		h := hasherPool.Get().(*hasher)
		defer hasherPool.Put(h)
		h.reset()
		h.HashUint64(seed)

		h.hashType(t)
		if v == nil {
			h.HashUint8(0) // indicates nil
		} else {
			h.HashUint8(1) // indicates visiting pointer element
			p := pointerOf(reflect.ValueOf(v))
			hash(h, p)
		}
		return h.sum()
	}
}
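
// Illustrative usage of HasherForType (an editor's sketch, not part of the
// upstream file; Node is a hypothetical caller-defined type and the netip and
// sync imports are assumed):
//
//	type Node struct {
//		Name string
//		Addr netip.Addr
//		mu   sync.Mutex
//	}
//	var hashNode = deephash.HasherForType[Node](deephash.ExcludeFields[Node]("mu"))
//	sum := hashNode(&node)
//
// Excluding the mutex keeps transient lock state out of the hash.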

// Update sets last to the hash of v and reports whether its value changed.
func Update[T any](last *Sum, v *T) (changed bool) {
	sum := Hash(v)
	changed = sum != *last
	if changed {
		*last = sum
	}
	return changed
}
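
// Illustrative usage of Update (an editor's sketch, not part of the upstream
// file; cfgChanges is a hypothetical channel of *Config values):
//
//	var last deephash.Sum
//	for cfg := range cfgChanges {
//		if deephash.Update(&last, cfg) {
//			// the config changed since the previous iteration; react to it
//		}
//	}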

// typeHasherFunc hashes the value pointed at by p for a given type.
// For example, if t is a bool, then p is a *bool.
// The provided pointer must always be non-nil.
type typeHasherFunc func(h *hasher, p pointer)

var typeHasherCache sync.Map // map[reflect.Type]typeHasherFunc

func lookupTypeHasher(t reflect.Type) typeHasherFunc {
	if v, ok := typeHasherCache.Load(t); ok {
		return v.(typeHasherFunc)
	}
	hash := makeTypeHasher(t)
	v, _ := typeHasherCache.LoadOrStore(t, hash)
	return v.(typeHasherFunc)
}

func makeTypeHasher(t reflect.Type) typeHasherFunc {
	// Types with specific hashing.
	switch t {
	case timeTimeType:
		return hashTime
	case netipAddrType:
		return hashAddr
	}

	// Types that implement their own hashing.
	if t.Kind() != reflect.Pointer && t.Kind() != reflect.Interface {
		// A method can be implemented on either the value receiver or pointer receiver.
		if t.Implements(selfHasherType) || reflect.PointerTo(t).Implements(selfHasherType) {
			return makeSelfHasher(t)
		}
	}

	// Types that can have their memory representation directly hashed.
	if typeIsMemHashable(t) {
		return makeMemHasher(t.Size())
	}

	switch t.Kind() {
	case reflect.String:
		return hashString
	case reflect.Array:
		return makeArrayHasher(t)
	case reflect.Slice:
		return makeSliceHasher(t)
	case reflect.Struct:
		return makeStructHasher(t, keepAllStructFields)
	case reflect.Map:
		return makeMapHasher(t)
	case reflect.Pointer:
		return makePointerHasher(t)
	case reflect.Interface:
		return makeInterfaceHasher(t)
	default: // Func, Chan, UnsafePointer
		return func(*hasher, pointer) {}
	}
}

func hashTime(h *hasher, p pointer) {
	// Include the zone offset (but not the name) to keep
	// Hash(t1) == Hash(t2) being semantically equivalent to
	// t1.Format(time.RFC3339Nano) == t2.Format(time.RFC3339Nano).
	t := *p.asTime()
	_, offset := t.Zone()
	h.HashUint64(uint64(t.Unix()))
	h.HashUint32(uint32(t.Nanosecond()))
	h.HashUint32(uint32(offset))
}

func hashAddr(h *hasher, p pointer) {
	// The formatting of netip.Addr covers the
	// IP version, the address, and the optional zone name (for v6).
	// This is equivalent to a1.MarshalBinary() == a2.MarshalBinary().
	ip := *p.asAddr()
	switch {
	case !ip.IsValid():
		h.HashUint64(0)
	case ip.Is4():
		b := ip.As4()
		h.HashUint64(4)
		h.HashUint32(binary.LittleEndian.Uint32(b[:]))
	case ip.Is6():
		b := ip.As16()
		z := ip.Zone()
		h.HashUint64(16 + uint64(len(z)))
		h.HashUint64(binary.LittleEndian.Uint64(b[:8]))
		h.HashUint64(binary.LittleEndian.Uint64(b[8:]))
		h.HashString(z)
	}
}
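
// Illustrative encodings (an editor's reading of hashAddr above): the zero
// netip.Addr hashes as a single 0; 1.2.3.4 hashes as the length 4 followed by
// its four raw bytes as one little-endian uint32; fe80::1%eth0 hashes as the
// length 16+len("eth0")=20, the 16 address bytes, then the zone string.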

func makeSelfHasher(t reflect.Type) typeHasherFunc {
	return func(h *hasher, p pointer) {
		p.asValue(t).Interface().(SelfHasher).Hash(Hasher{&h.Block512})
	}
}

func hashString(h *hasher, p pointer) {
	s := *p.asString()
	h.HashUint64(uint64(len(s)))
	h.HashString(s)
}

func makeMemHasher(n uintptr) typeHasherFunc {
	return func(h *hasher, p pointer) {
		h.HashBytes(p.asMemory(n))
	}
}

func makeArrayHasher(t reflect.Type) typeHasherFunc {
	var once sync.Once
	var hashElem typeHasherFunc
	init := func() {
		hashElem = lookupTypeHasher(t.Elem())
	}

	n := t.Len()          // number of array elements
	nb := t.Elem().Size() // byte size of each array element
	return func(h *hasher, p pointer) {
		once.Do(init)
		for i := range n {
			hashElem(h, p.arrayIndex(i, nb))
		}
	}
}

func makeSliceHasher(t reflect.Type) typeHasherFunc {
	nb := t.Elem().Size() // byte size of each slice element
	if typeIsMemHashable(t.Elem()) {
		return func(h *hasher, p pointer) {
			pa := p.sliceArray()
			if pa.isNil() {
				h.HashUint8(0) // indicates nil
				return
			}
			h.HashUint8(1) // indicates visiting slice
			n := p.sliceLen()
			b := pa.asMemory(uintptr(n) * nb)
			h.HashUint64(uint64(n))
			h.HashBytes(b)
		}
	}

	var once sync.Once
	var hashElem typeHasherFunc
	init := func() {
		hashElem = lookupTypeHasher(t.Elem())
		if typeIsRecursive(t) {
			hashElemDefault := hashElem
			hashElem = func(h *hasher, p pointer) {
				if idx, ok := h.visitStack.seen(p.p); ok {
					h.HashUint8(2) // indicates cycle
					h.HashUint64(uint64(idx))
					return
				}
				h.HashUint8(1) // indicates visiting slice element
				h.visitStack.push(p.p)
				defer h.visitStack.pop(p.p)
				hashElemDefault(h, p)
			}
		}
	}

	return func(h *hasher, p pointer) {
		pa := p.sliceArray()
		if pa.isNil() {
			h.HashUint8(0) // indicates nil
			return
		}
		once.Do(init)
		h.HashUint8(1) // indicates visiting slice
		n := p.sliceLen()
		h.HashUint64(uint64(n))
		for i := range n {
			pe := pa.arrayIndex(i, nb)
			hashElem(h, pe)
		}
	}
}
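
// Illustrative note (an editor's reading of the fast path above): for a
// []uint32{1, 2, 3}, the mem-hashable branch hashes the length 3 and then the
// 12 bytes of backing memory with a single HashBytes call, rather than three
// per-element calls through a typeHasherFunc.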

func keepAllStructFields(keepField reflect.StructField) bool { return true }

func makeStructHasher(t reflect.Type, keepField func(reflect.StructField) bool) typeHasherFunc {
	type fieldHasher struct {
		idx    int // index of field for reflect.Type.Field(n); negative if memory is directly hashable
		keep   bool
		hash   typeHasherFunc // only valid if idx is not negative
		offset uintptr
		size   uintptr
	}
	var once sync.Once
	var fields []fieldHasher
	init := func() {
		for i, numField := 0, t.NumField(); i < numField; i++ {
			sf := t.Field(i)
			f := fieldHasher{i, keepField(sf), nil, sf.Offset, sf.Type.Size()}
			if f.keep && typeIsMemHashable(sf.Type) {
				f.idx = -1
			}

			// Combine with previous field if both contiguous and mem-hashable.
			if f.idx < 0 && len(fields) > 0 {
				if last := &fields[len(fields)-1]; last.idx < 0 && last.offset+last.size == f.offset {
					last.size += f.size
					continue
				}
			}
			fields = append(fields, f)
		}

		for i, f := range fields {
			if f.idx >= 0 {
				fields[i].hash = lookupTypeHasher(t.Field(f.idx).Type)
			}
		}
	}

	return func(h *hasher, p pointer) {
		once.Do(init)
		for _, field := range fields {
			if !field.keep {
				continue
			}
			pf := p.structField(field.idx, field.offset, field.size)
			if field.idx < 0 {
				h.HashBytes(pf.asMemory(field.size))
			} else {
				field.hash(h, pf)
			}
		}
	}
}
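
// Illustrative example (an editor's reading of init above): for
// struct{ A, B uint32; S string }, A and B are contiguous and mem-hashable,
// so they coalesce into one 8-byte segment hashed by a single HashBytes call,
// while S falls through to the string hasher.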

func makeMapHasher(t reflect.Type) typeHasherFunc {
	var once sync.Once
	var hashKey, hashValue typeHasherFunc
	var isRecursive bool
	init := func() {
		hashKey = lookupTypeHasher(t.Key())
		hashValue = lookupTypeHasher(t.Elem())
		isRecursive = typeIsRecursive(t)
	}

	return func(h *hasher, p pointer) {
		v := p.asValue(t).Elem() // reflect.Map kind
		if v.IsNil() {
			h.HashUint8(0) // indicates nil
			return
		}
		once.Do(init)
		if isRecursive {
			pm := v.UnsafePointer() // underlying pointer of map
			if idx, ok := h.visitStack.seen(pm); ok {
				h.HashUint8(2) // indicates cycle
				h.HashUint64(uint64(idx))
				return
			}
			h.visitStack.push(pm)
			defer h.visitStack.pop(pm)
		}
		h.HashUint8(1) // indicates visiting map entries
		h.HashUint64(uint64(v.Len()))

		mh := mapHasherPool.Get().(*mapHasher)
		defer mapHasherPool.Put(mh)

		// Hash a map in a sort-free manner.
		// It relies on a map being an unordered set of KV entries.
		// So long as we hash each KV entry together, we can XOR all the
		// individual hashes to produce a unique hash for the entire map.
		k := mh.valKey.get(v.Type().Key())
		e := mh.valElem.get(v.Type().Elem())
		mh.sum = Sum{}
		mh.h.visitStack = h.visitStack // always use the parent's visit stack to avoid cycles
		for iter := v.MapRange(); iter.Next(); {
			k.SetIterKey(iter)
			e.SetIterValue(iter)
			mh.h.reset()
			hashKey(&mh.h, pointerOf(k.Addr()))
			hashValue(&mh.h, pointerOf(e.Addr()))
			mh.sum.xor(mh.h.sum())
		}
		h.HashBytes(mh.sum.sum[:])
	}
}
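
// Illustrative note (an editor's reading of the loop above): for
// map[string]int{"a": 1, "b": 2}, hash("a", 1) and hash("b", 2) are computed
// independently and combined by XOR; since XOR is commutative and
// associative, Go's randomized map iteration order cannot change the result.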

func makePointerHasher(t reflect.Type) typeHasherFunc {
	var once sync.Once
	var hashElem typeHasherFunc
	var isRecursive bool
	init := func() {
		hashElem = lookupTypeHasher(t.Elem())
		isRecursive = typeIsRecursive(t)
	}
	return func(h *hasher, p pointer) {
		pe := p.pointerElem()
		if pe.isNil() {
			h.HashUint8(0) // indicates nil
			return
		}
		once.Do(init)
		if isRecursive {
			if idx, ok := h.visitStack.seen(pe.p); ok {
				h.HashUint8(2) // indicates cycle
				h.HashUint64(uint64(idx))
				return
			}
			h.visitStack.push(pe.p)
			defer h.visitStack.pop(pe.p)
		}
		h.HashUint8(1) // indicates visiting a pointer element
		hashElem(h, pe)
	}
}

func makeInterfaceHasher(t reflect.Type) typeHasherFunc {
	return func(h *hasher, p pointer) {
		v := p.asValue(t).Elem() // reflect.Interface kind
		if v.IsNil() {
			h.HashUint8(0) // indicates nil
			return
		}
		h.HashUint8(1) // indicates visiting an interface value
		v = v.Elem()
		t := v.Type()
		h.hashType(t)
		va := reflect.New(t).Elem()
		va.Set(v)
		hashElem := lookupTypeHasher(t)
		hashElem(h, pointerOf(va.Addr()))
	}
}
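
// Note (an editor's reading of makeInterfaceHasher): the reflect.New copy is
// needed because a value unpacked from an interface via reflect is not
// addressable, while every typeHasherFunc expects a pointer to the value it
// hashes; va.Addr() supplies that pointer.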

type mapHasher struct {
	h       hasher
	valKey  valueCache
	valElem valueCache
	sum     Sum
}

var mapHasherPool = &sync.Pool{
	New: func() any { return new(mapHasher) },
}

type valueCache map[reflect.Type]reflect.Value

// get returns an addressable reflect.Value for the given type.
func (c *valueCache) get(t reflect.Type) reflect.Value {
	v, ok := (*c)[t]
	if !ok {
		v = reflect.New(t).Elem()
		if *c == nil {
			*c = make(valueCache)
		}
		(*c)[t] = v
	}
	return v
}
114 vendor/tailscale.com/util/deephash/pointer.go generated vendored Normal file
@@ -0,0 +1,114 @@
// Copyright (c) Tailscale Inc & AUTHORS
// SPDX-License-Identifier: BSD-3-Clause

package deephash

import (
	"net/netip"
	"reflect"
	"time"
	"unsafe"
)

// unsafePointer is an untyped pointer.
// It is the caller's responsibility to call operations on the correct type.
//
// This pointer only ever points to a small set of kinds or types:
// time.Time, netip.Addr, string, array, slice, struct, map, pointer, interface,
// or a pointer to memory that is directly hashable.
//
// Arrays are represented as pointers to the first element.
// Structs are represented as pointers to the first field.
// Slices are represented as pointers to a slice header.
// Pointers are represented as pointers to a pointer.
//
// We do not support direct operations on maps and interfaces, and instead
// rely on pointer.asValue to convert the pointer back to a reflect.Value.
// Conversion of an unsafe.Pointer to reflect.Value guarantees that the
// read-only flag in the reflect.Value is unpopulated, avoiding panics that may
// otherwise have occurred since the value was obtained from an unexported field.
type unsafePointer struct{ p unsafe.Pointer }

func unsafePointerOf(v reflect.Value) unsafePointer {
	return unsafePointer{v.UnsafePointer()}
}
func (p unsafePointer) isNil() bool {
	return p.p == nil
}

// pointerElem dereferences a pointer.
// p must point to a pointer.
func (p unsafePointer) pointerElem() unsafePointer {
	return unsafePointer{*(*unsafe.Pointer)(p.p)}
}

// sliceLen returns the slice length.
// p must point to a slice.
func (p unsafePointer) sliceLen() int {
	return (*reflect.SliceHeader)(p.p).Len
}

// sliceArray returns a pointer to the underlying slice array.
// p must point to a slice.
func (p unsafePointer) sliceArray() unsafePointer {
	return unsafePointer{unsafe.Pointer((*reflect.SliceHeader)(p.p).Data)}
}

// arrayIndex returns a pointer to an element in the array.
// p must point to an array.
func (p unsafePointer) arrayIndex(index int, size uintptr) unsafePointer {
	return unsafePointer{unsafe.Add(p.p, uintptr(index)*size)}
}

// structField returns a pointer to a field in a struct.
// p must point to a struct.
func (p unsafePointer) structField(index int, offset, size uintptr) unsafePointer {
	return unsafePointer{unsafe.Add(p.p, offset)}
}

// asString casts p as a *string.
func (p unsafePointer) asString() *string {
	return (*string)(p.p)
}

// asTime casts p as a *time.Time.
func (p unsafePointer) asTime() *time.Time {
	return (*time.Time)(p.p)
}

// asAddr casts p as a *netip.Addr.
func (p unsafePointer) asAddr() *netip.Addr {
	return (*netip.Addr)(p.p)
}

// asValue casts p as a reflect.Value containing a pointer to a value of typ.
func (p unsafePointer) asValue(typ reflect.Type) reflect.Value {
	return reflect.NewAt(typ, p.p)
}

// asMemory returns the memory pointed at by p for a specified size.
func (p unsafePointer) asMemory(size uintptr) []byte {
	return unsafe.Slice((*byte)(p.p), size)
}

// visitStack is a stack of pointers visited.
// Pointers are pushed onto the stack when visited, and popped when leaving.
// The integer value is the depth at which the pointer was visited.
// The length of this stack should be zero after every hashing operation.
type visitStack map[unsafe.Pointer]int

func (v visitStack) seen(p unsafe.Pointer) (int, bool) {
	idx, ok := v[p]
	return idx, ok
}

func (v *visitStack) push(p unsafe.Pointer) {
	if *v == nil {
		*v = make(map[unsafe.Pointer]int)
	}
	(*v)[p] = len(*v)
}

func (v visitStack) pop(p unsafe.Pointer) {
	delete(v, p)
}
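
// Illustrative trace (an editor's reading of the stack above): pushing p1
// then p2 yields {p1: 0, p2: 1}; if p1 is encountered again while still on
// the stack, seen(p1) reports (0, true) and the caller emits the cycle
// marker 2 followed by the index 0 instead of revisiting p1.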
13 vendor/tailscale.com/util/deephash/pointer_norace.go generated vendored Normal file
@@ -0,0 +1,13 @@
// Copyright (c) Tailscale Inc & AUTHORS
// SPDX-License-Identifier: BSD-3-Clause

//go:build !race

package deephash

import "reflect"

type pointer = unsafePointer

// pointerOf returns a pointer from v, which must be a reflect.Pointer.
func pointerOf(v reflect.Value) pointer { return unsafePointerOf(v) }
99 vendor/tailscale.com/util/deephash/pointer_race.go generated vendored Normal file
@@ -0,0 +1,99 @@
// Copyright (c) Tailscale Inc & AUTHORS
// SPDX-License-Identifier: BSD-3-Clause

//go:build race

package deephash

import (
	"fmt"
	"net/netip"
	"reflect"
	"time"
)

// pointer is a typed pointer that performs safety checks for every operation.
type pointer struct {
	unsafePointer
	t reflect.Type // type of pointed-at value; may be nil
	n uintptr      // size of valid memory after p
}

// pointerOf returns a pointer from v, which must be a reflect.Pointer.
func pointerOf(v reflect.Value) pointer {
	assert(v.Kind() == reflect.Pointer, "got %v, want pointer", v.Kind())
	te := v.Type().Elem()
	return pointer{unsafePointerOf(v), te, te.Size()}
}

func (p pointer) pointerElem() pointer {
	assert(p.t.Kind() == reflect.Pointer, "got %v, want pointer", p.t.Kind())
	te := p.t.Elem()
	return pointer{p.unsafePointer.pointerElem(), te, te.Size()}
}

func (p pointer) sliceLen() int {
	assert(p.t.Kind() == reflect.Slice, "got %v, want slice", p.t.Kind())
	return p.unsafePointer.sliceLen()
}

func (p pointer) sliceArray() pointer {
	assert(p.t.Kind() == reflect.Slice, "got %v, want slice", p.t.Kind())
	n := p.sliceLen()
	assert(n >= 0, "got negative slice length %d", n)
	ta := reflect.ArrayOf(n, p.t.Elem())
	return pointer{p.unsafePointer.sliceArray(), ta, ta.Size()}
}

func (p pointer) arrayIndex(index int, size uintptr) pointer {
	assert(p.t.Kind() == reflect.Array, "got %v, want array", p.t.Kind())
	assert(0 <= index && index < p.t.Len(), "got array of size %d, want to access element %d", p.t.Len(), index)
	assert(p.t.Elem().Size() == size, "got element size of %d, want %d", p.t.Elem().Size(), size)
	te := p.t.Elem()
	return pointer{p.unsafePointer.arrayIndex(index, size), te, te.Size()}
}

func (p pointer) structField(index int, offset, size uintptr) pointer {
	assert(p.t.Kind() == reflect.Struct, "got %v, want struct", p.t.Kind())
	assert(p.n >= offset, "got size of %d, want excessive start offset of %d", p.n, offset)
	assert(p.n >= offset+size, "got size of %d, want excessive end offset of %d", p.n, offset+size)
	if index < 0 {
		return pointer{p.unsafePointer.structField(index, offset, size), nil, size}
	}
	sf := p.t.Field(index)
	t := sf.Type
	assert(sf.Offset == offset, "got offset of %d, want offset %d", sf.Offset, offset)
	assert(t.Size() == size, "got size of %d, want size %d", t.Size(), size)
	return pointer{p.unsafePointer.structField(index, offset, size), t, t.Size()}
}

func (p pointer) asString() *string {
	assert(p.t.Kind() == reflect.String, "got %v, want string", p.t)
	return p.unsafePointer.asString()
}

func (p pointer) asTime() *time.Time {
	assert(p.t == timeTimeType, "got %v, want %v", p.t, timeTimeType)
	return p.unsafePointer.asTime()
}

func (p pointer) asAddr() *netip.Addr {
	assert(p.t == netipAddrType, "got %v, want %v", p.t, netipAddrType)
	return p.unsafePointer.asAddr()
}

func (p pointer) asValue(typ reflect.Type) reflect.Value {
	assert(p.t == typ, "got %v, want %v", p.t, typ)
	return p.unsafePointer.asValue(typ)
}

func (p pointer) asMemory(size uintptr) []byte {
	assert(p.n >= size, "got size of %d, want excessive size of %d", p.n, size)
	return p.unsafePointer.asMemory(size)
}

func assert(b bool, f string, a ...any) {
	if !b {
		panic(fmt.Sprintf(f, a...))
	}
}
117 vendor/tailscale.com/util/deephash/types.go generated vendored Normal file
@@ -0,0 +1,117 @@
// Copyright (c) Tailscale Inc & AUTHORS
// SPDX-License-Identifier: BSD-3-Clause

package deephash

import (
	"net/netip"
	"reflect"
	"time"
)

var (
	timeTimeType   = reflect.TypeFor[time.Time]()
	netipAddrType  = reflect.TypeFor[netip.Addr]()
	selfHasherType = reflect.TypeFor[SelfHasher]()
)

// typeIsSpecialized reports whether this type has specialized hashing.
// These are never memory hashable and never considered recursive.
func typeIsSpecialized(t reflect.Type) bool {
	switch t {
	case timeTimeType, netipAddrType:
		return true
	default:
		if t.Kind() != reflect.Pointer && t.Kind() != reflect.Interface {
			if t.Implements(selfHasherType) || reflect.PointerTo(t).Implements(selfHasherType) {
				return true
			}
		}
		return false
	}
}

// typeIsMemHashable reports whether t can be hashed by directly hashing its
// contiguous bytes in memory (e.g. structs with gaps are not mem-hashable).
func typeIsMemHashable(t reflect.Type) bool {
	if typeIsSpecialized(t) {
		return false
	}
	if t.Size() == 0 {
		return true
	}
	switch t.Kind() {
	case reflect.Bool,
		reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64,
		reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr,
		reflect.Float32, reflect.Float64,
		reflect.Complex64, reflect.Complex128:
		return true
	case reflect.Array:
		return typeIsMemHashable(t.Elem())
	case reflect.Struct:
		var sumFieldSize uintptr
		for i, numField := 0, t.NumField(); i < numField; i++ {
			sf := t.Field(i)
			if !typeIsMemHashable(sf.Type) {
				return false
			}
			sumFieldSize += sf.Type.Size()
		}
		return sumFieldSize == t.Size() // ensure no gaps
	}
	return false
}
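
// Illustrative examples (an editor's sketch, assuming a typical 64-bit
// platform): struct{ A, B uint32 } is mem-hashable (4+4 bytes, no padding),
// but struct{ A uint8; B uint64 } is not: alignment pads it to 16 bytes while
// its fields sum to 9, and the padding bytes have unspecified contents.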

// typeIsRecursive reports whether t has a path back to itself.
// For interfaces, it currently always reports true.
func typeIsRecursive(t reflect.Type) bool {
	inStack := map[reflect.Type]bool{}
	var visitType func(t reflect.Type) (isRecursiveSoFar bool)
	visitType = func(t reflect.Type) (isRecursiveSoFar bool) {
		// Check whether we have seen this type before.
		if inStack[t] {
			return true
		}
		inStack[t] = true
		defer func() {
			delete(inStack, t)
		}()

		// Types with specialized hashing are never considered recursive.
		if typeIsSpecialized(t) {
			return false
		}

		// Any type that is memory hashable must not be recursive since
		// cycles can only occur if pointers are involved.
		if typeIsMemHashable(t) {
			return false
		}

		// Recursively check types that may contain pointers.
		switch t.Kind() {
		default:
			panic("unhandled kind " + t.Kind().String())
		case reflect.String, reflect.UnsafePointer, reflect.Func:
			return false
		case reflect.Interface:
			// Assume the worst for now. TODO(bradfitz): in some cases
			// we should be able to prove that it's not recursive. Not worth
			// it for now.
			return true
		case reflect.Array, reflect.Chan, reflect.Pointer, reflect.Slice:
			return visitType(t.Elem())
		case reflect.Map:
			return visitType(t.Key()) || visitType(t.Elem())
		case reflect.Struct:
			for i, numField := 0, t.NumField(); i < numField; i++ {
				if visitType(t.Field(i).Type) {
					return true
				}
			}
			return false
		}
	}
	return visitType(t)
}
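
// Illustrative examples (an editor's sketch, not part of the upstream file):
// type List struct{ Next *List } is recursive, since Next points back to
// List; struct{ Names []string } is not, since string contains no pointer
// path back to the struct; and any interface field makes the enclosing type
// count as recursive under the conservative rule above.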