This commit is contained in:
2026-02-19 10:07:43 +00:00
parent 007438e372
commit 6e637ecf77
1763 changed files with 60820 additions and 279516 deletions

23
vendor/tailscale.com/tka/aum.go generated vendored
View File

@@ -1,6 +1,8 @@
// Copyright (c) Tailscale Inc & AUTHORS
// SPDX-License-Identifier: BSD-3-Clause
//go:build !ts_omit_tailnetlock
package tka
import (
@@ -29,8 +31,8 @@ func (h AUMHash) String() string {
// UnmarshalText implements encoding.TextUnmarshaler.
func (h *AUMHash) UnmarshalText(text []byte) error {
if l := base32StdNoPad.DecodedLen(len(text)); l != len(h) {
return fmt.Errorf("tka.AUMHash.UnmarshalText: text wrong length: %d, want %d", l, len(text))
if ln := base32StdNoPad.DecodedLen(len(text)); ln != len(h) {
return fmt.Errorf("tka.AUMHash.UnmarshalText: text wrong length: %d, want %d", ln, len(text))
}
if _, err := base32StdNoPad.Decode(h[:], text); err != nil {
return fmt.Errorf("tka.AUMHash.UnmarshalText: %w", err)
@@ -53,6 +55,17 @@ func (h AUMHash) IsZero() bool {
return h == (AUMHash{})
}
// PrevAUMHash represents the BLAKE2s digest of an Authority Update Message (AUM).
// Unlike an AUMHash, this can be empty if there is no previous AUM hash
// (which occurs in the genesis AUM).
type PrevAUMHash []byte

// String returns the PrevAUMHash encoded as base32.
// This is suitable for use as a filename, and for storing in text-preferred media.
func (h PrevAUMHash) String() string {
	// PrevAUMHash is already a byte slice, so it can be passed to the
	// encoder directly without re-slicing.
	return base32StdNoPad.EncodeToString(h)
}
// AUMKind describes valid AUM types.
type AUMKind uint8
@@ -117,8 +130,8 @@ func (k AUMKind) String() string {
// behavior of old clients (which will ignore the field).
// - No floats!
type AUM struct {
MessageKind AUMKind `cbor:"1,keyasint"`
PrevAUMHash []byte `cbor:"2,keyasint"`
MessageKind AUMKind `cbor:"1,keyasint"`
PrevAUMHash PrevAUMHash `cbor:"2,keyasint"`
// Key encodes a public key to be added to the key authority.
// This field is used for AddKey AUMs.
@@ -224,7 +237,7 @@ func (a *AUM) Serialize() tkatype.MarshaledAUM {
// Further, experience with other attempts (JWS/JWT,SAML,X509 etc) has
// taught us that even subtle behaviors such as how you handle invalid
// or unrecognized fields + any invariants in subsequent re-serialization
// can easily lead to security-relevant logic bugs. Its certainly possible
// can easily lead to security-relevant logic bugs. It's certainly possible
// to invent a workable scheme by massaging a JSON parsing library, though
// profoundly unwise.
//

11
vendor/tailscale.com/tka/builder.go generated vendored
View File

@@ -1,6 +1,8 @@
// Copyright (c) Tailscale Inc & AUTHORS
// SPDX-License-Identifier: BSD-3-Clause
//go:build !ts_omit_tailnetlock
package tka
import (
@@ -67,6 +69,11 @@ func (b *UpdateBuilder) AddKey(key Key) error {
if _, err := b.state.GetKey(keyID); err == nil {
return fmt.Errorf("cannot add key %v: already exists", key)
}
if len(b.state.Keys) >= maxKeys {
return fmt.Errorf("cannot add key %v: maximum number of keys reached", key)
}
return b.mkUpdate(AUM{MessageKind: AUMAddKey, Key: &key})
}
@@ -107,7 +114,7 @@ func (b *UpdateBuilder) generateCheckpoint() error {
}
}
// Checkpoints cant specify a parent AUM.
// Checkpoints can't specify a parent AUM.
state.LastAUMHash = nil
return b.mkUpdate(AUM{MessageKind: AUMCheckpoint, State: &state})
}
@@ -129,7 +136,7 @@ func (b *UpdateBuilder) Finalize(storage Chonk) ([]AUM, error) {
needCheckpoint = false
break
}
return nil, fmt.Errorf("reading AUM: %v", err)
return nil, fmt.Errorf("reading AUM (%v): %v", cursor, err)
}
if aum.MessageKind == AUMCheckpoint {

View File

@@ -1,6 +1,8 @@
// Copyright (c) Tailscale Inc & AUTHORS
// SPDX-License-Identifier: BSD-3-Clause
//go:build !ts_omit_tailnetlock
package tka
import (

160
vendor/tailscale.com/tka/disabled_stub.go generated vendored Normal file
View File

@@ -0,0 +1,160 @@
// Copyright (c) Tailscale Inc & AUTHORS
// SPDX-License-Identifier: BSD-3-Clause
//go:build ts_omit_tailnetlock
package tka
import (
"crypto/ed25519"
"errors"
"tailscale.com/types/key"
"tailscale.com/types/logger"
"tailscale.com/types/tkatype"
)
// Authority is a stub of the full tka.Authority, present when Tailnet Lock
// support is compiled out of this binary (build tag ts_omit_tailnetlock).
type Authority struct {
	head           AUM
	oldestAncestor AUM
	state          State
}

// Head returns the zero AUMHash; Tailnet Lock support is compiled out,
// so there is never a real chain head.
func (*Authority) Head() AUMHash { return AUMHash{} }
// MarshalText implements encoding.TextMarshaler with a fixed placeholder,
// since Tailnet Lock is not compiled in to this binary.
//
// A text marshaller is still required because AUMHash is included in
// [netmap.NetworkMap], which gets serialised as JSON in the
// c2n /debug/netmap endpoint. Emitting a placeholder keeps that endpoint
// working on nodes that omit Tailnet Lock support, without pulling in the
// base32 dependency used by the regular marshaller. Unmarshalling support
// was not needed at time of writing (2025-11-18).
func (h AUMHash) MarshalText() ([]byte, error) {
	const placeholder = "<tailnet-lock-omitted>"
	return []byte(placeholder), nil
}

// UnmarshalText implements encoding.TextUnmarshaler by always returning
// an error, since Tailnet Lock is not compiled in to this binary.
func (h *AUMHash) UnmarshalText(text []byte) error {
	err := errors.New("tailnet lock is not supported by this binary")
	return err
}
// State is an empty stub of the full tka.State; Tailnet Lock support
// is compiled out of this binary.
type State struct{}

// AUMKind describes valid AUM types.
type AUMKind uint8

// AUMHash represents the BLAKE2s digest of an Authority Update Message (AUM).
type AUMHash [32]byte
// AUM mirrors the wire format of an Authority Update Message from the full
// implementation; the CBOR field numbers here must stay identical to it.
type AUM struct {
	MessageKind AUMKind `cbor:"1,keyasint"`
	PrevAUMHash []byte  `cbor:"2,keyasint"`

	// Key encodes a public key to be added to the key authority.
	// This field is used for AddKey AUMs.
	Key *Key `cbor:"3,keyasint,omitempty"`

	// KeyID references a public key which is part of the key authority.
	// This field is used for RemoveKey and UpdateKey AUMs.
	KeyID tkatype.KeyID `cbor:"4,keyasint,omitempty"`

	// State describes the full state of the key authority.
	// This field is used for Checkpoint AUMs.
	State *State `cbor:"5,keyasint,omitempty"`

	// Votes and Meta describe properties of a key in the key authority.
	// These fields are used for UpdateKey AUMs.
	Votes *uint             `cbor:"6,keyasint,omitempty"`
	Meta  map[string]string `cbor:"7,keyasint,omitempty"`

	// Signatures lists the signatures over this AUM.
	// CBOR key 23 is the last key which can be encoded as a single byte.
	Signatures []tkatype.Signature `cbor:"23,keyasint,omitempty"`
}
// Chonk implementations provide durable storage for AUMs and other
// TKA state.
type Chonk interface {
	// AUM returns the AUM with the specified digest.
	//
	// If the AUM does not exist, then os.ErrNotExist is returned.
	AUM(hash AUMHash) (AUM, error)

	// ChildAUMs returns all AUMs with a specified previous
	// AUM hash.
	ChildAUMs(prevAUMHash AUMHash) ([]AUM, error)

	// CommitVerifiedAUMs durably stores the provided AUMs.
	// Callers MUST ONLY provide AUMs which are verified (specifically,
	// a call to aumVerify() must return a nil error), as the
	// implementation assumes that only verified AUMs are stored.
	CommitVerifiedAUMs(updates []AUM) error

	// Heads returns AUMs for which there are no children. In other
	// words, the latest AUM in all possible chains (the 'leaves').
	Heads() ([]AUM, error)

	// SetLastActiveAncestor is called to record the oldest-known AUM
	// that contributed to the current state. This value is used as
	// a hint on next startup to determine which chain to pick when computing
	// the current state, if there are multiple distinct chains.
	SetLastActiveAncestor(hash AUMHash) error

	// LastActiveAncestor returns the oldest-known AUM that was (in a
	// previous run) an ancestor of the current state. This is used
	// as a hint to pick the correct chain in the event that the Chonk stores
	// multiple distinct chains.
	LastActiveAncestor() (*AUMHash, error)
}
// SigKind describes valid NodeKeySignature types.
type SigKind uint8

// NodeKeySignature describes a signature over a node's public key, along
// with any nested signature or wrapping key needed to verify it.
type NodeKeySignature struct {
	// SigKind identifies the variety of signature.
	SigKind SigKind `cbor:"1,keyasint"`

	// Pubkey identifies the key.NodePublic which is being authorized.
	// SigCredential signatures do not use this field.
	Pubkey []byte `cbor:"2,keyasint,omitempty"`

	// KeyID identifies which key in the tailnet key authority should
	// be used to verify this signature. Only set for SigDirect and
	// SigCredential signature kinds.
	KeyID []byte `cbor:"3,keyasint,omitempty"`

	// Signature is the packed (R, S) ed25519 signature over all other
	// fields of the structure.
	Signature []byte `cbor:"4,keyasint,omitempty"`

	// Nested describes a NodeKeySignature which authorizes the node-key
	// used as Pubkey. Only used for SigRotation signatures.
	Nested *NodeKeySignature `cbor:"5,keyasint,omitempty"`

	// WrappingPubkey specifies the ed25519 public key which must be used
	// to sign a Signature which embeds this one.
	//
	// For SigRotation signatures multiple levels deep, intermediate
	// signatures may omit this value, in which case the parent WrappingPubkey
	// is used.
	//
	// SigCredential signatures use this field to specify the public key
	// they are certifying, following the usual semantics for WrappingPubkey.
	WrappingPubkey []byte `cbor:"6,keyasint,omitempty"`
}
// DeeplinkValidationResult is an empty stub; Tailnet Lock support is
// compiled out of this binary.
type DeeplinkValidationResult struct {
}

// DecodeWrappedAuthkey is a stub which treats every auth key as unwrapped:
// it returns the input unchanged, with no signature or private key.
func DecodeWrappedAuthkey(wrappedAuthKey string, logf logger.Logf) (authKey string, isWrapped bool, sig *NodeKeySignature, priv ed25519.PrivateKey) {
	return wrappedAuthKey, false, nil, nil
}

// ResignNKS is a stub which reports success without producing a signature.
func ResignNKS(priv key.NLPrivate, nodeKey key.NodePublic, oldNKS tkatype.MarshaledSignature) (tkatype.MarshaledSignature, error) {
	return nil, nil
}

// SignByCredential is a stub which reports success without producing a signature.
func SignByCredential(privKey []byte, wrapped *NodeKeySignature, nodeKey key.NodePublic) (tkatype.MarshaledSignature, error) {
	return nil, nil
}

// String returns the empty string; Tailnet Lock support is compiled out.
func (s NodeKeySignature) String() string { return "" }

22
vendor/tailscale.com/tka/key.go generated vendored
View File

@@ -8,7 +8,6 @@ import (
"errors"
"fmt"
"github.com/hdevalence/ed25519consensus"
"tailscale.com/types/tkatype"
)
@@ -136,24 +135,3 @@ func (k Key) StaticValidate() error {
}
return nil
}
// signatureVerify returns a nil error if the signature is valid over the
// provided AUM BLAKE2s digest, using the given key.
func signatureVerify(s *tkatype.Signature, aumDigest tkatype.AUMSigHash, key Key) error {
	// NOTE(tom): Even if we can compute the public from the KeyID,
	// it's possible for the KeyID to be attacker-controlled
	// so we should use the public contained in the state machine.
	switch key.Kind {
	case Key25519:
		// Guard against malformed keys before handing them to the verifier.
		if len(key.Public) != ed25519.PublicKeySize {
			return fmt.Errorf("ed25519 key has wrong length: %d", len(key.Public))
		}
		if ed25519consensus.Verify(ed25519.PublicKey(key.Public), aumDigest[:], s.Signature) {
			return nil
		}
		return errors.New("invalid signature")
	default:
		return fmt.Errorf("unhandled key type: %v", key.Kind)
	}
}

4
vendor/tailscale.com/tka/sig.go generated vendored
View File

@@ -1,6 +1,8 @@
// Copyright (c) Tailscale Inc & AUTHORS
// SPDX-License-Identifier: BSD-3-Clause
//go:build !ts_omit_tailnetlock
package tka
import (
@@ -275,7 +277,7 @@ func (s *NodeKeySignature) verifySignature(nodeKey key.NodePublic, verificationK
// Recurse to verify the signature on the nested structure.
var nestedPub key.NodePublic
// SigCredential signatures certify an indirection key rather than a node
// key, so theres no need to check the node key.
// key, so there's no need to check the node key.
if s.Nested.SigKind != SigCredential {
if err := nestedPub.UnmarshalBinary(s.Nested.Pubkey); err != nil {
return fmt.Errorf("nested pubkey: %v", err)

4
vendor/tailscale.com/tka/state.go generated vendored
View File

@@ -1,6 +1,8 @@
// Copyright (c) Tailscale Inc & AUTHORS
// SPDX-License-Identifier: BSD-3-Clause
//go:build !ts_omit_tailnetlock
package tka
import (
@@ -138,7 +140,7 @@ func (s State) checkDisablement(secret []byte) bool {
// Specifically, the rules are:
// - The last AUM hash must match (transitively, this implies that this
// update follows the last update message applied to the state machine)
// - Or, the state machine knows no parent (its brand new).
// - Or, the state machine knows no parent (it's brand new).
func (s State) parentMatches(update AUM) bool {
if s.LastAUMHash == nil {
return true

43
vendor/tailscale.com/tka/sync.go generated vendored
View File

@@ -1,6 +1,8 @@
// Copyright (c) Tailscale Inc & AUTHORS
// SPDX-License-Identifier: BSD-3-Clause
//go:build !ts_omit_tailnetlock
package tka
import (
@@ -30,6 +32,41 @@ type SyncOffer struct {
Ancestors []AUMHash
}
// ToSyncOffer creates a SyncOffer from the fields received in
// a [tailcfg.TKASyncOfferRequest].
func ToSyncOffer(head string, ancestors []string) (SyncOffer, error) {
	var offer SyncOffer
	if err := offer.Head.UnmarshalText([]byte(head)); err != nil {
		return SyncOffer{}, fmt.Errorf("head.UnmarshalText: %v", err)
	}
	// Decode each ancestor hash in place.
	offer.Ancestors = make([]AUMHash, len(ancestors))
	for i := range ancestors {
		if err := offer.Ancestors[i].UnmarshalText([]byte(ancestors[i])); err != nil {
			return SyncOffer{}, fmt.Errorf("ancestor[%d].UnmarshalText: %v", i, err)
		}
	}
	return offer, nil
}
// FromSyncOffer marshals the fields of a SyncOffer so they can be
// sent in a [tailcfg.TKASyncOfferRequest].
func FromSyncOffer(offer SyncOffer) (head string, ancestors []string, err error) {
	hb, err := offer.Head.MarshalText()
	if err != nil {
		return "", nil, fmt.Errorf("head.MarshalText: %v", err)
	}
	head = string(hb)
	// Encode each ancestor hash to its text form.
	ancestors = make([]string, len(offer.Ancestors))
	for i := range offer.Ancestors {
		ab, err := offer.Ancestors[i].MarshalText()
		if err != nil {
			return "", nil, fmt.Errorf("ancestor[%d].MarshalText: %v", i, err)
		}
		ancestors[i] = string(ab)
	}
	return head, ancestors, nil
}
const (
// The starting number of AUMs to skip when listing
// ancestors in a SyncOffer.
@@ -52,7 +89,7 @@ const (
// can then be applied locally with Inform().
//
// This SyncOffer + AUM exchange should be performed by both ends,
// because its possible that either end has AUMs that the other needs
// because it's possible that either end has AUMs that the other needs
// to find out about.
func (a *Authority) SyncOffer(storage Chonk) (SyncOffer, error) {
oldest := a.oldestAncestor.Hash()
@@ -121,7 +158,7 @@ func computeSyncIntersection(storage Chonk, localOffer, remoteOffer SyncOffer) (
}
// Case: 'head intersection'
// If we have the remote's head, its more likely than not that
// If we have the remote's head, it's more likely than not that
// we have updates that build on that head. To confirm this,
// we iterate backwards through our chain to see if the given
// head is an ancestor of our current chain.
@@ -163,7 +200,7 @@ func computeSyncIntersection(storage Chonk, localOffer, remoteOffer SyncOffer) (
// Case: 'tail intersection'
// So we don't have a clue what the remote's head is, but
// if one of the ancestors they gave us is part of our chain,
// then theres an intersection, which is a starting point for
// then there's an intersection, which is a starting point for
// the remote to send us AUMs from.
//
// We iterate the list of ancestors in order because the remote

319
vendor/tailscale.com/tka/tailchonk.go generated vendored
View File

@@ -1,19 +1,26 @@
// Copyright (c) Tailscale Inc & AUTHORS
// SPDX-License-Identifier: BSD-3-Clause
//go:build !ts_omit_tailnetlock
package tka
import (
"bytes"
"errors"
"fmt"
"log"
"maps"
"os"
"path/filepath"
"slices"
"sync"
"time"
"github.com/fxamacker/cbor/v2"
"tailscale.com/atomicfile"
"tailscale.com/tstime"
"tailscale.com/util/testenv"
)
// Chonk implementations provide durable storage for AUMs and other
@@ -71,38 +78,69 @@ type CompactableChonk interface {
// PurgeAUMs permanently and irrevocably deletes the specified
// AUMs from storage.
PurgeAUMs(hashes []AUMHash) error
// RemoveAll permanently and completely clears the TKA state. This should
// be called when the user disables Tailnet Lock.
RemoveAll() error
}
// Mem implements in-memory storage of TKA state, suitable for
// tests.
// tests or cases where filesystem storage is unavailable.
//
// Mem implements the Chonk interface.
//
// Mem is thread-safe.
type Mem struct {
l sync.RWMutex
mu sync.RWMutex
aums map[AUMHash]AUM
commitTimes map[AUMHash]time.Time
clock tstime.Clock
// parentIndex is a map of AUMs to the AUMs for which they are
// the parent.
//
// For example, if parent index is {1 -> {2, 3, 4}}, that means
// that AUMs 2, 3, 4 all have aum.PrevAUMHash = 1.
parentIndex map[AUMHash][]AUMHash
lastActiveAncestor *AUMHash
}
// ChonkMem returns an implementation of Chonk which stores TKA state
// in-memory, using the default wall clock.
func ChonkMem() *Mem {
	m := new(Mem)
	m.clock = tstime.DefaultClock{}
	return m
}
// SetClock sets the clock used by [Mem]. This is only for use in tests,
// and will panic if called from non-test code.
func (c *Mem) SetClock(clock tstime.Clock) {
	if testenv.InTest() {
		c.clock = clock
		return
	}
	panic("used SetClock in non-test code")
}
func (c *Mem) SetLastActiveAncestor(hash AUMHash) error {
c.l.Lock()
defer c.l.Unlock()
c.mu.Lock()
defer c.mu.Unlock()
c.lastActiveAncestor = &hash
return nil
}
func (c *Mem) LastActiveAncestor() (*AUMHash, error) {
c.l.RLock()
defer c.l.RUnlock()
c.mu.RLock()
defer c.mu.RUnlock()
return c.lastActiveAncestor, nil
}
// Heads returns AUMs for which there are no children. In other
// words, the latest AUM in all chains (the 'leaf').
func (c *Mem) Heads() ([]AUM, error) {
c.l.RLock()
defer c.l.RUnlock()
c.mu.RLock()
defer c.mu.RUnlock()
out := make([]AUM, 0, 6)
// An AUM is a 'head' if there are no nodes for which it is the parent.
@@ -116,8 +154,8 @@ func (c *Mem) Heads() ([]AUM, error) {
// AUM returns the AUM with the specified digest.
func (c *Mem) AUM(hash AUMHash) (AUM, error) {
c.l.RLock()
defer c.l.RUnlock()
c.mu.RLock()
defer c.mu.RUnlock()
aum, ok := c.aums[hash]
if !ok {
return AUM{}, os.ErrNotExist
@@ -125,24 +163,11 @@ func (c *Mem) AUM(hash AUMHash) (AUM, error) {
return aum, nil
}
// Orphans returns all AUMs which do not have a parent.
func (c *Mem) Orphans() ([]AUM, error) {
c.l.RLock()
defer c.l.RUnlock()
out := make([]AUM, 0, 6)
for _, a := range c.aums {
if _, ok := a.Parent(); !ok {
out = append(out, a)
}
}
return out, nil
}
// ChildAUMs returns all AUMs with a specified previous
// AUM hash.
func (c *Mem) ChildAUMs(prevAUMHash AUMHash) ([]AUM, error) {
c.l.RLock()
defer c.l.RUnlock()
c.mu.RLock()
defer c.mu.RUnlock()
out := make([]AUM, 0, 6)
for _, entry := range c.parentIndex[prevAUMHash] {
out = append(out, c.aums[entry])
@@ -156,17 +181,19 @@ func (c *Mem) ChildAUMs(prevAUMHash AUMHash) ([]AUM, error) {
// as the rest of the TKA implementation assumes that only
// verified AUMs are stored.
func (c *Mem) CommitVerifiedAUMs(updates []AUM) error {
c.l.Lock()
defer c.l.Unlock()
c.mu.Lock()
defer c.mu.Unlock()
if c.aums == nil {
c.parentIndex = make(map[AUMHash][]AUMHash, 64)
c.aums = make(map[AUMHash]AUM, 64)
c.commitTimes = make(map[AUMHash]time.Time, 64)
}
updateLoop:
for _, aum := range updates {
aumHash := aum.Hash()
c.aums[aumHash] = aum
c.commitTimes[aumHash] = c.now()
parent, ok := aum.Parent()
if ok {
@@ -182,6 +209,81 @@ updateLoop:
return nil
}
// now returns the current time, using the clock overridden via SetClock
// when one is set, and the wall clock otherwise.
func (c *Mem) now() time.Time {
	// Early return keeps the happy path flat (no else after a
	// terminating if).
	if c.clock == nil {
		return time.Now()
	}
	return c.clock.Now()
}
// RemoveAll permanently and completely clears the TKA state.
func (c *Mem) RemoveAll() error {
	c.mu.Lock()
	defer c.mu.Unlock()

	// Drop every piece of in-memory state. The maps are lazily
	// re-created on the next call to CommitVerifiedAUMs.
	c.lastActiveAncestor = nil
	c.parentIndex = nil
	c.commitTimes = nil
	c.aums = nil
	return nil
}
// AllAUMs returns the hashes of all AUMs stored in the chonk, in no
// particular order.
func (c *Mem) AllAUMs() ([]AUMHash, error) {
	c.mu.RLock()
	defer c.mu.RUnlock()

	// Collect the map keys by hand; the result is nil when no AUMs
	// are stored, matching slices.Collect(maps.Keys(...)).
	var hashes []AUMHash
	for h := range c.aums {
		hashes = append(hashes, h)
	}
	return hashes, nil
}
// CommitTime returns the time at which the AUM was committed.
//
// If the AUM does not exist, then os.ErrNotExist is returned.
func (c *Mem) CommitTime(h AUMHash) (time.Time, error) {
	c.mu.RLock()
	defer c.mu.RUnlock()
	// Early return on the error path keeps the happy path flat
	// (no else after a terminating if).
	t, ok := c.commitTimes[h]
	if !ok {
		return time.Time{}, os.ErrNotExist
	}
	return t, nil
}
// PurgeAUMs permanently deletes the specified AUMs from storage.
func (c *Mem) PurgeAUMs(hashes []AUMHash) error {
	c.mu.Lock()
	defer c.mu.Unlock()
	for _, h := range hashes {
		// Remove the deleted AUM from the list of its parents' children.
		//
		// However, we leave the list of this AUM's children in parentIndex,
		// so we can find them later in ChildAUMs().
		if aum, ok := c.aums[h]; ok {
			parent, hasParent := aum.Parent()
			if hasParent {
				// AUMHash is a comparable array type, so compare
				// directly rather than via bytes.Equal on slices.
				c.parentIndex[parent] = slices.DeleteFunc(
					c.parentIndex[parent],
					func(other AUMHash) bool { return other == h },
				)
				if len(c.parentIndex[parent]) == 0 {
					delete(c.parentIndex, parent)
				}
			}
		}
		// Delete this AUM from the list of AUMs and commit times.
		delete(c.aums, h)
		delete(c.commitTimes, h)
	}
	return nil
}
// FS implements filesystem storage of TKA state.
//
// FS implements the Chonk interface.
@@ -193,6 +295,10 @@ type FS struct {
// ChonkDir returns an implementation of Chonk which uses the
// given directory to store TKA state.
func ChonkDir(dir string) (*FS, error) {
if err := os.MkdirAll(dir, 0755); err != nil && !os.IsExist(err) {
return nil, fmt.Errorf("creating chonk root dir: %v", err)
}
stat, err := os.Stat(dir)
if err != nil {
return nil, err
@@ -217,10 +323,14 @@ func ChonkDir(dir string) (*FS, error) {
// CBOR was chosen because we are already using it and it serializes
// much smaller than JSON for AUMs. The 'keyasint' thing isn't essential
// but again it saves a bunch of bytes.
//
// We have removed the following fields from fsHashInfo, but they may be
// present in data stored in existing deployments. Do not reuse these values,
// to avoid getting unexpected values from legacy data:
// - cbor:1, Children
type fsHashInfo struct {
Children []AUMHash `cbor:"1,keyasint"`
AUM *AUM `cbor:"2,keyasint"`
CreatedUnix int64 `cbor:"3,keyasint,omitempty"`
AUM *AUM `cbor:"2,keyasint"`
CreatedUnix int64 `cbor:"3,keyasint,omitempty"`
// PurgedUnix is set when the AUM is deleted. The value is
// the unix epoch at the time it was deleted.
@@ -296,32 +406,15 @@ func (c *FS) ChildAUMs(prevAUMHash AUMHash) ([]AUM, error) {
c.mu.RLock()
defer c.mu.RUnlock()
info, err := c.get(prevAUMHash)
if err != nil {
if os.IsNotExist(err) {
// not knowing about this hash is not an error
return nil, nil
}
return nil, err
}
// NOTE(tom): We don't check PurgedUnix here because 'purged'
// only applies to that specific AUM (i.e. info.AUM) and not to
// any information about children stored against that hash.
var out []AUM
out := make([]AUM, len(info.Children))
for i, h := range info.Children {
c, err := c.get(h)
if err != nil {
// We expect any AUM recorded as a child on its parent to exist.
return nil, fmt.Errorf("reading child %d of %x: %v", i, h, err)
err := c.scanHashes(func(info *fsHashInfo) {
if info.AUM != nil && bytes.Equal(info.AUM.PrevAUMHash, prevAUMHash[:]) {
out = append(out, *info.AUM)
}
if c.AUM == nil || c.PurgedUnix > 0 {
return nil, fmt.Errorf("child %d of %x: AUM not stored", i, h)
}
out[i] = *c.AUM
}
})
return out, nil
return out, err
}
func (c *FS) get(h AUMHash) (*fsHashInfo, error) {
@@ -357,13 +450,50 @@ func (c *FS) Heads() ([]AUM, error) {
c.mu.RLock()
defer c.mu.RUnlock()
out := make([]AUM, 0, 6) // 6 is arbitrary.
err := c.scanHashes(func(info *fsHashInfo) {
if len(info.Children) == 0 && info.AUM != nil && info.PurgedUnix == 0 {
out = append(out, *info.AUM)
// Scan the complete list of AUMs, and build a list of all parent hashes.
// This tells us which AUMs have children.
var parentHashes []AUMHash
allAUMs, err := c.AllAUMs()
if err != nil {
return nil, err
}
for _, h := range allAUMs {
aum, err := c.AUM(h)
if err != nil {
return nil, err
}
})
return out, err
parent, hasParent := aum.Parent()
if !hasParent {
continue
}
if !slices.Contains(parentHashes, parent) {
parentHashes = append(parentHashes, parent)
}
}
// Now scan a second time, and only include AUMs which weren't marked as
// the parent of any other AUM.
out := make([]AUM, 0, 6) // 6 is arbitrary.
for _, h := range allAUMs {
if slices.Contains(parentHashes, h) {
continue
}
aum, err := c.AUM(h)
if err != nil {
return nil, err
}
out = append(out, aum)
}
return out, nil
}
// RemoveAll permanently and completely clears the TKA state,
// deleting the whole on-disk state directory (c.base) and everything
// under it.
func (c *FS) RemoveAll() error {
	return os.RemoveAll(c.base)
}
// AllAUMs returns all AUMs stored in the chonk.
@@ -373,7 +503,7 @@ func (c *FS) AllAUMs() ([]AUMHash, error) {
out := make([]AUMHash, 0, 6) // 6 is arbitrary.
err := c.scanHashes(func(info *fsHashInfo) {
if info.AUM != nil && info.PurgedUnix == 0 {
if info.AUM != nil {
out = append(out, info.AUM.Hash())
}
})
@@ -394,14 +524,24 @@ func (c *FS) scanHashes(eachHashInfo func(*fsHashInfo)) error {
return fmt.Errorf("reading prefix dir: %v", err)
}
for _, file := range files {
// Ignore files whose names aren't valid AUM hashes, which may be
// temporary files which are partway through being written, or other
// files added by the OS (like .DS_Store) which we can ignore.
// TODO(alexc): it might be useful to append a suffix like `.aum` to
// filenames, so we can more easily distinguish between AUMs and
// arbitrary other files.
var h AUMHash
if err := h.UnmarshalText([]byte(file.Name())); err != nil {
return fmt.Errorf("invalid aum file: %s: %w", file.Name(), err)
log.Printf("ignoring unexpected non-AUM: %s: %v", file.Name(), err)
continue
}
info, err := c.get(h)
if err != nil {
return fmt.Errorf("reading %x: %v", h, err)
}
if info.PurgedUnix > 0 {
continue
}
eachHashInfo(info)
}
@@ -456,24 +596,6 @@ func (c *FS) CommitVerifiedAUMs(updates []AUM) error {
for i, aum := range updates {
h := aum.Hash()
// We keep track of children against their parent so that
// ChildAUMs() do not need to scan all AUMs.
parent, hasParent := aum.Parent()
if hasParent {
err := c.commit(parent, func(info *fsHashInfo) {
// Only add it if its not already there.
for i := range info.Children {
if info.Children[i] == h {
return
}
}
info.Children = append(info.Children, h)
})
if err != nil {
return fmt.Errorf("committing update[%d] to parent %x: %v", i, parent, err)
}
}
err := c.commit(h, func(info *fsHashInfo) {
info.PurgedUnix = 0 // just in-case it was set for some reason
info.AUM = &aum
@@ -576,7 +698,7 @@ const (
)
// markActiveChain marks AUMs in the active chain.
// All AUMs that are within minChain ancestors of head are
// All AUMs that are within minChain ancestors of head, or are marked as young, are
// marked retainStateActive, and all remaining ancestors are
// marked retainStateCandidate.
//
@@ -602,27 +724,30 @@ func markActiveChain(storage Chonk, verdict map[AUMHash]retainState, minChain in
// We've reached the end of the chain we have stored.
return h, nil
}
return AUMHash{}, fmt.Errorf("reading active chain (retainStateActive) (%d): %w", i, err)
return AUMHash{}, fmt.Errorf("reading active chain (retainStateActive) (%d, %v): %w", i, parent, err)
}
}
// If we got this far, we have at least minChain AUMs stored, and minChain number
// of ancestors have been marked for retention. We now continue to iterate backwards
// till we find an AUM which we can compact to (a Checkpoint AUM).
// till we find an AUM which we can compact to: either a Checkpoint AUM which is old
// enough, or the genesis AUM.
for {
h := next.Hash()
verdict[h] |= retainStateActive
if next.MessageKind == AUMCheckpoint {
lastActiveAncestor = h
break
}
parent, hasParent := next.Parent()
if !hasParent {
return AUMHash{}, errors.New("reached genesis AUM without finding an appropriate lastActiveAncestor")
isYoung := verdict[h]&retainStateYoung != 0
if next.MessageKind == AUMCheckpoint {
lastActiveAncestor = h
if !isYoung || !hasParent {
break
}
}
if next, err = storage.AUM(parent); err != nil {
return AUMHash{}, fmt.Errorf("searching for compaction target: %w", err)
return AUMHash{}, fmt.Errorf("searching for compaction target (%v): %w", parent, err)
}
}
@@ -638,7 +763,7 @@ func markActiveChain(storage Chonk, verdict map[AUMHash]retainState, minChain in
// We've reached the end of the chain we have stored.
break
}
return AUMHash{}, fmt.Errorf("reading active chain (retainStateCandidate): %w", err)
return AUMHash{}, fmt.Errorf("reading active chain (retainStateCandidate, %v): %w", parent, err)
}
}
@@ -676,7 +801,7 @@ func markAncestorIntersectionAUMs(storage Chonk, verdict map[AUMHash]retainState
toScan := make([]AUMHash, 0, len(verdict))
for h, v := range verdict {
if (v & retainAUMMask) == 0 {
continue // not marked for retention, so dont need to consider it
continue // not marked for retention, so don't need to consider it
}
if h == candidateAncestor {
continue
@@ -750,7 +875,7 @@ func markAncestorIntersectionAUMs(storage Chonk, verdict map[AUMHash]retainState
if didAdjustCandidateAncestor {
var next AUM
if next, err = storage.AUM(candidateAncestor); err != nil {
return AUMHash{}, fmt.Errorf("searching for compaction target: %w", err)
return AUMHash{}, fmt.Errorf("searching for compaction target (%v): %w", candidateAncestor, err)
}
for {
@@ -766,7 +891,7 @@ func markAncestorIntersectionAUMs(storage Chonk, verdict map[AUMHash]retainState
return AUMHash{}, errors.New("reached genesis AUM without finding an appropriate candidateAncestor")
}
if next, err = storage.AUM(parent); err != nil {
return AUMHash{}, fmt.Errorf("searching for compaction target: %w", err)
return AUMHash{}, fmt.Errorf("searching for compaction target (%v): %w", parent, err)
}
}
}
@@ -779,7 +904,7 @@ func markDescendantAUMs(storage Chonk, verdict map[AUMHash]retainState) error {
toScan := make([]AUMHash, 0, len(verdict))
for h, v := range verdict {
if v&retainAUMMask == 0 {
continue // not marked, so dont need to mark descendants
continue // not marked, so don't need to mark descendants
}
toScan = append(toScan, h)
}
@@ -825,12 +950,12 @@ func Compact(storage CompactableChonk, head AUMHash, opts CompactionOptions) (la
verdict[h] = 0
}
if lastActiveAncestor, err = markActiveChain(storage, verdict, opts.MinChain, head); err != nil {
return AUMHash{}, fmt.Errorf("marking active chain: %w", err)
}
if err := markYoungAUMs(storage, verdict, opts.MinAge); err != nil {
return AUMHash{}, fmt.Errorf("marking young AUMs: %w", err)
}
if lastActiveAncestor, err = markActiveChain(storage, verdict, opts.MinChain, head); err != nil {
return AUMHash{}, fmt.Errorf("marking active chain: %w", err)
}
if err := markDescendantAUMs(storage, verdict); err != nil {
return AUMHash{}, fmt.Errorf("marking descendant AUMs: %w", err)
}

49
vendor/tailscale.com/tka/tka.go generated vendored
View File

@@ -1,7 +1,9 @@
// Copyright (c) Tailscale Inc & AUTHORS
// SPDX-License-Identifier: BSD-3-Clause
// Package tka (WIP) implements the Tailnet Key Authority.
//go:build !ts_omit_tailnetlock
// Package tka implements the Tailnet Key Authority (TKA) for Tailnet Lock.
package tka
import (
@@ -92,7 +94,7 @@ func computeChainCandidates(storage Chonk, lastKnownOldest *AUMHash, maxIter int
// candidates.Oldest needs to be computed by working backwards from
// head as far as we can.
iterAgain := true // if theres still work to be done.
iterAgain := true // if there's still work to be done.
for i := 0; iterAgain; i++ {
if i >= maxIter {
return nil, fmt.Errorf("iteration limit exceeded (%d)", maxIter)
@@ -100,14 +102,14 @@ func computeChainCandidates(storage Chonk, lastKnownOldest *AUMHash, maxIter int
iterAgain = false
for j := range candidates {
parent, hasParent := candidates[j].Oldest.Parent()
parentHash, hasParent := candidates[j].Oldest.Parent()
if hasParent {
parent, err := storage.AUM(parent)
parent, err := storage.AUM(parentHash)
if err != nil {
if err == os.ErrNotExist {
continue
}
return nil, fmt.Errorf("reading parent: %v", err)
return nil, fmt.Errorf("reading parent %s: %v", parentHash, err)
}
candidates[j].Oldest = parent
if lastKnownOldest != nil && *lastKnownOldest == parent.Hash() {
@@ -208,7 +210,7 @@ func fastForwardWithAdvancer(
}
nextAUM, err := storage.AUM(*startState.LastAUMHash)
if err != nil {
return AUM{}, State{}, fmt.Errorf("reading next: %v", err)
return AUM{}, State{}, fmt.Errorf("reading next (%v): %v", *startState.LastAUMHash, err)
}
curs := nextAUM
@@ -293,9 +295,9 @@ func computeStateAt(storage Chonk, maxIter int, wantHash AUMHash) (State, error)
}
// If we got here, the current state is dependent on the previous.
// Keep iterating backwards till thats not the case.
// Keep iterating backwards till that's not the case.
if curs, err = storage.AUM(parent); err != nil {
return State{}, fmt.Errorf("reading parent: %v", err)
return State{}, fmt.Errorf("reading parent (%v): %v", parent, err)
}
}
@@ -322,7 +324,7 @@ func computeStateAt(storage Chonk, maxIter int, wantHash AUMHash) (State, error)
return curs.Hash() == wantHash
})
// fastForward only terminates before the done condition if it
// doesnt have any later AUMs to process. This cant be the case
// doesn't have any later AUMs to process. This can't be the case
// as we've already iterated through them above so they must exist,
// but we check anyway to be super duper sure.
if err == nil && *state.LastAUMHash != wantHash {
@@ -334,13 +336,13 @@ func computeStateAt(storage Chonk, maxIter int, wantHash AUMHash) (State, error)
// computeActiveAncestor determines which ancestor AUM to use as the
// ancestor of the valid chain.
//
// If all the chains end up having the same ancestor, then thats the
// If all the chains end up having the same ancestor, then that's the
// only possible ancestor, ezpz. However if there are multiple distinct
// ancestors, that means there are distinct chains, and we need some
// hint to choose what to use. For that, we rely on the chainsThroughActive
// bit, which signals to us that that ancestor was part of the
// chain in a previous run.
func computeActiveAncestor(storage Chonk, chains []chain) (AUMHash, error) {
func computeActiveAncestor(chains []chain) (AUMHash, error) {
// Dedupe possible ancestors, tracking if they were part of
// the active chain on a previous run.
ancestors := make(map[AUMHash]bool, len(chains))
@@ -355,7 +357,7 @@ func computeActiveAncestor(storage Chonk, chains []chain) (AUMHash, error) {
}
}
// Theres more than one, so we need to use the ancestor that was
// There's more than one, so we need to use the ancestor that was
// part of the active chain in a previous iteration.
// Note that there can only be one distinct ancestor that was
// formerly part of the active chain, because AUMs can only have
@@ -389,8 +391,12 @@ func computeActiveChain(storage Chonk, lastKnownOldest *AUMHash, maxIter int) (c
return chain{}, fmt.Errorf("computing candidates: %v", err)
}
if len(chains) == 0 {
return chain{}, errors.New("no chain candidates in AUM storage")
}
// Find the right ancestor.
oldestHash, err := computeActiveAncestor(storage, chains)
oldestHash, err := computeActiveAncestor(chains)
if err != nil {
return chain{}, fmt.Errorf("computing ancestor: %v", err)
}
@@ -440,6 +446,13 @@ func aumVerify(aum AUM, state State, isGenesisAUM bool) error {
return fmt.Errorf("signature %d: %v", i, err)
}
}
if aum.MessageKind == AUMRemoveKey && len(state.Keys) == 1 {
if kid, err := state.Keys[0].ID(); err == nil && bytes.Equal(aum.KeyID, kid) {
return errors.New("cannot remove the last key in the state")
}
}
return nil
}
@@ -466,7 +479,7 @@ func (a *Authority) Head() AUMHash {
// Open initializes an existing TKA from the given tailchonk.
//
// Only use this if the current node has initialized an Authority before.
// If a TKA exists on other nodes but theres nothing locally, use Bootstrap().
// If a TKA exists on other nodes but there's nothing locally, use Bootstrap().
// If no TKA exists anywhere and you are creating it for the first
// time, use New().
func Open(storage Chonk) (*Authority, error) {
@@ -579,14 +592,14 @@ func (a *Authority) InformIdempotent(storage Chonk, updates []AUM) (Authority, e
toCommit := make([]AUM, 0, len(updates))
prevHash := a.Head()
// The state at HEAD is the current state of the authority. Its likely
// The state at HEAD is the current state of the authority. It's likely
// to be needed, so we prefill it rather than computing it.
stateAt[prevHash] = a.state
// Optimization: If the set of updates is a chain building from
// the current head, EG:
// <a.Head()> ==> updates[0] ==> updates[1] ...
// Then theres no need to recompute the resulting state from the
// Then there's no need to recompute the resulting state from the
// stored ancestor, because the last state computed during iteration
// is the new state. This should be the common case.
// isHeadChain keeps track of this.
@@ -766,8 +779,8 @@ func (a *Authority) findParentForRewrite(storage Chonk, removeKeys []tkatype.Key
}
}
if !keyTrusted {
// Success: the revoked keys are not trusted!
// Lets check that our key was trusted to ensure
// Success: the revoked keys are not trusted.
// Check that our key was trusted to ensure
// we can sign a fork from here.
if _, err := state.GetKey(ourKey); err == nil {
break

36
vendor/tailscale.com/tka/verify.go generated vendored Normal file
View File

@@ -0,0 +1,36 @@
// Copyright (c) Tailscale Inc & AUTHORS
// SPDX-License-Identifier: BSD-3-Clause
//go:build !ts_omit_tailnetlock
package tka
import (
"crypto/ed25519"
"errors"
"fmt"
"github.com/hdevalence/ed25519consensus"
"tailscale.com/types/tkatype"
)
// signatureVerify checks s against the BLAKE2s digest aumDigest using
// the public-key material recorded in key, returning nil only when the
// signature is valid.
//
// NOTE(tom): Even if we can compute the public from the KeyID,
// it's possible for the KeyID to be attacker-controlled
// so we should use the public contained in the state machine.
func signatureVerify(s *tkatype.Signature, aumDigest tkatype.AUMSigHash, key Key) error {
	// Only ed25519 keys are understood; anything else is rejected.
	if key.Kind != Key25519 {
		return fmt.Errorf("unhandled key type: %v", key.Kind)
	}
	pub := key.Public
	if len(pub) != ed25519.PublicKeySize {
		return fmt.Errorf("ed25519 key has wrong length: %d", len(pub))
	}
	// ZIP-215 consensus-safe verification, not crypto/ed25519.Verify.
	if !ed25519consensus.Verify(ed25519.PublicKey(pub), aumDigest[:], s.Signature) {
		return errors.New("invalid signature")
	}
	return nil
}

18
vendor/tailscale.com/tka/verify_disabled.go generated vendored Normal file
View File

@@ -0,0 +1,18 @@
// Copyright (c) Tailscale Inc & AUTHORS
// SPDX-License-Identifier: BSD-3-Clause
//go:build ts_omit_tailnetlock
package tka
import (
"errors"
"tailscale.com/types/tkatype"
)
// signatureVerify is the stub used when Tailnet Lock is compiled out
// (ts_omit_tailnetlock build tag); it ignores its arguments and always
// reports that verification is unavailable in this build.
func signatureVerify(_ *tkatype.Signature, _ tkatype.AUMSigHash, _ Key) error {
	return errors.New("tailnetlock disabled in build")
}