Update dependencies
This commit is contained in:
348
vendor/tailscale.com/tka/aum.go
generated
vendored
Normal file
348
vendor/tailscale.com/tka/aum.go
generated
vendored
Normal file
@@ -0,0 +1,348 @@
|
||||
// Copyright (c) Tailscale Inc & AUTHORS
|
||||
// SPDX-License-Identifier: BSD-3-Clause
|
||||
|
||||
package tka
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"crypto/ed25519"
|
||||
"encoding/base32"
|
||||
"errors"
|
||||
"fmt"
|
||||
|
||||
"github.com/fxamacker/cbor/v2"
|
||||
"golang.org/x/crypto/blake2s"
|
||||
"tailscale.com/types/tkatype"
|
||||
"tailscale.com/util/set"
|
||||
)
|
||||
|
||||
// AUMHash represents the BLAKE2s digest of an Authority Update Message (AUM).
type AUMHash [blake2s.Size]byte

// base32StdNoPad is the textual encoding for AUMHash values: standard
// base32 with padding stripped, so the result is filename-safe.
var base32StdNoPad = base32.StdEncoding.WithPadding(base32.NoPadding)
|
||||
|
||||
// String returns the AUMHash encoded as base32.
|
||||
// This is suitable for use as a filename, and for storing in text-preferred media.
|
||||
func (h AUMHash) String() string {
|
||||
return base32StdNoPad.EncodeToString(h[:])
|
||||
}
|
||||
|
||||
// UnmarshalText implements encoding.TextUnmarshaler.
|
||||
func (h *AUMHash) UnmarshalText(text []byte) error {
|
||||
if l := base32StdNoPad.DecodedLen(len(text)); l != len(h) {
|
||||
return fmt.Errorf("tka.AUMHash.UnmarshalText: text wrong length: %d, want %d", l, len(text))
|
||||
}
|
||||
if _, err := base32StdNoPad.Decode(h[:], text); err != nil {
|
||||
return fmt.Errorf("tka.AUMHash.UnmarshalText: %w", err)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// AppendText implements encoding.TextAppender.
|
||||
func (h AUMHash) AppendText(b []byte) ([]byte, error) {
|
||||
return base32StdNoPad.AppendEncode(b, h[:]), nil
|
||||
}
|
||||
|
||||
// MarshalText implements encoding.TextMarshaler.
|
||||
func (h AUMHash) MarshalText() ([]byte, error) {
|
||||
return h.AppendText(nil)
|
||||
}
|
||||
|
||||
// IsZero returns true if the hash is the empty value.
|
||||
func (h AUMHash) IsZero() bool {
|
||||
return h == (AUMHash{})
|
||||
}
|
||||
|
||||
// AUMKind describes valid AUM types.
type AUMKind uint8

// Valid AUM types. Do NOT reorder: the numeric values are part of the
// wire format (CBOR-encoded in AUM.MessageKind).
const (
	AUMInvalid AUMKind = iota
	// An AddKey AUM describes a new key trusted by the TKA.
	//
	// Only the Key optional field may be set.
	AUMAddKey
	// A RemoveKey AUM describes the removal of a key trusted by TKA.
	//
	// Only the KeyID optional field may be set.
	AUMRemoveKey
	// A NoOp AUM carries no information and is used in tests.
	AUMNoOp
	// An UpdateKey AUM updates the metadata or votes of an existing key.
	//
	// Only KeyID, along with either/or Meta or Votes optional fields
	// may be set.
	AUMUpdateKey
	// A Checkpoint AUM specifies the full state of the TKA.
	//
	// Only the State optional field may be set.
	AUMCheckpoint
)
|
||||
|
||||
func (k AUMKind) String() string {
|
||||
switch k {
|
||||
case AUMInvalid:
|
||||
return "invalid"
|
||||
case AUMAddKey:
|
||||
return "add-key"
|
||||
case AUMRemoveKey:
|
||||
return "remove-key"
|
||||
case AUMNoOp:
|
||||
return "no-op"
|
||||
case AUMCheckpoint:
|
||||
return "checkpoint"
|
||||
case AUMUpdateKey:
|
||||
return "update-key"
|
||||
default:
|
||||
return fmt.Sprintf("AUM?<%d>", int(k))
|
||||
}
|
||||
}
|
||||
|
||||
// AUM describes an Authority Update Message.
//
// The rules for adding new types of AUMs (MessageKind):
//   - CBOR key IDs must never be changed.
//   - New AUM types must not change semantics that are manipulated by other
//     AUM types.
//   - The serialization of existing data cannot change (in other words, if
//     an existing serialization test in aum_test.go fails, you need to try a
//     different approach).
//
// The rules for adding new fields are as follows:
//   - Must all be optional.
//   - An unset value must not result in serialization overhead. This is
//     necessary so the serialization of older AUMs stays the same.
//   - New processing semantics of the new fields must be compatible with the
//     behavior of old clients (which will ignore the field).
//   - No floats!
type AUM struct {
	MessageKind AUMKind `cbor:"1,keyasint"`
	PrevAUMHash []byte  `cbor:"2,keyasint"`

	// Key encodes a public key to be added to the key authority.
	// This field is used for AddKey AUMs.
	Key *Key `cbor:"3,keyasint,omitempty"`

	// KeyID references a public key which is part of the key authority.
	// This field is used for RemoveKey and UpdateKey AUMs.
	KeyID tkatype.KeyID `cbor:"4,keyasint,omitempty"`

	// State describes the full state of the key authority.
	// This field is used for Checkpoint AUMs.
	State *State `cbor:"5,keyasint,omitempty"`

	// Votes and Meta describe properties of a key in the key authority.
	// These fields are used for UpdateKey AUMs.
	Votes *uint             `cbor:"6,keyasint,omitempty"`
	Meta  map[string]string `cbor:"7,keyasint,omitempty"`

	// Signatures lists the signatures over this AUM.
	// CBOR key 23 is the last key which can be encoded as a single byte;
	// it is reserved for signatures so they sort last in the encoding
	// (see SigHash, which strips them before hashing).
	Signatures []tkatype.Signature `cbor:"23,keyasint,omitempty"`
}
|
||||
|
||||
// StaticValidate returns a nil error if the AUM is well-formed.
//
// "Static" checks are those that can be made from the AUM alone, without
// reference to authority state: field shapes, and which optional fields
// each message kind is permitted to carry.
func (a *AUM) StaticValidate() error {
	// Kind-independent field checks.
	if a.Key != nil {
		if err := a.Key.StaticValidate(); err != nil {
			return err
		}
	}
	// A missing parent must be a nil slice; a non-nil empty slice is
	// rejected so there is exactly one serialized form for "no parent".
	if a.PrevAUMHash != nil && len(a.PrevAUMHash) == 0 {
		return errors.New("absent parent must be represented by a nil slice")
	}
	for i, sig := range a.Signatures {
		if len(sig.KeyID) != 32 || len(sig.Signature) != ed25519.SignatureSize {
			return fmt.Errorf("signature %d has missing keyID or malformed signature", i)
		}
	}

	if a.State != nil {
		if err := a.State.staticValidateCheckpoint(); err != nil {
			return fmt.Errorf("checkpoint state: %v", err)
		}
	}

	// Kind-specific checks: each kind requires its own field and
	// forbids the optional fields belonging to other kinds.
	switch a.MessageKind {
	case AUMAddKey:
		if a.Key == nil {
			return errors.New("AddKey AUMs must contain a key")
		}
		if a.KeyID != nil || a.State != nil || a.Votes != nil || a.Meta != nil {
			return errors.New("AddKey AUMs may only specify a Key")
		}
	case AUMRemoveKey:
		if len(a.KeyID) == 0 {
			return errors.New("RemoveKey AUMs must specify a key ID")
		}
		if a.Key != nil || a.State != nil || a.Votes != nil || a.Meta != nil {
			return errors.New("RemoveKey AUMs may only specify a KeyID")
		}
	case AUMUpdateKey:
		if len(a.KeyID) == 0 {
			return errors.New("UpdateKey AUMs must specify a key ID")
		}
		if a.Meta == nil && a.Votes == nil {
			return errors.New("UpdateKey AUMs must contain an update to votes or key metadata")
		}
		if a.Key != nil || a.State != nil {
			return errors.New("UpdateKey AUMs may only specify KeyID, Votes, and Meta")
		}
	case AUMCheckpoint:
		if a.State == nil {
			return errors.New("Checkpoint AUMs must specify the state")
		}
		if a.KeyID != nil || a.Key != nil || a.Votes != nil || a.Meta != nil {
			return errors.New("Checkpoint AUMs may only specify State")
		}

	case AUMNoOp:
	default:
		// An AUM with an unknown message kind was received! That means
		// that a future version of tailscaled added some feature we don't
		// understand.
		//
		// The future-compatibility contract for AUM message types is that
		// they must only add new features, not change the semantics of existing
		// mechanisms or features. As such, old clients can safely ignore them.
	}

	return nil
}
|
||||
|
||||
// Serialize returns the given AUM in a serialized format.
//
// We would implement encoding.BinaryMarshaler, except that would
// unfortunately get called by the cbor marshaller resulting in infinite
// recursion.
func (a *AUM) Serialize() tkatype.MarshaledAUM {
	// Why CBOR and not something like JSON?
	//
	// The main function of an AUM is to carry signed data. Signatures are
	// over digests, so the serialized representation must be deterministic.
	// Further, experience with other attempts (JWS/JWT,SAML,X509 etc) has
	// taught us that even subtle behaviors such as how you handle invalid
	// or unrecognized fields + any invariants in subsequent re-serialization
	// can easily lead to security-relevant logic bugs. Its certainly possible
	// to invent a workable scheme by massaging a JSON parsing library, though
	// profoundly unwise.
	//
	// CBOR is one of the few encoding schemes that are appropriate for use
	// with signatures and has security-conscious parsing + serialization
	// rules baked into the spec. We use the CTAP2 mode, which is well
	// understood + widely-implemented, and already proven for use in signing
	// assertions through its use by FIDO2 devices.
	out := bytes.NewBuffer(make([]byte, 0, 128))
	encoder, err := cbor.CTAP2EncOptions().EncMode()
	if err != nil {
		// Deterministic validation of encoding options, should
		// never fail.
		panic(err)
	}
	if err := encoder.NewEncoder(out).Encode(a); err != nil {
		// Writing to a bytes.Buffer should never fail.
		panic(err)
	}
	return out.Bytes()
}
|
||||
|
||||
// Unserialize decodes bytes representing a marshaled AUM.
//
// We would implement encoding.BinaryUnmarshaler, except that would
// unfortunately get called by the cbor unmarshaller resulting in infinite
// recursion.
func (a *AUM) Unserialize(data []byte) error {
	// The DecMode error is deliberately discarded: cborDecOpts is a
	// compile-time constant configuration, so construction presumably
	// cannot fail (mirrors the "should never fail" reasoning in
	// Serialize) — confirm if cborDecOpts ever becomes dynamic.
	dec, _ := cborDecOpts.DecMode()
	return dec.Unmarshal(data, a)
}
|
||||
|
||||
// Hash returns a cryptographic digest of all AUM contents.
|
||||
func (a *AUM) Hash() AUMHash {
|
||||
return blake2s.Sum256(a.Serialize())
|
||||
}
|
||||
|
||||
// SigHash returns the cryptographic digest which a signature
|
||||
// is over.
|
||||
//
|
||||
// This is identical to Hash() except the Signatures are not
|
||||
// serialized. Without this, the hash used for signatures
|
||||
// would be circularly dependent on the signatures.
|
||||
func (a AUM) SigHash() tkatype.AUMSigHash {
|
||||
dupe := a
|
||||
dupe.Signatures = nil
|
||||
return blake2s.Sum256(dupe.Serialize())
|
||||
}
|
||||
|
||||
// Parent returns the parent's AUM hash and true, or a
|
||||
// zero value and false if there was no parent.
|
||||
func (a *AUM) Parent() (h AUMHash, ok bool) {
|
||||
if len(a.PrevAUMHash) > 0 {
|
||||
copy(h[:], a.PrevAUMHash)
|
||||
return h, true
|
||||
}
|
||||
return h, false
|
||||
}
|
||||
|
||||
func (a *AUM) sign25519(priv ed25519.PrivateKey) error {
|
||||
key := Key{Kind: Key25519, Public: priv.Public().(ed25519.PublicKey)}
|
||||
sigHash := a.SigHash()
|
||||
|
||||
keyID, err := key.ID()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
a.Signatures = append(a.Signatures, tkatype.Signature{
|
||||
KeyID: keyID,
|
||||
Signature: ed25519.Sign(priv, sigHash[:]),
|
||||
})
|
||||
return nil
|
||||
}
|
||||
|
||||
// Weight computes the 'signature weight' of the AUM
// based on keys in the state machine. The caller must
// ensure that all signatures are valid.
//
// More formally: W = Sum(key.votes)
//
// AUMs with a higher weight than their siblings
// are preferred when resolving forks in the AUM chain.
func (a *AUM) Weight(state State) uint {
	var weight uint

	// Track the keys that have already been used, so two
	// signatures with the same key do not result in 2x
	// the weight.
	//
	// Despite the wire encoding being []byte, all KeyIDs are
	// 32 bytes. As such, we use that as the key for the map,
	// because map keys cannot be slices.
	seenKeys := make(set.Set[[32]byte], 6)
	for _, sig := range a.Signatures {
		// StaticValidate enforces 32-byte KeyIDs, so any other length
		// here is a programming error.
		if len(sig.KeyID) != 32 {
			panic("unexpected: keyIDs are 32 bytes")
		}

		var keyID [32]byte
		copy(keyID[:], sig.KeyID)

		key, err := state.GetKey(sig.KeyID)
		if err != nil {
			if err == ErrNoSuchKey {
				// Signatures with an unknown key do not contribute
				// to the weight.
				continue
			}
			panic(err)
		}
		if seenKeys.Contains(keyID) {
			continue
		}

		weight += key.Votes
		seenKeys.Add(keyID)
	}

	return weight
}
|
||||
180
vendor/tailscale.com/tka/builder.go
generated
vendored
Normal file
180
vendor/tailscale.com/tka/builder.go
generated
vendored
Normal file
@@ -0,0 +1,180 @@
|
||||
// Copyright (c) Tailscale Inc & AUTHORS
|
||||
// SPDX-License-Identifier: BSD-3-Clause
|
||||
|
||||
package tka
|
||||
|
||||
import (
	"errors"
	"fmt"
	"os"

	"tailscale.com/types/tkatype"
)
|
||||
|
||||
// Types implementing Signer can sign update messages.
type Signer interface {
	// SignAUM returns signatures for the AUM encoded by the given AUMSigHash.
	SignAUM(tkatype.AUMSigHash) ([]tkatype.Signature, error)
}
|
||||
|
||||
// UpdateBuilder implements a builder for changes to the tailnet
// key authority.
//
// Finalize must be called to compute the update messages, which
// must then be applied to all Authority objects using Inform().
type UpdateBuilder struct {
	a      *Authority // the authority the updates will apply to
	signer Signer     // optional; signs each generated AUM

	// state and parent track the in-progress chain as updates are
	// queued: state is the authority state after the queued updates,
	// parent is the hash the next AUM will chain from.
	state  State
	parent AUMHash

	// out accumulates the generated AUMs, in application order.
	out []AUM
}
|
||||
|
||||
// mkUpdate finishes and queues a single update AUM: it stamps the
// current parent hash, collects signatures (if a signer is configured),
// statically validates the result, applies it to the builder's
// in-progress state, and appends it to b.out.
func (b *UpdateBuilder) mkUpdate(update AUM) error {
	prevHash := make([]byte, len(b.parent))
	copy(prevHash, b.parent[:])
	update.PrevAUMHash = prevHash

	if b.signer != nil {
		sigs, err := b.signer.SignAUM(update.SigHash())
		if err != nil {
			return fmt.Errorf("signing failed: %v", err)
		}
		update.Signatures = append(update.Signatures, sigs...)
	}
	if err := update.StaticValidate(); err != nil {
		return fmt.Errorf("generated update was invalid: %v", err)
	}
	state, err := b.state.applyVerifiedAUM(update)
	if err != nil {
		return fmt.Errorf("update cannot be applied: %v", err)
	}

	// Advance the builder so subsequent updates chain off this one.
	b.state = state
	b.parent = update.Hash()
	b.out = append(b.out, update)
	return nil
}
|
||||
|
||||
// AddKey adds a new key to the authority.
|
||||
func (b *UpdateBuilder) AddKey(key Key) error {
|
||||
keyID, err := key.ID()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
if _, err := b.state.GetKey(keyID); err == nil {
|
||||
return fmt.Errorf("cannot add key %v: already exists", key)
|
||||
}
|
||||
return b.mkUpdate(AUM{MessageKind: AUMAddKey, Key: &key})
|
||||
}
|
||||
|
||||
// RemoveKey removes a key from the authority.
|
||||
func (b *UpdateBuilder) RemoveKey(keyID tkatype.KeyID) error {
|
||||
if _, err := b.state.GetKey(keyID); err != nil {
|
||||
return fmt.Errorf("failed reading key %x: %v", keyID, err)
|
||||
}
|
||||
return b.mkUpdate(AUM{MessageKind: AUMRemoveKey, KeyID: keyID})
|
||||
}
|
||||
|
||||
// SetKeyVote updates the number of votes of an existing key.
|
||||
func (b *UpdateBuilder) SetKeyVote(keyID tkatype.KeyID, votes uint) error {
|
||||
if _, err := b.state.GetKey(keyID); err != nil {
|
||||
return fmt.Errorf("failed reading key %x: %v", keyID, err)
|
||||
}
|
||||
return b.mkUpdate(AUM{MessageKind: AUMUpdateKey, Votes: &votes, KeyID: keyID})
|
||||
}
|
||||
|
||||
// SetKeyMeta updates key-value metadata stored against an existing key.
|
||||
//
|
||||
// TODO(tom): Provide an API to update specific values rather than the whole
|
||||
// map.
|
||||
func (b *UpdateBuilder) SetKeyMeta(keyID tkatype.KeyID, meta map[string]string) error {
|
||||
if _, err := b.state.GetKey(keyID); err != nil {
|
||||
return fmt.Errorf("failed reading key %x: %v", keyID, err)
|
||||
}
|
||||
return b.mkUpdate(AUM{MessageKind: AUMUpdateKey, Meta: meta, KeyID: keyID})
|
||||
}
|
||||
|
||||
// generateCheckpoint queues a checkpoint AUM encoding the full authority
// state as it stands after all currently-queued updates are applied.
func (b *UpdateBuilder) generateCheckpoint() error {
	// Compute the checkpoint state by replaying the pending updates
	// on top of the authority's current state.
	state := b.a.state
	for i, update := range b.out {
		var err error
		if state, err = state.applyVerifiedAUM(update); err != nil {
			return fmt.Errorf("applying update %d: %v", i, err)
		}
	}

	// Checkpoints can't specify a parent AUM.
	state.LastAUMHash = nil
	return b.mkUpdate(AUM{MessageKind: AUMCheckpoint, State: &state})
}
|
||||
|
||||
// checkpointEvery sets how often a checkpoint AUM should be generated:
// Finalize emits one when no checkpoint exists within this many AUMs
// of the chain head.
const checkpointEvery = 50
|
||||
|
||||
// Finalize returns the set of update message to actuate the update.
|
||||
func (b *UpdateBuilder) Finalize(storage Chonk) ([]AUM, error) {
|
||||
var (
|
||||
needCheckpoint bool = true
|
||||
cursor AUMHash = b.a.Head()
|
||||
)
|
||||
for i := len(b.out); i < checkpointEvery; i++ {
|
||||
aum, err := storage.AUM(cursor)
|
||||
if err != nil {
|
||||
if err == os.ErrNotExist {
|
||||
// The available chain is shorter than the interval to checkpoint at.
|
||||
needCheckpoint = false
|
||||
break
|
||||
}
|
||||
return nil, fmt.Errorf("reading AUM: %v", err)
|
||||
}
|
||||
|
||||
if aum.MessageKind == AUMCheckpoint {
|
||||
needCheckpoint = false
|
||||
break
|
||||
}
|
||||
|
||||
parent, hasParent := aum.Parent()
|
||||
if !hasParent {
|
||||
// We've hit the genesis update, so the chain is shorter than the interval to checkpoint at.
|
||||
needCheckpoint = false
|
||||
break
|
||||
}
|
||||
cursor = parent
|
||||
}
|
||||
|
||||
if needCheckpoint {
|
||||
if err := b.generateCheckpoint(); err != nil {
|
||||
return nil, fmt.Errorf("generating checkpoint: %v", err)
|
||||
}
|
||||
}
|
||||
|
||||
// Check no AUMs were applied in the meantime
|
||||
if len(b.out) > 0 {
|
||||
if parent, _ := b.out[0].Parent(); parent != b.a.Head() {
|
||||
return nil, fmt.Errorf("updates no longer apply to head: based on %x but head is %x", parent, b.a.Head())
|
||||
}
|
||||
}
|
||||
return b.out, nil
|
||||
}
|
||||
|
||||
// NewUpdater returns a builder you can use to make changes to
|
||||
// the tailnet key authority.
|
||||
//
|
||||
// The provided signer function, if non-nil, is called with each update
|
||||
// to compute and apply signatures.
|
||||
//
|
||||
// Updates are specified by calling methods on the returned UpdatedBuilder.
|
||||
// Call Finalize() when you are done to obtain the specific update messages
|
||||
// which actuate the changes.
|
||||
func (a *Authority) NewUpdater(signer Signer) *UpdateBuilder {
|
||||
return &UpdateBuilder{
|
||||
a: a,
|
||||
signer: signer,
|
||||
parent: a.Head(),
|
||||
state: a.state,
|
||||
}
|
||||
}
|
||||
221
vendor/tailscale.com/tka/deeplink.go
generated
vendored
Normal file
221
vendor/tailscale.com/tka/deeplink.go
generated
vendored
Normal file
@@ -0,0 +1,221 @@
|
||||
// Copyright (c) Tailscale Inc & AUTHORS
|
||||
// SPDX-License-Identifier: BSD-3-Clause
|
||||
|
||||
package tka
|
||||
|
||||
import (
|
||||
"crypto/hmac"
|
||||
"crypto/sha256"
|
||||
"encoding/binary"
|
||||
"encoding/hex"
|
||||
"fmt"
|
||||
"net/url"
|
||||
"strings"
|
||||
)
|
||||
|
||||
// Deeplink URL components: the custom scheme and the host-position
// command used for device-signing deeplinks
// (tailscale://sign-device/v1/?...).
const (
	DeeplinkTailscaleURLScheme = "tailscale"
	DeeplinkCommandSign        = "sign-device"
)
|
||||
|
||||
// generateHMAC computes a SHA-256 HMAC for the concatenation of components,
// using the Authority stateID as secret.
//
// NOTE(review): the fields are concatenated with no separators, so
// distinct parameter tuples could produce identical MAC input;
// presumably the field formats (prefixed keys etc.) make this
// acceptable — confirm before reusing this MAC elsewhere.
func (a *Authority) generateHMAC(params NewDeeplinkParams) []byte {
	// Only the first of the two state IDs is used as key material.
	stateID, _ := a.StateIDs()

	key := make([]byte, 8)
	binary.LittleEndian.PutUint64(key, stateID)
	mac := hmac.New(sha256.New, key)
	mac.Write([]byte(params.NodeKey))
	mac.Write([]byte(params.TLPub))
	mac.Write([]byte(params.DeviceName))
	mac.Write([]byte(params.OSName))
	mac.Write([]byte(params.LoginName))
	return mac.Sum(nil)
}
|
||||
|
||||
// NewDeeplinkParams carries the fields embedded in (and authenticated
// by) a device-signing deeplink. See NewDeeplink for the validation
// each field must satisfy.
type NewDeeplinkParams struct {
	NodeKey    string // must carry the "nodekey:" prefix
	TLPub      string // must carry the "tlpub:" prefix
	DeviceName string
	OSName     string
	LoginName  string
}
|
||||
|
||||
// NewDeeplink creates a signed deeplink using the authority's stateID as a
|
||||
// secret. This deeplink can then be validated by ValidateDeeplink.
|
||||
func (a *Authority) NewDeeplink(params NewDeeplinkParams) (string, error) {
|
||||
if params.NodeKey == "" || !strings.HasPrefix(params.NodeKey, "nodekey:") {
|
||||
return "", fmt.Errorf("invalid node key %q", params.NodeKey)
|
||||
}
|
||||
if params.TLPub == "" || !strings.HasPrefix(params.TLPub, "tlpub:") {
|
||||
return "", fmt.Errorf("invalid tlpub %q", params.TLPub)
|
||||
}
|
||||
if params.DeviceName == "" {
|
||||
return "", fmt.Errorf("invalid device name %q", params.DeviceName)
|
||||
}
|
||||
if params.OSName == "" {
|
||||
return "", fmt.Errorf("invalid os name %q", params.OSName)
|
||||
}
|
||||
if params.LoginName == "" {
|
||||
return "", fmt.Errorf("invalid login name %q", params.LoginName)
|
||||
}
|
||||
|
||||
u := url.URL{
|
||||
Scheme: DeeplinkTailscaleURLScheme,
|
||||
Host: DeeplinkCommandSign,
|
||||
Path: "/v1/",
|
||||
}
|
||||
v := url.Values{}
|
||||
v.Set("nk", params.NodeKey)
|
||||
v.Set("tp", params.TLPub)
|
||||
v.Set("dn", params.DeviceName)
|
||||
v.Set("os", params.OSName)
|
||||
v.Set("em", params.LoginName)
|
||||
|
||||
hmac := a.generateHMAC(params)
|
||||
v.Set("hm", hex.EncodeToString(hmac))
|
||||
|
||||
u.RawQuery = v.Encode()
|
||||
return u.String(), nil
|
||||
}
|
||||
|
||||
// DeeplinkValidationResult is the outcome of ValidateDeeplink.
// When IsValid is false, Error describes the failure and the remaining
// fields are unset; when true, the parsed deeplink fields are populated.
type DeeplinkValidationResult struct {
	IsValid bool
	Error   string // human-readable reason when IsValid is false

	Version      uint8
	NodeKey      string
	TLPub        string
	DeviceName   string
	OSName       string
	EmailAddress string
}
|
||||
|
||||
// ValidateDeeplink validates a device signing deeplink using the authority's stateID.
|
||||
// The input urlString follows this structure:
|
||||
//
|
||||
// tailscale://sign-device/v1/?nk=xxx&tp=xxx&dn=xxx&os=xxx&em=xxx&hm=xxx
|
||||
//
|
||||
// where:
|
||||
// - "nk" is the nodekey of the node being signed
|
||||
// - "tp" is the tailnet lock public key
|
||||
// - "dn" is the name of the node
|
||||
// - "os" is the operating system of the node
|
||||
// - "em" is the email address associated with the node
|
||||
// - "hm" is a SHA-256 HMAC computed over the concatenation of the above fields, encoded as a hex string
|
||||
func (a *Authority) ValidateDeeplink(urlString string) DeeplinkValidationResult {
|
||||
parsedUrl, err := url.Parse(urlString)
|
||||
if err != nil {
|
||||
return DeeplinkValidationResult{
|
||||
IsValid: false,
|
||||
Error: err.Error(),
|
||||
}
|
||||
}
|
||||
|
||||
if parsedUrl.Scheme != DeeplinkTailscaleURLScheme {
|
||||
return DeeplinkValidationResult{
|
||||
IsValid: false,
|
||||
Error: fmt.Sprintf("unhandled scheme %s, expected %s", parsedUrl.Scheme, DeeplinkTailscaleURLScheme),
|
||||
}
|
||||
}
|
||||
|
||||
if parsedUrl.Host != DeeplinkCommandSign {
|
||||
return DeeplinkValidationResult{
|
||||
IsValid: false,
|
||||
Error: fmt.Sprintf("unhandled host %s, expected %s", parsedUrl.Host, DeeplinkCommandSign),
|
||||
}
|
||||
}
|
||||
|
||||
path := parsedUrl.EscapedPath()
|
||||
pathComponents := strings.Split(path, "/")
|
||||
if len(pathComponents) != 3 {
|
||||
return DeeplinkValidationResult{
|
||||
IsValid: false,
|
||||
Error: "invalid path components number found",
|
||||
}
|
||||
}
|
||||
|
||||
if pathComponents[1] != "v1" {
|
||||
return DeeplinkValidationResult{
|
||||
IsValid: false,
|
||||
Error: fmt.Sprintf("expected v1 deeplink version, found something else: %s", pathComponents[1]),
|
||||
}
|
||||
}
|
||||
|
||||
nodeKey := parsedUrl.Query().Get("nk")
|
||||
if len(nodeKey) == 0 {
|
||||
return DeeplinkValidationResult{
|
||||
IsValid: false,
|
||||
Error: "missing nk (NodeKey) query parameter",
|
||||
}
|
||||
}
|
||||
|
||||
tlPub := parsedUrl.Query().Get("tp")
|
||||
if len(tlPub) == 0 {
|
||||
return DeeplinkValidationResult{
|
||||
IsValid: false,
|
||||
Error: "missing tp (TLPub) query parameter",
|
||||
}
|
||||
}
|
||||
|
||||
deviceName := parsedUrl.Query().Get("dn")
|
||||
if len(deviceName) == 0 {
|
||||
return DeeplinkValidationResult{
|
||||
IsValid: false,
|
||||
Error: "missing dn (DeviceName) query parameter",
|
||||
}
|
||||
}
|
||||
|
||||
osName := parsedUrl.Query().Get("os")
|
||||
if len(deviceName) == 0 {
|
||||
return DeeplinkValidationResult{
|
||||
IsValid: false,
|
||||
Error: "missing os (OSName) query parameter",
|
||||
}
|
||||
}
|
||||
|
||||
emailAddress := parsedUrl.Query().Get("em")
|
||||
if len(emailAddress) == 0 {
|
||||
return DeeplinkValidationResult{
|
||||
IsValid: false,
|
||||
Error: "missing em (EmailAddress) query parameter",
|
||||
}
|
||||
}
|
||||
|
||||
hmacString := parsedUrl.Query().Get("hm")
|
||||
if len(hmacString) == 0 {
|
||||
return DeeplinkValidationResult{
|
||||
IsValid: false,
|
||||
Error: "missing hm (HMAC) query parameter",
|
||||
}
|
||||
}
|
||||
|
||||
computedHMAC := a.generateHMAC(NewDeeplinkParams{
|
||||
NodeKey: nodeKey,
|
||||
TLPub: tlPub,
|
||||
DeviceName: deviceName,
|
||||
OSName: osName,
|
||||
LoginName: emailAddress,
|
||||
})
|
||||
|
||||
hmacHexBytes, err := hex.DecodeString(hmacString)
|
||||
if err != nil {
|
||||
return DeeplinkValidationResult{IsValid: false, Error: "could not hex-decode hmac"}
|
||||
}
|
||||
|
||||
if !hmac.Equal(computedHMAC, hmacHexBytes) {
|
||||
return DeeplinkValidationResult{
|
||||
IsValid: false,
|
||||
Error: "hmac authentication failed",
|
||||
}
|
||||
}
|
||||
|
||||
return DeeplinkValidationResult{
|
||||
IsValid: true,
|
||||
NodeKey: nodeKey,
|
||||
TLPub: tlPub,
|
||||
DeviceName: deviceName,
|
||||
OSName: osName,
|
||||
EmailAddress: emailAddress,
|
||||
}
|
||||
}
|
||||
159
vendor/tailscale.com/tka/key.go
generated
vendored
Normal file
159
vendor/tailscale.com/tka/key.go
generated
vendored
Normal file
@@ -0,0 +1,159 @@
|
||||
// Copyright (c) Tailscale Inc & AUTHORS
|
||||
// SPDX-License-Identifier: BSD-3-Clause
|
||||
|
||||
package tka
|
||||
|
||||
import (
|
||||
"crypto/ed25519"
|
||||
"errors"
|
||||
"fmt"
|
||||
|
||||
"github.com/hdevalence/ed25519consensus"
|
||||
"tailscale.com/types/tkatype"
|
||||
)
|
||||
|
||||
// KeyKind describes the different varieties of a Key.
type KeyKind uint8

// Valid KeyKind values. The numeric values are CBOR-encoded in
// Key.Kind, so do not reorder.
const (
	KeyInvalid KeyKind = iota
	Key25519
)
|
||||
|
||||
func (k KeyKind) String() string {
|
||||
switch k {
|
||||
case KeyInvalid:
|
||||
return "invalid"
|
||||
case Key25519:
|
||||
return "25519"
|
||||
default:
|
||||
return fmt.Sprintf("Key?<%d>", int(k))
|
||||
}
|
||||
}
|
||||
|
||||
// Key describes the public components of a key known to network-lock.
type Key struct {
	Kind KeyKind `cbor:"1,keyasint"`

	// Votes describes the weight applied to signatures using this key.
	// Weighting is used to deterministically resolve branches in the AUM
	// chain (i.e. forks, where two AUMs exist with the same parent).
	Votes uint `cbor:"2,keyasint"`

	// Public encodes the public key of the key. For 25519 keys,
	// this is simply the point on the curve representing the public
	// key.
	Public []byte `cbor:"3,keyasint"`

	// Meta describes arbitrary metadata about the key. This could be
	// used to store the name of the key, for instance.
	// Total size is capped by maxMetaBytes (see StaticValidate).
	Meta map[string]string `cbor:"12,keyasint,omitempty"`
}
|
||||
|
||||
// Clone makes an independent copy of Key.
|
||||
//
|
||||
// NOTE: There is a difference between a nil slice and an empty slice for encoding purposes,
|
||||
// so an implementation of Clone() must take care to preserve this.
|
||||
func (k Key) Clone() Key {
|
||||
out := k
|
||||
|
||||
if k.Public != nil {
|
||||
out.Public = make([]byte, len(k.Public))
|
||||
copy(out.Public, k.Public)
|
||||
}
|
||||
|
||||
if k.Meta != nil {
|
||||
out.Meta = make(map[string]string, len(k.Meta))
|
||||
for k, v := range k.Meta {
|
||||
out.Meta[k] = v
|
||||
}
|
||||
}
|
||||
|
||||
return out
|
||||
}
|
||||
|
||||
// MustID returns the KeyID of the key, panicking if an error is
|
||||
// encountered. This must only be used for tests.
|
||||
func (k Key) MustID() tkatype.KeyID {
|
||||
id, err := k.ID()
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
return id
|
||||
}
|
||||
|
||||
// ID returns the KeyID of the key.
|
||||
func (k Key) ID() (tkatype.KeyID, error) {
|
||||
switch k.Kind {
|
||||
// Because 25519 public keys are so short, we just use the 32-byte
|
||||
// public as their 'key ID'.
|
||||
case Key25519:
|
||||
return tkatype.KeyID(k.Public), nil
|
||||
default:
|
||||
return nil, fmt.Errorf("unknown key kind: %v", k.Kind)
|
||||
}
|
||||
}
|
||||
|
||||
// Ed25519 returns the ed25519 public key encoded by Key. An error is
|
||||
// returned for keys which do not represent ed25519 public keys.
|
||||
func (k Key) Ed25519() (ed25519.PublicKey, error) {
|
||||
switch k.Kind {
|
||||
case Key25519:
|
||||
return ed25519.PublicKey(k.Public), nil
|
||||
default:
|
||||
return nil, fmt.Errorf("key is of type %v, not ed25519", k.Kind)
|
||||
}
|
||||
}
|
||||
|
||||
// maxMetaBytes caps the combined size of a key's Meta keys and values,
// enforced by Key.StaticValidate.
const maxMetaBytes = 512
|
||||
// StaticValidate returns a nil error if the Key is well-formed:
// votes in (0, 4096], metadata within the size cap, and a
// recognized key kind.
func (k Key) StaticValidate() error {
	if k.Votes > 4096 {
		return fmt.Errorf("excessive key weight: %d > 4096", k.Votes)
	}
	if k.Votes == 0 {
		return errors.New("key votes must be non-zero")
	}

	// We have an arbitrary upper limit on the amount
	// of metadata that can be associated with a key, so
	// people don't start using it as a key-value store and
	// causing pathological cases due to the number + size of
	// AUMs.
	var metaBytes uint
	for k, v := range k.Meta {
		metaBytes += uint(len(k) + len(v))
	}
	if metaBytes > maxMetaBytes {
		return fmt.Errorf("key metadata too big (%d > %d)", metaBytes, maxMetaBytes)
	}

	switch k.Kind {
	case Key25519:
	default:
		return fmt.Errorf("unrecognized key kind: %v", k.Kind)
	}
	return nil
}
|
||||
|
||||
// signatureVerify returns a nil error if the signature is valid over the
// provided AUM BLAKE2s digest, using the given key.
func signatureVerify(s *tkatype.Signature, aumDigest tkatype.AUMSigHash, key Key) error {
	// NOTE(tom): Even if we can compute the public from the KeyID,
	//            its possible for the KeyID to be attacker-controlled
	//            so we should use the public contained in the state machine.
	switch key.Kind {
	case Key25519:
		if len(key.Public) != ed25519.PublicKeySize {
			return fmt.Errorf("ed25519 key has wrong length: %d", len(key.Public))
		}
		// ed25519consensus provides ZIP-215 verification: every
		// participant accepts/rejects the same signatures.
		if ed25519consensus.Verify(ed25519.PublicKey(key.Public), aumDigest[:], s.Signature) {
			return nil
		}
		return errors.New("invalid signature")

	default:
		return fmt.Errorf("unhandled key type: %v", key.Kind)
	}
}
|
||||
496
vendor/tailscale.com/tka/sig.go
generated
vendored
Normal file
496
vendor/tailscale.com/tka/sig.go
generated
vendored
Normal file
@@ -0,0 +1,496 @@
|
||||
// Copyright (c) Tailscale Inc & AUTHORS
|
||||
// SPDX-License-Identifier: BSD-3-Clause
|
||||
|
||||
package tka
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"crypto/ed25519"
|
||||
"encoding/base64"
|
||||
"errors"
|
||||
"fmt"
|
||||
"strings"
|
||||
|
||||
"github.com/fxamacker/cbor/v2"
|
||||
"github.com/hdevalence/ed25519consensus"
|
||||
"golang.org/x/crypto/blake2s"
|
||||
"tailscale.com/types/key"
|
||||
"tailscale.com/types/logger"
|
||||
"tailscale.com/types/tkatype"
|
||||
)
|
||||
|
||||
//go:generate go run tailscale.com/cmd/cloner -clonefunc=false -type=NodeKeySignature
|
||||
|
||||
// SigKind describes valid NodeKeySignature types.
type SigKind uint8

const (
	SigInvalid SigKind = iota
	// SigDirect describes a signature over a specific node key, signed
	// by a key in the tailnet key authority referenced by the specified keyID.
	SigDirect
	// SigRotation describes a signature over a specific node key, signed
	// by the rotation key authorized by a nested NodeKeySignature structure.
	//
	// While it is possible to nest rotations multiple times up to the CBOR
	// nesting limit, it is intended that nodes simply regenerate their outer
	// SigRotation signature and sign it again with their rotation key. That
	// way, SigRotation nesting should only be 2 deep in the common case.
	SigRotation
	// SigCredential describes a signature over a specific public key, signed
	// by a key in the tailnet key authority referenced by the specified keyID.
	// In effect, SigCredential delegates the ability to make a signature to
	// a different public/private key pair.
	//
	// It is intended that a different public/private key pair be generated
	// for each different SigCredential that is created. Implementors must
	// take care that the private side is only known to the entity that needs
	// to generate the wrapping SigRotation signature, and it is immediately
	// discarded after use.
	//
	// SigCredential is expected to be nested in a SigRotation signature.
	SigCredential
)

// String returns a human-readable name for the signature kind.
func (s SigKind) String() string {
	// Dense table keyed by the SigKind constants above; anything outside
	// the table is an unknown kind and rendered with its numeric value.
	names := [...]string{
		SigInvalid:    "invalid",
		SigDirect:     "direct",
		SigRotation:   "rotation",
		SigCredential: "credential",
	}
	if int(s) < len(names) {
		return names[s]
	}
	return fmt.Sprintf("Sig?<%d>", int(s))
}
|
||||
|
||||
// NodeKeySignature encapsulates a signature that authorizes a specific
// node key, based on verification from keys in the tailnet key authority.
//
// Fields use small integer CBOR keys ("keyasint") so the serialized form
// is compact and stable across versions.
type NodeKeySignature struct {
	// SigKind identifies the variety of signature.
	SigKind SigKind `cbor:"1,keyasint"`
	// Pubkey identifies the key.NodePublic which is being authorized.
	// SigCredential signatures do not use this field.
	Pubkey []byte `cbor:"2,keyasint,omitempty"`

	// KeyID identifies which key in the tailnet key authority should
	// be used to verify this signature. Only set for SigDirect and
	// SigCredential signature kinds.
	KeyID []byte `cbor:"3,keyasint,omitempty"`

	// Signature is the packed (R, S) ed25519 signature over all other
	// fields of the structure.
	Signature []byte `cbor:"4,keyasint,omitempty"`

	// Nested describes a NodeKeySignature which authorizes the node-key
	// used as Pubkey. Only used for SigRotation signatures.
	Nested *NodeKeySignature `cbor:"5,keyasint,omitempty"`

	// WrappingPubkey specifies the ed25519 public key which must be used
	// to sign a Signature which embeds this one.
	//
	// For SigRotation signatures multiple levels deep, intermediate
	// signatures may omit this value, in which case the parent WrappingPubkey
	// is used.
	//
	// SigCredential signatures use this field to specify the public key
	// they are certifying, following the usual semantics for WrappingPubkey.
	WrappingPubkey []byte `cbor:"6,keyasint,omitempty"`
}
|
||||
|
||||
// String returns a human-readable representation of the NodeKeySignature,
// making it easy to see nested signatures.
//
// Each level of nesting is indented one extra space; the result has
// surrounding whitespace trimmed.
func (s NodeKeySignature) String() string {
	var b strings.Builder
	// addToBuf is declared before assignment so it can recurse into
	// sig.Nested.
	var addToBuf func(NodeKeySignature, int)
	addToBuf = func(sig NodeKeySignature, depth int) {
		indent := strings.Repeat(" ", depth)
		b.WriteString(indent + "SigKind: " + sig.SigKind.String() + "\n")
		if len(sig.Pubkey) > 0 {
			var pubKey string
			var np key.NodePublic
			// A malformed Pubkey is rendered inline rather than
			// failing the whole String() call.
			if err := np.UnmarshalBinary(sig.Pubkey); err != nil {
				pubKey = fmt.Sprintf("<error: %s>", err)
			} else {
				pubKey = np.ShortString()
			}
			b.WriteString(indent + "Pubkey: " + pubKey + "\n")
		}
		if len(sig.KeyID) > 0 {
			keyID := key.NLPublicFromEd25519Unsafe(sig.KeyID).CLIString()
			b.WriteString(indent + "KeyID: " + keyID + "\n")
		}
		if len(sig.WrappingPubkey) > 0 {
			pubKey := key.NLPublicFromEd25519Unsafe(sig.WrappingPubkey).CLIString()
			b.WriteString(indent + "WrappingPubkey: " + pubKey + "\n")
		}
		if sig.Nested != nil {
			b.WriteString(indent + "Nested:\n")
			addToBuf(*sig.Nested, depth+1)
		}
	}
	addToBuf(s, 0)
	return strings.TrimSpace(b.String())
}
|
||||
|
||||
// UnverifiedWrappingPublic returns the public key which must sign a
// signature which embeds this one, if any.
//
// See docs on NodeKeySignature.WrappingPubkey & SigRotation for documentation
// about wrapping public keys.
//
// SAFETY: The caller MUST verify the signature using
// Authority.NodeKeyAuthorized if treating this as authentic information.
func (s NodeKeySignature) UnverifiedWrappingPublic() (pub ed25519.PublicKey, ok bool) {
	// Exported wrapper; the "Unverified" prefix exists to force callers
	// to acknowledge the SAFETY note above.
	return s.wrappingPublic()
}
|
||||
|
||||
// wrappingPublic returns the public key which must sign a signature which
|
||||
// embeds this one, if any.
|
||||
func (s NodeKeySignature) wrappingPublic() (pub ed25519.PublicKey, ok bool) {
|
||||
if len(s.WrappingPubkey) > 0 {
|
||||
return ed25519.PublicKey(s.WrappingPubkey), true
|
||||
}
|
||||
|
||||
switch s.SigKind {
|
||||
case SigRotation:
|
||||
if s.Nested == nil {
|
||||
return nil, false
|
||||
}
|
||||
return s.Nested.wrappingPublic()
|
||||
|
||||
default:
|
||||
return nil, false
|
||||
}
|
||||
}
|
||||
|
||||
// UnverifiedAuthorizingKeyID returns the KeyID of the key which authorizes
// this signature.
//
// SAFETY: The caller MUST verify the signature using
// Authority.NodeKeyAuthorized if treating this as authentic information.
func (s NodeKeySignature) UnverifiedAuthorizingKeyID() (tkatype.KeyID, error) {
	// Exported wrapper; the "Unverified" prefix exists to force callers
	// to acknowledge the SAFETY note above.
	return s.authorizingKeyID()
}
|
||||
|
||||
// authorizingKeyID returns the KeyID of the key trusted by network-lock which authorizes
|
||||
// this signature.
|
||||
func (s NodeKeySignature) authorizingKeyID() (tkatype.KeyID, error) {
|
||||
switch s.SigKind {
|
||||
case SigDirect, SigCredential:
|
||||
if len(s.KeyID) == 0 {
|
||||
return tkatype.KeyID{}, errors.New("invalid signature: no keyID present")
|
||||
}
|
||||
return tkatype.KeyID(s.KeyID), nil
|
||||
|
||||
case SigRotation:
|
||||
if s.Nested == nil {
|
||||
return tkatype.KeyID{}, errors.New("invalid signature: rotation signature missing nested signature")
|
||||
}
|
||||
return s.Nested.authorizingKeyID()
|
||||
|
||||
default:
|
||||
return tkatype.KeyID{}, fmt.Errorf("unhandled signature type: %v", s.SigKind)
|
||||
}
|
||||
}
|
||||
|
||||
// SigHash returns the cryptographic digest which a signature
// is over.
//
// This is a hash of the serialized structure, sans the signature.
// Without this exclusion, the hash used for the signature
// would be circularly dependent on the signature.
func (s NodeKeySignature) SigHash() [blake2s.Size]byte {
	// s is a value receiver, so dupe is a shallow copy: clearing its
	// Signature field does not affect the caller's value. The shared
	// Nested pointer is only read, never mutated.
	dupe := s
	dupe.Signature = nil
	return blake2s.Sum256(dupe.Serialize())
}
|
||||
|
||||
// Serialize returns the given NKS in a serialized format.
//
// CTAP2 canonical CBOR encoding is used so that the same structure always
// serializes to the same bytes, which is required for stable SigHash values.
//
// We would implement encoding.BinaryMarshaler, except that would
// unfortunately get called by the cbor marshaller resulting in infinite
// recursion.
func (s *NodeKeySignature) Serialize() tkatype.MarshaledSignature {
	out := bytes.NewBuffer(make([]byte, 0, 128)) // 64byte sig + 32byte keyID + 32byte headroom
	encoder, err := cbor.CTAP2EncOptions().EncMode()
	if err != nil {
		// Deterministic validation of encoding options, should
		// never fail.
		panic(err)
	}
	if err := encoder.NewEncoder(out).Encode(s); err != nil {
		// Writing to a bytes.Buffer should never fail.
		panic(err)
	}
	return out.Bytes()
}
|
||||
|
||||
// Unserialize decodes bytes representing a marshaled NKS.
//
// We would implement encoding.BinaryUnmarshaler, except that would
// unfortunately get called by the cbor unmarshaller resulting in infinite
// recursion.
func (s *NodeKeySignature) Unserialize(data []byte) error {
	// The DecMode error is deliberately discarded — NOTE(review):
	// presumably cborDecOpts is a static, known-good option set so this
	// cannot fail; confirm against its declaration.
	dec, _ := cborDecOpts.DecMode()
	return dec.Unmarshal(data, s)
}
|
||||
|
||||
// verifySignature checks that the NodeKeySignature is authentic & certified
// by the given verificationKey. Additionally, SigDirect and SigRotation
// signatures are checked to ensure they authorize the given nodeKey.
//
// The checks are order-sensitive: the Pubkey/nodeKey binding is validated
// before any cryptographic verification, and SigRotation recursion verifies
// the outer signature before descending into the nested one.
func (s *NodeKeySignature) verifySignature(nodeKey key.NodePublic, verificationKey Key) error {
	// SigCredential certifies an indirection key (WrappingPubkey), not a
	// node key, so the Pubkey/nodeKey binding check does not apply to it.
	if s.SigKind != SigCredential {
		nodeBytes, err := nodeKey.MarshalBinary()
		if err != nil {
			return fmt.Errorf("marshalling pubkey: %v", err)
		}
		if !bytes.Equal(nodeBytes, s.Pubkey) {
			return errors.New("signature does not authorize nodeKey")
		}
	}

	sigHash := s.SigHash()
	switch s.SigKind {
	case SigRotation:
		if s.Nested == nil {
			return errors.New("nested signatures must nest a signature")
		}

		// Verify the signature using the nested rotation key.
		verifyPub, ok := s.Nested.wrappingPublic()
		if !ok {
			return errors.New("missing rotation key")
		}
		if len(verifyPub) != ed25519.PublicKeySize {
			return fmt.Errorf("bad rotation key length: %d", len(verifyPub))
		}
		if !ed25519.Verify(ed25519.PublicKey(verifyPub[:]), sigHash[:], s.Signature) {
			return errors.New("invalid signature")
		}

		// Recurse to verify the signature on the nested structure.
		var nestedPub key.NodePublic
		// SigCredential signatures certify an indirection key rather than a node
		// key, so there's no need to check the node key.
		if s.Nested.SigKind != SigCredential {
			if err := nestedPub.UnmarshalBinary(s.Nested.Pubkey); err != nil {
				return fmt.Errorf("nested pubkey: %v", err)
			}
		}
		if err := s.Nested.verifySignature(nestedPub, verificationKey); err != nil {
			return fmt.Errorf("nested: %v", err)
		}
		return nil

	case SigDirect, SigCredential:
		if s.Nested != nil {
			return fmt.Errorf("invalid signature: signatures of type %v cannot nest another signature", s.SigKind)
		}
		switch verificationKey.Kind {
		case Key25519:
			if len(verificationKey.Public) != ed25519.PublicKeySize {
				return fmt.Errorf("ed25519 key has wrong length: %d", len(verificationKey.Public))
			}
			// ed25519consensus applies ZIP-215 verification rules so all
			// verifiers agree on validity — NOTE(review): presumably chosen
			// for consensus safety; confirm against upstream rationale.
			if ed25519consensus.Verify(ed25519.PublicKey(verificationKey.Public), sigHash[:], s.Signature) {
				return nil
			}
			return errors.New("invalid signature")

		default:
			return fmt.Errorf("unhandled key type: %v", verificationKey.Kind)
		}

	default:
		return fmt.Errorf("unhandled signature type: %v", s.SigKind)
	}
}
|
||||
|
||||
// RotationDetails holds additional information about a nodeKeySignature
// of kind SigRotation.
type RotationDetails struct {
	// PrevNodeKeys is a list of node keys which have been rotated out.
	PrevNodeKeys []key.NodePublic

	// InitialSig is the first signature in the chain which led to
	// this rotating signature.
	InitialSig *NodeKeySignature
}
|
||||
|
||||
// rotationDetails returns the RotationDetails for a SigRotation signature.
//
// It returns (nil, nil) for non-rotation signatures. For rotation
// signatures it walks the Nested chain, collecting each nested Pubkey
// as a rotated-out node key, and records the innermost non-rotation
// signature as InitialSig.
func (s *NodeKeySignature) rotationDetails() (*RotationDetails, error) {
	if s.SigKind != SigRotation {
		return nil, nil
	}

	sri := &RotationDetails{}
	nested := s.Nested
	for nested != nil {
		// SigCredential members of the chain have no Pubkey, so this
		// is conditional on one being present.
		if len(nested.Pubkey) > 0 {
			var nestedPub key.NodePublic
			if err := nestedPub.UnmarshalBinary(nested.Pubkey); err != nil {
				return nil, fmt.Errorf("nested pubkey: %v", err)
			}
			sri.PrevNodeKeys = append(sri.PrevNodeKeys, nestedPub)
		}
		// Stop on the first non-rotation signature: that is the chain's
		// initial (direct or credential) signature.
		if nested.SigKind != SigRotation {
			break
		}
		nested = nested.Nested
	}
	sri.InitialSig = nested
	return sri, nil
}
|
||||
|
||||
// ResignNKS re-signs a node-key signature for a new node-key.
//
// This only matters on network-locked tailnets, because node-key signatures are
// how other nodes know that a node-key is authentic. When the node-key is
// rotated then the existing signature becomes invalid, so this function is
// responsible for generating a new wrapping signature to certify the new node-key.
//
// The signature itself is a SigRotation signature, which embeds the old signature
// and certifies the new node-key as a replacement for the old by signing the new
// signature with RotationPubkey (which is the node's own network-lock key).
func ResignNKS(priv key.NLPrivate, nodeKey key.NodePublic, oldNKS tkatype.MarshaledSignature) (tkatype.MarshaledSignature, error) {
	var oldSig NodeKeySignature
	if err := oldSig.Unserialize(oldNKS); err != nil {
		return nil, fmt.Errorf("decoding NKS: %w", err)
	}

	nk, err := nodeKey.MarshalBinary()
	if err != nil {
		return nil, fmt.Errorf("marshalling node-key: %w", err)
	}

	if bytes.Equal(nk, oldSig.Pubkey) {
		// The old signature is valid for the node-key we are using, so just
		// use it verbatim.
		return oldNKS, nil
	}

	// Bound the chain length before wrapping, so repeated rotations
	// cannot exceed the CBOR nesting limit.
	nested, err := maybeTrimRotationSignatureChain(oldSig, priv)
	if err != nil {
		return nil, fmt.Errorf("trimming rotation signature chain: %w", err)
	}

	// Wrap the (possibly trimmed) old signature in a new SigRotation
	// certifying the new node-key, signed with the node's network-lock key.
	newSig := NodeKeySignature{
		SigKind: SigRotation,
		Pubkey:  nk,
		Nested:  &nested,
	}
	if newSig.Signature, err = priv.SignNKS(newSig.SigHash()); err != nil {
		return nil, fmt.Errorf("signing NKS: %w", err)
	}

	return newSig.Serialize(), nil
}
|
||||
|
||||
// maybeTrimRotationSignatureChain truncates rotation signature chain to ensure
// it contains no more than 15 node keys.
//
// If the chain is already short enough (or sig is not a rotation signature)
// it is returned unchanged. Otherwise a fresh chain is rebuilt on top of the
// original innermost (direct/credential) signature, keeping only the newest
// keys and re-signing each rebuilt level with priv.
func maybeTrimRotationSignatureChain(sig NodeKeySignature, priv key.NLPrivate) (NodeKeySignature, error) {
	if sig.SigKind != SigRotation {
		return sig, nil
	}

	// Collect all the previous node keys, ordered from newest to oldest.
	prevPubkeys := [][]byte{sig.Pubkey}
	nested := sig.Nested
	for nested != nil {
		if len(nested.Pubkey) > 0 {
			prevPubkeys = append(prevPubkeys, nested.Pubkey)
		}
		if nested.SigKind != SigRotation {
			// nested now points at the chain's innermost signature.
			break
		}
		nested = nested.Nested
	}

	// Existing rotation signature with 15 keys is the maximum we can wrap in a
	// new signature without hitting the CBOR nesting limit of 16 (see
	// MaxNestedLevels in tka.go).
	const maxPrevKeys = 15
	if len(prevPubkeys) <= maxPrevKeys {
		return sig, nil
	}

	// Create a new rotation signature chain, starting with the original
	// direct signature. Iterating from index maxPrevKeys-2 down to 0 keeps
	// the newest maxPrevKeys-1 keys and rebuilds the chain innermost-first,
	// signing each level as it is wrapped.
	var err error
	result := nested // original direct signature
	for i := maxPrevKeys - 2; i >= 0; i-- {
		result = &NodeKeySignature{
			SigKind: SigRotation,
			Pubkey:  prevPubkeys[i],
			Nested:  result,
		}
		if result.Signature, err = priv.SignNKS(result.SigHash()); err != nil {
			return sig, fmt.Errorf("signing NKS: %w", err)
		}
	}
	return *result, nil
}
|
||||
|
||||
// SignByCredential signs a node public key by a private key which has its
|
||||
// signing authority delegated by a SigCredential signature. This is used by
|
||||
// wrapped auth keys.
|
||||
func SignByCredential(privKey []byte, wrapped *NodeKeySignature, nodeKey key.NodePublic) (tkatype.MarshaledSignature, error) {
|
||||
if wrapped.SigKind != SigCredential {
|
||||
return nil, fmt.Errorf("wrapped signature must be a credential, got %v", wrapped.SigKind)
|
||||
}
|
||||
|
||||
nk, err := nodeKey.MarshalBinary()
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("marshalling node-key: %w", err)
|
||||
}
|
||||
|
||||
sig := &NodeKeySignature{
|
||||
SigKind: SigRotation,
|
||||
Pubkey: nk,
|
||||
Nested: wrapped,
|
||||
}
|
||||
sigHash := sig.SigHash()
|
||||
sig.Signature = ed25519.Sign(privKey, sigHash[:])
|
||||
return sig.Serialize(), nil
|
||||
}
|
||||
|
||||
// DecodeWrappedAuthkey separates wrapping information from an authkey, if any.
// In all cases the authkey is returned, sans wrapping information if any.
//
// If the authkey is wrapped, isWrapped returns true, along with the wrapping signature
// and private key.
//
// Wrapped keys have the form "<authkey>--TL<base64 sig>-<base64 priv>".
// All decode failures are treated as "not wrapped": they are logged via
// logf and the original input is returned unchanged.
func DecodeWrappedAuthkey(wrappedAuthKey string, logf logger.Logf) (authKey string, isWrapped bool, sig *NodeKeySignature, priv ed25519.PrivateKey) {
	// Split once on the wrapping marker; absent marker means a plain authkey.
	authKey, suffix, found := strings.Cut(wrappedAuthKey, "--TL")
	if !found {
		return wrappedAuthKey, false, nil, nil
	}
	// The suffix is "<sig>-<priv>", both raw (unpadded) base64.
	sigBytes, privBytes, found := strings.Cut(suffix, "-")
	if !found {
		// TODO: propagate these errors to `tailscale up` output?
		logf("decoding wrapped auth-key: did not find delimiter")
		return wrappedAuthKey, false, nil, nil
	}

	rawSig, err := base64.RawStdEncoding.DecodeString(sigBytes)
	if err != nil {
		logf("decoding wrapped auth-key: signature decode: %v", err)
		return wrappedAuthKey, false, nil, nil
	}
	rawPriv, err := base64.RawStdEncoding.DecodeString(privBytes)
	if err != nil {
		logf("decoding wrapped auth-key: priv decode: %v", err)
		return wrappedAuthKey, false, nil, nil
	}

	sig = new(NodeKeySignature)
	if err := sig.Unserialize(rawSig); err != nil {
		logf("decoding wrapped auth-key: signature: %v", err)
		return wrappedAuthKey, false, nil, nil
	}
	// NOTE(review): rawPriv's length is not validated here — presumably
	// downstream signing fails on a malformed key; confirm callers.
	priv = ed25519.PrivateKey(rawPriv)

	return authKey, true, sig, priv
}
|
||||
315
vendor/tailscale.com/tka/state.go
generated
vendored
Normal file
315
vendor/tailscale.com/tka/state.go
generated
vendored
Normal file
@@ -0,0 +1,315 @@
|
||||
// Copyright (c) Tailscale Inc & AUTHORS
|
||||
// SPDX-License-Identifier: BSD-3-Clause
|
||||
|
||||
package tka
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"errors"
|
||||
"fmt"
|
||||
|
||||
"golang.org/x/crypto/argon2"
|
||||
"tailscale.com/types/tkatype"
|
||||
)
|
||||
|
||||
// ErrNoSuchKey is returned if the key referenced by a KeyID does not exist.
var ErrNoSuchKey = errors.New("key not found")

// State describes Tailnet Key Authority state at an instant in time.
//
// State is mutated by applying Authority Update Messages (AUMs), resulting
// in a new State.
type State struct {
	// LastAUMHash is the blake2s digest of the last-applied AUM.
	// Because AUMs are strictly ordered and form a hash chain, we
	// check the previous AUM hash in an update we are applying
	// is the same as the LastAUMHash.
	LastAUMHash *AUMHash `cbor:"1,keyasint"`

	// DisablementSecrets are KDF-derived values which can be used
	// to turn off the TKA in the event of a consensus-breaking bug.
	DisablementSecrets [][]byte `cbor:"2,keyasint"`

	// Keys are the public keys of either:
	//
	// 1. The signing nodes currently trusted by the TKA.
	// 2. Ephemeral keys that were used to generate pre-signed auth keys.
	Keys []Key `cbor:"3,keyasint"`

	// StateID1 and StateID2 are nonces, generated on enablement and
	// fixed for the lifetime of the Tailnet Key Authority. We generate
	// 16-bytes worth of keyspace here just in case we come up with a
	// cool future use for this.
	StateID1 uint64 `cbor:"4,keyasint,omitempty"`
	StateID2 uint64 `cbor:"5,keyasint,omitempty"`
}
|
||||
|
||||
// GetKey returns the trusted key with the specified KeyID.
|
||||
func (s State) GetKey(key tkatype.KeyID) (Key, error) {
|
||||
for _, k := range s.Keys {
|
||||
keyID, err := k.ID()
|
||||
if err != nil {
|
||||
return Key{}, err
|
||||
}
|
||||
|
||||
if bytes.Equal(keyID, key) {
|
||||
return k, nil
|
||||
}
|
||||
}
|
||||
|
||||
return Key{}, ErrNoSuchKey
|
||||
}
|
||||
|
||||
// Clone makes an independent copy of State.
//
// NOTE: There is a difference between a nil slice and an empty
// slice for encoding purposes, so an implementation of Clone()
// must take care to preserve this.
func (s State) Clone() State {
	// Scalar fields copy by value; pointer/slice fields are deep-copied
	// below, each guarded by a nil check to preserve nil-ness.
	out := State{
		StateID1: s.StateID1,
		StateID2: s.StateID2,
	}

	if s.LastAUMHash != nil {
		dupe := *s.LastAUMHash
		out.LastAUMHash = &dupe
	}

	if s.DisablementSecrets != nil {
		out.DisablementSecrets = make([][]byte, len(s.DisablementSecrets))
		for i := range s.DisablementSecrets {
			out.DisablementSecrets[i] = make([]byte, len(s.DisablementSecrets[i]))
			copy(out.DisablementSecrets[i], s.DisablementSecrets[i])
		}
	}

	if s.Keys != nil {
		out.Keys = make([]Key, len(s.Keys))
		for i := range s.Keys {
			out.Keys[i] = s.Keys[i].Clone()
		}
	}

	return out
}
|
||||
|
||||
// cloneForUpdate is like Clone, except LastAUMHash is set based
|
||||
// on the hash of the given update.
|
||||
func (s State) cloneForUpdate(update *AUM) State {
|
||||
out := s.Clone()
|
||||
aumHash := update.Hash()
|
||||
out.LastAUMHash = &aumHash
|
||||
return out
|
||||
}
|
||||
|
||||
// disablementLength is the byte length of both a disablement input
// secret's KDF output and the stored DisablementSecrets entries.
const disablementLength = 32

// disablementSalt is the fixed argon2 salt for DisablementKDF.
var disablementSalt = []byte("tailscale network-lock disablement salt")

// DisablementKDF computes a public value which can be stored in a
// key authority, but cannot be reversed to find the input secret.
//
// When the output of this function is stored in tka state (i.e. in
// tka.State.DisablementSecrets) a call to Authority.ValidDisablement()
// with the input of this function as the argument will return true.
func DisablementKDF(secret []byte) []byte {
	// time = 4 (3 recommended, booped to 4 to compensate for less memory)
	// memory = 16 (32 recommended)
	// threads = 4
	// keyLen = 32 (256 bits)
	return argon2.Key(secret, disablementSalt, 4, 16*1024, 4, disablementLength)
}
|
||||
|
||||
// checkDisablement returns true for a valid disablement secret.
|
||||
func (s State) checkDisablement(secret []byte) bool {
|
||||
derived := DisablementKDF(secret)
|
||||
for _, candidate := range s.DisablementSecrets {
|
||||
if bytes.Equal(derived, candidate) {
|
||||
return true
|
||||
}
|
||||
}
|
||||
return false
|
||||
}
|
||||
|
||||
// parentMatches returns true if an AUM can chain to (be applied)
|
||||
// to the current state.
|
||||
//
|
||||
// Specifically, the rules are:
|
||||
// - The last AUM hash must match (transitively, this implies that this
|
||||
// update follows the last update message applied to the state machine)
|
||||
// - Or, the state machine knows no parent (its brand new).
|
||||
func (s State) parentMatches(update AUM) bool {
|
||||
if s.LastAUMHash == nil {
|
||||
return true
|
||||
}
|
||||
return bytes.Equal(s.LastAUMHash[:], update.PrevAUMHash)
|
||||
}
|
||||
|
||||
// applyVerifiedAUM computes a new state based on the update provided.
//
// The provided update MUST be verified: That is, the AUM must be well-formed
// (as defined by StaticValidate()), and signatures over the AUM must have
// been verified.
//
// The receiver is never mutated; every branch returns a fresh State via
// cloneForUpdate (which also records the update's hash as LastAUMHash).
func (s State) applyVerifiedAUM(update AUM) (State, error) {
	// Validate that the update message has the right parent.
	if !s.parentMatches(update) {
		return State{}, errors.New("parent AUMHash mismatch")
	}

	switch update.MessageKind {
	case AUMNoOp:
		// Advances the hash chain without changing state content.
		out := s.cloneForUpdate(&update)
		return out, nil

	case AUMCheckpoint:
		if update.State == nil {
			return State{}, errors.New("missing checkpoint state")
		}
		// The checkpoint must belong to this authority: both halves of
		// the state nonce must match.
		id1Match, id2Match := update.State.StateID1 == s.StateID1, update.State.StateID2 == s.StateID2
		if !id1Match || !id2Match {
			return State{}, errors.New("checkpointed state has an incorrect stateID")
		}
		// The checkpoint replaces the current state wholesale.
		return update.State.cloneForUpdate(&update), nil

	case AUMAddKey:
		if update.Key == nil {
			return State{}, errors.New("no key to add provided")
		}
		keyID, err := update.Key.ID()
		if err != nil {
			return State{}, err
		}
		// A nil error from GetKey means the key is already trusted.
		if _, err := s.GetKey(keyID); err == nil {
			return State{}, errors.New("key already exists")
		}
		out := s.cloneForUpdate(&update)
		out.Keys = append(out.Keys, *update.Key)
		return out, nil

	case AUMUpdateKey:
		k, err := s.GetKey(update.KeyID)
		if err != nil {
			return State{}, err
		}
		// Only the fields present in the update are changed.
		if update.Votes != nil {
			k.Votes = *update.Votes
		}
		if update.Meta != nil {
			k.Meta = update.Meta
		}
		// Re-validate before committing the modified key.
		if err := k.StaticValidate(); err != nil {
			return State{}, fmt.Errorf("updated key fails validation: %v", err)
		}
		out := s.cloneForUpdate(&update)
		for i := range out.Keys {
			keyID, err := out.Keys[i].ID()
			if err != nil {
				return State{}, err
			}
			if bytes.Equal(keyID, update.KeyID) {
				out.Keys[i] = k
			}
		}
		return out, nil

	case AUMRemoveKey:
		// Find the index of the key to remove; -1 means not found.
		idx := -1
		for i := range s.Keys {
			keyID, err := s.Keys[i].ID()
			if err != nil {
				return State{}, err
			}
			if bytes.Equal(update.KeyID, keyID) {
				idx = i
				break
			}
		}
		if idx < 0 {
			return State{}, ErrNoSuchKey
		}
		out := s.cloneForUpdate(&update)
		out.Keys = append(out.Keys[:idx], out.Keys[idx+1:]...)
		return out, nil

	default:
		// An AUM with an unknown message kind was received! That means
		// that a future version of tailscaled added some feature we don't
		// understand.
		//
		// The future-compatibility contract for AUM message types is that
		// they must only add new features, not change the semantics of existing
		// mechanisms or features. As such, old clients can safely ignore them.
		out := s.cloneForUpdate(&update)
		return out, nil
	}
}
|
||||
|
||||
// Upper bound on checkpoint elements, chosen arbitrarily. Intended to
// cap out insanely large AUMs.
const (
	// maxDisablementSecrets bounds State.DisablementSecrets in a checkpoint.
	maxDisablementSecrets = 32
	// maxKeys bounds State.Keys in a checkpoint.
	maxKeys = 512
)
|
||||
|
||||
// staticValidateCheckpoint validates that the state is well-formed for
|
||||
// inclusion in a checkpoint AUM.
|
||||
func (s *State) staticValidateCheckpoint() error {
|
||||
if s.LastAUMHash != nil {
|
||||
return errors.New("cannot specify a parent AUM")
|
||||
}
|
||||
if len(s.DisablementSecrets) == 0 {
|
||||
return errors.New("at least one disablement secret required")
|
||||
}
|
||||
if numDS := len(s.DisablementSecrets); numDS > maxDisablementSecrets {
|
||||
return fmt.Errorf("too many disablement secrets (%d, max %d)", numDS, maxDisablementSecrets)
|
||||
}
|
||||
for i, ds := range s.DisablementSecrets {
|
||||
if len(ds) != disablementLength {
|
||||
return fmt.Errorf("disablement[%d]: invalid length (got %d, want %d)", i, len(ds), disablementLength)
|
||||
}
|
||||
for j, ds2 := range s.DisablementSecrets {
|
||||
if i == j {
|
||||
continue
|
||||
}
|
||||
if bytes.Equal(ds, ds2) {
|
||||
return fmt.Errorf("disablement[%d]: duplicates disablement[%d]", i, j)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
if len(s.Keys) == 0 {
|
||||
return errors.New("at least one key is required")
|
||||
}
|
||||
if numKeys := len(s.Keys); numKeys > maxKeys {
|
||||
return fmt.Errorf("too many keys (%d, max %d)", numKeys, maxKeys)
|
||||
}
|
||||
for i, k := range s.Keys {
|
||||
if err := k.StaticValidate(); err != nil {
|
||||
return fmt.Errorf("key[%d]: %v", i, err)
|
||||
}
|
||||
}
|
||||
// NOTE: The max number of keys is constrained (512), so
|
||||
// O(n^2) is fine.
|
||||
for i, k := range s.Keys {
|
||||
for j, k2 := range s.Keys {
|
||||
if i == j {
|
||||
continue
|
||||
}
|
||||
|
||||
id1, err := k.ID()
|
||||
if err != nil {
|
||||
return fmt.Errorf("key[%d]: %w", i, err)
|
||||
}
|
||||
id2, err := k2.ID()
|
||||
if err != nil {
|
||||
return fmt.Errorf("key[%d]: %w", j, err)
|
||||
}
|
||||
|
||||
if bytes.Equal(id1, id2) {
|
||||
return fmt.Errorf("key[%d]: duplicates key[%d]", i, j)
|
||||
}
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
246
vendor/tailscale.com/tka/sync.go
generated
vendored
Normal file
246
vendor/tailscale.com/tka/sync.go
generated
vendored
Normal file
@@ -0,0 +1,246 @@
|
||||
// Copyright (c) Tailscale Inc & AUTHORS
|
||||
// SPDX-License-Identifier: BSD-3-Clause
|
||||
|
||||
package tka
|
||||
|
||||
import (
|
||||
"errors"
|
||||
"fmt"
|
||||
"os"
|
||||
)
|
||||
|
||||
const (
	// Max iterations searching for any intersection.
	maxSyncIter = 2000
	// Max iterations searching for a head intersection.
	maxSyncHeadIntersectionIter = 400
)

// ErrNoIntersection is returned when a shared AUM could
// not be determined when evaluating a remote sync offer.
var ErrNoIntersection = errors.New("no intersection")

// SyncOffer conveys information about the current head & ancestor AUMs,
// for the purpose of synchronization with some remote end.
//
// Ancestors should contain a subset of the ancestors of the chain.
// The last entry in that slice is the oldest-known AUM in the chain.
type SyncOffer struct {
	// Head is the hash of the latest AUM in the chain.
	Head AUMHash
	// Ancestors lists older AUM hashes, newest first, with exponentially
	// increasing gaps between entries.
	Ancestors []AUMHash
}

const (
	// The starting number of AUMs to skip when listing
	// ancestors in a SyncOffer.
	ancestorsSkipStart = 4

	// How many bits to advance the skip count when listing
	// ancestors in a SyncOffer.
	//
	// 2 bits, so (4<<2), so after skipping 4 it skips 16.
	ancestorsSkipShift = 2
)
|
||||
|
||||
// SyncOffer returns an abbreviated description of the current AUM
// chain, which can be used to synchronize with another (untrusted)
// Authority instance.
//
// The returned SyncOffer structure should be transmitted to the remote
// Authority, which should call MissingAUMs() using it to determine
// AUMs which need to be transmitted. This list of AUMs from the remote
// can then be applied locally with Inform().
//
// This SyncOffer + AUM exchange should be performed by both ends,
// because it's possible that either end has AUMs that the other needs
// to find out about.
func (a *Authority) SyncOffer(storage Chonk) (SyncOffer, error) {
	oldest := a.oldestAncestor.Hash()

	out := SyncOffer{
		Head:      a.Head(),
		Ancestors: make([]AUMHash, 0, 6), // 6 chosen arbitrarily.
	}

	// We send some subset of our ancestors to help the remote
	// find a more-recent 'head intersection'.
	// The number of AUMs between each ancestor entry gets
	// exponentially larger.
	var (
		skipAmount uint64  = ancestorsSkipStart
		curs       AUMHash = a.Head() // walks backwards from HEAD via PrevAUMHash
	)
	for i := uint64(0); i < maxSyncHeadIntersectionIter; i++ {
		// Record an ancestor every skipAmount steps, then widen the gap.
		if i > 0 && (i%skipAmount) == 0 {
			out.Ancestors = append(out.Ancestors, curs)
			skipAmount = skipAmount << ancestorsSkipShift
		}

		parent, err := storage.AUM(curs)
		if err != nil {
			if err != os.ErrNotExist {
				return SyncOffer{}, err
			}
			// Walked off the end of locally-stored history.
			break
		}

		// We add the oldest later on, so don't duplicate.
		if parent.Hash() == oldest {
			break
		}
		copy(curs[:], parent.PrevAUMHash)
	}

	out.Ancestors = append(out.Ancestors, oldest)
	return out, nil
}
|
||||
|
||||
// intersection describes how to synchronize AUMs with a remote
// authority. Exactly one of headIntersection or tailIntersection is
// set by computeSyncIntersection (or upToDate is true).
type intersection struct {
	// if true, no exchange of AUMs is needed.
	upToDate bool

	// headIntersection is the latest common AUM on the remote. In other
	// words, we need to send all AUMs since this one.
	headIntersection *AUMHash

	// tailIntersection is the oldest common AUM on the remote. In other
	// words, we diverge with the remote after this AUM, so we both need
	// to transmit our AUM chain starting here.
	tailIntersection *AUMHash
}
|
||||
|
||||
// computeSyncIntersection determines the common AUMs between a local and
// remote SyncOffer. This intersection can be used to synchronize both
// sides.
//
// It returns ErrNoIntersection if neither the remote's head nor any of
// its advertised ancestors can be related to the local chain.
func computeSyncIntersection(storage Chonk, localOffer, remoteOffer SyncOffer) (*intersection, error) {
	// Simple case: up to date.
	if remoteOffer.Head == localOffer.Head {
		return &intersection{upToDate: true, headIntersection: &localOffer.Head}, nil
	}

	// Case: 'head intersection'
	// If we have the remote's head, its more likely than not that
	// we have updates that build on that head. To confirm this,
	// we iterate backwards through our chain to see if the given
	// head is an ancestor of our current chain.
	//
	// In other words:
	//  <Us>   A -> B -> C
	//  <Them> A -> B
	// ∴ their head intersects with our chain, we need to send C
	var hasRemoteHead bool
	_, err := storage.AUM(remoteOffer.Head)
	if err != nil {
		if err != os.ErrNotExist {
			return nil, err
		}
		// Not stored locally: fall through to tail intersection.
	} else {
		hasRemoteHead = true
	}

	if hasRemoteHead {
		// Walk backwards from our head, looking for theirs.
		curs := localOffer.Head
		for range maxSyncHeadIntersectionIter {
			parent, err := storage.AUM(curs)
			if err != nil {
				if err != os.ErrNotExist {
					return nil, err
				}
				break
			}

			if parent.Hash() == remoteOffer.Head {
				h := parent.Hash()
				return &intersection{headIntersection: &h}, nil
			}

			copy(curs[:], parent.PrevAUMHash)
		}
	}

	// Case: 'tail intersection'
	// So we don't have a clue what the remote's head is, but
	// if one of the ancestors they gave us is part of our chain,
	// then theres an intersection, which is a starting point for
	// the remote to send us AUMs from.
	//
	// We iterate the list of ancestors in order because the remote
	// ordered them such that the newer ones are earlier, so with
	// a bit of luck we can use an earlier one and hence do less work /
	// transmit fewer AUMs.
	for _, a := range remoteOffer.Ancestors {
		state, err := computeStateAt(storage, maxSyncIter, a)
		if err != nil {
			if err != os.ErrNotExist {
				return nil, fmt.Errorf("computeStateAt: %v", err)
			}
			// We don't know this ancestor; try an older one.
			continue
		}

		// Confirm the ancestor actually leads to our head.
		end, _, err := fastForward(storage, maxSyncIter, state, func(curs AUM, _ State) bool {
			return curs.Hash() == localOffer.Head
		})
		if err != nil {
			return nil, err
		}
		// fastForward can terminate before the done condition if there are
		// no more children left, so we check again before considering this
		// an intersection.
		if end.Hash() == localOffer.Head {
			return &intersection{tailIntersection: &a}, nil
		}
	}

	return nil, ErrNoIntersection
}
|
||||
|
||||
// MissingAUMs returns AUMs a remote may be missing based on the
// remotes' SyncOffer.
//
// A nil slice with a nil error means the remote is already up to date.
func (a *Authority) MissingAUMs(storage Chonk, remoteOffer SyncOffer) ([]AUM, error) {
	localOffer, err := a.SyncOffer(storage)
	if err != nil {
		return nil, fmt.Errorf("local syncOffer: %v", err)
	}
	intersection, err := computeSyncIntersection(storage, localOffer, remoteOffer)
	if err != nil {
		return nil, fmt.Errorf("intersection: %v", err)
	}
	if intersection.upToDate {
		return nil, nil
	}
	out := make([]AUM, 0, 12) // 12 chosen arbitrarily.

	// Head intersection: the remote's head is on our chain, so send
	// everything after it (the intersection AUM itself is excluded).
	if intersection.headIntersection != nil {
		state, err := computeStateAt(storage, maxSyncIter, *intersection.headIntersection)
		if err != nil {
			return nil, err
		}

		// The callback always returns false so fastForward collects the
		// entire chain from the intersection to our head.
		_, _, err = fastForward(storage, maxSyncIter, state, func(curs AUM, _ State) bool {
			if curs.Hash() != *intersection.headIntersection {
				out = append(out, curs)
			}
			return false
		})
		return out, err
	}

	// Tail intersection: we diverge after this AUM, so send our whole
	// chain from there (excluding the intersection AUM itself).
	if intersection.tailIntersection != nil {
		state, err := computeStateAt(storage, maxSyncIter, *intersection.tailIntersection)
		if err != nil {
			return nil, err
		}

		_, _, err = fastForward(storage, maxSyncIter, state, func(curs AUM, _ State) bool {
			if curs.Hash() != *intersection.tailIntersection {
				out = append(out, curs)
			}
			return false
		})
		return out, err
	}

	// computeSyncIntersection guarantees one of the three cases above.
	panic("unreachable")
}
|
||||
852
vendor/tailscale.com/tka/tailchonk.go
generated
vendored
Normal file
852
vendor/tailscale.com/tka/tailchonk.go
generated
vendored
Normal file
@@ -0,0 +1,852 @@
|
||||
// Copyright (c) Tailscale Inc & AUTHORS
|
||||
// SPDX-License-Identifier: BSD-3-Clause
|
||||
|
||||
package tka
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"errors"
|
||||
"fmt"
|
||||
"os"
|
||||
"path/filepath"
|
||||
"sync"
|
||||
"time"
|
||||
|
||||
"github.com/fxamacker/cbor/v2"
|
||||
"tailscale.com/atomicfile"
|
||||
)
|
||||
|
||||
// Chonk implementations provide durable storage for AUMs and other
// TKA state.
//
// All methods must be thread-safe.
//
// The name 'tailchonk' was coined by @catzkorn.
type Chonk interface {
	// AUM returns the AUM with the specified digest.
	//
	// If the AUM does not exist, then os.ErrNotExist is returned.
	AUM(hash AUMHash) (AUM, error)

	// ChildAUMs returns all AUMs with a specified previous
	// AUM hash.
	ChildAUMs(prevAUMHash AUMHash) ([]AUM, error)

	// CommitVerifiedAUMs durably stores the provided AUMs.
	// Callers MUST ONLY provide AUMs which are verified (specifically,
	// a call to aumVerify() must return a nil error),
	// as the implementation assumes that only verified AUMs are stored.
	CommitVerifiedAUMs(updates []AUM) error

	// Heads returns AUMs for which there are no children. In other
	// words, the latest AUM in all possible chains (the 'leaves').
	Heads() ([]AUM, error)

	// SetLastActiveAncestor is called to record the oldest-known AUM
	// that contributed to the current state. This value is used as
	// a hint on next startup to determine which chain to pick when computing
	// the current state, if there are multiple distinct chains.
	SetLastActiveAncestor(hash AUMHash) error

	// LastActiveAncestor returns the oldest-known AUM that was (in a
	// previous run) an ancestor of the current state. This is used
	// as a hint to pick the correct chain in the event that the Chonk stores
	// multiple distinct chains.
	LastActiveAncestor() (*AUMHash, error)
}
|
||||
|
||||
// CompactableChonk implementations are extensions of Chonk, which are
// able to be operated by compaction logic to delete old AUMs.
type CompactableChonk interface {
	Chonk

	// AllAUMs returns all AUMs stored in the chonk.
	AllAUMs() ([]AUMHash, error)

	// CommitTime returns the time at which the AUM was committed.
	//
	// If the AUM does not exist, then os.ErrNotExist is returned.
	CommitTime(hash AUMHash) (time.Time, error)

	// PurgeAUMs permanently and irrevocably deletes the specified
	// AUMs from storage.
	PurgeAUMs(hashes []AUMHash) error
}
|
||||
|
||||
// Mem implements in-memory storage of TKA state, suitable for
// tests.
//
// Mem implements the Chonk interface.
type Mem struct {
	l sync.RWMutex
	// aums maps an AUM's hash to the AUM itself.
	aums map[AUMHash]AUM
	// parentIndex maps a parent hash to the hashes of its children,
	// so ChildAUMs does not need to scan every stored AUM.
	parentIndex map[AUMHash][]AUMHash

	// lastActiveAncestor is the hint recorded by SetLastActiveAncestor,
	// or nil if none has been set.
	lastActiveAncestor *AUMHash
}
|
||||
|
||||
func (c *Mem) SetLastActiveAncestor(hash AUMHash) error {
|
||||
c.l.Lock()
|
||||
defer c.l.Unlock()
|
||||
c.lastActiveAncestor = &hash
|
||||
return nil
|
||||
}
|
||||
|
||||
func (c *Mem) LastActiveAncestor() (*AUMHash, error) {
|
||||
c.l.RLock()
|
||||
defer c.l.RUnlock()
|
||||
return c.lastActiveAncestor, nil
|
||||
}
|
||||
|
||||
// Heads returns AUMs for which there are no children. In other
|
||||
// words, the latest AUM in all chains (the 'leaf').
|
||||
func (c *Mem) Heads() ([]AUM, error) {
|
||||
c.l.RLock()
|
||||
defer c.l.RUnlock()
|
||||
out := make([]AUM, 0, 6)
|
||||
|
||||
// An AUM is a 'head' if there are no nodes for which it is the parent.
|
||||
for _, a := range c.aums {
|
||||
if len(c.parentIndex[a.Hash()]) == 0 {
|
||||
out = append(out, a)
|
||||
}
|
||||
}
|
||||
return out, nil
|
||||
}
|
||||
|
||||
// AUM returns the AUM with the specified digest.
|
||||
func (c *Mem) AUM(hash AUMHash) (AUM, error) {
|
||||
c.l.RLock()
|
||||
defer c.l.RUnlock()
|
||||
aum, ok := c.aums[hash]
|
||||
if !ok {
|
||||
return AUM{}, os.ErrNotExist
|
||||
}
|
||||
return aum, nil
|
||||
}
|
||||
|
||||
// Orphans returns all AUMs which do not have a parent.
|
||||
func (c *Mem) Orphans() ([]AUM, error) {
|
||||
c.l.RLock()
|
||||
defer c.l.RUnlock()
|
||||
out := make([]AUM, 0, 6)
|
||||
for _, a := range c.aums {
|
||||
if _, ok := a.Parent(); !ok {
|
||||
out = append(out, a)
|
||||
}
|
||||
}
|
||||
return out, nil
|
||||
}
|
||||
|
||||
// ChildAUMs returns all AUMs with a specified previous
|
||||
// AUM hash.
|
||||
func (c *Mem) ChildAUMs(prevAUMHash AUMHash) ([]AUM, error) {
|
||||
c.l.RLock()
|
||||
defer c.l.RUnlock()
|
||||
out := make([]AUM, 0, 6)
|
||||
for _, entry := range c.parentIndex[prevAUMHash] {
|
||||
out = append(out, c.aums[entry])
|
||||
}
|
||||
|
||||
return out, nil
|
||||
}
|
||||
|
||||
// CommitVerifiedAUMs durably stores the provided AUMs.
|
||||
// Callers MUST ONLY provide well-formed and verified AUMs,
|
||||
// as the rest of the TKA implementation assumes that only
|
||||
// verified AUMs are stored.
|
||||
func (c *Mem) CommitVerifiedAUMs(updates []AUM) error {
|
||||
c.l.Lock()
|
||||
defer c.l.Unlock()
|
||||
if c.aums == nil {
|
||||
c.parentIndex = make(map[AUMHash][]AUMHash, 64)
|
||||
c.aums = make(map[AUMHash]AUM, 64)
|
||||
}
|
||||
|
||||
updateLoop:
|
||||
for _, aum := range updates {
|
||||
aumHash := aum.Hash()
|
||||
c.aums[aumHash] = aum
|
||||
|
||||
parent, ok := aum.Parent()
|
||||
if ok {
|
||||
for _, exists := range c.parentIndex[parent] {
|
||||
if exists == aumHash {
|
||||
continue updateLoop
|
||||
}
|
||||
}
|
||||
c.parentIndex[parent] = append(c.parentIndex[parent], aumHash)
|
||||
}
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// FS implements filesystem storage of TKA state.
//
// FS implements the Chonk interface.
type FS struct {
	// base is the directory under which all state is stored.
	base string
	mu   sync.RWMutex
}
|
||||
|
||||
// ChonkDir returns an implementation of Chonk which uses the
|
||||
// given directory to store TKA state.
|
||||
func ChonkDir(dir string) (*FS, error) {
|
||||
stat, err := os.Stat(dir)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if !stat.IsDir() {
|
||||
return nil, fmt.Errorf("chonk directory %q is a file", dir)
|
||||
}
|
||||
|
||||
// TODO(tom): *FS marks AUMs as deleted but does not actually
|
||||
// delete them, to avoid data loss in the event of a bug.
|
||||
// Implement deletion after we are fairly sure in the implementation.
|
||||
|
||||
return &FS{base: dir}, nil
|
||||
}
|
||||
|
||||
// fsHashInfo describes how information about an AUMHash is represented
// on disk.
//
// The CBOR-serialization of this struct is stored to base/__/base32(hash)
// where __ are the first two characters of base32(hash).
//
// CBOR was chosen because we are already using it and it serializes
// much smaller than JSON for AUMs. The 'keyasint' thing isn't essential
// but again it saves a bunch of bytes.
type fsHashInfo struct {
	// Children lists the hashes of AUMs whose parent is this hash.
	Children []AUMHash `cbor:"1,keyasint"`
	// AUM is the stored AUM itself, or nil if only child/bookkeeping
	// information is tracked against this hash.
	AUM *AUM `cbor:"2,keyasint"`
	// CreatedUnix is the unix epoch at which the AUM was committed.
	// Zero for entries written before this field existed.
	CreatedUnix int64 `cbor:"3,keyasint,omitempty"`

	// PurgedUnix is set when the AUM is deleted. The value is
	// the unix epoch at the time it was deleted.
	//
	// While a non-zero PurgedUnix symbolizes the AUM is deleted,
	// the fsHashInfo entry can continue to exist to track children
	// of this AUMHash.
	PurgedUnix int64 `cbor:"4,keyasint,omitempty"`
}
|
||||
|
||||
// aumDir returns the directory an AUM is stored in, and its filename
|
||||
// within the directory.
|
||||
func (c *FS) aumDir(h AUMHash) (dir, base string) {
|
||||
s := h.String()
|
||||
return filepath.Join(c.base, s[:2]), s
|
||||
}
|
||||
|
||||
// AUM returns the AUM with the specified digest.
|
||||
//
|
||||
// If the AUM does not exist, then os.ErrNotExist is returned.
|
||||
func (c *FS) AUM(hash AUMHash) (AUM, error) {
|
||||
c.mu.RLock()
|
||||
defer c.mu.RUnlock()
|
||||
|
||||
info, err := c.get(hash)
|
||||
if err != nil {
|
||||
if os.IsNotExist(err) {
|
||||
return AUM{}, os.ErrNotExist
|
||||
}
|
||||
return AUM{}, err
|
||||
}
|
||||
if info.AUM == nil || info.PurgedUnix > 0 {
|
||||
return AUM{}, os.ErrNotExist
|
||||
}
|
||||
return *info.AUM, nil
|
||||
}
|
||||
|
||||
// CommitTime returns the time at which the AUM was committed.
|
||||
//
|
||||
// If the AUM does not exist, then os.ErrNotExist is returned.
|
||||
func (c *FS) CommitTime(h AUMHash) (time.Time, error) {
|
||||
c.mu.RLock()
|
||||
defer c.mu.RUnlock()
|
||||
|
||||
info, err := c.get(h)
|
||||
if err != nil {
|
||||
if os.IsNotExist(err) {
|
||||
return time.Time{}, os.ErrNotExist
|
||||
}
|
||||
return time.Time{}, err
|
||||
}
|
||||
if info.PurgedUnix > 0 {
|
||||
return time.Time{}, os.ErrNotExist
|
||||
}
|
||||
if info.CreatedUnix > 0 {
|
||||
return time.Unix(info.CreatedUnix, 0), nil
|
||||
}
|
||||
|
||||
// If we got this far, the AUM exists but CreatedUnix is not
|
||||
// set, presumably because this AUM was committed using a version
|
||||
// of tailscaled that pre-dates the introduction of CreatedUnix.
|
||||
// As such, we use the file modification time as a suitable analog.
|
||||
dir, base := c.aumDir(h)
|
||||
s, err := os.Stat(filepath.Join(dir, base))
|
||||
if err != nil {
|
||||
return time.Time{}, nil
|
||||
}
|
||||
return s.ModTime(), nil
|
||||
}
|
||||
|
||||
// ChildAUMs returns any known AUMs with a specific parent hash.
//
// Not knowing the given hash at all is not an error; a nil slice
// is returned in that case.
func (c *FS) ChildAUMs(prevAUMHash AUMHash) ([]AUM, error) {
	c.mu.RLock()
	defer c.mu.RUnlock()

	info, err := c.get(prevAUMHash)
	if err != nil {
		if os.IsNotExist(err) {
			// not knowing about this hash is not an error
			return nil, nil
		}
		return nil, err
	}
	// NOTE(tom): We don't check PurgedUnix here because 'purged'
	// only applies to that specific AUM (i.e. info.AUM) and not to
	// any information about children stored against that hash.

	out := make([]AUM, len(info.Children))
	for i, h := range info.Children {
		c, err := c.get(h)
		if err != nil {
			// We expect any AUM recorded as a child on its parent to exist.
			return nil, fmt.Errorf("reading child %d of %x: %v", i, h, err)
		}
		if c.AUM == nil || c.PurgedUnix > 0 {
			return nil, fmt.Errorf("child %d of %x: AUM not stored", i, h)
		}
		out[i] = *c.AUM
	}

	return out, nil
}
|
||||
|
||||
// get reads and decodes the fsHashInfo for h from disk.
//
// It returns an error satisfying os.IsNotExist if no entry is stored
// for h. Callers must hold c.mu (read or write).
func (c *FS) get(h AUMHash) (*fsHashInfo, error) {
	dir, base := c.aumDir(h)
	f, err := os.Open(filepath.Join(dir, base))
	if err != nil {
		return nil, err
	}
	defer f.Close()

	m, err := cborDecOpts.DecMode()
	if err != nil {
		return nil, err
	}

	var out fsHashInfo
	if err := m.NewDecoder(f).Decode(&out); err != nil {
		return nil, err
	}
	// Guard against on-disk corruption or misplaced files: a stored
	// AUM must hash to the name of the file it is stored under.
	if out.AUM != nil && out.AUM.Hash() != h {
		return nil, fmt.Errorf("%s: AUM does not match file name hash %s", f.Name(), out.AUM.Hash())
	}
	return &out, nil
}
|
||||
|
||||
// Heads returns AUMs for which there are no children. In other
|
||||
// words, the latest AUM in all possible chains (the 'leaves').
|
||||
//
|
||||
// Heads is expected to be called infrequently compared to AUM() or
|
||||
// ChildAUMs(), so we haven't put any work into maintaining an index.
|
||||
// Instead, the full set of AUMs is scanned.
|
||||
func (c *FS) Heads() ([]AUM, error) {
|
||||
c.mu.RLock()
|
||||
defer c.mu.RUnlock()
|
||||
|
||||
out := make([]AUM, 0, 6) // 6 is arbitrary.
|
||||
err := c.scanHashes(func(info *fsHashInfo) {
|
||||
if len(info.Children) == 0 && info.AUM != nil && info.PurgedUnix == 0 {
|
||||
out = append(out, *info.AUM)
|
||||
}
|
||||
})
|
||||
return out, err
|
||||
}
|
||||
|
||||
// AllAUMs returns all AUMs stored in the chonk.
|
||||
func (c *FS) AllAUMs() ([]AUMHash, error) {
|
||||
c.mu.RLock()
|
||||
defer c.mu.RUnlock()
|
||||
|
||||
out := make([]AUMHash, 0, 6) // 6 is arbitrary.
|
||||
err := c.scanHashes(func(info *fsHashInfo) {
|
||||
if info.AUM != nil && info.PurgedUnix == 0 {
|
||||
out = append(out, info.AUM.Hash())
|
||||
}
|
||||
})
|
||||
return out, err
|
||||
}
|
||||
|
||||
// scanHashes invokes eachHashInfo for every entry stored on disk,
// in no particular order. Entries for purged AUMs are included;
// callers filter on PurgedUnix themselves. Callers must hold c.mu.
func (c *FS) scanHashes(eachHashInfo func(*fsHashInfo)) error {
	prefixDirs, err := os.ReadDir(c.base)
	if err != nil {
		return fmt.Errorf("reading prefix dirs: %v", err)
	}
	for _, prefix := range prefixDirs {
		// Skip non-directory entries such as last_active_ancestor.
		if !prefix.IsDir() {
			continue
		}
		files, err := os.ReadDir(filepath.Join(c.base, prefix.Name()))
		if err != nil {
			return fmt.Errorf("reading prefix dir: %v", err)
		}
		for _, file := range files {
			// File names are the base32 encoding of the AUM hash.
			var h AUMHash
			if err := h.UnmarshalText([]byte(file.Name())); err != nil {
				return fmt.Errorf("invalid aum file: %s: %w", file.Name(), err)
			}
			info, err := c.get(h)
			if err != nil {
				return fmt.Errorf("reading %x: %v", h, err)
			}

			eachHashInfo(info)
		}
	}

	return nil
}
|
||||
|
||||
// SetLastActiveAncestor is called to record the oldest-known AUM
|
||||
// that contributed to the current state. This value is used as
|
||||
// a hint on next startup to determine which chain to pick when computing
|
||||
// the current state, if there are multiple distinct chains.
|
||||
func (c *FS) SetLastActiveAncestor(hash AUMHash) error {
|
||||
c.mu.Lock()
|
||||
defer c.mu.Unlock()
|
||||
return atomicfile.WriteFile(filepath.Join(c.base, "last_active_ancestor"), hash[:], 0644)
|
||||
}
|
||||
|
||||
// LastActiveAncestor returns the oldest-known AUM that was (in a
// previous run) an ancestor of the current state. This is used
// as a hint to pick the correct chain in the event that the Chonk stores
// multiple distinct chains.
//
// Nil is returned if no last-active ancestor is set.
func (c *FS) LastActiveAncestor() (*AUMHash, error) {
	c.mu.RLock()
	defer c.mu.RUnlock()

	// The hash is stored as raw bytes in a well-known file.
	hash, err := os.ReadFile(filepath.Join(c.base, "last_active_ancestor"))
	if err != nil {
		if os.IsNotExist(err) {
			return nil, nil // Not exist == none set.
		}
		return nil, err
	}

	var out AUMHash
	if len(hash) != len(out) {
		return nil, fmt.Errorf("stored hash is of wrong length: %d != %d", len(hash), len(out))
	}
	copy(out[:], hash)
	return &out, nil
}
|
||||
|
||||
// CommitVerifiedAUMs durably stores the provided AUMs.
// Callers MUST ONLY provide AUMs which are verified (specifically,
// a call to aumVerify must return a nil error), as the
// implementation assumes that only verified AUMs are stored.
func (c *FS) CommitVerifiedAUMs(updates []AUM) error {
	c.mu.Lock()
	defer c.mu.Unlock()

	for i, aum := range updates {
		h := aum.Hash()
		// We keep track of children against their parent so that
		// ChildAUMs() do not need to scan all AUMs.
		// The parent entry is written first so that a child is never
		// visible without its parent knowing about it.
		parent, hasParent := aum.Parent()
		if hasParent {
			err := c.commit(parent, func(info *fsHashInfo) {
				// Only add it if its not already there.
				for i := range info.Children {
					if info.Children[i] == h {
						return
					}
				}
				info.Children = append(info.Children, h)
			})
			if err != nil {
				return fmt.Errorf("committing update[%d] to parent %x: %v", i, parent, err)
			}
		}

		err := c.commit(h, func(info *fsHashInfo) {
			info.PurgedUnix = 0 // just in-case it was set for some reason
			info.AUM = &aum
		})
		if err != nil {
			return fmt.Errorf("committing update[%d] (%x): %v", i, h, err)
		}
	}

	return nil
}
|
||||
|
||||
// PurgeAUMs marks the specified AUMs for deletion from storage.
//
// The entries are not removed from disk; they are tombstoned by
// setting PurgedUnix, so child bookkeeping survives (see fsHashInfo).
// AUMs already purged or not stored are skipped without error... as
// long as a bookkeeping entry exists for their hash.
func (c *FS) PurgeAUMs(hashes []AUMHash) error {
	c.mu.Lock()
	defer c.mu.Unlock()

	now := time.Now()
	for i, h := range hashes {
		stored, err := c.get(h)
		if err != nil {
			return fmt.Errorf("reading %d (%x): %w", i, h, err)
		}
		// Nothing to do if the AUM isn't stored or is already purged.
		if stored.AUM == nil || stored.PurgedUnix > 0 {
			continue
		}

		err = c.commit(h, func(info *fsHashInfo) {
			info.PurgedUnix = now.Unix()
		})
		if err != nil {
			return fmt.Errorf("committing purge[%d] (%x): %w", i, h, err)
		}
	}
	return nil
}
|
||||
|
||||
// commit calls the provided updater function to record changes relevant
// to the given hash. The caller is expected to update the AUM and
// Children fields, as relevant.
//
// The read-modify-write is atomic at the file level via
// atomicfile.WriteFile. Callers must hold c.mu for writing.
func (c *FS) commit(h AUMHash, updater func(*fsHashInfo)) error {
	toCommit := fsHashInfo{}

	existing, err := c.get(h)
	switch {
	case os.IsNotExist(err):
		// First write for this hash: stamp the creation time.
		toCommit.CreatedUnix = time.Now().Unix()
	case err != nil:
		return err
	default:
		toCommit = *existing
	}

	updater(&toCommit)
	// Refuse to write an AUM under a file name that isn't its hash.
	if toCommit.AUM != nil && toCommit.AUM.Hash() != h {
		return fmt.Errorf("cannot commit AUM with hash %x to %x", toCommit.AUM.Hash(), h)
	}

	dir, base := c.aumDir(h)
	if err := os.MkdirAll(dir, 0755); err != nil && !os.IsExist(err) {
		return fmt.Errorf("creating directory: %v", err)
	}

	m, err := cbor.CTAP2EncOptions().EncMode()
	if err != nil {
		return fmt.Errorf("cbor EncMode: %v", err)
	}

	var buff bytes.Buffer
	if err := m.NewEncoder(&buff).Encode(toCommit); err != nil {
		return fmt.Errorf("encoding: %v", err)
	}
	return atomicfile.WriteFile(filepath.Join(dir, base), buff.Bytes(), 0644)
}
|
||||
|
||||
// CompactionOptions describes tuneables to use when compacting a Chonk.
type CompactionOptions struct {
	// MinChain is the minimum number of ancestor AUMs to remember. The actual length
	// of the chain post-compaction may be longer to reach a Checkpoint AUM.
	MinChain int
	// MinAge is the minimum duration to store an AUM before it is a candidate for deletion.
	MinAge time.Duration
}
|
||||
|
||||
// retainState tracks the state of an AUM hash as it is being considered for
// deletion. It is a bitmask: multiple flags may be set for one hash.
type retainState uint8

// Valid retainState flags.
const (
	retainStateActive    retainState = 1 << iota // The AUM is part of the active chain and less than MinChain hops from HEAD.
	retainStateYoung                             // The AUM is younger than MinAge.
	retainStateLeaf                              // The AUM is a descendant of an AUM to be retained.
	retainStateAncestor                          // The AUM is part of a chain between a retained AUM and the new lastActiveAncestor.
	retainStateCandidate                         // The AUM is part of the active chain.

	// retainAUMMask is a bit mask of any bit which should prevent
	// the deletion of an AUM. Note that retainStateCandidate alone
	// does NOT prevent deletion.
	retainAUMMask retainState = retainStateActive | retainStateYoung | retainStateLeaf | retainStateAncestor
)
|
||||
|
||||
// markActiveChain marks AUMs in the active chain.
// All AUMs that are within minChain ancestors of head are
// marked retainStateActive, and all remaining ancestors are
// marked retainStateCandidate.
//
// markActiveChain returns the next ancestor AUM which is a checkpoint AUM.
func markActiveChain(storage Chonk, verdict map[AUMHash]retainState, minChain int, head AUMHash) (lastActiveAncestor AUMHash, err error) {
	next, err := storage.AUM(head)
	if err != nil {
		return AUMHash{}, err
	}

	// Phase 1: walk back minChain hops from head, retaining each AUM.
	for i := range minChain {
		h := next.Hash()
		verdict[h] |= retainStateActive

		parent, hasParent := next.Parent()
		if !hasParent {
			// Genesis AUM (beginning of time). The chain isnt long enough to need truncating.
			return h, nil
		}

		if next, err = storage.AUM(parent); err != nil {
			if err == os.ErrNotExist {
				// We've reached the end of the chain we have stored.
				return h, nil
			}
			return AUMHash{}, fmt.Errorf("reading active chain (retainStateActive) (%d): %w", i, err)
		}
	}

	// If we got this far, we have at least minChain AUMs stored, and minChain number
	// of ancestors have been marked for retention. We now continue to iterate backwards
	// till we find an AUM which we can compact to (a Checkpoint AUM).
	for {
		h := next.Hash()
		verdict[h] |= retainStateActive
		if next.MessageKind == AUMCheckpoint {
			lastActiveAncestor = h
			break
		}

		parent, hasParent := next.Parent()
		if !hasParent {
			return AUMHash{}, errors.New("reached genesis AUM without finding an appropriate lastActiveAncestor")
		}
		if next, err = storage.AUM(parent); err != nil {
			return AUMHash{}, fmt.Errorf("searching for compaction target: %w", err)
		}
	}

	// Phase 3: mark remaining known ancestors as retainStateCandidate.
	// These are deletable unless some other pass retains them.
	for {
		parent, hasParent := next.Parent()
		if !hasParent {
			break
		}
		verdict[parent] |= retainStateCandidate
		if next, err = storage.AUM(parent); err != nil {
			if err == os.ErrNotExist {
				// We've reached the end of the chain we have stored.
				break
			}
			return AUMHash{}, fmt.Errorf("reading active chain (retainStateCandidate): %w", err)
		}
	}

	return lastActiveAncestor, nil
}
|
||||
|
||||
// markYoungAUMs marks all AUMs younger than minAge for retention. All
|
||||
// candidate AUMs must exist in verdict.
|
||||
func markYoungAUMs(storage CompactableChonk, verdict map[AUMHash]retainState, minAge time.Duration) error {
|
||||
minTime := time.Now().Add(-minAge)
|
||||
for h := range verdict {
|
||||
commitTime, err := storage.CommitTime(h)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
if commitTime.After(minTime) {
|
||||
verdict[h] |= retainStateYoung
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// markAncestorIntersectionAUMs walks backwards from all AUMs to be retained,
// ensuring they intersect with candidateAncestor. All AUMs between a retained
// AUM and candidateAncestor are marked for retention.
//
// If there is no intersection between candidateAncestor and the ancestors of
// a retained AUM (this can happen if a retained AUM intersects the main chain
// before candidateAncestor) then candidate ancestor is recomputed based on
// the new oldest intersection.
//
// The final value for lastActiveAncestor is returned.
func markAncestorIntersectionAUMs(storage Chonk, verdict map[AUMHash]retainState, candidateAncestor AUMHash) (lastActiveAncestor AUMHash, err error) {
	// Seed the worklist with every hash already marked for retention.
	toScan := make([]AUMHash, 0, len(verdict))
	for h, v := range verdict {
		if (v & retainAUMMask) == 0 {
			continue // not marked for retention, so dont need to consider it
		}
		if h == candidateAncestor {
			continue
		}
		toScan = append(toScan, h)
	}

	var didAdjustCandidateAncestor bool
	// Breadth-first walk towards ancestors until every retained AUM's
	// lineage has been connected to the (possibly adjusted) candidate.
	for len(toScan) > 0 {
		nextIterScan := make([]AUMHash, 0, len(verdict))
		for _, h := range toScan {
			if verdict[h]&retainStateAncestor != 0 {
				// This AUM and its ancestors have already been iterated.
				continue
			}
			verdict[h] |= retainStateAncestor

			a, err := storage.AUM(h)
			if err != nil {
				return AUMHash{}, fmt.Errorf("reading %v: %w", h, err)
			}
			parent, hasParent := a.Parent()
			if !hasParent {
				return AUMHash{}, errors.New("reached genesis AUM without intersecting with candidate ancestor")
			}

			if verdict[parent]&retainAUMMask != 0 {
				// Includes candidateAncestor (has retainStateActive set)
				continue
			}
			if verdict[parent]&retainStateCandidate != 0 {
				// We've intersected with the active chain but haven't done so through
				// candidateAncestor. That means that we intersect the active chain
				// before candidateAncestor, hence candidateAncestor actually needs
				// to be earlier than it is now.
				candidateAncestor = parent
				didAdjustCandidateAncestor = true
				verdict[parent] |= retainStateAncestor

				// There could be AUMs on the active chain between our new candidateAncestor
				// and the old one, make sure they are marked as retained.
				next := parent
			childLoop:
				for {
					children, err := storage.ChildAUMs(next)
					if err != nil {
						return AUMHash{}, fmt.Errorf("reading children %v: %w", next, err)
					}
					// While there can be many children of an AUM, there can only be
					// one child on the active chain (it will have retainStateCandidate set).
					for _, a := range children {
						h := a.Hash()
						if v := verdict[h]; v&retainStateCandidate != 0 && v&retainStateActive == 0 {
							verdict[h] |= retainStateAncestor
							next = h
							continue childLoop
						}
					}
					break
				}
			}

			nextIterScan = append(nextIterScan, parent)
		}
		toScan = nextIterScan
	}

	// If candidateAncestor was adjusted backwards, then it may not be a checkpoint
	// (and hence a valid compaction candidate). If so, iterate backwards and adjust
	// the candidateAncestor till we find a checkpoint.
	if didAdjustCandidateAncestor {
		var next AUM
		if next, err = storage.AUM(candidateAncestor); err != nil {
			return AUMHash{}, fmt.Errorf("searching for compaction target: %w", err)
		}

		for {
			h := next.Hash()
			verdict[h] |= retainStateActive
			if next.MessageKind == AUMCheckpoint {
				candidateAncestor = h
				break
			}

			parent, hasParent := next.Parent()
			if !hasParent {
				return AUMHash{}, errors.New("reached genesis AUM without finding an appropriate candidateAncestor")
			}
			if next, err = storage.AUM(parent); err != nil {
				return AUMHash{}, fmt.Errorf("searching for compaction target: %w", err)
			}
		}
	}

	return candidateAncestor, nil
}
|
||||
|
||||
// markDescendantAUMs marks all children of a retained AUM as retained.
|
||||
func markDescendantAUMs(storage Chonk, verdict map[AUMHash]retainState) error {
|
||||
toScan := make([]AUMHash, 0, len(verdict))
|
||||
for h, v := range verdict {
|
||||
if v&retainAUMMask == 0 {
|
||||
continue // not marked, so dont need to mark descendants
|
||||
}
|
||||
toScan = append(toScan, h)
|
||||
}
|
||||
|
||||
for len(toScan) > 0 {
|
||||
nextIterScan := make([]AUMHash, 0, len(verdict))
|
||||
for _, h := range toScan {
|
||||
if verdict[h]&retainStateLeaf != 0 {
|
||||
// This AUM and its descendants have already been marked.
|
||||
continue
|
||||
}
|
||||
verdict[h] |= retainStateLeaf
|
||||
|
||||
children, err := storage.ChildAUMs(h)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
for _, a := range children {
|
||||
nextIterScan = append(nextIterScan, a.Hash())
|
||||
}
|
||||
}
|
||||
toScan = nextIterScan
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// Compact deletes old AUMs from storage, based on the parameters given in opts.
//
// Compaction proceeds in phases: every stored AUM starts with an empty
// retention verdict, the marking passes below set retention bits, and any
// AUM left with no bits set is purged. The returned hash is the new
// last-active-ancestor, which is also persisted to storage.
func Compact(storage CompactableChonk, head AUMHash, opts CompactionOptions) (lastActiveAncestor AUMHash, err error) {
	if opts.MinChain == 0 {
		return AUMHash{}, errors.New("opts.MinChain must be set")
	}
	if opts.MinAge == 0 {
		return AUMHash{}, errors.New("opts.MinAge must be set")
	}

	all, err := storage.AllAUMs()
	if err != nil {
		return AUMHash{}, fmt.Errorf("AllAUMs: %w", err)
	}
	// Start every AUM with an empty verdict; the mark passes below OR-in
	// retention bits.
	verdict := make(map[AUMHash]retainState, len(all))
	for _, h := range all {
		verdict[h] = 0
	}

	// Phase 1: retain the last opts.MinChain AUMs of the active chain.
	if lastActiveAncestor, err = markActiveChain(storage, verdict, opts.MinChain, head); err != nil {
		return AUMHash{}, fmt.Errorf("marking active chain: %w", err)
	}
	// Phase 2: retain AUMs younger than opts.MinAge.
	if err := markYoungAUMs(storage, verdict, opts.MinAge); err != nil {
		return AUMHash{}, fmt.Errorf("marking young AUMs: %w", err)
	}
	// Phase 3: retain all descendants of retained AUMs.
	if err := markDescendantAUMs(storage, verdict); err != nil {
		return AUMHash{}, fmt.Errorf("marking descendant AUMs: %w", err)
	}
	// Phase 4: retain ancestors of retained AUMs up to the (possibly
	// adjusted) candidate ancestor. This can move lastActiveAncestor
	// earlier, so the result replaces the value from phase 1.
	if lastActiveAncestor, err = markAncestorIntersectionAUMs(storage, verdict, lastActiveAncestor); err != nil {
		return AUMHash{}, fmt.Errorf("marking ancestor intersection: %w", err)
	}

	toDelete := make([]AUMHash, 0, len(verdict))
	for h, v := range verdict {
		if v&retainAUMMask == 0 { // no retention set
			toDelete = append(toDelete, h)
		}
	}

	// Persist the new ancestor before purging, so a crash between the two
	// writes never leaves the ancestor pointing at a purged AUM.
	if err := storage.SetLastActiveAncestor(lastActiveAncestor); err != nil {
		return AUMHash{}, err
	}
	return lastActiveAncestor, storage.PurgeAUMs(toDelete)
}
|
||||
850
vendor/tailscale.com/tka/tka.go
generated
vendored
Normal file
850
vendor/tailscale.com/tka/tka.go
generated
vendored
Normal file
@@ -0,0 +1,850 @@
|
||||
// Copyright (c) Tailscale Inc & AUTHORS
|
||||
// SPDX-License-Identifier: BSD-3-Clause
|
||||
|
||||
// Package tka (WIP) implements the Tailnet Key Authority.
|
||||
package tka
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"errors"
|
||||
"fmt"
|
||||
"os"
|
||||
"sort"
|
||||
|
||||
"github.com/fxamacker/cbor/v2"
|
||||
"tailscale.com/types/key"
|
||||
"tailscale.com/types/tkatype"
|
||||
"tailscale.com/util/set"
|
||||
)
|
||||
|
||||
// Strict settings for the CBOR decoder.
//
// Strictness here is a hardening measure: AUMs and signatures arrive from
// the network, so duplicate map keys, indefinite-length items, and tags
// are all rejected outright.
var cborDecOpts = cbor.DecOptions{
	DupMapKey:   cbor.DupMapKeyEnforcedAPF,
	IndefLength: cbor.IndefLengthForbidden,
	TagsMd:      cbor.TagsForbidden,

	// Arbitrarily-chosen maximums.
	MaxNestedLevels:  16, // Most likely to be hit for SigRotation sigs.
	MaxArrayElements: 4096,
	MaxMapPairs:      1024,
}

// Arbitrarily chosen limit on scanning AUM trees.
const maxScanIterations = 2000
|
||||
|
||||
// Authority is a Tailnet Key Authority. This type is the main coupling
// point to the rest of the tailscale client.
//
// Authority objects can either be created from an existing, non-empty
// tailchonk (via tka.Open()), or created from scratch using tka.Bootstrap()
// or tka.Create().
type Authority struct {
	head           AUM   // latest AUM on the active chain
	oldestAncestor AUM   // oldest locally-known AUM of the active chain
	state          State // state obtained by applying the chain up to head
}
|
||||
|
||||
// Clone duplicates the Authority structure.
|
||||
func (a *Authority) Clone() *Authority {
|
||||
return &Authority{
|
||||
head: a.head,
|
||||
oldestAncestor: a.oldestAncestor,
|
||||
state: a.state.Clone(),
|
||||
}
|
||||
}
|
||||
|
||||
// A chain describes a linear sequence of updates from Oldest to Head,
// resulting in some State at Head.
type chain struct {
	Oldest AUM // earliest AUM in the chain
	Head   AUM // latest AUM in the chain

	// state is the State computed at Head. It is not populated by
	// computeChainCandidates; computeActiveChain fills it in.
	state State

	// Set to true if the AUM chain intersects with the active
	// chain from a previous run.
	chainsThroughActive bool
}
|
||||
|
||||
// computeChainCandidates returns all possible chains based on AUMs stored
|
||||
// in the given tailchonk. A chain is defined as a unique (oldest, newest)
|
||||
// AUM tuple. chain.state is not yet populated in returned chains.
|
||||
//
|
||||
// If lastKnownOldest is provided, any chain that includes the given AUM
|
||||
// has the chainsThroughActive field set to true. This bit is leveraged
|
||||
// in computeActiveAncestor() to filter out irrelevant chains when determining
|
||||
// the active ancestor from a list of distinct chains.
|
||||
func computeChainCandidates(storage Chonk, lastKnownOldest *AUMHash, maxIter int) ([]chain, error) {
|
||||
heads, err := storage.Heads()
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("reading heads: %v", err)
|
||||
}
|
||||
candidates := make([]chain, len(heads))
|
||||
for i := range heads {
|
||||
// Oldest is iteratively computed below.
|
||||
candidates[i] = chain{Oldest: heads[i], Head: heads[i]}
|
||||
}
|
||||
// Not strictly necessary, but simplifies checks in tests.
|
||||
sort.Slice(candidates, func(i, j int) bool {
|
||||
ih, jh := candidates[i].Oldest.Hash(), candidates[j].Oldest.Hash()
|
||||
return bytes.Compare(ih[:], jh[:]) < 0
|
||||
})
|
||||
|
||||
// candidates.Oldest needs to be computed by working backwards from
|
||||
// head as far as we can.
|
||||
iterAgain := true // if theres still work to be done.
|
||||
for i := 0; iterAgain; i++ {
|
||||
if i >= maxIter {
|
||||
return nil, fmt.Errorf("iteration limit exceeded (%d)", maxIter)
|
||||
}
|
||||
|
||||
iterAgain = false
|
||||
for j := range candidates {
|
||||
parent, hasParent := candidates[j].Oldest.Parent()
|
||||
if hasParent {
|
||||
parent, err := storage.AUM(parent)
|
||||
if err != nil {
|
||||
if err == os.ErrNotExist {
|
||||
continue
|
||||
}
|
||||
return nil, fmt.Errorf("reading parent: %v", err)
|
||||
}
|
||||
candidates[j].Oldest = parent
|
||||
if lastKnownOldest != nil && *lastKnownOldest == parent.Hash() {
|
||||
candidates[j].chainsThroughActive = true
|
||||
}
|
||||
iterAgain = true
|
||||
}
|
||||
}
|
||||
}
|
||||
return candidates, nil
|
||||
}
|
||||
|
||||
// pickNextAUM returns the AUM which should be used as the next
// AUM in the chain, possibly applying fork resolution logic.
//
// In other words: given an AUM with 3 children like this:
//
//	/ - 1
//	P - 2
//	\ - 3
//
// pickNextAUM will determine and return the correct branch.
//
// This method takes ownership of the provided slice.
func pickNextAUM(state State, candidates []AUM) AUM {
	switch len(candidates) {
	case 0:
		panic("pickNextAUM called with empty candidate set")
	case 1:
		return candidates[0]
	}

	// Oooof, we have some forks in the chain. We need to pick which
	// one to use by applying the Fork Resolution Algorithm ✨
	//
	// The rules are this:
	// 1. The child with the highest signature weight is chosen.
	// 2. If equal, the child which is a RemoveKey AUM is chosen.
	// 3. If equal, the child with the lowest AUM hash is chosen.
	//
	// NOTE: the comparator's parameters are deliberately declared in
	// (j, i) order — reversed from sort.Slice's usual (i, j) — so the
	// winning candidate per the rules above is sorted to index 0.
	sort.Slice(candidates, func(j, i int) bool {
		// Rule 1.
		iSigWeight, jSigWeight := candidates[i].Weight(state), candidates[j].Weight(state)
		if iSigWeight != jSigWeight {
			return iSigWeight < jSigWeight
		}

		// Rule 2.
		if iKind, jKind := candidates[i].MessageKind, candidates[j].MessageKind; iKind != jKind &&
			(iKind == AUMRemoveKey || jKind == AUMRemoveKey) {
			return jKind == AUMRemoveKey
		}

		// Rule 3.
		iHash, jHash := candidates[i].Hash(), candidates[j].Hash()
		return bytes.Compare(iHash[:], jHash[:]) > 0
	})

	return candidates[0]
}
|
||||
|
||||
// advanceByPrimary computes the next AUM to advance with based on
|
||||
// deterministic fork-resolution rules. All nodes should apply this logic
|
||||
// when computing the primary chain, hence achieving consensus on what the
|
||||
// primary chain (and hence, the shared state) is.
|
||||
//
|
||||
// This method returns the chosen AUM & the state obtained by applying that
|
||||
// AUM.
|
||||
//
|
||||
// The return value for next is nil if there are no children AUMs, hence
|
||||
// the provided state is at head (up to date).
|
||||
func advanceByPrimary(state State, candidates []AUM) (next *AUM, out State, err error) {
|
||||
if len(candidates) == 0 {
|
||||
return nil, state, nil
|
||||
}
|
||||
|
||||
aum := pickNextAUM(state, candidates)
|
||||
|
||||
if state, err = state.applyVerifiedAUM(aum); err != nil {
|
||||
return nil, State{}, fmt.Errorf("advancing state: %v", err)
|
||||
}
|
||||
return &aum, state, nil
|
||||
}
|
||||
|
||||
// fastForwardWithAdvancer iteratively advances the current state by calling
// the given advancer to get+apply the next update. This process is repeated
// until the given termination function returns true or there is no more
// progress possible.
//
// The last-processed AUM, and the state computed after applying the last AUM,
// are returned.
//
// done may be nil, in which case iteration continues until the advancer
// reports no further children. Iteration is bounded by maxIter.
func fastForwardWithAdvancer(
	storage Chonk, maxIter int, startState State,
	advancer func(state State, candidates []AUM) (next *AUM, out State, err error),
	done func(curAUM AUM, curState State) bool,
) (AUM, State, error) {
	if startState.LastAUMHash == nil {
		return AUM{}, State{}, errors.New("invalid initial state")
	}
	// The walk starts at the AUM the start state was computed from.
	nextAUM, err := storage.AUM(*startState.LastAUMHash)
	if err != nil {
		return AUM{}, State{}, fmt.Errorf("reading next: %v", err)
	}

	curs := nextAUM
	state := startState
	for range maxIter {
		// done is checked before advancing, so the start AUM itself can
		// satisfy the termination condition.
		if done != nil && done(curs, state) {
			return curs, state, nil
		}

		children, err := storage.ChildAUMs(curs.Hash())
		if err != nil {
			return AUM{}, State{}, fmt.Errorf("getting children of %X: %v", curs.Hash(), err)
		}
		next, nextState, err := advancer(state, children)
		if err != nil {
			return AUM{}, State{}, fmt.Errorf("advance %X: %v", curs.Hash(), err)
		}
		if next == nil {
			// There were no more children, we are at 'head'.
			return curs, state, nil
		}
		curs = *next
		state = nextState
	}

	return AUM{}, State{}, fmt.Errorf("iteration limit exceeded (%d)", maxIter)
}
|
||||
|
||||
// fastForward iteratively advances the current state based on known AUMs until
// the given termination function returns true or there is no more progress possible.
//
// The last-processed AUM, and the state computed after applying the last AUM,
// are returned.
//
// This is fastForwardWithAdvancer specialized to the consensus
// fork-resolution rules (advanceByPrimary).
func fastForward(storage Chonk, maxIter int, startState State, done func(curAUM AUM, curState State) bool) (AUM, State, error) {
	return fastForwardWithAdvancer(storage, maxIter, startState, advanceByPrimary, done)
}
|
||||
|
||||
// computeStateAt returns the State at wantHash.
//
// It walks backwards from wantHash to the nearest checkpoint (or genesis)
// AUM, recording the hashes visited, then replays updates forward along
// exactly that recorded path.
func computeStateAt(storage Chonk, maxIter int, wantHash AUMHash) (State, error) {
	topAUM, err := storage.AUM(wantHash)
	if err != nil {
		return State{}, err
	}

	// Iterate backwards till we find a starting point to compute
	// the state from.
	//
	// Valid starting points are either a checkpoint AUM, or a
	// genesis AUM.
	var (
		curs  = topAUM
		state State
		path  = make(set.Set[AUMHash], 32) // 32 chosen arbitrarily.
	)
	for i := 0; true; i++ {
		if i > maxIter {
			return State{}, fmt.Errorf("iteration limit exceeded (%d)", maxIter)
		}
		// Record every hash visited so the forward replay below can
		// follow this exact path, even off the primary fork.
		path.Add(curs.Hash())

		// Checkpoints encapsulate the state at that point, dope.
		if curs.MessageKind == AUMCheckpoint {
			state = curs.State.cloneForUpdate(&curs)
			break
		}
		parent, hasParent := curs.Parent()
		if !hasParent {
			// This is a 'genesis' update: there are none before it, so
			// this AUM can be applied to the empty state to determine
			// the state at this AUM.
			//
			// It is only valid for NoOp, AddKey, and Checkpoint AUMs
			// to be a genesis update. Checkpoint was handled earlier.
			if mk := curs.MessageKind; mk == AUMNoOp || mk == AUMAddKey {
				var err error
				if state, err = (State{}).applyVerifiedAUM(curs); err != nil {
					return State{}, fmt.Errorf("applying genesis (%+v): %v", curs, err)
				}
				break
			}
			return State{}, fmt.Errorf("invalid genesis update: %+v", curs)
		}

		// If we got here, the current state is dependent on the previous.
		// Keep iterating backwards till thats not the case.
		if curs, err = storage.AUM(parent); err != nil {
			return State{}, fmt.Errorf("reading parent: %v", err)
		}
	}

	// We now know some starting point state. Iterate forward till we
	// are at the AUM we want state for.
	//
	// We want to fast forward based on the path we took above, which
	// (in the case of a non-primary fork) may differ from a regular
	// fast-forward (which follows standard fork-resolution rules). As
	// such, we use a custom advancer here.
	advancer := func(state State, candidates []AUM) (next *AUM, out State, err error) {
		for _, c := range candidates {
			if path.Contains(c.Hash()) {
				if state, err = state.applyVerifiedAUM(c); err != nil {
					return nil, State{}, fmt.Errorf("advancing state: %v", err)
				}
				// NOTE(review): &c relies on per-iteration loop variables
				// (Go 1.22+) — confirm the module's go directive is >= 1.22;
				// the `for range maxIter` above implies it is.
				return &c, state, nil
			}
		}

		return nil, State{}, errors.New("no candidate matching path")
	}
	_, state, err = fastForwardWithAdvancer(storage, maxIter, state, advancer, func(curs AUM, _ State) bool {
		return curs.Hash() == wantHash
	})
	// fastForward only terminates before the done condition if it
	// doesnt have any later AUMs to process. This cant be the case
	// as we've already iterated through them above so they must exist,
	// but we check anyway to be super duper sure.
	if err == nil && *state.LastAUMHash != wantHash {
		return State{}, errors.New("unexpected fastForward outcome")
	}
	return state, err
}
|
||||
|
||||
// computeActiveAncestor determines which ancestor AUM to use as the
|
||||
// ancestor of the valid chain.
|
||||
//
|
||||
// If all the chains end up having the same ancestor, then thats the
|
||||
// only possible ancestor, ezpz. However if there are multiple distinct
|
||||
// ancestors, that means there are distinct chains, and we need some
|
||||
// hint to choose what to use. For that, we rely on the chainsThroughActive
|
||||
// bit, which signals to us that that ancestor was part of the
|
||||
// chain in a previous run.
|
||||
func computeActiveAncestor(storage Chonk, chains []chain) (AUMHash, error) {
|
||||
// Dedupe possible ancestors, tracking if they were part of
|
||||
// the active chain on a previous run.
|
||||
ancestors := make(map[AUMHash]bool, len(chains))
|
||||
for _, c := range chains {
|
||||
ancestors[c.Oldest.Hash()] = c.chainsThroughActive
|
||||
}
|
||||
|
||||
if len(ancestors) == 1 {
|
||||
// There's only one. DOPE.
|
||||
for k := range ancestors {
|
||||
return k, nil
|
||||
}
|
||||
}
|
||||
|
||||
// Theres more than one, so we need to use the ancestor that was
|
||||
// part of the active chain in a previous iteration.
|
||||
// Note that there can only be one distinct ancestor that was
|
||||
// formerly part of the active chain, because AUMs can only have
|
||||
// one parent and would have converged to a common ancestor.
|
||||
for k, chainsThroughActive := range ancestors {
|
||||
if chainsThroughActive {
|
||||
return k, nil
|
||||
}
|
||||
}
|
||||
|
||||
return AUMHash{}, errors.New("multiple distinct chains")
|
||||
}
|
||||
|
||||
// computeActiveChain bootstraps the runtime state of the Authority when
// starting entirely off stored state.
//
// TODO(tom): Don't look at head states, just iterate forward from
// the ancestor.
//
// The algorithm is as follows:
// 1. Determine all possible 'head' (like in git) states.
// 2. Filter these possible chains based on whether the ancestor was
//    formerly (in a previous run) part of the chain.
// 3. Compute the state of the state machine at this ancestor. This is
//    needed for fast-forward, as each update operates on the state of
//    the update preceding it.
// 4. Iteratively apply updates till we reach head ('fast forward').
func computeActiveChain(storage Chonk, lastKnownOldest *AUMHash, maxIter int) (chain, error) {
	chains, err := computeChainCandidates(storage, lastKnownOldest, maxIter)
	if err != nil {
		return chain{}, fmt.Errorf("computing candidates: %v", err)
	}

	// Find the right ancestor.
	oldestHash, err := computeActiveAncestor(storage, chains)
	if err != nil {
		return chain{}, fmt.Errorf("computing ancestor: %v", err)
	}
	ancestor, err := storage.AUM(oldestHash)
	if err != nil {
		return chain{}, err
	}

	// At this stage we know the ancestor AUM, so we have excluded distinct
	// chains but we might still have forks (so we don't know the head AUM).
	//
	// We iterate forward from the ancestor AUM, handling any forks as we go
	// till we arrive at a head.
	out := chain{Oldest: ancestor, Head: ancestor}
	if out.state, err = computeStateAt(storage, maxIter, oldestHash); err != nil {
		return chain{}, fmt.Errorf("bootstrapping state: %v", err)
	}
	// fastForward with a nil done func runs until there are no more
	// children, i.e. until head.
	out.Head, out.state, err = fastForward(storage, maxIter, out.state, nil)
	if err != nil {
		return chain{}, fmt.Errorf("fast forward: %v", err)
	}
	return out, nil
}
|
||||
|
||||
// aumVerify verifies if an AUM is well-formed, correctly signed, and
|
||||
// can be accepted for storage.
|
||||
func aumVerify(aum AUM, state State, isGenesisAUM bool) error {
|
||||
if err := aum.StaticValidate(); err != nil {
|
||||
return fmt.Errorf("invalid: %v", err)
|
||||
}
|
||||
if !isGenesisAUM {
|
||||
if err := checkParent(aum, state); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
||||
if len(aum.Signatures) == 0 {
|
||||
return errors.New("unsigned AUM")
|
||||
}
|
||||
sigHash := aum.SigHash()
|
||||
for i, sig := range aum.Signatures {
|
||||
key, err := state.GetKey(sig.KeyID)
|
||||
if err != nil {
|
||||
return fmt.Errorf("bad keyID on signature %d: %v", i, err)
|
||||
}
|
||||
if err := signatureVerify(&sig, sigHash, key); err != nil {
|
||||
return fmt.Errorf("signature %d: %v", i, err)
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func checkParent(aum AUM, state State) error {
|
||||
parent, hasParent := aum.Parent()
|
||||
if !hasParent {
|
||||
return errors.New("aum has no parent")
|
||||
}
|
||||
if state.LastAUMHash == nil {
|
||||
return errors.New("cannot check update parent hash against a state with no previous AUM")
|
||||
}
|
||||
if *state.LastAUMHash != parent {
|
||||
return fmt.Errorf("aum with parent %x cannot be applied to a state with parent %x", state.LastAUMHash, parent)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// Head returns the AUM digest of the latest update applied to the state
// machine.
//
// NOTE(review): this dereferences state.LastAUMHash without a nil check,
// so it assumes the Authority was constructed via Open/Create/Bootstrap
// (which populate state) — confirm callers never invoke Head on a zero
// Authority.
func (a *Authority) Head() AUMHash {
	return *a.state.LastAUMHash
}
|
||||
|
||||
// Open initializes an existing TKA from the given tailchonk.
|
||||
//
|
||||
// Only use this if the current node has initialized an Authority before.
|
||||
// If a TKA exists on other nodes but theres nothing locally, use Bootstrap().
|
||||
// If no TKA exists anywhere and you are creating it for the first
|
||||
// time, use New().
|
||||
func Open(storage Chonk) (*Authority, error) {
|
||||
a, err := storage.LastActiveAncestor()
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("reading last ancestor: %v", err)
|
||||
}
|
||||
|
||||
c, err := computeActiveChain(storage, a, maxScanIterations)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("active chain: %v", err)
|
||||
}
|
||||
|
||||
return &Authority{
|
||||
head: c.Head,
|
||||
oldestAncestor: c.Oldest,
|
||||
state: c.state,
|
||||
}, nil
|
||||
}
|
||||
|
||||
// Create initializes a brand-new TKA, generating a genesis update
|
||||
// and committing it to the given storage.
|
||||
//
|
||||
// The given signer must also be present in state as a trusted key.
|
||||
//
|
||||
// Do not use this to initialize a TKA that already exists, use Open()
|
||||
// or Bootstrap() instead.
|
||||
func Create(storage Chonk, state State, signer Signer) (*Authority, AUM, error) {
|
||||
// Generate & sign a checkpoint, our genesis update.
|
||||
genesis := AUM{
|
||||
MessageKind: AUMCheckpoint,
|
||||
State: &state,
|
||||
}
|
||||
if err := genesis.StaticValidate(); err != nil {
|
||||
// This serves as an easy way to validate the given state.
|
||||
return nil, AUM{}, fmt.Errorf("invalid state: %v", err)
|
||||
}
|
||||
sigs, err := signer.SignAUM(genesis.SigHash())
|
||||
if err != nil {
|
||||
return nil, AUM{}, fmt.Errorf("signing failed: %v", err)
|
||||
}
|
||||
genesis.Signatures = append(genesis.Signatures, sigs...)
|
||||
|
||||
a, err := Bootstrap(storage, genesis)
|
||||
return a, genesis, err
|
||||
}
|
||||
|
||||
// Bootstrap initializes a TKA based on the given checkpoint.
//
// Call this when setting up a new nodes' TKA, but other nodes
// with initialized TKA's exist.
//
// Pass the returned genesis AUM from Create(), or a later checkpoint AUM.
//
// TODO(tom): We should test an authority bootstrapped from a later checkpoint
// works fine with sync and everything.
func Bootstrap(storage Chonk, bootstrap AUM) (*Authority, error) {
	// Refuse to bootstrap over existing data.
	heads, err := storage.Heads()
	if err != nil {
		return nil, fmt.Errorf("reading heads: %v", err)
	}
	if len(heads) != 0 {
		return nil, errors.New("tailchonk is not empty")
	}

	// Check the AUM is well-formed.
	if bootstrap.MessageKind != AUMCheckpoint {
		return nil, fmt.Errorf("bootstrap AUMs must be checkpoint messages, got %v", bootstrap.MessageKind)
	}
	if bootstrap.State == nil {
		return nil, errors.New("bootstrap AUM is missing state")
	}
	// The checkpoint's own embedded state is used to verify its signatures
	// (isGenesisAUM=true skips the parent check).
	if err := aumVerify(bootstrap, *bootstrap.State, true); err != nil {
		return nil, fmt.Errorf("invalid bootstrap: %v", err)
	}

	// Everything looks good, write it to storage.
	if err := storage.CommitVerifiedAUMs([]AUM{bootstrap}); err != nil {
		return nil, fmt.Errorf("commit: %v", err)
	}
	if err := storage.SetLastActiveAncestor(bootstrap.Hash()); err != nil {
		return nil, fmt.Errorf("set ancestor: %v", err)
	}

	// Re-open from storage so the Authority reflects exactly what was
	// persisted.
	return Open(storage)
}
|
||||
|
||||
// ValidDisablement returns true if the disablement secret was correct.
//
// If this method returns true, the caller should shut down the authority
// and purge all network-lock state.
func (a *Authority) ValidDisablement(secret []byte) bool {
	// Delegates the actual secret comparison to the state.
	return a.state.checkDisablement(secret)
}
|
||||
|
||||
// InformIdempotent returns a new Authority based on applying the given
// updates, with the given updates committed to storage.
//
// If any of the updates could not be applied:
//   - An error is returned
//   - No changes to storage are made.
//
// MissingAUMs() should be used to get a list of updates appropriate for
// this function. In any case, updates should be ordered oldest to newest.
func (a *Authority) InformIdempotent(storage Chonk, updates []AUM) (Authority, error) {
	if len(updates) == 0 {
		return Authority{}, errors.New("inform called with empty slice")
	}
	// stateAt memoizes the State after each AUM, keyed by that AUM's hash,
	// so sibling updates sharing a parent don't recompute it.
	stateAt := make(map[AUMHash]State, len(updates)+1)
	toCommit := make([]AUM, 0, len(updates))
	prevHash := a.Head()

	// The state at HEAD is the current state of the authority. Its likely
	// to be needed, so we prefill it rather than computing it.
	stateAt[prevHash] = a.state

	// Optimization: If the set of updates is a chain building from
	// the current head, EG:
	//   <a.Head()> ==> updates[0] ==> updates[1] ...
	// Then theres no need to recompute the resulting state from the
	// stored ancestor, because the last state computed during iteration
	// is the new state. This should be the common case.
	// isHeadChain keeps track of this.
	isHeadChain := true

	for i, update := range updates {
		hash := update.Hash()
		// Check if we already have this AUM thus don't need to process it.
		if _, err := storage.AUM(hash); err == nil {
			isHeadChain = false // Disable the head-chain optimization.
			continue
		}

		parent, hasParent := update.Parent()
		if !hasParent {
			return Authority{}, fmt.Errorf("update %d: missing parent", i)
		}

		// Get (or lazily compute and memoize) the state at the parent,
		// which this update applies on top of.
		state, hasState := stateAt[parent]
		var err error
		if !hasState {
			if state, err = computeStateAt(storage, maxScanIterations, parent); err != nil {
				return Authority{}, fmt.Errorf("update %d computing state: %v", i, err)
			}
			stateAt[parent] = state
		}

		if err := aumVerify(update, state, false); err != nil {
			return Authority{}, fmt.Errorf("update %d invalid: %v", i, err)
		}
		if stateAt[hash], err = state.applyVerifiedAUM(update); err != nil {
			return Authority{}, fmt.Errorf("update %d cannot be applied: %v", i, err)
		}

		// The head-chain fastpath only holds while each update's parent
		// is the previously-processed hash.
		if isHeadChain && parent != prevHash {
			isHeadChain = false
		}
		prevHash = hash
		toCommit = append(toCommit, update)
	}

	if err := storage.CommitVerifiedAUMs(toCommit); err != nil {
		return Authority{}, fmt.Errorf("commit: %v", err)
	}

	if isHeadChain {
		// Head-chain fastpath: We can use the state we computed
		// in the last iteration.
		return Authority{
			head:           updates[len(updates)-1],
			oldestAncestor: a.oldestAncestor,
			state:          stateAt[prevHash],
		}, nil
	}

	// Slow path: recompute the active chain from storage, since the new
	// updates may have created or resolved forks.
	oldestAncestor := a.oldestAncestor.Hash()
	c, err := computeActiveChain(storage, &oldestAncestor, maxScanIterations)
	if err != nil {
		return Authority{}, fmt.Errorf("recomputing active chain: %v", err)
	}
	return Authority{
		head:           c.Head,
		oldestAncestor: c.Oldest,
		state:          c.state,
	}, nil
}
|
||||
|
||||
// Inform is the same as InformIdempotent, except the state of the Authority
|
||||
// is updated in-place.
|
||||
func (a *Authority) Inform(storage Chonk, updates []AUM) error {
|
||||
newAuthority, err := a.InformIdempotent(storage, updates)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
*a = newAuthority
|
||||
return nil
|
||||
}
|
||||
|
||||
// NodeKeyAuthorized checks if the provided nodeKeySignature authorizes
// the given node key.
func (a *Authority) NodeKeyAuthorized(nodeKey key.NodePublic, nodeKeySignature tkatype.MarshaledSignature) error {
	// Thin wrapper: discard the RotationDetails, keep only the verdict.
	_, err := a.NodeKeyAuthorizedWithDetails(nodeKey, nodeKeySignature)
	return err
}
|
||||
|
||||
// NodeKeyAuthorizedWithDetails checks if the provided nodeKeySignature
// authorizes the given node key, and returns RotationDetails if the
// signature is a valid rotation signature.
func (a *Authority) NodeKeyAuthorizedWithDetails(nodeKey key.NodePublic, nodeKeySignature tkatype.MarshaledSignature) (*RotationDetails, error) {
	var decoded NodeKeySignature
	if err := decoded.Unserialize(nodeKeySignature); err != nil {
		return nil, fmt.Errorf("unserialize: %v", err)
	}
	// A bare credential signature delegates trust but does not itself name
	// a node; it must be nested inside another signature kind.
	if decoded.SigKind == SigCredential {
		return nil, errors.New("credential signatures cannot authorize nodes on their own")
	}

	// Resolve the key that (transitively) authorizes this signature ...
	kID, err := decoded.authorizingKeyID()
	if err != nil {
		return nil, err
	}

	// ... and require that the authority currently trusts it.
	key, err := a.state.GetKey(kID)
	if err != nil {
		return nil, fmt.Errorf("key: %v", err)
	}

	if err := decoded.verifySignature(nodeKey, key); err != nil {
		return nil, err
	}
	return decoded.rotationDetails()
}
|
||||
|
||||
// KeyTrusted returns true if the given keyID is trusted by the tailnet
|
||||
// key authority.
|
||||
func (a *Authority) KeyTrusted(keyID tkatype.KeyID) bool {
|
||||
_, err := a.state.GetKey(keyID)
|
||||
return err == nil
|
||||
}
|
||||
|
||||
// Keys returns the set of keys trusted by the tailnet key authority.
|
||||
func (a *Authority) Keys() []Key {
|
||||
out := make([]Key, len(a.state.Keys))
|
||||
for i := range a.state.Keys {
|
||||
out[i] = a.state.Keys[i].Clone()
|
||||
}
|
||||
return out
|
||||
}
|
||||
|
||||
// StateIDs returns the stateIDs for this tailnet key authority. These
// are values that are fixed for the lifetime of the authority: see
// comments on the relevant fields in state.go.
func (a *Authority) StateIDs() (uint64, uint64) {
	return a.state.StateID1, a.state.StateID2
}
|
||||
|
||||
// Compact deletes historical AUMs based on the given compaction options.
|
||||
func (a *Authority) Compact(storage CompactableChonk, o CompactionOptions) error {
|
||||
newAncestor, err := Compact(storage, a.head.Hash(), o)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
ancestor, err := storage.AUM(newAncestor)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
a.oldestAncestor = ancestor
|
||||
return nil
|
||||
}
|
||||
|
||||
// findParentForRewrite finds the parent AUM to use when rewriting state to
|
||||
// retroactively remove trust in the specified keys.
|
||||
func (a *Authority) findParentForRewrite(storage Chonk, removeKeys []tkatype.KeyID, ourKey tkatype.KeyID) (AUMHash, error) {
|
||||
cursor := a.Head()
|
||||
|
||||
for {
|
||||
if cursor == a.oldestAncestor.Hash() {
|
||||
// We've reached as far back in our history as we can,
|
||||
// so we have to rewrite from here.
|
||||
break
|
||||
}
|
||||
|
||||
aum, err := storage.AUM(cursor)
|
||||
if err != nil {
|
||||
return AUMHash{}, fmt.Errorf("reading AUM %v: %w", cursor, err)
|
||||
}
|
||||
|
||||
// An ideal rewrite parent trusts none of the keys to be removed.
|
||||
state, err := computeStateAt(storage, maxScanIterations, cursor)
|
||||
if err != nil {
|
||||
return AUMHash{}, fmt.Errorf("computing state for %v: %w", cursor, err)
|
||||
}
|
||||
keyTrusted := false
|
||||
for _, key := range removeKeys {
|
||||
if _, err := state.GetKey(key); err == nil {
|
||||
keyTrusted = true
|
||||
}
|
||||
}
|
||||
if !keyTrusted {
|
||||
// Success: the revoked keys are not trusted!
|
||||
// Lets check that our key was trusted to ensure
|
||||
// we can sign a fork from here.
|
||||
if _, err := state.GetKey(ourKey); err == nil {
|
||||
break
|
||||
}
|
||||
}
|
||||
|
||||
parent, hasParent := aum.Parent()
|
||||
if !hasParent {
|
||||
// This is the genesis AUM, so we have to rewrite from here.
|
||||
break
|
||||
}
|
||||
cursor = parent
|
||||
}
|
||||
|
||||
return cursor, nil
|
||||
}
|
||||
|
||||
// MakeRetroactiveRevocation generates a forking update which revokes the specified keys, in
|
||||
// such a manner that any malicious use of those keys is erased.
|
||||
//
|
||||
// If forkFrom is specified, it is used as the parent AUM to fork from. If the zero value,
|
||||
// the parent AUM is determined automatically.
|
||||
//
|
||||
// The generated AUM must be signed with more signatures than the sum of key votes that
|
||||
// were compromised, before being consumed by tka.Authority methods.
|
||||
func (a *Authority) MakeRetroactiveRevocation(storage Chonk, removeKeys []tkatype.KeyID, ourKey tkatype.KeyID, forkFrom AUMHash) (*AUM, error) {
|
||||
var parent AUMHash
|
||||
if forkFrom == (AUMHash{}) {
|
||||
// Make sure at least one of the recovery keys is currently trusted.
|
||||
foundKey := false
|
||||
for _, k := range removeKeys {
|
||||
if _, err := a.state.GetKey(k); err == nil {
|
||||
foundKey = true
|
||||
break
|
||||
}
|
||||
}
|
||||
if !foundKey {
|
||||
return nil, errors.New("no provided key is currently trusted")
|
||||
}
|
||||
|
||||
p, err := a.findParentForRewrite(storage, removeKeys, ourKey)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("finding parent: %v", err)
|
||||
}
|
||||
parent = p
|
||||
} else {
|
||||
parent = forkFrom
|
||||
}
|
||||
|
||||
// Construct the new state where the revoked keys are no longer trusted.
|
||||
state := a.state.Clone()
|
||||
for _, keyToRevoke := range removeKeys {
|
||||
idx := -1
|
||||
for i := range state.Keys {
|
||||
keyID, err := state.Keys[i].ID()
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("computing keyID: %v", err)
|
||||
}
|
||||
if bytes.Equal(keyToRevoke, keyID) {
|
||||
idx = i
|
||||
break
|
||||
}
|
||||
}
|
||||
if idx >= 0 {
|
||||
state.Keys = append(state.Keys[:idx], state.Keys[idx+1:]...)
|
||||
}
|
||||
}
|
||||
if len(state.Keys) == 0 {
|
||||
return nil, errors.New("cannot revoke all trusted keys")
|
||||
}
|
||||
state.LastAUMHash = nil // checkpoints can't specify a LastAUMHash
|
||||
|
||||
forkingAUM := &AUM{
|
||||
MessageKind: AUMCheckpoint,
|
||||
State: &state,
|
||||
PrevAUMHash: parent[:],
|
||||
}
|
||||
|
||||
return forkingAUM, forkingAUM.StaticValidate()
|
||||
}
|
||||
32
vendor/tailscale.com/tka/tka_clone.go
generated
vendored
Normal file
32
vendor/tailscale.com/tka/tka_clone.go
generated
vendored
Normal file
@@ -0,0 +1,32 @@
|
||||
// Copyright (c) Tailscale Inc & AUTHORS
|
||||
// SPDX-License-Identifier: BSD-3-Clause
|
||||
|
||||
// Code generated by tailscale.com/cmd/cloner; DO NOT EDIT.
|
||||
|
||||
package tka
|
||||
|
||||
// Clone makes a deep copy of NodeKeySignature.
// The result aliases no memory with the original.
func (src *NodeKeySignature) Clone() *NodeKeySignature {
	if src == nil {
		return nil
	}
	dst := new(NodeKeySignature)
	// Shallow-copy all fields first (covers SigKind), then replace every
	// reference-typed field with a copy below.
	*dst = *src
	// The append(x[:0:0], x...) idiom copies a byte slice without
	// aliasing its backing array, while preserving nil-ness: a nil
	// source stays nil rather than becoming an empty slice.
	dst.Pubkey = append(src.Pubkey[:0:0], src.Pubkey...)
	dst.KeyID = append(src.KeyID[:0:0], src.KeyID...)
	dst.Signature = append(src.Signature[:0:0], src.Signature...)
	// Recursive deep copy; Clone's nil check above makes this safe when
	// Nested is nil.
	dst.Nested = src.Nested.Clone()
	dst.WrappingPubkey = append(src.WrappingPubkey[:0:0], src.WrappingPubkey...)
	return dst
}
|
||||
|
||||
// A compilation failure here means this code must be regenerated, with the command at the top of this file.
//
// The struct conversion below only compiles while NodeKeySignature has
// exactly these fields, with these types, in this order — so any change
// to the type breaks the build until the cloner is re-run.
var _NodeKeySignatureCloneNeedsRegeneration = NodeKeySignature(struct {
	SigKind        SigKind
	Pubkey         []byte
	KeyID          []byte
	Signature      []byte
	Nested         *NodeKeySignature
	WrappingPubkey []byte
}{})
|
||||
Reference in New Issue
Block a user