Update dependencies

This commit is contained in:
bluepython508
2024-11-01 17:33:34 +00:00
parent 033ac0b400
commit 5cdfab398d
3596 changed files with 1033483 additions and 259 deletions

408
vendor/tailscale.com/control/controlbase/conn.go generated vendored Normal file

@@ -0,0 +1,408 @@
// Copyright (c) Tailscale Inc & AUTHORS
// SPDX-License-Identifier: BSD-3-Clause
// Package controlbase implements the base transport of the Tailscale
// 2021 control protocol.
//
// The base transport implements Noise IK, instantiated with
// Curve25519, ChaCha20Poly1305 and BLAKE2s.
package controlbase
import (
"crypto/cipher"
"encoding/binary"
"fmt"
"net"
"sync"
"time"
"golang.org/x/crypto/blake2s"
chp "golang.org/x/crypto/chacha20poly1305"
"tailscale.com/types/key"
)
const (
// maxMessageSize is the maximum size of a protocol frame on the
// wire, including header and payload.
maxMessageSize = 4096
// maxCiphertextSize is the maximum amount of ciphertext bytes
// that one protocol frame can carry, after framing.
maxCiphertextSize = maxMessageSize - 3
// maxPlaintextSize is the maximum amount of plaintext bytes that
// one protocol frame can carry, after encryption and framing.
maxPlaintextSize = maxCiphertextSize - chp.Overhead
)
// A Conn is a secured Noise connection. It implements the net.Conn
// interface, with the unusual trait that any write error (including a
// SetWriteDeadline induced i/o timeout) causes all future writes to
// fail.
type Conn struct {
conn net.Conn
version uint16
peer key.MachinePublic
handshakeHash [blake2s.Size]byte
rx rxState
tx txState
}
// rxState is all the Conn state that Read uses.
type rxState struct {
sync.Mutex
cipher cipher.AEAD
nonce nonce
buf *maxMsgBuffer // or nil when reads exhausted
n int // number of valid bytes in buf
next int // offset of next undecrypted packet
plaintext []byte // slice into buf of decrypted bytes
hdrBuf [headerLen]byte // small buffer used when buf is nil
}
// txState is all the Conn state that Write uses.
type txState struct {
sync.Mutex
cipher cipher.AEAD
nonce nonce
err error // records the first partial write error for all future calls
}
// ProtocolVersion returns the protocol version that was used to
// establish this Conn.
func (c *Conn) ProtocolVersion() int {
return int(c.version)
}
// HandshakeHash returns the Noise handshake hash for the connection,
// which can be used to bind other messages to this connection
// (i.e. to ensure that the message wasn't replayed from a different
// connection).
func (c *Conn) HandshakeHash() [blake2s.Size]byte {
return c.handshakeHash
}
// Peer returns the peer's long-term public key.
func (c *Conn) Peer() key.MachinePublic {
return c.peer
}
// readNLocked reads into c.rx.buf until buf contains at least total
// bytes. Returns a slice of the total bytes in c.rx.buf, or an
// error if fewer than total bytes are available.
//
// It may be called with a nil c.rx.buf only if total == headerLen.
//
// On success, c.rx.buf will be non-nil.
func (c *Conn) readNLocked(total int) ([]byte, error) {
if total > maxMessageSize {
return nil, errReadTooBig{total}
}
for {
if total <= c.rx.n {
return c.rx.buf[:total], nil
}
var n int
var err error
if c.rx.buf == nil {
if c.rx.n != 0 || total != headerLen {
panic("unexpected")
}
// Optimization to reduce memory usage.
// Most connections are blocked forever waiting for
// a read, so we don't want c.rx.buf to be allocated until
// we know there's data to read. Instead, when we're
// waiting for data to arrive here, read into the
// 3 byte hdrBuf:
n, err = c.conn.Read(c.rx.hdrBuf[:])
if n > 0 {
c.rx.buf = getMaxMsgBuffer()
copy(c.rx.buf[:], c.rx.hdrBuf[:n])
}
} else {
n, err = c.conn.Read(c.rx.buf[c.rx.n:])
}
c.rx.n += n
if err != nil {
return nil, err
}
}
}
// decryptLocked decrypts msg (which is header+ciphertext) in-place
// and sets c.rx.plaintext to the decrypted bytes.
func (c *Conn) decryptLocked(msg []byte) (err error) {
if msgType := msg[0]; msgType != msgTypeRecord {
return fmt.Errorf("received message with unexpected type %d, want %d", msgType, msgTypeRecord)
}
// We don't check the length field here, because the caller
// already did in order to figure out how big the msg slice should
// be.
ciphertext := msg[headerLen:]
if !c.rx.nonce.Valid() {
return errCipherExhausted{}
}
c.rx.plaintext, err = c.rx.cipher.Open(ciphertext[:0], c.rx.nonce[:], ciphertext, nil)
c.rx.nonce.Increment()
if err != nil {
// Once a decryption has failed, our Conn is no longer
// synchronized with our peer. Nuke the cipher state to be
// safe, so that no further decryptions are attempted. Future
// read attempts will return net.ErrClosed.
c.rx.cipher = nil
}
return err
}
// encryptLocked encrypts plaintext into buf (including the
// packet header) and returns a slice of the ciphertext, or an error
// if the cipher is exhausted (i.e. can no longer be used safely).
func (c *Conn) encryptLocked(plaintext []byte, buf *maxMsgBuffer) ([]byte, error) {
if !c.tx.nonce.Valid() {
// Sent 2^64-1 messages on this cipher state. Connection
// is no longer usable.
return nil, errCipherExhausted{}
}
buf[0] = msgTypeRecord
binary.BigEndian.PutUint16(buf[1:headerLen], uint16(len(plaintext)+chp.Overhead))
ret := c.tx.cipher.Seal(buf[:headerLen], c.tx.nonce[:], plaintext, nil)
c.tx.nonce.Increment()
return ret, nil
}
// wholeMessageLocked returns a slice of one whole Noise transport
// message from c.rx.buf, if one whole message is available, and
// advances the read state to the next Noise message in the
// buffer. Returns nil without advancing read state if there isn't one
// whole message in c.rx.buf.
func (c *Conn) wholeMessageLocked() []byte {
available := c.rx.n - c.rx.next
if available < headerLen {
return nil
}
bs := c.rx.buf[c.rx.next:c.rx.n]
totalSize := headerLen + int(binary.BigEndian.Uint16(bs[1:3]))
if len(bs) < totalSize {
return nil
}
c.rx.next += totalSize
return bs[:totalSize]
}
// decryptOneLocked decrypts one Noise transport message, reading from
// c.conn as needed, and sets c.rx.plaintext to point to the decrypted
// bytes. c.rx.plaintext is only valid if err == nil.
func (c *Conn) decryptOneLocked() error {
c.rx.plaintext = nil
// Fast path: do we have one whole ciphertext frame buffered
// already?
if bs := c.wholeMessageLocked(); bs != nil {
return c.decryptLocked(bs)
}
if c.rx.next != 0 {
// To simplify the read logic, move the remainder of the
// buffered bytes back to the head of the buffer, so we can
// grow it without worrying about wraparound.
c.rx.n = copy(c.rx.buf[:], c.rx.buf[c.rx.next:c.rx.n])
c.rx.next = 0
}
// Return our buffer to the pool if it's empty, lest we be
// blocked in a long Read call, reading the 3 byte header. We
// don't want to keep that buffer unnecessarily alive.
if c.rx.n == 0 && c.rx.next == 0 && c.rx.buf != nil {
bufPool.Put(c.rx.buf)
c.rx.buf = nil
}
bs, err := c.readNLocked(headerLen)
if err != nil {
return err
}
// The rest of the header (besides the length field) gets verified
// in decryptLocked, not here.
messageLen := headerLen + int(binary.BigEndian.Uint16(bs[1:3]))
bs, err = c.readNLocked(messageLen)
if err != nil {
return err
}
c.rx.next = len(bs)
return c.decryptLocked(bs)
}
// Read implements io.Reader.
func (c *Conn) Read(bs []byte) (int, error) {
c.rx.Lock()
defer c.rx.Unlock()
if c.rx.cipher == nil {
return 0, net.ErrClosed
}
// If no plaintext is buffered, decrypt incoming frames until we
// have some plaintext. Zero-byte Noise frames are allowed in this
// protocol, which is why we have to loop here rather than decrypt
// a single additional frame.
for len(c.rx.plaintext) == 0 {
if err := c.decryptOneLocked(); err != nil {
return 0, err
}
}
n := copy(bs, c.rx.plaintext)
c.rx.plaintext = c.rx.plaintext[n:]
// Drop the slice's pointer to the now-unneeded underlying array so
// the GC can collect more.
if len(c.rx.plaintext) == 0 {
c.rx.plaintext = nil
}
return n, nil
}
// Write implements io.Writer.
func (c *Conn) Write(bs []byte) (n int, err error) {
c.tx.Lock()
defer c.tx.Unlock()
if c.tx.err != nil {
return 0, c.tx.err
}
defer func() {
if err != nil {
// All write errors are fatal for this conn, so clear the
// cipher state whenever an error happens.
c.tx.cipher = nil
}
if c.tx.err == nil {
// Only set c.tx.err if it isn't already set, so that we can return one
// error on the first failure, and a different one for
// subsequent calls. See the error handling around Write
// below for why.
c.tx.err = err
}
}()
if c.tx.cipher == nil {
return 0, net.ErrClosed
}
buf := getMaxMsgBuffer()
defer bufPool.Put(buf)
var sent int
for len(bs) > 0 {
toSend := bs
if len(toSend) > maxPlaintextSize {
toSend = bs[:maxPlaintextSize]
}
bs = bs[len(toSend):]
ciphertext, err := c.encryptLocked(toSend, buf)
if err != nil {
return sent, err
}
if _, err := c.conn.Write(ciphertext); err != nil {
// Return the raw error on the Write that actually
// failed. For future writes, return that error wrapped in
// a desync error.
c.tx.err = errPartialWrite{err}
return sent, err
}
sent += len(toSend)
}
return sent, nil
}
// Close implements io.Closer.
func (c *Conn) Close() error {
closeErr := c.conn.Close() // unblocks any waiting reads or writes
// Remove references to live cipher state. Strictly speaking this
// is unnecessary, but we want to try and hand the active cipher
// state to the garbage collector promptly, to preserve perfect
// forward secrecy as much as we can.
c.rx.Lock()
c.rx.cipher = nil
c.rx.Unlock()
c.tx.Lock()
c.tx.cipher = nil
c.tx.Unlock()
return closeErr
}
func (c *Conn) LocalAddr() net.Addr { return c.conn.LocalAddr() }
func (c *Conn) RemoteAddr() net.Addr { return c.conn.RemoteAddr() }
func (c *Conn) SetDeadline(t time.Time) error { return c.conn.SetDeadline(t) }
func (c *Conn) SetReadDeadline(t time.Time) error { return c.conn.SetReadDeadline(t) }
func (c *Conn) SetWriteDeadline(t time.Time) error { return c.conn.SetWriteDeadline(t) }
// errCipherExhausted is the error returned when we run out of nonces
// on a cipher.
type errCipherExhausted struct{}
func (errCipherExhausted) Error() string {
return "cipher exhausted, no more nonces available for current key"
}
func (errCipherExhausted) Timeout() bool { return false }
func (errCipherExhausted) Temporary() bool { return false }
// errPartialWrite is the error returned when the cipher state has
// become unusable due to a past partial write.
type errPartialWrite struct {
err error
}
func (e errPartialWrite) Error() string {
return fmt.Sprintf("cipher state desynchronized due to partial write (%v)", e.err)
}
func (e errPartialWrite) Unwrap() error { return e.err }
func (e errPartialWrite) Temporary() bool { return false }
func (e errPartialWrite) Timeout() bool { return false }
// errReadTooBig is the error returned when the peer sent an
// unacceptably large Noise frame.
type errReadTooBig struct {
requested int
}
func (e errReadTooBig) Error() string {
return fmt.Sprintf("requested read of %d bytes exceeds max allowed Noise frame size", e.requested)
}
func (e errReadTooBig) Temporary() bool {
// permanent error because this error only occurs when our peer
// sends us a frame so large we're unwilling to ever decode it.
return false
}
func (e errReadTooBig) Timeout() bool { return false }
type nonce [chp.NonceSize]byte
func (n *nonce) Valid() bool {
return binary.BigEndian.Uint32(n[:4]) == 0 && binary.BigEndian.Uint64(n[4:]) != invalidNonce
}
func (n *nonce) Increment() {
if !n.Valid() {
panic("increment of invalid nonce")
}
binary.BigEndian.PutUint64(n[4:], 1+binary.BigEndian.Uint64(n[4:]))
}
type maxMsgBuffer [maxMessageSize]byte
// bufPool holds the temporary buffers for Conn.Read & Write.
var bufPool = &sync.Pool{
New: func() any {
return new(maxMsgBuffer)
},
}
func getMaxMsgBuffer() *maxMsgBuffer {
return bufPool.Get().(*maxMsgBuffer)
}
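
The constants above fix the wire framing: every record frame is a 3-byte cleartext header (1-byte type, 2-byte big-endian ciphertext length, per the companion messages.go later in this commit) followed by the ciphertext, which is the plaintext plus ChaCha20-Poly1305's 16-byte tag. A minimal standalone sketch of that arithmetic (illustrative only, not part of the vendored file):

package main

import (
    "encoding/binary"
    "fmt"

    chp "golang.org/x/crypto/chacha20poly1305"
)

const (
    headerLen         = 3    // 1-byte type + 2-byte big-endian length
    maxMessageSize    = 4096 // whole frame, header included
    maxCiphertextSize = maxMessageSize - headerLen
    maxPlaintextSize  = maxCiphertextSize - chp.Overhead
)

// frameHeader builds the cleartext header for one record frame carrying
// plaintextLen bytes of application data.
func frameHeader(plaintextLen int) [headerLen]byte {
    var hdr [headerLen]byte
    hdr[0] = 4 // msgTypeRecord, per messages.go
    binary.BigEndian.PutUint16(hdr[1:], uint16(plaintextLen+chp.Overhead))
    return hdr
}

func main() {
    // A 10 KiB Write is split into ceil(10240/4077) = 3 frames.
    const writeLen = 10 * 1024
    frames := (writeLen + maxPlaintextSize - 1) / maxPlaintextSize
    wire := writeLen + frames*(headerLen+chp.Overhead)
    fmt.Printf("%d plaintext bytes -> %d frames, %d bytes on the wire\n", writeLen, frames, wire)
    fmt.Printf("header of a full frame: % x\n", frameHeader(maxPlaintextSize))
}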

494
vendor/tailscale.com/control/controlbase/handshake.go generated vendored Normal file

@@ -0,0 +1,494 @@
// Copyright (c) Tailscale Inc & AUTHORS
// SPDX-License-Identifier: BSD-3-Clause
package controlbase
import (
"context"
"crypto/cipher"
"encoding/binary"
"errors"
"fmt"
"hash"
"io"
"net"
"strconv"
"time"
"go4.org/mem"
"golang.org/x/crypto/blake2s"
chp "golang.org/x/crypto/chacha20poly1305"
"golang.org/x/crypto/curve25519"
"golang.org/x/crypto/hkdf"
"tailscale.com/types/key"
)
const (
// protocolName is the name of the specific instantiation of Noise
// that the control protocol uses. This string's value is fixed by
// the Noise spec, and shouldn't be changed unless we're updating
// the control protocol to use a different Noise instance.
protocolName = "Noise_IK_25519_ChaChaPoly_BLAKE2s"
// protocolVersion is the version of the control protocol that
// Client will use when initiating a handshake.
//protocolVersion uint16 = 1
// protocolVersionPrefix is the name portion of the protocol
// name+version string that gets mixed into the handshake as a
// prologue.
//
// This mixing verifies that both sides agree that they're
// executing the control protocol at a specific version that
// matches the advertised version in the cleartext packet header.
protocolVersionPrefix = "Tailscale Control Protocol v"
invalidNonce = ^uint64(0)
)
func protocolVersionPrologue(version uint16) []byte {
ret := make([]byte, 0, len(protocolVersionPrefix)+5) // 5 bytes is enough to encode all possible version numbers.
ret = append(ret, protocolVersionPrefix...)
return strconv.AppendUint(ret, uint64(version), 10)
}
// HandshakeContinuation upgrades a net.Conn to a Conn. The net.Conn
// is assumed to have already sent the client-to-server handshake
// initiation message.
type HandshakeContinuation func(context.Context, net.Conn) (*Conn, error)
// ClientDeferred initiates a control client handshake, returning the
// initial message to send to the server and a continuation to
// finalize the handshake.
//
// ClientDeferred is split in this way for RTT reduction: we run this
// protocol after negotiating a protocol switch from HTTP/HTTPS. If we
// completely serialized the negotiation followed by the handshake,
// we'd pay an extra RTT to transmit the handshake initiation after
// protocol switching. By splitting the handshake into an initial
// message and a continuation, we can embed the handshake initiation
// into the HTTP protocol switching request and avoid a bit of delay.
func ClientDeferred(machineKey key.MachinePrivate, controlKey key.MachinePublic, protocolVersion uint16) (initialHandshake []byte, continueHandshake HandshakeContinuation, err error) {
var s symmetricState
s.Initialize()
// prologue
s.MixHash(protocolVersionPrologue(protocolVersion))
// <- s
// ...
s.MixHash(controlKey.UntypedBytes())
// -> e, es, s, ss
init := mkInitiationMessage(protocolVersion)
machineEphemeral := key.NewMachine()
machineEphemeralPub := machineEphemeral.Public()
copy(init.EphemeralPub(), machineEphemeralPub.UntypedBytes())
s.MixHash(machineEphemeralPub.UntypedBytes())
cipher, err := s.MixDH(machineEphemeral, controlKey)
if err != nil {
return nil, nil, fmt.Errorf("computing es: %w", err)
}
machineKeyPub := machineKey.Public()
s.EncryptAndHash(cipher, init.MachinePub(), machineKeyPub.UntypedBytes())
cipher, err = s.MixDH(machineKey, controlKey)
if err != nil {
return nil, nil, fmt.Errorf("computing ss: %w", err)
}
s.EncryptAndHash(cipher, init.Tag(), nil) // empty message payload
cont := func(ctx context.Context, conn net.Conn) (*Conn, error) {
return continueClientHandshake(ctx, conn, &s, machineKey, machineEphemeral, controlKey, protocolVersion)
}
return init[:], cont, nil
}
// Client wraps ClientDeferred and immediately invokes the returned
// continuation with conn.
//
// This is a helper for when you don't need the fancy
// continuation-style handshake, and just want to synchronously
// upgrade a net.Conn to a secure transport.
func Client(ctx context.Context, conn net.Conn, machineKey key.MachinePrivate, controlKey key.MachinePublic, protocolVersion uint16) (*Conn, error) {
init, cont, err := ClientDeferred(machineKey, controlKey, protocolVersion)
if err != nil {
return nil, err
}
if _, err := conn.Write(init); err != nil {
return nil, err
}
return cont(ctx, conn)
}
func continueClientHandshake(ctx context.Context, conn net.Conn, s *symmetricState, machineKey, machineEphemeral key.MachinePrivate, controlKey key.MachinePublic, protocolVersion uint16) (*Conn, error) {
// No matter what, this function can only run once per s. Ensure
// attempted reuse causes a panic.
defer func() {
s.finished = true
}()
if deadline, ok := ctx.Deadline(); ok {
if err := conn.SetDeadline(deadline); err != nil {
return nil, fmt.Errorf("setting conn deadline: %w", err)
}
defer func() {
conn.SetDeadline(time.Time{})
}()
}
// Read in the payload and look for errors/protocol violations from the server.
var resp responseMessage
if _, err := io.ReadFull(conn, resp.Header()); err != nil {
return nil, fmt.Errorf("reading response header: %w", err)
}
if resp.Type() != msgTypeResponse {
if resp.Type() != msgTypeError {
return nil, fmt.Errorf("unexpected response message type %d", resp.Type())
}
msg := make([]byte, resp.Length())
if _, err := io.ReadFull(conn, msg); err != nil {
return nil, err
}
return nil, fmt.Errorf("server error: %q", msg)
}
if resp.Length() != len(resp.Payload()) {
return nil, fmt.Errorf("wrong length %d received for handshake response", resp.Length())
}
if _, err := io.ReadFull(conn, resp.Payload()); err != nil {
return nil, err
}
// <- e, ee, se
controlEphemeralPub := key.MachinePublicFromRaw32(mem.B(resp.EphemeralPub()))
s.MixHash(controlEphemeralPub.UntypedBytes())
if _, err := s.MixDH(machineEphemeral, controlEphemeralPub); err != nil {
return nil, fmt.Errorf("computing ee: %w", err)
}
cipher, err := s.MixDH(machineKey, controlEphemeralPub)
if err != nil {
return nil, fmt.Errorf("computing se: %w", err)
}
if err := s.DecryptAndHash(cipher, nil, resp.Tag()); err != nil {
return nil, fmt.Errorf("decrypting payload: %w", err)
}
c1, c2, err := s.Split()
if err != nil {
return nil, fmt.Errorf("finalizing handshake: %w", err)
}
c := &Conn{
conn: conn,
version: protocolVersion,
peer: controlKey,
handshakeHash: s.h,
tx: txState{
cipher: c1,
},
rx: rxState{
cipher: c2,
},
}
return c, nil
}
// Server initiates a control server handshake, returning the resulting
// control connection.
//
// optionalInit can be the client's initial handshake message as
// returned by ClientDeferred, or nil in which case the initial
// message is read from conn.
//
// The context deadline, if any, covers the entire handshaking
// process.
func Server(ctx context.Context, conn net.Conn, controlKey key.MachinePrivate, optionalInit []byte) (*Conn, error) {
if deadline, ok := ctx.Deadline(); ok {
if err := conn.SetDeadline(deadline); err != nil {
return nil, fmt.Errorf("setting conn deadline: %w", err)
}
defer func() {
conn.SetDeadline(time.Time{})
}()
}
// Deliberately does not support formatting, so that we don't echo
// attacker-controlled input back to them.
sendErr := func(msg string) error {
if len(msg) >= 1<<16 {
msg = msg[:1<<16-1] // keep the length encodable in the uint16 header field
}
var hdr [headerLen]byte
hdr[0] = msgTypeError
binary.BigEndian.PutUint16(hdr[1:3], uint16(len(msg)))
if _, err := conn.Write(hdr[:]); err != nil {
return fmt.Errorf("sending %q error to client: %w", msg, err)
}
if _, err := io.WriteString(conn, msg); err != nil {
return fmt.Errorf("sending %q error to client: %w", msg, err)
}
return fmt.Errorf("refused client handshake: %q", msg)
}
var s symmetricState
s.Initialize()
var init initiationMessage
if optionalInit != nil {
if len(optionalInit) != len(init) {
return nil, sendErr("wrong handshake initiation size")
}
copy(init[:], optionalInit)
} else if _, err := io.ReadFull(conn, init.Header()); err != nil {
return nil, err
}
// Just a rename to make it more obvious what the value is. In the
// current implementation we don't need to block any protocol
// versions at this layer; it's safe to let the handshake proceed
// and then let the caller make decisions based on the agreed-upon
// protocol version.
clientVersion := init.Version()
if init.Type() != msgTypeInitiation {
return nil, sendErr("unexpected handshake message type")
}
if init.Length() != len(init.Payload()) {
return nil, sendErr("wrong handshake initiation length")
}
// if optionalInit was provided, we have the payload already.
if optionalInit == nil {
if _, err := io.ReadFull(conn, init.Payload()); err != nil {
return nil, err
}
}
// prologue. Can only do this once we at least think the client is
// handshaking using a supported version.
s.MixHash(protocolVersionPrologue(clientVersion))
// <- s
// ...
controlKeyPub := controlKey.Public()
s.MixHash(controlKeyPub.UntypedBytes())
// -> e, es, s, ss
machineEphemeralPub := key.MachinePublicFromRaw32(mem.B(init.EphemeralPub()))
s.MixHash(machineEphemeralPub.UntypedBytes())
cipher, err := s.MixDH(controlKey, machineEphemeralPub)
if err != nil {
return nil, fmt.Errorf("computing es: %w", err)
}
var machineKeyBytes [32]byte
if err := s.DecryptAndHash(cipher, machineKeyBytes[:], init.MachinePub()); err != nil {
return nil, fmt.Errorf("decrypting machine key: %w", err)
}
machineKey := key.MachinePublicFromRaw32(mem.B(machineKeyBytes[:]))
cipher, err = s.MixDH(controlKey, machineKey)
if err != nil {
return nil, fmt.Errorf("computing ss: %w", err)
}
if err := s.DecryptAndHash(cipher, nil, init.Tag()); err != nil {
return nil, fmt.Errorf("decrypting initiation tag: %w", err)
}
// <- e, ee, se
resp := mkResponseMessage()
controlEphemeral := key.NewMachine()
controlEphemeralPub := controlEphemeral.Public()
copy(resp.EphemeralPub(), controlEphemeralPub.UntypedBytes())
s.MixHash(controlEphemeralPub.UntypedBytes())
if _, err := s.MixDH(controlEphemeral, machineEphemeralPub); err != nil {
return nil, fmt.Errorf("computing ee: %w", err)
}
cipher, err = s.MixDH(controlEphemeral, machineKey)
if err != nil {
return nil, fmt.Errorf("computing se: %w", err)
}
s.EncryptAndHash(cipher, resp.Tag(), nil) // empty message payload
c1, c2, err := s.Split()
if err != nil {
return nil, fmt.Errorf("finalizing handshake: %w", err)
}
if _, err := conn.Write(resp[:]); err != nil {
return nil, err
}
c := &Conn{
conn: conn,
version: clientVersion,
peer: machineKey,
handshakeHash: s.h,
tx: txState{
cipher: c2,
},
rx: rxState{
cipher: c1,
},
}
return c, nil
}
// symmetricState contains the state of an in-flight handshake.
type symmetricState struct {
finished bool
h [blake2s.Size]byte // hash of currently-processed handshake state
ck [blake2s.Size]byte // chaining key used to construct session keys at the end of the handshake
}
func (s *symmetricState) checkFinished() {
if s.finished {
panic("attempted to use symmetricState after Split was called")
}
}
// Initialize sets s to the initial handshake state, prior to
// processing any handshake messages.
func (s *symmetricState) Initialize() {
s.checkFinished()
s.h = blake2s.Sum256([]byte(protocolName))
s.ck = s.h
}
// MixHash updates s.h to be BLAKE2s(s.h || data), where || is
// concatenation.
func (s *symmetricState) MixHash(data []byte) {
s.checkFinished()
h := newBLAKE2s()
h.Write(s.h[:])
h.Write(data)
h.Sum(s.h[:0])
}
// MixDH updates s.ck with the result of X25519(priv, pub) and returns
// a singleUseCHP that can be used to encrypt or decrypt handshake
// data.
//
// MixDH corresponds to MixKey(X25519(...)) in the spec. Implementing
// it as a single function allows for strongly-typed arguments that
// reduce the risk of error in the caller (e.g. invoking X25519 with
// two private keys, or two public keys) and thus computing the wrong
// value.
func (s *symmetricState) MixDH(priv key.MachinePrivate, pub key.MachinePublic) (*singleUseCHP, error) {
s.checkFinished()
keyData, err := curve25519.X25519(priv.UntypedBytes(), pub.UntypedBytes())
if err != nil {
return nil, fmt.Errorf("computing X25519: %w", err)
}
r := hkdf.New(newBLAKE2s, keyData, s.ck[:], nil)
if _, err := io.ReadFull(r, s.ck[:]); err != nil {
return nil, fmt.Errorf("extracting ck: %w", err)
}
var k [chp.KeySize]byte
if _, err := io.ReadFull(r, k[:]); err != nil {
return nil, fmt.Errorf("extracting k: %w", err)
}
return newSingleUseCHP(k), nil
}
// EncryptAndHash encrypts plaintext into ciphertext (which must be
// the correct size to hold the encrypted plaintext) using cipher and
// mixes the resulting ciphertext into s.h.
func (s *symmetricState) EncryptAndHash(cipher *singleUseCHP, ciphertext, plaintext []byte) {
s.checkFinished()
if len(ciphertext) != len(plaintext)+chp.Overhead {
panic("ciphertext is wrong size for given plaintext")
}
ret := cipher.Seal(ciphertext[:0], plaintext, s.h[:])
s.MixHash(ret)
}
// DecryptAndHash decrypts the given ciphertext into plaintext (which
// must be the correct size to hold the decrypted ciphertext) using
// cipher. If decryption is successful, it mixes the ciphertext into
// s.h.
func (s *symmetricState) DecryptAndHash(cipher *singleUseCHP, plaintext, ciphertext []byte) error {
s.checkFinished()
if len(ciphertext) != len(plaintext)+chp.Overhead {
return errors.New("plaintext is wrong size for given ciphertext")
}
if _, err := cipher.Open(plaintext[:0], ciphertext, s.h[:]); err != nil {
return err
}
s.MixHash(ciphertext)
return nil
}
// Split returns two ChaCha20Poly1305 ciphers with keys derived from
// the current handshake state. Methods on s cannot be used again
// after calling Split.
func (s *symmetricState) Split() (c1, c2 cipher.AEAD, err error) {
s.finished = true
var k1, k2 [chp.KeySize]byte
r := hkdf.New(newBLAKE2s, nil, s.ck[:], nil)
if _, err := io.ReadFull(r, k1[:]); err != nil {
return nil, nil, fmt.Errorf("extracting k1: %w", err)
}
if _, err := io.ReadFull(r, k2[:]); err != nil {
return nil, nil, fmt.Errorf("extracting k2: %w", err)
}
c1, err = chp.New(k1[:])
if err != nil {
return nil, nil, fmt.Errorf("constructing AEAD c1: %w", err)
}
c2, err = chp.New(k2[:])
if err != nil {
return nil, nil, fmt.Errorf("constructing AEAD c2: %w", err)
}
return c1, c2, nil
}
// newBLAKE2s returns a hash.Hash implementing BLAKE2s, or panics on
// error.
func newBLAKE2s() hash.Hash {
h, err := blake2s.New256(nil)
if err != nil {
// Should never happen, errors only happen when using BLAKE2s
// in MAC mode with a key.
panic(err)
}
return h
}
// newCHP returns a cipher.AEAD implementing ChaCha20Poly1305, or
// panics on error.
func newCHP(key [chp.KeySize]byte) cipher.AEAD {
aead, err := chp.New(key[:])
if err != nil {
// Can only happen if we passed a key of the wrong length. The
// function signature prevents that.
panic(err)
}
return aead
}
// singleUseCHP is an instance of ChaCha20Poly1305 that can be used
// only once, either for encrypting or decrypting, but not both. The
// chosen operation is always executed with an all-zeros
// nonce. Subsequent calls to either Seal or Open panic.
type singleUseCHP struct {
c cipher.AEAD
}
func newSingleUseCHP(key [chp.KeySize]byte) *singleUseCHP {
return &singleUseCHP{newCHP(key)}
}
func (c *singleUseCHP) Seal(dst, plaintext, additionalData []byte) []byte {
if c.c == nil {
panic("Attempted reuse of singleUseAEAD")
}
cipher := c.c
c.c = nil
var nonce [chp.NonceSize]byte
return cipher.Seal(dst, nonce[:], plaintext, additionalData)
}
func (c *singleUseCHP) Open(dst, ciphertext, additionalData []byte) ([]byte, error) {
if c.c == nil {
panic("Attempted reuse of singleUseAEAD")
}
cipher := c.c
c.c = nil
var nonce [chp.NonceSize]byte
return cipher.Open(dst, nonce[:], ciphertext, additionalData)
}
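
Taken together with conn.go above, the two ends of a handshake compose as follows. A minimal end-to-end sketch over an in-memory pipe (illustrative only; passing 1 as the protocol version mirrors the commented-out protocolVersion constant above and is an assumption here):

package main

import (
    "context"
    "log"
    "net"

    "tailscale.com/control/controlbase"
    "tailscale.com/types/key"
)

func main() {
    controlKey := key.NewMachine() // control server's long-term key
    machineKey := key.NewMachine() // client's long-term key

    cliSide, srvSide := net.Pipe()
    ctx := context.Background()

    // Run the responder concurrently; with a nil optionalInit it reads
    // the initiation message from the connection itself.
    errc := make(chan error, 1)
    var server *controlbase.Conn
    go func() {
        var err error
        server, err = controlbase.Server(ctx, srvSide, controlKey, nil)
        errc <- err
    }()

    // The client sends the initiation and completes the handshake.
    client, err := controlbase.Client(ctx, cliSide, machineKey, controlKey.Public(), 1)
    if err != nil {
        log.Fatal(err)
    }
    if err := <-errc; err != nil {
        log.Fatal(err)
    }

    // Both ends now hold a *controlbase.Conn (a net.Conn) and agree on the
    // peer identity and handshake hash.
    go client.Write([]byte("hello"))
    buf := make([]byte, 5)
    if _, err := server.Read(buf); err != nil {
        log.Fatal(err)
    }
    log.Printf("server read %q from peer %v", buf, server.Peer())
    client.Close()
    server.Close()
}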

87
vendor/tailscale.com/control/controlbase/messages.go generated vendored Normal file

@@ -0,0 +1,87 @@
// Copyright (c) Tailscale Inc & AUTHORS
// SPDX-License-Identifier: BSD-3-Clause
package controlbase
import "encoding/binary"
const (
// msgTypeInitiation frames carry a Noise IK handshake initiation message.
msgTypeInitiation = 1
// msgTypeResponse frames carry a Noise IK handshake response message.
msgTypeResponse = 2
// msgTypeError frames carry an unauthenticated human-readable
// error message.
//
// Errors reported in this message type must be treated as public
// hints only. They are not encrypted or authenticated, and so can
// be seen and tampered with on the wire.
msgTypeError = 3
// msgTypeRecord frames carry session data bytes.
msgTypeRecord = 4
// headerLen is the size of the header on all messages except msgTypeInitiation.
headerLen = 3
// initiationHeaderLen is the size of the header on all msgTypeInitiation messages.
initiationHeaderLen = 5
)
// initiationMessage is the protocol message sent from a client
// machine to a control server.
//
// 2b: protocol version
// 1b: message type (0x01)
// 2b: payload length (96)
// 5b: header total (the three fields above; see initiationHeaderLen)
// 32b: client ephemeral public key (cleartext)
// 48b: client machine public key (encrypted)
// 16b: message tag (authenticates the whole message)
type initiationMessage [101]byte
func mkInitiationMessage(protocolVersion uint16) initiationMessage {
var ret initiationMessage
binary.BigEndian.PutUint16(ret[:2], protocolVersion)
ret[2] = msgTypeInitiation
binary.BigEndian.PutUint16(ret[3:5], uint16(len(ret.Payload())))
return ret
}
func (m *initiationMessage) Header() []byte { return m[:initiationHeaderLen] }
func (m *initiationMessage) Payload() []byte { return m[initiationHeaderLen:] }
func (m *initiationMessage) Version() uint16 { return binary.BigEndian.Uint16(m[:2]) }
func (m *initiationMessage) Type() byte { return m[2] }
func (m *initiationMessage) Length() int { return int(binary.BigEndian.Uint16(m[3:5])) }
func (m *initiationMessage) EphemeralPub() []byte {
return m[initiationHeaderLen : initiationHeaderLen+32]
}
func (m *initiationMessage) MachinePub() []byte {
return m[initiationHeaderLen+32 : initiationHeaderLen+32+48]
}
func (m *initiationMessage) Tag() []byte { return m[initiationHeaderLen+32+48:] }
// responseMessage is the protocol message sent from a control server
// to a client machine.
//
// 1b: message type (0x02)
// 2b: payload length (48)
// 32b: control ephemeral public key (cleartext)
// 16b: message tag (authenticates the whole message)
type responseMessage [51]byte
func mkResponseMessage() responseMessage {
var ret responseMessage
ret[0] = msgTypeResponse
binary.BigEndian.PutUint16(ret[1:], uint16(len(ret.Payload())))
return ret
}
func (m *responseMessage) Header() []byte { return m[:headerLen] }
func (m *responseMessage) Payload() []byte { return m[headerLen:] }
func (m *responseMessage) Type() byte { return m[0] }
func (m *responseMessage) Length() int { return int(binary.BigEndian.Uint16(m[1:3])) }
func (m *responseMessage) EphemeralPub() []byte { return m[headerLen : headerLen+32] }
func (m *responseMessage) Tag() []byte { return m[headerLen+32:] }
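
The fixed sizes above are easy to sanity-check: the initiation payload is the 32-byte ephemeral key, the machine key sealed to 32+16 = 48 bytes by the AEAD tag, and a 16-byte tag over an empty payload, so 5+96 = 101 bytes total; the response payload is the 32-byte ephemeral key plus a 16-byte tag, so 3+48 = 51 bytes. A tiny illustrative check:

package main

import "fmt"

func main() {
    const (
        aeadOverhead        = 16 // chacha20poly1305.Overhead
        initiationHeaderLen = 5
        headerLen           = 3

        initiationPayload = 32 + (32 + aeadOverhead) + aeadOverhead // ephemeral pub + sealed machine pub + tag
        responsePayload   = 32 + aeadOverhead                       // ephemeral pub + tag
    )
    fmt.Println(initiationHeaderLen + initiationPayload) // 101, matching [101]byte
    fmt.Println(headerLen + responsePayload)             // 51, matching [51]byte
}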

746
vendor/tailscale.com/control/controlclient/auto.go generated vendored Normal file

@@ -0,0 +1,746 @@
// Copyright (c) Tailscale Inc & AUTHORS
// SPDX-License-Identifier: BSD-3-Clause
package controlclient
import (
"context"
"errors"
"fmt"
"net/http"
"sync"
"sync/atomic"
"time"
"tailscale.com/logtail/backoff"
"tailscale.com/net/sockstats"
"tailscale.com/tailcfg"
"tailscale.com/tstime"
"tailscale.com/types/key"
"tailscale.com/types/logger"
"tailscale.com/types/netmap"
"tailscale.com/types/persist"
"tailscale.com/types/structs"
"tailscale.com/util/execqueue"
)
type LoginGoal struct {
_ structs.Incomparable
flags LoginFlags // flags to use when logging in
url string // auth url that needs to be visited
}
var _ Client = (*Auto)(nil)
// waitUnpause waits until either the client is unpaused or the Auto client is
// shut down. It reports whether the client should keep running (i.e. it's not
// closed).
func (c *Auto) waitUnpause(routineLogName string) (keepRunning bool) {
c.mu.Lock()
if !c.paused || c.closed {
defer c.mu.Unlock()
return !c.closed
}
unpaused := c.unpausedChanLocked()
c.mu.Unlock()
c.logf("%s: awaiting unpause", routineLogName)
return <-unpaused
}
// updateRoutine is responsible for informing the server of worthy changes to
// our local state. It runs in its own goroutine.
func (c *Auto) updateRoutine() {
defer close(c.updateDone)
bo := backoff.NewBackoff("updateRoutine", c.logf, 30*time.Second)
// lastUpdateGenInformed is the value of c.lastUpdateGen that we've successfully
// informed the server of.
var lastUpdateGenInformed updateGen
for {
if !c.waitUnpause("updateRoutine") {
c.logf("updateRoutine: exiting")
return
}
c.mu.Lock()
gen := c.lastUpdateGen
ctx := c.mapCtx
needUpdate := gen > 0 && gen != lastUpdateGenInformed && c.loggedIn
c.mu.Unlock()
if !needUpdate {
// Nothing to do, wait for a signal.
select {
case <-ctx.Done():
continue
case <-c.updateCh:
continue
}
}
t0 := c.clock.Now()
err := c.direct.SendUpdate(ctx)
d := time.Since(t0).Round(time.Millisecond)
if err != nil {
if ctx.Err() == nil {
c.direct.logf("lite map update error after %v: %v", d, err)
}
bo.BackOff(ctx, err)
continue
}
bo.BackOff(ctx, nil)
c.direct.logf("[v1] successful lite map update in %v", d)
lastUpdateGenInformed = gen
}
}
// atomicGen is an atomic int64 generator. It is used to generate monotonically
// increasing numbers for updateGen.
var atomicGen atomic.Int64
func nextUpdateGen() updateGen {
return updateGen(atomicGen.Add(1))
}
// updateGen is a monotonically increasing number that represents a particular
// update to the local state.
type updateGen int64
// Auto connects to a tailcontrol server for a node.
// It's a concrete implementation of the Client interface.
type Auto struct {
direct *Direct // our interface to the server APIs
clock tstime.Clock
logf logger.Logf
closed bool
updateCh chan struct{} // readable when we should inform the server of a change
observer Observer // called to update Client status; always non-nil
observerQueue execqueue.ExecQueue
unregisterHealthWatch func()
mu sync.Mutex // mutex guards the following fields
wantLoggedIn bool // whether the user wants to be logged in per last method call
urlToVisit string // the last url we were told to visit
expiry time.Time
// lastUpdateGen is the gen of the last update worth sending to
// the server.
lastUpdateGen updateGen
paused bool // whether we should stop making HTTP requests
unpauseWaiters []chan bool // chans that get sent true (once) on wake, or false on Shutdown
loggedIn bool // true if currently logged in
loginGoal *LoginGoal // non-nil if some login activity is desired
inMapPoll bool // true once we get the first MapResponse in a stream; false when HTTP response ends
state State // TODO(bradfitz): delete this, make it computed by method from other state
authCtx context.Context // context used for auth requests
mapCtx context.Context // context used for netmap and update requests
authCancel func() // cancel authCtx
mapCancel func() // cancel mapCtx
authDone chan struct{} // when closed, authRoutine is done
mapDone chan struct{} // when closed, mapRoutine is done
updateDone chan struct{} // when closed, updateRoutine is done
}
// New creates and starts a new Auto.
func New(opts Options) (*Auto, error) {
c, err := NewNoStart(opts)
if c != nil {
c.Start()
}
return c, err
}
// NewNoStart creates a new Auto, but without calling Start on it.
func NewNoStart(opts Options) (_ *Auto, err error) {
direct, err := NewDirect(opts)
if err != nil {
return nil, err
}
defer func() {
if err != nil {
direct.Close()
}
}()
if opts.Observer == nil {
return nil, errors.New("missing required Options.Observer")
}
if opts.Logf == nil {
opts.Logf = func(fmt string, args ...any) {}
}
if opts.Clock == nil {
opts.Clock = tstime.StdClock{}
}
c := &Auto{
direct: direct,
clock: opts.Clock,
logf: opts.Logf,
updateCh: make(chan struct{}, 1),
authDone: make(chan struct{}),
mapDone: make(chan struct{}),
updateDone: make(chan struct{}),
observer: opts.Observer,
}
c.authCtx, c.authCancel = context.WithCancel(context.Background())
c.authCtx = sockstats.WithSockStats(c.authCtx, sockstats.LabelControlClientAuto, opts.Logf)
c.mapCtx, c.mapCancel = context.WithCancel(context.Background())
c.mapCtx = sockstats.WithSockStats(c.mapCtx, sockstats.LabelControlClientAuto, opts.Logf)
c.unregisterHealthWatch = opts.HealthTracker.RegisterWatcher(direct.ReportHealthChange)
return c, nil
}
// SetPaused controls whether HTTP activity should be paused.
//
// The client can be paused and unpaused repeatedly, unlike Start and Shutdown, which can only be used once.
func (c *Auto) SetPaused(paused bool) {
c.mu.Lock()
defer c.mu.Unlock()
if paused == c.paused || c.closed {
return
}
c.logf("setPaused(%v)", paused)
c.paused = paused
if paused {
c.cancelMapCtxLocked()
c.cancelAuthCtxLocked()
return
}
for _, ch := range c.unpauseWaiters {
ch <- true
}
c.unpauseWaiters = nil
}
// Start starts the client's goroutines.
//
// It should only be called for clients created by NewNoStart.
func (c *Auto) Start() {
go c.authRoutine()
go c.mapRoutine()
go c.updateRoutine()
}
// updateControl sends a new OmitPeers, non-streaming map request (to just send
// Hostinfo/Netinfo/Endpoints info, while keeping an existing streaming response
// open).
//
// It should be called whenever there's something new to tell the server.
func (c *Auto) updateControl() {
gen := nextUpdateGen()
c.mu.Lock()
if gen < c.lastUpdateGen {
// This update is out of date.
c.mu.Unlock()
return
}
c.lastUpdateGen = gen
c.mu.Unlock()
select {
case c.updateCh <- struct{}{}:
default:
}
}
// cancelAuthCtxLocked cancels the auth context and, unless the client is closed, replaces it with a fresh one. The caller must hold c.mu.
func (c *Auto) cancelAuthCtxLocked() {
if c.authCancel != nil {
c.authCancel()
}
if !c.closed {
c.authCtx, c.authCancel = context.WithCancel(context.Background())
c.authCtx = sockstats.WithSockStats(c.authCtx, sockstats.LabelControlClientAuto, c.logf)
}
}
// cancelMapCtxLocked cancels the map context and, unless the client is closed, replaces it with a fresh one. The caller must hold c.mu.
func (c *Auto) cancelMapCtxLocked() {
if c.mapCancel != nil {
c.mapCancel()
}
if !c.closed {
c.mapCtx, c.mapCancel = context.WithCancel(context.Background())
c.mapCtx = sockstats.WithSockStats(c.mapCtx, sockstats.LabelControlClientAuto, c.logf)
}
}
// restartMap cancels the existing mapPoll and liteUpdates, and then starts a
// new one.
func (c *Auto) restartMap() {
c.mu.Lock()
c.cancelMapCtxLocked()
synced := c.inMapPoll
c.mu.Unlock()
c.logf("[v1] restartMap: synced=%v", synced)
c.updateControl()
}
func (c *Auto) authRoutine() {
defer close(c.authDone)
bo := backoff.NewBackoff("authRoutine", c.logf, 30*time.Second)
for {
if !c.waitUnpause("authRoutine") {
c.logf("authRoutine: exiting")
return
}
c.mu.Lock()
goal := c.loginGoal
ctx := c.authCtx
if goal != nil {
c.logf("[v1] authRoutine: %s; wantLoggedIn=%v", c.state, true)
} else {
c.logf("[v1] authRoutine: %s; goal=nil paused=%v", c.state, c.paused)
}
c.mu.Unlock()
report := func(err error, msg string) {
c.logf("[v1] %s: %v", msg, err)
// don't send status updates for context errors,
// since context cancelation is always on purpose.
if ctx.Err() == nil {
c.sendStatus("authRoutine-report", err, "", nil)
}
}
if goal == nil {
c.direct.health.SetAuthRoutineInError(nil)
// Wait for user to Login or Logout.
<-ctx.Done()
c.logf("[v1] authRoutine: context done.")
continue
}
c.mu.Lock()
c.urlToVisit = goal.url
if goal.url != "" {
c.state = StateURLVisitRequired
} else {
c.state = StateAuthenticating
}
c.mu.Unlock()
var url string
var err error
var f string
if goal.url != "" {
url, err = c.direct.WaitLoginURL(ctx, goal.url)
f = "WaitLoginURL"
} else {
url, err = c.direct.TryLogin(ctx, goal.flags)
f = "TryLogin"
}
if err != nil {
c.direct.health.SetAuthRoutineInError(err)
report(err, f)
bo.BackOff(ctx, err)
continue
}
if url != "" {
// goal.url ought to be empty here. However, not all control servers
// get this right, and logging about it here just generates noise.
//
// TODO(bradfitz): I don't follow that comment. Our own testcontrol
// used by tstest/integration hits this path, in fact.
if c.direct.panicOnUse {
panic("tainted client")
}
c.mu.Lock()
c.urlToVisit = url
c.loginGoal = &LoginGoal{
flags: LoginDefault,
url: url,
}
c.state = StateURLVisitRequired
c.mu.Unlock()
c.sendStatus("authRoutine-url", err, url, nil)
if goal.url == url {
// The server sent us the same URL we already tried;
// back off to avoid a busy loop.
bo.BackOff(ctx, errors.New("login URL not changing"))
} else {
bo.BackOff(ctx, nil)
}
continue
}
// success
c.direct.health.SetAuthRoutineInError(nil)
c.mu.Lock()
c.urlToVisit = ""
c.loggedIn = true
c.loginGoal = nil
c.state = StateAuthenticated
c.mu.Unlock()
c.sendStatus("authRoutine-success", nil, "", nil)
c.restartMap()
bo.BackOff(ctx, nil)
}
}
// ExpiryForTests returns the credential expiration time, or the zero value if
// the expiration time isn't known. It's used in tests only.
func (c *Auto) ExpiryForTests() time.Time {
c.mu.Lock()
defer c.mu.Unlock()
return c.expiry
}
// DirectForTest returns the underlying direct client object.
// It's used in tests only.
func (c *Auto) DirectForTest() *Direct {
return c.direct
}
// unpausedChanLocked returns a new channel that is sent
// true when the client is unpaused, or false on Auto.Shutdown.
//
// c.mu must be held
func (c *Auto) unpausedChanLocked() <-chan bool {
unpaused := make(chan bool, 1)
c.unpauseWaiters = append(c.unpauseWaiters, unpaused)
return unpaused
}
// mapRoutineState is the state of Auto.mapRoutine while it's running.
type mapRoutineState struct {
c *Auto
bo *backoff.Backoff
}
var _ NetmapDeltaUpdater = mapRoutineState{}
func (mrs mapRoutineState) UpdateFullNetmap(nm *netmap.NetworkMap) {
c := mrs.c
c.mu.Lock()
ctx := c.mapCtx
c.inMapPoll = true
if c.loggedIn {
c.state = StateSynchronized
}
c.expiry = nm.Expiry
stillAuthed := c.loggedIn
c.logf("[v1] mapRoutine: netmap received: %s", c.state)
c.mu.Unlock()
if stillAuthed {
c.sendStatus("mapRoutine-got-netmap", nil, "", nm)
}
// Reset the backoff timer if we got a netmap.
mrs.bo.BackOff(ctx, nil)
}
func (mrs mapRoutineState) UpdateNetmapDelta(muts []netmap.NodeMutation) bool {
c := mrs.c
c.mu.Lock()
goodState := c.loggedIn && c.inMapPoll
ndu, canDelta := c.observer.(NetmapDeltaUpdater)
c.mu.Unlock()
if !goodState || !canDelta {
return false
}
ctx, cancel := context.WithTimeout(c.mapCtx, 2*time.Second)
defer cancel()
var ok bool
err := c.observerQueue.RunSync(ctx, func() {
ok = ndu.UpdateNetmapDelta(muts)
})
return err == nil && ok
}
// mapRoutine is responsible for keeping a read-only streaming connection to the
// control server, and keeping the netmap up to date.
func (c *Auto) mapRoutine() {
defer close(c.mapDone)
mrs := mapRoutineState{
c: c,
bo: backoff.NewBackoff("mapRoutine", c.logf, 30*time.Second),
}
for {
if !c.waitUnpause("mapRoutine") {
c.logf("mapRoutine: exiting")
return
}
c.mu.Lock()
c.logf("[v1] mapRoutine: %s", c.state)
loggedIn := c.loggedIn
ctx := c.mapCtx
c.mu.Unlock()
report := func(err error, msg string) {
c.logf("[v1] %s: %v", msg, err)
err = fmt.Errorf("%s: %w", msg, err)
// don't send status updates for context errors,
// since context cancelation is always on purpose.
if ctx.Err() == nil {
c.sendStatus("mapRoutine1", err, "", nil)
}
}
if !loggedIn {
// Wait for something interesting to happen
c.mu.Lock()
c.inMapPoll = false
c.mu.Unlock()
<-ctx.Done()
c.logf("[v1] mapRoutine: context done.")
continue
}
c.direct.health.SetOutOfPollNetMap()
err := c.direct.PollNetMap(ctx, mrs)
c.direct.health.SetOutOfPollNetMap()
c.mu.Lock()
c.inMapPoll = false
if c.state == StateSynchronized {
c.state = StateAuthenticated
}
paused := c.paused
c.mu.Unlock()
if paused {
mrs.bo.BackOff(ctx, nil)
c.logf("mapRoutine: paused")
} else {
mrs.bo.BackOff(ctx, err)
report(err, "PollNetMap")
}
}
}
func (c *Auto) AuthCantContinue() bool {
if c == nil {
return true
}
c.mu.Lock()
defer c.mu.Unlock()
return !c.loggedIn && (c.loginGoal == nil || c.loginGoal.url != "")
}
func (c *Auto) SetHostinfo(hi *tailcfg.Hostinfo) {
if hi == nil {
panic("nil Hostinfo")
}
if !c.direct.SetHostinfo(hi) {
// No changes. Don't log.
return
}
// Send new Hostinfo to server
c.updateControl()
}
func (c *Auto) SetNetInfo(ni *tailcfg.NetInfo) {
if ni == nil {
panic("nil NetInfo")
}
if !c.direct.SetNetInfo(ni) {
return
}
// Send new NetInfo to server
c.updateControl()
}
// SetTKAHead updates the TKA head hash that map-request infrastructure sends.
func (c *Auto) SetTKAHead(headHash string) {
if !c.direct.SetTKAHead(headHash) {
return
}
// Send new TKAHead to server
c.updateControl()
}
// sendStatus must not be called with c.mu held.
func (c *Auto) sendStatus(who string, err error, url string, nm *netmap.NetworkMap) {
c.mu.Lock()
if c.closed {
c.mu.Unlock()
return
}
state := c.state
loggedIn := c.loggedIn
inMapPoll := c.inMapPoll
c.mu.Unlock()
c.logf("[v1] sendStatus: %s: %v", who, state)
var p persist.PersistView
if nm != nil && loggedIn && inMapPoll {
p = c.direct.GetPersist()
} else {
// don't send netmap status, as it's misleading when we're
// not logged in.
nm = nil
}
new := Status{
URL: url,
Persist: p,
NetMap: nm,
Err: err,
state: state,
}
// Launch a new goroutine to avoid blocking the caller while the observer
// does its thing, which may result in a call back into the client.
c.observerQueue.Add(func() {
c.observer.SetControlClientStatus(c, new)
})
}
func (c *Auto) Login(flags LoginFlags) {
c.logf("client.Login(%v)", flags)
c.mu.Lock()
defer c.mu.Unlock()
if c.closed {
return
}
if c.direct != nil && c.direct.panicOnUse {
panic("tainted client")
}
c.wantLoggedIn = true
c.loginGoal = &LoginGoal{
flags: flags,
}
c.cancelMapCtxLocked()
c.cancelAuthCtxLocked()
}
var ErrClientClosed = errors.New("client closed")
func (c *Auto) Logout(ctx context.Context) error {
c.logf("client.Logout()")
c.mu.Lock()
c.wantLoggedIn = false
c.loginGoal = nil
closed := c.closed
if c.direct != nil && c.direct.panicOnUse {
panic("tainted client")
}
c.mu.Unlock()
if closed {
return ErrClientClosed
}
if err := c.direct.TryLogout(ctx); err != nil {
return err
}
c.mu.Lock()
c.loggedIn = false
c.state = StateNotAuthenticated
c.cancelAuthCtxLocked()
c.cancelMapCtxLocked()
c.mu.Unlock()
c.sendStatus("authRoutine-wantout", nil, "", nil)
return nil
}
func (c *Auto) SetExpirySooner(ctx context.Context, expiry time.Time) error {
return c.direct.SetExpirySooner(ctx, expiry)
}
// UpdateEndpoints sets the client's discovered endpoints and sends
// them to the control server if they've changed.
//
// It does not retain the provided slice.
func (c *Auto) UpdateEndpoints(endpoints []tailcfg.Endpoint) {
changed := c.direct.SetEndpoints(endpoints)
if changed {
c.updateControl()
}
}
func (c *Auto) Shutdown() {
c.mu.Lock()
if c.closed {
c.mu.Unlock()
return
}
c.logf("client.Shutdown ...")
direct := c.direct
c.closed = true
c.observerQueue.Shutdown()
c.cancelAuthCtxLocked()
c.cancelMapCtxLocked()
for _, w := range c.unpauseWaiters {
w <- false
}
c.unpauseWaiters = nil
c.mu.Unlock()
c.unregisterHealthWatch()
<-c.authDone
<-c.mapDone
<-c.updateDone
if direct != nil {
direct.Close()
}
ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
defer cancel()
c.observerQueue.Wait(ctx)
c.logf("Client.Shutdown done.")
}
// TestOnlyNodePublicKey returns the node public key currently in use. It is
// used exclusively in tests.
func (c *Auto) TestOnlyNodePublicKey() key.NodePublic {
priv := c.direct.GetPersist()
return priv.PrivateNodeKey().Public()
}
func (c *Auto) TestOnlySetAuthKey(authkey string) {
c.direct.mu.Lock()
defer c.direct.mu.Unlock()
c.direct.authKey = authkey
}
func (c *Auto) TestOnlyTimeNow() time.Time {
return c.clock.Now()
}
// SetDNS sends the SetDNSRequest request to the control plane server,
// requesting a DNS record be created or updated.
func (c *Auto) SetDNS(ctx context.Context, req *tailcfg.SetDNSRequest) error {
return c.direct.SetDNS(ctx, req)
}
func (c *Auto) DoNoiseRequest(req *http.Request) (*http.Response, error) {
return c.direct.DoNoiseRequest(req)
}
// GetSingleUseNoiseRoundTripper returns a RoundTripper that can only be used
// once (and must be used once) to make a single HTTP request over the noise
// channel to the coordination server.
//
// In addition to the RoundTripper, it returns the HTTP/2 channel's early noise
// payload, if any.
func (c *Auto) GetSingleUseNoiseRoundTripper(ctx context.Context) (http.RoundTripper, *tailcfg.EarlyNoise, error) {
return c.direct.GetSingleUseNoiseRoundTripper(ctx)
}
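
updateControl and updateRoutine above coalesce bursts of local changes: callers bump a monotonically increasing generation and do a non-blocking send on the 1-buffered updateCh, while the routine compares the latest generation with the one it last acted on. A standalone sketch of that pattern (illustrative only; the names and structure are simplified, not the vendored types):

package main

import (
    "fmt"
    "sync/atomic"
    "time"
)

type notifier struct {
    gen      atomic.Int64  // bumped on every local change
    updateCh chan struct{} // 1-buffered wakeup, like Auto.updateCh
}

// notify records that something changed and nudges the worker. If a wakeup
// is already pending, the send is dropped; the worker will still see the
// newest generation when it next runs.
func (n *notifier) notify() {
    n.gen.Add(1)
    select {
    case n.updateCh <- struct{}{}:
    default:
    }
}

// worker sends at most one update per batch of notifications.
func (n *notifier) worker(done <-chan struct{}) {
    var lastSent int64
    for {
        select {
        case <-done:
            return
        case <-n.updateCh:
        }
        if cur := n.gen.Load(); cur != lastSent {
            fmt.Printf("sending update for generation %d\n", cur)
            lastSent = cur
        }
    }
}

func main() {
    n := &notifier{updateCh: make(chan struct{}, 1)}
    done := make(chan struct{})
    go n.worker(done)
    for i := 0; i < 5; i++ { // a burst of changes collapses into one or two sends
        n.notify()
    }
    time.Sleep(100 * time.Millisecond)
    close(done)
}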

90
vendor/tailscale.com/control/controlclient/client.go generated vendored Normal file

@@ -0,0 +1,90 @@
// Copyright (c) Tailscale Inc & AUTHORS
// SPDX-License-Identifier: BSD-3-Clause
// Package controlclient implements the client for the Tailscale
// control plane.
//
// It handles authentication and port picking, and collects the local
// network configuration.
package controlclient
import (
"context"
"tailscale.com/tailcfg"
)
// LoginFlags is a bitmask of options to change the behavior of Client.Login
// and LocalBackend.
type LoginFlags int
const (
LoginDefault = LoginFlags(0)
LoginInteractive = LoginFlags(1 << iota) // force user login and key refresh
LoginEphemeral // set RegisterRequest.Ephemeral
// LocalBackendStartKeyOSNeutral instructs NewLocalBackend to start the
// LocalBackend without any OS-dependent StateStore StartKey behavior.
//
// See https://github.com/tailscale/tailscale/issues/6973.
LocalBackendStartKeyOSNeutral
)
// Client represents a client connection to the control server.
// Currently this is done through a pair of polling https requests in
// the Auto client, but that might change eventually.
//
// The Client must be comparable as it is used by the Observer to detect stale
// clients.
type Client interface {
// Shutdown closes this session, which should not be used any further
// afterwards.
Shutdown()
// Login begins an interactive or non-interactive login process.
// Client will eventually call the Status callback with either a
// LoginFinished flag (on success) or an auth URL (if further
// interaction is needed). It merely sets the process in motion,
// and doesn't wait for it to complete.
Login(LoginFlags)
// Logout starts a synchronous logout process. It doesn't return
// until the logout operation has been completed.
Logout(context.Context) error
// SetPaused pauses or unpauses the controlclient activity as much
// as possible, without losing its internal state, to minimize
// unnecessary network activity.
// TODO: It might be better to simply shutdown the controlclient and
// make a new one when it's time to unpause.
SetPaused(bool)
// AuthCantContinue returns whether authentication is blocked. If it
// is, you either need to visit the auth URL (previously sent in a
// Status callback) or call the Login function appropriately.
// TODO: this probably belongs in the Status itself instead.
AuthCantContinue() bool
// SetHostinfo changes the Hostinfo structure that will be sent in
// subsequent node registration requests.
// TODO: a server-side change would let us simply upload this
// in a separate http request. It has nothing to do with the rest of
// the state machine.
SetHostinfo(*tailcfg.Hostinfo)
// SetNetInfo changes the NetInfo structure that will be sent in
// subsequent node registration requests.
// TODO: a server-side change would let us simply upload this
// in a separate http request. It has nothing to do with the rest of
// the state machine.
SetNetInfo(*tailcfg.NetInfo)
// SetTKAHead changes the TKA head hash value that will be sent in
// subsequent netmap requests.
SetTKAHead(headHash string)
// UpdateEndpoints changes the Endpoint structure that will be sent
// in subsequent node registration requests.
// TODO: a server-side change would let us simply upload this
// in a separate http request. It has nothing to do with the rest of
// the state machine.
UpdateEndpoints(endpoints []tailcfg.Endpoint)
}
// UserVisibleError is an error that should be shown to users.
type UserVisibleError string
func (e UserVisibleError) Error() string { return string(e) }
func (e UserVisibleError) UserVisibleError() string { return string(e) }
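
A minimal no-op implementation (illustrative only; the package and type names are hypothetical) showing what it takes to satisfy the Client interface, e.g. as a test double. A pointer type keeps the value comparable, as the interface requires:

package clienttest // hypothetical package for this sketch

import (
    "context"

    "tailscale.com/control/controlclient"
    "tailscale.com/tailcfg"
)

// nopClient satisfies controlclient.Client and does nothing.
type nopClient struct{}

var _ controlclient.Client = (*nopClient)(nil)

func (*nopClient) Shutdown()                                    {}
func (*nopClient) Login(controlclient.LoginFlags)               {}
func (*nopClient) Logout(context.Context) error                 { return nil }
func (*nopClient) SetPaused(bool)                               {}
func (*nopClient) AuthCantContinue() bool                       { return false }
func (*nopClient) SetHostinfo(*tailcfg.Hostinfo)                {}
func (*nopClient) SetNetInfo(*tailcfg.NetInfo)                  {}
func (*nopClient) SetTKAHead(headHash string)                   {}
func (*nopClient) UpdateEndpoints(endpoints []tailcfg.Endpoint) {}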

1699
vendor/tailscale.com/control/controlclient/direct.go generated vendored Normal file

File diff suppressed because it is too large

832
vendor/tailscale.com/control/controlclient/map.go generated vendored Normal file

@@ -0,0 +1,832 @@
// Copyright (c) Tailscale Inc & AUTHORS
// SPDX-License-Identifier: BSD-3-Clause
package controlclient
import (
"cmp"
"context"
"encoding/json"
"fmt"
"maps"
"net"
"reflect"
"runtime"
"runtime/debug"
"slices"
"sort"
"strconv"
"sync"
"time"
"tailscale.com/control/controlknobs"
"tailscale.com/envknob"
"tailscale.com/tailcfg"
"tailscale.com/tstime"
"tailscale.com/types/key"
"tailscale.com/types/logger"
"tailscale.com/types/netmap"
"tailscale.com/types/ptr"
"tailscale.com/types/views"
"tailscale.com/util/clientmetric"
"tailscale.com/util/mak"
"tailscale.com/util/set"
"tailscale.com/wgengine/filter"
)
// mapSession holds the state over a long-polled "map" request to the
// control plane.
//
// It accepts incremental tailcfg.MapResponse values via
// HandleNonKeepAliveMapResponse and returns fully inflated NetworkMaps, filling
// in the omitted data implicit from prior MapResponse values from
// within the same session (the same long-poll HTTP response to the
// one MapRequest).
type mapSession struct {
// Immutable fields.
netmapUpdater NetmapUpdater // called on changes (in addition to the optional hooks below)
controlKnobs *controlknobs.Knobs // or nil
privateNodeKey key.NodePrivate
publicNodeKey key.NodePublic
logf logger.Logf
vlogf logger.Logf
machinePubKey key.MachinePublic
altClock tstime.Clock // if nil, regular time is used
cancel context.CancelFunc // always non-nil, shuts down caller's base long poll context
// sessionAliveCtx is a Background-based context that's alive for the
// duration of the mapSession that we own the lifetime of. It's closed by
// sessionAliveCtxClose.
sessionAliveCtx context.Context
sessionAliveCtxClose context.CancelFunc // closes sessionAliveCtx
// Optional hooks, guaranteed non-nil (set to no-op funcs) by the
// newMapSession constructor. They must be overridden if desired
// before the mapSession is used.
// onDebug specifies what to do with a *tailcfg.Debug message.
onDebug func(context.Context, *tailcfg.Debug) error
// onSelfNodeChanged is called before the NetmapUpdater if the self node was
// changed.
onSelfNodeChanged func(*netmap.NetworkMap)
// Fields storing state over the course of multiple MapResponses.
lastPrintMap time.Time
lastNode tailcfg.NodeView
lastCapSet set.Set[tailcfg.NodeCapability]
peers map[tailcfg.NodeID]*tailcfg.NodeView // pointer to view (oddly). same pointers as sortedPeers.
sortedPeers []*tailcfg.NodeView // same pointers as peers, but sorted by Node.ID
lastDNSConfig *tailcfg.DNSConfig
lastDERPMap *tailcfg.DERPMap
lastUserProfile map[tailcfg.UserID]tailcfg.UserProfile
lastPacketFilterRules views.Slice[tailcfg.FilterRule] // concatenation of all namedPacketFilters
namedPacketFilters map[string]views.Slice[tailcfg.FilterRule]
lastParsedPacketFilter []filter.Match
lastSSHPolicy *tailcfg.SSHPolicy
collectServices bool
lastDomain string
lastDomainAuditLogID string
lastHealth []string
lastPopBrowserURL string
lastTKAInfo *tailcfg.TKAInfo
lastNetmapSummary string // from NetworkMap.VeryConcise
lastMaxExpiry time.Duration
}
// newMapSession returns a mostly unconfigured new mapSession.
//
// Modify its optional fields on the returned value before use.
//
// It must have its Close method called to release resources.
func newMapSession(privateNodeKey key.NodePrivate, nu NetmapUpdater, controlKnobs *controlknobs.Knobs) *mapSession {
ms := &mapSession{
netmapUpdater: nu,
controlKnobs: controlKnobs,
privateNodeKey: privateNodeKey,
publicNodeKey: privateNodeKey.Public(),
lastDNSConfig: new(tailcfg.DNSConfig),
lastUserProfile: map[tailcfg.UserID]tailcfg.UserProfile{},
// Non-nil no-op defaults, to be optionally overridden by the caller.
logf: logger.Discard,
vlogf: logger.Discard,
cancel: func() {},
onDebug: func(context.Context, *tailcfg.Debug) error { return nil },
onSelfNodeChanged: func(*netmap.NetworkMap) {},
}
ms.sessionAliveCtx, ms.sessionAliveCtxClose = context.WithCancel(context.Background())
return ms
}
// occasionallyPrintSummary logs the summary at most once every 5 minutes. The
// summary is the NetworkMap.VeryConcise result from the last received map response.
func (ms *mapSession) occasionallyPrintSummary(summary string) {
// Occasionally print the netmap header.
// This is handy for debugging, and our logs processing
// pipeline depends on it. (TODO: Remove this dependency.)
now := ms.clock().Now()
if now.Sub(ms.lastPrintMap) < 5*time.Minute {
return
}
ms.lastPrintMap = now
ms.logf("[v1] new network map (periodic):\n%s", summary)
}
func (ms *mapSession) clock() tstime.Clock {
return cmp.Or[tstime.Clock](ms.altClock, tstime.StdClock{})
}
func (ms *mapSession) Close() {
ms.sessionAliveCtxClose()
}
// HandleNonKeepAliveMapResponse handles a non-KeepAlive MapResponse (full or
// incremental).
//
// All fields that are valid on a KeepAlive MapResponse have already been
// handled.
//
// TODO(bradfitz): make this handle all fields later. For now (2023-08-20) this
// is [re]factoring progress enough.
func (ms *mapSession) HandleNonKeepAliveMapResponse(ctx context.Context, resp *tailcfg.MapResponse) error {
if debug := resp.Debug; debug != nil {
if err := ms.onDebug(ctx, debug); err != nil {
return err
}
}
if DevKnob.StripEndpoints() {
for _, p := range resp.Peers {
p.Endpoints = nil
}
for _, p := range resp.PeersChanged {
p.Endpoints = nil
}
}
// For responses that mutate the self node, check for updated nodeAttrs.
if resp.Node != nil {
if DevKnob.StripCaps() {
resp.Node.Capabilities = nil
resp.Node.CapMap = nil
}
// If the server is old and is still sending us Capabilities instead of
// CapMap, convert it to CapMap early so the rest of the client code can
// work only in terms of CapMap.
for _, c := range resp.Node.Capabilities {
if _, ok := resp.Node.CapMap[c]; !ok {
mak.Set(&resp.Node.CapMap, c, nil)
}
}
ms.controlKnobs.UpdateFromNodeAttributes(resp.Node.CapMap)
}
// Call Node.InitDisplayNames on any changed nodes.
initDisplayNames(cmp.Or(resp.Node.View(), ms.lastNode), resp)
ms.patchifyPeersChanged(resp)
ms.updateStateFromResponse(resp)
if ms.tryHandleIncrementally(resp) {
ms.occasionallyPrintSummary(ms.lastNetmapSummary)
return nil
}
// We have to rebuild the whole netmap (lots of garbage & work downstream of
// our UpdateFullNetmap call). This is the part we tried to avoid but
// some field mutations (especially rare ones) aren't yet handled.
if runtime.GOOS == "ios" {
// Memory is tight on iOS. Free what we can while we
// can before this potential burst of in-use memory.
debug.FreeOSMemory()
}
nm := ms.netmap()
ms.lastNetmapSummary = nm.VeryConcise()
ms.occasionallyPrintSummary(ms.lastNetmapSummary)
// If the self node changed, we might need to update persist.
if resp.Node != nil {
ms.onSelfNodeChanged(nm)
}
ms.netmapUpdater.UpdateFullNetmap(nm)
return nil
}
func (ms *mapSession) tryHandleIncrementally(res *tailcfg.MapResponse) bool {
if ms.controlKnobs != nil && ms.controlKnobs.DisableDeltaUpdates.Load() {
return false
}
nud, ok := ms.netmapUpdater.(NetmapDeltaUpdater)
if !ok {
return false
}
mutations, ok := netmap.MutationsFromMapResponse(res, time.Now())
if ok && len(mutations) > 0 {
return nud.UpdateNetmapDelta(mutations)
}
return ok
}
// updateStats are some stats from updateStateFromResponse, primarily for
// testing. It's meant to be cheap enough to always compute, though. It doesn't
// allocate.
type updateStats struct {
allNew bool
added int
removed int
changed int
}
// updateStateFromResponse updates ms from resp. It takes ownership of resp.
func (ms *mapSession) updateStateFromResponse(resp *tailcfg.MapResponse) {
ms.updatePeersStateFromResponse(resp)
if resp.Node != nil {
ms.lastNode = resp.Node.View()
capSet := set.Set[tailcfg.NodeCapability]{}
for _, c := range resp.Node.Capabilities {
capSet.Add(c)
}
for c := range resp.Node.CapMap {
capSet.Add(c)
}
ms.lastCapSet = capSet
}
for _, up := range resp.UserProfiles {
ms.lastUserProfile[up.ID] = up
}
// TODO(bradfitz): clean up old user profiles? maybe not worth it.
if dm := resp.DERPMap; dm != nil {
ms.vlogf("netmap: new map contains DERP map")
// Zero-valued fields in a DERPMap mean that we're not changing
// anything and are using the previous value(s).
if ldm := ms.lastDERPMap; ldm != nil {
if dm.Regions == nil {
dm.Regions = ldm.Regions
dm.OmitDefaultRegions = ldm.OmitDefaultRegions
}
if dm.HomeParams == nil {
dm.HomeParams = ldm.HomeParams
} else if oldhh := ldm.HomeParams; oldhh != nil {
// Propagate sub-fields of HomeParams
hh := dm.HomeParams
if hh.RegionScore == nil {
hh.RegionScore = oldhh.RegionScore
}
}
}
ms.lastDERPMap = dm
}
var packetFilterChanged bool
// Older way, one big blob:
if pf := resp.PacketFilter; pf != nil {
packetFilterChanged = true
mak.Set(&ms.namedPacketFilters, "base", views.SliceOf(pf))
}
// Newer way, named chunks:
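// Semantics, as implemented below: a "*" key with a nil value resets all
// previously received named filters; any other key with a nil value deletes
// just that named chunk, and a non-nil value replaces it.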
if m := resp.PacketFilters; m != nil {
packetFilterChanged = true
if v, ok := m["*"]; ok && v == nil {
ms.namedPacketFilters = nil
}
for k, v := range m {
if k == "*" {
continue
}
if v != nil {
mak.Set(&ms.namedPacketFilters, k, views.SliceOf(v))
} else {
delete(ms.namedPacketFilters, k)
}
}
}
if packetFilterChanged {
var concat []tailcfg.FilterRule
for _, v := range slices.Sorted(maps.Keys(ms.namedPacketFilters)) {
concat = ms.namedPacketFilters[v].AppendTo(concat)
}
ms.lastPacketFilterRules = views.SliceOf(concat)
var err error
ms.lastParsedPacketFilter, err = filter.MatchesFromFilterRules(concat)
if err != nil {
ms.logf("parsePacketFilter: %v", err)
}
}
if c := resp.DNSConfig; c != nil {
ms.lastDNSConfig = c
}
if p := resp.SSHPolicy; p != nil {
ms.lastSSHPolicy = p
}
if v, ok := resp.CollectServices.Get(); ok {
ms.collectServices = v
}
if resp.Domain != "" {
ms.lastDomain = resp.Domain
}
if resp.DomainDataPlaneAuditLogID != "" {
ms.lastDomainAuditLogID = resp.DomainDataPlaneAuditLogID
}
if resp.Health != nil {
ms.lastHealth = resp.Health
}
if resp.TKAInfo != nil {
ms.lastTKAInfo = resp.TKAInfo
}
if resp.MaxKeyDuration > 0 {
ms.lastMaxExpiry = resp.MaxKeyDuration
}
}
var (
patchDERPRegion = clientmetric.NewCounter("controlclient_patch_derp")
patchEndpoints = clientmetric.NewCounter("controlclient_patch_endpoints")
patchCap = clientmetric.NewCounter("controlclient_patch_capver")
patchKey = clientmetric.NewCounter("controlclient_patch_key")
patchDiscoKey = clientmetric.NewCounter("controlclient_patch_discokey")
patchOnline = clientmetric.NewCounter("controlclient_patch_online")
patchLastSeen = clientmetric.NewCounter("controlclient_patch_lastseen")
patchKeyExpiry = clientmetric.NewCounter("controlclient_patch_keyexpiry")
patchCapMap = clientmetric.NewCounter("controlclient_patch_capmap")
patchKeySignature = clientmetric.NewCounter("controlclient_patch_keysig")
patchifiedPeer = clientmetric.NewCounter("controlclient_patchified_peer")
patchifiedPeerEqual = clientmetric.NewCounter("controlclient_patchified_peer_equal")
)
// updatePeersStateFromResponse updates ms.peers and ms.sortedPeers from resp. It takes ownership of resp.
func (ms *mapSession) updatePeersStateFromResponse(resp *tailcfg.MapResponse) (stats updateStats) {
defer func() {
if stats.removed > 0 || stats.added > 0 {
ms.rebuildSorted()
}
}()
if ms.peers == nil {
ms.peers = make(map[tailcfg.NodeID]*tailcfg.NodeView)
}
if len(resp.Peers) > 0 {
// Not delta encoded.
stats.allNew = true
keep := make(map[tailcfg.NodeID]bool, len(resp.Peers))
for _, n := range resp.Peers {
keep[n.ID] = true
if vp, ok := ms.peers[n.ID]; ok {
stats.changed++
*vp = n.View()
} else {
stats.added++
ms.peers[n.ID] = ptr.To(n.View())
}
}
for id := range ms.peers {
if !keep[id] {
stats.removed++
delete(ms.peers, id)
}
}
// Peers precludes all other delta operations so just return.
return
}
for _, id := range resp.PeersRemoved {
if _, ok := ms.peers[id]; ok {
delete(ms.peers, id)
stats.removed++
}
}
for _, n := range resp.PeersChanged {
if vp, ok := ms.peers[n.ID]; ok {
stats.changed++
*vp = n.View()
} else {
stats.added++
ms.peers[n.ID] = ptr.To(n.View())
}
}
for nodeID, seen := range resp.PeerSeenChange {
if vp, ok := ms.peers[nodeID]; ok {
mut := vp.AsStruct()
if seen {
mut.LastSeen = ptr.To(clock.Now())
} else {
mut.LastSeen = nil
}
*vp = mut.View()
stats.changed++
}
}
for nodeID, online := range resp.OnlineChange {
if vp, ok := ms.peers[nodeID]; ok {
mut := vp.AsStruct()
mut.Online = ptr.To(online)
*vp = mut.View()
stats.changed++
}
}
for _, pc := range resp.PeersChangedPatch {
vp, ok := ms.peers[pc.NodeID]
if !ok {
continue
}
stats.changed++
mut := vp.AsStruct()
if pc.DERPRegion != 0 {
mut.DERP = fmt.Sprintf("%s:%v", tailcfg.DerpMagicIP, pc.DERPRegion)
patchDERPRegion.Add(1)
}
if pc.Cap != 0 {
mut.Cap = pc.Cap
patchCap.Add(1)
}
if pc.Endpoints != nil {
mut.Endpoints = pc.Endpoints
patchEndpoints.Add(1)
}
if pc.Key != nil {
mut.Key = *pc.Key
patchKey.Add(1)
}
if pc.DiscoKey != nil {
mut.DiscoKey = *pc.DiscoKey
patchDiscoKey.Add(1)
}
if v := pc.Online; v != nil {
mut.Online = ptr.To(*v)
patchOnline.Add(1)
}
if v := pc.LastSeen; v != nil {
mut.LastSeen = ptr.To(*v)
patchLastSeen.Add(1)
}
if v := pc.KeyExpiry; v != nil {
mut.KeyExpiry = *v
patchKeyExpiry.Add(1)
}
if v := pc.KeySignature; v != nil {
mut.KeySignature = v
patchKeySignature.Add(1)
}
if v := pc.CapMap; v != nil {
mut.CapMap = v
patchCapMap.Add(1)
}
*vp = mut.View()
}
return
}
// rebuildSorted rebuilds ms.sortedPeers from ms.peers. It should be called
// after any additions or removals from peers.
func (ms *mapSession) rebuildSorted() {
if ms.sortedPeers == nil {
ms.sortedPeers = make([]*tailcfg.NodeView, 0, len(ms.peers))
} else {
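// Reuse the existing backing array, clearing any tail beyond the new
// length so stale *NodeView pointers don't keep removed nodes reachable.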
if len(ms.sortedPeers) > len(ms.peers) {
clear(ms.sortedPeers[len(ms.peers):])
}
ms.sortedPeers = ms.sortedPeers[:0]
}
for _, p := range ms.peers {
ms.sortedPeers = append(ms.sortedPeers, p)
}
sort.Slice(ms.sortedPeers, func(i, j int) bool {
return ms.sortedPeers[i].ID() < ms.sortedPeers[j].ID()
})
}
func (ms *mapSession) addUserProfile(nm *netmap.NetworkMap, userID tailcfg.UserID) {
if userID == 0 {
return
}
if _, dup := nm.UserProfiles[userID]; dup {
// Already populated it from a previous peer.
return
}
if up, ok := ms.lastUserProfile[userID]; ok {
nm.UserProfiles[userID] = up
}
}
var debugPatchifyPeer = envknob.RegisterBool("TS_DEBUG_PATCHIFY_PEER")
// patchifyPeersChanged mutates resp to promote PeersChanged entries to PeersChangedPatch
// when possible.
func (ms *mapSession) patchifyPeersChanged(resp *tailcfg.MapResponse) {
filtered := resp.PeersChanged[:0]
for _, n := range resp.PeersChanged {
if p, ok := ms.patchifyPeer(n); ok {
patchifiedPeer.Add(1)
if debugPatchifyPeer() {
patchj, _ := json.Marshal(p)
ms.logf("debug: patchifyPeer[ID=%v]: %s", n.ID, patchj)
}
if p != nil {
resp.PeersChangedPatch = append(resp.PeersChangedPatch, p)
} else {
patchifiedPeerEqual.Add(1)
}
} else {
filtered = append(filtered, n)
}
}
resp.PeersChanged = filtered
if len(resp.PeersChanged) == 0 {
resp.PeersChanged = nil
}
}
var nodeFields = sync.OnceValue(getNodeFields)
// getNodeFields returns the field names of tailcfg.Node.
func getNodeFields() []string {
rt := reflect.TypeFor[tailcfg.Node]()
ret := make([]string, rt.NumField())
for i := range rt.NumField() {
ret[i] = rt.Field(i).Name
}
return ret
}
// patchifyPeer returns a *tailcfg.PeerChange describing the delta from the
// session's existing copy of the node with ID n.ID to n.
//
// It returns ok=false if a patch can't be made, a non-nil *PeerChange with
// ok=true when there is a delta, or (nil, true) if all the fields were
// identical (a zero change).
func (ms *mapSession) patchifyPeer(n *tailcfg.Node) (_ *tailcfg.PeerChange, ok bool) {
was, ok := ms.peers[n.ID]
if !ok {
return nil, false
}
return peerChangeDiff(*was, n)
}
// peerChangeDiff returns the difference from 'was' to 'n', if possible.
//
// It returns (nil, true) if the fields were identical.
func peerChangeDiff(was tailcfg.NodeView, n *tailcfg.Node) (_ *tailcfg.PeerChange, ok bool) {
var ret *tailcfg.PeerChange
pc := func() *tailcfg.PeerChange {
if ret == nil {
ret = new(tailcfg.PeerChange)
}
return ret
}
for _, field := range nodeFields() {
switch field {
default:
// The whole point of using reflect in this function is to panic
// here in tests if we forget to handle a new field.
panic("unhandled field: " + field)
case "computedHostIfDifferent", "ComputedName", "ComputedNameWithHost":
// Caller's responsibility to have populated these.
continue
case "DataPlaneAuditLogID":
// Not sent for peers.
case "Capabilities":
// Deprecated; see https://github.com/tailscale/tailscale/issues/11508
// And it was never sent by any known control server.
case "ID":
if was.ID() != n.ID {
return nil, false
}
case "StableID":
if was.StableID() != n.StableID {
return nil, false
}
case "Name":
if was.Name() != n.Name {
return nil, false
}
case "User":
if was.User() != n.User {
return nil, false
}
case "Sharer":
if was.Sharer() != n.Sharer {
return nil, false
}
case "Key":
if was.Key() != n.Key {
pc().Key = ptr.To(n.Key)
}
case "KeyExpiry":
if !was.KeyExpiry().Equal(n.KeyExpiry) {
pc().KeyExpiry = ptr.To(n.KeyExpiry)
}
case "KeySignature":
if !was.KeySignature().Equal(n.KeySignature) {
pc().KeySignature = slices.Clone(n.KeySignature)
}
case "Machine":
if was.Machine() != n.Machine {
return nil, false
}
case "DiscoKey":
if was.DiscoKey() != n.DiscoKey {
pc().DiscoKey = ptr.To(n.DiscoKey)
}
case "Addresses":
if !views.SliceEqual(was.Addresses(), views.SliceOf(n.Addresses)) {
return nil, false
}
case "AllowedIPs":
if !views.SliceEqual(was.AllowedIPs(), views.SliceOf(n.AllowedIPs)) {
return nil, false
}
case "Endpoints":
if !views.SliceEqual(was.Endpoints(), views.SliceOf(n.Endpoints)) {
pc().Endpoints = slices.Clone(n.Endpoints)
}
case "DERP":
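// The DERP home is encoded in the legacy "127.3.3.40:<regionID>" form
// (tailcfg.DerpMagicIP with the region ID as the port). Only that form can
// be expressed as a DERPRegion patch; anything else makes the change
// unpatchable (ok=false).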
if was.DERP() != n.DERP {
ip, portStr, err := net.SplitHostPort(n.DERP)
if err != nil || ip != "127.3.3.40" {
return nil, false
}
port, err := strconv.Atoi(portStr)
if err != nil || port < 1 || port > 65535 {
return nil, false
}
pc().DERPRegion = port
}
case "Hostinfo":
if !was.Hostinfo().Valid() && !n.Hostinfo.Valid() {
continue
}
if !was.Hostinfo().Valid() || !n.Hostinfo.Valid() {
return nil, false
}
if !was.Hostinfo().Equal(n.Hostinfo) {
return nil, false
}
case "Created":
if !was.Created().Equal(n.Created) {
return nil, false
}
case "Cap":
if was.Cap() != n.Cap {
pc().Cap = n.Cap
}
case "CapMap":
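// CapMap changes are patched as whole-map replacements: if the lengths
// differ or any entry differs, the patch carries a clone of the entire
// new CapMap rather than a per-key delta.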
if len(n.CapMap) != was.CapMap().Len() {
if n.CapMap == nil {
pc().CapMap = make(tailcfg.NodeCapMap)
} else {
pc().CapMap = maps.Clone(n.CapMap)
}
break
}
was.CapMap().Range(func(k tailcfg.NodeCapability, v views.Slice[tailcfg.RawMessage]) bool {
nv, ok := n.CapMap[k]
if !ok || !views.SliceEqual(v, views.SliceOf(nv)) {
pc().CapMap = maps.Clone(n.CapMap)
return false
}
return true
})
case "Tags":
if !views.SliceEqual(was.Tags(), views.SliceOf(n.Tags)) {
return nil, false
}
case "PrimaryRoutes":
if !views.SliceEqual(was.PrimaryRoutes(), views.SliceOf(n.PrimaryRoutes)) {
return nil, false
}
case "Online":
wasOnline := was.Online()
if n.Online != nil && wasOnline != nil && *n.Online != *wasOnline {
pc().Online = ptr.To(*n.Online)
}
case "LastSeen":
wasSeen := was.LastSeen()
if n.LastSeen != nil && wasSeen != nil && !wasSeen.Equal(*n.LastSeen) {
pc().LastSeen = ptr.To(*n.LastSeen)
}
case "MachineAuthorized":
if was.MachineAuthorized() != n.MachineAuthorized {
return nil, false
}
case "UnsignedPeerAPIOnly":
if was.UnsignedPeerAPIOnly() != n.UnsignedPeerAPIOnly {
return nil, false
}
case "IsWireGuardOnly":
if was.IsWireGuardOnly() != n.IsWireGuardOnly {
return nil, false
}
case "IsJailed":
if was.IsJailed() != n.IsJailed {
return nil, false
}
case "Expired":
if was.Expired() != n.Expired {
return nil, false
}
case "SelfNodeV4MasqAddrForThisPeer":
va, vb := was.SelfNodeV4MasqAddrForThisPeer(), n.SelfNodeV4MasqAddrForThisPeer
if va == nil && vb == nil {
continue
}
if va == nil || vb == nil || *va != *vb {
return nil, false
}
case "SelfNodeV6MasqAddrForThisPeer":
va, vb := was.SelfNodeV6MasqAddrForThisPeer(), n.SelfNodeV6MasqAddrForThisPeer
if va == nil && vb == nil {
continue
}
if va == nil || vb == nil || *va != *vb {
return nil, false
}
case "ExitNodeDNSResolvers":
va, vb := was.ExitNodeDNSResolvers(), views.SliceOfViews(n.ExitNodeDNSResolvers)
if va.Len() != vb.Len() {
return nil, false
}
for i := range va.Len() {
if !va.At(i).Equal(vb.At(i)) {
return nil, false
}
}
}
}
if ret != nil {
ret.NodeID = n.ID
}
return ret, true
}
// netmap returns a fully populated NetworkMap from the last state seen from
// a call to updateStateFromResponse, filling in omitted
// information from prior MapResponse values.
func (ms *mapSession) netmap() *netmap.NetworkMap {
peerViews := make([]tailcfg.NodeView, len(ms.sortedPeers))
for i, vp := range ms.sortedPeers {
peerViews[i] = *vp
}
nm := &netmap.NetworkMap{
NodeKey: ms.publicNodeKey,
PrivateKey: ms.privateNodeKey,
MachineKey: ms.machinePubKey,
Peers: peerViews,
UserProfiles: make(map[tailcfg.UserID]tailcfg.UserProfile),
Domain: ms.lastDomain,
DomainAuditLogID: ms.lastDomainAuditLogID,
DNS: *ms.lastDNSConfig,
PacketFilter: ms.lastParsedPacketFilter,
PacketFilterRules: ms.lastPacketFilterRules,
SSHPolicy: ms.lastSSHPolicy,
CollectServices: ms.collectServices,
DERPMap: ms.lastDERPMap,
ControlHealth: ms.lastHealth,
TKAEnabled: ms.lastTKAInfo != nil && !ms.lastTKAInfo.Disabled,
MaxKeyDuration: ms.lastMaxExpiry,
}
if ms.lastTKAInfo != nil && ms.lastTKAInfo.Head != "" {
if err := nm.TKAHead.UnmarshalText([]byte(ms.lastTKAInfo.Head)); err != nil {
ms.logf("error unmarshalling TKAHead: %v", err)
nm.TKAEnabled = false
}
}
if node := ms.lastNode; node.Valid() {
nm.SelfNode = node
nm.Expiry = node.KeyExpiry()
nm.Name = node.Name()
nm.AllCaps = ms.lastCapSet
}
ms.addUserProfile(nm, nm.User())
for _, peer := range peerViews {
ms.addUserProfile(nm, peer.Sharer())
ms.addUserProfile(nm, peer.User())
}
if DevKnob.ForceProxyDNS() {
nm.DNS.Proxied = true
}
return nm
}

406
vendor/tailscale.com/control/controlclient/noise.go generated vendored Normal file
View File

@@ -0,0 +1,406 @@
// Copyright (c) Tailscale Inc & AUTHORS
// SPDX-License-Identifier: BSD-3-Clause
package controlclient
import (
"bytes"
"cmp"
"context"
"encoding/json"
"errors"
"math"
"net/http"
"net/url"
"sync"
"time"
"golang.org/x/net/http2"
"tailscale.com/control/controlhttp"
"tailscale.com/envknob"
"tailscale.com/health"
"tailscale.com/internal/noiseconn"
"tailscale.com/net/dnscache"
"tailscale.com/net/netmon"
"tailscale.com/net/tsdial"
"tailscale.com/tailcfg"
"tailscale.com/tstime"
"tailscale.com/types/key"
"tailscale.com/types/logger"
"tailscale.com/util/mak"
"tailscale.com/util/multierr"
"tailscale.com/util/singleflight"
"tailscale.com/util/testenv"
)
// NoiseClient provides an http.Client to connect to tailcontrol over
// the ts2021 protocol.
type NoiseClient struct {
// Client is an HTTP client to talk to the coordination server.
// It automatically makes a new Noise connection as needed.
// It does not support node key proofs. To do that, call
// noiseClient.getConn instead to make a connection.
*http.Client
// h2t is the HTTP/2 transport we use a bit to create new
// *http2.ClientConns. We don't use its connection pool and we don't use its
// dialing. We use it for exactly one reason: its idle timeout that can only
// be configured via the HTTP/1 config. And then we call NewClientConn (with
// an existing Noise connection) on the http2.Transport which sets up an
// http2.ClientConn using that idle timeout from an http1.Transport.
h2t *http2.Transport
// sfDial ensures that two concurrent requests for a noise connection only
// produce one shared one between the two callers.
sfDial singleflight.Group[struct{}, *noiseconn.Conn]
dialer *tsdial.Dialer
dnsCache *dnscache.Resolver
privKey key.MachinePrivate
serverPubKey key.MachinePublic
host string // the host part of serverURL
httpPort string // the default port to dial
httpsPort string // the fallback Noise-over-https port or empty if none
// dialPlan optionally returns a ControlDialPlan previously received
// from the control server; either the function or the return value can
// be nil.
dialPlan func() *tailcfg.ControlDialPlan
logf logger.Logf
netMon *netmon.Monitor
health *health.Tracker
// mu only protects the following variables.
mu sync.Mutex
closed bool
last *noiseconn.Conn // or nil
nextID int
connPool map[int]*noiseconn.Conn // active connections not yet closed; see noiseconn.Conn.Close
}
// NoiseOpts contains options for the NewNoiseClient function. All fields are
// required unless otherwise specified.
type NoiseOpts struct {
// PrivKey is this node's private key.
PrivKey key.MachinePrivate
// ServerPubKey is the public key of the server.
ServerPubKey key.MachinePublic
// ServerURL is the URL of the server to connect to.
ServerURL string
// Dialer's SystemDial function is used to connect to the server.
Dialer *tsdial.Dialer
// DNSCache is the caching Resolver to use to connect to the server.
//
// This field can be nil.
DNSCache *dnscache.Resolver
// Logf is the log function to use. This field can be nil.
Logf logger.Logf
// NetMon is the network monitor that, if set, will be used to get the
// network interface state. This field can be nil; if so, the current
// state will be looked up dynamically.
NetMon *netmon.Monitor
// HealthTracker, if non-nil, is the health tracker to use.
HealthTracker *health.Tracker
// DialPlan, if set, is a function that should return an explicit plan
// on how to connect to the server.
DialPlan func() *tailcfg.ControlDialPlan
}
// controlIsPlaintext is whether we should assume that the controlplane is only accessible
// over plaintext HTTP (as the first hop, before the ts2021 encryption begins).
// This is used by some tests which don't have a real TLS certificate.
var controlIsPlaintext = envknob.RegisterBool("TS_CONTROL_IS_PLAINTEXT_HTTP")
// NewNoiseClient returns a new noiseClient for the provided server and machine key.
// serverURL is of the form https://<host>:<port> (no trailing slash).
//
// netMon may be nil; if non-nil, it's used to do faster interface lookups.
// dialPlan may be nil.
func NewNoiseClient(opts NoiseOpts) (*NoiseClient, error) {
u, err := url.Parse(opts.ServerURL)
if err != nil {
return nil, err
}
var httpPort string
var httpsPort string
if port := u.Port(); port != "" {
// If there is an explicit port specified, trust the scheme and hope for the best
if u.Scheme == "http" {
httpPort = port
httpsPort = "443"
if (testenv.InTest() || controlIsPlaintext()) && (u.Hostname() == "127.0.0.1" || u.Hostname() == "localhost") {
httpsPort = ""
}
} else {
httpPort = "80"
httpsPort = port
}
} else {
// Otherwise, use the standard ports
httpPort = "80"
httpsPort = "443"
}
np := &NoiseClient{
serverPubKey: opts.ServerPubKey,
privKey: opts.PrivKey,
host: u.Hostname(),
httpPort: httpPort,
httpsPort: httpsPort,
dialer: opts.Dialer,
dnsCache: opts.DNSCache,
dialPlan: opts.DialPlan,
logf: opts.Logf,
netMon: opts.NetMon,
health: opts.HealthTracker,
}
// Create the HTTP/2 Transport using a net/http.Transport
// (which only does HTTP/1) because it's the only way to
// configure certain properties on the http2.Transport. But we
// never actually use the net/http.Transport for any HTTP/1
// requests.
h2Transport, err := http2.ConfigureTransports(&http.Transport{
IdleConnTimeout: time.Minute,
})
if err != nil {
return nil, err
}
np.h2t = h2Transport
np.Client = &http.Client{Transport: np}
return np, nil
}
// GetSingleUseRoundTripper returns a RoundTripper that can only be used once
// (and must be used once) to make a single HTTP request over the noise channel
// to the coordination server.
//
// In addition to the RoundTripper, it returns the HTTP/2 channel's early noise
// payload, if any.
func (nc *NoiseClient) GetSingleUseRoundTripper(ctx context.Context) (http.RoundTripper, *tailcfg.EarlyNoise, error) {
for tries := 0; tries < 3; tries++ {
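// If ok is false below, the connection couldn't reserve another request;
// loop to ask getConn for a (possibly new) connection and give up after
// three tries.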
conn, err := nc.getConn(ctx)
if err != nil {
return nil, nil, err
}
ok, earlyPayloadMaybeNil, err := conn.ReserveNewRequest(ctx)
if err != nil {
return nil, nil, err
}
if ok {
return conn, earlyPayloadMaybeNil, nil
}
}
return nil, nil, errors.New("[unexpected] failed to reserve a request on a connection")
}
// contextErr is an error that wraps another error and is used to indicate that
// the error was because a context expired.
type contextErr struct {
err error
}
func (e contextErr) Error() string {
return e.err.Error()
}
func (e contextErr) Unwrap() error {
return e.err
}
// getConn returns a noiseconn.Conn that can be used to make requests to the
// coordination server. It may return a cached connection or create a new one.
// Dials are singleflighted, so concurrent calls to getConn may only dial once.
// As such, context values may not be respected as there are no guarantees that
// the context passed to getConn is the same as the context passed to dial.
func (nc *NoiseClient) getConn(ctx context.Context) (*noiseconn.Conn, error) {
nc.mu.Lock()
if last := nc.last; last != nil && last.CanTakeNewRequest() {
nc.mu.Unlock()
return last, nil
}
nc.mu.Unlock()
for {
// We singleflight the dial to avoid making multiple connections; however,
// that means that we can't simply cancel the dial if the context is
// canceled. Instead, we have to additionally check that the context
// which was canceled is our context and retry if our context is still
// valid.
conn, err, _ := nc.sfDial.Do(struct{}{}, func() (*noiseconn.Conn, error) {
c, err := nc.dial(ctx)
if err != nil {
if ctx.Err() != nil {
return nil, contextErr{ctx.Err()}
}
return nil, err
}
return c, nil
})
var ce contextErr
if err == nil || !errors.As(err, &ce) {
return conn, err
}
if ctx.Err() == nil {
// The dial failed because of a context error, but our context
// is still valid. Retry.
continue
}
// The dial failed because our context was canceled. Return the
// underlying error.
return nil, ce.Unwrap()
}
}
func (nc *NoiseClient) RoundTrip(req *http.Request) (*http.Response, error) {
ctx := req.Context()
conn, err := nc.getConn(ctx)
if err != nil {
return nil, err
}
return conn.RoundTrip(req)
}
// connClosed removes the connection with the provided ID from the pool
// of active connections.
func (nc *NoiseClient) connClosed(id int) {
nc.mu.Lock()
defer nc.mu.Unlock()
conn := nc.connPool[id]
if conn != nil {
delete(nc.connPool, id)
if nc.last == conn {
nc.last = nil
}
}
}
// Close closes all the underlying noise connections.
// It is a no-op and returns nil if the connection is already closed.
func (nc *NoiseClient) Close() error {
nc.mu.Lock()
nc.closed = true
conns := nc.connPool
nc.connPool = nil
nc.mu.Unlock()
var errors []error
for _, c := range conns {
if err := c.Close(); err != nil {
errors = append(errors, err)
}
}
return multierr.New(errors...)
}
// dial opens a new connection to tailcontrol, fetching the server noise key
// if not cached.
func (nc *NoiseClient) dial(ctx context.Context) (*noiseconn.Conn, error) {
nc.mu.Lock()
connID := nc.nextID
nc.nextID++
nc.mu.Unlock()
if tailcfg.CurrentCapabilityVersion > math.MaxUint16 {
// Panic, because a test should have started failing several
// thousand version numbers before getting to this point.
panic("capability version is too high to fit in the wire protocol")
}
var dialPlan *tailcfg.ControlDialPlan
if nc.dialPlan != nil {
dialPlan = nc.dialPlan()
}
// If we have a dial plan, then set our timeout as slightly longer than
// the maximum amount of time contained therein; we assume that
// explicit instructions on timeouts are more useful than a single
// hard-coded timeout.
//
// The default value of 5 is chosen so that, when there's no dial plan,
// we retain the previous behaviour of 10 seconds end-to-end timeout.
timeoutSec := 5.0
if dialPlan != nil {
for _, c := range dialPlan.Candidates {
if v := c.DialStartDelaySec + c.DialTimeoutSec; v > timeoutSec {
timeoutSec = v
}
}
}
// After we establish a connection, we need some time to actually
// upgrade it into a Noise connection. With a ballpark worst-case RTT
// of 1000ms, give ourselves an extra 5 seconds to complete the
// handshake.
timeoutSec += 5
// Be extremely defensive and ensure that the timeout is in the range
// [5, 60] seconds (e.g. if we accidentally get a negative number).
if timeoutSec > 60 {
timeoutSec = 60
} else if timeoutSec < 5 {
timeoutSec = 5
}
timeout := time.Duration(timeoutSec * float64(time.Second))
ctx, cancel := context.WithTimeout(ctx, timeout)
defer cancel()
clientConn, err := (&controlhttp.Dialer{
Hostname: nc.host,
HTTPPort: nc.httpPort,
HTTPSPort: cmp.Or(nc.httpsPort, controlhttp.NoPort),
MachineKey: nc.privKey,
ControlKey: nc.serverPubKey,
ProtocolVersion: uint16(tailcfg.CurrentCapabilityVersion),
Dialer: nc.dialer.SystemDial,
DNSCache: nc.dnsCache,
DialPlan: dialPlan,
Logf: nc.logf,
NetMon: nc.netMon,
HealthTracker: nc.health,
Clock: tstime.StdClock{},
}).Dial(ctx)
if err != nil {
return nil, err
}
ncc, err := noiseconn.New(clientConn.Conn, nc.h2t, connID, nc.connClosed)
if err != nil {
return nil, err
}
nc.mu.Lock()
if nc.closed {
nc.mu.Unlock()
ncc.Close() // Needs to be called without holding the lock.
return nil, errors.New("noise client closed")
}
defer nc.mu.Unlock()
mak.Set(&nc.connPool, connID, ncc)
nc.last = ncc
return ncc, nil
}
// post does a POST to the control server at the given path, JSON-encoding body.
// The provided nodeKey is an optional load balancing hint.
func (nc *NoiseClient) post(ctx context.Context, path string, nodeKey key.NodePublic, body any) (*http.Response, error) {
jbody, err := json.Marshal(body)
if err != nil {
return nil, err
}
req, err := http.NewRequestWithContext(ctx, "POST", "https://"+nc.host+path, bytes.NewReader(jbody))
if err != nil {
return nil, err
}
addLBHeader(req, nodeKey)
req.Header.Set("Content-Type", "application/json")
conn, err := nc.getConn(ctx)
if err != nil {
return nil, err
}
return conn.RoundTrip(req)
}

42
vendor/tailscale.com/control/controlclient/sign.go generated vendored Normal file
View File

@@ -0,0 +1,42 @@
// Copyright (c) Tailscale Inc & AUTHORS
// SPDX-License-Identifier: BSD-3-Clause
package controlclient
import (
"crypto"
"errors"
"fmt"
"time"
"tailscale.com/tailcfg"
"tailscale.com/types/key"
)
var (
errNoCertStore = errors.New("no certificate store")
errCertificateNotConfigured = errors.New("no certificate subject configured")
errUnsupportedSignatureVersion = errors.New("unsupported signature version")
)
// HashRegisterRequest generates the hash required to sign or verify a
// tailcfg.RegisterRequest.
func HashRegisterRequest(
version tailcfg.SignatureType, ts time.Time, serverURL string, deviceCert []byte,
serverPubKey, machinePubKey key.MachinePublic) ([]byte, error) {
h := crypto.SHA256.New()
// hash.Hash.Write never returns an error, so we don't check for one here.
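// The two versions hash the same fields; they differ only in how the keys
// are rendered: SignatureV1 uses the keys' ShortString forms, while
// SignatureV2 uses their full textual forms.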
switch version {
case tailcfg.SignatureV1:
fmt.Fprintf(h, "%s%s%s%s%s",
ts.UTC().Format(time.RFC3339), serverURL, deviceCert, serverPubKey.ShortString(), machinePubKey.ShortString())
case tailcfg.SignatureV2:
fmt.Fprintf(h, "%s%s%s%s%s",
ts.UTC().Format(time.RFC3339), serverURL, deviceCert, serverPubKey, machinePubKey)
default:
return nil, errUnsupportedSignatureVersion
}
return h.Sum(nil), nil
}

View File

@@ -0,0 +1,205 @@
// Copyright (c) Tailscale Inc & AUTHORS
// SPDX-License-Identifier: BSD-3-Clause
//go:build windows
// darwin,cgo is also supported by certstore but untested, so it is not enabled.
package controlclient
import (
"crypto"
"crypto/rsa"
"crypto/x509"
"errors"
"fmt"
"sync"
"time"
"github.com/tailscale/certstore"
"tailscale.com/tailcfg"
"tailscale.com/types/key"
"tailscale.com/util/syspolicy"
)
var getMachineCertificateSubjectOnce struct {
sync.Once
v string // Subject of machine certificate to search for
}
// getMachineCertificateSubject returns the exact name of a Subject that needs
// to be present in an identity's certificate chain to sign a RegisterRequest,
// formatted as per pkix.Name.String(). The Subject may be that of the identity
// itself, an intermediate CA or the root CA.
//
// If getMachineCertificateSubject() returns "" then no lookup will occur and
// each RegisterRequest will be unsigned.
//
// Example: "CN=Tailscale Inc Test Root CA,OU=Tailscale Inc Test Certificate Authority,O=Tailscale Inc,ST=ON,C=CA"
func getMachineCertificateSubject() string {
getMachineCertificateSubjectOnce.Do(func() {
getMachineCertificateSubjectOnce.v, _ = syspolicy.GetString(syspolicy.MachineCertificateSubject, "")
})
return getMachineCertificateSubjectOnce.v
}
var (
errNoMatch = errors.New("no matching certificate")
errBadRequest = errors.New("malformed request")
)
func isSupportedCertificate(cert *x509.Certificate) bool {
return cert.PublicKeyAlgorithm == x509.RSA
}
func isSubjectInChain(subject string, chain []*x509.Certificate) bool {
if len(chain) == 0 || chain[0] == nil {
return false
}
for _, c := range chain {
if c == nil {
continue
}
if c.Subject.String() == subject {
return true
}
}
return false
}
func selectIdentityFromSlice(subject string, ids []certstore.Identity, now time.Time) (certstore.Identity, []*x509.Certificate) {
var bestCandidate struct {
id certstore.Identity
chain []*x509.Certificate
}
for _, id := range ids {
chain, err := id.CertificateChain()
if err != nil {
continue
}
if len(chain) < 1 {
continue
}
if !isSupportedCertificate(chain[0]) {
continue
}
if now.Before(chain[0].NotBefore) || now.After(chain[0].NotAfter) {
// Certificate is not valid at this time
continue
}
if !isSubjectInChain(subject, chain) {
continue
}
// Select the most recently issued certificate. If there is a tie, pick
// one arbitrarily.
if len(bestCandidate.chain) > 0 && bestCandidate.chain[0].NotBefore.After(chain[0].NotBefore) {
continue
}
bestCandidate.id = id
bestCandidate.chain = chain
}
return bestCandidate.id, bestCandidate.chain
}
// findIdentity locates an identity from the Windows or Darwin certificate
// store. It returns the first certificate with a matching Subject anywhere in
// its certificate chain, so it is possible to search for the leaf certificate,
// intermediate CA or root CA. If err is nil then the returned identity will
// never be nil (if no identity is found, the error errNoMatch will be
// returned). If an identity is returned then its certificate chain is also
// returned.
func findIdentity(subject string, st certstore.Store) (certstore.Identity, []*x509.Certificate, error) {
ids, err := st.Identities()
if err != nil {
return nil, nil, err
}
selected, chain := selectIdentityFromSlice(subject, ids, clock.Now())
for _, id := range ids {
if id != selected {
id.Close()
}
}
if selected == nil {
return nil, nil, errNoMatch
}
return selected, chain, nil
}
// signRegisterRequest looks for a suitable machine identity from the local
// system certificate store, and if one is found, signs the RegisterRequest
// using that identity's public key. In addition to the signature, the full
// certificate chain is included so that the control server can validate the
// certificate from a copy of the root CA's certificate.
func signRegisterRequest(req *tailcfg.RegisterRequest, serverURL string, serverPubKey, machinePubKey key.MachinePublic) (err error) {
defer func() {
if err != nil {
err = fmt.Errorf("signRegisterRequest: %w", err)
}
}()
if req.Timestamp == nil {
return errBadRequest
}
machineCertificateSubject := getMachineCertificateSubject()
if machineCertificateSubject == "" {
return errCertificateNotConfigured
}
st, err := certstore.Open(certstore.System)
if err != nil {
return fmt.Errorf("open cert store: %w", err)
}
defer st.Close()
id, chain, err := findIdentity(machineCertificateSubject, st)
if err != nil {
return fmt.Errorf("find identity: %w", err)
}
defer id.Close()
signer, err := id.Signer()
if err != nil {
return fmt.Errorf("create signer: %w", err)
}
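// DeviceCert is the concatenation of the raw DER encoding of every
// certificate in the chain, sized up front to avoid reallocations.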
cl := 0
for _, c := range chain {
cl += len(c.Raw)
}
req.DeviceCert = make([]byte, 0, cl)
for _, c := range chain {
req.DeviceCert = append(req.DeviceCert, c.Raw...)
}
req.SignatureType = tailcfg.SignatureV2
h, err := HashRegisterRequest(req.SignatureType, req.Timestamp.UTC(), serverURL, req.DeviceCert, serverPubKey, machinePubKey)
if err != nil {
return fmt.Errorf("hash: %w", err)
}
req.Signature, err = signer.Sign(nil, h, &rsa.PSSOptions{
SaltLength: rsa.PSSSaltLengthEqualsHash,
Hash: crypto.SHA256,
})
if err != nil {
return fmt.Errorf("sign: %w", err)
}
return nil
}

View File

@@ -0,0 +1,16 @@
// Copyright (c) Tailscale Inc & AUTHORS
// SPDX-License-Identifier: BSD-3-Clause
//go:build !windows
package controlclient
import (
"tailscale.com/tailcfg"
"tailscale.com/types/key"
)
// signRegisterRequest on unsupported platforms always returns errNoCertStore.
func signRegisterRequest(req *tailcfg.RegisterRequest, serverURL string, serverPubKey, machinePubKey key.MachinePublic) error {
return errNoCertStore
}

125
vendor/tailscale.com/control/controlclient/status.go generated vendored Normal file
View File

@@ -0,0 +1,125 @@
// Copyright (c) Tailscale Inc & AUTHORS
// SPDX-License-Identifier: BSD-3-Clause
package controlclient
import (
"encoding/json"
"fmt"
"reflect"
"tailscale.com/types/netmap"
"tailscale.com/types/persist"
"tailscale.com/types/structs"
)
// State is the high-level state of the client. It is used only in
// unit tests for proper sequencing; don't depend on it anywhere else.
//
// TODO(apenwarr): eliminate the state, as it's now obsolete.
//
// apenwarr: Historical note: controlclient.Auto was originally
// intended to be the state machine for the whole tailscale client, but that
// turned out to not be the right abstraction layer, and it moved to
// ipn.Backend. Since ipn.Backend now has a state machine, it would be
// much better if controlclient could be a simple stateless API. But the
// current server-side API (two interlocking polling https calls) makes that
// very hard to implement. A server side API change could untangle this and
// remove all the statefulness.
type State int
const (
StateNew = State(iota)
StateNotAuthenticated
StateAuthenticating
StateURLVisitRequired
StateAuthenticated
StateSynchronized // connected and received map update
)
func (s State) AppendText(b []byte) ([]byte, error) {
return append(b, s.String()...), nil
}
func (s State) MarshalText() ([]byte, error) {
return []byte(s.String()), nil
}
func (s State) String() string {
switch s {
case StateNew:
return "state:new"
case StateNotAuthenticated:
return "state:not-authenticated"
case StateAuthenticating:
return "state:authenticating"
case StateURLVisitRequired:
return "state:url-visit-required"
case StateAuthenticated:
return "state:authenticated"
case StateSynchronized:
return "state:synchronized"
default:
return fmt.Sprintf("state:unknown:%d", int(s))
}
}
type Status struct {
_ structs.Incomparable
// Err, if non-nil, is an error that occurred while logging in.
//
// If it's of type UserVisibleError then it's meant to be shown to users in
// their Tailscale client. Otherwise it's just logged to tailscaled's logs.
Err error
// URL, if non-empty, is the interactive URL to visit to finish logging in.
URL string
// NetMap is the latest server-pushed state of the tailnet network.
NetMap *netmap.NetworkMap
// Persist, when Valid, is the locally persisted configuration.
//
// TODO(bradfitz,maisem): clarify this.
Persist persist.PersistView
// state is the internal state. It should not be exposed outside this
// package, but we have some automated tests elsewhere that need to
// use it via the StateForTest accessor.
// TODO(apenwarr): Unexport or remove these.
state State
}
// LoginFinished reports whether the controlclient is in its "StateAuthenticated"
// state where it's in a happy register state but not yet in a map poll.
//
// TODO(bradfitz): delete this and everything around Status.state.
func (s *Status) LoginFinished() bool { return s.state == StateAuthenticated }
// StateForTest returns the internal state of s for tests only.
func (s *Status) StateForTest() State { return s.state }
// SetStateForTest sets the internal state of s for tests only.
func (s *Status) SetStateForTest(state State) { s.state = state }
// Equal reports whether s and s2 are equal.
func (s *Status) Equal(s2 *Status) bool {
if s == nil && s2 == nil {
return true
}
return s != nil && s2 != nil &&
s.Err == s2.Err &&
s.URL == s2.URL &&
s.state == s2.state &&
reflect.DeepEqual(s.Persist, s2.Persist) &&
reflect.DeepEqual(s.NetMap, s2.NetMap)
}
func (s Status) String() string {
b, err := json.MarshalIndent(s, "", "\t")
if err != nil {
panic(err)
}
return s.state.String() + " " + string(b)
}

612
vendor/tailscale.com/control/controlhttp/client.go generated vendored Normal file
View File

@@ -0,0 +1,612 @@
// Copyright (c) Tailscale Inc & AUTHORS
// SPDX-License-Identifier: BSD-3-Clause
//go:build !js
// Package controlhttp implements the Tailscale 2021 control protocol
// base transport over HTTP.
//
// This tunnels the protocol in control/controlbase over HTTP with a
// variety of compatibility fallbacks for handling picky or deep-inspecting
// proxies.
//
// In the happy path, a client makes a single cleartext HTTP request
// to the server, the server responds with 101 Switching Protocols,
// and the control base protocol takes place over plain TCP.
//
// In the compatibility path, the client does the above over HTTPS,
// resulting in double encryption (once for the control transport, and
// once for the outer TLS layer).
package controlhttp
import (
"context"
"crypto/tls"
"encoding/base64"
"errors"
"fmt"
"io"
"math"
"net"
"net/http"
"net/http/httptrace"
"net/netip"
"net/url"
"runtime"
"sort"
"sync/atomic"
"time"
"tailscale.com/control/controlbase"
"tailscale.com/envknob"
"tailscale.com/health"
"tailscale.com/net/dnscache"
"tailscale.com/net/dnsfallback"
"tailscale.com/net/netutil"
"tailscale.com/net/sockstats"
"tailscale.com/net/tlsdial"
"tailscale.com/net/tshttpproxy"
"tailscale.com/syncs"
"tailscale.com/tailcfg"
"tailscale.com/tstime"
"tailscale.com/util/multierr"
)
var stdDialer net.Dialer
// Dial connects to the HTTP server at this Dialer's Host:HTTPPort, requests to
// switch to the Tailscale control protocol, and returns an established control
// protocol connection.
//
// If Dial fails to connect using HTTP, it also tries to tunnel over TLS to the
// Dialer's Host:HTTPSPort as a compatibility fallback.
//
// The provided ctx is only used for the initial connection, until
// Dial returns. It does not affect the connection once established.
func (a *Dialer) Dial(ctx context.Context) (*ClientConn, error) {
if a.Hostname == "" {
return nil, errors.New("required Dialer.Hostname empty")
}
return a.dial(ctx)
}
func (a *Dialer) logf(format string, args ...any) {
if a.Logf != nil {
a.Logf(format, args...)
}
}
func (a *Dialer) getProxyFunc() func(*http.Request) (*url.URL, error) {
if a.proxyFunc != nil {
return a.proxyFunc
}
return tshttpproxy.ProxyFromEnvironment
}
// httpsFallbackDelay is how long we'll wait for a.HTTPPort to work before
// starting to try a.HTTPSPort.
func (a *Dialer) httpsFallbackDelay() time.Duration {
if v := a.testFallbackDelay; v != 0 {
return v
}
return 500 * time.Millisecond
}
var _ = envknob.RegisterBool("TS_USE_CONTROL_DIAL_PLAN") // to record at init time whether it's in use
func (a *Dialer) dial(ctx context.Context) (*ClientConn, error) {
// If we don't have a dial plan, just fall back to dialing the single
// host we know about.
useDialPlan := envknob.BoolDefaultTrue("TS_USE_CONTROL_DIAL_PLAN")
if !useDialPlan || a.DialPlan == nil || len(a.DialPlan.Candidates) == 0 {
return a.dialHost(ctx, netip.Addr{})
}
candidates := a.DialPlan.Candidates
// Otherwise, we try dialing per the plan. Store the highest priority
// in the list, so that if we get a connection to one of those
// candidates we can return quickly.
var highestPriority int = math.MinInt
for _, c := range candidates {
if c.Priority > highestPriority {
highestPriority = c.Priority
}
}
// This context allows us to cancel in-flight connections if we get a
// highest-priority connection before we're all done.
ctx, cancel := context.WithCancel(ctx)
defer cancel()
// Now, for each candidate, kick off a dial in parallel.
type dialResult struct {
conn *ClientConn
err error
addr netip.Addr
priority int
}
resultsCh := make(chan dialResult, len(candidates))
var pending atomic.Int32
pending.Store(int32(len(candidates)))
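// resultsCh is buffered to len(candidates) so a slow dial goroutine can
// always deliver its result without blocking, even after a winner has been
// returned and draining continues in the background.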
for _, c := range candidates {
go func(ctx context.Context, c tailcfg.ControlIPCandidate) {
var (
conn *ClientConn
err error
)
// Always send results back to our channel.
defer func() {
resultsCh <- dialResult{conn, err, c.IP, c.Priority}
if pending.Add(-1) == 0 {
close(resultsCh)
}
}()
// If non-zero, wait the configured start timeout
// before we do anything.
if c.DialStartDelaySec > 0 {
a.logf("[v2] controlhttp: waiting %.2f seconds before dialing %q @ %v", c.DialStartDelaySec, a.Hostname, c.IP)
tmr, tmrChannel := a.clock().NewTimer(time.Duration(c.DialStartDelaySec * float64(time.Second)))
defer tmr.Stop()
select {
case <-ctx.Done():
err = ctx.Err()
return
case <-tmrChannel:
}
}
// Now, create a sub-context with the given timeout and
// try dialing the provided host.
ctx, cancel := context.WithTimeout(ctx, time.Duration(c.DialTimeoutSec*float64(time.Second)))
defer cancel()
// This will dial, and the defer above sends it back to our parent.
a.logf("[v2] controlhttp: trying to dial %q @ %v", a.Hostname, c.IP)
conn, err = a.dialHost(ctx, c.IP)
}(ctx, c)
}
var results []dialResult
for res := range resultsCh {
// If we get a response that has the highest priority, we don't
// need to wait for any of the other connections to finish; we
// can just return this connection.
//
// TODO(andrew): we could make this better by keeping track of
// the highest remaining priority dynamically, instead of just
// checking for the highest total
if res.priority == highestPriority && res.conn != nil {
a.logf("[v1] controlhttp: high-priority success dialing %q @ %v from dial plan", a.Hostname, res.addr)
// Drain the channel and any existing connections in
// the background.
go func() {
for _, res := range results {
if res.conn != nil {
res.conn.Close()
}
}
for res := range resultsCh {
if res.conn != nil {
res.conn.Close()
}
}
if a.drainFinished != nil {
close(a.drainFinished)
}
}()
return res.conn, nil
}
// This isn't a highest-priority result, so just store it until
// we're done.
results = append(results, res)
}
// After we finish this function, close any remaining open connections.
defer func() {
for _, result := range results {
// Note: below, we nil out the returned connection (if
// any) in the slice so we don't close it.
if result.conn != nil {
result.conn.Close()
}
}
// We don't drain asynchronously after this point, so notify our
// channel when we return.
if a.drainFinished != nil {
close(a.drainFinished)
}
}()
// Sort by priority, then take the first non-error response.
sort.Slice(results, func(i, j int) bool {
// NOTE: intentionally inverted so that the highest priority
// item comes first
return results[i].priority > results[j].priority
})
var (
conn *ClientConn
errs []error
)
for i, result := range results {
if result.err != nil {
errs = append(errs, result.err)
continue
}
a.logf("[v1] controlhttp: succeeded dialing %q @ %v from dial plan", a.Hostname, result.addr)
conn = result.conn
results[i].conn = nil // so we don't close it in the defer
return conn, nil
}
merr := multierr.New(errs...)
// If we get here, then we didn't get anywhere with our dial plan; fall back to just using DNS.
a.logf("controlhttp: failed dialing using DialPlan, falling back to DNS; errs=%s", merr.Error())
return a.dialHost(ctx, netip.Addr{})
}
// The TS_FORCE_NOISE_443 envknob forces the controlclient noise dialer to
// always use port 443 HTTPS connections to the controlplane and not try the
// port 80 HTTP fast path.
//
// This is currently (2023-01-17) needed for Docker Desktop's "VPNKit" proxy
// that breaks port 80 for us post-Noise-handshake, causing us to never try port
// 443. Until one of Docker's proxy and/or this package's port 443 fallback is
// fixed, this is a workaround. It might also be useful for future debugging.
var forceNoise443 = envknob.RegisterBool("TS_FORCE_NOISE_443")
// forceNoise443 reports whether the controlclient noise dialer should always
// use HTTPS connections as its underlay connection (double crypto). This can
// be necessary when networks or middle boxes are messing with port 80.
func (d *Dialer) forceNoise443() bool {
if forceNoise443() {
return true
}
if d.HealthTracker.LastNoiseDialWasRecent() {
// If we dialed recently, assume there was a recent failure and fall
// back to HTTPS dials for the subsequent retries.
//
// This heuristic works around networks where port 80 is MITMed and
// appears to work for a bit post-Upgrade but then gets closed,
// such as seen in https://github.com/tailscale/tailscale/issues/13597.
d.logf("controlhttp: forcing port 443 dial due to recent noise dial")
return true
}
return false
}
func (d *Dialer) clock() tstime.Clock {
if d.Clock != nil {
return d.Clock
}
return tstime.StdClock{}
}
var debugNoiseDial = envknob.RegisterBool("TS_DEBUG_NOISE_DIAL")
// dialHost connects to the configured Dialer.Hostname and upgrades the
// connection into a controlbase.Conn.
//
// If optAddr is valid, then no DNS is used and the connection will be made to the
// provided address.
func (a *Dialer) dialHost(ctx context.Context, optAddr netip.Addr) (*ClientConn, error) {
// Create one shared context used by both port 80 and port 443 dials.
// If port 80 is still in flight when 443 returns, this deferred cancel
// will stop the port 80 dial.
ctx, cancel := context.WithCancel(ctx)
defer cancel()
ctx = sockstats.WithSockStats(ctx, sockstats.LabelControlClientDialer, a.logf)
// u80 and u443 are the URLs we'll try to hit over HTTP or HTTPS,
// respectively, in order to do the HTTP upgrade to a net.Conn over which
// we'll speak Noise.
u80 := &url.URL{
Scheme: "http",
Host: net.JoinHostPort(a.Hostname, strDef(a.HTTPPort, "80")),
Path: serverUpgradePath,
}
u443 := &url.URL{
Scheme: "https",
Host: net.JoinHostPort(a.Hostname, strDef(a.HTTPSPort, "443")),
Path: serverUpgradePath,
}
if a.HTTPSPort == NoPort {
u443 = nil
}
type tryURLRes struct {
u *url.URL // input (the URL conn+err are for/from)
conn *ClientConn // result (mutually exclusive with err)
err error
}
ch := make(chan tryURLRes) // must be unbuffered
try := func(u *url.URL) {
if debugNoiseDial() {
a.logf("trying noise dial (%v, %v) ...", u, optAddr)
}
cbConn, err := a.dialURL(ctx, u, optAddr)
if debugNoiseDial() {
a.logf("noise dial (%v, %v) = (%v, %v)", u, optAddr, cbConn, err)
}
select {
case ch <- tryURLRes{u, cbConn, err}:
case <-ctx.Done():
if cbConn != nil {
cbConn.Close()
}
}
}
forceTLS := a.forceNoise443()
// Start the plaintext HTTP attempt first, unless disabled by the envknob.
if !forceTLS || u443 == nil {
go try(u80)
}
// In case outbound port 80 is blocked or MITM'ed poorly, start a backup timer
// to dial port 443 if port 80 doesn't either succeed or fail quickly.
var try443Timer tstime.TimerController
if u443 != nil {
delay := a.httpsFallbackDelay()
if forceTLS {
delay = 0
}
try443Timer = a.clock().AfterFunc(delay, func() { try(u443) })
defer try443Timer.Stop()
}
var err80, err443 error
for {
select {
case <-ctx.Done():
return nil, fmt.Errorf("connection attempts aborted by context: %w", ctx.Err())
case res := <-ch:
if res.err == nil {
return res.conn, nil
}
switch res.u {
case u80:
// Connecting over plain HTTP failed; assume it's an HTTP proxy
// being difficult and see if we can get through over HTTPS.
err80 = res.err
// Stop the fallback timer and run it immediately. We don't use
// Timer.Reset(0) here because on AfterFuncs, that can run it
// again.
if try443Timer != nil && try443Timer.Stop() {
go try(u443)
} // else we lost the race and it started already which is what we want
case u443:
err443 = res.err
default:
panic("invalid")
}
if err80 != nil && err443 != nil {
return nil, fmt.Errorf("all connection attempts failed (HTTP: %v, HTTPS: %v)", err80, err443)
}
}
}
}
// dialURL attempts to connect to the given URL.
//
// If optAddr is valid, then no DNS is used and the connection will be made to the
// provided address.
func (a *Dialer) dialURL(ctx context.Context, u *url.URL, optAddr netip.Addr) (*ClientConn, error) {
init, cont, err := controlbase.ClientDeferred(a.MachineKey, a.ControlKey, a.ProtocolVersion)
if err != nil {
return nil, err
}
netConn, err := a.tryURLUpgrade(ctx, u, optAddr, init)
if err != nil {
return nil, err
}
cbConn, err := cont(ctx, netConn)
if err != nil {
netConn.Close()
return nil, err
}
return &ClientConn{
Conn: cbConn,
}, nil
}
// resolver returns a.DNSCache if non-nil or a new *dnscache.Resolver
// otherwise.
func (a *Dialer) resolver() *dnscache.Resolver {
if a.DNSCache != nil {
return a.DNSCache
}
return &dnscache.Resolver{
Forward: dnscache.Get().Forward,
LookupIPFallback: dnsfallback.MakeLookupFunc(a.logf, a.NetMon),
UseLastGood: true,
Logf: a.Logf, // not a.logf method; we want to propagate nil-ness
}
}
func isLoopback(a net.Addr) bool {
if ta, ok := a.(*net.TCPAddr); ok {
return ta.IP.IsLoopback()
}
return false
}
var macOSScreenTime = health.Register(&health.Warnable{
Code: "macos-screen-time",
Severity: health.SeverityHigh,
Title: "Tailscale blocked by Screen Time",
Text: func(args health.Args) string {
return "macOS Screen Time seems to be blocking Tailscale. Try disabling Screen Time in System Settings > Screen Time > Content & Privacy > Access to Web Content."
},
ImpactsConnectivity: true,
})
// tryURLUpgrade connects to u, and tries to upgrade it to a net.Conn.
//
// If optAddr is valid, then no DNS is used and the connection will be made to
// the provided address.
//
// Only the provided ctx is used, not a.ctx.
func (a *Dialer) tryURLUpgrade(ctx context.Context, u *url.URL, optAddr netip.Addr, init []byte) (_ net.Conn, retErr error) {
var dns *dnscache.Resolver
// If we were provided an address to dial, then create a resolver that just
// returns that value; otherwise, fall back to DNS.
if optAddr.IsValid() {
dns = &dnscache.Resolver{
SingleHostStaticResult: []netip.Addr{optAddr},
SingleHost: u.Hostname(),
Logf: a.Logf, // not a.logf method; we want to propagate nil-ness
}
} else {
dns = a.resolver()
}
var dialer dnscache.DialContextFunc
if a.Dialer != nil {
dialer = a.Dialer
} else {
dialer = stdDialer.DialContext
}
// On macOS, see if Screen Time is blocking things.
if runtime.GOOS == "darwin" {
var proxydIntercepted atomic.Bool // intercepted by macOS webfilterproxyd
origDialer := dialer
dialer = func(ctx context.Context, network, address string) (net.Conn, error) {
c, err := origDialer(ctx, network, address)
if err != nil {
return nil, err
}
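// If both ends of the freshly dialed connection are loopback addresses,
// macOS's content filter (webfilterproxyd) has most likely intercepted
// the dial; record that so a later failure can be attributed to Screen Time.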
if isLoopback(c.LocalAddr()) && isLoopback(c.RemoteAddr()) {
proxydIntercepted.Store(true)
}
return c, nil
}
defer func() {
if retErr != nil && proxydIntercepted.Load() {
a.HealthTracker.SetUnhealthy(macOSScreenTime, nil)
retErr = fmt.Errorf("macOS Screen Time is blocking network access: %w", retErr)
} else {
a.HealthTracker.SetHealthy(macOSScreenTime)
}
}()
}
tr := http.DefaultTransport.(*http.Transport).Clone()
defer tr.CloseIdleConnections()
tr.Proxy = a.getProxyFunc()
tshttpproxy.SetTransportGetProxyConnectHeader(tr)
tr.DialContext = dnscache.Dialer(dialer, dns)
// Disable HTTP2, since h2 can't do protocol switching.
tr.TLSClientConfig.NextProtos = []string{}
tr.TLSNextProto = map[string]func(string, *tls.Conn) http.RoundTripper{}
tr.TLSClientConfig = tlsdial.Config(a.Hostname, a.HealthTracker, tr.TLSClientConfig)
if !tr.TLSClientConfig.InsecureSkipVerify {
panic("unexpected") // should be set by tlsdial.Config
}
verify := tr.TLSClientConfig.VerifyConnection
if verify == nil {
panic("unexpected") // should be set by tlsdial.Config
}
// Demote all cert verification errors to log messages. We don't actually
// care about the TLS security (because we just do the Noise crypto atop whatever
// connection we get, including HTTP port 80 plaintext) so this permits
// middleboxes to MITM their users. All they'll see is some Noise.
tr.TLSClientConfig.VerifyConnection = func(cs tls.ConnectionState) error {
if err := verify(cs); err != nil && a.Logf != nil && !a.omitCertErrorLogging {
a.Logf("warning: TLS cert verification for %q failed: %v", a.Hostname, err)
}
return nil // regardless
}
tr.DialTLSContext = dnscache.TLSDialer(dialer, dns, tr.TLSClientConfig)
tr.DisableCompression = true
// (mis)use httptrace to extract the underlying net.Conn from the
// transport. The transport handles 101 Switching Protocols correctly,
// such that the Conn will not be reused or kept alive by the transport
// once the response has been handed back from RoundTrip.
//
// In theory, the machinery of net/http should make it such that
// the trace callback happens-before we get the response, but
// there's no promise of that. So, to make sure, we use a buffered
// channel as a synchronization step to avoid data races.
//
// Note that even though we're able to extract a net.Conn via this
// mechanism, we must still keep using the eventual resp.Body to
// read from, because it includes a buffer we can't get rid of. If
// the server never sends any data after sending the HTTP
// response, we could get away with it, but violating this
// assumption leads to very mysterious transport errors (lockups,
// unexpected EOFs...), and we're bound to forget someday and
// introduce a protocol optimization at a higher level that starts
// eagerly transmitting from the server.
var lastConn syncs.AtomicValue[net.Conn]
trace := httptrace.ClientTrace{
// Even though we only make a single HTTP request which should
// require a single connection, the context (with the attached
// trace configuration) might be used by our custom dialer to
// make other HTTP requests (e.g. BootstrapDNS). We only care
// about the last connection made, which should be the one to
// the control server.
GotConn: func(info httptrace.GotConnInfo) {
lastConn.Store(info.Conn)
},
}
ctx = httptrace.WithClientTrace(ctx, &trace)
req := &http.Request{
Method: "POST",
URL: u,
Header: http.Header{
"Upgrade": []string{upgradeHeaderValue},
"Connection": []string{"upgrade"},
handshakeHeaderName: []string{base64.StdEncoding.EncodeToString(init)},
},
}
req = req.WithContext(ctx)
resp, err := tr.RoundTrip(req)
if err != nil {
return nil, err
}
if resp.StatusCode != http.StatusSwitchingProtocols {
return nil, fmt.Errorf("unexpected HTTP response: %s", resp.Status)
}
// From here on, the underlying net.Conn is ours to use, but there
// is still a read buffer attached to it within resp.Body. So, we
// must direct I/O through resp.Body, but we can still use the
// underlying net.Conn for stuff like deadlines.
switchedConn := lastConn.Load()
if switchedConn == nil {
resp.Body.Close()
return nil, fmt.Errorf("httptrace didn't provide a connection")
}
if next := resp.Header.Get("Upgrade"); next != upgradeHeaderValue {
resp.Body.Close()
return nil, fmt.Errorf("server switched to unexpected protocol %q", next)
}
rwc, ok := resp.Body.(io.ReadWriteCloser)
if !ok {
resp.Body.Close()
return nil, errors.New("http Transport did not provide a writable body")
}
return netutil.NewAltReadWriteCloserConn(rwc, switchedConn), nil
}
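// Illustrative sketch (not part of the vendored file): how a caller is meant to
// use the net.Conn returned above. Per the comments in this function, reads and
// writes go through the wrapper (and hence resp.Body's buffer), while deadlines
// reach the underlying hijacked connection. The function name, buffer size, and
// 30s timeout are arbitrary; "net" and "time" imports are assumed.
func exampleReadWithDeadline(c net.Conn) ([]byte, error) {
if err := c.SetReadDeadline(time.Now().Add(30 * time.Second)); err != nil {
return nil, err
}
buf := make([]byte, 4096)
n, err := c.Read(buf) // drains any buffered response bytes first, then the socket
return buf[:n], err
}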

View File

@@ -0,0 +1,17 @@
// Copyright (c) Tailscale Inc & AUTHORS
// SPDX-License-Identifier: BSD-3-Clause
package controlhttp
import (
"tailscale.com/control/controlbase"
)
// ClientConn is a Tailscale control client as returned by the Dialer.
//
// It's effectively just a *controlbase.Conn (which it embeds) with
// optional metadata.
type ClientConn struct {
// Conn is the noise connection.
*controlbase.Conn
}
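// Illustrative sketch (not part of the vendored file): constructing a Dialer
// (fields documented in constants.go) and using the resulting ClientConn. The
// hostname, protocol version value, and key parameters are assumptions for this
// example; "context" and "tailscale.com/types/key" imports are assumed.
func exampleDial(ctx context.Context, machineKey key.MachinePrivate, controlKey key.MachinePublic) error {
d := &Dialer{
Hostname:        "controlplane.example.com",
MachineKey:      machineKey,
ControlKey:      controlKey,
ProtocolVersion: 1, // placeholder; use the real protocol version in practice
}
conn, err := d.Dial(ctx)
if err != nil {
return err
}
defer conn.Close()
_, err = conn.Write([]byte("hello")) // encrypted by the embedded controlbase.Conn
return err
}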

61
vendor/tailscale.com/control/controlhttp/client_js.go generated vendored Normal file
View File

@@ -0,0 +1,61 @@
// Copyright (c) Tailscale Inc & AUTHORS
// SPDX-License-Identifier: BSD-3-Clause
package controlhttp
import (
"context"
"encoding/base64"
"errors"
"net"
"net/url"
"github.com/coder/websocket"
"tailscale.com/control/controlbase"
"tailscale.com/net/wsconn"
)
// Dial is a variant of the regular Dial that tunnels the request over WebSockets,
// since we cannot do bi-directional communication over an HTTP connection when in JS.
func (d *Dialer) Dial(ctx context.Context) (*ClientConn, error) {
if d.Hostname == "" {
return nil, errors.New("required Dialer.Hostname empty")
}
init, cont, err := controlbase.ClientDeferred(d.MachineKey, d.ControlKey, d.ProtocolVersion)
if err != nil {
return nil, err
}
wsScheme := "wss"
host := d.Hostname
// If using a custom control server (on a non-standard port), prefer that.
// This mirrors the port selection in newNoiseClient from noise.go.
if d.HTTPPort != "" && d.HTTPPort != "80" && d.HTTPSPort == "443" {
wsScheme = "ws"
host = net.JoinHostPort(host, d.HTTPPort)
}
wsURL := &url.URL{
Scheme: wsScheme,
Host: host,
Path: serverUpgradePath,
// Can't set HTTP headers on the websocket request, so we have to send
// the handshake via a query parameter instead.
RawQuery: url.Values{
handshakeHeaderName: []string{base64.StdEncoding.EncodeToString(init)},
}.Encode(),
}
wsConn, _, err := websocket.Dial(ctx, wsURL.String(), &websocket.DialOptions{
Subprotocols: []string{upgradeHeaderValue},
})
if err != nil {
return nil, err
}
netConn := wsconn.NetConn(context.Background(), wsConn, websocket.MessageBinary, wsURL.String())
cbConn, err := cont(ctx, netConn)
if err != nil {
netConn.Close()
return nil, err
}
return &ClientConn{Conn: cbConn}, nil
}

116
vendor/tailscale.com/control/controlhttp/constants.go generated vendored Normal file
View File

@@ -0,0 +1,116 @@
// Copyright (c) Tailscale Inc & AUTHORS
// SPDX-License-Identifier: BSD-3-Clause
package controlhttp
import (
"net/http"
"net/url"
"time"
"tailscale.com/health"
"tailscale.com/net/dnscache"
"tailscale.com/net/netmon"
"tailscale.com/tailcfg"
"tailscale.com/tstime"
"tailscale.com/types/key"
"tailscale.com/types/logger"
)
const (
// upgradeHeader is the value of the Upgrade HTTP header used to
// indicate the Tailscale control protocol.
upgradeHeaderValue = "tailscale-control-protocol"
// handshakeHeaderName is the HTTP request header that can
// optionally contain base64-encoded initial handshake
// payload, to save an RTT.
handshakeHeaderName = "X-Tailscale-Handshake"
// serverUpgradePath is where the server-side HTTP handler that
// does the protocol switch is located.
serverUpgradePath = "/ts2021"
)
// NoPort is a sentinel value for Dialer.HTTPSPort to indicate that HTTPS
// should not be tried on any port. It exists primarily for some localhost
// tests where the control plane only runs on HTTP.
const NoPort = "none"
// Dialer contains configuration on how to dial the Tailscale control server.
type Dialer struct {
// Hostname is the hostname to connect to, with no port number.
//
// This field is required.
Hostname string
// MachineKey contains the current machine's private key.
//
// This field is required.
MachineKey key.MachinePrivate
// ControlKey contains the expected public key for the control server.
//
// This field is required.
ControlKey key.MachinePublic
// ProtocolVersion is the expected protocol version to negotiate.
//
// This field is required.
ProtocolVersion uint16
// HTTPPort is the port number to use when making a HTTP connection.
//
// If not specified, this defaults to port 80.
HTTPPort string
// HTTPSPort is the port number to use when making a HTTPS connection.
//
// If not specified, this defaults to port 443.
//
// If "none" (NoPort), HTTPS is disabled.
HTTPSPort string
// Dialer is the dialer used to make outbound connections.
//
// If not specified, this defaults to net.Dialer.DialContext.
Dialer dnscache.DialContextFunc
// DNSCache is the caching Resolver used by this Dialer.
//
// If not specified, a new Resolver is created per attempt.
DNSCache *dnscache.Resolver
// Logf, if set, is a logging function to use; if unset, logs are
// dropped.
Logf logger.Logf
NetMon *netmon.Monitor
// HealthTracker, if non-nil, is the health tracker to use.
HealthTracker *health.Tracker
// DialPlan, if set, contains instructions from the control server on
// how to connect to it. If present, we will try the methods in this
// plan before falling back to DNS.
DialPlan *tailcfg.ControlDialPlan
proxyFunc func(*http.Request) (*url.URL, error) // or nil
// For tests only
drainFinished chan struct{}
omitCertErrorLogging bool
testFallbackDelay time.Duration
// Clock, if non-nil, overrides the clock to use.
// If nil, tstime.StdClock is used.
// This exists primarily for tests.
Clock tstime.Clock
}
func strDef(v1, v2 string) string {
if v1 != "" {
return v1
}
return v2
}
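// Illustrative sketch (not part of the vendored file): how the port defaults
// documented on HTTPPort and HTTPSPort above could be applied with strDef. The
// method name is an assumption for this example.
func (d *Dialer) examplePorts() (httpPort, httpsPort string) {
// Per the field docs: HTTPPort defaults to "80", HTTPSPort to "443", and
// HTTPSPort == NoPort ("none") means HTTPS should not be tried at all.
return strDef(d.HTTPPort, "80"), strDef(d.HTTPSPort, "443")
}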

217
vendor/tailscale.com/control/controlhttp/server.go generated vendored Normal file
View File

@@ -0,0 +1,217 @@
// Copyright (c) Tailscale Inc & AUTHORS
// SPDX-License-Identifier: BSD-3-Clause
//go:build !ios

package controlhttp
import (
"context"
"encoding/base64"
"errors"
"fmt"
"io"
"net"
"net/http"
"strings"
"time"
"github.com/coder/websocket"
"tailscale.com/control/controlbase"
"tailscale.com/net/netutil"
"tailscale.com/net/wsconn"
"tailscale.com/types/key"
)
// AcceptHTTP upgrades the HTTP request given by w and r into a Tailscale
// control protocol base transport connection.
//
// AcceptHTTP always writes an HTTP response to w. The caller must not attempt
// their own response after calling AcceptHTTP.
//
// earlyWrite optionally specifies a func to write to the noise connection
// (encrypted). It receives the negotiated version and a writer to write to, if
// desired.
func AcceptHTTP(ctx context.Context, w http.ResponseWriter, r *http.Request, private key.MachinePrivate, earlyWrite func(protocolVersion int, w io.Writer) error) (*controlbase.Conn, error) {
return acceptHTTP(ctx, w, r, private, earlyWrite)
}
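// Illustrative sketch (not part of the vendored file): registering AcceptHTTP on
// the upgrade path. The mux, private key, and serve callback are assumptions for
// this example; a real server would plumb these from its own configuration.
func exampleRegister(mux *http.ServeMux, private key.MachinePrivate, serve func(*controlbase.Conn)) {
mux.HandleFunc(serverUpgradePath, func(w http.ResponseWriter, r *http.Request) {
conn, err := AcceptHTTP(r.Context(), w, r, private, nil) // nil earlyWrite: no early payload
if err != nil {
// AcceptHTTP has already written an HTTP error response.
return
}
go serve(conn) // speak the control protocol over the Noise connection
})
}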
func acceptHTTP(ctx context.Context, w http.ResponseWriter, r *http.Request, private key.MachinePrivate, earlyWrite func(protocolVersion int, w io.Writer) error) (_ *controlbase.Conn, retErr error) {
next := strings.ToLower(r.Header.Get("Upgrade"))
if next == "" {
http.Error(w, "missing next protocol", http.StatusBadRequest)
return nil, errors.New("no next protocol in HTTP request")
}
if next == "websocket" {
return acceptWebsocket(ctx, w, r, private)
}
if next != upgradeHeaderValue {
http.Error(w, "unknown next protocol", http.StatusBadRequest)
return nil, fmt.Errorf("client requested unhandled next protocol %q", next)
}
initB64 := r.Header.Get(handshakeHeaderName)
if initB64 == "" {
http.Error(w, "missing Tailscale handshake header", http.StatusBadRequest)
return nil, errors.New("no tailscale handshake header in HTTP request")
}
init, err := base64.StdEncoding.DecodeString(initB64)
if err != nil {
http.Error(w, "invalid tailscale handshake header", http.StatusBadRequest)
return nil, fmt.Errorf("decoding base64 handshake header: %v", err)
}
hijacker, ok := w.(http.Hijacker)
if !ok {
http.Error(w, "make request over HTTP/1", http.StatusBadRequest)
return nil, errors.New("can't hijack client connection")
}
w.Header().Set("Upgrade", upgradeHeaderValue)
w.Header().Set("Connection", "upgrade")
w.WriteHeader(http.StatusSwitchingProtocols)
conn, brw, err := hijacker.Hijack()
if err != nil {
return nil, fmt.Errorf("hijacking client connection: %w", err)
}
defer func() {
if retErr != nil {
conn.Close()
}
}()
if err := brw.Flush(); err != nil {
return nil, fmt.Errorf("flushing hijacked HTTP buffer: %w", err)
}
conn = netutil.NewDrainBufConn(conn, brw.Reader)
cwc := newWriteCorkingConn(conn)
nc, err := controlbase.Server(ctx, cwc, private, init)
if err != nil {
return nil, fmt.Errorf("noise handshake failed: %w", err)
}
if earlyWrite != nil {
if deadline, ok := ctx.Deadline(); ok {
if err := conn.SetDeadline(deadline); err != nil {
return nil, fmt.Errorf("setting conn deadline: %w", err)
}
defer conn.SetDeadline(time.Time{})
}
if err := earlyWrite(nc.ProtocolVersion(), nc); err != nil {
return nil, err
}
}
if err := cwc.uncork(); err != nil {
return nil, err
}
return nc, nil
}
// acceptWebsocket upgrades a WebSocket connection (from a client that cannot
// speak HTTP) to a Tailscale control protocol base transport connection.
func acceptWebsocket(ctx context.Context, w http.ResponseWriter, r *http.Request, private key.MachinePrivate) (*controlbase.Conn, error) {
c, err := websocket.Accept(w, r, &websocket.AcceptOptions{
Subprotocols: []string{upgradeHeaderValue},
OriginPatterns: []string{"*"},
// Disable compression because we transmit Noise messages that are not
// compressible.
// Additionally, Safari has a broken implementation of compression
// (see https://github.com/nhooyr/websocket/issues/218) that makes
// enabling it actively harmful.
CompressionMode: websocket.CompressionDisabled,
})
if err != nil {
return nil, fmt.Errorf("Could not accept WebSocket connection %v", err)
}
if c.Subprotocol() != upgradeHeaderValue {
c.Close(websocket.StatusPolicyViolation, "client must speak the control subprotocol")
return nil, fmt.Errorf("Unexpected subprotocol %q", c.Subprotocol())
}
if err := r.ParseForm(); err != nil {
c.Close(websocket.StatusPolicyViolation, "Could not parse parameters")
return nil, fmt.Errorf("parse query parameters: %v", err)
}
initB64 := r.Form.Get(handshakeHeaderName)
if initB64 == "" {
c.Close(websocket.StatusPolicyViolation, "missing Tailscale handshake parameter")
return nil, errors.New("no tailscale handshake parameter in HTTP request")
}
init, err := base64.StdEncoding.DecodeString(initB64)
if err != nil {
c.Close(websocket.StatusPolicyViolation, "invalid tailscale handshake parameter")
return nil, fmt.Errorf("decoding base64 handshake parameter: %v", err)
}
conn := wsconn.NetConn(ctx, c, websocket.MessageBinary, r.RemoteAddr)
nc, err := controlbase.Server(ctx, conn, private, init)
if err != nil {
conn.Close()
return nil, fmt.Errorf("noise handshake failed: %w", err)
}
return nc, nil
}
// corkConn is a net.Conn wrapper that initially buffers all writes until uncork
// is called. If the conn is corked and a Read occurs, the Read will flush any
// buffered (corked) write.
//
// Until uncorked, Read/Write/uncork may not be called concurrently.
//
// Deadlines still work, but a corked write ignores deadlines until a Read or
// uncork goes to do that Write.
//
// Use newWriteCorkingConn to create one.
type corkConn struct {
net.Conn
corked bool
buf []byte // corked data
}
func newWriteCorkingConn(c net.Conn) *corkConn {
return &corkConn{Conn: c, corked: true}
}
func (c *corkConn) Write(b []byte) (int, error) {
if c.corked {
c.buf = append(c.buf, b...)
return len(b), nil
}
return c.Conn.Write(b)
}
func (c *corkConn) Read(b []byte) (int, error) {
if c.corked {
if err := c.flush(); err != nil {
return 0, err
}
}
return c.Conn.Read(b)
}
// uncork flushes any buffered data and uncorks the connection so future Writes
// don't buffer. It may not be called concurrently with reads or writes and
// may only be called once.
func (c *corkConn) uncork() error {
if !c.corked {
panic("usage error; uncork called twice") // worth panicking to catch misuse
}
err := c.flush()
c.corked = false
return err
}
func (c *corkConn) flush() error {
if len(c.buf) == 0 {
return nil
}
_, err := c.Conn.Write(c.buf)
c.buf = nil
return err
}
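// Illustrative sketch (not part of the vendored file) of the corking semantics
// above, using a net.Pipe pair. The function name is an assumption.
func exampleCorking() error {
client, server := net.Pipe()
defer client.Close()
defer server.Close()
go io.Copy(io.Discard, client) // drain whatever eventually reaches the client half
cc := newWriteCorkingConn(server)
cc.Write([]byte("buffered")) // while corked: appended to cc.buf, nothing on the wire yet
return cc.uncork() // flushes the buffered bytes; later Writes go straight through
}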

View File

@@ -0,0 +1,191 @@
// Copyright (c) Tailscale Inc & AUTHORS
// SPDX-License-Identifier: BSD-3-Clause
// Package controlknobs contains client options configurable from control which can be turned on
// or off. The ability to turn options on and off exists so new features can be rolled out incrementally.
package controlknobs
import (
"sync/atomic"
"tailscale.com/syncs"
"tailscale.com/tailcfg"
"tailscale.com/types/opt"
)
// Knobs is the set of knobs that the control plane's coordination server can
// adjust at runtime.
type Knobs struct {
// DisableUPnP indicates whether to attempt UPnP mapping.
DisableUPnP atomic.Bool
// KeepFullWGConfig is whether we should disable the lazy wireguard
// programming and instead give WireGuard the full netmap always, even for
// idle peers.
KeepFullWGConfig atomic.Bool
// RandomizeClientPort is whether control says we should randomize
// the client port.
RandomizeClientPort atomic.Bool
// OneCGNAT is whether the node should make one big CGNAT route
// in the OS rather than one /32 per peer.
OneCGNAT syncs.AtomicValue[opt.Bool]
// ForceBackgroundSTUN forces netcheck STUN queries to keep
// running in magicsock, even when idle.
ForceBackgroundSTUN atomic.Bool
// DisableDeltaUpdates is whether the node should not process
// incremental (delta) netmap updates and should treat all netmap
// changes as "full" ones as tailscaled did in 1.48.x and earlier.
DisableDeltaUpdates atomic.Bool
// PeerMTUEnable is whether the node should do peer path MTU discovery.
PeerMTUEnable atomic.Bool
// DisableDNSForwarderTCPRetries is whether the DNS forwarder should
// skip retrying truncated queries over TCP.
DisableDNSForwarderTCPRetries atomic.Bool
// SilentDisco is whether the node should suppress disco heartbeats to its
// peers.
SilentDisco atomic.Bool
// LinuxForceIPTables is whether the node should use iptables for Linux
// netfiltering, unless overridden by the user.
LinuxForceIPTables atomic.Bool
// LinuxForceNfTables is whether the node should use nftables for Linux
// netfiltering, unless overridden by the user.
LinuxForceNfTables atomic.Bool
// SeamlessKeyRenewal is whether to enable the alpha functionality of
// renewing node keys without breaking connections.
// http://go/seamless-key-renewal
SeamlessKeyRenewal atomic.Bool
// ProbeUDPLifetime is whether the node should probe UDP path lifetime on
// the tail end of an active direct connection in magicsock.
ProbeUDPLifetime atomic.Bool
// AppCStoreRoutes is whether the node should store RouteInfo to StateStore
// if it's an app connector.
AppCStoreRoutes atomic.Bool
// UserDialUseRoutes is whether tsdial.Dialer.UserDial should use routes to determine
// how to dial the destination address. When true, it also makes the DNS forwarder
// use UserDial instead of SystemDial when dialing resolvers.
UserDialUseRoutes atomic.Bool
// DisableSplitDNSWhenNoCustomResolvers indicates that the node's DNS manager
// should not adopt a split DNS configuration even though the Config of the
// resolver only contains routes that do not specify custom resolver(s), hence
// all DNS queries can be safely sent to the upstream DNS resolver and the
// node's DNS forwarder doesn't need to handle all DNS traffic.
// This is for now (2024-06-06) an iOS-specific battery life optimization,
// and this knob allows us to disable the optimization remotely if needed.
DisableSplitDNSWhenNoCustomResolvers atomic.Bool
// DisableLocalDNSOverrideViaNRPT indicates that the node's DNS manager should not
// create a default (catch-all) Windows NRPT rule when "Override local DNS" is enabled.
// Without this rule, Windows 8.1 and newer devices issue parallel DNS requests to DNS servers
// associated with all network adapters, even when "Override local DNS" is enabled and/or
// a Mullvad exit node is being used, resulting in DNS leaks.
// We began creating this rule on 2024-06-14, and this knob
// allows us to disable the new behavior remotely if needed.
DisableLocalDNSOverrideViaNRPT atomic.Bool
// DisableCryptorouting indicates that the node should not use the
// magicsock crypto routing feature.
DisableCryptorouting atomic.Bool
// DisableCaptivePortalDetection is whether the node should not perform captive portal detection
// automatically when the network state changes.
DisableCaptivePortalDetection atomic.Bool
}
// UpdateFromNodeAttributes updates k (if non-nil) based on the provided self
// node attributes (Node.Capabilities).
func (k *Knobs) UpdateFromNodeAttributes(capMap tailcfg.NodeCapMap) {
if k == nil {
return
}
has := capMap.Contains
var (
keepFullWG = has(tailcfg.NodeAttrDebugDisableWGTrim)
disableUPnP = has(tailcfg.NodeAttrDisableUPnP)
randomizeClientPort = has(tailcfg.NodeAttrRandomizeClientPort)
disableDeltaUpdates = has(tailcfg.NodeAttrDisableDeltaUpdates)
oneCGNAT opt.Bool
forceBackgroundSTUN = has(tailcfg.NodeAttrDebugForceBackgroundSTUN)
peerMTUEnable = has(tailcfg.NodeAttrPeerMTUEnable)
dnsForwarderDisableTCPRetries = has(tailcfg.NodeAttrDNSForwarderDisableTCPRetries)
silentDisco = has(tailcfg.NodeAttrSilentDisco)
forceIPTables = has(tailcfg.NodeAttrLinuxMustUseIPTables)
forceNfTables = has(tailcfg.NodeAttrLinuxMustUseNfTables)
seamlessKeyRenewal = has(tailcfg.NodeAttrSeamlessKeyRenewal)
probeUDPLifetime = has(tailcfg.NodeAttrProbeUDPLifetime)
appCStoreRoutes = has(tailcfg.NodeAttrStoreAppCRoutes)
userDialUseRoutes = has(tailcfg.NodeAttrUserDialUseRoutes)
disableSplitDNSWhenNoCustomResolvers = has(tailcfg.NodeAttrDisableSplitDNSWhenNoCustomResolvers)
disableLocalDNSOverrideViaNRPT = has(tailcfg.NodeAttrDisableLocalDNSOverrideViaNRPT)
disableCryptorouting = has(tailcfg.NodeAttrDisableMagicSockCryptoRouting)
disableCaptivePortalDetection = has(tailcfg.NodeAttrDisableCaptivePortalDetection)
)
if has(tailcfg.NodeAttrOneCGNATEnable) {
oneCGNAT.Set(true)
} else if has(tailcfg.NodeAttrOneCGNATDisable) {
oneCGNAT.Set(false)
}
k.KeepFullWGConfig.Store(keepFullWG)
k.DisableUPnP.Store(disableUPnP)
k.RandomizeClientPort.Store(randomizeClientPort)
k.OneCGNAT.Store(oneCGNAT)
k.ForceBackgroundSTUN.Store(forceBackgroundSTUN)
k.DisableDeltaUpdates.Store(disableDeltaUpdates)
k.PeerMTUEnable.Store(peerMTUEnable)
k.DisableDNSForwarderTCPRetries.Store(dnsForwarderDisableTCPRetries)
k.SilentDisco.Store(silentDisco)
k.LinuxForceIPTables.Store(forceIPTables)
k.LinuxForceNfTables.Store(forceNfTables)
k.SeamlessKeyRenewal.Store(seamlessKeyRenewal)
k.ProbeUDPLifetime.Store(probeUDPLifetime)
k.AppCStoreRoutes.Store(appCStoreRoutes)
k.UserDialUseRoutes.Store(userDialUseRoutes)
k.DisableSplitDNSWhenNoCustomResolvers.Store(disableSplitDNSWhenNoCustomResolvers)
k.DisableLocalDNSOverrideViaNRPT.Store(disableLocalDNSOverrideViaNRPT)
k.DisableCryptorouting.Store(disableCryptorouting)
k.DisableCaptivePortalDetection.Store(disableCaptivePortalDetection)
}
// AsDebugJSON returns k as something that can be marshalled with json.Marshal
// for debug.
func (k *Knobs) AsDebugJSON() map[string]any {
if k == nil {
return nil
}
return map[string]any{
"DisableUPnP": k.DisableUPnP.Load(),
"KeepFullWGConfig": k.KeepFullWGConfig.Load(),
"RandomizeClientPort": k.RandomizeClientPort.Load(),
"OneCGNAT": k.OneCGNAT.Load(),
"ForceBackgroundSTUN": k.ForceBackgroundSTUN.Load(),
"DisableDeltaUpdates": k.DisableDeltaUpdates.Load(),
"PeerMTUEnable": k.PeerMTUEnable.Load(),
"DisableDNSForwarderTCPRetries": k.DisableDNSForwarderTCPRetries.Load(),
"SilentDisco": k.SilentDisco.Load(),
"LinuxForceIPTables": k.LinuxForceIPTables.Load(),
"LinuxForceNfTables": k.LinuxForceNfTables.Load(),
"SeamlessKeyRenewal": k.SeamlessKeyRenewal.Load(),
"ProbeUDPLifetime": k.ProbeUDPLifetime.Load(),
"AppCStoreRoutes": k.AppCStoreRoutes.Load(),
"UserDialUseRoutes": k.UserDialUseRoutes.Load(),
"DisableSplitDNSWhenNoCustomResolvers": k.DisableSplitDNSWhenNoCustomResolvers.Load(),
"DisableLocalDNSOverrideViaNRPT": k.DisableLocalDNSOverrideViaNRPT.Load(),
"DisableCryptorouting": k.DisableCryptorouting.Load(),
"DisableCaptivePortalDetection": k.DisableCaptivePortalDetection.Load(),
}
}
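// Illustrative sketch (not part of the vendored file): applying node attributes
// and then consulting a knob at a call site. The capability map literal and the
// function name are assumptions for this example.
func exampleKnobs() bool {
var k Knobs
k.UpdateFromNodeAttributes(tailcfg.NodeCapMap{
tailcfg.NodeAttrDisableUPnP: nil, // presence of the key is what matters to Contains
})
return k.DisableUPnP.Load() // true, because the attribute above is present
}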