Update
This commit is contained in:
466
vendor/tailscale.com/ipn/auditlog/auditlog.go
generated
vendored
466
vendor/tailscale.com/ipn/auditlog/auditlog.go
generated
vendored
@@ -1,466 +0,0 @@
|
||||
// Copyright (c) Tailscale Inc & AUTHORS
|
||||
// SPDX-License-Identifier: BSD-3-Clause
|
||||
|
||||
// Package auditlog provides a mechanism for logging audit events.
|
||||
package auditlog
|
||||
|
||||
import (
|
||||
"context"
|
||||
"encoding/json"
|
||||
"errors"
|
||||
"fmt"
|
||||
"sort"
|
||||
"sync"
|
||||
"time"
|
||||
|
||||
"tailscale.com/ipn"
|
||||
"tailscale.com/tailcfg"
|
||||
"tailscale.com/types/logger"
|
||||
"tailscale.com/util/rands"
|
||||
"tailscale.com/util/set"
|
||||
)
|
||||
|
||||
// transaction represents an audit log that has not yet been sent to the control plane.
type transaction struct {
	// EventID is the unique identifier for the event being logged.
	// This is used on the client side only and is not sent to control.
	// It also serves as the deduplication key when persisting logs
	// (see deduplicateAndSort and markTransactionsDone).
	EventID string `json:",omitempty"`
	// Retries is the number of times the logger has attempted to send this log.
	// This is used on the client side only and is not sent to control.
	Retries int `json:",omitempty"`

	// Action is the action to be logged. It must correspond to a known action in the control plane.
	Action tailcfg.ClientAuditAction `json:",omitempty"`
	// Details is an opaque string specific to the action being logged. Empty strings may not
	// be valid depending on the action being logged.
	Details string `json:",omitempty"`
	// TimeStamp is the time at which the audit log was generated on the node.
	TimeStamp time.Time `json:",omitzero"`
}
|
||||
|
||||
// Transport provides a means for a client to send audit logs to a consumer (typically the control plane).
type Transport interface {
	// SendAuditLog sends an audit log to a consumer of audit logs.
	// Errors should be checked with [IsRetryableError] for retryability.
	SendAuditLog(context.Context, tailcfg.AuditLogRequest) error
}
|
||||
|
||||
// LogStore provides a means for a [Logger] to persist logs to disk or memory.
type LogStore interface {
	// save saves the given data to a persistent store. save will overwrite existing data
	// for the given key.
	save(key ipn.ProfileID, txns []*transaction) error

	// load retrieves the data from a persistent store. Returns a nil slice and
	// no error if no data exists for the given key.
	load(key ipn.ProfileID) ([]*transaction, error)
}
|
||||
|
||||
// Opts contains the configuration options for a [Logger].
type Opts struct {
	// RetryLimit is the maximum number of attempts the logger will make to send a log before giving up.
	RetryLimit int
	// Store is the persistent store used to save logs to disk. Must be non-nil.
	Store LogStore
	// Logf is the logger used to log messages from the audit logger. Must be non-nil.
	Logf logger.Logf
}
|
||||
|
||||
// IsRetryableError returns true if the given error is retryable
|
||||
// See [controlclient.apiResponseError]. Potentially retryable errors implement the Retryable() method.
|
||||
func IsRetryableError(err error) bool {
|
||||
var retryable interface{ Retryable() bool }
|
||||
return errors.As(err, &retryable) && retryable.Retryable()
|
||||
}
|
||||
|
||||
// backoffOpts describes an exponential backoff schedule:
// delays start at min, multiply by multiplier after each failure,
// and are capped at max.
type backoffOpts struct {
	min, max   time.Duration
	multiplier float64
}

// defaultBackoffOpts yields delays of
// .5, 1, 2, 4, 8, 10, 10, 10, 10, 10... seconds.
var defaultBackoffOpts = backoffOpts{
	min:        time.Millisecond * 500,
	max:        10 * time.Second,
	multiplier: 2,
}
|
||||
|
||||
// Logger provides a queue-based mechanism for submitting audit logs to the control plane - or
// another suitable consumer. Logs are stored to disk and retried until they are successfully sent,
// or until they permanently fail.
//
// Each individual profile/controlclient tuple should construct and manage a unique [Logger] instance.
type Logger struct {
	logf       logger.Logf
	retryLimit int           // the maximum number of attempts to send a log before giving up.
	flusher    chan struct{} // channel used to signal a flush operation; capacity 1 so signals coalesce.
	done       chan struct{} // closed when the flush worker exits.
	ctx        context.Context    // canceled when the logger is stopped.
	ctxCancel  context.CancelFunc // cancels ctx.
	backoffOpts                   // backoff settings for retry operations.

	// mu protects the fields below.
	mu        sync.Mutex
	store     LogStore      // persistent storage for unsent logs.
	profileID ipn.ProfileID // empty if [Logger.SetProfileID] has not been called.
	transport Transport     // nil until [Logger.Start] is called.
}
|
||||
|
||||
// NewLogger creates a new [Logger] with the given options.
|
||||
func NewLogger(opts Opts) *Logger {
|
||||
ctx, cancel := context.WithCancel(context.Background())
|
||||
|
||||
al := &Logger{
|
||||
retryLimit: opts.RetryLimit,
|
||||
logf: logger.WithPrefix(opts.Logf, "auditlog: "),
|
||||
store: opts.Store,
|
||||
flusher: make(chan struct{}, 1),
|
||||
done: make(chan struct{}),
|
||||
ctx: ctx,
|
||||
ctxCancel: cancel,
|
||||
backoffOpts: defaultBackoffOpts,
|
||||
}
|
||||
al.logf("created")
|
||||
return al
|
||||
}
|
||||
|
||||
// FlushAndStop synchronously flushes all pending logs and stops the audit logger.
// This will block until a final flush operation completes or context is done.
// If the logger is already stopped, this will return immediately. All unsent
// logs will be persisted to the store.
func (al *Logger) FlushAndStop(ctx context.Context) {
	// Stop the worker first so it cannot race with the final flush below.
	al.stop()
	al.flush(ctx)
}
|
||||
|
||||
// SetProfileID sets the profileID for the logger. This must be called before any logs can be enqueued.
|
||||
// The profileID of a logger cannot be changed once set.
|
||||
func (al *Logger) SetProfileID(profileID ipn.ProfileID) error {
|
||||
al.mu.Lock()
|
||||
defer al.mu.Unlock()
|
||||
if al.profileID != "" {
|
||||
return errors.New("profileID already set")
|
||||
}
|
||||
|
||||
al.profileID = profileID
|
||||
return nil
|
||||
}
|
||||
|
||||
// Start starts the audit logger with the given transport.
// It returns an error if the logger is already started.
func (al *Logger) Start(t Transport) error {
	al.mu.Lock()
	defer al.mu.Unlock()

	if al.transport != nil {
		return errors.New("already started")
	}

	al.transport = t
	// Count logs persisted by a previous run so we can retry them
	// immediately instead of waiting for a new Enqueue.
	pending, err := al.storedCountLocked()
	if err != nil {
		al.logf("[unexpected] failed to restore logs: %v", err)
	}
	go al.flushWorker()
	if pending > 0 {
		al.flushAsync()
	}
	return nil
}
|
||||
|
||||
// ErrAuditLogStorageFailure is returned when the logger fails to persist logs to the store.
// Callers can test for it with errors.Is; enqueue wraps store errors with it.
var ErrAuditLogStorageFailure = errors.New("audit log storage failure")
|
||||
|
||||
// Enqueue queues an audit log to be sent to the control plane (or another suitable consumer/transport).
|
||||
// This will return an error if the underlying store fails to save the log or we fail to generate a unique
|
||||
// eventID for the log.
|
||||
func (al *Logger) Enqueue(action tailcfg.ClientAuditAction, details string) error {
|
||||
txn := &transaction{
|
||||
Action: action,
|
||||
Details: details,
|
||||
TimeStamp: time.Now(),
|
||||
}
|
||||
// Generate a suitably random eventID for the transaction.
|
||||
txn.EventID = fmt.Sprint(txn.TimeStamp, rands.HexString(16))
|
||||
return al.enqueue(txn)
|
||||
}
|
||||
|
||||
// flushAsync requests an asynchronous flush.
|
||||
// It is a no-op if a flush is already pending.
|
||||
func (al *Logger) flushAsync() {
|
||||
select {
|
||||
case al.flusher <- struct{}{}:
|
||||
default:
|
||||
}
|
||||
}
|
||||
|
||||
// flushWorker is the single goroutine that services flush requests.
// It exits (closing al.done) when al.ctx is canceled, and retries
// failed flushes with exponential backoff per al.backoffOpts.
func (al *Logger) flushWorker() {
	defer close(al.done)

	var retryDelay time.Duration
	// One reusable timer for the whole loop; created stopped so the
	// first <-retry.C only fires after an explicit Reset.
	retry := time.NewTimer(0)
	retry.Stop()

	for {
		select {
		case <-al.ctx.Done():
			return
		case <-al.flusher:
			err := al.flush(al.ctx)
			switch {
			case errors.Is(err, context.Canceled):
				// The logger was stopped, no need to retry.
				return
			case err != nil:
				// Exponential backoff clamped to [min, max]; retryDelay
				// starts at 0, so the first retry uses the minimum delay.
				retryDelay = max(al.backoffOpts.min, min(retryDelay*time.Duration(al.backoffOpts.multiplier), al.backoffOpts.max))
				al.logf("retrying after %v, %v", retryDelay, err)
				retry.Reset(retryDelay)
			default:
				// Success: reset the backoff and cancel any pending retry.
				retryDelay = 0
				retry.Stop()
			}
		case <-retry.C:
			al.flushAsync()
		}
	}
}
|
||||
|
||||
// flush attempts to send all pending logs to the control plane.
// l.mu must not be held.
func (al *Logger) flush(ctx context.Context) error {
	// Snapshot the pending logs and transport under the lock, then
	// release it: sending can take multiple round trips and must not
	// block Enqueue.
	al.mu.Lock()
	pending, err := al.store.load(al.profileID)
	t := al.transport
	al.mu.Unlock()

	if err != nil {
		// This will catch nil profileIDs
		return fmt.Errorf("failed to restore pending logs: %w", err)
	}
	if len(pending) == 0 {
		return nil
	}
	if t == nil {
		return errors.New("no transport")
	}

	// Send without holding the lock; then remove completed logs and
	// requeue the rest.
	complete, unsent := al.sendToTransport(ctx, pending, t)
	al.markTransactionsDone(complete)

	al.mu.Lock()
	defer al.mu.Unlock()
	if err = al.appendToStoreLocked(unsent); err != nil {
		al.logf("[unexpected] failed to persist logs: %v", err)
	}

	if len(unsent) != 0 {
		return fmt.Errorf("failed to send %d logs", len(unsent))
	}

	if len(complete) != 0 {
		al.logf("complete %d audit log transactions", len(complete))
	}
	return nil
}
|
||||
|
||||
// sendToTransport sends all pending logs to the control plane. Returns a pair of slices
|
||||
// containing the logs that were successfully sent (or failed permanently) and those that were not.
|
||||
//
|
||||
// This may require multiple round trips to the control plane and can be a long running transaction.
|
||||
func (al *Logger) sendToTransport(ctx context.Context, pending []*transaction, t Transport) (complete []*transaction, unsent []*transaction) {
|
||||
for i, txn := range pending {
|
||||
req := tailcfg.AuditLogRequest{
|
||||
Action: tailcfg.ClientAuditAction(txn.Action),
|
||||
Details: txn.Details,
|
||||
Timestamp: txn.TimeStamp,
|
||||
}
|
||||
|
||||
if err := t.SendAuditLog(ctx, req); err != nil {
|
||||
switch {
|
||||
case errors.Is(err, context.Canceled) || errors.Is(err, context.DeadlineExceeded):
|
||||
// The contex is done. All further attempts will fail.
|
||||
unsent = append(unsent, pending[i:]...)
|
||||
return complete, unsent
|
||||
case IsRetryableError(err) && txn.Retries+1 < al.retryLimit:
|
||||
// We permit a maximum number of retries for each log. All retriable
|
||||
// errors should be transient and we should be able to send the log eventually, but
|
||||
// we don't want logs to be persisted indefinitely.
|
||||
txn.Retries++
|
||||
unsent = append(unsent, txn)
|
||||
default:
|
||||
complete = append(complete, txn)
|
||||
al.logf("failed permanently: %v", err)
|
||||
}
|
||||
} else {
|
||||
// No error - we're done.
|
||||
complete = append(complete, txn)
|
||||
}
|
||||
}
|
||||
|
||||
return complete, unsent
|
||||
}
|
||||
|
||||
// stop cancels the worker goroutine (if one was started) and waits
// for it to exit. It is idempotent in effect: if Start was never
// called there is no worker and stop returns immediately.
func (al *Logger) stop() {
	al.mu.Lock()
	t := al.transport
	al.mu.Unlock()

	if t == nil {
		// No transport means no worker goroutine and done will not be
		// closed if we cancel the context.
		return
	}

	al.ctxCancel()
	<-al.done
	// NOTE(review): al.profileID is read here without holding al.mu;
	// benign for logging, but racy in principle — confirm intended.
	al.logf("stopped for profileID: %v", al.profileID)
}
|
||||
|
||||
// appendToStoreLocked persists logs to the store. This will deduplicate
// logs so it is safe to call this with the same logs multiple time, to
// requeue failed transactions for example.
//
// l.mu must be held.
func (al *Logger) appendToStoreLocked(txns []*transaction) error {
	if len(txns) == 0 {
		return nil
	}

	if al.profileID == "" {
		// NOTE(review): message says "logId" but the field is profileID —
		// confirm whether the wording is intentional.
		return errors.New("no logId set")
	}

	// Best effort: if restore fails we still save the new transactions.
	persisted, err := al.store.load(al.profileID)
	if err != nil {
		al.logf("[unexpected] append failed to restore logs: %v", err)
	}

	// The order is important here. We want the latest transactions first, which will
	// ensure when we dedup, the new transactions are seen and the older transactions
	// are discarded.
	txnsOut := append(txns, persisted...)
	txnsOut = deduplicateAndSort(txnsOut)

	return al.store.save(al.profileID, txnsOut)
}
|
||||
|
||||
// storedCountLocked returns the number of logs persisted to the store.
|
||||
// al.mu must be held.
|
||||
func (al *Logger) storedCountLocked() (int, error) {
|
||||
persisted, err := al.store.load(al.profileID)
|
||||
return len(persisted), err
|
||||
}
|
||||
|
||||
// markTransactionsDone removes logs from the store that are complete (sent or failed permanently).
|
||||
// al.mu must not be held.
|
||||
func (al *Logger) markTransactionsDone(sent []*transaction) {
|
||||
al.mu.Lock()
|
||||
defer al.mu.Unlock()
|
||||
|
||||
ids := set.Set[string]{}
|
||||
for _, txn := range sent {
|
||||
ids.Add(txn.EventID)
|
||||
}
|
||||
|
||||
persisted, err := al.store.load(al.profileID)
|
||||
if err != nil {
|
||||
al.logf("[unexpected] markTransactionsDone failed to restore logs: %v", err)
|
||||
}
|
||||
var unsent []*transaction
|
||||
for _, txn := range persisted {
|
||||
if !ids.Contains(txn.EventID) {
|
||||
unsent = append(unsent, txn)
|
||||
}
|
||||
}
|
||||
al.store.save(al.profileID, unsent)
|
||||
}
|
||||
|
||||
// deduplicateAndSort removes duplicate logs from the given slice and sorts them by timestamp.
|
||||
// The first log entry in the slice will be retained, subsequent logs with the same EventID will be discarded.
|
||||
func deduplicateAndSort(txns []*transaction) []*transaction {
|
||||
seen := set.Set[string]{}
|
||||
deduped := make([]*transaction, 0, len(txns))
|
||||
for _, txn := range txns {
|
||||
if !seen.Contains(txn.EventID) {
|
||||
deduped = append(deduped, txn)
|
||||
seen.Add(txn.EventID)
|
||||
}
|
||||
}
|
||||
// Sort logs by timestamp - oldest to newest. This will put the oldest logs at
|
||||
// the front of the queue.
|
||||
sort.Slice(deduped, func(i, j int) bool {
|
||||
return deduped[i].TimeStamp.Before(deduped[j].TimeStamp)
|
||||
})
|
||||
return deduped
|
||||
}
|
||||
|
||||
// enqueue persists txn to the store and, if the logger is running,
// schedules an asynchronous flush. Store failures are wrapped with
// [ErrAuditLogStorageFailure].
func (al *Logger) enqueue(txn *transaction) error {
	al.mu.Lock()
	defer al.mu.Unlock()

	if err := al.appendToStoreLocked([]*transaction{txn}); err != nil {
		return fmt.Errorf("%w: %w", ErrAuditLogStorageFailure, err)
	}

	// al.transport is nil if the logger has not been started (or has
	// been stopped); in that case the log stays persisted until Start.
	if al.transport != nil {
		al.flushAsync()
	}

	return nil
}
|
||||
|
||||
// Compile-time check that *logStateStore implements [LogStore].
var _ LogStore = (*logStateStore)(nil)

// logStateStore is a concrete implementation of [LogStore]
// using [ipn.StateStore] as the underlying storage.
type logStateStore struct {
	store ipn.StateStore
}
|
||||
|
||||
// NewLogStore creates a new LogStateStore with the given [ipn.StateStore].
|
||||
func NewLogStore(store ipn.StateStore) LogStore {
|
||||
return &logStateStore{
|
||||
store: store,
|
||||
}
|
||||
}
|
||||
|
||||
func (s *logStateStore) generateKey(key ipn.ProfileID) string {
|
||||
return "auditlog-" + string(key)
|
||||
}
|
||||
|
||||
// Save saves the given logs to an [ipn.StateStore]. This overwrites
|
||||
// any existing entries for the given key.
|
||||
func (s *logStateStore) save(key ipn.ProfileID, txns []*transaction) error {
|
||||
if key == "" {
|
||||
return errors.New("empty key")
|
||||
}
|
||||
|
||||
data, err := json.Marshal(txns)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
k := ipn.StateKey(s.generateKey(key))
|
||||
return s.store.WriteState(k, data)
|
||||
}
|
||||
|
||||
// Load retrieves the logs from an [ipn.StateStore].
|
||||
func (s *logStateStore) load(key ipn.ProfileID) ([]*transaction, error) {
|
||||
if key == "" {
|
||||
return nil, errors.New("empty key")
|
||||
}
|
||||
|
||||
k := ipn.StateKey(s.generateKey(key))
|
||||
data, err := s.store.ReadState(k)
|
||||
|
||||
switch {
|
||||
case errors.Is(err, ipn.ErrStateNotExist):
|
||||
return nil, nil
|
||||
case err != nil:
|
||||
return nil, err
|
||||
}
|
||||
|
||||
var txns []*transaction
|
||||
err = json.Unmarshal(data, &txns)
|
||||
return txns, err
|
||||
}
|
||||
34
vendor/tailscale.com/ipn/backend.go
generated
vendored
34
vendor/tailscale.com/ipn/backend.go
generated
vendored
@@ -74,13 +74,17 @@ const (
|
||||
NotifyInitialPrefs NotifyWatchOpt = 1 << 2 // if set, the first Notify message (sent immediately) will contain the current Prefs
|
||||
NotifyInitialNetMap NotifyWatchOpt = 1 << 3 // if set, the first Notify message (sent immediately) will contain the current NetMap
|
||||
|
||||
NotifyNoPrivateKeys NotifyWatchOpt = 1 << 4 // if set, private keys that would normally be sent in updates are zeroed out
|
||||
NotifyNoPrivateKeys NotifyWatchOpt = 1 << 4 // (no-op) it used to redact private keys; now they always are and this does nothing
|
||||
NotifyInitialDriveShares NotifyWatchOpt = 1 << 5 // if set, the first Notify message (sent immediately) will contain the current Taildrive Shares
|
||||
NotifyInitialOutgoingFiles NotifyWatchOpt = 1 << 6 // if set, the first Notify message (sent immediately) will contain the current Taildrop OutgoingFiles
|
||||
|
||||
NotifyInitialHealthState NotifyWatchOpt = 1 << 7 // if set, the first Notify message (sent immediately) will contain the current health.State of the client
|
||||
|
||||
NotifyRateLimit NotifyWatchOpt = 1 << 8 // if set, rate limit spammy netmap updates to every few seconds
|
||||
|
||||
NotifyHealthActions NotifyWatchOpt = 1 << 9 // if set, include PrimaryActions in health.State. Otherwise append the action URL to the text
|
||||
|
||||
NotifyInitialSuggestedExitNode NotifyWatchOpt = 1 << 10 // if set, the first Notify message (sent immediately) will contain the current SuggestedExitNode if available
|
||||
)
|
||||
|
||||
// Notify is a communication from a backend (e.g. tailscaled) to a frontend
|
||||
@@ -96,7 +100,7 @@ type Notify struct {
|
||||
// This field is only set in the first message when requesting
|
||||
// NotifyInitialState. Clients must store it on their side as
|
||||
// following notifications will not include this field.
|
||||
SessionID string `json:",omitempty"`
|
||||
SessionID string `json:",omitzero"`
|
||||
|
||||
// ErrMessage, if non-nil, contains a critical error message.
|
||||
// For State InUseOtherUser, ErrMessage is not critical and just contains the details.
|
||||
@@ -114,7 +118,7 @@ type Notify struct {
|
||||
// user's preferred storage location.
|
||||
//
|
||||
// Deprecated: use LocalClient.AwaitWaitingFiles instead.
|
||||
FilesWaiting *empty.Message `json:",omitempty"`
|
||||
FilesWaiting *empty.Message `json:",omitzero"`
|
||||
|
||||
// IncomingFiles, if non-nil, specifies which files are in the
|
||||
// process of being received. A nil IncomingFiles means this
|
||||
@@ -123,22 +127,22 @@ type Notify struct {
|
||||
// of being transferred.
|
||||
//
|
||||
// Deprecated: use LocalClient.AwaitWaitingFiles instead.
|
||||
IncomingFiles []PartialFile `json:",omitempty"`
|
||||
IncomingFiles []PartialFile `json:",omitzero"`
|
||||
|
||||
// OutgoingFiles, if non-nil, tracks which files are in the process of
|
||||
// being sent via TailDrop, including files that finished, whether
|
||||
// successful or failed. This slice is sorted by Started time, then Name.
|
||||
OutgoingFiles []*OutgoingFile `json:",omitempty"`
|
||||
OutgoingFiles []*OutgoingFile `json:",omitzero"`
|
||||
|
||||
// LocalTCPPort, if non-nil, informs the UI frontend which
|
||||
// (non-zero) localhost TCP port it's listening on.
|
||||
// This is currently only used by Tailscale when run in the
|
||||
// macOS Network Extension.
|
||||
LocalTCPPort *uint16 `json:",omitempty"`
|
||||
LocalTCPPort *uint16 `json:",omitzero"`
|
||||
|
||||
// ClientVersion, if non-nil, describes whether a client version update
|
||||
// is available.
|
||||
ClientVersion *tailcfg.ClientVersion `json:",omitempty"`
|
||||
ClientVersion *tailcfg.ClientVersion `json:",omitzero"`
|
||||
|
||||
// DriveShares tracks the full set of current DriveShares that we're
|
||||
// publishing. Some client applications, like the MacOS and Windows clients,
|
||||
@@ -151,7 +155,11 @@ type Notify struct {
|
||||
// Health is the last-known health state of the backend. When this field is
|
||||
// non-nil, a change in health verified, and the API client should surface
|
||||
// any changes to the user in the UI.
|
||||
Health *health.State `json:",omitempty"`
|
||||
Health *health.State `json:",omitzero"`
|
||||
|
||||
// SuggestedExitNode, if non-nil, is the node that the backend has determined to
|
||||
// be the best exit node for the current network conditions.
|
||||
SuggestedExitNode *tailcfg.StableNodeID `json:",omitzero"`
|
||||
|
||||
// type is mirrored in xcode/IPN/Core/LocalAPI/Model/LocalAPIModel.swift
|
||||
}
|
||||
@@ -192,8 +200,16 @@ func (n Notify) String() string {
|
||||
if n.Health != nil {
|
||||
sb.WriteString("Health{...} ")
|
||||
}
|
||||
if n.SuggestedExitNode != nil {
|
||||
fmt.Fprintf(&sb, "SuggestedExitNode=%v ", *n.SuggestedExitNode)
|
||||
}
|
||||
|
||||
s := sb.String()
|
||||
return s[0:len(s)-1] + "}"
|
||||
if s == "Notify{" {
|
||||
return "Notify{}"
|
||||
} else {
|
||||
return s[0:len(s)-1] + "}"
|
||||
}
|
||||
}
|
||||
|
||||
// PartialFile represents an in-progress incoming file transfer.
|
||||
|
||||
5
vendor/tailscale.com/ipn/conffile/cloudconf.go
generated
vendored
5
vendor/tailscale.com/ipn/conffile/cloudconf.go
generated
vendored
@@ -10,6 +10,8 @@ import (
|
||||
"net/http"
|
||||
"strings"
|
||||
|
||||
"tailscale.com/feature"
|
||||
"tailscale.com/feature/buildfeatures"
|
||||
"tailscale.com/omit"
|
||||
)
|
||||
|
||||
@@ -35,6 +37,9 @@ func getEC2MetadataToken() (string, error) {
|
||||
}
|
||||
|
||||
func readVMUserData() ([]byte, error) {
|
||||
if !buildfeatures.HasAWS {
|
||||
return nil, feature.ErrUnavailable
|
||||
}
|
||||
// TODO(bradfitz): support GCP, Azure, Proxmox/cloud-init
|
||||
// (NoCloud/ConfigDrive ISO), etc.
|
||||
|
||||
|
||||
19
vendor/tailscale.com/ipn/conffile/conffile.go
generated
vendored
19
vendor/tailscale.com/ipn/conffile/conffile.go
generated
vendored
@@ -8,11 +8,11 @@ package conffile
|
||||
import (
|
||||
"bytes"
|
||||
"encoding/json"
|
||||
"errors"
|
||||
"fmt"
|
||||
"os"
|
||||
"runtime"
|
||||
|
||||
"tailscale.com/feature/buildfeatures"
|
||||
"tailscale.com/ipn"
|
||||
)
|
||||
|
||||
@@ -51,10 +51,6 @@ func Load(path string) (*Config, error) {
|
||||
// compile-time for deadcode elimination
|
||||
return nil, fmt.Errorf("config file loading not supported on %q", runtime.GOOS)
|
||||
}
|
||||
if hujsonStandardize == nil {
|
||||
// Build tags are wrong in conffile_hujson.go
|
||||
return nil, errors.New("[unexpected] config file loading not wired up")
|
||||
}
|
||||
var c Config
|
||||
c.Path = path
|
||||
var err error
|
||||
@@ -68,14 +64,21 @@ func Load(path string) (*Config, error) {
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
c.Std, err = hujsonStandardize(c.Raw)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("error parsing config file %s HuJSON/JSON: %w", path, err)
|
||||
if buildfeatures.HasHuJSONConf && hujsonStandardize != nil {
|
||||
c.Std, err = hujsonStandardize(c.Raw)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("error parsing config file %s HuJSON/JSON: %w", path, err)
|
||||
}
|
||||
} else {
|
||||
c.Std = c.Raw // config file must be valid JSON with ts_omit_hujsonconf
|
||||
}
|
||||
var ver struct {
|
||||
Version string `json:"version"`
|
||||
}
|
||||
if err := json.Unmarshal(c.Std, &ver); err != nil {
|
||||
if !buildfeatures.HasHuJSONConf {
|
||||
return nil, fmt.Errorf("error parsing config file %s, which must be valid standard JSON: %w", path, err)
|
||||
}
|
||||
return nil, fmt.Errorf("error parsing config file %s: %w", path, err)
|
||||
}
|
||||
switch ver.Version {
|
||||
|
||||
2
vendor/tailscale.com/ipn/conffile/conffile_hujson.go
generated
vendored
2
vendor/tailscale.com/ipn/conffile/conffile_hujson.go
generated
vendored
@@ -1,7 +1,7 @@
|
||||
// Copyright (c) Tailscale Inc & AUTHORS
|
||||
// SPDX-License-Identifier: BSD-3-Clause
|
||||
|
||||
//go:build !ios && !android
|
||||
//go:build !ios && !android && !ts_omit_hujsonconf
|
||||
|
||||
package conffile
|
||||
|
||||
|
||||
239
vendor/tailscale.com/ipn/conffile/serveconf.go
generated
vendored
Normal file
239
vendor/tailscale.com/ipn/conffile/serveconf.go
generated
vendored
Normal file
@@ -0,0 +1,239 @@
|
||||
// Copyright (c) Tailscale Inc & AUTHORS
|
||||
// SPDX-License-Identifier: BSD-3-Clause
|
||||
|
||||
//go:build !ts_omit_serve
|
||||
|
||||
package conffile
|
||||
|
||||
import (
|
||||
"errors"
|
||||
"fmt"
|
||||
"net"
|
||||
"os"
|
||||
"path"
|
||||
"strings"
|
||||
|
||||
jsonv2 "github.com/go-json-experiment/json"
|
||||
"github.com/go-json-experiment/json/jsontext"
|
||||
"tailscale.com/tailcfg"
|
||||
"tailscale.com/types/opt"
|
||||
"tailscale.com/util/mak"
|
||||
)
|
||||
|
||||
// ServicesConfigFile is the config file format for services configuration.
type ServicesConfigFile struct {
	// Version is always "0.0.1" and always present.
	Version string `json:"version"`

	// Services maps each service name to its configuration.
	Services map[tailcfg.ServiceName]*ServiceDetailsFile `json:"services,omitzero"`
}
|
||||
|
||||
// ServiceDetailsFile is the config syntax for an individual Tailscale Service.
type ServiceDetailsFile struct {
	// Version is always "0.0.1", set if and only if this is not inside a
	// [ServiceConfigFile].
	Version string `json:"version,omitzero"`

	// Endpoints are sets of reverse proxy mappings from ProtoPortRanges on a
	// Service to Targets (proto+destination+port) on remote destinations (or
	// localhost).
	// For example, "tcp:443" -> "tcp://localhost:8000" is an endpoint definition
	// mapping traffic on the TCP port 443 of the Service to port 8080 on localhost.
	// The Proto in the key must be populated.
	// As a special case, if the only mapping provided is "*" -> "TUN", that
	// enables TUN/L3 mode, where packets are delivered to the Tailscale network
	// interface with the understanding that the user will deal with them manually.
	//
	// NOTE(review): the map is keyed by *pointer*, so every decoded key is a
	// distinct entry and value-based lookups are impossible — confirm this is
	// intentional (iteration-only use).
	Endpoints map[*tailcfg.ProtoPortRange]*Target `json:"endpoints"`

	// Advertised is a flag that tells control whether or not the client thinks
	// it is ready to host a particular Tailscale Service. If unset, it is
	// assumed to be true.
	Advertised opt.Bool `json:"advertised,omitzero"`
}
|
||||
|
||||
// ServiceProtocol is the protocol of a Target.
type ServiceProtocol string

// Known target protocols. ProtoTUN is special: it enables L3 "TUN mode"
// rather than naming a proxy scheme.
const (
	ProtoHTTP             ServiceProtocol = "http"
	ProtoHTTPS            ServiceProtocol = "https"
	ProtoHTTPSInsecure    ServiceProtocol = "https+insecure"
	ProtoTCP              ServiceProtocol = "tcp"
	ProtoTLSTerminatedTCP ServiceProtocol = "tls-terminated-tcp"
	ProtoFile             ServiceProtocol = "file"
	ProtoTUN              ServiceProtocol = "TUN"
)
|
||||
|
||||
// Target is a destination for traffic to go to when it arrives at a Tailscale
// Service host.
type Target struct {
	// The protocol over which to communicate with the Destination.
	// Protocol == ProtoTUN is a special case, activating "TUN mode" where
	// packets are delivered to the Tailscale TUN interface and then manually
	// handled by the user.
	Protocol ServiceProtocol

	// If Protocol is ProtoFile, then Destination is a file path.
	// If Protocol is ProtoTUN, then Destination is empty.
	// Otherwise, it is a host.
	Destination string

	// If Protocol is not ProtoFile or ProtoTUN, then DestinationPorts is the
	// set of ports on which to connect to the host referred to by Destination.
	DestinationPorts tailcfg.PortRange
}
|
||||
|
||||
// UnmarshalJSON implements [jsonv1.Unmarshaler].
// It delegates to jsonv2, which dispatches to [Target.UnmarshalJSONFrom],
// so v1 and v2 decoding share one implementation.
func (t *Target) UnmarshalJSON(buf []byte) error {
	return jsonv2.Unmarshal(buf, t)
}
|
||||
|
||||
// UnmarshalJSONFrom implements [jsonv2.UnmarshalerFrom].
// A Target is encoded as a string: either the literal "TUN" or
// "<proto>://<destination>" where destination may carry a port range.
func (t *Target) UnmarshalJSONFrom(dec *jsontext.Decoder) error {
	var str string
	if err := jsonv2.UnmarshalDecode(dec, &str); err != nil {
		return err
	}

	// The TUN case does not look like a standard <url>://<proto> arrangement,
	// so it is handled separately.
	if str == "TUN" {
		t.Protocol = ProtoTUN
		t.Destination = ""
		t.DestinationPorts = tailcfg.PortRangeAny
		return nil
	}

	proto, rest, found := strings.Cut(str, "://")
	if !found {
		return errors.New("handler not of form <proto>://<destination>")
	}

	switch ServiceProtocol(proto) {
	case ProtoFile:
		// File targets carry no ports; the destination is a cleaned path.
		target := path.Clean(rest)
		t.Protocol = ProtoFile
		t.Destination = target
		t.DestinationPorts = tailcfg.PortRange{}
	case ProtoHTTP, ProtoHTTPS, ProtoHTTPSInsecure, ProtoTCP, ProtoTLSTerminatedTCP:
		host, portRange, err := tailcfg.ParseHostPortRange(rest)
		if err != nil {
			return err
		}
		t.Protocol = ServiceProtocol(proto)
		t.Destination = host
		t.DestinationPorts = portRange
	default:
		return errors.New("unsupported protocol")
	}

	return nil
}
|
||||
|
||||
func (t *Target) MarshalText() ([]byte, error) {
|
||||
var out string
|
||||
switch t.Protocol {
|
||||
case ProtoFile:
|
||||
out = fmt.Sprintf("%s://%s", t.Protocol, t.Destination)
|
||||
case ProtoTUN:
|
||||
out = "TUN"
|
||||
case ProtoHTTP, ProtoHTTPS, ProtoHTTPSInsecure, ProtoTCP, ProtoTLSTerminatedTCP:
|
||||
out = fmt.Sprintf("%s://%s", t.Protocol, net.JoinHostPort(t.Destination, t.DestinationPorts.String()))
|
||||
default:
|
||||
return nil, errors.New("unsupported protocol")
|
||||
}
|
||||
return []byte(out), nil
|
||||
}
|
||||
|
||||
// LoadServicesConfig reads and parses the services config file at filename.
// If forService is non-empty, the file is interpreted as the config for that
// single service (see loadConfigV0). The file may be HuJSON when the
// hujsonStandardize hook is wired up; otherwise it must be plain JSON.
func LoadServicesConfig(filename string, forService string) (*ServicesConfigFile, error) {
	data, err := os.ReadFile(filename)
	if err != nil {
		return nil, err
	}
	var json []byte
	if hujsonStandardize != nil {
		json, err = hujsonStandardize(data)
		if err != nil {
			return nil, err
		}
	} else {
		json = data
	}
	// Sniff just the version before committing to a full parse.
	var ver struct {
		Version string `json:"version"`
	}
	if err = jsonv2.Unmarshal(json, &ver); err != nil {
		return nil, fmt.Errorf("could not parse config file version: %w", err)
	}
	switch ver.Version {
	case "":
		return nil, errors.New("config file must have \"version\" field")
	case "0.0.1":
		return loadConfigV0(json, forService)
	}
	return nil, fmt.Errorf("unsupported config file version %q", ver.Version)
}
|
||||
|
||||
// loadConfigV0 parses a version "0.0.1" services config. If forService names
// a valid service, json is a bare [ServiceDetailsFile] for that one service;
// otherwise json is a full [ServicesConfigFile]. It then validates every
// service: endpoints present, TUN-mode exclusivity, matching port-range
// sizes, and no overlapping source port ranges.
func loadConfigV0(json []byte, forService string) (*ServicesConfigFile, error) {
	var scf ServicesConfigFile
	if svcName := tailcfg.AsServiceName(forService); svcName != "" {
		// Single-service form: wrap the lone details file into scf.Services.
		var sdf ServiceDetailsFile
		err := jsonv2.Unmarshal(json, &sdf, jsonv2.RejectUnknownMembers(true))
		if err != nil {
			return nil, err
		}
		mak.Set(&scf.Services, svcName, &sdf)
	} else {
		err := jsonv2.Unmarshal(json, &scf, jsonv2.RejectUnknownMembers(true))
		if err != nil {
			return nil, err
		}
	}
	for svcName, svc := range scf.Services {
		if forService == "" && svc.Version != "" {
			return nil, errors.New("services cannot be versioned separately from config file")
		}
		if err := svcName.Validate(); err != nil {
			return nil, err
		}
		if svc.Endpoints == nil {
			return nil, fmt.Errorf("service %q: missing \"endpoints\" field", svcName)
		}
		var sourcePorts []tailcfg.PortRange
		foundTUN := false
		foundNonTUN := false
		for ppr, target := range svc.Endpoints {
			if target.Protocol == "TUN" {
				// TUN mode is only valid as the wildcard mapping "*" -> "TUN".
				if ppr.Proto != 0 || ppr.Ports != tailcfg.PortRangeAny {
					return nil, fmt.Errorf("service %q: destination \"TUN\" can only be used with source \"*\"", svcName)
				}
				foundTUN = true
			} else {
				// Source and destination ranges map 1:1, so their sizes must match.
				if ppr.Ports.Last-ppr.Ports.First != target.DestinationPorts.Last-target.DestinationPorts.First {
					return nil, fmt.Errorf("service %q: source and destination port ranges must be of equal size", svcName.String())
				}
				foundNonTUN = true
			}
			if foundTUN && foundNonTUN {
				return nil, fmt.Errorf("service %q: cannot mix TUN mode with non-TUN mode", svcName)
			}
			if pr := findOverlappingRange(sourcePorts, ppr.Ports); pr != nil {
				return nil, fmt.Errorf("service %q: source port ranges %q and %q overlap", svcName, pr.String(), ppr.Ports.String())
			}
			sourcePorts = append(sourcePorts, ppr.Ports)
		}
	}
	return &scf, nil
}
|
||||
|
||||
// findOverlappingRange finds and returns a reference to a [tailcfg.PortRange]
|
||||
// in haystack that overlaps with needle. It returns nil if it doesn't find one.
|
||||
func findOverlappingRange(haystack []tailcfg.PortRange, needle tailcfg.PortRange) *tailcfg.PortRange {
|
||||
for _, pr := range haystack {
|
||||
if pr.Contains(needle.First) || pr.Contains(needle.Last) || needle.Contains(pr.First) || needle.Contains(pr.Last) {
|
||||
return &pr
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
6
vendor/tailscale.com/ipn/desktop/doc.go
generated
vendored
6
vendor/tailscale.com/ipn/desktop/doc.go
generated
vendored
@@ -1,6 +0,0 @@
|
||||
// Copyright (c) Tailscale Inc & AUTHORS
|
||||
// SPDX-License-Identifier: BSD-3-Clause
|
||||
|
||||
// Package desktop facilitates interaction with the desktop environment
|
||||
// and user sessions. As of 2025-02-06, it is only implemented for Windows.
|
||||
package desktop
|
||||
24
vendor/tailscale.com/ipn/desktop/mksyscall.go
generated
vendored
24
vendor/tailscale.com/ipn/desktop/mksyscall.go
generated
vendored
@@ -1,24 +0,0 @@
|
||||
// Copyright (c) Tailscale Inc & AUTHORS
|
||||
// SPDX-License-Identifier: BSD-3-Clause
|
||||
|
||||
package desktop
|
||||
|
||||
//go:generate go run golang.org/x/sys/windows/mkwinsyscall -output zsyscall_windows.go mksyscall.go
|
||||
//go:generate go run golang.org/x/tools/cmd/goimports -w zsyscall_windows.go
|
||||
|
||||
//sys setLastError(dwErrorCode uint32) = kernel32.SetLastError
|
||||
|
||||
//sys registerClassEx(windowClass *_WNDCLASSEX) (atom uint16, err error) [atom==0] = user32.RegisterClassExW
|
||||
//sys createWindowEx(dwExStyle uint32, lpClassName *uint16, lpWindowName *uint16, dwStyle uint32, x int32, y int32, nWidth int32, nHeight int32, hWndParent windows.HWND, hMenu windows.Handle, hInstance windows.Handle, lpParam unsafe.Pointer) (hWnd windows.HWND, err error) [hWnd==0] = user32.CreateWindowExW
|
||||
//sys defWindowProc(hwnd windows.HWND, msg uint32, wparam uintptr, lparam uintptr) (res uintptr) = user32.DefWindowProcW
|
||||
//sys setWindowLongPtr(hwnd windows.HWND, index int32, newLong uintptr) (res uintptr, err error) [res==0 && e1!=0] = user32.SetWindowLongPtrW
|
||||
//sys getWindowLongPtr(hwnd windows.HWND, index int32) (res uintptr, err error) [res==0 && e1!=0] = user32.GetWindowLongPtrW
|
||||
//sys sendMessage(hwnd windows.HWND, msg uint32, wparam uintptr, lparam uintptr) (res uintptr) = user32.SendMessageW
|
||||
//sys getMessage(lpMsg *_MSG, hwnd windows.HWND, msgMin uint32, msgMax uint32) (ret int32) = user32.GetMessageW
|
||||
//sys translateMessage(lpMsg *_MSG) (res bool) = user32.TranslateMessage
|
||||
//sys dispatchMessage(lpMsg *_MSG) (res uintptr) = user32.DispatchMessageW
|
||||
//sys destroyWindow(hwnd windows.HWND) (err error) [int32(failretval)==0] = user32.DestroyWindow
|
||||
//sys postQuitMessage(exitCode int32) = user32.PostQuitMessage
|
||||
|
||||
//sys registerSessionNotification(hServer windows.Handle, hwnd windows.HWND, flags uint32) (err error) [int32(failretval)==0] = wtsapi32.WTSRegisterSessionNotificationEx
|
||||
//sys unregisterSessionNotification(hServer windows.Handle, hwnd windows.HWND) (err error) [int32(failretval)==0] = wtsapi32.WTSUnRegisterSessionNotificationEx
|
||||
58
vendor/tailscale.com/ipn/desktop/session.go
generated
vendored
58
vendor/tailscale.com/ipn/desktop/session.go
generated
vendored
@@ -1,58 +0,0 @@
|
||||
// Copyright (c) Tailscale Inc & AUTHORS
|
||||
// SPDX-License-Identifier: BSD-3-Clause
|
||||
|
||||
package desktop
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
|
||||
"tailscale.com/ipn/ipnauth"
|
||||
)
|
||||
|
||||
// SessionID is a unique identifier of a desktop session.
|
||||
type SessionID uint
|
||||
|
||||
// SessionStatus is the status of a desktop session.
|
||||
type SessionStatus int
|
||||
|
||||
const (
|
||||
// ClosedSession is a session that does not exist, is not yet initialized by the OS,
|
||||
// or has been terminated.
|
||||
ClosedSession SessionStatus = iota
|
||||
// ForegroundSession is a session that a user can interact with,
|
||||
// such as when attached to a physical console or an active,
|
||||
// unlocked RDP connection.
|
||||
ForegroundSession
|
||||
// BackgroundSession indicates that the session is locked, disconnected,
|
||||
// or otherwise running without user presence or interaction.
|
||||
BackgroundSession
|
||||
)
|
||||
|
||||
// String implements [fmt.Stringer].
|
||||
func (s SessionStatus) String() string {
|
||||
switch s {
|
||||
case ClosedSession:
|
||||
return "Closed"
|
||||
case ForegroundSession:
|
||||
return "Foreground"
|
||||
case BackgroundSession:
|
||||
return "Background"
|
||||
default:
|
||||
panic("unreachable")
|
||||
}
|
||||
}
|
||||
|
||||
// Session is a state of a desktop session at a given point in time.
|
||||
type Session struct {
|
||||
ID SessionID // Identifier of the session; can be reused after the session is closed.
|
||||
Status SessionStatus // The status of the session, such as foreground or background.
|
||||
User ipnauth.Actor // User logged into the session.
|
||||
}
|
||||
|
||||
// Description returns a human-readable description of the session.
|
||||
func (s *Session) Description() string {
|
||||
if maybeUsername, _ := s.User.Username(); maybeUsername != "" { // best effort
|
||||
return fmt.Sprintf("Session %d - %q (%s)", s.ID, maybeUsername, s.Status)
|
||||
}
|
||||
return fmt.Sprintf("Session %d (%s)", s.ID, s.Status)
|
||||
}
|
||||
60
vendor/tailscale.com/ipn/desktop/sessions.go
generated
vendored
60
vendor/tailscale.com/ipn/desktop/sessions.go
generated
vendored
@@ -1,60 +0,0 @@
|
||||
// Copyright (c) Tailscale Inc & AUTHORS
|
||||
// SPDX-License-Identifier: BSD-3-Clause
|
||||
|
||||
package desktop
|
||||
|
||||
import (
|
||||
"errors"
|
||||
"runtime"
|
||||
)
|
||||
|
||||
// ErrNotImplemented is returned by [NewSessionManager] when it is not
|
||||
// implemented for the current GOOS.
|
||||
var ErrNotImplemented = errors.New("not implemented for GOOS=" + runtime.GOOS)
|
||||
|
||||
// SessionInitCallback is a function that is called once per [Session].
|
||||
// It returns an optional cleanup function that is called when the session
|
||||
// is about to be destroyed, or nil if no cleanup is needed.
|
||||
// It is not safe to call SessionManager methods from within the callback.
|
||||
type SessionInitCallback func(session *Session) (cleanup func())
|
||||
|
||||
// SessionStateCallback is a function that reports the initial or updated
|
||||
// state of a [Session], such as when it transitions between foreground and background.
|
||||
// It is guaranteed to be called after all registered [SessionInitCallback] functions
|
||||
// have completed, and before any cleanup functions are called for the same session.
|
||||
// It is not safe to call SessionManager methods from within the callback.
|
||||
type SessionStateCallback func(session *Session)
|
||||
|
||||
// SessionManager is an interface that provides access to desktop sessions on the current platform.
|
||||
// It is safe for concurrent use.
|
||||
type SessionManager interface {
|
||||
// Init explicitly initializes the receiver.
|
||||
// Unless the receiver is explicitly initialized, it will be lazily initialized
|
||||
// on the first call to any other method.
|
||||
// It is safe to call Init multiple times.
|
||||
Init() error
|
||||
|
||||
// Sessions returns a session snapshot taken at the time of the call.
|
||||
// Since sessions can be created or destroyed at any time, it may become
|
||||
// outdated as soon as it is returned.
|
||||
//
|
||||
// It is primarily intended for logging and debugging.
|
||||
// Prefer registering a [SessionInitCallback] or [SessionStateCallback]
|
||||
// in contexts requiring stronger guarantees.
|
||||
Sessions() (map[SessionID]*Session, error)
|
||||
|
||||
// RegisterInitCallback registers a [SessionInitCallback] that is called for each existing session
|
||||
// and for each new session that is created, until the returned unregister function is called.
|
||||
// If the specified [SessionInitCallback] returns a cleanup function, it is called when the session
|
||||
// is about to be destroyed. The callback function is guaranteed to be called once and only once
|
||||
// for each existing and new session.
|
||||
RegisterInitCallback(cb SessionInitCallback) (unregister func(), err error)
|
||||
|
||||
// RegisterStateCallback registers a [SessionStateCallback] that is called for each existing session
|
||||
// and every time the state of a session changes, until the returned unregister function is called.
|
||||
RegisterStateCallback(cb SessionStateCallback) (unregister func(), err error)
|
||||
|
||||
// Close waits for all registered callbacks to complete
|
||||
// and releases resources associated with the receiver.
|
||||
Close() error
|
||||
}
|
||||
15
vendor/tailscale.com/ipn/desktop/sessions_notwindows.go
generated
vendored
15
vendor/tailscale.com/ipn/desktop/sessions_notwindows.go
generated
vendored
@@ -1,15 +0,0 @@
|
||||
// Copyright (c) Tailscale Inc & AUTHORS
|
||||
// SPDX-License-Identifier: BSD-3-Clause
|
||||
|
||||
//go:build !windows
|
||||
|
||||
package desktop
|
||||
|
||||
import "tailscale.com/types/logger"
|
||||
|
||||
// NewSessionManager returns a new [SessionManager] for the current platform,
|
||||
// [ErrNotImplemented] if the platform is not supported, or an error if the
|
||||
// session manager could not be created.
|
||||
func NewSessionManager(logger.Logf) (SessionManager, error) {
|
||||
return nil, ErrNotImplemented
|
||||
}
|
||||
672
vendor/tailscale.com/ipn/desktop/sessions_windows.go
generated
vendored
672
vendor/tailscale.com/ipn/desktop/sessions_windows.go
generated
vendored
@@ -1,672 +0,0 @@
|
||||
// Copyright (c) Tailscale Inc & AUTHORS
|
||||
// SPDX-License-Identifier: BSD-3-Clause
|
||||
|
||||
package desktop
|
||||
|
||||
import (
|
||||
"context"
|
||||
"errors"
|
||||
"fmt"
|
||||
"runtime"
|
||||
"sync"
|
||||
"syscall"
|
||||
"unsafe"
|
||||
|
||||
"golang.org/x/sys/windows"
|
||||
"tailscale.com/ipn/ipnauth"
|
||||
"tailscale.com/types/logger"
|
||||
"tailscale.com/util/must"
|
||||
"tailscale.com/util/set"
|
||||
)
|
||||
|
||||
// wtsManager is a [SessionManager] implementation for Windows.
|
||||
type wtsManager struct {
|
||||
logf logger.Logf
|
||||
ctx context.Context // cancelled when the manager is closed
|
||||
ctxCancel context.CancelFunc
|
||||
|
||||
initOnce func() error
|
||||
watcher *sessionWatcher
|
||||
|
||||
mu sync.Mutex
|
||||
sessions map[SessionID]*wtsSession
|
||||
initCbs set.HandleSet[SessionInitCallback]
|
||||
stateCbs set.HandleSet[SessionStateCallback]
|
||||
}
|
||||
|
||||
// NewSessionManager returns a new [SessionManager] for the current platform,
|
||||
func NewSessionManager(logf logger.Logf) (SessionManager, error) {
|
||||
ctx, ctxCancel := context.WithCancel(context.Background())
|
||||
m := &wtsManager{
|
||||
logf: logf,
|
||||
ctx: ctx,
|
||||
ctxCancel: ctxCancel,
|
||||
sessions: make(map[SessionID]*wtsSession),
|
||||
}
|
||||
m.watcher = newSessionWatcher(m.ctx, m.logf, m.sessionEventHandler)
|
||||
|
||||
m.initOnce = sync.OnceValue(func() error {
|
||||
if err := waitUntilWTSReady(m.ctx); err != nil {
|
||||
return fmt.Errorf("WTS is not ready: %w", err)
|
||||
}
|
||||
|
||||
m.mu.Lock()
|
||||
defer m.mu.Unlock()
|
||||
if err := m.watcher.Start(); err != nil {
|
||||
return fmt.Errorf("failed to start session watcher: %w", err)
|
||||
}
|
||||
|
||||
var err error
|
||||
m.sessions, err = enumerateSessions()
|
||||
return err // may be nil or non-nil
|
||||
})
|
||||
return m, nil
|
||||
}
|
||||
|
||||
// Init implements [SessionManager].
|
||||
func (m *wtsManager) Init() error {
|
||||
return m.initOnce()
|
||||
}
|
||||
|
||||
// Sessions implements [SessionManager].
|
||||
func (m *wtsManager) Sessions() (map[SessionID]*Session, error) {
|
||||
if err := m.initOnce(); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
m.mu.Lock()
|
||||
defer m.mu.Unlock()
|
||||
sessions := make(map[SessionID]*Session, len(m.sessions))
|
||||
for _, s := range m.sessions {
|
||||
sessions[s.id] = s.AsSession()
|
||||
}
|
||||
return sessions, nil
|
||||
}
|
||||
|
||||
// RegisterInitCallback implements [SessionManager].
|
||||
func (m *wtsManager) RegisterInitCallback(cb SessionInitCallback) (unregister func(), err error) {
|
||||
if err := m.initOnce(); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if cb == nil {
|
||||
return nil, errors.New("nil callback")
|
||||
}
|
||||
|
||||
m.mu.Lock()
|
||||
defer m.mu.Unlock()
|
||||
handle := m.initCbs.Add(cb)
|
||||
|
||||
// TODO(nickkhyl): enqueue callbacks in a separate goroutine?
|
||||
for _, s := range m.sessions {
|
||||
if cleanup := cb(s.AsSession()); cleanup != nil {
|
||||
s.cleanup = append(s.cleanup, cleanup)
|
||||
}
|
||||
}
|
||||
|
||||
return func() {
|
||||
m.mu.Lock()
|
||||
defer m.mu.Unlock()
|
||||
delete(m.initCbs, handle)
|
||||
}, nil
|
||||
}
|
||||
|
||||
// RegisterStateCallback implements [SessionManager].
|
||||
func (m *wtsManager) RegisterStateCallback(cb SessionStateCallback) (unregister func(), err error) {
|
||||
if err := m.initOnce(); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if cb == nil {
|
||||
return nil, errors.New("nil callback")
|
||||
}
|
||||
|
||||
m.mu.Lock()
|
||||
defer m.mu.Unlock()
|
||||
handle := m.stateCbs.Add(cb)
|
||||
|
||||
// TODO(nickkhyl): enqueue callbacks in a separate goroutine?
|
||||
for _, s := range m.sessions {
|
||||
cb(s.AsSession())
|
||||
}
|
||||
|
||||
return func() {
|
||||
m.mu.Lock()
|
||||
defer m.mu.Unlock()
|
||||
delete(m.stateCbs, handle)
|
||||
}, nil
|
||||
}
|
||||
|
||||
func (m *wtsManager) sessionEventHandler(id SessionID, event uint32) {
|
||||
m.mu.Lock()
|
||||
defer m.mu.Unlock()
|
||||
switch event {
|
||||
case windows.WTS_SESSION_LOGON:
|
||||
// The session may have been created after we started watching,
|
||||
// but before the initial enumeration was performed.
|
||||
// Do not create a new session if it already exists.
|
||||
if _, _, err := m.getOrCreateSessionLocked(id); err != nil {
|
||||
m.logf("[unexpected] getOrCreateSessionLocked(%d): %v", id, err)
|
||||
}
|
||||
case windows.WTS_SESSION_LOCK:
|
||||
if err := m.setSessionStatusLocked(id, BackgroundSession); err != nil {
|
||||
m.logf("[unexpected] setSessionStatusLocked(%d, BackgroundSession): %v", id, err)
|
||||
}
|
||||
case windows.WTS_SESSION_UNLOCK:
|
||||
if err := m.setSessionStatusLocked(id, ForegroundSession); err != nil {
|
||||
m.logf("[unexpected] setSessionStatusLocked(%d, ForegroundSession): %v", id, err)
|
||||
}
|
||||
case windows.WTS_SESSION_LOGOFF:
|
||||
if err := m.deleteSessionLocked(id); err != nil {
|
||||
m.logf("[unexpected] deleteSessionLocked(%d): %v", id, err)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func (m *wtsManager) getOrCreateSessionLocked(id SessionID) (_ *wtsSession, created bool, err error) {
|
||||
if s, ok := m.sessions[id]; ok {
|
||||
return s, false, nil
|
||||
}
|
||||
|
||||
s, err := newWTSSession(id, ForegroundSession)
|
||||
if err != nil {
|
||||
return nil, false, err
|
||||
}
|
||||
m.sessions[id] = s
|
||||
|
||||
session := s.AsSession()
|
||||
// TODO(nickkhyl): enqueue callbacks in a separate goroutine?
|
||||
for _, cb := range m.initCbs {
|
||||
if cleanup := cb(session); cleanup != nil {
|
||||
s.cleanup = append(s.cleanup, cleanup)
|
||||
}
|
||||
}
|
||||
for _, cb := range m.stateCbs {
|
||||
cb(session)
|
||||
}
|
||||
|
||||
return s, true, err
|
||||
}
|
||||
|
||||
func (m *wtsManager) setSessionStatusLocked(id SessionID, status SessionStatus) error {
|
||||
s, _, err := m.getOrCreateSessionLocked(id)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if s.status == status {
|
||||
return nil
|
||||
}
|
||||
|
||||
s.status = status
|
||||
session := s.AsSession()
|
||||
// TODO(nickkhyl): enqueue callbacks in a separate goroutine?
|
||||
for _, cb := range m.stateCbs {
|
||||
cb(session)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (m *wtsManager) deleteSessionLocked(id SessionID) error {
|
||||
s, ok := m.sessions[id]
|
||||
if !ok {
|
||||
return nil
|
||||
}
|
||||
|
||||
s.status = ClosedSession
|
||||
session := s.AsSession()
|
||||
// TODO(nickkhyl): enqueue callbacks (and [wtsSession.close]!) in a separate goroutine?
|
||||
for _, cb := range m.stateCbs {
|
||||
cb(session)
|
||||
}
|
||||
|
||||
delete(m.sessions, id)
|
||||
return s.close()
|
||||
}
|
||||
|
||||
func (m *wtsManager) Close() error {
|
||||
m.ctxCancel()
|
||||
|
||||
if m.watcher != nil {
|
||||
err := m.watcher.Stop()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
m.watcher = nil
|
||||
}
|
||||
|
||||
m.mu.Lock()
|
||||
defer m.mu.Unlock()
|
||||
m.initCbs = nil
|
||||
m.stateCbs = nil
|
||||
errs := make([]error, 0, len(m.sessions))
|
||||
for _, s := range m.sessions {
|
||||
errs = append(errs, s.close())
|
||||
}
|
||||
m.sessions = nil
|
||||
return errors.Join(errs...)
|
||||
}
|
||||
|
||||
type wtsSession struct {
|
||||
id SessionID
|
||||
user *ipnauth.WindowsActor
|
||||
|
||||
status SessionStatus
|
||||
|
||||
cleanup []func()
|
||||
}
|
||||
|
||||
func newWTSSession(id SessionID, status SessionStatus) (*wtsSession, error) {
|
||||
var token windows.Token
|
||||
if err := windows.WTSQueryUserToken(uint32(id), &token); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
user, err := ipnauth.NewWindowsActorWithToken(token)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return &wtsSession{id, user, status, nil}, nil
|
||||
}
|
||||
|
||||
// enumerateSessions returns a map of all active WTS sessions.
|
||||
func enumerateSessions() (map[SessionID]*wtsSession, error) {
|
||||
const reserved, version uint32 = 0, 1
|
||||
var numSessions uint32
|
||||
var sessionInfos *windows.WTS_SESSION_INFO
|
||||
if err := windows.WTSEnumerateSessions(_WTS_CURRENT_SERVER_HANDLE, reserved, version, &sessionInfos, &numSessions); err != nil {
|
||||
return nil, fmt.Errorf("WTSEnumerateSessions failed: %w", err)
|
||||
}
|
||||
defer windows.WTSFreeMemory(uintptr(unsafe.Pointer(sessionInfos)))
|
||||
|
||||
sessions := make(map[SessionID]*wtsSession, numSessions)
|
||||
for _, si := range unsafe.Slice(sessionInfos, numSessions) {
|
||||
status := _WTS_CONNECTSTATE_CLASS(si.State).ToSessionStatus()
|
||||
if status == ClosedSession {
|
||||
// The session does not exist as far as we're concerned.
|
||||
// It may be in the process of being created or destroyed,
|
||||
// or be a special "listener" session, etc.
|
||||
continue
|
||||
}
|
||||
id := SessionID(si.SessionID)
|
||||
session, err := newWTSSession(id, status)
|
||||
if err != nil {
|
||||
continue
|
||||
}
|
||||
sessions[id] = session
|
||||
}
|
||||
return sessions, nil
|
||||
}
|
||||
|
||||
func (s *wtsSession) AsSession() *Session {
|
||||
return &Session{
|
||||
ID: s.id,
|
||||
Status: s.status,
|
||||
// wtsSession owns the user; don't let the caller close it
|
||||
User: ipnauth.WithoutClose(s.user),
|
||||
}
|
||||
}
|
||||
|
||||
func (m *wtsSession) close() error {
|
||||
for _, cleanup := range m.cleanup {
|
||||
cleanup()
|
||||
}
|
||||
m.cleanup = nil
|
||||
|
||||
if m.user != nil {
|
||||
if err := m.user.Close(); err != nil {
|
||||
return err
|
||||
}
|
||||
m.user = nil
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
type sessionEventHandler func(id SessionID, event uint32)
|
||||
|
||||
// TODO(nickkhyl): implement a sessionWatcher that does not use the message queue.
|
||||
// One possible approach is to have the tailscaled service register a HandlerEx function
|
||||
// and stream SERVICE_CONTROL_SESSIONCHANGE events to the tailscaled subprocess
|
||||
// (the actual tailscaled backend), exposing these events via [sessionWatcher]/[wtsManager].
|
||||
//
|
||||
// See tailscale/corp#26477 for details and tracking.
|
||||
type sessionWatcher struct {
|
||||
logf logger.Logf
|
||||
ctx context.Context // canceled to stop the watcher
|
||||
ctxCancel context.CancelFunc // cancels the watcher
|
||||
hWnd windows.HWND // window handle for receiving session change notifications
|
||||
handler sessionEventHandler // called on session events
|
||||
|
||||
mu sync.Mutex
|
||||
doneCh chan error // written to when the watcher exits; nil if not started
|
||||
}
|
||||
|
||||
func newSessionWatcher(ctx context.Context, logf logger.Logf, handler sessionEventHandler) *sessionWatcher {
|
||||
ctx, cancel := context.WithCancel(ctx)
|
||||
return &sessionWatcher{logf: logf, ctx: ctx, ctxCancel: cancel, handler: handler}
|
||||
}
|
||||
|
||||
func (sw *sessionWatcher) Start() error {
|
||||
sw.mu.Lock()
|
||||
defer sw.mu.Unlock()
|
||||
|
||||
select {
|
||||
case <-sw.ctx.Done():
|
||||
return fmt.Errorf("sessionWatcher already stopped: %w", sw.ctx.Err())
|
||||
default:
|
||||
}
|
||||
|
||||
if sw.doneCh != nil {
|
||||
// Already started.
|
||||
return nil
|
||||
}
|
||||
sw.doneCh = make(chan error, 1)
|
||||
|
||||
startedCh := make(chan error, 1)
|
||||
go sw.run(startedCh, sw.doneCh)
|
||||
if err := <-startedCh; err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
// Signal the window to unsubscribe from session notifications
|
||||
// and shut down gracefully when the sessionWatcher is stopped.
|
||||
context.AfterFunc(sw.ctx, func() {
|
||||
sendMessage(sw.hWnd, _WM_CLOSE, 0, 0)
|
||||
})
|
||||
return nil
|
||||
}
|
||||
|
||||
func (sw *sessionWatcher) run(started, done chan<- error) {
|
||||
runtime.LockOSThread()
|
||||
defer func() {
|
||||
runtime.UnlockOSThread()
|
||||
close(done)
|
||||
}()
|
||||
err := sw.createMessageWindow()
|
||||
started <- err
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
pumpThreadMessages()
|
||||
}
|
||||
|
||||
// Stop stops the session watcher and waits for it to exit.
|
||||
func (sw *sessionWatcher) Stop() error {
|
||||
sw.ctxCancel()
|
||||
|
||||
sw.mu.Lock()
|
||||
doneCh := sw.doneCh
|
||||
sw.doneCh = nil
|
||||
sw.mu.Unlock()
|
||||
|
||||
if doneCh != nil {
|
||||
return <-doneCh
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
const watcherWindowClassName = "Tailscale-SessionManager"
|
||||
|
||||
var watcherWindowClassName16 = sync.OnceValue(func() *uint16 {
|
||||
return must.Get(syscall.UTF16PtrFromString(watcherWindowClassName))
|
||||
})
|
||||
|
||||
var registerSessionManagerWindowClass = sync.OnceValue(func() error {
|
||||
var hInst windows.Handle
|
||||
if err := windows.GetModuleHandleEx(0, nil, &hInst); err != nil {
|
||||
return fmt.Errorf("GetModuleHandle: %w", err)
|
||||
}
|
||||
wc := _WNDCLASSEX{
|
||||
CbSize: uint32(unsafe.Sizeof(_WNDCLASSEX{})),
|
||||
HInstance: hInst,
|
||||
LpfnWndProc: syscall.NewCallback(sessionWatcherWndProc),
|
||||
LpszClassName: watcherWindowClassName16(),
|
||||
}
|
||||
if _, err := registerClassEx(&wc); err != nil {
|
||||
return fmt.Errorf("RegisterClassEx(%q): %w", watcherWindowClassName, err)
|
||||
}
|
||||
return nil
|
||||
})
|
||||
|
||||
func (sw *sessionWatcher) createMessageWindow() error {
|
||||
if err := registerSessionManagerWindowClass(); err != nil {
|
||||
return err
|
||||
}
|
||||
_, err := createWindowEx(
|
||||
0, // dwExStyle
|
||||
watcherWindowClassName16(), // lpClassName
|
||||
nil, // lpWindowName
|
||||
0, // dwStyle
|
||||
0, // x
|
||||
0, // y
|
||||
0, // nWidth
|
||||
0, // nHeight
|
||||
_HWND_MESSAGE, // hWndParent; message-only window
|
||||
0, // hMenu
|
||||
0, // hInstance
|
||||
unsafe.Pointer(sw), // lpParam
|
||||
)
|
||||
if err != nil {
|
||||
return fmt.Errorf("CreateWindowEx: %w", err)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (sw *sessionWatcher) wndProc(hWnd windows.HWND, msg uint32, wParam, lParam uintptr) (result uintptr) {
|
||||
switch msg {
|
||||
case _WM_CREATE:
|
||||
err := registerSessionNotification(_WTS_CURRENT_SERVER_HANDLE, hWnd, _NOTIFY_FOR_ALL_SESSIONS)
|
||||
if err != nil {
|
||||
sw.logf("[unexpected] failed to register for session notifications: %v", err)
|
||||
return ^uintptr(0)
|
||||
}
|
||||
sw.logf("registered for session notifications")
|
||||
case _WM_WTSSESSION_CHANGE:
|
||||
sw.handler(SessionID(lParam), uint32(wParam))
|
||||
return 0
|
||||
case _WM_CLOSE:
|
||||
if err := destroyWindow(hWnd); err != nil {
|
||||
sw.logf("[unexpected] failed to destroy window: %v", err)
|
||||
}
|
||||
return 0
|
||||
case _WM_DESTROY:
|
||||
err := unregisterSessionNotification(_WTS_CURRENT_SERVER_HANDLE, hWnd)
|
||||
if err != nil {
|
||||
sw.logf("[unexpected] failed to unregister session notifications callback: %v", err)
|
||||
}
|
||||
sw.logf("unregistered from session notifications")
|
||||
return 0
|
||||
case _WM_NCDESTROY:
|
||||
sw.hWnd = 0
|
||||
postQuitMessage(0) // quit the message loop for this thread
|
||||
}
|
||||
return defWindowProc(hWnd, msg, wParam, lParam)
|
||||
}
|
||||
|
||||
func (sw *sessionWatcher) setHandle(hwnd windows.HWND) error {
|
||||
sw.hWnd = hwnd
|
||||
setLastError(0)
|
||||
_, err := setWindowLongPtr(sw.hWnd, _GWLP_USERDATA, uintptr(unsafe.Pointer(sw)))
|
||||
return err // may be nil or non-nil
|
||||
}
|
||||
|
||||
func sessionWatcherByHandle(hwnd windows.HWND) *sessionWatcher {
|
||||
val, _ := getWindowLongPtr(hwnd, _GWLP_USERDATA)
|
||||
return (*sessionWatcher)(unsafe.Pointer(val))
|
||||
}
|
||||
|
||||
func sessionWatcherWndProc(hWnd windows.HWND, msg uint32, wParam, lParam uintptr) (result uintptr) {
|
||||
if msg == _WM_NCCREATE {
|
||||
cs := (*_CREATESTRUCT)(unsafe.Pointer(lParam))
|
||||
sw := (*sessionWatcher)(unsafe.Pointer(cs.CreateParams))
|
||||
if sw == nil {
|
||||
return 0
|
||||
}
|
||||
if err := sw.setHandle(hWnd); err != nil {
|
||||
return 0
|
||||
}
|
||||
return defWindowProc(hWnd, msg, wParam, lParam)
|
||||
}
|
||||
if sw := sessionWatcherByHandle(hWnd); sw != nil {
|
||||
return sw.wndProc(hWnd, msg, wParam, lParam)
|
||||
}
|
||||
return defWindowProc(hWnd, msg, wParam, lParam)
|
||||
}
|
||||
|
||||
func pumpThreadMessages() {
|
||||
var msg _MSG
|
||||
for getMessage(&msg, 0, 0, 0) != 0 {
|
||||
translateMessage(&msg)
|
||||
dispatchMessage(&msg)
|
||||
}
|
||||
}
|
||||
|
||||
// waitUntilWTSReady waits until the Windows Terminal Services (WTS) is ready.
|
||||
// This is necessary because the WTS API functions may fail if called before
|
||||
// the WTS is ready.
|
||||
//
|
||||
// https://web.archive.org/web/20250207011738/https://learn.microsoft.com/en-us/windows/win32/api/wtsapi32/nf-wtsapi32-wtsregistersessionnotificationex
|
||||
func waitUntilWTSReady(ctx context.Context) error {
|
||||
eventName16, err := windows.UTF16PtrFromString(`Global\TermSrvReadyEvent`)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
event, err := windows.OpenEvent(windows.SYNCHRONIZE, false, eventName16)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
defer windows.CloseHandle(event)
|
||||
return waitForContextOrHandle(ctx, event)
|
||||
}
|
||||
|
||||
// waitForContextOrHandle waits for either the context to be done or a handle to be signaled.
|
||||
func waitForContextOrHandle(ctx context.Context, handle windows.Handle) error {
|
||||
contextDoneEvent, cleanup, err := channelToEvent(ctx.Done())
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
defer cleanup()
|
||||
|
||||
handles := []windows.Handle{contextDoneEvent, handle}
|
||||
waitCode, err := windows.WaitForMultipleObjects(handles, false, windows.INFINITE)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
waitCode -= windows.WAIT_OBJECT_0
|
||||
if waitCode == 0 { // contextDoneEvent
|
||||
return ctx.Err()
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// channelToEvent returns an auto-reset event that is set when the channel
|
||||
// becomes receivable, including when the channel is closed.
|
||||
func channelToEvent[T any](c <-chan T) (evt windows.Handle, cleanup func(), err error) {
|
||||
evt, err = windows.CreateEvent(nil, 0, 0, nil)
|
||||
if err != nil {
|
||||
return 0, nil, err
|
||||
}
|
||||
|
||||
cancel := make(chan struct{})
|
||||
|
||||
go func() {
|
||||
select {
|
||||
case <-cancel:
|
||||
return
|
||||
case <-c:
|
||||
}
|
||||
windows.SetEvent(evt)
|
||||
}()
|
||||
|
||||
cleanup = func() {
|
||||
close(cancel)
|
||||
windows.CloseHandle(evt)
|
||||
}
|
||||
|
||||
return evt, cleanup, nil
|
||||
}
|
||||
|
||||
type _WNDCLASSEX struct {
|
||||
CbSize uint32
|
||||
Style uint32
|
||||
LpfnWndProc uintptr
|
||||
CbClsExtra int32
|
||||
CbWndExtra int32
|
||||
HInstance windows.Handle
|
||||
HIcon windows.Handle
|
||||
HCursor windows.Handle
|
||||
HbrBackground windows.Handle
|
||||
LpszMenuName *uint16
|
||||
LpszClassName *uint16
|
||||
HIconSm windows.Handle
|
||||
}
|
||||
|
||||
type _CREATESTRUCT struct {
|
||||
CreateParams uintptr
|
||||
Instance windows.Handle
|
||||
Menu windows.Handle
|
||||
Parent windows.HWND
|
||||
Cy int32
|
||||
Cx int32
|
||||
Y int32
|
||||
X int32
|
||||
Style int32
|
||||
Name *uint16
|
||||
ClassName *uint16
|
||||
ExStyle uint32
|
||||
}
|
||||
|
||||
type _POINT struct {
|
||||
X, Y int32
|
||||
}
|
||||
|
||||
type _MSG struct {
|
||||
HWnd windows.HWND
|
||||
Message uint32
|
||||
WParam uintptr
|
||||
LParam uintptr
|
||||
Time uint32
|
||||
Pt _POINT
|
||||
}
|
||||
|
||||
const (
|
||||
_WM_CREATE = 1
|
||||
_WM_DESTROY = 2
|
||||
_WM_CLOSE = 16
|
||||
_WM_NCCREATE = 129
|
||||
_WM_QUIT = 18
|
||||
_WM_NCDESTROY = 130
|
||||
|
||||
// _WM_WTSSESSION_CHANGE is a message sent to windows that have registered
|
||||
// for session change notifications, informing them of changes in session state.
|
||||
//
|
||||
// https://web.archive.org/web/20250207012421/https://learn.microsoft.com/en-us/windows/win32/termserv/wm-wtssession-change
|
||||
_WM_WTSSESSION_CHANGE = 0x02B1
|
||||
)
|
||||
|
||||
const _GWLP_USERDATA = -21
|
||||
|
||||
const _HWND_MESSAGE = ^windows.HWND(2)
|
||||
|
||||
// _NOTIFY_FOR_ALL_SESSIONS indicates that the window should receive
|
||||
// session change notifications for all sessions on the specified server.
|
||||
const _NOTIFY_FOR_ALL_SESSIONS = 1
|
||||
|
||||
// _WTS_CURRENT_SERVER_HANDLE indicates that the window should receive
|
||||
// session change notifications for the host itself rather than a remote server.
|
||||
const _WTS_CURRENT_SERVER_HANDLE = windows.Handle(0)
|
||||
|
||||
// _WTS_CONNECTSTATE_CLASS represents the connection state of a session.
|
||||
//
|
||||
// https://web.archive.org/web/20250206082427/https://learn.microsoft.com/en-us/windows/win32/api/wtsapi32/ne-wtsapi32-wts_connectstate_class
|
||||
type _WTS_CONNECTSTATE_CLASS int32
|
||||
|
||||
// ToSessionStatus converts cs to a [SessionStatus].
|
||||
func (cs _WTS_CONNECTSTATE_CLASS) ToSessionStatus() SessionStatus {
|
||||
switch cs {
|
||||
case windows.WTSActive:
|
||||
return ForegroundSession
|
||||
case windows.WTSDisconnected:
|
||||
return BackgroundSession
|
||||
default:
|
||||
// The session does not exist as far as we're concerned.
|
||||
return ClosedSession
|
||||
}
|
||||
}
|
||||
159
vendor/tailscale.com/ipn/desktop/zsyscall_windows.go
generated
vendored
159
vendor/tailscale.com/ipn/desktop/zsyscall_windows.go
generated
vendored
@@ -1,159 +0,0 @@
|
||||
// Code generated by 'go generate'; DO NOT EDIT.
|
||||
|
||||
package desktop
|
||||
|
||||
import (
|
||||
"syscall"
|
||||
"unsafe"
|
||||
|
||||
"golang.org/x/sys/windows"
|
||||
)
|
||||
|
||||
var _ unsafe.Pointer
|
||||
|
||||
// Do the interface allocations only once for common
|
||||
// Errno values.
|
||||
const (
|
||||
errnoERROR_IO_PENDING = 997
|
||||
)
|
||||
|
||||
var (
|
||||
errERROR_IO_PENDING error = syscall.Errno(errnoERROR_IO_PENDING)
|
||||
errERROR_EINVAL error = syscall.EINVAL
|
||||
)
|
||||
|
||||
// errnoErr returns common boxed Errno values, to prevent
|
||||
// allocations at runtime.
|
||||
func errnoErr(e syscall.Errno) error {
|
||||
switch e {
|
||||
case 0:
|
||||
return errERROR_EINVAL
|
||||
case errnoERROR_IO_PENDING:
|
||||
return errERROR_IO_PENDING
|
||||
}
|
||||
// TODO: add more here, after collecting data on the common
|
||||
// error values see on Windows. (perhaps when running
|
||||
// all.bat?)
|
||||
return e
|
||||
}
|
||||
|
||||
var (
|
||||
modkernel32 = windows.NewLazySystemDLL("kernel32.dll")
|
||||
moduser32 = windows.NewLazySystemDLL("user32.dll")
|
||||
modwtsapi32 = windows.NewLazySystemDLL("wtsapi32.dll")
|
||||
|
||||
procSetLastError = modkernel32.NewProc("SetLastError")
|
||||
procCreateWindowExW = moduser32.NewProc("CreateWindowExW")
|
||||
procDefWindowProcW = moduser32.NewProc("DefWindowProcW")
|
||||
procDestroyWindow = moduser32.NewProc("DestroyWindow")
|
||||
procDispatchMessageW = moduser32.NewProc("DispatchMessageW")
|
||||
procGetMessageW = moduser32.NewProc("GetMessageW")
|
||||
procGetWindowLongPtrW = moduser32.NewProc("GetWindowLongPtrW")
|
||||
procPostQuitMessage = moduser32.NewProc("PostQuitMessage")
|
||||
procRegisterClassExW = moduser32.NewProc("RegisterClassExW")
|
||||
procSendMessageW = moduser32.NewProc("SendMessageW")
|
||||
procSetWindowLongPtrW = moduser32.NewProc("SetWindowLongPtrW")
|
||||
procTranslateMessage = moduser32.NewProc("TranslateMessage")
|
||||
procWTSRegisterSessionNotificationEx = modwtsapi32.NewProc("WTSRegisterSessionNotificationEx")
|
||||
procWTSUnRegisterSessionNotificationEx = modwtsapi32.NewProc("WTSUnRegisterSessionNotificationEx")
|
||||
)
|
||||
|
||||
func setLastError(dwErrorCode uint32) {
|
||||
syscall.Syscall(procSetLastError.Addr(), 1, uintptr(dwErrorCode), 0, 0)
|
||||
return
|
||||
}
|
||||
|
||||
func createWindowEx(dwExStyle uint32, lpClassName *uint16, lpWindowName *uint16, dwStyle uint32, x int32, y int32, nWidth int32, nHeight int32, hWndParent windows.HWND, hMenu windows.Handle, hInstance windows.Handle, lpParam unsafe.Pointer) (hWnd windows.HWND, err error) {
|
||||
r0, _, e1 := syscall.Syscall12(procCreateWindowExW.Addr(), 12, uintptr(dwExStyle), uintptr(unsafe.Pointer(lpClassName)), uintptr(unsafe.Pointer(lpWindowName)), uintptr(dwStyle), uintptr(x), uintptr(y), uintptr(nWidth), uintptr(nHeight), uintptr(hWndParent), uintptr(hMenu), uintptr(hInstance), uintptr(lpParam))
|
||||
hWnd = windows.HWND(r0)
|
||||
if hWnd == 0 {
|
||||
err = errnoErr(e1)
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
func defWindowProc(hwnd windows.HWND, msg uint32, wparam uintptr, lparam uintptr) (res uintptr) {
|
||||
r0, _, _ := syscall.Syscall6(procDefWindowProcW.Addr(), 4, uintptr(hwnd), uintptr(msg), uintptr(wparam), uintptr(lparam), 0, 0)
|
||||
res = uintptr(r0)
|
||||
return
|
||||
}
|
||||
|
||||
func destroyWindow(hwnd windows.HWND) (err error) {
|
||||
r1, _, e1 := syscall.Syscall(procDestroyWindow.Addr(), 1, uintptr(hwnd), 0, 0)
|
||||
if int32(r1) == 0 {
|
||||
err = errnoErr(e1)
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
func dispatchMessage(lpMsg *_MSG) (res uintptr) {
|
||||
r0, _, _ := syscall.Syscall(procDispatchMessageW.Addr(), 1, uintptr(unsafe.Pointer(lpMsg)), 0, 0)
|
||||
res = uintptr(r0)
|
||||
return
|
||||
}
|
||||
|
||||
func getMessage(lpMsg *_MSG, hwnd windows.HWND, msgMin uint32, msgMax uint32) (ret int32) {
|
||||
r0, _, _ := syscall.Syscall6(procGetMessageW.Addr(), 4, uintptr(unsafe.Pointer(lpMsg)), uintptr(hwnd), uintptr(msgMin), uintptr(msgMax), 0, 0)
|
||||
ret = int32(r0)
|
||||
return
|
||||
}
|
||||
|
||||
func getWindowLongPtr(hwnd windows.HWND, index int32) (res uintptr, err error) {
|
||||
r0, _, e1 := syscall.Syscall(procGetWindowLongPtrW.Addr(), 2, uintptr(hwnd), uintptr(index), 0)
|
||||
res = uintptr(r0)
|
||||
if res == 0 && e1 != 0 {
|
||||
err = errnoErr(e1)
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
func postQuitMessage(exitCode int32) {
|
||||
syscall.Syscall(procPostQuitMessage.Addr(), 1, uintptr(exitCode), 0, 0)
|
||||
return
|
||||
}
|
||||
|
||||
func registerClassEx(windowClass *_WNDCLASSEX) (atom uint16, err error) {
|
||||
r0, _, e1 := syscall.Syscall(procRegisterClassExW.Addr(), 1, uintptr(unsafe.Pointer(windowClass)), 0, 0)
|
||||
atom = uint16(r0)
|
||||
if atom == 0 {
|
||||
err = errnoErr(e1)
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
func sendMessage(hwnd windows.HWND, msg uint32, wparam uintptr, lparam uintptr) (res uintptr) {
|
||||
r0, _, _ := syscall.Syscall6(procSendMessageW.Addr(), 4, uintptr(hwnd), uintptr(msg), uintptr(wparam), uintptr(lparam), 0, 0)
|
||||
res = uintptr(r0)
|
||||
return
|
||||
}
|
||||
|
||||
func setWindowLongPtr(hwnd windows.HWND, index int32, newLong uintptr) (res uintptr, err error) {
|
||||
r0, _, e1 := syscall.Syscall(procSetWindowLongPtrW.Addr(), 3, uintptr(hwnd), uintptr(index), uintptr(newLong))
|
||||
res = uintptr(r0)
|
||||
if res == 0 && e1 != 0 {
|
||||
err = errnoErr(e1)
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
func translateMessage(lpMsg *_MSG) (res bool) {
|
||||
r0, _, _ := syscall.Syscall(procTranslateMessage.Addr(), 1, uintptr(unsafe.Pointer(lpMsg)), 0, 0)
|
||||
res = r0 != 0
|
||||
return
|
||||
}
|
||||
|
||||
func registerSessionNotification(hServer windows.Handle, hwnd windows.HWND, flags uint32) (err error) {
|
||||
r1, _, e1 := syscall.Syscall(procWTSRegisterSessionNotificationEx.Addr(), 3, uintptr(hServer), uintptr(hwnd), uintptr(flags))
|
||||
if int32(r1) == 0 {
|
||||
err = errnoErr(e1)
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
func unregisterSessionNotification(hServer windows.Handle, hwnd windows.HWND) (err error) {
|
||||
r1, _, e1 := syscall.Syscall(procWTSUnRegisterSessionNotificationEx.Addr(), 2, uintptr(hServer), uintptr(hwnd), 0)
|
||||
if int32(r1) == 0 {
|
||||
err = errnoErr(e1)
|
||||
}
|
||||
return
|
||||
}
|
||||
90
vendor/tailscale.com/ipn/ipn_clone.go
generated
vendored
90
vendor/tailscale.com/ipn/ipn_clone.go
generated
vendored
@@ -61,43 +61,51 @@ func (src *Prefs) Clone() *Prefs {
|
||||
}
|
||||
}
|
||||
}
|
||||
if dst.RelayServerPort != nil {
|
||||
dst.RelayServerPort = ptr.To(*src.RelayServerPort)
|
||||
}
|
||||
dst.RelayServerStaticEndpoints = append(src.RelayServerStaticEndpoints[:0:0], src.RelayServerStaticEndpoints...)
|
||||
dst.Persist = src.Persist.Clone()
|
||||
return dst
|
||||
}
|
||||
|
||||
// A compilation failure here means this code must be regenerated, with the command at the top of this file.
|
||||
var _PrefsCloneNeedsRegeneration = Prefs(struct {
|
||||
ControlURL string
|
||||
RouteAll bool
|
||||
ExitNodeID tailcfg.StableNodeID
|
||||
ExitNodeIP netip.Addr
|
||||
InternalExitNodePrior tailcfg.StableNodeID
|
||||
ExitNodeAllowLANAccess bool
|
||||
CorpDNS bool
|
||||
RunSSH bool
|
||||
RunWebClient bool
|
||||
WantRunning bool
|
||||
LoggedOut bool
|
||||
ShieldsUp bool
|
||||
AdvertiseTags []string
|
||||
Hostname string
|
||||
NotepadURLs bool
|
||||
ForceDaemon bool
|
||||
Egg bool
|
||||
AdvertiseRoutes []netip.Prefix
|
||||
AdvertiseServices []string
|
||||
NoSNAT bool
|
||||
NoStatefulFiltering opt.Bool
|
||||
NetfilterMode preftype.NetfilterMode
|
||||
OperatorUser string
|
||||
ProfileName string
|
||||
AutoUpdate AutoUpdatePrefs
|
||||
AppConnector AppConnectorPrefs
|
||||
PostureChecking bool
|
||||
NetfilterKind string
|
||||
DriveShares []*drive.Share
|
||||
AllowSingleHosts marshalAsTrueInJSON
|
||||
Persist *persist.Persist
|
||||
ControlURL string
|
||||
RouteAll bool
|
||||
ExitNodeID tailcfg.StableNodeID
|
||||
ExitNodeIP netip.Addr
|
||||
AutoExitNode ExitNodeExpression
|
||||
InternalExitNodePrior tailcfg.StableNodeID
|
||||
ExitNodeAllowLANAccess bool
|
||||
CorpDNS bool
|
||||
RunSSH bool
|
||||
RunWebClient bool
|
||||
WantRunning bool
|
||||
LoggedOut bool
|
||||
ShieldsUp bool
|
||||
AdvertiseTags []string
|
||||
Hostname string
|
||||
NotepadURLs bool
|
||||
ForceDaemon bool
|
||||
Egg bool
|
||||
AdvertiseRoutes []netip.Prefix
|
||||
AdvertiseServices []string
|
||||
Sync opt.Bool
|
||||
NoSNAT bool
|
||||
NoStatefulFiltering opt.Bool
|
||||
NetfilterMode preftype.NetfilterMode
|
||||
OperatorUser string
|
||||
ProfileName string
|
||||
AutoUpdate AutoUpdatePrefs
|
||||
AppConnector AppConnectorPrefs
|
||||
PostureChecking bool
|
||||
NetfilterKind string
|
||||
DriveShares []*drive.Share
|
||||
RelayServerPort *uint16
|
||||
RelayServerStaticEndpoints []netip.AddrPort
|
||||
AllowSingleHosts marshalAsTrueInJSON
|
||||
Persist *persist.Persist
|
||||
}{})
|
||||
|
||||
// Clone makes a deep copy of ServeConfig.
|
||||
@@ -213,10 +221,11 @@ func (src *TCPPortHandler) Clone() *TCPPortHandler {
|
||||
|
||||
// A compilation failure here means this code must be regenerated, with the command at the top of this file.
|
||||
var _TCPPortHandlerCloneNeedsRegeneration = TCPPortHandler(struct {
|
||||
HTTPS bool
|
||||
HTTP bool
|
||||
TCPForward string
|
||||
TerminateTLS string
|
||||
HTTPS bool
|
||||
HTTP bool
|
||||
TCPForward string
|
||||
TerminateTLS string
|
||||
ProxyProtocol int
|
||||
}{})
|
||||
|
||||
// Clone makes a deep copy of HTTPHandler.
|
||||
@@ -227,14 +236,17 @@ func (src *HTTPHandler) Clone() *HTTPHandler {
|
||||
}
|
||||
dst := new(HTTPHandler)
|
||||
*dst = *src
|
||||
dst.AcceptAppCaps = append(src.AcceptAppCaps[:0:0], src.AcceptAppCaps...)
|
||||
return dst
|
||||
}
|
||||
|
||||
// A compilation failure here means this code must be regenerated, with the command at the top of this file.
|
||||
var _HTTPHandlerCloneNeedsRegeneration = HTTPHandler(struct {
|
||||
Path string
|
||||
Proxy string
|
||||
Text string
|
||||
Path string
|
||||
Proxy string
|
||||
Text string
|
||||
AcceptAppCaps []tailcfg.PeerCapability
|
||||
Redirect string
|
||||
}{})
|
||||
|
||||
// Clone makes a deep copy of WebServerConfig.
|
||||
@@ -251,7 +263,7 @@ func (src *WebServerConfig) Clone() *WebServerConfig {
|
||||
if v == nil {
|
||||
dst.Handlers[k] = nil
|
||||
} else {
|
||||
dst.Handlers[k] = ptr.To(*v)
|
||||
dst.Handlers[k] = v.Clone()
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
670
vendor/tailscale.com/ipn/ipn_view.go
generated
vendored
670
vendor/tailscale.com/ipn/ipn_view.go
generated
vendored
@@ -6,10 +6,12 @@
|
||||
package ipn
|
||||
|
||||
import (
|
||||
"encoding/json"
|
||||
jsonv1 "encoding/json"
|
||||
"errors"
|
||||
"net/netip"
|
||||
|
||||
jsonv2 "github.com/go-json-experiment/json"
|
||||
"github.com/go-json-experiment/json/jsontext"
|
||||
"tailscale.com/drive"
|
||||
"tailscale.com/tailcfg"
|
||||
"tailscale.com/types/opt"
|
||||
@@ -48,8 +50,17 @@ func (v LoginProfileView) AsStruct() *LoginProfile {
|
||||
return v.ж.Clone()
|
||||
}
|
||||
|
||||
func (v LoginProfileView) MarshalJSON() ([]byte, error) { return json.Marshal(v.ж) }
|
||||
// MarshalJSON implements [jsonv1.Marshaler].
|
||||
func (v LoginProfileView) MarshalJSON() ([]byte, error) {
|
||||
return jsonv1.Marshal(v.ж)
|
||||
}
|
||||
|
||||
// MarshalJSONTo implements [jsonv2.MarshalerTo].
|
||||
func (v LoginProfileView) MarshalJSONTo(enc *jsontext.Encoder) error {
|
||||
return jsonv2.MarshalEncode(enc, v.ж)
|
||||
}
|
||||
|
||||
// UnmarshalJSON implements [jsonv1.Unmarshaler].
|
||||
func (v *LoginProfileView) UnmarshalJSON(b []byte) error {
|
||||
if v.ж != nil {
|
||||
return errors.New("already initialized")
|
||||
@@ -58,21 +69,67 @@ func (v *LoginProfileView) UnmarshalJSON(b []byte) error {
|
||||
return nil
|
||||
}
|
||||
var x LoginProfile
|
||||
if err := json.Unmarshal(b, &x); err != nil {
|
||||
if err := jsonv1.Unmarshal(b, &x); err != nil {
|
||||
return err
|
||||
}
|
||||
v.ж = &x
|
||||
return nil
|
||||
}
|
||||
|
||||
func (v LoginProfileView) ID() ProfileID { return v.ж.ID }
|
||||
func (v LoginProfileView) Name() string { return v.ж.Name }
|
||||
func (v LoginProfileView) NetworkProfile() NetworkProfile { return v.ж.NetworkProfile }
|
||||
func (v LoginProfileView) Key() StateKey { return v.ж.Key }
|
||||
// UnmarshalJSONFrom implements [jsonv2.UnmarshalerFrom].
|
||||
func (v *LoginProfileView) UnmarshalJSONFrom(dec *jsontext.Decoder) error {
|
||||
if v.ж != nil {
|
||||
return errors.New("already initialized")
|
||||
}
|
||||
var x LoginProfile
|
||||
if err := jsonv2.UnmarshalDecode(dec, &x); err != nil {
|
||||
return err
|
||||
}
|
||||
v.ж = &x
|
||||
return nil
|
||||
}
|
||||
|
||||
// ID is a unique identifier for this profile.
|
||||
// It is assigned on creation and never changes.
|
||||
// It may seem redundant to have both ID and UserProfile.ID
|
||||
// but they are different things. UserProfile.ID may change
|
||||
// over time (e.g. if a device is tagged).
|
||||
func (v LoginProfileView) ID() ProfileID { return v.ж.ID }
|
||||
|
||||
// Name is the user-visible name of this profile.
|
||||
// It is filled in from the UserProfile.LoginName field.
|
||||
func (v LoginProfileView) Name() string { return v.ж.Name }
|
||||
|
||||
// NetworkProfile is a subset of netmap.NetworkMap that we
|
||||
// store to remember information about the tailnet that this
|
||||
// profile was logged in with.
|
||||
//
|
||||
// This field was added on 2023-11-17.
|
||||
func (v LoginProfileView) NetworkProfile() NetworkProfile { return v.ж.NetworkProfile }
|
||||
|
||||
// Key is the StateKey under which the profile is stored.
|
||||
// It is assigned once at profile creation time and never changes.
|
||||
func (v LoginProfileView) Key() StateKey { return v.ж.Key }
|
||||
|
||||
// UserProfile is the server provided UserProfile for this profile.
|
||||
// This is updated whenever the server provides a new UserProfile.
|
||||
func (v LoginProfileView) UserProfile() tailcfg.UserProfile { return v.ж.UserProfile }
|
||||
func (v LoginProfileView) NodeID() tailcfg.StableNodeID { return v.ж.NodeID }
|
||||
func (v LoginProfileView) LocalUserID() WindowsUserID { return v.ж.LocalUserID }
|
||||
func (v LoginProfileView) ControlURL() string { return v.ж.ControlURL }
|
||||
|
||||
// NodeID is the NodeID of the node that this profile is logged into.
|
||||
// This should be stable across tagging and untagging nodes.
|
||||
// It may seem redundant to check against both the UserProfile.UserID
|
||||
// and the NodeID. However the NodeID can change if the node is deleted
|
||||
// from the admin panel.
|
||||
func (v LoginProfileView) NodeID() tailcfg.StableNodeID { return v.ж.NodeID }
|
||||
|
||||
// LocalUserID is the user ID of the user who created this profile.
|
||||
// It is only relevant on Windows where we have a multi-user system.
|
||||
// It is assigned once at profile creation time and never changes.
|
||||
func (v LoginProfileView) LocalUserID() WindowsUserID { return v.ж.LocalUserID }
|
||||
|
||||
// ControlURL is the URL of the control server that this profile is logged
|
||||
// into.
|
||||
func (v LoginProfileView) ControlURL() string { return v.ж.ControlURL }
|
||||
|
||||
// A compilation failure here means this code must be regenerated, with the command at the top of this file.
|
||||
var _LoginProfileViewNeedsRegeneration = LoginProfile(struct {
|
||||
@@ -114,8 +171,17 @@ func (v PrefsView) AsStruct() *Prefs {
|
||||
return v.ж.Clone()
|
||||
}
|
||||
|
||||
func (v PrefsView) MarshalJSON() ([]byte, error) { return json.Marshal(v.ж) }
|
||||
// MarshalJSON implements [jsonv1.Marshaler].
|
||||
func (v PrefsView) MarshalJSON() ([]byte, error) {
|
||||
return jsonv1.Marshal(v.ж)
|
||||
}
|
||||
|
||||
// MarshalJSONTo implements [jsonv2.MarshalerTo].
|
||||
func (v PrefsView) MarshalJSONTo(enc *jsontext.Encoder) error {
|
||||
return jsonv2.MarshalEncode(enc, v.ж)
|
||||
}
|
||||
|
||||
// UnmarshalJSON implements [jsonv1.Unmarshaler].
|
||||
func (v *PrefsView) UnmarshalJSON(b []byte) error {
|
||||
if v.ж != nil {
|
||||
return errors.New("already initialized")
|
||||
@@ -124,84 +190,324 @@ func (v *PrefsView) UnmarshalJSON(b []byte) error {
|
||||
return nil
|
||||
}
|
||||
var x Prefs
|
||||
if err := json.Unmarshal(b, &x); err != nil {
|
||||
if err := jsonv1.Unmarshal(b, &x); err != nil {
|
||||
return err
|
||||
}
|
||||
v.ж = &x
|
||||
return nil
|
||||
}
|
||||
|
||||
func (v PrefsView) ControlURL() string { return v.ж.ControlURL }
|
||||
func (v PrefsView) RouteAll() bool { return v.ж.RouteAll }
|
||||
func (v PrefsView) ExitNodeID() tailcfg.StableNodeID { return v.ж.ExitNodeID }
|
||||
func (v PrefsView) ExitNodeIP() netip.Addr { return v.ж.ExitNodeIP }
|
||||
// UnmarshalJSONFrom implements [jsonv2.UnmarshalerFrom].
|
||||
func (v *PrefsView) UnmarshalJSONFrom(dec *jsontext.Decoder) error {
|
||||
if v.ж != nil {
|
||||
return errors.New("already initialized")
|
||||
}
|
||||
var x Prefs
|
||||
if err := jsonv2.UnmarshalDecode(dec, &x); err != nil {
|
||||
return err
|
||||
}
|
||||
v.ж = &x
|
||||
return nil
|
||||
}
|
||||
|
||||
// ControlURL is the URL of the control server to use.
|
||||
//
|
||||
// If empty, the default for new installs, DefaultControlURL
|
||||
// is used. It's set non-empty once the daemon has been started
|
||||
// for the first time.
|
||||
//
|
||||
// TODO(apenwarr): Make it safe to update this with EditPrefs().
|
||||
// Right now, you have to pass it in the initial prefs in Start(),
|
||||
// which is the only code that actually uses the ControlURL value.
|
||||
// It would be more consistent to restart controlclient
|
||||
// automatically whenever this variable changes.
|
||||
//
|
||||
// Meanwhile, you have to provide this as part of
|
||||
// Options.LegacyMigrationPrefs or Options.UpdatePrefs when
|
||||
// calling Backend.Start().
|
||||
func (v PrefsView) ControlURL() string { return v.ж.ControlURL }
|
||||
|
||||
// RouteAll specifies whether to accept subnets advertised by
|
||||
// other nodes on the Tailscale network. Note that this does not
|
||||
// include default routes (0.0.0.0/0 and ::/0), those are
|
||||
// controlled by ExitNodeID/IP below.
|
||||
func (v PrefsView) RouteAll() bool { return v.ж.RouteAll }
|
||||
|
||||
// ExitNodeID and ExitNodeIP specify the node that should be used
|
||||
// as an exit node for internet traffic. At most one of these
|
||||
// should be non-zero.
|
||||
//
|
||||
// The preferred way to express the chosen node is ExitNodeID, but
|
||||
// in some cases it's not possible to use that ID (e.g. in the
|
||||
// linux CLI, before tailscaled has a netmap). For those
|
||||
// situations, we allow specifying the exit node by IP, and
|
||||
// ipnlocal.LocalBackend will translate the IP into an ID when the
|
||||
// node is found in the netmap.
|
||||
//
|
||||
// If the selected exit node doesn't exist (e.g. it's not part of
|
||||
// the current tailnet), or it doesn't offer exit node services, a
|
||||
// blackhole route will be installed on the local system to
|
||||
// prevent any traffic escaping to the local network.
|
||||
func (v PrefsView) ExitNodeID() tailcfg.StableNodeID { return v.ж.ExitNodeID }
|
||||
func (v PrefsView) ExitNodeIP() netip.Addr { return v.ж.ExitNodeIP }
|
||||
|
||||
// AutoExitNode is an optional expression that specifies whether and how
|
||||
// tailscaled should pick an exit node automatically.
|
||||
//
|
||||
// If specified, tailscaled will use an exit node based on the expression,
|
||||
// and will re-evaluate the selection periodically as network conditions,
|
||||
// available exit nodes, or policy settings change. A blackhole route will
|
||||
// be installed to prevent traffic from escaping to the local network until
|
||||
// an exit node is selected. It takes precedence over ExitNodeID and ExitNodeIP.
|
||||
//
|
||||
// If empty, tailscaled will not automatically select an exit node.
|
||||
//
|
||||
// If the specified expression is invalid or unsupported by the client,
|
||||
// it falls back to the behavior of [AnyExitNode].
|
||||
//
|
||||
// As of 2025-07-02, the only supported value is [AnyExitNode].
|
||||
// It's a string rather than a boolean to allow future extensibility
|
||||
// (e.g., AutoExitNode = "mullvad" or AutoExitNode = "geo:us").
|
||||
func (v PrefsView) AutoExitNode() ExitNodeExpression { return v.ж.AutoExitNode }
|
||||
|
||||
// InternalExitNodePrior is the most recently used ExitNodeID in string form. It is set by
|
||||
// the backend on transition from exit node on to off and used by the
|
||||
// backend.
|
||||
//
|
||||
// As an Internal field, it can't be set by LocalAPI clients, rather it is set indirectly
|
||||
// when the ExitNodeID value is zero'd and via the set-use-exit-node-enabled endpoint.
|
||||
func (v PrefsView) InternalExitNodePrior() tailcfg.StableNodeID { return v.ж.InternalExitNodePrior }
|
||||
func (v PrefsView) ExitNodeAllowLANAccess() bool { return v.ж.ExitNodeAllowLANAccess }
|
||||
func (v PrefsView) CorpDNS() bool { return v.ж.CorpDNS }
|
||||
func (v PrefsView) RunSSH() bool { return v.ж.RunSSH }
|
||||
func (v PrefsView) RunWebClient() bool { return v.ж.RunWebClient }
|
||||
func (v PrefsView) WantRunning() bool { return v.ж.WantRunning }
|
||||
func (v PrefsView) LoggedOut() bool { return v.ж.LoggedOut }
|
||||
func (v PrefsView) ShieldsUp() bool { return v.ж.ShieldsUp }
|
||||
func (v PrefsView) AdvertiseTags() views.Slice[string] { return views.SliceOf(v.ж.AdvertiseTags) }
|
||||
func (v PrefsView) Hostname() string { return v.ж.Hostname }
|
||||
func (v PrefsView) NotepadURLs() bool { return v.ж.NotepadURLs }
|
||||
func (v PrefsView) ForceDaemon() bool { return v.ж.ForceDaemon }
|
||||
func (v PrefsView) Egg() bool { return v.ж.Egg }
|
||||
|
||||
// ExitNodeAllowLANAccess indicates whether locally accessible subnets should be
|
||||
// routed directly or via the exit node.
|
||||
func (v PrefsView) ExitNodeAllowLANAccess() bool { return v.ж.ExitNodeAllowLANAccess }
|
||||
|
||||
// CorpDNS specifies whether to install the Tailscale network's
|
||||
// DNS configuration, if it exists.
|
||||
func (v PrefsView) CorpDNS() bool { return v.ж.CorpDNS }
|
||||
|
||||
// RunSSH bool is whether this node should run an SSH
|
||||
// server, permitting access to peers according to the
|
||||
// policies as configured by the Tailnet's admin(s).
|
||||
func (v PrefsView) RunSSH() bool { return v.ж.RunSSH }
|
||||
|
||||
// RunWebClient bool is whether this node should expose
|
||||
// its web client over Tailscale at port 5252,
|
||||
// permitting access to peers according to the
|
||||
// policies as configured by the Tailnet's admin(s).
|
||||
func (v PrefsView) RunWebClient() bool { return v.ж.RunWebClient }
|
||||
|
||||
// WantRunning indicates whether networking should be active on
|
||||
// this node.
|
||||
func (v PrefsView) WantRunning() bool { return v.ж.WantRunning }
|
||||
|
||||
// LoggedOut indicates whether the user intends to be logged out.
|
||||
// There are other reasons we may be logged out, including no valid
|
||||
// keys.
|
||||
// We need to remember this state so that, on next startup, we can
|
||||
// generate the "Login" vs "Connect" buttons correctly, without having
|
||||
// to contact the server to confirm our nodekey status first.
|
||||
func (v PrefsView) LoggedOut() bool { return v.ж.LoggedOut }
|
||||
|
||||
// ShieldsUp indicates whether to block all incoming connections,
|
||||
// regardless of the control-provided packet filter. If false, we
|
||||
// use the packet filter as provided. If true, we block incoming
|
||||
// connections. This overrides tailcfg.Hostinfo's ShieldsUp.
|
||||
func (v PrefsView) ShieldsUp() bool { return v.ж.ShieldsUp }
|
||||
|
||||
// AdvertiseTags specifies tags that should be applied to this node, for
|
||||
// purposes of ACL enforcement. These can be referenced from the ACL policy
|
||||
// document. Note that advertising a tag on the client doesn't guarantee
|
||||
// that the control server will allow the node to adopt that tag.
|
||||
func (v PrefsView) AdvertiseTags() views.Slice[string] { return views.SliceOf(v.ж.AdvertiseTags) }
|
||||
|
||||
// Hostname is the hostname to use for identifying the node. If
|
||||
// not set, os.Hostname is used.
|
||||
func (v PrefsView) Hostname() string { return v.ж.Hostname }
|
||||
|
||||
// NotepadURLs is a debugging setting that opens OAuth URLs in
|
||||
// notepad.exe on Windows, rather than loading them in a browser.
|
||||
//
|
||||
// apenwarr 2020-04-29: Unfortunately this is still needed sometimes.
|
||||
// Windows' default browser setting is sometimes screwy and this helps
|
||||
// users narrow it down a bit.
|
||||
func (v PrefsView) NotepadURLs() bool { return v.ж.NotepadURLs }
|
||||
|
||||
// ForceDaemon specifies whether a platform that normally
|
||||
// operates in "client mode" (that is, requires an active user
|
||||
// logged in with the GUI app running) should keep running after the
|
||||
// GUI ends and/or the user logs out.
|
||||
//
|
||||
// The only current applicable platform is Windows. This
|
||||
// forced Windows to go into "server mode" where Tailscale is
|
||||
// running even with no users logged in. This might also be
|
||||
// used for macOS in the future. This setting has no effect
|
||||
// for Linux/etc, which always operate in daemon mode.
|
||||
func (v PrefsView) ForceDaemon() bool { return v.ж.ForceDaemon }
|
||||
|
||||
// Egg is a optional debug flag.
|
||||
func (v PrefsView) Egg() bool { return v.ж.Egg }
|
||||
|
||||
// AdvertiseRoutes specifies CIDR prefixes to advertise into the
|
||||
// Tailscale network as reachable through the current
|
||||
// node.
|
||||
func (v PrefsView) AdvertiseRoutes() views.Slice[netip.Prefix] {
|
||||
return views.SliceOf(v.ж.AdvertiseRoutes)
|
||||
}
|
||||
|
||||
// AdvertiseServices specifies the list of services that this
|
||||
// node can serve as a destination for. Note that an advertised
|
||||
// service must still go through the approval process from the
|
||||
// control server.
|
||||
func (v PrefsView) AdvertiseServices() views.Slice[string] {
|
||||
return views.SliceOf(v.ж.AdvertiseServices)
|
||||
}
|
||||
func (v PrefsView) NoSNAT() bool { return v.ж.NoSNAT }
|
||||
func (v PrefsView) NoStatefulFiltering() opt.Bool { return v.ж.NoStatefulFiltering }
|
||||
|
||||
// Sync is whether this node should sync its configuration from
|
||||
// the control plane. If unset, this defaults to true.
|
||||
// This exists primarily for testing, to verify that netmap caching
|
||||
// and offline operation work correctly.
|
||||
func (v PrefsView) Sync() opt.Bool { return v.ж.Sync }
|
||||
|
||||
// NoSNAT specifies whether to source NAT traffic going to
|
||||
// destinations in AdvertiseRoutes. The default is to apply source
|
||||
// NAT, which makes the traffic appear to come from the router
|
||||
// machine rather than the peer's Tailscale IP.
|
||||
//
|
||||
// Disabling SNAT requires additional manual configuration in your
|
||||
// network to route Tailscale traffic back to the subnet relay
|
||||
// machine.
|
||||
//
|
||||
// Linux-only.
|
||||
func (v PrefsView) NoSNAT() bool { return v.ж.NoSNAT }
|
||||
|
||||
// NoStatefulFiltering specifies whether to apply stateful filtering when
|
||||
// advertising routes in AdvertiseRoutes. The default is to not apply
|
||||
// stateful filtering.
|
||||
//
|
||||
// To allow inbound connections from advertised routes, both NoSNAT and
|
||||
// NoStatefulFiltering must be true.
|
||||
//
|
||||
// This is an opt.Bool because it was first added after NoSNAT, with a
|
||||
// backfill based on the value of that parameter. The backfill has been
|
||||
// removed since then, but the field remains an opt.Bool.
|
||||
//
|
||||
// Linux-only.
|
||||
func (v PrefsView) NoStatefulFiltering() opt.Bool { return v.ж.NoStatefulFiltering }
|
||||
|
||||
// NetfilterMode specifies how much to manage netfilter rules for
|
||||
// Tailscale, if at all.
|
||||
func (v PrefsView) NetfilterMode() preftype.NetfilterMode { return v.ж.NetfilterMode }
|
||||
func (v PrefsView) OperatorUser() string { return v.ж.OperatorUser }
|
||||
func (v PrefsView) ProfileName() string { return v.ж.ProfileName }
|
||||
func (v PrefsView) AutoUpdate() AutoUpdatePrefs { return v.ж.AutoUpdate }
|
||||
func (v PrefsView) AppConnector() AppConnectorPrefs { return v.ж.AppConnector }
|
||||
func (v PrefsView) PostureChecking() bool { return v.ж.PostureChecking }
|
||||
func (v PrefsView) NetfilterKind() string { return v.ж.NetfilterKind }
|
||||
|
||||
// OperatorUser is the local machine user name who is allowed to
|
||||
// operate tailscaled without being root or using sudo.
|
||||
func (v PrefsView) OperatorUser() string { return v.ж.OperatorUser }
|
||||
|
||||
// ProfileName is the desired name of the profile. If empty, then the user's
|
||||
// LoginName is used. It is only used for display purposes in the client UI
|
||||
// and CLI.
|
||||
func (v PrefsView) ProfileName() string { return v.ж.ProfileName }
|
||||
|
||||
// AutoUpdate sets the auto-update preferences for the node agent. See
|
||||
// AutoUpdatePrefs docs for more details.
|
||||
func (v PrefsView) AutoUpdate() AutoUpdatePrefs { return v.ж.AutoUpdate }
|
||||
|
||||
// AppConnector sets the app connector preferences for the node agent. See
|
||||
// AppConnectorPrefs docs for more details.
|
||||
func (v PrefsView) AppConnector() AppConnectorPrefs { return v.ж.AppConnector }
|
||||
|
||||
// PostureChecking enables the collection of information used for device
|
||||
// posture checks.
|
||||
//
|
||||
// Note: this should be named ReportPosture, but it was shipped as
|
||||
// PostureChecking in some early releases and this JSON field is written to
|
||||
// disk, so we just keep its old name. (akin to CorpDNS which is an internal
|
||||
// pref name that doesn't match the public interface)
|
||||
func (v PrefsView) PostureChecking() bool { return v.ж.PostureChecking }
|
||||
|
||||
// NetfilterKind specifies what netfilter implementation to use.
|
||||
//
|
||||
// It can be "iptables", "nftables", or "" to auto-detect.
|
||||
//
|
||||
// Linux-only.
|
||||
func (v PrefsView) NetfilterKind() string { return v.ж.NetfilterKind }
|
||||
|
||||
// DriveShares are the configured DriveShares, stored in increasing order
|
||||
// by name.
|
||||
func (v PrefsView) DriveShares() views.SliceView[*drive.Share, drive.ShareView] {
|
||||
return views.SliceOfViews[*drive.Share, drive.ShareView](v.ж.DriveShares)
|
||||
}
|
||||
|
||||
// RelayServerPort is the UDP port number for the relay server to bind to,
|
||||
// on all interfaces. A non-nil zero value signifies a random unused port
|
||||
// should be used. A nil value signifies relay server functionality
|
||||
// should be disabled.
|
||||
func (v PrefsView) RelayServerPort() views.ValuePointer[uint16] {
|
||||
return views.ValuePointerOf(v.ж.RelayServerPort)
|
||||
}
|
||||
|
||||
// RelayServerStaticEndpoints are static IP:port endpoints to advertise as
|
||||
// candidates for relay connections. Only relevant when RelayServerPort is
|
||||
// non-nil.
|
||||
func (v PrefsView) RelayServerStaticEndpoints() views.Slice[netip.AddrPort] {
|
||||
return views.SliceOf(v.ж.RelayServerStaticEndpoints)
|
||||
}
|
||||
|
||||
// AllowSingleHosts was a legacy field that was always true
|
||||
// for the past 4.5 years. It controlled whether Tailscale
|
||||
// peers got /32 or /128 routes for each other.
|
||||
// As of 2024-05-17 we're starting to ignore it, but to let
|
||||
// people still downgrade Tailscale versions and not break
|
||||
// all peer-to-peer networking we still write it to disk (as JSON)
|
||||
// so it can be loaded back by old versions.
|
||||
// TODO(bradfitz): delete this in 2025 sometime. See #12058.
|
||||
func (v PrefsView) AllowSingleHosts() marshalAsTrueInJSON { return v.ж.AllowSingleHosts }
|
||||
func (v PrefsView) Persist() persist.PersistView { return v.ж.Persist.View() }
|
||||
|
||||
// The Persist field is named 'Config' in the file for backward
|
||||
// compatibility with earlier versions.
|
||||
// TODO(apenwarr): We should move this out of here, it's not a pref.
|
||||
//
|
||||
// We can maybe do that once we're sure which module should persist
|
||||
// it (backend or frontend?)
|
||||
func (v PrefsView) Persist() persist.PersistView { return v.ж.Persist.View() }
|
||||
|
||||
// A compilation failure here means this code must be regenerated, with the command at the top of this file.
|
||||
var _PrefsViewNeedsRegeneration = Prefs(struct {
|
||||
ControlURL string
|
||||
RouteAll bool
|
||||
ExitNodeID tailcfg.StableNodeID
|
||||
ExitNodeIP netip.Addr
|
||||
InternalExitNodePrior tailcfg.StableNodeID
|
||||
ExitNodeAllowLANAccess bool
|
||||
CorpDNS bool
|
||||
RunSSH bool
|
||||
RunWebClient bool
|
||||
WantRunning bool
|
||||
LoggedOut bool
|
||||
ShieldsUp bool
|
||||
AdvertiseTags []string
|
||||
Hostname string
|
||||
NotepadURLs bool
|
||||
ForceDaemon bool
|
||||
Egg bool
|
||||
AdvertiseRoutes []netip.Prefix
|
||||
AdvertiseServices []string
|
||||
NoSNAT bool
|
||||
NoStatefulFiltering opt.Bool
|
||||
NetfilterMode preftype.NetfilterMode
|
||||
OperatorUser string
|
||||
ProfileName string
|
||||
AutoUpdate AutoUpdatePrefs
|
||||
AppConnector AppConnectorPrefs
|
||||
PostureChecking bool
|
||||
NetfilterKind string
|
||||
DriveShares []*drive.Share
|
||||
AllowSingleHosts marshalAsTrueInJSON
|
||||
Persist *persist.Persist
|
||||
ControlURL string
|
||||
RouteAll bool
|
||||
ExitNodeID tailcfg.StableNodeID
|
||||
ExitNodeIP netip.Addr
|
||||
AutoExitNode ExitNodeExpression
|
||||
InternalExitNodePrior tailcfg.StableNodeID
|
||||
ExitNodeAllowLANAccess bool
|
||||
CorpDNS bool
|
||||
RunSSH bool
|
||||
RunWebClient bool
|
||||
WantRunning bool
|
||||
LoggedOut bool
|
||||
ShieldsUp bool
|
||||
AdvertiseTags []string
|
||||
Hostname string
|
||||
NotepadURLs bool
|
||||
ForceDaemon bool
|
||||
Egg bool
|
||||
AdvertiseRoutes []netip.Prefix
|
||||
AdvertiseServices []string
|
||||
Sync opt.Bool
|
||||
NoSNAT bool
|
||||
NoStatefulFiltering opt.Bool
|
||||
NetfilterMode preftype.NetfilterMode
|
||||
OperatorUser string
|
||||
ProfileName string
|
||||
AutoUpdate AutoUpdatePrefs
|
||||
AppConnector AppConnectorPrefs
|
||||
PostureChecking bool
|
||||
NetfilterKind string
|
||||
DriveShares []*drive.Share
|
||||
RelayServerPort *uint16
|
||||
RelayServerStaticEndpoints []netip.AddrPort
|
||||
AllowSingleHosts marshalAsTrueInJSON
|
||||
Persist *persist.Persist
|
||||
}{})
|
||||
|
||||
// View returns a read-only view of ServeConfig.
|
||||
@@ -232,8 +538,17 @@ func (v ServeConfigView) AsStruct() *ServeConfig {
|
||||
return v.ж.Clone()
|
||||
}
|
||||
|
||||
func (v ServeConfigView) MarshalJSON() ([]byte, error) { return json.Marshal(v.ж) }
|
||||
// MarshalJSON implements [jsonv1.Marshaler].
|
||||
func (v ServeConfigView) MarshalJSON() ([]byte, error) {
|
||||
return jsonv1.Marshal(v.ж)
|
||||
}
|
||||
|
||||
// MarshalJSONTo implements [jsonv2.MarshalerTo].
|
||||
func (v ServeConfigView) MarshalJSONTo(enc *jsontext.Encoder) error {
|
||||
return jsonv2.MarshalEncode(enc, v.ж)
|
||||
}
|
||||
|
||||
// UnmarshalJSON implements [jsonv1.Unmarshaler].
|
||||
func (v *ServeConfigView) UnmarshalJSON(b []byte) error {
|
||||
if v.ж != nil {
|
||||
return errors.New("already initialized")
|
||||
@@ -242,40 +557,72 @@ func (v *ServeConfigView) UnmarshalJSON(b []byte) error {
|
||||
return nil
|
||||
}
|
||||
var x ServeConfig
|
||||
if err := json.Unmarshal(b, &x); err != nil {
|
||||
if err := jsonv1.Unmarshal(b, &x); err != nil {
|
||||
return err
|
||||
}
|
||||
v.ж = &x
|
||||
return nil
|
||||
}
|
||||
|
||||
// UnmarshalJSONFrom implements [jsonv2.UnmarshalerFrom].
|
||||
func (v *ServeConfigView) UnmarshalJSONFrom(dec *jsontext.Decoder) error {
|
||||
if v.ж != nil {
|
||||
return errors.New("already initialized")
|
||||
}
|
||||
var x ServeConfig
|
||||
if err := jsonv2.UnmarshalDecode(dec, &x); err != nil {
|
||||
return err
|
||||
}
|
||||
v.ж = &x
|
||||
return nil
|
||||
}
|
||||
|
||||
// TCP are the list of TCP port numbers that tailscaled should handle for
|
||||
// the Tailscale IP addresses. (not subnet routers, etc)
|
||||
func (v ServeConfigView) TCP() views.MapFn[uint16, *TCPPortHandler, TCPPortHandlerView] {
|
||||
return views.MapFnOf(v.ж.TCP, func(t *TCPPortHandler) TCPPortHandlerView {
|
||||
return t.View()
|
||||
})
|
||||
}
|
||||
|
||||
// Web maps from "$SNI_NAME:$PORT" to a set of HTTP handlers
|
||||
// keyed by mount point ("/", "/foo", etc)
|
||||
func (v ServeConfigView) Web() views.MapFn[HostPort, *WebServerConfig, WebServerConfigView] {
|
||||
return views.MapFnOf(v.ж.Web, func(t *WebServerConfig) WebServerConfigView {
|
||||
return t.View()
|
||||
})
|
||||
}
|
||||
|
||||
// Services maps from service name (in the form "svc:dns-label") to a ServiceConfig.
|
||||
// Which describes the L3, L4, and L7 forwarding information for the service.
|
||||
func (v ServeConfigView) Services() views.MapFn[tailcfg.ServiceName, *ServiceConfig, ServiceConfigView] {
|
||||
return views.MapFnOf(v.ж.Services, func(t *ServiceConfig) ServiceConfigView {
|
||||
return t.View()
|
||||
})
|
||||
}
|
||||
|
||||
// AllowFunnel is the set of SNI:port values for which funnel
|
||||
// traffic is allowed, from trusted ingress peers.
|
||||
func (v ServeConfigView) AllowFunnel() views.Map[HostPort, bool] {
|
||||
return views.MapOf(v.ж.AllowFunnel)
|
||||
}
|
||||
|
||||
// Foreground is a map of an IPN Bus session ID to an alternate foreground serve config that's valid for the
|
||||
// life of that WatchIPNBus session ID. This allows the config to specify ephemeral configs that are used
|
||||
// in the CLI's foreground mode to ensure ungraceful shutdowns of either the client or the LocalBackend does not
|
||||
// expose ports that users are not aware of. In practice this contains any serve config set via 'tailscale
|
||||
// serve' command run without the '--bg' flag. ServeConfig contained by Foreground is not expected itself to contain
|
||||
// another Foreground block.
|
||||
func (v ServeConfigView) Foreground() views.MapFn[string, *ServeConfig, ServeConfigView] {
|
||||
return views.MapFnOf(v.ж.Foreground, func(t *ServeConfig) ServeConfigView {
|
||||
return t.View()
|
||||
})
|
||||
}
|
||||
|
||||
// ETag is the checksum of the serve config that's populated
|
||||
// by the LocalClient through the HTTP ETag header during a
|
||||
// GetServeConfig request and is translated to an If-Match header
|
||||
// during a SetServeConfig request.
|
||||
func (v ServeConfigView) ETag() string { return v.ж.ETag }
|
||||
|
||||
// A compilation failure here means this code must be regenerated, with the command at the top of this file.
|
||||
@@ -316,8 +663,17 @@ func (v ServiceConfigView) AsStruct() *ServiceConfig {
|
||||
return v.ж.Clone()
|
||||
}
|
||||
|
||||
func (v ServiceConfigView) MarshalJSON() ([]byte, error) { return json.Marshal(v.ж) }
|
||||
// MarshalJSON implements [jsonv1.Marshaler].
|
||||
func (v ServiceConfigView) MarshalJSON() ([]byte, error) {
|
||||
return jsonv1.Marshal(v.ж)
|
||||
}
|
||||
|
||||
// MarshalJSONTo implements [jsonv2.MarshalerTo].
|
||||
func (v ServiceConfigView) MarshalJSONTo(enc *jsontext.Encoder) error {
|
||||
return jsonv2.MarshalEncode(enc, v.ж)
|
||||
}
|
||||
|
||||
// UnmarshalJSON implements [jsonv1.Unmarshaler].
|
||||
func (v *ServiceConfigView) UnmarshalJSON(b []byte) error {
|
||||
if v.ж != nil {
|
||||
return errors.New("already initialized")
|
||||
@@ -326,24 +682,43 @@ func (v *ServiceConfigView) UnmarshalJSON(b []byte) error {
|
||||
return nil
|
||||
}
|
||||
var x ServiceConfig
|
||||
if err := json.Unmarshal(b, &x); err != nil {
|
||||
if err := jsonv1.Unmarshal(b, &x); err != nil {
|
||||
return err
|
||||
}
|
||||
v.ж = &x
|
||||
return nil
|
||||
}
|
||||
|
||||
// UnmarshalJSONFrom implements [jsonv2.UnmarshalerFrom].
|
||||
func (v *ServiceConfigView) UnmarshalJSONFrom(dec *jsontext.Decoder) error {
|
||||
if v.ж != nil {
|
||||
return errors.New("already initialized")
|
||||
}
|
||||
var x ServiceConfig
|
||||
if err := jsonv2.UnmarshalDecode(dec, &x); err != nil {
|
||||
return err
|
||||
}
|
||||
v.ж = &x
|
||||
return nil
|
||||
}
|
||||
|
||||
// TCP are the list of TCP port numbers that tailscaled should handle for
|
||||
// the Tailscale IP addresses. (not subnet routers, etc)
|
||||
func (v ServiceConfigView) TCP() views.MapFn[uint16, *TCPPortHandler, TCPPortHandlerView] {
|
||||
return views.MapFnOf(v.ж.TCP, func(t *TCPPortHandler) TCPPortHandlerView {
|
||||
return t.View()
|
||||
})
|
||||
}
|
||||
|
||||
// Web maps from "$SNI_NAME:$PORT" to a set of HTTP handlers
|
||||
// keyed by mount point ("/", "/foo", etc)
|
||||
func (v ServiceConfigView) Web() views.MapFn[HostPort, *WebServerConfig, WebServerConfigView] {
|
||||
return views.MapFnOf(v.ж.Web, func(t *WebServerConfig) WebServerConfigView {
|
||||
return t.View()
|
||||
})
|
||||
}
|
||||
|
||||
// Tun determines if the service should be using L3 forwarding (Tun mode).
|
||||
func (v ServiceConfigView) Tun() bool { return v.ж.Tun }
|
||||
|
||||
// A compilation failure here means this code must be regenerated, with the command at the top of this file.
|
||||
@@ -381,8 +756,17 @@ func (v TCPPortHandlerView) AsStruct() *TCPPortHandler {
|
||||
return v.ж.Clone()
|
||||
}
|
||||
|
||||
func (v TCPPortHandlerView) MarshalJSON() ([]byte, error) { return json.Marshal(v.ж) }
|
||||
// MarshalJSON implements [jsonv1.Marshaler].
|
||||
func (v TCPPortHandlerView) MarshalJSON() ([]byte, error) {
|
||||
return jsonv1.Marshal(v.ж)
|
||||
}
|
||||
|
||||
// MarshalJSONTo implements [jsonv2.MarshalerTo].
|
||||
func (v TCPPortHandlerView) MarshalJSONTo(enc *jsontext.Encoder) error {
|
||||
return jsonv2.MarshalEncode(enc, v.ж)
|
||||
}
|
||||
|
||||
// UnmarshalJSON implements [jsonv1.Unmarshaler].
|
||||
func (v *TCPPortHandlerView) UnmarshalJSON(b []byte) error {
|
||||
if v.ж != nil {
|
||||
return errors.New("already initialized")
|
||||
@@ -391,24 +775,64 @@ func (v *TCPPortHandlerView) UnmarshalJSON(b []byte) error {
|
||||
return nil
|
||||
}
|
||||
var x TCPPortHandler
|
||||
if err := json.Unmarshal(b, &x); err != nil {
|
||||
if err := jsonv1.Unmarshal(b, &x); err != nil {
|
||||
return err
|
||||
}
|
||||
v.ж = &x
|
||||
return nil
|
||||
}
|
||||
|
||||
func (v TCPPortHandlerView) HTTPS() bool { return v.ж.HTTPS }
|
||||
func (v TCPPortHandlerView) HTTP() bool { return v.ж.HTTP }
|
||||
func (v TCPPortHandlerView) TCPForward() string { return v.ж.TCPForward }
|
||||
// UnmarshalJSONFrom implements [jsonv2.UnmarshalerFrom].
|
||||
func (v *TCPPortHandlerView) UnmarshalJSONFrom(dec *jsontext.Decoder) error {
|
||||
if v.ж != nil {
|
||||
return errors.New("already initialized")
|
||||
}
|
||||
var x TCPPortHandler
|
||||
if err := jsonv2.UnmarshalDecode(dec, &x); err != nil {
|
||||
return err
|
||||
}
|
||||
v.ж = &x
|
||||
return nil
|
||||
}
|
||||
|
||||
// HTTPS, if true, means that tailscaled should handle this connection as an
|
||||
// HTTPS request as configured by ServeConfig.Web.
|
||||
//
|
||||
// It is mutually exclusive with TCPForward.
|
||||
func (v TCPPortHandlerView) HTTPS() bool { return v.ж.HTTPS }
|
||||
|
||||
// HTTP, if true, means that tailscaled should handle this connection as an
|
||||
// HTTP request as configured by ServeConfig.Web.
|
||||
//
|
||||
// It is mutually exclusive with TCPForward.
|
||||
func (v TCPPortHandlerView) HTTP() bool { return v.ж.HTTP }
|
||||
|
||||
// TCPForward is the IP:port to forward TCP connections to.
|
||||
// Whether or not TLS is terminated by tailscaled depends on
|
||||
// TerminateTLS.
|
||||
//
|
||||
// It is mutually exclusive with HTTPS.
|
||||
func (v TCPPortHandlerView) TCPForward() string { return v.ж.TCPForward }
|
||||
|
||||
// TerminateTLS, if non-empty, means that tailscaled should terminate the
|
||||
// TLS connections before forwarding them to TCPForward, permitting only the
|
||||
// SNI name with this value. It is only used if TCPForward is non-empty.
|
||||
// (the HTTPS mode uses ServeConfig.Web)
|
||||
func (v TCPPortHandlerView) TerminateTLS() string { return v.ж.TerminateTLS }
|
||||
|
||||
// ProxyProtocol indicates whether to send a PROXY protocol header
|
||||
// before forwarding the connection to TCPForward.
|
||||
//
|
||||
// This is only valid if TCPForward is non-empty.
|
||||
func (v TCPPortHandlerView) ProxyProtocol() int { return v.ж.ProxyProtocol }
|
||||
|
||||
// A compilation failure here means this code must be regenerated, with the command at the top of this file.
|
||||
var _TCPPortHandlerViewNeedsRegeneration = TCPPortHandler(struct {
|
||||
HTTPS bool
|
||||
HTTP bool
|
||||
TCPForward string
|
||||
TerminateTLS string
|
||||
HTTPS bool
|
||||
HTTP bool
|
||||
TCPForward string
|
||||
TerminateTLS string
|
||||
ProxyProtocol int
|
||||
}{})
|
||||
|
||||
// View returns a read-only view of HTTPHandler.
|
||||
@@ -439,8 +863,17 @@ func (v HTTPHandlerView) AsStruct() *HTTPHandler {
|
||||
return v.ж.Clone()
|
||||
}
|
||||
|
||||
func (v HTTPHandlerView) MarshalJSON() ([]byte, error) { return json.Marshal(v.ж) }
|
||||
// MarshalJSON implements [jsonv1.Marshaler].
|
||||
func (v HTTPHandlerView) MarshalJSON() ([]byte, error) {
|
||||
return jsonv1.Marshal(v.ж)
|
||||
}
|
||||
|
||||
// MarshalJSONTo implements [jsonv2.MarshalerTo].
|
||||
func (v HTTPHandlerView) MarshalJSONTo(enc *jsontext.Encoder) error {
|
||||
return jsonv2.MarshalEncode(enc, v.ж)
|
||||
}
|
||||
|
||||
// UnmarshalJSON implements [jsonv1.Unmarshaler].
|
||||
func (v *HTTPHandlerView) UnmarshalJSON(b []byte) error {
|
||||
if v.ж != nil {
|
||||
return errors.New("already initialized")
|
||||
@@ -449,22 +882,56 @@ func (v *HTTPHandlerView) UnmarshalJSON(b []byte) error {
|
||||
return nil
|
||||
}
|
||||
var x HTTPHandler
|
||||
if err := json.Unmarshal(b, &x); err != nil {
|
||||
if err := jsonv1.Unmarshal(b, &x); err != nil {
|
||||
return err
|
||||
}
|
||||
v.ж = &x
|
||||
return nil
|
||||
}
|
||||
|
||||
func (v HTTPHandlerView) Path() string { return v.ж.Path }
|
||||
// UnmarshalJSONFrom implements [jsonv2.UnmarshalerFrom].
|
||||
func (v *HTTPHandlerView) UnmarshalJSONFrom(dec *jsontext.Decoder) error {
|
||||
if v.ж != nil {
|
||||
return errors.New("already initialized")
|
||||
}
|
||||
var x HTTPHandler
|
||||
if err := jsonv2.UnmarshalDecode(dec, &x); err != nil {
|
||||
return err
|
||||
}
|
||||
v.ж = &x
|
||||
return nil
|
||||
}
|
||||
|
||||
// absolute path to directory or file to serve
|
||||
func (v HTTPHandlerView) Path() string { return v.ж.Path }
|
||||
|
||||
// http://localhost:3000/, localhost:3030, 3030
|
||||
func (v HTTPHandlerView) Proxy() string { return v.ж.Proxy }
|
||||
func (v HTTPHandlerView) Text() string { return v.ж.Text }
|
||||
|
||||
// plaintext to serve (primarily for testing)
|
||||
func (v HTTPHandlerView) Text() string { return v.ж.Text }
|
||||
|
||||
// peer capabilities to forward in grant header, e.g. example.com/cap/mon
|
||||
func (v HTTPHandlerView) AcceptAppCaps() views.Slice[tailcfg.PeerCapability] {
|
||||
return views.SliceOf(v.ж.AcceptAppCaps)
|
||||
}
|
||||
|
||||
// Redirect, if not empty, is the target URL to redirect requests to.
|
||||
// By default, we redirect with HTTP 302 (Found) status.
|
||||
// If Redirect starts with '<httpcode>:', then we use that status instead.
|
||||
//
|
||||
// The target URL supports the following expansion variables:
|
||||
// - ${HOST}: replaced with the request's Host header value
|
||||
// - ${REQUEST_URI}: replaced with the request's full URI (path and query string)
|
||||
func (v HTTPHandlerView) Redirect() string { return v.ж.Redirect }
|
||||
|
||||
// A compilation failure here means this code must be regenerated, with the command at the top of this file.
|
||||
var _HTTPHandlerViewNeedsRegeneration = HTTPHandler(struct {
|
||||
Path string
|
||||
Proxy string
|
||||
Text string
|
||||
Path string
|
||||
Proxy string
|
||||
Text string
|
||||
AcceptAppCaps []tailcfg.PeerCapability
|
||||
Redirect string
|
||||
}{})
|
||||
|
||||
// View returns a read-only view of WebServerConfig.
|
||||
@@ -495,8 +962,17 @@ func (v WebServerConfigView) AsStruct() *WebServerConfig {
|
||||
return v.ж.Clone()
|
||||
}
|
||||
|
||||
func (v WebServerConfigView) MarshalJSON() ([]byte, error) { return json.Marshal(v.ж) }
|
||||
// MarshalJSON implements [jsonv1.Marshaler].
|
||||
func (v WebServerConfigView) MarshalJSON() ([]byte, error) {
|
||||
return jsonv1.Marshal(v.ж)
|
||||
}
|
||||
|
||||
// MarshalJSONTo implements [jsonv2.MarshalerTo].
|
||||
func (v WebServerConfigView) MarshalJSONTo(enc *jsontext.Encoder) error {
|
||||
return jsonv2.MarshalEncode(enc, v.ж)
|
||||
}
|
||||
|
||||
// UnmarshalJSON implements [jsonv1.Unmarshaler].
|
||||
func (v *WebServerConfigView) UnmarshalJSON(b []byte) error {
|
||||
if v.ж != nil {
|
||||
return errors.New("already initialized")
|
||||
@@ -505,13 +981,27 @@ func (v *WebServerConfigView) UnmarshalJSON(b []byte) error {
|
||||
return nil
|
||||
}
|
||||
var x WebServerConfig
|
||||
if err := json.Unmarshal(b, &x); err != nil {
|
||||
if err := jsonv1.Unmarshal(b, &x); err != nil {
|
||||
return err
|
||||
}
|
||||
v.ж = &x
|
||||
return nil
|
||||
}
|
||||
|
||||
// UnmarshalJSONFrom implements [jsonv2.UnmarshalerFrom].
|
||||
func (v *WebServerConfigView) UnmarshalJSONFrom(dec *jsontext.Decoder) error {
|
||||
if v.ж != nil {
|
||||
return errors.New("already initialized")
|
||||
}
|
||||
var x WebServerConfig
|
||||
if err := jsonv2.UnmarshalDecode(dec, &x); err != nil {
|
||||
return err
|
||||
}
|
||||
v.ж = &x
|
||||
return nil
|
||||
}
|
||||
|
||||
// mountPoint => handler
|
||||
func (v WebServerConfigView) Handlers() views.MapFn[string, *HTTPHandler, HTTPHandlerView] {
|
||||
return views.MapFnOf(v.ж.Handlers, func(t *HTTPHandler) HTTPHandlerView {
|
||||
return t.View()
|
||||
|
||||
28
vendor/tailscale.com/ipn/ipnauth/ipnauth.go
generated
vendored
28
vendor/tailscale.com/ipn/ipnauth/ipnauth.go
generated
vendored
@@ -14,8 +14,8 @@ import (
|
||||
"runtime"
|
||||
"strconv"
|
||||
|
||||
"github.com/tailscale/peercred"
|
||||
"tailscale.com/envknob"
|
||||
"tailscale.com/feature/buildfeatures"
|
||||
"tailscale.com/ipn"
|
||||
"tailscale.com/safesocket"
|
||||
"tailscale.com/types/logger"
|
||||
@@ -63,8 +63,8 @@ type ConnIdentity struct {
|
||||
notWindows bool // runtime.GOOS != "windows"
|
||||
|
||||
// Fields used when NotWindows:
|
||||
isUnixSock bool // Conn is a *net.UnixConn
|
||||
creds *peercred.Creds // or nil
|
||||
isUnixSock bool // Conn is a *net.UnixConn
|
||||
creds PeerCreds // or nil if peercred.Get was not implemented on this OS
|
||||
|
||||
// Used on Windows:
|
||||
// TODO(bradfitz): merge these into the peercreds package and
|
||||
@@ -78,6 +78,13 @@ type ConnIdentity struct {
|
||||
// It's suitable for passing to LookupUserFromID (os/user.LookupId) on any
|
||||
// operating system.
|
||||
func (ci *ConnIdentity) WindowsUserID() ipn.WindowsUserID {
|
||||
if !buildfeatures.HasDebug && runtime.GOOS != "windows" {
|
||||
// This function is only implemented on non-Windows for simulating
|
||||
// Windows in tests. But that test (per comments below) is broken
|
||||
// anyway. So disable this testing path in non-debug builds
|
||||
// and just do the thing that optimizes away.
|
||||
return ""
|
||||
}
|
||||
if envknob.GOOS() != "windows" {
|
||||
return ""
|
||||
}
|
||||
@@ -97,9 +104,18 @@ func (ci *ConnIdentity) WindowsUserID() ipn.WindowsUserID {
|
||||
return ""
|
||||
}
|
||||
|
||||
func (ci *ConnIdentity) Pid() int { return ci.pid }
|
||||
func (ci *ConnIdentity) IsUnixSock() bool { return ci.isUnixSock }
|
||||
func (ci *ConnIdentity) Creds() *peercred.Creds { return ci.creds }
|
||||
func (ci *ConnIdentity) Pid() int { return ci.pid }
|
||||
func (ci *ConnIdentity) IsUnixSock() bool { return ci.isUnixSock }
|
||||
func (ci *ConnIdentity) Creds() PeerCreds { return ci.creds }
|
||||
|
||||
// PeerCreds is the interface for a github.com/tailscale/peercred.Creds,
|
||||
// if linked into the binary.
|
||||
//
|
||||
// (It's not used on some platforms, or if ts_omit_unixsocketidentity is set.)
|
||||
type PeerCreds interface {
|
||||
UserID() (uid string, ok bool)
|
||||
PID() (pid int, ok bool)
|
||||
}
|
||||
|
||||
var metricIssue869Workaround = clientmetric.NewCounter("issue_869_workaround")
|
||||
|
||||
|
||||
@@ -1,14 +1,13 @@
|
||||
// Copyright (c) Tailscale Inc & AUTHORS
|
||||
// SPDX-License-Identifier: BSD-3-Clause
|
||||
|
||||
//go:build !windows
|
||||
//go:build !windows && ts_omit_unixsocketidentity
|
||||
|
||||
package ipnauth
|
||||
|
||||
import (
|
||||
"net"
|
||||
|
||||
"github.com/tailscale/peercred"
|
||||
"tailscale.com/types/logger"
|
||||
)
|
||||
|
||||
@@ -16,12 +15,7 @@ import (
|
||||
// based on the user who owns the other end of the connection.
|
||||
// and couldn't. The returned connIdentity has NotWindows set to true.
|
||||
func GetConnIdentity(_ logger.Logf, c net.Conn) (ci *ConnIdentity, err error) {
|
||||
ci = &ConnIdentity{conn: c, notWindows: true}
|
||||
_, ci.isUnixSock = c.(*net.UnixConn)
|
||||
if ci.creds, _ = peercred.Get(c); ci.creds != nil {
|
||||
ci.pid, _ = ci.creds.PID()
|
||||
}
|
||||
return ci, nil
|
||||
return &ConnIdentity{conn: c, notWindows: true}, nil
|
||||
}
|
||||
|
||||
// WindowsToken is unsupported when GOOS != windows and always returns
|
||||
37
vendor/tailscale.com/ipn/ipnauth/ipnauth_unix_creds.go
generated
vendored
Normal file
37
vendor/tailscale.com/ipn/ipnauth/ipnauth_unix_creds.go
generated
vendored
Normal file
@@ -0,0 +1,37 @@
|
||||
// Copyright (c) Tailscale Inc & AUTHORS
|
||||
// SPDX-License-Identifier: BSD-3-Clause
|
||||
|
||||
//go:build !windows && !ts_omit_unixsocketidentity
|
||||
|
||||
package ipnauth
|
||||
|
||||
import (
|
||||
"net"
|
||||
|
||||
"github.com/tailscale/peercred"
|
||||
"tailscale.com/types/logger"
|
||||
)
|
||||
|
||||
// GetConnIdentity extracts the identity information from the connection
|
||||
// based on the user who owns the other end of the connection.
|
||||
// and couldn't. The returned connIdentity has NotWindows set to true.
|
||||
func GetConnIdentity(_ logger.Logf, c net.Conn) (ci *ConnIdentity, err error) {
|
||||
ci = &ConnIdentity{conn: c, notWindows: true}
|
||||
_, ci.isUnixSock = c.(*net.UnixConn)
|
||||
if creds, err := peercred.Get(c); err == nil {
|
||||
ci.creds = creds
|
||||
ci.pid, _ = ci.creds.PID()
|
||||
} else if err == peercred.ErrNotImplemented {
|
||||
// peercred.Get is not implemented on this OS (such as OpenBSD)
|
||||
// Just leave creds as nil, as documented.
|
||||
} else {
|
||||
return nil, err
|
||||
}
|
||||
return ci, nil
|
||||
}
|
||||
|
||||
// WindowsToken is unsupported when GOOS != windows and always returns
|
||||
// ErrNotImplemented.
|
||||
func (ci *ConnIdentity) WindowsToken() (WindowsToken, error) {
|
||||
return nil, ErrNotImplemented
|
||||
}
|
||||
29
vendor/tailscale.com/ipn/ipnauth/ipnauth_windows.go
generated
vendored
29
vendor/tailscale.com/ipn/ipnauth/ipnauth_windows.go
generated
vendored
@@ -25,6 +25,12 @@ func GetConnIdentity(logf logger.Logf, c net.Conn) (ci *ConnIdentity, err error)
|
||||
if !ok {
|
||||
return nil, fmt.Errorf("not a WindowsClientConn: %T", c)
|
||||
}
|
||||
if err := wcc.CheckToken(); err != nil {
|
||||
// Failure to obtain a token means the client cannot be authenticated.
|
||||
// We don't care about the exact error, but it typically means the client
|
||||
// attempted to connect at the Anonymous impersonation level.
|
||||
return nil, fmt.Errorf("authentication failed: %w", err)
|
||||
}
|
||||
ci.pid, err = wcc.ClientPID()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
@@ -169,26 +175,13 @@ func (t *token) IsUID(uid ipn.WindowsUserID) bool {
|
||||
// WindowsToken returns the WindowsToken representing the security context
|
||||
// of the connection's client.
|
||||
func (ci *ConnIdentity) WindowsToken() (WindowsToken, error) {
|
||||
var wcc *safesocket.WindowsClientConn
|
||||
var ok bool
|
||||
if wcc, ok = ci.conn.(*safesocket.WindowsClientConn); !ok {
|
||||
wcc, ok := ci.conn.(*safesocket.WindowsClientConn)
|
||||
if !ok {
|
||||
return nil, fmt.Errorf("not a WindowsClientConn: %T", ci.conn)
|
||||
}
|
||||
|
||||
// We duplicate the token's handle so that the WindowsToken we return may have
|
||||
// a lifetime independent from the original connection.
|
||||
var h windows.Handle
|
||||
if err := windows.DuplicateHandle(
|
||||
windows.CurrentProcess(),
|
||||
windows.Handle(wcc.Token()),
|
||||
windows.CurrentProcess(),
|
||||
&h,
|
||||
0,
|
||||
false,
|
||||
windows.DUPLICATE_SAME_ACCESS,
|
||||
); err != nil {
|
||||
token, err := wcc.Token()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return newToken(windows.Token(h)), nil
|
||||
return newToken(token), nil
|
||||
}
|
||||
|
||||
11
vendor/tailscale.com/ipn/ipnauth/policy.go
generated
vendored
11
vendor/tailscale.com/ipn/ipnauth/policy.go
generated
vendored
@@ -8,9 +8,11 @@ import (
|
||||
"fmt"
|
||||
|
||||
"tailscale.com/client/tailscale/apitype"
|
||||
"tailscale.com/feature/buildfeatures"
|
||||
"tailscale.com/ipn"
|
||||
"tailscale.com/tailcfg"
|
||||
"tailscale.com/util/syspolicy"
|
||||
"tailscale.com/util/syspolicy/pkey"
|
||||
"tailscale.com/util/syspolicy/policyclient"
|
||||
)
|
||||
|
||||
type actorWithPolicyChecks struct{ Actor }
|
||||
@@ -50,10 +52,13 @@ func (a actorWithPolicyChecks) CheckProfileAccess(profile ipn.LoginProfileView,
|
||||
// TODO(nickkhyl): unexport it when we move [ipn.Actor] implementations from [ipnserver]
|
||||
// and corp to this package.
|
||||
func CheckDisconnectPolicy(actor Actor, profile ipn.LoginProfileView, reason string, auditFn AuditLogFunc) error {
|
||||
if alwaysOn, _ := syspolicy.GetBoolean(syspolicy.AlwaysOn, false); !alwaysOn {
|
||||
if !buildfeatures.HasSystemPolicy {
|
||||
return nil
|
||||
}
|
||||
if allowWithReason, _ := syspolicy.GetBoolean(syspolicy.AlwaysOnOverrideWithReason, false); !allowWithReason {
|
||||
if alwaysOn, _ := policyclient.Get().GetBoolean(pkey.AlwaysOn, false); !alwaysOn {
|
||||
return nil
|
||||
}
|
||||
if allowWithReason, _ := policyclient.Get().GetBoolean(pkey.AlwaysOnOverrideWithReason, false); !allowWithReason {
|
||||
return errors.New("disconnect not allowed: always-on mode is enabled")
|
||||
}
|
||||
if reason == "" {
|
||||
|
||||
12
vendor/tailscale.com/ipn/ipnauth/self.go
generated
vendored
12
vendor/tailscale.com/ipn/ipnauth/self.go
generated
vendored
@@ -13,6 +13,11 @@ import (
|
||||
// has unlimited access.
|
||||
var Self Actor = unrestricted{}
|
||||
|
||||
// TODO is a caller identity used when the operation is performed on behalf of a user,
|
||||
// rather than by tailscaled itself, but the surrounding function is not yet extended
|
||||
// to accept an [Actor] parameter. It grants the same unrestricted access as [Self].
|
||||
var TODO Actor = unrestricted{}
|
||||
|
||||
// unrestricted is an [Actor] that has unlimited access to the currently running
|
||||
// tailscaled instance. It's typically used for operations performed by tailscaled
|
||||
// on its own, or upon a request from the control plane, rather on behalf of a user.
|
||||
@@ -49,3 +54,10 @@ func (unrestricted) IsLocalSystem() bool { return false }
|
||||
// Deprecated: this method exists for compatibility with the current (as of 2025-01-28)
|
||||
// permission model and will be removed as we progress on tailscale/corp#18342.
|
||||
func (unrestricted) IsLocalAdmin(operatorUID string) bool { return false }
|
||||
|
||||
// IsTailscaled reports whether the given Actor represents Tailscaled itself,
|
||||
// such as [Self] or a [TODO] placeholder actor.
|
||||
func IsTailscaled(a Actor) bool {
|
||||
_, ok := a.(unrestricted)
|
||||
return ok
|
||||
}
|
||||
|
||||
411
vendor/tailscale.com/ipn/ipnext/ipnext.go
generated
vendored
Normal file
411
vendor/tailscale.com/ipn/ipnext/ipnext.go
generated
vendored
Normal file
@@ -0,0 +1,411 @@
|
||||
// Copyright (c) Tailscale Inc & AUTHORS
|
||||
// SPDX-License-Identifier: BSD-3-Clause
|
||||
|
||||
// Package ipnext defines types and interfaces used for extending the core LocalBackend
|
||||
// functionality with additional features and services.
|
||||
package ipnext
|
||||
|
||||
import (
|
||||
"errors"
|
||||
"fmt"
|
||||
"iter"
|
||||
"net/netip"
|
||||
|
||||
"tailscale.com/control/controlclient"
|
||||
"tailscale.com/feature"
|
||||
"tailscale.com/ipn"
|
||||
"tailscale.com/ipn/ipnauth"
|
||||
"tailscale.com/ipn/ipnstate"
|
||||
"tailscale.com/tailcfg"
|
||||
"tailscale.com/tsd"
|
||||
"tailscale.com/tstime"
|
||||
"tailscale.com/types/logger"
|
||||
"tailscale.com/types/mapx"
|
||||
)
|
||||
|
||||
// Extension augments LocalBackend with additional functionality.
|
||||
//
|
||||
// An extension uses the provided [Host] to register callbacks
|
||||
// and interact with the backend in a controlled, well-defined
|
||||
// and thread-safe manner.
|
||||
//
|
||||
// Extensions are registered using [RegisterExtension].
|
||||
//
|
||||
// They must be safe for concurrent use.
|
||||
type Extension interface {
|
||||
// Name is a unique name of the extension.
|
||||
// It must be the same as the name used to register the extension.
|
||||
Name() string
|
||||
|
||||
// Init is called to initialize the extension when LocalBackend's
|
||||
// Start method is called. Extensions are created but not initialized
|
||||
// unless LocalBackend is started.
|
||||
//
|
||||
// If the extension cannot be initialized, it must return an error,
|
||||
// and its Shutdown method will not be called on the host's shutdown.
|
||||
// Returned errors are not fatal; they are used for logging.
|
||||
// A [SkipExtension] error indicates an intentional decision rather than a failure.
|
||||
Init(Host) error
|
||||
|
||||
// Shutdown is called when LocalBackend is shutting down,
|
||||
// provided the extension was initialized. For multiple extensions,
|
||||
// Shutdown is called in the reverse order of Init.
|
||||
// Returned errors are not fatal; they are used for logging.
|
||||
// After a call to Shutdown, the extension will not be called again.
|
||||
Shutdown() error
|
||||
}
|
||||
|
||||
// NewExtensionFn is a function that instantiates an [Extension].
|
||||
// If a registered extension cannot be instantiated, the function must return an error.
|
||||
// If the extension should be skipped at runtime, it must return either [SkipExtension]
|
||||
// or a wrapped [SkipExtension]. Any other error returned is fatal and will prevent
|
||||
// the LocalBackend from starting.
|
||||
type NewExtensionFn func(logger.Logf, SafeBackend) (Extension, error)
|
||||
|
||||
// SkipExtension is an error returned by [NewExtensionFn] to indicate that the extension
|
||||
// should be skipped rather than prevent the LocalBackend from starting.
|
||||
//
|
||||
// Skipping an extension should be reserved for cases where the extension is not supported
|
||||
// on the current platform or configuration, or depends on a feature that is not available,
|
||||
// or otherwise should be disabled permanently rather than temporarily.
|
||||
//
|
||||
// Specifically, it must not be returned if the extension is not required right now
|
||||
// based on user preferences, policy settings, the current tailnet, or other factors
|
||||
// that may change throughout the LocalBackend's lifetime.
|
||||
var SkipExtension = errors.New("skipping extension")
|
||||
|
||||
// Definition describes a registered [Extension].
|
||||
type Definition struct {
|
||||
name string // name under which the extension is registered
|
||||
newFn NewExtensionFn // function that creates a new instance of the extension
|
||||
}
|
||||
|
||||
// Name returns the name of the extension.
|
||||
func (d *Definition) Name() string {
|
||||
return d.name
|
||||
}
|
||||
|
||||
// MakeExtension instantiates the extension.
|
||||
func (d *Definition) MakeExtension(logf logger.Logf, sb SafeBackend) (Extension, error) {
|
||||
ext, err := d.newFn(logf, sb)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if ext.Name() != d.name {
|
||||
return nil, fmt.Errorf("extension name mismatch: registered %q; actual %q", d.name, ext.Name())
|
||||
}
|
||||
return ext, nil
|
||||
}
|
||||
|
||||
// extensions is a map of registered extensions,
|
||||
// where the key is the name of the extension.
|
||||
var extensions mapx.OrderedMap[string, *Definition]
|
||||
|
||||
// RegisterExtension registers a function that instantiates an [Extension].
|
||||
// The name must be the same as returned by the extension's [Extension.Name].
|
||||
//
|
||||
// It must be called on the main goroutine before LocalBackend is created,
|
||||
// such as from an init function of the package implementing the extension.
|
||||
//
|
||||
// It panics if newExt is nil or if an extension with the same name
|
||||
// has already been registered.
|
||||
func RegisterExtension(name string, newExt NewExtensionFn) {
|
||||
if newExt == nil {
|
||||
panic(fmt.Sprintf("ipnext: newExt is nil: %q", name))
|
||||
}
|
||||
if extensions.Contains(name) {
|
||||
panic(fmt.Sprintf("ipnext: duplicate extension name %q", name))
|
||||
}
|
||||
extensions.Set(name, &Definition{name, newExt})
|
||||
}
|
||||
|
||||
// Extensions iterates over the extensions in the order they were registered
|
||||
// via [RegisterExtension].
|
||||
func Extensions() iter.Seq[*Definition] {
|
||||
return extensions.Values()
|
||||
}
|
||||
|
||||
// DefinitionForTest returns a [Definition] for the specified [Extension].
|
||||
// It is primarily used for testing where the test code needs to instantiate
|
||||
// and use an extension without registering it.
|
||||
func DefinitionForTest(ext Extension) *Definition {
|
||||
return &Definition{
|
||||
name: ext.Name(),
|
||||
newFn: func(logger.Logf, SafeBackend) (Extension, error) { return ext, nil },
|
||||
}
|
||||
}
|
||||
|
||||
// DefinitionWithErrForTest returns a [Definition] with the specified extension name
|
||||
// whose [Definition.MakeExtension] method returns the specified error.
|
||||
// It is used for testing.
|
||||
func DefinitionWithErrForTest(name string, err error) *Definition {
|
||||
return &Definition{
|
||||
name: name,
|
||||
newFn: func(logger.Logf, SafeBackend) (Extension, error) { return nil, err },
|
||||
}
|
||||
}
|
||||
|
||||
// Host is the API surface used by [Extension]s to interact with LocalBackend
|
||||
// in a controlled manner.
|
||||
//
|
||||
// Extensions can register callbacks, request information, or perform actions
|
||||
// via the [Host] interface.
|
||||
//
|
||||
// Typically, the host invokes registered callbacks when one of the following occurs:
|
||||
// - LocalBackend notifies it of an event or state change that may be
|
||||
// of interest to extensions, such as when switching [ipn.LoginProfile].
|
||||
// - LocalBackend needs to consult extensions for information, for example,
|
||||
// determining the most appropriate profile for the current state of the system.
|
||||
// - LocalBackend performs an extensible action, such as logging an auditable event,
|
||||
// and delegates its execution to the extension.
|
||||
//
|
||||
// The callbacks are invoked synchronously, and the LocalBackend's state
|
||||
// remains unchanged while callbacks execute.
|
||||
//
|
||||
// In contrast, actions initiated by extensions are generally asynchronous,
|
||||
// as indicated by the "Async" suffix in their names.
|
||||
// Performing actions may result in callbacks being invoked as described above.
|
||||
//
|
||||
// To prevent conflicts between extensions competing for shared state,
|
||||
// such as the current profile or prefs, the host must not expose methods
|
||||
// that directly modify that state. For example, instead of allowing extensions
|
||||
// to switch profiles at-will, the host's [ProfileServices] provides a method
|
||||
// to switch to the "best" profile. The host can then consult extensions
|
||||
// to determine the appropriate profile to use and resolve any conflicts
|
||||
// in a controlled manner.
|
||||
//
|
||||
// A host must be safe for concurrent use.
|
||||
type Host interface {
|
||||
// Extensions returns the host's [ExtensionServices].
|
||||
Extensions() ExtensionServices
|
||||
|
||||
// Profiles returns the host's [ProfileServices].
|
||||
Profiles() ProfileServices
|
||||
|
||||
// AuditLogger returns a function that calls all currently registered audit loggers.
|
||||
// The function fails if any logger returns an error, indicating that the action
|
||||
// cannot be logged and must not be performed.
|
||||
//
|
||||
// The returned function captures the current state (e.g., the current profile) at
|
||||
// the time of the call and must not be persisted.
|
||||
AuditLogger() ipnauth.AuditLogFunc
|
||||
|
||||
// Hooks returns a non-nil pointer to a [Hooks] struct.
|
||||
// Hooks must not be modified concurrently or after Tailscale has started.
|
||||
Hooks() *Hooks
|
||||
|
||||
// SendNotifyAsync sends a notification to the IPN bus,
|
||||
// typically to the GUI client.
|
||||
SendNotifyAsync(ipn.Notify)
|
||||
|
||||
// NodeBackend returns the [NodeBackend] for the currently active node
|
||||
// (which is approximately the same as the current profile).
|
||||
NodeBackend() NodeBackend
|
||||
}
|
||||
|
||||
// SafeBackend is a subset of the [ipnlocal.LocalBackend] type's methods that
|
||||
// are safe to call from extension hooks at any time (even hooks called while
|
||||
// LocalBackend's internal mutex is held).
|
||||
type SafeBackend interface {
|
||||
Sys() *tsd.System
|
||||
Clock() tstime.Clock
|
||||
TailscaleVarRoot() string
|
||||
}
|
||||
|
||||
// ExtensionServices provides access to the [Host]'s extension management services,
|
||||
// such as fetching active extensions.
|
||||
type ExtensionServices interface {
|
||||
// FindExtensionByName returns an active extension with the given name,
|
||||
// or nil if no such extension exists.
|
||||
FindExtensionByName(name string) any
|
||||
|
||||
// FindMatchingExtension finds the first active extension that matches target,
|
||||
// and if one is found, sets target to that extension and returns true.
|
||||
// Otherwise, it returns false.
|
||||
//
|
||||
// It panics if target is not a non-nil pointer to either a type
|
||||
// that implements [ipnext.Extension], or to any interface type.
|
||||
FindMatchingExtension(target any) bool
|
||||
}
|
||||
|
||||
// ProfileServices provides access to the [Host]'s profile management services,
|
||||
// such as switching profiles and registering profile change callbacks.
|
||||
type ProfileServices interface {
|
||||
// CurrentProfileState returns read-only views of the current profile
|
||||
// and its preferences. The returned views are always valid,
|
||||
// but the profile's [ipn.LoginProfileView.ID] returns ""
|
||||
// if the profile is new and has not been persisted yet.
|
||||
//
|
||||
// The returned views are immutable snapshots of the current profile
|
||||
// and prefs at the time of the call. The actual state is only guaranteed
|
||||
// to remain unchanged and match these views for the duration
|
||||
// of a callback invoked by the host, if used within that callback.
|
||||
//
|
||||
// Extensions that need the current profile or prefs at other times
|
||||
// should typically subscribe to [ProfileStateChangeCallback]
|
||||
// to be notified if the profile or prefs change after retrieval.
|
||||
// CurrentProfileState returns both the profile and prefs
|
||||
// to guarantee that they are consistent with each other.
|
||||
CurrentProfileState() (ipn.LoginProfileView, ipn.PrefsView)
|
||||
|
||||
// CurrentPrefs is like [CurrentProfileState] but only returns prefs.
|
||||
CurrentPrefs() ipn.PrefsView
|
||||
|
||||
// SwitchToBestProfileAsync asynchronously selects the best profile to use
|
||||
// and switches to it, unless it is already the current profile.
|
||||
//
|
||||
// If an extension needs to know when a profile switch occurs,
|
||||
// it must use [ProfileServices.RegisterProfileStateChangeCallback]
|
||||
// to register a [ProfileStateChangeCallback].
|
||||
//
|
||||
// The reason indicates why the profile is being switched, such as due
|
||||
// to a client connecting or disconnecting or a change in the desktop
|
||||
// session state. It is used for logging.
|
||||
SwitchToBestProfileAsync(reason string)
|
||||
}
|
||||
|
||||
// ProfileStore provides read-only access to available login profiles and their preferences.
|
||||
// It is not safe for concurrent use and can only be used from the callback it is passed to.
|
||||
type ProfileStore interface {
|
||||
// CurrentUserID returns the current user ID. It is only non-empty on
|
||||
// Windows where we have a multi-user system.
|
||||
//
|
||||
// Deprecated: this method exists for compatibility with the current (as of 2024-08-27)
|
||||
// permission model and will be removed as we progress on tailscale/corp#18342.
|
||||
CurrentUserID() ipn.WindowsUserID
|
||||
|
||||
// CurrentProfile returns a read-only [ipn.LoginProfileView] of the current profile.
|
||||
// The returned view is always valid, but the profile's [ipn.LoginProfileView.ID]
|
||||
// returns "" if the profile is new and has not been persisted yet.
|
||||
CurrentProfile() ipn.LoginProfileView
|
||||
|
||||
// CurrentPrefs returns a read-only view of the current prefs.
|
||||
// The returned view is always valid.
|
||||
CurrentPrefs() ipn.PrefsView
|
||||
|
||||
// DefaultUserProfile returns a read-only view of the default (last used) profile for the specified user.
|
||||
// It returns a read-only view of a new, non-persisted profile if the specified user does not have a default profile.
|
||||
DefaultUserProfile(uid ipn.WindowsUserID) ipn.LoginProfileView
|
||||
}
|
||||
|
||||
// AuditLogProvider is a function that returns an [ipnauth.AuditLogFunc] for
|
||||
// logging auditable actions.
|
||||
type AuditLogProvider func() ipnauth.AuditLogFunc
|
||||
|
||||
// ProfileResolver is a function that returns a read-only view of a login profile.
|
||||
// An invalid view indicates no profile. A valid profile view with an empty [ipn.ProfileID]
|
||||
// indicates that the profile is new and has not been persisted yet.
|
||||
// The provided [ProfileStore] can only be used for the duration of the callback.
|
||||
type ProfileResolver func(ProfileStore) ipn.LoginProfileView
|
||||
|
||||
// ProfileStateChangeCallback is a function to be called when the current login profile
|
||||
// or its preferences change.
|
||||
//
|
||||
// The sameNode parameter indicates whether the profile represents the same node as before,
|
||||
// which is true when:
|
||||
// - Only the profile's [ipn.Prefs] or metadata (e.g., [tailcfg.UserProfile]) have changed,
|
||||
// but the node ID and [ipn.ProfileID] remain the same.
|
||||
// - The profile has been persisted and assigned an [ipn.ProfileID] for the first time,
|
||||
// so while its node ID and [ipn.ProfileID] have changed, it is still the same profile.
|
||||
//
|
||||
// It can be used to decide whether to reset state bound to the current profile or node identity.
|
||||
//
|
||||
// The profile and prefs are always valid, but the profile's [ipn.LoginProfileView.ID]
|
||||
// returns "" if the profile is new and has not been persisted yet.
|
||||
type ProfileStateChangeCallback func(_ ipn.LoginProfileView, _ ipn.PrefsView, sameNode bool)
|
||||
|
||||
// NewControlClientCallback is a function to be called when a new [controlclient.Client]
|
||||
// is created and before it is first used. The specified profile represents the node
|
||||
// for which the cc is created and is always valid. Its [ipn.LoginProfileView.ID]
|
||||
// returns "" if it is a new node whose profile has never been persisted.
|
||||
//
|
||||
// If the [controlclient.Client] is created due to a profile switch, any registered
|
||||
// [ProfileStateChangeCallback]s are called first.
|
||||
//
|
||||
// It returns a function to be called when the cc is being shut down,
|
||||
// or nil if no cleanup is needed. That cleanup function should not call
|
||||
// back into LocalBackend, which may be locked during shutdown.
|
||||
type NewControlClientCallback func(controlclient.Client, ipn.LoginProfileView) (cleanup func())
|
||||
|
||||
// Hooks is a collection of hooks that extensions can add to (non-concurrently)
|
||||
// during program initialization and can be called by LocalBackend and others at
|
||||
// runtime.
|
||||
//
|
||||
// Each hook has its own rules about when it's called and what environment it
|
||||
// has access to and what it's allowed to do.
|
||||
type Hooks struct {
|
||||
// BackendStateChange is called when the backend state changes.
|
||||
BackendStateChange feature.Hooks[func(ipn.State)]
|
||||
|
||||
// ProfileStateChange contains callbacks that are invoked when the current login profile
|
||||
// or its [ipn.Prefs] change, after those changes have been made. The current login profile
|
||||
// may be changed either because of a profile switch, or because the profile information
|
||||
// was updated by [LocalBackend.SetControlClientStatus], including when the profile
|
||||
// is first populated and persisted.
|
||||
ProfileStateChange feature.Hooks[ProfileStateChangeCallback]
|
||||
|
||||
// BackgroundProfileResolvers are registered background profile resolvers.
|
||||
// They're used to determine the profile to use when no GUI/CLI client is connected.
|
||||
//
|
||||
// TODO(nickkhyl): allow specifying some kind of priority/altitude for the resolver.
|
||||
// TODO(nickkhyl): make it a "profile resolver" instead of a "background profile resolver".
|
||||
// The concepts of the "current user", "foreground profile" and "background profile"
|
||||
// only exist on Windows, and we're moving away from them anyway.
|
||||
BackgroundProfileResolvers feature.Hooks[ProfileResolver]
|
||||
|
||||
// AuditLoggers are registered [AuditLogProvider]s.
|
||||
// Each provider is called to get an [ipnauth.AuditLogFunc] when an auditable action
|
||||
// is about to be performed. If an audit logger returns an error, the action is denied.
|
||||
AuditLoggers feature.Hooks[AuditLogProvider]
|
||||
|
||||
// NewControlClient are the functions to be called when a new control client
|
||||
// is created. It is called with the LocalBackend locked.
|
||||
NewControlClient feature.Hooks[NewControlClientCallback]
|
||||
|
||||
// OnSelfChange is called (with LocalBackend.mu held) when the self node
|
||||
// changes, including changing to nothing (an invalid view).
|
||||
OnSelfChange feature.Hooks[func(tailcfg.NodeView)]
|
||||
|
||||
// MutateNotifyLocked is called to optionally mutate the provided Notify
|
||||
// before sending it to the IPN bus. It is called with LocalBackend.mu held.
|
||||
MutateNotifyLocked feature.Hooks[func(*ipn.Notify)]
|
||||
|
||||
// SetPeerStatus is called to mutate PeerStatus.
|
||||
// Callers must only use NodeBackend to read data.
|
||||
SetPeerStatus feature.Hooks[func(*ipnstate.PeerStatus, tailcfg.NodeView, NodeBackend)]
|
||||
|
||||
// ShouldUploadServices reports whether this node should include services
|
||||
// in Hostinfo from the portlist extension.
|
||||
ShouldUploadServices feature.Hook[func() bool]
|
||||
}
|
||||
|
||||
// NodeBackend is an interface to query the current node and its peers.
|
||||
//
|
||||
// It is not a snapshot in time but is locked to a particular node.
|
||||
type NodeBackend interface {
|
||||
// AppendMatchingPeers appends all peers that match the predicate
|
||||
// to the base slice and returns it.
|
||||
AppendMatchingPeers(base []tailcfg.NodeView, pred func(tailcfg.NodeView) bool) []tailcfg.NodeView
|
||||
|
||||
// PeerCaps returns the capabilities that src has to this node.
|
||||
PeerCaps(src netip.Addr) tailcfg.PeerCapMap
|
||||
|
||||
// PeerHasCap reports whether the peer has the specified peer capability.
|
||||
PeerHasCap(peer tailcfg.NodeView, cap tailcfg.PeerCapability) bool
|
||||
|
||||
// PeerAPIBase returns the "http://ip:port" URL base to reach peer's
|
||||
// PeerAPI, or the empty string if the peer is invalid or doesn't support
|
||||
// PeerAPI.
|
||||
PeerAPIBase(tailcfg.NodeView) string
|
||||
|
||||
// PeerHasPeerAPI whether the provided peer supports PeerAPI.
|
||||
//
|
||||
// It effectively just reports whether PeerAPIBase(node) is non-empty, but
|
||||
// potentially more efficiently.
|
||||
PeerHasPeerAPI(tailcfg.NodeView) bool
|
||||
|
||||
// CollectServices reports whether the control plane is telling this
|
||||
// node that the portlist service collection is desirable, should it
|
||||
// choose to report them.
|
||||
CollectServices() bool
|
||||
}
|
||||
65
vendor/tailscale.com/ipn/ipnlocal/autoupdate.go
generated
vendored
65
vendor/tailscale.com/ipn/ipnlocal/autoupdate.go
generated
vendored
@@ -1,65 +0,0 @@
|
||||
// Copyright (c) Tailscale Inc & AUTHORS
|
||||
// SPDX-License-Identifier: BSD-3-Clause
|
||||
|
||||
//go:build linux || windows
|
||||
|
||||
package ipnlocal
|
||||
|
||||
import (
|
||||
"context"
|
||||
"time"
|
||||
|
||||
"tailscale.com/clientupdate"
|
||||
"tailscale.com/ipn"
|
||||
"tailscale.com/version"
|
||||
)
|
||||
|
||||
func (b *LocalBackend) stopOfflineAutoUpdate() {
|
||||
if b.offlineAutoUpdateCancel != nil {
|
||||
b.logf("offline auto-update: stopping update checks")
|
||||
b.offlineAutoUpdateCancel()
|
||||
b.offlineAutoUpdateCancel = nil
|
||||
}
|
||||
}
|
||||
|
||||
func (b *LocalBackend) maybeStartOfflineAutoUpdate(prefs ipn.PrefsView) {
|
||||
if !prefs.AutoUpdate().Apply.EqualBool(true) {
|
||||
return
|
||||
}
|
||||
// AutoUpdate.Apply field in prefs can only be true for platforms that
|
||||
// support auto-updates. But check it here again, just in case.
|
||||
if !clientupdate.CanAutoUpdate() {
|
||||
return
|
||||
}
|
||||
// On macsys, auto-updates are managed by Sparkle.
|
||||
if version.IsMacSysExt() {
|
||||
return
|
||||
}
|
||||
|
||||
if b.offlineAutoUpdateCancel != nil {
|
||||
// Already running.
|
||||
return
|
||||
}
|
||||
ctx, cancel := context.WithCancel(context.Background())
|
||||
b.offlineAutoUpdateCancel = cancel
|
||||
|
||||
b.logf("offline auto-update: starting update checks")
|
||||
go b.offlineAutoUpdate(ctx)
|
||||
}
|
||||
|
||||
const offlineAutoUpdateCheckPeriod = time.Hour
|
||||
|
||||
func (b *LocalBackend) offlineAutoUpdate(ctx context.Context) {
|
||||
t := time.NewTicker(offlineAutoUpdateCheckPeriod)
|
||||
defer t.Stop()
|
||||
for {
|
||||
select {
|
||||
case <-ctx.Done():
|
||||
return
|
||||
case <-t.C:
|
||||
}
|
||||
if err := b.startAutoUpdate("offline auto-update"); err != nil {
|
||||
b.logf("offline auto-update: failed: %v", err)
|
||||
}
|
||||
}
|
||||
}
|
||||
18
vendor/tailscale.com/ipn/ipnlocal/autoupdate_disabled.go
generated
vendored
18
vendor/tailscale.com/ipn/ipnlocal/autoupdate_disabled.go
generated
vendored
@@ -1,18 +0,0 @@
|
||||
// Copyright (c) Tailscale Inc & AUTHORS
|
||||
// SPDX-License-Identifier: BSD-3-Clause
|
||||
|
||||
//go:build !(linux || windows)
|
||||
|
||||
package ipnlocal
|
||||
|
||||
import (
|
||||
"tailscale.com/ipn"
|
||||
)
|
||||
|
||||
func (b *LocalBackend) stopOfflineAutoUpdate() {
|
||||
// Not supported on this platform.
|
||||
}
|
||||
|
||||
func (b *LocalBackend) maybeStartOfflineAutoUpdate(prefs ipn.PrefsView) {
|
||||
// Not supported on this platform.
|
||||
}
|
||||
3
vendor/tailscale.com/ipn/ipnlocal/bus.go
generated
vendored
3
vendor/tailscale.com/ipn/ipnlocal/bus.go
generated
vendored
@@ -156,5 +156,6 @@ func isNotableNotify(n *ipn.Notify) bool {
|
||||
n.Health != nil ||
|
||||
len(n.IncomingFiles) > 0 ||
|
||||
len(n.OutgoingFiles) > 0 ||
|
||||
n.FilesWaiting != nil
|
||||
n.FilesWaiting != nil ||
|
||||
n.SuggestedExitNode != nil
|
||||
}
|
||||
|
||||
481
vendor/tailscale.com/ipn/ipnlocal/c2n.go
generated
vendored
481
vendor/tailscale.com/ipn/ipnlocal/c2n.go
generated
vendored
@@ -4,76 +4,71 @@
|
||||
package ipnlocal
|
||||
|
||||
import (
|
||||
"crypto/x509"
|
||||
"encoding/json"
|
||||
"encoding/pem"
|
||||
"errors"
|
||||
"fmt"
|
||||
"io"
|
||||
"net/http"
|
||||
"os"
|
||||
"os/exec"
|
||||
"path"
|
||||
"path/filepath"
|
||||
"reflect"
|
||||
"runtime"
|
||||
"strconv"
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
"tailscale.com/clientupdate"
|
||||
"tailscale.com/envknob"
|
||||
"tailscale.com/control/controlclient"
|
||||
"tailscale.com/feature"
|
||||
"tailscale.com/feature/buildfeatures"
|
||||
"tailscale.com/health"
|
||||
"tailscale.com/ipn"
|
||||
"tailscale.com/net/sockstats"
|
||||
"tailscale.com/posture"
|
||||
"tailscale.com/tailcfg"
|
||||
"tailscale.com/types/netmap"
|
||||
"tailscale.com/util/clientmetric"
|
||||
"tailscale.com/util/goroutines"
|
||||
"tailscale.com/util/httpm"
|
||||
"tailscale.com/util/set"
|
||||
"tailscale.com/util/syspolicy"
|
||||
"tailscale.com/version"
|
||||
"tailscale.com/version/distro"
|
||||
)
|
||||
|
||||
// c2nHandlers maps an HTTP method and URI path (without query parameters) to
|
||||
// its handler. The exact method+path match is preferred, but if no entry
|
||||
// exists for that, a map entry with an empty method is used as a fallback.
|
||||
var c2nHandlers = map[methodAndPath]c2nHandler{
|
||||
// Debug.
|
||||
req("/echo"): handleC2NEcho,
|
||||
req("/debug/goroutines"): handleC2NDebugGoroutines,
|
||||
req("/debug/prefs"): handleC2NDebugPrefs,
|
||||
req("/debug/metrics"): handleC2NDebugMetrics,
|
||||
req("/debug/component-logging"): handleC2NDebugComponentLogging,
|
||||
req("/debug/logheap"): handleC2NDebugLogHeap,
|
||||
var c2nHandlers map[methodAndPath]c2nHandler
|
||||
|
||||
// PPROF - We only expose a subset of typical pprof endpoints for security.
|
||||
req("/debug/pprof/heap"): handleC2NPprof,
|
||||
req("/debug/pprof/allocs"): handleC2NPprof,
|
||||
func init() {
|
||||
c2nHandlers = map[methodAndPath]c2nHandler{}
|
||||
if buildfeatures.HasC2N {
|
||||
// Echo is the basic "ping" handler as used by the control plane to probe
|
||||
// whether a node is reachable. In particular, it's important for
|
||||
// high-availability subnet routers for the control plane to probe which of
|
||||
// several candidate nodes is reachable and actually alive.
|
||||
RegisterC2N("/echo", handleC2NEcho)
|
||||
}
|
||||
if buildfeatures.HasSSH {
|
||||
RegisterC2N("/ssh/usernames", handleC2NSSHUsernames)
|
||||
}
|
||||
if buildfeatures.HasLogTail {
|
||||
RegisterC2N("POST /logtail/flush", handleC2NLogtailFlush)
|
||||
}
|
||||
if buildfeatures.HasDebug {
|
||||
RegisterC2N("POST /sockstats", handleC2NSockStats)
|
||||
|
||||
req("POST /logtail/flush"): handleC2NLogtailFlush,
|
||||
req("POST /sockstats"): handleC2NSockStats,
|
||||
// pprof:
|
||||
// we only expose a subset of typical pprof endpoints for security.
|
||||
RegisterC2N("/debug/pprof/heap", handleC2NPprof)
|
||||
RegisterC2N("/debug/pprof/allocs", handleC2NPprof)
|
||||
|
||||
// Check TLS certificate status.
|
||||
req("GET /tls-cert-status"): handleC2NTLSCertStatus,
|
||||
|
||||
// SSH
|
||||
req("/ssh/usernames"): handleC2NSSHUsernames,
|
||||
|
||||
// Auto-updates.
|
||||
req("GET /update"): handleC2NUpdateGet,
|
||||
req("POST /update"): handleC2NUpdatePost,
|
||||
|
||||
// Device posture.
|
||||
req("GET /posture/identity"): handleC2NPostureIdentityGet,
|
||||
|
||||
// App Connectors.
|
||||
req("GET /appconnector/routes"): handleC2NAppConnectorDomainRoutesGet,
|
||||
|
||||
// Linux netfilter.
|
||||
req("POST /netfilter-kind"): handleC2NSetNetfilterKind,
|
||||
|
||||
// VIP services.
|
||||
req("GET /vip-services"): handleC2NVIPServicesGet,
|
||||
RegisterC2N("/debug/goroutines", handleC2NDebugGoroutines)
|
||||
RegisterC2N("/debug/prefs", handleC2NDebugPrefs)
|
||||
RegisterC2N("/debug/metrics", handleC2NDebugMetrics)
|
||||
RegisterC2N("/debug/component-logging", handleC2NDebugComponentLogging)
|
||||
RegisterC2N("/debug/logheap", handleC2NDebugLogHeap)
|
||||
RegisterC2N("/debug/netmap", handleC2NDebugNetMap)
|
||||
RegisterC2N("/debug/health", handleC2NDebugHealth)
|
||||
}
|
||||
if runtime.GOOS == "linux" && buildfeatures.HasOSRouter {
|
||||
RegisterC2N("POST /netfilter-kind", handleC2NSetNetfilterKind)
|
||||
}
|
||||
}
|
||||
|
||||
// RegisterC2N registers a new c2n handler for the given pattern.
|
||||
@@ -81,6 +76,9 @@ var c2nHandlers = map[methodAndPath]c2nHandler{
|
||||
// A pattern is like "GET /foo" (specific to an HTTP method) or "/foo" (all
|
||||
// methods). It panics if the pattern is already registered.
|
||||
func RegisterC2N(pattern string, h func(*LocalBackend, http.ResponseWriter, *http.Request)) {
|
||||
if !buildfeatures.HasC2N {
|
||||
return
|
||||
}
|
||||
k := req(pattern)
|
||||
if _, ok := c2nHandlers[k]; ok {
|
||||
panic(fmt.Sprintf("c2n: duplicate handler for %q", pattern))
|
||||
@@ -149,21 +147,108 @@ func handleC2NLogtailFlush(b *LocalBackend, w http.ResponseWriter, r *http.Reque
|
||||
}
|
||||
}
|
||||
|
||||
func handleC2NDebugHealth(b *LocalBackend, w http.ResponseWriter, r *http.Request) {
|
||||
var st *health.State
|
||||
if buildfeatures.HasDebug && b.health != nil {
|
||||
st = b.health.CurrentState()
|
||||
}
|
||||
writeJSON(w, st)
|
||||
}
|
||||
|
||||
func handleC2NDebugNetMap(b *LocalBackend, w http.ResponseWriter, r *http.Request) {
|
||||
if !buildfeatures.HasDebug {
|
||||
http.Error(w, feature.ErrUnavailable.Error(), http.StatusNotImplemented)
|
||||
return
|
||||
}
|
||||
ctx := r.Context()
|
||||
if r.Method != httpm.POST && r.Method != httpm.GET {
|
||||
http.Error(w, "method not allowed", http.StatusMethodNotAllowed)
|
||||
return
|
||||
}
|
||||
|
||||
b.logf("c2n: %s /debug/netmap received", r.Method)
|
||||
|
||||
// redactAndMarshal redacts private keys from the given netmap, clears fields
|
||||
// that should be omitted, and marshals it to JSON.
|
||||
redactAndMarshal := func(nm *netmap.NetworkMap, omitFields []string) (json.RawMessage, error) {
|
||||
for _, f := range omitFields {
|
||||
field := reflect.ValueOf(nm).Elem().FieldByName(f)
|
||||
if !field.IsValid() {
|
||||
b.logf("c2n: /debug/netmap: unknown field %q in omitFields", f)
|
||||
continue
|
||||
}
|
||||
field.SetZero()
|
||||
}
|
||||
return json.Marshal(nm)
|
||||
}
|
||||
|
||||
var omitFields []string
|
||||
resp := &tailcfg.C2NDebugNetmapResponse{}
|
||||
|
||||
if r.Method == httpm.POST {
|
||||
var req tailcfg.C2NDebugNetmapRequest
|
||||
if err := json.NewDecoder(r.Body).Decode(&req); err != nil {
|
||||
http.Error(w, fmt.Sprintf("failed to decode request body: %v", err), http.StatusBadRequest)
|
||||
return
|
||||
}
|
||||
omitFields = req.OmitFields
|
||||
|
||||
if req.Candidate != nil {
|
||||
cand, err := controlclient.NetmapFromMapResponseForDebug(ctx, b.unsanitizedPersist(), req.Candidate)
|
||||
if err != nil {
|
||||
http.Error(w, fmt.Sprintf("failed to convert candidate MapResponse: %v", err), http.StatusBadRequest)
|
||||
return
|
||||
}
|
||||
candJSON, err := redactAndMarshal(cand, omitFields)
|
||||
if err != nil {
|
||||
http.Error(w, fmt.Sprintf("failed to marshal candidate netmap: %v", err), http.StatusInternalServerError)
|
||||
return
|
||||
}
|
||||
resp.Candidate = candJSON
|
||||
}
|
||||
}
|
||||
|
||||
var err error
|
||||
resp.Current, err = redactAndMarshal(b.currentNode().netMapWithPeers(), omitFields)
|
||||
if err != nil {
|
||||
http.Error(w, fmt.Sprintf("failed to marshal current netmap: %v", err), http.StatusInternalServerError)
|
||||
return
|
||||
}
|
||||
|
||||
writeJSON(w, resp)
|
||||
}
|
||||
|
||||
func handleC2NDebugGoroutines(_ *LocalBackend, w http.ResponseWriter, r *http.Request) {
|
||||
if !buildfeatures.HasDebug {
|
||||
http.Error(w, feature.ErrUnavailable.Error(), http.StatusNotImplemented)
|
||||
return
|
||||
}
|
||||
w.Header().Set("Content-Type", "text/plain")
|
||||
w.Write(goroutines.ScrubbedGoroutineDump(true))
|
||||
}
|
||||
|
||||
func handleC2NDebugPrefs(b *LocalBackend, w http.ResponseWriter, r *http.Request) {
|
||||
if !buildfeatures.HasDebug {
|
||||
http.Error(w, feature.ErrUnavailable.Error(), http.StatusNotImplemented)
|
||||
return
|
||||
}
|
||||
writeJSON(w, b.Prefs())
|
||||
}
|
||||
|
||||
func handleC2NDebugMetrics(_ *LocalBackend, w http.ResponseWriter, r *http.Request) {
|
||||
if !buildfeatures.HasDebug {
|
||||
http.Error(w, feature.ErrUnavailable.Error(), http.StatusNotImplemented)
|
||||
return
|
||||
}
|
||||
w.Header().Set("Content-Type", "text/plain")
|
||||
clientmetric.WritePrometheusExpositionFormat(w)
|
||||
}
|
||||
|
||||
func handleC2NDebugComponentLogging(b *LocalBackend, w http.ResponseWriter, r *http.Request) {
|
||||
if !buildfeatures.HasDebug {
|
||||
http.Error(w, feature.ErrUnavailable.Error(), http.StatusNotImplemented)
|
||||
return
|
||||
}
|
||||
component := r.FormValue("component")
|
||||
secs, _ := strconv.Atoi(r.FormValue("secs"))
|
||||
if secs == 0 {
|
||||
@@ -206,6 +291,10 @@ func handleC2NPprof(b *LocalBackend, w http.ResponseWriter, r *http.Request) {
|
||||
}
|
||||
|
||||
func handleC2NSSHUsernames(b *LocalBackend, w http.ResponseWriter, r *http.Request) {
|
||||
if !buildfeatures.HasSSH {
|
||||
http.Error(w, feature.ErrUnavailable.Error(), http.StatusNotImplemented)
|
||||
return
|
||||
}
|
||||
var req tailcfg.C2NSSHUsernamesRequest
|
||||
if r.Method == "POST" {
|
||||
if err := json.NewDecoder(r.Body).Decode(&req); err != nil {
|
||||
@@ -232,26 +321,6 @@ func handleC2NSockStats(b *LocalBackend, w http.ResponseWriter, r *http.Request)
|
||||
fmt.Fprintf(w, "debug info: %v\n", sockstats.DebugInfo())
|
||||
}
|
||||
|
||||
// handleC2NAppConnectorDomainRoutesGet handles returning the domains
|
||||
// that the app connector is responsible for, as well as the resolved
|
||||
// IP addresses for each domain. If the node is not configured as
|
||||
// an app connector, an empty map is returned.
|
||||
func handleC2NAppConnectorDomainRoutesGet(b *LocalBackend, w http.ResponseWriter, r *http.Request) {
|
||||
b.logf("c2n: GET /appconnector/routes received")
|
||||
|
||||
var res tailcfg.C2NAppConnectorDomainRoutesResponse
|
||||
if b.appConnector == nil {
|
||||
w.Header().Set("Content-Type", "application/json")
|
||||
json.NewEncoder(w).Encode(res)
|
||||
return
|
||||
}
|
||||
|
||||
res.Domains = b.appConnector.DomainRoutes()
|
||||
|
||||
w.Header().Set("Content-Type", "application/json")
|
||||
json.NewEncoder(w).Encode(res)
|
||||
}
|
||||
|
||||
func handleC2NSetNetfilterKind(b *LocalBackend, w http.ResponseWriter, r *http.Request) {
|
||||
b.logf("c2n: POST /netfilter-kind received")
|
||||
|
||||
@@ -277,285 +346,3 @@ func handleC2NSetNetfilterKind(b *LocalBackend, w http.ResponseWriter, r *http.R
|
||||
|
||||
w.WriteHeader(http.StatusNoContent)
|
||||
}
|
||||
|
||||
func handleC2NVIPServicesGet(b *LocalBackend, w http.ResponseWriter, r *http.Request) {
|
||||
b.logf("c2n: GET /vip-services received")
|
||||
var res tailcfg.C2NVIPServicesResponse
|
||||
res.VIPServices = b.VIPServices()
|
||||
res.ServicesHash = b.vipServiceHash(res.VIPServices)
|
||||
|
||||
w.Header().Set("Content-Type", "application/json")
|
||||
json.NewEncoder(w).Encode(res)
|
||||
}
|
||||
|
||||
func handleC2NUpdateGet(b *LocalBackend, w http.ResponseWriter, r *http.Request) {
|
||||
b.logf("c2n: GET /update received")
|
||||
|
||||
res := b.newC2NUpdateResponse()
|
||||
res.Started = b.c2nUpdateStarted()
|
||||
|
||||
w.Header().Set("Content-Type", "application/json")
|
||||
json.NewEncoder(w).Encode(res)
|
||||
}
|
||||
|
||||
func handleC2NUpdatePost(b *LocalBackend, w http.ResponseWriter, r *http.Request) {
|
||||
b.logf("c2n: POST /update received")
|
||||
res := b.newC2NUpdateResponse()
|
||||
defer func() {
|
||||
if res.Err != "" {
|
||||
b.logf("c2n: POST /update failed: %s", res.Err)
|
||||
}
|
||||
w.Header().Set("Content-Type", "application/json")
|
||||
json.NewEncoder(w).Encode(res)
|
||||
}()
|
||||
|
||||
if !res.Enabled {
|
||||
res.Err = "not enabled"
|
||||
return
|
||||
}
|
||||
if !res.Supported {
|
||||
res.Err = "not supported"
|
||||
return
|
||||
}
|
||||
|
||||
// Do not update if we have active inbound SSH connections. Control can set
|
||||
// force=true query parameter to override this.
|
||||
if r.FormValue("force") != "true" && b.sshServer != nil && b.sshServer.NumActiveConns() > 0 {
|
||||
res.Err = "not updating due to active SSH connections"
|
||||
return
|
||||
}
|
||||
|
||||
if err := b.startAutoUpdate("c2n"); err != nil {
|
||||
res.Err = err.Error()
|
||||
return
|
||||
}
|
||||
res.Started = true
|
||||
}
|
||||
|
||||
func handleC2NPostureIdentityGet(b *LocalBackend, w http.ResponseWriter, r *http.Request) {
|
||||
b.logf("c2n: GET /posture/identity received")
|
||||
|
||||
res := tailcfg.C2NPostureIdentityResponse{}
|
||||
|
||||
// Only collect posture identity if enabled on the client,
|
||||
// this will first check syspolicy, MDM settings like Registry
|
||||
// on Windows or defaults on macOS. If they are not set, it falls
|
||||
// back to the cli-flag, `--posture-checking`.
|
||||
choice, err := syspolicy.GetPreferenceOption(syspolicy.PostureChecking)
|
||||
if err != nil {
|
||||
b.logf(
|
||||
"c2n: failed to read PostureChecking from syspolicy, returning default from CLI: %s; got error: %s",
|
||||
b.Prefs().PostureChecking(),
|
||||
err,
|
||||
)
|
||||
}
|
||||
|
||||
if choice.ShouldEnable(b.Prefs().PostureChecking()) {
|
||||
res.SerialNumbers, err = posture.GetSerialNumbers(b.logf)
|
||||
if err != nil {
|
||||
b.logf("c2n: GetSerialNumbers returned error: %v", err)
|
||||
}
|
||||
|
||||
// TODO(tailscale/corp#21371, 2024-07-10): once this has landed in a stable release
|
||||
// and looks good in client metrics, remove this parameter and always report MAC
|
||||
// addresses.
|
||||
if r.FormValue("hwaddrs") == "true" {
|
||||
res.IfaceHardwareAddrs, err = posture.GetHardwareAddrs()
|
||||
if err != nil {
|
||||
b.logf("c2n: GetHardwareAddrs returned error: %v", err)
|
||||
}
|
||||
}
|
||||
} else {
|
||||
res.PostureDisabled = true
|
||||
}
|
||||
|
||||
b.logf("c2n: posture identity disabled=%v reported %d serials %d hwaddrs", res.PostureDisabled, len(res.SerialNumbers), len(res.IfaceHardwareAddrs))
|
||||
|
||||
w.Header().Set("Content-Type", "application/json")
|
||||
json.NewEncoder(w).Encode(res)
|
||||
}
|
||||
|
||||
func (b *LocalBackend) newC2NUpdateResponse() tailcfg.C2NUpdateResponse {
|
||||
// If NewUpdater does not return an error, we can update the installation.
|
||||
//
|
||||
// Note that we create the Updater solely to check for errors; we do not
|
||||
// invoke it here. For this purpose, it is ok to pass it a zero Arguments.
|
||||
prefs := b.Prefs().AutoUpdate()
|
||||
return tailcfg.C2NUpdateResponse{
|
||||
Enabled: envknob.AllowsRemoteUpdate() || prefs.Apply.EqualBool(true),
|
||||
Supported: clientupdate.CanAutoUpdate() && !version.IsMacSysExt(),
|
||||
}
|
||||
}
|
||||
|
||||
func (b *LocalBackend) c2nUpdateStarted() bool {
|
||||
b.mu.Lock()
|
||||
defer b.mu.Unlock()
|
||||
return b.c2nUpdateStatus.started
|
||||
}
|
||||
|
||||
func (b *LocalBackend) setC2NUpdateStarted(v bool) {
|
||||
b.mu.Lock()
|
||||
defer b.mu.Unlock()
|
||||
b.c2nUpdateStatus.started = v
|
||||
}
|
||||
|
||||
func (b *LocalBackend) trySetC2NUpdateStarted() bool {
|
||||
b.mu.Lock()
|
||||
defer b.mu.Unlock()
|
||||
if b.c2nUpdateStatus.started {
|
||||
return false
|
||||
}
|
||||
b.c2nUpdateStatus.started = true
|
||||
return true
|
||||
}
|
||||
|
||||
// findCmdTailscale looks for the cmd/tailscale that corresponds to the
|
||||
// currently running cmd/tailscaled. It's up to the caller to verify that the
|
||||
// two match, but this function does its best to find the right one. Notably, it
|
||||
// doesn't use $PATH for security reasons.
|
||||
func findCmdTailscale() (string, error) {
|
||||
self, err := os.Executable()
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
var ts string
|
||||
switch runtime.GOOS {
|
||||
case "linux":
|
||||
if self == "/usr/sbin/tailscaled" || self == "/usr/bin/tailscaled" {
|
||||
ts = "/usr/bin/tailscale"
|
||||
}
|
||||
if self == "/usr/local/sbin/tailscaled" || self == "/usr/local/bin/tailscaled" {
|
||||
ts = "/usr/local/bin/tailscale"
|
||||
}
|
||||
switch distro.Get() {
|
||||
case distro.QNAP:
|
||||
// The volume under /share/ where qpkg are installed is not
|
||||
// predictable. But the rest of the path is.
|
||||
ok, err := filepath.Match("/share/*/.qpkg/Tailscale/tailscaled", self)
|
||||
if err == nil && ok {
|
||||
ts = filepath.Join(filepath.Dir(self), "tailscale")
|
||||
}
|
||||
case distro.Unraid:
|
||||
if self == "/usr/local/emhttp/plugins/tailscale/bin/tailscaled" {
|
||||
ts = "/usr/local/emhttp/plugins/tailscale/bin/tailscale"
|
||||
}
|
||||
}
|
||||
case "windows":
|
||||
ts = filepath.Join(filepath.Dir(self), "tailscale.exe")
|
||||
case "freebsd":
|
||||
if self == "/usr/local/bin/tailscaled" {
|
||||
ts = "/usr/local/bin/tailscale"
|
||||
}
|
||||
default:
|
||||
return "", fmt.Errorf("unsupported OS %v", runtime.GOOS)
|
||||
}
|
||||
if ts != "" && regularFileExists(ts) {
|
||||
return ts, nil
|
||||
}
|
||||
return "", errors.New("tailscale executable not found in expected place")
|
||||
}
|
||||
|
||||
func tailscaleUpdateCmd(cmdTS string) *exec.Cmd {
|
||||
defaultCmd := exec.Command(cmdTS, "update", "--yes")
|
||||
if runtime.GOOS != "linux" {
|
||||
return defaultCmd
|
||||
}
|
||||
if _, err := exec.LookPath("systemd-run"); err != nil {
|
||||
return defaultCmd
|
||||
}
|
||||
|
||||
// When systemd-run is available, use it to run the update command. This
|
||||
// creates a new temporary unit separate from the tailscaled unit. When
|
||||
// tailscaled is restarted during the update, systemd won't kill this
|
||||
// temporary update unit, which could cause unexpected breakage.
|
||||
//
|
||||
// We want to use a few optional flags:
|
||||
// * --wait, to block the update command until completion (added in systemd 232)
|
||||
// * --pipe, to collect stdout/stderr (added in systemd 235)
|
||||
// * --collect, to clean up failed runs from memory (added in systemd 236)
|
||||
//
|
||||
// We need to check the version of systemd to figure out if those flags are
|
||||
// available.
|
||||
//
|
||||
// The output will look like:
|
||||
//
|
||||
// systemd 255 (255.7-1-arch)
|
||||
// +PAM +AUDIT ... other feature flags ...
|
||||
systemdVerOut, err := exec.Command("systemd-run", "--version").Output()
|
||||
if err != nil {
|
||||
return defaultCmd
|
||||
}
|
||||
parts := strings.Fields(string(systemdVerOut))
|
||||
if len(parts) < 2 || parts[0] != "systemd" {
|
||||
return defaultCmd
|
||||
}
|
||||
systemdVer, err := strconv.Atoi(parts[1])
|
||||
if err != nil {
|
||||
return defaultCmd
|
||||
}
|
||||
if systemdVer >= 236 {
|
||||
return exec.Command("systemd-run", "--wait", "--pipe", "--collect", cmdTS, "update", "--yes")
|
||||
} else if systemdVer >= 235 {
|
||||
return exec.Command("systemd-run", "--wait", "--pipe", cmdTS, "update", "--yes")
|
||||
} else if systemdVer >= 232 {
|
||||
return exec.Command("systemd-run", "--wait", cmdTS, "update", "--yes")
|
||||
} else {
|
||||
return exec.Command("systemd-run", cmdTS, "update", "--yes")
|
||||
}
|
||||
}
|
||||
|
||||
func regularFileExists(path string) bool {
|
||||
fi, err := os.Stat(path)
|
||||
return err == nil && fi.Mode().IsRegular()
|
||||
}
|
||||
|
||||
// handleC2NTLSCertStatus returns info about the last TLS certificate issued for the
|
||||
// provided domain. This can be called by the controlplane to clean up DNS TXT
|
||||
// records when they're no longer needed by LetsEncrypt.
|
||||
//
|
||||
// It does not kick off a cert fetch or async refresh. It only reports anything
|
||||
// that's already sitting on disk, and only reports metadata about the public
|
||||
// cert (stuff that'd be the in CT logs anyway).
|
||||
func handleC2NTLSCertStatus(b *LocalBackend, w http.ResponseWriter, r *http.Request) {
|
||||
cs, err := b.getCertStore()
|
||||
if err != nil {
|
||||
http.Error(w, err.Error(), http.StatusInternalServerError)
|
||||
return
|
||||
}
|
||||
|
||||
domain := r.FormValue("domain")
|
||||
if domain == "" {
|
||||
http.Error(w, "no 'domain'", http.StatusBadRequest)
|
||||
return
|
||||
}
|
||||
|
||||
ret := &tailcfg.C2NTLSCertInfo{}
|
||||
pair, err := getCertPEMCached(cs, domain, b.clock.Now())
|
||||
ret.Valid = err == nil
|
||||
if err != nil {
|
||||
ret.Error = err.Error()
|
||||
if errors.Is(err, errCertExpired) {
|
||||
ret.Expired = true
|
||||
} else if errors.Is(err, ipn.ErrStateNotExist) {
|
||||
ret.Missing = true
|
||||
ret.Error = "no certificate"
|
||||
}
|
||||
} else {
|
||||
block, _ := pem.Decode(pair.CertPEM)
|
||||
if block == nil {
|
||||
ret.Error = "invalid PEM"
|
||||
ret.Valid = false
|
||||
} else {
|
||||
cert, err := x509.ParseCertificate(block.Bytes)
|
||||
if err != nil {
|
||||
ret.Error = fmt.Sprintf("invalid certificate: %v", err)
|
||||
ret.Valid = false
|
||||
} else {
|
||||
ret.NotBefore = cert.NotBefore.UTC().Format(time.RFC3339)
|
||||
ret.NotAfter = cert.NotAfter.UTC().Format(time.RFC3339)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
writeJSON(w, ret)
|
||||
}
|
||||
|
||||
2
vendor/tailscale.com/ipn/ipnlocal/c2n_pprof.go
generated
vendored
2
vendor/tailscale.com/ipn/ipnlocal/c2n_pprof.go
generated
vendored
@@ -1,7 +1,7 @@
|
||||
// Copyright (c) Tailscale Inc & AUTHORS
|
||||
// SPDX-License-Identifier: BSD-3-Clause
|
||||
|
||||
//go:build !js && !wasm
|
||||
//go:build !js && !wasm && !ts_omit_debug
|
||||
|
||||
package ipnlocal
|
||||
|
||||
|
||||
186
vendor/tailscale.com/ipn/ipnlocal/captiveportal.go
generated
vendored
Normal file
186
vendor/tailscale.com/ipn/ipnlocal/captiveportal.go
generated
vendored
Normal file
@@ -0,0 +1,186 @@
|
||||
// Copyright (c) Tailscale Inc & AUTHORS
|
||||
// SPDX-License-Identifier: BSD-3-Clause
|
||||
|
||||
//go:build !ts_omit_captiveportal
|
||||
|
||||
package ipnlocal
|
||||
|
||||
import (
|
||||
"context"
|
||||
"time"
|
||||
|
||||
"tailscale.com/health"
|
||||
"tailscale.com/net/captivedetection"
|
||||
"tailscale.com/util/clientmetric"
|
||||
)
|
||||
|
||||
func init() {
|
||||
hookCaptivePortalHealthChange.Set(captivePortalHealthChange)
|
||||
hookCheckCaptivePortalLoop.Set(checkCaptivePortalLoop)
|
||||
}
|
||||
|
||||
var metricCaptivePortalDetected = clientmetric.NewCounter("captiveportal_detected")
|
||||
|
||||
// captivePortalDetectionInterval is the duration to wait in an unhealthy state with connectivity broken
|
||||
// before running captive portal detection.
|
||||
const captivePortalDetectionInterval = 2 * time.Second
|
||||
|
||||
func captivePortalHealthChange(b *LocalBackend, state *health.State) {
|
||||
isConnectivityImpacted := false
|
||||
for _, w := range state.Warnings {
|
||||
// Ignore the captive portal warnable itself.
|
||||
if w.ImpactsConnectivity && w.WarnableCode != captivePortalWarnable.Code {
|
||||
isConnectivityImpacted = true
|
||||
break
|
||||
}
|
||||
}
|
||||
|
||||
// captiveCtx can be changed, and is protected with 'mu'; grab that
|
||||
// before we start our select, below.
|
||||
//
|
||||
// It is guaranteed to be non-nil.
|
||||
b.mu.Lock()
|
||||
ctx := b.captiveCtx
|
||||
b.mu.Unlock()
|
||||
|
||||
// If the context is canceled, we don't need to do anything.
|
||||
if ctx.Err() != nil {
|
||||
return
|
||||
}
|
||||
|
||||
if isConnectivityImpacted {
|
||||
b.logf("health: connectivity impacted; triggering captive portal detection")
|
||||
|
||||
// Ensure that we select on captiveCtx so that we can time out
|
||||
// triggering captive portal detection if the backend is shutdown.
|
||||
select {
|
||||
case b.needsCaptiveDetection <- true:
|
||||
case <-ctx.Done():
|
||||
}
|
||||
} else {
|
||||
// If connectivity is not impacted, we know for sure we're not behind a captive portal,
|
||||
// so drop any warning, and signal that we don't need captive portal detection.
|
||||
b.health.SetHealthy(captivePortalWarnable)
|
||||
select {
|
||||
case b.needsCaptiveDetection <- false:
|
||||
case <-ctx.Done():
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// captivePortalWarnable is a Warnable which is set to an unhealthy state when a captive portal is detected.
|
||||
var captivePortalWarnable = health.Register(&health.Warnable{
|
||||
Code: "captive-portal-detected",
|
||||
Title: "Captive portal detected",
|
||||
// High severity, because captive portals block all traffic and require user intervention.
|
||||
Severity: health.SeverityHigh,
|
||||
Text: health.StaticMessage("This network requires you to log in using your web browser."),
|
||||
ImpactsConnectivity: true,
|
||||
})
|
||||
|
||||
func checkCaptivePortalLoop(b *LocalBackend, ctx context.Context) {
|
||||
var tmr *time.Timer
|
||||
|
||||
maybeStartTimer := func() {
|
||||
// If there's an existing timer, nothing to do; just continue
|
||||
// waiting for it to expire. Otherwise, create a new timer.
|
||||
if tmr == nil {
|
||||
tmr = time.NewTimer(captivePortalDetectionInterval)
|
||||
}
|
||||
}
|
||||
maybeStopTimer := func() {
|
||||
if tmr == nil {
|
||||
return
|
||||
}
|
||||
if !tmr.Stop() {
|
||||
<-tmr.C
|
||||
}
|
||||
tmr = nil
|
||||
}
|
||||
|
||||
for {
|
||||
if ctx.Err() != nil {
|
||||
maybeStopTimer()
|
||||
return
|
||||
}
|
||||
|
||||
// First, see if we have a signal on our "healthy" channel, which
|
||||
// takes priority over an existing timer. Because a select is
|
||||
// nondeterministic, we explicitly check this channel before
|
||||
// entering the main select below, so that we're guaranteed to
|
||||
// stop the timer before starting captive portal detection.
|
||||
select {
|
||||
case needsCaptiveDetection := <-b.needsCaptiveDetection:
|
||||
if needsCaptiveDetection {
|
||||
maybeStartTimer()
|
||||
} else {
|
||||
maybeStopTimer()
|
||||
}
|
||||
default:
|
||||
}
|
||||
|
||||
var timerChan <-chan time.Time
|
||||
if tmr != nil {
|
||||
timerChan = tmr.C
|
||||
}
|
||||
select {
|
||||
case <-ctx.Done():
|
||||
// All done; stop the timer and then exit.
|
||||
maybeStopTimer()
|
||||
return
|
||||
case <-timerChan:
|
||||
// Kick off captive portal check
|
||||
b.performCaptiveDetection()
|
||||
// nil out timer to force recreation
|
||||
tmr = nil
|
||||
case needsCaptiveDetection := <-b.needsCaptiveDetection:
|
||||
if needsCaptiveDetection {
|
||||
maybeStartTimer()
|
||||
} else {
|
||||
// Healthy; cancel any existing timer
|
||||
maybeStopTimer()
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// shouldRunCaptivePortalDetection reports whether captive portal detection
|
||||
// should be run. It is enabled by default, but can be disabled via a control
|
||||
// knob. It is also only run when the user explicitly wants the backend to be
|
||||
// running.
|
||||
func (b *LocalBackend) shouldRunCaptivePortalDetection() bool {
|
||||
b.mu.Lock()
|
||||
defer b.mu.Unlock()
|
||||
return !b.ControlKnobs().DisableCaptivePortalDetection.Load() && b.pm.prefs.WantRunning()
|
||||
}
|
||||
|
||||
// performCaptiveDetection checks if captive portal detection is enabled via controlknob. If so, it runs
|
||||
// the detection and updates the Warnable accordingly.
|
||||
func (b *LocalBackend) performCaptiveDetection() {
|
||||
if !b.shouldRunCaptivePortalDetection() {
|
||||
return
|
||||
}
|
||||
|
||||
d := captivedetection.NewDetector(b.logf)
|
||||
b.mu.Lock() // for b.hostinfo
|
||||
cn := b.currentNode()
|
||||
dm := cn.DERPMap()
|
||||
preferredDERP := 0
|
||||
if b.hostinfo != nil {
|
||||
if b.hostinfo.NetInfo != nil {
|
||||
preferredDERP = b.hostinfo.NetInfo.PreferredDERP
|
||||
}
|
||||
}
|
||||
ctx := b.ctx
|
||||
netMon := b.NetMon()
|
||||
b.mu.Unlock()
|
||||
found := d.Detect(ctx, netMon, dm, preferredDERP)
|
||||
if found {
|
||||
if !b.health.IsUnhealthy(captivePortalWarnable) {
|
||||
metricCaptivePortalDetected.Add(1)
|
||||
}
|
||||
b.health.SetUnhealthy(captivePortalWarnable, health.Args{})
|
||||
} else {
|
||||
b.health.SetHealthy(captivePortalWarnable)
|
||||
}
|
||||
}
|
||||
115
vendor/tailscale.com/ipn/ipnlocal/cert.go
generated
vendored
115
vendor/tailscale.com/ipn/ipnlocal/cert.go
generated
vendored
@@ -1,7 +1,7 @@
|
||||
// Copyright (c) Tailscale Inc & AUTHORS
|
||||
// SPDX-License-Identifier: BSD-3-Clause
|
||||
|
||||
//go:build !js
|
||||
//go:build !js && !ts_omit_acme
|
||||
|
||||
package ipnlocal
|
||||
|
||||
@@ -24,22 +24,25 @@ import (
|
||||
"log"
|
||||
randv2 "math/rand/v2"
|
||||
"net"
|
||||
"net/http"
|
||||
"os"
|
||||
"path/filepath"
|
||||
"runtime"
|
||||
"slices"
|
||||
"strings"
|
||||
"sync"
|
||||
"time"
|
||||
|
||||
"tailscale.com/atomicfile"
|
||||
"tailscale.com/envknob"
|
||||
"tailscale.com/feature/buildfeatures"
|
||||
"tailscale.com/hostinfo"
|
||||
"tailscale.com/ipn"
|
||||
"tailscale.com/ipn/ipnstate"
|
||||
"tailscale.com/ipn/store"
|
||||
"tailscale.com/ipn/store/mem"
|
||||
"tailscale.com/net/bakedroots"
|
||||
"tailscale.com/syncs"
|
||||
"tailscale.com/tailcfg"
|
||||
"tailscale.com/tempfork/acme"
|
||||
"tailscale.com/types/logger"
|
||||
"tailscale.com/util/testenv"
|
||||
@@ -47,15 +50,19 @@ import (
|
||||
"tailscale.com/version/distro"
|
||||
)
|
||||
|
||||
func init() {
|
||||
RegisterC2N("GET /tls-cert-status", handleC2NTLSCertStatus)
|
||||
}
|
||||
|
||||
// Process-wide cache. (A new *Handler is created per connection,
|
||||
// effectively per request)
|
||||
var (
|
||||
// acmeMu guards all ACME operations, so concurrent requests
|
||||
// for certs don't slam ACME. The first will go through and
|
||||
// populate the on-disk cache and the rest should use that.
|
||||
acmeMu sync.Mutex
|
||||
acmeMu syncs.Mutex
|
||||
|
||||
renewMu sync.Mutex // lock order: acmeMu before renewMu
|
||||
renewMu syncs.Mutex // lock order: acmeMu before renewMu
|
||||
renewCertAt = map[string]time.Time{}
|
||||
)
|
||||
|
||||
@@ -67,7 +74,7 @@ func (b *LocalBackend) certDir() (string, error) {
|
||||
// As a workaround for Synology DSM6 not having a "var" directory, use the
|
||||
// app's "etc" directory (on a small partition) to hold certs at least.
|
||||
// See https://github.com/tailscale/tailscale/issues/4060#issuecomment-1186592251
|
||||
if d == "" && runtime.GOOS == "linux" && distro.Get() == distro.Synology && distro.DSMVersion() == 6 {
|
||||
if buildfeatures.HasSynology && d == "" && runtime.GOOS == "linux" && distro.Get() == distro.Synology && distro.DSMVersion() == 6 {
|
||||
d = "/var/packages/Tailscale/etc" // base; we append "certs" below
|
||||
}
|
||||
if d == "" {
|
||||
@@ -100,6 +107,15 @@ func (b *LocalBackend) GetCertPEM(ctx context.Context, domain string) (*TLSCertK
|
||||
// If a cert is expired, or expires sooner than minValidity, it will be renewed
|
||||
// synchronously. Otherwise it will be renewed asynchronously.
|
||||
func (b *LocalBackend) GetCertPEMWithValidity(ctx context.Context, domain string, minValidity time.Duration) (*TLSCertKeyPair, error) {
|
||||
b.mu.Lock()
|
||||
getCertForTest := b.getCertForTest
|
||||
b.mu.Unlock()
|
||||
|
||||
if getCertForTest != nil {
|
||||
testenv.AssertInTest()
|
||||
return getCertForTest(domain)
|
||||
}
|
||||
|
||||
if !validLookingCertDomain(domain) {
|
||||
return nil, errors.New("invalid domain")
|
||||
}
|
||||
@@ -137,7 +153,11 @@ func (b *LocalBackend) GetCertPEMWithValidity(ctx context.Context, domain string
|
||||
if minValidity == 0 {
|
||||
logf("starting async renewal")
|
||||
// Start renewal in the background, return current valid cert.
|
||||
b.goTracker.Go(func() { getCertPEM(context.Background(), b, cs, logf, traceACME, domain, now, minValidity) })
|
||||
b.goTracker.Go(func() {
|
||||
if _, err := getCertPEM(context.Background(), b, cs, logf, traceACME, domain, now, minValidity); err != nil {
|
||||
logf("async renewal failed: getCertPem: %v", err)
|
||||
}
|
||||
})
|
||||
return pair, nil
|
||||
}
|
||||
// If the caller requested a specific validity duration, fall through
|
||||
@@ -292,6 +312,16 @@ func (b *LocalBackend) getCertStore() (certStore, error) {
|
||||
return certFileStore{dir: dir, testRoots: testX509Roots}, nil
|
||||
}
|
||||
|
||||
// ConfigureCertsForTest sets a certificate retrieval function to be used by
|
||||
// this local backend, skipping the usual ACME certificate registration. Should
|
||||
// only be used in tests.
|
||||
func (b *LocalBackend) ConfigureCertsForTest(getCert func(hostname string) (*TLSCertKeyPair, error)) {
|
||||
testenv.AssertInTest()
|
||||
b.mu.Lock()
|
||||
b.getCertForTest = getCert
|
||||
b.mu.Unlock()
|
||||
}
|
||||
|
||||
// certFileStore implements certStore by storing the cert & key files in the named directory.
|
||||
type certFileStore struct {
|
||||
dir string
|
||||
@@ -484,14 +514,15 @@ var getCertPEM = func(ctx context.Context, b *LocalBackend, cs certStore, logf l
|
||||
// In case this method was triggered multiple times in parallel (when
|
||||
// serving incoming requests), check whether one of the other goroutines
|
||||
// already renewed the cert before us.
|
||||
if p, err := getCertPEMCached(cs, domain, now); err == nil {
|
||||
previous, err := getCertPEMCached(cs, domain, now)
|
||||
if err == nil {
|
||||
// shouldStartDomainRenewal caches its result so it's OK to call this
|
||||
// frequently.
|
||||
shouldRenew, err := b.shouldStartDomainRenewal(cs, domain, now, p, minValidity)
|
||||
shouldRenew, err := b.shouldStartDomainRenewal(cs, domain, now, previous, minValidity)
|
||||
if err != nil {
|
||||
logf("error checking for certificate renewal: %v", err)
|
||||
} else if !shouldRenew {
|
||||
return p, nil
|
||||
return previous, nil
|
||||
}
|
||||
} else if !errors.Is(err, ipn.ErrStateNotExist) && !errors.Is(err, errCertExpired) {
|
||||
return nil, err
|
||||
@@ -536,7 +567,20 @@ var getCertPEM = func(ctx context.Context, b *LocalBackend, cs certStore, logf l
|
||||
return nil, err
|
||||
}
|
||||
|
||||
order, err := ac.AuthorizeOrder(ctx, []acme.AuthzID{{Type: "dns", Value: domain}})
|
||||
// If we have a previous cert, include it in the order. Assuming we're
|
||||
// within the ARI renewal window this should exclude us from LE rate
|
||||
// limits.
|
||||
// Note that this order extension will fail renewals if the ACME account key has changed
|
||||
// since the last issuance, see
|
||||
// https://github.com/tailscale/tailscale/issues/18251
|
||||
var opts []acme.OrderOption
|
||||
if previous != nil && !envknob.Bool("TS_DEBUG_ACME_FORCE_RENEWAL") {
|
||||
prevCrt, err := previous.parseCertificate()
|
||||
if err == nil {
|
||||
opts = append(opts, acme.WithOrderReplacesCert(prevCrt))
|
||||
}
|
||||
}
|
||||
order, err := ac.AuthorizeOrder(ctx, []acme.AuthzID{{Type: "dns", Value: domain}}, opts...)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
@@ -825,3 +869,54 @@ func checkCertDomain(st *ipnstate.Status, domain string) error {
|
||||
}
|
||||
return fmt.Errorf("invalid domain %q; must be one of %q", domain, st.CertDomains)
|
||||
}
|
||||
|
||||
// handleC2NTLSCertStatus returns info about the last TLS certificate issued for the
|
||||
// provided domain. This can be called by the controlplane to clean up DNS TXT
|
||||
// records when they're no longer needed by LetsEncrypt.
|
||||
//
|
||||
// It does not kick off a cert fetch or async refresh. It only reports anything
|
||||
// that's already sitting on disk, and only reports metadata about the public
|
||||
// cert (stuff that'd be the in CT logs anyway).
|
||||
func handleC2NTLSCertStatus(b *LocalBackend, w http.ResponseWriter, r *http.Request) {
|
||||
cs, err := b.getCertStore()
|
||||
if err != nil {
|
||||
http.Error(w, err.Error(), http.StatusInternalServerError)
|
||||
return
|
||||
}
|
||||
|
||||
domain := r.FormValue("domain")
|
||||
if domain == "" {
|
||||
http.Error(w, "no 'domain'", http.StatusBadRequest)
|
||||
return
|
||||
}
|
||||
|
||||
ret := &tailcfg.C2NTLSCertInfo{}
|
||||
pair, err := getCertPEMCached(cs, domain, b.clock.Now())
|
||||
ret.Valid = err == nil
|
||||
if err != nil {
|
||||
ret.Error = err.Error()
|
||||
if errors.Is(err, errCertExpired) {
|
||||
ret.Expired = true
|
||||
} else if errors.Is(err, ipn.ErrStateNotExist) {
|
||||
ret.Missing = true
|
||||
ret.Error = "no certificate"
|
||||
}
|
||||
} else {
|
||||
block, _ := pem.Decode(pair.CertPEM)
|
||||
if block == nil {
|
||||
ret.Error = "invalid PEM"
|
||||
ret.Valid = false
|
||||
} else {
|
||||
cert, err := x509.ParseCertificate(block.Bytes)
|
||||
if err != nil {
|
||||
ret.Error = fmt.Sprintf("invalid certificate: %v", err)
|
||||
ret.Valid = false
|
||||
} else {
|
||||
ret.NotBefore = cert.NotBefore.UTC().Format(time.RFC3339)
|
||||
ret.NotAfter = cert.NotAfter.UTC().Format(time.RFC3339)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
writeJSON(w, ret)
|
||||
}
|
||||
|
||||
@@ -1,20 +1,30 @@
|
||||
// Copyright (c) Tailscale Inc & AUTHORS
|
||||
// SPDX-License-Identifier: BSD-3-Clause
|
||||
|
||||
//go:build js || ts_omit_acme
|
||||
|
||||
package ipnlocal
|
||||
|
||||
import (
|
||||
"context"
|
||||
"errors"
|
||||
"io"
|
||||
"net/http"
|
||||
"time"
|
||||
)
|
||||
|
||||
func init() {
|
||||
RegisterC2N("GET /tls-cert-status", handleC2NTLSCertStatusDisabled)
|
||||
}
|
||||
|
||||
var errNoCerts = errors.New("cert support not compiled in this build")
|
||||
|
||||
type TLSCertKeyPair struct {
|
||||
CertPEM, KeyPEM []byte
|
||||
}
|
||||
|
||||
func (b *LocalBackend) GetCertPEM(ctx context.Context, domain string) (*TLSCertKeyPair, error) {
|
||||
return nil, errors.New("not implemented for js/wasm")
|
||||
return nil, errNoCerts
|
||||
}
|
||||
|
||||
var errCertExpired = errors.New("cert expired")
|
||||
@@ -22,9 +32,14 @@ var errCertExpired = errors.New("cert expired")
|
||||
type certStore interface{}
|
||||
|
||||
func getCertPEMCached(cs certStore, domain string, now time.Time) (p *TLSCertKeyPair, err error) {
|
||||
return nil, errors.New("not implemented for js/wasm")
|
||||
return nil, errNoCerts
|
||||
}
|
||||
|
||||
func (b *LocalBackend) getCertStore() (certStore, error) {
|
||||
return nil, errors.New("not implemented for js/wasm")
|
||||
return nil, errNoCerts
|
||||
}
|
||||
|
||||
func handleC2NTLSCertStatusDisabled(b *LocalBackend, w http.ResponseWriter, r *http.Request) {
|
||||
w.Header().Set("Content-Type", "application/json")
|
||||
io.WriteString(w, `{"Missing":true}`) // a minimal tailcfg.C2NTLSCertInfo
|
||||
}
|
||||
178
vendor/tailscale.com/ipn/ipnlocal/desktop_sessions.go
generated
vendored
178
vendor/tailscale.com/ipn/ipnlocal/desktop_sessions.go
generated
vendored
@@ -1,178 +0,0 @@
|
||||
// Copyright (c) Tailscale Inc & AUTHORS
|
||||
// SPDX-License-Identifier: BSD-3-Clause
|
||||
|
||||
// Both the desktop session manager and multi-user support
|
||||
// are currently available only on Windows.
|
||||
// This file does not need to be built for other platforms.
|
||||
|
||||
//go:build windows && !ts_omit_desktop_sessions
|
||||
|
||||
package ipnlocal
|
||||
|
||||
import (
|
||||
"cmp"
|
||||
"errors"
|
||||
"fmt"
|
||||
"sync"
|
||||
|
||||
"tailscale.com/feature"
|
||||
"tailscale.com/ipn"
|
||||
"tailscale.com/ipn/desktop"
|
||||
"tailscale.com/tsd"
|
||||
"tailscale.com/types/logger"
|
||||
"tailscale.com/util/syspolicy"
|
||||
)
|
||||
|
||||
func init() {
|
||||
feature.Register("desktop-sessions")
|
||||
RegisterExtension("desktop-sessions", newDesktopSessionsExt)
|
||||
}
|
||||
|
||||
// desktopSessionsExt implements [localBackendExtension].
|
||||
var _ localBackendExtension = (*desktopSessionsExt)(nil)
|
||||
|
||||
// desktopSessionsExt extends [LocalBackend] with desktop session management.
|
||||
// It keeps Tailscale running in the background if Always-On mode is enabled,
|
||||
// and switches to an appropriate profile when a user signs in or out,
|
||||
// locks their screen, or disconnects a remote session.
|
||||
type desktopSessionsExt struct {
|
||||
logf logger.Logf
|
||||
sm desktop.SessionManager
|
||||
|
||||
*LocalBackend // or nil, until Init is called
|
||||
cleanup []func() // cleanup functions to call on shutdown
|
||||
|
||||
// mu protects all following fields.
|
||||
// When both mu and [LocalBackend.mu] need to be taken,
|
||||
// [LocalBackend.mu] must be taken before mu.
|
||||
mu sync.Mutex
|
||||
id2sess map[desktop.SessionID]*desktop.Session
|
||||
}
|
||||
|
||||
// newDesktopSessionsExt returns a new [desktopSessionsExt],
|
||||
// or an error if [desktop.SessionManager] is not available.
|
||||
func newDesktopSessionsExt(logf logger.Logf, sys *tsd.System) (localBackendExtension, error) {
|
||||
sm, ok := sys.SessionManager.GetOK()
|
||||
if !ok {
|
||||
return nil, errors.New("session manager is not available")
|
||||
}
|
||||
return &desktopSessionsExt{logf: logf, sm: sm, id2sess: make(map[desktop.SessionID]*desktop.Session)}, nil
|
||||
}
|
||||
|
||||
// Init implements [localBackendExtension].
|
||||
func (e *desktopSessionsExt) Init(lb *LocalBackend) (err error) {
|
||||
e.LocalBackend = lb
|
||||
unregisterResolver := lb.RegisterBackgroundProfileResolver(e.getBackgroundProfile)
|
||||
unregisterSessionCb, err := e.sm.RegisterStateCallback(e.updateDesktopSessionState)
|
||||
if err != nil {
|
||||
unregisterResolver()
|
||||
return fmt.Errorf("session callback registration failed: %w", err)
|
||||
}
|
||||
e.cleanup = []func(){unregisterResolver, unregisterSessionCb}
|
||||
return nil
|
||||
}
|
||||
|
||||
// updateDesktopSessionState is a [desktop.SessionStateCallback]
|
||||
// invoked by [desktop.SessionManager] once for each existing session
|
||||
// and whenever the session state changes. It updates the session map
|
||||
// and switches to the best profile if necessary.
|
||||
func (e *desktopSessionsExt) updateDesktopSessionState(session *desktop.Session) {
|
||||
e.mu.Lock()
|
||||
if session.Status != desktop.ClosedSession {
|
||||
e.id2sess[session.ID] = session
|
||||
} else {
|
||||
delete(e.id2sess, session.ID)
|
||||
}
|
||||
e.mu.Unlock()
|
||||
|
||||
var action string
|
||||
switch session.Status {
|
||||
case desktop.ForegroundSession:
|
||||
// The user has either signed in or unlocked their session.
|
||||
// For remote sessions, this may also mean the user has connected.
|
||||
// The distinction isn't important for our purposes,
|
||||
// so let's always say "signed in".
|
||||
action = "signed in to"
|
||||
case desktop.BackgroundSession:
|
||||
action = "locked"
|
||||
case desktop.ClosedSession:
|
||||
action = "signed out from"
|
||||
default:
|
||||
panic("unreachable")
|
||||
}
|
||||
maybeUsername, _ := session.User.Username()
|
||||
userIdentifier := cmp.Or(maybeUsername, string(session.User.UserID()), "user")
|
||||
reason := fmt.Sprintf("%s %s session %v", userIdentifier, action, session.ID)
|
||||
|
||||
e.SwitchToBestProfile(reason)
|
||||
}
|
||||
|
||||
// getBackgroundProfile is a [profileResolver] that works as follows:
|
||||
//
|
||||
// If Always-On mode is disabled, it returns no profile ("","",false).
|
||||
//
|
||||
// If AlwaysOn mode is enabled, it returns the current profile unless:
|
||||
// - The current user has signed out.
|
||||
// - Another user has a foreground (i.e. active/unlocked) session.
|
||||
//
|
||||
// If the current user's session runs in the background and no other user
|
||||
// has a foreground session, it returns the current profile. This applies
|
||||
// when a locally signed-in user locks their screen or when a remote user
|
||||
// disconnects without signing out.
|
||||
//
|
||||
// In all other cases, it returns no profile ("","",false).
|
||||
//
|
||||
// It is called with [LocalBackend.mu] locked.
|
||||
func (e *desktopSessionsExt) getBackgroundProfile() (_ ipn.WindowsUserID, _ ipn.ProfileID, ok bool) {
|
||||
e.mu.Lock()
|
||||
defer e.mu.Unlock()
|
||||
|
||||
if alwaysOn, _ := syspolicy.GetBoolean(syspolicy.AlwaysOn, false); !alwaysOn {
|
||||
return "", "", false
|
||||
}
|
||||
|
||||
isCurrentUserSingedIn := false
|
||||
var foregroundUIDs []ipn.WindowsUserID
|
||||
for _, s := range e.id2sess {
|
||||
switch uid := s.User.UserID(); uid {
|
||||
case e.pm.CurrentUserID():
|
||||
isCurrentUserSingedIn = true
|
||||
if s.Status == desktop.ForegroundSession {
|
||||
// Keep the current profile if the user has a foreground session.
|
||||
return e.pm.CurrentUserID(), e.pm.CurrentProfile().ID(), true
|
||||
}
|
||||
default:
|
||||
if s.Status == desktop.ForegroundSession {
|
||||
foregroundUIDs = append(foregroundUIDs, uid)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// If there's no current user (e.g., tailscaled just started), or if the current
|
||||
// user has no foreground session, switch to the default profile of the first user
|
||||
// with a foreground session, if any.
|
||||
for _, uid := range foregroundUIDs {
|
||||
if profileID := e.pm.DefaultUserProfileID(uid); profileID != "" {
|
||||
return uid, profileID, true
|
||||
}
|
||||
}
|
||||
|
||||
// If no user has a foreground session but the current user is still signed in,
|
||||
// keep the current profile even if the session is not in the foreground,
|
||||
// such as when the screen is locked or a remote session is disconnected.
|
||||
if len(foregroundUIDs) == 0 && isCurrentUserSingedIn {
|
||||
return e.pm.CurrentUserID(), e.pm.CurrentProfile().ID(), true
|
||||
}
|
||||
|
||||
return "", "", false
|
||||
}
|
||||
|
||||
// Shutdown implements [localBackendExtension].
|
||||
func (e *desktopSessionsExt) Shutdown() error {
|
||||
for _, f := range e.cleanup {
|
||||
f()
|
||||
}
|
||||
e.cleanup = nil
|
||||
e.LocalBackend = nil
|
||||
return nil
|
||||
}
|
||||
233
vendor/tailscale.com/ipn/ipnlocal/drive.go
generated
vendored
233
vendor/tailscale.com/ipn/ipnlocal/drive.go
generated
vendored
@@ -1,51 +1,35 @@
|
||||
// Copyright (c) Tailscale Inc & AUTHORS
|
||||
// SPDX-License-Identifier: BSD-3-Clause
|
||||
|
||||
//go:build !ts_omit_drive
|
||||
|
||||
package ipnlocal
|
||||
|
||||
import (
|
||||
"cmp"
|
||||
"errors"
|
||||
"fmt"
|
||||
"io"
|
||||
"net/http"
|
||||
"net/netip"
|
||||
"os"
|
||||
"slices"
|
||||
|
||||
"tailscale.com/drive"
|
||||
"tailscale.com/ipn"
|
||||
"tailscale.com/tailcfg"
|
||||
"tailscale.com/types/logger"
|
||||
"tailscale.com/types/netmap"
|
||||
"tailscale.com/types/views"
|
||||
"tailscale.com/util/httpm"
|
||||
)
|
||||
|
||||
const (
|
||||
// DriveLocalPort is the port on which the Taildrive listens for location
|
||||
// connections on quad 100.
|
||||
DriveLocalPort = 8080
|
||||
)
|
||||
|
||||
// DriveSharingEnabled reports whether sharing to remote nodes via Taildrive is
|
||||
// enabled. This is currently based on checking for the drive:share node
|
||||
// attribute.
|
||||
func (b *LocalBackend) DriveSharingEnabled() bool {
|
||||
b.mu.Lock()
|
||||
defer b.mu.Unlock()
|
||||
return b.driveSharingEnabledLocked()
|
||||
func init() {
|
||||
hookSetNetMapLockedDrive.Set(setNetMapLockedDrive)
|
||||
}
|
||||
|
||||
func (b *LocalBackend) driveSharingEnabledLocked() bool {
|
||||
return b.netMap != nil && b.netMap.SelfNode.HasCap(tailcfg.NodeAttrsTaildriveShare)
|
||||
}
|
||||
|
||||
// DriveAccessEnabled reports whether accessing Taildrive shares on remote nodes
|
||||
// is enabled. This is currently based on checking for the drive:access node
|
||||
// attribute.
|
||||
func (b *LocalBackend) DriveAccessEnabled() bool {
|
||||
b.mu.Lock()
|
||||
defer b.mu.Unlock()
|
||||
return b.driveAccessEnabledLocked()
|
||||
}
|
||||
|
||||
func (b *LocalBackend) driveAccessEnabledLocked() bool {
|
||||
return b.netMap != nil && b.netMap.SelfNode.HasCap(tailcfg.NodeAttrsTaildriveAccess)
|
||||
func setNetMapLockedDrive(b *LocalBackend, nm *netmap.NetworkMap) {
|
||||
b.updateDrivePeersLocked(nm)
|
||||
b.driveNotifyCurrentSharesLocked()
|
||||
}
|
||||
|
||||
// DriveSetServerAddr tells Taildrive to use the given address for connecting
|
||||
@@ -266,7 +250,7 @@ func (b *LocalBackend) driveNotifyShares(shares views.SliceView[*drive.Share, dr
|
||||
// shares has changed since the last notification.
|
||||
func (b *LocalBackend) driveNotifyCurrentSharesLocked() {
|
||||
var shares views.SliceView[*drive.Share, drive.ShareView]
|
||||
if b.driveSharingEnabledLocked() {
|
||||
if b.DriveSharingEnabled() {
|
||||
// Only populate shares if sharing is enabled.
|
||||
shares = b.pm.prefs.DriveShares()
|
||||
}
|
||||
@@ -310,59 +294,206 @@ func (b *LocalBackend) updateDrivePeersLocked(nm *netmap.NetworkMap) {
|
||||
}
|
||||
|
||||
var driveRemotes []*drive.Remote
|
||||
if b.driveAccessEnabledLocked() {
|
||||
if b.DriveAccessEnabled() {
|
||||
// Only populate peers if access is enabled, otherwise leave blank.
|
||||
driveRemotes = b.driveRemotesFromPeers(nm)
|
||||
}
|
||||
|
||||
fs.SetRemotes(b.netMap.Domain, driveRemotes, b.newDriveTransport())
|
||||
fs.SetRemotes(nm.Domain, driveRemotes, b.newDriveTransport())
|
||||
}
|
||||
|
||||
func (b *LocalBackend) driveRemotesFromPeers(nm *netmap.NetworkMap) []*drive.Remote {
|
||||
b.logf("[v1] taildrive: setting up drive remotes from peers")
|
||||
driveRemotes := make([]*drive.Remote, 0, len(nm.Peers))
|
||||
for _, p := range nm.Peers {
|
||||
peerID := p.ID()
|
||||
url := fmt.Sprintf("%s/%s", peerAPIBase(nm, p), taildrivePrefix[1:])
|
||||
peer := p
|
||||
peerID := peer.ID()
|
||||
peerKey := peer.Key().ShortString()
|
||||
b.logf("[v1] taildrive: appending remote for peer %s", peerKey)
|
||||
driveRemotes = append(driveRemotes, &drive.Remote{
|
||||
Name: p.DisplayName(false),
|
||||
URL: url,
|
||||
URL: func() string {
|
||||
url := fmt.Sprintf("%s/%s", b.currentNode().PeerAPIBase(peer), taildrivePrefix[1:])
|
||||
b.logf("[v2] taildrive: url for peer %s: %s", peerKey, url)
|
||||
return url
|
||||
},
|
||||
Available: func() bool {
|
||||
// Peers are available to Taildrive if:
|
||||
// - They are online
|
||||
// - Their PeerAPI is reachable
|
||||
// - They are allowed to share at least one folder with us
|
||||
b.mu.Lock()
|
||||
latestNetMap := b.netMap
|
||||
b.mu.Unlock()
|
||||
|
||||
idx, found := slices.BinarySearchFunc(latestNetMap.Peers, peerID, func(candidate tailcfg.NodeView, id tailcfg.NodeID) int {
|
||||
return cmp.Compare(candidate.ID(), id)
|
||||
})
|
||||
if !found {
|
||||
cn := b.currentNode()
|
||||
peer, ok := cn.NodeByID(peerID)
|
||||
if !ok {
|
||||
b.logf("[v2] taildrive: Available(): peer %s not found", peerKey)
|
||||
return false
|
||||
}
|
||||
|
||||
peer := latestNetMap.Peers[idx]
|
||||
|
||||
// Exclude offline peers.
|
||||
// TODO(oxtoacart): for some reason, this correctly
|
||||
// catches when a node goes from offline to online,
|
||||
// but not the other way around...
|
||||
// TODO(oxtoacart,nickkhyl): the reason was probably
|
||||
// that we were using netmap.Peers instead of b.peers.
|
||||
// The netmap.Peers slice is not updated in all cases.
|
||||
// It should be fixed now that we use PeerByIDOk.
|
||||
if !peer.Online().Get() {
|
||||
b.logf("[v2] taildrive: Available(): peer %s offline", peerKey)
|
||||
return false
|
||||
}
|
||||
|
||||
if b.currentNode().PeerAPIBase(peer) == "" {
|
||||
b.logf("[v2] taildrive: Available(): peer %s PeerAPI unreachable", peerKey)
|
||||
return false
|
||||
}
|
||||
|
||||
// Check that the peer is allowed to share with us.
|
||||
addresses := peer.Addresses()
|
||||
for _, p := range addresses.All() {
|
||||
capsMap := b.PeerCaps(p.Addr())
|
||||
if capsMap.HasCapability(tailcfg.PeerCapabilityTaildriveSharer) {
|
||||
return true
|
||||
}
|
||||
if cn.PeerHasCap(peer, tailcfg.PeerCapabilityTaildriveSharer) {
|
||||
b.logf("[v2] taildrive: Available(): peer %s available", peerKey)
|
||||
return true
|
||||
}
|
||||
|
||||
b.logf("[v2] taildrive: Available(): peer %s not allowed to share", peerKey)
|
||||
return false
|
||||
},
|
||||
})
|
||||
}
|
||||
return driveRemotes
|
||||
}
|
||||
|
||||
// responseBodyWrapper wraps an io.ReadCloser and stores
|
||||
// the number of bytesRead.
|
||||
type responseBodyWrapper struct {
|
||||
io.ReadCloser
|
||||
logVerbose bool
|
||||
bytesRx int64
|
||||
bytesTx int64
|
||||
log logger.Logf
|
||||
method string
|
||||
statusCode int
|
||||
contentType string
|
||||
fileExtension string
|
||||
shareNodeKey string
|
||||
selfNodeKey string
|
||||
contentLength int64
|
||||
}
|
||||
|
||||
// logAccess logs the taildrive: access: log line. If the logger is nil,
|
||||
// the log will not be written.
|
||||
func (rbw *responseBodyWrapper) logAccess(err string) {
|
||||
if rbw.log == nil {
|
||||
return
|
||||
}
|
||||
|
||||
// Some operating systems create and copy lots of 0 length hidden files for
|
||||
// tracking various states. Omit these to keep logs from being too verbose.
|
||||
if rbw.logVerbose || rbw.contentLength > 0 {
|
||||
levelPrefix := ""
|
||||
if rbw.logVerbose {
|
||||
levelPrefix = "[v1] "
|
||||
}
|
||||
rbw.log(
|
||||
"%staildrive: access: %s from %s to %s: status-code=%d ext=%q content-type=%q content-length=%.f tx=%.f rx=%.f err=%q",
|
||||
levelPrefix,
|
||||
rbw.method,
|
||||
rbw.selfNodeKey,
|
||||
rbw.shareNodeKey,
|
||||
rbw.statusCode,
|
||||
rbw.fileExtension,
|
||||
rbw.contentType,
|
||||
roundTraffic(rbw.contentLength),
|
||||
roundTraffic(rbw.bytesTx), roundTraffic(rbw.bytesRx), err)
|
||||
}
|
||||
}
|
||||
|
||||
// Read implements the io.Reader interface.
|
||||
func (rbw *responseBodyWrapper) Read(b []byte) (int, error) {
|
||||
n, err := rbw.ReadCloser.Read(b)
|
||||
rbw.bytesRx += int64(n)
|
||||
if err != nil && !errors.Is(err, io.EOF) {
|
||||
rbw.logAccess(err.Error())
|
||||
}
|
||||
|
||||
return n, err
|
||||
}
|
||||
|
||||
// Close implements the io.Close interface.
|
||||
func (rbw *responseBodyWrapper) Close() error {
|
||||
err := rbw.ReadCloser.Close()
|
||||
var errStr string
|
||||
if err != nil {
|
||||
errStr = err.Error()
|
||||
}
|
||||
rbw.logAccess(errStr)
|
||||
|
||||
return err
|
||||
}
|
||||
|
||||
// driveTransport is an http.RoundTripper that wraps
|
||||
// b.Dialer().PeerAPITransport() with metrics tracking.
|
||||
type driveTransport struct {
|
||||
b *LocalBackend
|
||||
tr http.RoundTripper
|
||||
}
|
||||
|
||||
func (b *LocalBackend) newDriveTransport() *driveTransport {
|
||||
return &driveTransport{
|
||||
b: b,
|
||||
tr: b.Dialer().PeerAPITransport(),
|
||||
}
|
||||
}
|
||||
|
||||
func (dt *driveTransport) RoundTrip(req *http.Request) (*http.Response, error) {
|
||||
// Some WebDAV clients include origin and refer headers, which peerapi does
|
||||
// not like. Remove them.
|
||||
req.Header.Del("origin")
|
||||
req.Header.Del("referer")
|
||||
|
||||
bw := &requestBodyWrapper{}
|
||||
if req.Body != nil {
|
||||
bw.ReadCloser = req.Body
|
||||
req.Body = bw
|
||||
}
|
||||
|
||||
resp, err := dt.tr.RoundTrip(req)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
contentType := "unknown"
|
||||
if ct := req.Header.Get("Content-Type"); ct != "" {
|
||||
contentType = ct
|
||||
}
|
||||
|
||||
dt.b.mu.Lock()
|
||||
selfNodeKey := dt.b.currentNode().Self().Key().ShortString()
|
||||
dt.b.mu.Unlock()
|
||||
n, _, ok := dt.b.WhoIs("tcp", netip.MustParseAddrPort(req.URL.Host))
|
||||
shareNodeKey := "unknown"
|
||||
if ok {
|
||||
shareNodeKey = string(n.Key().ShortString())
|
||||
}
|
||||
|
||||
rbw := responseBodyWrapper{
|
||||
log: dt.b.logf,
|
||||
logVerbose: req.Method != httpm.GET && req.Method != httpm.PUT, // other requests like PROPFIND are quite chatty, so we log those at verbose level
|
||||
method: req.Method,
|
||||
bytesTx: int64(bw.bytesRead),
|
||||
selfNodeKey: selfNodeKey,
|
||||
shareNodeKey: shareNodeKey,
|
||||
contentType: contentType,
|
||||
contentLength: resp.ContentLength,
|
||||
fileExtension: parseDriveFileExtensionForLog(req.URL.Path),
|
||||
statusCode: resp.StatusCode,
|
||||
ReadCloser: resp.Body,
|
||||
}
|
||||
|
||||
if resp.StatusCode >= 400 {
|
||||
// in case of error response, just log immediately
|
||||
rbw.logAccess("")
|
||||
} else {
|
||||
resp.Body = &rbw
|
||||
}
|
||||
|
||||
return resp, nil
|
||||
}
|
||||
|
||||
30
vendor/tailscale.com/ipn/ipnlocal/drive_tomove.go
generated
vendored
Normal file
30
vendor/tailscale.com/ipn/ipnlocal/drive_tomove.go
generated
vendored
Normal file
@@ -0,0 +1,30 @@
|
||||
// Copyright (c) Tailscale Inc & AUTHORS
|
||||
// SPDX-License-Identifier: BSD-3-Clause
|
||||
|
||||
// This is the Taildrive stuff that should ideally be registered in init only when
|
||||
// the ts_omit_drive is not set, but for transition reasons is currently (2025-09-08)
|
||||
// always defined, as we work to pull it out of LocalBackend.
|
||||
|
||||
package ipnlocal
|
||||
|
||||
import "tailscale.com/tailcfg"
|
||||
|
||||
const (
|
||||
// DriveLocalPort is the port on which the Taildrive listens for location
|
||||
// connections on quad 100.
|
||||
DriveLocalPort = 8080
|
||||
)
|
||||
|
||||
// DriveSharingEnabled reports whether sharing to remote nodes via Taildrive is
|
||||
// enabled. This is currently based on checking for the drive:share node
|
||||
// attribute.
|
||||
func (b *LocalBackend) DriveSharingEnabled() bool {
|
||||
return b.currentNode().SelfHasCap(tailcfg.NodeAttrsTaildriveShare)
|
||||
}
|
||||
|
||||
// DriveAccessEnabled reports whether accessing Taildrive shares on remote nodes
|
||||
// is enabled. This is currently based on checking for the drive:access node
|
||||
// attribute.
|
||||
func (b *LocalBackend) DriveAccessEnabled() bool {
|
||||
return b.currentNode().SelfHasCap(tailcfg.NodeAttrsTaildriveAccess)
|
||||
}
|
||||
16
vendor/tailscale.com/ipn/ipnlocal/expiry.go
generated
vendored
16
vendor/tailscale.com/ipn/ipnlocal/expiry.go
generated
vendored
@@ -6,12 +6,14 @@ package ipnlocal
|
||||
import (
|
||||
"time"
|
||||
|
||||
"tailscale.com/control/controlclient"
|
||||
"tailscale.com/syncs"
|
||||
"tailscale.com/tailcfg"
|
||||
"tailscale.com/tstime"
|
||||
"tailscale.com/types/key"
|
||||
"tailscale.com/types/logger"
|
||||
"tailscale.com/types/netmap"
|
||||
"tailscale.com/util/eventbus"
|
||||
)
|
||||
|
||||
// For extra defense-in-depth, when we're testing expired nodes we check
|
||||
@@ -40,14 +42,22 @@ type expiryManager struct {
|
||||
|
||||
logf logger.Logf
|
||||
clock tstime.Clock
|
||||
|
||||
eventClient *eventbus.Client
|
||||
}
|
||||
|
||||
func newExpiryManager(logf logger.Logf) *expiryManager {
|
||||
return &expiryManager{
|
||||
func newExpiryManager(logf logger.Logf, bus *eventbus.Bus) *expiryManager {
|
||||
em := &expiryManager{
|
||||
previouslyExpired: map[tailcfg.StableNodeID]bool{},
|
||||
logf: logf,
|
||||
clock: tstime.StdClock{},
|
||||
}
|
||||
|
||||
em.eventClient = bus.Client("ipnlocal.expiryManager")
|
||||
eventbus.SubscribeFunc(em.eventClient, func(ct controlclient.ControlTime) {
|
||||
em.onControlTime(ct.Value)
|
||||
})
|
||||
return em
|
||||
}
|
||||
|
||||
// onControlTime is called whenever we receive a new timestamp from the control
|
||||
@@ -218,6 +228,8 @@ func (em *expiryManager) nextPeerExpiry(nm *netmap.NetworkMap, localNow time.Tim
|
||||
return nextExpiry
|
||||
}
|
||||
|
||||
func (em *expiryManager) close() { em.eventClient.Close() }
|
||||
|
||||
// ControlNow estimates the current time on the control server, calculated as
|
||||
// localNow + the delta between local and control server clocks as recorded
|
||||
// when the LocalBackend last received a time message from the control server.
|
||||
|
||||
621
vendor/tailscale.com/ipn/ipnlocal/extension_host.go
generated
vendored
Normal file
621
vendor/tailscale.com/ipn/ipnlocal/extension_host.go
generated
vendored
Normal file
@@ -0,0 +1,621 @@
|
||||
// Copyright (c) Tailscale Inc & AUTHORS
|
||||
// SPDX-License-Identifier: BSD-3-Clause
|
||||
|
||||
package ipnlocal
|
||||
|
||||
import (
|
||||
"context"
|
||||
"errors"
|
||||
"fmt"
|
||||
"maps"
|
||||
"reflect"
|
||||
"slices"
|
||||
"strings"
|
||||
"sync"
|
||||
"sync/atomic"
|
||||
"time"
|
||||
|
||||
"tailscale.com/control/controlclient"
|
||||
"tailscale.com/ipn"
|
||||
"tailscale.com/ipn/ipnauth"
|
||||
"tailscale.com/ipn/ipnext"
|
||||
"tailscale.com/tailcfg"
|
||||
"tailscale.com/types/logger"
|
||||
"tailscale.com/util/execqueue"
|
||||
"tailscale.com/util/mak"
|
||||
"tailscale.com/util/testenv"
|
||||
)
|
||||
|
||||
// ExtensionHost is a bridge between the [LocalBackend] and the registered [ipnext.Extension]s.
|
||||
// It implements [ipnext.Host] and is safe for concurrent use.
|
||||
//
|
||||
// A nil pointer to [ExtensionHost] is a valid, no-op extension host which is primarily used in tests
|
||||
// that instantiate [LocalBackend] directly without using [NewExtensionHost].
|
||||
//
|
||||
// The [LocalBackend] is not required to hold its mutex when calling the host's methods,
|
||||
// but it typically does so either to prevent changes to its state (for example, the current profile)
|
||||
// while callbacks are executing, or because it calls the host's methods as part of a larger operation
|
||||
// that requires the mutex to be held.
|
||||
//
|
||||
// Extensions might invoke the host's methods either from callbacks triggered by the [LocalBackend],
|
||||
// or in a response to external events. Some methods can be called by both the extensions and the backend.
|
||||
//
|
||||
// As a general rule, the host cannot assume anything about the current state of the [LocalBackend]'s
|
||||
// internal mutex on entry to its methods, and therefore cannot safely call [LocalBackend] methods directly.
|
||||
//
|
||||
// The following are typical and supported patterns:
|
||||
// - LocalBackend notifies the host about an event, such as a change in the current profile.
|
||||
// The host invokes callbacks registered by Extensions, forwarding the event arguments to them.
|
||||
// If necessary, the host can also update its own state for future use.
|
||||
// - LocalBackend requests information from the host, such as the effective [ipnauth.AuditLogFunc]
|
||||
// or the [ipn.LoginProfile] to use when no GUI/CLI client is connected. Typically, [LocalBackend]
|
||||
// provides the required context to the host, and the host returns the result to [LocalBackend]
|
||||
// after forwarding the request to the extensions.
|
||||
// - Extension invokes the host's method to perform an action, such as switching to the "best" profile
|
||||
// in response to a change in the device's state. Since the host does not know whether the [LocalBackend]'s
|
||||
// internal mutex is held, it cannot invoke any methods on the [LocalBackend] directly and must instead
|
||||
// do so asynchronously, such as by using [ExtensionHost.enqueueBackendOperation].
|
||||
// - Extension requests information from the host, such as the effective [ipnauth.AuditLogFunc]
|
||||
// or the current [ipn.LoginProfile]. Since the host cannot invoke any methods on the [LocalBackend] directly,
|
||||
// it should maintain its own view of the current state, updating it when the [LocalBackend] notifies it
|
||||
// about a change or event.
|
||||
//
|
||||
// To safeguard against adopting incorrect or risky patterns, the host does not store [LocalBackend] in its fields
|
||||
// and instead provides [ExtensionHost.enqueueBackendOperation]. Additionally, to make it easier to test extensions
|
||||
// and to further reduce the risk of accessing unexported methods or fields of [LocalBackend], the host interacts
|
||||
// with it via the [Backend] interface.
|
||||
type ExtensionHost struct {
|
||||
b Backend
|
||||
hooks ipnext.Hooks
|
||||
logf logger.Logf // prefixed with "ipnext:"
|
||||
|
||||
// allExtensions holds the extensions in the order they were registered,
|
||||
// including those that have not yet attempted initialization or have failed to initialize.
|
||||
allExtensions []ipnext.Extension
|
||||
|
||||
// initOnce is used to ensure that the extensions are initialized only once,
|
||||
// even if [extensionHost.Init] is called multiple times.
|
||||
initOnce sync.Once
|
||||
initDone atomic.Bool
|
||||
// shutdownOnce is like initOnce, but for [ExtensionHost.Shutdown].
|
||||
shutdownOnce sync.Once
|
||||
|
||||
// workQueue maintains execution order for asynchronous operations requested by extensions.
|
||||
// It is always an [execqueue.ExecQueue] except in some tests.
|
||||
workQueue execQueue
|
||||
// doEnqueueBackendOperation adds an asynchronous [LocalBackend] operation to the workQueue.
|
||||
doEnqueueBackendOperation func(func(Backend))
|
||||
|
||||
shuttingDown atomic.Bool
|
||||
|
||||
extByType sync.Map // reflect.Type -> ipnext.Extension
|
||||
|
||||
// mu protects the following fields.
|
||||
// It must not be held when calling [LocalBackend] methods
|
||||
// or when invoking callbacks registered by extensions.
|
||||
mu sync.Mutex
|
||||
// initialized is whether the host and extensions have been fully initialized.
|
||||
initialized atomic.Bool
|
||||
// activeExtensions is a subset of allExtensions that have been initialized and are ready to use.
|
||||
activeExtensions []ipnext.Extension
|
||||
// extensionsByName are the extensions indexed by their names.
|
||||
// They are not necessarily initialized (in activeExtensions) yet.
|
||||
extensionsByName map[string]ipnext.Extension
|
||||
// postInitWorkQueue is a queue of functions to be executed
|
||||
// by the workQueue after all extensions have been initialized.
|
||||
postInitWorkQueue []func(Backend)
|
||||
|
||||
// currentProfile is a read-only view of the currently used profile.
|
||||
// The view is always Valid, but might be of an empty, non-persisted profile.
|
||||
currentProfile ipn.LoginProfileView
|
||||
// currentPrefs is a read-only view of the current profile's [ipn.Prefs]
|
||||
// with any private keys stripped. It is always Valid.
|
||||
currentPrefs ipn.PrefsView
|
||||
}
|
||||
|
||||
// Backend is a subset of [LocalBackend] methods that are used by [ExtensionHost].
|
||||
// It is primarily used for testing.
|
||||
type Backend interface {
|
||||
// SwitchToBestProfile switches to the best profile for the current state of the system.
|
||||
// The reason indicates why the profile is being switched.
|
||||
SwitchToBestProfile(reason string)
|
||||
|
||||
SendNotify(ipn.Notify)
|
||||
|
||||
NodeBackend() ipnext.NodeBackend
|
||||
|
||||
ipnext.SafeBackend
|
||||
}
|
||||
|
||||
// NewExtensionHost returns a new [ExtensionHost] which manages registered extensions for the given backend.
|
||||
// The extensions are instantiated, but are not initialized until [ExtensionHost.Init] is called.
|
||||
// It returns an error if instantiating any extension fails.
|
||||
func NewExtensionHost(logf logger.Logf, b Backend) (*ExtensionHost, error) {
|
||||
return newExtensionHost(logf, b)
|
||||
}
|
||||
|
||||
func NewExtensionHostForTest(logf logger.Logf, b Backend, overrideExts ...*ipnext.Definition) (*ExtensionHost, error) {
|
||||
if !testenv.InTest() {
|
||||
panic("use outside of test")
|
||||
}
|
||||
return newExtensionHost(logf, b, overrideExts...)
|
||||
}
|
||||
|
||||
// newExtensionHost is the shared implementation of [NewExtensionHost] and
|
||||
// [NewExtensionHostForTest].
|
||||
//
|
||||
// If overrideExts is non-nil, the registered extensions are ignored and the
|
||||
// provided extensions are used instead. Overriding extensions is primarily used
|
||||
// for testing.
|
||||
func newExtensionHost(logf logger.Logf, b Backend, overrideExts ...*ipnext.Definition) (_ *ExtensionHost, err error) {
|
||||
host := &ExtensionHost{
|
||||
b: b,
|
||||
logf: logger.WithPrefix(logf, "ipnext: "),
|
||||
workQueue: &execqueue.ExecQueue{},
|
||||
// The host starts with an empty profile and default prefs.
|
||||
// We'll update them once [profileManager] notifies us of the initial profile.
|
||||
currentProfile: zeroProfile,
|
||||
currentPrefs: defaultPrefs,
|
||||
}
|
||||
|
||||
// All operations on the backend must be executed asynchronously by the work queue.
|
||||
// DO NOT retain a direct reference to the backend in the host.
|
||||
// See the docstring for [ExtensionHost] for more details.
|
||||
host.doEnqueueBackendOperation = func(f func(Backend)) {
|
||||
if f == nil {
|
||||
panic("nil backend operation")
|
||||
}
|
||||
host.workQueue.Add(func() { f(b) })
|
||||
}
|
||||
|
||||
// Use registered extensions.
|
||||
extDef := ipnext.Extensions()
|
||||
if overrideExts != nil {
|
||||
// Use the provided, potentially empty, overrideExts
|
||||
// instead of the registered ones.
|
||||
extDef = slices.Values(overrideExts)
|
||||
}
|
||||
|
||||
for d := range extDef {
|
||||
ext, err := d.MakeExtension(logf, b)
|
||||
if errors.Is(err, ipnext.SkipExtension) {
|
||||
// The extension wants to be skipped.
|
||||
host.logf("%q: %v", d.Name(), err)
|
||||
continue
|
||||
} else if err != nil {
|
||||
return nil, fmt.Errorf("failed to create %q extension: %v", d.Name(), err)
|
||||
}
|
||||
host.allExtensions = append(host.allExtensions, ext)
|
||||
|
||||
if d.Name() != ext.Name() {
|
||||
return nil, fmt.Errorf("extension name %q does not match the registered name %q", ext.Name(), d.Name())
|
||||
}
|
||||
|
||||
if _, ok := host.extensionsByName[ext.Name()]; ok {
|
||||
return nil, fmt.Errorf("duplicate extension name %q", ext.Name())
|
||||
} else {
|
||||
mak.Set(&host.extensionsByName, ext.Name(), ext)
|
||||
}
|
||||
|
||||
typ := reflect.TypeOf(ext)
|
||||
if _, ok := host.extByType.Load(typ); ok {
|
||||
if _, ok := ext.(interface{ PermitDoubleRegister() }); !ok {
|
||||
return nil, fmt.Errorf("duplicate extension type %T", ext)
|
||||
}
|
||||
}
|
||||
host.extByType.Store(typ, ext)
|
||||
}
|
||||
return host, nil
|
||||
}
|
||||
|
||||
func (h *ExtensionHost) NodeBackend() ipnext.NodeBackend {
|
||||
if h == nil {
|
||||
return nil
|
||||
}
|
||||
return h.b.NodeBackend()
|
||||
}
|
||||
|
||||
// Init initializes the host and the extensions it manages.
|
||||
func (h *ExtensionHost) Init() {
|
||||
if h != nil {
|
||||
h.initOnce.Do(h.init)
|
||||
}
|
||||
}
|
||||
|
||||
var zeroHooks ipnext.Hooks
|
||||
|
||||
func (h *ExtensionHost) Hooks() *ipnext.Hooks {
|
||||
if h == nil {
|
||||
return &zeroHooks
|
||||
}
|
||||
return &h.hooks
|
||||
}
|
||||
|
||||
func (h *ExtensionHost) init() {
|
||||
defer h.initDone.Store(true)
|
||||
|
||||
// Initialize the extensions in the order they were registered.
|
||||
for _, ext := range h.allExtensions {
|
||||
// Do not hold the lock while calling [ipnext.Extension.Init].
|
||||
// Extensions call back into the host to register their callbacks,
|
||||
// and that would cause a deadlock if the h.mu is already held.
|
||||
if err := ext.Init(h); err != nil {
|
||||
// As per the [ipnext.Extension] interface, failures to initialize
|
||||
// an extension are never fatal. The extension is simply skipped.
|
||||
//
|
||||
// But we handle [ipnext.SkipExtension] differently for nicer logging
|
||||
// if the extension wants to be skipped and not actually failing.
|
||||
if errors.Is(err, ipnext.SkipExtension) {
|
||||
h.logf("%q: %v", ext.Name(), err)
|
||||
} else {
|
||||
h.logf("%q init failed: %v", ext.Name(), err)
|
||||
}
|
||||
continue
|
||||
}
|
||||
// Update the initialized extensions lists as soon as the extension is initialized.
|
||||
// We'd like to make them visible to other extensions that are initialized later.
|
||||
h.mu.Lock()
|
||||
h.activeExtensions = append(h.activeExtensions, ext)
|
||||
h.mu.Unlock()
|
||||
}
|
||||
|
||||
// Report active extensions to the log.
|
||||
// TODO(nickkhyl): update client metrics to include the active/failed/skipped extensions.
|
||||
h.mu.Lock()
|
||||
extensionNames := slices.Collect(maps.Keys(h.extensionsByName))
|
||||
h.mu.Unlock()
|
||||
h.logf("active extensions: %v", strings.Join(extensionNames, ", "))
|
||||
|
||||
// Additional init steps that need to be performed after all extensions have been initialized.
|
||||
h.mu.Lock()
|
||||
wq := h.postInitWorkQueue
|
||||
h.postInitWorkQueue = nil
|
||||
h.initialized.Store(true)
|
||||
h.mu.Unlock()
|
||||
|
||||
// Enqueue work that was requested and deferred during initialization.
|
||||
h.doEnqueueBackendOperation(func(b Backend) {
|
||||
for _, f := range wq {
|
||||
f(b)
|
||||
}
|
||||
})
|
||||
}
|
||||
|
||||
// Extensions implements [ipnext.Host].
|
||||
func (h *ExtensionHost) Extensions() ipnext.ExtensionServices {
|
||||
// Currently, [ExtensionHost] implements [ExtensionServices] directly.
|
||||
// We might want to extract it to a separate type in the future.
|
||||
return h
|
||||
}
|
||||
|
||||
// FindExtensionByName implements [ipnext.ExtensionServices]
|
||||
// and is also used by the [LocalBackend].
|
||||
// It returns nil if the extension is not found.
|
||||
func (h *ExtensionHost) FindExtensionByName(name string) any {
|
||||
if h == nil {
|
||||
return nil
|
||||
}
|
||||
h.mu.Lock()
|
||||
defer h.mu.Unlock()
|
||||
return h.extensionsByName[name]
|
||||
}
|
||||
|
||||
// extensionIfaceType is the runtime type of the [ipnext.Extension] interface.
|
||||
var extensionIfaceType = reflect.TypeFor[ipnext.Extension]()
|
||||
|
||||
// GetExt returns the extension of type T registered with lb.
|
||||
// If lb is nil or the extension is not found, it returns zero, false.
|
||||
func GetExt[T ipnext.Extension](lb *LocalBackend) (_ T, ok bool) {
|
||||
var zero T
|
||||
if lb == nil {
|
||||
return zero, false
|
||||
}
|
||||
if ext, ok := lb.extHost.extensionOfType(reflect.TypeFor[T]()); ok {
|
||||
return ext.(T), true
|
||||
}
|
||||
return zero, false
|
||||
}
|
||||
|
||||
func (h *ExtensionHost) extensionOfType(t reflect.Type) (_ ipnext.Extension, ok bool) {
|
||||
if h == nil {
|
||||
return nil, false
|
||||
}
|
||||
if v, ok := h.extByType.Load(t); ok {
|
||||
return v.(ipnext.Extension), true
|
||||
}
|
||||
return nil, false
|
||||
}
|
||||
|
||||
// FindMatchingExtension implements [ipnext.ExtensionServices]
|
||||
// and is also used by the [LocalBackend].
|
||||
func (h *ExtensionHost) FindMatchingExtension(target any) bool {
|
||||
if h == nil {
|
||||
return false
|
||||
}
|
||||
|
||||
if target == nil {
|
||||
panic("ipnext: target cannot be nil")
|
||||
}
|
||||
|
||||
val := reflect.ValueOf(target)
|
||||
typ := val.Type()
|
||||
if typ.Kind() != reflect.Ptr || val.IsNil() {
|
||||
panic("ipnext: target must be a non-nil pointer")
|
||||
}
|
||||
targetType := typ.Elem()
|
||||
if targetType.Kind() != reflect.Interface && !targetType.Implements(extensionIfaceType) {
|
||||
panic("ipnext: *target must be interface or implement ipnext.Extension")
|
||||
}
|
||||
|
||||
h.mu.Lock()
|
||||
defer h.mu.Unlock()
|
||||
for _, ext := range h.activeExtensions {
|
||||
if reflect.TypeOf(ext).AssignableTo(targetType) {
|
||||
val.Elem().Set(reflect.ValueOf(ext))
|
||||
return true
|
||||
}
|
||||
}
|
||||
return false
|
||||
}
|
||||
|
||||
// Profiles implements [ipnext.Host].
|
||||
func (h *ExtensionHost) Profiles() ipnext.ProfileServices {
|
||||
// Currently, [ExtensionHost] implements [ipnext.ProfileServices] directly.
|
||||
// We might want to extract it to a separate type in the future.
|
||||
return h
|
||||
}
|
||||
|
||||
// CurrentProfileState implements [ipnext.ProfileServices].
|
||||
func (h *ExtensionHost) CurrentProfileState() (ipn.LoginProfileView, ipn.PrefsView) {
|
||||
if h == nil {
|
||||
return zeroProfile, defaultPrefs
|
||||
}
|
||||
h.mu.Lock()
|
||||
defer h.mu.Unlock()
|
||||
return h.currentProfile, h.currentPrefs
|
||||
}
|
||||
|
||||
// CurrentPrefs implements [ipnext.ProfileServices].
|
||||
func (h *ExtensionHost) CurrentPrefs() ipn.PrefsView {
|
||||
_, prefs := h.CurrentProfileState()
|
||||
return prefs
|
||||
}
|
||||
|
||||
// SwitchToBestProfileAsync implements [ipnext.ProfileServices].
|
||||
func (h *ExtensionHost) SwitchToBestProfileAsync(reason string) {
|
||||
if h == nil {
|
||||
return
|
||||
}
|
||||
h.enqueueBackendOperation(func(b Backend) {
|
||||
b.SwitchToBestProfile(reason)
|
||||
})
|
||||
}
|
||||
|
||||
// SendNotifyAsync implements [ipnext.Host].
|
||||
func (h *ExtensionHost) SendNotifyAsync(n ipn.Notify) {
|
||||
if h == nil {
|
||||
return
|
||||
}
|
||||
h.enqueueBackendOperation(func(b Backend) {
|
||||
b.SendNotify(n)
|
||||
})
|
||||
}
|
||||
|
||||
// NotifyProfileChange invokes registered profile state change callbacks
|
||||
// and updates the current profile and prefs in the host.
|
||||
// It strips private keys from the [ipn.Prefs] before preserving
|
||||
// or passing them to the callbacks.
|
||||
func (h *ExtensionHost) NotifyProfileChange(profile ipn.LoginProfileView, prefs ipn.PrefsView, sameNode bool) {
|
||||
if !h.active() {
|
||||
return
|
||||
}
|
||||
h.mu.Lock()
|
||||
// Strip private keys from the prefs before preserving or passing them to the callbacks.
|
||||
// Extensions should not need them (unless proven otherwise in the future),
|
||||
// and this is a good way to ensure that they won't accidentally leak them.
|
||||
prefs = stripKeysFromPrefs(prefs)
|
||||
// Update the current profile and prefs in the host,
|
||||
// so we can provide them to the extensions later if they ask.
|
||||
h.currentPrefs = prefs
|
||||
h.currentProfile = profile
|
||||
h.mu.Unlock()
|
||||
|
||||
for _, cb := range h.hooks.ProfileStateChange {
|
||||
cb(profile, prefs, sameNode)
|
||||
}
|
||||
}
|
||||
|
||||
// NotifyProfilePrefsChanged invokes registered profile state change callbacks,
|
||||
// and updates the current profile and prefs in the host.
|
||||
// It strips private keys from the [ipn.Prefs] before preserving or using them.
|
||||
func (h *ExtensionHost) NotifyProfilePrefsChanged(profile ipn.LoginProfileView, oldPrefs, newPrefs ipn.PrefsView) {
|
||||
if !h.active() {
|
||||
return
|
||||
}
|
||||
h.mu.Lock()
|
||||
// Strip private keys from the prefs before preserving or passing them to the callbacks.
|
||||
// Extensions should not need them (unless proven otherwise in the future),
|
||||
// and this is a good way to ensure that they won't accidentally leak them.
|
||||
newPrefs = stripKeysFromPrefs(newPrefs)
|
||||
// Update the current profile and prefs in the host,
|
||||
// so we can provide them to the extensions later if they ask.
|
||||
h.currentPrefs = newPrefs
|
||||
h.currentProfile = profile
|
||||
// Get the callbacks to be invoked.
|
||||
h.mu.Unlock()
|
||||
|
||||
for _, cb := range h.hooks.ProfileStateChange {
|
||||
cb(profile, newPrefs, true)
|
||||
}
|
||||
}
|
||||
|
||||
func (h *ExtensionHost) active() bool {
|
||||
return h != nil && !h.shuttingDown.Load()
|
||||
}
|
||||
|
||||
// DetermineBackgroundProfile returns a read-only view of the profile
|
||||
// used when no GUI/CLI client is connected, using background profile
|
||||
// resolvers registered by extensions.
|
||||
//
|
||||
// It returns an invalid view if Tailscale should not run in the background
|
||||
// and instead disconnect until a GUI/CLI client connects.
|
||||
//
|
||||
// As of 2025-02-07, this is only used on Windows.
|
||||
func (h *ExtensionHost) DetermineBackgroundProfile(profiles ipnext.ProfileStore) ipn.LoginProfileView {
|
||||
if !h.active() {
|
||||
return ipn.LoginProfileView{}
|
||||
}
|
||||
// TODO(nickkhyl): check if the returned profile is allowed on the device,
|
||||
// such as when [syspolicy.Tailnet] policy setting requires a specific Tailnet.
|
||||
// See tailscale/corp#26249.
|
||||
|
||||
// Attempt to resolve the background profile using the registered
|
||||
// background profile resolvers (e.g., [ipn/desktop.desktopSessionsExt] on Windows).
|
||||
for _, resolver := range h.hooks.BackgroundProfileResolvers {
|
||||
if profile := resolver(profiles); profile.Valid() {
|
||||
return profile
|
||||
}
|
||||
}
|
||||
|
||||
// Otherwise, switch to an empty profile and disconnect Tailscale
|
||||
// until a GUI or CLI client connects.
|
||||
return ipn.LoginProfileView{}
|
||||
}
|
||||
|
||||
// NotifyNewControlClient invokes all registered control client callbacks.
|
||||
// It returns callbacks to be executed when the control client shuts down.
|
||||
func (h *ExtensionHost) NotifyNewControlClient(cc controlclient.Client, profile ipn.LoginProfileView) (ccShutdownCbs []func()) {
|
||||
if !h.active() {
|
||||
return nil
|
||||
}
|
||||
for _, cb := range h.hooks.NewControlClient {
|
||||
if shutdown := cb(cc, profile); shutdown != nil {
|
||||
ccShutdownCbs = append(ccShutdownCbs, shutdown)
|
||||
}
|
||||
}
|
||||
return ccShutdownCbs
|
||||
}
|
||||
|
||||
// AuditLogger returns a function that reports an auditable action
|
||||
// to all registered audit loggers. It fails if any of them returns an error,
|
||||
// indicating that the action cannot be logged and must not be performed.
|
||||
//
|
||||
// It implements [ipnext.Host], but is also used by the [LocalBackend].
|
||||
//
|
||||
// The returned function closes over the current state of the host and extensions,
|
||||
// which typically includes the current profile and the audit loggers registered by extensions.
|
||||
// It must not be persisted outside of the auditable action context.
|
||||
func (h *ExtensionHost) AuditLogger() ipnauth.AuditLogFunc {
|
||||
if !h.active() {
|
||||
return func(tailcfg.ClientAuditAction, string) error { return nil }
|
||||
}
|
||||
loggers := make([]ipnauth.AuditLogFunc, 0, len(h.hooks.AuditLoggers))
|
||||
for _, provider := range h.hooks.AuditLoggers {
|
||||
loggers = append(loggers, provider())
|
||||
}
|
||||
return func(action tailcfg.ClientAuditAction, details string) error {
|
||||
// Log auditable actions to the host's log regardless of whether
|
||||
// the audit loggers are available or not.
|
||||
h.logf("auditlog: %v: %v", action, details)
|
||||
|
||||
// Invoke all registered audit loggers and collect errors.
|
||||
// If any of them returns an error, the action is denied.
|
||||
var errs []error
|
||||
for _, logger := range loggers {
|
||||
if err := logger(action, details); err != nil {
|
||||
errs = append(errs, err)
|
||||
}
|
||||
}
|
||||
return errors.Join(errs...)
|
||||
}
|
||||
}
|
||||
|
||||
// Shutdown shuts down the extension host and all initialized extensions.
|
||||
func (h *ExtensionHost) Shutdown() {
|
||||
if h == nil {
|
||||
return
|
||||
}
|
||||
// Ensure that the init function has completed before shutting down,
|
||||
// or prevent any further init calls from happening.
|
||||
h.initOnce.Do(func() {})
|
||||
h.shutdownOnce.Do(h.shutdown)
|
||||
}
|
||||
|
||||
func (h *ExtensionHost) shutdown() {
|
||||
h.shuttingDown.Store(true)
|
||||
// Prevent any queued but not yet started operations from running,
|
||||
// block new operations from being enqueued, and wait for the
|
||||
// currently executing operation (if any) to finish.
|
||||
h.shutdownWorkQueue()
|
||||
// Invoke shutdown callbacks registered by extensions.
|
||||
h.shutdownExtensions()
|
||||
}
|
||||
|
||||
func (h *ExtensionHost) shutdownWorkQueue() {
|
||||
h.workQueue.Shutdown()
|
||||
var ctx context.Context
|
||||
if testenv.InTest() {
|
||||
// In tests, we'd like to wait indefinitely for the current operation to finish,
|
||||
// mostly to help avoid flaky tests. Test runners can be pretty slow.
|
||||
ctx = context.Background()
|
||||
} else {
|
||||
// In prod, however, we want to avoid blocking indefinitely.
|
||||
// The 5s timeout is somewhat arbitrary; LocalBackend operations
|
||||
// should not take that long.
|
||||
var cancel context.CancelFunc
|
||||
ctx, cancel = context.WithTimeout(context.Background(), 5*time.Second)
|
||||
defer cancel()
|
||||
}
|
||||
// Since callbacks are invoked synchronously, this will also wait
|
||||
// for in-flight callbacks associated with those operations to finish.
|
||||
if err := h.workQueue.Wait(ctx); err != nil {
|
||||
h.logf("work queue shutdown failed: %v", err)
|
||||
}
|
||||
}
|
||||
|
||||
func (h *ExtensionHost) shutdownExtensions() {
|
||||
h.mu.Lock()
|
||||
extensions := h.activeExtensions
|
||||
h.mu.Unlock()
|
||||
|
||||
// h.mu must not be held while shutting down extensions.
|
||||
// Extensions might call back into the host and that would cause
|
||||
// a deadlock if the h.mu is already held.
|
||||
//
|
||||
// Shutdown is called in the reverse order of Init.
|
||||
for _, ext := range slices.Backward(extensions) {
|
||||
if err := ext.Shutdown(); err != nil {
|
||||
// Extension shutdown errors are never fatal, but we log them for debugging purposes.
|
||||
h.logf("%q: shutdown callback failed: %v", ext.Name(), err)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// enqueueBackendOperation enqueues a function to perform an operation on the [Backend].
|
||||
// If the host has not yet been initialized (e.g., when called from an extension's Init method),
|
||||
// the operation is deferred until after the host and all extensions have completed initialization.
|
||||
// It panics if the f is nil.
|
||||
func (h *ExtensionHost) enqueueBackendOperation(f func(Backend)) {
|
||||
if h == nil {
|
||||
return
|
||||
}
|
||||
if f == nil {
|
||||
panic("nil backend operation")
|
||||
}
|
||||
h.mu.Lock() // protects h.initialized and h.postInitWorkQueue
|
||||
defer h.mu.Unlock()
|
||||
if h.initialized.Load() {
|
||||
h.doEnqueueBackendOperation(f)
|
||||
} else {
|
||||
h.postInitWorkQueue = append(h.postInitWorkQueue, f)
|
||||
}
|
||||
}
|
||||
|
||||
// execQueue is an ordered asynchronous queue for executing functions.
// It is implemented by [execqueue.ExecQueue]. The interface is used
// to allow testing with a mock implementation.
type execQueue interface {
	// Add enqueues f for asynchronous, in-order execution.
	Add(func())
	// Shutdown stops the queue; see [ExtensionHost.shutdownWorkQueue] for usage.
	Shutdown()
	// Wait blocks until the queue has drained or ctx is done,
	// returning a non-nil error in the latter case.
	Wait(context.Context) error
}
|
||||
48
vendor/tailscale.com/ipn/ipnlocal/hwattest.go
generated
vendored
Normal file
48
vendor/tailscale.com/ipn/ipnlocal/hwattest.go
generated
vendored
Normal file
@@ -0,0 +1,48 @@
|
||||
// Copyright (c) Tailscale Inc & AUTHORS
|
||||
// SPDX-License-Identifier: BSD-3-Clause
|
||||
|
||||
//go:build !ts_omit_tpm
|
||||
|
||||
package ipnlocal
|
||||
|
||||
import (
|
||||
"errors"
|
||||
|
||||
"tailscale.com/feature"
|
||||
"tailscale.com/types/key"
|
||||
"tailscale.com/types/logger"
|
||||
"tailscale.com/types/persist"
|
||||
)
|
||||
|
||||
// init registers generateAttestationKeyIfEmpty as the implementation of
// [feature.HookGenerateAttestationKeyIfEmpty]. This file (and therefore
// the registration) is excluded when building with the ts_omit_tpm tag,
// per the build constraint above.
func init() {
	feature.HookGenerateAttestationKeyIfEmpty.Set(generateAttestationKeyIfEmpty)
}
|
||||
|
||||
// generateAttestationKeyIfEmpty generates a new hardware attestation key if
// none exists. It returns true if a new key was generated and stored in
// p.AttestationKey.
//
// Key-generation failures are deliberately swallowed: the function returns
// (false, nil) rather than propagating the error, treating attestation as
// best-effort.
//
// NOTE(review): when p is nil and a key is generated, the key is stored in
// a freshly allocated local [persist.Persist] that the caller never sees,
// yet the function still returns true — confirm callers never pass a nil p,
// or that this is intended.
func generateAttestationKeyIfEmpty(p *persist.Persist, logf logger.Logf) (bool, error) {
	// attempt to generate a new hardware attestation key if none exists
	var ak key.HardwareAttestationKey
	if p != nil {
		ak = p.AttestationKey
	}

	if ak == nil || ak.IsZero() {
		var err error
		ak, err = key.NewHardwareAttestationKey()
		if err != nil {
			// ErrUnsupported is expected on platforms/hardware without
			// attestation support, so only other errors are logged.
			if !errors.Is(err, key.ErrUnsupported) {
				logf("failed to create hardware attestation key: %v", err)
			}
		} else if ak != nil {
			logf("using new hardware attestation key: %v", ak.Public())
			if p == nil {
				p = &persist.Persist{}
			}
			p.AttestationKey = ak
			return true, nil
		}
	}
	// A key already existed, generation failed, or it is unsupported;
	// in all cases no new key was stored in the caller's persist state.
	return false, nil
}
|
||||
5100
vendor/tailscale.com/ipn/ipnlocal/local.go
generated
vendored
5100
vendor/tailscale.com/ipn/ipnlocal/local.go
generated
vendored
File diff suppressed because it is too large
Load Diff
74
vendor/tailscale.com/ipn/ipnlocal/netstack.go
generated
vendored
Normal file
74
vendor/tailscale.com/ipn/ipnlocal/netstack.go
generated
vendored
Normal file
@@ -0,0 +1,74 @@
|
||||
// Copyright (c) Tailscale Inc & AUTHORS
|
||||
// SPDX-License-Identifier: BSD-3-Clause
|
||||
|
||||
//go:build !ts_omit_netstack
|
||||
|
||||
package ipnlocal
|
||||
|
||||
import (
|
||||
"net"
|
||||
"net/netip"
|
||||
"time"
|
||||
|
||||
"gvisor.dev/gvisor/pkg/tcpip"
|
||||
"tailscale.com/types/ptr"
|
||||
)
|
||||
|
||||
// TCPHandlerForDst returns a TCP handler for connections to dst, or nil if
// no handler is needed. It also returns a list of TCP socket options to
// apply to the socket before calling the handler.
// TCPHandlerForDst is called both for connections to our node's local IP
// as well as to the service IP (quad 100).
//
// The checks below are ordered by precedence: service-IP handlers first,
// then VIP-service handlers, then handlers bound to the node's local IP
// (SSH, web client, peer API, serve). The first match wins.
func (b *LocalBackend) TCPHandlerForDst(src, dst netip.AddrPort) (handler func(c net.Conn) error, opts []tcpip.SettableSocketOption) {
	// First handle internal connections to the service IP
	hittingServiceIP := dst.Addr() == magicDNSIP || dst.Addr() == magicDNSIPv6
	if hittingServiceIP {
		switch dst.Port() {
		case 80:
			// TODO(mpminardi): do we want to show an error message if the web client
			// has been disabled instead of the more "basic" web UI?
			if b.ShouldRunWebClient() {
				return b.handleWebClientConn, opts
			}
			return b.HandleQuad100Port80Conn, opts
		case DriveLocalPort:
			return b.handleDriveConn, opts
		}
	}

	// VIP services get a chance before local-IP handlers.
	if f, ok := hookServeTCPHandlerForVIPService.GetOk(); ok {
		if handler := f(b, dst, src); handler != nil {
			return handler, opts
		}
	}
	// Then handle external connections to the local IP.
	if !b.isLocalIP(dst.Addr()) {
		return nil, nil
	}
	if dst.Port() == 22 && b.ShouldRunSSH() {
		// Use a higher keepalive idle time for SSH connections, as they are
		// typically long lived and idle connections are more likely to be
		// intentional. Ideally we would turn this off entirely, but we can't
		// tell the difference between a long lived connection that is idle
		// vs a connection that is dead because the peer has gone away.
		// We pick 72h as that is typically sufficient for a long weekend.
		opts = append(opts, ptr.To(tcpip.KeepaliveIdleOption(72*time.Hour)))
		return b.handleSSHConn, opts
	}
	// TODO(will,sonia): allow customizing web client port ?
	if dst.Port() == webClientPort && b.ShouldExposeRemoteWebClient() {
		return b.handleWebClientConn, opts
	}
	// Peer API: match only when dst hits the peer-API port for this address.
	if port, ok := b.GetPeerAPIPort(dst.Addr()); ok && dst.Port() == port {
		return func(c net.Conn) error {
			b.handlePeerAPIConn(src, dst, c)
			return nil
		}, opts
	}
	// Finally, fall through to any registered serve handler for this port.
	if f, ok := hookTCPHandlerForServe.GetOk(); ok {
		if handler := f(b, dst.Port(), src, nil); handler != nil {
			return handler, opts
		}
	}
	return nil, nil
}
|
||||
213
vendor/tailscale.com/ipn/ipnlocal/network-lock.go
generated
vendored
213
vendor/tailscale.com/ipn/ipnlocal/network-lock.go
generated
vendored
@@ -1,6 +1,8 @@
|
||||
// Copyright (c) Tailscale Inc & AUTHORS
|
||||
// SPDX-License-Identifier: BSD-3-Clause
|
||||
|
||||
//go:build !ts_omit_tailnetlock
|
||||
|
||||
package ipnlocal
|
||||
|
||||
import (
|
||||
@@ -21,6 +23,7 @@ import (
|
||||
"slices"
|
||||
"time"
|
||||
|
||||
"tailscale.com/health"
|
||||
"tailscale.com/health/healthmsg"
|
||||
"tailscale.com/ipn"
|
||||
"tailscale.com/ipn/ipnstate"
|
||||
@@ -52,10 +55,68 @@ var (
|
||||
type tkaState struct {
|
||||
profile ipn.ProfileID
|
||||
authority *tka.Authority
|
||||
storage *tka.FS
|
||||
storage tka.CompactableChonk
|
||||
filtered []ipnstate.TKAPeer
|
||||
}
|
||||
|
||||
func (b *LocalBackend) initTKALocked() error {
|
||||
cp := b.pm.CurrentProfile()
|
||||
if cp.ID() == "" {
|
||||
b.tka = nil
|
||||
return nil
|
||||
}
|
||||
if b.tka != nil {
|
||||
if b.tka.profile == cp.ID() {
|
||||
// Already initialized.
|
||||
return nil
|
||||
}
|
||||
// As we're switching profiles, we need to reset the TKA to nil.
|
||||
b.tka = nil
|
||||
}
|
||||
root := b.TailscaleVarRoot()
|
||||
if root == "" {
|
||||
b.tka = nil
|
||||
b.logf("cannot fetch existing TKA state; no state directory for network-lock")
|
||||
return nil
|
||||
}
|
||||
|
||||
chonkDir := b.chonkPathLocked()
|
||||
if _, err := os.Stat(chonkDir); err == nil {
|
||||
// The directory exists, which means network-lock has been initialized.
|
||||
storage, err := tka.ChonkDir(chonkDir)
|
||||
if err != nil {
|
||||
return fmt.Errorf("opening tailchonk: %v", err)
|
||||
}
|
||||
authority, err := tka.Open(storage)
|
||||
if err != nil {
|
||||
return fmt.Errorf("initializing tka: %v", err)
|
||||
}
|
||||
|
||||
if err := authority.Compact(storage, tkaCompactionDefaults); err != nil {
|
||||
b.logf("tka compaction failed: %v", err)
|
||||
}
|
||||
|
||||
b.tka = &tkaState{
|
||||
profile: cp.ID(),
|
||||
authority: authority,
|
||||
storage: storage,
|
||||
}
|
||||
b.logf("tka initialized at head %x", authority.Head())
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// noNetworkLockStateDirWarnable is a Warnable to warn the user that Tailnet Lock data
|
||||
// (in particular, the list of AUMs in the TKA state) is being stored in memory and will
|
||||
// be lost when tailscaled restarts.
|
||||
var noNetworkLockStateDirWarnable = health.Register(&health.Warnable{
|
||||
Code: "no-tailnet-lock-state-dir",
|
||||
Title: "No statedir for Tailnet Lock",
|
||||
Severity: health.SeverityMedium,
|
||||
Text: health.StaticMessage(healthmsg.InMemoryTailnetLockState),
|
||||
})
|
||||
|
||||
// tkaFilterNetmapLocked checks the signatures on each node key, dropping
|
||||
// nodes from the netmap whose signature does not verify.
|
||||
//
|
||||
@@ -239,8 +300,11 @@ func (b *LocalBackend) tkaSyncIfNeeded(nm *netmap.NetworkMap, prefs ipn.PrefsVie
|
||||
return nil
|
||||
}
|
||||
|
||||
if b.tka != nil || nm.TKAEnabled {
|
||||
b.logf("tkaSyncIfNeeded: enabled=%v, head=%v", nm.TKAEnabled, nm.TKAHead)
|
||||
isEnabled := b.tka != nil
|
||||
wantEnabled := nm.TKAEnabled
|
||||
|
||||
if isEnabled || wantEnabled {
|
||||
b.logf("tkaSyncIfNeeded: isEnabled=%t, wantEnabled=%t, head=%v", isEnabled, wantEnabled, nm.TKAHead)
|
||||
}
|
||||
|
||||
ourNodeKey, ok := prefs.Persist().PublicNodeKeyOK()
|
||||
@@ -248,8 +312,6 @@ func (b *LocalBackend) tkaSyncIfNeeded(nm *netmap.NetworkMap, prefs ipn.PrefsVie
|
||||
return errors.New("tkaSyncIfNeeded: no node key in prefs")
|
||||
}
|
||||
|
||||
isEnabled := b.tka != nil
|
||||
wantEnabled := nm.TKAEnabled
|
||||
didJustEnable := false
|
||||
if isEnabled != wantEnabled {
|
||||
var ourHead tka.AUMHash
|
||||
@@ -294,25 +356,18 @@ func (b *LocalBackend) tkaSyncIfNeeded(nm *netmap.NetworkMap, prefs ipn.PrefsVie
|
||||
if err := b.tkaSyncLocked(ourNodeKey); err != nil {
|
||||
return fmt.Errorf("tka sync: %w", err)
|
||||
}
|
||||
// Try to compact the TKA state, to avoid unbounded storage on nodes.
|
||||
//
|
||||
// We run this on every sync so that clients compact consistently. In many
|
||||
// cases this will be a no-op.
|
||||
if err := b.tka.authority.Compact(b.tka.storage, tkaCompactionDefaults); err != nil {
|
||||
return fmt.Errorf("tka compact: %w", err)
|
||||
}
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func toSyncOffer(head string, ancestors []string) (tka.SyncOffer, error) {
|
||||
var out tka.SyncOffer
|
||||
if err := out.Head.UnmarshalText([]byte(head)); err != nil {
|
||||
return tka.SyncOffer{}, fmt.Errorf("head.UnmarshalText: %v", err)
|
||||
}
|
||||
out.Ancestors = make([]tka.AUMHash, len(ancestors))
|
||||
for i, a := range ancestors {
|
||||
if err := out.Ancestors[i].UnmarshalText([]byte(a)); err != nil {
|
||||
return tka.SyncOffer{}, fmt.Errorf("ancestor[%d].UnmarshalText: %v", i, err)
|
||||
}
|
||||
}
|
||||
return out, nil
|
||||
}
|
||||
|
||||
// tkaSyncLocked synchronizes TKA state with control. b.mu must be held
|
||||
// and tka must be initialized. b.mu will be stepped out of (and back into)
|
||||
// during network RPCs.
|
||||
@@ -330,7 +385,7 @@ func (b *LocalBackend) tkaSyncLocked(ourNodeKey key.NodePublic) error {
|
||||
if err != nil {
|
||||
return fmt.Errorf("offer RPC: %w", err)
|
||||
}
|
||||
controlOffer, err := toSyncOffer(offerResp.Head, offerResp.Ancestors)
|
||||
controlOffer, err := tka.ToSyncOffer(offerResp.Head, offerResp.Ancestors)
|
||||
if err != nil {
|
||||
return fmt.Errorf("control offer: %v", err)
|
||||
}
|
||||
@@ -393,7 +448,7 @@ func (b *LocalBackend) tkaSyncLocked(ourNodeKey key.NodePublic) error {
|
||||
// b.mu must be held & TKA must be initialized.
|
||||
func (b *LocalBackend) tkaApplyDisablementLocked(secret []byte) error {
|
||||
if b.tka.authority.ValidDisablement(secret) {
|
||||
if err := os.RemoveAll(b.chonkPathLocked()); err != nil {
|
||||
if err := b.tka.storage.RemoveAll(); err != nil {
|
||||
return err
|
||||
}
|
||||
b.tka = nil
|
||||
@@ -415,10 +470,6 @@ func (b *LocalBackend) chonkPathLocked() string {
|
||||
//
|
||||
// b.mu must be held.
|
||||
func (b *LocalBackend) tkaBootstrapFromGenesisLocked(g tkatype.MarshaledAUM, persist persist.PersistView) error {
|
||||
if err := b.CanSupportNetworkLock(); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
var genesis tka.AUM
|
||||
if err := genesis.Unserialize(g); err != nil {
|
||||
return fmt.Errorf("reading genesis: %v", err)
|
||||
@@ -437,19 +488,21 @@ func (b *LocalBackend) tkaBootstrapFromGenesisLocked(g tkatype.MarshaledAUM, per
|
||||
}
|
||||
}
|
||||
|
||||
chonkDir := b.chonkPathLocked()
|
||||
if err := os.Mkdir(filepath.Dir(chonkDir), 0755); err != nil && !os.IsExist(err) {
|
||||
return fmt.Errorf("creating chonk root dir: %v", err)
|
||||
root := b.TailscaleVarRoot()
|
||||
var storage tka.CompactableChonk
|
||||
if root == "" {
|
||||
b.health.SetUnhealthy(noNetworkLockStateDirWarnable, nil)
|
||||
b.logf("network-lock using in-memory storage; no state directory")
|
||||
storage = tka.ChonkMem()
|
||||
} else {
|
||||
chonkDir := b.chonkPathLocked()
|
||||
chonk, err := tka.ChonkDir(chonkDir)
|
||||
if err != nil {
|
||||
return fmt.Errorf("chonk: %v", err)
|
||||
}
|
||||
storage = chonk
|
||||
}
|
||||
if err := os.Mkdir(chonkDir, 0755); err != nil && !os.IsExist(err) {
|
||||
return fmt.Errorf("mkdir: %v", err)
|
||||
}
|
||||
|
||||
chonk, err := tka.ChonkDir(chonkDir)
|
||||
if err != nil {
|
||||
return fmt.Errorf("chonk: %v", err)
|
||||
}
|
||||
authority, err := tka.Bootstrap(chonk, genesis)
|
||||
authority, err := tka.Bootstrap(storage, genesis)
|
||||
if err != nil {
|
||||
return fmt.Errorf("tka bootstrap: %v", err)
|
||||
}
|
||||
@@ -457,29 +510,11 @@ func (b *LocalBackend) tkaBootstrapFromGenesisLocked(g tkatype.MarshaledAUM, per
|
||||
b.tka = &tkaState{
|
||||
profile: b.pm.CurrentProfile().ID(),
|
||||
authority: authority,
|
||||
storage: chonk,
|
||||
storage: storage,
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// CanSupportNetworkLock returns nil if tailscaled is able to operate
|
||||
// a local tailnet key authority (and hence enforce network lock).
|
||||
func (b *LocalBackend) CanSupportNetworkLock() error {
|
||||
if b.tka != nil {
|
||||
// If the TKA is being used, it is supported.
|
||||
return nil
|
||||
}
|
||||
|
||||
if b.TailscaleVarRoot() == "" {
|
||||
return errors.New("network-lock is not supported in this configuration, try setting --statedir")
|
||||
}
|
||||
|
||||
// There's a var root (aka --statedir), so if network lock gets
|
||||
// initialized we have somewhere to store our AUMs. That's all
|
||||
// we need.
|
||||
return nil
|
||||
}
|
||||
|
||||
// NetworkLockStatus returns a structure describing the state of the
|
||||
// tailnet key authority, if any.
|
||||
func (b *LocalBackend) NetworkLockStatus() *ipnstate.NetworkLockStatus {
|
||||
@@ -516,9 +551,10 @@ func (b *LocalBackend) NetworkLockStatus() *ipnstate.NetworkLockStatus {
|
||||
|
||||
var selfAuthorized bool
|
||||
nodeKeySignature := &tka.NodeKeySignature{}
|
||||
if b.netMap != nil {
|
||||
selfAuthorized = b.tka.authority.NodeKeyAuthorized(b.netMap.SelfNode.Key(), b.netMap.SelfNode.KeySignature().AsSlice()) == nil
|
||||
if err := nodeKeySignature.Unserialize(b.netMap.SelfNode.KeySignature().AsSlice()); err != nil {
|
||||
nm := b.currentNode().NetMap()
|
||||
if nm != nil {
|
||||
selfAuthorized = b.tka.authority.NodeKeyAuthorized(nm.SelfNode.Key(), nm.SelfNode.KeySignature().AsSlice()) == nil
|
||||
if err := nodeKeySignature.Unserialize(nm.SelfNode.KeySignature().AsSlice()); err != nil {
|
||||
b.logf("failed to decode self node key signature: %v", err)
|
||||
}
|
||||
}
|
||||
@@ -527,6 +563,7 @@ func (b *LocalBackend) NetworkLockStatus() *ipnstate.NetworkLockStatus {
|
||||
outKeys := make([]ipnstate.TKAKey, len(keys))
|
||||
for i, k := range keys {
|
||||
outKeys[i] = ipnstate.TKAKey{
|
||||
Kind: k.Kind.String(),
|
||||
Key: key.NLPublicFromEd25519Unsafe(k.Public),
|
||||
Metadata: k.Meta,
|
||||
Votes: k.Votes,
|
||||
@@ -539,9 +576,9 @@ func (b *LocalBackend) NetworkLockStatus() *ipnstate.NetworkLockStatus {
|
||||
}
|
||||
|
||||
var visible []*ipnstate.TKAPeer
|
||||
if b.netMap != nil {
|
||||
visible = make([]*ipnstate.TKAPeer, len(b.netMap.Peers))
|
||||
for i, p := range b.netMap.Peers {
|
||||
if nm != nil {
|
||||
visible = make([]*ipnstate.TKAPeer, len(nm.Peers))
|
||||
for i, p := range nm.Peers {
|
||||
s := tkaStateFromPeer(p)
|
||||
visible[i] = &s
|
||||
}
|
||||
@@ -593,24 +630,16 @@ func tkaStateFromPeer(p tailcfg.NodeView) ipnstate.TKAPeer {
|
||||
// The Finish RPC submits signatures for all these nodes, at which point
|
||||
// Control has everything it needs to atomically enable network lock.
|
||||
func (b *LocalBackend) NetworkLockInit(keys []tka.Key, disablementValues [][]byte, supportDisablement []byte) error {
|
||||
if err := b.CanSupportNetworkLock(); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
var ourNodeKey key.NodePublic
|
||||
var nlPriv key.NLPrivate
|
||||
|
||||
b.mu.Lock()
|
||||
|
||||
if !b.capTailnetLock {
|
||||
b.mu.Unlock()
|
||||
return errors.New("not permitted to enable tailnet lock")
|
||||
}
|
||||
|
||||
if p := b.pm.CurrentPrefs(); p.Valid() && p.Persist().Valid() && !p.Persist().PrivateNodeKey().IsZero() {
|
||||
ourNodeKey = p.Persist().PublicNodeKey()
|
||||
nlPriv = p.Persist().NetworkLockKey()
|
||||
}
|
||||
b.mu.Unlock()
|
||||
|
||||
if ourNodeKey.IsZero() || nlPriv.IsZero() {
|
||||
return errors.New("no node-key: is tailscale logged in?")
|
||||
}
|
||||
@@ -624,7 +653,7 @@ func (b *LocalBackend) NetworkLockInit(keys []tka.Key, disablementValues [][]byt
|
||||
// We use an in-memory tailchonk because we don't want to commit to
|
||||
// the filesystem until we've finished the initialization sequence,
|
||||
// just in case something goes wrong.
|
||||
_, genesisAUM, err := tka.Create(&tka.Mem{}, tka.State{
|
||||
_, genesisAUM, err := tka.Create(tka.ChonkMem(), tka.State{
|
||||
Keys: keys,
|
||||
// TODO(tom): s/tka.State.DisablementSecrets/tka.State.DisablementValues
|
||||
// This will center on consistent nomenclature:
|
||||
@@ -652,7 +681,7 @@ func (b *LocalBackend) NetworkLockInit(keys []tka.Key, disablementValues [][]byt
|
||||
|
||||
// Our genesis AUM was accepted but before Control turns on enforcement of
|
||||
// node-key signatures, we need to sign keys for all the existing nodes.
|
||||
// If we don't get these signatures ahead of time, everyone will loose
|
||||
// If we don't get these signatures ahead of time, everyone will lose
|
||||
// connectivity because control won't have any signatures to send which
|
||||
// satisfy network-lock checks.
|
||||
sigs := make(map[tailcfg.NodeID]tkatype.MarshaledSignature, len(initResp.NeedSignatures))
|
||||
@@ -670,6 +699,13 @@ func (b *LocalBackend) NetworkLockInit(keys []tka.Key, disablementValues [][]byt
|
||||
return err
|
||||
}
|
||||
|
||||
// NetworkLockAllowed reports whether the node is allowed to use Tailnet Lock.
|
||||
func (b *LocalBackend) NetworkLockAllowed() bool {
|
||||
b.mu.Lock()
|
||||
defer b.mu.Unlock()
|
||||
return b.capTailnetLock
|
||||
}
|
||||
|
||||
// Only use is in tests.
|
||||
func (b *LocalBackend) NetworkLockVerifySignatureForTest(nks tkatype.MarshaledSignature, nodeKey key.NodePublic) error {
|
||||
b.mu.Lock()
|
||||
@@ -702,16 +738,14 @@ func (b *LocalBackend) NetworkLockForceLocalDisable() error {
|
||||
id1, id2 := b.tka.authority.StateIDs()
|
||||
stateID := fmt.Sprintf("%d:%d", id1, id2)
|
||||
|
||||
cn := b.currentNode()
|
||||
newPrefs := b.pm.CurrentPrefs().AsStruct().Clone() // .Persist should always be initialized here.
|
||||
newPrefs.Persist.DisallowedTKAStateIDs = append(newPrefs.Persist.DisallowedTKAStateIDs, stateID)
|
||||
if err := b.pm.SetPrefs(newPrefs.View(), ipn.NetworkProfile{
|
||||
MagicDNSName: b.netMap.MagicDNSSuffix(),
|
||||
DomainName: b.netMap.DomainName(),
|
||||
}); err != nil {
|
||||
if err := b.pm.SetPrefs(newPrefs.View(), cn.NetworkProfile()); err != nil {
|
||||
return fmt.Errorf("saving prefs: %w", err)
|
||||
}
|
||||
|
||||
if err := os.RemoveAll(b.chonkPathLocked()); err != nil {
|
||||
if err := b.tka.storage.RemoveAll(); err != nil {
|
||||
return fmt.Errorf("deleting TKA state: %w", err)
|
||||
}
|
||||
b.tka = nil
|
||||
@@ -897,7 +931,7 @@ func (b *LocalBackend) NetworkLockLog(maxEntries int) ([]ipnstate.NetworkLockUpd
|
||||
if err == os.ErrNotExist {
|
||||
break
|
||||
}
|
||||
return out, fmt.Errorf("reading AUM: %w", err)
|
||||
return out, fmt.Errorf("reading AUM (%v): %w", cursor, err)
|
||||
}
|
||||
|
||||
update := ipnstate.NetworkLockUpdate{
|
||||
@@ -1247,27 +1281,10 @@ func (b *LocalBackend) tkaFetchBootstrap(ourNodeKey key.NodePublic, head tka.AUM
|
||||
return a, nil
|
||||
}
|
||||
|
||||
func fromSyncOffer(offer tka.SyncOffer) (head string, ancestors []string, err error) {
|
||||
headBytes, err := offer.Head.MarshalText()
|
||||
if err != nil {
|
||||
return "", nil, fmt.Errorf("head.MarshalText: %v", err)
|
||||
}
|
||||
|
||||
ancestors = make([]string, len(offer.Ancestors))
|
||||
for i, ancestor := range offer.Ancestors {
|
||||
hash, err := ancestor.MarshalText()
|
||||
if err != nil {
|
||||
return "", nil, fmt.Errorf("ancestor[%d].MarshalText: %v", i, err)
|
||||
}
|
||||
ancestors[i] = string(hash)
|
||||
}
|
||||
return string(headBytes), ancestors, nil
|
||||
}
|
||||
|
||||
// tkaDoSyncOffer sends a /machine/tka/sync/offer RPC to the control plane
|
||||
// over noise. This is the first of two RPCs implementing tka synchronization.
|
||||
func (b *LocalBackend) tkaDoSyncOffer(ourNodeKey key.NodePublic, offer tka.SyncOffer) (*tailcfg.TKASyncOfferResponse, error) {
|
||||
head, ancestors, err := fromSyncOffer(offer)
|
||||
head, ancestors, err := tka.FromSyncOffer(offer)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("encoding offer: %v", err)
|
||||
}
|
||||
|
||||
872
vendor/tailscale.com/ipn/ipnlocal/node_backend.go
generated
vendored
Normal file
872
vendor/tailscale.com/ipn/ipnlocal/node_backend.go
generated
vendored
Normal file
@@ -0,0 +1,872 @@
|
||||
// Copyright (c) Tailscale Inc & AUTHORS
|
||||
// SPDX-License-Identifier: BSD-3-Clause
|
||||
|
||||
package ipnlocal
|
||||
|
||||
import (
|
||||
"cmp"
|
||||
"context"
|
||||
"net/netip"
|
||||
"slices"
|
||||
"sync"
|
||||
"sync/atomic"
|
||||
|
||||
"go4.org/netipx"
|
||||
"tailscale.com/feature/buildfeatures"
|
||||
"tailscale.com/ipn"
|
||||
"tailscale.com/net/dns"
|
||||
"tailscale.com/net/tsaddr"
|
||||
"tailscale.com/syncs"
|
||||
"tailscale.com/tailcfg"
|
||||
"tailscale.com/types/dnstype"
|
||||
"tailscale.com/types/key"
|
||||
"tailscale.com/types/logger"
|
||||
"tailscale.com/types/netmap"
|
||||
"tailscale.com/types/ptr"
|
||||
"tailscale.com/types/views"
|
||||
"tailscale.com/util/dnsname"
|
||||
"tailscale.com/util/eventbus"
|
||||
"tailscale.com/util/mak"
|
||||
"tailscale.com/util/slicesx"
|
||||
"tailscale.com/wgengine/filter"
|
||||
"tailscale.com/wgengine/magicsock"
|
||||
)
|
||||
|
||||
// nodeBackend is node-specific [LocalBackend] state. It is usually the current node.
|
||||
//
|
||||
// Its exported methods are safe for concurrent use, but the struct is not a snapshot of state at a given moment;
|
||||
// its state can change between calls. For example, asking for the same value (e.g., netmap or prefs) twice
|
||||
// may return different results. Returned values are immutable and safe for concurrent use.
|
||||
//
|
||||
// If both the [LocalBackend]'s internal mutex and the [nodeBackend] mutex must be held at the same time,
|
||||
// the [LocalBackend] mutex must be acquired first. See the comment on the [LocalBackend] field for more details.
|
||||
//
|
||||
// Two pointers to different [nodeBackend] instances represent different local nodes.
|
||||
// However, there's currently a bug where a new [nodeBackend] might not be created
|
||||
// during an implicit node switch (see tailscale/corp#28014).
|
||||
//
|
||||
// In the future, we might want to include at least the following in this struct (in addition to the current fields).
|
||||
// However, not everything should be exported or otherwise made available to the outside world (e.g. [ipnext] extensions,
|
||||
// peer API handlers, etc.).
|
||||
// - [ipn.State]: when the LocalBackend switches to a different [nodeBackend], it can update the state of the old one.
|
||||
// - [ipn.LoginProfileView] and [ipn.Prefs]: we should update them when the [profileManager] reports changes to them.
|
||||
// In the future, [profileManager] (and the corresponding methods of the [LocalBackend]) can be made optional,
|
||||
// and something else could be used to set them once or update them as needed.
|
||||
// - [tailcfg.HostinfoView]: it includes certain fields that are tied to the current profile/node/prefs. We should also
|
||||
// update to build it once instead of mutating it in twelvety different places.
|
||||
// - [filter.Filter] (normal and jailed, along with the filterHash): the nodeBackend could have a method to (re-)build
|
||||
// the filter for the current netmap/prefs (see [LocalBackend.updateFilterLocked]), and it needs to track the current
|
||||
// filters and their hash.
|
||||
// - Fields related to a requested or required (re-)auth: authURL, authURLTime, authActor, keyExpired, etc.
|
||||
// - [controlclient.Client]/[*controlclient.Auto]: the current control client. It is ties to a node identity.
|
||||
// - [tkaState]: it is tied to the current profile / node.
|
||||
// - Fields related to scheduled node expiration: nmExpiryTimer, numClientStatusCalls, [expiryManager].
|
||||
//
|
||||
// It should not include any fields used by specific features that don't belong in [LocalBackend].
|
||||
// Even if they're tied to the local node, instead of moving them here, we should extract the entire feature
|
||||
// into a separate package and have it install proper hooks.
|
||||
type nodeBackend struct {
|
||||
logf logger.Logf
|
||||
|
||||
ctx context.Context // canceled by [nodeBackend.shutdown]
|
||||
ctxCancel context.CancelCauseFunc // cancels ctx
|
||||
|
||||
// filterAtomic is a stateful packet filter. Immutable once created, but can be
|
||||
// replaced with a new one.
|
||||
filterAtomic atomic.Pointer[filter.Filter]
|
||||
|
||||
// initialized once and immutable
|
||||
eventClient *eventbus.Client
|
||||
filterPub *eventbus.Publisher[magicsock.FilterUpdate]
|
||||
nodeViewsPub *eventbus.Publisher[magicsock.NodeViewsUpdate]
|
||||
nodeMutsPub *eventbus.Publisher[magicsock.NodeMutationsUpdate]
|
||||
derpMapViewPub *eventbus.Publisher[tailcfg.DERPMapView]
|
||||
|
||||
// TODO(nickkhyl): maybe use sync.RWMutex?
|
||||
mu syncs.Mutex // protects the following fields
|
||||
|
||||
shutdownOnce sync.Once // guards calling [nodeBackend.shutdown]
|
||||
readyCh chan struct{} // closed by [nodeBackend.ready]; nil after shutdown
|
||||
|
||||
// NetMap is the most recently set full netmap from the controlclient.
|
||||
// It can't be mutated in place once set. Because it can't be mutated in place,
|
||||
// delta updates from the control server don't apply to it. Instead, use
|
||||
// the peers map to get up-to-date information on the state of peers.
|
||||
// In general, avoid using the netMap.Peers slice. We'd like it to go away
|
||||
// as of 2023-09-17.
|
||||
// TODO(nickkhyl): make it an atomic pointer to avoid the need for a mutex?
|
||||
netMap *netmap.NetworkMap
|
||||
|
||||
// peers is the set of current peers and their current values after applying
|
||||
// delta node mutations as they come in (with mu held). The map values can be
|
||||
// given out to callers, but the map itself can be mutated in place (with mu held)
|
||||
// and must not escape the [nodeBackend].
|
||||
peers map[tailcfg.NodeID]tailcfg.NodeView
|
||||
|
||||
// nodeByAddr maps nodes' own addresses (excluding subnet routes) to node IDs.
|
||||
// It is mutated in place (with mu held) and must not escape the [nodeBackend].
|
||||
nodeByAddr map[netip.Addr]tailcfg.NodeID
|
||||
}
|
||||
|
||||
func newNodeBackend(ctx context.Context, logf logger.Logf, bus *eventbus.Bus) *nodeBackend {
|
||||
ctx, ctxCancel := context.WithCancelCause(ctx)
|
||||
nb := &nodeBackend{
|
||||
logf: logf,
|
||||
ctx: ctx,
|
||||
ctxCancel: ctxCancel,
|
||||
eventClient: bus.Client("ipnlocal.nodeBackend"),
|
||||
readyCh: make(chan struct{}),
|
||||
}
|
||||
// Default filter blocks everything and logs nothing.
|
||||
noneFilter := filter.NewAllowNone(logger.Discard, &netipx.IPSet{})
|
||||
nb.filterAtomic.Store(noneFilter)
|
||||
nb.filterPub = eventbus.Publish[magicsock.FilterUpdate](nb.eventClient)
|
||||
nb.nodeViewsPub = eventbus.Publish[magicsock.NodeViewsUpdate](nb.eventClient)
|
||||
nb.nodeMutsPub = eventbus.Publish[magicsock.NodeMutationsUpdate](nb.eventClient)
|
||||
nb.derpMapViewPub = eventbus.Publish[tailcfg.DERPMapView](nb.eventClient)
|
||||
nb.filterPub.Publish(magicsock.FilterUpdate{Filter: nb.filterAtomic.Load()})
|
||||
return nb
|
||||
}
|
||||
|
||||
// Context returns a context that is canceled when the [nodeBackend] shuts down,
|
||||
// either because [LocalBackend] is switching to a different [nodeBackend]
|
||||
// or is shutting down itself.
|
||||
func (nb *nodeBackend) Context() context.Context {
|
||||
return nb.ctx
|
||||
}
|
||||
|
||||
func (nb *nodeBackend) Self() tailcfg.NodeView {
|
||||
nb.mu.Lock()
|
||||
defer nb.mu.Unlock()
|
||||
if nb.netMap == nil {
|
||||
return tailcfg.NodeView{}
|
||||
}
|
||||
return nb.netMap.SelfNode
|
||||
}
|
||||
|
||||
func (nb *nodeBackend) SelfUserID() tailcfg.UserID {
|
||||
self := nb.Self()
|
||||
if !self.Valid() {
|
||||
return 0
|
||||
}
|
||||
return self.User()
|
||||
}
|
||||
|
||||
// SelfHasCap reports whether the specified capability was granted to the self node in the most recent netmap.
|
||||
func (nb *nodeBackend) SelfHasCap(wantCap tailcfg.NodeCapability) bool {
|
||||
return nb.SelfHasCapOr(wantCap, false)
|
||||
}
|
||||
|
||||
// SelfHasCapOr is like [nodeBackend.SelfHasCap], but returns the specified default value
|
||||
// if the netmap is not available yet.
|
||||
func (nb *nodeBackend) SelfHasCapOr(wantCap tailcfg.NodeCapability, def bool) bool {
|
||||
nb.mu.Lock()
|
||||
defer nb.mu.Unlock()
|
||||
if nb.netMap == nil {
|
||||
return def
|
||||
}
|
||||
return nb.netMap.AllCaps.Contains(wantCap)
|
||||
}
|
||||
|
||||
func (nb *nodeBackend) NetworkProfile() ipn.NetworkProfile {
|
||||
nb.mu.Lock()
|
||||
defer nb.mu.Unlock()
|
||||
return ipn.NetworkProfile{
|
||||
// These are ok to call with nil netMap.
|
||||
MagicDNSName: nb.netMap.MagicDNSSuffix(),
|
||||
DomainName: nb.netMap.DomainName(),
|
||||
DisplayName: nb.netMap.TailnetDisplayName(),
|
||||
}
|
||||
}
|
||||
|
||||
// TODO(nickkhyl): update it to return a [tailcfg.DERPMapView]?
|
||||
func (nb *nodeBackend) DERPMap() *tailcfg.DERPMap {
|
||||
nb.mu.Lock()
|
||||
defer nb.mu.Unlock()
|
||||
if nb.netMap == nil {
|
||||
return nil
|
||||
}
|
||||
return nb.netMap.DERPMap
|
||||
}
|
||||
|
||||
func (nb *nodeBackend) NodeByAddr(ip netip.Addr) (_ tailcfg.NodeID, ok bool) {
|
||||
nb.mu.Lock()
|
||||
defer nb.mu.Unlock()
|
||||
nid, ok := nb.nodeByAddr[ip]
|
||||
return nid, ok
|
||||
}
|
||||
|
||||
func (nb *nodeBackend) NodeByKey(k key.NodePublic) (_ tailcfg.NodeID, ok bool) {
|
||||
nb.mu.Lock()
|
||||
defer nb.mu.Unlock()
|
||||
if nb.netMap == nil {
|
||||
return 0, false
|
||||
}
|
||||
if self := nb.netMap.SelfNode; self.Valid() && self.Key() == k {
|
||||
return self.ID(), true
|
||||
}
|
||||
// TODO(bradfitz,nickkhyl): add nodeByKey like nodeByAddr instead of walking peers.
|
||||
for _, n := range nb.peers {
|
||||
if n.Key() == k {
|
||||
return n.ID(), true
|
||||
}
|
||||
}
|
||||
return 0, false
|
||||
}
|
||||
|
||||
func (nb *nodeBackend) NodeByID(id tailcfg.NodeID) (_ tailcfg.NodeView, ok bool) {
|
||||
nb.mu.Lock()
|
||||
defer nb.mu.Unlock()
|
||||
if nb.netMap != nil {
|
||||
if self := nb.netMap.SelfNode; self.Valid() && self.ID() == id {
|
||||
return self, true
|
||||
}
|
||||
}
|
||||
n, ok := nb.peers[id]
|
||||
return n, ok
|
||||
}
|
||||
|
||||
func (nb *nodeBackend) PeerByStableID(id tailcfg.StableNodeID) (_ tailcfg.NodeView, ok bool) {
|
||||
nb.mu.Lock()
|
||||
defer nb.mu.Unlock()
|
||||
for _, n := range nb.peers {
|
||||
if n.StableID() == id {
|
||||
return n, true
|
||||
}
|
||||
}
|
||||
return tailcfg.NodeView{}, false
|
||||
}
|
||||
|
||||
func (nb *nodeBackend) UserByID(id tailcfg.UserID) (_ tailcfg.UserProfileView, ok bool) {
|
||||
nb.mu.Lock()
|
||||
nm := nb.netMap
|
||||
nb.mu.Unlock()
|
||||
if nm == nil {
|
||||
return tailcfg.UserProfileView{}, false
|
||||
}
|
||||
u, ok := nm.UserProfiles[id]
|
||||
return u, ok
|
||||
}
|
||||
|
||||
// Peers returns all the current peers in an undefined order.
|
||||
func (nb *nodeBackend) Peers() []tailcfg.NodeView {
|
||||
nb.mu.Lock()
|
||||
defer nb.mu.Unlock()
|
||||
return slicesx.MapValues(nb.peers)
|
||||
}
|
||||
|
||||
func (nb *nodeBackend) PeersForTest() []tailcfg.NodeView {
|
||||
nb.mu.Lock()
|
||||
defer nb.mu.Unlock()
|
||||
ret := slicesx.MapValues(nb.peers)
|
||||
slices.SortFunc(ret, func(a, b tailcfg.NodeView) int {
|
||||
return cmp.Compare(a.ID(), b.ID())
|
||||
})
|
||||
return ret
|
||||
}
|
||||
|
||||
func (nb *nodeBackend) CollectServices() bool {
|
||||
nb.mu.Lock()
|
||||
defer nb.mu.Unlock()
|
||||
return nb.netMap != nil && nb.netMap.CollectServices
|
||||
}
|
||||
|
||||
// AppendMatchingPeers returns base with all peers that match pred appended.
|
||||
//
|
||||
// It acquires b.mu to read the netmap but releases it before calling pred.
|
||||
func (nb *nodeBackend) AppendMatchingPeers(base []tailcfg.NodeView, pred func(tailcfg.NodeView) bool) []tailcfg.NodeView {
|
||||
var peers []tailcfg.NodeView
|
||||
|
||||
nb.mu.Lock()
|
||||
if nb.netMap != nil {
|
||||
// All fields on b.netMap are immutable, so this is
|
||||
// safe to copy and use outside the lock.
|
||||
peers = nb.netMap.Peers
|
||||
}
|
||||
nb.mu.Unlock()
|
||||
|
||||
ret := base
|
||||
for _, peer := range peers {
|
||||
// The peers in b.netMap don't contain updates made via
|
||||
// UpdateNetmapDelta. So only use PeerView in b.netMap for its NodeID,
|
||||
// and then look up the latest copy in b.peers which is updated in
|
||||
// response to UpdateNetmapDelta edits.
|
||||
nb.mu.Lock()
|
||||
peer, ok := nb.peers[peer.ID()]
|
||||
nb.mu.Unlock()
|
||||
if ok && pred(peer) {
|
||||
ret = append(ret, peer)
|
||||
}
|
||||
}
|
||||
return ret
|
||||
}
|
||||
|
||||
// PeerCaps returns the capabilities that remote src IP has to
|
||||
// ths current node.
|
||||
func (nb *nodeBackend) PeerCaps(src netip.Addr) tailcfg.PeerCapMap {
|
||||
nb.mu.Lock()
|
||||
defer nb.mu.Unlock()
|
||||
return nb.peerCapsLocked(src)
|
||||
}
|
||||
|
||||
func (nb *nodeBackend) peerCapsLocked(src netip.Addr) tailcfg.PeerCapMap {
|
||||
if nb.netMap == nil {
|
||||
return nil
|
||||
}
|
||||
filt := nb.filterAtomic.Load()
|
||||
if filt == nil {
|
||||
return nil
|
||||
}
|
||||
addrs := nb.netMap.GetAddresses()
|
||||
for i := range addrs.Len() {
|
||||
a := addrs.At(i)
|
||||
if !a.IsSingleIP() {
|
||||
continue
|
||||
}
|
||||
dst := a.Addr()
|
||||
if dst.BitLen() == src.BitLen() { // match on family
|
||||
return filt.CapsWithValues(src, dst)
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// PeerHasCap reports whether the peer contains the given capability string,
|
||||
// with any value(s).
|
||||
func (nb *nodeBackend) PeerHasCap(peer tailcfg.NodeView, wantCap tailcfg.PeerCapability) bool {
|
||||
if !peer.Valid() {
|
||||
return false
|
||||
}
|
||||
|
||||
nb.mu.Lock()
|
||||
defer nb.mu.Unlock()
|
||||
for _, ap := range peer.Addresses().All() {
|
||||
if nb.peerHasCapLocked(ap.Addr(), wantCap) {
|
||||
return true
|
||||
}
|
||||
}
|
||||
return false
|
||||
}
|
||||
|
||||
func (nb *nodeBackend) peerHasCapLocked(addr netip.Addr, wantCap tailcfg.PeerCapability) bool {
|
||||
return nb.peerCapsLocked(addr).HasCapability(wantCap)
|
||||
}
|
||||
|
||||
func (nb *nodeBackend) PeerHasPeerAPI(p tailcfg.NodeView) bool {
|
||||
return nb.PeerAPIBase(p) != ""
|
||||
}
|
||||
|
||||
// PeerAPIBase returns the "http://ip:port" URL base to reach peer's PeerAPI,
|
||||
// or the empty string if the peer is invalid or doesn't support PeerAPI.
|
||||
func (nb *nodeBackend) PeerAPIBase(p tailcfg.NodeView) string {
|
||||
nb.mu.Lock()
|
||||
nm := nb.netMap
|
||||
nb.mu.Unlock()
|
||||
return peerAPIBase(nm, p)
|
||||
}
|
||||
|
||||
// PeerIsReachable reports whether the current node can reach p. If the ctx is
|
||||
// done, this function may return a result based on stale reachability data.
|
||||
func (nb *nodeBackend) PeerIsReachable(ctx context.Context, p tailcfg.NodeView) bool {
|
||||
if !nb.SelfHasCap(tailcfg.NodeAttrClientSideReachability) {
|
||||
// Legacy behavior is to always trust the control plane, which
|
||||
// isn’t always correct because the peer could be slow to check
|
||||
// in so that control marks it as offline.
|
||||
// See tailscale/corp#32686.
|
||||
return p.Online().Get()
|
||||
}
|
||||
|
||||
nb.mu.Lock()
|
||||
nm := nb.netMap
|
||||
nb.mu.Unlock()
|
||||
|
||||
if self := nm.SelfNode; self.Valid() && self.ID() == p.ID() {
|
||||
// This node can always reach itself.
|
||||
return true
|
||||
}
|
||||
return nb.peerIsReachable(ctx, p)
|
||||
}
|
||||
|
||||
func (nb *nodeBackend) peerIsReachable(ctx context.Context, p tailcfg.NodeView) bool {
|
||||
// TODO(sfllaw): The following does not actually test for client-side
|
||||
// reachability. This would require a mechanism that tracks whether the
|
||||
// current node can actually reach this peer, either because they are
|
||||
// already communicating or because they can ping each other.
|
||||
//
|
||||
// Instead, it makes the client ignore p.Online completely.
|
||||
//
|
||||
// See tailscale/corp#32686.
|
||||
return true
|
||||
}
|
||||
|
||||
func nodeIP(n tailcfg.NodeView, pred func(netip.Addr) bool) netip.Addr {
|
||||
for _, pfx := range n.Addresses().All() {
|
||||
if pfx.IsSingleIP() && pred(pfx.Addr()) {
|
||||
return pfx.Addr()
|
||||
}
|
||||
}
|
||||
return netip.Addr{}
|
||||
}
|
||||
|
||||
func (nb *nodeBackend) NetMap() *netmap.NetworkMap {
|
||||
nb.mu.Lock()
|
||||
defer nb.mu.Unlock()
|
||||
return nb.netMap
|
||||
}
|
||||
|
||||
func (nb *nodeBackend) netMapWithPeers() *netmap.NetworkMap {
|
||||
nb.mu.Lock()
|
||||
defer nb.mu.Unlock()
|
||||
if nb.netMap == nil {
|
||||
return nil
|
||||
}
|
||||
nm := ptr.To(*nb.netMap) // shallow clone
|
||||
nm.Peers = slicesx.MapValues(nb.peers)
|
||||
slices.SortFunc(nm.Peers, func(a, b tailcfg.NodeView) int {
|
||||
return cmp.Compare(a.ID(), b.ID())
|
||||
})
|
||||
return nm
|
||||
}
|
||||
|
||||
func (nb *nodeBackend) SetNetMap(nm *netmap.NetworkMap) {
|
||||
nb.mu.Lock()
|
||||
defer nb.mu.Unlock()
|
||||
nb.netMap = nm
|
||||
nb.updateNodeByAddrLocked()
|
||||
nb.updatePeersLocked()
|
||||
nv := magicsock.NodeViewsUpdate{}
|
||||
if nm != nil {
|
||||
nv.SelfNode = nm.SelfNode
|
||||
nv.Peers = nm.Peers
|
||||
nb.derpMapViewPub.Publish(nm.DERPMap.View())
|
||||
} else {
|
||||
nb.derpMapViewPub.Publish(tailcfg.DERPMapView{})
|
||||
}
|
||||
nb.nodeViewsPub.Publish(nv)
|
||||
}
|
||||
|
||||
func (nb *nodeBackend) updateNodeByAddrLocked() {
|
||||
nm := nb.netMap
|
||||
if nm == nil {
|
||||
nb.nodeByAddr = nil
|
||||
return
|
||||
}
|
||||
|
||||
// Update the nodeByAddr index.
|
||||
if nb.nodeByAddr == nil {
|
||||
nb.nodeByAddr = map[netip.Addr]tailcfg.NodeID{}
|
||||
}
|
||||
// First pass, mark everything unwanted.
|
||||
for k := range nb.nodeByAddr {
|
||||
nb.nodeByAddr[k] = 0
|
||||
}
|
||||
addNode := func(n tailcfg.NodeView) {
|
||||
for _, ipp := range n.Addresses().All() {
|
||||
if ipp.IsSingleIP() {
|
||||
nb.nodeByAddr[ipp.Addr()] = n.ID()
|
||||
}
|
||||
}
|
||||
}
|
||||
if nm.SelfNode.Valid() {
|
||||
addNode(nm.SelfNode)
|
||||
}
|
||||
for _, p := range nm.Peers {
|
||||
addNode(p)
|
||||
}
|
||||
// Third pass, actually delete the unwanted items.
|
||||
for k, v := range nb.nodeByAddr {
|
||||
if v == 0 {
|
||||
delete(nb.nodeByAddr, k)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func (nb *nodeBackend) updatePeersLocked() {
|
||||
nm := nb.netMap
|
||||
if nm == nil {
|
||||
nb.peers = nil
|
||||
return
|
||||
}
|
||||
|
||||
// First pass, mark everything unwanted.
|
||||
for k := range nb.peers {
|
||||
nb.peers[k] = tailcfg.NodeView{}
|
||||
}
|
||||
|
||||
// Second pass, add everything wanted.
|
||||
for _, p := range nm.Peers {
|
||||
mak.Set(&nb.peers, p.ID(), p)
|
||||
}
|
||||
|
||||
// Third pass, remove deleted things.
|
||||
for k, v := range nb.peers {
|
||||
if !v.Valid() {
|
||||
delete(nb.peers, k)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func (nb *nodeBackend) UpdateNetmapDelta(muts []netmap.NodeMutation) (handled bool) {
|
||||
nb.mu.Lock()
|
||||
defer nb.mu.Unlock()
|
||||
if nb.netMap == nil || len(nb.peers) == 0 {
|
||||
return false
|
||||
}
|
||||
|
||||
// Locally cloned mutable nodes, to avoid calling AsStruct (clone)
|
||||
// multiple times on a node if it's mutated multiple times in this
|
||||
// call (e.g. its endpoints + online status both change)
|
||||
var mutableNodes map[tailcfg.NodeID]*tailcfg.Node
|
||||
|
||||
update := magicsock.NodeMutationsUpdate{
|
||||
Mutations: make([]netmap.NodeMutation, 0, len(muts)),
|
||||
}
|
||||
for _, m := range muts {
|
||||
n, ok := mutableNodes[m.NodeIDBeingMutated()]
|
||||
if !ok {
|
||||
nv, ok := nb.peers[m.NodeIDBeingMutated()]
|
||||
if !ok {
|
||||
// TODO(bradfitz): unexpected metric?
|
||||
return false
|
||||
}
|
||||
n = nv.AsStruct()
|
||||
mak.Set(&mutableNodes, nv.ID(), n)
|
||||
update.Mutations = append(update.Mutations, m)
|
||||
}
|
||||
m.Apply(n)
|
||||
}
|
||||
for nid, n := range mutableNodes {
|
||||
nb.peers[nid] = n.View()
|
||||
}
|
||||
nb.nodeMutsPub.Publish(update)
|
||||
return true
|
||||
}
|
||||
|
||||
// unlockedNodesPermitted reports whether any peer with theUnsignedPeerAPIOnly bool set true has any of its allowed IPs
|
||||
// in the specified packet filter.
|
||||
//
|
||||
// TODO(nickkhyl): It is here temporarily until we can move the whole [LocalBackend.updateFilterLocked] here,
|
||||
// but change it so it builds and returns a filter for the current netmap/prefs instead of re-configuring the engine filter.
|
||||
// Something like (*nodeBackend).RebuildFilters() (filter, jailedFilter *filter.Filter, changed bool) perhaps?
|
||||
func (nb *nodeBackend) unlockedNodesPermitted(packetFilter []filter.Match) bool {
|
||||
nb.mu.Lock()
|
||||
defer nb.mu.Unlock()
|
||||
return packetFilterPermitsUnlockedNodes(nb.peers, packetFilter)
|
||||
}
|
||||
|
||||
func (nb *nodeBackend) filter() *filter.Filter {
|
||||
return nb.filterAtomic.Load()
|
||||
}
|
||||
|
||||
func (nb *nodeBackend) setFilter(f *filter.Filter) {
|
||||
nb.filterAtomic.Store(f)
|
||||
nb.filterPub.Publish(magicsock.FilterUpdate{Filter: f})
|
||||
}
|
||||
|
||||
func (nb *nodeBackend) dnsConfigForNetmap(prefs ipn.PrefsView, selfExpired bool, versionOS string) *dns.Config {
|
||||
nb.mu.Lock()
|
||||
defer nb.mu.Unlock()
|
||||
return dnsConfigForNetmap(nb.netMap, nb.peers, prefs, selfExpired, nb.logf, versionOS)
|
||||
}
|
||||
|
||||
func (nb *nodeBackend) exitNodeCanProxyDNS(exitNodeID tailcfg.StableNodeID) (dohURL string, ok bool) {
|
||||
if !buildfeatures.HasUseExitNode {
|
||||
return "", false
|
||||
}
|
||||
nb.mu.Lock()
|
||||
defer nb.mu.Unlock()
|
||||
return exitNodeCanProxyDNS(nb.netMap, nb.peers, exitNodeID)
|
||||
}
|
||||
|
||||
// ready signals that [LocalBackend] has completed the switch to this [nodeBackend]
|
||||
// and any pending calls to [nodeBackend.Wait] must be unblocked.
|
||||
func (nb *nodeBackend) ready() {
|
||||
nb.mu.Lock()
|
||||
defer nb.mu.Unlock()
|
||||
if nb.readyCh != nil {
|
||||
close(nb.readyCh)
|
||||
}
|
||||
}
|
||||
|
||||
// Wait blocks until [LocalBackend] completes the switch to this [nodeBackend]
|
||||
// and calls [nodeBackend.ready]. It returns an error if the provided context
|
||||
// is canceled or if the [nodeBackend] shuts down or is already shut down.
|
||||
//
|
||||
// It must not be called with the [LocalBackend]'s internal mutex held as [LocalBackend]
|
||||
// may need to acquire it to complete the switch.
|
||||
//
|
||||
// TODO(nickkhyl): Relax this restriction once [LocalBackend]'s state machine
|
||||
// runs in its own goroutine, or if we decide that waiting for the state machine
|
||||
// restart to finish isn't necessary for [LocalBackend] to consider the switch complete.
|
||||
// We mostly need this because of [LocalBackend.Start] acquiring b.mu and the fact that
|
||||
// methods like [LocalBackend.SwitchProfile] must report any errors returned by it.
|
||||
// Perhaps we could report those errors asynchronously as [health.Warnable]s?
|
||||
func (nb *nodeBackend) Wait(ctx context.Context) error {
|
||||
nb.mu.Lock()
|
||||
readyCh := nb.readyCh
|
||||
nb.mu.Unlock()
|
||||
|
||||
select {
|
||||
case <-ctx.Done():
|
||||
return ctx.Err()
|
||||
case <-nb.ctx.Done():
|
||||
return context.Cause(nb.ctx)
|
||||
case <-readyCh:
|
||||
return nil
|
||||
}
|
||||
}
|
||||
|
||||
// shutdown shuts down the [nodeBackend] and cancels its context
|
||||
// with the provided cause.
|
||||
func (nb *nodeBackend) shutdown(cause error) {
|
||||
nb.shutdownOnce.Do(func() {
|
||||
nb.doShutdown(cause)
|
||||
})
|
||||
}
|
||||
|
||||
func (nb *nodeBackend) doShutdown(cause error) {
|
||||
nb.mu.Lock()
|
||||
defer nb.mu.Unlock()
|
||||
nb.ctxCancel(cause)
|
||||
nb.readyCh = nil
|
||||
nb.eventClient.Close()
|
||||
}
|
||||
|
||||
// useWithExitNodeResolvers filters out resolvers so the ones that remain
|
||||
// are all the ones marked for use with exit nodes.
|
||||
func useWithExitNodeResolvers(resolvers []*dnstype.Resolver) []*dnstype.Resolver {
|
||||
var filtered []*dnstype.Resolver
|
||||
for _, res := range resolvers {
|
||||
if res.UseWithExitNode {
|
||||
filtered = append(filtered, res)
|
||||
}
|
||||
}
|
||||
return filtered
|
||||
}
|
||||
|
||||
// useWithExitNodeRoutes filters out routes so the ones that remain
|
||||
// are either zero-length resolver lists, or lists containing only
|
||||
// resolvers marked for use with exit nodes.
|
||||
func useWithExitNodeRoutes(routes map[string][]*dnstype.Resolver) map[string][]*dnstype.Resolver {
|
||||
var filtered map[string][]*dnstype.Resolver
|
||||
for suffix, resolvers := range routes {
|
||||
// Suffixes with no resolvers represent a valid configuration,
|
||||
// and should persist regardless of exit node considerations.
|
||||
if len(resolvers) == 0 {
|
||||
mak.Set(&filtered, suffix, make([]*dnstype.Resolver, 0))
|
||||
continue
|
||||
}
|
||||
|
||||
// In exit node contexts, we filter out resolvers not configured for use with
|
||||
// exit nodes. If there are no such configured resolvers, there should not be an entry for that suffix.
|
||||
filteredResolvers := useWithExitNodeResolvers(resolvers)
|
||||
if len(filteredResolvers) > 0 {
|
||||
mak.Set(&filtered, suffix, filteredResolvers)
|
||||
}
|
||||
}
|
||||
|
||||
return filtered
|
||||
}
|
||||
|
||||
// dnsConfigForNetmap returns a *dns.Config for the given netmap,
|
||||
// prefs, client OS version, and cloud hosting environment.
|
||||
//
|
||||
// The versionOS is a Tailscale-style version ("iOS", "macOS") and not
|
||||
// a runtime.GOOS.
|
||||
func dnsConfigForNetmap(nm *netmap.NetworkMap, peers map[tailcfg.NodeID]tailcfg.NodeView, prefs ipn.PrefsView, selfExpired bool, logf logger.Logf, versionOS string) *dns.Config {
|
||||
if nm == nil {
|
||||
return nil
|
||||
}
|
||||
if !buildfeatures.HasDNS {
|
||||
return &dns.Config{}
|
||||
}
|
||||
|
||||
// If the current node's key is expired, then we don't program any DNS
|
||||
// configuration into the operating system. This ensures that if the
|
||||
// DNS configuration specifies a DNS server that is only reachable over
|
||||
// Tailscale, we don't break connectivity for the user.
|
||||
//
|
||||
// TODO(andrew-d): this also stops returning anything from quad-100; we
|
||||
// could do the same thing as having "CorpDNS: false" and keep that but
|
||||
// not program the OS?
|
||||
if selfExpired {
|
||||
return &dns.Config{}
|
||||
}
|
||||
|
||||
dcfg := &dns.Config{
|
||||
Routes: map[dnsname.FQDN][]*dnstype.Resolver{},
|
||||
Hosts: map[dnsname.FQDN][]netip.Addr{},
|
||||
}
|
||||
|
||||
// selfV6Only is whether we only have IPv6 addresses ourselves.
|
||||
selfV6Only := nm.GetAddresses().ContainsFunc(tsaddr.PrefixIs6) &&
|
||||
!nm.GetAddresses().ContainsFunc(tsaddr.PrefixIs4)
|
||||
dcfg.OnlyIPv6 = selfV6Only
|
||||
|
||||
wantAAAA := nm.AllCaps.Contains(tailcfg.NodeAttrMagicDNSPeerAAAA)
|
||||
|
||||
// Populate MagicDNS records. We do this unconditionally so that
|
||||
// quad-100 can always respond to MagicDNS queries, even if the OS
|
||||
// isn't configured to make MagicDNS resolution truly
|
||||
// magic. Details in
|
||||
// https://github.com/tailscale/tailscale/issues/1886.
|
||||
set := func(name string, addrs views.Slice[netip.Prefix]) {
|
||||
if addrs.Len() == 0 || name == "" {
|
||||
return
|
||||
}
|
||||
fqdn, err := dnsname.ToFQDN(name)
|
||||
if err != nil {
|
||||
return // TODO: propagate error?
|
||||
}
|
||||
var have4 bool
|
||||
for _, addr := range addrs.All() {
|
||||
if addr.Addr().Is4() {
|
||||
have4 = true
|
||||
break
|
||||
}
|
||||
}
|
||||
var ips []netip.Addr
|
||||
for _, addr := range addrs.All() {
|
||||
if selfV6Only {
|
||||
if addr.Addr().Is6() {
|
||||
ips = append(ips, addr.Addr())
|
||||
}
|
||||
continue
|
||||
}
|
||||
// If this node has an IPv4 address, then
|
||||
// remove peers' IPv6 addresses for now, as we
|
||||
// don't guarantee that the peer node actually
|
||||
// can speak IPv6 correctly.
|
||||
//
|
||||
// https://github.com/tailscale/tailscale/issues/1152
|
||||
// tracks adding the right capability reporting to
|
||||
// enable AAAA in MagicDNS.
|
||||
if addr.Addr().Is6() && have4 && !wantAAAA {
|
||||
continue
|
||||
}
|
||||
ips = append(ips, addr.Addr())
|
||||
}
|
||||
dcfg.Hosts[fqdn] = ips
|
||||
}
|
||||
set(nm.SelfName(), nm.GetAddresses())
|
||||
for _, peer := range peers {
|
||||
set(peer.Name(), peer.Addresses())
|
||||
}
|
||||
for _, rec := range nm.DNS.ExtraRecords {
|
||||
switch rec.Type {
|
||||
case "", "A", "AAAA":
|
||||
// Treat these all the same for now: infer from the value
|
||||
default:
|
||||
// TODO: more
|
||||
continue
|
||||
}
|
||||
ip, err := netip.ParseAddr(rec.Value)
|
||||
if err != nil {
|
||||
// Ignore.
|
||||
continue
|
||||
}
|
||||
fqdn, err := dnsname.ToFQDN(rec.Name)
|
||||
if err != nil {
|
||||
continue
|
||||
}
|
||||
dcfg.Hosts[fqdn] = append(dcfg.Hosts[fqdn], ip)
|
||||
}
|
||||
|
||||
if !prefs.CorpDNS() {
|
||||
return dcfg
|
||||
}
|
||||
|
||||
for _, dom := range nm.DNS.Domains {
|
||||
fqdn, err := dnsname.ToFQDN(dom)
|
||||
if err != nil {
|
||||
logf("[unexpected] non-FQDN search domain %q", dom)
|
||||
}
|
||||
dcfg.SearchDomains = append(dcfg.SearchDomains, fqdn)
|
||||
}
|
||||
if nm.DNS.Proxied { // actually means "enable MagicDNS"
|
||||
for _, dom := range magicDNSRootDomains(nm) {
|
||||
dcfg.Routes[dom] = nil // resolve internally with dcfg.Hosts
|
||||
}
|
||||
}
|
||||
|
||||
addDefault := func(resolvers []*dnstype.Resolver) {
|
||||
dcfg.DefaultResolvers = append(dcfg.DefaultResolvers, resolvers...)
|
||||
}
|
||||
|
||||
addSplitDNSRoutes := func(routes map[string][]*dnstype.Resolver) {
|
||||
for suffix, resolvers := range routes {
|
||||
fqdn, err := dnsname.ToFQDN(suffix)
|
||||
if err != nil {
|
||||
logf("[unexpected] non-FQDN route suffix %q", suffix)
|
||||
}
|
||||
|
||||
// Create map entry even if len(resolvers) == 0; Issue 2706.
|
||||
// This lets the control plane send ExtraRecords for which we
|
||||
// can authoritatively answer "name not exists" for when the
|
||||
// control plane also sends this explicit but empty route
|
||||
// making it as something we handle.
|
||||
dcfg.Routes[fqdn] = slices.Clone(resolvers)
|
||||
}
|
||||
}
|
||||
|
||||
// If we're using an exit node and that exit node is new enough (1.19.x+)
|
||||
// to run a DoH DNS proxy, then send all our DNS traffic through it,
|
||||
// unless we find resolvers with UseWithExitNode set, in which case we use that.
|
||||
if buildfeatures.HasUseExitNode {
|
||||
if dohURL, ok := exitNodeCanProxyDNS(nm, peers, prefs.ExitNodeID()); ok {
|
||||
filtered := useWithExitNodeResolvers(nm.DNS.Resolvers)
|
||||
if len(filtered) > 0 {
|
||||
addDefault(filtered)
|
||||
} else {
|
||||
// If no default global resolvers with the override
|
||||
// are configured, configure the exit node's resolver.
|
||||
addDefault([]*dnstype.Resolver{{Addr: dohURL}})
|
||||
}
|
||||
|
||||
addSplitDNSRoutes(useWithExitNodeRoutes(nm.DNS.Routes))
|
||||
return dcfg
|
||||
}
|
||||
}
|
||||
|
||||
// If the user has set default resolvers ("override local DNS"), prefer to
|
||||
// use those resolvers as the default, otherwise if there are WireGuard exit
|
||||
// node resolvers, use those as the default.
|
||||
if len(nm.DNS.Resolvers) > 0 {
|
||||
addDefault(nm.DNS.Resolvers)
|
||||
} else if buildfeatures.HasUseExitNode {
|
||||
if resolvers, ok := wireguardExitNodeDNSResolvers(nm, peers, prefs.ExitNodeID()); ok {
|
||||
addDefault(resolvers)
|
||||
}
|
||||
}
|
||||
|
||||
// Add split DNS routes, with no regard to exit node configuration.
|
||||
addSplitDNSRoutes(nm.DNS.Routes)
|
||||
|
||||
// Set FallbackResolvers as the default resolvers in the
|
||||
// scenarios that can't handle a purely split-DNS config. See
|
||||
// https://github.com/tailscale/tailscale/issues/1743 for
|
||||
// details.
|
||||
switch {
|
||||
case len(dcfg.DefaultResolvers) != 0:
|
||||
// Default resolvers already set.
|
||||
case !prefs.ExitNodeID().IsZero():
|
||||
// When using an exit node, we send all DNS traffic to the exit node, so
|
||||
// we don't need a fallback resolver.
|
||||
//
|
||||
// However, if the exit node is too old to run a DoH DNS proxy, then we
|
||||
// need to use a fallback resolver as it's very likely the LAN resolvers
|
||||
// will become unreachable.
|
||||
//
|
||||
// This is especially important on Apple OSes, where
|
||||
// adding the default route to the tunnel interface makes
|
||||
// it "primary", and we MUST provide VPN-sourced DNS
|
||||
// settings or we break all DNS resolution.
|
||||
//
|
||||
// https://github.com/tailscale/tailscale/issues/1713
|
||||
addDefault(nm.DNS.FallbackResolvers)
|
||||
case len(dcfg.Routes) == 0:
|
||||
// No settings requiring split DNS, no problem.
|
||||
}
|
||||
|
||||
return dcfg
|
||||
}
|
||||
470
vendor/tailscale.com/ipn/ipnlocal/peerapi.go
generated
vendored
470
vendor/tailscale.com/ipn/ipnlocal/peerapi.go
generated
vendored
@@ -15,9 +15,7 @@ import (
|
||||
"net"
|
||||
"net/http"
|
||||
"net/netip"
|
||||
"net/url"
|
||||
"os"
|
||||
"path/filepath"
|
||||
"runtime"
|
||||
"slices"
|
||||
"strconv"
|
||||
@@ -27,33 +25,25 @@ import (
|
||||
|
||||
"golang.org/x/net/dns/dnsmessage"
|
||||
"golang.org/x/net/http/httpguts"
|
||||
"tailscale.com/drive"
|
||||
"tailscale.com/envknob"
|
||||
"tailscale.com/feature"
|
||||
"tailscale.com/feature/buildfeatures"
|
||||
"tailscale.com/health"
|
||||
"tailscale.com/hostinfo"
|
||||
"tailscale.com/ipn"
|
||||
"tailscale.com/net/netaddr"
|
||||
"tailscale.com/net/netmon"
|
||||
"tailscale.com/net/netutil"
|
||||
"tailscale.com/net/sockstats"
|
||||
"tailscale.com/tailcfg"
|
||||
"tailscale.com/taildrop"
|
||||
"tailscale.com/types/netmap"
|
||||
"tailscale.com/types/views"
|
||||
"tailscale.com/util/clientmetric"
|
||||
"tailscale.com/util/httphdr"
|
||||
"tailscale.com/util/httpm"
|
||||
"tailscale.com/wgengine/filter"
|
||||
)
|
||||
|
||||
const (
|
||||
taildrivePrefix = "/v0/drive"
|
||||
)
|
||||
|
||||
var initListenConfig func(*net.ListenConfig, netip.Addr, *netmon.State, string) error
|
||||
|
||||
// addH2C is non-nil on platforms where we want to add H2C
|
||||
// ("cleartext" HTTP/2) support to the peerAPI.
|
||||
var addH2C func(*http.Server)
|
||||
// initListenConfig, if non-nil, is called during peerAPIListener setup. It is used only
|
||||
// on iOS and macOS to set socket options to bind the listener to the Tailscale interface.
|
||||
var initListenConfig func(config *net.ListenConfig, addr netip.Addr, tunIfIndex int) error
|
||||
|
||||
// peerDNSQueryHandler is implemented by tsdns.Resolver.
|
||||
type peerDNSQueryHandler interface {
|
||||
@@ -63,11 +53,9 @@ type peerDNSQueryHandler interface {
|
||||
type peerAPIServer struct {
|
||||
b *LocalBackend
|
||||
resolver peerDNSQueryHandler
|
||||
|
||||
taildrop *taildrop.Manager
|
||||
}
|
||||
|
||||
func (s *peerAPIServer) listen(ip netip.Addr, ifState *netmon.State) (ln net.Listener, err error) {
|
||||
func (s *peerAPIServer) listen(ip netip.Addr, tunIfIndex int) (ln net.Listener, err error) {
|
||||
// Android for whatever reason often has problems creating the peerapi listener.
|
||||
// But since we started intercepting it with netstack, it's not even important that
|
||||
// we have a real kernel-level listener. So just create a dummy listener on Android
|
||||
@@ -83,7 +71,14 @@ func (s *peerAPIServer) listen(ip netip.Addr, ifState *netmon.State) (ln net.Lis
|
||||
// On iOS/macOS, this sets the lc.Control hook to
|
||||
// setsockopt the interface index to bind to, to get
|
||||
// out of the network sandbox.
|
||||
if err := initListenConfig(&lc, ip, ifState, s.b.dialer.TUNName()); err != nil {
|
||||
|
||||
// A zero tunIfIndex is invalid for peerapi. A zero value will not get us
|
||||
// out of the network sandbox. Caller should log and retry.
|
||||
if tunIfIndex == 0 {
|
||||
return nil, fmt.Errorf("peerapi: cannot listen on %s with tunIfIndex 0", ipStr)
|
||||
}
|
||||
|
||||
if err := initListenConfig(&lc, ip, tunIfIndex); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if runtime.GOOS == "darwin" || runtime.GOOS == "ios" {
|
||||
@@ -146,6 +141,9 @@ type peerAPIListener struct {
|
||||
}
|
||||
|
||||
func (pln *peerAPIListener) Close() error {
|
||||
if !buildfeatures.HasPeerAPIServer {
|
||||
return nil
|
||||
}
|
||||
if pln.ln != nil {
|
||||
return pln.ln.Close()
|
||||
}
|
||||
@@ -153,6 +151,9 @@ func (pln *peerAPIListener) Close() error {
|
||||
}
|
||||
|
||||
func (pln *peerAPIListener) serve() {
|
||||
if !buildfeatures.HasPeerAPIServer {
|
||||
return
|
||||
}
|
||||
if pln.ln == nil {
|
||||
return
|
||||
}
|
||||
@@ -206,11 +207,11 @@ func (pln *peerAPIListener) ServeConn(src netip.AddrPort, c net.Conn) {
|
||||
peerUser: peerUser,
|
||||
}
|
||||
httpServer := &http.Server{
|
||||
Handler: h,
|
||||
}
|
||||
if addH2C != nil {
|
||||
addH2C(httpServer)
|
||||
Handler: h,
|
||||
Protocols: new(http.Protocols),
|
||||
}
|
||||
httpServer.Protocols.SetHTTP1(true)
|
||||
httpServer.Protocols.SetUnencryptedHTTP2(true) // over WireGuard; "unencrypted" means no TLS
|
||||
go httpServer.Serve(netutil.NewOneConnListener(c, nil))
|
||||
}
|
||||
|
||||
@@ -229,9 +230,12 @@ type peerAPIHandler struct {
|
||||
type PeerAPIHandler interface {
|
||||
Peer() tailcfg.NodeView
|
||||
PeerCaps() tailcfg.PeerCapMap
|
||||
CanDebug() bool // can remote node can debug this node (internal state, etc)
|
||||
Self() tailcfg.NodeView
|
||||
LocalBackend() *LocalBackend
|
||||
IsSelfUntagged() bool // whether the peer is untagged and the same as this user
|
||||
RemoteAddr() netip.AddrPort
|
||||
Logf(format string, a ...any)
|
||||
}
|
||||
|
||||
func (h *peerAPIHandler) IsSelfUntagged() bool {
|
||||
@@ -239,12 +243,20 @@ func (h *peerAPIHandler) IsSelfUntagged() bool {
|
||||
}
|
||||
func (h *peerAPIHandler) Peer() tailcfg.NodeView { return h.peerNode }
|
||||
func (h *peerAPIHandler) Self() tailcfg.NodeView { return h.selfNode }
|
||||
func (h *peerAPIHandler) RemoteAddr() netip.AddrPort { return h.remoteAddr }
|
||||
func (h *peerAPIHandler) LocalBackend() *LocalBackend { return h.ps.b }
|
||||
func (h *peerAPIHandler) Logf(format string, a ...any) {
|
||||
h.logf(format, a...)
|
||||
}
|
||||
|
||||
func (h *peerAPIHandler) logf(format string, a ...any) {
|
||||
h.ps.b.logf("peerapi: "+format, a...)
|
||||
}
|
||||
|
||||
func (h *peerAPIHandler) logfv1(format string, a ...any) {
|
||||
h.ps.b.logf("[v1] peerapi: "+format, a...)
|
||||
}
|
||||
|
||||
// isAddressValid reports whether addr is a valid destination address for this
|
||||
// node originating from the peer.
|
||||
func (h *peerAPIHandler) isAddressValid(addr netip.Addr) bool {
|
||||
@@ -323,15 +335,31 @@ func peerAPIRequestShouldGetSecurityHeaders(r *http.Request) bool {
|
||||
//
|
||||
// It panics if the path is already registered.
|
||||
func RegisterPeerAPIHandler(path string, f func(PeerAPIHandler, http.ResponseWriter, *http.Request)) {
|
||||
if !buildfeatures.HasPeerAPIServer {
|
||||
return
|
||||
}
|
||||
if _, ok := peerAPIHandlers[path]; ok {
|
||||
panic(fmt.Sprintf("duplicate PeerAPI handler %q", path))
|
||||
}
|
||||
peerAPIHandlers[path] = f
|
||||
if strings.HasSuffix(path, "/") {
|
||||
peerAPIHandlerPrefixes[path] = f
|
||||
}
|
||||
}
|
||||
|
||||
var peerAPIHandlers = map[string]func(PeerAPIHandler, http.ResponseWriter, *http.Request){} // by URL.Path
|
||||
var (
|
||||
peerAPIHandlers = map[string]func(PeerAPIHandler, http.ResponseWriter, *http.Request){} // by URL.Path
|
||||
|
||||
// peerAPIHandlerPrefixes are the subset of peerAPIHandlers where
|
||||
// the map key ends with a slash, indicating a prefix match.
|
||||
peerAPIHandlerPrefixes = map[string]func(PeerAPIHandler, http.ResponseWriter, *http.Request){}
|
||||
)
|
||||
|
||||
func (h *peerAPIHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) {
|
||||
if !buildfeatures.HasPeerAPIServer {
|
||||
http.Error(w, feature.ErrUnavailable.Error(), http.StatusNotImplemented)
|
||||
return
|
||||
}
|
||||
if err := h.validatePeerAPIRequest(r); err != nil {
|
||||
metricInvalidRequests.Add(1)
|
||||
h.logf("invalid request from %v: %v", h.remoteAddr, err)
|
||||
@@ -343,56 +371,50 @@ func (h *peerAPIHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) {
|
||||
w.Header().Set("X-Frame-Options", "DENY")
|
||||
w.Header().Set("X-Content-Type-Options", "nosniff")
|
||||
}
|
||||
if strings.HasPrefix(r.URL.Path, "/v0/put/") {
|
||||
if r.Method == "PUT" {
|
||||
metricPutCalls.Add(1)
|
||||
for pfx, ph := range peerAPIHandlerPrefixes {
|
||||
if strings.HasPrefix(r.URL.Path, pfx) {
|
||||
ph(h, w, r)
|
||||
return
|
||||
}
|
||||
h.handlePeerPut(w, r)
|
||||
return
|
||||
}
|
||||
if strings.HasPrefix(r.URL.Path, "/dns-query") {
|
||||
if buildfeatures.HasDNS && strings.HasPrefix(r.URL.Path, "/dns-query") {
|
||||
metricDNSCalls.Add(1)
|
||||
h.handleDNSQuery(w, r)
|
||||
return
|
||||
}
|
||||
if strings.HasPrefix(r.URL.Path, taildrivePrefix) {
|
||||
h.handleServeDrive(w, r)
|
||||
return
|
||||
}
|
||||
switch r.URL.Path {
|
||||
case "/v0/goroutines":
|
||||
h.handleServeGoroutines(w, r)
|
||||
return
|
||||
case "/v0/env":
|
||||
h.handleServeEnv(w, r)
|
||||
return
|
||||
case "/v0/metrics":
|
||||
h.handleServeMetrics(w, r)
|
||||
return
|
||||
case "/v0/magicsock":
|
||||
h.handleServeMagicsock(w, r)
|
||||
return
|
||||
case "/v0/dnsfwd":
|
||||
h.handleServeDNSFwd(w, r)
|
||||
return
|
||||
case "/v0/interfaces":
|
||||
h.handleServeInterfaces(w, r)
|
||||
return
|
||||
case "/v0/doctor":
|
||||
h.handleServeDoctor(w, r)
|
||||
return
|
||||
case "/v0/sockstats":
|
||||
h.handleServeSockStats(w, r)
|
||||
return
|
||||
case "/v0/ingress":
|
||||
metricIngressCalls.Add(1)
|
||||
h.handleServeIngress(w, r)
|
||||
return
|
||||
if buildfeatures.HasDebug {
|
||||
switch r.URL.Path {
|
||||
case "/v0/goroutines":
|
||||
h.handleServeGoroutines(w, r)
|
||||
return
|
||||
case "/v0/env":
|
||||
h.handleServeEnv(w, r)
|
||||
return
|
||||
case "/v0/metrics":
|
||||
h.handleServeMetrics(w, r)
|
||||
return
|
||||
case "/v0/magicsock":
|
||||
h.handleServeMagicsock(w, r)
|
||||
return
|
||||
case "/v0/dnsfwd":
|
||||
h.handleServeDNSFwd(w, r)
|
||||
return
|
||||
case "/v0/interfaces":
|
||||
h.handleServeInterfaces(w, r)
|
||||
return
|
||||
case "/v0/sockstats":
|
||||
h.handleServeSockStats(w, r)
|
||||
return
|
||||
}
|
||||
}
|
||||
if ph, ok := peerAPIHandlers[r.URL.Path]; ok {
|
||||
ph(h, w, r)
|
||||
return
|
||||
}
|
||||
if r.URL.Path != "/" {
|
||||
http.Error(w, "unsupported peerapi path", http.StatusNotFound)
|
||||
return
|
||||
}
|
||||
who := h.peerUser.DisplayName
|
||||
fmt.Fprintf(w, `<html>
|
||||
<meta name="viewport" content="width=device-width, initial-scale=1">
|
||||
@@ -406,67 +428,6 @@ This is my Tailscale device. Your device is %v.
|
||||
}
|
||||
}
|
||||
|
||||
func (h *peerAPIHandler) handleServeIngress(w http.ResponseWriter, r *http.Request) {
|
||||
// http.Errors only useful if hitting endpoint manually
|
||||
// otherwise rely on log lines when debugging ingress connections
|
||||
// as connection is hijacked for bidi and is encrypted tls
|
||||
if !h.canIngress() {
|
||||
h.logf("ingress: denied; no ingress cap from %v", h.remoteAddr)
|
||||
http.Error(w, "denied; no ingress cap", http.StatusForbidden)
|
||||
return
|
||||
}
|
||||
logAndError := func(code int, publicMsg string) {
|
||||
h.logf("ingress: bad request from %v: %s", h.remoteAddr, publicMsg)
|
||||
http.Error(w, publicMsg, http.StatusMethodNotAllowed)
|
||||
}
|
||||
bad := func(publicMsg string) {
|
||||
logAndError(http.StatusBadRequest, publicMsg)
|
||||
}
|
||||
if r.Method != "POST" {
|
||||
logAndError(http.StatusMethodNotAllowed, "only POST allowed")
|
||||
return
|
||||
}
|
||||
srcAddrStr := r.Header.Get("Tailscale-Ingress-Src")
|
||||
if srcAddrStr == "" {
|
||||
bad("Tailscale-Ingress-Src header not set")
|
||||
return
|
||||
}
|
||||
srcAddr, err := netip.ParseAddrPort(srcAddrStr)
|
||||
if err != nil {
|
||||
bad("Tailscale-Ingress-Src header invalid; want ip:port")
|
||||
return
|
||||
}
|
||||
target := ipn.HostPort(r.Header.Get("Tailscale-Ingress-Target"))
|
||||
if target == "" {
|
||||
bad("Tailscale-Ingress-Target header not set")
|
||||
return
|
||||
}
|
||||
if _, _, err := net.SplitHostPort(string(target)); err != nil {
|
||||
bad("Tailscale-Ingress-Target header invalid; want host:port")
|
||||
return
|
||||
}
|
||||
|
||||
getConnOrReset := func() (net.Conn, bool) {
|
||||
conn, _, err := w.(http.Hijacker).Hijack()
|
||||
if err != nil {
|
||||
h.logf("ingress: failed hijacking conn")
|
||||
http.Error(w, "failed hijacking conn", http.StatusInternalServerError)
|
||||
return nil, false
|
||||
}
|
||||
io.WriteString(conn, "HTTP/1.1 101 Switching Protocols\r\n\r\n")
|
||||
return &ipn.FunnelConn{
|
||||
Conn: conn,
|
||||
Src: srcAddr,
|
||||
Target: target,
|
||||
}, true
|
||||
}
|
||||
sendRST := func() {
|
||||
http.Error(w, "denied", http.StatusForbidden)
|
||||
}
|
||||
|
||||
h.ps.b.HandleIngressTCPConn(h.peerNode, target, srcAddr, getConnOrReset, sendRST)
|
||||
}
|
||||
|
||||
func (h *peerAPIHandler) handleServeInterfaces(w http.ResponseWriter, r *http.Request) {
|
||||
if !h.canDebug() {
|
||||
http.Error(w, "denied; no debug access", http.StatusForbidden)
|
||||
@@ -514,24 +475,6 @@ func (h *peerAPIHandler) handleServeInterfaces(w http.ResponseWriter, r *http.Re
|
||||
fmt.Fprintln(w, "</table>")
|
||||
}
|
||||
|
||||
func (h *peerAPIHandler) handleServeDoctor(w http.ResponseWriter, r *http.Request) {
|
||||
if !h.canDebug() {
|
||||
http.Error(w, "denied; no debug access", http.StatusForbidden)
|
||||
return
|
||||
}
|
||||
w.Header().Set("Content-Type", "text/html; charset=utf-8")
|
||||
fmt.Fprintln(w, "<h1>Doctor Output</h1>")
|
||||
|
||||
fmt.Fprintln(w, "<pre>")
|
||||
|
||||
h.ps.b.Doctor(r.Context(), func(format string, args ...any) {
|
||||
line := fmt.Sprintf(format, args...)
|
||||
fmt.Fprintln(w, html.EscapeString(line))
|
||||
})
|
||||
|
||||
fmt.Fprintln(w, "</pre>")
|
||||
}
|
||||
|
||||
func (h *peerAPIHandler) handleServeSockStats(w http.ResponseWriter, r *http.Request) {
|
||||
if !h.canDebug() {
|
||||
http.Error(w, "denied; no debug access", http.StatusForbidden)
|
||||
@@ -630,14 +573,7 @@ func (h *peerAPIHandler) handleServeSockStats(w http.ResponseWriter, r *http.Req
|
||||
fmt.Fprintln(w, "</pre>")
|
||||
}
|
||||
|
||||
// canPutFile reports whether h can put a file ("Taildrop") to this node.
|
||||
func (h *peerAPIHandler) canPutFile() bool {
|
||||
if h.peerNode.UnsignedPeerAPIOnly() {
|
||||
// Unsigned peers can't send files.
|
||||
return false
|
||||
}
|
||||
return h.isSelf || h.peerHasCap(tailcfg.PeerCapabilityFileSharingSend)
|
||||
}
|
||||
func (h *peerAPIHandler) CanDebug() bool { return h.canDebug() }
|
||||
|
||||
// canDebug reports whether h can debug this node (goroutines, metrics,
|
||||
// magicsock internal state, etc).
|
||||
@@ -668,110 +604,6 @@ func (h *peerAPIHandler) PeerCaps() tailcfg.PeerCapMap {
|
||||
return h.ps.b.PeerCaps(h.remoteAddr.Addr())
|
||||
}
|
||||
|
||||
func (h *peerAPIHandler) handlePeerPut(w http.ResponseWriter, r *http.Request) {
|
||||
if !h.canPutFile() {
|
||||
http.Error(w, taildrop.ErrNoTaildrop.Error(), http.StatusForbidden)
|
||||
return
|
||||
}
|
||||
if !h.ps.b.hasCapFileSharing() {
|
||||
http.Error(w, taildrop.ErrNoTaildrop.Error(), http.StatusForbidden)
|
||||
return
|
||||
}
|
||||
rawPath := r.URL.EscapedPath()
|
||||
prefix, ok := strings.CutPrefix(rawPath, "/v0/put/")
|
||||
if !ok {
|
||||
http.Error(w, "misconfigured internals", http.StatusForbidden)
|
||||
return
|
||||
}
|
||||
baseName, err := url.PathUnescape(prefix)
|
||||
if err != nil {
|
||||
http.Error(w, taildrop.ErrInvalidFileName.Error(), http.StatusBadRequest)
|
||||
return
|
||||
}
|
||||
enc := json.NewEncoder(w)
|
||||
switch r.Method {
|
||||
case "GET":
|
||||
id := taildrop.ClientID(h.peerNode.StableID())
|
||||
if prefix == "" {
|
||||
// List all the partial files.
|
||||
files, err := h.ps.taildrop.PartialFiles(id)
|
||||
if err != nil {
|
||||
http.Error(w, err.Error(), http.StatusInternalServerError)
|
||||
return
|
||||
}
|
||||
if err := enc.Encode(files); err != nil {
|
||||
http.Error(w, err.Error(), http.StatusInternalServerError)
|
||||
h.logf("json.Encoder.Encode error: %v", err)
|
||||
return
|
||||
}
|
||||
} else {
|
||||
// Stream all the block hashes for the specified file.
|
||||
next, close, err := h.ps.taildrop.HashPartialFile(id, baseName)
|
||||
if err != nil {
|
||||
http.Error(w, err.Error(), http.StatusInternalServerError)
|
||||
return
|
||||
}
|
||||
defer close()
|
||||
for {
|
||||
switch cs, err := next(); {
|
||||
case err == io.EOF:
|
||||
return
|
||||
case err != nil:
|
||||
http.Error(w, err.Error(), http.StatusInternalServerError)
|
||||
h.logf("HashPartialFile.next error: %v", err)
|
||||
return
|
||||
default:
|
||||
if err := enc.Encode(cs); err != nil {
|
||||
http.Error(w, err.Error(), http.StatusInternalServerError)
|
||||
h.logf("json.Encoder.Encode error: %v", err)
|
||||
return
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
case "PUT":
|
||||
t0 := h.ps.b.clock.Now()
|
||||
id := taildrop.ClientID(h.peerNode.StableID())
|
||||
|
||||
var offset int64
|
||||
if rangeHdr := r.Header.Get("Range"); rangeHdr != "" {
|
||||
ranges, ok := httphdr.ParseRange(rangeHdr)
|
||||
if !ok || len(ranges) != 1 || ranges[0].Length != 0 {
|
||||
http.Error(w, "invalid Range header", http.StatusBadRequest)
|
||||
return
|
||||
}
|
||||
offset = ranges[0].Start
|
||||
}
|
||||
n, err := h.ps.taildrop.PutFile(taildrop.ClientID(fmt.Sprint(id)), baseName, r.Body, offset, r.ContentLength)
|
||||
switch err {
|
||||
case nil:
|
||||
d := h.ps.b.clock.Since(t0).Round(time.Second / 10)
|
||||
h.logf("got put of %s in %v from %v/%v", approxSize(n), d, h.remoteAddr.Addr(), h.peerNode.ComputedName)
|
||||
io.WriteString(w, "{}\n")
|
||||
case taildrop.ErrNoTaildrop:
|
||||
http.Error(w, err.Error(), http.StatusForbidden)
|
||||
case taildrop.ErrInvalidFileName:
|
||||
http.Error(w, err.Error(), http.StatusBadRequest)
|
||||
case taildrop.ErrFileExists:
|
||||
http.Error(w, err.Error(), http.StatusConflict)
|
||||
default:
|
||||
http.Error(w, err.Error(), http.StatusInternalServerError)
|
||||
}
|
||||
default:
|
||||
http.Error(w, "expected method GET or PUT", http.StatusMethodNotAllowed)
|
||||
}
|
||||
}
|
||||
|
||||
func approxSize(n int64) string {
|
||||
if n <= 1<<10 {
|
||||
return "<=1KB"
|
||||
}
|
||||
if n <= 1<<20 {
|
||||
return "<=1MB"
|
||||
}
|
||||
return fmt.Sprintf("~%dMB", n>>20)
|
||||
}
|
||||
|
||||
func (h *peerAPIHandler) handleServeGoroutines(w http.ResponseWriter, r *http.Request) {
|
||||
if !h.canDebug() {
|
||||
http.Error(w, "denied; no debug access", http.StatusForbidden)
|
||||
@@ -826,6 +658,10 @@ func (h *peerAPIHandler) handleServeMetrics(w http.ResponseWriter, r *http.Reque
|
||||
}
|
||||
|
||||
func (h *peerAPIHandler) handleServeDNSFwd(w http.ResponseWriter, r *http.Request) {
|
||||
if !buildfeatures.HasDNS {
|
||||
http.NotFound(w, r)
|
||||
return
|
||||
}
|
||||
if !h.canDebug() {
|
||||
http.Error(w, "denied; no debug access", http.StatusForbidden)
|
||||
return
|
||||
@@ -839,6 +675,9 @@ func (h *peerAPIHandler) handleServeDNSFwd(w http.ResponseWriter, r *http.Reques
|
||||
}
|
||||
|
||||
func (h *peerAPIHandler) replyToDNSQueries() bool {
|
||||
if !buildfeatures.HasDNS {
|
||||
return false
|
||||
}
|
||||
if h.isSelf {
|
||||
// If the peer is owned by the same user, just allow it
|
||||
// without further checks.
|
||||
@@ -868,7 +707,7 @@ func (h *peerAPIHandler) replyToDNSQueries() bool {
|
||||
// but an app connector explicitly adds 0.0.0.0/32 (and the
|
||||
// IPv6 equivalent) to make this work (see updateFilterLocked
|
||||
// in LocalBackend).
|
||||
f := b.filterAtomic.Load()
|
||||
f := b.currentNode().filter()
|
||||
if f == nil {
|
||||
return false
|
||||
}
|
||||
@@ -890,7 +729,7 @@ func (h *peerAPIHandler) replyToDNSQueries() bool {
|
||||
// handleDNSQuery implements a DoH server (RFC 8484) over the peerapi.
|
||||
// It's not over HTTPS as the spec dictates, but rather HTTP-over-WireGuard.
|
||||
func (h *peerAPIHandler) handleDNSQuery(w http.ResponseWriter, r *http.Request) {
|
||||
if h.ps.resolver == nil {
|
||||
if !buildfeatures.HasDNS || h.ps.resolver == nil {
|
||||
http.Error(w, "DNS not wired up", http.StatusNotImplemented)
|
||||
return
|
||||
}
|
||||
@@ -931,7 +770,7 @@ func (h *peerAPIHandler) handleDNSQuery(w http.ResponseWriter, r *http.Request)
|
||||
// TODO(raggi): consider pushing the integration down into the resolver
|
||||
// instead to avoid re-parsing the DNS response for improved performance in
|
||||
// the future.
|
||||
if h.ps.b.OfferingAppConnector() {
|
||||
if buildfeatures.HasAppConnectors && h.ps.b.OfferingAppConnector() {
|
||||
if err := h.ps.b.ObserveDNSResponse(res); err != nil {
|
||||
h.logf("ObserveDNSResponse error: %v", err)
|
||||
// This is not fatal, we probably just failed to parse the upstream
|
||||
@@ -958,7 +797,7 @@ func dohQuery(r *http.Request) (dnsQuery []byte, publicErr string) {
|
||||
case "GET":
|
||||
q64 := r.FormValue("dns")
|
||||
if q64 == "" {
|
||||
return nil, "missing 'dns' parameter"
|
||||
return nil, "missing ‘dns’ parameter; try '?dns=' (DoH standard) or use '?q=<name>' for JSON debug mode"
|
||||
}
|
||||
if base64.RawURLEncoding.DecodedLen(len(q64)) > maxQueryLen {
|
||||
return nil, "query too large"
|
||||
@@ -1113,85 +952,46 @@ func (rbw *requestBodyWrapper) Read(b []byte) (int, error) {
|
||||
return n, err
|
||||
}
|
||||
|
||||
func (h *peerAPIHandler) handleServeDrive(w http.ResponseWriter, r *http.Request) {
|
||||
if !h.ps.b.DriveSharingEnabled() {
|
||||
h.logf("taildrive: not enabled")
|
||||
http.Error(w, "taildrive not enabled", http.StatusNotFound)
|
||||
return
|
||||
// peerAPIURL returns an HTTP URL for the peer's peerapi service,
|
||||
// without a trailing slash.
|
||||
//
|
||||
// If ip or port is the zero value then it returns the empty string.
|
||||
func peerAPIURL(ip netip.Addr, port uint16) string {
|
||||
if port == 0 || !ip.IsValid() {
|
||||
return ""
|
||||
}
|
||||
|
||||
capsMap := h.PeerCaps()
|
||||
driveCaps, ok := capsMap[tailcfg.PeerCapabilityTaildrive]
|
||||
if !ok {
|
||||
h.logf("taildrive: not permitted")
|
||||
http.Error(w, "taildrive not permitted", http.StatusForbidden)
|
||||
return
|
||||
}
|
||||
|
||||
rawPerms := make([][]byte, 0, len(driveCaps))
|
||||
for _, cap := range driveCaps {
|
||||
rawPerms = append(rawPerms, []byte(cap))
|
||||
}
|
||||
|
||||
p, err := drive.ParsePermissions(rawPerms)
|
||||
if err != nil {
|
||||
h.logf("taildrive: error parsing permissions: %w", err.Error())
|
||||
http.Error(w, err.Error(), http.StatusInternalServerError)
|
||||
return
|
||||
}
|
||||
|
||||
fs, ok := h.ps.b.sys.DriveForRemote.GetOK()
|
||||
if !ok {
|
||||
h.logf("taildrive: not supported on platform")
|
||||
http.Error(w, "taildrive not supported on platform", http.StatusNotFound)
|
||||
return
|
||||
}
|
||||
wr := &httpResponseWrapper{
|
||||
ResponseWriter: w,
|
||||
}
|
||||
bw := &requestBodyWrapper{
|
||||
ReadCloser: r.Body,
|
||||
}
|
||||
r.Body = bw
|
||||
|
||||
if r.Method == httpm.PUT || r.Method == httpm.GET {
|
||||
defer func() {
|
||||
switch wr.statusCode {
|
||||
case 304:
|
||||
// 304s are particularly chatty so skip logging.
|
||||
default:
|
||||
contentType := "unknown"
|
||||
if ct := wr.Header().Get("Content-Type"); ct != "" {
|
||||
contentType = ct
|
||||
}
|
||||
|
||||
h.logf("taildrive: share: %s from %s to %s: status-code=%d ext=%q content-type=%q tx=%.f rx=%.f", r.Method, h.peerNode.Key().ShortString(), h.selfNode.Key().ShortString(), wr.statusCode, parseDriveFileExtensionForLog(r.URL.Path), contentType, roundTraffic(wr.contentLength), roundTraffic(bw.bytesRead))
|
||||
}
|
||||
}()
|
||||
}
|
||||
|
||||
r.URL.Path = strings.TrimPrefix(r.URL.Path, taildrivePrefix)
|
||||
fs.ServeHTTPWithPerms(p, wr, r)
|
||||
return fmt.Sprintf("http://%v", netip.AddrPortFrom(ip, port))
|
||||
}
|
||||
|
||||
// parseDriveFileExtensionForLog parses the file extension, if available.
|
||||
// If a file extension is not present or parsable, the file extension is
|
||||
// set to "unknown". If the file extension contains a double quote, it is
|
||||
// replaced with "removed".
|
||||
// All whitespace is removed from a parsed file extension.
|
||||
// File extensions including the leading ., e.g. ".gif".
|
||||
func parseDriveFileExtensionForLog(path string) string {
|
||||
fileExt := "unknown"
|
||||
if fe := filepath.Ext(path); fe != "" {
|
||||
if strings.Contains(fe, "\"") {
|
||||
// Do not log include file extensions with quotes within them.
|
||||
return "removed"
|
||||
}
|
||||
// Remove white space from user defined inputs.
|
||||
fileExt = strings.ReplaceAll(fe, " ", "")
|
||||
// peerAPIBase returns the "http://ip:port" URL base to reach peer's peerAPI.
|
||||
// It returns the empty string if the peer doesn't support the peerapi
|
||||
// or there's no matching address family based on the netmap's own addresses.
|
||||
func peerAPIBase(nm *netmap.NetworkMap, peer tailcfg.NodeView) string {
|
||||
if nm == nil || !peer.Valid() || !peer.Hostinfo().Valid() {
|
||||
return ""
|
||||
}
|
||||
|
||||
return fileExt
|
||||
var have4, have6 bool
|
||||
addrs := nm.GetAddresses()
|
||||
for _, a := range addrs.All() {
|
||||
if !a.IsSingleIP() {
|
||||
continue
|
||||
}
|
||||
switch {
|
||||
case a.Addr().Is4():
|
||||
have4 = true
|
||||
case a.Addr().Is6():
|
||||
have6 = true
|
||||
}
|
||||
}
|
||||
p4, p6 := peerAPIPorts(peer)
|
||||
switch {
|
||||
case have4 && p4 != 0:
|
||||
return peerAPIURL(nodeIP(peer, netip.Addr.Is4), p4)
|
||||
case have6 && p6 != 0:
|
||||
return peerAPIURL(nodeIP(peer, netip.Addr.Is6), p6)
|
||||
}
|
||||
return ""
|
||||
}
|
||||
|
||||
// newFakePeerAPIListener creates a new net.Listener that acts like
|
||||
@@ -1244,7 +1044,5 @@ var (
|
||||
metricInvalidRequests = clientmetric.NewCounter("peerapi_invalid_requests")
|
||||
|
||||
// Non-debug PeerAPI endpoints.
|
||||
metricPutCalls = clientmetric.NewCounter("peerapi_put")
|
||||
metricDNSCalls = clientmetric.NewCounter("peerapi_dns")
|
||||
metricIngressCalls = clientmetric.NewCounter("peerapi_ingress")
|
||||
metricDNSCalls = clientmetric.NewCounter("peerapi_dns")
|
||||
)
|
||||
|
||||
110
vendor/tailscale.com/ipn/ipnlocal/peerapi_drive.go
generated
vendored
Normal file
110
vendor/tailscale.com/ipn/ipnlocal/peerapi_drive.go
generated
vendored
Normal file
@@ -0,0 +1,110 @@
|
||||
// Copyright (c) Tailscale Inc & AUTHORS
|
||||
// SPDX-License-Identifier: BSD-3-Clause
|
||||
|
||||
//go:build !ts_omit_drive
|
||||
|
||||
package ipnlocal
|
||||
|
||||
import (
|
||||
"net/http"
|
||||
"path/filepath"
|
||||
"strings"
|
||||
|
||||
"tailscale.com/drive"
|
||||
"tailscale.com/tailcfg"
|
||||
"tailscale.com/util/httpm"
|
||||
)
|
||||
|
||||
const (
|
||||
taildrivePrefix = "/v0/drive"
|
||||
)
|
||||
|
||||
func init() {
|
||||
peerAPIHandlerPrefixes[taildrivePrefix] = handleServeDrive
|
||||
}
|
||||
|
||||
func handleServeDrive(hi PeerAPIHandler, w http.ResponseWriter, r *http.Request) {
|
||||
h := hi.(*peerAPIHandler)
|
||||
|
||||
h.logfv1("taildrive: got %s request from %s", r.Method, h.peerNode.Key().ShortString())
|
||||
if !h.ps.b.DriveSharingEnabled() {
|
||||
h.logf("taildrive: not enabled")
|
||||
http.Error(w, "taildrive not enabled", http.StatusNotFound)
|
||||
return
|
||||
}
|
||||
|
||||
capsMap := h.PeerCaps()
|
||||
driveCaps, ok := capsMap[tailcfg.PeerCapabilityTaildrive]
|
||||
if !ok {
|
||||
h.logf("taildrive: not permitted")
|
||||
http.Error(w, "taildrive not permitted", http.StatusForbidden)
|
||||
return
|
||||
}
|
||||
|
||||
rawPerms := make([][]byte, 0, len(driveCaps))
|
||||
for _, cap := range driveCaps {
|
||||
rawPerms = append(rawPerms, []byte(cap))
|
||||
}
|
||||
|
||||
p, err := drive.ParsePermissions(rawPerms)
|
||||
if err != nil {
|
||||
h.logf("taildrive: error parsing permissions: %v", err)
|
||||
http.Error(w, err.Error(), http.StatusInternalServerError)
|
||||
return
|
||||
}
|
||||
|
||||
fs, ok := h.ps.b.sys.DriveForRemote.GetOK()
|
||||
if !ok {
|
||||
h.logf("taildrive: not supported on platform")
|
||||
http.Error(w, "taildrive not supported on platform", http.StatusNotFound)
|
||||
return
|
||||
}
|
||||
wr := &httpResponseWrapper{
|
||||
ResponseWriter: w,
|
||||
}
|
||||
bw := &requestBodyWrapper{
|
||||
ReadCloser: r.Body,
|
||||
}
|
||||
r.Body = bw
|
||||
|
||||
defer func() {
|
||||
switch wr.statusCode {
|
||||
case 304:
|
||||
// 304s are particularly chatty so skip logging.
|
||||
default:
|
||||
log := h.logf
|
||||
if r.Method != httpm.PUT && r.Method != httpm.GET {
|
||||
log = h.logfv1
|
||||
}
|
||||
contentType := "unknown"
|
||||
if ct := wr.Header().Get("Content-Type"); ct != "" {
|
||||
contentType = ct
|
||||
}
|
||||
|
||||
log("taildrive: share: %s from %s to %s: status-code=%d ext=%q content-type=%q tx=%.f rx=%.f", r.Method, h.peerNode.Key().ShortString(), h.selfNode.Key().ShortString(), wr.statusCode, parseDriveFileExtensionForLog(r.URL.Path), contentType, roundTraffic(wr.contentLength), roundTraffic(bw.bytesRead))
|
||||
}
|
||||
}()
|
||||
|
||||
r.URL.Path = strings.TrimPrefix(r.URL.Path, taildrivePrefix)
|
||||
fs.ServeHTTPWithPerms(p, wr, r)
|
||||
}
|
||||
|
||||
// parseDriveFileExtensionForLog parses the file extension, if available.
|
||||
// If a file extension is not present or parsable, the file extension is
|
||||
// set to "unknown". If the file extension contains a double quote, it is
|
||||
// replaced with "removed".
|
||||
// All whitespace is removed from a parsed file extension.
|
||||
// File extensions including the leading ., e.g. ".gif".
|
||||
func parseDriveFileExtensionForLog(path string) string {
|
||||
fileExt := "unknown"
|
||||
if fe := filepath.Ext(path); fe != "" {
|
||||
if strings.Contains(fe, "\"") {
|
||||
// Do not log include file extensions with quotes within them.
|
||||
return "removed"
|
||||
}
|
||||
// Remove white space from user defined inputs.
|
||||
fileExt = strings.ReplaceAll(fe, " ", "")
|
||||
}
|
||||
|
||||
return fileExt
|
||||
}
|
||||
20
vendor/tailscale.com/ipn/ipnlocal/peerapi_h2c.go
generated
vendored
20
vendor/tailscale.com/ipn/ipnlocal/peerapi_h2c.go
generated
vendored
@@ -1,20 +0,0 @@
|
||||
// Copyright (c) Tailscale Inc & AUTHORS
|
||||
// SPDX-License-Identifier: BSD-3-Clause
|
||||
|
||||
//go:build !ios && !android && !js
|
||||
|
||||
package ipnlocal
|
||||
|
||||
import (
|
||||
"net/http"
|
||||
|
||||
"golang.org/x/net/http2"
|
||||
"golang.org/x/net/http2/h2c"
|
||||
)
|
||||
|
||||
func init() {
|
||||
addH2C = func(s *http.Server) {
|
||||
h2s := &http2.Server{}
|
||||
s.Handler = h2c.NewHandler(s.Handler, h2s)
|
||||
}
|
||||
}
|
||||
10
vendor/tailscale.com/ipn/ipnlocal/peerapi_macios_ext.go
generated
vendored
10
vendor/tailscale.com/ipn/ipnlocal/peerapi_macios_ext.go
generated
vendored
@@ -6,11 +6,9 @@
|
||||
package ipnlocal
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"net"
|
||||
"net/netip"
|
||||
|
||||
"tailscale.com/net/netmon"
|
||||
"tailscale.com/net/netns"
|
||||
)
|
||||
|
||||
@@ -21,10 +19,6 @@ func init() {
|
||||
// initListenConfigNetworkExtension configures nc for listening on IP
|
||||
// through the iOS/macOS Network/System Extension (Packet Tunnel
|
||||
// Provider) sandbox.
|
||||
func initListenConfigNetworkExtension(nc *net.ListenConfig, ip netip.Addr, st *netmon.State, tunIfName string) error {
|
||||
tunIf, ok := st.Interface[tunIfName]
|
||||
if !ok {
|
||||
return fmt.Errorf("no interface with name %q", tunIfName)
|
||||
}
|
||||
return netns.SetListenConfigInterfaceIndex(nc, tunIf.Index)
|
||||
func initListenConfigNetworkExtension(nc *net.ListenConfig, ip netip.Addr, ifaceIndex int) error {
|
||||
return netns.SetListenConfigInterfaceIndex(nc, ifaceIndex)
|
||||
}
|
||||
|
||||
103
vendor/tailscale.com/ipn/ipnlocal/prefs_metrics.go
generated
vendored
Normal file
103
vendor/tailscale.com/ipn/ipnlocal/prefs_metrics.go
generated
vendored
Normal file
@@ -0,0 +1,103 @@
|
||||
// Copyright (c) Tailscale Inc & AUTHORS
|
||||
// SPDX-License-Identifier: BSD-3-Clause
|
||||
|
||||
package ipnlocal
|
||||
|
||||
import (
|
||||
"errors"
|
||||
|
||||
"tailscale.com/feature/buildfeatures"
|
||||
"tailscale.com/ipn"
|
||||
"tailscale.com/tailcfg"
|
||||
"tailscale.com/util/clientmetric"
|
||||
)
|
||||
|
||||
// Counter metrics for edit/change events
|
||||
var (
|
||||
// metricExitNodeEnabled is incremented when the user enables an exit node independent of the node's characteristics.
|
||||
metricExitNodeEnabled = clientmetric.NewCounter("prefs_exit_node_enabled")
|
||||
// metricExitNodeEnabledSuggested is incremented when the user enables the suggested exit node.
|
||||
metricExitNodeEnabledSuggested = clientmetric.NewCounter("prefs_exit_node_enabled_suggested")
|
||||
// metricExitNodeEnabledMullvad is incremented when the user enables a Mullvad exit node.
|
||||
metricExitNodeEnabledMullvad = clientmetric.NewCounter("prefs_exit_node_enabled_mullvad")
|
||||
// metricWantRunningEnabled is incremented when WantRunning transitions from false to true.
|
||||
metricWantRunningEnabled = clientmetric.NewCounter("prefs_want_running_enabled")
|
||||
// metricWantRunningDisabled is incremented when WantRunning transitions from true to false.
|
||||
metricWantRunningDisabled = clientmetric.NewCounter("prefs_want_running_disabled")
|
||||
)
|
||||
|
||||
type exitNodeProperty string
|
||||
|
||||
const (
|
||||
exitNodeTypePreferred exitNodeProperty = "suggested" // The exit node is the last suggested exit node
|
||||
exitNodeTypeMullvad exitNodeProperty = "mullvad" // The exit node is a Mullvad exit node
|
||||
)
|
||||
|
||||
// prefsMetricsEditEvent encapsulates information needed to record metrics related
|
||||
// to any changes to preferences.
|
||||
type prefsMetricsEditEvent struct {
|
||||
change *ipn.MaskedPrefs // the preference mask used to update the preferences
|
||||
pNew ipn.PrefsView // new preferences (after ApplyUpdates)
|
||||
pOld ipn.PrefsView // old preferences (before ApplyUpdates)
|
||||
node *nodeBackend // the node the event is associated with
|
||||
lastSuggestedExitNode tailcfg.StableNodeID // the last suggested exit node
|
||||
}
|
||||
|
||||
// record records changes to preferences as clientmetrics.
|
||||
func (e *prefsMetricsEditEvent) record() error {
|
||||
if e.change == nil || e.node == nil {
|
||||
return errors.New("prefsMetricsEditEvent: missing required fields")
|
||||
}
|
||||
|
||||
// Record up/down events.
|
||||
if e.change.WantRunningSet && (e.pNew.WantRunning() != e.pOld.WantRunning()) {
|
||||
if e.pNew.WantRunning() {
|
||||
metricWantRunningEnabled.Add(1)
|
||||
} else {
|
||||
metricWantRunningDisabled.Add(1)
|
||||
}
|
||||
}
|
||||
|
||||
// Record any changes to exit node settings.
|
||||
if e.change.ExitNodeIDSet || e.change.ExitNodeIPSet {
|
||||
if exitNodeTypes, ok := e.exitNodeType(e.pNew.ExitNodeID()); ok {
|
||||
// We have switched to a valid exit node if ok is true.
|
||||
metricExitNodeEnabled.Add(1)
|
||||
|
||||
// We may have some additional characteristics we should also record.
|
||||
for _, t := range exitNodeTypes {
|
||||
switch t {
|
||||
case exitNodeTypePreferred:
|
||||
metricExitNodeEnabledSuggested.Add(1)
|
||||
case exitNodeTypeMullvad:
|
||||
metricExitNodeEnabledMullvad.Add(1)
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// exitNodeTypesLocked returns type of exit node for the given stable ID.
|
||||
// An exit node may have multiple type (can be both mullvad and preferred
|
||||
// simultaneously for example).
|
||||
//
|
||||
// This will return ok as true if the supplied stable ID resolves to a known peer,
|
||||
// false otherwise. The caller is responsible for ensuring that the id belongs to
|
||||
// an exit node.
|
||||
func (e *prefsMetricsEditEvent) exitNodeType(id tailcfg.StableNodeID) (props []exitNodeProperty, isNode bool) {
|
||||
if !buildfeatures.HasUseExitNode {
|
||||
return nil, false
|
||||
}
|
||||
var peer tailcfg.NodeView
|
||||
|
||||
if peer, isNode = e.node.PeerByStableID(id); isNode {
|
||||
if tailcfg.StableNodeID(id) == e.lastSuggestedExitNode {
|
||||
props = append(props, exitNodeTypePreferred)
|
||||
}
|
||||
if peer.IsWireGuardOnly() {
|
||||
props = append(props, exitNodeTypeMullvad)
|
||||
}
|
||||
}
|
||||
return props, isNode
|
||||
}
|
||||
459
vendor/tailscale.com/ipn/ipnlocal/profiles.go
generated
vendored
459
vendor/tailscale.com/ipn/ipnlocal/profiles.go
generated
vendored
@@ -5,25 +5,35 @@ package ipnlocal
|
||||
|
||||
import (
|
||||
"cmp"
|
||||
"crypto"
|
||||
"crypto/rand"
|
||||
"encoding/json"
|
||||
"errors"
|
||||
"fmt"
|
||||
"io"
|
||||
"runtime"
|
||||
"slices"
|
||||
"strings"
|
||||
|
||||
"tailscale.com/clientupdate"
|
||||
"tailscale.com/envknob"
|
||||
"tailscale.com/feature"
|
||||
"tailscale.com/health"
|
||||
"tailscale.com/ipn"
|
||||
"tailscale.com/ipn/ipnext"
|
||||
"tailscale.com/tailcfg"
|
||||
"tailscale.com/types/key"
|
||||
"tailscale.com/types/logger"
|
||||
"tailscale.com/types/persist"
|
||||
"tailscale.com/util/clientmetric"
|
||||
"tailscale.com/util/eventbus"
|
||||
"tailscale.com/util/testenv"
|
||||
)
|
||||
|
||||
var debug = envknob.RegisterBool("TS_DEBUG_PROFILES")
|
||||
|
||||
// [profileManager] implements [ipnext.ProfileStore].
|
||||
var _ ipnext.ProfileStore = (*profileManager)(nil)
|
||||
|
||||
// profileManager is a wrapper around an [ipn.StateStore] that manages
|
||||
// multiple profiles and the current profile.
|
||||
//
|
||||
@@ -36,8 +46,33 @@ type profileManager struct {
|
||||
|
||||
currentUserID ipn.WindowsUserID
|
||||
knownProfiles map[ipn.ProfileID]ipn.LoginProfileView // always non-nil
|
||||
currentProfile ipn.LoginProfileView // always Valid.
|
||||
prefs ipn.PrefsView // always Valid.
|
||||
currentProfile ipn.LoginProfileView // always Valid (once [newProfileManager] returns).
|
||||
prefs ipn.PrefsView // always Valid (once [newProfileManager] returns).
|
||||
|
||||
// StateChangeHook is an optional hook that is called when the current profile or prefs change,
|
||||
// such as due to a profile switch or a change in the profile's preferences.
|
||||
// It is typically set by the [LocalBackend] to invert the dependency between
|
||||
// the [profileManager] and the [LocalBackend], so that instead of [LocalBackend]
|
||||
// asking [profileManager] for the state, we can have [profileManager] call
|
||||
// [LocalBackend] when the state changes. See also:
|
||||
// https://github.com/tailscale/tailscale/pull/15791#discussion_r2060838160
|
||||
StateChangeHook ipnext.ProfileStateChangeCallback
|
||||
|
||||
// extHost is the bridge between [profileManager] and the registered [ipnext.Extension]s.
|
||||
// It may be nil in tests. A nil pointer is a valid, no-op host.
|
||||
extHost *ExtensionHost
|
||||
|
||||
// Override for key.NewEmptyHardwareAttestationKey used for testing.
|
||||
newEmptyHardwareAttestationKey func() (key.HardwareAttestationKey, error)
|
||||
}
|
||||
|
||||
// SetExtensionHost sets the [ExtensionHost] for the [profileManager].
|
||||
// The specified host will be notified about profile and prefs changes
|
||||
// and will immediately be notified about the current profile and prefs.
|
||||
// A nil host is a valid, no-op host.
|
||||
func (pm *profileManager) SetExtensionHost(host *ExtensionHost) {
|
||||
pm.extHost = host
|
||||
host.NotifyProfileChange(pm.currentProfile, pm.prefs, false)
|
||||
}
|
||||
|
||||
func (pm *profileManager) dlogf(format string, args ...any) {
|
||||
@@ -64,8 +99,7 @@ func (pm *profileManager) SetCurrentUserID(uid ipn.WindowsUserID) {
|
||||
if pm.currentUserID == uid {
|
||||
return
|
||||
}
|
||||
pm.currentUserID = uid
|
||||
if err := pm.SwitchToDefaultProfile(); err != nil {
|
||||
if _, _, err := pm.SwitchToDefaultProfileForUser(uid); err != nil {
|
||||
// SetCurrentUserID should never fail and must always switch to the
|
||||
// user's default profile or create a new profile for the current user.
|
||||
// Until we implement multi-user support and the new permission model,
|
||||
@@ -73,79 +107,122 @@ func (pm *profileManager) SetCurrentUserID(uid ipn.WindowsUserID) {
|
||||
// that when SetCurrentUserID exits, the profile in pm.currentProfile
|
||||
// is either an existing profile owned by the user, or a new, empty profile.
|
||||
pm.logf("%q's default profile cannot be used; creating a new one: %v", uid, err)
|
||||
pm.NewProfileForUser(uid)
|
||||
pm.SwitchToNewProfileForUser(uid)
|
||||
}
|
||||
}
|
||||
|
||||
// SetCurrentUserAndProfile sets the current user ID and switches the specified
|
||||
// profile, if it is accessible to the user. If the profile does not exist,
|
||||
// or is not accessible, it switches to the user's default profile,
|
||||
// creating a new one if necessary.
|
||||
// SwitchToProfile switches to the specified profile and (temporarily,
|
||||
// while the "current user" is still a thing on Windows; see tailscale/corp#18342)
|
||||
// sets its owner as the current user. The profile must be a valid profile
|
||||
// returned by the [profileManager], such as by [profileManager.Profiles],
|
||||
// [profileManager.ProfileByID], or [profileManager.NewProfileForUser].
|
||||
//
|
||||
// It is a shorthand for [profileManager.SetCurrentUserID] followed by
|
||||
// [profileManager.SwitchProfile], but it is more efficient as it switches
|
||||
// [profileManager.SwitchProfileByID], but it is more efficient as it switches
|
||||
// directly to the specified profile rather than switching to the user's
|
||||
// default profile first.
|
||||
// default profile first. It is a no-op if the specified profile is already
|
||||
// the current profile.
|
||||
//
|
||||
// As a special case, if the specified profile ID "", it creates a new
|
||||
// profile for the user and switches to it, unless the current profile
|
||||
// is already a new, empty profile owned by the user.
|
||||
// As a special case, if the specified profile view is not valid, it resets
|
||||
// both the current user and the profile to a new, empty profile not owned
|
||||
// by any user.
|
||||
//
|
||||
// It returns the current profile and whether the call resulted
|
||||
// in a profile switch.
|
||||
func (pm *profileManager) SetCurrentUserAndProfile(uid ipn.WindowsUserID, profileID ipn.ProfileID) (cp ipn.LoginProfileView, changed bool) {
|
||||
pm.currentUserID = uid
|
||||
|
||||
if profileID == "" {
|
||||
if pm.currentProfile.ID() == "" && pm.currentProfile.LocalUserID() == uid {
|
||||
return pm.currentProfile, false
|
||||
// It returns the current profile and whether the call resulted in a profile change,
|
||||
// or an error if the specified profile does not exist or its prefs could not be loaded.
|
||||
//
|
||||
// It may be called during [profileManager] initialization before [newProfileManager] returns
|
||||
// and must check whether pm.currentProfile is Valid before using it.
|
||||
func (pm *profileManager) SwitchToProfile(profile ipn.LoginProfileView) (cp ipn.LoginProfileView, changed bool, err error) {
|
||||
prefs := defaultPrefs
|
||||
switch {
|
||||
case !profile.Valid():
|
||||
// Create a new profile that is not associated with any user.
|
||||
profile = pm.NewProfileForUser("")
|
||||
case profile == pm.currentProfile,
|
||||
profile.ID() != "" && pm.currentProfile.Valid() && profile.ID() == pm.currentProfile.ID(),
|
||||
profile.ID() == "" && profile.Equals(pm.currentProfile) && prefs.Equals(pm.prefs):
|
||||
// The profile is already the current profile; no need to switch.
|
||||
//
|
||||
// It includes three cases:
|
||||
// 1. The target profile and the current profile are aliases referencing the [ipn.LoginProfile].
|
||||
// The profile may be either a new (non-persisted) profile or an existing well-known profile.
|
||||
// 2. The target profile is a well-known, persisted profile with the same ID as the current profile.
|
||||
// 3. The target and the current profiles are both new (non-persisted) profiles and they are equal.
|
||||
// At minimum, equality means that the profiles are owned by the same user on platforms that support it
|
||||
// and the prefs are the same as well.
|
||||
return pm.currentProfile, false, nil
|
||||
case profile.ID() == "":
|
||||
// Copy the specified profile to prevent accidental mutation.
|
||||
profile = profile.AsStruct().View()
|
||||
default:
|
||||
// Find an existing profile by ID and load its prefs.
|
||||
kp, ok := pm.knownProfiles[profile.ID()]
|
||||
if !ok {
|
||||
// The profile ID is not valid; it may have been deleted or never existed.
|
||||
// As the target profile should have been returned by the [profileManager],
|
||||
// this is unexpected and might indicate a bug in the code.
|
||||
return pm.currentProfile, false, fmt.Errorf("[unexpected] %w: %s (%s)", errProfileNotFound, profile.Name(), profile.ID())
|
||||
}
|
||||
pm.NewProfileForUser(uid)
|
||||
return pm.currentProfile, true
|
||||
}
|
||||
|
||||
if profile, err := pm.ProfileByID(profileID); err == nil {
|
||||
if pm.CurrentProfile().ID() == profileID {
|
||||
return pm.currentProfile, false
|
||||
}
|
||||
if err := pm.SwitchProfile(profile.ID()); err == nil {
|
||||
return pm.currentProfile, true
|
||||
profile = kp
|
||||
if prefs, err = pm.loadSavedPrefs(profile.Key()); err != nil {
|
||||
return pm.currentProfile, false, fmt.Errorf("failed to load profile prefs for %s (%s): %w", profile.Name(), profile.ID(), err)
|
||||
}
|
||||
}
|
||||
|
||||
if err := pm.SwitchToDefaultProfile(); err != nil {
|
||||
pm.logf("%q's default profile cannot be used; creating a new one: %v", uid, err)
|
||||
pm.NewProfile()
|
||||
if profile.ID() == "" { // new profile that has never been persisted
|
||||
metricNewProfile.Add(1)
|
||||
} else {
|
||||
metricSwitchProfile.Add(1)
|
||||
}
|
||||
return pm.currentProfile, true
|
||||
|
||||
pm.prefs = prefs
|
||||
pm.updateHealth()
|
||||
pm.currentProfile = profile
|
||||
pm.currentUserID = profile.LocalUserID()
|
||||
if err := pm.setProfileAsUserDefault(profile); err != nil {
|
||||
// This is not a fatal error; we've already switched to the profile.
|
||||
// But if updating the default profile fails, we should log it.
|
||||
pm.logf("failed to set %s (%s) as the default profile: %v", profile.Name(), profile.ID(), err)
|
||||
}
|
||||
|
||||
if f := pm.StateChangeHook; f != nil {
|
||||
f(pm.currentProfile, pm.prefs, false)
|
||||
}
|
||||
// Do not call pm.extHost.NotifyProfileChange here; it is invoked in
|
||||
// [LocalBackend.resetForProfileChangeLockedOnEntry] after the netmap reset.
|
||||
// TODO(nickkhyl): Consider moving it here (or into the stateChangeCb handler
|
||||
// in [LocalBackend]) once the profile/node state, including the netmap,
|
||||
// is actually tied to the current profile.
|
||||
|
||||
return profile, true, nil
|
||||
}
|
||||
|
||||
// DefaultUserProfileID returns [ipn.ProfileID] of the default (last used) profile for the specified user,
|
||||
// or an empty string if the specified user does not have a default profile.
|
||||
func (pm *profileManager) DefaultUserProfileID(uid ipn.WindowsUserID) ipn.ProfileID {
|
||||
// DefaultUserProfile returns a read-only view of the default (last used) profile for the specified user.
|
||||
// It returns a read-only view of a new, non-persisted profile if the specified user does not have a default profile.
|
||||
func (pm *profileManager) DefaultUserProfile(uid ipn.WindowsUserID) ipn.LoginProfileView {
|
||||
// Read the CurrentProfileKey from the store which stores
|
||||
// the selected profile for the specified user.
|
||||
b, err := pm.store.ReadState(ipn.CurrentProfileKey(string(uid)))
|
||||
pm.dlogf("DefaultUserProfileID: ReadState(%q) = %v, %v", string(uid), len(b), err)
|
||||
pm.dlogf("DefaultUserProfile: ReadState(%q) = %v, %v", string(uid), len(b), err)
|
||||
if err == ipn.ErrStateNotExist || len(b) == 0 {
|
||||
if runtime.GOOS == "windows" {
|
||||
pm.dlogf("DefaultUserProfileID: windows: migrating from legacy preferences")
|
||||
profile, err := pm.migrateFromLegacyPrefs(uid, false)
|
||||
pm.dlogf("DefaultUserProfile: windows: migrating from legacy preferences")
|
||||
profile, err := pm.migrateFromLegacyPrefs(uid)
|
||||
if err == nil {
|
||||
return profile.ID()
|
||||
return profile
|
||||
}
|
||||
pm.logf("failed to migrate from legacy preferences: %v", err)
|
||||
}
|
||||
return ""
|
||||
return pm.NewProfileForUser(uid)
|
||||
}
|
||||
|
||||
pk := ipn.StateKey(string(b))
|
||||
prof := pm.findProfileByKey(uid, pk)
|
||||
if !prof.Valid() {
|
||||
pm.dlogf("DefaultUserProfileID: no profile found for key: %q", pk)
|
||||
return ""
|
||||
pm.dlogf("DefaultUserProfile: no profile found for key: %q", pk)
|
||||
return pm.NewProfileForUser(uid)
|
||||
}
|
||||
return prof.ID()
|
||||
return prof
|
||||
}
|
||||
|
||||
// checkProfileAccess returns an [errProfileAccessDenied] if the current user
|
||||
@@ -251,12 +328,6 @@ func (pm *profileManager) setUnattendedModeAsConfigured() error {
|
||||
}
|
||||
}
|
||||
|
||||
// Reset unloads the current profile, if any.
|
||||
func (pm *profileManager) Reset() {
|
||||
pm.currentUserID = ""
|
||||
pm.NewProfile()
|
||||
}
|
||||
|
||||
// SetPrefs sets the current profile's prefs to the provided value.
|
||||
// It also saves the prefs to the [ipn.StateStore]. It stores a copy of the
|
||||
// provided prefs, which may be accessed via [profileManager.CurrentPrefs].
|
||||
@@ -288,13 +359,37 @@ func (pm *profileManager) SetPrefs(prefsIn ipn.PrefsView, np ipn.NetworkProfile)
|
||||
delete(pm.knownProfiles, p.ID())
|
||||
}
|
||||
}
|
||||
pm.currentProfile = cp
|
||||
// TODO(nickkhyl): Revisit how we handle implicit switching to a different profile,
|
||||
// which occurs when prefsIn represents a node/user different from that of the
|
||||
// currentProfile. It happens when a login (either reauth or user-initiated login)
|
||||
// is completed with a different node/user identity than the one currently in use.
|
||||
//
|
||||
// Currently, we overwrite the existing profile prefs with the ones from prefsIn,
|
||||
// where prefsIn is the previous profile's prefs with an updated Persist, LoggedOut,
|
||||
// WantRunning and possibly other fields. This may not be the desired behavior.
|
||||
//
|
||||
// Additionally, LocalBackend doesn't treat it as a proper profile switch, meaning that
|
||||
// [LocalBackend.resetForProfileChangeLockedOnEntry] is not called and certain
|
||||
// node/profile-specific state may not be reset as expected.
|
||||
//
|
||||
// However, [profileManager] notifies [ipnext.Extension]s about the profile change,
|
||||
// so features migrated from LocalBackend to external packages should not be affected.
|
||||
//
|
||||
// See tailscale/corp#28014.
|
||||
if !cp.Equals(pm.currentProfile) {
|
||||
const sameNode = false // implicit profile switch
|
||||
pm.currentProfile = cp
|
||||
pm.prefs = prefsIn.AsStruct().View()
|
||||
if f := pm.StateChangeHook; f != nil {
|
||||
f(cp, prefsIn, sameNode)
|
||||
}
|
||||
pm.extHost.NotifyProfileChange(cp, prefsIn, sameNode)
|
||||
}
|
||||
cp, err := pm.setProfilePrefs(nil, prefsIn, np)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
return pm.setProfileAsUserDefault(cp)
|
||||
|
||||
}
|
||||
|
||||
// setProfilePrefs is like [profileManager.SetPrefs], but sets prefs for the specified [ipn.LoginProfile],
|
||||
@@ -351,7 +446,20 @@ func (pm *profileManager) setProfilePrefs(lp *ipn.LoginProfile, prefsIn ipn.Pref
|
||||
// Update the current profile view to reflect the changes
|
||||
// if the specified profile is the current profile.
|
||||
if isCurrentProfile {
|
||||
pm.currentProfile = lp.View()
|
||||
// Always set pm.currentProfile to the new profile view for pointer equality.
|
||||
// We check it further down the call stack.
|
||||
lp := lp.View()
|
||||
sameProfileInfo := lp.Equals(pm.currentProfile)
|
||||
pm.currentProfile = lp
|
||||
if !sameProfileInfo {
|
||||
// But only invoke the callbacks if the profile info has actually changed.
|
||||
const sameNode = true // just an info update; still the same node
|
||||
pm.prefs = prefsIn.AsStruct().View() // suppress further callbacks for this change
|
||||
if f := pm.StateChangeHook; f != nil {
|
||||
f(lp, prefsIn, sameNode)
|
||||
}
|
||||
pm.extHost.NotifyProfileChange(lp, prefsIn, sameNode)
|
||||
}
|
||||
}
|
||||
|
||||
// An empty profile.ID indicates that the node info is not available yet,
|
||||
@@ -392,7 +500,33 @@ func newUnusedID(knownProfiles map[ipn.ProfileID]ipn.LoginProfileView) (ipn.Prof
|
||||
func (pm *profileManager) setProfilePrefsNoPermCheck(profile ipn.LoginProfileView, clonedPrefs ipn.PrefsView) error {
|
||||
isCurrentProfile := pm.currentProfile == profile
|
||||
if isCurrentProfile {
|
||||
oldPrefs := pm.prefs
|
||||
pm.prefs = clonedPrefs
|
||||
|
||||
// Sadly, profile prefs can be changed in multiple ways.
|
||||
// It's pretty chaotic, and in many cases callers use
|
||||
// unexported methods of the profile manager instead of
|
||||
// going through [LocalBackend.setPrefsLockedOnEntry]
|
||||
// or at least using [profileManager.SetPrefs].
|
||||
//
|
||||
// While we should definitely clean this up to improve
|
||||
// the overall structure of how prefs are set, which would
|
||||
// also address current and future conflicts, such as
|
||||
// competing features changing the same prefs, this method
|
||||
// is currently the central place where we can detect all
|
||||
// changes to the current profile's prefs.
|
||||
//
|
||||
// That said, regardless of the cleanup, we might want
|
||||
// to keep the profileManager responsible for invoking
|
||||
// profile- and prefs-related callbacks.
|
||||
|
||||
if !clonedPrefs.Equals(oldPrefs) {
|
||||
if f := pm.StateChangeHook; f != nil {
|
||||
f(pm.currentProfile, clonedPrefs, true)
|
||||
}
|
||||
pm.extHost.NotifyProfilePrefsChanged(pm.currentProfile, oldPrefs, clonedPrefs)
|
||||
}
|
||||
|
||||
pm.updateHealth()
|
||||
}
|
||||
if profile.Key() != "" {
|
||||
@@ -477,42 +611,32 @@ func (pm *profileManager) profilePrefs(p ipn.LoginProfileView) (ipn.PrefsView, e
|
||||
return pm.loadSavedPrefs(p.Key())
|
||||
}
|
||||
|
||||
// SwitchProfile switches to the profile with the given id.
|
||||
// SwitchToProfileByID switches to the profile with the given id.
|
||||
// It returns the current profile and whether the call resulted in a profile change.
|
||||
// If the profile exists but is not accessible to the current user, it returns an [errProfileAccessDenied].
|
||||
// If the profile does not exist, it returns an [errProfileNotFound].
|
||||
func (pm *profileManager) SwitchProfile(id ipn.ProfileID) error {
|
||||
metricSwitchProfile.Add(1)
|
||||
|
||||
kp, ok := pm.knownProfiles[id]
|
||||
if !ok {
|
||||
return errProfileNotFound
|
||||
func (pm *profileManager) SwitchToProfileByID(id ipn.ProfileID) (_ ipn.LoginProfileView, changed bool, err error) {
|
||||
if id == pm.currentProfile.ID() {
|
||||
return pm.currentProfile, false, nil
|
||||
}
|
||||
if pm.currentProfile.Valid() && kp.ID() == pm.currentProfile.ID() && pm.prefs.Valid() {
|
||||
return nil
|
||||
}
|
||||
|
||||
if err := pm.checkProfileAccess(kp); err != nil {
|
||||
return fmt.Errorf("%w: profile %q is not accessible to the current user", err, id)
|
||||
}
|
||||
prefs, err := pm.loadSavedPrefs(kp.Key())
|
||||
profile, err := pm.ProfileByID(id)
|
||||
if err != nil {
|
||||
return err
|
||||
return pm.currentProfile, false, err
|
||||
}
|
||||
pm.prefs = prefs
|
||||
pm.updateHealth()
|
||||
pm.currentProfile = kp
|
||||
return pm.setProfileAsUserDefault(kp)
|
||||
return pm.SwitchToProfile(profile)
|
||||
}
|
||||
|
||||
// SwitchToDefaultProfile switches to the default (last used) profile for the current user.
|
||||
// It creates a new one and switches to it if the current user does not have a default profile,
|
||||
// SwitchToDefaultProfileForUser switches to the default (last used) profile for the specified user.
|
||||
// It creates a new one and switches to it if the specified user does not have a default profile,
|
||||
// or returns an error if the default profile is inaccessible or could not be loaded.
|
||||
func (pm *profileManager) SwitchToDefaultProfile() error {
|
||||
if id := pm.DefaultUserProfileID(pm.currentUserID); id != "" {
|
||||
return pm.SwitchProfile(id)
|
||||
}
|
||||
pm.NewProfileForUser(pm.currentUserID)
|
||||
return nil
|
||||
func (pm *profileManager) SwitchToDefaultProfileForUser(uid ipn.WindowsUserID) (_ ipn.LoginProfileView, changed bool, err error) {
|
||||
return pm.SwitchToProfile(pm.DefaultUserProfile(uid))
|
||||
}
|
||||
|
||||
// SwitchToDefaultProfile is like [profileManager.SwitchToDefaultProfileForUser], but switches
|
||||
// to the default profile for the current user.
|
||||
func (pm *profileManager) SwitchToDefaultProfile() (_ ipn.LoginProfileView, changed bool, err error) {
|
||||
return pm.SwitchToDefaultProfileForUser(pm.currentUserID)
|
||||
}
|
||||
|
||||
// setProfileAsUserDefault sets the specified profile as the default for the current user.
|
||||
@@ -529,8 +653,8 @@ func (pm *profileManager) setProfileAsUserDefault(profile ipn.LoginProfileView)
|
||||
return pm.WriteState(k, []byte(profile.Key()))
|
||||
}
|
||||
|
||||
func (pm *profileManager) loadSavedPrefs(key ipn.StateKey) (ipn.PrefsView, error) {
|
||||
bs, err := pm.store.ReadState(key)
|
||||
func (pm *profileManager) loadSavedPrefs(k ipn.StateKey) (ipn.PrefsView, error) {
|
||||
bs, err := pm.store.ReadState(k)
|
||||
if err == ipn.ErrStateNotExist || len(bs) == 0 {
|
||||
return defaultPrefs, nil
|
||||
}
|
||||
@@ -538,10 +662,28 @@ func (pm *profileManager) loadSavedPrefs(key ipn.StateKey) (ipn.PrefsView, error
|
||||
return ipn.PrefsView{}, err
|
||||
}
|
||||
savedPrefs := ipn.NewPrefs()
|
||||
if err := ipn.PrefsFromBytes(bs, savedPrefs); err != nil {
|
||||
return ipn.PrefsView{}, fmt.Errorf("parsing saved prefs: %v", err)
|
||||
|
||||
// if supported by the platform, create an empty hardware attestation key to use when deserializing
|
||||
// to avoid type exceptions from json.Unmarshaling into an interface{}.
|
||||
hw, _ := pm.newEmptyHardwareAttestationKey()
|
||||
savedPrefs.Persist = &persist.Persist{
|
||||
AttestationKey: hw,
|
||||
}
|
||||
pm.logf("using backend prefs for %q: %v", key, savedPrefs.Pretty())
|
||||
|
||||
if err := ipn.PrefsFromBytes(bs, savedPrefs); err != nil {
|
||||
// Try loading again, this time ignoring the AttestationKey contents.
|
||||
// If that succeeds, there's something wrong with the underlying
|
||||
// attestation key mechanism (most likely the TPM changed), but we
|
||||
// should at least proceed with client startup.
|
||||
origErr := err
|
||||
savedPrefs.Persist.AttestationKey = &noopAttestationKey{}
|
||||
if err := ipn.PrefsFromBytes(bs, savedPrefs); err != nil {
|
||||
return ipn.PrefsView{}, fmt.Errorf("parsing saved prefs: %w", err)
|
||||
} else {
|
||||
pm.logf("failed to parse savedPrefs with attestation key (error: %v) but parsing without the attestation key succeeded; will proceed without using the old attestation key", origErr)
|
||||
}
|
||||
}
|
||||
pm.logf("using backend prefs for %q: %v", k, savedPrefs.Pretty())
|
||||
|
||||
// Ignore any old stored preferences for https://login.tailscale.com
|
||||
// as the control server that would override the new default of
|
||||
@@ -558,7 +700,7 @@ func (pm *profileManager) loadSavedPrefs(key ipn.StateKey) (ipn.PrefsView, error
|
||||
// cause any EditPrefs calls to fail (other than disabling auto-updates).
|
||||
//
|
||||
// Reset AutoUpdate.Apply if we detect such invalid prefs.
|
||||
if savedPrefs.AutoUpdate.Apply.EqualBool(true) && !clientupdate.CanAutoUpdate() {
|
||||
if savedPrefs.AutoUpdate.Apply.EqualBool(true) && !feature.CanAutoUpdate() {
|
||||
savedPrefs.AutoUpdate.Apply.Clear()
|
||||
}
|
||||
|
||||
@@ -590,7 +732,6 @@ var errProfileAccessDenied = errors.New("profile access denied")
|
||||
// This is useful for deleting the last profile. In other cases, it is
|
||||
// recommended to call [profileManager.SwitchProfile] first.
|
||||
func (pm *profileManager) DeleteProfile(id ipn.ProfileID) error {
|
||||
metricDeleteProfile.Add(1)
|
||||
if id == pm.currentProfile.ID() {
|
||||
return pm.deleteCurrentProfile()
|
||||
}
|
||||
@@ -610,7 +751,7 @@ func (pm *profileManager) deleteCurrentProfile() error {
|
||||
}
|
||||
if pm.currentProfile.ID() == "" {
|
||||
// Deleting the in-memory only new profile, just create a new one.
|
||||
pm.NewProfile()
|
||||
pm.SwitchToNewProfile()
|
||||
return nil
|
||||
}
|
||||
return pm.deleteProfileNoPermCheck(pm.currentProfile)
|
||||
@@ -620,12 +761,13 @@ func (pm *profileManager) deleteCurrentProfile() error {
|
||||
// but it doesn't check user's access rights to the profile.
|
||||
func (pm *profileManager) deleteProfileNoPermCheck(profile ipn.LoginProfileView) error {
|
||||
if profile.ID() == pm.currentProfile.ID() {
|
||||
pm.NewProfile()
|
||||
pm.SwitchToNewProfile()
|
||||
}
|
||||
if err := pm.WriteState(profile.Key(), nil); err != nil {
|
||||
return err
|
||||
}
|
||||
delete(pm.knownProfiles, profile.ID())
|
||||
metricDeleteProfile.Add(1)
|
||||
return pm.writeKnownProfiles()
|
||||
}
|
||||
|
||||
@@ -637,7 +779,7 @@ func (pm *profileManager) DeleteAllProfilesForUser() error {
|
||||
currentProfileDeleted := false
|
||||
writeKnownProfiles := func() error {
|
||||
if currentProfileDeleted || pm.currentProfile.ID() == "" {
|
||||
pm.NewProfile()
|
||||
pm.SwitchToNewProfile()
|
||||
}
|
||||
return pm.writeKnownProfiles()
|
||||
}
|
||||
@@ -666,6 +808,7 @@ func (pm *profileManager) writeKnownProfiles() error {
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
metricProfileCount.Set(int64(len(pm.knownProfiles)))
|
||||
return pm.WriteState(ipn.KnownProfilesStateKey, b)
|
||||
}
|
||||
|
||||
@@ -676,45 +819,25 @@ func (pm *profileManager) updateHealth() {
|
||||
pm.health.SetAutoUpdatePrefs(pm.prefs.AutoUpdate().Check, pm.prefs.AutoUpdate().Apply)
|
||||
}
|
||||
|
||||
// NewProfile creates and switches to a new unnamed profile. The new profile is
|
||||
// SwitchToNewProfile creates and switches to a new unnamed profile. The new profile is
|
||||
// not persisted until [profileManager.SetPrefs] is called with a logged-in user.
|
||||
func (pm *profileManager) NewProfile() {
|
||||
pm.NewProfileForUser(pm.currentUserID)
|
||||
func (pm *profileManager) SwitchToNewProfile() {
|
||||
pm.SwitchToNewProfileForUser(pm.currentUserID)
|
||||
}
|
||||
|
||||
// NewProfileForUser is like [profileManager.NewProfile], but it switches to the
|
||||
// SwitchToNewProfileForUser is like [profileManager.SwitchToNewProfile], but it switches to the
|
||||
// specified user and sets that user as the profile owner for the new profile.
|
||||
func (pm *profileManager) NewProfileForUser(uid ipn.WindowsUserID) {
|
||||
pm.currentUserID = uid
|
||||
|
||||
metricNewProfile.Add(1)
|
||||
|
||||
pm.prefs = defaultPrefs
|
||||
pm.updateHealth()
|
||||
newProfile := &ipn.LoginProfile{LocalUserID: uid}
|
||||
pm.currentProfile = newProfile.View()
|
||||
func (pm *profileManager) SwitchToNewProfileForUser(uid ipn.WindowsUserID) {
|
||||
pm.SwitchToProfile(pm.NewProfileForUser(uid))
|
||||
}
|
||||
|
||||
// newProfileWithPrefs creates a new profile with the specified prefs and assigns
|
||||
// the specified uid as the profile owner. If switchNow is true, it switches to the
|
||||
// newly created profile immediately. It returns the newly created profile on success,
|
||||
// or an error on failure.
|
||||
func (pm *profileManager) newProfileWithPrefs(uid ipn.WindowsUserID, prefs ipn.PrefsView, switchNow bool) (ipn.LoginProfileView, error) {
|
||||
metricNewProfile.Add(1)
|
||||
// zeroProfile is a read-only view of a new, empty profile that is not persisted to the store.
|
||||
var zeroProfile = (&ipn.LoginProfile{}).View()
|
||||
|
||||
profile, err := pm.setProfilePrefs(&ipn.LoginProfile{LocalUserID: uid}, prefs, ipn.NetworkProfile{})
|
||||
if err != nil {
|
||||
return ipn.LoginProfileView{}, err
|
||||
}
|
||||
if switchNow {
|
||||
pm.currentProfile = profile
|
||||
pm.prefs = prefs.AsStruct().View()
|
||||
pm.updateHealth()
|
||||
if err := pm.setProfileAsUserDefault(profile); err != nil {
|
||||
return ipn.LoginProfileView{}, err
|
||||
}
|
||||
}
|
||||
return profile, nil
|
||||
// NewProfileForUser creates a new profile for the specified user and returns a read-only view of it.
|
||||
// It neither switches to the new profile nor persists it to the store.
|
||||
func (pm *profileManager) NewProfileForUser(uid ipn.WindowsUserID) ipn.LoginProfileView {
|
||||
return (&ipn.LoginProfile{LocalUserID: uid}).View()
|
||||
}
|
||||
|
||||
// defaultPrefs is the default prefs for a new profile. This initializes before
|
||||
@@ -742,7 +865,10 @@ func (pm *profileManager) CurrentPrefs() ipn.PrefsView {
|
||||
|
||||
// ReadStartupPrefsForTest reads the startup prefs from disk. It is only used for testing.
|
||||
func ReadStartupPrefsForTest(logf logger.Logf, store ipn.StateStore) (ipn.PrefsView, error) {
|
||||
ht := new(health.Tracker) // in tests, don't care about the health status
|
||||
testenv.AssertInTest()
|
||||
bus := eventbus.New()
|
||||
defer bus.Close()
|
||||
ht := health.NewTracker(bus) // in tests, don't care about the health status
|
||||
pm, err := newProfileManager(store, logf, ht)
|
||||
if err != nil {
|
||||
return ipn.PrefsView{}, err
|
||||
@@ -798,35 +924,20 @@ func newProfileManagerWithGOOS(store ipn.StateStore, logf logger.Logf, ht *healt
|
||||
return nil, err
|
||||
}
|
||||
|
||||
metricProfileCount.Set(int64(len(knownProfiles)))
|
||||
|
||||
pm := &profileManager{
|
||||
goos: goos,
|
||||
store: store,
|
||||
knownProfiles: knownProfiles,
|
||||
logf: logf,
|
||||
health: ht,
|
||||
goos: goos,
|
||||
store: store,
|
||||
knownProfiles: knownProfiles,
|
||||
logf: logf,
|
||||
health: ht,
|
||||
newEmptyHardwareAttestationKey: key.NewEmptyHardwareAttestationKey,
|
||||
}
|
||||
|
||||
var initialProfile ipn.LoginProfileView
|
||||
if stateKey != "" {
|
||||
for _, v := range knownProfiles {
|
||||
if v.Key() == stateKey {
|
||||
pm.currentProfile = v
|
||||
}
|
||||
}
|
||||
if !pm.currentProfile.Valid() {
|
||||
if suf, ok := strings.CutPrefix(string(stateKey), "user-"); ok {
|
||||
pm.currentUserID = ipn.WindowsUserID(suf)
|
||||
}
|
||||
pm.NewProfile()
|
||||
} else {
|
||||
pm.currentUserID = pm.currentProfile.LocalUserID()
|
||||
}
|
||||
prefs, err := pm.loadSavedPrefs(stateKey)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if err := pm.setProfilePrefsNoPermCheck(pm.currentProfile, prefs); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
initialProfile = pm.findProfileByKey("", stateKey)
|
||||
// Most platform behavior is controlled by the goos parameter, however
|
||||
// some behavior is implied by build tag and fails when run on Windows,
|
||||
// so we explicitly avoid that behavior when running on Windows.
|
||||
@@ -837,17 +948,24 @@ func newProfileManagerWithGOOS(store ipn.StateStore, logf logger.Logf, ht *healt
|
||||
} else if len(knownProfiles) == 0 && goos != "windows" && runtime.GOOS != "windows" {
|
||||
// No known profiles, try a migration.
|
||||
pm.dlogf("no known profiles; trying to migrate from legacy prefs")
|
||||
if _, err := pm.migrateFromLegacyPrefs(pm.currentUserID, true); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
} else {
|
||||
pm.NewProfile()
|
||||
}
|
||||
if initialProfile, err = pm.migrateFromLegacyPrefs(pm.currentUserID); err != nil {
|
||||
|
||||
}
|
||||
}
|
||||
if !initialProfile.Valid() {
|
||||
var initialUserID ipn.WindowsUserID
|
||||
if suf, ok := strings.CutPrefix(string(stateKey), "user-"); ok {
|
||||
initialUserID = ipn.WindowsUserID(suf)
|
||||
}
|
||||
initialProfile = pm.NewProfileForUser(initialUserID)
|
||||
}
|
||||
if _, _, err := pm.SwitchToProfile(initialProfile); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return pm, nil
|
||||
}
|
||||
|
||||
func (pm *profileManager) migrateFromLegacyPrefs(uid ipn.WindowsUserID, switchNow bool) (ipn.LoginProfileView, error) {
|
||||
func (pm *profileManager) migrateFromLegacyPrefs(uid ipn.WindowsUserID) (ipn.LoginProfileView, error) {
|
||||
metricMigration.Add(1)
|
||||
sentinel, prefs, err := pm.loadLegacyPrefs(uid)
|
||||
if err != nil {
|
||||
@@ -855,7 +973,7 @@ func (pm *profileManager) migrateFromLegacyPrefs(uid ipn.WindowsUserID, switchNo
|
||||
return ipn.LoginProfileView{}, fmt.Errorf("load legacy prefs: %w", err)
|
||||
}
|
||||
pm.dlogf("loaded legacy preferences; sentinel=%q", sentinel)
|
||||
profile, err := pm.newProfileWithPrefs(uid, prefs, switchNow)
|
||||
profile, err := pm.setProfilePrefs(&ipn.LoginProfile{LocalUserID: uid}, prefs, ipn.NetworkProfile{})
|
||||
if err != nil {
|
||||
metricMigrationError.Add(1)
|
||||
return ipn.LoginProfileView{}, fmt.Errorf("migrating _daemon profile: %w", err)
|
||||
@@ -877,8 +995,27 @@ var (
|
||||
metricSwitchProfile = clientmetric.NewCounter("profiles_switch")
|
||||
metricDeleteProfile = clientmetric.NewCounter("profiles_delete")
|
||||
metricDeleteAllProfile = clientmetric.NewCounter("profiles_delete_all")
|
||||
metricProfileCount = clientmetric.NewGauge("profiles_count")
|
||||
|
||||
metricMigration = clientmetric.NewCounter("profiles_migration")
|
||||
metricMigrationError = clientmetric.NewCounter("profiles_migration_error")
|
||||
metricMigrationSuccess = clientmetric.NewCounter("profiles_migration_success")
|
||||
)
|
||||
|
||||
// noopAttestationKey is a key.HardwareAttestationKey that always successfully
|
||||
// unmarshals as a zero key.
|
||||
type noopAttestationKey struct{}
|
||||
|
||||
func (n noopAttestationKey) Public() crypto.PublicKey {
|
||||
panic("noopAttestationKey.Public should not be called; missing IsZero check somewhere?")
|
||||
}
|
||||
|
||||
func (n noopAttestationKey) Sign(rand io.Reader, digest []byte, opts crypto.SignerOpts) (signature []byte, err error) {
|
||||
panic("noopAttestationKey.Sign should not be called; missing IsZero check somewhere?")
|
||||
}
|
||||
|
||||
func (n noopAttestationKey) MarshalJSON() ([]byte, error) { return nil, nil }
|
||||
func (n noopAttestationKey) UnmarshalJSON([]byte) error { return nil }
|
||||
func (n noopAttestationKey) Close() error { return nil }
|
||||
func (n noopAttestationKey) Clone() key.HardwareAttestationKey { return n }
|
||||
func (n noopAttestationKey) IsZero() bool { return true }
|
||||
|
||||
871
vendor/tailscale.com/ipn/ipnlocal/serve.go
generated
vendored
871
vendor/tailscale.com/ipn/ipnlocal/serve.go
generated
vendored
File diff suppressed because it is too large
Load Diff
34
vendor/tailscale.com/ipn/ipnlocal/serve_disabled.go
generated
vendored
Normal file
34
vendor/tailscale.com/ipn/ipnlocal/serve_disabled.go
generated
vendored
Normal file
@@ -0,0 +1,34 @@
|
||||
// Copyright (c) Tailscale Inc & AUTHORS
|
||||
// SPDX-License-Identifier: BSD-3-Clause
|
||||
|
||||
//go:build ts_omit_serve
|
||||
|
||||
// These are temporary (2025-09-13) stubs for when tailscaled is built with the
|
||||
// ts_omit_serve build tag, disabling serve.
|
||||
//
|
||||
// TODO: move serve to a separate package, out of ipnlocal, and delete this
|
||||
// file. One step at a time.
|
||||
|
||||
package ipnlocal
|
||||
|
||||
import (
|
||||
"tailscale.com/ipn"
|
||||
"tailscale.com/tailcfg"
|
||||
)
|
||||
|
||||
const serveEnabled = false
|
||||
|
||||
type localListener = struct{}
|
||||
|
||||
func (b *LocalBackend) DeleteForegroundSession(sessionID string) error {
|
||||
return nil
|
||||
}
|
||||
|
||||
type funnelFlow = struct{}
|
||||
|
||||
func (*LocalBackend) hasIngressEnabledLocked() bool { return false }
|
||||
func (*LocalBackend) shouldWireInactiveIngressLocked() bool { return false }
|
||||
|
||||
func (b *LocalBackend) vipServicesFromPrefsLocked(prefs ipn.PrefsView) []*tailcfg.VIPService {
|
||||
return nil
|
||||
}
|
||||
2
vendor/tailscale.com/ipn/ipnlocal/ssh.go
generated
vendored
2
vendor/tailscale.com/ipn/ipnlocal/ssh.go
generated
vendored
@@ -1,7 +1,7 @@
|
||||
// Copyright (c) Tailscale Inc & AUTHORS
|
||||
// SPDX-License-Identifier: BSD-3-Clause
|
||||
|
||||
//go:build linux || (darwin && !ios) || freebsd || openbsd
|
||||
//go:build ((linux && !android) || (darwin && !ios) || freebsd || openbsd || plan9) && !ts_omit_ssh
|
||||
|
||||
package ipnlocal
|
||||
|
||||
|
||||
2
vendor/tailscale.com/ipn/ipnlocal/ssh_stub.go
generated
vendored
2
vendor/tailscale.com/ipn/ipnlocal/ssh_stub.go
generated
vendored
@@ -1,7 +1,7 @@
|
||||
// Copyright (c) Tailscale Inc & AUTHORS
|
||||
// SPDX-License-Identifier: BSD-3-Clause
|
||||
|
||||
//go:build ios || (!linux && !darwin && !freebsd && !openbsd)
|
||||
//go:build ts_omit_ssh || ios || android || (!linux && !darwin && !freebsd && !openbsd && !plan9)
|
||||
|
||||
package ipnlocal
|
||||
|
||||
|
||||
35
vendor/tailscale.com/ipn/ipnlocal/taildrop.go
generated
vendored
35
vendor/tailscale.com/ipn/ipnlocal/taildrop.go
generated
vendored
@@ -1,35 +0,0 @@
|
||||
// Copyright (c) Tailscale Inc & AUTHORS
|
||||
// SPDX-License-Identifier: BSD-3-Clause
|
||||
|
||||
package ipnlocal
|
||||
|
||||
import (
|
||||
"maps"
|
||||
"slices"
|
||||
"strings"
|
||||
|
||||
"tailscale.com/ipn"
|
||||
)
|
||||
|
||||
// UpdateOutgoingFiles updates b.outgoingFiles to reflect the given updates and
|
||||
// sends an ipn.Notify with the full list of outgoingFiles.
|
||||
func (b *LocalBackend) UpdateOutgoingFiles(updates map[string]*ipn.OutgoingFile) {
|
||||
b.mu.Lock()
|
||||
if b.outgoingFiles == nil {
|
||||
b.outgoingFiles = make(map[string]*ipn.OutgoingFile, len(updates))
|
||||
}
|
||||
maps.Copy(b.outgoingFiles, updates)
|
||||
outgoingFiles := make([]*ipn.OutgoingFile, 0, len(b.outgoingFiles))
|
||||
for _, file := range b.outgoingFiles {
|
||||
outgoingFiles = append(outgoingFiles, file)
|
||||
}
|
||||
b.mu.Unlock()
|
||||
slices.SortFunc(outgoingFiles, func(a, b *ipn.OutgoingFile) int {
|
||||
t := a.Started.Compare(b.Started)
|
||||
if t != 0 {
|
||||
return t
|
||||
}
|
||||
return strings.Compare(a.Name, b.Name)
|
||||
})
|
||||
b.send(ipn.Notify{OutgoingFiles: outgoingFiles})
|
||||
}
|
||||
31
vendor/tailscale.com/ipn/ipnlocal/tailnetlock_disabled.go
generated
vendored
Normal file
31
vendor/tailscale.com/ipn/ipnlocal/tailnetlock_disabled.go
generated
vendored
Normal file
@@ -0,0 +1,31 @@
|
||||
// Copyright (c) Tailscale Inc & AUTHORS
|
||||
// SPDX-License-Identifier: BSD-3-Clause
|
||||
|
||||
//go:build ts_omit_tailnetlock
|
||||
|
||||
package ipnlocal
|
||||
|
||||
import (
|
||||
"tailscale.com/ipn"
|
||||
"tailscale.com/ipn/ipnstate"
|
||||
"tailscale.com/tka"
|
||||
"tailscale.com/types/netmap"
|
||||
)
|
||||
|
||||
type tkaState struct {
|
||||
authority *tka.Authority
|
||||
}
|
||||
|
||||
func (b *LocalBackend) initTKALocked() error {
|
||||
return nil
|
||||
}
|
||||
|
||||
func (b *LocalBackend) tkaSyncIfNeeded(nm *netmap.NetworkMap, prefs ipn.PrefsView) error {
|
||||
return nil
|
||||
}
|
||||
|
||||
func (b *LocalBackend) tkaFilterNetmapLocked(nm *netmap.NetworkMap) {}
|
||||
|
||||
func (b *LocalBackend) NetworkLockStatus() *ipnstate.NetworkLockStatus {
|
||||
return &ipnstate.NetworkLockStatus{Enabled: false}
|
||||
}
|
||||
12
vendor/tailscale.com/ipn/ipnlocal/web_client.go
generated
vendored
12
vendor/tailscale.com/ipn/ipnlocal/web_client.go
generated
vendored
@@ -1,7 +1,7 @@
|
||||
// Copyright (c) Tailscale Inc & AUTHORS
|
||||
// SPDX-License-Identifier: BSD-3-Clause
|
||||
|
||||
//go:build !ios && !android
|
||||
//go:build !ios && !android && !ts_omit_webclient
|
||||
|
||||
package ipnlocal
|
||||
|
||||
@@ -19,14 +19,15 @@ import (
|
||||
|
||||
"tailscale.com/client/local"
|
||||
"tailscale.com/client/web"
|
||||
"tailscale.com/logtail/backoff"
|
||||
"tailscale.com/net/netutil"
|
||||
"tailscale.com/tailcfg"
|
||||
"tailscale.com/tsconst"
|
||||
"tailscale.com/types/logger"
|
||||
"tailscale.com/util/backoff"
|
||||
"tailscale.com/util/mak"
|
||||
)
|
||||
|
||||
const webClientPort = web.ListenPort
|
||||
const webClientPort = tsconst.WebListenPort
|
||||
|
||||
// webClient holds state for the web interface for managing this
|
||||
// tailscale instance. The web interface is not used by default,
|
||||
@@ -116,11 +117,12 @@ func (b *LocalBackend) handleWebClientConn(c net.Conn) error {
|
||||
// for each of the local device's Tailscale IP addresses. This is needed to properly
|
||||
// route local traffic when using kernel networking mode.
|
||||
func (b *LocalBackend) updateWebClientListenersLocked() {
|
||||
if b.netMap == nil {
|
||||
nm := b.currentNode().NetMap()
|
||||
if nm == nil {
|
||||
return
|
||||
}
|
||||
|
||||
addrs := b.netMap.GetAddresses()
|
||||
addrs := nm.GetAddresses()
|
||||
for _, pfx := range addrs.All() {
|
||||
addrPort := netip.AddrPortFrom(pfx.Addr(), webClientPort)
|
||||
if _, ok := b.webClientListeners[addrPort]; ok {
|
||||
|
||||
6
vendor/tailscale.com/ipn/ipnlocal/web_client_stub.go
generated
vendored
6
vendor/tailscale.com/ipn/ipnlocal/web_client_stub.go
generated
vendored
@@ -1,22 +1,20 @@
|
||||
// Copyright (c) Tailscale Inc & AUTHORS
|
||||
// SPDX-License-Identifier: BSD-3-Clause
|
||||
|
||||
//go:build ios || android
|
||||
//go:build ios || android || ts_omit_webclient
|
||||
|
||||
package ipnlocal
|
||||
|
||||
import (
|
||||
"errors"
|
||||
"net"
|
||||
|
||||
"tailscale.com/client/local"
|
||||
)
|
||||
|
||||
const webClientPort = 5252
|
||||
|
||||
type webClient struct{}
|
||||
|
||||
func (b *LocalBackend) ConfigureWebClient(lc *local.Client) {}
|
||||
func (b *LocalBackend) ConfigureWebClient(any) {}
|
||||
|
||||
func (b *LocalBackend) webClientGetOrInit() error {
|
||||
return errors.New("not implemented")
|
||||
|
||||
23
vendor/tailscale.com/ipn/ipnstate/ipnstate.go
generated
vendored
23
vendor/tailscale.com/ipn/ipnstate/ipnstate.go
generated
vendored
@@ -89,6 +89,7 @@ type Status struct {
|
||||
|
||||
// TKAKey describes a key trusted by network lock.
|
||||
type TKAKey struct {
|
||||
Kind string
|
||||
Key key.NLPublic
|
||||
Metadata map[string]string
|
||||
Votes uint
|
||||
@@ -251,9 +252,10 @@ type PeerStatus struct {
|
||||
PrimaryRoutes *views.Slice[netip.Prefix] `json:",omitempty"`
|
||||
|
||||
// Endpoints:
|
||||
Addrs []string
|
||||
CurAddr string // one of Addrs, or unique if roaming
|
||||
Relay string // DERP region
|
||||
Addrs []string
|
||||
CurAddr string // one of Addrs, or unique if roaming
|
||||
Relay string // DERP region
|
||||
PeerRelay string // peer relay address (ip:port:vni)
|
||||
|
||||
RxBytes int64
|
||||
TxBytes int64
|
||||
@@ -451,6 +453,9 @@ func (sb *StatusBuilder) AddPeer(peer key.NodePublic, st *PeerStatus) {
|
||||
if v := st.Relay; v != "" {
|
||||
e.Relay = v
|
||||
}
|
||||
if v := st.PeerRelay; v != "" {
|
||||
e.PeerRelay = v
|
||||
}
|
||||
if v := st.UserID; v != 0 {
|
||||
e.UserID = v
|
||||
}
|
||||
@@ -697,10 +702,17 @@ type PingResult struct {
|
||||
Err string
|
||||
LatencySeconds float64
|
||||
|
||||
// Endpoint is the ip:port if direct UDP was used.
|
||||
// It is not currently set for TSMP pings.
|
||||
// Endpoint is a string of the form "{ip}:{port}" if direct UDP was used. It
|
||||
// is not currently set for TSMP.
|
||||
Endpoint string
|
||||
|
||||
// PeerRelay is a string of the form "{ip}:{port}:vni:{vni}" if a peer
|
||||
// relay was used. It is not currently set for TSMP. Note that this field
|
||||
// is not omitted during JSON encoding if it contains a zero value. This is
|
||||
// done for consistency with the Endpoint field; this structure is exposed
|
||||
// externally via localAPI, so we want to maintain the existing convention.
|
||||
PeerRelay string
|
||||
|
||||
// DERPRegionID is non-zero DERP region ID if DERP was used.
|
||||
// It is not currently set for TSMP pings.
|
||||
DERPRegionID int
|
||||
@@ -735,6 +747,7 @@ func (pr *PingResult) ToPingResponse(pingType tailcfg.PingType) *tailcfg.PingRes
|
||||
Err: pr.Err,
|
||||
LatencySeconds: pr.LatencySeconds,
|
||||
Endpoint: pr.Endpoint,
|
||||
PeerRelay: pr.PeerRelay,
|
||||
DERPRegionID: pr.DERPRegionID,
|
||||
DERPRegionCode: pr.DERPRegionCode,
|
||||
PeerAPIPort: pr.PeerAPIPort,
|
||||
|
||||
6
vendor/tailscale.com/ipn/localapi/cert.go
generated
vendored
6
vendor/tailscale.com/ipn/localapi/cert.go
generated
vendored
@@ -1,7 +1,7 @@
|
||||
// Copyright (c) Tailscale Inc & AUTHORS
|
||||
// SPDX-License-Identifier: BSD-3-Clause
|
||||
|
||||
//go:build !ios && !android && !js
|
||||
//go:build !ios && !android && !js && !ts_omit_acme
|
||||
|
||||
package localapi
|
||||
|
||||
@@ -14,6 +14,10 @@ import (
|
||||
"tailscale.com/ipn/ipnlocal"
|
||||
)
|
||||
|
||||
func init() {
|
||||
Register("cert/", (*Handler).serveCert)
|
||||
}
|
||||
|
||||
func (h *Handler) serveCert(w http.ResponseWriter, r *http.Request) {
|
||||
if !h.PermitWrite && !h.PermitCert {
|
||||
http.Error(w, "cert access denied", http.StatusForbidden)
|
||||
|
||||
495
vendor/tailscale.com/ipn/localapi/debug.go
generated
vendored
Normal file
495
vendor/tailscale.com/ipn/localapi/debug.go
generated
vendored
Normal file
@@ -0,0 +1,495 @@
|
||||
// Copyright (c) Tailscale Inc & AUTHORS
|
||||
// SPDX-License-Identifier: BSD-3-Clause
|
||||
|
||||
//go:build !ts_omit_debug
|
||||
|
||||
package localapi
|
||||
|
||||
import (
|
||||
"context"
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"io"
|
||||
"net"
|
||||
"net/http"
|
||||
"net/netip"
|
||||
"reflect"
|
||||
"slices"
|
||||
"strconv"
|
||||
"sync"
|
||||
"time"
|
||||
|
||||
"tailscale.com/client/tailscale/apitype"
|
||||
"tailscale.com/feature"
|
||||
"tailscale.com/feature/buildfeatures"
|
||||
"tailscale.com/ipn"
|
||||
"tailscale.com/types/logger"
|
||||
"tailscale.com/util/eventbus"
|
||||
"tailscale.com/util/httpm"
|
||||
)
|
||||
|
||||
func init() {
|
||||
Register("component-debug-logging", (*Handler).serveComponentDebugLogging)
|
||||
Register("debug", (*Handler).serveDebug)
|
||||
Register("debug-rotate-disco-key", (*Handler).serveDebugRotateDiscoKey)
|
||||
Register("dev-set-state-store", (*Handler).serveDevSetStateStore)
|
||||
Register("debug-bus-events", (*Handler).serveDebugBusEvents)
|
||||
Register("debug-bus-graph", (*Handler).serveEventBusGraph)
|
||||
Register("debug-derp-region", (*Handler).serveDebugDERPRegion)
|
||||
Register("debug-dial-types", (*Handler).serveDebugDialTypes)
|
||||
Register("debug-log", (*Handler).serveDebugLog)
|
||||
Register("debug-packet-filter-matches", (*Handler).serveDebugPacketFilterMatches)
|
||||
Register("debug-packet-filter-rules", (*Handler).serveDebugPacketFilterRules)
|
||||
Register("debug-peer-endpoint-changes", (*Handler).serveDebugPeerEndpointChanges)
|
||||
Register("debug-optional-features", (*Handler).serveDebugOptionalFeatures)
|
||||
}
|
||||
|
||||
func (h *Handler) serveDebugPeerEndpointChanges(w http.ResponseWriter, r *http.Request) {
|
||||
if !h.PermitRead {
|
||||
http.Error(w, "status access denied", http.StatusForbidden)
|
||||
return
|
||||
}
|
||||
|
||||
ipStr := r.FormValue("ip")
|
||||
if ipStr == "" {
|
||||
http.Error(w, "missing 'ip' parameter", http.StatusBadRequest)
|
||||
return
|
||||
}
|
||||
ip, err := netip.ParseAddr(ipStr)
|
||||
if err != nil {
|
||||
http.Error(w, "invalid IP", http.StatusBadRequest)
|
||||
return
|
||||
}
|
||||
w.Header().Set("Content-Type", "application/json")
|
||||
chs, err := h.b.GetPeerEndpointChanges(r.Context(), ip)
|
||||
if err != nil {
|
||||
http.Error(w, err.Error(), http.StatusInternalServerError)
|
||||
return
|
||||
}
|
||||
|
||||
e := json.NewEncoder(w)
|
||||
e.SetIndent("", "\t")
|
||||
e.Encode(chs)
|
||||
}
|
||||
|
||||
func (h *Handler) serveComponentDebugLogging(w http.ResponseWriter, r *http.Request) {
|
||||
if !h.PermitWrite {
|
||||
http.Error(w, "debug access denied", http.StatusForbidden)
|
||||
return
|
||||
}
|
||||
component := r.FormValue("component")
|
||||
secs, _ := strconv.Atoi(r.FormValue("secs"))
|
||||
err := h.b.SetComponentDebugLogging(component, h.clock.Now().Add(time.Duration(secs)*time.Second))
|
||||
var res struct {
|
||||
Error string
|
||||
}
|
||||
if err != nil {
|
||||
res.Error = err.Error()
|
||||
}
|
||||
w.Header().Set("Content-Type", "application/json")
|
||||
json.NewEncoder(w).Encode(res)
|
||||
}
|
||||
|
||||
func (h *Handler) serveDebugDialTypes(w http.ResponseWriter, r *http.Request) {
|
||||
if !h.PermitWrite {
|
||||
http.Error(w, "debug-dial-types access denied", http.StatusForbidden)
|
||||
return
|
||||
}
|
||||
if r.Method != httpm.POST {
|
||||
http.Error(w, "only POST allowed", http.StatusMethodNotAllowed)
|
||||
return
|
||||
}
|
||||
|
||||
ip := r.FormValue("ip")
|
||||
port := r.FormValue("port")
|
||||
network := r.FormValue("network")
|
||||
|
||||
addr := ip + ":" + port
|
||||
if _, err := netip.ParseAddrPort(addr); err != nil {
|
||||
w.WriteHeader(http.StatusBadRequest)
|
||||
fmt.Fprintf(w, "invalid address %q: %v", addr, err)
|
||||
return
|
||||
}
|
||||
|
||||
ctx, cancel := context.WithTimeout(r.Context(), 5*time.Second)
|
||||
defer cancel()
|
||||
|
||||
var bareDialer net.Dialer
|
||||
|
||||
dialer := h.b.Dialer()
|
||||
|
||||
var peerDialer net.Dialer
|
||||
peerDialer.Control = dialer.PeerDialControlFunc()
|
||||
|
||||
// Kick off a dial with each available dialer in parallel.
|
||||
dialers := []struct {
|
||||
name string
|
||||
dial func(context.Context, string, string) (net.Conn, error)
|
||||
}{
|
||||
{"SystemDial", dialer.SystemDial},
|
||||
{"UserDial", dialer.UserDial},
|
||||
{"PeerDial", peerDialer.DialContext},
|
||||
{"BareDial", bareDialer.DialContext},
|
||||
}
|
||||
type result struct {
|
||||
name string
|
||||
conn net.Conn
|
||||
err error
|
||||
}
|
||||
results := make(chan result, len(dialers))
|
||||
|
||||
var wg sync.WaitGroup
|
||||
for _, dialer := range dialers {
|
||||
dialer := dialer // loop capture
|
||||
|
||||
wg.Add(1)
|
||||
go func() {
|
||||
defer wg.Done()
|
||||
conn, err := dialer.dial(ctx, network, addr)
|
||||
results <- result{dialer.name, conn, err}
|
||||
}()
|
||||
}
|
||||
|
||||
wg.Wait()
|
||||
for range len(dialers) {
|
||||
res := <-results
|
||||
fmt.Fprintf(w, "[%s] connected=%v err=%v\n", res.name, res.conn != nil, res.err)
|
||||
if res.conn != nil {
|
||||
res.conn.Close()
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func (h *Handler) serveDebug(w http.ResponseWriter, r *http.Request) {
|
||||
if !buildfeatures.HasDebug {
|
||||
http.Error(w, "debug not supported in this build", http.StatusNotImplemented)
|
||||
return
|
||||
}
|
||||
if !h.PermitWrite {
|
||||
http.Error(w, "debug access denied", http.StatusForbidden)
|
||||
return
|
||||
}
|
||||
if r.Method != httpm.POST {
|
||||
http.Error(w, "POST required", http.StatusMethodNotAllowed)
|
||||
return
|
||||
}
|
||||
// The action is normally in a POST form parameter, but
|
||||
// some actions (like "notify") want a full JSON body, so
|
||||
// permit some to have their action in a header.
|
||||
var action string
|
||||
switch v := r.Header.Get("Debug-Action"); v {
|
||||
case "notify":
|
||||
action = v
|
||||
default:
|
||||
action = r.FormValue("action")
|
||||
}
|
||||
var err error
|
||||
switch action {
|
||||
case "derp-set-homeless":
|
||||
h.b.MagicConn().SetHomeless(true)
|
||||
case "derp-unset-homeless":
|
||||
h.b.MagicConn().SetHomeless(false)
|
||||
case "rebind":
|
||||
err = h.b.DebugRebind()
|
||||
case "restun":
|
||||
err = h.b.DebugReSTUN()
|
||||
case "notify":
|
||||
var n ipn.Notify
|
||||
err = json.NewDecoder(r.Body).Decode(&n)
|
||||
if err != nil {
|
||||
break
|
||||
}
|
||||
h.b.DebugNotify(n)
|
||||
case "notify-last-netmap":
|
||||
h.b.DebugNotifyLastNetMap()
|
||||
case "break-tcp-conns":
|
||||
err = h.b.DebugBreakTCPConns()
|
||||
case "break-derp-conns":
|
||||
err = h.b.DebugBreakDERPConns()
|
||||
case "force-netmap-update":
|
||||
h.b.DebugForceNetmapUpdate()
|
||||
case "control-knobs":
|
||||
k := h.b.ControlKnobs()
|
||||
w.Header().Set("Content-Type", "application/json")
|
||||
err = json.NewEncoder(w).Encode(k.AsDebugJSON())
|
||||
if err == nil {
|
||||
return
|
||||
}
|
||||
case "pick-new-derp":
|
||||
err = h.b.DebugPickNewDERP()
|
||||
case "force-prefer-derp":
|
||||
var n int
|
||||
err = json.NewDecoder(r.Body).Decode(&n)
|
||||
if err != nil {
|
||||
break
|
||||
}
|
||||
h.b.DebugForcePreferDERP(n)
|
||||
case "peer-relay-servers":
|
||||
servers := h.b.DebugPeerRelayServers().Slice()
|
||||
slices.SortFunc(servers, func(a, b netip.Addr) int {
|
||||
return a.Compare(b)
|
||||
})
|
||||
err = json.NewEncoder(w).Encode(servers)
|
||||
if err == nil {
|
||||
return
|
||||
}
|
||||
case "rotate-disco-key":
|
||||
err = h.b.DebugRotateDiscoKey()
|
||||
case "":
|
||||
err = fmt.Errorf("missing parameter 'action'")
|
||||
default:
|
||||
err = fmt.Errorf("unknown action %q", action)
|
||||
}
|
||||
if err != nil {
|
||||
http.Error(w, err.Error(), http.StatusBadRequest)
|
||||
return
|
||||
}
|
||||
w.Header().Set("Content-Type", "text/plain")
|
||||
io.WriteString(w, "done\n")
|
||||
}
|
||||
|
||||
func (h *Handler) serveDevSetStateStore(w http.ResponseWriter, r *http.Request) {
|
||||
if !h.PermitWrite {
|
||||
http.Error(w, "debug access denied", http.StatusForbidden)
|
||||
return
|
||||
}
|
||||
if r.Method != httpm.POST {
|
||||
http.Error(w, "POST required", http.StatusMethodNotAllowed)
|
||||
return
|
||||
}
|
||||
if err := h.b.SetDevStateStore(r.FormValue("key"), r.FormValue("value")); err != nil {
|
||||
http.Error(w, err.Error(), http.StatusInternalServerError)
|
||||
return
|
||||
}
|
||||
w.Header().Set("Content-Type", "text/plain")
|
||||
io.WriteString(w, "done\n")
|
||||
}
|
||||
|
||||
func (h *Handler) serveDebugPacketFilterRules(w http.ResponseWriter, r *http.Request) {
|
||||
if !h.PermitWrite {
|
||||
http.Error(w, "debug access denied", http.StatusForbidden)
|
||||
return
|
||||
}
|
||||
nm := h.b.NetMap()
|
||||
if nm == nil {
|
||||
http.Error(w, "no netmap", http.StatusNotFound)
|
||||
return
|
||||
}
|
||||
w.Header().Set("Content-Type", "application/json")
|
||||
|
||||
enc := json.NewEncoder(w)
|
||||
enc.SetIndent("", "\t")
|
||||
enc.Encode(nm.PacketFilterRules)
|
||||
}
|
||||
|
||||
func (h *Handler) serveDebugPacketFilterMatches(w http.ResponseWriter, r *http.Request) {
|
||||
if !h.PermitWrite {
|
||||
http.Error(w, "debug access denied", http.StatusForbidden)
|
||||
return
|
||||
}
|
||||
nm := h.b.NetMap()
|
||||
if nm == nil {
|
||||
http.Error(w, "no netmap", http.StatusNotFound)
|
||||
return
|
||||
}
|
||||
w.Header().Set("Content-Type", "application/json")
|
||||
|
||||
enc := json.NewEncoder(w)
|
||||
enc.SetIndent("", "\t")
|
||||
enc.Encode(nm.PacketFilter)
|
||||
}
|
||||
|
||||
// debugEventError provides the JSON encoding of internal errors from event processing.
|
||||
type debugEventError struct {
|
||||
Error string
|
||||
}
|
||||
|
||||
// serveDebugBusEvents taps into the tailscaled/utils/eventbus and streams
|
||||
// events to the client.
|
||||
func (h *Handler) serveDebugBusEvents(w http.ResponseWriter, r *http.Request) {
|
||||
// Require write access (~root) as the logs could contain something
|
||||
// sensitive.
|
||||
if !h.PermitWrite {
|
||||
http.Error(w, "event bus access denied", http.StatusForbidden)
|
||||
return
|
||||
}
|
||||
if r.Method != httpm.GET {
|
||||
http.Error(w, "GET required", http.StatusMethodNotAllowed)
|
||||
return
|
||||
}
|
||||
|
||||
bus, ok := h.LocalBackend().Sys().Bus.GetOK()
|
||||
if !ok {
|
||||
http.Error(w, "event bus not running", http.StatusNoContent)
|
||||
return
|
||||
}
|
||||
|
||||
f, ok := w.(http.Flusher)
|
||||
if !ok {
|
||||
http.Error(w, "streaming unsupported", http.StatusInternalServerError)
|
||||
return
|
||||
}
|
||||
|
||||
io.WriteString(w, `{"Event":"[event listener connected]\n"}`+"\n")
|
||||
f.Flush()
|
||||
|
||||
mon := bus.Debugger().WatchBus()
|
||||
defer mon.Close()
|
||||
|
||||
i := 0
|
||||
for {
|
||||
select {
|
||||
case <-r.Context().Done():
|
||||
fmt.Fprintf(w, `{"Event":"[event listener closed]\n"}`)
|
||||
return
|
||||
case <-mon.Done():
|
||||
return
|
||||
case event := <-mon.Events():
|
||||
data := eventbus.DebugEvent{
|
||||
Count: i,
|
||||
Type: reflect.TypeOf(event.Event).String(),
|
||||
Event: event.Event,
|
||||
From: event.From.Name(),
|
||||
}
|
||||
for _, client := range event.To {
|
||||
data.To = append(data.To, client.Name())
|
||||
}
|
||||
|
||||
if msg, err := json.Marshal(data); err != nil {
|
||||
data.Event = debugEventError{Error: fmt.Sprintf(
|
||||
"failed to marshal JSON for %T", event.Event,
|
||||
)}
|
||||
if errMsg, err := json.Marshal(data); err != nil {
|
||||
fmt.Fprintf(w,
|
||||
`{"Count": %d, "Event":"[ERROR] failed to marshal JSON for %T\n"}`,
|
||||
i, event.Event)
|
||||
} else {
|
||||
w.Write(errMsg)
|
||||
}
|
||||
} else {
|
||||
w.Write(msg)
|
||||
}
|
||||
f.Flush()
|
||||
i++
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// serveEventBusGraph taps into the event bus and dumps out the active graph of
|
||||
// publishers and subscribers. It does not represent anything about the messages
|
||||
// exchanged.
|
||||
func (h *Handler) serveEventBusGraph(w http.ResponseWriter, r *http.Request) {
|
||||
if r.Method != httpm.GET {
|
||||
http.Error(w, "GET required", http.StatusMethodNotAllowed)
|
||||
return
|
||||
}
|
||||
|
||||
bus, ok := h.LocalBackend().Sys().Bus.GetOK()
|
||||
if !ok {
|
||||
http.Error(w, "event bus not running", http.StatusPreconditionFailed)
|
||||
return
|
||||
}
|
||||
|
||||
debugger := bus.Debugger()
|
||||
clients := debugger.Clients()
|
||||
|
||||
graph := map[string]eventbus.DebugTopic{}
|
||||
|
||||
for _, client := range clients {
|
||||
for _, pub := range debugger.PublishTypes(client) {
|
||||
topic, ok := graph[pub.Name()]
|
||||
if !ok {
|
||||
topic = eventbus.DebugTopic{Name: pub.Name()}
|
||||
}
|
||||
topic.Publisher = client.Name()
|
||||
graph[pub.Name()] = topic
|
||||
}
|
||||
for _, sub := range debugger.SubscribeTypes(client) {
|
||||
topic, ok := graph[sub.Name()]
|
||||
if !ok {
|
||||
topic = eventbus.DebugTopic{Name: sub.Name()}
|
||||
}
|
||||
topic.Subscribers = append(topic.Subscribers, client.Name())
|
||||
graph[sub.Name()] = topic
|
||||
}
|
||||
}
|
||||
|
||||
// The top level map is not really needed for the client, convert to a list.
|
||||
topics := eventbus.DebugTopics{}
|
||||
for _, v := range graph {
|
||||
topics.Topics = append(topics.Topics, v)
|
||||
}
|
||||
|
||||
w.Header().Set("Content-Type", "application/json")
|
||||
json.NewEncoder(w).Encode(topics)
|
||||
}
|
||||
|
||||
func (h *Handler) serveDebugLog(w http.ResponseWriter, r *http.Request) {
|
||||
if !buildfeatures.HasLogTail {
|
||||
http.Error(w, feature.ErrUnavailable.Error(), http.StatusNotImplemented)
|
||||
return
|
||||
}
|
||||
if !h.PermitRead {
|
||||
http.Error(w, "debug-log access denied", http.StatusForbidden)
|
||||
return
|
||||
}
|
||||
if r.Method != httpm.POST {
|
||||
http.Error(w, "only POST allowed", http.StatusMethodNotAllowed)
|
||||
return
|
||||
}
|
||||
defer h.b.TryFlushLogs() // kick off upload after we're done logging
|
||||
|
||||
type logRequestJSON struct {
|
||||
Lines []string
|
||||
Prefix string
|
||||
}
|
||||
|
||||
var logRequest logRequestJSON
|
||||
if err := json.NewDecoder(r.Body).Decode(&logRequest); err != nil {
|
||||
http.Error(w, "invalid JSON body", http.StatusBadRequest)
|
||||
return
|
||||
}
|
||||
|
||||
prefix := logRequest.Prefix
|
||||
if prefix == "" {
|
||||
prefix = "debug-log"
|
||||
}
|
||||
logf := logger.WithPrefix(h.logf, prefix+": ")
|
||||
|
||||
// We can write logs too fast for logtail to handle, even when
|
||||
// opting-out of rate limits. Limit ourselves to at most one message
|
||||
// per 20ms and a burst of 60 log lines, which should be fast enough to
|
||||
// not block for too long but slow enough that we can upload all lines.
|
||||
logf = logger.SlowLoggerWithClock(r.Context(), logf, 20*time.Millisecond, 60, h.clock.Now)
|
||||
|
||||
for _, line := range logRequest.Lines {
|
||||
logf("%s", line)
|
||||
}
|
||||
|
||||
w.WriteHeader(http.StatusNoContent)
|
||||
}
|
||||
|
||||
func (h *Handler) serveDebugOptionalFeatures(w http.ResponseWriter, r *http.Request) {
|
||||
of := &apitype.OptionalFeatures{
|
||||
Features: feature.Registered(),
|
||||
}
|
||||
w.Header().Set("Content-Type", "application/json")
|
||||
json.NewEncoder(w).Encode(of)
|
||||
}
|
||||
|
||||
func (h *Handler) serveDebugRotateDiscoKey(w http.ResponseWriter, r *http.Request) {
|
||||
if !h.PermitWrite {
|
||||
http.Error(w, "debug access denied", http.StatusForbidden)
|
||||
return
|
||||
}
|
||||
if r.Method != httpm.POST {
|
||||
http.Error(w, "POST required", http.StatusMethodNotAllowed)
|
||||
return
|
||||
}
|
||||
if err := h.b.DebugRotateDiscoKey(); err != nil {
|
||||
http.Error(w, err.Error(), http.StatusInternalServerError)
|
||||
return
|
||||
}
|
||||
w.Header().Set("Content-Type", "text/plain")
|
||||
io.WriteString(w, "done\n")
|
||||
}
|
||||
92
vendor/tailscale.com/ipn/localapi/debugderp.go
generated
vendored
92
vendor/tailscale.com/ipn/localapi/debugderp.go
generated
vendored
@@ -1,6 +1,8 @@
|
||||
// Copyright (c) Tailscale Inc & AUTHORS
|
||||
// SPDX-License-Identifier: BSD-3-Clause
|
||||
|
||||
//go:build !ts_omit_debug
|
||||
|
||||
package localapi
|
||||
|
||||
import (
|
||||
@@ -228,55 +230,59 @@ func (h *Handler) serveDebugDERPRegion(w http.ResponseWriter, r *http.Request) {
|
||||
|
||||
// Start by checking whether we can establish a HTTP connection
|
||||
for _, derpNode := range reg.Nodes {
|
||||
connSuccess := checkConn(derpNode)
|
||||
if !derpNode.STUNOnly {
|
||||
connSuccess := checkConn(derpNode)
|
||||
|
||||
// Verify that the /generate_204 endpoint works
|
||||
captivePortalURL := fmt.Sprintf("http://%s/generate_204?t=%d", derpNode.HostName, time.Now().Unix())
|
||||
req, err := http.NewRequest("GET", captivePortalURL, nil)
|
||||
if err != nil {
|
||||
st.Warnings = append(st.Warnings, fmt.Sprintf("Internal error creating request for captive portal check: %v", err))
|
||||
continue
|
||||
}
|
||||
req.Header.Set("Cache-Control", "no-cache, no-store, must-revalidate, no-transform, max-age=0")
|
||||
resp, err := client.Do(req)
|
||||
if err != nil {
|
||||
st.Warnings = append(st.Warnings, fmt.Sprintf("Error making request to the captive portal check %q; is port 80 blocked?", captivePortalURL))
|
||||
} else {
|
||||
resp.Body.Close()
|
||||
}
|
||||
// Verify that the /generate_204 endpoint works
|
||||
captivePortalURL := fmt.Sprintf("http://%s/generate_204?t=%d", derpNode.HostName, time.Now().Unix())
|
||||
req, err := http.NewRequest("GET", captivePortalURL, nil)
|
||||
if err != nil {
|
||||
st.Warnings = append(st.Warnings, fmt.Sprintf("Internal error creating request for captive portal check: %v", err))
|
||||
continue
|
||||
}
|
||||
req.Header.Set("Cache-Control", "no-cache, no-store, must-revalidate, no-transform, max-age=0")
|
||||
resp, err := client.Do(req)
|
||||
if err != nil {
|
||||
st.Warnings = append(st.Warnings, fmt.Sprintf("Error making request to the captive portal check %q; is port 80 blocked?", captivePortalURL))
|
||||
} else {
|
||||
resp.Body.Close()
|
||||
}
|
||||
|
||||
if !connSuccess {
|
||||
continue
|
||||
}
|
||||
if !connSuccess {
|
||||
continue
|
||||
}
|
||||
|
||||
fakePrivKey := key.NewNode()
|
||||
fakePrivKey := key.NewNode()
|
||||
|
||||
// Next, repeatedly get the server key to see if the node is
|
||||
// behind a load balancer (incorrectly).
|
||||
serverPubKeys := make(map[key.NodePublic]bool)
|
||||
for i := range 5 {
|
||||
func() {
|
||||
rc := derphttp.NewRegionClient(fakePrivKey, h.logf, h.b.NetMon(), func() *tailcfg.DERPRegion {
|
||||
return &tailcfg.DERPRegion{
|
||||
RegionID: reg.RegionID,
|
||||
RegionCode: reg.RegionCode,
|
||||
RegionName: reg.RegionName,
|
||||
Nodes: []*tailcfg.DERPNode{derpNode},
|
||||
// Next, repeatedly get the server key to see if the node is
|
||||
// behind a load balancer (incorrectly).
|
||||
serverPubKeys := make(map[key.NodePublic]bool)
|
||||
for i := range 5 {
|
||||
func() {
|
||||
rc := derphttp.NewRegionClient(fakePrivKey, h.logf, h.b.NetMon(), func() *tailcfg.DERPRegion {
|
||||
return &tailcfg.DERPRegion{
|
||||
RegionID: reg.RegionID,
|
||||
RegionCode: reg.RegionCode,
|
||||
RegionName: reg.RegionName,
|
||||
Nodes: []*tailcfg.DERPNode{derpNode},
|
||||
}
|
||||
})
|
||||
if err := rc.Connect(ctx); err != nil {
|
||||
st.Errors = append(st.Errors, fmt.Sprintf("Error connecting to node %q @ try %d: %v", derpNode.HostName, i, err))
|
||||
return
|
||||
}
|
||||
})
|
||||
if err := rc.Connect(ctx); err != nil {
|
||||
st.Errors = append(st.Errors, fmt.Sprintf("Error connecting to node %q @ try %d: %v", derpNode.HostName, i, err))
|
||||
return
|
||||
}
|
||||
|
||||
if len(serverPubKeys) == 0 {
|
||||
st.Info = append(st.Info, fmt.Sprintf("Successfully established a DERP connection with node %q", derpNode.HostName))
|
||||
}
|
||||
serverPubKeys[rc.ServerPublicKey()] = true
|
||||
}()
|
||||
}
|
||||
if len(serverPubKeys) > 1 {
|
||||
st.Errors = append(st.Errors, fmt.Sprintf("Received multiple server public keys (%d); is the DERP server behind a load balancer?", len(serverPubKeys)))
|
||||
if len(serverPubKeys) == 0 {
|
||||
st.Info = append(st.Info, fmt.Sprintf("Successfully established a DERP connection with node %q", derpNode.HostName))
|
||||
}
|
||||
serverPubKeys[rc.ServerPublicKey()] = true
|
||||
}()
|
||||
}
|
||||
if len(serverPubKeys) > 1 {
|
||||
st.Errors = append(st.Errors, fmt.Sprintf("Received multiple server public keys (%d); is the DERP server behind a load balancer?", len(serverPubKeys)))
|
||||
}
|
||||
} else {
|
||||
st.Info = append(st.Info, fmt.Sprintf("Node %q is marked STUNOnly; skipped non-STUN checks", derpNode.HostName))
|
||||
}
|
||||
|
||||
// Send a STUN query to this node to verify whether or not it
|
||||
|
||||
2013
vendor/tailscale.com/ipn/localapi/localapi.go
generated
vendored
2013
vendor/tailscale.com/ipn/localapi/localapi.go
generated
vendored
File diff suppressed because it is too large
Load Diff
141
vendor/tailscale.com/ipn/localapi/localapi_drive.go
generated
vendored
Normal file
141
vendor/tailscale.com/ipn/localapi/localapi_drive.go
generated
vendored
Normal file
@@ -0,0 +1,141 @@
|
||||
// Copyright (c) Tailscale Inc & AUTHORS
|
||||
// SPDX-License-Identifier: BSD-3-Clause
|
||||
|
||||
//go:build !ts_omit_drive
|
||||
|
||||
package localapi
|
||||
|
||||
import (
|
||||
"encoding/json"
|
||||
"errors"
|
||||
"io"
|
||||
"net/http"
|
||||
"os"
|
||||
"path"
|
||||
|
||||
"tailscale.com/drive"
|
||||
"tailscale.com/util/httpm"
|
||||
)
|
||||
|
||||
func init() {
|
||||
Register("drive/fileserver-address", (*Handler).serveDriveServerAddr)
|
||||
Register("drive/shares", (*Handler).serveShares)
|
||||
}
|
||||
|
||||
// serveDriveServerAddr handles updates of the Taildrive file server address.
|
||||
func (h *Handler) serveDriveServerAddr(w http.ResponseWriter, r *http.Request) {
|
||||
if r.Method != httpm.PUT {
|
||||
http.Error(w, "only PUT allowed", http.StatusMethodNotAllowed)
|
||||
return
|
||||
}
|
||||
|
||||
b, err := io.ReadAll(r.Body)
|
||||
if err != nil {
|
||||
http.Error(w, err.Error(), http.StatusBadRequest)
|
||||
return
|
||||
}
|
||||
|
||||
h.b.DriveSetServerAddr(string(b))
|
||||
w.WriteHeader(http.StatusCreated)
|
||||
}
|
||||
|
||||
// serveShares handles the management of Taildrive shares.
|
||||
//
|
||||
// PUT - adds or updates an existing share
|
||||
// DELETE - removes a share
|
||||
// GET - gets a list of all shares, sorted by name
|
||||
// POST - renames an existing share
|
||||
func (h *Handler) serveShares(w http.ResponseWriter, r *http.Request) {
|
||||
if !h.b.DriveSharingEnabled() {
|
||||
http.Error(w, `taildrive sharing not enabled, please add the attribute "drive:share" to this node in your ACLs' "nodeAttrs" section`, http.StatusForbidden)
|
||||
return
|
||||
}
|
||||
switch r.Method {
|
||||
case httpm.PUT:
|
||||
var share drive.Share
|
||||
err := json.NewDecoder(r.Body).Decode(&share)
|
||||
if err != nil {
|
||||
http.Error(w, err.Error(), http.StatusBadRequest)
|
||||
return
|
||||
}
|
||||
share.Path = path.Clean(share.Path)
|
||||
fi, err := os.Stat(share.Path)
|
||||
if err != nil {
|
||||
http.Error(w, err.Error(), http.StatusBadRequest)
|
||||
return
|
||||
}
|
||||
if !fi.IsDir() {
|
||||
http.Error(w, "not a directory", http.StatusBadRequest)
|
||||
return
|
||||
}
|
||||
if drive.AllowShareAs() {
|
||||
// share as the connected user
|
||||
username, err := h.Actor.Username()
|
||||
if err != nil {
|
||||
http.Error(w, err.Error(), http.StatusInternalServerError)
|
||||
return
|
||||
}
|
||||
share.As = username
|
||||
}
|
||||
err = h.b.DriveSetShare(&share)
|
||||
if err != nil {
|
||||
if errors.Is(err, drive.ErrInvalidShareName) {
|
||||
http.Error(w, "invalid share name", http.StatusBadRequest)
|
||||
return
|
||||
}
|
||||
http.Error(w, err.Error(), http.StatusInternalServerError)
|
||||
return
|
||||
}
|
||||
w.WriteHeader(http.StatusCreated)
|
||||
case httpm.DELETE:
|
||||
b, err := io.ReadAll(r.Body)
|
||||
if err != nil {
|
||||
http.Error(w, err.Error(), http.StatusBadRequest)
|
||||
return
|
||||
}
|
||||
err = h.b.DriveRemoveShare(string(b))
|
||||
if err != nil {
|
||||
if os.IsNotExist(err) {
|
||||
http.Error(w, "share not found", http.StatusNotFound)
|
||||
return
|
||||
}
|
||||
http.Error(w, err.Error(), http.StatusInternalServerError)
|
||||
return
|
||||
}
|
||||
w.WriteHeader(http.StatusNoContent)
|
||||
case httpm.POST:
|
||||
var names [2]string
|
||||
err := json.NewDecoder(r.Body).Decode(&names)
|
||||
if err != nil {
|
||||
http.Error(w, err.Error(), http.StatusBadRequest)
|
||||
return
|
||||
}
|
||||
err = h.b.DriveRenameShare(names[0], names[1])
|
||||
if err != nil {
|
||||
if os.IsNotExist(err) {
|
||||
http.Error(w, "share not found", http.StatusNotFound)
|
||||
return
|
||||
}
|
||||
if os.IsExist(err) {
|
||||
http.Error(w, "share name already used", http.StatusBadRequest)
|
||||
return
|
||||
}
|
||||
if errors.Is(err, drive.ErrInvalidShareName) {
|
||||
http.Error(w, "invalid share name", http.StatusBadRequest)
|
||||
return
|
||||
}
|
||||
http.Error(w, err.Error(), http.StatusInternalServerError)
|
||||
return
|
||||
}
|
||||
w.WriteHeader(http.StatusNoContent)
|
||||
case httpm.GET:
|
||||
shares := h.b.DriveGetShares()
|
||||
err := json.NewEncoder(w).Encode(shares)
|
||||
if err != nil {
|
||||
http.Error(w, err.Error(), http.StatusInternalServerError)
|
||||
return
|
||||
}
|
||||
default:
|
||||
http.Error(w, "unsupported method", http.StatusMethodNotAllowed)
|
||||
}
|
||||
}
|
||||
2
vendor/tailscale.com/ipn/localapi/pprof.go
generated
vendored
2
vendor/tailscale.com/ipn/localapi/pprof.go
generated
vendored
@@ -1,7 +1,7 @@
|
||||
// Copyright (c) Tailscale Inc & AUTHORS
|
||||
// SPDX-License-Identifier: BSD-3-Clause
|
||||
|
||||
//go:build !ios && !android && !js
|
||||
//go:build !ios && !android && !js && !ts_omit_debug
|
||||
|
||||
// We don't include it on mobile where we're more memory constrained and
|
||||
// there's no CLI to get at the results anyway.
|
||||
|
||||
108
vendor/tailscale.com/ipn/localapi/serve.go
generated
vendored
Normal file
108
vendor/tailscale.com/ipn/localapi/serve.go
generated
vendored
Normal file
@@ -0,0 +1,108 @@
|
||||
// Copyright (c) Tailscale Inc & AUTHORS
|
||||
// SPDX-License-Identifier: BSD-3-Clause
|
||||
|
||||
//go:build !ts_omit_serve
|
||||
|
||||
package localapi
|
||||
|
||||
import (
|
||||
"encoding/json"
|
||||
"errors"
|
||||
"fmt"
|
||||
"net/http"
|
||||
"runtime"
|
||||
|
||||
"tailscale.com/ipn"
|
||||
"tailscale.com/ipn/ipnlocal"
|
||||
"tailscale.com/util/httpm"
|
||||
"tailscale.com/version"
|
||||
)
|
||||
|
||||
func init() {
|
||||
Register("serve-config", (*Handler).serveServeConfig)
|
||||
}
|
||||
|
||||
func (h *Handler) serveServeConfig(w http.ResponseWriter, r *http.Request) {
|
||||
switch r.Method {
|
||||
case httpm.GET:
|
||||
if !h.PermitRead {
|
||||
http.Error(w, "serve config denied", http.StatusForbidden)
|
||||
return
|
||||
}
|
||||
config, etag, err := h.b.ServeConfigETag()
|
||||
if err != nil {
|
||||
http.Error(w, err.Error(), http.StatusInternalServerError)
|
||||
return
|
||||
}
|
||||
bts, err := json.Marshal(config)
|
||||
if err != nil {
|
||||
http.Error(w, "error encoding config: "+err.Error(), http.StatusInternalServerError)
|
||||
return
|
||||
}
|
||||
w.Header().Set("Etag", etag)
|
||||
w.Header().Set("Content-Type", "application/json")
|
||||
w.Write(bts)
|
||||
case httpm.POST:
|
||||
if !h.PermitWrite {
|
||||
http.Error(w, "serve config denied", http.StatusForbidden)
|
||||
return
|
||||
}
|
||||
configIn := new(ipn.ServeConfig)
|
||||
if err := json.NewDecoder(r.Body).Decode(configIn); err != nil {
|
||||
WriteErrorJSON(w, fmt.Errorf("decoding config: %w", err))
|
||||
return
|
||||
}
|
||||
|
||||
// require a local admin when setting a path handler
|
||||
// TODO: roll-up this Windows-specific check into either PermitWrite
|
||||
// or a global admin escalation check.
|
||||
if err := authorizeServeConfigForGOOSAndUserContext(runtime.GOOS, configIn, h); err != nil {
|
||||
http.Error(w, err.Error(), http.StatusUnauthorized)
|
||||
return
|
||||
}
|
||||
|
||||
etag := r.Header.Get("If-Match")
|
||||
if err := h.b.SetServeConfig(configIn, etag); err != nil {
|
||||
if errors.Is(err, ipnlocal.ErrETagMismatch) {
|
||||
http.Error(w, err.Error(), http.StatusPreconditionFailed)
|
||||
return
|
||||
}
|
||||
WriteErrorJSON(w, fmt.Errorf("updating config: %w", err))
|
||||
return
|
||||
}
|
||||
w.WriteHeader(http.StatusOK)
|
||||
default:
|
||||
http.Error(w, "method not allowed", http.StatusMethodNotAllowed)
|
||||
}
|
||||
}
|
||||
|
||||
func authorizeServeConfigForGOOSAndUserContext(goos string, configIn *ipn.ServeConfig, h *Handler) error {
|
||||
switch goos {
|
||||
case "windows", "linux", "darwin", "illumos", "solaris":
|
||||
default:
|
||||
return nil
|
||||
}
|
||||
// Only check for local admin on tailscaled-on-mac (based on "sudo"
|
||||
// permissions). On sandboxed variants (MacSys and AppStore), tailscaled
|
||||
// cannot serve files outside of the sandbox and this check is not
|
||||
// relevant.
|
||||
if goos == "darwin" && version.IsSandboxedMacOS() {
|
||||
return nil
|
||||
}
|
||||
if !configIn.HasPathHandler() {
|
||||
return nil
|
||||
}
|
||||
if h.Actor.IsLocalAdmin(h.b.OperatorUserID()) {
|
||||
return nil
|
||||
}
|
||||
switch goos {
|
||||
case "windows":
|
||||
return errors.New("must be a Windows local admin to serve a path")
|
||||
case "linux", "darwin", "illumos", "solaris":
|
||||
return errors.New("must be root, or be an operator and able to run 'sudo tailscale' to serve a path")
|
||||
default:
|
||||
// We filter goos at the start of the func, this default case
|
||||
// should never happen.
|
||||
panic("unreachable")
|
||||
}
|
||||
}
|
||||
68
vendor/tailscale.com/ipn/localapi/syspolicy_api.go
generated
vendored
Normal file
68
vendor/tailscale.com/ipn/localapi/syspolicy_api.go
generated
vendored
Normal file
@@ -0,0 +1,68 @@
|
||||
// Copyright (c) Tailscale Inc & AUTHORS
|
||||
// SPDX-License-Identifier: BSD-3-Clause
|
||||
|
||||
//go:build !ts_omit_syspolicy
|
||||
|
||||
package localapi
|
||||
|
||||
import (
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"net/http"
|
||||
"strings"
|
||||
|
||||
"tailscale.com/util/httpm"
|
||||
"tailscale.com/util/syspolicy/rsop"
|
||||
"tailscale.com/util/syspolicy/setting"
|
||||
)
|
||||
|
||||
func init() {
|
||||
Register("policy/", (*Handler).servePolicy)
|
||||
}
|
||||
|
||||
func (h *Handler) servePolicy(w http.ResponseWriter, r *http.Request) {
|
||||
if !h.PermitRead {
|
||||
http.Error(w, "policy access denied", http.StatusForbidden)
|
||||
return
|
||||
}
|
||||
|
||||
suffix, ok := strings.CutPrefix(r.URL.EscapedPath(), "/localapi/v0/policy/")
|
||||
if !ok {
|
||||
http.Error(w, "misconfigured", http.StatusInternalServerError)
|
||||
return
|
||||
}
|
||||
|
||||
var scope setting.PolicyScope
|
||||
if suffix == "" {
|
||||
scope = setting.DefaultScope()
|
||||
} else if err := scope.UnmarshalText([]byte(suffix)); err != nil {
|
||||
http.Error(w, fmt.Sprintf("%q is not a valid scope", suffix), http.StatusBadRequest)
|
||||
return
|
||||
}
|
||||
|
||||
policy, err := rsop.PolicyFor(scope)
|
||||
if err != nil {
|
||||
http.Error(w, err.Error(), http.StatusInternalServerError)
|
||||
return
|
||||
}
|
||||
|
||||
var effectivePolicy *setting.Snapshot
|
||||
switch r.Method {
|
||||
case httpm.GET:
|
||||
effectivePolicy = policy.Get()
|
||||
case httpm.POST:
|
||||
effectivePolicy, err = policy.Reload()
|
||||
if err != nil {
|
||||
http.Error(w, err.Error(), http.StatusInternalServerError)
|
||||
return
|
||||
}
|
||||
default:
|
||||
http.Error(w, "unsupported method", http.StatusMethodNotAllowed)
|
||||
return
|
||||
}
|
||||
|
||||
w.Header().Set("Content-Type", "application/json")
|
||||
e := json.NewEncoder(w)
|
||||
e.SetIndent("", "\t")
|
||||
e.Encode(effectivePolicy)
|
||||
}
|
||||
413
vendor/tailscale.com/ipn/localapi/tailnetlock.go
generated
vendored
Normal file
413
vendor/tailscale.com/ipn/localapi/tailnetlock.go
generated
vendored
Normal file
@@ -0,0 +1,413 @@
|
||||
// Copyright (c) Tailscale Inc & AUTHORS
|
||||
// SPDX-License-Identifier: BSD-3-Clause
|
||||
|
||||
//go:build !ts_omit_tailnetlock
|
||||
|
||||
package localapi
|
||||
|
||||
import (
|
||||
"encoding/json"
|
||||
"io"
|
||||
"net/http"
|
||||
"strconv"
|
||||
|
||||
"tailscale.com/tka"
|
||||
"tailscale.com/types/key"
|
||||
"tailscale.com/types/tkatype"
|
||||
"tailscale.com/util/httpm"
|
||||
)
|
||||
|
||||
func init() {
|
||||
Register("tka/affected-sigs", (*Handler).serveTKAAffectedSigs)
|
||||
Register("tka/cosign-recovery-aum", (*Handler).serveTKACosignRecoveryAUM)
|
||||
Register("tka/disable", (*Handler).serveTKADisable)
|
||||
Register("tka/force-local-disable", (*Handler).serveTKALocalDisable)
|
||||
Register("tka/generate-recovery-aum", (*Handler).serveTKAGenerateRecoveryAUM)
|
||||
Register("tka/init", (*Handler).serveTKAInit)
|
||||
Register("tka/log", (*Handler).serveTKALog)
|
||||
Register("tka/modify", (*Handler).serveTKAModify)
|
||||
Register("tka/sign", (*Handler).serveTKASign)
|
||||
Register("tka/status", (*Handler).serveTKAStatus)
|
||||
Register("tka/submit-recovery-aum", (*Handler).serveTKASubmitRecoveryAUM)
|
||||
Register("tka/verify-deeplink", (*Handler).serveTKAVerifySigningDeeplink)
|
||||
Register("tka/wrap-preauth-key", (*Handler).serveTKAWrapPreauthKey)
|
||||
}
|
||||
|
||||
func (h *Handler) serveTKAStatus(w http.ResponseWriter, r *http.Request) {
|
||||
if !h.PermitRead {
|
||||
http.Error(w, "lock status access denied", http.StatusForbidden)
|
||||
return
|
||||
}
|
||||
if r.Method != httpm.GET {
|
||||
http.Error(w, "use GET", http.StatusMethodNotAllowed)
|
||||
return
|
||||
}
|
||||
|
||||
j, err := json.MarshalIndent(h.b.NetworkLockStatus(), "", "\t")
|
||||
if err != nil {
|
||||
http.Error(w, "JSON encoding error", http.StatusInternalServerError)
|
||||
return
|
||||
}
|
||||
w.Header().Set("Content-Type", "application/json")
|
||||
w.Write(j)
|
||||
}
|
||||
|
||||
func (h *Handler) serveTKASign(w http.ResponseWriter, r *http.Request) {
|
||||
if !h.PermitWrite {
|
||||
http.Error(w, "lock sign access denied", http.StatusForbidden)
|
||||
return
|
||||
}
|
||||
if r.Method != httpm.POST {
|
||||
http.Error(w, "use POST", http.StatusMethodNotAllowed)
|
||||
return
|
||||
}
|
||||
|
||||
type signRequest struct {
|
||||
NodeKey key.NodePublic
|
||||
RotationPublic []byte
|
||||
}
|
||||
var req signRequest
|
||||
if err := json.NewDecoder(r.Body).Decode(&req); err != nil {
|
||||
http.Error(w, "invalid JSON body", http.StatusBadRequest)
|
||||
return
|
||||
}
|
||||
|
||||
if err := h.b.NetworkLockSign(req.NodeKey, req.RotationPublic); err != nil {
|
||||
http.Error(w, "signing failed: "+err.Error(), http.StatusInternalServerError)
|
||||
return
|
||||
}
|
||||
|
||||
w.WriteHeader(http.StatusOK)
|
||||
}
|
||||
|
||||
func (h *Handler) serveTKAInit(w http.ResponseWriter, r *http.Request) {
|
||||
if !h.PermitWrite {
|
||||
http.Error(w, "lock init access denied", http.StatusForbidden)
|
||||
return
|
||||
}
|
||||
if r.Method != httpm.POST {
|
||||
http.Error(w, "use POST", http.StatusMethodNotAllowed)
|
||||
return
|
||||
}
|
||||
|
||||
type initRequest struct {
|
||||
Keys []tka.Key
|
||||
DisablementValues [][]byte
|
||||
SupportDisablement []byte
|
||||
}
|
||||
var req initRequest
|
||||
if err := json.NewDecoder(r.Body).Decode(&req); err != nil {
|
||||
http.Error(w, "invalid JSON body", http.StatusBadRequest)
|
||||
return
|
||||
}
|
||||
|
||||
if !h.b.NetworkLockAllowed() {
|
||||
http.Error(w, "Tailnet Lock is not supported on your pricing plan", http.StatusForbidden)
|
||||
return
|
||||
}
|
||||
|
||||
if err := h.b.NetworkLockInit(req.Keys, req.DisablementValues, req.SupportDisablement); err != nil {
|
||||
http.Error(w, "initialization failed: "+err.Error(), http.StatusInternalServerError)
|
||||
return
|
||||
}
|
||||
|
||||
j, err := json.MarshalIndent(h.b.NetworkLockStatus(), "", "\t")
|
||||
if err != nil {
|
||||
http.Error(w, "JSON encoding error", http.StatusInternalServerError)
|
||||
return
|
||||
}
|
||||
w.Header().Set("Content-Type", "application/json")
|
||||
w.Write(j)
|
||||
}
|
||||
|
||||
func (h *Handler) serveTKAModify(w http.ResponseWriter, r *http.Request) {
|
||||
if !h.PermitWrite {
|
||||
http.Error(w, "network-lock modify access denied", http.StatusForbidden)
|
||||
return
|
||||
}
|
||||
if r.Method != httpm.POST {
|
||||
http.Error(w, "use POST", http.StatusMethodNotAllowed)
|
||||
return
|
||||
}
|
||||
|
||||
type modifyRequest struct {
|
||||
AddKeys []tka.Key
|
||||
RemoveKeys []tka.Key
|
||||
}
|
||||
var req modifyRequest
|
||||
if err := json.NewDecoder(r.Body).Decode(&req); err != nil {
|
||||
http.Error(w, "invalid JSON body", http.StatusBadRequest)
|
||||
return
|
||||
}
|
||||
|
||||
if err := h.b.NetworkLockModify(req.AddKeys, req.RemoveKeys); err != nil {
|
||||
http.Error(w, "network-lock modify failed: "+err.Error(), http.StatusInternalServerError)
|
||||
return
|
||||
}
|
||||
w.WriteHeader(204)
|
||||
}
|
||||
|
||||
func (h *Handler) serveTKAWrapPreauthKey(w http.ResponseWriter, r *http.Request) {
|
||||
if !h.PermitWrite {
|
||||
http.Error(w, "network-lock modify access denied", http.StatusForbidden)
|
||||
return
|
||||
}
|
||||
if r.Method != httpm.POST {
|
||||
http.Error(w, "use POST", http.StatusMethodNotAllowed)
|
||||
return
|
||||
}
|
||||
|
||||
type wrapRequest struct {
|
||||
TSKey string
|
||||
TKAKey string // key.NLPrivate.MarshalText
|
||||
}
|
||||
var req wrapRequest
|
||||
if err := json.NewDecoder(http.MaxBytesReader(w, r.Body, 12*1024)).Decode(&req); err != nil {
|
||||
http.Error(w, "invalid JSON body", http.StatusBadRequest)
|
||||
return
|
||||
}
|
||||
var priv key.NLPrivate
|
||||
if err := priv.UnmarshalText([]byte(req.TKAKey)); err != nil {
|
||||
http.Error(w, "invalid JSON body", http.StatusBadRequest)
|
||||
return
|
||||
}
|
||||
|
||||
wrappedKey, err := h.b.NetworkLockWrapPreauthKey(req.TSKey, priv)
|
||||
if err != nil {
|
||||
http.Error(w, err.Error(), http.StatusInternalServerError)
|
||||
return
|
||||
}
|
||||
w.WriteHeader(http.StatusOK)
|
||||
w.Write([]byte(wrappedKey))
|
||||
}
|
||||
|
||||
func (h *Handler) serveTKAVerifySigningDeeplink(w http.ResponseWriter, r *http.Request) {
|
||||
if !h.PermitRead {
|
||||
http.Error(w, "signing deeplink verification access denied", http.StatusForbidden)
|
||||
return
|
||||
}
|
||||
if r.Method != httpm.POST {
|
||||
http.Error(w, "use POST", http.StatusMethodNotAllowed)
|
||||
return
|
||||
}
|
||||
|
||||
type verifyRequest struct {
|
||||
URL string
|
||||
}
|
||||
var req verifyRequest
|
||||
if err := json.NewDecoder(r.Body).Decode(&req); err != nil {
|
||||
http.Error(w, "invalid JSON for verifyRequest body", http.StatusBadRequest)
|
||||
return
|
||||
}
|
||||
|
||||
res := h.b.NetworkLockVerifySigningDeeplink(req.URL)
|
||||
j, err := json.MarshalIndent(res, "", "\t")
|
||||
if err != nil {
|
||||
http.Error(w, "JSON encoding error", http.StatusInternalServerError)
|
||||
return
|
||||
}
|
||||
w.Header().Set("Content-Type", "application/json")
|
||||
w.Write(j)
|
||||
}
|
||||
|
||||
func (h *Handler) serveTKADisable(w http.ResponseWriter, r *http.Request) {
|
||||
if !h.PermitWrite {
|
||||
http.Error(w, "network-lock modify access denied", http.StatusForbidden)
|
||||
return
|
||||
}
|
||||
if r.Method != httpm.POST {
|
||||
http.Error(w, "use POST", http.StatusMethodNotAllowed)
|
||||
return
|
||||
}
|
||||
|
||||
body := io.LimitReader(r.Body, 1024*1024)
|
||||
secret, err := io.ReadAll(body)
|
||||
if err != nil {
|
||||
http.Error(w, "reading secret", http.StatusBadRequest)
|
||||
return
|
||||
}
|
||||
|
||||
if err := h.b.NetworkLockDisable(secret); err != nil {
|
||||
http.Error(w, "network-lock disable failed: "+err.Error(), http.StatusBadRequest)
|
||||
return
|
||||
}
|
||||
w.WriteHeader(http.StatusOK)
|
||||
}
|
||||
|
||||
func (h *Handler) serveTKALocalDisable(w http.ResponseWriter, r *http.Request) {
|
||||
if !h.PermitWrite {
|
||||
http.Error(w, "network-lock modify access denied", http.StatusForbidden)
|
||||
return
|
||||
}
|
||||
if r.Method != httpm.POST {
|
||||
http.Error(w, "use POST", http.StatusMethodNotAllowed)
|
||||
return
|
||||
}
|
||||
|
||||
// Require a JSON stanza for the body as an additional CSRF protection.
|
||||
var req struct{}
|
||||
if err := json.NewDecoder(r.Body).Decode(&req); err != nil {
|
||||
http.Error(w, "invalid JSON body", http.StatusBadRequest)
|
||||
return
|
||||
}
|
||||
|
||||
if err := h.b.NetworkLockForceLocalDisable(); err != nil {
|
||||
http.Error(w, "network-lock local disable failed: "+err.Error(), http.StatusBadRequest)
|
||||
return
|
||||
}
|
||||
w.WriteHeader(http.StatusOK)
|
||||
}
|
||||
|
||||
func (h *Handler) serveTKALog(w http.ResponseWriter, r *http.Request) {
|
||||
if r.Method != httpm.GET {
|
||||
http.Error(w, "use GET", http.StatusMethodNotAllowed)
|
||||
return
|
||||
}
|
||||
|
||||
limit := 50
|
||||
if limitStr := r.FormValue("limit"); limitStr != "" {
|
||||
lm, err := strconv.Atoi(limitStr)
|
||||
if err != nil {
|
||||
http.Error(w, "parsing 'limit' parameter: "+err.Error(), http.StatusBadRequest)
|
||||
return
|
||||
}
|
||||
limit = int(lm)
|
||||
}
|
||||
|
||||
updates, err := h.b.NetworkLockLog(limit)
|
||||
if err != nil {
|
||||
http.Error(w, "reading log failed: "+err.Error(), http.StatusInternalServerError)
|
||||
return
|
||||
}
|
||||
|
||||
j, err := json.MarshalIndent(updates, "", "\t")
|
||||
if err != nil {
|
||||
http.Error(w, "JSON encoding error", http.StatusInternalServerError)
|
||||
return
|
||||
}
|
||||
w.Header().Set("Content-Type", "application/json")
|
||||
w.Write(j)
|
||||
}
|
||||
|
||||
func (h *Handler) serveTKAAffectedSigs(w http.ResponseWriter, r *http.Request) {
|
||||
if r.Method != httpm.POST {
|
||||
http.Error(w, "use POST", http.StatusMethodNotAllowed)
|
||||
return
|
||||
}
|
||||
keyID, err := io.ReadAll(http.MaxBytesReader(w, r.Body, 2048))
|
||||
if err != nil {
|
||||
http.Error(w, "reading body", http.StatusBadRequest)
|
||||
return
|
||||
}
|
||||
|
||||
sigs, err := h.b.NetworkLockAffectedSigs(keyID)
|
||||
if err != nil {
|
||||
http.Error(w, err.Error(), http.StatusInternalServerError)
|
||||
return
|
||||
}
|
||||
|
||||
j, err := json.MarshalIndent(sigs, "", "\t")
|
||||
if err != nil {
|
||||
http.Error(w, "JSON encoding error", http.StatusInternalServerError)
|
||||
return
|
||||
}
|
||||
w.Header().Set("Content-Type", "application/json")
|
||||
w.Write(j)
|
||||
}
|
||||
|
||||
func (h *Handler) serveTKAGenerateRecoveryAUM(w http.ResponseWriter, r *http.Request) {
|
||||
if !h.PermitWrite {
|
||||
http.Error(w, "access denied", http.StatusForbidden)
|
||||
return
|
||||
}
|
||||
if r.Method != httpm.POST {
|
||||
http.Error(w, "use POST", http.StatusMethodNotAllowed)
|
||||
return
|
||||
}
|
||||
|
||||
type verifyRequest struct {
|
||||
Keys []tkatype.KeyID
|
||||
ForkFrom string
|
||||
}
|
||||
var req verifyRequest
|
||||
if err := json.NewDecoder(r.Body).Decode(&req); err != nil {
|
||||
http.Error(w, "invalid JSON for verifyRequest body", http.StatusBadRequest)
|
||||
return
|
||||
}
|
||||
|
||||
var forkFrom tka.AUMHash
|
||||
if req.ForkFrom != "" {
|
||||
if err := forkFrom.UnmarshalText([]byte(req.ForkFrom)); err != nil {
|
||||
http.Error(w, "decoding fork-from: "+err.Error(), http.StatusBadRequest)
|
||||
return
|
||||
}
|
||||
}
|
||||
|
||||
res, err := h.b.NetworkLockGenerateRecoveryAUM(req.Keys, forkFrom)
|
||||
if err != nil {
|
||||
http.Error(w, err.Error(), http.StatusInternalServerError)
|
||||
return
|
||||
}
|
||||
w.Header().Set("Content-Type", "application/octet-stream")
|
||||
w.Write(res.Serialize())
|
||||
}
|
||||
|
||||
func (h *Handler) serveTKACosignRecoveryAUM(w http.ResponseWriter, r *http.Request) {
|
||||
if !h.PermitWrite {
|
||||
http.Error(w, "access denied", http.StatusForbidden)
|
||||
return
|
||||
}
|
||||
if r.Method != httpm.POST {
|
||||
http.Error(w, "use POST", http.StatusMethodNotAllowed)
|
||||
return
|
||||
}
|
||||
|
||||
body := io.LimitReader(r.Body, 1024*1024)
|
||||
aumBytes, err := io.ReadAll(body)
|
||||
if err != nil {
|
||||
http.Error(w, "reading AUM", http.StatusBadRequest)
|
||||
return
|
||||
}
|
||||
var aum tka.AUM
|
||||
if err := aum.Unserialize(aumBytes); err != nil {
|
||||
http.Error(w, "decoding AUM", http.StatusBadRequest)
|
||||
return
|
||||
}
|
||||
|
||||
res, err := h.b.NetworkLockCosignRecoveryAUM(&aum)
|
||||
if err != nil {
|
||||
http.Error(w, err.Error(), http.StatusInternalServerError)
|
||||
return
|
||||
}
|
||||
w.Header().Set("Content-Type", "application/octet-stream")
|
||||
w.Write(res.Serialize())
|
||||
}
|
||||
|
||||
func (h *Handler) serveTKASubmitRecoveryAUM(w http.ResponseWriter, r *http.Request) {
|
||||
if !h.PermitWrite {
|
||||
http.Error(w, "access denied", http.StatusForbidden)
|
||||
return
|
||||
}
|
||||
if r.Method != httpm.POST {
|
||||
http.Error(w, "use POST", http.StatusMethodNotAllowed)
|
||||
return
|
||||
}
|
||||
|
||||
body := io.LimitReader(r.Body, 1024*1024)
|
||||
aumBytes, err := io.ReadAll(body)
|
||||
if err != nil {
|
||||
http.Error(w, "reading AUM", http.StatusBadRequest)
|
||||
return
|
||||
}
|
||||
var aum tka.AUM
|
||||
if err := aum.Unserialize(aumBytes); err != nil {
|
||||
http.Error(w, "decoding AUM", http.StatusBadRequest)
|
||||
return
|
||||
}
|
||||
|
||||
if err := h.b.NetworkLockSubmitRecoveryAUM(&aum); err != nil {
|
||||
http.Error(w, err.Error(), http.StatusInternalServerError)
|
||||
return
|
||||
}
|
||||
w.WriteHeader(http.StatusOK)
|
||||
}
|
||||
47
vendor/tailscale.com/ipn/policy/policy.go
generated
vendored
47
vendor/tailscale.com/ipn/policy/policy.go
generated
vendored
@@ -1,47 +0,0 @@
|
||||
// Copyright (c) Tailscale Inc & AUTHORS
|
||||
// SPDX-License-Identifier: BSD-3-Clause
|
||||
|
||||
// Package policy contains various policy decisions that need to be
|
||||
// shared between the node client & control server.
|
||||
package policy
|
||||
|
||||
import (
|
||||
"tailscale.com/tailcfg"
|
||||
)
|
||||
|
||||
// IsInterestingService reports whether service s on the given operating
|
||||
// system (a version.OS value) is an interesting enough port to report
|
||||
// to our peer nodes for discovery purposes.
|
||||
func IsInterestingService(s tailcfg.Service, os string) bool {
|
||||
switch s.Proto {
|
||||
case tailcfg.PeerAPI4, tailcfg.PeerAPI6, tailcfg.PeerAPIDNS:
|
||||
return true
|
||||
}
|
||||
if s.Proto != tailcfg.TCP {
|
||||
return false
|
||||
}
|
||||
if os != "windows" {
|
||||
// For non-Windows machines, assume all TCP listeners
|
||||
// are interesting enough. We don't see listener spam
|
||||
// there.
|
||||
return true
|
||||
}
|
||||
// Windows has tons of TCP listeners. We need to move to a denylist
|
||||
// model later, but for now we just allow some common ones:
|
||||
switch s.Port {
|
||||
case 22, // ssh
|
||||
80, // http
|
||||
443, // https (but no hostname, so little useless)
|
||||
3389, // rdp
|
||||
5900, // vnc
|
||||
32400, // plex
|
||||
|
||||
// And now some arbitrary HTTP dev server ports:
|
||||
// Eventually we'll remove this and make all ports
|
||||
// work, once we nicely filter away noisy system
|
||||
// ports.
|
||||
8000, 8080, 8443, 8888:
|
||||
return true
|
||||
}
|
||||
return false
|
||||
}
|
||||
354
vendor/tailscale.com/ipn/prefs.go
generated
vendored
354
vendor/tailscale.com/ipn/prefs.go
generated
vendored
@@ -5,6 +5,7 @@ package ipn
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"cmp"
|
||||
"encoding/json"
|
||||
"errors"
|
||||
"fmt"
|
||||
@@ -19,6 +20,7 @@ import (
|
||||
|
||||
"tailscale.com/atomicfile"
|
||||
"tailscale.com/drive"
|
||||
"tailscale.com/feature/buildfeatures"
|
||||
"tailscale.com/ipn/ipnstate"
|
||||
"tailscale.com/net/netaddr"
|
||||
"tailscale.com/net/tsaddr"
|
||||
@@ -28,7 +30,9 @@ import (
|
||||
"tailscale.com/types/preftype"
|
||||
"tailscale.com/types/views"
|
||||
"tailscale.com/util/dnsname"
|
||||
"tailscale.com/util/syspolicy"
|
||||
"tailscale.com/util/syspolicy/pkey"
|
||||
"tailscale.com/util/syspolicy/policyclient"
|
||||
"tailscale.com/version"
|
||||
)
|
||||
|
||||
// DefaultControlURL is the URL base of the control plane
|
||||
@@ -93,6 +97,25 @@ type Prefs struct {
|
||||
ExitNodeID tailcfg.StableNodeID
|
||||
ExitNodeIP netip.Addr
|
||||
|
||||
// AutoExitNode is an optional expression that specifies whether and how
|
||||
// tailscaled should pick an exit node automatically.
|
||||
//
|
||||
// If specified, tailscaled will use an exit node based on the expression,
|
||||
// and will re-evaluate the selection periodically as network conditions,
|
||||
// available exit nodes, or policy settings change. A blackhole route will
|
||||
// be installed to prevent traffic from escaping to the local network until
|
||||
// an exit node is selected. It takes precedence over ExitNodeID and ExitNodeIP.
|
||||
//
|
||||
// If empty, tailscaled will not automatically select an exit node.
|
||||
//
|
||||
// If the specified expression is invalid or unsupported by the client,
|
||||
// it falls back to the behavior of [AnyExitNode].
|
||||
//
|
||||
// As of 2025-07-02, the only supported value is [AnyExitNode].
|
||||
// It's a string rather than a boolean to allow future extensibility
|
||||
// (e.g., AutoExitNode = "mullvad" or AutoExitNode = "geo:us").
|
||||
AutoExitNode ExitNodeExpression `json:",omitempty"`
|
||||
|
||||
// InternalExitNodePrior is the most recently used ExitNodeID in string form. It is set by
|
||||
// the backend on transition from exit node on to off and used by the
|
||||
// backend.
|
||||
@@ -138,11 +161,10 @@ type Prefs struct {
|
||||
// connections. This overrides tailcfg.Hostinfo's ShieldsUp.
|
||||
ShieldsUp bool
|
||||
|
||||
// AdvertiseTags specifies groups that this node wants to join, for
|
||||
// purposes of ACL enforcement. These can be referenced from the ACL
|
||||
// security policy. Note that advertising a tag doesn't guarantee that
|
||||
// the control server will allow you to take on the rights for that
|
||||
// tag.
|
||||
// AdvertiseTags specifies tags that should be applied to this node, for
|
||||
// purposes of ACL enforcement. These can be referenced from the ACL policy
|
||||
// document. Note that advertising a tag on the client doesn't guarantee
|
||||
// that the control server will allow the node to adopt that tag.
|
||||
AdvertiseTags []string
|
||||
|
||||
// Hostname is the hostname to use for identifying the node. If
|
||||
@@ -185,6 +207,12 @@ type Prefs struct {
|
||||
// control server.
|
||||
AdvertiseServices []string
|
||||
|
||||
// Sync is whether this node should sync its configuration from
|
||||
// the control plane. If unset, this defaults to true.
|
||||
// This exists primarily for testing, to verify that netmap caching
|
||||
// and offline operation work correctly.
|
||||
Sync opt.Bool
|
||||
|
||||
// NoSNAT specifies whether to source NAT traffic going to
|
||||
// destinations in AdvertiseRoutes. The default is to apply source
|
||||
// NAT, which makes the traffic appear to come from the router
|
||||
@@ -234,10 +262,17 @@ type Prefs struct {
|
||||
|
||||
// PostureChecking enables the collection of information used for device
|
||||
// posture checks.
|
||||
//
|
||||
// Note: this should be named ReportPosture, but it was shipped as
|
||||
// PostureChecking in some early releases and this JSON field is written to
|
||||
// disk, so we just keep its old name. (akin to CorpDNS which is an internal
|
||||
// pref name that doesn't match the public interface)
|
||||
PostureChecking bool
|
||||
|
||||
// NetfilterKind specifies what netfilter implementation to use.
|
||||
//
|
||||
// It can be "iptables", "nftables", or "" to auto-detect.
|
||||
//
|
||||
// Linux-only.
|
||||
NetfilterKind string
|
||||
|
||||
@@ -245,9 +280,20 @@ type Prefs struct {
|
||||
// by name.
|
||||
DriveShares []*drive.Share
|
||||
|
||||
// RelayServerPort is the UDP port number for the relay server to bind to,
|
||||
// on all interfaces. A non-nil zero value signifies a random unused port
|
||||
// should be used. A nil value signifies relay server functionality
|
||||
// should be disabled.
|
||||
RelayServerPort *uint16 `json:",omitempty"`
|
||||
|
||||
// RelayServerStaticEndpoints are static IP:port endpoints to advertise as
|
||||
// candidates for relay connections. Only relevant when RelayServerPort is
|
||||
// non-nil.
|
||||
RelayServerStaticEndpoints []netip.AddrPort `json:",omitempty"`
|
||||
|
||||
// AllowSingleHosts was a legacy field that was always true
|
||||
// for the past 4.5 years. It controlled whether Tailscale
|
||||
// peers got /32 or /127 routes for each other.
|
||||
// peers got /32 or /128 routes for each other.
|
||||
// As of 2024-05-17 we're starting to ignore it, but to let
|
||||
// people still downgrade Tailscale versions and not break
|
||||
// all peer-to-peer networking we still write it to disk (as JSON)
|
||||
@@ -307,35 +353,39 @@ type AppConnectorPrefs struct {
|
||||
type MaskedPrefs struct {
|
||||
Prefs
|
||||
|
||||
ControlURLSet bool `json:",omitempty"`
|
||||
RouteAllSet bool `json:",omitempty"`
|
||||
ExitNodeIDSet bool `json:",omitempty"`
|
||||
ExitNodeIPSet bool `json:",omitempty"`
|
||||
InternalExitNodePriorSet bool `json:",omitempty"` // Internal; can't be set by LocalAPI clients
|
||||
ExitNodeAllowLANAccessSet bool `json:",omitempty"`
|
||||
CorpDNSSet bool `json:",omitempty"`
|
||||
RunSSHSet bool `json:",omitempty"`
|
||||
RunWebClientSet bool `json:",omitempty"`
|
||||
WantRunningSet bool `json:",omitempty"`
|
||||
LoggedOutSet bool `json:",omitempty"`
|
||||
ShieldsUpSet bool `json:",omitempty"`
|
||||
AdvertiseTagsSet bool `json:",omitempty"`
|
||||
HostnameSet bool `json:",omitempty"`
|
||||
NotepadURLsSet bool `json:",omitempty"`
|
||||
ForceDaemonSet bool `json:",omitempty"`
|
||||
EggSet bool `json:",omitempty"`
|
||||
AdvertiseRoutesSet bool `json:",omitempty"`
|
||||
AdvertiseServicesSet bool `json:",omitempty"`
|
||||
NoSNATSet bool `json:",omitempty"`
|
||||
NoStatefulFilteringSet bool `json:",omitempty"`
|
||||
NetfilterModeSet bool `json:",omitempty"`
|
||||
OperatorUserSet bool `json:",omitempty"`
|
||||
ProfileNameSet bool `json:",omitempty"`
|
||||
AutoUpdateSet AutoUpdatePrefsMask `json:",omitempty"`
|
||||
AppConnectorSet bool `json:",omitempty"`
|
||||
PostureCheckingSet bool `json:",omitempty"`
|
||||
NetfilterKindSet bool `json:",omitempty"`
|
||||
DriveSharesSet bool `json:",omitempty"`
|
||||
ControlURLSet bool `json:",omitempty"`
|
||||
RouteAllSet bool `json:",omitempty"`
|
||||
ExitNodeIDSet bool `json:",omitempty"`
|
||||
ExitNodeIPSet bool `json:",omitempty"`
|
||||
AutoExitNodeSet bool `json:",omitempty"`
|
||||
InternalExitNodePriorSet bool `json:",omitempty"` // Internal; can't be set by LocalAPI clients
|
||||
ExitNodeAllowLANAccessSet bool `json:",omitempty"`
|
||||
CorpDNSSet bool `json:",omitempty"`
|
||||
RunSSHSet bool `json:",omitempty"`
|
||||
RunWebClientSet bool `json:",omitempty"`
|
||||
WantRunningSet bool `json:",omitempty"`
|
||||
LoggedOutSet bool `json:",omitempty"`
|
||||
ShieldsUpSet bool `json:",omitempty"`
|
||||
AdvertiseTagsSet bool `json:",omitempty"`
|
||||
HostnameSet bool `json:",omitempty"`
|
||||
NotepadURLsSet bool `json:",omitempty"`
|
||||
ForceDaemonSet bool `json:",omitempty"`
|
||||
EggSet bool `json:",omitempty"`
|
||||
AdvertiseRoutesSet bool `json:",omitempty"`
|
||||
AdvertiseServicesSet bool `json:",omitempty"`
|
||||
SyncSet bool `json:",omitzero"`
|
||||
NoSNATSet bool `json:",omitempty"`
|
||||
NoStatefulFilteringSet bool `json:",omitempty"`
|
||||
NetfilterModeSet bool `json:",omitempty"`
|
||||
OperatorUserSet bool `json:",omitempty"`
|
||||
ProfileNameSet bool `json:",omitempty"`
|
||||
AutoUpdateSet AutoUpdatePrefsMask `json:",omitzero"`
|
||||
AppConnectorSet bool `json:",omitempty"`
|
||||
PostureCheckingSet bool `json:",omitempty"`
|
||||
NetfilterKindSet bool `json:",omitempty"`
|
||||
DriveSharesSet bool `json:",omitempty"`
|
||||
RelayServerPortSet bool `json:",omitempty"`
|
||||
RelayServerStaticEndpointsSet bool `json:",omitzero"`
|
||||
}
|
||||
|
||||
// SetsInternal reports whether mp has any of the Internal*Set field bools set
|
||||
@@ -493,17 +543,24 @@ func (p *Prefs) Pretty() string { return p.pretty(runtime.GOOS) }
|
||||
func (p *Prefs) pretty(goos string) string {
|
||||
var sb strings.Builder
|
||||
sb.WriteString("Prefs{")
|
||||
fmt.Fprintf(&sb, "ra=%v ", p.RouteAll)
|
||||
fmt.Fprintf(&sb, "dns=%v want=%v ", p.CorpDNS, p.WantRunning)
|
||||
if p.RunSSH {
|
||||
if buildfeatures.HasUseRoutes {
|
||||
fmt.Fprintf(&sb, "ra=%v ", p.RouteAll)
|
||||
}
|
||||
if buildfeatures.HasDNS {
|
||||
fmt.Fprintf(&sb, "dns=%v want=%v ", p.CorpDNS, p.WantRunning)
|
||||
}
|
||||
if buildfeatures.HasSSH && p.RunSSH {
|
||||
sb.WriteString("ssh=true ")
|
||||
}
|
||||
if p.RunWebClient {
|
||||
if buildfeatures.HasWebClient && p.RunWebClient {
|
||||
sb.WriteString("webclient=true ")
|
||||
}
|
||||
if p.LoggedOut {
|
||||
sb.WriteString("loggedout=true ")
|
||||
}
|
||||
if p.Sync.EqualBool(false) {
|
||||
sb.WriteString("sync=false ")
|
||||
}
|
||||
if p.ForceDaemon {
|
||||
sb.WriteString("server=true ")
|
||||
}
|
||||
@@ -513,23 +570,30 @@ func (p *Prefs) pretty(goos string) string {
|
||||
if p.ShieldsUp {
|
||||
sb.WriteString("shields=true ")
|
||||
}
|
||||
if p.ExitNodeIP.IsValid() {
|
||||
fmt.Fprintf(&sb, "exit=%v lan=%t ", p.ExitNodeIP, p.ExitNodeAllowLANAccess)
|
||||
} else if !p.ExitNodeID.IsZero() {
|
||||
fmt.Fprintf(&sb, "exit=%v lan=%t ", p.ExitNodeID, p.ExitNodeAllowLANAccess)
|
||||
if buildfeatures.HasUseExitNode {
|
||||
if p.ExitNodeIP.IsValid() {
|
||||
fmt.Fprintf(&sb, "exit=%v lan=%t ", p.ExitNodeIP, p.ExitNodeAllowLANAccess)
|
||||
} else if !p.ExitNodeID.IsZero() {
|
||||
fmt.Fprintf(&sb, "exit=%v lan=%t ", p.ExitNodeID, p.ExitNodeAllowLANAccess)
|
||||
}
|
||||
if p.AutoExitNode.IsSet() {
|
||||
fmt.Fprintf(&sb, "auto=%v ", p.AutoExitNode)
|
||||
}
|
||||
}
|
||||
if len(p.AdvertiseRoutes) > 0 || goos == "linux" {
|
||||
fmt.Fprintf(&sb, "routes=%v ", p.AdvertiseRoutes)
|
||||
}
|
||||
if len(p.AdvertiseRoutes) > 0 || p.NoSNAT {
|
||||
fmt.Fprintf(&sb, "snat=%v ", !p.NoSNAT)
|
||||
}
|
||||
if len(p.AdvertiseRoutes) > 0 || p.NoStatefulFiltering.EqualBool(true) {
|
||||
// Only print if we're advertising any routes, or the user has
|
||||
// turned off stateful filtering (NoStatefulFiltering=true ⇒
|
||||
// StatefulFiltering=false).
|
||||
bb, _ := p.NoStatefulFiltering.Get()
|
||||
fmt.Fprintf(&sb, "statefulFiltering=%v ", !bb)
|
||||
if buildfeatures.HasAdvertiseRoutes {
|
||||
if len(p.AdvertiseRoutes) > 0 || goos == "linux" {
|
||||
fmt.Fprintf(&sb, "routes=%v ", p.AdvertiseRoutes)
|
||||
}
|
||||
if len(p.AdvertiseRoutes) > 0 || p.NoSNAT {
|
||||
fmt.Fprintf(&sb, "snat=%v ", !p.NoSNAT)
|
||||
}
|
||||
if len(p.AdvertiseRoutes) > 0 || p.NoStatefulFiltering.EqualBool(true) {
|
||||
// Only print if we're advertising any routes, or the user has
|
||||
// turned off stateful filtering (NoStatefulFiltering=true ⇒
|
||||
// StatefulFiltering=false).
|
||||
bb, _ := p.NoStatefulFiltering.Get()
|
||||
fmt.Fprintf(&sb, "statefulFiltering=%v ", !bb)
|
||||
}
|
||||
}
|
||||
if len(p.AdvertiseTags) > 0 {
|
||||
fmt.Fprintf(&sb, "tags=%s ", strings.Join(p.AdvertiseTags, ","))
|
||||
@@ -552,8 +616,18 @@ func (p *Prefs) pretty(goos string) string {
|
||||
if p.NetfilterKind != "" {
|
||||
fmt.Fprintf(&sb, "netfilterKind=%s ", p.NetfilterKind)
|
||||
}
|
||||
sb.WriteString(p.AutoUpdate.Pretty())
|
||||
sb.WriteString(p.AppConnector.Pretty())
|
||||
if buildfeatures.HasClientUpdate {
|
||||
sb.WriteString(p.AutoUpdate.Pretty())
|
||||
}
|
||||
if buildfeatures.HasAppConnectors {
|
||||
sb.WriteString(p.AppConnector.Pretty())
|
||||
}
|
||||
if buildfeatures.HasRelayServer && p.RelayServerPort != nil {
|
||||
fmt.Fprintf(&sb, "relayServerPort=%d ", *p.RelayServerPort)
|
||||
}
|
||||
if buildfeatures.HasRelayServer && len(p.RelayServerStaticEndpoints) > 0 {
|
||||
fmt.Fprintf(&sb, "relayServerStaticEndpoints=%v ", p.RelayServerStaticEndpoints)
|
||||
}
|
||||
if p.Persist != nil {
|
||||
sb.WriteString(p.Persist.Pretty())
|
||||
} else {
|
||||
@@ -580,7 +654,7 @@ func (p PrefsView) Equals(p2 PrefsView) bool {
|
||||
}
|
||||
|
||||
func (p *Prefs) Equals(p2 *Prefs) bool {
|
||||
if p == nil && p2 == nil {
|
||||
if p == p2 {
|
||||
return true
|
||||
}
|
||||
if p == nil || p2 == nil {
|
||||
@@ -591,10 +665,12 @@ func (p *Prefs) Equals(p2 *Prefs) bool {
|
||||
p.RouteAll == p2.RouteAll &&
|
||||
p.ExitNodeID == p2.ExitNodeID &&
|
||||
p.ExitNodeIP == p2.ExitNodeIP &&
|
||||
p.AutoExitNode == p2.AutoExitNode &&
|
||||
p.InternalExitNodePrior == p2.InternalExitNodePrior &&
|
||||
p.ExitNodeAllowLANAccess == p2.ExitNodeAllowLANAccess &&
|
||||
p.CorpDNS == p2.CorpDNS &&
|
||||
p.RunSSH == p2.RunSSH &&
|
||||
p.Sync.Normalized() == p2.Sync.Normalized() &&
|
||||
p.RunWebClient == p2.RunWebClient &&
|
||||
p.WantRunning == p2.WantRunning &&
|
||||
p.LoggedOut == p2.LoggedOut &&
|
||||
@@ -606,16 +682,18 @@ func (p *Prefs) Equals(p2 *Prefs) bool {
|
||||
p.OperatorUser == p2.OperatorUser &&
|
||||
p.Hostname == p2.Hostname &&
|
||||
p.ForceDaemon == p2.ForceDaemon &&
|
||||
compareIPNets(p.AdvertiseRoutes, p2.AdvertiseRoutes) &&
|
||||
compareStrings(p.AdvertiseTags, p2.AdvertiseTags) &&
|
||||
compareStrings(p.AdvertiseServices, p2.AdvertiseServices) &&
|
||||
slices.Equal(p.AdvertiseRoutes, p2.AdvertiseRoutes) &&
|
||||
slices.Equal(p.AdvertiseTags, p2.AdvertiseTags) &&
|
||||
slices.Equal(p.AdvertiseServices, p2.AdvertiseServices) &&
|
||||
p.Persist.Equals(p2.Persist) &&
|
||||
p.ProfileName == p2.ProfileName &&
|
||||
p.AutoUpdate.Equals(p2.AutoUpdate) &&
|
||||
p.AppConnector == p2.AppConnector &&
|
||||
p.PostureChecking == p2.PostureChecking &&
|
||||
slices.EqualFunc(p.DriveShares, p2.DriveShares, drive.SharesEqual) &&
|
||||
p.NetfilterKind == p2.NetfilterKind
|
||||
p.NetfilterKind == p2.NetfilterKind &&
|
||||
compareUint16Ptrs(p.RelayServerPort, p2.RelayServerPort) &&
|
||||
slices.Equal(p.RelayServerStaticEndpoints, p2.RelayServerStaticEndpoints)
|
||||
}
|
||||
|
||||
func (au AutoUpdatePrefs) Pretty() string {
|
||||
@@ -635,28 +713,14 @@ func (ap AppConnectorPrefs) Pretty() string {
|
||||
return ""
|
||||
}
|
||||
|
||||
func compareIPNets(a, b []netip.Prefix) bool {
|
||||
if len(a) != len(b) {
|
||||
func compareUint16Ptrs(a, b *uint16) bool {
|
||||
if (a == nil) != (b == nil) {
|
||||
return false
|
||||
}
|
||||
for i := range a {
|
||||
if a[i] != b[i] {
|
||||
return false
|
||||
}
|
||||
if a == nil {
|
||||
return true
|
||||
}
|
||||
return true
|
||||
}
|
||||
|
||||
func compareStrings(a, b []string) bool {
|
||||
if len(a) != len(b) {
|
||||
return false
|
||||
}
|
||||
for i := range a {
|
||||
if a[i] != b[i] {
|
||||
return false
|
||||
}
|
||||
}
|
||||
return true
|
||||
return *a == *b
|
||||
}
|
||||
|
||||
// NewPrefs returns the default preferences to use.
|
||||
@@ -664,7 +728,8 @@ func NewPrefs() *Prefs {
|
||||
// Provide default values for options which might be missing
|
||||
// from the json data for any reason. The json can still
|
||||
// override them to false.
|
||||
return &Prefs{
|
||||
|
||||
p := &Prefs{
|
||||
// ControlURL is explicitly not set to signal that
|
||||
// it's not yet configured, which relaxes the CLI "up"
|
||||
// safety net features. It will get set to DefaultControlURL
|
||||
@@ -672,7 +737,6 @@ func NewPrefs() *Prefs {
|
||||
// later anyway.
|
||||
ControlURL: "",
|
||||
|
||||
RouteAll: true,
|
||||
CorpDNS: true,
|
||||
WantRunning: false,
|
||||
NetfilterMode: preftype.NetfilterOn,
|
||||
@@ -682,22 +746,24 @@ func NewPrefs() *Prefs {
|
||||
Apply: opt.Bool("unset"),
|
||||
},
|
||||
}
|
||||
p.RouteAll = p.DefaultRouteAll(runtime.GOOS)
|
||||
return p
|
||||
}
|
||||
|
||||
// ControlURLOrDefault returns the coordination server's URL base.
|
||||
//
|
||||
// If not configured, or if the configured value is a legacy name equivalent to
|
||||
// the default, then DefaultControlURL is returned instead.
|
||||
func (p PrefsView) ControlURLOrDefault() string {
|
||||
return p.ж.ControlURLOrDefault()
|
||||
func (p PrefsView) ControlURLOrDefault(polc policyclient.Client) string {
|
||||
return p.ж.ControlURLOrDefault(polc)
|
||||
}
|
||||
|
||||
// ControlURLOrDefault returns the coordination server's URL base.
|
||||
//
|
||||
// If not configured, or if the configured value is a legacy name equivalent to
|
||||
// the default, then DefaultControlURL is returned instead.
|
||||
func (p *Prefs) ControlURLOrDefault() string {
|
||||
controlURL, err := syspolicy.GetString(syspolicy.ControlURL, p.ControlURL)
|
||||
func (p *Prefs) ControlURLOrDefault(polc policyclient.Client) string {
|
||||
controlURL, err := polc.GetString(pkey.ControlURL, p.ControlURL)
|
||||
if err != nil {
|
||||
controlURL = p.ControlURL
|
||||
}
|
||||
@@ -711,12 +777,26 @@ func (p *Prefs) ControlURLOrDefault() string {
|
||||
return DefaultControlURL
|
||||
}
|
||||
|
||||
// AdminPageURL returns the admin web site URL for the current ControlURL.
|
||||
func (p PrefsView) AdminPageURL() string { return p.ж.AdminPageURL() }
|
||||
// DefaultRouteAll returns the default value of [Prefs.RouteAll] as a function
|
||||
// of the platform it's running on.
|
||||
func (p *Prefs) DefaultRouteAll(goos string) bool {
|
||||
switch goos {
|
||||
case "windows", "android", "ios":
|
||||
return true
|
||||
case "darwin":
|
||||
// Only true for macAppStore and macsys, false for darwin tailscaled.
|
||||
return version.IsSandboxedMacOS()
|
||||
default:
|
||||
return false
|
||||
}
|
||||
}
|
||||
|
||||
// AdminPageURL returns the admin web site URL for the current ControlURL.
|
||||
func (p *Prefs) AdminPageURL() string {
|
||||
url := p.ControlURLOrDefault()
|
||||
func (p PrefsView) AdminPageURL(polc policyclient.Client) string { return p.ж.AdminPageURL(polc) }
|
||||
|
||||
// AdminPageURL returns the admin web site URL for the current ControlURL.
|
||||
func (p *Prefs) AdminPageURL(polc policyclient.Client) string {
|
||||
url := p.ControlURLOrDefault(polc)
|
||||
if IsLoginServerSynonym(url) {
|
||||
// TODO(crawshaw): In future release, make this https://console.tailscale.com
|
||||
url = "https://login.tailscale.com"
|
||||
@@ -740,6 +820,9 @@ func (p *Prefs) AdvertisesExitNode() bool {
|
||||
// SetAdvertiseExitNode mutates p (if non-nil) to add or remove the two
|
||||
// /0 exit node routes.
|
||||
func (p *Prefs) SetAdvertiseExitNode(runExit bool) {
|
||||
if !buildfeatures.HasAdvertiseExitNode {
|
||||
return
|
||||
}
|
||||
if p == nil {
|
||||
return
|
||||
}
|
||||
@@ -784,6 +867,7 @@ func isRemoteIP(st *ipnstate.Status, ip netip.Addr) bool {
|
||||
func (p *Prefs) ClearExitNode() {
|
||||
p.ExitNodeID = ""
|
||||
p.ExitNodeIP = netip.Addr{}
|
||||
p.AutoExitNode = ""
|
||||
}
|
||||
|
||||
// ExitNodeLocalIPError is returned when the requested IP address for an exit
|
||||
@@ -802,6 +886,9 @@ func exitNodeIPOfArg(s string, st *ipnstate.Status) (ip netip.Addr, err error) {
|
||||
}
|
||||
ip, err = netip.ParseAddr(s)
|
||||
if err == nil {
|
||||
if !isRemoteIP(st, ip) {
|
||||
return ip, ExitNodeLocalIPError{s}
|
||||
}
|
||||
// If we're online already and have a netmap, double check that the IP
|
||||
// address specified is valid.
|
||||
if st.BackendState == "Running" {
|
||||
@@ -813,9 +900,6 @@ func exitNodeIPOfArg(s string, st *ipnstate.Status) (ip netip.Addr, err error) {
|
||||
return ip, fmt.Errorf("node %v is not advertising an exit node", ip)
|
||||
}
|
||||
}
|
||||
if !isRemoteIP(st, ip) {
|
||||
return ip, ExitNodeLocalIPError{s}
|
||||
}
|
||||
return ip, nil
|
||||
}
|
||||
match := 0
|
||||
@@ -891,10 +975,15 @@ func PrefsFromBytes(b []byte, base *Prefs) error {
|
||||
if len(b) == 0 {
|
||||
return nil
|
||||
}
|
||||
|
||||
return json.Unmarshal(b, base)
|
||||
}
|
||||
|
||||
func (p *Prefs) normalizeOptBools() {
|
||||
if p.Sync == opt.ExplicitlyUnset {
|
||||
p.Sync = ""
|
||||
}
|
||||
}
|
||||
|
||||
var jsonEscapedZero = []byte(`\u0000`)
|
||||
|
||||
// LoadPrefsWindows loads a legacy relaynode config file into Prefs with
|
||||
@@ -943,6 +1032,7 @@ type WindowsUserID string
|
||||
type NetworkProfile struct {
|
||||
MagicDNSName string
|
||||
DomainName string
|
||||
DisplayName string
|
||||
}
|
||||
|
||||
// RequiresBackfill returns whether this object does not have all the data
|
||||
@@ -955,6 +1045,13 @@ func (n NetworkProfile) RequiresBackfill() bool {
|
||||
return n == NetworkProfile{}
|
||||
}
|
||||
|
||||
// DisplayNameOrDefault will always return a non-empty string.
|
||||
// If there is a defined display name, it will return that.
|
||||
// If they did not it will default to their domain name.
|
||||
func (n NetworkProfile) DisplayNameOrDefault() string {
|
||||
return cmp.Or(n.DisplayName, n.DomainName)
|
||||
}
|
||||
|
||||
// LoginProfile represents a single login profile as managed
|
||||
// by the ProfileManager.
|
||||
type LoginProfile struct {
|
||||
@@ -1000,3 +1097,68 @@ type LoginProfile struct {
|
||||
// into.
|
||||
ControlURL string
|
||||
}
|
||||
|
||||
// Equals reports whether p and p2 are equal.
|
||||
func (p LoginProfileView) Equals(p2 LoginProfileView) bool {
|
||||
return p.ж.Equals(p2.ж)
|
||||
}
|
||||
|
||||
// Equals reports whether p and p2 are equal.
|
||||
func (p *LoginProfile) Equals(p2 *LoginProfile) bool {
|
||||
if p == p2 {
|
||||
return true
|
||||
}
|
||||
if p == nil || p2 == nil {
|
||||
return false
|
||||
}
|
||||
return p.ID == p2.ID &&
|
||||
p.Name == p2.Name &&
|
||||
p.NetworkProfile == p2.NetworkProfile &&
|
||||
p.Key == p2.Key &&
|
||||
p.UserProfile.Equal(&p2.UserProfile) &&
|
||||
p.NodeID == p2.NodeID &&
|
||||
p.LocalUserID == p2.LocalUserID &&
|
||||
p.ControlURL == p2.ControlURL
|
||||
}
|
||||
|
||||
// ExitNodeExpression is a string that specifies how an exit node
|
||||
// should be selected. An empty string means that no exit node
|
||||
// should be selected.
|
||||
//
|
||||
// As of 2025-07-02, the only supported value is [AnyExitNode].
|
||||
type ExitNodeExpression string
|
||||
|
||||
// AnyExitNode indicates that the exit node should be automatically
|
||||
// selected from the pool of available exit nodes, excluding any
|
||||
// disallowed by policy (e.g., [syspolicy.AllowedSuggestedExitNodes]).
|
||||
// The exact implementation is subject to change, but exit nodes
|
||||
// offering the best performance will be preferred.
|
||||
const AnyExitNode ExitNodeExpression = "any"
|
||||
|
||||
// IsSet reports whether the expression is non-empty and can be used
|
||||
// to select an exit node.
|
||||
func (e ExitNodeExpression) IsSet() bool {
|
||||
return e != ""
|
||||
}
|
||||
|
||||
const (
|
||||
// AutoExitNodePrefix is the prefix used in [syspolicy.ExitNodeID] values and CLI
|
||||
// to indicate that the string following the prefix is an [ipn.ExitNodeExpression].
|
||||
AutoExitNodePrefix = "auto:"
|
||||
)
|
||||
|
||||
// ParseAutoExitNodeString attempts to parse the given string
|
||||
// as an [ExitNodeExpression].
|
||||
//
|
||||
// It returns the parsed expression and true on success,
|
||||
// or an empty string and false if the input does not appear to be
|
||||
// an [ExitNodeExpression] (i.e., it doesn't start with "auto:").
|
||||
//
|
||||
// It is mainly used to parse the [syspolicy.ExitNodeID] value
|
||||
// when it is set to "auto:<expression>" (e.g., auto:any).
|
||||
func ParseAutoExitNodeString[T ~string](s T) (_ ExitNodeExpression, ok bool) {
|
||||
if expr, ok := strings.CutPrefix(string(s), AutoExitNodePrefix); ok && expr != "" {
|
||||
return ExitNodeExpression(expr), true
|
||||
}
|
||||
return "", false
|
||||
}
|
||||
|
||||
367
vendor/tailscale.com/ipn/serve.go
generated
vendored
367
vendor/tailscale.com/ipn/serve.go
generated
vendored
@@ -10,6 +10,7 @@ import (
|
||||
"net"
|
||||
"net/netip"
|
||||
"net/url"
|
||||
"runtime"
|
||||
"slices"
|
||||
"strconv"
|
||||
"strings"
|
||||
@@ -17,6 +18,7 @@ import (
|
||||
"tailscale.com/ipn/ipnstate"
|
||||
"tailscale.com/tailcfg"
|
||||
"tailscale.com/types/ipproto"
|
||||
"tailscale.com/util/dnsname"
|
||||
"tailscale.com/util/mak"
|
||||
"tailscale.com/util/set"
|
||||
)
|
||||
@@ -149,6 +151,12 @@ type TCPPortHandler struct {
|
||||
// SNI name with this value. It is only used if TCPForward is non-empty.
|
||||
// (the HTTPS mode uses ServeConfig.Web)
|
||||
TerminateTLS string `json:",omitempty"`
|
||||
|
||||
// ProxyProtocol indicates whether to send a PROXY protocol header
|
||||
// before forwarding the connection to TCPForward.
|
||||
//
|
||||
// This is only valid if TCPForward is non-empty.
|
||||
ProxyProtocol int `json:",omitzero"`
|
||||
}
|
||||
|
||||
// HTTPHandler is either a path or a proxy to serve.
|
||||
@@ -160,32 +168,61 @@ type HTTPHandler struct {
|
||||
|
||||
Text string `json:",omitempty"` // plaintext to serve (primarily for testing)
|
||||
|
||||
AcceptAppCaps []tailcfg.PeerCapability `json:",omitempty"` // peer capabilities to forward in grant header, e.g. example.com/cap/mon
|
||||
|
||||
// Redirect, if not empty, is the target URL to redirect requests to.
|
||||
// By default, we redirect with HTTP 302 (Found) status.
|
||||
// If Redirect starts with '<httpcode>:', then we use that status instead.
|
||||
//
|
||||
// The target URL supports the following expansion variables:
|
||||
// - ${HOST}: replaced with the request's Host header value
|
||||
// - ${REQUEST_URI}: replaced with the request's full URI (path and query string)
|
||||
Redirect string `json:",omitempty"`
|
||||
|
||||
// TODO(bradfitz): bool to not enumerate directories? TTL on mapping for
|
||||
// temporary ones? Error codes? Redirects?
|
||||
// temporary ones? Error codes?
|
||||
}
|
||||
|
||||
// WebHandlerExists reports whether if the ServeConfig Web handler exists for
|
||||
// the given host:port and mount point.
|
||||
func (sc *ServeConfig) WebHandlerExists(hp HostPort, mount string) bool {
|
||||
h := sc.GetWebHandler(hp, mount)
|
||||
func (sc *ServeConfig) WebHandlerExists(svcName tailcfg.ServiceName, hp HostPort, mount string) bool {
|
||||
h := sc.GetWebHandler(svcName, hp, mount)
|
||||
return h != nil
|
||||
}
|
||||
|
||||
// GetWebHandler returns the HTTPHandler for the given host:port and mount point.
|
||||
// Returns nil if the handler does not exist.
|
||||
func (sc *ServeConfig) GetWebHandler(hp HostPort, mount string) *HTTPHandler {
|
||||
if sc == nil || sc.Web[hp] == nil {
|
||||
func (sc *ServeConfig) GetWebHandler(svcName tailcfg.ServiceName, hp HostPort, mount string) *HTTPHandler {
|
||||
if sc == nil {
|
||||
return nil
|
||||
}
|
||||
if svcName != "" {
|
||||
if svc, ok := sc.Services[svcName]; ok && svc.Web != nil {
|
||||
if webCfg, ok := svc.Web[hp]; ok {
|
||||
return webCfg.Handlers[mount]
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
if sc.Web[hp] == nil {
|
||||
return nil
|
||||
}
|
||||
return sc.Web[hp].Handlers[mount]
|
||||
}
|
||||
|
||||
// GetTCPPortHandler returns the TCPPortHandler for the given port.
|
||||
// If the port is not configured, nil is returned.
|
||||
func (sc *ServeConfig) GetTCPPortHandler(port uint16) *TCPPortHandler {
|
||||
// GetTCPPortHandler returns the TCPPortHandler for the given port. If the port
|
||||
// is not configured, nil is returned. Parameter svcName can be tailcfg.NoService
|
||||
// for local serve or a service name for a service hosted on node.
|
||||
func (sc *ServeConfig) GetTCPPortHandler(port uint16, svcName tailcfg.ServiceName) *TCPPortHandler {
|
||||
if sc == nil {
|
||||
return nil
|
||||
}
|
||||
if svcName != "" {
|
||||
if svc, ok := sc.Services[svcName]; ok && svc != nil {
|
||||
return svc.TCP[port]
|
||||
}
|
||||
return nil
|
||||
}
|
||||
return sc.TCP[port]
|
||||
}
|
||||
|
||||
@@ -202,6 +239,20 @@ func (sc *ServeConfig) HasPathHandler() bool {
|
||||
}
|
||||
}
|
||||
|
||||
if sc.Services != nil {
|
||||
for _, serviceConfig := range sc.Services {
|
||||
if serviceConfig.Web != nil {
|
||||
for _, webServerConfig := range serviceConfig.Web {
|
||||
for _, httpHandler := range webServerConfig.Handlers {
|
||||
if httpHandler.Path != "" {
|
||||
return true
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
if sc.Foreground != nil {
|
||||
for _, fgConfig := range sc.Foreground {
|
||||
if fgConfig.HasPathHandler() {
|
||||
@@ -227,34 +278,78 @@ func (sc *ServeConfig) IsTCPForwardingAny() bool {
|
||||
return false
|
||||
}
|
||||
|
||||
// IsTCPForwardingOnPort reports whether if ServeConfig is currently forwarding
|
||||
// in TCPForward mode on the given port. This is exclusive of Web/HTTPS serving.
|
||||
func (sc *ServeConfig) IsTCPForwardingOnPort(port uint16) bool {
|
||||
if sc == nil || sc.TCP[port] == nil {
|
||||
// IsTCPForwardingOnPort reports whether ServeConfig is currently forwarding
|
||||
// in TCPForward mode on the given port for local or a service. svcName will
|
||||
// either be noService (empty string) for local serve or a serviceName for service
|
||||
// hosted on node. Notice TCPForwarding is exclusive with Web/HTTPS serving.
|
||||
func (sc *ServeConfig) IsTCPForwardingOnPort(port uint16, svcName tailcfg.ServiceName) bool {
|
||||
if sc == nil {
|
||||
return false
|
||||
}
|
||||
return !sc.IsServingWeb(port)
|
||||
}
|
||||
|
||||
// IsServingWeb reports whether if ServeConfig is currently serving Web
|
||||
// (HTTP/HTTPS) on the given port. This is exclusive of TCPForwarding.
|
||||
func (sc *ServeConfig) IsServingWeb(port uint16) bool {
|
||||
return sc.IsServingHTTP(port) || sc.IsServingHTTPS(port)
|
||||
}
|
||||
|
||||
// IsServingHTTPS reports whether if ServeConfig is currently serving HTTPS on
|
||||
// the given port. This is exclusive of HTTP and TCPForwarding.
|
||||
func (sc *ServeConfig) IsServingHTTPS(port uint16) bool {
|
||||
if sc == nil || sc.TCP[port] == nil {
|
||||
if svcName != "" {
|
||||
svc, ok := sc.Services[svcName]
|
||||
if !ok || svc == nil {
|
||||
return false
|
||||
}
|
||||
if svc.TCP[port] == nil {
|
||||
return false
|
||||
}
|
||||
} else if sc.TCP[port] == nil {
|
||||
return false
|
||||
}
|
||||
return sc.TCP[port].HTTPS
|
||||
return !sc.IsServingWeb(port, svcName)
|
||||
}
|
||||
|
||||
// IsServingHTTP reports whether if ServeConfig is currently serving HTTP on the
|
||||
// given port. This is exclusive of HTTPS and TCPForwarding.
|
||||
func (sc *ServeConfig) IsServingHTTP(port uint16) bool {
|
||||
if sc == nil || sc.TCP[port] == nil {
|
||||
// IsServingWeb reports whether ServeConfig is currently serving Web (HTTP/HTTPS)
|
||||
// on the given port for local or a service. svcName will be either tailcfg.NoService,
|
||||
// or a serviceName for service hosted on node. This is exclusive with TCPForwarding.
|
||||
func (sc *ServeConfig) IsServingWeb(port uint16, svcName tailcfg.ServiceName) bool {
|
||||
return sc.IsServingHTTP(port, svcName) || sc.IsServingHTTPS(port, svcName)
|
||||
}
|
||||
|
||||
// IsServingHTTPS reports whether ServeConfig is currently serving HTTPS on
|
||||
// the given port for local or a service. svcName will be either tailcfg.NoService
|
||||
// for local serve, or a serviceName for service hosted on node. This is exclusive
|
||||
// with HTTP and TCPForwarding.
|
||||
func (sc *ServeConfig) IsServingHTTPS(port uint16, svcName tailcfg.ServiceName) bool {
|
||||
if sc == nil {
|
||||
return false
|
||||
}
|
||||
var tcpHandlers map[uint16]*TCPPortHandler
|
||||
if svcName != "" {
|
||||
if svc := sc.Services[svcName]; svc != nil {
|
||||
tcpHandlers = svc.TCP
|
||||
}
|
||||
} else {
|
||||
tcpHandlers = sc.TCP
|
||||
}
|
||||
|
||||
th := tcpHandlers[port]
|
||||
if th == nil {
|
||||
return false
|
||||
}
|
||||
return th.HTTPS
|
||||
}
|
||||
|
||||
// IsServingHTTP reports whether ServeConfig is currently serving HTTP on the
|
||||
// given port for local or a service. svcName will be either tailcfg.NoService for
|
||||
// local serve, or a serviceName for service hosted on node. This is exclusive
|
||||
// with HTTPS and TCPForwarding.
|
||||
func (sc *ServeConfig) IsServingHTTP(port uint16, svcName tailcfg.ServiceName) bool {
|
||||
if sc == nil {
|
||||
return false
|
||||
}
|
||||
if svcName != "" {
|
||||
if svc := sc.Services[svcName]; svc != nil {
|
||||
if svc.TCP[port] != nil {
|
||||
return svc.TCP[port].HTTP
|
||||
}
|
||||
}
|
||||
return false
|
||||
}
|
||||
|
||||
if sc.TCP[port] == nil {
|
||||
return false
|
||||
}
|
||||
return sc.TCP[port].HTTP
|
||||
@@ -280,21 +375,38 @@ func (sc *ServeConfig) FindConfig(port uint16) (*ServeConfig, bool) {
|
||||
|
||||
// SetWebHandler sets the given HTTPHandler at the specified host, port,
|
||||
// and mount in the serve config. sc.TCP is also updated to reflect web
|
||||
// serving usage of the given port.
|
||||
func (sc *ServeConfig) SetWebHandler(handler *HTTPHandler, host string, port uint16, mount string, useTLS bool) {
|
||||
// serving usage of the given port. The st argument is needed when setting
|
||||
// a web handler for a service, otherwise it can be nil. mds is the Magic DNS
|
||||
// suffix, which is used to recreate serve's host.
|
||||
func (sc *ServeConfig) SetWebHandler(handler *HTTPHandler, host string, port uint16, mount string, useTLS bool, mds string) {
|
||||
if sc == nil {
|
||||
sc = new(ServeConfig)
|
||||
}
|
||||
mak.Set(&sc.TCP, port, &TCPPortHandler{HTTPS: useTLS, HTTP: !useTLS})
|
||||
|
||||
hp := HostPort(net.JoinHostPort(host, strconv.Itoa(int(port))))
|
||||
if _, ok := sc.Web[hp]; !ok {
|
||||
mak.Set(&sc.Web, hp, new(WebServerConfig))
|
||||
tcpMap := &sc.TCP
|
||||
webServerMap := &sc.Web
|
||||
hostName := host
|
||||
if svcName := tailcfg.AsServiceName(host); svcName != "" {
|
||||
hostName = strings.Join([]string{svcName.WithoutPrefix(), mds}, ".")
|
||||
svc, ok := sc.Services[svcName]
|
||||
if !ok {
|
||||
svc = new(ServiceConfig)
|
||||
mak.Set(&sc.Services, svcName, svc)
|
||||
}
|
||||
tcpMap = &svc.TCP
|
||||
webServerMap = &svc.Web
|
||||
}
|
||||
mak.Set(&sc.Web[hp].Handlers, mount, handler)
|
||||
|
||||
mak.Set(tcpMap, port, &TCPPortHandler{HTTPS: useTLS, HTTP: !useTLS})
|
||||
hp := HostPort(net.JoinHostPort(hostName, strconv.Itoa(int(port))))
|
||||
webCfg, ok := (*webServerMap)[hp]
|
||||
if !ok {
|
||||
webCfg = new(WebServerConfig)
|
||||
mak.Set(webServerMap, hp, webCfg)
|
||||
}
|
||||
mak.Set(&webCfg.Handlers, mount, handler)
|
||||
// TODO(tylersmalley): handle multiple web handlers from foreground mode
|
||||
for k, v := range sc.Web[hp].Handlers {
|
||||
for k, v := range webCfg.Handlers {
|
||||
if v == handler {
|
||||
continue
|
||||
}
|
||||
@@ -305,7 +417,7 @@ func (sc *ServeConfig) SetWebHandler(handler *HTTPHandler, host string, port uin
|
||||
m1 := strings.TrimSuffix(mount, "/")
|
||||
m2 := strings.TrimSuffix(k, "/")
|
||||
if m1 == m2 {
|
||||
delete(sc.Web[hp].Handlers, k)
|
||||
delete(webCfg.Handlers, k)
|
||||
}
|
||||
}
|
||||
}
|
||||
@@ -314,16 +426,46 @@ func (sc *ServeConfig) SetWebHandler(handler *HTTPHandler, host string, port uin
|
||||
// connections from the given port. If terminateTLS is true, TLS connections
|
||||
// are terminated with only the given host name permitted before passing them
|
||||
// to the fwdAddr.
|
||||
func (sc *ServeConfig) SetTCPForwarding(port uint16, fwdAddr string, terminateTLS bool, host string) {
|
||||
//
|
||||
// If proxyProtocol is non-zero, the corresponding PROXY protocol version
|
||||
// header is sent before forwarding the connection.
|
||||
func (sc *ServeConfig) SetTCPForwarding(port uint16, fwdAddr string, terminateTLS bool, proxyProtocol int, host string) {
|
||||
if sc == nil {
|
||||
sc = new(ServeConfig)
|
||||
}
|
||||
mak.Set(&sc.TCP, port, &TCPPortHandler{TCPForward: fwdAddr})
|
||||
mak.Set(&sc.TCP, port, &TCPPortHandler{
|
||||
TCPForward: fwdAddr,
|
||||
ProxyProtocol: proxyProtocol, // can be 0
|
||||
})
|
||||
|
||||
if terminateTLS {
|
||||
sc.TCP[port].TerminateTLS = host
|
||||
}
|
||||
}
|
||||
|
||||
// SetTCPForwardingForService sets the fwdAddr (IP:port form) to which to
|
||||
// forward connections from the given port on the service. If terminateTLS
|
||||
// is true, TLS connections are terminated, with only the FQDN that corresponds
|
||||
// to the given service being permitted, before passing them to the fwdAddr.
|
||||
func (sc *ServeConfig) SetTCPForwardingForService(port uint16, fwdAddr string, terminateTLS bool, svcName tailcfg.ServiceName, proxyProtocol int, magicDNSSuffix string) {
|
||||
if sc == nil {
|
||||
sc = new(ServeConfig)
|
||||
}
|
||||
svcConfig, ok := sc.Services[svcName]
|
||||
if !ok {
|
||||
svcConfig = new(ServiceConfig)
|
||||
mak.Set(&sc.Services, svcName, svcConfig)
|
||||
}
|
||||
mak.Set(&svcConfig.TCP, port, &TCPPortHandler{
|
||||
TCPForward: fwdAddr,
|
||||
ProxyProtocol: proxyProtocol, // can be 0
|
||||
})
|
||||
|
||||
if terminateTLS {
|
||||
svcConfig.TCP[port].TerminateTLS = fmt.Sprintf("%s.%s", svcName.WithoutPrefix(), magicDNSSuffix)
|
||||
}
|
||||
}
|
||||
|
||||
// SetFunnel sets the sc.AllowFunnel value for the given host and port.
|
||||
func (sc *ServeConfig) SetFunnel(host string, port uint16, setOn bool) {
|
||||
if sc == nil {
|
||||
@@ -344,9 +486,9 @@ func (sc *ServeConfig) SetFunnel(host string, port uint16, setOn bool) {
|
||||
}
|
||||
}
|
||||
|
||||
// RemoveWebHandler deletes the web handlers at all of the given mount points
|
||||
// for the provided host and port in the serve config. If cleanupFunnel is
|
||||
// true, this also removes the funnel value for this port if no handlers remain.
|
||||
// RemoveWebHandler deletes the web handlers at all of the given mount points for the
|
||||
// provided host and port in the serve config for the node (as opposed to a service).
|
||||
// If cleanupFunnel is true, this also removes the funnel value for this port if no handlers remain.
|
||||
func (sc *ServeConfig) RemoveWebHandler(host string, port uint16, mounts []string, cleanupFunnel bool) {
|
||||
hp := HostPort(net.JoinHostPort(host, strconv.Itoa(int(port))))
|
||||
|
||||
@@ -374,9 +516,50 @@ func (sc *ServeConfig) RemoveWebHandler(host string, port uint16, mounts []strin
|
||||
}
|
||||
}
|
||||
|
||||
// RemoveServiceWebHandler deletes the web handlers at all of the given mount points
|
||||
// for the provided host and port in the serve config for the given service.
|
||||
func (sc *ServeConfig) RemoveServiceWebHandler(svcName tailcfg.ServiceName, hostName string, port uint16, mounts []string) {
|
||||
hp := HostPort(net.JoinHostPort(hostName, strconv.Itoa(int(port))))
|
||||
|
||||
svc, ok := sc.Services[svcName]
|
||||
if !ok || svc == nil {
|
||||
return
|
||||
}
|
||||
|
||||
// Delete existing handler, then cascade delete if empty.
|
||||
for _, m := range mounts {
|
||||
delete(svc.Web[hp].Handlers, m)
|
||||
}
|
||||
if len(svc.Web[hp].Handlers) == 0 {
|
||||
delete(svc.Web, hp)
|
||||
delete(svc.TCP, port)
|
||||
}
|
||||
if len(svc.Web) == 0 && len(svc.TCP) == 0 {
|
||||
delete(sc.Services, svcName)
|
||||
}
|
||||
if len(sc.Services) == 0 {
|
||||
sc.Services = nil
|
||||
}
|
||||
}
|
||||
|
||||
// RemoveTCPForwarding deletes the TCP forwarding configuration for the given
|
||||
// port from the serve config.
|
||||
func (sc *ServeConfig) RemoveTCPForwarding(port uint16) {
|
||||
func (sc *ServeConfig) RemoveTCPForwarding(svcName tailcfg.ServiceName, port uint16) {
|
||||
if svcName != "" {
|
||||
if svc := sc.Services[svcName]; svc != nil {
|
||||
delete(svc.TCP, port)
|
||||
if len(svc.TCP) == 0 {
|
||||
svc.TCP = nil
|
||||
}
|
||||
if len(svc.Web) == 0 && len(svc.TCP) == 0 {
|
||||
delete(sc.Services, svcName)
|
||||
}
|
||||
if len(sc.Services) == 0 {
|
||||
sc.Services = nil
|
||||
}
|
||||
}
|
||||
return
|
||||
}
|
||||
delete(sc.TCP, port)
|
||||
if len(sc.TCP) == 0 {
|
||||
sc.TCP = nil
|
||||
@@ -519,7 +702,8 @@ func CheckFunnelPort(wantedPort uint16, node *ipnstate.PeerStatus) error {
|
||||
|
||||
// ExpandProxyTargetValue expands the supported target values to be proxied
|
||||
// allowing for input values to be a port number, a partial URL, or a full URL
|
||||
// including a path.
|
||||
// including a path. If it's for a service, remote addresses are allowed and
|
||||
// there doesn't have to be a port specified.
|
||||
//
|
||||
// examples:
|
||||
// - 3000
|
||||
@@ -529,17 +713,40 @@ func CheckFunnelPort(wantedPort uint16, node *ipnstate.PeerStatus) error {
|
||||
// - https://localhost:3000
|
||||
// - https-insecure://localhost:3000
|
||||
// - https-insecure://localhost:3000/foo
|
||||
// - https://tailscale.com
|
||||
func ExpandProxyTargetValue(target string, supportedSchemes []string, defaultScheme string) (string, error) {
|
||||
const host = "127.0.0.1"
|
||||
|
||||
// empty target is invalid
|
||||
if target == "" {
|
||||
return "", fmt.Errorf("empty target")
|
||||
}
|
||||
|
||||
// support target being a port number
|
||||
if port, err := strconv.ParseUint(target, 10, 16); err == nil {
|
||||
return fmt.Sprintf("%s://%s:%d", defaultScheme, host, port), nil
|
||||
}
|
||||
|
||||
// handle unix: scheme specially - it doesn't use standard URL format
|
||||
if strings.HasPrefix(target, "unix:") {
|
||||
if !slices.Contains(supportedSchemes, "unix") {
|
||||
return "", fmt.Errorf("unix sockets are not supported for this target type")
|
||||
}
|
||||
if runtime.GOOS == "windows" {
|
||||
return "", fmt.Errorf("unix socket serve target is not supported on Windows")
|
||||
}
|
||||
path := strings.TrimPrefix(target, "unix:")
|
||||
if path == "" {
|
||||
return "", fmt.Errorf("unix socket path cannot be empty")
|
||||
}
|
||||
return target, nil
|
||||
}
|
||||
|
||||
hasScheme := true
|
||||
// prepend scheme if not present
|
||||
if !strings.Contains(target, "://") {
|
||||
target = defaultScheme + "://" + target
|
||||
hasScheme = false
|
||||
}
|
||||
|
||||
// make sure we can parse the target
|
||||
@@ -553,16 +760,28 @@ func ExpandProxyTargetValue(target string, supportedSchemes []string, defaultSch
|
||||
return "", fmt.Errorf("must be a URL starting with one of the supported schemes: %v", supportedSchemes)
|
||||
}
|
||||
|
||||
// validate the host.
|
||||
switch u.Hostname() {
|
||||
case "localhost", "127.0.0.1":
|
||||
default:
|
||||
return "", errors.New("only localhost or 127.0.0.1 proxies are currently supported")
|
||||
// validate port according to host.
|
||||
if u.Hostname() == "localhost" || u.Hostname() == "127.0.0.1" || u.Hostname() == "::1" {
|
||||
// require port for localhost targets
|
||||
if u.Port() == "" {
|
||||
return "", fmt.Errorf("port required for localhost target %q", target)
|
||||
}
|
||||
} else {
|
||||
validHN := dnsname.ValidHostname(u.Hostname()) == nil
|
||||
validIP := net.ParseIP(u.Hostname()) != nil
|
||||
if !validHN && !validIP {
|
||||
return "", fmt.Errorf("invalid hostname or IP address %q", u.Hostname())
|
||||
}
|
||||
// require scheme for non-localhost targets
|
||||
if !hasScheme {
|
||||
return "", fmt.Errorf("non-localhost target %q must include a scheme", target)
|
||||
}
|
||||
}
|
||||
|
||||
// validate the port
|
||||
port, err := strconv.ParseUint(u.Port(), 10, 16)
|
||||
if err != nil || port == 0 {
|
||||
if u.Port() == "" {
|
||||
return u.String(), nil // allow no port for remote destinations
|
||||
}
|
||||
return "", fmt.Errorf("invalid port %q", u.Port())
|
||||
}
|
||||
|
||||
@@ -626,6 +845,7 @@ func (v ServeConfigView) FindServiceTCP(svcName tailcfg.ServiceName, port uint16
|
||||
return svcCfg.TCP().GetOk(port)
|
||||
}
|
||||
|
||||
// FindServiceWeb returns the web handler for the service's host-port.
|
||||
func (v ServeConfigView) FindServiceWeb(svcName tailcfg.ServiceName, hp HostPort) (res WebServerConfigView, ok bool) {
|
||||
if svcCfg, ok := v.Services().GetOk(svcName); ok {
|
||||
if res, ok := svcCfg.Web().GetOk(hp); ok {
|
||||
@@ -639,10 +859,9 @@ func (v ServeConfigView) FindServiceWeb(svcName tailcfg.ServiceName, hp HostPort
|
||||
// prefers a foreground match first followed by a background search if none
|
||||
// existed.
|
||||
func (v ServeConfigView) FindTCP(port uint16) (res TCPPortHandlerView, ok bool) {
|
||||
for _, conf := range v.Foreground().All() {
|
||||
if res, ok := conf.TCP().GetOk(port); ok {
|
||||
return res, ok
|
||||
}
|
||||
res, ok = v.FindForegroundTCP(port)
|
||||
if ok {
|
||||
return res, ok
|
||||
}
|
||||
return v.TCP().GetOk(port)
|
||||
}
|
||||
@@ -659,6 +878,17 @@ func (v ServeConfigView) FindWeb(hp HostPort) (res WebServerConfigView, ok bool)
|
||||
return v.Web().GetOk(hp)
|
||||
}
|
||||
|
||||
// FindForegroundTCP returns the first foreground TCP handler matching the input
|
||||
// port.
|
||||
func (v ServeConfigView) FindForegroundTCP(port uint16) (res TCPPortHandlerView, ok bool) {
|
||||
for _, conf := range v.Foreground().All() {
|
||||
if res, ok := conf.TCP().GetOk(port); ok {
|
||||
return res, ok
|
||||
}
|
||||
}
|
||||
return res, false
|
||||
}
|
||||
|
||||
// HasAllowFunnel returns whether this config has at least one AllowFunnel
|
||||
// set in the background or foreground configs.
|
||||
func (v ServeConfigView) HasAllowFunnel() bool {
|
||||
@@ -687,17 +917,6 @@ func (v ServeConfigView) HasFunnelForTarget(target HostPort) bool {
|
||||
return false
|
||||
}
|
||||
|
||||
// CheckValidServicesConfig reports whether the ServeConfig has
|
||||
// invalid service configurations.
|
||||
func (sc *ServeConfig) CheckValidServicesConfig() error {
|
||||
for svcName, service := range sc.Services {
|
||||
if err := service.checkValidConfig(); err != nil {
|
||||
return fmt.Errorf("invalid service configuration for %q: %w", svcName, err)
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// ServicePortRange returns the list of tailcfg.ProtoPortRange that represents
|
||||
// the proto/ports pairs that are being served by the service.
|
||||
//
|
||||
@@ -735,17 +954,3 @@ func (v ServiceConfigView) ServicePortRange() []tailcfg.ProtoPortRange {
|
||||
}
|
||||
return ranges
|
||||
}
|
||||
|
||||
// ErrServiceConfigHasBothTCPAndTun signals that a service
|
||||
// in Tun mode cannot also has TCP or Web handlers set.
|
||||
var ErrServiceConfigHasBothTCPAndTun = errors.New("the VIP Service configuration can not set TUN at the same time as TCP or Web")
|
||||
|
||||
// checkValidConfig checks if the service configuration is valid.
|
||||
// Currently, the only invalid configuration is when the service is in Tun mode
|
||||
// and has TCP or Web handlers.
|
||||
func (v *ServiceConfig) checkValidConfig() error {
|
||||
if v.Tun && (len(v.TCP) > 0 || len(v.Web) > 0) {
|
||||
return ErrServiceConfigHasBothTCPAndTun
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
21
vendor/tailscale.com/ipn/store.go
generated
vendored
21
vendor/tailscale.com/ipn/store.go
generated
vendored
@@ -10,6 +10,8 @@ import (
|
||||
"fmt"
|
||||
"net"
|
||||
"strconv"
|
||||
|
||||
"tailscale.com/health"
|
||||
)
|
||||
|
||||
// ErrStateNotExist is returned by StateStore.ReadState when the
|
||||
@@ -60,6 +62,19 @@ const (
|
||||
TaildropReceivedKey = StateKey("_taildrop-received")
|
||||
)
|
||||
|
||||
// StateStoreHealth is a Warnable set when store.New fails at startup. If
|
||||
// unhealthy, we block all login attempts and return a health message in status
|
||||
// responses.
|
||||
var StateStoreHealth = health.Register(&health.Warnable{
|
||||
Code: "state-store-health",
|
||||
Severity: health.SeverityHigh,
|
||||
Title: "Tailscale state store failed to initialize",
|
||||
Text: func(args health.Args) string {
|
||||
return fmt.Sprintf("State store failed to initialize, Tailscale will not work until this is resolved. See https://tailscale.com/s/state-store-init-error. Error: %s", args[health.ArgError])
|
||||
},
|
||||
ImpactsConnectivity: true,
|
||||
})
|
||||
|
||||
// CurrentProfileID returns the StateKey that stores the
|
||||
// current profile ID. The value is a JSON-encoded LoginProfile.
|
||||
// If the userID is empty, the key returned is CurrentProfileStateKey,
|
||||
@@ -113,3 +128,9 @@ func ReadStoreInt(store StateStore, id StateKey) (int64, error) {
|
||||
func PutStoreInt(store StateStore, id StateKey, val int64) error {
|
||||
return WriteState(store, id, fmt.Appendf(nil, "%d", val))
|
||||
}
|
||||
|
||||
// EncryptedStateStore is a marker interface implemented by StateStores that
|
||||
// encrypt data at rest.
|
||||
type EncryptedStateStore interface {
|
||||
stateStoreIsEncrypted()
|
||||
}
|
||||
|
||||
255
vendor/tailscale.com/ipn/store/awsstore/store_aws.go
generated
vendored
255
vendor/tailscale.com/ipn/store/awsstore/store_aws.go
generated
vendored
@@ -1,255 +0,0 @@
|
||||
// Copyright (c) Tailscale Inc & AUTHORS
|
||||
// SPDX-License-Identifier: BSD-3-Clause
|
||||
|
||||
//go:build linux && !ts_omit_aws
|
||||
|
||||
// Package awsstore contains an ipn.StateStore implementation using AWS SSM.
|
||||
package awsstore
|
||||
|
||||
import (
|
||||
"context"
|
||||
"errors"
|
||||
"fmt"
|
||||
"net/url"
|
||||
"regexp"
|
||||
"strings"
|
||||
|
||||
"github.com/aws/aws-sdk-go-v2/aws"
|
||||
"github.com/aws/aws-sdk-go-v2/aws/arn"
|
||||
"github.com/aws/aws-sdk-go-v2/config"
|
||||
"github.com/aws/aws-sdk-go-v2/service/ssm"
|
||||
ssmTypes "github.com/aws/aws-sdk-go-v2/service/ssm/types"
|
||||
"tailscale.com/ipn"
|
||||
"tailscale.com/ipn/store/mem"
|
||||
"tailscale.com/types/logger"
|
||||
)
|
||||
|
||||
const (
|
||||
parameterNameRxStr = `^parameter(/.*)`
|
||||
)
|
||||
|
||||
var parameterNameRx = regexp.MustCompile(parameterNameRxStr)
|
||||
|
||||
// Option defines a functional option type for configuring awsStore.
|
||||
type Option func(*storeOptions)
|
||||
|
||||
// storeOptions holds optional settings for creating a new awsStore.
|
||||
type storeOptions struct {
|
||||
kmsKey string
|
||||
}
|
||||
|
||||
// awsSSMClient is an interface allowing us to mock the couple of
|
||||
// API calls we are leveraging with the AWSStore provider
|
||||
type awsSSMClient interface {
|
||||
GetParameter(ctx context.Context,
|
||||
params *ssm.GetParameterInput,
|
||||
optFns ...func(*ssm.Options)) (*ssm.GetParameterOutput, error)
|
||||
|
||||
PutParameter(ctx context.Context,
|
||||
params *ssm.PutParameterInput,
|
||||
optFns ...func(*ssm.Options)) (*ssm.PutParameterOutput, error)
|
||||
}
|
||||
|
||||
// store is a store which leverages AWS SSM parameter store
|
||||
// to persist the state
|
||||
type awsStore struct {
|
||||
ssmClient awsSSMClient
|
||||
ssmARN arn.ARN
|
||||
|
||||
// kmsKey is optional. If empty, the parameter is stored in plaintext.
|
||||
// If non-empty, the parameter is encrypted with this KMS key.
|
||||
kmsKey string
|
||||
|
||||
memory mem.Store
|
||||
}
|
||||
|
||||
// New returns a new ipn.StateStore using the AWS SSM storage
|
||||
// location given by ssmARN.
|
||||
//
|
||||
// Note that we store the entire store in a single parameter
|
||||
// key, therefore if the state is above 8kb, it can cause
|
||||
// Tailscaled to only only store new state in-memory and
|
||||
// restarting Tailscaled can fail until you delete your state
|
||||
// from the AWS Parameter Store.
|
||||
//
|
||||
// If you want to specify an optional KMS key,
|
||||
// pass one or more Option objects, e.g. awsstore.WithKeyID("alias/my-key").
|
||||
func New(_ logger.Logf, ssmARN string, opts ...Option) (ipn.StateStore, error) {
|
||||
// Apply all options to an empty storeOptions
|
||||
var so storeOptions
|
||||
for _, opt := range opts {
|
||||
opt(&so)
|
||||
}
|
||||
|
||||
return newStore(ssmARN, so, nil)
|
||||
}
|
||||
|
||||
// WithKeyID sets the KMS key to be used for encryption. It can be
|
||||
// a KeyID, an alias ("alias/my-key"), or a full ARN.
|
||||
//
|
||||
// If kmsKey is empty, the Option is a no-op.
|
||||
func WithKeyID(kmsKey string) Option {
|
||||
return func(o *storeOptions) {
|
||||
o.kmsKey = kmsKey
|
||||
}
|
||||
}
|
||||
|
||||
// ParseARNAndOpts parses an ARN and optional URL-encoded parameters
|
||||
// from arg.
|
||||
func ParseARNAndOpts(arg string) (ssmARN string, opts []Option, err error) {
|
||||
ssmARN = arg
|
||||
|
||||
// Support optional ?url-encoded-parameters.
|
||||
if s, q, ok := strings.Cut(arg, "?"); ok {
|
||||
ssmARN = s
|
||||
q, err := url.ParseQuery(q)
|
||||
if err != nil {
|
||||
return "", nil, err
|
||||
}
|
||||
|
||||
for k := range q {
|
||||
switch k {
|
||||
default:
|
||||
return "", nil, fmt.Errorf("unknown arn option parameter %q", k)
|
||||
case "kmsKey":
|
||||
// We allow an ARN, a key ID, or an alias name for kmsKeyID.
|
||||
// If it doesn't look like an ARN and doesn't have a '/',
|
||||
// prepend "alias/" for KMS alias references.
|
||||
kmsKey := q.Get(k)
|
||||
if kmsKey != "" &&
|
||||
!strings.Contains(kmsKey, "/") &&
|
||||
!strings.HasPrefix(kmsKey, "arn:") {
|
||||
kmsKey = "alias/" + kmsKey
|
||||
}
|
||||
if kmsKey != "" {
|
||||
opts = append(opts, WithKeyID(kmsKey))
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
return ssmARN, opts, nil
|
||||
}
|
||||
|
||||
// newStore is NewStore, but for tests. If client is non-nil, it's
|
||||
// used instead of making one.
|
||||
func newStore(ssmARN string, so storeOptions, client awsSSMClient) (ipn.StateStore, error) {
|
||||
s := &awsStore{
|
||||
ssmClient: client,
|
||||
kmsKey: so.kmsKey,
|
||||
}
|
||||
|
||||
var err error
|
||||
if s.ssmARN, err = arn.Parse(ssmARN); err != nil {
|
||||
return nil, fmt.Errorf("unable to parse the ARN correctly: %v", err)
|
||||
}
|
||||
if s.ssmARN.Service != "ssm" {
|
||||
return nil, fmt.Errorf("invalid service %q, expected 'ssm'", s.ssmARN.Service)
|
||||
}
|
||||
if !parameterNameRx.MatchString(s.ssmARN.Resource) {
|
||||
return nil, fmt.Errorf("invalid resource %q, expected to match %v", s.ssmARN.Resource, parameterNameRxStr)
|
||||
}
|
||||
|
||||
if s.ssmClient == nil {
|
||||
var cfg aws.Config
|
||||
if cfg, err = config.LoadDefaultConfig(
|
||||
context.TODO(),
|
||||
config.WithRegion(s.ssmARN.Region),
|
||||
); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
s.ssmClient = ssm.NewFromConfig(cfg)
|
||||
}
|
||||
|
||||
// Preload existing state, if any
|
||||
if err := s.LoadState(); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return s, nil
|
||||
}
|
||||
|
||||
// LoadState attempts to read the state from AWS SSM parameter store key.
|
||||
func (s *awsStore) LoadState() error {
|
||||
param, err := s.ssmClient.GetParameter(
|
||||
context.TODO(),
|
||||
&ssm.GetParameterInput{
|
||||
Name: aws.String(s.ParameterName()),
|
||||
WithDecryption: aws.Bool(true),
|
||||
},
|
||||
)
|
||||
|
||||
if err != nil {
|
||||
var pnf *ssmTypes.ParameterNotFound
|
||||
if errors.As(err, &pnf) {
|
||||
// Create the parameter as it does not exist yet
|
||||
// and return directly as it is defacto empty
|
||||
return s.persistState()
|
||||
}
|
||||
return err
|
||||
}
|
||||
|
||||
// Load the content in-memory
|
||||
return s.memory.LoadFromJSON([]byte(*param.Parameter.Value))
|
||||
}
|
||||
|
||||
// ParameterName returns the parameter name extracted from
|
||||
// the provided ARN
|
||||
func (s *awsStore) ParameterName() (name string) {
|
||||
values := parameterNameRx.FindStringSubmatch(s.ssmARN.Resource)
|
||||
if len(values) == 2 {
|
||||
name = values[1]
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
// String returns the awsStore and the ARN of the SSM parameter store
|
||||
// configured to store the state
|
||||
func (s *awsStore) String() string { return fmt.Sprintf("awsStore(%q)", s.ssmARN.String()) }
|
||||
|
||||
// ReadState implements the Store interface.
|
||||
func (s *awsStore) ReadState(id ipn.StateKey) (bs []byte, err error) {
|
||||
return s.memory.ReadState(id)
|
||||
}
|
||||
|
||||
// WriteState implements the Store interface.
|
||||
func (s *awsStore) WriteState(id ipn.StateKey, bs []byte) (err error) {
|
||||
// Write the state in-memory
|
||||
if err = s.memory.WriteState(id, bs); err != nil {
|
||||
return
|
||||
}
|
||||
|
||||
// Persist the state in AWS SSM parameter store
|
||||
return s.persistState()
|
||||
}
|
||||
|
||||
// PersistState saves the states into the AWS SSM parameter store
|
||||
func (s *awsStore) persistState() error {
|
||||
// Generate JSON from in-memory cache
|
||||
bs, err := s.memory.ExportToJSON()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
// Store in AWS SSM parameter store.
|
||||
//
|
||||
// We use intelligent tiering so that when the state is below 4kb, it uses Standard tiering
|
||||
// which is free. However, if it exceeds 4kb it switches the parameter to advanced tiering
|
||||
// doubling the capacity to 8kb per the following docs:
|
||||
// https://aws.amazon.com/about-aws/whats-new/2019/08/aws-systems-manager-parameter-store-announces-intelligent-tiering-to-enable-automatic-parameter-tier-selection/
|
||||
in := &ssm.PutParameterInput{
|
||||
Name: aws.String(s.ParameterName()),
|
||||
Value: aws.String(string(bs)),
|
||||
Overwrite: aws.Bool(true),
|
||||
Tier: ssmTypes.ParameterTierIntelligentTiering,
|
||||
Type: ssmTypes.ParameterTypeSecureString,
|
||||
}
|
||||
|
||||
// If kmsKey is specified, encrypt with that key
|
||||
// NOTE: this input allows any alias, keyID or ARN
|
||||
// If this isn't specified, AWS will use the default KMS key
|
||||
if s.kmsKey != "" {
|
||||
in.KeyId = aws.String(s.kmsKey)
|
||||
}
|
||||
|
||||
_, err = s.ssmClient.PutParameter(context.TODO(), in)
|
||||
return err
|
||||
}
|
||||
430
vendor/tailscale.com/ipn/store/kubestore/store_kube.go
generated
vendored
430
vendor/tailscale.com/ipn/store/kubestore/store_kube.go
generated
vendored
@@ -1,430 +0,0 @@
|
||||
// Copyright (c) Tailscale Inc & AUTHORS
|
||||
// SPDX-License-Identifier: BSD-3-Clause
|
||||
|
||||
// Package kubestore contains an ipn.StateStore implementation using Kubernetes Secrets.
|
||||
package kubestore
|
||||
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
"log"
|
||||
"net"
|
||||
"os"
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
"tailscale.com/envknob"
|
||||
"tailscale.com/ipn"
|
||||
"tailscale.com/ipn/store/mem"
|
||||
"tailscale.com/kube/kubeapi"
|
||||
"tailscale.com/kube/kubeclient"
|
||||
"tailscale.com/kube/kubetypes"
|
||||
"tailscale.com/types/logger"
|
||||
"tailscale.com/util/dnsname"
|
||||
"tailscale.com/util/mak"
|
||||
)
|
||||
|
||||
const (
|
||||
// timeout is the timeout for a single state update that includes calls to the API server to write or read a
|
||||
// state Secret and emit an Event.
|
||||
timeout = 30 * time.Second
|
||||
|
||||
reasonTailscaleStateUpdated = "TailscaledStateUpdated"
|
||||
reasonTailscaleStateLoaded = "TailscaleStateLoaded"
|
||||
reasonTailscaleStateUpdateFailed = "TailscaleStateUpdateFailed"
|
||||
reasonTailscaleStateLoadFailed = "TailscaleStateLoadFailed"
|
||||
eventTypeWarning = "Warning"
|
||||
eventTypeNormal = "Normal"
|
||||
|
||||
keyTLSCert = "tls.crt"
|
||||
keyTLSKey = "tls.key"
|
||||
)
|
||||
|
||||
// Store is an ipn.StateStore that uses a Kubernetes Secret for persistence.
|
||||
type Store struct {
|
||||
client kubeclient.Client
|
||||
canPatch bool
|
||||
secretName string // state Secret
|
||||
certShareMode string // 'ro', 'rw', or empty
|
||||
podName string
|
||||
|
||||
// memory holds the latest tailscale state. Writes write state to a kube
|
||||
// Secret and memory, Reads read from memory.
|
||||
memory mem.Store
|
||||
}
|
||||
|
||||
// New returns a new Store that persists state to Kubernets Secret(s).
|
||||
// Tailscale state is stored in a Secret named by the secretName parameter.
|
||||
// TLS certs are stored and retrieved from state Secret or separate Secrets
|
||||
// named after TLS endpoints if running in cert share mode.
|
||||
func New(logf logger.Logf, secretName string) (*Store, error) {
|
||||
c, err := newClient()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return newWithClient(logf, c, secretName)
|
||||
}
|
||||
|
||||
func newClient() (kubeclient.Client, error) {
|
||||
c, err := kubeclient.New("tailscale-state-store")
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if os.Getenv("TS_KUBERNETES_READ_API_SERVER_ADDRESS_FROM_ENV") == "true" {
|
||||
// Derive the API server address from the environment variables
|
||||
c.SetURL(fmt.Sprintf("https://%s:%s", os.Getenv("KUBERNETES_SERVICE_HOST"), os.Getenv("KUBERNETES_SERVICE_PORT_HTTPS")))
|
||||
}
|
||||
return c, nil
|
||||
}
|
||||
|
||||
// newWithClient constructs a Store around an existing kubeclient.Client. It
// probes the client's Secret permissions, detects cert share mode from the
// environment, pre-loads existing state (and, in cert share mode, shared TLS
// certs) into memory, and for read-only cert share replicas starts a
// background cert reload loop.
func newWithClient(logf logger.Logf, c kubeclient.Client, secretName string) (*Store, error) {
	canPatch, _, err := c.CheckSecretPermissions(context.Background(), secretName)
	if err != nil {
		return nil, err
	}
	s := &Store{
		client:     c,
		canPatch:   canPatch,
		secretName: secretName,
		podName:    os.Getenv("POD_NAME"),
	}
	if envknob.IsCertShareReadWriteMode() {
		s.certShareMode = "rw"
	} else if envknob.IsCertShareReadOnlyMode() {
		s.certShareMode = "ro"
	}

	// Load latest state from kube Secret if it already exists.
	if err := s.loadState(); err != nil && err != ipn.ErrStateNotExist {
		return nil, fmt.Errorf("error loading state from kube Secret: %w", err)
	}
	// If we are in cert share mode, pre-load existing shared certs.
	if s.certShareMode == "rw" || s.certShareMode == "ro" {
		sel := s.certSecretSelector()
		if err := s.loadCerts(context.Background(), sel); err != nil {
			// Non-fatal: we will attempt to again retrieve the certs from
			// Secrets when a request for an HTTPS endpoint is received.
			log.Printf("[unexpected] error loading TLS certs: %v", err)
		}
	}
	if s.certShareMode == "ro" {
		// Read-only replicas periodically reload shared certs so renewals
		// are eventually picked up.
		go s.runCertReload(context.Background(), logf)
	}
	return s, nil
}
|
||||
|
||||
// SetDialer sets the dial function used by the underlying Kubernetes client
// for connections to the API server.
func (s *Store) SetDialer(d func(ctx context.Context, network, address string) (net.Conn, error)) {
	s.client.SetDialer(d)
}
|
||||
|
||||
// String returns the name of this store implementation, implementing
// fmt.Stringer.
func (s *Store) String() string {
	return "kube.Store"
}
|
||||
|
||||
// ReadState implements the StateStore interface.
|
||||
func (s *Store) ReadState(id ipn.StateKey) ([]byte, error) {
|
||||
return s.memory.ReadState(ipn.StateKey(sanitizeKey(id)))
|
||||
}
|
||||
|
||||
// WriteState implements the StateStore interface.
|
||||
func (s *Store) WriteState(id ipn.StateKey, bs []byte) (err error) {
|
||||
defer func() {
|
||||
if err == nil {
|
||||
s.memory.WriteState(ipn.StateKey(sanitizeKey(id)), bs)
|
||||
}
|
||||
}()
|
||||
return s.updateSecret(map[string][]byte{string(id): bs}, s.secretName)
|
||||
}
|
||||
|
||||
// WriteTLSCertAndKey writes a TLS cert and key to domain.crt, domain.key fields
// of a Tailscale Kubernetes node's state Secret, or — in cert share 'rw'
// mode — to the tls cert/key fields of a separate Secret named after the
// domain.
func (s *Store) WriteTLSCertAndKey(domain string, cert, key []byte) (err error) {
	// Read-only replicas should never write certs; log loudly but keep the
	// original best-effort behavior rather than failing.
	if s.certShareMode == "ro" {
		log.Printf("[unexpected] TLS cert and key write in read-only mode")
	}
	if err := dnsname.ValidHostname(domain); err != nil {
		return fmt.Errorf("invalid domain name %q: %w", domain, err)
	}
	secretName := s.secretName
	data := map[string][]byte{
		domain + ".crt": cert,
		domain + ".key": key,
	}
	// If we run in cert share mode, cert and key for a DNS name are written
	// to a separate Secret.
	if s.certShareMode == "rw" {
		secretName = domain
		data = map[string][]byte{
			keyTLSCert: cert,
			keyTLSKey:  key,
		}
	}
	if err := s.updateSecret(data, secretName); err != nil {
		return fmt.Errorf("error writing TLS cert and key to Secret: %w", err)
	}
	// TODO(irbekrm): certs for write replicas are currently not
	// written to memory to avoid out of sync memory state after
	// Ingress resources have been recreated. This means that TLS
	// certs for write replicas are retrieved from the Secret on
	// each HTTPS request. This is a temporary solution till we
	// implement a Secret watch.
	if s.certShareMode != "rw" {
		s.memory.WriteState(ipn.StateKey(domain+".crt"), cert)
		s.memory.WriteState(ipn.StateKey(domain+".key"), key)
	}
	return nil
}
|
||||
|
||||
// ReadTLSCertAndKey reads a TLS cert and key from memory or from a
// domain-specific Secret. It first checks the in-memory store, if not found in
// memory and running cert store in read-only mode, looks up a Secret.
// Note that write replicas of HA Ingress always retrieve TLS certs from Secrets.
func (s *Store) ReadTLSCertAndKey(domain string) (cert, key []byte, err error) {
	if err := dnsname.ValidHostname(domain); err != nil {
		return nil, nil, fmt.Errorf("invalid domain name %q: %w", domain, err)
	}
	certKey := domain + ".crt"
	keyKey := domain + ".key"
	// Fast path: serve both cert and key from memory when both are cached.
	cert, err = s.memory.ReadState(ipn.StateKey(certKey))
	if err == nil {
		key, err = s.memory.ReadState(ipn.StateKey(keyKey))
		if err == nil {
			return cert, key, nil
		}
	}
	// Outside cert share mode there is no per-domain Secret to fall back to.
	if s.certShareMode == "" {
		return nil, nil, ipn.ErrStateNotExist
	}

	// Fall back to looking up the per-domain Secret in the cluster.
	ctx, cancel := context.WithTimeout(context.Background(), timeout)
	defer cancel()
	secret, err := s.client.GetSecret(ctx, domain)
	if err != nil {
		if kubeclient.IsNotFoundErr(err) {
			// TODO(irbekrm): we should return a more specific error
			// that wraps ipn.ErrStateNotExist here.
			return nil, nil, ipn.ErrStateNotExist
		}
		return nil, nil, fmt.Errorf("getting TLS Secret %q: %w", domain, err)
	}
	cert = secret.Data[keyTLSCert]
	key = secret.Data[keyTLSKey]
	if len(cert) == 0 || len(key) == 0 {
		return nil, nil, ipn.ErrStateNotExist
	}
	// TODO(irbekrm): a read between these two separate writes would
	// get a mismatched cert and key. Allow writing both cert and
	// key to the memory store in a single, lock-protected operation.
	//
	// TODO(irbekrm): currently certs for write replicas of HA Ingress get
	// retrieved from the cluster Secret on each HTTPS request to avoid a
	// situation when after Ingress recreation stale certs are read from
	// memory.
	// Fix this by watching Secrets to ensure that memory store gets updated
	// when Secrets are deleted.
	if s.certShareMode == "ro" {
		s.memory.WriteState(ipn.StateKey(certKey), cert)
		s.memory.WriteState(ipn.StateKey(keyKey), key)
	}
	return cert, key, nil
}
|
||||
|
||||
func (s *Store) updateSecret(data map[string][]byte, secretName string) (err error) {
|
||||
ctx, cancel := context.WithTimeout(context.Background(), timeout)
|
||||
defer func() {
|
||||
if err != nil {
|
||||
if err := s.client.Event(ctx, eventTypeWarning, reasonTailscaleStateUpdateFailed, err.Error()); err != nil {
|
||||
log.Printf("kubestore: error creating tailscaled state update Event: %v", err)
|
||||
}
|
||||
} else {
|
||||
if err := s.client.Event(ctx, eventTypeNormal, reasonTailscaleStateUpdated, "Successfully updated tailscaled state Secret"); err != nil {
|
||||
log.Printf("kubestore: error creating tailscaled state Event: %v", err)
|
||||
}
|
||||
}
|
||||
cancel()
|
||||
}()
|
||||
secret, err := s.client.GetSecret(ctx, secretName)
|
||||
if err != nil {
|
||||
// If the Secret does not exist, create it with the required data.
|
||||
if kubeclient.IsNotFoundErr(err) && s.canCreateSecret(secretName) {
|
||||
return s.client.CreateSecret(ctx, &kubeapi.Secret{
|
||||
TypeMeta: kubeapi.TypeMeta{
|
||||
APIVersion: "v1",
|
||||
Kind: "Secret",
|
||||
},
|
||||
ObjectMeta: kubeapi.ObjectMeta{
|
||||
Name: secretName,
|
||||
},
|
||||
Data: func(m map[string][]byte) map[string][]byte {
|
||||
d := make(map[string][]byte, len(m))
|
||||
for key, val := range m {
|
||||
d[sanitizeKey(key)] = val
|
||||
}
|
||||
return d
|
||||
}(data),
|
||||
})
|
||||
}
|
||||
return fmt.Errorf("error getting Secret %s: %w", secretName, err)
|
||||
}
|
||||
if s.canPatchSecret(secretName) {
|
||||
var m []kubeclient.JSONPatch
|
||||
// If the user has pre-created a Secret with no data, we need to ensure the top level /data field.
|
||||
if len(secret.Data) == 0 {
|
||||
m = []kubeclient.JSONPatch{
|
||||
{
|
||||
Op: "add",
|
||||
Path: "/data",
|
||||
Value: func(m map[string][]byte) map[string][]byte {
|
||||
d := make(map[string][]byte, len(m))
|
||||
for key, val := range m {
|
||||
d[sanitizeKey(key)] = val
|
||||
}
|
||||
return d
|
||||
}(data),
|
||||
},
|
||||
}
|
||||
// If the Secret has data, patch it with the new data.
|
||||
} else {
|
||||
for key, val := range data {
|
||||
m = append(m, kubeclient.JSONPatch{
|
||||
Op: "add",
|
||||
Path: "/data/" + sanitizeKey(key),
|
||||
Value: val,
|
||||
})
|
||||
}
|
||||
}
|
||||
if err := s.client.JSONPatchResource(ctx, secretName, kubeclient.TypeSecrets, m); err != nil {
|
||||
return fmt.Errorf("error patching Secret %s: %w", secretName, err)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
// No patch permissions, use UPDATE instead.
|
||||
for key, val := range data {
|
||||
mak.Set(&secret.Data, sanitizeKey(key), val)
|
||||
}
|
||||
if err := s.client.UpdateSecret(ctx, secret); err != nil {
|
||||
return fmt.Errorf("error updating Secret %s: %w", s.secretName, err)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// loadState reads the state Secret and replaces the in-memory store's
// contents with its data. It returns ipn.ErrStateNotExist if the Secret does
// not exist. A Kubernetes Event is recorded on load success or failure
// (other than not-found); Event creation errors are only logged.
func (s *Store) loadState() (err error) {
	ctx, cancel := context.WithTimeout(context.Background(), timeout)
	defer cancel()

	secret, err := s.client.GetSecret(ctx, s.secretName)
	if err != nil {
		// A 404 means no state has been persisted yet; not worth an Event.
		if st, ok := err.(*kubeapi.Status); ok && st.Code == 404 {
			return ipn.ErrStateNotExist
		}
		if err := s.client.Event(ctx, eventTypeWarning, reasonTailscaleStateLoadFailed, err.Error()); err != nil {
			log.Printf("kubestore: error creating Event: %v", err)
		}
		return err
	}
	if err := s.client.Event(ctx, eventTypeNormal, reasonTailscaleStateLoaded, "Successfully loaded tailscaled state from Secret"); err != nil {
		log.Printf("kubestore: error creating Event: %v", err)
	}
	s.memory.LoadFromMap(secret.Data)
	return nil
}
|
||||
|
||||
// runCertReload relists and reloads all TLS certs for endpoints shared by this
|
||||
// node from Secrets other than the state Secret to ensure that renewed certs get eventually loaded.
|
||||
// It is not critical to reload a cert immediately after
|
||||
// renewal, so a daily check is acceptable.
|
||||
// Currently (3/2025) this is only used for the shared HA Ingress certs on 'read' replicas.
|
||||
// Note that if shared certs are not found in memory on an HTTPS request, we
|
||||
// do a Secret lookup, so this mechanism does not need to ensure that newly
|
||||
// added Ingresses' certs get loaded.
|
||||
func (s *Store) runCertReload(ctx context.Context, logf logger.Logf) {
|
||||
ticker := time.NewTicker(time.Hour * 24)
|
||||
defer ticker.Stop()
|
||||
for {
|
||||
select {
|
||||
case <-ctx.Done():
|
||||
return
|
||||
case <-ticker.C:
|
||||
sel := s.certSecretSelector()
|
||||
if err := s.loadCerts(ctx, sel); err != nil {
|
||||
logf("[unexpected] error reloading TLS certs: %v", err)
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// loadCerts lists all Secrets matching the provided label selector and loads
// TLS certs and keys from those into the in-memory store, keyed as
// "<secret name>.crt" / "<secret name>.key".
func (s *Store) loadCerts(ctx context.Context, sel map[string]string) error {
	ss, err := s.client.ListSecrets(ctx, sel)
	if err != nil {
		return fmt.Errorf("error listing TLS Secrets: %w", err)
	}
	for _, secret := range ss.Items {
		// Skip Secrets missing a non-empty cert or key.
		if !hasTLSData(&secret) {
			continue
		}
		// Only load secrets that have valid domain names (ending in .ts.net)
		if !strings.HasSuffix(secret.Name, ".ts.net") {
			continue
		}
		s.memory.WriteState(ipn.StateKey(secret.Name)+".crt", secret.Data[keyTLSCert])
		s.memory.WriteState(ipn.StateKey(secret.Name)+".key", secret.Data[keyTLSKey])
	}
	return nil
}
|
||||
|
||||
// canCreateSecret returns true if this node should be allowed to create the given
|
||||
// Secret in its namespace.
|
||||
func (s *Store) canCreateSecret(secret string) bool {
|
||||
// Only allow creating the state Secret (and not TLS Secrets).
|
||||
return secret == s.secretName
|
||||
}
|
||||
|
||||
// canPatchSecret returns true if this node should be allowed to patch the given
|
||||
// Secret.
|
||||
func (s *Store) canPatchSecret(secret string) bool {
|
||||
// For backwards compatibility reasons, setups where the proxies are not
|
||||
// given PATCH permissions for state Secrets are allowed. For TLS
|
||||
// Secrets, we should always have PATCH permissions.
|
||||
if secret == s.secretName {
|
||||
return s.canPatch
|
||||
}
|
||||
return true
|
||||
}
|
||||
|
||||
// certSecretSelector returns a label selector that can be used to list all
|
||||
// Secrets that aren't Tailscale state Secrets and contain TLS certificates for
|
||||
// HTTPS endpoints that this node serves.
|
||||
// Currently (3/2025) this only applies to the Kubernetes Operator's ingress
|
||||
// ProxyGroup.
|
||||
func (s *Store) certSecretSelector() map[string]string {
|
||||
if s.podName == "" {
|
||||
return map[string]string{}
|
||||
}
|
||||
p := strings.LastIndex(s.podName, "-")
|
||||
if p == -1 {
|
||||
return map[string]string{}
|
||||
}
|
||||
pgName := s.podName[:p]
|
||||
return map[string]string{
|
||||
kubetypes.LabelSecretType: "certs",
|
||||
kubetypes.LabelManaged: "true",
|
||||
"tailscale.com/proxy-group": pgName,
|
||||
}
|
||||
}
|
||||
|
||||
// hasTLSData returns true if the provided Secret contains non-empty TLS cert and key.
|
||||
func hasTLSData(s *kubeapi.Secret) bool {
|
||||
return len(s.Data[keyTLSCert]) != 0 && len(s.Data[keyTLSKey]) != 0
|
||||
}
|
||||
|
||||
// sanitizeKey converts any value that can be converted to a string into a
// valid Kubernetes Secret key: every rune outside [a-zA-Z0-9], '-', '_' and
// '.' is replaced with an underscore.
// https://kubernetes.io/docs/concepts/configuration/secret/#restriction-names-data.
func sanitizeKey[T ~string](k T) string {
	allowed := func(r rune) bool {
		switch {
		case r >= 'a' && r <= 'z',
			r >= 'A' && r <= 'Z',
			r >= '0' && r <= '9',
			r == '-', r == '_', r == '.':
			return true
		}
		return false
	}
	return strings.Map(func(r rune) rune {
		if allowed(r) {
			return r
		}
		return '_'
	}, string(k))
}
|
||||
26
vendor/tailscale.com/ipn/store/store_aws.go
generated
vendored
26
vendor/tailscale.com/ipn/store/store_aws.go
generated
vendored
@@ -1,26 +0,0 @@
|
||||
// Copyright (c) Tailscale Inc & AUTHORS
|
||||
// SPDX-License-Identifier: BSD-3-Clause
|
||||
|
||||
//go:build (ts_aws || (linux && (arm64 || amd64))) && !ts_omit_aws
|
||||
|
||||
package store
|
||||
|
||||
import (
|
||||
"tailscale.com/ipn"
|
||||
"tailscale.com/ipn/store/awsstore"
|
||||
"tailscale.com/types/logger"
|
||||
)
|
||||
|
||||
func init() {
	// Make the AWS SSM store factory available for registration.
	registerAvailableExternalStores = append(registerAvailableExternalStores, registerAWSStore)
}
|
||||
|
||||
// registerAWSStore registers a Provider for the "arn:" prefix that persists
// state via AWS SSM, parsing the ARN and any options from the full path.
func registerAWSStore() {
	Register("arn:", func(logf logger.Logf, arg string) (ipn.StateStore, error) {
		ssmARN, opts, err := awsstore.ParseARNAndOpts(arg)
		if err != nil {
			return nil, err
		}
		return awsstore.New(logf, ssmARN, opts...)
	})
}
|
||||
25
vendor/tailscale.com/ipn/store/store_kube.go
generated
vendored
25
vendor/tailscale.com/ipn/store/store_kube.go
generated
vendored
@@ -1,25 +0,0 @@
|
||||
// Copyright (c) Tailscale Inc & AUTHORS
|
||||
// SPDX-License-Identifier: BSD-3-Clause
|
||||
|
||||
//go:build (ts_kube || (linux && (arm64 || amd64))) && !ts_omit_kube
|
||||
|
||||
package store
|
||||
|
||||
import (
|
||||
"strings"
|
||||
|
||||
"tailscale.com/ipn"
|
||||
"tailscale.com/ipn/store/kubestore"
|
||||
"tailscale.com/types/logger"
|
||||
)
|
||||
|
||||
func init() {
	// Make the Kubernetes store factory available for registration.
	registerAvailableExternalStores = append(registerAvailableExternalStores, registerKubeStore)
}
|
||||
|
||||
// registerKubeStore registers a Provider for the "kube:" prefix that
// persists state in a Kubernetes Secret named by the path suffix.
func registerKubeStore() {
	Register("kube:", func(logf logger.Logf, path string) (ipn.StateStore, error) {
		secretName := strings.TrimPrefix(path, "kube:")
		return kubestore.New(logf, secretName)
	})
}
|
||||
175
vendor/tailscale.com/ipn/store/stores.go
generated
vendored
175
vendor/tailscale.com/ipn/store/stores.go
generated
vendored
@@ -7,10 +7,14 @@ package store
|
||||
import (
|
||||
"bytes"
|
||||
"encoding/json"
|
||||
"errors"
|
||||
"fmt"
|
||||
"iter"
|
||||
"maps"
|
||||
"os"
|
||||
"path/filepath"
|
||||
"runtime"
|
||||
"slices"
|
||||
"strings"
|
||||
"sync"
|
||||
|
||||
@@ -20,26 +24,22 @@ import (
|
||||
"tailscale.com/paths"
|
||||
"tailscale.com/types/logger"
|
||||
"tailscale.com/util/mak"
|
||||
"tailscale.com/util/testenv"
|
||||
)
|
||||
|
||||
// Provider returns a StateStore for the provided path.
|
||||
// The arg is of the form "prefix:rest", where prefix was previously registered with Register.
|
||||
type Provider func(logf logger.Logf, arg string) (ipn.StateStore, error)
|
||||
|
||||
var regOnce sync.Once
|
||||
|
||||
var registerAvailableExternalStores []func()
|
||||
|
||||
func registerDefaultStores() {
|
||||
func init() {
|
||||
Register("mem:", mem.New)
|
||||
|
||||
for _, f := range registerAvailableExternalStores {
|
||||
f()
|
||||
}
|
||||
}
|
||||
|
||||
var knownStores map[string]Provider
|
||||
|
||||
// TPMPrefix is the path prefix used for TPM-encrypted StateStore.
|
||||
const TPMPrefix = "tpmseal:"
|
||||
|
||||
// New returns a StateStore based on the provided arg
|
||||
// and registered stores.
|
||||
// The arg is of the form "prefix:rest", where prefix was previously
|
||||
@@ -53,19 +53,31 @@ var knownStores map[string]Provider
|
||||
// the suffix an AWS ARN for an SSM.
|
||||
// - (Linux-only) if the string begins with "kube:",
|
||||
// the suffix is a Kubernetes secret name
|
||||
// - (Linux or Windows) if the string begins with "tpmseal:", the suffix is
|
||||
// filepath that is sealed with the local TPM device.
|
||||
// - In all other cases, the path is treated as a filepath.
|
||||
func New(logf logger.Logf, path string) (ipn.StateStore, error) {
|
||||
regOnce.Do(registerDefaultStores)
|
||||
for prefix, sf := range knownStores {
|
||||
if strings.HasPrefix(path, prefix) {
|
||||
// We can't strip the prefix here as some NewStoreFunc (like arn:)
|
||||
// expect the prefix.
|
||||
if prefix == TPMPrefix {
|
||||
if runtime.GOOS == "windows" {
|
||||
path = TPMPrefix + TryWindowsAppDataMigration(logf, strings.TrimPrefix(path, TPMPrefix))
|
||||
}
|
||||
if err := maybeMigrateLocalStateFile(logf, path); err != nil {
|
||||
return nil, fmt.Errorf("failed to migrate existing state file to TPM-sealed format: %w", err)
|
||||
}
|
||||
}
|
||||
return sf(logf, path)
|
||||
}
|
||||
}
|
||||
if runtime.GOOS == "windows" {
|
||||
path = TryWindowsAppDataMigration(logf, path)
|
||||
}
|
||||
if err := maybeMigrateLocalStateFile(logf, path); err != nil {
|
||||
return nil, fmt.Errorf("failed to migrate existing TPM-sealed state file to plaintext format: %w", err)
|
||||
}
|
||||
return NewFileStore(logf, path)
|
||||
}
|
||||
|
||||
@@ -84,6 +96,29 @@ func Register(prefix string, fn Provider) {
|
||||
mak.Set(&knownStores, prefix, fn)
|
||||
}
|
||||
|
||||
// RegisterForTest registers a prefix to be used for NewStore in tests. An
// existing registered prefix will be replaced. The previous registration map
// is restored when the test finishes.
func RegisterForTest(t testenv.TB, prefix string, fn Provider) {
	if len(prefix) == 0 {
		panic("prefix is empty")
	}
	// Snapshot current registrations so the cleanup can undo this change.
	old := maps.Clone(knownStores)
	t.Cleanup(func() { knownStores = old })

	mak.Set(&knownStores, prefix, fn)
}
|
||||
|
||||
// HasKnownProviderPrefix reports whether path uses one of the registered
|
||||
// Provider prefixes.
|
||||
func HasKnownProviderPrefix(path string) bool {
|
||||
for prefix := range knownStores {
|
||||
if strings.HasPrefix(path, prefix) {
|
||||
return true
|
||||
}
|
||||
}
|
||||
return false
|
||||
}
|
||||
|
||||
// TryWindowsAppDataMigration attempts to copy the Windows state file
|
||||
// from its old location to the new location. (Issue 2856)
|
||||
//
|
||||
@@ -186,3 +221,123 @@ func (s *FileStore) WriteState(id ipn.StateKey, bs []byte) error {
|
||||
}
|
||||
return atomicfile.WriteFile(s.path, bs, 0600)
|
||||
}
|
||||
|
||||
// All returns an iterator over all cached state keys and values. The store's
// mutex is held for the duration of the iteration, so calling ReadState or
// WriteState from inside the loop would deadlock.
func (s *FileStore) All() iter.Seq2[ipn.StateKey, []byte] {
	return func(yield func(ipn.StateKey, []byte) bool) {
		s.mu.Lock()
		defer s.mu.Unlock()

		for k, v := range s.cache {
			if !yield(k, v) {
				break
			}
		}
	}
}
|
||||
|
||||
// Ensure FileStore implements ExportableStore for migration to/from
// tpm.tpmStore.
var _ ExportableStore = (*FileStore)(nil)

// ExportableStore is an ipn.StateStore that can export all of its contents.
// This interface is optional to implement, and used for migrating the state
// between different store implementations.
type ExportableStore interface {
	ipn.StateStore

	// All returns an iterator over all store keys. Using ReadState or
	// WriteState is not safe while iterating and can lead to a deadlock. The
	// order of keys in the iterator is not specified and may change between
	// runs.
	All() iter.Seq2[ipn.StateKey, []byte]
}
|
||||
|
||||
// maybeMigrateLocalStateFile converts the state file at path between
// plaintext and TPM-sealed formats when the requested format (signaled by a
// TPMPrefix on path) does not match the format detected on disk. It is a
// no-op when the file does not exist or is already in the requested format.
func maybeMigrateLocalStateFile(logf logger.Logf, path string) error {
	path, toTPM := strings.CutPrefix(path, TPMPrefix)

	// Extract JSON keys from the file on disk and guess what kind it is.
	bs, err := os.ReadFile(path)
	if err != nil {
		if errors.Is(err, os.ErrNotExist) {
			// Nothing to migrate.
			return nil
		}
		return err
	}
	var content map[string]any
	if err := json.Unmarshal(bs, &content); err != nil {
		return fmt.Errorf("failed to unmarshal %q: %w", path, err)
	}
	keys := slices.Sorted(maps.Keys(content))
	tpmKeys := []string{"key", "nonce", "data"}
	slices.Sort(tpmKeys)
	// TPM-sealed files will have exactly these keys.
	existingFileSealed := slices.Equal(keys, tpmKeys)
	// Plaintext files for nodes that registered at least once will have this
	// key, plus other dynamic ones.
	_, existingFilePlaintext := content["_machinekey"]
	isTPM := existingFileSealed && !existingFilePlaintext

	if isTPM == toTPM {
		// No migration needed.
		return nil
	}

	newTPMStore, ok := knownStores[TPMPrefix]
	if !ok {
		return errors.New("this build does not support TPM integration")
	}

	// Open from (old format) and to (new format) stores for migration. The
	// "to" store will be at tmpPath.
	var from, to ipn.StateStore
	tmpPath := path + ".tmp"
	if toTPM {
		// Migrate plaintext file to be TPM-sealed.
		from, err = NewFileStore(logf, path)
		if err != nil {
			return fmt.Errorf("NewFileStore(%q): %w", path, err)
		}
		to, err = newTPMStore(logf, TPMPrefix+tmpPath)
		if err != nil {
			return fmt.Errorf("newTPMStore(%q): %w", tmpPath, err)
		}
	} else {
		// Migrate TPM-sealed file to plaintext.
		from, err = newTPMStore(logf, TPMPrefix+path)
		if err != nil {
			return fmt.Errorf("newTPMStore(%q): %w", path, err)
		}
		to, err = NewFileStore(logf, tmpPath)
		if err != nil {
			return fmt.Errorf("NewFileStore(%q): %w", tmpPath, err)
		}
	}
	defer os.Remove(tmpPath)

	fromExp, ok := from.(ExportableStore)
	if !ok {
		return fmt.Errorf("%T does not implement the exportableStore interface", from)
	}

	// Copy all the items. This is pretty inefficient, because both stores
	// write the file to disk for each WriteState, but that's ok for a one-time
	// migration.
	for k, v := range fromExp.All() {
		if err := to.WriteState(k, v); err != nil {
			return err
		}
	}

	// Finally, overwrite the state file with the new one we created at
	// tmpPath.
	if err := atomicfile.Rename(tmpPath, path); err != nil {
		return err
	}

	if toTPM {
		logf("migrated %q from plaintext to TPM-sealed format", path)
	} else {
		logf("migrated %q from TPM-sealed to plaintext format", path)
	}
	return nil
}
|
||||
|
||||
Reference in New Issue
Block a user