Update dependencies
256 vendor/tailscale.com/ipn/backend.go (generated, vendored, Normal file)
@@ -0,0 +1,256 @@
// Copyright (c) Tailscale Inc & AUTHORS
// SPDX-License-Identifier: BSD-3-Clause

package ipn

import (
	"fmt"
	"strings"
	"time"

	"tailscale.com/drive"
	"tailscale.com/health"
	"tailscale.com/ipn/ipnstate"
	"tailscale.com/tailcfg"
	"tailscale.com/types/empty"
	"tailscale.com/types/key"
	"tailscale.com/types/netmap"
	"tailscale.com/types/structs"
	"tailscale.com/types/views"
)

type State int

const (
	NoState          State = 0
	InUseOtherUser   State = 1
	NeedsLogin       State = 2
	NeedsMachineAuth State = 3
	Stopped          State = 4
	Starting         State = 5
	Running          State = 6
)

// GoogleIDTokenType is the tailcfg.Oauth2Token.TokenType for the Google
// ID tokens used by the Android client.
const GoogleIDTokenType = "ts_android_google_login"

func (s State) String() string {
	return [...]string{
		"NoState",
		"InUseOtherUser",
		"NeedsLogin",
		"NeedsMachineAuth",
		"Stopped",
		"Starting",
		"Running"}[s]
}

// EngineStatus contains WireGuard engine stats.
type EngineStatus struct {
	RBytes, WBytes int64
	NumLive        int
	LiveDERPs      int // number of active DERP connections
	LivePeers      map[key.NodePublic]ipnstate.PeerStatusLite
}

// NotifyWatchOpt is a bitmask of options about what type of Notify messages
// to subscribe to.
type NotifyWatchOpt uint64

const (
	// NotifyWatchEngineUpdates, if set, causes Engine updates to be sent to the
	// client either regularly or when they change, without having to ask for
	// each one via Engine.RequestStatus.
	NotifyWatchEngineUpdates NotifyWatchOpt = 1 << iota

	NotifyInitialState  // if set, the first Notify message (sent immediately) will contain the current State + BrowseToURL + SessionID
	NotifyInitialPrefs  // if set, the first Notify message (sent immediately) will contain the current Prefs
	NotifyInitialNetMap // if set, the first Notify message (sent immediately) will contain the current NetMap

	NotifyNoPrivateKeys        // if set, private keys that would normally be sent in updates are zeroed out
	NotifyInitialDriveShares   // if set, the first Notify message (sent immediately) will contain the current Taildrive Shares
	NotifyInitialOutgoingFiles // if set, the first Notify message (sent immediately) will contain the current Taildrop OutgoingFiles

	NotifyInitialHealthState // if set, the first Notify message (sent immediately) will contain the current health.State of the client
)

// Notify is a communication from a backend (e.g. tailscaled) to a frontend
// (cmd/tailscale, iOS, macOS, Win Tasktray).
// In any given notification, any or all of these may be nil, meaning
// that they have not changed.
// They are JSON-encoded on the wire, despite the lack of struct tags.
type Notify struct {
	_       structs.Incomparable
	Version string // version number of IPN backend

	// SessionID identifies the unique WatchIPNBus session.
	// This field is only set in the first message when requesting
	// NotifyInitialState. Clients must store it on their side as
	// following notifications will not include this field.
	SessionID string `json:",omitempty"`

	// ErrMessage, if non-nil, contains a critical error message.
	// For State InUseOtherUser, ErrMessage is not critical and just contains the details.
	ErrMessage *string

	LoginFinished *empty.Message     // non-nil when/if the login process succeeded
	State         *State             // if non-nil, the new or current IPN state
	Prefs         *PrefsView         // if non-nil && Valid, the new or current preferences
	NetMap        *netmap.NetworkMap // if non-nil, the new or current netmap
	Engine        *EngineStatus      // if non-nil, the new or current wireguard stats
	BrowseToURL   *string            // if non-nil, UI should open a browser right now
	BackendLogID  *string            // if non-nil, the public logtail ID used by backend

	// FilesWaiting if non-nil means that files are buffered in
	// the Tailscale daemon and ready for local transfer to the
	// user's preferred storage location.
	//
	// Deprecated: use LocalClient.AwaitWaitingFiles instead.
	FilesWaiting *empty.Message `json:",omitempty"`

	// IncomingFiles, if non-nil, specifies which files are in the
	// process of being received. A nil IncomingFiles means this
	// Notify should not update the state of file transfers. A non-nil
	// but empty IncomingFiles means that no files are in the middle
	// of being transferred.
	//
	// Deprecated: use LocalClient.AwaitWaitingFiles instead.
	IncomingFiles []PartialFile `json:",omitempty"`

	// OutgoingFiles, if non-nil, tracks which files are in the process of
	// being sent via TailDrop, including files that finished, whether
	// successful or failed. This slice is sorted by Started time, then Name.
	OutgoingFiles []*OutgoingFile `json:",omitempty"`

	// LocalTCPPort, if non-nil, informs the UI frontend which
	// (non-zero) localhost TCP port it's listening on.
	// This is currently only used by Tailscale when run in the
	// macOS Network Extension.
	LocalTCPPort *uint16 `json:",omitempty"`

	// ClientVersion, if non-nil, describes whether a client version update
	// is available.
	ClientVersion *tailcfg.ClientVersion `json:",omitempty"`

	// DriveShares tracks the full set of current DriveShares that we're
	// publishing. Some client applications, like the MacOS and Windows clients,
	// will listen for updates to this and handle serving these shares under
	// the identity of the unprivileged user that is running the application. A
	// nil value here means that we're not broadcasting shares information, an
	// empty value means that there are no shares.
	DriveShares views.SliceView[*drive.Share, drive.ShareView]

	// Health is the last-known health state of the backend. When this field is
	// non-nil, a change in health has occurred, and the API client should surface
	// any changes to the user in the UI.
	Health *health.State `json:",omitempty"`

	// type is mirrored in xcode/Shared/IPN.swift
}

func (n Notify) String() string {
	var sb strings.Builder
	sb.WriteString("Notify{")
	if n.ErrMessage != nil {
		fmt.Fprintf(&sb, "err=%q ", *n.ErrMessage)
	}
	if n.LoginFinished != nil {
		sb.WriteString("LoginFinished ")
	}
	if n.State != nil {
		fmt.Fprintf(&sb, "state=%v ", *n.State)
	}
	if n.Prefs != nil && n.Prefs.Valid() {
		fmt.Fprintf(&sb, "%v ", n.Prefs.Pretty())
	}
	if n.NetMap != nil {
		sb.WriteString("NetMap{...} ")
	}
	if n.Engine != nil {
		fmt.Fprintf(&sb, "wg=%v ", *n.Engine)
	}
	if n.BrowseToURL != nil {
		sb.WriteString("URL=<...> ")
	}
	if n.BackendLogID != nil {
		sb.WriteString("BackendLogID ")
	}
	if n.FilesWaiting != nil {
		sb.WriteString("FilesWaiting ")
	}
	if len(n.IncomingFiles) != 0 {
		sb.WriteString("IncomingFiles ")
	}
	if n.LocalTCPPort != nil {
		fmt.Fprintf(&sb, "tcpport=%v ", n.LocalTCPPort)
	}
	if n.Health != nil {
		sb.WriteString("Health{...} ")
	}
	s := sb.String()
	return s[0:len(s)-1] + "}"
}

// PartialFile represents an in-progress incoming file transfer.
type PartialFile struct {
	Name         string    // e.g. "foo.jpg"
	Started      time.Time // time transfer started
	DeclaredSize int64     // or -1 if unknown
	Received     int64     // bytes copied thus far

	// PartialPath is set non-empty in "direct" file mode to the
	// in-progress '*.partial' file's path when the peerapi isn't
	// being used; see LocalBackend.SetDirectFileRoot.
	PartialPath string `json:",omitempty"`
	FinalPath   string `json:",omitempty"`

	// Done is set in "direct" mode when the partial file has been
	// closed and is ready for the caller to rename away the
	// ".partial" suffix.
	Done bool `json:",omitempty"`
}

// OutgoingFile represents an in-progress outgoing file transfer.
type OutgoingFile struct {
	ID           string               `json:",omitempty"` // unique identifier for this transfer (a type 4 UUID)
	PeerID       tailcfg.StableNodeID `json:",omitempty"` // identifier for the peer to which this is being transferred
	Name         string               `json:",omitempty"` // e.g. "foo.jpg"
	Started      time.Time            // time transfer started
	DeclaredSize int64                // or -1 if unknown
	Sent         int64                // bytes copied thus far
	Finished     bool                 // indicates whether or not the transfer finished
	Succeeded    bool                 // for a finished transfer, indicates whether or not it was successful
}

// StateKey is an opaque identifier for a set of LocalBackend state
// (preferences, private keys, etc.). It is also used as a key for
// the various LoginProfiles that the instance may be signed into.
//
// Additionally, the StateKey can be a debug setting name:
//
//   - "_debug_magicsock_until" with value being a unix timestamp stringified
//   - "_debug_<component>_until" with value being a unix timestamp stringified
type StateKey string

// DebuggableComponents is a list of components whose debugging can be turned on
// and off individually using the tailscale debug command.
var DebuggableComponents = []string{
	"magicsock",
	"sockstats",
	"syspolicy",
}

type Options struct {
	// FrontendLogID is the public logtail id used by the frontend.
	FrontendLogID string
	// UpdatePrefs, if provided, overrides the Prefs already stored in the
	// backend state, *except* for the Persist member.
	//
	// TODO(apenwarr): Rename this to Prefs, and possibly move Prefs.Persist
	// elsewhere entirely (as it always should have been).
	UpdatePrefs *Prefs
	// AuthKey is an optional node auth key used to authorize a
	// new node key without user interaction.
	AuthKey string
}

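As a rough illustration of how these types are meant to be consumed, here is a minimal sketch of a frontend reacting to a single Notify value. The package name and logging are invented for the example; how the JSON-decoded Notify arrives (e.g. a WatchIPNBus-style stream) is outside the scope of this file.

package frontendexample

import (
	"log"

	"tailscale.com/ipn"
)

// handleNotify reacts only to the fields that are set in a Notify;
// nil fields mean "unchanged", per the comments on the Notify type.
func handleNotify(n ipn.Notify) {
	if n.ErrMessage != nil {
		log.Printf("backend error: %s", *n.ErrMessage)
	}
	if n.State != nil && *n.State == ipn.NeedsLogin && n.BrowseToURL != nil {
		// NeedsLogin is typically paired with a BrowseToURL to complete auth.
		log.Printf("visit %s to authenticate", *n.BrowseToURL)
	}
	if n.State != nil && *n.State == ipn.Running {
		log.Printf("backend is running")
	}
	if n.Engine != nil {
		// Engine updates arrive when NotifyWatchEngineUpdates was requested.
		log.Printf("wg: %d live peers, %d DERP connections", n.Engine.NumLive, n.Engine.LiveDERPs)
	}
}
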
141 vendor/tailscale.com/ipn/conf.go (generated, vendored, Normal file)
@@ -0,0 +1,141 @@
// Copyright (c) Tailscale Inc & AUTHORS
// SPDX-License-Identifier: BSD-3-Clause

package ipn

import (
	"net/netip"

	"tailscale.com/tailcfg"
	"tailscale.com/types/opt"
	"tailscale.com/types/preftype"
)

// ConfigVAlpha is the config file format for the "alpha0" version.
type ConfigVAlpha struct {
	Version string   // "alpha0" for now
	Locked  opt.Bool `json:",omitempty"` // whether the config is locked from being changed by 'tailscale set'; it defaults to true

	ServerURL *string  `json:",omitempty"` // defaults to https://controlplane.tailscale.com
	AuthKey   *string  `json:",omitempty"` // as needed if NeedsLogin. either key or path to a file (if prefixed with "file:")
	Enabled   opt.Bool `json:",omitempty"` // wantRunning; empty string defaults to true

	OperatorUser *string `json:",omitempty"` // local user name who is allowed to operate tailscaled without being root or using sudo
	Hostname     *string `json:",omitempty"`

	AcceptDNS    opt.Bool `json:"acceptDNS,omitempty"`    // --accept-dns
	AcceptRoutes opt.Bool `json:"acceptRoutes,omitempty"` // --accept-routes; defaults to true

	ExitNode                   *string  `json:"exitNode,omitempty"` // IP, StableID, or MagicDNS base name
	AllowLANWhileUsingExitNode opt.Bool `json:"allowLANWhileUsingExitNode,omitempty"`

	AdvertiseRoutes []netip.Prefix `json:",omitempty"`
	DisableSNAT     opt.Bool       `json:",omitempty"`

	NetfilterMode       *string  `json:",omitempty"` // "on", "off", "nodivert"
	NoStatefulFiltering opt.Bool `json:",omitempty"`

	PostureChecking opt.Bool         `json:",omitempty"`
	RunSSHServer    opt.Bool         `json:",omitempty"` // Tailscale SSH
	RunWebClient    opt.Bool         `json:",omitempty"`
	ShieldsUp       opt.Bool         `json:",omitempty"`
	AutoUpdate      *AutoUpdatePrefs `json:",omitempty"`
	ServeConfigTemp *ServeConfig     `json:",omitempty"` // TODO(bradfitz,maisem): make separate stable type for this

	// StaticEndpoints are additional, user-defined endpoints that this node
	// should advertise amongst its wireguard endpoints.
	StaticEndpoints []netip.AddrPort `json:",omitempty"`

	// TODO(bradfitz,maisem): future something like:
	// Profile map[string]*Config // keyed by alice@gmail.com, corp.com (TailnetSID)
}

func (c *ConfigVAlpha) ToPrefs() (MaskedPrefs, error) {
	var mp MaskedPrefs
	if c == nil {
		return mp, nil
	}

	mp.WantRunning = !c.Enabled.EqualBool(false)
	mp.WantRunningSet = mp.WantRunning || c.Enabled != ""
	if c.ServerURL != nil {
		mp.ControlURL = *c.ServerURL
		mp.ControlURLSet = true
	}
	if c.AuthKey != nil && *c.AuthKey != "" {
		mp.LoggedOut = false
		mp.LoggedOutSet = true
	}
	if c.OperatorUser != nil {
		mp.OperatorUser = *c.OperatorUser
		mp.OperatorUserSet = true
	}
	if c.Hostname != nil {
		mp.Hostname = *c.Hostname
		mp.HostnameSet = true
	}
	if c.AcceptDNS != "" {
		mp.CorpDNS = c.AcceptDNS.EqualBool(true)
		mp.CorpDNSSet = true
	}
	if c.AcceptRoutes != "" {
		mp.RouteAll = c.AcceptRoutes.EqualBool(true)
		mp.RouteAllSet = true
	}
	if c.ExitNode != nil {
		ip, err := netip.ParseAddr(*c.ExitNode)
		if err == nil {
			mp.ExitNodeIP = ip
			mp.ExitNodeIPSet = true
		} else {
			mp.ExitNodeID = tailcfg.StableNodeID(*c.ExitNode)
			mp.ExitNodeIDSet = true
		}
	}
	if c.AllowLANWhileUsingExitNode != "" {
		mp.ExitNodeAllowLANAccess = c.AllowLANWhileUsingExitNode.EqualBool(true)
		mp.ExitNodeAllowLANAccessSet = true
	}
	if c.AdvertiseRoutes != nil {
		mp.AdvertiseRoutes = c.AdvertiseRoutes
		mp.AdvertiseRoutesSet = true
	}
	if c.DisableSNAT != "" {
		mp.NoSNAT = c.DisableSNAT.EqualBool(true)
		mp.NoSNATSet = true
	}
	if c.NoStatefulFiltering != "" {
		mp.NoStatefulFiltering = c.NoStatefulFiltering
		mp.NoStatefulFilteringSet = true
	}

	if c.NetfilterMode != nil {
		m, err := preftype.ParseNetfilterMode(*c.NetfilterMode)
		if err != nil {
			return mp, err
		}
		mp.NetfilterMode = m
		mp.NetfilterModeSet = true
	}
	if c.PostureChecking != "" {
		mp.PostureChecking = c.PostureChecking.EqualBool(true)
		mp.PostureCheckingSet = true
	}
	if c.RunSSHServer != "" {
		mp.RunSSH = c.RunSSHServer.EqualBool(true)
		mp.RunSSHSet = true
	}
	if c.RunWebClient != "" {
		mp.RunWebClient = c.RunWebClient.EqualBool(true)
		mp.RunWebClientSet = true
	}
	if c.ShieldsUp != "" {
		mp.ShieldsUp = c.ShieldsUp.EqualBool(true)
		mp.ShieldsUpSet = true
	}
	if c.AutoUpdate != nil {
		mp.AutoUpdate = *c.AutoUpdate
		mp.AutoUpdateSet = AutoUpdatePrefsMask{ApplySet: true, CheckSet: true}
	}
	return mp, nil
}

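A short sketch of the conversion above in use: decode an already-standardized alpha0 JSON document into ConfigVAlpha and hand it to ToPrefs. Note that only the *Set mask fields for keys actually present in the config end up set, so unspecified settings are left untouched. The helper and package name are invented for illustration.

package confexample

import (
	"encoding/json"
	"fmt"

	"tailscale.com/ipn"
)

// toMaskedPrefs decodes a standardized-JSON config in the "alpha0" format
// and converts it to the MaskedPrefs produced by ConfigVAlpha.ToPrefs.
func toMaskedPrefs(std []byte) (ipn.MaskedPrefs, error) {
	var cfg ipn.ConfigVAlpha
	if err := json.Unmarshal(std, &cfg); err != nil {
		return ipn.MaskedPrefs{}, fmt.Errorf("decoding config: %w", err)
	}
	return cfg.ToPrefs()
}
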
59 vendor/tailscale.com/ipn/conffile/cloudconf.go (generated, vendored, Normal file)
@@ -0,0 +1,59 @@
// Copyright (c) Tailscale Inc & AUTHORS
// SPDX-License-Identifier: BSD-3-Clause

package conffile

import (
	"errors"
	"fmt"
	"io"
	"net/http"
	"strings"

	"tailscale.com/omit"
)

func getEC2MetadataToken() (string, error) {
	if omit.AWS {
		return "", omit.Err
	}
	req, _ := http.NewRequest("PUT", "http://169.254.169.254/latest/api/token", nil)
	req.Header.Add("X-aws-ec2-metadata-token-ttl-seconds", "300")
	res, err := http.DefaultClient.Do(req)
	if err != nil {
		return "", fmt.Errorf("failed to get metadata token: %w", err)
	}
	defer res.Body.Close()
	if res.StatusCode != 200 {
		return "", fmt.Errorf("failed to get metadata token: %v", res.Status)
	}
	all, err := io.ReadAll(res.Body)
	if err != nil {
		return "", fmt.Errorf("failed to read metadata token: %w", err)
	}
	return strings.TrimSpace(string(all)), nil
}

func readVMUserData() ([]byte, error) {
	// TODO(bradfitz): support GCP, Azure, Proxmox/cloud-init
	// (NoCloud/ConfigDrive ISO), etc.

	if omit.AWS {
		return nil, omit.Err
	}
	token, tokErr := getEC2MetadataToken()
	req, _ := http.NewRequest("GET", "http://169.254.169.254/latest/user-data", nil)
	req.Header.Add("X-aws-ec2-metadata-token", token)
	res, err := http.DefaultClient.Do(req)
	if err != nil {
		return nil, err
	}
	defer res.Body.Close()
	if res.StatusCode != 200 {
		if tokErr != nil {
			return nil, fmt.Errorf("failed to get VM user data: %v; also failed to get metadata token: %v", res.Status, tokErr)
		}
		return nil, errors.New(res.Status)
	}
	return io.ReadAll(res.Body)
}

100 vendor/tailscale.com/ipn/conffile/conffile.go (generated, vendored, Normal file)
@@ -0,0 +1,100 @@
// Copyright (c) Tailscale Inc & AUTHORS
// SPDX-License-Identifier: BSD-3-Clause

// Package conffile contains code to load, manipulate, and access config file
// settings.
package conffile

import (
	"bytes"
	"encoding/json"
	"errors"
	"fmt"
	"os"
	"runtime"

	"tailscale.com/ipn"
)

// Config describes a config file.
type Config struct {
	Path    string // disk path of HuJSON, or VMUserDataPath
	Raw     []byte // raw bytes from disk, in HuJSON form
	Std     []byte // standardized JSON form
	Version string // "alpha0" for now

	// Parsed is the parsed config, converted from its on-disk version to the
	// latest known format.
	//
	// As of 2023-10-15 there is exactly one format ("alpha0") so this is both
	// the on-disk format and the in-memory upgraded format.
	Parsed ipn.ConfigVAlpha
}

// WantRunning reports whether c is non-nil and it's configured to be running.
func (c *Config) WantRunning() bool {
	return c != nil && !c.Parsed.Enabled.EqualBool(false)
}

// VMUserDataPath is a sentinel value for Load to use to get the data
// from the VM's metadata service's user-data field.
const VMUserDataPath = "vm:user-data"

// hujsonStandardize is set to hujson.Standardize by conffile_hujson.go on
// platforms that support config files.
var hujsonStandardize func([]byte) ([]byte, error)

// Load reads and parses the config file at the provided path on disk.
func Load(path string) (*Config, error) {
	switch runtime.GOOS {
	case "ios", "android":
		// compile-time for deadcode elimination
		return nil, fmt.Errorf("config file loading not supported on %q", runtime.GOOS)
	}
	if hujsonStandardize == nil {
		// Build tags are wrong in conffile_hujson.go
		return nil, errors.New("[unexpected] config file loading not wired up")
	}
	var c Config
	c.Path = path
	var err error

	switch path {
	case VMUserDataPath:
		c.Raw, err = readVMUserData()
	default:
		c.Raw, err = os.ReadFile(path)
	}
	if err != nil {
		return nil, err
	}
	c.Std, err = hujsonStandardize(c.Raw)
	if err != nil {
		return nil, fmt.Errorf("error parsing config file %s HuJSON/JSON: %w", path, err)
	}
	var ver struct {
		Version string `json:"version"`
	}
	if err := json.Unmarshal(c.Std, &ver); err != nil {
		return nil, fmt.Errorf("error parsing config file %s: %w", path, err)
	}
	switch ver.Version {
	case "":
		return nil, fmt.Errorf("error parsing config file %s: no \"version\" field defined", path)
	case "alpha0":
	default:
		return nil, fmt.Errorf("error parsing config file %s: unsupported \"version\" value %q; want \"alpha0\" for now", path, ver.Version)
	}
	c.Version = ver.Version

	jd := json.NewDecoder(bytes.NewReader(c.Std))
	jd.DisallowUnknownFields()
	err = jd.Decode(&c.Parsed)
	if err != nil {
		return nil, fmt.Errorf("error parsing config file %s: %w", path, err)
	}
	if jd.More() {
		return nil, fmt.Errorf("error parsing config file %s: trailing data after JSON object", path)
	}
	return &c, nil
}

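A small sketch of Load in use, showing both a disk path and the VMUserDataPath sentinel that routes through the cloud metadata reader above. The file path and helper name are illustrative only.

package confexample

import (
	"log"

	"tailscale.com/ipn/conffile"
)

func loadConfig(fromCloud bool) *conffile.Config {
	path := "/etc/tailscale/tailscaled.conf" // illustrative path, not mandated by the package
	if fromCloud {
		// Sentinel: read the config from the VM metadata service's user-data.
		path = conffile.VMUserDataPath
	}
	c, err := conffile.Load(path)
	if err != nil {
		log.Fatalf("loading config: %v", err)
	}
	log.Printf("config version %q, wantRunning=%v", c.Version, c.WantRunning())
	return c
}
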
20 vendor/tailscale.com/ipn/conffile/conffile_hujson.go (generated, vendored, Normal file)
@@ -0,0 +1,20 @@
// Copyright (c) Tailscale Inc & AUTHORS
// SPDX-License-Identifier: BSD-3-Clause

//go:build !ios && !android

package conffile

import "github.com/tailscale/hujson"

// Only link the hujson package on platforms that use it, to reduce binary size
// & memory a bit.
//
// (iOS and Android don't have config files)

// While the linker's deadcode mostly handles the hujson package today, this
// keeps us honest for the future.

func init() {
	hujsonStandardize = hujson.Standardize
}

12 vendor/tailscale.com/ipn/doc.go (generated, vendored, Normal file)
@@ -0,0 +1,12 @@
// Copyright (c) Tailscale Inc & AUTHORS
// SPDX-License-Identifier: BSD-3-Clause

//go:generate go run tailscale.com/cmd/viewer -type=Prefs,ServeConfig,TCPPortHandler,HTTPHandler,WebServerConfig

// Package ipn implements the interactions between the Tailscale cloud
// control plane and the local network stack.
//
// IPN is the abbreviated name for a Tailscale network. What's less
// clear is what it's an abbreviation for: Identified Private Network?
// IP Network? Internet Private Network? I Privately Network?
package ipn

190 vendor/tailscale.com/ipn/ipn_clone.go (generated, vendored, Normal file)
@@ -0,0 +1,190 @@
// Copyright (c) Tailscale Inc & AUTHORS
// SPDX-License-Identifier: BSD-3-Clause

// Code generated by tailscale.com/cmd/cloner; DO NOT EDIT.

package ipn

import (
	"maps"
	"net/netip"

	"tailscale.com/drive"
	"tailscale.com/tailcfg"
	"tailscale.com/types/opt"
	"tailscale.com/types/persist"
	"tailscale.com/types/preftype"
	"tailscale.com/types/ptr"
)

// Clone makes a deep copy of Prefs.
// The result aliases no memory with the original.
func (src *Prefs) Clone() *Prefs {
	if src == nil {
		return nil
	}
	dst := new(Prefs)
	*dst = *src
	dst.AdvertiseTags = append(src.AdvertiseTags[:0:0], src.AdvertiseTags...)
	dst.AdvertiseRoutes = append(src.AdvertiseRoutes[:0:0], src.AdvertiseRoutes...)
	if src.DriveShares != nil {
		dst.DriveShares = make([]*drive.Share, len(src.DriveShares))
		for i := range dst.DriveShares {
			if src.DriveShares[i] == nil {
				dst.DriveShares[i] = nil
			} else {
				dst.DriveShares[i] = src.DriveShares[i].Clone()
			}
		}
	}
	dst.Persist = src.Persist.Clone()
	return dst
}

// A compilation failure here means this code must be regenerated, with the command at the top of this file.
var _PrefsCloneNeedsRegeneration = Prefs(struct {
	ControlURL             string
	RouteAll               bool
	ExitNodeID             tailcfg.StableNodeID
	ExitNodeIP             netip.Addr
	InternalExitNodePrior  tailcfg.StableNodeID
	ExitNodeAllowLANAccess bool
	CorpDNS                bool
	RunSSH                 bool
	RunWebClient           bool
	WantRunning            bool
	LoggedOut              bool
	ShieldsUp              bool
	AdvertiseTags          []string
	Hostname               string
	NotepadURLs            bool
	ForceDaemon            bool
	Egg                    bool
	AdvertiseRoutes        []netip.Prefix
	NoSNAT                 bool
	NoStatefulFiltering    opt.Bool
	NetfilterMode          preftype.NetfilterMode
	OperatorUser           string
	ProfileName            string
	AutoUpdate             AutoUpdatePrefs
	AppConnector           AppConnectorPrefs
	PostureChecking        bool
	NetfilterKind          string
	DriveShares            []*drive.Share
	AllowSingleHosts       marshalAsTrueInJSON
	Persist                *persist.Persist
}{})

// Clone makes a deep copy of ServeConfig.
// The result aliases no memory with the original.
func (src *ServeConfig) Clone() *ServeConfig {
	if src == nil {
		return nil
	}
	dst := new(ServeConfig)
	*dst = *src
	if dst.TCP != nil {
		dst.TCP = map[uint16]*TCPPortHandler{}
		for k, v := range src.TCP {
			if v == nil {
				dst.TCP[k] = nil
			} else {
				dst.TCP[k] = ptr.To(*v)
			}
		}
	}
	if dst.Web != nil {
		dst.Web = map[HostPort]*WebServerConfig{}
		for k, v := range src.Web {
			if v == nil {
				dst.Web[k] = nil
			} else {
				dst.Web[k] = v.Clone()
			}
		}
	}
	dst.AllowFunnel = maps.Clone(src.AllowFunnel)
	if dst.Foreground != nil {
		dst.Foreground = map[string]*ServeConfig{}
		for k, v := range src.Foreground {
			if v == nil {
				dst.Foreground[k] = nil
			} else {
				dst.Foreground[k] = v.Clone()
			}
		}
	}
	return dst
}

// A compilation failure here means this code must be regenerated, with the command at the top of this file.
var _ServeConfigCloneNeedsRegeneration = ServeConfig(struct {
	TCP         map[uint16]*TCPPortHandler
	Web         map[HostPort]*WebServerConfig
	AllowFunnel map[HostPort]bool
	Foreground  map[string]*ServeConfig
	ETag        string
}{})

// Clone makes a deep copy of TCPPortHandler.
// The result aliases no memory with the original.
func (src *TCPPortHandler) Clone() *TCPPortHandler {
	if src == nil {
		return nil
	}
	dst := new(TCPPortHandler)
	*dst = *src
	return dst
}

// A compilation failure here means this code must be regenerated, with the command at the top of this file.
var _TCPPortHandlerCloneNeedsRegeneration = TCPPortHandler(struct {
	HTTPS        bool
	HTTP         bool
	TCPForward   string
	TerminateTLS string
}{})

// Clone makes a deep copy of HTTPHandler.
// The result aliases no memory with the original.
func (src *HTTPHandler) Clone() *HTTPHandler {
	if src == nil {
		return nil
	}
	dst := new(HTTPHandler)
	*dst = *src
	return dst
}

// A compilation failure here means this code must be regenerated, with the command at the top of this file.
var _HTTPHandlerCloneNeedsRegeneration = HTTPHandler(struct {
	Path  string
	Proxy string
	Text  string
}{})

// Clone makes a deep copy of WebServerConfig.
// The result aliases no memory with the original.
func (src *WebServerConfig) Clone() *WebServerConfig {
	if src == nil {
		return nil
	}
	dst := new(WebServerConfig)
	*dst = *src
	if dst.Handlers != nil {
		dst.Handlers = map[string]*HTTPHandler{}
		for k, v := range src.Handlers {
			if v == nil {
				dst.Handlers[k] = nil
			} else {
				dst.Handlers[k] = ptr.To(*v)
			}
		}
	}
	return dst
}

// A compilation failure here means this code must be regenerated, with the command at the top of this file.
var _WebServerConfigCloneNeedsRegeneration = WebServerConfig(struct {
	Handlers map[string]*HTTPHandler
}{})

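A brief sketch of the deep-copy contract the generated Clone methods provide: the clone shares no slices or maps with the original, so mutating the copy leaves the source untouched. The field values below are invented purely for illustration.

package cloneexample

import (
	"fmt"

	"tailscale.com/ipn"
)

func demoClone() {
	orig := &ipn.Prefs{Hostname: "node-a", AdvertiseTags: []string{"tag:prod"}}
	cp := orig.Clone()
	cp.Hostname = "node-b"
	cp.AdvertiseTags[0] = "tag:dev" // does not alias orig.AdvertiseTags
	fmt.Println(orig.Hostname, orig.AdvertiseTags[0]) // prints: node-a tag:prod
}
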
382 vendor/tailscale.com/ipn/ipn_view.go (generated, vendored, Normal file)
@@ -0,0 +1,382 @@
// Copyright (c) Tailscale Inc & AUTHORS
// SPDX-License-Identifier: BSD-3-Clause

// Code generated by tailscale/cmd/viewer; DO NOT EDIT.

package ipn

import (
	"encoding/json"
	"errors"
	"net/netip"

	"tailscale.com/drive"
	"tailscale.com/tailcfg"
	"tailscale.com/types/opt"
	"tailscale.com/types/persist"
	"tailscale.com/types/preftype"
	"tailscale.com/types/views"
)

//go:generate go run tailscale.com/cmd/cloner -clonefunc=false -type=Prefs,ServeConfig,TCPPortHandler,HTTPHandler,WebServerConfig

// View returns a readonly view of Prefs.
func (p *Prefs) View() PrefsView {
	return PrefsView{ж: p}
}

// PrefsView provides a read-only view over Prefs.
//
// Its methods should only be called if `Valid()` returns true.
type PrefsView struct {
	// ж is the underlying mutable value, named with a hard-to-type
	// character that looks pointy like a pointer.
	// It is named distinctively to make you think of how dangerous it is to escape
	// to callers. You must not let callers be able to mutate it.
	ж *Prefs
}

// Valid reports whether underlying value is non-nil.
func (v PrefsView) Valid() bool { return v.ж != nil }

// AsStruct returns a clone of the underlying value which aliases no memory with
// the original.
func (v PrefsView) AsStruct() *Prefs {
	if v.ж == nil {
		return nil
	}
	return v.ж.Clone()
}

func (v PrefsView) MarshalJSON() ([]byte, error) { return json.Marshal(v.ж) }

func (v *PrefsView) UnmarshalJSON(b []byte) error {
	if v.ж != nil {
		return errors.New("already initialized")
	}
	if len(b) == 0 {
		return nil
	}
	var x Prefs
	if err := json.Unmarshal(b, &x); err != nil {
		return err
	}
	v.ж = &x
	return nil
}

func (v PrefsView) ControlURL() string { return v.ж.ControlURL }
func (v PrefsView) RouteAll() bool { return v.ж.RouteAll }
func (v PrefsView) ExitNodeID() tailcfg.StableNodeID { return v.ж.ExitNodeID }
func (v PrefsView) ExitNodeIP() netip.Addr { return v.ж.ExitNodeIP }
func (v PrefsView) InternalExitNodePrior() tailcfg.StableNodeID { return v.ж.InternalExitNodePrior }
func (v PrefsView) ExitNodeAllowLANAccess() bool { return v.ж.ExitNodeAllowLANAccess }
func (v PrefsView) CorpDNS() bool { return v.ж.CorpDNS }
func (v PrefsView) RunSSH() bool { return v.ж.RunSSH }
func (v PrefsView) RunWebClient() bool { return v.ж.RunWebClient }
func (v PrefsView) WantRunning() bool { return v.ж.WantRunning }
func (v PrefsView) LoggedOut() bool { return v.ж.LoggedOut }
func (v PrefsView) ShieldsUp() bool { return v.ж.ShieldsUp }
func (v PrefsView) AdvertiseTags() views.Slice[string] { return views.SliceOf(v.ж.AdvertiseTags) }
func (v PrefsView) Hostname() string { return v.ж.Hostname }
func (v PrefsView) NotepadURLs() bool { return v.ж.NotepadURLs }
func (v PrefsView) ForceDaemon() bool { return v.ж.ForceDaemon }
func (v PrefsView) Egg() bool { return v.ж.Egg }
func (v PrefsView) AdvertiseRoutes() views.Slice[netip.Prefix] {
	return views.SliceOf(v.ж.AdvertiseRoutes)
}
func (v PrefsView) NoSNAT() bool { return v.ж.NoSNAT }
func (v PrefsView) NoStatefulFiltering() opt.Bool { return v.ж.NoStatefulFiltering }
func (v PrefsView) NetfilterMode() preftype.NetfilterMode { return v.ж.NetfilterMode }
func (v PrefsView) OperatorUser() string { return v.ж.OperatorUser }
func (v PrefsView) ProfileName() string { return v.ж.ProfileName }
func (v PrefsView) AutoUpdate() AutoUpdatePrefs { return v.ж.AutoUpdate }
func (v PrefsView) AppConnector() AppConnectorPrefs { return v.ж.AppConnector }
func (v PrefsView) PostureChecking() bool { return v.ж.PostureChecking }
func (v PrefsView) NetfilterKind() string { return v.ж.NetfilterKind }
func (v PrefsView) DriveShares() views.SliceView[*drive.Share, drive.ShareView] {
	return views.SliceOfViews[*drive.Share, drive.ShareView](v.ж.DriveShares)
}
func (v PrefsView) AllowSingleHosts() marshalAsTrueInJSON { return v.ж.AllowSingleHosts }
func (v PrefsView) Persist() persist.PersistView { return v.ж.Persist.View() }

// A compilation failure here means this code must be regenerated, with the command at the top of this file.
var _PrefsViewNeedsRegeneration = Prefs(struct {
	ControlURL             string
	RouteAll               bool
	ExitNodeID             tailcfg.StableNodeID
	ExitNodeIP             netip.Addr
	InternalExitNodePrior  tailcfg.StableNodeID
	ExitNodeAllowLANAccess bool
	CorpDNS                bool
	RunSSH                 bool
	RunWebClient           bool
	WantRunning            bool
	LoggedOut              bool
	ShieldsUp              bool
	AdvertiseTags          []string
	Hostname               string
	NotepadURLs            bool
	ForceDaemon            bool
	Egg                    bool
	AdvertiseRoutes        []netip.Prefix
	NoSNAT                 bool
	NoStatefulFiltering    opt.Bool
	NetfilterMode          preftype.NetfilterMode
	OperatorUser           string
	ProfileName            string
	AutoUpdate             AutoUpdatePrefs
	AppConnector           AppConnectorPrefs
	PostureChecking        bool
	NetfilterKind          string
	DriveShares            []*drive.Share
	AllowSingleHosts       marshalAsTrueInJSON
	Persist                *persist.Persist
}{})

// View returns a readonly view of ServeConfig.
func (p *ServeConfig) View() ServeConfigView {
	return ServeConfigView{ж: p}
}

// ServeConfigView provides a read-only view over ServeConfig.
//
// Its methods should only be called if `Valid()` returns true.
type ServeConfigView struct {
	// ж is the underlying mutable value, named with a hard-to-type
	// character that looks pointy like a pointer.
	// It is named distinctively to make you think of how dangerous it is to escape
	// to callers. You must not let callers be able to mutate it.
	ж *ServeConfig
}

// Valid reports whether underlying value is non-nil.
func (v ServeConfigView) Valid() bool { return v.ж != nil }

// AsStruct returns a clone of the underlying value which aliases no memory with
// the original.
func (v ServeConfigView) AsStruct() *ServeConfig {
	if v.ж == nil {
		return nil
	}
	return v.ж.Clone()
}

func (v ServeConfigView) MarshalJSON() ([]byte, error) { return json.Marshal(v.ж) }

func (v *ServeConfigView) UnmarshalJSON(b []byte) error {
	if v.ж != nil {
		return errors.New("already initialized")
	}
	if len(b) == 0 {
		return nil
	}
	var x ServeConfig
	if err := json.Unmarshal(b, &x); err != nil {
		return err
	}
	v.ж = &x
	return nil
}

func (v ServeConfigView) TCP() views.MapFn[uint16, *TCPPortHandler, TCPPortHandlerView] {
	return views.MapFnOf(v.ж.TCP, func(t *TCPPortHandler) TCPPortHandlerView {
		return t.View()
	})
}

func (v ServeConfigView) Web() views.MapFn[HostPort, *WebServerConfig, WebServerConfigView] {
	return views.MapFnOf(v.ж.Web, func(t *WebServerConfig) WebServerConfigView {
		return t.View()
	})
}

func (v ServeConfigView) AllowFunnel() views.Map[HostPort, bool] {
	return views.MapOf(v.ж.AllowFunnel)
}

func (v ServeConfigView) Foreground() views.MapFn[string, *ServeConfig, ServeConfigView] {
	return views.MapFnOf(v.ж.Foreground, func(t *ServeConfig) ServeConfigView {
		return t.View()
	})
}
func (v ServeConfigView) ETag() string { return v.ж.ETag }

// A compilation failure here means this code must be regenerated, with the command at the top of this file.
var _ServeConfigViewNeedsRegeneration = ServeConfig(struct {
	TCP         map[uint16]*TCPPortHandler
	Web         map[HostPort]*WebServerConfig
	AllowFunnel map[HostPort]bool
	Foreground  map[string]*ServeConfig
	ETag        string
}{})

// View returns a readonly view of TCPPortHandler.
func (p *TCPPortHandler) View() TCPPortHandlerView {
	return TCPPortHandlerView{ж: p}
}

// TCPPortHandlerView provides a read-only view over TCPPortHandler.
//
// Its methods should only be called if `Valid()` returns true.
type TCPPortHandlerView struct {
	// ж is the underlying mutable value, named with a hard-to-type
	// character that looks pointy like a pointer.
	// It is named distinctively to make you think of how dangerous it is to escape
	// to callers. You must not let callers be able to mutate it.
	ж *TCPPortHandler
}

// Valid reports whether underlying value is non-nil.
func (v TCPPortHandlerView) Valid() bool { return v.ж != nil }

// AsStruct returns a clone of the underlying value which aliases no memory with
// the original.
func (v TCPPortHandlerView) AsStruct() *TCPPortHandler {
	if v.ж == nil {
		return nil
	}
	return v.ж.Clone()
}

func (v TCPPortHandlerView) MarshalJSON() ([]byte, error) { return json.Marshal(v.ж) }

func (v *TCPPortHandlerView) UnmarshalJSON(b []byte) error {
	if v.ж != nil {
		return errors.New("already initialized")
	}
	if len(b) == 0 {
		return nil
	}
	var x TCPPortHandler
	if err := json.Unmarshal(b, &x); err != nil {
		return err
	}
	v.ж = &x
	return nil
}

func (v TCPPortHandlerView) HTTPS() bool { return v.ж.HTTPS }
func (v TCPPortHandlerView) HTTP() bool { return v.ж.HTTP }
func (v TCPPortHandlerView) TCPForward() string { return v.ж.TCPForward }
func (v TCPPortHandlerView) TerminateTLS() string { return v.ж.TerminateTLS }

// A compilation failure here means this code must be regenerated, with the command at the top of this file.
var _TCPPortHandlerViewNeedsRegeneration = TCPPortHandler(struct {
	HTTPS        bool
	HTTP         bool
	TCPForward   string
	TerminateTLS string
}{})

// View returns a readonly view of HTTPHandler.
func (p *HTTPHandler) View() HTTPHandlerView {
	return HTTPHandlerView{ж: p}
}

// HTTPHandlerView provides a read-only view over HTTPHandler.
//
// Its methods should only be called if `Valid()` returns true.
type HTTPHandlerView struct {
	// ж is the underlying mutable value, named with a hard-to-type
	// character that looks pointy like a pointer.
	// It is named distinctively to make you think of how dangerous it is to escape
	// to callers. You must not let callers be able to mutate it.
	ж *HTTPHandler
}

// Valid reports whether underlying value is non-nil.
func (v HTTPHandlerView) Valid() bool { return v.ж != nil }

// AsStruct returns a clone of the underlying value which aliases no memory with
// the original.
func (v HTTPHandlerView) AsStruct() *HTTPHandler {
	if v.ж == nil {
		return nil
	}
	return v.ж.Clone()
}

func (v HTTPHandlerView) MarshalJSON() ([]byte, error) { return json.Marshal(v.ж) }

func (v *HTTPHandlerView) UnmarshalJSON(b []byte) error {
	if v.ж != nil {
		return errors.New("already initialized")
	}
	if len(b) == 0 {
		return nil
	}
	var x HTTPHandler
	if err := json.Unmarshal(b, &x); err != nil {
		return err
	}
	v.ж = &x
	return nil
}

func (v HTTPHandlerView) Path() string { return v.ж.Path }
func (v HTTPHandlerView) Proxy() string { return v.ж.Proxy }
func (v HTTPHandlerView) Text() string { return v.ж.Text }

// A compilation failure here means this code must be regenerated, with the command at the top of this file.
var _HTTPHandlerViewNeedsRegeneration = HTTPHandler(struct {
	Path  string
	Proxy string
	Text  string
}{})

// View returns a readonly view of WebServerConfig.
func (p *WebServerConfig) View() WebServerConfigView {
	return WebServerConfigView{ж: p}
}

// WebServerConfigView provides a read-only view over WebServerConfig.
//
// Its methods should only be called if `Valid()` returns true.
type WebServerConfigView struct {
	// ж is the underlying mutable value, named with a hard-to-type
	// character that looks pointy like a pointer.
	// It is named distinctively to make you think of how dangerous it is to escape
	// to callers. You must not let callers be able to mutate it.
	ж *WebServerConfig
}

// Valid reports whether underlying value is non-nil.
func (v WebServerConfigView) Valid() bool { return v.ж != nil }

// AsStruct returns a clone of the underlying value which aliases no memory with
// the original.
func (v WebServerConfigView) AsStruct() *WebServerConfig {
	if v.ж == nil {
		return nil
	}
	return v.ж.Clone()
}

func (v WebServerConfigView) MarshalJSON() ([]byte, error) { return json.Marshal(v.ж) }

func (v *WebServerConfigView) UnmarshalJSON(b []byte) error {
	if v.ж != nil {
		return errors.New("already initialized")
	}
	if len(b) == 0 {
		return nil
	}
	var x WebServerConfig
	if err := json.Unmarshal(b, &x); err != nil {
		return err
	}
	v.ж = &x
	return nil
}

func (v WebServerConfigView) Handlers() views.MapFn[string, *HTTPHandler, HTTPHandlerView] {
	return views.MapFnOf(v.ж.Handlers, func(t *HTTPHandler) HTTPHandlerView {
		return t.View()
	})
}

// A compilation failure here means this code must be regenerated, with the command at the top of this file.
var _WebServerConfigViewNeedsRegeneration = WebServerConfig(struct {
	Handlers map[string]*HTTPHandler
}{})

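A short sketch of how these read-only view types are meant to be used: check Valid before calling accessors, read through the view, and reach for AsStruct only when a mutable copy is genuinely needed. The function and hostname below are invented for the example.

package viewexample

import (
	"fmt"

	"tailscale.com/ipn"
)

func describePrefs(pv ipn.PrefsView) string {
	if !pv.Valid() {
		return "no prefs"
	}
	// Accessors read through the view without exposing the underlying *Prefs.
	s := fmt.Sprintf("running=%v ssh=%v routes=%d",
		pv.WantRunning(), pv.RunSSH(), pv.AdvertiseRoutes().Len())
	if pv.WantRunning() {
		// AsStruct returns a clone; mutating it does not affect the viewed value.
		p := pv.AsStruct()
		p.Hostname = "scratch-copy" // safe: operates only on the clone
		_ = p
	}
	return s
}
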
47 vendor/tailscale.com/ipn/ipnauth/actor.go (generated, vendored, Normal file)
@@ -0,0 +1,47 @@
// Copyright (c) Tailscale Inc & AUTHORS
// SPDX-License-Identifier: BSD-3-Clause

package ipnauth

import (
	"tailscale.com/ipn"
)

// Actor is any actor using the [ipnlocal.LocalBackend].
//
// It typically represents a specific OS user, indicating that an operation
// is performed on behalf of this user, should be evaluated against their
// access rights, and performed in their security context when applicable.
type Actor interface {
	// UserID returns an OS-specific UID of the user represented by the receiver,
	// or "" if the actor does not represent a specific user on a multi-user system.
	// As of 2024-08-27, it is only used on Windows.
	UserID() ipn.WindowsUserID
	// Username returns the user name associated with the receiver,
	// or "" if the actor does not represent a specific user.
	Username() (string, error)

	// IsLocalSystem reports whether the actor is the Windows Local System account.
	//
	// Deprecated: this method exists for compatibility with the current (as of 2024-08-27)
	// permission model and will be removed as we progress on tailscale/corp#18342.
	IsLocalSystem() bool

	// IsLocalAdmin reports whether the actor has administrative access to the
	// local machine, for whatever that means with respect to the current OS.
	//
	// The operatorUID is only used on Unix-like platforms and specifies the ID
	// of a local user (in the os/user.User.Uid string form) who is allowed to
	// operate tailscaled without being root or using sudo.
	//
	// Deprecated: this method exists for compatibility with the current (as of 2024-08-27)
	// permission model and will be removed as we progress on tailscale/corp#18342.
	IsLocalAdmin(operatorUID string) bool
}

// ActorCloser is an optional interface that might be implemented by an [Actor]
// that must be closed when done to release the resources.
type ActorCloser interface {
	// Close releases resources associated with the receiver.
	Close() error
}

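For flavor, a minimal hypothetical Actor implementation of the kind a test double might use; none of the names below are part of the vendored package beyond the interface they satisfy.

package ipnauthexample

import (
	"tailscale.com/ipn"
	"tailscale.com/ipn/ipnauth"
)

// testActor is a hypothetical Actor for tests: a plain, non-admin,
// non-Windows user identified only by name.
type testActor struct{ name string }

var _ ipnauth.Actor = testActor{} // compile-time interface check

func (a testActor) UserID() ipn.WindowsUserID            { return "" } // not a Windows user
func (a testActor) Username() (string, error)            { return a.name, nil }
func (a testActor) IsLocalSystem() bool                  { return false }
func (a testActor) IsLocalAdmin(operatorUID string) bool { return false }
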
209 vendor/tailscale.com/ipn/ipnauth/ipnauth.go (generated, vendored, Normal file)
@@ -0,0 +1,209 @@
// Copyright (c) Tailscale Inc & AUTHORS
// SPDX-License-Identifier: BSD-3-Clause

// Package ipnauth controls access to the LocalAPI.
package ipnauth

import (
	"errors"
	"fmt"
	"io"
	"net"
	"os"
	"os/user"
	"runtime"
	"strconv"

	"github.com/tailscale/peercred"
	"tailscale.com/envknob"
	"tailscale.com/ipn"
	"tailscale.com/safesocket"
	"tailscale.com/types/logger"
	"tailscale.com/util/clientmetric"
	"tailscale.com/util/groupmember"
	"tailscale.com/util/winutil"
	"tailscale.com/version/distro"
)

// ErrNotImplemented is returned by ConnIdentity.WindowsToken when it is not
// implemented for the current GOOS.
var ErrNotImplemented = errors.New("not implemented for GOOS=" + runtime.GOOS)

// WindowsToken represents the current security context of a Windows user.
type WindowsToken interface {
	io.Closer
	// EqualUIDs reports whether other refers to the same user ID as the receiver.
	EqualUIDs(other WindowsToken) bool
	// IsAdministrator reports whether the receiver is a member of the built-in
	// Administrators group, or else an error. Use IsElevated to determine whether
	// the receiver is actually utilizing administrative rights.
	IsAdministrator() (bool, error)
	// IsUID reports whether the receiver's user ID matches uid.
	IsUID(uid ipn.WindowsUserID) bool
	// UID returns the ipn.WindowsUserID associated with the receiver, or else
	// an error.
	UID() (ipn.WindowsUserID, error)
	// IsElevated reports whether the receiver is currently executing as an
	// elevated administrative user.
	IsElevated() bool
	// IsLocalSystem reports whether the receiver is the built-in SYSTEM user.
	IsLocalSystem() bool
	// UserDir returns the special directory identified by folderID as associated
	// with the receiver. folderID must be one of the KNOWNFOLDERID values from
	// the x/sys/windows package, serialized as a stringified GUID.
	UserDir(folderID string) (string, error)
	// Username returns the user name associated with the receiver.
	Username() (string, error)
}

// ConnIdentity represents the owner of a localhost TCP or unix socket connection
// connecting to the LocalAPI.
type ConnIdentity struct {
	conn       net.Conn
	notWindows bool // runtime.GOOS != "windows"

	// Fields used when NotWindows:
	isUnixSock bool            // Conn is a *net.UnixConn
	creds      *peercred.Creds // or nil

	// Used on Windows:
	// TODO(bradfitz): merge these into the peercreds package and
	// use that for all.
	pid int
}

// WindowsUserID returns the local machine's userid of the connection
// if it's on Windows. Otherwise it returns the empty string.
//
// It's suitable for passing to LookupUserFromID (os/user.LookupId) on any
// operating system.
func (ci *ConnIdentity) WindowsUserID() ipn.WindowsUserID {
	if envknob.GOOS() != "windows" {
		return ""
	}
	if tok, err := ci.WindowsToken(); err == nil {
		defer tok.Close()
		if uid, err := tok.UID(); err == nil {
			return uid
		}
	}
	// For Linux tests running as Windows:
	const isBroken = true // TODO(bradfitz,maisem): fix tests; this doesn't work yet
	if ci.creds != nil && !isBroken {
		if uid, ok := ci.creds.UserID(); ok {
			return ipn.WindowsUserID(uid)
		}
	}
	return ""
}

func (ci *ConnIdentity) Pid() int { return ci.pid }
func (ci *ConnIdentity) IsUnixSock() bool { return ci.isUnixSock }
func (ci *ConnIdentity) Creds() *peercred.Creds { return ci.creds }

var metricIssue869Workaround = clientmetric.NewCounter("issue_869_workaround")

// LookupUserFromID is a wrapper around os/user.LookupId that works around some
// issues on Windows. On non-Windows platforms it's identical to user.LookupId.
func LookupUserFromID(logf logger.Logf, uid string) (*user.User, error) {
	u, err := user.LookupId(uid)
	if err != nil && runtime.GOOS == "windows" {
		// See if uid resolves as a pseudo-user. Temporary workaround until
		// https://github.com/golang/go/issues/49509 resolves and ships.
		if u, err := winutil.LookupPseudoUser(uid); err == nil {
			return u, nil
		}

		// TODO(aaron): With LookupPseudoUser in place, I don't expect us to reach
		// this point anymore. Leaving the below workaround in for now to confirm
		// that pseudo-user resolution sufficiently handles this problem.

		// The below workaround is only applicable when uid represents a
		// valid security principal. Omitting this check causes us to succeed
		// even when uid represents a deleted user.
		if !winutil.IsSIDValidPrincipal(uid) {
			return nil, err
		}

		metricIssue869Workaround.Add(1)
		logf("[warning] issue 869: os/user.LookupId failed; ignoring")
		// Work around https://github.com/tailscale/tailscale/issues/869 for
		// now. We don't strictly need the username. It's just a nice-to-have.
		// So make up a *user.User if their machine is broken in this way.
		return &user.User{
			Uid:      uid,
			Username: "unknown-user-" + uid,
			Name:     "unknown user " + uid,
		}, nil
	}
	return u, err
}

// IsReadonlyConn reports whether the connection should be considered read-only,
// meaning it's not allowed to change the state of the node.
//
// Read-only also means it's not allowed to access sensitive information, which
// admittedly doesn't follow from the name. Consider this "IsUnprivileged".
// Also, Windows doesn't use this. For Windows it always returns false.
//
// TODO(bradfitz): rename it? Also make Windows use this.
func (ci *ConnIdentity) IsReadonlyConn(operatorUID string, logf logger.Logf) bool {
	if runtime.GOOS == "windows" {
		// Windows doesn't need/use this mechanism, at least yet. It
		// has a different last-user-wins auth model.
		return false
	}
	const ro = true
	const rw = false
	if !safesocket.PlatformUsesPeerCreds() {
		return rw
	}
	creds := ci.creds
	if creds == nil {
		logf("connection from unknown peer; read-only")
		return ro
	}
	uid, ok := creds.UserID()
	if !ok {
		logf("connection from peer with unknown userid; read-only")
		return ro
	}
	if uid == "0" {
		logf("connection from userid %v; root has access", uid)
		return rw
	}
	if selfUID := os.Getuid(); selfUID != 0 && uid == strconv.Itoa(selfUID) {
		logf("connection from userid %v; connection from non-root user matching daemon has access", uid)
		return rw
	}
	if operatorUID != "" && uid == operatorUID {
		logf("connection from userid %v; is configured operator", uid)
		return rw
	}
	if yes, err := isLocalAdmin(uid); err != nil {
		logf("connection from userid %v; read-only; %v", uid, err)
		return ro
	} else if yes {
		logf("connection from userid %v; is local admin, has access", uid)
		return rw
	}
	logf("connection from userid %v; read-only", uid)
	return ro
}

func isLocalAdmin(uid string) (bool, error) {
	u, err := user.LookupId(uid)
	if err != nil {
		return false, err
	}
	var adminGroup string
	switch {
	case runtime.GOOS == "darwin":
		adminGroup = "admin"
	case distro.Get() == distro.QNAP:
		adminGroup = "administrators"
	default:
		return false, fmt.Errorf("no system admin group found")
	}
	return groupmember.IsMemberOfGroup(adminGroup, u.Username)
}

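A sketch of how a LocalAPI-style listener loop might apply the helpers above on a Unix socket: derive a ConnIdentity for each accepted connection and tag it read-only or read-write per IsReadonlyConn. The serveLocalAPI and handleConn names are invented for illustration; they are not the daemon's real request path.

package ipnauthexample

import (
	"log"
	"net"

	"tailscale.com/ipn/ipnauth"
)

// serveLocalAPI accepts connections and classifies each one as read-only or
// read-write based on the peer's credentials, per ConnIdentity.IsReadonlyConn.
func serveLocalAPI(ln net.Listener, operatorUID string) error {
	for {
		c, err := ln.Accept()
		if err != nil {
			return err
		}
		ci, err := ipnauth.GetConnIdentity(log.Printf, c)
		if err != nil {
			c.Close()
			continue
		}
		readonly := ci.IsReadonlyConn(operatorUID, log.Printf)
		go handleConn(c, readonly) // hypothetical request handler
	}
}

func handleConn(c net.Conn, readonly bool) {
	defer c.Close()
	// ... serve requests, rejecting state-changing calls when readonly ...
}
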
29 vendor/tailscale.com/ipn/ipnauth/ipnauth_notwindows.go (generated, vendored, Normal file)
@@ -0,0 +1,29 @@
// Copyright (c) Tailscale Inc & AUTHORS
// SPDX-License-Identifier: BSD-3-Clause

//go:build !windows

package ipnauth

import (
	"net"

	"github.com/tailscale/peercred"
	"tailscale.com/types/logger"
)

// GetConnIdentity extracts the identity information from the connection
// based on the user who owns the other end of the connection.
// The returned ConnIdentity has notWindows set to true.
func GetConnIdentity(_ logger.Logf, c net.Conn) (ci *ConnIdentity, err error) {
	ci = &ConnIdentity{conn: c, notWindows: true}
	_, ci.isUnixSock = c.(*net.UnixConn)
	ci.creds, _ = peercred.Get(c)
	return ci, nil
}

// WindowsToken is unsupported when GOOS != windows and always returns
// ErrNotImplemented.
func (ci *ConnIdentity) WindowsToken() (WindowsToken, error) {
	return nil, ErrNotImplemented
}

190 vendor/tailscale.com/ipn/ipnauth/ipnauth_windows.go (generated, vendored, Normal file)
@@ -0,0 +1,190 @@
// Copyright (c) Tailscale Inc & AUTHORS
// SPDX-License-Identifier: BSD-3-Clause

package ipnauth

import (
	"fmt"
	"net"
	"runtime"
	"unsafe"

	"golang.org/x/sys/windows"
	"tailscale.com/ipn"
	"tailscale.com/safesocket"
	"tailscale.com/types/logger"
	"tailscale.com/util/winutil"
)

// GetConnIdentity extracts the identity information from the connection
// based on the user who owns the other end of the connection.
// If c is not backed by a named pipe, an error is returned.
func GetConnIdentity(logf logger.Logf, c net.Conn) (ci *ConnIdentity, err error) {
	ci = &ConnIdentity{conn: c, notWindows: false}
	wcc, ok := c.(*safesocket.WindowsClientConn)
	if !ok {
		return nil, fmt.Errorf("not a WindowsClientConn: %T", c)
	}
	ci.pid, err = wcc.ClientPID()
	if err != nil {
		return nil, err
	}
	return ci, nil
}

type token struct {
	t windows.Token
}

func (t *token) UID() (ipn.WindowsUserID, error) {
	sid, err := t.uid()
	if err != nil {
		return "", fmt.Errorf("failed to look up user from token: %w", err)
	}

	return ipn.WindowsUserID(sid.String()), nil
}

func (t *token) Username() (string, error) {
	sid, err := t.uid()
	if err != nil {
		return "", fmt.Errorf("failed to look up user from token: %w", err)
	}

	username, domain, _, err := sid.LookupAccount("")
	if err != nil {
		return "", fmt.Errorf("failed to look up username from SID: %w", err)
	}

	return fmt.Sprintf(`%s\%s`, domain, username), nil
}

func (t *token) IsAdministrator() (bool, error) {
	baSID, err := windows.CreateWellKnownSid(windows.WinBuiltinAdministratorsSid)
	if err != nil {
		return false, err
	}

	isMember, err := t.t.IsMember(baSID)
	if err != nil {
		return false, err
	}
	if isMember {
		return true, nil
	}

	isLimited, err := winutil.IsTokenLimited(t.t)
	if err != nil || !isLimited {
		return false, err
	}

	// Try to obtain a linked token, and if present, check it.
	// (This should be the elevated token associated with limited UAC accounts.)
	linkedToken, err := t.t.GetLinkedToken()
	if err != nil {
		return false, err
	}
	defer linkedToken.Close()

	return linkedToken.IsMember(baSID)
}

func (t *token) IsElevated() bool {
	return t.t.IsElevated()
}

func (t *token) IsLocalSystem() bool {
	// https://web.archive.org/web/2024/https://learn.microsoft.com/en-us/windows-server/identity/ad-ds/manage/understand-security-identifiers
	const systemUID = ipn.WindowsUserID("S-1-5-18")
	return t.IsUID(systemUID)
}

func (t *token) UserDir(folderID string) (string, error) {
	guid, err := windows.GUIDFromString(folderID)
	if err != nil {
		return "", err
	}

	return t.t.KnownFolderPath((*windows.KNOWNFOLDERID)(unsafe.Pointer(&guid)), 0)
}

func (t *token) Close() error {
	if t.t == 0 {
		return nil
	}
	if err := t.t.Close(); err != nil {
		return err
	}
	t.t = 0
	runtime.SetFinalizer(t, nil)
	return nil
}

func (t *token) EqualUIDs(other WindowsToken) bool {
	if t != nil && other == nil || t == nil && other != nil {
		return false
	}
	ot, ok := other.(*token)
	if !ok {
		return false
	}
	if t == ot {
		return true
	}
	uid, err := t.uid()
	if err != nil {
		return false
|
||||
}
|
||||
oUID, err := ot.uid()
|
||||
if err != nil {
|
||||
return false
|
||||
}
|
||||
return uid.Equals(oUID)
|
||||
}
|
||||
|
||||
func (t *token) uid() (*windows.SID, error) {
|
||||
tu, err := t.t.GetTokenUser()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return tu.User.Sid, nil
|
||||
}
|
||||
|
||||
func (t *token) IsUID(uid ipn.WindowsUserID) bool {
|
||||
tUID, err := t.UID()
|
||||
if err != nil {
|
||||
return false
|
||||
}
|
||||
|
||||
return tUID == uid
|
||||
}
|
||||
|
||||
// WindowsToken returns the WindowsToken representing the security context
|
||||
// of the connection's client.
|
||||
func (ci *ConnIdentity) WindowsToken() (WindowsToken, error) {
|
||||
var wcc *safesocket.WindowsClientConn
|
||||
var ok bool
|
||||
if wcc, ok = ci.conn.(*safesocket.WindowsClientConn); !ok {
|
||||
return nil, fmt.Errorf("not a WindowsClientConn: %T", ci.conn)
|
||||
}
|
||||
|
||||
// We duplicate the token's handle so that the WindowsToken we return may have
|
||||
// a lifetime independent from the original connection.
|
||||
var h windows.Handle
|
||||
if err := windows.DuplicateHandle(
|
||||
windows.CurrentProcess(),
|
||||
windows.Handle(wcc.Token()),
|
||||
windows.CurrentProcess(),
|
||||
&h,
|
||||
0,
|
||||
false,
|
||||
windows.DUPLICATE_SAME_ACCESS,
|
||||
); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
result := &token{t: windows.Token(h)}
|
||||
runtime.SetFinalizer(result, func(t *token) { t.Close() })
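// The finalizer above is only a safety net; callers that are done with the
// returned token should still call Close to release the duplicated handle
// promptly.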
|
||||
return result, nil
|
||||
}
|
||||
65
vendor/tailscale.com/ipn/ipnlocal/autoupdate.go
generated
vendored
Normal file
@@ -0,0 +1,65 @@
|
||||
// Copyright (c) Tailscale Inc & AUTHORS
|
||||
// SPDX-License-Identifier: BSD-3-Clause
|
||||
|
||||
//go:build linux || windows
|
||||
|
||||
package ipnlocal
|
||||
|
||||
import (
|
||||
"context"
|
||||
"time"
|
||||
|
||||
"tailscale.com/clientupdate"
|
||||
"tailscale.com/ipn"
|
||||
"tailscale.com/version"
|
||||
)
|
||||
|
||||
func (b *LocalBackend) stopOfflineAutoUpdate() {
|
||||
if b.offlineAutoUpdateCancel != nil {
|
||||
b.logf("offline auto-update: stopping update checks")
|
||||
b.offlineAutoUpdateCancel()
|
||||
b.offlineAutoUpdateCancel = nil
|
||||
}
|
||||
}
|
||||
|
||||
func (b *LocalBackend) maybeStartOfflineAutoUpdate(prefs ipn.PrefsView) {
|
||||
if !prefs.AutoUpdate().Apply.EqualBool(true) {
|
||||
return
|
||||
}
|
||||
// AutoUpdate.Apply field in prefs can only be true for platforms that
|
||||
// support auto-updates. But check it here again, just in case.
|
||||
if !clientupdate.CanAutoUpdate() {
|
||||
return
|
||||
}
|
||||
// On macsys, auto-updates are managed by Sparkle.
|
||||
if version.IsMacSysExt() {
|
||||
return
|
||||
}
|
||||
|
||||
if b.offlineAutoUpdateCancel != nil {
|
||||
// Already running.
|
||||
return
|
||||
}
|
||||
ctx, cancel := context.WithCancel(context.Background())
|
||||
b.offlineAutoUpdateCancel = cancel
|
||||
|
||||
b.logf("offline auto-update: starting update checks")
|
||||
go b.offlineAutoUpdate(ctx)
|
||||
}
|
||||
|
||||
const offlineAutoUpdateCheckPeriod = time.Hour
|
||||
|
||||
func (b *LocalBackend) offlineAutoUpdate(ctx context.Context) {
|
||||
t := time.NewTicker(offlineAutoUpdateCheckPeriod)
|
||||
defer t.Stop()
|
||||
for {
|
||||
select {
|
||||
case <-ctx.Done():
|
||||
return
|
||||
case <-t.C:
|
||||
}
|
||||
if err := b.startAutoUpdate("offline auto-update"); err != nil {
|
||||
b.logf("offline auto-update: failed: %v", err)
|
||||
}
|
||||
}
|
||||
}
|
||||
18
vendor/tailscale.com/ipn/ipnlocal/autoupdate_disabled.go
generated
vendored
Normal file
@@ -0,0 +1,18 @@
|
||||
// Copyright (c) Tailscale Inc & AUTHORS
|
||||
// SPDX-License-Identifier: BSD-3-Clause
|
||||
|
||||
//go:build !(linux || windows)
|
||||
|
||||
package ipnlocal
|
||||
|
||||
import (
|
||||
"tailscale.com/ipn"
|
||||
)
|
||||
|
||||
func (b *LocalBackend) stopOfflineAutoUpdate() {
|
||||
// Not supported on this platform.
|
||||
}
|
||||
|
||||
func (b *LocalBackend) maybeStartOfflineAutoUpdate(prefs ipn.PrefsView) {
|
||||
// Not supported on this platform.
|
||||
}
|
||||
30
vendor/tailscale.com/ipn/ipnlocal/breaktcp_darwin.go
generated
vendored
Normal file
@@ -0,0 +1,30 @@
|
||||
// Copyright (c) Tailscale Inc & AUTHORS
|
||||
// SPDX-License-Identifier: BSD-3-Clause
|
||||
|
||||
package ipnlocal
|
||||
|
||||
import (
|
||||
"log"
|
||||
|
||||
"golang.org/x/sys/unix"
|
||||
)
|
||||
|
||||
func init() {
|
||||
breakTCPConns = breakTCPConnsDarwin
|
||||
}
|
||||
|
||||
func breakTCPConnsDarwin() error {
|
||||
var matched int
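// Probe the first 1000 file descriptors: any fd for which the
// TCP_CONNECTION_INFO getsockopt succeeds is an open TCP socket, and
// closing it forcibly breaks that connection.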
|
||||
for fd := 0; fd < 1000; fd++ {
|
||||
_, err := unix.GetsockoptTCPConnectionInfo(fd, unix.IPPROTO_TCP, unix.TCP_CONNECTION_INFO)
|
||||
if err == nil {
|
||||
matched++
|
||||
err = unix.Close(fd)
|
||||
log.Printf("debug: closed TCP fd %v: %v", fd, err)
|
||||
}
|
||||
}
|
||||
if matched == 0 {
|
||||
log.Printf("debug: no TCP connections found")
|
||||
}
|
||||
return nil
|
||||
}
|
||||
30
vendor/tailscale.com/ipn/ipnlocal/breaktcp_linux.go
generated
vendored
Normal file
@@ -0,0 +1,30 @@
|
||||
// Copyright (c) Tailscale Inc & AUTHORS
|
||||
// SPDX-License-Identifier: BSD-3-Clause
|
||||
|
||||
package ipnlocal
|
||||
|
||||
import (
|
||||
"log"
|
||||
|
||||
"golang.org/x/sys/unix"
|
||||
)
|
||||
|
||||
func init() {
|
||||
breakTCPConns = breakTCPConnsLinux
|
||||
}
|
||||
|
||||
func breakTCPConnsLinux() error {
|
||||
var matched int
|
||||
for fd := 0; fd < 1000; fd++ {
|
||||
_, err := unix.GetsockoptTCPInfo(fd, unix.IPPROTO_TCP, unix.TCP_INFO)
|
||||
if err == nil {
|
||||
matched++
|
||||
err = unix.Close(fd)
|
||||
log.Printf("debug: closed TCP fd %v: %v", fd, err)
|
||||
}
|
||||
}
|
||||
if matched == 0 {
|
||||
log.Printf("debug: no TCP connections found")
|
||||
}
|
||||
return nil
|
||||
}
|
||||
591
vendor/tailscale.com/ipn/ipnlocal/c2n.go
generated
vendored
Normal file
@@ -0,0 +1,591 @@
|
||||
// Copyright (c) Tailscale Inc & AUTHORS
|
||||
// SPDX-License-Identifier: BSD-3-Clause
|
||||
|
||||
package ipnlocal
|
||||
|
||||
import (
|
||||
"crypto/x509"
|
||||
"encoding/json"
|
||||
"encoding/pem"
|
||||
"errors"
|
||||
"fmt"
|
||||
"io"
|
||||
"net"
|
||||
"net/http"
|
||||
"os"
|
||||
"os/exec"
|
||||
"path"
|
||||
"path/filepath"
|
||||
"runtime"
|
||||
"sort"
|
||||
"strconv"
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
"github.com/kortschak/wol"
|
||||
"tailscale.com/clientupdate"
|
||||
"tailscale.com/envknob"
|
||||
"tailscale.com/ipn"
|
||||
"tailscale.com/net/sockstats"
|
||||
"tailscale.com/posture"
|
||||
"tailscale.com/tailcfg"
|
||||
"tailscale.com/util/clientmetric"
|
||||
"tailscale.com/util/goroutines"
|
||||
"tailscale.com/util/set"
|
||||
"tailscale.com/util/syspolicy"
|
||||
"tailscale.com/version"
|
||||
"tailscale.com/version/distro"
|
||||
)
|
||||
|
||||
// c2nHandlers maps an HTTP method and URI path (without query parameters) to
|
||||
// its handler. The exact method+path match is preferred, but if no entry
|
||||
// exists for that, a map entry with an empty method is used as a fallback.
|
||||
var c2nHandlers = map[methodAndPath]c2nHandler{
|
||||
// Debug.
|
||||
req("/echo"): handleC2NEcho,
|
||||
req("/debug/goroutines"): handleC2NDebugGoroutines,
|
||||
req("/debug/prefs"): handleC2NDebugPrefs,
|
||||
req("/debug/metrics"): handleC2NDebugMetrics,
|
||||
req("/debug/component-logging"): handleC2NDebugComponentLogging,
|
||||
req("/debug/logheap"): handleC2NDebugLogHeap,
|
||||
|
||||
// PPROF - We only expose a subset of typical pprof endpoints for security.
|
||||
req("/debug/pprof/heap"): handleC2NPprof,
|
||||
req("/debug/pprof/allocs"): handleC2NPprof,
|
||||
|
||||
req("POST /logtail/flush"): handleC2NLogtailFlush,
|
||||
req("POST /sockstats"): handleC2NSockStats,
|
||||
|
||||
// Check TLS certificate status.
|
||||
req("GET /tls-cert-status"): handleC2NTLSCertStatus,
|
||||
|
||||
// SSH
|
||||
req("/ssh/usernames"): handleC2NSSHUsernames,
|
||||
|
||||
// Auto-updates.
|
||||
req("GET /update"): handleC2NUpdateGet,
|
||||
req("POST /update"): handleC2NUpdatePost,
|
||||
|
||||
// Wake-on-LAN.
|
||||
req("POST /wol"): handleC2NWoL,
|
||||
|
||||
// Device posture.
|
||||
req("GET /posture/identity"): handleC2NPostureIdentityGet,
|
||||
|
||||
// App Connectors.
|
||||
req("GET /appconnector/routes"): handleC2NAppConnectorDomainRoutesGet,
|
||||
|
||||
// Linux netfilter.
|
||||
req("POST /netfilter-kind"): handleC2NSetNetfilterKind,
|
||||
}
|
||||
|
||||
type c2nHandler func(*LocalBackend, http.ResponseWriter, *http.Request)
|
||||
|
||||
type methodAndPath struct {
|
||||
method string // empty string means fallback
|
||||
path string // Request.URL.Path (without query string)
|
||||
}
|
||||
|
||||
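// req parses a "METHOD /path" or bare "/path" string into a methodAndPath.
// For example:
//
//	req("POST /update") // methodAndPath{method: "POST", path: "/update"}
//	req("/echo")        // methodAndPath{method: "", path: "/echo"} (any-method fallback)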
func req(s string) methodAndPath {
|
||||
if m, p, ok := strings.Cut(s, " "); ok {
|
||||
return methodAndPath{m, p}
|
||||
}
|
||||
return methodAndPath{"", s}
|
||||
}
|
||||
|
||||
// c2nHandlerPaths is the set of all paths from c2nHandlers, without their HTTP methods.
|
||||
// It's used to detect requests with a non-matching method.
|
||||
var c2nHandlerPaths = set.Set[string]{}
|
||||
|
||||
func init() {
|
||||
for k := range c2nHandlers {
|
||||
c2nHandlerPaths.Add(k.path)
|
||||
}
|
||||
}
|
||||
|
||||
func (b *LocalBackend) handleC2N(w http.ResponseWriter, r *http.Request) {
|
||||
// First try to match by both method and path,
|
||||
if h, ok := c2nHandlers[methodAndPath{r.Method, r.URL.Path}]; ok {
|
||||
h(b, w, r)
|
||||
return
|
||||
}
|
||||
// Then try to match by just path.
|
||||
if h, ok := c2nHandlers[methodAndPath{path: r.URL.Path}]; ok {
|
||||
h(b, w, r)
|
||||
return
|
||||
}
|
||||
if c2nHandlerPaths.Contains(r.URL.Path) {
|
||||
http.Error(w, "bad method", http.StatusMethodNotAllowed)
|
||||
} else {
|
||||
http.Error(w, "unknown c2n path", http.StatusBadRequest)
|
||||
}
|
||||
}
|
||||
|
||||
func writeJSON(w http.ResponseWriter, v any) {
|
||||
w.Header().Set("Content-Type", "application/json")
|
||||
json.NewEncoder(w).Encode(v)
|
||||
}
|
||||
|
||||
func handleC2NEcho(b *LocalBackend, w http.ResponseWriter, r *http.Request) {
|
||||
// Test handler.
|
||||
body, _ := io.ReadAll(r.Body)
|
||||
w.Write(body)
|
||||
}
|
||||
|
||||
func handleC2NLogtailFlush(b *LocalBackend, w http.ResponseWriter, r *http.Request) {
|
||||
if b.TryFlushLogs() {
|
||||
w.WriteHeader(http.StatusNoContent)
|
||||
} else {
|
||||
http.Error(w, "no log flusher wired up", http.StatusInternalServerError)
|
||||
}
|
||||
}
|
||||
|
||||
func handleC2NDebugGoroutines(_ *LocalBackend, w http.ResponseWriter, r *http.Request) {
|
||||
w.Header().Set("Content-Type", "text/plain")
|
||||
w.Write(goroutines.ScrubbedGoroutineDump(true))
|
||||
}
|
||||
|
||||
func handleC2NDebugPrefs(b *LocalBackend, w http.ResponseWriter, r *http.Request) {
|
||||
writeJSON(w, b.Prefs())
|
||||
}
|
||||
|
||||
func handleC2NDebugMetrics(_ *LocalBackend, w http.ResponseWriter, r *http.Request) {
|
||||
w.Header().Set("Content-Type", "text/plain")
|
||||
clientmetric.WritePrometheusExpositionFormat(w)
|
||||
}
|
||||
|
||||
func handleC2NDebugComponentLogging(b *LocalBackend, w http.ResponseWriter, r *http.Request) {
|
||||
component := r.FormValue("component")
|
||||
secs, _ := strconv.Atoi(r.FormValue("secs"))
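// A missing or zero "secs" is normalized to -1 so that "until" lands in
// the past, which switches component debug logging off rather than
// leaving it enabled indefinitely.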
|
||||
if secs == 0 {
|
||||
secs -= 1
|
||||
}
|
||||
until := b.clock.Now().Add(time.Duration(secs) * time.Second)
|
||||
err := b.SetComponentDebugLogging(component, until)
|
||||
var res struct {
|
||||
Error string `json:",omitempty"`
|
||||
}
|
||||
if err != nil {
|
||||
res.Error = err.Error()
|
||||
}
|
||||
writeJSON(w, res)
|
||||
}
|
||||
|
||||
var c2nLogHeap func(http.ResponseWriter, *http.Request) // non-nil on most platforms (c2n_pprof.go)
|
||||
|
||||
func handleC2NDebugLogHeap(b *LocalBackend, w http.ResponseWriter, r *http.Request) {
|
||||
if c2nLogHeap == nil {
|
||||
// Not implemented on platforms trying to optimize for binary size or
|
||||
// reduced memory usage.
|
||||
http.Error(w, "not implemented", http.StatusNotImplemented)
|
||||
return
|
||||
}
|
||||
c2nLogHeap(w, r)
|
||||
}
|
||||
|
||||
var c2nPprof func(http.ResponseWriter, *http.Request, string) // non-nil on most platforms (c2n_pprof.go)
|
||||
|
||||
func handleC2NPprof(b *LocalBackend, w http.ResponseWriter, r *http.Request) {
|
||||
if c2nPprof == nil {
|
||||
// Not implemented on platforms trying to optimize for binary size or
|
||||
// reduced memory usage.
|
||||
http.Error(w, "not implemented", http.StatusNotImplemented)
|
||||
return
|
||||
}
|
||||
_, profile := path.Split(r.URL.Path)
|
||||
c2nPprof(w, r, profile)
|
||||
}
|
||||
|
||||
func handleC2NSSHUsernames(b *LocalBackend, w http.ResponseWriter, r *http.Request) {
|
||||
var req tailcfg.C2NSSHUsernamesRequest
|
||||
if r.Method == "POST" {
|
||||
if err := json.NewDecoder(r.Body).Decode(&req); err != nil {
|
||||
http.Error(w, err.Error(), http.StatusBadRequest)
|
||||
return
|
||||
}
|
||||
}
|
||||
res, err := b.getSSHUsernames(&req)
|
||||
if err != nil {
|
||||
http.Error(w, err.Error(), 500)
|
||||
return
|
||||
}
|
||||
writeJSON(w, res)
|
||||
}
|
||||
|
||||
func handleC2NSockStats(b *LocalBackend, w http.ResponseWriter, r *http.Request) {
|
||||
w.Header().Set("Content-Type", "text/plain")
|
||||
if b.sockstatLogger == nil {
|
||||
http.Error(w, "no sockstatLogger", http.StatusInternalServerError)
|
||||
return
|
||||
}
|
||||
b.sockstatLogger.Flush()
|
||||
fmt.Fprintf(w, "logid: %s\n", b.sockstatLogger.LogID())
|
||||
fmt.Fprintf(w, "debug info: %v\n", sockstats.DebugInfo())
|
||||
}
|
||||
|
||||
// handleC2NAppConnectorDomainRoutesGet handles returning the domains
|
||||
// that the app connector is responsible for, as well as the resolved
|
||||
// IP addresses for each domain. If the node is not configured as
|
||||
// an app connector, an empty map is returned.
|
||||
func handleC2NAppConnectorDomainRoutesGet(b *LocalBackend, w http.ResponseWriter, r *http.Request) {
|
||||
b.logf("c2n: GET /appconnector/routes received")
|
||||
|
||||
var res tailcfg.C2NAppConnectorDomainRoutesResponse
|
||||
if b.appConnector == nil {
|
||||
w.Header().Set("Content-Type", "application/json")
|
||||
json.NewEncoder(w).Encode(res)
|
||||
return
|
||||
}
|
||||
|
||||
res.Domains = b.appConnector.DomainRoutes()
|
||||
|
||||
w.Header().Set("Content-Type", "application/json")
|
||||
json.NewEncoder(w).Encode(res)
|
||||
}
|
||||
|
||||
func handleC2NSetNetfilterKind(b *LocalBackend, w http.ResponseWriter, r *http.Request) {
|
||||
b.logf("c2n: POST /netfilter-kind received")
|
||||
|
||||
if version.OS() != "linux" {
|
||||
http.Error(w, "netfilter kind only settable on linux", http.StatusNotImplemented)
|
||||
}
|
||||
|
||||
kind := r.FormValue("kind")
|
||||
b.logf("c2n: switching netfilter to %s", kind)
|
||||
|
||||
_, err := b.EditPrefs(&ipn.MaskedPrefs{
|
||||
NetfilterKindSet: true,
|
||||
Prefs: ipn.Prefs{
|
||||
NetfilterKind: kind,
|
||||
},
|
||||
})
|
||||
if err != nil {
|
||||
http.Error(w, err.Error(), http.StatusInternalServerError)
|
||||
return
|
||||
}
|
||||
|
||||
b.authReconfig()
|
||||
|
||||
w.WriteHeader(http.StatusNoContent)
|
||||
}
|
||||
|
||||
func handleC2NUpdateGet(b *LocalBackend, w http.ResponseWriter, r *http.Request) {
|
||||
b.logf("c2n: GET /update received")
|
||||
|
||||
res := b.newC2NUpdateResponse()
|
||||
res.Started = b.c2nUpdateStarted()
|
||||
|
||||
w.Header().Set("Content-Type", "application/json")
|
||||
json.NewEncoder(w).Encode(res)
|
||||
}
|
||||
|
||||
func handleC2NUpdatePost(b *LocalBackend, w http.ResponseWriter, r *http.Request) {
|
||||
b.logf("c2n: POST /update received")
|
||||
res := b.newC2NUpdateResponse()
|
||||
defer func() {
|
||||
if res.Err != "" {
|
||||
b.logf("c2n: POST /update failed: %s", res.Err)
|
||||
}
|
||||
w.Header().Set("Content-Type", "application/json")
|
||||
json.NewEncoder(w).Encode(res)
|
||||
}()
|
||||
|
||||
if !res.Enabled {
|
||||
res.Err = "not enabled"
|
||||
return
|
||||
}
|
||||
if !res.Supported {
|
||||
res.Err = "not supported"
|
||||
return
|
||||
}
|
||||
|
||||
// Do not update if we have active inbound SSH connections. Control can set
|
||||
// force=true query parameter to override this.
|
||||
if r.FormValue("force") != "true" && b.sshServer != nil && b.sshServer.NumActiveConns() > 0 {
|
||||
res.Err = "not updating due to active SSH connections"
|
||||
return
|
||||
}
|
||||
|
||||
if err := b.startAutoUpdate("c2n"); err != nil {
|
||||
res.Err = err.Error()
|
||||
return
|
||||
}
|
||||
res.Started = true
|
||||
}
|
||||
|
||||
func handleC2NPostureIdentityGet(b *LocalBackend, w http.ResponseWriter, r *http.Request) {
|
||||
b.logf("c2n: GET /posture/identity received")
|
||||
|
||||
res := tailcfg.C2NPostureIdentityResponse{}
|
||||
|
||||
// Only collect posture identity if enabled on the client,
|
||||
// this will first check syspolicy, MDM settings like Registry
|
||||
// on Windows or defaults on macOS. If they are not set, it falls
|
||||
// back to the cli-flag, `--posture-checking`.
|
||||
choice, err := syspolicy.GetPreferenceOption(syspolicy.PostureChecking)
|
||||
if err != nil {
|
||||
b.logf(
|
||||
"c2n: failed to read PostureChecking from syspolicy, returning default from CLI: %s; got error: %s",
|
||||
b.Prefs().PostureChecking(),
|
||||
err,
|
||||
)
|
||||
}
|
||||
|
||||
if choice.ShouldEnable(b.Prefs().PostureChecking()) {
|
||||
sns, err := posture.GetSerialNumbers(b.logf)
|
||||
if err != nil {
|
||||
http.Error(w, err.Error(), http.StatusInternalServerError)
|
||||
return
|
||||
}
|
||||
res.SerialNumbers = sns
|
||||
|
||||
// TODO(tailscale/corp#21371, 2024-07-10): once this has landed in a stable release
|
||||
// and looks good in client metrics, remove this parameter and always report MAC
|
||||
// addresses.
|
||||
if r.FormValue("hwaddrs") == "true" {
|
||||
res.IfaceHardwareAddrs, err = posture.GetHardwareAddrs()
|
||||
if err != nil {
|
||||
b.logf("c2n: GetHardwareAddrs returned error: %v", err)
|
||||
}
|
||||
}
|
||||
} else {
|
||||
res.PostureDisabled = true
|
||||
}
|
||||
|
||||
w.Header().Set("Content-Type", "application/json")
|
||||
json.NewEncoder(w).Encode(res)
|
||||
}
|
||||
|
||||
func (b *LocalBackend) newC2NUpdateResponse() tailcfg.C2NUpdateResponse {
|
||||
// If NewUpdater does not return an error, we can update the installation.
|
||||
//
|
||||
// Note that we create the Updater solely to check for errors; we do not
|
||||
// invoke it here. For this purpose, it is ok to pass it a zero Arguments.
|
||||
prefs := b.Prefs().AutoUpdate()
|
||||
return tailcfg.C2NUpdateResponse{
|
||||
Enabled: envknob.AllowsRemoteUpdate() || prefs.Apply.EqualBool(true),
|
||||
Supported: clientupdate.CanAutoUpdate() && !version.IsMacSysExt(),
|
||||
}
|
||||
}
|
||||
|
||||
func (b *LocalBackend) c2nUpdateStarted() bool {
|
||||
b.mu.Lock()
|
||||
defer b.mu.Unlock()
|
||||
return b.c2nUpdateStatus.started
|
||||
}
|
||||
|
||||
func (b *LocalBackend) setC2NUpdateStarted(v bool) {
|
||||
b.mu.Lock()
|
||||
defer b.mu.Unlock()
|
||||
b.c2nUpdateStatus.started = v
|
||||
}
|
||||
|
||||
func (b *LocalBackend) trySetC2NUpdateStarted() bool {
|
||||
b.mu.Lock()
|
||||
defer b.mu.Unlock()
|
||||
if b.c2nUpdateStatus.started {
|
||||
return false
|
||||
}
|
||||
b.c2nUpdateStatus.started = true
|
||||
return true
|
||||
}
|
||||
|
||||
// findCmdTailscale looks for the cmd/tailscale that corresponds to the
|
||||
// currently running cmd/tailscaled. It's up to the caller to verify that the
|
||||
// two match, but this function does its best to find the right one. Notably, it
|
||||
// doesn't use $PATH for security reasons.
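//
// For example, a daemon running as /usr/sbin/tailscaled looks for
// /usr/bin/tailscale, and a QNAP install matching
// /share/*/.qpkg/Tailscale/tailscaled looks for a "tailscale" binary
// alongside it.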
|
||||
func findCmdTailscale() (string, error) {
|
||||
self, err := os.Executable()
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
var ts string
|
||||
switch runtime.GOOS {
|
||||
case "linux":
|
||||
if self == "/usr/sbin/tailscaled" || self == "/usr/bin/tailscaled" {
|
||||
ts = "/usr/bin/tailscale"
|
||||
}
|
||||
if self == "/usr/local/sbin/tailscaled" || self == "/usr/local/bin/tailscaled" {
|
||||
ts = "/usr/local/bin/tailscale"
|
||||
}
|
||||
switch distro.Get() {
|
||||
case distro.QNAP:
|
||||
// The volume under /share/ where qpkg are installed is not
|
||||
// predictable. But the rest of the path is.
|
||||
ok, err := filepath.Match("/share/*/.qpkg/Tailscale/tailscaled", self)
|
||||
if err == nil && ok {
|
||||
ts = filepath.Join(filepath.Dir(self), "tailscale")
|
||||
}
|
||||
case distro.Unraid:
|
||||
if self == "/usr/local/emhttp/plugins/tailscale/bin/tailscaled" {
|
||||
ts = "/usr/local/emhttp/plugins/tailscale/bin/tailscale"
|
||||
}
|
||||
}
|
||||
case "windows":
|
||||
ts = filepath.Join(filepath.Dir(self), "tailscale.exe")
|
||||
case "freebsd":
|
||||
if self == "/usr/local/bin/tailscaled" {
|
||||
ts = "/usr/local/bin/tailscale"
|
||||
}
|
||||
default:
|
||||
return "", fmt.Errorf("unsupported OS %v", runtime.GOOS)
|
||||
}
|
||||
if ts != "" && regularFileExists(ts) {
|
||||
return ts, nil
|
||||
}
|
||||
return "", errors.New("tailscale executable not found in expected place")
|
||||
}
|
||||
|
||||
func tailscaleUpdateCmd(cmdTS string) *exec.Cmd {
|
||||
defaultCmd := exec.Command(cmdTS, "update", "--yes")
|
||||
if runtime.GOOS != "linux" {
|
||||
return defaultCmd
|
||||
}
|
||||
if _, err := exec.LookPath("systemd-run"); err != nil {
|
||||
return defaultCmd
|
||||
}
|
||||
|
||||
// When systemd-run is available, use it to run the update command. This
|
||||
// creates a new temporary unit separate from the tailscaled unit. When
|
||||
// tailscaled is restarted during the update, systemd won't kill this
|
||||
// temporary update unit, which could cause unexpected breakage.
|
||||
//
|
||||
// We want to use a few optional flags:
|
||||
// * --wait, to block the update command until completion (added in systemd 232)
|
||||
// * --pipe, to collect stdout/stderr (added in systemd 235)
|
||||
// * --collect, to clean up failed runs from memory (added in systemd 236)
|
||||
//
|
||||
// We need to check the version of systemd to figure out if those flags are
|
||||
// available.
|
||||
//
|
||||
// The output will look like:
|
||||
//
|
||||
// systemd 255 (255.7-1-arch)
|
||||
// +PAM +AUDIT ... other feature flags ...
|
||||
systemdVerOut, err := exec.Command("systemd-run", "--version").Output()
|
||||
if err != nil {
|
||||
return defaultCmd
|
||||
}
|
||||
parts := strings.Fields(string(systemdVerOut))
|
||||
if len(parts) < 2 || parts[0] != "systemd" {
|
||||
return defaultCmd
|
||||
}
|
||||
systemdVer, err := strconv.Atoi(parts[1])
|
||||
if err != nil {
|
||||
return defaultCmd
|
||||
}
|
||||
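// For example, systemd 255 as parsed above gets the full flag set, the
// equivalent of (cmdTS path shown purely for illustration):
//
//	exec.Command("systemd-run", "--wait", "--pipe", "--collect",
//		"/usr/bin/tailscale", "update", "--yes")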
if systemdVer >= 236 {
|
||||
return exec.Command("systemd-run", "--wait", "--pipe", "--collect", cmdTS, "update", "--yes")
|
||||
} else if systemdVer >= 235 {
|
||||
return exec.Command("systemd-run", "--wait", "--pipe", cmdTS, "update", "--yes")
|
||||
} else if systemdVer >= 232 {
|
||||
return exec.Command("systemd-run", "--wait", cmdTS, "update", "--yes")
|
||||
} else {
|
||||
return exec.Command("systemd-run", cmdTS, "update", "--yes")
|
||||
}
|
||||
}
|
||||
|
||||
func regularFileExists(path string) bool {
|
||||
fi, err := os.Stat(path)
|
||||
return err == nil && fi.Mode().IsRegular()
|
||||
}
|
||||
|
||||
func handleC2NWoL(b *LocalBackend, w http.ResponseWriter, r *http.Request) {
|
||||
r.ParseForm()
|
||||
var macs []net.HardwareAddr
|
||||
for _, macStr := range r.Form["mac"] {
|
||||
mac, err := net.ParseMAC(macStr)
|
||||
if err != nil {
|
||||
http.Error(w, "bad 'mac' param", http.StatusBadRequest)
|
||||
return
|
||||
}
|
||||
macs = append(macs, mac)
|
||||
}
|
||||
var res struct {
|
||||
SentTo []string
|
||||
Errors []string
|
||||
}
|
||||
st := b.sys.NetMon.Get().InterfaceState()
|
||||
if st == nil {
|
||||
res.Errors = append(res.Errors, "no interface state")
|
||||
writeJSON(w, &res)
|
||||
return
|
||||
}
|
||||
var password []byte // TODO(bradfitz): support? does anything use WoL passwords?
|
||||
for _, mac := range macs {
|
||||
for ifName, ips := range st.InterfaceIPs {
|
||||
for _, ip := range ips {
|
||||
if ip.Addr().IsLoopback() || ip.Addr().Is6() {
|
||||
continue
|
||||
}
|
||||
local := &net.UDPAddr{
|
||||
IP: ip.Addr().AsSlice(),
|
||||
Port: 0,
|
||||
}
|
||||
remote := &net.UDPAddr{
|
||||
IP: net.IPv4bcast,
|
||||
Port: 0,
|
||||
}
|
||||
if err := wol.Wake(mac, password, local, remote); err != nil {
|
||||
res.Errors = append(res.Errors, err.Error())
|
||||
} else {
|
||||
res.SentTo = append(res.SentTo, ifName)
|
||||
}
|
||||
break // one per interface is enough
|
||||
}
|
||||
}
|
||||
}
|
||||
sort.Strings(res.SentTo)
|
||||
writeJSON(w, &res)
|
||||
}
|
||||
|
||||
// handleC2NTLSCertStatus returns info about the last TLS certificate issued for the
|
||||
// provided domain. This can be called by the controlplane to clean up DNS TXT
|
||||
// records when they're no longer needed by LetsEncrypt.
|
||||
//
|
||||
// It does not kick off a cert fetch or async refresh. It only reports anything
|
||||
// that's already sitting on disk, and only reports metadata about the public
|
||||
// cert (stuff that'd be in the CT logs anyway).
|
||||
func handleC2NTLSCertStatus(b *LocalBackend, w http.ResponseWriter, r *http.Request) {
|
||||
cs, err := b.getCertStore()
|
||||
if err != nil {
|
||||
http.Error(w, err.Error(), http.StatusInternalServerError)
|
||||
return
|
||||
}
|
||||
|
||||
domain := r.FormValue("domain")
|
||||
if domain == "" {
|
||||
http.Error(w, "no 'domain'", http.StatusBadRequest)
|
||||
return
|
||||
}
|
||||
|
||||
ret := &tailcfg.C2NTLSCertInfo{}
|
||||
pair, err := getCertPEMCached(cs, domain, b.clock.Now())
|
||||
ret.Valid = err == nil
|
||||
if err != nil {
|
||||
ret.Error = err.Error()
|
||||
if errors.Is(err, errCertExpired) {
|
||||
ret.Expired = true
|
||||
} else if errors.Is(err, ipn.ErrStateNotExist) {
|
||||
ret.Missing = true
|
||||
ret.Error = "no certificate"
|
||||
}
|
||||
} else {
|
||||
block, _ := pem.Decode(pair.CertPEM)
|
||||
if block == nil {
|
||||
ret.Error = "invalid PEM"
|
||||
ret.Valid = false
|
||||
} else {
|
||||
cert, err := x509.ParseCertificate(block.Bytes)
|
||||
if err != nil {
|
||||
ret.Error = fmt.Sprintf("invalid certificate: %v", err)
|
||||
ret.Valid = false
|
||||
} else {
|
||||
ret.NotBefore = cert.NotBefore.UTC().Format(time.RFC3339)
|
||||
ret.NotAfter = cert.NotAfter.UTC().Format(time.RFC3339)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
writeJSON(w, ret)
|
||||
}
|
||||
45
vendor/tailscale.com/ipn/ipnlocal/c2n_pprof.go
generated
vendored
Normal file
@@ -0,0 +1,45 @@
|
||||
// Copyright (c) Tailscale Inc & AUTHORS
|
||||
// SPDX-License-Identifier: BSD-3-Clause
|
||||
|
||||
//go:build !js && !wasm
|
||||
|
||||
package ipnlocal
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"net/http"
|
||||
"runtime"
|
||||
"runtime/pprof"
|
||||
"strconv"
|
||||
)
|
||||
|
||||
func init() {
|
||||
c2nLogHeap = func(w http.ResponseWriter, r *http.Request) {
|
||||
// Support same optional gc parameter as net/http/pprof:
|
||||
if gc, _ := strconv.Atoi(r.FormValue("gc")); gc > 0 {
|
||||
runtime.GC()
|
||||
}
|
||||
pprof.WriteHeapProfile(w)
|
||||
}
|
||||
|
||||
c2nPprof = func(w http.ResponseWriter, r *http.Request, profile string) {
|
||||
w.Header().Set("X-Content-Type-Options", "nosniff")
|
||||
p := pprof.Lookup(string(profile))
|
||||
if p == nil {
|
||||
http.Error(w, "Unknown profile", http.StatusNotFound)
|
||||
return
|
||||
}
|
||||
gc, _ := strconv.Atoi(r.FormValue("gc"))
|
||||
if profile == "heap" && gc > 0 {
|
||||
runtime.GC()
|
||||
}
|
||||
debug, _ := strconv.Atoi(r.FormValue("debug"))
|
||||
if debug != 0 {
|
||||
w.Header().Set("Content-Type", "text/plain; charset=utf-8")
|
||||
} else {
|
||||
w.Header().Set("Content-Type", "application/octet-stream")
|
||||
w.Header().Set("Content-Disposition", fmt.Sprintf(`attachment; filename="%s"`, profile))
|
||||
}
|
||||
p.WriteTo(w, debug)
|
||||
}
|
||||
}
|
||||
731
vendor/tailscale.com/ipn/ipnlocal/cert.go
generated
vendored
Normal file
@@ -0,0 +1,731 @@
|
||||
// Copyright (c) Tailscale Inc & AUTHORS
|
||||
// SPDX-License-Identifier: BSD-3-Clause
|
||||
|
||||
//go:build !js
|
||||
|
||||
package ipnlocal
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"context"
|
||||
"crypto"
|
||||
"crypto/ecdsa"
|
||||
"crypto/elliptic"
|
||||
"crypto/rand"
|
||||
"crypto/rsa"
|
||||
"crypto/tls"
|
||||
"crypto/x509"
|
||||
"crypto/x509/pkix"
|
||||
"encoding/json"
|
||||
"encoding/pem"
|
||||
"errors"
|
||||
"fmt"
|
||||
"io"
|
||||
"log"
|
||||
randv2 "math/rand/v2"
|
||||
"net"
|
||||
"os"
|
||||
"path/filepath"
|
||||
"runtime"
|
||||
"slices"
|
||||
"strings"
|
||||
"sync"
|
||||
"time"
|
||||
|
||||
"github.com/tailscale/golang-x-crypto/acme"
|
||||
"tailscale.com/atomicfile"
|
||||
"tailscale.com/envknob"
|
||||
"tailscale.com/hostinfo"
|
||||
"tailscale.com/ipn"
|
||||
"tailscale.com/ipn/ipnstate"
|
||||
"tailscale.com/ipn/store"
|
||||
"tailscale.com/ipn/store/mem"
|
||||
"tailscale.com/types/logger"
|
||||
"tailscale.com/util/testenv"
|
||||
"tailscale.com/version"
|
||||
"tailscale.com/version/distro"
|
||||
)
|
||||
|
||||
// Process-wide cache. (A new *Handler is created per connection,
|
||||
// effectively per request)
|
||||
var (
|
||||
// acmeMu guards all ACME operations, so concurrent requests
|
||||
// for certs don't slam ACME. The first will go through and
|
||||
// populate the on-disk cache and the rest should use that.
|
||||
acmeMu sync.Mutex
|
||||
|
||||
renewMu sync.Mutex // lock order: acmeMu before renewMu
|
||||
renewCertAt = map[string]time.Time{}
|
||||
)
|
||||
|
||||
// certDir returns (creating if needed) the directory in which cached
|
||||
// cert keypairs are stored.
|
||||
func (b *LocalBackend) certDir() (string, error) {
|
||||
d := b.TailscaleVarRoot()
|
||||
|
||||
// As a workaround for Synology DSM6 not having a "var" directory, use the
|
||||
// app's "etc" directory (on a small partition) to hold certs at least.
|
||||
// See https://github.com/tailscale/tailscale/issues/4060#issuecomment-1186592251
|
||||
if d == "" && runtime.GOOS == "linux" && distro.Get() == distro.Synology && distro.DSMVersion() == 6 {
|
||||
d = "/var/packages/Tailscale/etc" // base; we append "certs" below
|
||||
}
|
||||
if d == "" {
|
||||
return "", errors.New("no TailscaleVarRoot")
|
||||
}
|
||||
full := filepath.Join(d, "certs")
|
||||
if err := os.MkdirAll(full, 0700); err != nil {
|
||||
return "", err
|
||||
}
|
||||
return full, nil
|
||||
}
|
||||
|
||||
var acmeDebug = envknob.RegisterBool("TS_DEBUG_ACME")
|
||||
|
||||
// GetCertPEM gets the TLSCertKeyPair for domain, either from cache or via the
|
||||
// ACME process. ACME process is used for new domain certs, existing expired
|
||||
// certs or existing certs that should get renewed due to upcoming expiry.
|
||||
//
|
||||
// If a cert is expired, it will be renewed synchronously otherwise it will be
|
||||
// renewed asynchronously.
|
||||
func (b *LocalBackend) GetCertPEM(ctx context.Context, domain string) (*TLSCertKeyPair, error) {
|
||||
return b.GetCertPEMWithValidity(ctx, domain, 0)
|
||||
}
|
||||
|
||||
// GetCertPEMWithValidity gets the TLSCertKeyPair for domain, either from cache
|
||||
// or via the ACME process. ACME process is used for new domain certs, existing
|
||||
// expired certs or existing certs that should get renewed sooner than
|
||||
// minValidity.
|
||||
//
|
||||
// If a cert is expired, or expires sooner than minValidity, it will be renewed
|
||||
// synchronously. Otherwise it will be renewed asynchronously.
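//
// For example, a caller that needs at least a week of remaining validity can
// force a synchronous renewal when the cached pair expires sooner than that
// (the domain name here is illustrative):
//
//	pair, err := b.GetCertPEMWithValidity(ctx, "node.example.ts.net", 7*24*time.Hour)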
|
||||
func (b *LocalBackend) GetCertPEMWithValidity(ctx context.Context, domain string, minValidity time.Duration) (*TLSCertKeyPair, error) {
|
||||
if !validLookingCertDomain(domain) {
|
||||
return nil, errors.New("invalid domain")
|
||||
}
|
||||
logf := logger.WithPrefix(b.logf, fmt.Sprintf("cert(%q): ", domain))
|
||||
now := b.clock.Now()
|
||||
traceACME := func(v any) {
|
||||
if !acmeDebug() {
|
||||
return
|
||||
}
|
||||
j, _ := json.MarshalIndent(v, "", "\t")
|
||||
log.Printf("acme %T: %s", v, j)
|
||||
}
|
||||
|
||||
cs, err := b.getCertStore()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
if pair, err := getCertPEMCached(cs, domain, now); err == nil {
|
||||
// If we got here, we have a valid unexpired cert.
|
||||
// Check whether we should start an async renewal.
|
||||
shouldRenew, err := b.shouldStartDomainRenewal(cs, domain, now, pair, minValidity)
|
||||
if err != nil {
|
||||
logf("error checking for certificate renewal: %v", err)
|
||||
// Renewal check failed, but the current cert is valid and not
|
||||
// expired, so it's safe to return.
|
||||
return pair, nil
|
||||
}
|
||||
if !shouldRenew {
|
||||
return pair, nil
|
||||
}
|
||||
if minValidity == 0 {
|
||||
logf("starting async renewal")
|
||||
// Start renewal in the background, return current valid cert.
|
||||
go b.getCertPEM(context.Background(), cs, logf, traceACME, domain, now, minValidity)
|
||||
return pair, nil
|
||||
}
|
||||
// If the caller requested a specific validity duration, fall through
|
||||
// to synchronous renewal to fulfill that.
|
||||
logf("starting sync renewal")
|
||||
}
|
||||
|
||||
pair, err := b.getCertPEM(ctx, cs, logf, traceACME, domain, now, minValidity)
|
||||
if err != nil {
|
||||
logf("getCertPEM: %v", err)
|
||||
return nil, err
|
||||
}
|
||||
return pair, nil
|
||||
}
|
||||
|
||||
// shouldStartDomainRenewal reports whether the domain's cert should be renewed
|
||||
// based on the current time, the cert's expiry, and the ARI check.
|
||||
func (b *LocalBackend) shouldStartDomainRenewal(cs certStore, domain string, now time.Time, pair *TLSCertKeyPair, minValidity time.Duration) (bool, error) {
|
||||
if minValidity != 0 {
|
||||
cert, err := pair.parseCertificate()
|
||||
if err != nil {
|
||||
return false, fmt.Errorf("parsing certificate: %w", err)
|
||||
}
|
||||
return cert.NotAfter.Sub(now) < minValidity, nil
|
||||
}
|
||||
renewMu.Lock()
|
||||
defer renewMu.Unlock()
|
||||
if renewAt, ok := renewCertAt[domain]; ok {
|
||||
return now.After(renewAt), nil
|
||||
}
|
||||
|
||||
renewTime, err := b.domainRenewalTimeByARI(cs, pair)
|
||||
if err != nil {
|
||||
// Log any ARI failure and fall back to checking for renewal by expiry.
|
||||
b.logf("acme: ARI check failed: %v; falling back to expiry-based check", err)
|
||||
renewTime, err = b.domainRenewalTimeByExpiry(pair)
|
||||
if err != nil {
|
||||
return false, err
|
||||
}
|
||||
}
|
||||
|
||||
renewCertAt[domain] = renewTime
|
||||
return now.After(renewTime), nil
|
||||
}
|
||||
|
||||
func (b *LocalBackend) domainRenewed(domain string) {
|
||||
renewMu.Lock()
|
||||
defer renewMu.Unlock()
|
||||
delete(renewCertAt, domain)
|
||||
}
|
||||
|
||||
func (b *LocalBackend) domainRenewalTimeByExpiry(pair *TLSCertKeyPair) (time.Time, error) {
|
||||
cert, err := pair.parseCertificate()
|
||||
if err != nil {
|
||||
return time.Time{}, fmt.Errorf("parsing certificate: %w", err)
|
||||
}
|
||||
|
||||
certLifetime := cert.NotAfter.Sub(cert.NotBefore)
|
||||
if certLifetime < 0 {
|
||||
return time.Time{}, fmt.Errorf("negative certificate lifetime %v", certLifetime)
|
||||
}
|
||||
|
||||
// Per https://github.com/tailscale/tailscale/issues/8204, check
|
||||
// whether we're more than 2/3 of the way through the certificate's
|
||||
// lifetime, which is the officially-recommended best practice by Let's
|
||||
// Encrypt.
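// For a typical 90-day LetsEncrypt certificate this puts the renewal
// point 60 days after NotBefore; for example, a cert issued 2024-01-01
// becomes due for renewal from 2024-03-01 onward.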
|
||||
renewalDuration := certLifetime * 2 / 3
|
||||
renewAt := cert.NotBefore.Add(renewalDuration)
|
||||
return renewAt, nil
|
||||
}
|
||||
|
||||
func (b *LocalBackend) domainRenewalTimeByARI(cs certStore, pair *TLSCertKeyPair) (time.Time, error) {
|
||||
var blocks []*pem.Block
|
||||
rest := pair.CertPEM
|
||||
for len(rest) > 0 {
|
||||
var block *pem.Block
|
||||
block, rest = pem.Decode(rest)
|
||||
if block == nil {
|
||||
return time.Time{}, fmt.Errorf("parsing certificate PEM")
|
||||
}
|
||||
blocks = append(blocks, block)
|
||||
}
|
||||
if len(blocks) < 1 {
|
||||
return time.Time{}, fmt.Errorf("could not parse certificate chain from certStore, got %d PEM block(s)", len(blocks))
|
||||
}
|
||||
ac, err := acmeClient(cs)
|
||||
if err != nil {
|
||||
return time.Time{}, err
|
||||
}
|
||||
ctx, cancel := context.WithTimeout(b.ctx, 5*time.Second)
|
||||
defer cancel()
|
||||
ri, err := ac.FetchRenewalInfo(ctx, blocks[0].Bytes)
|
||||
if err != nil {
|
||||
return time.Time{}, fmt.Errorf("failed to fetch renewal info from ACME server: %w", err)
|
||||
}
|
||||
if acmeDebug() {
|
||||
b.logf("acme: ARI response: %+v", ri)
|
||||
}
|
||||
|
||||
// Select a random time in the suggested window and renew if that time has
|
||||
// passed. Time is randomized per recommendation in
|
||||
// https://datatracker.ietf.org/doc/draft-ietf-acme-ari/
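// For example, a suggested window of 2024-05-01T00:00Z..2024-05-03T00:00Z
// yields a uniformly random renewTime within those 48 hours, so a fleet of
// nodes spreads its renewals rather than all hitting the ACME server at the
// start of the window.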
|
||||
start, end := ri.SuggestedWindow.Start, ri.SuggestedWindow.End
|
||||
renewTime := start.Add(randv2.N(end.Sub(start)))
|
||||
return renewTime, nil
|
||||
}
|
||||
|
||||
// certStore provides a way to persist and retrieve TLS certificates.
// As of 2023-02-01, we store certs in directories on disk everywhere
|
||||
// except on Kubernetes, where we use the state store.
|
||||
type certStore interface {
|
||||
// Read returns the cert and key for domain, if they exist and are valid
|
||||
// for now. If they're expired, it returns errCertExpired.
|
||||
// If they don't exist, it returns ipn.ErrStateNotExist.
|
||||
Read(domain string, now time.Time) (*TLSCertKeyPair, error)
|
||||
// WriteCert writes the cert for domain.
|
||||
WriteCert(domain string, cert []byte) error
|
||||
// WriteKey writes the key for domain.
|
||||
WriteKey(domain string, key []byte) error
|
||||
// ACMEKey returns the value previously stored via WriteACMEKey.
|
||||
// It is a PEM encoded ECDSA key.
|
||||
ACMEKey() ([]byte, error)
|
||||
// WriteACMEKey stores the provided PEM encoded ECDSA key.
|
||||
WriteACMEKey([]byte) error
|
||||
}
|
||||
|
||||
var errCertExpired = errors.New("cert expired")
|
||||
|
||||
var testX509Roots *x509.CertPool // set non-nil by tests
|
||||
|
||||
func (b *LocalBackend) getCertStore() (certStore, error) {
|
||||
switch b.store.(type) {
|
||||
case *store.FileStore:
|
||||
case *mem.Store:
|
||||
default:
|
||||
if hostinfo.GetEnvType() == hostinfo.Kubernetes {
|
||||
// We're running in Kubernetes with a custom StateStore,
|
||||
// use that instead of the cert directory.
|
||||
// TODO(maisem): expand this to other environments?
|
||||
return certStateStore{StateStore: b.store}, nil
|
||||
}
|
||||
}
|
||||
dir, err := b.certDir()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if testX509Roots != nil && !testenv.InTest() {
|
||||
panic("use of test hook outside of tests")
|
||||
}
|
||||
return certFileStore{dir: dir, testRoots: testX509Roots}, nil
|
||||
}
|
||||
|
||||
// certFileStore implements certStore by storing the cert & key files in the named directory.
|
||||
type certFileStore struct {
|
||||
dir string
|
||||
|
||||
// This field allows a test to override the CA root(s) for certificate
|
||||
// verification. If nil the default system pool is used.
|
||||
testRoots *x509.CertPool
|
||||
}
|
||||
|
||||
const acmePEMName = "acme-account.key.pem"
|
||||
|
||||
func (f certFileStore) ACMEKey() ([]byte, error) {
|
||||
pemName := filepath.Join(f.dir, acmePEMName)
|
||||
v, err := os.ReadFile(pemName)
|
||||
if err != nil {
|
||||
if os.IsNotExist(err) {
|
||||
return nil, ipn.ErrStateNotExist
|
||||
}
|
||||
return nil, err
|
||||
}
|
||||
return v, nil
|
||||
}
|
||||
|
||||
func (f certFileStore) WriteACMEKey(b []byte) error {
|
||||
pemName := filepath.Join(f.dir, acmePEMName)
|
||||
return atomicfile.WriteFile(pemName, b, 0600)
|
||||
}
|
||||
|
||||
func (f certFileStore) Read(domain string, now time.Time) (*TLSCertKeyPair, error) {
|
||||
certPEM, err := os.ReadFile(certFile(f.dir, domain))
|
||||
if err != nil {
|
||||
if os.IsNotExist(err) {
|
||||
return nil, ipn.ErrStateNotExist
|
||||
}
|
||||
return nil, err
|
||||
}
|
||||
keyPEM, err := os.ReadFile(keyFile(f.dir, domain))
|
||||
if err != nil {
|
||||
if os.IsNotExist(err) {
|
||||
return nil, ipn.ErrStateNotExist
|
||||
}
|
||||
return nil, err
|
||||
}
|
||||
if !validCertPEM(domain, keyPEM, certPEM, f.testRoots, now) {
|
||||
return nil, errCertExpired
|
||||
}
|
||||
return &TLSCertKeyPair{CertPEM: certPEM, KeyPEM: keyPEM, Cached: true}, nil
|
||||
}
|
||||
|
||||
func (f certFileStore) WriteCert(domain string, cert []byte) error {
|
||||
return atomicfile.WriteFile(certFile(f.dir, domain), cert, 0644)
|
||||
}
|
||||
|
||||
func (f certFileStore) WriteKey(domain string, key []byte) error {
|
||||
return atomicfile.WriteFile(keyFile(f.dir, domain), key, 0600)
|
||||
}
|
||||
|
||||
// certStateStore implements certStore by storing the cert & key files in an ipn.StateStore.
|
||||
type certStateStore struct {
|
||||
ipn.StateStore
|
||||
|
||||
// This field allows a test to override the CA root(s) for certificate
|
||||
// verification. If nil the default system pool is used.
|
||||
testRoots *x509.CertPool
|
||||
}
|
||||
|
||||
func (s certStateStore) Read(domain string, now time.Time) (*TLSCertKeyPair, error) {
|
||||
certPEM, err := s.ReadState(ipn.StateKey(domain + ".crt"))
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
keyPEM, err := s.ReadState(ipn.StateKey(domain + ".key"))
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if !validCertPEM(domain, keyPEM, certPEM, s.testRoots, now) {
|
||||
return nil, errCertExpired
|
||||
}
|
||||
return &TLSCertKeyPair{CertPEM: certPEM, KeyPEM: keyPEM, Cached: true}, nil
|
||||
}
|
||||
|
||||
func (s certStateStore) WriteCert(domain string, cert []byte) error {
|
||||
return ipn.WriteState(s.StateStore, ipn.StateKey(domain+".crt"), cert)
|
||||
}
|
||||
|
||||
func (s certStateStore) WriteKey(domain string, key []byte) error {
|
||||
return ipn.WriteState(s.StateStore, ipn.StateKey(domain+".key"), key)
|
||||
}
|
||||
|
||||
func (s certStateStore) ACMEKey() ([]byte, error) {
|
||||
return s.ReadState(ipn.StateKey(acmePEMName))
|
||||
}
|
||||
|
||||
func (s certStateStore) WriteACMEKey(key []byte) error {
|
||||
return ipn.WriteState(s.StateStore, ipn.StateKey(acmePEMName), key)
|
||||
}
|
||||
|
||||
// TLSCertKeyPair is a TLS public and private key, and whether they were obtained
|
||||
// from cache or freshly obtained.
|
||||
type TLSCertKeyPair struct {
|
||||
CertPEM []byte // public key, in PEM form
|
||||
KeyPEM []byte // private key, in PEM form
|
||||
Cached bool // whether result came from cache
|
||||
}
|
||||
|
||||
func (kp TLSCertKeyPair) parseCertificate() (*x509.Certificate, error) {
|
||||
block, _ := pem.Decode(kp.CertPEM)
|
||||
if block == nil {
|
||||
return nil, fmt.Errorf("error parsing certificate PEM")
|
||||
}
|
||||
if block.Type != "CERTIFICATE" {
|
||||
return nil, fmt.Errorf("PEM block is %q, not a CERTIFICATE", block.Type)
|
||||
}
|
||||
return x509.ParseCertificate(block.Bytes)
|
||||
}
|
||||
|
||||
func keyFile(dir, domain string) string { return filepath.Join(dir, domain+".key") }
|
||||
func certFile(dir, domain string) string { return filepath.Join(dir, domain+".crt") }
|
||||
|
||||
// getCertPEMCached returns a non-nil keyPair if a cached keypair for domain
|
||||
// exists on disk in dir that is valid at the provided now time.
|
||||
//
|
||||
// If the keypair is expired, it returns errCertExpired.
|
||||
// If the keypair doesn't exist, it returns ipn.ErrStateNotExist.
|
||||
func getCertPEMCached(cs certStore, domain string, now time.Time) (p *TLSCertKeyPair, err error) {
|
||||
if !validLookingCertDomain(domain) {
|
||||
// Before we read files from disk using it, validate it's halfway
|
||||
// reasonable looking.
|
||||
return nil, fmt.Errorf("invalid domain %q", domain)
|
||||
}
|
||||
return cs.Read(domain, now)
|
||||
}
|
||||
|
||||
func (b *LocalBackend) getCertPEM(ctx context.Context, cs certStore, logf logger.Logf, traceACME func(any), domain string, now time.Time, minValidity time.Duration) (*TLSCertKeyPair, error) {
|
||||
acmeMu.Lock()
|
||||
defer acmeMu.Unlock()
|
||||
|
||||
// In case this method was triggered multiple times in parallel (when
|
||||
// serving incoming requests), check whether one of the other goroutines
|
||||
// already renewed the cert before us.
|
||||
if p, err := getCertPEMCached(cs, domain, now); err == nil {
|
||||
// shouldStartDomainRenewal caches its result so it's OK to call this
|
||||
// frequently.
|
||||
shouldRenew, err := b.shouldStartDomainRenewal(cs, domain, now, p, minValidity)
|
||||
if err != nil {
|
||||
logf("error checking for certificate renewal: %v", err)
|
||||
} else if !shouldRenew {
|
||||
return p, nil
|
||||
}
|
||||
} else if !errors.Is(err, ipn.ErrStateNotExist) && !errors.Is(err, errCertExpired) {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
ac, err := acmeClient(cs)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
a, err := ac.GetReg(ctx, "" /* pre-RFC param */)
|
||||
switch {
|
||||
case err == nil:
|
||||
// Great, already registered.
|
||||
logf("already had ACME account.")
|
||||
case err == acme.ErrNoAccount:
|
||||
a, err = ac.Register(ctx, new(acme.Account), acme.AcceptTOS)
|
||||
if err == acme.ErrAccountAlreadyExists {
|
||||
// Potential race. Double check.
|
||||
a, err = ac.GetReg(ctx, "" /* pre-RFC param */)
|
||||
}
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("acme.Register: %w", err)
|
||||
}
|
||||
logf("registered ACME account.")
|
||||
traceACME(a)
|
||||
default:
|
||||
return nil, fmt.Errorf("acme.GetReg: %w", err)
|
||||
|
||||
}
|
||||
if a.Status != acme.StatusValid {
|
||||
return nil, fmt.Errorf("unexpected ACME account status %q", a.Status)
|
||||
}
|
||||
|
||||
// Before hitting LetsEncrypt, see if this is a domain that Tailscale will do DNS challenges for.
|
||||
st := b.StatusWithoutPeers()
|
||||
if err := checkCertDomain(st, domain); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
order, err := ac.AuthorizeOrder(ctx, []acme.AuthzID{{Type: "dns", Value: domain}})
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
traceACME(order)
|
||||
|
||||
for _, aurl := range order.AuthzURLs {
|
||||
az, err := ac.GetAuthorization(ctx, aurl)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
traceACME(az)
|
||||
for _, ch := range az.Challenges {
|
||||
if ch.Type == "dns-01" {
|
||||
rec, err := ac.DNS01ChallengeRecord(ch.Token)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
key := "_acme-challenge." + domain
|
||||
|
||||
// Do a best-effort lookup to see if we've already created this DNS name
|
||||
// in a previous attempt. Don't burn too much time on it, though. Worst
|
||||
// case we ask the server to create something that already exists.
|
||||
var resolver net.Resolver
|
||||
lookupCtx, lookupCancel := context.WithTimeout(ctx, 500*time.Millisecond)
|
||||
txts, _ := resolver.LookupTXT(lookupCtx, key)
|
||||
lookupCancel()
|
||||
if slices.Contains(txts, rec) {
|
||||
logf("TXT record already existed")
|
||||
} else {
|
||||
logf("starting SetDNS call...")
|
||||
err = b.SetDNS(ctx, key, rec)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("SetDNS %q => %q: %w", key, rec, err)
|
||||
}
|
||||
logf("did SetDNS")
|
||||
}
|
||||
|
||||
chal, err := ac.Accept(ctx, ch)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("Accept: %v", err)
|
||||
}
|
||||
traceACME(chal)
|
||||
break
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
orderURI := order.URI
|
||||
order, err = ac.WaitOrder(ctx, orderURI)
|
||||
if err != nil {
|
||||
if ctx.Err() != nil {
|
||||
return nil, ctx.Err()
|
||||
}
|
||||
if oe, ok := err.(*acme.OrderError); ok {
|
||||
logf("acme: WaitOrder: OrderError status %q", oe.Status)
|
||||
} else {
|
||||
logf("acme: WaitOrder error: %v", err)
|
||||
}
|
||||
return nil, err
|
||||
}
|
||||
traceACME(order)
|
||||
|
||||
certPrivKey, err := ecdsa.GenerateKey(elliptic.P256(), rand.Reader)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
var privPEM bytes.Buffer
|
||||
if err := encodeECDSAKey(&privPEM, certPrivKey); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if err := cs.WriteKey(domain, privPEM.Bytes()); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
csr, err := certRequest(certPrivKey, domain, nil)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
logf("requesting cert...")
|
||||
der, _, err := ac.CreateOrderCert(ctx, order.FinalizeURL, csr, true)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("CreateOrder: %v", err)
|
||||
}
|
||||
logf("got cert")
|
||||
|
||||
var certPEM bytes.Buffer
|
||||
for _, b := range der {
|
||||
pb := &pem.Block{Type: "CERTIFICATE", Bytes: b}
|
||||
if err := pem.Encode(&certPEM, pb); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
}
|
||||
if err := cs.WriteCert(domain, certPEM.Bytes()); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
b.domainRenewed(domain)
|
||||
|
||||
return &TLSCertKeyPair{CertPEM: certPEM.Bytes(), KeyPEM: privPEM.Bytes()}, nil
|
||||
}
|
||||
|
||||
// certRequest generates a CSR for the given common name cn and optional SANs.
|
||||
func certRequest(key crypto.Signer, cn string, ext []pkix.Extension, san ...string) ([]byte, error) {
|
||||
req := &x509.CertificateRequest{
|
||||
Subject: pkix.Name{CommonName: cn},
|
||||
DNSNames: san,
|
||||
ExtraExtensions: ext,
|
||||
}
|
||||
return x509.CreateCertificateRequest(rand.Reader, req, key)
|
||||
}
|
||||
|
||||
func encodeECDSAKey(w io.Writer, key *ecdsa.PrivateKey) error {
|
||||
b, err := x509.MarshalECPrivateKey(key)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
pb := &pem.Block{Type: "EC PRIVATE KEY", Bytes: b}
|
||||
return pem.Encode(w, pb)
|
||||
}
|
||||
|
||||
// parsePrivateKey is a copy of x/crypto/acme's parsePrivateKey.
|
||||
//
|
||||
// Attempt to parse the given private key DER block. OpenSSL 0.9.8 generates
|
||||
// PKCS#1 private keys by default, while OpenSSL 1.0.0 generates PKCS#8 keys.
|
||||
// OpenSSL ecparam generates SEC1 EC private keys for ECDSA. We try all three.
|
||||
//
|
||||
// Inspired by parsePrivateKey in crypto/tls/tls.go.
|
||||
func parsePrivateKey(der []byte) (crypto.Signer, error) {
|
||||
if key, err := x509.ParsePKCS1PrivateKey(der); err == nil {
|
||||
return key, nil
|
||||
}
|
||||
if key, err := x509.ParsePKCS8PrivateKey(der); err == nil {
|
||||
switch key := key.(type) {
|
||||
case *rsa.PrivateKey:
|
||||
return key, nil
|
||||
case *ecdsa.PrivateKey:
|
||||
return key, nil
|
||||
default:
|
||||
return nil, errors.New("acme/autocert: unknown private key type in PKCS#8 wrapping")
|
||||
}
|
||||
}
|
||||
if key, err := x509.ParseECPrivateKey(der); err == nil {
|
||||
return key, nil
|
||||
}
|
||||
|
||||
return nil, errors.New("acme/autocert: failed to parse private key")
|
||||
}
|
||||
|
||||
func acmeKey(cs certStore) (crypto.Signer, error) {
|
||||
if v, err := cs.ACMEKey(); err == nil {
|
||||
priv, _ := pem.Decode(v)
|
||||
if priv == nil || !strings.Contains(priv.Type, "PRIVATE") {
|
||||
return nil, errors.New("acme/autocert: invalid account key found in cache")
|
||||
}
|
||||
return parsePrivateKey(priv.Bytes)
|
||||
} else if !errors.Is(err, ipn.ErrStateNotExist) {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
privKey, err := ecdsa.GenerateKey(elliptic.P256(), rand.Reader)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
var pemBuf bytes.Buffer
|
||||
if err := encodeECDSAKey(&pemBuf, privKey); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if err := cs.WriteACMEKey(pemBuf.Bytes()); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return privKey, nil
|
||||
}
|
||||
|
||||
func acmeClient(cs certStore) (*acme.Client, error) {
|
||||
key, err := acmeKey(cs)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("acmeKey: %w", err)
|
||||
}
|
||||
// Note: if we add support for additional ACME providers (other than
|
||||
// LetsEncrypt), we should make sure that they support ARI extension (see
|
||||
// shouldStartDomainRenewalARI).
|
||||
return &acme.Client{
|
||||
Key: key,
|
||||
UserAgent: "tailscaled/" + version.Long(),
|
||||
}, nil
|
||||
}
|
||||
|
||||
// validCertPEM reports whether the given certificate is valid for domain at now.
|
||||
//
|
||||
// If roots != nil, it is used instead of the system root pool. This is meant
|
||||
// to support testing, and production code should pass roots == nil.
|
||||
func validCertPEM(domain string, keyPEM, certPEM []byte, roots *x509.CertPool, now time.Time) bool {
|
||||
if len(keyPEM) == 0 || len(certPEM) == 0 {
|
||||
return false
|
||||
}
|
||||
tlsCert, err := tls.X509KeyPair(certPEM, keyPEM)
|
||||
if err != nil {
|
||||
return false
|
||||
}
|
||||
|
||||
var leaf *x509.Certificate
|
||||
intermediates := x509.NewCertPool()
|
||||
for i, certDER := range tlsCert.Certificate {
|
||||
cert, err := x509.ParseCertificate(certDER)
|
||||
if err != nil {
|
||||
return false
|
||||
}
|
||||
if i == 0 {
|
||||
leaf = cert
|
||||
} else {
|
||||
intermediates.AddCert(cert)
|
||||
}
|
||||
}
|
||||
if leaf == nil {
|
||||
return false
|
||||
}
|
||||
_, err = leaf.Verify(x509.VerifyOptions{
|
||||
DNSName: domain,
|
||||
CurrentTime: now,
|
||||
Roots: roots,
|
||||
Intermediates: intermediates,
|
||||
})
|
||||
return err == nil
|
||||
}
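// Editor's sketch, not part of the upstream file: a caller would typically use
// validCertPEM to decide whether cached material can still be served. The
// domain is a hypothetical placeholder; roots == nil selects the system pool.
//
//	if !validCertPEM("node.example.ts.net", keyPEM, certPEM, nil, time.Now()) {
//		// Cached pair is missing, malformed, expired, or for the wrong name:
//		// fall through to requesting a fresh certificate via ACME.
//	}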
|
||||
|
||||
// validLookingCertDomain reports whether name looks like a valid domain name that
|
||||
// we might be able to get a cert for.
|
||||
//
|
||||
// It's a light check, primarily for double-checking before it's used
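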
|
||||
// as part of a filesystem path. The actual validation happens in checkCertDomain.
|
||||
func validLookingCertDomain(name string) bool {
|
||||
if name == "" ||
|
||||
strings.Contains(name, "..") ||
|
||||
strings.ContainsAny(name, ":/\\\x00") ||
|
||||
!strings.Contains(name, ".") {
|
||||
return false
|
||||
}
|
||||
return true
|
||||
}
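// For example (editor's note, not part of the upstream file):
//
//	validLookingCertDomain("node.example.ts.net") // true
//	validLookingCertDomain("evil/../path")        // false: contains ".." and "/"
//	validLookingCertDomain("localhost")           // false: no dot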
|
||||
|
||||
func checkCertDomain(st *ipnstate.Status, domain string) error {
|
||||
if domain == "" {
|
||||
return errors.New("missing domain name")
|
||||
}
|
||||
for _, d := range st.CertDomains {
|
||||
if d == domain {
|
||||
return nil
|
||||
}
|
||||
}
|
||||
if len(st.CertDomains) == 0 {
|
||||
return errors.New("your Tailscale account does not support getting TLS certs")
|
||||
}
|
||||
return fmt.Errorf("invalid domain %q; must be one of %q", domain, st.CertDomains)
|
||||
}
|
||||
30
vendor/tailscale.com/ipn/ipnlocal/cert_js.go
generated
vendored
Normal file
@@ -0,0 +1,30 @@
|
||||
// Copyright (c) Tailscale Inc & AUTHORS
|
||||
// SPDX-License-Identifier: BSD-3-Clause
|
||||
|
||||
package ipnlocal
|
||||
|
||||
import (
|
||||
"context"
|
||||
"errors"
|
||||
"time"
|
||||
)
|
||||
|
||||
type TLSCertKeyPair struct {
|
||||
CertPEM, KeyPEM []byte
|
||||
}
|
||||
|
||||
func (b *LocalBackend) GetCertPEM(ctx context.Context, domain string) (*TLSCertKeyPair, error) {
|
||||
return nil, errors.New("not implemented for js/wasm")
|
||||
}
|
||||
|
||||
var errCertExpired = errors.New("cert expired")
|
||||
|
||||
type certStore interface{}
|
||||
|
||||
func getCertPEMCached(cs certStore, domain string, now time.Time) (p *TLSCertKeyPair, err error) {
|
||||
return nil, errors.New("not implemented for js/wasm")
|
||||
}
|
||||
|
||||
func (b *LocalBackend) getCertStore() (certStore, error) {
|
||||
return nil, errors.New("not implemented for js/wasm")
|
||||
}
|
||||
370
vendor/tailscale.com/ipn/ipnlocal/drive.go
generated
vendored
Normal file
@@ -0,0 +1,370 @@
|
||||
// Copyright (c) Tailscale Inc & AUTHORS
|
||||
// SPDX-License-Identifier: BSD-3-Clause
|
||||
|
||||
package ipnlocal
|
||||
|
||||
import (
|
||||
"cmp"
|
||||
"fmt"
|
||||
"os"
|
||||
"slices"
|
||||
|
||||
"tailscale.com/drive"
|
||||
"tailscale.com/ipn"
|
||||
"tailscale.com/tailcfg"
|
||||
"tailscale.com/types/netmap"
|
||||
"tailscale.com/types/views"
|
||||
)
|
||||
|
||||
const (
|
||||
// DriveLocalPort is the port on which the Taildrive listens for location
|
||||
// connections on quad 100.
|
||||
DriveLocalPort = 8080
|
||||
)
|
||||
|
||||
// DriveSharingEnabled reports whether sharing to remote nodes via Taildrive is
|
||||
// enabled. This is currently based on checking for the drive:share node
|
||||
// attribute.
|
||||
func (b *LocalBackend) DriveSharingEnabled() bool {
|
||||
b.mu.Lock()
|
||||
defer b.mu.Unlock()
|
||||
return b.driveSharingEnabledLocked()
|
||||
}
|
||||
|
||||
func (b *LocalBackend) driveSharingEnabledLocked() bool {
|
||||
return b.netMap != nil && b.netMap.SelfNode.HasCap(tailcfg.NodeAttrsTaildriveShare)
|
||||
}
|
||||
|
||||
// DriveAccessEnabled reports whether accessing Taildrive shares on remote nodes
|
||||
// is enabled. This is currently based on checking for the drive:access node
|
||||
// attribute.
|
||||
func (b *LocalBackend) DriveAccessEnabled() bool {
|
||||
b.mu.Lock()
|
||||
defer b.mu.Unlock()
|
||||
return b.driveAccessEnabledLocked()
|
||||
}
|
||||
|
||||
func (b *LocalBackend) driveAccessEnabledLocked() bool {
|
||||
return b.netMap != nil && b.netMap.SelfNode.HasCap(tailcfg.NodeAttrsTaildriveAccess)
|
||||
}
|
||||
|
||||
// DriveSetServerAddr tells Taildrive to use the given address for connecting
|
||||
// to the drive.FileServer that's exposing local files as an unprivileged
|
||||
// user.
|
||||
func (b *LocalBackend) DriveSetServerAddr(addr string) error {
|
||||
fs, ok := b.sys.DriveForRemote.GetOK()
|
||||
if !ok {
|
||||
return drive.ErrDriveNotEnabled
|
||||
}
|
||||
|
||||
fs.SetFileServerAddr(addr)
|
||||
return nil
|
||||
}
|
||||
|
||||
// DriveSetShare adds the given share if no share with that name exists, or
|
||||
// replaces the existing share if one with the same name already exists. To
|
||||
// avoid potential incompatibilities across file systems, share names are
|
||||
// limited to alphanumeric characters and the underscore _.
|
||||
func (b *LocalBackend) DriveSetShare(share *drive.Share) error {
|
||||
var err error
|
||||
share.Name, err = drive.NormalizeShareName(share.Name)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
b.mu.Lock()
|
||||
shares, err := b.driveSetShareLocked(share)
|
||||
b.mu.Unlock()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
b.driveNotifyShares(shares)
|
||||
return nil
|
||||
}
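// Editor's sketch, not part of the upstream file: a typical caller adds or
// replaces a share like this. The share name and path are hypothetical, and
// the Name/Path fields of drive.Share are assumed here for illustration.
//
//	err := b.DriveSetShare(&drive.Share{
//		Name: "docs",            // normalized to lowercase by NormalizeShareName
//		Path: "/home/user/docs", // local directory exposed over Taildrive
//	})
//	if err != nil {
//		// e.g. drive.ErrDriveNotEnabled or an invalid share name
//	}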
|
||||
|
||||
func (b *LocalBackend) driveSetShareLocked(share *drive.Share) (views.SliceView[*drive.Share, drive.ShareView], error) {
|
||||
existingShares := b.pm.prefs.DriveShares()
|
||||
|
||||
fs, ok := b.sys.DriveForRemote.GetOK()
|
||||
if !ok {
|
||||
return existingShares, drive.ErrDriveNotEnabled
|
||||
}
|
||||
|
||||
addedShare := false
|
||||
var shares []*drive.Share
|
||||
for _, existing := range existingShares.All() {
|
||||
if existing.Name() != share.Name {
|
||||
if !addedShare && existing.Name() > share.Name {
|
||||
// Add share in order
|
||||
shares = append(shares, share)
|
||||
addedShare = true
|
||||
}
|
||||
shares = append(shares, existing.AsStruct())
|
||||
}
|
||||
}
|
||||
if !addedShare {
|
||||
shares = append(shares, share)
|
||||
}
|
||||
|
||||
err := b.driveSetSharesLocked(shares)
|
||||
if err != nil {
|
||||
return existingShares, err
|
||||
}
|
||||
fs.SetShares(shares)
|
||||
|
||||
return b.pm.prefs.DriveShares(), nil
|
||||
}
|
||||
|
||||
// DriveRenameShare renames the share at old name to new name. To avoid
|
||||
// potential incompatibilities across file systems, the new share name is
|
||||
// limited to alphanumeric characters and the underscore _.
|
||||
// Any of the following will result in an error.
|
||||
// - no share found under old name
|
||||
// - new share name contains disallowed characters
|
||||
// - share already exists under new name
|
||||
func (b *LocalBackend) DriveRenameShare(oldName, newName string) error {
|
||||
var err error
|
||||
newName, err = drive.NormalizeShareName(newName)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
b.mu.Lock()
|
||||
shares, err := b.driveRenameShareLocked(oldName, newName)
|
||||
b.mu.Unlock()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
b.driveNotifyShares(shares)
|
||||
return nil
|
||||
}
|
||||
|
||||
func (b *LocalBackend) driveRenameShareLocked(oldName, newName string) (views.SliceView[*drive.Share, drive.ShareView], error) {
|
||||
existingShares := b.pm.prefs.DriveShares()
|
||||
|
||||
fs, ok := b.sys.DriveForRemote.GetOK()
|
||||
if !ok {
|
||||
return existingShares, drive.ErrDriveNotEnabled
|
||||
}
|
||||
|
||||
found := false
|
||||
var shares []*drive.Share
|
||||
for _, existing := range existingShares.All() {
|
||||
if existing.Name() == newName {
|
||||
return existingShares, os.ErrExist
|
||||
}
|
||||
if existing.Name() == oldName {
|
||||
share := existing.AsStruct()
|
||||
share.Name = newName
|
||||
shares = append(shares, share)
|
||||
found = true
|
||||
} else {
|
||||
shares = append(shares, existing.AsStruct())
|
||||
}
|
||||
}
|
||||
|
||||
if !found {
|
||||
return existingShares, os.ErrNotExist
|
||||
}
|
||||
|
||||
slices.SortFunc(shares, drive.CompareShares)
|
||||
err := b.driveSetSharesLocked(shares)
|
||||
if err != nil {
|
||||
return existingShares, err
|
||||
}
|
||||
fs.SetShares(shares)
|
||||
|
||||
return b.pm.prefs.DriveShares(), nil
|
||||
}
|
||||
|
||||
// DriveRemoveShare removes the named share. Share names are forced to
|
||||
// lowercase.
|
||||
func (b *LocalBackend) DriveRemoveShare(name string) error {
|
||||
// Force all share names to lowercase to avoid potential incompatibilities
|
||||
// with clients that don't support case-sensitive filenames.
|
||||
var err error
|
||||
name, err = drive.NormalizeShareName(name)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
b.mu.Lock()
|
||||
shares, err := b.driveRemoveShareLocked(name)
|
||||
b.mu.Unlock()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
b.driveNotifyShares(shares)
|
||||
return nil
|
||||
}
|
||||
|
||||
func (b *LocalBackend) driveRemoveShareLocked(name string) (views.SliceView[*drive.Share, drive.ShareView], error) {
|
||||
existingShares := b.pm.prefs.DriveShares()
|
||||
|
||||
fs, ok := b.sys.DriveForRemote.GetOK()
|
||||
if !ok {
|
||||
return existingShares, drive.ErrDriveNotEnabled
|
||||
}
|
||||
|
||||
found := false
|
||||
var shares []*drive.Share
|
||||
for _, existing := range existingShares.All() {
|
||||
if existing.Name() != name {
|
||||
shares = append(shares, existing.AsStruct())
|
||||
} else {
|
||||
found = true
|
||||
}
|
||||
}
|
||||
|
||||
if !found {
|
||||
return existingShares, os.ErrNotExist
|
||||
}
|
||||
|
||||
err := b.driveSetSharesLocked(shares)
|
||||
if err != nil {
|
||||
return existingShares, err
|
||||
}
|
||||
fs.SetShares(shares)
|
||||
|
||||
return b.pm.prefs.DriveShares(), nil
|
||||
}
|
||||
|
||||
func (b *LocalBackend) driveSetSharesLocked(shares []*drive.Share) error {
|
||||
prefs := b.pm.prefs.AsStruct()
|
||||
prefs.ApplyEdits(&ipn.MaskedPrefs{
|
||||
Prefs: ipn.Prefs{
|
||||
DriveShares: shares,
|
||||
},
|
||||
DriveSharesSet: true,
|
||||
})
|
||||
return b.pm.setPrefsNoPermCheck(prefs.View())
|
||||
}
|
||||
|
||||
// driveNotifyShares notifies IPN bus listeners (e.g. Mac Application process)
|
||||
// about the latest list of shares, if and only if the shares have changed since
|
||||
// the last time we notified.
|
||||
func (b *LocalBackend) driveNotifyShares(shares views.SliceView[*drive.Share, drive.ShareView]) {
|
||||
b.lastNotifiedDriveSharesMu.Lock()
|
||||
defer b.lastNotifiedDriveSharesMu.Unlock()
|
||||
if b.lastNotifiedDriveShares != nil && driveShareViewsEqual(b.lastNotifiedDriveShares, shares) {
|
||||
// shares are unchanged since last notification, don't bother notifying
|
||||
return
|
||||
}
|
||||
b.lastNotifiedDriveShares = &shares
|
||||
|
||||
// Ensures shares is not nil to distinguish "no shares" from "not notifying shares"
|
||||
if shares.IsNil() {
|
||||
shares = views.SliceOfViews(make([]*drive.Share, 0))
|
||||
}
|
||||
b.send(ipn.Notify{DriveShares: shares})
|
||||
}
|
||||
|
||||
// driveNotifyCurrentSharesLocked sends an ipn.Notify if the current set of
|
||||
// shares has changed since the last notification.
|
||||
func (b *LocalBackend) driveNotifyCurrentSharesLocked() {
|
||||
var shares views.SliceView[*drive.Share, drive.ShareView]
|
||||
if b.driveSharingEnabledLocked() {
|
||||
// Only populate shares if sharing is enabled.
|
||||
shares = b.pm.prefs.DriveShares()
|
||||
}
|
||||
|
||||
// Do the below on a goroutine to avoid deadlocking on b.mu in b.send().
|
||||
go b.driveNotifyShares(shares)
|
||||
}
|
||||
|
||||
func driveShareViewsEqual(a *views.SliceView[*drive.Share, drive.ShareView], b views.SliceView[*drive.Share, drive.ShareView]) bool {
|
||||
if a == nil {
|
||||
return false
|
||||
}
|
||||
|
||||
if a.Len() != b.Len() {
|
||||
return false
|
||||
}
|
||||
|
||||
for i := range a.Len() {
|
||||
if !drive.ShareViewsEqual(a.At(i), b.At(i)) {
|
||||
return false
|
||||
}
|
||||
}
|
||||
|
||||
return true
|
||||
}
|
||||
|
||||
// DriveGetShares gets the current list of Taildrive shares, sorted by name.
|
||||
func (b *LocalBackend) DriveGetShares() views.SliceView[*drive.Share, drive.ShareView] {
|
||||
b.mu.Lock()
|
||||
defer b.mu.Unlock()
|
||||
|
||||
return b.pm.prefs.DriveShares()
|
||||
}
|
||||
|
||||
// updateDrivePeersLocked sets all applicable peers from the netmap as Taildrive
|
||||
// remotes.
|
||||
func (b *LocalBackend) updateDrivePeersLocked(nm *netmap.NetworkMap) {
|
||||
fs, ok := b.sys.DriveForLocal.GetOK()
|
||||
if !ok {
|
||||
return
|
||||
}
|
||||
|
||||
var driveRemotes []*drive.Remote
|
||||
if b.driveAccessEnabledLocked() {
|
||||
// Only populate peers if access is enabled, otherwise leave blank.
|
||||
driveRemotes = b.driveRemotesFromPeers(nm)
|
||||
}
|
||||
|
||||
fs.SetRemotes(b.netMap.Domain, driveRemotes, b.newDriveTransport())
|
||||
}
|
||||
|
||||
func (b *LocalBackend) driveRemotesFromPeers(nm *netmap.NetworkMap) []*drive.Remote {
|
||||
driveRemotes := make([]*drive.Remote, 0, len(nm.Peers))
|
||||
for _, p := range nm.Peers {
|
||||
peerID := p.ID()
|
||||
url := fmt.Sprintf("%s/%s", peerAPIBase(nm, p), taildrivePrefix[1:])
|
||||
driveRemotes = append(driveRemotes, &drive.Remote{
|
||||
Name: p.DisplayName(false),
|
||||
URL: url,
|
||||
Available: func() bool {
|
||||
// Peers are available to Taildrive if:
|
||||
// - They are online
|
||||
// - They are allowed to share at least one folder with us
|
||||
b.mu.Lock()
|
||||
latestNetMap := b.netMap
|
||||
b.mu.Unlock()
|
||||
|
||||
idx, found := slices.BinarySearchFunc(latestNetMap.Peers, peerID, func(candidate tailcfg.NodeView, id tailcfg.NodeID) int {
|
||||
return cmp.Compare(candidate.ID(), id)
|
||||
})
|
||||
if !found {
|
||||
return false
|
||||
}
|
||||
|
||||
peer := latestNetMap.Peers[idx]
|
||||
|
||||
// Exclude offline peers.
|
||||
// TODO(oxtoacart): for some reason, this correctly
|
||||
// catches when a node goes from offline to online,
|
||||
// but not the other way around...
|
||||
online := peer.Online()
|
||||
if online == nil || !*online {
|
||||
return false
|
||||
}
|
||||
|
||||
// Check that the peer is allowed to share with us.
|
||||
addresses := peer.Addresses()
|
||||
for i := range addresses.Len() {
|
||||
addr := addresses.At(i)
|
||||
capsMap := b.PeerCaps(addr.Addr())
|
||||
if capsMap.HasCapability(tailcfg.PeerCapabilityTaildriveSharer) {
|
||||
return true
|
||||
}
|
||||
}
|
||||
|
||||
return false
|
||||
},
|
||||
})
|
||||
}
|
||||
return driveRemotes
|
||||
}
|
||||
226
vendor/tailscale.com/ipn/ipnlocal/expiry.go
generated
vendored
Normal file
@@ -0,0 +1,226 @@
|
||||
// Copyright (c) Tailscale Inc & AUTHORS
|
||||
// SPDX-License-Identifier: BSD-3-Clause
|
||||
|
||||
package ipnlocal
|
||||
|
||||
import (
|
||||
"time"
|
||||
|
||||
"tailscale.com/syncs"
|
||||
"tailscale.com/tailcfg"
|
||||
"tailscale.com/tstime"
|
||||
"tailscale.com/types/key"
|
||||
"tailscale.com/types/logger"
|
||||
"tailscale.com/types/netmap"
|
||||
)
|
||||
|
||||
// For extra defense-in-depth, when we're testing expired nodes we check
|
||||
// ControlTime against this 'epoch' (set to the approximate time that this code
|
||||
// was written) such that if control (or Headscale, etc.) sends a ControlTime
|
||||
// that's sufficiently far in the past, we can safely ignore it.
|
||||
var flagExpiredPeersEpoch = time.Unix(1673373066, 0)
|
||||
|
||||
// If the offset between the current time and the time received from control is
|
||||
// larger than this, we store an offset in our expiryManager to adjust future
|
||||
// clock timings.
|
||||
const minClockDelta = 1 * time.Minute
|
||||
|
||||
// expiryManager tracks the state of expired nodes and the delta from the
|
||||
// current clock time to the time returned from control, and allows mutating a
|
||||
// netmap to mark peers as expired based on the current delta-adjusted time.
|
||||
type expiryManager struct {
|
||||
// previouslyExpired stores nodes that have already expired so we can
|
||||
// only log on state transitions.
|
||||
previouslyExpired map[tailcfg.StableNodeID]bool
|
||||
|
||||
// clockDelta stores the delta between the current time and the time
|
||||
// received from control such that:
|
||||
// time.Now().Add(clockDelta) == MapResponse.ControlTime
|
||||
clockDelta syncs.AtomicValue[time.Duration]
|
||||
|
||||
logf logger.Logf
|
||||
clock tstime.Clock
|
||||
}
|
||||
|
||||
func newExpiryManager(logf logger.Logf) *expiryManager {
|
||||
return &expiryManager{
|
||||
previouslyExpired: map[tailcfg.StableNodeID]bool{},
|
||||
logf: logf,
|
||||
clock: tstime.StdClock{},
|
||||
}
|
||||
}
|
||||
|
||||
// onControlTime is called whenever we receive a new timestamp from the control
|
||||
// server to store the delta.
|
||||
func (em *expiryManager) onControlTime(t time.Time) {
|
||||
localNow := em.clock.Now()
|
||||
delta := t.Sub(localNow)
|
||||
if delta.Abs() > minClockDelta {
|
||||
em.logf("[v1] netmap: flagExpiredPeers: setting clock delta to %v", delta)
|
||||
em.clockDelta.Store(delta)
|
||||
} else {
|
||||
em.clockDelta.Store(0)
|
||||
}
|
||||
}
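// Worked example (editor's addition, not part of the upstream file): if the
// local clock reads 12:00:00 and control reports 12:02:30, the +2m30s delta
// exceeds minClockDelta (1 minute) and is stored, so later reads map local
// time onto control time. localNoon is a hypothetical time.Time.
//
//	em.onControlTime(localNoon.Add(2*time.Minute + 30*time.Second))
//	controlNow := em.clock.Now().Add(em.clockDelta.Load()) // ≈ control's view of "now"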
|
||||
|
||||
// flagExpiredPeers updates mapRes.Peers, mutating all peers that have expired,
|
||||
// taking into account any clock skew detected by using the ControlTime field
|
||||
// in the MapResponse. We don't actually remove expired peers from the Peers
|
||||
// array; instead, we clear some fields of the Node object, and set
|
||||
// Node.Expired so other parts of the codebase can provide more clear error
|
||||
// messages when attempting to e.g. ping an expired node.
|
||||
//
|
||||
// The localNow time should be the output of time.Now for the local system; it
|
||||
// will be adjusted by any stored clock skew from ControlTime.
|
||||
//
|
||||
// This is additionally a defense-in-depth against something going wrong with
|
||||
// control such that we start seeing expired peers with a valid Endpoints or
|
||||
// DERP field.
|
||||
//
|
||||
// This function is safe to call concurrently with onControlTime but not
|
||||
// concurrently with any other call to flagExpiredPeers.
|
||||
func (em *expiryManager) flagExpiredPeers(netmap *netmap.NetworkMap, localNow time.Time) {
|
||||
// Adjust our current time by any saved delta to adjust for clock skew.
|
||||
controlNow := localNow.Add(em.clockDelta.Load())
|
||||
if controlNow.Before(flagExpiredPeersEpoch) {
|
||||
em.logf("netmap: flagExpiredPeers: [unexpected] delta-adjusted current time is before hardcoded epoch; skipping")
|
||||
return
|
||||
}
|
||||
|
||||
for i, peer := range netmap.Peers {
|
||||
// Nodes that don't expire have KeyExpiry set to the zero time;
|
||||
// skip those and peers that are already marked as expired
|
||||
// (e.g. from control).
|
||||
if peer.KeyExpiry().IsZero() || peer.KeyExpiry().After(controlNow) {
|
||||
delete(em.previouslyExpired, peer.StableID())
|
||||
continue
|
||||
} else if peer.Expired() {
|
||||
continue
|
||||
}
|
||||
|
||||
if !em.previouslyExpired[peer.StableID()] {
|
||||
em.logf("[v1] netmap: flagExpiredPeers: clearing expired peer %v", peer.StableID())
|
||||
em.previouslyExpired[peer.StableID()] = true
|
||||
}
|
||||
|
||||
mut := peer.AsStruct()
|
||||
|
||||
// Actually mark the node as expired
|
||||
mut.Expired = true
|
||||
|
||||
// Control clears the Endpoints and DERP fields of expired
|
||||
// nodes; do so here as well. The Expired bool is the correct
|
||||
// thing to set, but this replicates the previous behaviour.
|
||||
//
|
||||
// NOTE: this is insufficient to actually break connectivity,
|
||||
// since we discover endpoints via DERP, and due to DERP return
|
||||
// path optimization.
|
||||
mut.Endpoints = nil
|
||||
mut.DERP = ""
|
||||
|
||||
// Defense-in-depth: break the node's public key as well, in
|
||||
// case something tries to communicate.
|
||||
mut.Key = key.NodePublicWithBadOldPrefix(peer.Key())
|
||||
|
||||
netmap.Peers[i] = mut.View()
|
||||
}
|
||||
}
|
||||
|
||||
// nextPeerExpiry returns the time that the next node in the netmap expires
|
||||
// (including the self node), based on their KeyExpiry. It skips nodes that are
|
||||
// already marked as Expired. If there are no nodes expiring in the future,
|
||||
// then the zero Time will be returned.
|
||||
//
|
||||
// The localNow time should be the output of time.Now for the local system; it
|
||||
// will be adjusted by any stored clock skew from ControlTime.
|
||||
//
|
||||
// This function is safe to call concurrently with other methods of this expiryManager.
|
||||
func (em *expiryManager) nextPeerExpiry(nm *netmap.NetworkMap, localNow time.Time) time.Time {
|
||||
if nm == nil {
|
||||
return time.Time{}
|
||||
}
|
||||
|
||||
controlNow := localNow.Add(em.clockDelta.Load())
|
||||
if controlNow.Before(flagExpiredPeersEpoch) {
|
||||
em.logf("netmap: nextPeerExpiry: [unexpected] delta-adjusted current time is before hardcoded epoch; skipping")
|
||||
return time.Time{}
|
||||
}
|
||||
|
||||
var nextExpiry time.Time // zero if none
|
||||
for _, peer := range nm.Peers {
|
||||
if peer.KeyExpiry().IsZero() {
|
||||
continue // tagged node
|
||||
} else if peer.Expired() {
|
||||
// Peer already expired; Expired is set by the
|
||||
// flagExpiredPeers function, above.
|
||||
continue
|
||||
} else if peer.KeyExpiry().Before(controlNow) {
|
||||
// This peer already expired, and peer.Expired
|
||||
// isn't set for some reason. Skip this node.
|
||||
continue
|
||||
}
|
||||
|
||||
// nextExpiry being zero is a sentinel that we haven't yet set
|
||||
// an expiry; otherwise, only update if this node's expiry is
|
||||
// sooner than the currently-stored one (since we want the
|
||||
// soonest-occurring expiry time).
|
||||
if nextExpiry.IsZero() || peer.KeyExpiry().Before(nextExpiry) {
|
||||
nextExpiry = peer.KeyExpiry()
|
||||
}
|
||||
}
|
||||
|
||||
// Ensure that we also fire this timer if our own node key expires.
|
||||
if nm.SelfNode.Valid() {
|
||||
selfExpiry := nm.SelfNode.KeyExpiry()
|
||||
|
||||
if selfExpiry.IsZero() {
|
||||
// No expiry for self node
|
||||
} else if selfExpiry.Before(controlNow) {
|
||||
// Self node already expired; we don't want to return a
|
||||
// time in the past, so skip this.
|
||||
} else if nextExpiry.IsZero() || selfExpiry.Before(nextExpiry) {
|
||||
// Self node expires after now, but before the soonest
|
||||
// peer in the netmap; update our next expiry to this
|
||||
// time.
|
||||
nextExpiry = selfExpiry
|
||||
}
|
||||
}
|
||||
|
||||
// As an additional defense in depth, never return a time that is
|
||||
// before the current time from the perspective of the local system
|
||||
// (since timers with a zero or negative duration will fire
|
||||
// immediately and can cause unnecessary reconfigurations).
|
||||
//
|
||||
// This can happen if the local clock is running fast; for example:
|
||||
// localTime = 2pm
|
||||
// controlTime = 1pm (real time)
|
||||
// nextExpiry = 1:30pm (real time)
|
||||
//
|
||||
// In the above case, we'd return a nextExpiry of 1:30pm while the
|
||||
// current clock reads 2pm; in this case, setting a timer for
|
||||
// nextExpiry.Sub(now) would result in a negative duration and a timer
|
||||
// that fired immediately.
|
||||
//
|
||||
// In this particular edge-case, return an expiry time 30 seconds after
|
||||
// the local time so that any timers created based on this expiry won't
|
||||
// fire too quickly.
|
||||
//
|
||||
// The alternative would be to do all comparisons in local time,
|
||||
// unadjusted for clock skew, but that doesn't handle cases where the
|
||||
// local clock is "fixed" between netmap updates.
|
||||
if !nextExpiry.IsZero() && nextExpiry.Before(localNow) {
|
||||
em.logf("netmap: nextPeerExpiry: skipping nextExpiry %q before local time %q due to clock skew",
|
||||
nextExpiry.UTC().Format(time.RFC3339),
|
||||
localNow.UTC().Format(time.RFC3339))
|
||||
return localNow.Add(30 * time.Second)
|
||||
}
|
||||
|
||||
return nextExpiry
|
||||
}
|
||||
|
||||
// ControlNow estimates the current time on the control server, calculated as
|
||||
// localNow + the delta between local and control server clocks as recorded
|
||||
// when the LocalBackend last received a time message from the control server.
|
||||
func (b *LocalBackend) ControlNow(localNow time.Time) time.Time {
|
||||
return localNow.Add(b.em.clockDelta.Load())
|
||||
}
|
||||
7366
vendor/tailscale.com/ipn/ipnlocal/local.go
generated
vendored
Normal file
File diff suppressed because it is too large
1474
vendor/tailscale.com/ipn/ipnlocal/network-lock.go
generated
vendored
Normal file
File diff suppressed because it is too large
1279
vendor/tailscale.com/ipn/ipnlocal/peerapi.go
generated
vendored
Normal file
File diff suppressed because it is too large
20
vendor/tailscale.com/ipn/ipnlocal/peerapi_h2c.go
generated
vendored
Normal file
@@ -0,0 +1,20 @@
|
||||
// Copyright (c) Tailscale Inc & AUTHORS
|
||||
// SPDX-License-Identifier: BSD-3-Clause
|
||||
|
||||
//go:build !ios && !android && !js
|
||||
|
||||
package ipnlocal
|
||||
|
||||
import (
|
||||
"net/http"
|
||||
|
||||
"golang.org/x/net/http2"
|
||||
"golang.org/x/net/http2/h2c"
|
||||
)
|
||||
|
||||
func init() {
|
||||
addH2C = func(s *http.Server) {
|
||||
h2s := &http2.Server{}
|
||||
s.Handler = h2c.NewHandler(s.Handler, h2s)
|
||||
}
|
||||
}
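// Editor's sketch, not part of the upstream file: the effect of addH2C is the
// same as wrapping any *http.Server's handler so it accepts cleartext HTTP/2
// (h2c) in addition to HTTP/1.1. mux is a hypothetical http.Handler.
//
//	srv := &http.Server{Handler: mux}
//	srv.Handler = h2c.NewHandler(srv.Handler, &http2.Server{})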
|
||||
30
vendor/tailscale.com/ipn/ipnlocal/peerapi_macios_ext.go
generated
vendored
Normal file
@@ -0,0 +1,30 @@
|
||||
// Copyright (c) Tailscale Inc & AUTHORS
|
||||
// SPDX-License-Identifier: BSD-3-Clause
|
||||
|
||||
//go:build ts_macext && (darwin || ios)
|
||||
|
||||
package ipnlocal
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"net"
|
||||
"net/netip"
|
||||
|
||||
"tailscale.com/net/netmon"
|
||||
"tailscale.com/net/netns"
|
||||
)
|
||||
|
||||
func init() {
|
||||
initListenConfig = initListenConfigNetworkExtension
|
||||
}
|
||||
|
||||
// initListenConfigNetworkExtension configures nc for listening on IP
|
||||
// through the iOS/macOS Network/System Extension (Packet Tunnel
|
||||
// Provider) sandbox.
|
||||
func initListenConfigNetworkExtension(nc *net.ListenConfig, ip netip.Addr, st *netmon.State, tunIfName string) error {
|
||||
tunIf, ok := st.Interface[tunIfName]
|
||||
if !ok {
|
||||
return fmt.Errorf("no interface with name %q", tunIfName)
|
||||
}
|
||||
return netns.SetListenConfigInterfaceIndex(nc, tunIf.Index)
|
||||
}
|
||||
825
vendor/tailscale.com/ipn/ipnlocal/profiles.go
generated
vendored
Normal file
@@ -0,0 +1,825 @@
|
||||
// Copyright (c) Tailscale Inc & AUTHORS
|
||||
// SPDX-License-Identifier: BSD-3-Clause
|
||||
|
||||
package ipnlocal
|
||||
|
||||
import (
|
||||
"cmp"
|
||||
"crypto/rand"
|
||||
"encoding/json"
|
||||
"errors"
|
||||
"fmt"
|
||||
"runtime"
|
||||
"slices"
|
||||
"strings"
|
||||
|
||||
"tailscale.com/clientupdate"
|
||||
"tailscale.com/envknob"
|
||||
"tailscale.com/health"
|
||||
"tailscale.com/ipn"
|
||||
"tailscale.com/tailcfg"
|
||||
"tailscale.com/types/logger"
|
||||
"tailscale.com/util/clientmetric"
|
||||
)
|
||||
|
||||
var debug = envknob.RegisterBool("TS_DEBUG_PROFILES")
|
||||
|
||||
// profileManager is a wrapper around an [ipn.StateStore] that manages
|
||||
// multiple profiles and the current profile.
|
||||
//
|
||||
// It is not safe for concurrent use.
|
||||
type profileManager struct {
|
||||
goos string // used for TestProfileManagementWindows
|
||||
store ipn.StateStore
|
||||
logf logger.Logf
|
||||
health *health.Tracker
|
||||
|
||||
currentUserID ipn.WindowsUserID
|
||||
knownProfiles map[ipn.ProfileID]*ipn.LoginProfile // always non-nil
|
||||
currentProfile *ipn.LoginProfile // always non-nil
|
||||
prefs ipn.PrefsView // always Valid.
|
||||
}
|
||||
|
||||
func (pm *profileManager) dlogf(format string, args ...any) {
|
||||
if !debug() {
|
||||
return
|
||||
}
|
||||
pm.logf(format, args...)
|
||||
}
|
||||
|
||||
func (pm *profileManager) WriteState(id ipn.StateKey, val []byte) error {
|
||||
return ipn.WriteState(pm.store, id, val)
|
||||
}
|
||||
|
||||
// CurrentUserID returns the current user ID. It is only non-empty on
|
||||
// Windows where we have a multi-user system.
|
||||
func (pm *profileManager) CurrentUserID() ipn.WindowsUserID {
|
||||
return pm.currentUserID
|
||||
}
|
||||
|
||||
// SetCurrentUserID sets the current user ID and switches to that user's default (last used) profile.
|
||||
// If the specified user does not have a default profile, or the default profile could not be loaded,
|
||||
// it creates a new one and switches to it. The uid is only non-empty on Windows where we have a multi-user system.
|
||||
func (pm *profileManager) SetCurrentUserID(uid ipn.WindowsUserID) {
|
||||
if pm.currentUserID == uid {
|
||||
return
|
||||
}
|
||||
pm.currentUserID = uid
|
||||
if err := pm.SwitchToDefaultProfile(); err != nil {
|
||||
// SetCurrentUserID should never fail and must always switch to the
|
||||
// user's default profile or create a new profile for the current user.
|
||||
// Until we implement multi-user support and the new permission model,
|
||||
// and remove the concept of the "current user" completely, we must ensure
|
||||
// that when SetCurrentUserID exits, the profile in pm.currentProfile
|
||||
// is either an existing profile owned by the user, or a new, empty profile.
|
||||
pm.logf("%q's default profile cannot be used; creating a new one: %v", uid, err)
|
||||
pm.NewProfileForUser(uid)
|
||||
}
|
||||
}
|
||||
|
||||
// DefaultUserProfileID returns [ipn.ProfileID] of the default (last used) profile for the specified user,
|
||||
// or an empty string if the specified user does not have a default profile.
|
||||
func (pm *profileManager) DefaultUserProfileID(uid ipn.WindowsUserID) ipn.ProfileID {
|
||||
// Read the CurrentProfileKey from the store which stores
|
||||
// the selected profile for the specified user.
|
||||
b, err := pm.store.ReadState(ipn.CurrentProfileKey(string(uid)))
|
||||
pm.dlogf("DefaultUserProfileID: ReadState(%q) = %v, %v", string(uid), len(b), err)
|
||||
if err == ipn.ErrStateNotExist || len(b) == 0 {
|
||||
if runtime.GOOS == "windows" {
|
||||
pm.dlogf("DefaultUserProfileID: windows: migrating from legacy preferences")
|
||||
profile, err := pm.migrateFromLegacyPrefs(uid, false)
|
||||
if err == nil {
|
||||
return profile.ID
|
||||
}
|
||||
pm.logf("failed to migrate from legacy preferences: %v", err)
|
||||
}
|
||||
return ""
|
||||
}
|
||||
|
||||
pk := ipn.StateKey(string(b))
|
||||
prof := pm.findProfileByKey(pk)
|
||||
if prof == nil {
|
||||
pm.dlogf("DefaultUserProfileID: no profile found for key: %q", pk)
|
||||
return ""
|
||||
}
|
||||
return prof.ID
|
||||
}
|
||||
|
||||
// checkProfileAccess returns an [errProfileAccessDenied] if the current user
|
||||
// does not have access to the specified profile.
|
||||
func (pm *profileManager) checkProfileAccess(profile *ipn.LoginProfile) error {
|
||||
if pm.currentUserID != "" && profile.LocalUserID != pm.currentUserID {
|
||||
return errProfileAccessDenied
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// allProfiles returns all profiles accessible to the current user.
|
||||
// The returned profiles are sorted by Name.
|
||||
func (pm *profileManager) allProfiles() (out []*ipn.LoginProfile) {
|
||||
for _, p := range pm.knownProfiles {
|
||||
if pm.checkProfileAccess(p) == nil {
|
||||
out = append(out, p)
|
||||
}
|
||||
}
|
||||
slices.SortFunc(out, func(a, b *ipn.LoginProfile) int {
|
||||
return cmp.Compare(a.Name, b.Name)
|
||||
})
|
||||
return out
|
||||
}
|
||||
|
||||
// matchingProfiles is like [profileManager.allProfiles], but returns only profiles
|
||||
// matching the given predicate.
|
||||
func (pm *profileManager) matchingProfiles(f func(*ipn.LoginProfile) bool) (out []*ipn.LoginProfile) {
|
||||
all := pm.allProfiles()
|
||||
out = all[:0]
|
||||
for _, p := range all {
|
||||
if f(p) {
|
||||
out = append(out, p)
|
||||
}
|
||||
}
|
||||
return out
|
||||
}
|
||||
|
||||
// findMatchingProfiles returns all profiles accessible to the current user
|
||||
// that represent the same node/user as prefs.
|
||||
// The returned profiles are sorted by Name.
|
||||
func (pm *profileManager) findMatchingProfiles(prefs ipn.PrefsView) []*ipn.LoginProfile {
|
||||
return pm.matchingProfiles(func(p *ipn.LoginProfile) bool {
|
||||
return p.ControlURL == prefs.ControlURL() &&
|
||||
(p.UserProfile.ID == prefs.Persist().UserProfile().ID ||
|
||||
p.NodeID == prefs.Persist().NodeID())
|
||||
})
|
||||
}
|
||||
|
||||
// ProfileIDForName returns the profile ID for the profile with the
|
||||
// given name. It returns "" if no such profile exists among profiles
|
||||
// accessible to the current user.
|
||||
func (pm *profileManager) ProfileIDForName(name string) ipn.ProfileID {
|
||||
p := pm.findProfileByName(name)
|
||||
if p == nil {
|
||||
return ""
|
||||
}
|
||||
return p.ID
|
||||
}
|
||||
|
||||
func (pm *profileManager) findProfileByName(name string) *ipn.LoginProfile {
|
||||
out := pm.matchingProfiles(func(p *ipn.LoginProfile) bool {
|
||||
return p.Name == name
|
||||
})
|
||||
if len(out) == 0 {
|
||||
return nil
|
||||
}
|
||||
if len(out) > 1 {
|
||||
pm.logf("[unexpected] multiple profiles with the same name")
|
||||
}
|
||||
return out[0]
|
||||
}
|
||||
|
||||
func (pm *profileManager) findProfileByKey(key ipn.StateKey) *ipn.LoginProfile {
|
||||
out := pm.matchingProfiles(func(p *ipn.LoginProfile) bool {
|
||||
return p.Key == key
|
||||
})
|
||||
if len(out) == 0 {
|
||||
return nil
|
||||
}
|
||||
if len(out) > 1 {
|
||||
pm.logf("[unexpected] multiple profiles with the same key")
|
||||
}
|
||||
return out[0]
|
||||
}
|
||||
|
||||
func (pm *profileManager) setUnattendedModeAsConfigured() error {
|
||||
if pm.goos != "windows" {
|
||||
return nil
|
||||
}
|
||||
|
||||
if pm.currentProfile.Key != "" && pm.prefs.ForceDaemon() {
|
||||
return pm.WriteState(ipn.ServerModeStartKey, []byte(pm.currentProfile.Key))
|
||||
} else {
|
||||
return pm.WriteState(ipn.ServerModeStartKey, nil)
|
||||
}
|
||||
}
|
||||
|
||||
// Reset unloads the current profile, if any.
|
||||
func (pm *profileManager) Reset() {
|
||||
pm.currentUserID = ""
|
||||
pm.NewProfile()
|
||||
}
|
||||
|
||||
// SetPrefs sets the current profile's prefs to the provided value.
|
||||
// It also saves the prefs to the [ipn.StateStore]. It stores a copy of the
|
||||
// provided prefs, which may be accessed via [profileManager.CurrentPrefs].
|
||||
//
|
||||
// The [ipn.NetworkProfile] stores additional information about the tailnet the user
|
||||
// is logged into so that we can keep track of things like their domain name
|
||||
// across user switches to disambiguate the same account but a different tailnet.
|
||||
func (pm *profileManager) SetPrefs(prefsIn ipn.PrefsView, np ipn.NetworkProfile) error {
|
||||
cp := pm.currentProfile
|
||||
if persist := prefsIn.Persist(); !persist.Valid() || persist.NodeID() == "" || persist.UserProfile().LoginName == "" {
|
||||
// We don't know anything about this profile, so ignore it for now.
|
||||
return pm.setProfilePrefsNoPermCheck(pm.currentProfile, prefsIn.AsStruct().View())
|
||||
}
|
||||
|
||||
// Check if we already have an existing profile that matches the user/node.
|
||||
if existing := pm.findMatchingProfiles(prefsIn); len(existing) > 0 {
|
||||
// We already have a profile for this user/node; we should reuse it. Also
|
||||
// clean up any other duplicate profiles.
|
||||
cp = existing[0]
|
||||
existing = existing[1:]
|
||||
for _, p := range existing {
|
||||
// Clear the state.
|
||||
if err := pm.store.WriteState(p.Key, nil); err != nil {
|
||||
// We couldn't delete the state, so keep the profile around.
|
||||
continue
|
||||
}
|
||||
// Remove the profile; knownProfiles will be persisted
|
||||
// in [profileManager.setProfilePrefs] below.
|
||||
delete(pm.knownProfiles, p.ID)
|
||||
}
|
||||
}
|
||||
pm.currentProfile = cp
|
||||
if err := pm.SetProfilePrefs(cp, prefsIn, np); err != nil {
|
||||
return err
|
||||
}
|
||||
return pm.setProfileAsUserDefault(cp)
|
||||
|
||||
}
|
||||
|
||||
// SetProfilePrefs is like [profileManager.SetPrefs], but sets prefs for the specified [ipn.LoginProfile]
|
||||
// which is not necessarily the [profileManager.CurrentProfile]. It returns an [errProfileAccessDenied]
|
||||
// if the specified profile is not accessible by the current user.
|
||||
func (pm *profileManager) SetProfilePrefs(lp *ipn.LoginProfile, prefsIn ipn.PrefsView, np ipn.NetworkProfile) error {
|
||||
if err := pm.checkProfileAccess(lp); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
// An empty profile.ID indicates that the profile is new, the node info wasn't available,
|
||||
// and it hasn't been persisted yet. We'll generate both an ID and [ipn.StateKey]
|
||||
// once the information is available and needs to be persisted.
|
||||
if lp.ID == "" {
|
||||
if persist := prefsIn.Persist(); persist.Valid() && persist.NodeID() != "" && persist.UserProfile().LoginName != "" {
|
||||
// Generate an ID and [ipn.StateKey] now that we have the node info.
|
||||
lp.ID, lp.Key = newUnusedID(pm.knownProfiles)
|
||||
}
|
||||
|
||||
// Set the current user as the profile owner, unless the current user ID does
|
||||
// not represent a specific user, or the profile is already owned by a different user.
|
||||
// It is only relevant on Windows where we have a multi-user system.
|
||||
if lp.LocalUserID == "" && pm.currentUserID != "" {
|
||||
lp.LocalUserID = pm.currentUserID
|
||||
}
|
||||
}
|
||||
|
||||
var up tailcfg.UserProfile
|
||||
if persist := prefsIn.Persist(); persist.Valid() {
|
||||
up = persist.UserProfile()
|
||||
if up.DisplayName == "" {
|
||||
up.DisplayName = up.LoginName
|
||||
}
|
||||
lp.NodeID = persist.NodeID()
|
||||
} else {
|
||||
lp.NodeID = ""
|
||||
}
|
||||
|
||||
if prefsIn.ProfileName() != "" {
|
||||
lp.Name = prefsIn.ProfileName()
|
||||
} else {
|
||||
lp.Name = up.LoginName
|
||||
}
|
||||
lp.ControlURL = prefsIn.ControlURL()
|
||||
lp.UserProfile = up
|
||||
lp.NetworkProfile = np
|
||||
|
||||
// An empty profile.ID indicates that the node info is not available yet,
|
||||
// and the profile doesn't need to be saved on disk.
|
||||
if lp.ID != "" {
|
||||
pm.knownProfiles[lp.ID] = lp
|
||||
if err := pm.writeKnownProfiles(); err != nil {
|
||||
return err
|
||||
}
|
||||
// Clone prefsIn and create a read-only view as a safety measure to
|
||||
// prevent accidental preference mutations, both externally and internally.
|
||||
if err := pm.setProfilePrefsNoPermCheck(lp, prefsIn.AsStruct().View()); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func newUnusedID(knownProfiles map[ipn.ProfileID]*ipn.LoginProfile) (ipn.ProfileID, ipn.StateKey) {
|
||||
var idb [2]byte
|
||||
for {
|
||||
rand.Read(idb[:])
|
||||
id := ipn.ProfileID(fmt.Sprintf("%x", idb))
|
||||
if _, ok := knownProfiles[id]; ok {
|
||||
continue
|
||||
}
|
||||
return id, ipn.StateKey("profile-" + id)
|
||||
}
|
||||
}
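// For example (editor's note, not part of the upstream file); the hex value is
// random and shown only to illustrate the shape of the result:
//
//	id, key := newUnusedID(pm.knownProfiles)
//	// id  == ipn.ProfileID("9f3c")
//	// key == ipn.StateKey("profile-9f3c")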
|
||||
|
||||
// setProfilePrefsNoPermCheck sets the profile's prefs to the provided value.
|
||||
// If the profile has the [ipn.LoginProfile.Key] set, it saves the prefs to the
|
||||
// [ipn.StateStore] under that key. It returns an error if the profile is non-current
|
||||
// and does not have its Key set, or if the prefs could not be saved.
|
||||
// The method does not perform any additional checks on the specified
|
||||
// profile, such as verifying the caller's access rights or checking
|
||||
// if another profile for the same node already exists.
|
||||
func (pm *profileManager) setProfilePrefsNoPermCheck(profile *ipn.LoginProfile, clonedPrefs ipn.PrefsView) error {
|
||||
isCurrentProfile := pm.currentProfile == profile
|
||||
if isCurrentProfile {
|
||||
pm.prefs = clonedPrefs
|
||||
pm.updateHealth()
|
||||
}
|
||||
if profile.Key != "" {
|
||||
if err := pm.writePrefsToStore(profile.Key, clonedPrefs); err != nil {
|
||||
return err
|
||||
}
|
||||
} else if !isCurrentProfile {
|
||||
return errors.New("cannot set prefs for a non-current in-memory profile")
|
||||
}
|
||||
if isCurrentProfile {
|
||||
return pm.setUnattendedModeAsConfigured()
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// setPrefsNoPermCheck is like [profileManager.setProfilePrefsNoPermCheck], but sets the current profile's prefs.
|
||||
func (pm *profileManager) setPrefsNoPermCheck(clonedPrefs ipn.PrefsView) error {
|
||||
return pm.setProfilePrefsNoPermCheck(pm.currentProfile, clonedPrefs)
|
||||
}
|
||||
|
||||
func (pm *profileManager) writePrefsToStore(key ipn.StateKey, prefs ipn.PrefsView) error {
|
||||
if key == "" {
|
||||
return nil
|
||||
}
|
||||
if err := pm.WriteState(key, prefs.ToBytes()); err != nil {
|
||||
pm.logf("WriteState(%q): %v", key, err)
|
||||
return err
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// Profiles returns the list of known profiles accessible to the current user.
|
||||
func (pm *profileManager) Profiles() []ipn.LoginProfile {
|
||||
allProfiles := pm.allProfiles()
|
||||
out := make([]ipn.LoginProfile, len(allProfiles))
|
||||
for i, p := range allProfiles {
|
||||
out[i] = *p
|
||||
}
|
||||
return out
|
||||
}
|
||||
|
||||
// ProfileByID returns a profile with the given id, if it is accessible to the current user.
|
||||
// If the profile exists but is not accessible to the current user, it returns an [errProfileAccessDenied].
|
||||
// If the profile does not exist, it returns an [errProfileNotFound].
|
||||
func (pm *profileManager) ProfileByID(id ipn.ProfileID) (ipn.LoginProfile, error) {
|
||||
kp, err := pm.profileByIDNoPermCheck(id)
|
||||
if err != nil {
|
||||
return ipn.LoginProfile{}, err
|
||||
}
|
||||
if err := pm.checkProfileAccess(kp); err != nil {
|
||||
return ipn.LoginProfile{}, err
|
||||
}
|
||||
return *kp, nil
|
||||
}
|
||||
|
||||
// profileByIDNoPermCheck is like [profileManager.ProfileByID], but it doesn't
|
||||
// check user's access rights to the profile.
|
||||
func (pm *profileManager) profileByIDNoPermCheck(id ipn.ProfileID) (*ipn.LoginProfile, error) {
|
||||
if id == pm.currentProfile.ID {
|
||||
return pm.currentProfile, nil
|
||||
}
|
||||
kp, ok := pm.knownProfiles[id]
|
||||
if !ok {
|
||||
return nil, errProfileNotFound
|
||||
}
|
||||
return kp, nil
|
||||
}
|
||||
|
||||
// ProfilePrefs returns preferences for a profile with the given id.
|
||||
// If the profile exists but is not accessible to the current user, it returns an [errProfileAccessDenied].
|
||||
// If the profile does not exist, it returns an [errProfileNotFound].
|
||||
func (pm *profileManager) ProfilePrefs(id ipn.ProfileID) (ipn.PrefsView, error) {
|
||||
kp, err := pm.profileByIDNoPermCheck(id)
|
||||
if err != nil {
|
||||
return ipn.PrefsView{}, errProfileNotFound
|
||||
}
|
||||
if err := pm.checkProfileAccess(kp); err != nil {
|
||||
return ipn.PrefsView{}, err
|
||||
}
|
||||
return pm.profilePrefs(kp)
|
||||
}
|
||||
|
||||
func (pm *profileManager) profilePrefs(p *ipn.LoginProfile) (ipn.PrefsView, error) {
|
||||
if p.ID == pm.currentProfile.ID {
|
||||
return pm.prefs, nil
|
||||
}
|
||||
return pm.loadSavedPrefs(p.Key)
|
||||
}
|
||||
|
||||
// SwitchProfile switches to the profile with the given id.
|
||||
// If the profile exists but is not accessible to the current user, it returns an [errProfileAccessDenied].
|
||||
// If the profile does not exist, it returns an [errProfileNotFound].
|
||||
func (pm *profileManager) SwitchProfile(id ipn.ProfileID) error {
|
||||
metricSwitchProfile.Add(1)
|
||||
|
||||
kp, ok := pm.knownProfiles[id]
|
||||
if !ok {
|
||||
return errProfileNotFound
|
||||
}
|
||||
if pm.currentProfile != nil && kp.ID == pm.currentProfile.ID && pm.prefs.Valid() {
|
||||
return nil
|
||||
}
|
||||
|
||||
if err := pm.checkProfileAccess(kp); err != nil {
|
||||
return fmt.Errorf("%w: profile %q is not accessible to the current user", err, id)
|
||||
}
|
||||
prefs, err := pm.loadSavedPrefs(kp.Key)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
pm.prefs = prefs
|
||||
pm.updateHealth()
|
||||
pm.currentProfile = kp
|
||||
return pm.setProfileAsUserDefault(kp)
|
||||
}
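// Editor's sketch, not part of the upstream file: callers can distinguish the
// two documented failure modes with errors.Is. The profile ID is hypothetical.
//
//	switch err := pm.SwitchProfile(ipn.ProfileID("9f3c")); {
//	case errors.Is(err, errProfileNotFound):
//		// no such profile
//	case errors.Is(err, errProfileAccessDenied):
//		// profile belongs to another Windows user
//	case err != nil:
//		// failed to load the saved prefs
//	}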
|
||||
|
||||
// SwitchToDefaultProfile switches to the default (last used) profile for the current user.
|
||||
// It creates a new one and switches to it if the current user does not have a default profile,
|
||||
// or returns an error if the default profile is inaccessible or could not be loaded.
|
||||
func (pm *profileManager) SwitchToDefaultProfile() error {
|
||||
if id := pm.DefaultUserProfileID(pm.currentUserID); id != "" {
|
||||
return pm.SwitchProfile(id)
|
||||
}
|
||||
pm.NewProfileForUser(pm.currentUserID)
|
||||
return nil
|
||||
}
|
||||
|
||||
// setProfileAsUserDefault sets the specified profile as the default for the current user.
|
||||
// It returns an [errProfileAccessDenied] if the specified profile is not accessible to the current user.
|
||||
func (pm *profileManager) setProfileAsUserDefault(profile *ipn.LoginProfile) error {
|
||||
if profile.Key == "" {
|
||||
// The profile has not been persisted yet; ignore it for now.
|
||||
return nil
|
||||
}
|
||||
if err := pm.checkProfileAccess(profile); err != nil {
|
||||
return errProfileAccessDenied
|
||||
}
|
||||
k := ipn.CurrentProfileKey(string(pm.currentUserID))
|
||||
return pm.WriteState(k, []byte(profile.Key))
|
||||
}
|
||||
|
||||
func (pm *profileManager) loadSavedPrefs(key ipn.StateKey) (ipn.PrefsView, error) {
|
||||
bs, err := pm.store.ReadState(key)
|
||||
if err == ipn.ErrStateNotExist || len(bs) == 0 {
|
||||
return defaultPrefs, nil
|
||||
}
|
||||
if err != nil {
|
||||
return ipn.PrefsView{}, err
|
||||
}
|
||||
savedPrefs := ipn.NewPrefs()
|
||||
if err := ipn.PrefsFromBytes(bs, savedPrefs); err != nil {
|
||||
return ipn.PrefsView{}, fmt.Errorf("parsing saved prefs: %v", err)
|
||||
}
|
||||
pm.logf("using backend prefs for %q: %v", key, savedPrefs.Pretty())
|
||||
|
||||
// Ignore any old stored preferences for https://login.tailscale.com
|
||||
// as the control server that would override the new default of
|
||||
// controlplane.tailscale.com.
|
||||
if savedPrefs.ControlURL != "" &&
|
||||
savedPrefs.ControlURL != ipn.DefaultControlURL &&
|
||||
ipn.IsLoginServerSynonym(savedPrefs.ControlURL) {
|
||||
savedPrefs.ControlURL = ""
|
||||
}
|
||||
// Before
|
||||
// https://github.com/tailscale/tailscale/pull/11814/commits/1613b18f8280c2bce786980532d012c9f0454fa2#diff-314ba0d799f70c8998940903efb541e511f352b39a9eeeae8d475c921d66c2ac
|
||||
// prefs could set AutoUpdate.Apply=true via EditPrefs or tailnet
|
||||
// auto-update defaults. After that change, such a value is "invalid" and
|
||||
// causes any EditPrefs calls to fail (other than disabling auto-updates).
|
||||
//
|
||||
// Reset AutoUpdate.Apply if we detect such invalid prefs.
|
||||
if savedPrefs.AutoUpdate.Apply.EqualBool(true) && !clientupdate.CanAutoUpdate() {
|
||||
savedPrefs.AutoUpdate.Apply.Clear()
|
||||
}
|
||||
|
||||
return savedPrefs.View(), nil
|
||||
}
|
||||
|
||||
// CurrentProfile returns the current LoginProfile.
|
||||
// The value may be zero if the profile is not persisted.
|
||||
func (pm *profileManager) CurrentProfile() ipn.LoginProfile {
|
||||
return *pm.currentProfile
|
||||
}
|
||||
|
||||
// errProfileNotFound is returned by methods that accept a ProfileID
|
||||
// when the specified profile does not exist.
|
||||
var errProfileNotFound = errors.New("profile not found")
|
||||
|
||||
// errProfileAccessDenied is returned by methods that accept a ProfileID
|
||||
// when the current user does not have access to the specified profile.
|
||||
// It is used temporarily until we implement access checks based on the
|
||||
// caller's identity in tailscale/corp#18342.
|
||||
var errProfileAccessDenied = errors.New("profile access denied")
|
||||
|
||||
// DeleteProfile removes the profile with the given id. It returns
|
||||
// [errProfileNotFound] if the profile does not exist, or an
|
||||
// [errProfileAccessDenied] if the specified profile is not accessible
|
||||
// to the current user.
|
||||
// If the profile is the current profile, it is the equivalent of
|
||||
// calling [profileManager.NewProfile] followed by [profileManager.DeleteProfile](id).
|
||||
// This is useful for deleting the last profile. In other cases, it is
|
||||
// recommended to call [profileManager.SwitchProfile] first.
|
||||
func (pm *profileManager) DeleteProfile(id ipn.ProfileID) error {
|
||||
metricDeleteProfile.Add(1)
|
||||
if id == pm.currentProfile.ID {
|
||||
return pm.deleteCurrentProfile()
|
||||
}
|
||||
kp, ok := pm.knownProfiles[id]
|
||||
if !ok {
|
||||
return errProfileNotFound
|
||||
}
|
||||
if err := pm.checkProfileAccess(kp); err != nil {
|
||||
return err
|
||||
}
|
||||
return pm.deleteProfileNoPermCheck(kp)
|
||||
}
|
||||
|
||||
func (pm *profileManager) deleteCurrentProfile() error {
|
||||
if err := pm.checkProfileAccess(pm.currentProfile); err != nil {
|
||||
return err
|
||||
}
|
||||
if pm.currentProfile.ID == "" {
|
||||
// Deleting the in-memory only new profile, just create a new one.
|
||||
pm.NewProfile()
|
||||
return nil
|
||||
}
|
||||
return pm.deleteProfileNoPermCheck(pm.currentProfile)
|
||||
}
|
||||
|
||||
// deleteProfileNoPermCheck is like [profileManager.DeleteProfile],
|
||||
// but it doesn't check user's access rights to the profile.
|
||||
func (pm *profileManager) deleteProfileNoPermCheck(profile *ipn.LoginProfile) error {
|
||||
if profile.ID == pm.currentProfile.ID {
|
||||
pm.NewProfile()
|
||||
}
|
||||
if err := pm.WriteState(profile.Key, nil); err != nil {
|
||||
return err
|
||||
}
|
||||
delete(pm.knownProfiles, profile.ID)
|
||||
return pm.writeKnownProfiles()
|
||||
}
|
||||
|
||||
// DeleteAllProfilesForUser removes all known profiles accessible to the current user
|
||||
// and switches to a new, empty profile.
|
||||
func (pm *profileManager) DeleteAllProfilesForUser() error {
|
||||
metricDeleteAllProfile.Add(1)
|
||||
|
||||
currentProfileDeleted := false
|
||||
writeKnownProfiles := func() error {
|
||||
if currentProfileDeleted || pm.currentProfile.ID == "" {
|
||||
pm.NewProfile()
|
||||
}
|
||||
return pm.writeKnownProfiles()
|
||||
}
|
||||
|
||||
for _, kp := range pm.knownProfiles {
|
||||
if pm.checkProfileAccess(kp) != nil {
|
||||
// Skip profiles we don't have access to.
|
||||
continue
|
||||
}
|
||||
if err := pm.WriteState(kp.Key, nil); err != nil {
|
||||
// Write to remove references to profiles we've already deleted, but
|
||||
// return the original error.
|
||||
writeKnownProfiles()
|
||||
return err
|
||||
}
|
||||
delete(pm.knownProfiles, kp.ID)
|
||||
if kp.ID == pm.currentProfile.ID {
|
||||
currentProfileDeleted = true
|
||||
}
|
||||
}
|
||||
return writeKnownProfiles()
|
||||
}
|
||||
|
||||
func (pm *profileManager) writeKnownProfiles() error {
|
||||
b, err := json.Marshal(pm.knownProfiles)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
return pm.WriteState(ipn.KnownProfilesStateKey, b)
|
||||
}
|
||||
|
||||
func (pm *profileManager) updateHealth() {
|
||||
if !pm.prefs.Valid() {
|
||||
return
|
||||
}
|
||||
pm.health.SetAutoUpdatePrefs(pm.prefs.AutoUpdate().Check, pm.prefs.AutoUpdate().Apply)
|
||||
}
|
||||
|
||||
// NewProfile creates and switches to a new unnamed profile. The new profile is
|
||||
// not persisted until [profileManager.SetPrefs] is called with a logged-in user.
|
||||
func (pm *profileManager) NewProfile() {
|
||||
pm.NewProfileForUser(pm.currentUserID)
|
||||
}
|
||||
|
||||
// NewProfileForUser is like [profileManager.NewProfile], but it switches to the
|
||||
// specified user and sets that user as the profile owner for the new profile.
|
||||
func (pm *profileManager) NewProfileForUser(uid ipn.WindowsUserID) {
|
||||
pm.currentUserID = uid
|
||||
|
||||
metricNewProfile.Add(1)
|
||||
|
||||
pm.prefs = defaultPrefs
|
||||
pm.updateHealth()
|
||||
pm.currentProfile = &ipn.LoginProfile{LocalUserID: uid}
|
||||
}
|
||||
|
||||
// newProfileWithPrefs creates a new profile with the specified prefs and assigns
|
||||
// the specified uid as the profile owner. If switchNow is true, it switches to the
|
||||
// newly created profile immediately. It returns the newly created profile on success,
|
||||
// or an error on failure.
|
||||
func (pm *profileManager) newProfileWithPrefs(uid ipn.WindowsUserID, prefs ipn.PrefsView, switchNow bool) (*ipn.LoginProfile, error) {
|
||||
metricNewProfile.Add(1)
|
||||
|
||||
profile := &ipn.LoginProfile{LocalUserID: uid}
|
||||
if err := pm.SetProfilePrefs(profile, prefs, ipn.NetworkProfile{}); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if switchNow {
|
||||
pm.currentProfile = profile
|
||||
pm.prefs = prefs.AsStruct().View()
|
||||
pm.updateHealth()
|
||||
if err := pm.setProfileAsUserDefault(profile); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
}
|
||||
return profile, nil
|
||||
}
|
||||
|
||||
// defaultPrefs is the default prefs for a new profile. This initializes before
|
||||
// even this package's init() so do not rely on other parts of the system being
|
||||
// fully initialized here (for example, syspolicy will not be available on
|
||||
// Apple platforms).
|
||||
var defaultPrefs = func() ipn.PrefsView {
|
||||
prefs := ipn.NewPrefs()
|
||||
prefs.LoggedOut = true
|
||||
prefs.WantRunning = false
|
||||
|
||||
return prefs.View()
|
||||
}()
|
||||
|
||||
// Store returns the [ipn.StateStore] used by the [profileManager].
|
||||
func (pm *profileManager) Store() ipn.StateStore {
|
||||
return pm.store
|
||||
}
|
||||
|
||||
// CurrentPrefs returns a read-only view of the current prefs.
|
||||
// The returned view is always valid.
|
||||
func (pm *profileManager) CurrentPrefs() ipn.PrefsView {
|
||||
return pm.prefs
|
||||
}
|
||||
|
||||
// ReadStartupPrefsForTest reads the startup prefs from disk. It is only used for testing.
|
||||
func ReadStartupPrefsForTest(logf logger.Logf, store ipn.StateStore) (ipn.PrefsView, error) {
|
||||
ht := new(health.Tracker) // in tests, don't care about the health status
|
||||
pm, err := newProfileManager(store, logf, ht)
|
||||
if err != nil {
|
||||
return ipn.PrefsView{}, err
|
||||
}
|
||||
return pm.CurrentPrefs(), nil
|
||||
}
|
||||
|
||||
// newProfileManager creates a new [profileManager] using the provided [ipn.StateStore].
|
||||
// It also loads the list of known profiles from the store.
|
||||
func newProfileManager(store ipn.StateStore, logf logger.Logf, health *health.Tracker) (*profileManager, error) {
|
||||
return newProfileManagerWithGOOS(store, logf, health, envknob.GOOS())
|
||||
}
|
||||
|
||||
func readAutoStartKey(store ipn.StateStore, goos string) (ipn.StateKey, error) {
|
||||
startKey := ipn.CurrentProfileStateKey
|
||||
if goos == "windows" {
|
||||
// When tailscaled runs on Windows it is not typically run unattended.
|
||||
// So we can't use the profile mechanism to load the profile at startup.
|
||||
startKey = ipn.ServerModeStartKey
|
||||
}
|
||||
autoStartKey, err := store.ReadState(startKey)
|
||||
if err != nil && err != ipn.ErrStateNotExist {
|
||||
return "", fmt.Errorf("calling ReadState on state store: %w", err)
|
||||
}
|
||||
return ipn.StateKey(autoStartKey), nil
|
||||
}
|
||||
|
||||
func readKnownProfiles(store ipn.StateStore) (map[ipn.ProfileID]*ipn.LoginProfile, error) {
|
||||
var knownProfiles map[ipn.ProfileID]*ipn.LoginProfile
|
||||
prfB, err := store.ReadState(ipn.KnownProfilesStateKey)
|
||||
switch err {
|
||||
case nil:
|
||||
if err := json.Unmarshal(prfB, &knownProfiles); err != nil {
|
||||
return nil, fmt.Errorf("unmarshaling known profiles: %w", err)
|
||||
}
|
||||
case ipn.ErrStateNotExist:
|
||||
knownProfiles = make(map[ipn.ProfileID]*ipn.LoginProfile)
|
||||
default:
|
||||
return nil, fmt.Errorf("calling ReadState on state store: %w", err)
|
||||
}
|
||||
return knownProfiles, nil
|
||||
}
|
||||
|
||||
func newProfileManagerWithGOOS(store ipn.StateStore, logf logger.Logf, ht *health.Tracker, goos string) (*profileManager, error) {
|
||||
logf = logger.WithPrefix(logf, "pm: ")
|
||||
stateKey, err := readAutoStartKey(store, goos)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
knownProfiles, err := readKnownProfiles(store)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
pm := &profileManager{
|
||||
goos: goos,
|
||||
store: store,
|
||||
knownProfiles: knownProfiles,
|
||||
logf: logf,
|
||||
health: ht,
|
||||
}
|
||||
|
||||
if stateKey != "" {
|
||||
for _, v := range knownProfiles {
|
||||
if v.Key == stateKey {
|
||||
pm.currentProfile = v
|
||||
}
|
||||
}
|
||||
if pm.currentProfile == nil {
|
||||
if suf, ok := strings.CutPrefix(string(stateKey), "user-"); ok {
|
||||
pm.currentUserID = ipn.WindowsUserID(suf)
|
||||
}
|
||||
pm.NewProfile()
|
||||
} else {
|
||||
pm.currentUserID = pm.currentProfile.LocalUserID
|
||||
}
|
||||
prefs, err := pm.loadSavedPrefs(stateKey)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if err := pm.setProfilePrefsNoPermCheck(pm.currentProfile, prefs); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
// Most platform behavior is controlled by the goos parameter; however,
// some behavior is implied by build tag and fails when run on Windows,
// so we explicitly avoid that behavior when running on Windows.
// Specifically, this reaches down into legacy preference loading that is
// specialized by profiles_windows.go and fails in tests on an invalid
// uid passed in from the unix tests. The uids used for Windows tests
// and runtime must be valid Windows security identifier structures.
|
||||
} else if len(knownProfiles) == 0 && goos != "windows" && runtime.GOOS != "windows" {
|
||||
// No known profiles, try a migration.
|
||||
pm.dlogf("no known profiles; trying to migrate from legacy prefs")
|
||||
if _, err := pm.migrateFromLegacyPrefs(pm.currentUserID, true); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
} else {
|
||||
pm.NewProfile()
|
||||
}
|
||||
|
||||
return pm, nil
|
||||
}
|
||||
|
||||
func (pm *profileManager) migrateFromLegacyPrefs(uid ipn.WindowsUserID, switchNow bool) (*ipn.LoginProfile, error) {
|
||||
metricMigration.Add(1)
|
||||
sentinel, prefs, err := pm.loadLegacyPrefs(uid)
|
||||
if err != nil {
|
||||
metricMigrationError.Add(1)
|
||||
return nil, fmt.Errorf("load legacy prefs: %w", err)
|
||||
}
|
||||
pm.dlogf("loaded legacy preferences; sentinel=%q", sentinel)
|
||||
profile, err := pm.newProfileWithPrefs(uid, prefs, switchNow)
|
||||
if err != nil {
|
||||
metricMigrationError.Add(1)
|
||||
return nil, fmt.Errorf("migrating _daemon profile: %w", err)
|
||||
}
|
||||
pm.completeMigration(sentinel)
|
||||
pm.dlogf("completed legacy preferences migration with sentinel=%q", sentinel)
|
||||
metricMigrationSuccess.Add(1)
|
||||
return profile, nil
|
||||
}
|
||||
|
||||
func (pm *profileManager) requiresBackfill() bool {
|
||||
return pm != nil &&
|
||||
pm.currentProfile != nil &&
|
||||
pm.currentProfile.NetworkProfile.RequiresBackfill()
|
||||
}
|
||||
|
||||
var (
|
||||
metricNewProfile = clientmetric.NewCounter("profiles_new")
|
||||
metricSwitchProfile = clientmetric.NewCounter("profiles_switch")
|
||||
metricDeleteProfile = clientmetric.NewCounter("profiles_delete")
|
||||
metricDeleteAllProfile = clientmetric.NewCounter("profiles_delete_all")
|
||||
|
||||
metricMigration = clientmetric.NewCounter("profiles_migration")
|
||||
metricMigrationError = clientmetric.NewCounter("profiles_migration_error")
|
||||
metricMigrationSuccess = clientmetric.NewCounter("profiles_migration_success")
|
||||
)
|
||||
35
vendor/tailscale.com/ipn/ipnlocal/profiles_notwindows.go
generated
vendored
Normal file
@@ -0,0 +1,35 @@
|
||||
// Copyright (c) Tailscale Inc & AUTHORS
|
||||
// SPDX-License-Identifier: BSD-3-Clause
|
||||
|
||||
//go:build !windows
|
||||
|
||||
package ipnlocal
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"runtime"
|
||||
|
||||
"tailscale.com/ipn"
|
||||
"tailscale.com/version"
|
||||
)
|
||||
|
||||
func (pm *profileManager) loadLegacyPrefs(ipn.WindowsUserID) (string, ipn.PrefsView, error) {
|
||||
k := ipn.LegacyGlobalDaemonStateKey
|
||||
switch {
|
||||
case runtime.GOOS == "ios", version.IsSandboxedMacOS():
|
||||
k = "ipn-go-bridge"
|
||||
case runtime.GOOS == "android":
|
||||
k = "ipn-android"
|
||||
}
|
||||
prefs, err := pm.loadSavedPrefs(k)
|
||||
if err != nil {
|
||||
return "", ipn.PrefsView{}, fmt.Errorf("calling ReadState on state store: %w", err)
|
||||
}
|
||||
pm.logf("migrating %q profile to new format", k)
|
||||
return "", prefs, nil
|
||||
}
|
||||
|
||||
func (pm *profileManager) completeMigration(migrationSentinel string) {
|
||||
// Do not delete the old state key, as we may be downgraded to an
|
||||
// older version that still relies on it.
|
||||
}
|
||||
79
vendor/tailscale.com/ipn/ipnlocal/profiles_windows.go
generated
vendored
Normal file
@@ -0,0 +1,79 @@
|
||||
// Copyright (c) Tailscale Inc & AUTHORS
|
||||
// SPDX-License-Identifier: BSD-3-Clause
|
||||
|
||||
package ipnlocal
|
||||
|
||||
import (
|
||||
"errors"
|
||||
"fmt"
|
||||
"io/fs"
|
||||
"os"
|
||||
"os/user"
|
||||
"path/filepath"
|
||||
|
||||
"tailscale.com/atomicfile"
|
||||
"tailscale.com/ipn"
|
||||
"tailscale.com/util/winutil/policy"
|
||||
)
|
||||
|
||||
const (
|
||||
legacyPrefsFile = "prefs"
|
||||
legacyPrefsMigrationSentinelFile = "_migrated-to-profiles"
|
||||
legacyPrefsExt = ".conf"
|
||||
)
|
||||
|
||||
var errAlreadyMigrated = errors.New("profile migration already completed")
|
||||
|
||||
func legacyPrefsDir(uid ipn.WindowsUserID) (string, error) {
|
||||
// TODO(aaron): Ideally we'd have the impersonation token for the pipe's
|
||||
// client and use it to call SHGetKnownFolderPath, thus yielding the correct
|
||||
// path without having to make gross assumptions about directory names.
|
||||
usr, err := user.LookupId(string(uid))
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
if usr.HomeDir == "" {
|
||||
return "", fmt.Errorf("user %q does not have a home directory", uid)
|
||||
}
|
||||
userLegacyPrefsDir := filepath.Join(usr.HomeDir, "AppData", "Local", "Tailscale")
|
||||
return userLegacyPrefsDir, nil
|
||||
}
|
||||
|
||||
func (pm *profileManager) loadLegacyPrefs(uid ipn.WindowsUserID) (string, ipn.PrefsView, error) {
|
||||
userLegacyPrefsDir, err := legacyPrefsDir(uid)
|
||||
if err != nil {
|
||||
pm.dlogf("no legacy preferences directory for %q: %v", uid, err)
|
||||
return "", ipn.PrefsView{}, err
|
||||
}
|
||||
|
||||
migrationSentinel := filepath.Join(userLegacyPrefsDir, legacyPrefsMigrationSentinelFile+legacyPrefsExt)
|
||||
// verify that migration sentinel is not present
|
||||
_, err = os.Stat(migrationSentinel)
|
||||
if err == nil {
|
||||
pm.dlogf("migration sentinel %q already exists", migrationSentinel)
|
||||
return "", ipn.PrefsView{}, errAlreadyMigrated
|
||||
}
|
||||
if !os.IsNotExist(err) {
|
||||
pm.dlogf("os.Stat(%q) = %v", migrationSentinel, err)
|
||||
return "", ipn.PrefsView{}, err
|
||||
}
|
||||
|
||||
prefsPath := filepath.Join(userLegacyPrefsDir, legacyPrefsFile+legacyPrefsExt)
|
||||
prefs, err := ipn.LoadPrefsWindows(prefsPath)
|
||||
pm.dlogf("ipn.LoadPrefs(%q) = %v, %v", prefsPath, prefs, err)
|
||||
if errors.Is(err, fs.ErrNotExist) {
|
||||
return "", ipn.PrefsView{}, errAlreadyMigrated
|
||||
}
|
||||
if err != nil {
|
||||
return "", ipn.PrefsView{}, err
|
||||
}
|
||||
|
||||
prefs.ControlURL = policy.SelectControlURL(defaultPrefs.ControlURL(), prefs.ControlURL)
|
||||
|
||||
pm.logf("migrating Windows profile to new format")
|
||||
return migrationSentinel, prefs.View(), nil
|
||||
}
|
||||
|
||||
func (pm *profileManager) completeMigration(migrationSentinel string) {
|
||||
atomicfile.WriteFile(migrationSentinel, []byte{}, 0600)
|
||||
}
|
||||
939
vendor/tailscale.com/ipn/ipnlocal/serve.go
generated
vendored
Normal file
@@ -0,0 +1,939 @@
|
||||
// Copyright (c) Tailscale Inc & AUTHORS
|
||||
// SPDX-License-Identifier: BSD-3-Clause
|
||||
|
||||
package ipnlocal
|
||||
|
||||
import (
|
||||
"context"
|
||||
"crypto/sha256"
|
||||
"crypto/tls"
|
||||
"encoding/hex"
|
||||
"encoding/json"
|
||||
"errors"
|
||||
"fmt"
|
||||
"io"
|
||||
"mime"
|
||||
"net"
|
||||
"net/http"
|
||||
"net/http/httputil"
|
||||
"net/netip"
|
||||
"net/url"
|
||||
"os"
|
||||
"path"
|
||||
"slices"
|
||||
"strconv"
|
||||
"strings"
|
||||
"sync"
|
||||
"sync/atomic"
|
||||
"time"
|
||||
"unicode/utf8"
|
||||
|
||||
"golang.org/x/net/http2"
|
||||
"tailscale.com/ipn"
|
||||
"tailscale.com/logtail/backoff"
|
||||
"tailscale.com/net/netutil"
|
||||
"tailscale.com/syncs"
|
||||
"tailscale.com/tailcfg"
|
||||
"tailscale.com/types/lazy"
|
||||
"tailscale.com/types/logger"
|
||||
"tailscale.com/util/ctxkey"
|
||||
"tailscale.com/util/mak"
|
||||
"tailscale.com/version"
|
||||
)
|
||||
|
||||
const (
|
||||
contentTypeHeader = "Content-Type"
|
||||
grpcBaseContentType = "application/grpc"
|
||||
)
|
||||
|
||||
// ErrETagMismatch signals that the given
|
||||
// If-Match header does not match with the
|
||||
// current etag of a resource.
|
||||
var ErrETagMismatch = errors.New("etag mismatch")
|
||||
|
||||
var serveHTTPContextKey ctxkey.Key[*serveHTTPContext]
|
||||
|
||||
type serveHTTPContext struct {
|
||||
SrcAddr netip.AddrPort
|
||||
DestPort uint16
|
||||
|
||||
// provides funnel-specific context, nil if not funneled
|
||||
Funnel *funnelFlow
|
||||
}
|
||||
|
||||
// funnelFlow represents a funneled connection initiated via IngressPeer
|
||||
// to Host.
|
||||
type funnelFlow struct {
|
||||
Host string
|
||||
IngressPeer tailcfg.NodeView
|
||||
}
|
||||
|
||||
// localListener is the state of host-level net.Listen for a specific (Tailscale IP, port)
|
||||
// combination. If there are two TailscaleIPs (v4 and v6) and three ports being served,
|
||||
// then there will be six of these active and looping in their Run method.
|
||||
//
|
||||
// This is not used in userspace-networking mode.
|
||||
//
|
||||
// localListener is used by tailscale serve (TCP only), the built-in web client and Taildrive.
|
||||
// Most serve traffic and peer traffic for the web client are intercepted by netstack.
|
||||
// This listener exists purely for connections from the machine itself, as that goes via the kernel,
|
||||
// so we need to be in the kernel's listening/routing tables.
|
||||
type localListener struct {
|
||||
b *LocalBackend
|
||||
ap netip.AddrPort
|
||||
ctx context.Context // valid while listener is desired
|
||||
cancel context.CancelFunc // for ctx, to close listener
|
||||
logf logger.Logf
|
||||
bo *backoff.Backoff // for retrying failed Listen calls
|
||||
|
||||
handler func(net.Conn) error // handler for inbound connections
|
||||
closeListener syncs.AtomicValue[func() error] // Listener's Close method, if any
|
||||
}
|
||||
|
||||
func (b *LocalBackend) newServeListener(ctx context.Context, ap netip.AddrPort, logf logger.Logf) *localListener {
|
||||
ctx, cancel := context.WithCancel(ctx)
|
||||
return &localListener{
|
||||
b: b,
|
||||
ap: ap,
|
||||
ctx: ctx,
|
||||
cancel: cancel,
|
||||
logf: logf,
|
||||
|
||||
handler: func(conn net.Conn) error {
|
||||
srcAddr := conn.RemoteAddr().(*net.TCPAddr).AddrPort()
|
||||
handler := b.tcpHandlerForServe(ap.Port(), srcAddr, nil)
|
||||
if handler == nil {
|
||||
b.logf("[unexpected] local-serve: no handler for %v to port %v", srcAddr, ap.Port())
|
||||
conn.Close()
|
||||
return nil
|
||||
}
|
||||
return handler(conn)
|
||||
},
|
||||
bo: backoff.NewBackoff("serve-listener", logf, 30*time.Second),
|
||||
}
|
||||
|
||||
}
|
||||
|
||||
// Close cancels the context and closes the listener, if any.
|
||||
func (s *localListener) Close() error {
|
||||
s.cancel()
|
||||
if close, ok := s.closeListener.LoadOk(); ok {
|
||||
s.closeListener.Store(nil)
|
||||
close()
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// Run starts a net.Listen for the localListener's address and port.
|
||||
// If unable to listen, it retries with exponential backoff.
|
||||
// Listen is retried until the context is canceled.
|
||||
func (s *localListener) Run() {
|
||||
for {
|
||||
ip := s.ap.Addr()
|
||||
ipStr := ip.String()
|
||||
|
||||
var lc net.ListenConfig
|
||||
if initListenConfig != nil {
|
||||
// On macOS, this sets the lc.Control hook to
|
||||
// setsockopt the interface index to bind to. This is
|
||||
// required by the network sandbox to allow binding to
|
||||
// a specific interface. Without this hook, the system
|
||||
// chooses a default interface to bind to.
|
||||
if err := initListenConfig(&lc, ip, s.b.prevIfState, s.b.dialer.TUNName()); err != nil {
|
||||
s.logf("localListener failed to init listen config %v, backing off: %v", s.ap, err)
|
||||
s.bo.BackOff(s.ctx, err)
|
||||
continue
|
||||
}
|
||||
// On sandboxed macOS (App Store or macsys), when binding to a privileged port:
|
||||
if version.IsSandboxedMacOS() && s.ap.Port() < 1024 {
|
||||
// On macOS, we need to bind to ""/all-interfaces due to
|
||||
// the network sandbox. Ideally we would only bind to the
|
||||
// Tailscale interface, but macOS errors out if we try to
|
||||
// listen on privileged ports binding only to a specific
|
||||
// interface. (#6364)
|
||||
ipStr = ""
|
||||
}
|
||||
}
|
||||
|
||||
tcp4or6 := "tcp4"
|
||||
if ip.Is6() {
|
||||
tcp4or6 = "tcp6"
|
||||
}
|
||||
|
||||
// While we were backing off and trying again, the context may have been
// canceled; in that case don't bind, just return, because otherwise there
// would be no way to close this listener.
|
||||
if s.ctx.Err() != nil {
|
||||
s.logf("localListener context closed before binding")
|
||||
return
|
||||
}
|
||||
|
||||
ln, err := lc.Listen(s.ctx, tcp4or6, net.JoinHostPort(ipStr, fmt.Sprint(s.ap.Port())))
|
||||
if err != nil {
|
||||
if s.shouldWarnAboutListenError(err) {
|
||||
s.logf("localListener failed to listen on %v, backing off: %v", s.ap, err)
|
||||
}
|
||||
s.bo.BackOff(s.ctx, err)
|
||||
continue
|
||||
}
|
||||
s.closeListener.Store(ln.Close)
|
||||
|
||||
s.logf("listening on %v", s.ap)
|
||||
err = s.handleListenersAccept(ln)
|
||||
if s.ctx.Err() != nil {
|
||||
// context canceled, we're done
|
||||
return
|
||||
}
|
||||
if err != nil {
|
||||
s.logf("localListener accept error, retrying: %v", err)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func (s *localListener) shouldWarnAboutListenError(err error) bool {
|
||||
if !s.b.sys.NetMon.Get().InterfaceState().HasIP(s.ap.Addr()) {
|
||||
// Machine likely doesn't have IPv6 enabled (or the IP is still being
|
||||
// assigned). No need to warn. Notably, WSL2 (Issue 6303).
|
||||
return false
|
||||
}
|
||||
// TODO(bradfitz): check errors.Is(err, syscall.EADDRNOTAVAIL) etc? Let's
|
||||
// see what happens in practice.
|
||||
return true
|
||||
}
|
||||
|
||||
// handleListenersAccept accepts connections for the Listener. It calls the
|
||||
// handler in a new goroutine for each accepted connection. This is used to
|
||||
// handle local "tailscale serve" and web client traffic originating from the
|
||||
// machine itself.
|
||||
func (s *localListener) handleListenersAccept(ln net.Listener) error {
|
||||
for {
|
||||
conn, err := ln.Accept()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
go s.handler(conn)
|
||||
}
|
||||
}
|
||||
|
||||
// updateServeTCPPortNetMapAddrListenersLocked starts a net.Listen for configured
|
||||
// Serve ports on all the node's addresses.
|
||||
// Existing listeners are closed if their port is no longer in the incoming ports list.
|
||||
//
|
||||
// b.mu must be held.
|
||||
func (b *LocalBackend) updateServeTCPPortNetMapAddrListenersLocked(ports []uint16) {
|
||||
// close existing listeners where port
|
||||
// is no longer in incoming ports list
|
||||
for ap, sl := range b.serveListeners {
|
||||
if !slices.Contains(ports, ap.Port()) {
|
||||
b.logf("closing listener %v", ap)
|
||||
sl.Close()
|
||||
delete(b.serveListeners, ap)
|
||||
}
|
||||
}
|
||||
|
||||
nm := b.netMap
|
||||
if nm == nil {
|
||||
b.logf("netMap is nil")
|
||||
return
|
||||
}
|
||||
if !nm.SelfNode.Valid() {
|
||||
b.logf("netMap SelfNode is nil")
|
||||
return
|
||||
}
|
||||
|
||||
addrs := nm.GetAddresses()
|
||||
for i := range addrs.Len() {
|
||||
a := addrs.At(i)
|
||||
for _, p := range ports {
|
||||
addrPort := netip.AddrPortFrom(a.Addr(), p)
|
||||
if _, ok := b.serveListeners[addrPort]; ok {
|
||||
continue // already listening
|
||||
}
|
||||
|
||||
sl := b.newServeListener(context.Background(), addrPort, b.logf)
|
||||
mak.Set(&b.serveListeners, addrPort, sl)
|
||||
|
||||
go sl.Run()
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// SetServeConfig establishes or replaces the current serve config.
|
||||
// ETag is an optional parameter to enforce Optimistic Concurrency Control.
|
||||
// If it is an empty string, then the config will be overwritten.
|
||||
func (b *LocalBackend) SetServeConfig(config *ipn.ServeConfig, etag string) error {
|
||||
b.mu.Lock()
|
||||
defer b.mu.Unlock()
|
||||
return b.setServeConfigLocked(config, etag)
|
||||
}
|
||||
|
||||
func (b *LocalBackend) setServeConfigLocked(config *ipn.ServeConfig, etag string) error {
|
||||
prefs := b.pm.CurrentPrefs()
|
||||
if config.IsFunnelOn() && prefs.ShieldsUp() {
|
||||
return errors.New("Unable to turn on Funnel while shields-up is enabled")
|
||||
}
|
||||
if b.isConfigLocked_Locked() {
|
||||
return errors.New("can't reconfigure tailscaled when using a config file; config file is locked")
|
||||
}
|
||||
|
||||
nm := b.netMap
|
||||
if nm == nil {
|
||||
return errors.New("netMap is nil")
|
||||
}
|
||||
if !nm.SelfNode.Valid() {
|
||||
return errors.New("netMap SelfNode is nil")
|
||||
}
|
||||
|
||||
// If etag is present, check that it has
|
||||
// not changed from the last config.
|
||||
prevConfig := b.serveConfig
|
||||
if etag != "" {
|
||||
// Note that we marshal b.serveConfig
|
||||
// and not use b.lastServeConfJSON as that might
|
||||
// be a Go nil value, which produces a different
|
||||
// checksum from a JSON "null" value.
|
||||
prevBytes, err := json.Marshal(prevConfig)
|
||||
if err != nil {
|
||||
return fmt.Errorf("error encoding previous config: %w", err)
|
||||
}
|
||||
sum := sha256.Sum256(prevBytes)
|
||||
previousEtag := hex.EncodeToString(sum[:])
|
||||
if etag != previousEtag {
|
||||
return ErrETagMismatch
|
||||
}
|
||||
}
|
||||
|
||||
var bs []byte
|
||||
if config != nil {
|
||||
j, err := json.Marshal(config)
|
||||
if err != nil {
|
||||
return fmt.Errorf("encoding serve config: %w", err)
|
||||
}
|
||||
bs = j
|
||||
}
|
||||
|
||||
profileID := b.pm.CurrentProfile().ID
|
||||
confKey := ipn.ServeConfigKey(profileID)
|
||||
if err := b.store.WriteState(confKey, bs); err != nil {
|
||||
return fmt.Errorf("writing ServeConfig to StateStore: %w", err)
|
||||
}
|
||||
|
||||
b.setTCPPortsInterceptedFromNetmapAndPrefsLocked(b.pm.CurrentPrefs())
|
||||
|
||||
// clean up and close all previously open foreground sessions
|
||||
// if the current ServeConfig has overwritten them.
|
||||
if prevConfig.Valid() {
|
||||
has := func(string) bool { return false }
|
||||
if b.serveConfig.Valid() {
|
||||
has = b.serveConfig.Foreground().Contains
|
||||
}
|
||||
prevConfig.Foreground().Range(func(k string, v ipn.ServeConfigView) (cont bool) {
|
||||
if !has(k) {
|
||||
for _, sess := range b.notifyWatchers {
|
||||
if sess.sessionID == k {
|
||||
sess.cancel()
|
||||
}
|
||||
}
|
||||
}
|
||||
return true
|
||||
})
|
||||
}
|
||||
|
||||
return nil
|
||||
}
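// exampleServeConfigETag is an illustrative sketch, not part of the vendored
// source: it shows how a caller could compute the ETag that the check above
// expects, i.e. the hex-encoded SHA-256 of the JSON encoding of the serve
// config it previously read. The function name and sc parameter are
// hypothetical.
func exampleServeConfigETag(sc *ipn.ServeConfig) (string, error) {
	j, err := json.Marshal(sc)
	if err != nil {
		return "", err
	}
	sum := sha256.Sum256(j)
	return hex.EncodeToString(sum[:]), nil
}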
|
||||
|
||||
// ServeConfig provides a view of the current serve mappings.
|
||||
// If serving is not configured, the returned view is not Valid.
|
||||
func (b *LocalBackend) ServeConfig() ipn.ServeConfigView {
|
||||
b.mu.Lock()
|
||||
defer b.mu.Unlock()
|
||||
return b.serveConfig
|
||||
}
|
||||
|
||||
// DeleteForegroundSession deletes a ServeConfig's foreground session
|
||||
// in the LocalBackend if it exists. It also ensures check, delete, and
|
||||
// set operations happen within the same mutex lock to avoid any races.
|
||||
func (b *LocalBackend) DeleteForegroundSession(sessionID string) error {
|
||||
b.mu.Lock()
|
||||
defer b.mu.Unlock()
|
||||
if !b.serveConfig.Valid() || !b.serveConfig.Foreground().Contains(sessionID) {
|
||||
return nil
|
||||
}
|
||||
sc := b.serveConfig.AsStruct()
|
||||
delete(sc.Foreground, sessionID)
|
||||
return b.setServeConfigLocked(sc, "")
|
||||
}
|
||||
|
||||
// HandleIngressTCPConn handles a TCP connection initiated by the ingressPeer
|
||||
// proxied to the local node over the PeerAPI.
|
||||
// Target represents the destination HostPort of the conn.
|
||||
// srcAddr represents the source AddrPort and not that of the ingressPeer.
|
||||
// getConnOrReset is a callback to get the connection, or reset if the connection
|
||||
// is no longer available.
|
||||
// sendRST is a callback to send a TCP RST to the ingressPeer indicating that
|
||||
// the connection was not accepted.
|
||||
func (b *LocalBackend) HandleIngressTCPConn(ingressPeer tailcfg.NodeView, target ipn.HostPort, srcAddr netip.AddrPort, getConnOrReset func() (net.Conn, bool), sendRST func()) {
|
||||
b.mu.Lock()
|
||||
sc := b.serveConfig
|
||||
b.mu.Unlock()
|
||||
|
||||
// TODO(maisem,bradfitz): make this not alloc for every conn.
|
||||
logf := logger.WithPrefix(b.logf, "handleIngress: ")
|
||||
|
||||
if !sc.Valid() {
|
||||
logf("got ingress conn w/o serveConfig; rejecting")
|
||||
sendRST()
|
||||
return
|
||||
}
|
||||
|
||||
if !sc.HasFunnelForTarget(target) {
|
||||
logf("got ingress conn for unconfigured %q; rejecting", target)
|
||||
sendRST()
|
||||
return
|
||||
}
|
||||
|
||||
host, port, err := net.SplitHostPort(string(target))
|
||||
if err != nil {
|
||||
logf("got ingress conn for bad target %q; rejecting", target)
|
||||
sendRST()
|
||||
return
|
||||
}
|
||||
port16, err := strconv.ParseUint(port, 10, 16)
|
||||
if err != nil {
|
||||
logf("got ingress conn for bad target %q; rejecting", target)
|
||||
sendRST()
|
||||
return
|
||||
}
|
||||
dport := uint16(port16)
|
||||
if b.getTCPHandlerForFunnelFlow != nil {
|
||||
handler := b.getTCPHandlerForFunnelFlow(srcAddr, dport)
|
||||
if handler != nil {
|
||||
c, ok := getConnOrReset()
|
||||
if !ok {
|
||||
logf("getConn didn't complete from %v to port %v", srcAddr, dport)
|
||||
return
|
||||
}
|
||||
handler(c)
|
||||
return
|
||||
}
|
||||
}
|
||||
handler := b.tcpHandlerForServe(dport, srcAddr, &funnelFlow{
|
||||
Host: host,
|
||||
IngressPeer: ingressPeer,
|
||||
})
|
||||
if handler == nil {
|
||||
logf("[unexpected] no matching ingress serve handler for %v to port %v", srcAddr, dport)
|
||||
sendRST()
|
||||
return
|
||||
}
|
||||
c, ok := getConnOrReset()
|
||||
if !ok {
|
||||
logf("getConn didn't complete from %v to port %v", srcAddr, dport)
|
||||
return
|
||||
}
|
||||
handler(c)
|
||||
}
|
||||
|
||||
// tcpHandlerForServe returns a handler for a TCP connection to be served via
|
||||
// the ipn.ServeConfig. The funnelFlow can be nil if this is not a funneled
|
||||
// connection.
|
||||
func (b *LocalBackend) tcpHandlerForServe(dport uint16, srcAddr netip.AddrPort, f *funnelFlow) (handler func(net.Conn) error) {
|
||||
b.mu.Lock()
|
||||
sc := b.serveConfig
|
||||
b.mu.Unlock()
|
||||
|
||||
if !sc.Valid() {
|
||||
return nil
|
||||
}
|
||||
|
||||
tcph, ok := sc.FindTCP(dport)
|
||||
if !ok {
|
||||
return nil
|
||||
}
|
||||
|
||||
if tcph.HTTPS() || tcph.HTTP() {
|
||||
hs := &http.Server{
|
||||
Handler: http.HandlerFunc(b.serveWebHandler),
|
||||
BaseContext: func(_ net.Listener) context.Context {
|
||||
return serveHTTPContextKey.WithValue(context.Background(), &serveHTTPContext{
|
||||
Funnel: f,
|
||||
SrcAddr: srcAddr,
|
||||
DestPort: dport,
|
||||
})
|
||||
},
|
||||
}
|
||||
if tcph.HTTPS() {
|
||||
hs.TLSConfig = &tls.Config{
|
||||
GetCertificate: b.getTLSServeCertForPort(dport),
|
||||
}
|
||||
return func(c net.Conn) error {
|
||||
return hs.ServeTLS(netutil.NewOneConnListener(c, nil), "", "")
|
||||
}
|
||||
}
|
||||
|
||||
return func(c net.Conn) error {
|
||||
return hs.Serve(netutil.NewOneConnListener(c, nil))
|
||||
}
|
||||
}
|
||||
|
||||
if backDst := tcph.TCPForward(); backDst != "" {
|
||||
return func(conn net.Conn) error {
|
||||
defer conn.Close()
|
||||
ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
|
||||
backConn, err := b.dialer.SystemDial(ctx, "tcp", backDst)
|
||||
cancel()
|
||||
if err != nil {
|
||||
b.logf("localbackend: failed to TCP proxy port %v (from %v) to %s: %v", dport, srcAddr, backDst, err)
|
||||
return nil
|
||||
}
|
||||
defer backConn.Close()
|
||||
if sni := tcph.TerminateTLS(); sni != "" {
|
||||
conn = tls.Server(conn, &tls.Config{
|
||||
GetCertificate: func(hi *tls.ClientHelloInfo) (*tls.Certificate, error) {
|
||||
ctx, cancel := context.WithTimeout(context.Background(), time.Minute)
|
||||
defer cancel()
|
||||
pair, err := b.GetCertPEM(ctx, sni)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
cert, err := tls.X509KeyPair(pair.CertPEM, pair.KeyPEM)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return &cert, nil
|
||||
},
|
||||
})
|
||||
}
|
||||
|
||||
// TODO(bradfitz): do the RegisterIPPortIdentity and
|
||||
// UnregisterIPPortIdentity stuff that netstack does
|
||||
errc := make(chan error, 1)
|
||||
go func() {
|
||||
_, err := io.Copy(backConn, conn)
|
||||
errc <- err
|
||||
}()
|
||||
go func() {
|
||||
_, err := io.Copy(conn, backConn)
|
||||
errc <- err
|
||||
}()
|
||||
return <-errc
|
||||
}
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func (b *LocalBackend) getServeHandler(r *http.Request) (_ ipn.HTTPHandlerView, at string, ok bool) {
|
||||
var z ipn.HTTPHandlerView // zero value
|
||||
|
||||
hostname := r.Host
|
||||
if r.TLS == nil {
|
||||
tcd := "." + b.Status().CurrentTailnet.MagicDNSSuffix
|
||||
if host, _, err := net.SplitHostPort(hostname); err == nil {
|
||||
hostname = host
|
||||
}
|
||||
if !strings.HasSuffix(hostname, tcd) {
|
||||
hostname += tcd
|
||||
}
|
||||
} else {
|
||||
hostname = r.TLS.ServerName
|
||||
}
|
||||
|
||||
sctx, ok := serveHTTPContextKey.ValueOk(r.Context())
|
||||
if !ok {
|
||||
b.logf("[unexpected] localbackend: no serveHTTPContext in request")
|
||||
return z, "", false
|
||||
}
|
||||
wsc, ok := b.webServerConfig(hostname, sctx.DestPort)
|
||||
if !ok {
|
||||
return z, "", false
|
||||
}
|
||||
|
||||
if h, ok := wsc.Handlers().GetOk(r.URL.Path); ok {
|
||||
return h, r.URL.Path, true
|
||||
}
|
||||
pth := path.Clean(r.URL.Path)
|
||||
for {
|
||||
withSlash := pth + "/"
|
||||
if h, ok := wsc.Handlers().GetOk(withSlash); ok {
|
||||
return h, withSlash, true
|
||||
}
|
||||
if h, ok := wsc.Handlers().GetOk(pth); ok {
|
||||
return h, pth, true
|
||||
}
|
||||
if pth == "/" {
|
||||
return z, "", false
|
||||
}
|
||||
pth = path.Dir(pth)
|
||||
}
|
||||
}
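// Worked example (illustrative, not part of the vendored source), assuming a
// handler is mounted at the hypothetical path "/a/": for a request to
// "/a/b/c" the loop above first misses "/a/b/c/" and "/a/b/c", walks up to
// "/a/b" (miss again), and then matches "/a/" via the withSlash check,
// returning that handler with "/a/" as the mount point.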
|
||||
|
||||
// proxyHandlerForBackend creates a new HTTP reverse proxy for a particular backend that
|
||||
// we serve requests for. `backend` is an HTTPHandler.Proxy string (URL, host:port, or just a port).
|
||||
func (b *LocalBackend) proxyHandlerForBackend(backend string) (http.Handler, error) {
|
||||
targetURL, insecure := expandProxyArg(backend)
|
||||
u, err := url.Parse(targetURL)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("invalid url %s: %w", targetURL, err)
|
||||
}
|
||||
p := &reverseProxy{
|
||||
logf: b.logf,
|
||||
url: u,
|
||||
insecure: insecure,
|
||||
backend: backend,
|
||||
lb: b,
|
||||
}
|
||||
return p, nil
|
||||
}
|
||||
|
||||
// reverseProxy is a proxy that forwards a request to a backend host
|
||||
// (preconfigured via ipn.ServeConfig). If the host is configured with
|
||||
// https+insecure prefix, the connection between the proxy and the backend
// skips TLS certificate verification. If the backend host has an http prefix
// and the incoming request has an application/grpc content type header, the
// connection will be over h2c.
|
||||
// Otherwise standard Go http transport will be used.
|
||||
type reverseProxy struct {
|
||||
logf logger.Logf
|
||||
url *url.URL
|
||||
// insecure tracks whether the connection to an https backend should be
|
||||
// insecure (i.e because we cannot verify its CA).
|
||||
insecure bool
|
||||
backend string
|
||||
lb *LocalBackend
|
||||
httpTransport lazy.SyncValue[*http.Transport] // transport for non-h2c backends
|
||||
h2cTransport lazy.SyncValue[*http2.Transport] // transport for h2c backends
|
||||
// closed tracks whether proxy is closed/currently closing.
|
||||
closed atomic.Bool
|
||||
}
|
||||
|
||||
// close ensures that any open backend connections get closed.
|
||||
func (rp *reverseProxy) close() {
|
||||
rp.closed.Store(true)
|
||||
if h2cT := rp.h2cTransport.Get(func() *http2.Transport {
|
||||
return nil
|
||||
}); h2cT != nil {
|
||||
h2cT.CloseIdleConnections()
|
||||
}
|
||||
if httpTransport := rp.httpTransport.Get(func() *http.Transport {
|
||||
return nil
|
||||
}); httpTransport != nil {
|
||||
httpTransport.CloseIdleConnections()
|
||||
}
|
||||
}
|
||||
|
||||
func (rp *reverseProxy) ServeHTTP(w http.ResponseWriter, r *http.Request) {
|
||||
if closed := rp.closed.Load(); closed {
|
||||
rp.logf("received a request for a proxy that's being closed or has been closed")
|
||||
http.Error(w, "proxy is closed", http.StatusServiceUnavailable)
|
||||
return
|
||||
}
|
||||
p := &httputil.ReverseProxy{Rewrite: func(r *httputil.ProxyRequest) {
|
||||
oldOutPath := r.Out.URL.Path
|
||||
r.SetURL(rp.url)
|
||||
|
||||
// If mount point matches the request path exactly, the outbound
|
||||
// request URL was set to empty string in serveWebHandler which
|
||||
// would have resulted in the outbound path set to <proxy path>
|
||||
// + '/' in SetURL. In that case, if the proxy path was set, we
|
||||
// want to send the request to the <proxy path> (without the
|
||||
// '/').
|
||||
if oldOutPath == "" && rp.url.Path != "" {
|
||||
r.Out.URL.Path = rp.url.Path
|
||||
r.Out.URL.RawPath = rp.url.RawPath
|
||||
}
|
||||
|
||||
r.Out.Host = r.In.Host
|
||||
addProxyForwardedHeaders(r)
|
||||
rp.lb.addTailscaleIdentityHeaders(r)
|
||||
}}
|
||||
|
||||
// There is no way to autodetect h2c as per RFC 9113
|
||||
// https://datatracker.ietf.org/doc/html/rfc9113#name-starting-http-2.
|
||||
// However, we assume that http:// proxy prefix in combination with the
|
||||
// protocol being HTTP/2 is sufficient to detect h2c for our needs. Only use
// this for gRPC, to work around a known problem with plaintext gRPC backends.
|
||||
if rp.shouldProxyViaH2C(r) {
|
||||
rp.logf("received a proxy request for plaintext gRPC")
|
||||
p.Transport = rp.getH2CTransport()
|
||||
} else {
|
||||
p.Transport = rp.getTransport()
|
||||
}
|
||||
p.ServeHTTP(w, r)
|
||||
}
|
||||
|
||||
// getTransport returns the Transport used for regular (non-GRPC) requests
|
||||
// to the backend. The Transport gets created lazily, at most once.
|
||||
func (rp *reverseProxy) getTransport() *http.Transport {
|
||||
return rp.httpTransport.Get(func() *http.Transport {
|
||||
return &http.Transport{
|
||||
DialContext: rp.lb.dialer.SystemDial,
|
||||
TLSClientConfig: &tls.Config{
|
||||
InsecureSkipVerify: rp.insecure,
|
||||
},
|
||||
// Values for the following parameters have been copied from http.DefaultTransport.
|
||||
ForceAttemptHTTP2: true,
|
||||
MaxIdleConns: 100,
|
||||
IdleConnTimeout: 90 * time.Second,
|
||||
TLSHandshakeTimeout: 10 * time.Second,
|
||||
ExpectContinueTimeout: 1 * time.Second,
|
||||
}
|
||||
})
|
||||
}
|
||||
|
||||
// getH2CTransport returns the Transport used for GRPC requests to the backend.
|
||||
// The Transport gets created lazily, at most once.
|
||||
func (rp *reverseProxy) getH2CTransport() *http2.Transport {
|
||||
return rp.h2cTransport.Get(func() *http2.Transport {
|
||||
return &http2.Transport{
|
||||
AllowHTTP: true,
|
||||
DialTLSContext: func(ctx context.Context, network string, addr string, _ *tls.Config) (net.Conn, error) {
|
||||
return rp.lb.dialer.SystemDial(ctx, "tcp", rp.url.Host)
|
||||
},
|
||||
}
|
||||
})
|
||||
}
|
||||
|
||||
// This is not a generally reliable way to determine whether a request is for
// an h2c server, but it is sufficient for our particular use case.
|
||||
func (rp *reverseProxy) shouldProxyViaH2C(r *http.Request) bool {
|
||||
contentType := r.Header.Get(contentTypeHeader)
|
||||
return r.ProtoMajor == 2 && strings.HasPrefix(rp.backend, "http://") && isGRPCContentType(contentType)
|
||||
}
|
||||
|
||||
// isGRPCContentType accepts an HTTP request's content type header value and determines
|
||||
// whether this is gRPC content. grpc-go considers a value that equals
|
||||
// application/grpc or has a prefix of application/grpc+ or application/grpc; a
|
||||
// valid grpc content type header.
|
||||
// https://github.com/grpc/grpc-go/blob/v1.60.0-dev/internal/grpcutil/method.go#L41-L78
|
||||
func isGRPCContentType(contentType string) bool {
|
||||
s, ok := strings.CutPrefix(contentType, grpcBaseContentType)
|
||||
return ok && (len(s) == 0 || s[0] == '+' || s[0] == ';')
|
||||
}
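// Illustrative sketch (not part of the vendored source) of inputs this
// accepts and rejects, following the grpc-go rules referenced above:
//
//	isGRPCContentType("application/grpc")       // true
//	isGRPCContentType("application/grpc+proto") // true
//	isGRPCContentType("application/grpc;v=1")   // true
//	isGRPCContentType("application/json")       // false
//	isGRPCContentType("application/grpc-web")   // false ('-' is neither '+' nor ';')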
|
||||
|
||||
func addProxyForwardedHeaders(r *httputil.ProxyRequest) {
|
||||
r.Out.Header.Set("X-Forwarded-Host", r.In.Host)
|
||||
if r.In.TLS != nil {
|
||||
r.Out.Header.Set("X-Forwarded-Proto", "https")
|
||||
}
|
||||
if c, ok := serveHTTPContextKey.ValueOk(r.Out.Context()); ok {
|
||||
r.Out.Header.Set("X-Forwarded-For", c.SrcAddr.Addr().String())
|
||||
}
|
||||
}
|
||||
|
||||
func (b *LocalBackend) addTailscaleIdentityHeaders(r *httputil.ProxyRequest) {
|
||||
// Clear any incoming values squatting in the headers.
|
||||
r.Out.Header.Del("Tailscale-User-Login")
|
||||
r.Out.Header.Del("Tailscale-User-Name")
|
||||
r.Out.Header.Del("Tailscale-User-Profile-Pic")
|
||||
r.Out.Header.Del("Tailscale-Funnel-Request")
|
||||
r.Out.Header.Del("Tailscale-Headers-Info")
|
||||
|
||||
c, ok := serveHTTPContextKey.ValueOk(r.Out.Context())
|
||||
if !ok {
|
||||
return
|
||||
}
|
||||
if c.Funnel != nil {
|
||||
r.Out.Header.Set("Tailscale-Funnel-Request", "?1")
|
||||
return
|
||||
}
|
||||
node, user, ok := b.WhoIs("tcp", c.SrcAddr)
|
||||
if !ok {
|
||||
return // traffic from outside of Tailnet (funneled or local machine)
|
||||
}
|
||||
if node.IsTagged() {
|
||||
// 2023-06-14: Not setting identity headers for tagged nodes.
|
||||
// Only currently set for nodes with user identities.
|
||||
return
|
||||
}
|
||||
r.Out.Header.Set("Tailscale-User-Login", encTailscaleHeaderValue(user.LoginName))
|
||||
r.Out.Header.Set("Tailscale-User-Name", encTailscaleHeaderValue(user.DisplayName))
|
||||
r.Out.Header.Set("Tailscale-User-Profile-Pic", user.ProfilePicURL)
|
||||
r.Out.Header.Set("Tailscale-Headers-Info", "https://tailscale.com/s/serve-headers")
|
||||
}
|
||||
|
||||
// encTailscaleHeaderValue cleans or encodes as necessary v, to be suitable in
|
||||
// an HTTP header value. See
|
||||
// https://github.com/tailscale/tailscale/issues/11603.
|
||||
//
|
||||
// If v is not a valid UTF-8 string, it returns an empty string.
|
||||
// If v is a valid ASCII string, it returns v unmodified.
|
||||
// If v is a valid UTF-8 string with non-ASCII characters, it returns a
|
||||
// RFC 2047 Q-encoded string.
|
||||
func encTailscaleHeaderValue(v string) string {
|
||||
if !utf8.ValidString(v) {
|
||||
return ""
|
||||
}
|
||||
return mime.QEncoding.Encode("utf-8", v)
|
||||
}
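// Illustrative sketch (not part of the vendored source): ASCII values pass
// through unchanged, valid UTF-8 with non-ASCII characters becomes an
// RFC 2047 Q-encoded word, and invalid UTF-8 is dropped:
//
//	encTailscaleHeaderValue("alice@example.com") // "alice@example.com"
//	encTailscaleHeaderValue("José")              // an encoded word like "=?utf-8?q?Jos=C3=A9?="
//	encTailscaleHeaderValue("\xff\xfe")          // ""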
|
||||
|
||||
// serveWebHandler is an http.HandlerFunc that maps incoming requests to the
|
||||
// correct serve handler.
|
||||
func (b *LocalBackend) serveWebHandler(w http.ResponseWriter, r *http.Request) {
|
||||
h, mountPoint, ok := b.getServeHandler(r)
|
||||
if !ok {
|
||||
http.NotFound(w, r)
|
||||
return
|
||||
}
|
||||
if s := h.Text(); s != "" {
|
||||
w.Header().Set("Content-Type", "text/plain; charset=utf-8")
|
||||
io.WriteString(w, s)
|
||||
return
|
||||
}
|
||||
if v := h.Path(); v != "" {
|
||||
b.serveFileOrDirectory(w, r, v, mountPoint)
|
||||
return
|
||||
}
|
||||
if v := h.Proxy(); v != "" {
|
||||
p, ok := b.serveProxyHandlers.Load(v)
|
||||
if !ok {
|
||||
http.Error(w, "unknown proxy destination", http.StatusInternalServerError)
|
||||
return
|
||||
}
|
||||
h := p.(http.Handler)
|
||||
// Trim the mount point from the URL path before proxying. (#6571)
|
||||
if r.URL.Path != "/" {
|
||||
h = http.StripPrefix(strings.TrimSuffix(mountPoint, "/"), h)
|
||||
}
|
||||
h.ServeHTTP(w, r)
|
||||
return
|
||||
}
|
||||
|
||||
http.Error(w, "empty handler", 500)
|
||||
}
|
||||
|
||||
func (b *LocalBackend) serveFileOrDirectory(w http.ResponseWriter, r *http.Request, fileOrDir, mountPoint string) {
|
||||
fi, err := os.Stat(fileOrDir)
|
||||
if err != nil {
|
||||
if os.IsNotExist(err) {
|
||||
http.NotFound(w, r)
|
||||
return
|
||||
}
|
||||
b.logf("error calling stat on %s: %v", fileOrDir, err)
|
||||
http.Error(w, "an error occurred reading the file or directory", 500)
|
||||
return
|
||||
}
|
||||
if fi.Mode().IsRegular() {
|
||||
if mountPoint != r.URL.Path {
|
||||
http.NotFound(w, r)
|
||||
return
|
||||
}
|
||||
f, err := os.Open(fileOrDir)
|
||||
if err != nil {
|
||||
b.logf("error opening %s: %v", fileOrDir, err)
|
||||
http.Error(w, "an error occurred reading the file or directory", 500)
|
||||
return
|
||||
}
|
||||
defer f.Close()
|
||||
http.ServeContent(w, r, path.Base(mountPoint), fi.ModTime(), f)
|
||||
return
|
||||
}
|
||||
if !fi.IsDir() {
|
||||
http.Error(w, "not a file or directory", 500)
|
||||
return
|
||||
}
|
||||
if len(r.URL.Path) < len(mountPoint) && r.URL.Path+"/" == mountPoint {
|
||||
http.Redirect(w, r, mountPoint, http.StatusFound)
|
||||
return
|
||||
}
|
||||
|
||||
var fs http.Handler = http.FileServer(http.Dir(fileOrDir))
|
||||
if mountPoint != "/" {
|
||||
fs = http.StripPrefix(strings.TrimSuffix(mountPoint, "/"), fs)
|
||||
}
|
||||
fs.ServeHTTP(&fixLocationHeaderResponseWriter{
|
||||
ResponseWriter: w,
|
||||
mountPoint: mountPoint,
|
||||
}, r)
|
||||
}
|
||||
|
||||
// fixLocationHeaderResponseWriter is an http.ResponseWriter wrapper that, upon
|
||||
// flushing HTTP headers, prefixes any Location header with the mount point.
|
||||
type fixLocationHeaderResponseWriter struct {
|
||||
http.ResponseWriter
|
||||
mountPoint string
|
||||
fixOnce sync.Once // guards call to fix
|
||||
}
|
||||
|
||||
func (w *fixLocationHeaderResponseWriter) fix() {
|
||||
h := w.ResponseWriter.Header()
|
||||
if v := h.Get("Location"); v != "" {
|
||||
h.Set("Location", w.mountPoint+v)
|
||||
}
|
||||
}
|
||||
|
||||
func (w *fixLocationHeaderResponseWriter) WriteHeader(code int) {
|
||||
w.fixOnce.Do(w.fix)
|
||||
w.ResponseWriter.WriteHeader(code)
|
||||
}
|
||||
|
||||
func (w *fixLocationHeaderResponseWriter) Write(p []byte) (int, error) {
|
||||
w.fixOnce.Do(w.fix)
|
||||
return w.ResponseWriter.Write(p)
|
||||
}
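// Illustrative sketch (not part of the vendored source): if http.FileServer
// responds with a redirect such as "Location: /docs/" for a handler mounted
// at the hypothetical mount point "/share", fix rewrites the header to
// "Location: /share/docs/", keeping the client under the serve mount point.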
|
||||
|
||||
// expandProxyArg returns a URL from s, where s can be of form:
|
||||
//
|
||||
// * port number ("8080")
|
||||
// * host:port ("localhost:8080")
|
||||
// * full URL ("http://localhost:8080", in which case it's returned unchanged)
|
||||
// * insecure TLS ("https+insecure://127.0.0.1:4430")
|
||||
func expandProxyArg(s string) (targetURL string, insecureSkipVerify bool) {
|
||||
if s == "" {
|
||||
return "", false
|
||||
}
|
||||
if strings.HasPrefix(s, "http://") || strings.HasPrefix(s, "https://") {
|
||||
return s, false
|
||||
}
|
||||
if rest, ok := strings.CutPrefix(s, "https+insecure://"); ok {
|
||||
return "https://" + rest, true
|
||||
}
|
||||
if allNumeric(s) {
|
||||
return "http://127.0.0.1:" + s, false
|
||||
}
|
||||
return "http://" + s, false
|
||||
}
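// Illustrative sketch (not part of the vendored source) of the expansions
// described in the doc comment above:
//
//	expandProxyArg("8080")                            // "http://127.0.0.1:8080", false
//	expandProxyArg("localhost:8080")                  // "http://localhost:8080", false
//	expandProxyArg("http://localhost:8080")           // "http://localhost:8080", false (unchanged)
//	expandProxyArg("https+insecure://127.0.0.1:4430") // "https://127.0.0.1:4430", true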
|
||||
|
||||
func allNumeric(s string) bool {
|
||||
for i := range len(s) {
|
||||
if s[i] < '0' || s[i] > '9' {
|
||||
return false
|
||||
}
|
||||
}
|
||||
return s != ""
|
||||
}
|
||||
|
||||
func (b *LocalBackend) webServerConfig(hostname string, port uint16) (c ipn.WebServerConfigView, ok bool) {
|
||||
key := ipn.HostPort(fmt.Sprintf("%s:%v", hostname, port))
|
||||
|
||||
b.mu.Lock()
|
||||
defer b.mu.Unlock()
|
||||
|
||||
if !b.serveConfig.Valid() {
|
||||
return c, false
|
||||
}
|
||||
return b.serveConfig.FindWeb(key)
|
||||
}
|
||||
|
||||
func (b *LocalBackend) getTLSServeCertForPort(port uint16) func(hi *tls.ClientHelloInfo) (*tls.Certificate, error) {
|
||||
return func(hi *tls.ClientHelloInfo) (*tls.Certificate, error) {
|
||||
if hi == nil || hi.ServerName == "" {
|
||||
return nil, errors.New("no SNI ServerName")
|
||||
}
|
||||
_, ok := b.webServerConfig(hi.ServerName, port)
|
||||
if !ok {
|
||||
return nil, errors.New("no webserver configured for name/port")
|
||||
}
|
||||
|
||||
ctx, cancel := context.WithTimeout(context.Background(), time.Minute)
|
||||
defer cancel()
|
||||
pair, err := b.GetCertPEM(ctx, hi.ServerName)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
cert, err := tls.X509KeyPair(pair.CertPEM, pair.KeyPEM)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return &cert, nil
|
||||
}
|
||||
}
|
||||
232
vendor/tailscale.com/ipn/ipnlocal/ssh.go
generated
vendored
Normal file
@@ -0,0 +1,232 @@
|
||||
// Copyright (c) Tailscale Inc & AUTHORS
|
||||
// SPDX-License-Identifier: BSD-3-Clause
|
||||
|
||||
//go:build linux || (darwin && !ios) || freebsd || openbsd
|
||||
|
||||
package ipnlocal
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"crypto/ecdsa"
|
||||
"crypto/ed25519"
|
||||
"crypto/elliptic"
|
||||
"crypto/rand"
|
||||
"crypto/rsa"
|
||||
"crypto/x509"
|
||||
"encoding/pem"
|
||||
"errors"
|
||||
"fmt"
|
||||
"os"
|
||||
"os/exec"
|
||||
"path/filepath"
|
||||
"runtime"
|
||||
"slices"
|
||||
"strings"
|
||||
"sync"
|
||||
|
||||
"github.com/tailscale/golang-x-crypto/ssh"
|
||||
"go4.org/mem"
|
||||
"tailscale.com/tailcfg"
|
||||
"tailscale.com/util/lineread"
|
||||
"tailscale.com/util/mak"
|
||||
)
|
||||
|
||||
// keyTypes are the SSH key types that we either try to read from the
|
||||
// system's OpenSSH keys or try to generate for ourselves when not
|
||||
// running as root.
|
||||
var keyTypes = []string{"rsa", "ecdsa", "ed25519"}
|
||||
|
||||
// getSSHUsernames discovers and returns the list of usernames that are
|
||||
// potential Tailscale SSH user targets.
|
||||
//
|
||||
// Invariant: must not be called with b.mu held.
|
||||
func (b *LocalBackend) getSSHUsernames(req *tailcfg.C2NSSHUsernamesRequest) (*tailcfg.C2NSSHUsernamesResponse, error) {
|
||||
res := new(tailcfg.C2NSSHUsernamesResponse)
|
||||
if !b.tailscaleSSHEnabled() {
|
||||
return res, nil
|
||||
}
|
||||
|
||||
max := 10
|
||||
if req != nil && req.Max != 0 {
|
||||
max = req.Max
|
||||
}
|
||||
|
||||
add := func(u string) {
|
||||
if req != nil && req.Exclude[u] {
|
||||
return
|
||||
}
|
||||
switch u {
|
||||
case "nobody", "daemon", "sync":
|
||||
return
|
||||
}
|
||||
if slices.Contains(res.Usernames, u) {
|
||||
return
|
||||
}
|
||||
if len(res.Usernames) > max {
|
||||
// Enough for a hint.
|
||||
return
|
||||
}
|
||||
res.Usernames = append(res.Usernames, u)
|
||||
}
|
||||
|
||||
if opUser := b.operatorUserName(); opUser != "" {
|
||||
add(opUser)
|
||||
}
|
||||
|
||||
// Check popular usernames and see if they exist with a real shell.
|
||||
switch runtime.GOOS {
|
||||
case "darwin":
|
||||
out, err := exec.Command("dscl", ".", "list", "/Users").Output()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
lineread.Reader(bytes.NewReader(out), func(line []byte) error {
|
||||
line = bytes.TrimSpace(line)
|
||||
if len(line) == 0 || line[0] == '_' {
|
||||
return nil
|
||||
}
|
||||
add(string(line))
|
||||
return nil
|
||||
})
|
||||
default:
|
||||
lineread.File("/etc/passwd", func(line []byte) error {
|
||||
line = bytes.TrimSpace(line)
|
||||
if len(line) == 0 || line[0] == '#' || line[0] == '_' {
|
||||
return nil
|
||||
}
|
||||
if mem.HasSuffix(mem.B(line), mem.S("/nologin")) ||
|
||||
mem.HasSuffix(mem.B(line), mem.S("/false")) {
|
||||
return nil
|
||||
}
|
||||
colon := bytes.IndexByte(line, ':')
|
||||
if colon != -1 {
|
||||
add(string(line[:colon]))
|
||||
}
|
||||
return nil
|
||||
})
|
||||
}
|
||||
return res, nil
|
||||
}
|
||||
|
||||
func (b *LocalBackend) GetSSH_HostKeys() (keys []ssh.Signer, err error) {
|
||||
var existing map[string]ssh.Signer
|
||||
if os.Geteuid() == 0 {
|
||||
existing = b.getSystemSSH_HostKeys()
|
||||
}
|
||||
return b.getTailscaleSSH_HostKeys(existing)
|
||||
}
|
||||
|
||||
// getTailscaleSSH_HostKeys returns the three (rsa, ecdsa, ed25519) SSH host
|
||||
// keys, reusing the provided ones in existing if present in the map.
|
||||
func (b *LocalBackend) getTailscaleSSH_HostKeys(existing map[string]ssh.Signer) (keys []ssh.Signer, err error) {
|
||||
var keyDir string // lazily initialized $TAILSCALE_VAR/ssh dir.
|
||||
for _, typ := range keyTypes {
|
||||
if s, ok := existing[typ]; ok {
|
||||
keys = append(keys, s)
|
||||
continue
|
||||
}
|
||||
if keyDir == "" {
|
||||
root := b.TailscaleVarRoot()
|
||||
if root == "" {
|
||||
return nil, errors.New("no var root for ssh keys")
|
||||
}
|
||||
keyDir = filepath.Join(root, "ssh")
|
||||
if err := os.MkdirAll(keyDir, 0700); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
}
|
||||
hostKey, err := b.hostKeyFileOrCreate(keyDir, typ)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("error creating SSH host key type %q in %q: %w", typ, keyDir, err)
|
||||
}
|
||||
signer, err := ssh.ParsePrivateKey(hostKey)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("error parsing SSH host key type %q from %q: %w", typ, keyDir, err)
|
||||
}
|
||||
keys = append(keys, signer)
|
||||
}
|
||||
return keys, nil
|
||||
}
|
||||
|
||||
var keyGenMu sync.Mutex
|
||||
|
||||
func (b *LocalBackend) hostKeyFileOrCreate(keyDir, typ string) ([]byte, error) {
|
||||
keyGenMu.Lock()
|
||||
defer keyGenMu.Unlock()
|
||||
|
||||
path := filepath.Join(keyDir, "ssh_host_"+typ+"_key")
|
||||
v, err := os.ReadFile(path)
|
||||
if err == nil {
|
||||
return v, nil
|
||||
}
|
||||
if !os.IsNotExist(err) {
|
||||
return nil, err
|
||||
}
|
||||
var priv any
|
||||
switch typ {
|
||||
default:
|
||||
return nil, fmt.Errorf("unsupported key type %q", typ)
|
||||
case "ed25519":
|
||||
_, priv, err = ed25519.GenerateKey(rand.Reader)
|
||||
case "ecdsa":
|
||||
// curve is arbitrary. We pick whatever will at
|
||||
// least pacify clients as the actual encryption
|
||||
// doesn't matter: it's all over WireGuard anyway.
|
||||
curve := elliptic.P256()
|
||||
priv, err = ecdsa.GenerateKey(curve, rand.Reader)
|
||||
case "rsa":
|
||||
// keySize is arbitrary. We pick whatever will at
|
||||
// least pacify clients as the actual encryption
|
||||
// doesn't matter: it's all over WireGuard anyway.
|
||||
const keySize = 2048
|
||||
priv, err = rsa.GenerateKey(rand.Reader, keySize)
|
||||
}
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
mk, err := x509.MarshalPKCS8PrivateKey(priv)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
pemGen := pem.EncodeToMemory(&pem.Block{Type: "PRIVATE KEY", Bytes: mk})
|
||||
err = os.WriteFile(path, pemGen, 0700)
|
||||
return pemGen, err
|
||||
}
|
||||
|
||||
func (b *LocalBackend) getSystemSSH_HostKeys() (ret map[string]ssh.Signer) {
|
||||
for _, typ := range keyTypes {
|
||||
filename := "/etc/ssh/ssh_host_" + typ + "_key"
|
||||
hostKey, err := os.ReadFile(filename)
|
||||
if err != nil || len(bytes.TrimSpace(hostKey)) == 0 {
|
||||
continue
|
||||
}
|
||||
signer, err := ssh.ParsePrivateKey(hostKey)
|
||||
if err != nil {
|
||||
b.logf("warning: error reading host key %s: %v (generating one instead)", filename, err)
|
||||
continue
|
||||
}
|
||||
mak.Set(&ret, typ, signer)
|
||||
}
|
||||
return ret
|
||||
}
|
||||
|
||||
func (b *LocalBackend) getSSHHostKeyPublicStrings() ([]string, error) {
|
||||
signers, err := b.GetSSH_HostKeys()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
var keyStrings []string
|
||||
for _, signer := range signers {
|
||||
keyStrings = append(keyStrings, strings.TrimSpace(string(ssh.MarshalAuthorizedKey(signer.PublicKey()))))
|
||||
}
|
||||
return keyStrings, nil
|
||||
}
|
||||
|
||||
// tailscaleSSHEnabled reports whether Tailscale SSH is currently enabled based
|
||||
// on prefs. It returns false if there are no prefs set.
|
||||
func (b *LocalBackend) tailscaleSSHEnabled() bool {
|
||||
b.mu.Lock()
|
||||
defer b.mu.Unlock()
|
||||
p := b.pm.CurrentPrefs()
|
||||
return p.Valid() && p.RunSSH()
|
||||
}
|
||||
20
vendor/tailscale.com/ipn/ipnlocal/ssh_stub.go
generated
vendored
Normal file
@@ -0,0 +1,20 @@
|
||||
// Copyright (c) Tailscale Inc & AUTHORS
|
||||
// SPDX-License-Identifier: BSD-3-Clause
|
||||
|
||||
//go:build ios || (!linux && !darwin && !freebsd && !openbsd)
|
||||
|
||||
package ipnlocal
|
||||
|
||||
import (
|
||||
"errors"
|
||||
|
||||
"tailscale.com/tailcfg"
|
||||
)
|
||||
|
||||
func (b *LocalBackend) getSSHHostKeyPublicStrings() ([]string, error) {
|
||||
return nil, nil
|
||||
}
|
||||
|
||||
func (b *LocalBackend) getSSHUsernames(*tailcfg.C2NSSHUsernamesRequest) (*tailcfg.C2NSSHUsernamesResponse, error) {
|
||||
return nil, errors.New("not implemented")
|
||||
}
|
||||
35
vendor/tailscale.com/ipn/ipnlocal/taildrop.go
generated
vendored
Normal file
@@ -0,0 +1,35 @@
|
||||
// Copyright (c) Tailscale Inc & AUTHORS
|
||||
// SPDX-License-Identifier: BSD-3-Clause
|
||||
|
||||
package ipnlocal
|
||||
|
||||
import (
|
||||
"maps"
|
||||
"slices"
|
||||
"strings"
|
||||
|
||||
"tailscale.com/ipn"
|
||||
)
|
||||
|
||||
// UpdateOutgoingFiles updates b.outgoingFiles to reflect the given updates and
|
||||
// sends an ipn.Notify with the full list of outgoingFiles.
|
||||
func (b *LocalBackend) UpdateOutgoingFiles(updates map[string]*ipn.OutgoingFile) {
|
||||
b.mu.Lock()
|
||||
if b.outgoingFiles == nil {
|
||||
b.outgoingFiles = make(map[string]*ipn.OutgoingFile, len(updates))
|
||||
}
|
||||
maps.Copy(b.outgoingFiles, updates)
|
||||
outgoingFiles := make([]*ipn.OutgoingFile, 0, len(b.outgoingFiles))
|
||||
for _, file := range b.outgoingFiles {
|
||||
outgoingFiles = append(outgoingFiles, file)
|
||||
}
|
||||
b.mu.Unlock()
|
||||
slices.SortFunc(outgoingFiles, func(a, b *ipn.OutgoingFile) int {
|
||||
t := a.Started.Compare(b.Started)
|
||||
if t != 0 {
|
||||
return t
|
||||
}
|
||||
return strings.Compare(a.Name, b.Name)
|
||||
})
|
||||
b.send(ipn.Notify{OutgoingFiles: outgoingFiles})
|
||||
}
|
||||
205
vendor/tailscale.com/ipn/ipnlocal/web_client.go
generated
vendored
Normal file
@@ -0,0 +1,205 @@
|
||||
// Copyright (c) Tailscale Inc & AUTHORS
|
||||
// SPDX-License-Identifier: BSD-3-Clause
|
||||
|
||||
//go:build !ios && !android
|
||||
|
||||
package ipnlocal
|
||||
|
||||
import (
|
||||
"context"
|
||||
"encoding/json"
|
||||
"errors"
|
||||
"fmt"
|
||||
"io"
|
||||
"net"
|
||||
"net/http"
|
||||
"net/netip"
|
||||
"sync"
|
||||
"time"
|
||||
|
||||
"tailscale.com/client/tailscale"
|
||||
"tailscale.com/client/web"
|
||||
"tailscale.com/logtail/backoff"
|
||||
"tailscale.com/net/netutil"
|
||||
"tailscale.com/tailcfg"
|
||||
"tailscale.com/types/logger"
|
||||
"tailscale.com/util/mak"
|
||||
)
|
||||
|
||||
const webClientPort = web.ListenPort
|
||||
|
||||
// webClient holds state for the web interface for managing this
|
||||
// tailscale instance. The web interface is not used by default,
|
||||
// but is initialized by calling LocalBackend.webClientGetOrInit.
|
||||
type webClient struct {
|
||||
mu sync.Mutex // protects webClient fields
|
||||
|
||||
server *web.Server // or nil, initialized lazily
|
||||
|
||||
// lc optionally specifies a LocalClient to use to connect
|
||||
// to the localapi for this tailscaled instance.
|
||||
// If nil, a default is used.
|
||||
lc *tailscale.LocalClient
|
||||
}
|
||||
|
||||
// ConfigureWebClient configures b.web prior to use.
|
||||
// Specifically, it sets b.web.lc to the provided LocalClient.
|
||||
// If provided as nil, b.web.lc is cleared out.
|
||||
func (b *LocalBackend) ConfigureWebClient(lc *tailscale.LocalClient) {
|
||||
b.webClient.mu.Lock()
|
||||
defer b.webClient.mu.Unlock()
|
||||
b.webClient.lc = lc
|
||||
}
|
||||
|
||||
// webClientGetOrInit gets or initializes the web server for managing
|
||||
// this tailscaled instance.
|
||||
// s is always non-nil if err is nil.
|
||||
func (b *LocalBackend) webClientGetOrInit() (s *web.Server, err error) {
|
||||
if !b.ShouldRunWebClient() {
|
||||
return nil, errors.New("web client not enabled for this device")
|
||||
}
|
||||
|
||||
b.webClient.mu.Lock()
|
||||
defer b.webClient.mu.Unlock()
|
||||
if b.webClient.server != nil {
|
||||
return b.webClient.server, nil
|
||||
}
|
||||
|
||||
b.logf("webClientGetOrInit: initializing web ui")
|
||||
if b.webClient.server, err = web.NewServer(web.ServerOpts{
|
||||
Mode: web.ManageServerMode,
|
||||
LocalClient: b.webClient.lc,
|
||||
Logf: b.logf,
|
||||
NewAuthURL: b.newWebClientAuthURL,
|
||||
WaitAuthURL: b.waitWebClientAuthURL,
|
||||
}); err != nil {
|
||||
return nil, fmt.Errorf("web.NewServer: %w", err)
|
||||
}
|
||||
|
||||
b.logf("webClientGetOrInit: started web ui")
|
||||
return b.webClient.server, nil
|
||||
}
|
||||
|
||||
// webClientShutdown shuts down any running b.webClient servers and
// clears out b.webClient state (besides the b.webClient.lc field,
// which is left untouched because it is required for future web startups).
// webClientShutdown obtains the b.mu lock.
|
||||
func (b *LocalBackend) webClientShutdown() {
|
||||
b.mu.Lock()
|
||||
for ap, ln := range b.webClientListeners {
|
||||
ln.Close()
|
||||
delete(b.webClientListeners, ap)
|
||||
}
|
||||
b.mu.Unlock()
|
||||
|
||||
b.webClient.mu.Lock() // webClient struct uses its own mutex
|
||||
server := b.webClient.server
|
||||
b.webClient.server = nil
|
||||
b.webClient.mu.Unlock() // release lock before shutdown
|
||||
if server != nil {
|
||||
server.Shutdown()
|
||||
b.logf("WebClientShutdown: shut down web ui")
|
||||
}
|
||||
}
|
||||
|
||||
// handleWebClientConn serves web client requests.
|
||||
func (b *LocalBackend) handleWebClientConn(c net.Conn) error {
|
||||
webServer, err := b.webClientGetOrInit()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
s := http.Server{Handler: webServer}
|
||||
return s.Serve(netutil.NewOneConnListener(c, nil))
|
||||
}
|
||||
|
||||
// updateWebClientListenersLocked creates listeners on the web client port (5252)
|
||||
// for each of the local device's Tailscale IP addresses. This is needed to properly
|
||||
// route local traffic when using kernel networking mode.
|
||||
func (b *LocalBackend) updateWebClientListenersLocked() {
|
||||
if b.netMap == nil {
|
||||
return
|
||||
}
|
||||
|
||||
addrs := b.netMap.GetAddresses()
|
||||
for i := range addrs.Len() {
|
||||
addrPort := netip.AddrPortFrom(addrs.At(i).Addr(), webClientPort)
|
||||
if _, ok := b.webClientListeners[addrPort]; ok {
|
||||
continue // already listening
|
||||
}
|
||||
|
||||
sl := b.newWebClientListener(context.Background(), addrPort, b.logf)
|
||||
mak.Set(&b.webClientListeners, addrPort, sl)
|
||||
|
||||
go sl.Run()
|
||||
}
|
||||
}
|
||||
|
||||
// newWebClientListener returns a listener for local connections to the built-in web client
|
||||
// used to manage this Tailscale instance.
|
||||
func (b *LocalBackend) newWebClientListener(ctx context.Context, ap netip.AddrPort, logf logger.Logf) *localListener {
|
||||
ctx, cancel := context.WithCancel(ctx)
|
||||
return &localListener{
|
||||
b: b,
|
||||
ap: ap,
|
||||
ctx: ctx,
|
||||
cancel: cancel,
|
||||
logf: logf,
|
||||
|
||||
handler: b.handleWebClientConn,
|
||||
bo: backoff.NewBackoff("webclient-listener", logf, 30*time.Second),
|
||||
}
|
||||
}
|
||||
|
||||
// newWebClientAuthURL talks to the control server to create a new auth
|
||||
// URL that can be used to validate a browser session to manage this
|
||||
// tailscaled instance via the web client.
|
||||
func (b *LocalBackend) newWebClientAuthURL(ctx context.Context, src tailcfg.NodeID) (*tailcfg.WebClientAuthResponse, error) {
|
||||
return b.doWebClientNoiseRequest(ctx, "", src)
|
||||
}
|
||||
|
||||
// waitWebClientAuthURL connects to the control server and blocks
|
||||
// until the associated auth URL has been completed by its user,
|
||||
// or until ctx is canceled.
|
||||
func (b *LocalBackend) waitWebClientAuthURL(ctx context.Context, id string, src tailcfg.NodeID) (*tailcfg.WebClientAuthResponse, error) {
|
||||
return b.doWebClientNoiseRequest(ctx, id, src)
|
||||
}
|
||||
|
||||
// doWebClientNoiseRequest handles making the "/machine/webclient"
|
||||
// noise requests to the control server for web client user auth.
|
||||
//
|
||||
// It either creates a new control auth URL or waits for an existing
|
||||
// one to be completed, based on the presence or absence of the
|
||||
// provided id value.
|
||||
func (b *LocalBackend) doWebClientNoiseRequest(ctx context.Context, id string, src tailcfg.NodeID) (*tailcfg.WebClientAuthResponse, error) {
|
||||
nm := b.NetMap()
|
||||
if nm == nil || !nm.SelfNode.Valid() {
|
||||
return nil, errors.New("[unexpected] no self node")
|
||||
}
|
||||
dst := nm.SelfNode.ID()
|
||||
var noiseURL string
|
||||
if id != "" {
|
||||
noiseURL = fmt.Sprintf("https://unused/machine/webclient/wait/%d/to/%d/%s", src, dst, id)
|
||||
} else {
|
||||
noiseURL = fmt.Sprintf("https://unused/machine/webclient/init/%d/to/%d", src, dst)
|
||||
}
|
||||
|
||||
req, err := http.NewRequestWithContext(ctx, "POST", noiseURL, nil)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
resp, err := b.DoNoiseRequest(req)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
body, _ := io.ReadAll(resp.Body)
|
||||
resp.Body.Close()
|
||||
if resp.StatusCode != http.StatusOK {
|
||||
return nil, fmt.Errorf("failed request: %s", body)
|
||||
}
|
||||
var authResp *tailcfg.WebClientAuthResponse
|
||||
if err := json.Unmarshal(body, &authResp); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return authResp, nil
|
||||
}
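
For reference, handleWebClientConn above serves HTTP over a single already-accepted connection by wrapping it in a one-shot listener. The sketch below reproduces that pattern with only the Go standard library; the oneConnListener type here is a hypothetical stand-in for the vendored netutil.NewOneConnListener helper, not its actual implementation.

package main

import (
	"bufio"
	"fmt"
	"io"
	"net"
	"net/http"
)

// oneConnListener hands out one pre-accepted connection from Accept and then
// reports itself as closed. Hypothetical stand-in for netutil.NewOneConnListener.
type oneConnListener struct{ ch chan net.Conn }

func newOneConnListener(c net.Conn) *oneConnListener {
	ch := make(chan net.Conn, 1)
	ch <- c
	close(ch)
	return &oneConnListener{ch: ch}
}

func (l *oneConnListener) Accept() (net.Conn, error) {
	if c, ok := <-l.ch; ok {
		return c, nil
	}
	return nil, net.ErrClosed
}

func (l *oneConnListener) Close() error   { return nil }
func (l *oneConnListener) Addr() net.Addr { return &net.TCPAddr{} }

func main() {
	serverConn, clientConn := net.Pipe()

	// Serve HTTP on exactly one connection, as handleWebClientConn does with
	// the web client's handler.
	srv := http.Server{Handler: http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		io.WriteString(w, "hello from the single-connection server\n")
	})}
	go srv.Serve(newOneConnListener(serverConn))

	// Drive the other end of the pipe with a raw HTTP/1.1 request.
	fmt.Fprint(clientConn, "GET / HTTP/1.1\r\nHost: example\r\n\r\n")
	resp, err := http.ReadResponse(bufio.NewReader(clientConn), nil)
	if err != nil {
		panic(err)
	}
	body, _ := io.ReadAll(resp.Body)
	fmt.Print(string(body))
}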
|
||||
30
vendor/tailscale.com/ipn/ipnlocal/web_client_stub.go
generated
vendored
Normal file
30
vendor/tailscale.com/ipn/ipnlocal/web_client_stub.go
generated
vendored
Normal file
@@ -0,0 +1,30 @@
|
||||
// Copyright (c) Tailscale Inc & AUTHORS
|
||||
// SPDX-License-Identifier: BSD-3-Clause
|
||||
|
||||
//go:build ios || android
|
||||
|
||||
package ipnlocal
|
||||
|
||||
import (
|
||||
"errors"
|
||||
"net"
|
||||
|
||||
"tailscale.com/client/tailscale"
|
||||
)
|
||||
|
||||
const webClientPort = 5252
|
||||
|
||||
type webClient struct{}
|
||||
|
||||
func (b *LocalBackend) ConfigureWebClient(lc *tailscale.LocalClient) {}
|
||||
|
||||
func (b *LocalBackend) webClientGetOrInit() error {
|
||||
return errors.New("not implemented")
|
||||
}
|
||||
|
||||
func (b *LocalBackend) webClientShutdown() {}
|
||||
|
||||
func (b *LocalBackend) handleWebClientConn(c net.Conn) error {
|
||||
return errors.New("not implemented")
|
||||
}
|
||||
func (b *LocalBackend) updateWebClientListenersLocked() {}
|
||||
767
vendor/tailscale.com/ipn/ipnstate/ipnstate.go
generated
vendored
Normal file
767
vendor/tailscale.com/ipn/ipnstate/ipnstate.go
generated
vendored
Normal file
@@ -0,0 +1,767 @@
|
||||
// Copyright (c) Tailscale Inc & AUTHORS
|
||||
// SPDX-License-Identifier: BSD-3-Clause
|
||||
|
||||
// Package ipnstate captures the entire state of the Tailscale network.
|
||||
//
|
||||
// It's a leaf package so ipn, wgengine, and magicsock can all depend on it.
|
||||
package ipnstate
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"html"
|
||||
"io"
|
||||
"log"
|
||||
"net/netip"
|
||||
"slices"
|
||||
"sort"
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
"tailscale.com/tailcfg"
|
||||
"tailscale.com/tka"
|
||||
"tailscale.com/types/key"
|
||||
"tailscale.com/types/ptr"
|
||||
"tailscale.com/types/views"
|
||||
"tailscale.com/util/dnsname"
|
||||
"tailscale.com/version"
|
||||
)
|
||||
|
||||
//go:generate go run tailscale.com/cmd/cloner -clonefunc=false -type=TKAPeer
|
||||
|
||||
// Status represents the entire state of the IPN network.
|
||||
type Status struct {
|
||||
// Version is the daemon's long version (see version.Long).
|
||||
Version string
|
||||
|
||||
// TUN is whether /dev/net/tun (or equivalent kernel interface) is being
|
||||
// used. If false, it's running in userspace mode.
|
||||
TUN bool
|
||||
|
||||
// BackendState is an ipn.State string value:
|
||||
// "NoState", "NeedsLogin", "NeedsMachineAuth", "Stopped",
|
||||
// "Starting", "Running".
|
||||
BackendState string
|
||||
|
||||
// HaveNodeKey is whether the current profile has a node key configured.
|
||||
HaveNodeKey bool `json:",omitempty"`
|
||||
|
||||
AuthURL string // current URL provided by control to authorize client
|
||||
TailscaleIPs []netip.Addr // Tailscale IP(s) assigned to this node
|
||||
Self *PeerStatus
|
||||
|
||||
// ExitNodeStatus describes the current exit node.
|
||||
// If nil, an exit node is not in use.
|
||||
ExitNodeStatus *ExitNodeStatus `json:"ExitNodeStatus,omitempty"`
|
||||
|
||||
// Health contains health check problems.
|
||||
// Empty means everything is good. (or at least that no known
|
||||
// problems are detected)
|
||||
Health []string
|
||||
|
||||
// This field is the legacy name of CurrentTailnet.MagicDNSSuffix.
|
||||
//
|
||||
// Deprecated: use CurrentTailnet.MagicDNSSuffix instead.
|
||||
MagicDNSSuffix string
|
||||
|
||||
// CurrentTailnet is information about the tailnet that the node
|
||||
// is currently connected to. When not connected, this field is nil.
|
||||
CurrentTailnet *TailnetStatus
|
||||
|
||||
// CertDomains are the set of DNS names for which the control
|
||||
// plane server will assist with provisioning TLS
|
||||
// certificates. See SetDNSRequest for dns-01 ACME challenges
|
||||
// for e.g. LetsEncrypt. These names are FQDNs without
|
||||
// trailing periods, and without any "_acme-challenge." prefix.
|
||||
CertDomains []string
|
||||
|
||||
// Peer is the state of each peer, keyed by each peer's current public key.
|
||||
Peer map[key.NodePublic]*PeerStatus
|
||||
|
||||
// User contains profile information about UserIDs referenced by
|
||||
// PeerStatus.UserID, PeerStatus.AltSharerUserID, etc.
|
||||
User map[tailcfg.UserID]tailcfg.UserProfile
|
||||
|
||||
// ClientVersion, when non-nil, contains information about the latest
|
||||
// version of the Tailscale client that's available. Depending on
|
||||
// the platform and client settings, it may not be available.
|
||||
ClientVersion *tailcfg.ClientVersion
|
||||
}
|
||||
|
||||
// TKAKey describes a key trusted by network lock.
|
||||
type TKAKey struct {
|
||||
Key key.NLPublic
|
||||
Metadata map[string]string
|
||||
Votes uint
|
||||
}
|
||||
|
||||
// TKAPeer describes a peer and its network lock details.
|
||||
type TKAPeer struct {
|
||||
Name string // DNS
|
||||
ID tailcfg.NodeID
|
||||
StableID tailcfg.StableNodeID
|
||||
TailscaleIPs []netip.Addr // Tailscale IP(s) assigned to this node
|
||||
NodeKey key.NodePublic
|
||||
NodeKeySignature tka.NodeKeySignature
|
||||
}
|
||||
|
||||
// NetworkLockStatus represents whether network-lock is enabled,
|
||||
// along with details about the locally-known state of the tailnet
|
||||
// key authority.
|
||||
type NetworkLockStatus struct {
|
||||
// Enabled is true if network lock is enabled.
|
||||
Enabled bool
|
||||
|
||||
// Head describes the AUM hash of the leaf AUM. Head is nil
|
||||
// if network lock is not enabled.
|
||||
Head *[32]byte
|
||||
|
||||
// PublicKey describes the node's network-lock public key.
|
||||
// It may be zero if the node has not logged in.
|
||||
PublicKey key.NLPublic
|
||||
|
||||
// NodeKey describes the node's current node-key. This field is not
|
||||
// populated if the node is not operating (i.e. waiting for a login).
|
||||
NodeKey *key.NodePublic
|
||||
|
||||
// NodeKeySigned is true if our node is authorized by network-lock.
|
||||
NodeKeySigned bool
|
||||
|
||||
// NodeKeySignature is the current signature of this node's key.
|
||||
NodeKeySignature *tka.NodeKeySignature
|
||||
|
||||
// TrustedKeys describes the keys currently trusted to make changes
|
||||
// to network-lock.
|
||||
TrustedKeys []TKAKey
|
||||
|
||||
// VisiblePeers describes peers which are visible in the netmap that
|
||||
// have valid Tailnet Lock signatures.
|
||||
VisiblePeers []*TKAPeer
|
||||
|
||||
// FilteredPeers describes peers which were removed from the netmap
|
||||
// (i.e. no connectivity) because they failed tailnet lock
|
||||
// checks.
|
||||
FilteredPeers []*TKAPeer
|
||||
|
||||
// StateID is a nonce associated with the network lock authority,
|
||||
// generated upon enablement. This field is not populated if the
|
||||
// network lock is disabled.
|
||||
StateID uint64
|
||||
}
|
||||
|
||||
// NetworkLockUpdate describes a change to network-lock state.
|
||||
type NetworkLockUpdate struct {
|
||||
Hash [32]byte
|
||||
Change string // values of tka.AUMKind.String()
|
||||
|
||||
// Raw contains the serialized AUM. The AUM is sent in serialized
|
||||
// form to avoid transitive dependences bloating this package.
|
||||
Raw []byte
|
||||
}
|
||||
|
||||
// TailnetStatus is information about a Tailscale network ("tailnet").
|
||||
type TailnetStatus struct {
|
||||
// Name is the name of the network that's currently in use.
|
||||
Name string
|
||||
|
||||
// MagicDNSSuffix is the network's MagicDNS suffix for nodes
|
||||
// in the network such as "userfoo.tailscale.net".
|
||||
// There are no surrounding dots.
|
||||
// MagicDNSSuffix should be populated regardless of whether a domain
|
||||
// has MagicDNS enabled.
|
||||
MagicDNSSuffix string
|
||||
|
||||
// MagicDNSEnabled is whether or not the network has MagicDNS enabled.
|
||||
// Note that the current device may still not support MagicDNS if
|
||||
// `--accept-dns=false` was used.
|
||||
MagicDNSEnabled bool
|
||||
}
|
||||
|
||||
// ExitNodeStatus describes the current exit node.
|
||||
type ExitNodeStatus struct {
|
||||
// ID is the exit node's ID.
|
||||
ID tailcfg.StableNodeID
|
||||
|
||||
// Online is whether the exit node is alive.
|
||||
Online bool
|
||||
|
||||
// TailscaleIPs are the exit node's IP addresses assigned to the node.
|
||||
TailscaleIPs []netip.Prefix
|
||||
}
|
||||
|
||||
func (s *Status) Peers() []key.NodePublic {
|
||||
kk := make([]key.NodePublic, 0, len(s.Peer))
|
||||
for k := range s.Peer {
|
||||
kk = append(kk, k)
|
||||
}
|
||||
sort.Slice(kk, func(i, j int) bool { return kk[i].Less(kk[j]) })
|
||||
return kk
|
||||
}
|
||||
|
||||
type PeerStatusLite struct {
|
||||
// NodeKey is this peer's public node key.
|
||||
NodeKey key.NodePublic
|
||||
|
||||
// TxBytes/RxBytes are the total number of bytes transmitted to/received
|
||||
// from this peer.
|
||||
TxBytes, RxBytes int64
|
||||
|
||||
// LastHandshake is the last time a handshake succeeded with this peer. (Or
|
||||
// we got key confirmation via the first data message, which is
|
||||
// approximately the same thing.)
|
||||
//
|
||||
// The time.Time zero value means that no handshake has succeeded, at least
|
||||
// since this peer was last known to WireGuard. (Tailscale removes peers
|
||||
// from the wireguard config when they are idle.)
|
||||
LastHandshake time.Time
|
||||
}
|
||||
|
||||
// PeerStatus describes a peer node and its current state.
|
||||
type PeerStatus struct {
|
||||
ID tailcfg.StableNodeID
|
||||
PublicKey key.NodePublic
|
||||
HostName string // HostInfo's Hostname (not a DNS name or necessarily unique)
|
||||
|
||||
// DNSName is the Peer's FQDN. It ends with a dot.
|
||||
// It has the form "host.<MagicDNSSuffix>."
|
||||
DNSName string
|
||||
OS string // HostInfo.OS
|
||||
UserID tailcfg.UserID
|
||||
|
||||
// AltSharerUserID is the user who shared this node
|
||||
// if it's different than UserID. Otherwise it's zero.
|
||||
AltSharerUserID tailcfg.UserID `json:",omitempty"`
|
||||
|
||||
// TailscaleIPs are the IP addresses assigned to the node.
|
||||
TailscaleIPs []netip.Addr
|
||||
// AllowedIPs are IP addresses allowed to route to this node.
|
||||
AllowedIPs *views.Slice[netip.Prefix] `json:",omitempty"`
|
||||
|
||||
// Tags are the list of ACL tags applied to this node.
|
||||
// See tailscale.com/tailcfg#Node.Tags for more information.
|
||||
Tags *views.Slice[string] `json:",omitempty"`
|
||||
|
||||
// PrimaryRoutes are the routes this node is currently the primary
|
||||
// subnet router for, as determined by the control plane. It does
|
||||
// not include the IPs in TailscaleIPs.
|
||||
PrimaryRoutes *views.Slice[netip.Prefix] `json:",omitempty"`
|
||||
|
||||
// Endpoints:
|
||||
Addrs []string
|
||||
CurAddr string // one of Addrs, or unique if roaming
|
||||
Relay string // DERP region
|
||||
|
||||
RxBytes int64
|
||||
TxBytes int64
|
||||
Created time.Time // time registered with tailcontrol
|
||||
LastWrite time.Time // time last packet sent
|
||||
LastSeen time.Time // last seen to tailcontrol; only present if offline
|
||||
LastHandshake time.Time // with local wireguard
|
||||
Online bool // whether node is connected to the control plane
|
||||
ExitNode bool // true if this is the currently selected exit node.
|
||||
ExitNodeOption bool // true if this node can be an exit node (offered && approved)
|
||||
|
||||
// Active is whether the node was recently active. The
|
||||
// definition is somewhat fuzzy but historically and
|
||||
// currently means that there was some packet sent to this
|
||||
// peer in the past two minutes. That definition is subject to
|
||||
// change.
|
||||
Active bool
|
||||
|
||||
// PeerAPIURL are the URLs of the node's PeerAPI servers.
|
||||
PeerAPIURL []string
|
||||
|
||||
// Capabilities are capabilities that the node has.
|
||||
// They're free-form strings, but should be in the form of URLs/URIs
|
||||
// such as:
|
||||
// "https://tailscale.com/cap/is-admin"
|
||||
// "https://tailscale.com/cap/file-sharing"
|
||||
// "funnel"
|
||||
//
|
||||
// Deprecated: use CapMap instead. See https://github.com/tailscale/tailscale/issues/11508
|
||||
// Every value in Capabilities is also a key in CapMap, even if it
|
||||
// has no values in that map.
|
||||
Capabilities []tailcfg.NodeCapability `json:",omitempty"`
|
||||
|
||||
// CapMap is a map of capabilities to their values.
|
||||
CapMap tailcfg.NodeCapMap `json:",omitempty"`
|
||||
|
||||
// SSH_HostKeys are the node's SSH host keys, if known.
|
||||
SSH_HostKeys []string `json:"sshHostKeys,omitempty"`
|
||||
|
||||
// ShareeNode indicates this node exists in the netmap because
|
||||
// it's owned by a shared-to user and that node might connect
|
||||
// to us. These nodes should be hidden by "tailscale status"
|
||||
// etc by default.
|
||||
ShareeNode bool `json:",omitempty"`
|
||||
|
||||
// InNetworkMap means that this peer was seen in our latest network map.
|
||||
// In theory, all of InNetworkMap and InMagicSock and InEngine should all be true.
|
||||
InNetworkMap bool
|
||||
|
||||
// InMagicSock means that this peer is being tracked by magicsock.
|
||||
// In theory, all of InNetworkMap and InMagicSock and InEngine should all be true.
|
||||
InMagicSock bool
|
||||
|
||||
// InEngine means that this peer is tracked by the wireguard engine.
|
||||
// In theory, all of InNetworkMap and InMagicSock and InEngine should all be true.
|
||||
InEngine bool
|
||||
|
||||
// Expired means that this peer's node key has expired, based on either
|
||||
// information from control or optimistically set on the client if the
|
||||
// expiration time has passed.
|
||||
Expired bool `json:",omitempty"`
|
||||
|
||||
// KeyExpiry, if present, is the time at which the node key expired or
|
||||
// will expire.
|
||||
KeyExpiry *time.Time `json:",omitempty"`
|
||||
|
||||
Location *tailcfg.Location `json:",omitempty"`
|
||||
}
|
||||
|
||||
// HasCap reports whether ps has the given capability.
|
||||
func (ps *PeerStatus) HasCap(cap tailcfg.NodeCapability) bool {
|
||||
return ps.CapMap.Contains(cap)
|
||||
}
|
||||
|
||||
// IsTagged reports whether ps is tagged.
|
||||
func (ps *PeerStatus) IsTagged() bool {
|
||||
return ps.Tags != nil && ps.Tags.Len() > 0
|
||||
}
|
||||
|
||||
// StatusBuilder is a request to construct a Status. A new StatusBuilder is
|
||||
// passed to various subsystems which then call methods on it to populate state.
|
||||
// Call its Status method to return the final constructed Status.
|
||||
type StatusBuilder struct {
|
||||
WantPeers bool // whether caller wants peers
|
||||
|
||||
locked bool
|
||||
st Status
|
||||
}
|
||||
|
||||
// MutateStatus calls f with the status to mutate.
|
||||
//
|
||||
// It may not assume other fields of status are already populated, and
|
||||
// may not retain or write to the Status after f returns.
|
||||
func (sb *StatusBuilder) MutateStatus(f func(*Status)) {
|
||||
f(&sb.st)
|
||||
}
|
||||
|
||||
// Status returns the status that has been built up so far from previous
|
||||
// calls to MutateStatus, MutateSelfStatus, AddPeer, etc.
|
||||
func (sb *StatusBuilder) Status() *Status {
|
||||
sb.locked = true
|
||||
return &sb.st
|
||||
}
|
||||
|
||||
// MutateSelfStatus calls f with the PeerStatus of our own node to mutate.
|
||||
//
|
||||
// It may not assume other fields of status are already populated, and
|
||||
// may not retain or write to the Status after f returns.
|
||||
//
|
||||
// MutateSelfStatus acquires a lock, so f must not call back into sb.
|
||||
func (sb *StatusBuilder) MutateSelfStatus(f func(*PeerStatus)) {
|
||||
if sb.st.Self == nil {
|
||||
sb.st.Self = new(PeerStatus)
|
||||
}
|
||||
f(sb.st.Self)
|
||||
}
|
||||
|
||||
// AddUser adds a user profile to the status.
|
||||
func (sb *StatusBuilder) AddUser(id tailcfg.UserID, up tailcfg.UserProfile) {
|
||||
if sb.locked {
|
||||
log.Printf("[unexpected] ipnstate: AddUser after Locked")
|
||||
return
|
||||
}
|
||||
|
||||
if sb.st.User == nil {
|
||||
sb.st.User = make(map[tailcfg.UserID]tailcfg.UserProfile)
|
||||
}
|
||||
|
||||
sb.st.User[id] = up
|
||||
}
|
||||
|
||||
// AddTailscaleIP adds a Tailscale IP address to the status.
|
||||
func (sb *StatusBuilder) AddTailscaleIP(ip netip.Addr) {
|
||||
if sb.locked {
|
||||
log.Printf("[unexpected] ipnstate: AddIP after Locked")
|
||||
return
|
||||
}
|
||||
|
||||
sb.st.TailscaleIPs = append(sb.st.TailscaleIPs, ip)
|
||||
}
|
||||
|
||||
// AddPeer adds a peer node to the status.
|
||||
//
|
||||
// Its PeerStatus is mixed with any previous status already added.
|
||||
func (sb *StatusBuilder) AddPeer(peer key.NodePublic, st *PeerStatus) {
|
||||
if st == nil {
|
||||
panic("nil PeerStatus")
|
||||
}
|
||||
|
||||
if sb.locked {
|
||||
log.Printf("[unexpected] ipnstate: AddPeer after Locked")
|
||||
return
|
||||
}
|
||||
|
||||
if sb.st.Peer == nil {
|
||||
sb.st.Peer = make(map[key.NodePublic]*PeerStatus)
|
||||
}
|
||||
e, ok := sb.st.Peer[peer]
|
||||
if !ok {
|
||||
sb.st.Peer[peer] = st
|
||||
st.PublicKey = peer
|
||||
return
|
||||
}
|
||||
|
||||
if v := st.ID; v != "" {
|
||||
e.ID = v
|
||||
}
|
||||
if v := st.HostName; v != "" {
|
||||
e.HostName = v
|
||||
}
|
||||
if v := st.DNSName; v != "" {
|
||||
e.DNSName = v
|
||||
}
|
||||
if v := st.Relay; v != "" {
|
||||
e.Relay = v
|
||||
}
|
||||
if v := st.UserID; v != 0 {
|
||||
e.UserID = v
|
||||
}
|
||||
if v := st.AltSharerUserID; v != 0 {
|
||||
e.AltSharerUserID = v
|
||||
}
|
||||
if v := st.TailscaleIPs; v != nil {
|
||||
e.TailscaleIPs = v
|
||||
}
|
||||
if v := st.PrimaryRoutes; v != nil && !v.IsNil() {
|
||||
e.PrimaryRoutes = v
|
||||
}
|
||||
if v := st.AllowedIPs; v != nil && !v.IsNil() {
|
||||
e.AllowedIPs = v
|
||||
}
|
||||
if v := st.Tags; v != nil && !v.IsNil() {
|
||||
e.Tags = v
|
||||
}
|
||||
if v := st.OS; v != "" {
|
||||
e.OS = st.OS
|
||||
}
|
||||
if v := st.SSH_HostKeys; v != nil {
|
||||
e.SSH_HostKeys = v
|
||||
}
|
||||
if v := st.Addrs; v != nil {
|
||||
e.Addrs = v
|
||||
}
|
||||
if v := st.CurAddr; v != "" {
|
||||
e.CurAddr = v
|
||||
}
|
||||
if v := st.RxBytes; v != 0 {
|
||||
e.RxBytes = v
|
||||
}
|
||||
if v := st.TxBytes; v != 0 {
|
||||
e.TxBytes = v
|
||||
}
|
||||
if v := st.LastHandshake; !v.IsZero() {
|
||||
e.LastHandshake = v
|
||||
}
|
||||
if v := st.Created; !v.IsZero() {
|
||||
e.Created = v
|
||||
}
|
||||
if v := st.LastSeen; !v.IsZero() {
|
||||
e.LastSeen = v
|
||||
}
|
||||
if v := st.LastWrite; !v.IsZero() {
|
||||
e.LastWrite = v
|
||||
}
|
||||
if st.Online {
|
||||
e.Online = true
|
||||
}
|
||||
if st.InNetworkMap {
|
||||
e.InNetworkMap = true
|
||||
}
|
||||
if st.InMagicSock {
|
||||
e.InMagicSock = true
|
||||
}
|
||||
if st.InEngine {
|
||||
e.InEngine = true
|
||||
}
|
||||
if st.ExitNode {
|
||||
e.ExitNode = true
|
||||
}
|
||||
if st.ExitNodeOption {
|
||||
e.ExitNodeOption = true
|
||||
}
|
||||
if st.ShareeNode {
|
||||
e.ShareeNode = true
|
||||
}
|
||||
if st.Active {
|
||||
e.Active = true
|
||||
}
|
||||
if st.PeerAPIURL != nil {
|
||||
e.PeerAPIURL = st.PeerAPIURL
|
||||
}
|
||||
if st.Expired {
|
||||
e.Expired = true
|
||||
}
|
||||
if t := st.KeyExpiry; t != nil {
|
||||
e.KeyExpiry = ptr.To(*t)
|
||||
}
|
||||
if v := st.CapMap; v != nil {
|
||||
e.CapMap = v
|
||||
}
|
||||
if v := st.Capabilities; v != nil {
|
||||
e.Capabilities = v
|
||||
}
|
||||
e.Location = st.Location
|
||||
}
|
||||
|
||||
type StatusUpdater interface {
|
||||
UpdateStatus(*StatusBuilder)
|
||||
}
|
||||
|
||||
func (st *Status) WriteHTML(w io.Writer) {
|
||||
f := func(format string, args ...any) { fmt.Fprintf(w, format, args...) }
|
||||
|
||||
f(`<!DOCTYPE html>
|
||||
<html lang="en">
|
||||
<head>
|
||||
<meta name="viewport" content="width=device-width,initial-scale=1">
|
||||
<title>Tailscale State</title>
|
||||
<style>
|
||||
body { font-family: monospace; }
|
||||
.owner { text-decoration: underline; }
|
||||
.tailaddr { font-style: italic; }
|
||||
.acenter { text-align: center; }
|
||||
.aright { text-align: right; }
|
||||
table, th, td { border: 1px solid black; border-spacing : 0; border-collapse : collapse; }
|
||||
thead { background-color: #FFA500; }
|
||||
th, td { padding: 5px; }
|
||||
td { vertical-align: top; }
|
||||
table tbody tr:nth-child(even) td { background-color: #f5f5f5; }
|
||||
</style>
|
||||
</head>
|
||||
<body>
|
||||
<h1>Tailscale State</h1>
|
||||
`)
|
||||
|
||||
//f("<p><b>logid:</b> %s</p>\n", logid)
|
||||
//f("<p><b>opts:</b> <code>%s</code></p>\n", html.EscapeString(fmt.Sprintf("%+v", opts)))
|
||||
|
||||
ips := make([]string, 0, len(st.TailscaleIPs))
|
||||
for _, ip := range st.TailscaleIPs {
|
||||
ips = append(ips, ip.String())
|
||||
}
|
||||
f("<p>Tailscale IP: %s", strings.Join(ips, ", "))
|
||||
|
||||
f("<table>\n<thead>\n")
|
||||
f("<tr><th>Peer</th><th>OS</th><th>Node</th><th>Owner</th><th>Rx</th><th>Tx</th><th>Activity</th><th>Connection</th></tr>\n")
|
||||
f("</thead>\n<tbody>\n")
|
||||
|
||||
now := time.Now()
|
||||
|
||||
var peers []*PeerStatus
|
||||
for _, peer := range st.Peers() {
|
||||
ps := st.Peer[peer]
|
||||
if ps.ShareeNode {
|
||||
continue
|
||||
}
|
||||
peers = append(peers, ps)
|
||||
}
|
||||
SortPeers(peers)
|
||||
|
||||
for _, ps := range peers {
|
||||
var actAgo string
|
||||
if !ps.LastWrite.IsZero() {
|
||||
ago := now.Sub(ps.LastWrite)
|
||||
actAgo = ago.Round(time.Second).String() + " ago"
|
||||
if ago < 5*time.Minute {
|
||||
actAgo = "<b>" + actAgo + "</b>"
|
||||
}
|
||||
}
|
||||
var owner string
|
||||
if up, ok := st.User[ps.UserID]; ok {
|
||||
owner = up.LoginName
|
||||
if i := strings.Index(owner, "@"); i != -1 {
|
||||
owner = owner[:i]
|
||||
}
|
||||
}
|
||||
|
||||
hostName := dnsname.SanitizeHostname(ps.HostName)
|
||||
dnsName := dnsname.TrimSuffix(ps.DNSName, st.MagicDNSSuffix)
|
||||
if strings.EqualFold(dnsName, hostName) || ps.UserID != st.Self.UserID {
|
||||
hostName = ""
|
||||
}
|
||||
var hostNameHTML string
|
||||
if hostName != "" {
|
||||
hostNameHTML = "<br>" + html.EscapeString(hostName)
|
||||
}
|
||||
|
||||
var tailAddr string
|
||||
if len(ps.TailscaleIPs) > 0 {
|
||||
tailAddr = ps.TailscaleIPs[0].String()
|
||||
}
|
||||
f("<tr><td>%s</td><td class=acenter>%s</td>"+
|
||||
"<td><b>%s</b>%s<div class=\"tailaddr\">%s</div></td><td class=\"acenter owner\">%s</td><td class=\"aright\">%v</td><td class=\"aright\">%v</td><td class=\"aright\">%v</td>",
|
||||
ps.PublicKey.ShortString(),
|
||||
osEmoji(ps.OS),
|
||||
html.EscapeString(dnsName),
|
||||
hostNameHTML,
|
||||
tailAddr,
|
||||
html.EscapeString(owner),
|
||||
ps.RxBytes,
|
||||
ps.TxBytes,
|
||||
actAgo,
|
||||
)
|
||||
f("<td>")
|
||||
|
||||
if ps.Active {
|
||||
if ps.Relay != "" && ps.CurAddr == "" {
|
||||
f("relay <b>%s</b>", html.EscapeString(ps.Relay))
|
||||
} else if ps.CurAddr != "" {
|
||||
f("direct <b>%s</b>", html.EscapeString(ps.CurAddr))
|
||||
}
|
||||
}
|
||||
|
||||
f("</td>") // end Addrs
|
||||
|
||||
f("</tr>\n")
|
||||
}
|
||||
f("</tbody>\n</table>\n")
|
||||
f("</body>\n</html>\n")
|
||||
}
|
||||
|
||||
func osEmoji(os string) string {
|
||||
switch os {
|
||||
case "linux":
|
||||
return "🐧"
|
||||
case "macOS":
|
||||
return "🍎"
|
||||
case "windows":
|
||||
return "🖥️"
|
||||
case "iOS":
|
||||
return "📱"
|
||||
case "tvOS":
|
||||
return "🍎📺"
|
||||
case "android":
|
||||
return "🤖"
|
||||
case "freebsd":
|
||||
return "👿"
|
||||
case "openbsd":
|
||||
return "🐡"
|
||||
case "illumos":
|
||||
return "☀️"
|
||||
}
|
||||
return "👽"
|
||||
}
|
||||
|
||||
// PingResult contains response information for the "tailscale ping" subcommand,
|
||||
// saying how Tailscale can reach a Tailscale IP or subnet-routed IP.
|
||||
// See tailcfg.PingResponse for a related response that is sent back to control
|
||||
// for remote diagnostic pings.
|
||||
type PingResult struct {
|
||||
IP string // ping destination
|
||||
NodeIP string // Tailscale IP of node handling IP (different for subnet routers)
|
||||
NodeName string // DNS name base or (possibly not unique) hostname
|
||||
|
||||
Err string
|
||||
LatencySeconds float64
|
||||
|
||||
// Endpoint is the ip:port if direct UDP was used.
|
||||
// It is not currently set for TSMP pings.
|
||||
Endpoint string
|
||||
|
||||
// DERPRegionID is non-zero DERP region ID if DERP was used.
|
||||
// It is not currently set for TSMP pings.
|
||||
DERPRegionID int
|
||||
|
||||
// DERPRegionCode is the three-letter region code
|
||||
// corresponding to DERPRegionID.
|
||||
// It is not currently set for TSMP pings.
|
||||
DERPRegionCode string
|
||||
|
||||
// PeerAPIPort is set by TSMP ping responses for peers that
|
||||
// are running a peerapi server. This is the port they're
|
||||
// running the server on.
|
||||
PeerAPIPort uint16 `json:",omitempty"`
|
||||
|
||||
// PeerAPIURL is the URL that was hit for pings of type "peerapi" (tailcfg.PingPeerAPI).
|
||||
// It's of the form "http://ip:port" (or [ip]:port for IPv6).
|
||||
PeerAPIURL string `json:",omitempty"`
|
||||
|
||||
// IsLocalIP is whether the ping request error is due to it being
|
||||
// a ping to the local node.
|
||||
IsLocalIP bool `json:",omitempty"`
|
||||
|
||||
// TODO(bradfitz): details like whether port mapping was used on either side? (Once supported)
|
||||
}
|
||||
|
||||
func (pr *PingResult) ToPingResponse(pingType tailcfg.PingType) *tailcfg.PingResponse {
|
||||
return &tailcfg.PingResponse{
|
||||
Type: pingType,
|
||||
IP: pr.IP,
|
||||
NodeIP: pr.NodeIP,
|
||||
NodeName: pr.NodeName,
|
||||
Err: pr.Err,
|
||||
LatencySeconds: pr.LatencySeconds,
|
||||
Endpoint: pr.Endpoint,
|
||||
DERPRegionID: pr.DERPRegionID,
|
||||
DERPRegionCode: pr.DERPRegionCode,
|
||||
PeerAPIPort: pr.PeerAPIPort,
|
||||
IsLocalIP: pr.IsLocalIP,
|
||||
}
|
||||
}
|
||||
|
||||
// SortPeers sorts peers by either their DNS name, hostname, Tailscale IP,
|
||||
// or ultimately their current public key.
|
||||
func SortPeers(peers []*PeerStatus) {
|
||||
slices.SortStableFunc(peers, (*PeerStatus).compare)
|
||||
}
|
||||
|
||||
func (a *PeerStatus) compare(b *PeerStatus) int {
|
||||
if a.DNSName != "" || b.DNSName != "" {
|
||||
if v := strings.Compare(a.DNSName, b.DNSName); v != 0 {
|
||||
return v
|
||||
}
|
||||
}
|
||||
if a.HostName != "" || b.HostName != "" {
|
||||
if v := strings.Compare(a.HostName, b.HostName); v != 0 {
|
||||
return v
|
||||
}
|
||||
}
|
||||
if len(a.TailscaleIPs) > 0 && len(b.TailscaleIPs) > 0 {
|
||||
if v := a.TailscaleIPs[0].Compare(b.TailscaleIPs[0]); v != 0 {
|
||||
return v
|
||||
}
|
||||
}
|
||||
return a.PublicKey.Compare(b.PublicKey)
|
||||
}
|
||||
|
||||
// DebugDERPRegionReport is the result of a "tailscale debug derp" command,
|
||||
// to let people debug a custom DERP setup.
|
||||
type DebugDERPRegionReport struct {
|
||||
Info []string
|
||||
Warnings []string
|
||||
Errors []string
|
||||
}
|
||||
|
||||
type SelfUpdateStatus string
|
||||
|
||||
const (
|
||||
UpdateFinished SelfUpdateStatus = "UpdateFinished"
|
||||
UpdateInProgress SelfUpdateStatus = "UpdateInProgress"
|
||||
UpdateFailed SelfUpdateStatus = "UpdateFailed"
|
||||
)
|
||||
|
||||
type UpdateProgress struct {
|
||||
Status SelfUpdateStatus `json:"status,omitempty"`
|
||||
Message string `json:"message,omitempty"`
|
||||
Version string `json:"version,omitempty"`
|
||||
}
|
||||
|
||||
func NewUpdateProgress(ps SelfUpdateStatus, msg string) UpdateProgress {
|
||||
return UpdateProgress{
|
||||
Status: ps,
|
||||
Message: msg,
|
||||
Version: version.Short(),
|
||||
}
|
||||
}
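
As a usage note, the StatusBuilder above is meant to be filled in by several subsystems and then sealed with Status(). A minimal sketch of that flow, assuming the vendored tailscale.com module is importable (as it is in this tree); the host names and IPs are only illustrative:

package main

import (
	"fmt"
	"net/netip"

	"tailscale.com/ipn/ipnstate"
	"tailscale.com/types/key"
)

func main() {
	var sb ipnstate.StatusBuilder
	sb.WantPeers = true

	// One subsystem fills in backend-level fields...
	sb.MutateStatus(func(st *ipnstate.Status) {
		st.BackendState = "Running"
	})
	sb.AddTailscaleIP(netip.MustParseAddr("100.64.0.1"))

	// ...and others contribute per-peer state. Repeated AddPeer calls for the
	// same key are merged field by field, as the implementation above shows.
	peerKey := key.NewNode().Public()
	sb.AddPeer(peerKey, &ipnstate.PeerStatus{HostName: "example-peer", OS: "linux"})
	sb.AddPeer(peerKey, &ipnstate.PeerStatus{Online: true, Active: true})

	st := sb.Status() // seals the builder; later Add* calls are logged and ignored
	fmt.Println(st.BackendState, len(st.Peer), st.Peer[peerKey].HostName, st.Peer[peerKey].Online)
}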
|
||||
37
vendor/tailscale.com/ipn/ipnstate/ipnstate_clone.go
generated
vendored
Normal file
37
vendor/tailscale.com/ipn/ipnstate/ipnstate_clone.go
generated
vendored
Normal file
@@ -0,0 +1,37 @@
|
||||
// Copyright (c) Tailscale Inc & AUTHORS
|
||||
// SPDX-License-Identifier: BSD-3-Clause
|
||||
|
||||
// Code generated by tailscale.com/cmd/cloner; DO NOT EDIT.
|
||||
|
||||
package ipnstate
|
||||
|
||||
import (
|
||||
"net/netip"
|
||||
|
||||
"tailscale.com/tailcfg"
|
||||
"tailscale.com/tka"
|
||||
"tailscale.com/types/key"
|
||||
)
|
||||
|
||||
// Clone makes a deep copy of TKAPeer.
|
||||
// The result aliases no memory with the original.
|
||||
func (src *TKAPeer) Clone() *TKAPeer {
|
||||
if src == nil {
|
||||
return nil
|
||||
}
|
||||
dst := new(TKAPeer)
|
||||
*dst = *src
|
||||
dst.TailscaleIPs = append(src.TailscaleIPs[:0:0], src.TailscaleIPs...)
|
||||
dst.NodeKeySignature = *src.NodeKeySignature.Clone()
|
||||
return dst
|
||||
}
|
||||
|
||||
// A compilation failure here means this code must be regenerated, with the command at the top of this file.
|
||||
var _TKAPeerCloneNeedsRegeneration = TKAPeer(struct {
|
||||
Name string
|
||||
ID tailcfg.NodeID
|
||||
StableID tailcfg.StableNodeID
|
||||
TailscaleIPs []netip.Addr
|
||||
NodeKey key.NodePublic
|
||||
NodeKeySignature tka.NodeKeySignature
|
||||
}{})
|
||||
62
vendor/tailscale.com/ipn/localapi/cert.go
generated
vendored
Normal file
62
vendor/tailscale.com/ipn/localapi/cert.go
generated
vendored
Normal file
@@ -0,0 +1,62 @@
|
||||
// Copyright (c) Tailscale Inc & AUTHORS
|
||||
// SPDX-License-Identifier: BSD-3-Clause
|
||||
|
||||
//go:build !ios && !android && !js
|
||||
|
||||
package localapi
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"net/http"
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
"tailscale.com/ipn/ipnlocal"
|
||||
)
|
||||
|
||||
func (h *Handler) serveCert(w http.ResponseWriter, r *http.Request) {
|
||||
if !h.PermitWrite && !h.PermitCert {
|
||||
http.Error(w, "cert access denied", http.StatusForbidden)
|
||||
return
|
||||
}
|
||||
domain, ok := strings.CutPrefix(r.URL.Path, "/localapi/v0/cert/")
|
||||
if !ok {
|
||||
http.Error(w, "internal handler config wired wrong", 500)
|
||||
return
|
||||
}
|
||||
var minValidity time.Duration
|
||||
if minValidityStr := r.URL.Query().Get("min_validity"); minValidityStr != "" {
|
||||
var err error
|
||||
minValidity, err = time.ParseDuration(minValidityStr)
|
||||
if err != nil {
|
||||
http.Error(w, fmt.Sprintf("invalid validity parameter: %v", err), http.StatusBadRequest)
|
||||
return
|
||||
}
|
||||
}
|
||||
pair, err := h.b.GetCertPEMWithValidity(r.Context(), domain, minValidity)
|
||||
if err != nil {
|
||||
// TODO(bradfitz): 500 is a little lazy here. The errors returned from
|
||||
// GetCertPEM (and everywhere) should carry enough info to determine whether
|
||||
// they're 400 vs 403 vs 500 at minimum. And then we should have helpers
|
||||
// (in tsweb probably) to return an error that looks at the error value
|
||||
// to determine the HTTP status code.
|
||||
http.Error(w, fmt.Sprint(err), 500)
|
||||
return
|
||||
}
|
||||
serveKeyPair(w, r, pair)
|
||||
}
|
||||
|
||||
func serveKeyPair(w http.ResponseWriter, r *http.Request, p *ipnlocal.TLSCertKeyPair) {
|
||||
w.Header().Set("Content-Type", "text/plain")
|
||||
switch r.URL.Query().Get("type") {
|
||||
case "", "crt", "cert":
|
||||
w.Write(p.CertPEM)
|
||||
case "key":
|
||||
w.Write(p.KeyPEM)
|
||||
case "pair":
|
||||
w.Write(p.KeyPEM)
|
||||
w.Write(p.CertPEM)
|
||||
default:
|
||||
http.Error(w, `invalid type; want "cert" (default), "key", or "pair"`, 400)
|
||||
}
|
||||
}
|
||||
304
vendor/tailscale.com/ipn/localapi/debugderp.go
generated
vendored
Normal file
304
vendor/tailscale.com/ipn/localapi/debugderp.go
generated
vendored
Normal file
@@ -0,0 +1,304 @@
|
||||
// Copyright (c) Tailscale Inc & AUTHORS
|
||||
// SPDX-License-Identifier: BSD-3-Clause
|
||||
|
||||
package localapi
|
||||
|
||||
import (
|
||||
"context"
|
||||
"crypto/tls"
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"net"
|
||||
"net/http"
|
||||
"net/netip"
|
||||
"strconv"
|
||||
"time"
|
||||
|
||||
"tailscale.com/derp/derphttp"
|
||||
"tailscale.com/ipn/ipnstate"
|
||||
"tailscale.com/net/netaddr"
|
||||
"tailscale.com/net/netns"
|
||||
"tailscale.com/net/stun"
|
||||
"tailscale.com/tailcfg"
|
||||
"tailscale.com/types/key"
|
||||
"tailscale.com/types/nettype"
|
||||
)
|
||||
|
||||
func (h *Handler) serveDebugDERPRegion(w http.ResponseWriter, r *http.Request) {
|
||||
if !h.PermitWrite {
|
||||
http.Error(w, "debug access denied", http.StatusForbidden)
|
||||
return
|
||||
}
|
||||
if r.Method != "POST" {
|
||||
http.Error(w, "POST required", http.StatusMethodNotAllowed)
|
||||
return
|
||||
}
|
||||
var st ipnstate.DebugDERPRegionReport
|
||||
defer func() {
|
||||
j, _ := json.Marshal(st)
|
||||
w.Header().Set("Content-Type", "application/json")
|
||||
w.Write(j)
|
||||
}()
|
||||
|
||||
dm := h.b.DERPMap()
|
||||
if dm == nil {
|
||||
st.Errors = append(st.Errors, "no DERP map (not connected?)")
|
||||
return
|
||||
}
|
||||
regStr := r.FormValue("region")
|
||||
var reg *tailcfg.DERPRegion
|
||||
if id, err := strconv.Atoi(regStr); err == nil {
|
||||
reg = dm.Regions[id]
|
||||
} else {
|
||||
for _, r := range dm.Regions {
|
||||
if r.RegionCode == regStr {
|
||||
reg = r
|
||||
break
|
||||
}
|
||||
}
|
||||
}
|
||||
if reg == nil {
|
||||
st.Errors = append(st.Errors, fmt.Sprintf("no such region %q in DERP map", regStr))
|
||||
return
|
||||
}
|
||||
st.Info = append(st.Info, fmt.Sprintf("Region %v == %q", reg.RegionID, reg.RegionCode))
|
||||
if len(dm.Regions) == 1 {
|
||||
st.Warnings = append(st.Warnings, "Having only a single DERP region (i.e. removing the default Tailscale-provided regions) is a single point of failure and could hamper connectivity")
|
||||
}
|
||||
|
||||
if reg.Avoid {
|
||||
st.Warnings = append(st.Warnings, "Region is marked with Avoid bit")
|
||||
}
|
||||
if len(reg.Nodes) == 0 {
|
||||
st.Errors = append(st.Errors, "Region has no nodes defined")
|
||||
return
|
||||
}
|
||||
|
||||
ctx := r.Context()
|
||||
|
||||
var (
|
||||
dialer net.Dialer
|
||||
client *http.Client = http.DefaultClient
|
||||
)
|
||||
checkConn := func(derpNode *tailcfg.DERPNode) bool {
|
||||
port := firstNonzero(derpNode.DERPPort, 443)
|
||||
|
||||
var (
|
||||
hasIPv4 bool
|
||||
hasIPv6 bool
|
||||
)
|
||||
|
||||
// Check IPv4 first
|
||||
addr := net.JoinHostPort(firstNonzero(derpNode.IPv4, derpNode.HostName), strconv.Itoa(port))
|
||||
conn, err := dialer.DialContext(ctx, "tcp4", addr)
|
||||
if err != nil {
|
||||
st.Errors = append(st.Errors, fmt.Sprintf("Error connecting to node %q @ %q over IPv4: %v", derpNode.HostName, addr, err))
|
||||
} else {
|
||||
defer conn.Close()
|
||||
|
||||
// Upgrade to TLS and verify that works properly.
|
||||
tlsConn := tls.Client(conn, &tls.Config{
|
||||
ServerName: firstNonzero(derpNode.CertName, derpNode.HostName),
|
||||
})
|
||||
if err := tlsConn.HandshakeContext(ctx); err != nil {
|
||||
st.Errors = append(st.Errors, fmt.Sprintf("Error upgrading connection to node %q @ %q to TLS over IPv4: %v", derpNode.HostName, addr, err))
|
||||
} else {
|
||||
hasIPv4 = true
|
||||
}
|
||||
}
|
||||
|
||||
// Check IPv6
|
||||
addr = net.JoinHostPort(firstNonzero(derpNode.IPv6, derpNode.HostName), strconv.Itoa(port))
|
||||
conn, err = dialer.DialContext(ctx, "tcp6", addr)
|
||||
if err != nil {
|
||||
st.Errors = append(st.Errors, fmt.Sprintf("Error connecting to node %q @ %q over IPv6: %v", derpNode.HostName, addr, err))
|
||||
} else {
|
||||
defer conn.Close()
|
||||
|
||||
// Upgrade to TLS and verify that works properly.
|
||||
tlsConn := tls.Client(conn, &tls.Config{
|
||||
ServerName: firstNonzero(derpNode.CertName, derpNode.HostName),
|
||||
// TODO(andrew-d): we should print more
|
||||
// detailed failure information on if/why TLS
|
||||
// verification fails
|
||||
})
|
||||
if err := tlsConn.HandshakeContext(ctx); err != nil {
|
||||
st.Errors = append(st.Errors, fmt.Sprintf("Error upgrading connection to node %q @ %q to TLS over IPv6: %v", derpNode.HostName, addr, err))
|
||||
} else {
|
||||
hasIPv6 = true
|
||||
}
|
||||
}
|
||||
|
||||
// If we only have an IPv6 conn, then warn; we want both.
|
||||
if hasIPv6 && !hasIPv4 {
|
||||
st.Warnings = append(st.Warnings, fmt.Sprintf("Node %q only has IPv6 connectivity, not IPv4", derpNode.HostName))
|
||||
} else if hasIPv6 && hasIPv4 {
|
||||
st.Info = append(st.Info, fmt.Sprintf("Node %q has working IPv4 and IPv6 connectivity", derpNode.HostName))
|
||||
}
|
||||
|
||||
return hasIPv4 || hasIPv6
|
||||
}
|
||||
|
||||
checkSTUN4 := func(derpNode *tailcfg.DERPNode) {
|
||||
u4, err := nettype.MakePacketListenerWithNetIP(netns.Listener(h.logf, h.b.NetMon())).ListenPacket(ctx, "udp4", ":0")
|
||||
if err != nil {
|
||||
st.Errors = append(st.Errors, fmt.Sprintf("Error creating IPv4 STUN listener: %v", err))
|
||||
return
|
||||
}
|
||||
defer u4.Close()
|
||||
|
||||
ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
|
||||
defer cancel()
|
||||
|
||||
var addr netip.Addr
|
||||
if derpNode.IPv4 != "" {
|
||||
addr, err = netip.ParseAddr(derpNode.IPv4)
|
||||
if err != nil {
|
||||
// Error printed elsewhere
|
||||
return
|
||||
}
|
||||
} else {
|
||||
addrs, err := net.DefaultResolver.LookupNetIP(ctx, "ip4", derpNode.HostName)
|
||||
if err != nil {
|
||||
st.Errors = append(st.Errors, fmt.Sprintf("Error resolving node %q IPv4 addresses: %v", derpNode.HostName, err))
|
||||
return
|
||||
}
|
||||
addr = addrs[0]
|
||||
}
|
||||
|
||||
addrPort := netip.AddrPortFrom(addr, uint16(firstNonzero(derpNode.STUNPort, 3478)))
|
||||
|
||||
txID := stun.NewTxID()
|
||||
req := stun.Request(txID)
|
||||
|
||||
done := make(chan struct{})
|
||||
defer close(done)
|
||||
|
||||
go func() {
|
||||
select {
|
||||
case <-ctx.Done():
|
||||
case <-done:
|
||||
}
|
||||
u4.Close()
|
||||
}()
|
||||
|
||||
gotResponse := make(chan netip.AddrPort, 1)
|
||||
go func() {
|
||||
defer u4.Close()
|
||||
|
||||
var buf [64 << 10]byte
|
||||
for {
|
||||
n, addr, err := u4.ReadFromUDPAddrPort(buf[:])
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
pkt := buf[:n]
|
||||
if !stun.Is(pkt) {
|
||||
continue
|
||||
}
|
||||
ap := netaddr.Unmap(addr)
|
||||
if !ap.IsValid() {
|
||||
continue
|
||||
}
|
||||
tx, addrPort, err := stun.ParseResponse(pkt)
|
||||
if err != nil {
|
||||
continue
|
||||
}
|
||||
if tx == txID {
|
||||
gotResponse <- addrPort
|
||||
return
|
||||
}
|
||||
}
|
||||
}()
|
||||
|
||||
_, err = u4.WriteToUDPAddrPort(req, addrPort)
|
||||
if err != nil {
|
||||
st.Errors = append(st.Errors, fmt.Sprintf("Error sending IPv4 STUN packet to %v (%q): %v", addrPort, derpNode.HostName, err))
|
||||
return
|
||||
}
|
||||
|
||||
select {
|
||||
case resp := <-gotResponse:
|
||||
st.Info = append(st.Info, fmt.Sprintf("Node %q returned IPv4 STUN response: %v", derpNode.HostName, resp))
|
||||
case <-ctx.Done():
|
||||
st.Warnings = append(st.Warnings, fmt.Sprintf("Node %q did not return a IPv4 STUN response", derpNode.HostName))
|
||||
}
|
||||
}
|
||||
|
||||
// Start by checking whether we can establish an HTTP connection
|
||||
for _, derpNode := range reg.Nodes {
|
||||
connSuccess := checkConn(derpNode)
|
||||
|
||||
// Verify that the /generate_204 endpoint works
|
||||
captivePortalURL := "http://" + derpNode.HostName + "/generate_204"
|
||||
resp, err := client.Get(captivePortalURL)
|
||||
if err != nil {
|
||||
st.Warnings = append(st.Warnings, fmt.Sprintf("Error making request to the captive portal check %q; is port 80 blocked?", captivePortalURL))
|
||||
} else {
|
||||
resp.Body.Close()
|
||||
}
|
||||
|
||||
if !connSuccess {
|
||||
continue
|
||||
}
|
||||
|
||||
fakePrivKey := key.NewNode()
|
||||
|
||||
// Next, repeatedly get the server key to see if the node is
|
||||
// behind a load balancer (incorrectly).
|
||||
serverPubKeys := make(map[key.NodePublic]bool)
|
||||
for i := range 5 {
|
||||
func() {
|
||||
rc := derphttp.NewRegionClient(fakePrivKey, h.logf, h.b.NetMon(), func() *tailcfg.DERPRegion {
|
||||
return &tailcfg.DERPRegion{
|
||||
RegionID: reg.RegionID,
|
||||
RegionCode: reg.RegionCode,
|
||||
RegionName: reg.RegionName,
|
||||
Nodes: []*tailcfg.DERPNode{derpNode},
|
||||
}
|
||||
})
|
||||
if err := rc.Connect(ctx); err != nil {
|
||||
st.Errors = append(st.Errors, fmt.Sprintf("Error connecting to node %q @ try %d: %v", derpNode.HostName, i, err))
|
||||
return
|
||||
}
|
||||
|
||||
if len(serverPubKeys) == 0 {
|
||||
st.Info = append(st.Info, fmt.Sprintf("Successfully established a DERP connection with node %q", derpNode.HostName))
|
||||
}
|
||||
serverPubKeys[rc.ServerPublicKey()] = true
|
||||
}()
|
||||
}
|
||||
if len(serverPubKeys) > 1 {
|
||||
st.Errors = append(st.Errors, fmt.Sprintf("Received multiple server public keys (%d); is the DERP server behind a load balancer?", len(serverPubKeys)))
|
||||
}
|
||||
|
||||
// Send a STUN query to this node to verify whether or not it
|
||||
// correctly returns an IP address.
|
||||
checkSTUN4(derpNode)
|
||||
}
|
||||
|
||||
// TODO(bradfitz): finish:
|
||||
// * try to DERP auth with new public key.
|
||||
// * if rejected, add Info that it's likely the DERP server authz is on,
|
||||
// try with LocalBackend's node key instead.
|
||||
// * if they have more than one node, try to relay a packet between them
|
||||
// and see if it works (like cmd/derpprobe). But if server authz is on,
|
||||
// we won't be able to, so just warn. Say to turn that off, try again,
|
||||
// then turn it back on. TODO(bradfitz): maybe add a debug frame to DERP
|
||||
// protocol to say how many peers it's meshed with. Should match count
|
||||
// in DERPRegion. Or maybe even list all their server pub keys that it's peered
|
||||
// with.
|
||||
// * If their certificate is bad, either expired or just wrongly
|
||||
// issued in the first place, tell them specifically that the
|
||||
// cert is bad not just that the connection failed.
|
||||
}
|
||||
|
||||
func firstNonzero[T comparable](items ...T) T {
|
||||
var zero T
|
||||
for _, item := range items {
|
||||
if item != zero {
|
||||
return item
|
||||
}
|
||||
}
|
||||
return zero
|
||||
}
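
The generic firstNonzero helper above is what lets the DERP checks fall back from an explicit IPv4/IPv6 address to the node's hostname, or from a configured port to a default. A self-contained copy, just to illustrate the behavior:

package main

import "fmt"

// firstNonzero returns the first argument that is not its type's zero value
// (copied from the vendored helper above for illustration).
func firstNonzero[T comparable](items ...T) T {
	var zero T
	for _, item := range items {
		if item != zero {
			return item
		}
	}
	return zero
}

func main() {
	fmt.Println(firstNonzero("", "derp1.example.com")) // falls back to the hostname
	fmt.Println(firstNonzero(0, 443))                  // uses the default port when none is configured
}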
|
||||
15
vendor/tailscale.com/ipn/localapi/disabled_stubs.go
generated
vendored
Normal file
15
vendor/tailscale.com/ipn/localapi/disabled_stubs.go
generated
vendored
Normal file
@@ -0,0 +1,15 @@
|
||||
// Copyright (c) Tailscale Inc & AUTHORS
|
||||
// SPDX-License-Identifier: BSD-3-Clause
|
||||
|
||||
//go:build ios || android || js
|
||||
|
||||
package localapi
|
||||
|
||||
import (
|
||||
"net/http"
|
||||
"runtime"
|
||||
)
|
||||
|
||||
func (h *Handler) serveCert(w http.ResponseWriter, r *http.Request) {
|
||||
http.Error(w, "disabled on "+runtime.GOOS, http.StatusNotFound)
|
||||
}
|
||||
2931
vendor/tailscale.com/ipn/localapi/localapi.go
generated
vendored
Normal file
2931
vendor/tailscale.com/ipn/localapi/localapi.go
generated
vendored
Normal file
File diff suppressed because it is too large
28
vendor/tailscale.com/ipn/localapi/pprof.go
generated
vendored
Normal file
28
vendor/tailscale.com/ipn/localapi/pprof.go
generated
vendored
Normal file
@@ -0,0 +1,28 @@
|
||||
// Copyright (c) Tailscale Inc & AUTHORS
|
||||
// SPDX-License-Identifier: BSD-3-Clause
|
||||
|
||||
//go:build !ios && !android && !js
|
||||
|
||||
// We don't include it on mobile where we're more memory constrained and
|
||||
// there's no CLI to get at the results anyway.
|
||||
|
||||
package localapi
|
||||
|
||||
import (
|
||||
"net/http"
|
||||
"net/http/pprof"
|
||||
)
|
||||
|
||||
func init() {
|
||||
servePprofFunc = servePprof
|
||||
}
|
||||
|
||||
func servePprof(w http.ResponseWriter, r *http.Request) {
|
||||
name := r.FormValue("name")
|
||||
switch name {
|
||||
case "profile":
|
||||
pprof.Profile(w, r)
|
||||
default:
|
||||
pprof.Handler(name).ServeHTTP(w, r)
|
||||
}
|
||||
}
|
||||
47
vendor/tailscale.com/ipn/policy/policy.go
generated
vendored
Normal file
47
vendor/tailscale.com/ipn/policy/policy.go
generated
vendored
Normal file
@@ -0,0 +1,47 @@
|
||||
// Copyright (c) Tailscale Inc & AUTHORS
|
||||
// SPDX-License-Identifier: BSD-3-Clause
|
||||
|
||||
// Package policy contains various policy decisions that need to be
|
||||
// shared between the node client & control server.
|
||||
package policy
|
||||
|
||||
import (
|
||||
"tailscale.com/tailcfg"
|
||||
)
|
||||
|
||||
// IsInterestingService reports whether service s on the given operating
|
||||
// system (a version.OS value) is an interesting enough port to report
|
||||
// to our peer nodes for discovery purposes.
|
||||
func IsInterestingService(s tailcfg.Service, os string) bool {
|
||||
switch s.Proto {
|
||||
case tailcfg.PeerAPI4, tailcfg.PeerAPI6, tailcfg.PeerAPIDNS:
|
||||
return true
|
||||
}
|
||||
if s.Proto != tailcfg.TCP {
|
||||
return false
|
||||
}
|
||||
if os != "windows" {
|
||||
// For non-Windows machines, assume all TCP listeners
|
||||
// are interesting enough. We don't see listener spam
|
||||
// there.
|
||||
return true
|
||||
}
|
||||
// Windows has tons of TCP listeners. We need to move to a denylist
|
||||
// model later, but for now we just allow some common ones:
|
||||
switch s.Port {
|
||||
case 22, // ssh
|
||||
80, // http
|
||||
443, // https (but no hostname, so of little use)
|
||||
3389, // rdp
|
||||
5900, // vnc
|
||||
32400, // plex
|
||||
|
||||
// And now some arbitrary HTTP dev server ports:
|
||||
// Eventually we'll remove this and make all ports
|
||||
// work, once we nicely filter away noisy system
|
||||
// ports.
|
||||
8000, 8080, 8443, 8888:
|
||||
return true
|
||||
}
|
||||
return false
|
||||
}
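
A small sketch of how a caller might use IsInterestingService, showing the Windows allowlist versus the report-everything behavior elsewhere; the port numbers are only illustrative:

package main

import (
	"fmt"

	"tailscale.com/ipn/policy"
	"tailscale.com/tailcfg"
)

func main() {
	ssh := tailcfg.Service{Proto: tailcfg.TCP, Port: 22}
	dev := tailcfg.Service{Proto: tailcfg.TCP, Port: 49152}

	fmt.Println(policy.IsInterestingService(ssh, "linux"))   // true: non-Windows reports all TCP listeners
	fmt.Println(policy.IsInterestingService(ssh, "windows")) // true: port 22 is on the allowlist
	fmt.Println(policy.IsInterestingService(dev, "windows")) // false: arbitrary ports are filtered on Windows
}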
|
||||
991
vendor/tailscale.com/ipn/prefs.go
generated
vendored
Normal file
991
vendor/tailscale.com/ipn/prefs.go
generated
vendored
Normal file
@@ -0,0 +1,991 @@
|
||||
// Copyright (c) Tailscale Inc & AUTHORS
|
||||
// SPDX-License-Identifier: BSD-3-Clause
|
||||
|
||||
package ipn
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"encoding/json"
|
||||
"errors"
|
||||
"fmt"
|
||||
"log"
|
||||
"net/netip"
|
||||
"os"
|
||||
"path/filepath"
|
||||
"reflect"
|
||||
"runtime"
|
||||
"slices"
|
||||
"strings"
|
||||
|
||||
"tailscale.com/atomicfile"
|
||||
"tailscale.com/drive"
|
||||
"tailscale.com/ipn/ipnstate"
|
||||
"tailscale.com/net/netaddr"
|
||||
"tailscale.com/net/tsaddr"
|
||||
"tailscale.com/tailcfg"
|
||||
"tailscale.com/types/opt"
|
||||
"tailscale.com/types/persist"
|
||||
"tailscale.com/types/preftype"
|
||||
"tailscale.com/types/views"
|
||||
"tailscale.com/util/dnsname"
|
||||
"tailscale.com/util/syspolicy"
|
||||
)
|
||||
|
||||
// DefaultControlURL is the URL base of the control plane
|
||||
// ("coordination server") for use when no explicit one is configured.
|
||||
// The default control plane is the hosted version run by Tailscale.com.
|
||||
const DefaultControlURL = "https://controlplane.tailscale.com"
|
||||
|
||||
var (
|
||||
// ErrExitNodeIDAlreadySet is returned from (*Prefs).SetExitNodeIP when the
|
||||
// Prefs.ExitNodeID field is already set.
|
||||
ErrExitNodeIDAlreadySet = errors.New("cannot set ExitNodeIP when ExitNodeID is already set")
|
||||
)
|
||||
|
||||
// IsLoginServerSynonym reports whether a URL is a drop-in replacement
|
||||
// for the primary Tailscale login server.
|
||||
func IsLoginServerSynonym(val any) bool {
|
||||
return val == "https://login.tailscale.com" || val == "https://controlplane.tailscale.com"
|
||||
}
|
||||
|
||||
// Prefs are the user modifiable settings of the Tailscale node agent.
|
||||
// When you add a Pref to this struct, remember to add a corresponding
|
||||
// field in MaskedPrefs, and check your field for equality in Prefs.Equals().
|
||||
type Prefs struct {
|
||||
// ControlURL is the URL of the control server to use.
|
||||
//
|
||||
// If empty, the default for new installs, DefaultControlURL
|
||||
// is used. It's set non-empty once the daemon has been started
|
||||
// for the first time.
|
||||
//
|
||||
// TODO(apenwarr): Make it safe to update this with EditPrefs().
|
||||
// Right now, you have to pass it in the initial prefs in Start(),
|
||||
// which is the only code that actually uses the ControlURL value.
|
||||
// It would be more consistent to restart controlclient
|
||||
// automatically whenever this variable changes.
|
||||
//
|
||||
// Meanwhile, you have to provide this as part of
|
||||
// Options.LegacyMigrationPrefs or Options.UpdatePrefs when
|
||||
// calling Backend.Start().
|
||||
ControlURL string
|
||||
|
||||
// RouteAll specifies whether to accept subnets advertised by
|
||||
// other nodes on the Tailscale network. Note that this does not
|
||||
// include default routes (0.0.0.0/0 and ::/0), those are
|
||||
// controlled by ExitNodeID/IP below.
|
||||
RouteAll bool
|
||||
|
||||
// ExitNodeID and ExitNodeIP specify the node that should be used
|
||||
// as an exit node for internet traffic. At most one of these
|
||||
// should be non-zero.
|
||||
//
|
||||
// The preferred way to express the chosen node is ExitNodeID, but
|
||||
// in some cases it's not possible to use that ID (e.g. in the
|
||||
// linux CLI, before tailscaled has a netmap). For those
|
||||
// situations, we allow specifying the exit node by IP, and
|
||||
// ipnlocal.LocalBackend will translate the IP into an ID when the
|
||||
// node is found in the netmap.
|
||||
//
|
||||
// If the selected exit node doesn't exist (e.g. it's not part of
|
||||
// the current tailnet), or it doesn't offer exit node services, a
|
||||
// blackhole route will be installed on the local system to
|
||||
// prevent any traffic escaping to the local network.
|
||||
ExitNodeID tailcfg.StableNodeID
|
||||
ExitNodeIP netip.Addr
|
||||
|
||||
// InternalExitNodePrior is the most recently used ExitNodeID in string form. It is set by
|
||||
// the backend on transition from exit node on to off and used by the
|
||||
// backend.
|
||||
//
|
||||
// As an Internal field, it can't be set by LocalAPI clients, rather it is set indirectly
|
||||
// when the ExitNodeID value is zero'd and via the set-use-exit-node-enabled endpoint.
|
||||
InternalExitNodePrior tailcfg.StableNodeID
|
||||
|
||||
// ExitNodeAllowLANAccess indicates whether locally accessible subnets should be
|
||||
// routed directly or via the exit node.
|
||||
ExitNodeAllowLANAccess bool
|
||||
|
||||
// CorpDNS specifies whether to install the Tailscale network's
|
||||
// DNS configuration, if it exists.
|
||||
CorpDNS bool
|
||||
|
||||
// RunSSH bool is whether this node should run an SSH
|
||||
// server, permitting access to peers according to the
|
||||
// policies as configured by the Tailnet's admin(s).
|
||||
RunSSH bool
|
||||
|
||||
// RunWebClient bool is whether this node should expose
|
||||
// its web client over Tailscale at port 5252,
|
||||
// permitting access to peers according to the
|
||||
// policies as configured by the Tailnet's admin(s).
|
||||
RunWebClient bool
|
||||
|
||||
// WantRunning indicates whether networking should be active on
|
||||
// this node.
|
||||
WantRunning bool
|
||||
|
||||
// LoggedOut indicates whether the user intends to be logged out.
|
||||
// There are other reasons we may be logged out, including no valid
|
||||
// keys.
|
||||
// We need to remember this state so that, on next startup, we can
|
||||
// generate the "Login" vs "Connect" buttons correctly, without having
|
||||
// to contact the server to confirm our nodekey status first.
|
||||
LoggedOut bool
|
||||
|
||||
// ShieldsUp indicates whether to block all incoming connections,
|
||||
// regardless of the control-provided packet filter. If false, we
|
||||
// use the packet filter as provided. If true, we block incoming
|
||||
// connections. This overrides tailcfg.Hostinfo's ShieldsUp.
|
||||
ShieldsUp bool
|
||||
|
||||
// AdvertiseTags specifies groups that this node wants to join, for
|
||||
// purposes of ACL enforcement. These can be referenced from the ACL
|
||||
// security policy. Note that advertising a tag doesn't guarantee that
|
||||
// the control server will allow you to take on the rights for that
|
||||
// tag.
|
||||
AdvertiseTags []string
|
||||
|
||||
// Hostname is the hostname to use for identifying the node. If
|
||||
// not set, os.Hostname is used.
|
||||
Hostname string
|
||||
|
||||
// NotepadURLs is a debugging setting that opens OAuth URLs in
|
||||
// notepad.exe on Windows, rather than loading them in a browser.
|
||||
//
|
||||
// apenwarr 2020-04-29: Unfortunately this is still needed sometimes.
|
||||
// Windows' default browser setting is sometimes screwy and this helps
|
||||
// users narrow it down a bit.
|
||||
NotepadURLs bool
|
||||
|
||||
// ForceDaemon specifies whether a platform that normally
|
||||
// operates in "client mode" (that is, requires an active user
|
||||
// logged in with the GUI app running) should keep running after the
|
||||
// GUI ends and/or the user logs out.
|
||||
//
|
||||
// The only current applicable platform is Windows. This
|
||||
// forces Windows to go into "server mode" where Tailscale is
|
||||
// running even with no users logged in. This might also be
|
||||
// used for macOS in the future. This setting has no effect
|
||||
// for Linux/etc, which always operate in daemon mode.
|
||||
ForceDaemon bool `json:"ForceDaemon,omitempty"`
|
||||
|
||||
// Egg is an optional debug flag.
|
||||
Egg bool `json:",omitempty"`
|
||||
|
||||
// The following block of options only have an effect on Linux.
|
||||
|
||||
// AdvertiseRoutes specifies CIDR prefixes to advertise into the
|
||||
// Tailscale network as reachable through the current
|
||||
// node.
|
||||
AdvertiseRoutes []netip.Prefix
|
||||
|
||||
// NoSNAT specifies whether to source NAT traffic going to
|
||||
// destinations in AdvertiseRoutes. The default is to apply source
|
||||
// NAT, which makes the traffic appear to come from the router
|
||||
// machine rather than the peer's Tailscale IP.
|
||||
//
|
||||
// Disabling SNAT requires additional manual configuration in your
|
||||
// network to route Tailscale traffic back to the subnet relay
|
||||
// machine.
|
||||
//
|
||||
// Linux-only.
|
||||
NoSNAT bool
|
||||
|
||||
// NoStatefulFiltering specifies whether to apply stateful filtering when
|
||||
// advertising routes in AdvertiseRoutes. The default is to not apply
|
||||
// stateful filtering.
|
||||
//
|
||||
// To allow inbound connections from advertised routes, both NoSNAT and
|
||||
// NoStatefulFiltering must be true.
|
||||
//
|
||||
// This is an opt.Bool because it was first added after NoSNAT, with a
|
||||
// backfill based on the value of that parameter. The backfill has been
|
||||
// removed since then, but the field remains an opt.Bool.
|
||||
//
|
||||
// Linux-only.
|
||||
NoStatefulFiltering opt.Bool `json:",omitempty"`
|
||||
|
||||
// NetfilterMode specifies how much to manage netfilter rules for
|
||||
// Tailscale, if at all.
|
||||
NetfilterMode preftype.NetfilterMode
|
||||
|
||||
// OperatorUser is the local machine user name who is allowed to
|
||||
// operate tailscaled without being root or using sudo.
|
||||
OperatorUser string `json:",omitempty"`
|
||||
|
||||
// ProfileName is the desired name of the profile. If empty, then the user's
|
||||
// LoginName is used. It is only used for display purposes in the client UI
|
||||
// and CLI.
|
||||
ProfileName string `json:",omitempty"`
|
||||
|
||||
// AutoUpdate sets the auto-update preferences for the node agent. See
|
||||
// AutoUpdatePrefs docs for more details.
|
||||
AutoUpdate AutoUpdatePrefs
|
||||
|
||||
// AppConnector sets the app connector preferences for the node agent. See
|
||||
// AppConnectorPrefs docs for more details.
|
||||
AppConnector AppConnectorPrefs
|
||||
|
||||
// PostureChecking enables the collection of information used for device
|
||||
// posture checks.
|
||||
PostureChecking bool
|
||||
|
||||
// NetfilterKind specifies what netfilter implementation to use.
|
||||
//
|
||||
// Linux-only.
|
||||
NetfilterKind string
|
||||
|
||||
// DriveShares are the configured DriveShares, stored in increasing order
|
||||
// by name.
|
||||
DriveShares []*drive.Share
|
||||
|
||||
// AllowSingleHosts was a legacy field that was always true
|
||||
// for the past 4.5 years. It controlled whether Tailscale
|
||||
// peers got /32 or /127 routes for each other.
|
||||
// As of 2024-05-17 we're starting to ignore it, but to let
|
||||
// people still downgrade Tailscale versions and not break
|
||||
// all peer-to-peer networking we still write it to disk (as JSON)
|
||||
// so it can be loaded back by old versions.
|
||||
// TODO(bradfitz): delete this in 2025 sometime. See #12058.
|
||||
AllowSingleHosts marshalAsTrueInJSON
|
||||
|
||||
// The Persist field is named 'Config' in the file for backward
|
||||
// compatibility with earlier versions.
|
||||
// TODO(apenwarr): We should move this out of here, it's not a pref.
|
||||
// We can maybe do that once we're sure which module should persist
|
||||
// it (backend or frontend?)
|
||||
Persist *persist.Persist `json:"Config"`
|
||||
}
|
||||
|
||||
// AutoUpdatePrefs are the auto update settings for the node agent.
|
||||
type AutoUpdatePrefs struct {
|
||||
// Check specifies whether background checks for updates are enabled. When
|
||||
// enabled, tailscaled will periodically check for available updates and
|
||||
// notify the user about them.
|
||||
Check bool
|
||||
// Apply specifies whether background auto-updates are enabled. When
|
||||
// enabled, tailscaled will apply available updates in the background.
|
||||
// Check must also be set when Apply is set.
|
||||
Apply opt.Bool
|
||||
}
|
||||
|
||||
func (au1 AutoUpdatePrefs) Equals(au2 AutoUpdatePrefs) bool {
|
||||
// This could almost be as easy as `au1.Apply == au2.Apply`, except that
|
||||
// opt.Bool("") and opt.Bool("unset") should be treated as equal.
|
||||
apply1, ok1 := au1.Apply.Get()
|
||||
apply2, ok2 := au2.Apply.Get()
|
||||
return au1.Check == au2.Check &&
|
||||
apply1 == apply2 &&
|
||||
ok1 == ok2
|
||||
}
|
||||
|
||||
type marshalAsTrueInJSON struct{}
|
||||
|
||||
var trueJSON = []byte("true")
|
||||
|
||||
func (marshalAsTrueInJSON) MarshalJSON() ([]byte, error) { return trueJSON, nil }
|
||||
func (*marshalAsTrueInJSON) UnmarshalJSON([]byte) error { return nil }
|
||||
|
||||
// AppConnectorPrefs are the app connector settings for the node agent.
|
||||
type AppConnectorPrefs struct {
|
||||
// Advertise specifies whether the app connector subsystem is advertising
|
||||
// this node as a connector.
|
||||
Advertise bool
|
||||
}
|
||||
|
||||
// MaskedPrefs is a Prefs with an associated bitmask of which fields are set.
|
||||
//
|
||||
// Each FooSet field maps to a corresponding Foo field in Prefs. FooSet can be
|
||||
// a struct, in which case inner fields of FooSet map to inner fields of Foo in
|
||||
// Prefs (see AutoUpdateSet for example).
|
||||
type MaskedPrefs struct {
|
||||
Prefs
|
||||
|
||||
ControlURLSet bool `json:",omitempty"`
|
||||
RouteAllSet bool `json:",omitempty"`
|
||||
ExitNodeIDSet bool `json:",omitempty"`
|
||||
ExitNodeIPSet bool `json:",omitempty"`
|
||||
InternalExitNodePriorSet bool `json:",omitempty"` // Internal; can't be set by LocalAPI clients
|
||||
ExitNodeAllowLANAccessSet bool `json:",omitempty"`
|
||||
CorpDNSSet bool `json:",omitempty"`
|
||||
RunSSHSet bool `json:",omitempty"`
|
||||
RunWebClientSet bool `json:",omitempty"`
|
||||
WantRunningSet bool `json:",omitempty"`
|
||||
LoggedOutSet bool `json:",omitempty"`
|
||||
ShieldsUpSet bool `json:",omitempty"`
|
||||
AdvertiseTagsSet bool `json:",omitempty"`
|
||||
HostnameSet bool `json:",omitempty"`
|
||||
NotepadURLsSet bool `json:",omitempty"`
|
||||
ForceDaemonSet bool `json:",omitempty"`
|
||||
EggSet bool `json:",omitempty"`
|
||||
AdvertiseRoutesSet bool `json:",omitempty"`
|
||||
NoSNATSet bool `json:",omitempty"`
|
||||
NoStatefulFilteringSet bool `json:",omitempty"`
|
||||
NetfilterModeSet bool `json:",omitempty"`
|
||||
OperatorUserSet bool `json:",omitempty"`
|
||||
ProfileNameSet bool `json:",omitempty"`
|
||||
AutoUpdateSet AutoUpdatePrefsMask `json:",omitempty"`
|
||||
AppConnectorSet bool `json:",omitempty"`
|
||||
PostureCheckingSet bool `json:",omitempty"`
|
||||
NetfilterKindSet bool `json:",omitempty"`
|
||||
DriveSharesSet bool `json:",omitempty"`
|
||||
}
|
||||
|
||||
// SetsInternal reports whether mp has any of the Internal*Set field bools set
|
||||
// to true.
|
||||
func (mp *MaskedPrefs) SetsInternal() bool {
|
||||
return mp.InternalExitNodePriorSet
|
||||
}
|
||||
|
||||
type AutoUpdatePrefsMask struct {
|
||||
CheckSet bool `json:",omitempty"`
|
||||
ApplySet bool `json:",omitempty"`
|
||||
}
|
||||
|
||||
func (m AutoUpdatePrefsMask) Pretty(au AutoUpdatePrefs) string {
|
||||
var fields []string
|
||||
if m.CheckSet {
|
||||
fields = append(fields, fmt.Sprintf("Check=%v", au.Check))
|
||||
}
|
||||
if m.ApplySet {
|
||||
fields = append(fields, fmt.Sprintf("Apply=%v", au.Apply))
|
||||
}
|
||||
return strings.Join(fields, " ")
|
||||
}
|
||||
|
||||
// ApplyEdits mutates p, assigning fields from m.Prefs for each MaskedPrefs
|
||||
// Set field that's true.
|
||||
func (p *Prefs) ApplyEdits(m *MaskedPrefs) {
|
||||
if p == nil {
|
||||
panic("can't edit nil Prefs")
|
||||
}
|
||||
pv := reflect.ValueOf(p).Elem()
|
||||
mv := reflect.ValueOf(m).Elem()
|
||||
mpv := reflect.ValueOf(&m.Prefs).Elem()
|
||||
applyPrefsEdits(mpv, pv, maskFields(mv))
|
||||
}
|
||||
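// Illustrative usage sketch, not part of the vendored file: only fields whose
// corresponding *Set flag is true are copied by ApplyEdits, so a caller can
// change one preference without clobbering the rest. The hostname value is
// hypothetical.
//
//	p := NewPrefs()
//	edit := &MaskedPrefs{
//		Prefs:       Prefs{Hostname: "build-box"},
//		HostnameSet: true,
//	}
//	p.ApplyEdits(edit)
//	// p.Hostname == "build-box"; p.RouteAll, p.CorpDNS, etc. keep their defaults.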
|
||||
func applyPrefsEdits(src, dst reflect.Value, mask map[string]reflect.Value) {
|
||||
for n, m := range mask {
|
||||
switch m.Kind() {
|
||||
case reflect.Bool:
|
||||
if m.Bool() {
|
||||
dst.FieldByName(n).Set(src.FieldByName(n))
|
||||
}
|
||||
case reflect.Struct:
|
||||
applyPrefsEdits(src.FieldByName(n), dst.FieldByName(n), maskFields(m))
|
||||
default:
|
||||
panic(fmt.Sprintf("unsupported mask field kind %v", m.Kind()))
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func maskFields(v reflect.Value) map[string]reflect.Value {
|
||||
mask := make(map[string]reflect.Value)
|
||||
for i := range v.NumField() {
|
||||
f := v.Type().Field(i).Name
|
||||
if !strings.HasSuffix(f, "Set") {
|
||||
continue
|
||||
}
|
||||
mask[strings.TrimSuffix(f, "Set")] = v.Field(i)
|
||||
}
|
||||
return mask
|
||||
}
|
||||
|
||||
// IsEmpty reports whether there are no masks set or if m is nil.
|
||||
func (m *MaskedPrefs) IsEmpty() bool {
|
||||
if m == nil {
|
||||
return true
|
||||
}
|
||||
mv := reflect.ValueOf(m).Elem()
|
||||
fields := mv.NumField()
|
||||
for i := 1; i < fields; i++ {
|
||||
if !mv.Field(i).IsZero() {
|
||||
return false
|
||||
}
|
||||
}
|
||||
return true
|
||||
}
|
||||
|
||||
func (m *MaskedPrefs) Pretty() string {
|
||||
if m == nil {
|
||||
return "MaskedPrefs{<nil>}"
|
||||
}
|
||||
var sb strings.Builder
|
||||
sb.WriteString("MaskedPrefs{")
|
||||
mv := reflect.ValueOf(m).Elem()
|
||||
mt := mv.Type()
|
||||
mpv := reflect.ValueOf(&m.Prefs).Elem()
|
||||
first := true
|
||||
|
||||
format := func(v reflect.Value) string {
|
||||
switch v.Type().Kind() {
|
||||
case reflect.String:
|
||||
return "%s=%q"
|
||||
case reflect.Slice:
|
||||
// []string
|
||||
if v.Type().Elem().Kind() == reflect.String {
|
||||
return "%s=%q"
|
||||
}
|
||||
case reflect.Struct:
|
||||
return "%s=%+v"
|
||||
case reflect.Pointer:
|
||||
if v.Type().Elem().Kind() == reflect.Struct {
|
||||
return "%s=%+v"
|
||||
}
|
||||
}
|
||||
return "%s=%v"
|
||||
}
|
||||
|
||||
for i := 1; i < mt.NumField(); i++ {
|
||||
name := mt.Field(i).Name
|
||||
mf := mv.Field(i)
|
||||
switch mf.Kind() {
|
||||
case reflect.Bool:
|
||||
if mf.Bool() {
|
||||
if !first {
|
||||
sb.WriteString(" ")
|
||||
}
|
||||
first = false
|
||||
f := mpv.Field(i - 1)
|
||||
fmt.Fprintf(&sb, format(f),
|
||||
strings.TrimSuffix(name, "Set"),
|
||||
f.Interface())
|
||||
}
|
||||
case reflect.Struct:
|
||||
if mf.IsZero() {
|
||||
continue
|
||||
}
|
||||
mpf := mpv.Field(i - 1)
|
||||
// This would be much simpler with reflect.MethodByName("Pretty"),
|
||||
// but using MethodByName disables some linker optimizations and
|
||||
// makes our binaries much larger. See
|
||||
// https://github.com/tailscale/tailscale/issues/10627#issuecomment-1861211945
|
||||
//
|
||||
// Instead, have this explicit switch by field name to do type
|
||||
// assertions.
|
||||
switch name {
|
||||
case "AutoUpdateSet":
|
||||
p := mf.Interface().(AutoUpdatePrefsMask).Pretty(mpf.Interface().(AutoUpdatePrefs))
|
||||
fmt.Fprintf(&sb, "%s={%s}", strings.TrimSuffix(name, "Set"), p)
|
||||
default:
|
||||
panic(fmt.Sprintf("unexpected MaskedPrefs field %q", name))
|
||||
}
|
||||
}
|
||||
}
|
||||
sb.WriteString("}")
|
||||
return sb.String()
|
||||
}
|
||||
|
||||
// IsEmpty reports whether p is nil or pointing to a Prefs zero value.
|
||||
func (p *Prefs) IsEmpty() bool { return p == nil || p.Equals(&Prefs{}) }
|
||||
|
||||
func (p PrefsView) Pretty() string { return p.ж.Pretty() }
|
||||
|
||||
func (p *Prefs) Pretty() string { return p.pretty(runtime.GOOS) }
|
||||
func (p *Prefs) pretty(goos string) string {
|
||||
var sb strings.Builder
|
||||
sb.WriteString("Prefs{")
|
||||
fmt.Fprintf(&sb, "ra=%v ", p.RouteAll)
|
||||
fmt.Fprintf(&sb, "dns=%v want=%v ", p.CorpDNS, p.WantRunning)
|
||||
if p.RunSSH {
|
||||
sb.WriteString("ssh=true ")
|
||||
}
|
||||
if p.RunWebClient {
|
||||
sb.WriteString("webclient=true ")
|
||||
}
|
||||
if p.LoggedOut {
|
||||
sb.WriteString("loggedout=true ")
|
||||
}
|
||||
if p.ForceDaemon {
|
||||
sb.WriteString("server=true ")
|
||||
}
|
||||
if p.NotepadURLs {
|
||||
sb.WriteString("notepad=true ")
|
||||
}
|
||||
if p.ShieldsUp {
|
||||
sb.WriteString("shields=true ")
|
||||
}
|
||||
if p.ExitNodeIP.IsValid() {
|
||||
fmt.Fprintf(&sb, "exit=%v lan=%t ", p.ExitNodeIP, p.ExitNodeAllowLANAccess)
|
||||
} else if !p.ExitNodeID.IsZero() {
|
||||
fmt.Fprintf(&sb, "exit=%v lan=%t ", p.ExitNodeID, p.ExitNodeAllowLANAccess)
|
||||
}
|
||||
if len(p.AdvertiseRoutes) > 0 || goos == "linux" {
|
||||
fmt.Fprintf(&sb, "routes=%v ", p.AdvertiseRoutes)
|
||||
}
|
||||
if len(p.AdvertiseRoutes) > 0 || p.NoSNAT {
|
||||
fmt.Fprintf(&sb, "snat=%v ", !p.NoSNAT)
|
||||
}
|
||||
if len(p.AdvertiseRoutes) > 0 || p.NoStatefulFiltering.EqualBool(true) {
|
||||
// Only print if we're advertising any routes, or the user has
|
||||
// turned off stateful filtering (NoStatefulFiltering=true ⇒
|
||||
// StatefulFiltering=false).
|
||||
bb, _ := p.NoStatefulFiltering.Get()
|
||||
fmt.Fprintf(&sb, "statefulFiltering=%v ", !bb)
|
||||
}
|
||||
if len(p.AdvertiseTags) > 0 {
|
||||
fmt.Fprintf(&sb, "tags=%s ", strings.Join(p.AdvertiseTags, ","))
|
||||
}
|
||||
if goos == "linux" {
|
||||
fmt.Fprintf(&sb, "nf=%v ", p.NetfilterMode)
|
||||
}
|
||||
if p.ControlURL != "" && p.ControlURL != DefaultControlURL {
|
||||
fmt.Fprintf(&sb, "url=%q ", p.ControlURL)
|
||||
}
|
||||
if p.Hostname != "" {
|
||||
fmt.Fprintf(&sb, "host=%q ", p.Hostname)
|
||||
}
|
||||
if p.OperatorUser != "" {
|
||||
fmt.Fprintf(&sb, "op=%q ", p.OperatorUser)
|
||||
}
|
||||
if p.NetfilterKind != "" {
|
||||
fmt.Fprintf(&sb, "netfilterKind=%s ", p.NetfilterKind)
|
||||
}
|
||||
sb.WriteString(p.AutoUpdate.Pretty())
|
||||
sb.WriteString(p.AppConnector.Pretty())
|
||||
if p.Persist != nil {
|
||||
sb.WriteString(p.Persist.Pretty())
|
||||
} else {
|
||||
sb.WriteString("Persist=nil")
|
||||
}
|
||||
sb.WriteString("}")
|
||||
return sb.String()
|
||||
}
|
||||
|
||||
func (p PrefsView) ToBytes() []byte {
|
||||
return p.ж.ToBytes()
|
||||
}
|
||||
|
||||
func (p *Prefs) ToBytes() []byte {
|
||||
data, err := json.MarshalIndent(p, "", "\t")
|
||||
if err != nil {
|
||||
log.Fatalf("Prefs marshal: %v\n", err)
|
||||
}
|
||||
return data
|
||||
}
|
||||
|
||||
func (p PrefsView) Equals(p2 PrefsView) bool {
|
||||
return p.ж.Equals(p2.ж)
|
||||
}
|
||||
|
||||
func (p *Prefs) Equals(p2 *Prefs) bool {
|
||||
if p == nil && p2 == nil {
|
||||
return true
|
||||
}
|
||||
if p == nil || p2 == nil {
|
||||
return false
|
||||
}
|
||||
|
||||
return p.ControlURL == p2.ControlURL &&
|
||||
p.RouteAll == p2.RouteAll &&
|
||||
p.ExitNodeID == p2.ExitNodeID &&
|
||||
p.ExitNodeIP == p2.ExitNodeIP &&
|
||||
p.InternalExitNodePrior == p2.InternalExitNodePrior &&
|
||||
p.ExitNodeAllowLANAccess == p2.ExitNodeAllowLANAccess &&
|
||||
p.CorpDNS == p2.CorpDNS &&
|
||||
p.RunSSH == p2.RunSSH &&
|
||||
p.RunWebClient == p2.RunWebClient &&
|
||||
p.WantRunning == p2.WantRunning &&
|
||||
p.LoggedOut == p2.LoggedOut &&
|
||||
p.NotepadURLs == p2.NotepadURLs &&
|
||||
p.ShieldsUp == p2.ShieldsUp &&
|
||||
p.NoSNAT == p2.NoSNAT &&
|
||||
p.NoStatefulFiltering == p2.NoStatefulFiltering &&
|
||||
p.NetfilterMode == p2.NetfilterMode &&
|
||||
p.OperatorUser == p2.OperatorUser &&
|
||||
p.Hostname == p2.Hostname &&
|
||||
p.ForceDaemon == p2.ForceDaemon &&
|
||||
compareIPNets(p.AdvertiseRoutes, p2.AdvertiseRoutes) &&
|
||||
compareStrings(p.AdvertiseTags, p2.AdvertiseTags) &&
|
||||
p.Persist.Equals(p2.Persist) &&
|
||||
p.ProfileName == p2.ProfileName &&
|
||||
p.AutoUpdate.Equals(p2.AutoUpdate) &&
|
||||
p.AppConnector == p2.AppConnector &&
|
||||
p.PostureChecking == p2.PostureChecking &&
|
||||
slices.EqualFunc(p.DriveShares, p2.DriveShares, drive.SharesEqual) &&
|
||||
p.NetfilterKind == p2.NetfilterKind
|
||||
}
|
||||
|
||||
func (au AutoUpdatePrefs) Pretty() string {
|
||||
if au.Apply.EqualBool(true) {
|
||||
return "update=on "
|
||||
}
|
||||
if au.Check {
|
||||
return "update=check "
|
||||
}
|
||||
return "update=off "
|
||||
}
|
||||
|
||||
func (ap AppConnectorPrefs) Pretty() string {
|
||||
if ap.Advertise {
|
||||
return "appconnector=advertise "
|
||||
}
|
||||
return ""
|
||||
}
|
||||
|
||||
func compareIPNets(a, b []netip.Prefix) bool {
|
||||
if len(a) != len(b) {
|
||||
return false
|
||||
}
|
||||
for i := range a {
|
||||
if a[i] != b[i] {
|
||||
return false
|
||||
}
|
||||
}
|
||||
return true
|
||||
}
|
||||
|
||||
func compareStrings(a, b []string) bool {
|
||||
if len(a) != len(b) {
|
||||
return false
|
||||
}
|
||||
for i := range a {
|
||||
if a[i] != b[i] {
|
||||
return false
|
||||
}
|
||||
}
|
||||
return true
|
||||
}
|
||||
|
||||
// NewPrefs returns the default preferences to use.
|
||||
func NewPrefs() *Prefs {
|
||||
// Provide default values for options which might be missing
|
||||
// from the json data for any reason. The json can still
|
||||
// override them to false.
|
||||
return &Prefs{
|
||||
// ControlURL is explicitly not set to signal that
|
||||
// it's not yet configured, which relaxes the CLI "up"
|
||||
// safety net features. It will get set to DefaultControlURL
|
||||
// on first up. Or, if not, DefaultControlURL will be used
|
||||
// later anyway.
|
||||
ControlURL: "",
|
||||
|
||||
RouteAll: true,
|
||||
CorpDNS: true,
|
||||
WantRunning: false,
|
||||
NetfilterMode: preftype.NetfilterOn,
|
||||
NoStatefulFiltering: opt.NewBool(true),
|
||||
AutoUpdate: AutoUpdatePrefs{
|
||||
Check: true,
|
||||
Apply: opt.Bool("unset"),
|
||||
},
|
||||
}
|
||||
}
|
||||
|
||||
// ControlURLOrDefault returns the coordination server's URL base.
|
||||
//
|
||||
// If not configured, or if the configured value is a legacy name equivalent to
|
||||
// the default, then DefaultControlURL is returned instead.
|
||||
func (p PrefsView) ControlURLOrDefault() string {
|
||||
return p.ж.ControlURLOrDefault()
|
||||
}
|
||||
|
||||
// ControlURLOrDefault returns the coordination server's URL base.
|
||||
//
|
||||
// If not configured, or if the configured value is a legacy name equivalent to
|
||||
// the default, then DefaultControlURL is returned instead.
|
||||
func (p *Prefs) ControlURLOrDefault() string {
|
||||
controlURL, err := syspolicy.GetString(syspolicy.ControlURL, p.ControlURL)
|
||||
if err != nil {
|
||||
controlURL = p.ControlURL
|
||||
}
|
||||
|
||||
if controlURL != "" {
|
||||
if controlURL != DefaultControlURL && IsLoginServerSynonym(controlURL) {
|
||||
return DefaultControlURL
|
||||
}
|
||||
return controlURL
|
||||
}
|
||||
return DefaultControlURL
|
||||
}
|
||||
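// Illustrative sketch, not part of the vendored file: the fallback behavior of
// ControlURLOrDefault, assuming no syspolicy override for ControlURL is in
// effect. The self-hosted URL is hypothetical.
//
//	p := &Prefs{}                                // ControlURL unset
//	_ = p.ControlURLOrDefault()                  // DefaultControlURL
//	p.ControlURL = "https://login.tailscale.com" // legacy synonym (see IsLoginServerSynonym)
//	_ = p.ControlURLOrDefault()                  // normalized to DefaultControlURL
//	p.ControlURL = "https://control.example.com" // hypothetical self-hosted server
//	_ = p.ControlURLOrDefault()                  // returned unchanged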
|
||||
// AdminPageURL returns the admin web site URL for the current ControlURL.
|
||||
func (p PrefsView) AdminPageURL() string { return p.ж.AdminPageURL() }
|
||||
|
||||
// AdminPageURL returns the admin web site URL for the current ControlURL.
|
||||
func (p *Prefs) AdminPageURL() string {
|
||||
url := p.ControlURLOrDefault()
|
||||
if IsLoginServerSynonym(url) {
|
||||
// TODO(crawshaw): In future release, make this https://console.tailscale.com
|
||||
url = "https://login.tailscale.com"
|
||||
}
|
||||
return url + "/admin"
|
||||
}
|
||||
|
||||
// AdvertisesExitNode reports whether p is advertising both the v4 and
|
||||
// v6 /0 exit node routes.
|
||||
func (p PrefsView) AdvertisesExitNode() bool { return p.ж.AdvertisesExitNode() }
|
||||
|
||||
// AdvertisesExitNode reports whether p is advertising both the v4 and
|
||||
// v6 /0 exit node routes.
|
||||
func (p *Prefs) AdvertisesExitNode() bool {
|
||||
if p == nil {
|
||||
return false
|
||||
}
|
||||
return tsaddr.ContainsExitRoutes(views.SliceOf(p.AdvertiseRoutes))
|
||||
}
|
||||
|
||||
// SetAdvertiseExitNode mutates p (if non-nil) to add or remove the two
|
||||
// /0 exit node routes.
|
||||
func (p *Prefs) SetAdvertiseExitNode(runExit bool) {
|
||||
if p == nil {
|
||||
return
|
||||
}
|
||||
all := p.AdvertiseRoutes
|
||||
p.AdvertiseRoutes = p.AdvertiseRoutes[:0]
|
||||
for _, r := range all {
|
||||
if r.Bits() != 0 {
|
||||
p.AdvertiseRoutes = append(p.AdvertiseRoutes, r)
|
||||
}
|
||||
}
|
||||
if !runExit {
|
||||
return
|
||||
}
|
||||
p.AdvertiseRoutes = append(p.AdvertiseRoutes,
|
||||
netip.PrefixFrom(netaddr.IPv4(0, 0, 0, 0), 0),
|
||||
netip.PrefixFrom(netip.IPv6Unspecified(), 0))
|
||||
}
|
||||
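// Illustrative sketch, not part of the vendored file: toggling exit-node
// advertisement only adds or removes the two /0 routes; other advertised
// routes survive. The subnet below is hypothetical.
//
//	p := NewPrefs()
//	p.AdvertiseRoutes = []netip.Prefix{netip.MustParsePrefix("10.0.0.0/24")}
//	p.SetAdvertiseExitNode(true)
//	_ = p.AdvertisesExitNode() // true: 0.0.0.0/0 and ::/0 were appended
//	p.SetAdvertiseExitNode(false)
//	_ = p.AdvertisesExitNode() // false: only 10.0.0.0/24 remains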
|
||||
// peerWithTailscaleIP returns the peer in st with the provided
|
||||
// Tailscale IP.
|
||||
func peerWithTailscaleIP(st *ipnstate.Status, ip netip.Addr) (ps *ipnstate.PeerStatus, ok bool) {
|
||||
for _, ps := range st.Peer {
|
||||
for _, ip2 := range ps.TailscaleIPs {
|
||||
if ip == ip2 {
|
||||
return ps, true
|
||||
}
|
||||
}
|
||||
}
|
||||
return nil, false
|
||||
}
|
||||
|
||||
func isRemoteIP(st *ipnstate.Status, ip netip.Addr) bool {
|
||||
for _, selfIP := range st.TailscaleIPs {
|
||||
if ip == selfIP {
|
||||
return false
|
||||
}
|
||||
}
|
||||
return true
|
||||
}
|
||||
|
||||
// ClearExitNode sets the ExitNodeID and ExitNodeIP to their zero values.
|
||||
func (p *Prefs) ClearExitNode() {
|
||||
p.ExitNodeID = ""
|
||||
p.ExitNodeIP = netip.Addr{}
|
||||
}
|
||||
|
||||
// ExitNodeLocalIPError is returned when the requested IP address for an exit
|
||||
// node belongs to the local machine.
|
||||
type ExitNodeLocalIPError struct {
|
||||
hostOrIP string
|
||||
}
|
||||
|
||||
func (e ExitNodeLocalIPError) Error() string {
|
||||
return fmt.Sprintf("cannot use %s as an exit node as it is a local IP address to this machine", e.hostOrIP)
|
||||
}
|
||||
|
||||
func exitNodeIPOfArg(s string, st *ipnstate.Status) (ip netip.Addr, err error) {
|
||||
if s == "" {
|
||||
return ip, os.ErrInvalid
|
||||
}
|
||||
ip, err = netip.ParseAddr(s)
|
||||
if err == nil {
|
||||
// If we're online already and have a netmap, double check that the IP
|
||||
// address specified is valid.
|
||||
if st.BackendState == "Running" {
|
||||
ps, ok := peerWithTailscaleIP(st, ip)
|
||||
if !ok {
|
||||
return ip, fmt.Errorf("no node found in netmap with IP %v", ip)
|
||||
}
|
||||
if !ps.ExitNodeOption {
|
||||
return ip, fmt.Errorf("node %v is not advertising an exit node", ip)
|
||||
}
|
||||
}
|
||||
if !isRemoteIP(st, ip) {
|
||||
return ip, ExitNodeLocalIPError{s}
|
||||
}
|
||||
return ip, nil
|
||||
}
|
||||
match := 0
|
||||
for _, ps := range st.Peer {
|
||||
baseName := dnsname.TrimSuffix(ps.DNSName, st.MagicDNSSuffix)
|
||||
if !strings.EqualFold(s, baseName) && !strings.EqualFold(s, ps.DNSName) {
|
||||
continue
|
||||
}
|
||||
match++
|
||||
if len(ps.TailscaleIPs) == 0 {
|
||||
return ip, fmt.Errorf("node %q has no Tailscale IP?", s)
|
||||
}
|
||||
if !ps.ExitNodeOption {
|
||||
return ip, fmt.Errorf("node %q is not advertising an exit node", s)
|
||||
}
|
||||
ip = ps.TailscaleIPs[0]
|
||||
}
|
||||
switch match {
|
||||
case 0:
|
||||
return ip, fmt.Errorf("invalid value %q for --exit-node; must be IP or unique node name", s)
|
||||
case 1:
|
||||
if !isRemoteIP(st, ip) {
|
||||
return ip, ExitNodeLocalIPError{s}
|
||||
}
|
||||
return ip, nil
|
||||
default:
|
||||
return ip, fmt.Errorf("ambiguous exit node name %q", s)
|
||||
}
|
||||
}
|
||||
|
||||
// SetExitNodeIP validates and sets the ExitNodeIP from a user-provided string
|
||||
// specifying either an IP address or a MagicDNS base name ("foo", as opposed to
|
||||
// "foo.bar.beta.tailscale.net"). This method does not mutate ExitNodeID and
|
||||
// will fail if ExitNodeID is already set.
|
||||
func (p *Prefs) SetExitNodeIP(s string, st *ipnstate.Status) error {
|
||||
if !p.ExitNodeID.IsZero() {
|
||||
return ErrExitNodeIDAlreadySet
|
||||
}
|
||||
ip, err := exitNodeIPOfArg(s, st)
|
||||
if err == nil {
|
||||
p.ExitNodeIP = ip
|
||||
}
|
||||
return err
|
||||
}
|
||||
|
||||
// ShouldSSHBeRunning reports whether the SSH server should be running based on
|
||||
// the prefs.
|
||||
func (p PrefsView) ShouldSSHBeRunning() bool {
|
||||
return p.Valid() && p.ж.ShouldSSHBeRunning()
|
||||
}
|
||||
|
||||
// ShouldSSHBeRunning reports whether the SSH server should be running based on
|
||||
// the prefs.
|
||||
func (p *Prefs) ShouldSSHBeRunning() bool {
|
||||
return p.WantRunning && p.RunSSH
|
||||
}
|
||||
|
||||
// ShouldWebClientBeRunning reports whether the web client server should be running based on
|
||||
// the prefs.
|
||||
func (p PrefsView) ShouldWebClientBeRunning() bool {
|
||||
return p.Valid() && p.ж.ShouldWebClientBeRunning()
|
||||
}
|
||||
|
||||
// ShouldWebClientBeRunning reports whether the web client server should be running based on
|
||||
// the prefs.
|
||||
func (p *Prefs) ShouldWebClientBeRunning() bool {
|
||||
return p.WantRunning && p.RunWebClient
|
||||
}
|
||||
|
||||
// PrefsFromBytes deserializes Prefs from a JSON blob b into base. Values in
|
||||
// base are preserved, unless they are populated in the JSON blob.
|
||||
func PrefsFromBytes(b []byte, base *Prefs) error {
|
||||
if len(b) == 0 {
|
||||
return nil
|
||||
}
|
||||
|
||||
return json.Unmarshal(b, base)
|
||||
}
|
||||
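// Illustrative sketch, not part of the vendored file: a ToBytes/PrefsFromBytes
// round trip. Fields absent from the JSON keep whatever is already in base,
// which is why callers typically start from NewPrefs. The hostname is
// hypothetical.
//
//	orig := NewPrefs()
//	orig.Hostname = "kiosk-7"
//	blob := orig.ToBytes()
//
//	restored := NewPrefs()
//	if err := PrefsFromBytes(blob, restored); err != nil {
//		// handle a malformed state blob
//	}
//	// restored.Hostname == "kiosk-7"; defaults such as CorpDNS remain true.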
|
||||
var jsonEscapedZero = []byte(`\u0000`)
|
||||
|
||||
// LoadPrefsWindows loads a legacy relaynode config file into Prefs with
|
||||
// sensible migration defaults set. Windows-only.
|
||||
func LoadPrefsWindows(filename string) (*Prefs, error) {
|
||||
data, err := os.ReadFile(filename)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("LoadPrefs open: %w", err) // err includes path
|
||||
}
|
||||
if bytes.Contains(data, jsonEscapedZero) {
|
||||
// Tailscale 1.2.0 - 1.2.8 on Windows had a memory corruption bug
|
||||
// in the backend process that ended up sending NULL bytes over JSON
|
||||
// to the frontend which wrote them out to JSON files on disk.
|
||||
// So if we see one, treat it as corrupt and the user will need
|
||||
// to log in again. (better than crashing)
|
||||
return nil, os.ErrNotExist
|
||||
}
|
||||
p := NewPrefs()
|
||||
if err := PrefsFromBytes(data, p); err != nil {
|
||||
return nil, fmt.Errorf("LoadPrefs(%q) decode: %w", filename, err)
|
||||
}
|
||||
return p, nil
|
||||
}
|
||||
|
||||
func SavePrefs(filename string, p *Prefs) {
|
||||
log.Printf("Saving prefs %v %v\n", filename, p.Pretty())
|
||||
data := p.ToBytes()
|
||||
os.MkdirAll(filepath.Dir(filename), 0700)
|
||||
if err := atomicfile.WriteFile(filename, data, 0600); err != nil {
|
||||
log.Printf("SavePrefs: %v\n", err)
|
||||
}
|
||||
}
|
||||
|
||||
// ProfileID is an auto-generated system-wide unique identifier for a login
|
||||
// profile. It is a 4 character hex string like "1ab3".
|
||||
type ProfileID string
|
||||
|
||||
// WindowsUserID is a userid (suitable for passing to ipnauth.LookupUserFromID
|
||||
// or os/user.LookupId) but only set on Windows. It's empty on all other
|
||||
// platforms, unless envknob.GOOS is in use, making Linux act like Windows for
|
||||
// tests.
|
||||
type WindowsUserID string
|
||||
|
||||
// NetworkProfile is a subset of netmap.NetworkMap
|
||||
// that should be saved with each user profile.
|
||||
type NetworkProfile struct {
|
||||
MagicDNSName string
|
||||
DomainName string
|
||||
}
|
||||
|
||||
// RequiresBackfill reports whether this object is missing data that it is
|
||||
// expected to have. This struct is a later addition to LoginProfile and
|
||||
// this method can be checked to see if it's been backfilled to the current
|
||||
// expectation or not. Note that for now, it just checks if the struct is empty.
|
||||
// In the future, if we have new optional fields, this method can be changed to
|
||||
// do more explicit checks to return whether it's apt for a backfill or not.
|
||||
func (n NetworkProfile) RequiresBackfill() bool {
|
||||
return n == NetworkProfile{}
|
||||
}
|
||||
|
||||
// LoginProfile represents a single login profile as managed
|
||||
// by the ProfileManager.
|
||||
type LoginProfile struct {
|
||||
// ID is a unique identifier for this profile.
|
||||
// It is assigned on creation and never changes.
|
||||
// It may seem redundant to have both ID and UserProfile.ID
|
||||
// but they are different things. UserProfile.ID may change
|
||||
// over time (e.g. if a device is tagged).
|
||||
ID ProfileID
|
||||
|
||||
// Name is the user-visible name of this profile.
|
||||
// It is filled in from the UserProfile.LoginName field.
|
||||
Name string
|
||||
|
||||
// NetworkProfile is a subset of netmap.NetworkMap that we
|
||||
// store to remember information about the tailnet that this
|
||||
// profile was logged in with.
|
||||
//
|
||||
// This field was added on 2023-11-17.
|
||||
NetworkProfile NetworkProfile
|
||||
|
||||
// Key is the StateKey under which the profile is stored.
|
||||
// It is assigned once at profile creation time and never changes.
|
||||
Key StateKey
|
||||
|
||||
// UserProfile is the server provided UserProfile for this profile.
|
||||
// This is updated whenever the server provides a new UserProfile.
|
||||
UserProfile tailcfg.UserProfile
|
||||
|
||||
// NodeID is the NodeID of the node that this profile is logged into.
|
||||
// This should be stable across tagging and untagging nodes.
|
||||
// It may seem redundant to check against both the UserProfile.UserID
|
||||
// and the NodeID. However the NodeID can change if the node is deleted
|
||||
// from the admin panel.
|
||||
NodeID tailcfg.StableNodeID
|
||||
|
||||
// LocalUserID is the user ID of the user who created this profile.
|
||||
// It is only relevant on Windows where we have a multi-user system.
|
||||
// It is assigned once at profile creation time and never changes.
|
||||
LocalUserID WindowsUserID
|
||||
|
||||
// ControlURL is the URL of the control server that this profile is logged
|
||||
// into.
|
||||
ControlURL string
|
||||
}
|
||||
643
vendor/tailscale.com/ipn/serve.go
generated
vendored
Normal file
643
vendor/tailscale.com/ipn/serve.go
generated
vendored
Normal file
@@ -0,0 +1,643 @@
|
||||
// Copyright (c) Tailscale Inc & AUTHORS
|
||||
// SPDX-License-Identifier: BSD-3-Clause
|
||||
|
||||
package ipn
|
||||
|
||||
import (
|
||||
"errors"
|
||||
"fmt"
|
||||
"net"
|
||||
"net/netip"
|
||||
"net/url"
|
||||
"slices"
|
||||
"strconv"
|
||||
"strings"
|
||||
|
||||
"tailscale.com/ipn/ipnstate"
|
||||
"tailscale.com/tailcfg"
|
||||
"tailscale.com/util/mak"
|
||||
)
|
||||
|
||||
// ServeConfigKey returns a StateKey that stores the
|
||||
// JSON-encoded ServeConfig for a config profile.
|
||||
func ServeConfigKey(profileID ProfileID) StateKey {
|
||||
return StateKey("_serve/" + profileID)
|
||||
}
|
||||
|
||||
// ServeConfig is the JSON type stored in the StateStore for
|
||||
// StateKey "_serve/$PROFILE_ID" as returned by ServeConfigKey.
|
||||
type ServeConfig struct {
|
||||
// TCP are the list of TCP port numbers that tailscaled should handle for
|
||||
// the Tailscale IP addresses. (not subnet routers, etc)
|
||||
TCP map[uint16]*TCPPortHandler `json:",omitempty"`
|
||||
|
||||
// Web maps from "$SNI_NAME:$PORT" to a set of HTTP handlers
|
||||
// keyed by mount point ("/", "/foo", etc)
|
||||
Web map[HostPort]*WebServerConfig `json:",omitempty"`
|
||||
|
||||
// AllowFunnel is the set of SNI:port values for which funnel
|
||||
// traffic is allowed, from trusted ingress peers.
|
||||
AllowFunnel map[HostPort]bool `json:",omitempty"`
|
||||
|
||||
// Foreground is a map of an IPN Bus session ID to an alternate foreground
|
||||
// serve config that's valid for the life of that WatchIPNBus session ID.
|
||||
// This allows the config to specify ephemeral configs that are
|
||||
// used in the CLI's foreground mode to ensure that ungraceful shutdowns
|
||||
// of either the client or the LocalBackend do not expose ports
|
||||
// that users are not aware of.
|
||||
Foreground map[string]*ServeConfig `json:",omitempty"`
|
||||
|
||||
// ETag is the checksum of the serve config that's populated
|
||||
// by the LocalClient through the HTTP ETag header during a
|
||||
// GetServeConfig request and is translated to an If-Match header
|
||||
// during a SetServeConfig request.
|
||||
ETag string `json:"-"`
|
||||
}
|
||||
|
||||
// HostPort is an SNI name and port number, joined by a colon.
|
||||
// There is no implicit port 443. It must contain a colon.
|
||||
type HostPort string
|
||||
|
||||
// Port extracts just the port number from hp.
|
||||
// An error is reported if hp does not
|
||||
// end in a valid numeric port.
|
||||
func (hp HostPort) Port() (uint16, error) {
|
||||
_, port, err := net.SplitHostPort(string(hp))
|
||||
if err != nil {
|
||||
return 0, err
|
||||
}
|
||||
port16, err := strconv.ParseUint(port, 10, 16)
|
||||
if err != nil {
|
||||
return 0, err
|
||||
}
|
||||
return uint16(port16), nil
|
||||
}
|
||||
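// Illustrative sketch, not part of the vendored file: HostPort always carries
// an explicit port, so Port is a thin wrapper over net.SplitHostPort. The host
// name is hypothetical.
//
//	port, err := HostPort("foo.example.ts.net:8443").Port() // 8443, nil
//	_, _ = port, err
//	_, err = HostPort("foo.example.ts.net").Port() // no colon: non-nil error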
|
||||
// A FunnelConn wraps a net.Conn that is coming over a
|
||||
// Funnel connection. It can be used to determine further
|
||||
// information about the connection, like the source address
|
||||
// and the target SNI name.
|
||||
type FunnelConn struct {
|
||||
// Conn is the underlying connection.
|
||||
net.Conn
|
||||
|
||||
// Target is what was presented in the "Tailscale-Ingress-Target"
|
||||
// HTTP header.
|
||||
Target HostPort
|
||||
|
||||
// Src is the source address of the connection.
|
||||
// This is the address of the client that initiated the
|
||||
// connection, not the address of the Tailscale Funnel
|
||||
// node which is relaying the connection. That address
|
||||
// can be found in Conn.RemoteAddr.
|
||||
Src netip.AddrPort
|
||||
}
|
||||
|
||||
// WebServerConfig describes a web server's configuration.
|
||||
type WebServerConfig struct {
|
||||
Handlers map[string]*HTTPHandler // mountPoint => handler
|
||||
}
|
||||
|
||||
// TCPPortHandler describes what to do when handling a TCP
|
||||
// connection.
|
||||
type TCPPortHandler struct {
|
||||
// HTTPS, if true, means that tailscaled should handle this connection as an
|
||||
// HTTPS request as configured by ServeConfig.Web.
|
||||
//
|
||||
// It is mutually exclusive with TCPForward.
|
||||
HTTPS bool `json:",omitempty"`
|
||||
|
||||
// HTTP, if true, means that tailscaled should handle this connection as an
|
||||
// HTTP request as configured by ServeConfig.Web.
|
||||
//
|
||||
// It is mutually exclusive with TCPForward.
|
||||
HTTP bool `json:",omitempty"`
|
||||
|
||||
// TCPForward is the IP:port to forward TCP connections to.
|
||||
// Whether or not TLS is terminated by tailscaled depends on
|
||||
// TerminateTLS.
|
||||
//
|
||||
// It is mutually exclusive with HTTPS.
|
||||
TCPForward string `json:",omitempty"`
|
||||
|
||||
// TerminateTLS, if non-empty, means that tailscaled should terminate the
|
||||
// TLS connections before forwarding them to TCPForward, permitting only the
|
||||
// SNI name with this value. It is only used if TCPForward is non-empty.
|
||||
// (the HTTPS mode uses ServeConfig.Web)
|
||||
TerminateTLS string `json:",omitempty"`
|
||||
}
|
||||
|
||||
// HTTPHandler is either a path or a proxy to serve.
|
||||
type HTTPHandler struct {
|
||||
// Exactly one of the following may be set.
|
||||
|
||||
Path string `json:",omitempty"` // absolute path to directory or file to serve
|
||||
Proxy string `json:",omitempty"` // http://localhost:3000/, localhost:3030, 3030
|
||||
|
||||
Text string `json:",omitempty"` // plaintext to serve (primarily for testing)
|
||||
|
||||
// TODO(bradfitz): bool to not enumerate directories? TTL on mapping for
|
||||
// temporary ones? Error codes? Redirects?
|
||||
}
|
||||
|
||||
// WebHandlerExists reports whether the ServeConfig Web handler exists for
|
||||
// the given host:port and mount point.
|
||||
func (sc *ServeConfig) WebHandlerExists(hp HostPort, mount string) bool {
|
||||
h := sc.GetWebHandler(hp, mount)
|
||||
return h != nil
|
||||
}
|
||||
|
||||
// GetWebHandler returns the HTTPHandler for the given host:port and mount point.
|
||||
// Returns nil if the handler does not exist.
|
||||
func (sc *ServeConfig) GetWebHandler(hp HostPort, mount string) *HTTPHandler {
|
||||
if sc == nil || sc.Web[hp] == nil {
|
||||
return nil
|
||||
}
|
||||
return sc.Web[hp].Handlers[mount]
|
||||
}
|
||||
|
||||
// GetTCPPortHandler returns the TCPPortHandler for the given port.
|
||||
// If the port is not configured, nil is returned.
|
||||
func (sc *ServeConfig) GetTCPPortHandler(port uint16) *TCPPortHandler {
|
||||
if sc == nil {
|
||||
return nil
|
||||
}
|
||||
return sc.TCP[port]
|
||||
}
|
||||
|
||||
// HasPathHandler reports whether ServeConfig has at least
|
||||
// one path handler, including foreground configs.
|
||||
func (sc *ServeConfig) HasPathHandler() bool {
|
||||
if sc.Web != nil {
|
||||
for _, webServerConfig := range sc.Web {
|
||||
for _, httpHandler := range webServerConfig.Handlers {
|
||||
if httpHandler.Path != "" {
|
||||
return true
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
if sc.Foreground != nil {
|
||||
for _, fgConfig := range sc.Foreground {
|
||||
if fgConfig.HasPathHandler() {
|
||||
return true
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
return false
|
||||
}
|
||||
|
||||
// IsTCPForwardingAny reports whether ServeConfig is currently forwarding in
|
||||
// TCPForward mode on any port. This is exclusive of Web/HTTPS serving.
|
||||
func (sc *ServeConfig) IsTCPForwardingAny() bool {
|
||||
if sc == nil || len(sc.TCP) == 0 {
|
||||
return false
|
||||
}
|
||||
for _, h := range sc.TCP {
|
||||
if h.TCPForward != "" {
|
||||
return true
|
||||
}
|
||||
}
|
||||
return false
|
||||
}
|
||||
|
||||
// IsTCPForwardingOnPort reports whether ServeConfig is currently forwarding
|
||||
// in TCPForward mode on the given port. This is exclusive of Web/HTTPS serving.
|
||||
func (sc *ServeConfig) IsTCPForwardingOnPort(port uint16) bool {
|
||||
if sc == nil || sc.TCP[port] == nil {
|
||||
return false
|
||||
}
|
||||
return !sc.IsServingWeb(port)
|
||||
}
|
||||
|
||||
// IsServingWeb reports whether ServeConfig is currently serving Web
|
||||
// (HTTP/HTTPS) on the given port. This is exclusive of TCPForwarding.
|
||||
func (sc *ServeConfig) IsServingWeb(port uint16) bool {
|
||||
return sc.IsServingHTTP(port) || sc.IsServingHTTPS(port)
|
||||
}
|
||||
|
||||
// IsServingHTTPS reports whether ServeConfig is currently serving HTTPS on
|
||||
// the given port. This is exclusive of HTTP and TCPForwarding.
|
||||
func (sc *ServeConfig) IsServingHTTPS(port uint16) bool {
|
||||
if sc == nil || sc.TCP[port] == nil {
|
||||
return false
|
||||
}
|
||||
return sc.TCP[port].HTTPS
|
||||
}
|
||||
|
||||
// IsServingHTTP reports whether ServeConfig is currently serving HTTP on the
|
||||
// given port. This is exclusive of HTTPS and TCPForwarding.
|
||||
func (sc *ServeConfig) IsServingHTTP(port uint16) bool {
|
||||
if sc == nil || sc.TCP[port] == nil {
|
||||
return false
|
||||
}
|
||||
return sc.TCP[port].HTTP
|
||||
}
|
||||
|
||||
// FindConfig finds a config that contains the given port, which can be
|
||||
// the top level background config or an inner foreground one.
|
||||
// The second result is true if it's foreground.
|
||||
func (sc *ServeConfig) FindConfig(port uint16) (*ServeConfig, bool) {
|
||||
if sc == nil {
|
||||
return nil, false
|
||||
}
|
||||
if _, ok := sc.TCP[port]; ok {
|
||||
return sc, false
|
||||
}
|
||||
for _, sc := range sc.Foreground {
|
||||
if _, ok := sc.TCP[port]; ok {
|
||||
return sc, true
|
||||
}
|
||||
}
|
||||
return nil, false
|
||||
}
|
||||
|
||||
// SetWebHandler sets the given HTTPHandler at the specified host, port,
|
||||
// and mount in the serve config. sc.TCP is also updated to reflect web
|
||||
// serving usage of the given port.
|
||||
func (sc *ServeConfig) SetWebHandler(handler *HTTPHandler, host string, port uint16, mount string, useTLS bool) {
|
||||
if sc == nil {
|
||||
sc = new(ServeConfig)
|
||||
}
|
||||
mak.Set(&sc.TCP, port, &TCPPortHandler{HTTPS: useTLS, HTTP: !useTLS})
|
||||
|
||||
hp := HostPort(net.JoinHostPort(host, strconv.Itoa(int(port))))
|
||||
if _, ok := sc.Web[hp]; !ok {
|
||||
mak.Set(&sc.Web, hp, new(WebServerConfig))
|
||||
}
|
||||
mak.Set(&sc.Web[hp].Handlers, mount, handler)
|
||||
|
||||
// TODO(tylersmalley): handle multiple web handlers from foreground mode
|
||||
for k, v := range sc.Web[hp].Handlers {
|
||||
if v == handler {
|
||||
continue
|
||||
}
|
||||
// If the new mount point ends in / and another mount point
|
||||
// shares the same prefix, remove the other handler.
|
||||
// (e.g. /foo/ overwrites /foo)
|
||||
// The opposite example is also handled.
|
||||
m1 := strings.TrimSuffix(mount, "/")
|
||||
m2 := strings.TrimSuffix(k, "/")
|
||||
if m1 == m2 {
|
||||
delete(sc.Web[hp].Handlers, k)
|
||||
}
|
||||
}
|
||||
}
|
||||
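// Illustrative sketch, not part of the vendored file: building a minimal serve
// config that serves a local directory over HTTPS on port 443. The host name
// and path are hypothetical.
//
//	sc := new(ServeConfig)
//	sc.SetWebHandler(&HTTPHandler{Path: "/srv/www"}, "foo.example.ts.net", 443, "/", true)
//	_ = sc.IsServingHTTPS(443)                             // true
//	_ = sc.WebHandlerExists("foo.example.ts.net:443", "/") // true
//	_ = sc.HasPathHandler()                                // true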
|
||||
// SetTCPForwarding sets the fwdAddr (IP:port form) to which to forward
|
||||
// connections from the given port. If terminateTLS is true, TLS connections
|
||||
// are terminated with only the given host name permitted before passing them
|
||||
// to the fwdAddr.
|
||||
func (sc *ServeConfig) SetTCPForwarding(port uint16, fwdAddr string, terminateTLS bool, host string) {
|
||||
if sc == nil {
|
||||
sc = new(ServeConfig)
|
||||
}
|
||||
mak.Set(&sc.TCP, port, &TCPPortHandler{TCPForward: fwdAddr})
|
||||
if terminateTLS {
|
||||
sc.TCP[port].TerminateTLS = host
|
||||
}
|
||||
}
|
||||
|
||||
// SetFunnel sets the sc.AllowFunnel value for the given host and port.
|
||||
func (sc *ServeConfig) SetFunnel(host string, port uint16, setOn bool) {
|
||||
if sc == nil {
|
||||
sc = new(ServeConfig)
|
||||
}
|
||||
hp := HostPort(net.JoinHostPort(host, strconv.Itoa(int(port))))
|
||||
|
||||
// TODO(tylersmalley): should ensure there is no other conflicting funnel
|
||||
// TODO(tylersmalley): add error handling for if toggling for existing sc
|
||||
if setOn {
|
||||
mak.Set(&sc.AllowFunnel, hp, true)
|
||||
} else if _, exists := sc.AllowFunnel[hp]; exists {
|
||||
delete(sc.AllowFunnel, hp)
|
||||
// Clear map mostly for testing.
|
||||
if len(sc.AllowFunnel) == 0 {
|
||||
sc.AllowFunnel = nil
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// RemoveWebHandler deletes the web handlers at all of the given mount points
|
||||
// for the provided host and port in the serve config. If cleanupFunnel is
|
||||
// true, this also removes the funnel value for this port if no handlers remain.
|
||||
func (sc *ServeConfig) RemoveWebHandler(host string, port uint16, mounts []string, cleanupFunnel bool) {
|
||||
hp := HostPort(net.JoinHostPort(host, strconv.Itoa(int(port))))
|
||||
|
||||
// Delete existing handler, then cascade delete if empty.
|
||||
for _, m := range mounts {
|
||||
delete(sc.Web[hp].Handlers, m)
|
||||
}
|
||||
if len(sc.Web[hp].Handlers) == 0 {
|
||||
delete(sc.Web, hp)
|
||||
delete(sc.TCP, port)
|
||||
if cleanupFunnel {
|
||||
delete(sc.AllowFunnel, hp) // disable funnel if no mounts remain for the port
|
||||
}
|
||||
}
|
||||
|
||||
// Clear empty maps, mostly for testing.
|
||||
if len(sc.Web) == 0 {
|
||||
sc.Web = nil
|
||||
}
|
||||
if len(sc.TCP) == 0 {
|
||||
sc.TCP = nil
|
||||
}
|
||||
if len(sc.AllowFunnel) == 0 {
|
||||
sc.AllowFunnel = nil
|
||||
}
|
||||
}
|
||||
|
||||
// RemoveTCPForwarding deletes the TCP forwarding configuration for the given
|
||||
// port from the serve config.
|
||||
func (sc *ServeConfig) RemoveTCPForwarding(port uint16) {
|
||||
delete(sc.TCP, port)
|
||||
if len(sc.TCP) == 0 {
|
||||
sc.TCP = nil
|
||||
}
|
||||
}
|
||||
|
||||
// IsFunnelOn reports whether ServeConfig is currently allowing funnel
|
||||
// traffic for any host:port.
|
||||
//
|
||||
// View version of ServeConfig.IsFunnelOn.
|
||||
func (v ServeConfigView) IsFunnelOn() bool { return v.ж.IsFunnelOn() }
|
||||
|
||||
// IsFunnelOn reports whether ServeConfig is currently allowing funnel
|
||||
// traffic for any host:port.
|
||||
func (sc *ServeConfig) IsFunnelOn() bool {
|
||||
if sc == nil {
|
||||
return false
|
||||
}
|
||||
for _, b := range sc.AllowFunnel {
|
||||
if b {
|
||||
return true
|
||||
}
|
||||
}
|
||||
return false
|
||||
}
|
||||
|
||||
// CheckFunnelAccess checks whether Funnel access is allowed for the given node
|
||||
// and port.
|
||||
// It checks:
|
||||
// 1. HTTPS is enabled on the tailnet
|
||||
// 2. the node has the "funnel" nodeAttr
|
||||
// 3. the port is allowed for Funnel
|
||||
//
|
||||
// The node arg should be the ipnstate.Status.Self node.
|
||||
func CheckFunnelAccess(port uint16, node *ipnstate.PeerStatus) error {
|
||||
if err := NodeCanFunnel(node); err != nil {
|
||||
return err
|
||||
}
|
||||
return CheckFunnelPort(port, node)
|
||||
}
|
||||
|
||||
// NodeCanFunnel returns an error if the given node is not configured to allow
|
||||
// for Tailscale Funnel usage.
|
||||
func NodeCanFunnel(node *ipnstate.PeerStatus) error {
|
||||
if !node.HasCap(tailcfg.CapabilityHTTPS) {
|
||||
return errors.New("Funnel not available; HTTPS must be enabled. See https://tailscale.com/s/https.")
|
||||
}
|
||||
if !node.HasCap(tailcfg.NodeAttrFunnel) {
|
||||
return errors.New("Funnel not available; \"funnel\" node attribute not set. See https://tailscale.com/s/no-funnel.")
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// CheckFunnelPort checks whether the given port is allowed for Funnel.
|
||||
// It uses the tailcfg.CapabilityFunnelPorts nodeAttr to determine the allowed
|
||||
// ports.
|
||||
func CheckFunnelPort(wantedPort uint16, node *ipnstate.PeerStatus) error {
|
||||
deny := func(allowedPorts string) error {
|
||||
if allowedPorts == "" {
|
||||
return fmt.Errorf("port %d is not allowed for funnel", wantedPort)
|
||||
}
|
||||
return fmt.Errorf("port %d is not allowed for funnel; allowed ports are: %v", wantedPort, allowedPorts)
|
||||
}
|
||||
var portsStr string
|
||||
parseAttr := func(attr string) (string, error) {
|
||||
u, err := url.Parse(attr)
|
||||
if err != nil {
|
||||
return "", deny("")
|
||||
}
|
||||
portsStr := u.Query().Get("ports")
|
||||
if portsStr == "" {
|
||||
return "", deny("")
|
||||
}
|
||||
u.RawQuery = ""
|
||||
if u.String() != string(tailcfg.CapabilityFunnelPorts) {
|
||||
return "", deny("")
|
||||
}
|
||||
return portsStr, nil
|
||||
}
|
||||
for attr := range node.CapMap {
|
||||
attr := string(attr)
|
||||
if !strings.HasPrefix(attr, string(tailcfg.CapabilityFunnelPorts)) {
|
||||
continue
|
||||
}
|
||||
var err error
|
||||
portsStr, err = parseAttr(attr)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
break
|
||||
}
|
||||
if portsStr == "" {
|
||||
for _, attr := range node.Capabilities {
|
||||
attr := string(attr)
|
||||
if !strings.HasPrefix(attr, string(tailcfg.CapabilityFunnelPorts)) {
|
||||
continue
|
||||
}
|
||||
var err error
|
||||
portsStr, err = parseAttr(attr)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
break
|
||||
}
|
||||
}
|
||||
if portsStr == "" {
|
||||
return deny("")
|
||||
}
|
||||
wantedPortString := strconv.Itoa(int(wantedPort))
|
||||
for _, ps := range strings.Split(portsStr, ",") {
|
||||
if ps == "" {
|
||||
continue
|
||||
}
|
||||
first, last, ok := strings.Cut(ps, "-")
|
||||
if !ok {
|
||||
if first == wantedPortString {
|
||||
return nil
|
||||
}
|
||||
continue
|
||||
}
|
||||
fp, err := strconv.ParseUint(first, 10, 16)
|
||||
if err != nil {
|
||||
continue
|
||||
}
|
||||
lp, err := strconv.ParseUint(last, 10, 16)
|
||||
if err != nil {
|
||||
continue
|
||||
}
|
||||
pr := tailcfg.PortRange{First: uint16(fp), Last: uint16(lp)}
|
||||
if pr.Contains(wantedPort) {
|
||||
return nil
|
||||
}
|
||||
}
|
||||
return deny(portsStr)
|
||||
}
|
||||
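// Illustrative sketch, not part of the vendored file: the funnel-ports
// capability is a URL-shaped attribute whose "ports" query parameter carries a
// comma-separated list of ports and port ranges. Here node is assumed to be an
// *ipnstate.PeerStatus whose CapMap holds a hypothetical attribute of the form
// <tailcfg.CapabilityFunnelPorts>?ports=443,8443-8445.
//
//	_ = CheckFunnelPort(443, node)  // nil: 443 is listed explicitly
//	_ = CheckFunnelPort(8444, node) // nil: falls inside the 8443-8445 range
//	_ = CheckFunnelPort(80, node)   // error naming the allowed ports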
|
||||
// ExpandProxyTargetValue expands the supported target values to be proxied
|
||||
// allowing for input values to be a port number, a partial URL, or a full URL
|
||||
// including a path.
|
||||
//
|
||||
// examples:
|
||||
// - 3000
|
||||
// - localhost:3000
|
||||
// - tcp://localhost:3000
|
||||
// - http://localhost:3000
|
||||
// - https://localhost:3000
|
||||
// - https-insecure://localhost:3000
|
||||
// - https-insecure://localhost:3000/foo
|
||||
func ExpandProxyTargetValue(target string, supportedSchemes []string, defaultScheme string) (string, error) {
|
||||
const host = "127.0.0.1"
|
||||
|
||||
// support target being a port number
|
||||
if port, err := strconv.ParseUint(target, 10, 16); err == nil {
|
||||
return fmt.Sprintf("%s://%s:%d", defaultScheme, host, port), nil
|
||||
}
|
||||
|
||||
// prepend scheme if not present
|
||||
if !strings.Contains(target, "://") {
|
||||
target = defaultScheme + "://" + target
|
||||
}
|
||||
|
||||
// make sure we can parse the target
|
||||
u, err := url.ParseRequestURI(target)
|
||||
if err != nil {
|
||||
return "", fmt.Errorf("invalid URL %w", err)
|
||||
}
|
||||
|
||||
// ensure a supported scheme
|
||||
if !slices.Contains(supportedSchemes, u.Scheme) {
|
||||
return "", fmt.Errorf("must be a URL starting with one of the supported schemes: %v", supportedSchemes)
|
||||
}
|
||||
|
||||
// validate the host.
|
||||
switch u.Hostname() {
|
||||
case "localhost", "127.0.0.1":
|
||||
default:
|
||||
return "", errors.New("only localhost or 127.0.0.1 proxies are currently supported")
|
||||
}
|
||||
|
||||
// validate the port
|
||||
port, err := strconv.ParseUint(u.Port(), 10, 16)
|
||||
if err != nil || port == 0 {
|
||||
return "", fmt.Errorf("invalid port %q", u.Port())
|
||||
}
|
||||
|
||||
u.Host = fmt.Sprintf("%s:%d", u.Hostname(), port)
|
||||
|
||||
return u.String(), nil
|
||||
}
|
||||
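// Illustrative sketch, not part of the vendored file: how the proxy target
// shorthand expands, assuming an allow-list of "http" and "https" schemes and
// a default scheme of "http".
//
//	schemes := []string{"http", "https"}
//	t, _ := ExpandProxyTargetValue("3000", schemes, "http")          // "http://127.0.0.1:3000"
//	t, _ = ExpandProxyTargetValue("localhost:3000", schemes, "http") // "http://localhost:3000"
//	_, err := ExpandProxyTargetValue("example.com:3000", schemes, "http")
//	_, _ = t, err // err != nil: only localhost or 127.0.0.1 targets are accepted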
|
||||
// RangeOverTCPs ranges over both background and foreground TCPs.
|
||||
// If the returned bool from the given f is false, then this function stops
|
||||
// iterating immediately and does not check other foreground configs.
|
||||
func (v ServeConfigView) RangeOverTCPs(f func(port uint16, _ TCPPortHandlerView) bool) {
|
||||
parentCont := true
|
||||
v.TCP().Range(func(k uint16, v TCPPortHandlerView) (cont bool) {
|
||||
parentCont = f(k, v)
|
||||
return parentCont
|
||||
})
|
||||
v.Foreground().Range(func(k string, v ServeConfigView) (cont bool) {
|
||||
if !parentCont {
|
||||
return false
|
||||
}
|
||||
v.TCP().Range(func(k uint16, v TCPPortHandlerView) (cont bool) {
|
||||
parentCont = f(k, v)
|
||||
return parentCont
|
||||
})
|
||||
return parentCont
|
||||
})
|
||||
}
|
||||
|
||||
// RangeOverWebs ranges over both background and foreground Webs.
|
||||
// If the returned bool from the given f is false, then this function stops
|
||||
// iterating immediately and does not check other foreground configs.
|
||||
func (v ServeConfigView) RangeOverWebs(f func(_ HostPort, conf WebServerConfigView) bool) {
|
||||
parentCont := true
|
||||
v.Web().Range(func(k HostPort, v WebServerConfigView) (cont bool) {
|
||||
parentCont = f(k, v)
|
||||
return parentCont
|
||||
})
|
||||
v.Foreground().Range(func(k string, v ServeConfigView) (cont bool) {
|
||||
if !parentCont {
|
||||
return false
|
||||
}
|
||||
v.Web().Range(func(k HostPort, v WebServerConfigView) (cont bool) {
|
||||
parentCont = f(k, v)
|
||||
return parentCont
|
||||
})
|
||||
return parentCont
|
||||
})
|
||||
}
|
||||
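// Illustrative sketch, not part of the vendored file: collecting every TCP
// forward across the background and foreground configs. It assumes scv is a
// ServeConfigView obtained from the backend, that fmt is imported, and that
// the generated view type exposes the usual field-name accessor methods.
//
//	var forwards []string
//	scv.RangeOverTCPs(func(port uint16, h TCPPortHandlerView) bool {
//		if fwd := h.TCPForward(); fwd != "" {
//			forwards = append(forwards, fmt.Sprintf("%d -> %s", port, fwd))
//		}
//		return true // keep iterating
//	})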
|
||||
// FindTCP returns the first TCP handler that matches the given port. It
|
||||
// prefers a foreground match, falling back to a background search if none
|
||||
// exists.
|
||||
func (v ServeConfigView) FindTCP(port uint16) (res TCPPortHandlerView, ok bool) {
|
||||
v.Foreground().Range(func(_ string, v ServeConfigView) (cont bool) {
|
||||
res, ok = v.TCP().GetOk(port)
|
||||
return !ok
|
||||
})
|
||||
if ok {
|
||||
return res, ok
|
||||
}
|
||||
return v.TCP().GetOk(port)
|
||||
}
|
||||
|
||||
// FindWeb returns the first web config that matches the given HostPort. It
|
||||
// prefers a foreground match, falling back to a background search if none
|
||||
// exists.
|
||||
func (v ServeConfigView) FindWeb(hp HostPort) (res WebServerConfigView, ok bool) {
|
||||
v.Foreground().Range(func(_ string, v ServeConfigView) (cont bool) {
|
||||
res, ok = v.Web().GetOk(hp)
|
||||
return !ok
|
||||
})
|
||||
if ok {
|
||||
return res, ok
|
||||
}
|
||||
return v.Web().GetOk(hp)
|
||||
}
|
||||
|
||||
// HasAllowFunnel returns whether this config has at least one AllowFunnel
|
||||
// set in the background or foreground configs.
|
||||
func (v ServeConfigView) HasAllowFunnel() bool {
|
||||
return v.AllowFunnel().Len() > 0 || func() bool {
|
||||
var exists bool
|
||||
v.Foreground().Range(func(k string, v ServeConfigView) (cont bool) {
|
||||
exists = v.AllowFunnel().Len() > 0
|
||||
return !exists
|
||||
})
|
||||
return exists
|
||||
}()
|
||||
}
|
||||
|
||||
// HasFunnelForTarget reports whether target exists in either the background AllowFunnel
|
||||
// or any of the foreground configs.
|
||||
func (v ServeConfigView) HasFunnelForTarget(target HostPort) bool {
|
||||
if v.AllowFunnel().Get(target) {
|
||||
return true
|
||||
}
|
||||
var exists bool
|
||||
v.Foreground().Range(func(_ string, v ServeConfigView) (cont bool) {
|
||||
if exists = v.AllowFunnel().Get(target); exists {
|
||||
return false
|
||||
}
|
||||
return true
|
||||
})
|
||||
return exists
|
||||
}
|
||||
115
vendor/tailscale.com/ipn/store.go
generated
vendored
Normal file
115
vendor/tailscale.com/ipn/store.go
generated
vendored
Normal file
@@ -0,0 +1,115 @@
|
||||
// Copyright (c) Tailscale Inc & AUTHORS
|
||||
// SPDX-License-Identifier: BSD-3-Clause
|
||||
|
||||
package ipn
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"context"
|
||||
"errors"
|
||||
"fmt"
|
||||
"net"
|
||||
"strconv"
|
||||
)
|
||||
|
||||
// ErrStateNotExist is returned by StateStore.ReadState when the
|
||||
// requested state ID doesn't exist.
|
||||
var ErrStateNotExist = errors.New("no state with given ID")
|
||||
|
||||
const (
|
||||
// MachineKeyStateKey is the key under which we store the machine key,
|
||||
// in its key.NodePrivate.MarshalText representation.
|
||||
MachineKeyStateKey = StateKey("_machinekey")
|
||||
|
||||
// LegacyGlobalDaemonStateKey is the ipn.StateKey that tailscaled
|
||||
// loads on startup.
|
||||
//
|
||||
// We have to support multiple state keys for other OSes (Windows in
|
||||
// particular), but right now Unix daemons run with a single
|
||||
// node-global state. To keep open the option of having per-user state
|
||||
// later, the global state key doesn't look like a username.
|
||||
//
|
||||
// As of 2022-10-21, it has been superseded by profiles and is no longer
|
||||
// written to disk. It is only read at startup when there are no profiles,
|
||||
// to migrate the state to the "default" profile.
|
||||
// The existing state is left on disk in case the user downgrades to an
|
||||
// older version of Tailscale that doesn't support profiles. We can
|
||||
// remove this in a future release.
|
||||
LegacyGlobalDaemonStateKey = StateKey("_daemon")
|
||||
|
||||
// ServerModeStartKey's value, if non-empty, is the value of a
|
||||
// StateKey containing the prefs to start with which to start the
|
||||
// server.
|
||||
//
|
||||
// For example, the value might be "user-1234", meaning the
|
||||
// the server should start with the Prefs JSON loaded from
|
||||
// StateKey "user-1234".
|
||||
ServerModeStartKey = StateKey("server-mode-start-key")
|
||||
|
||||
// KnownProfilesStateKey is the key under which we store the list of
|
||||
// known profiles. The value is a JSON-encoded []LoginProfile.
|
||||
KnownProfilesStateKey = StateKey("_profiles")
|
||||
|
||||
// CurrentProfileStateKey is the key under which we store the current
|
||||
// profile.
|
||||
CurrentProfileStateKey = StateKey("_current-profile")
|
||||
|
||||
// TaildropReceivedKey is the key to indicate whether any taildrop file
|
||||
// has ever been received (even if partially).
|
||||
// Any non-empty value indicates that at least one file has been received.
|
||||
TaildropReceivedKey = StateKey("_taildrop-received")
|
||||
)
|
||||
|
||||
// CurrentProfileKey returns the StateKey that stores the
|
||||
// current profile ID. The value is a JSON-encoded LoginProfile.
|
||||
// If the userID is empty, the key returned is CurrentProfileStateKey,
|
||||
// otherwise it is "_current/"+userID.
|
||||
func CurrentProfileKey(userID string) StateKey {
|
||||
if userID == "" {
|
||||
return CurrentProfileStateKey
|
||||
}
|
||||
return StateKey("_current/" + userID)
|
||||
}
|
||||
|
||||
// StateStore persists state, and produces it back on request.
|
||||
// Implementations of StateStore are expected to be safe for concurrent use.
|
||||
type StateStore interface {
|
||||
// ReadState returns the bytes associated with ID. Returns (nil,
|
||||
// ErrStateNotExist) if the ID doesn't have associated state.
|
||||
ReadState(id StateKey) ([]byte, error)
|
||||
// WriteState saves bs as the state associated with ID.
|
||||
//
|
||||
// Callers should generally use the ipn.WriteState wrapper func
|
||||
// instead, which only writes if the value is different from what's
|
||||
// already in the store.
|
||||
WriteState(id StateKey, bs []byte) error
|
||||
}
|
||||
|
||||
// WriteState is a wrapper around store.WriteState that only writes if
|
||||
// the value is different from what's already in the store.
|
||||
func WriteState(store StateStore, id StateKey, v []byte) error {
|
||||
if was, err := store.ReadState(id); err == nil && bytes.Equal(was, v) {
|
||||
return nil
|
||||
}
|
||||
return store.WriteState(id, v)
|
||||
}
|
||||
|
||||
// StateStoreDialerSetter is an optional interface that StateStores
|
||||
// can implement to allow the caller to set a custom dialer.
|
||||
type StateStoreDialerSetter interface {
|
||||
SetDialer(d func(ctx context.Context, network, address string) (net.Conn, error))
|
||||
}
|
||||
|
||||
// ReadStoreInt reads an integer from a StateStore.
|
||||
func ReadStoreInt(store StateStore, id StateKey) (int64, error) {
|
||||
v, err := store.ReadState(id)
|
||||
if err != nil {
|
||||
return 0, err
|
||||
}
|
||||
return strconv.ParseInt(string(v), 10, 64)
|
||||
}
|
||||
|
||||
// PutStoreInt puts an integer into a StateStore.
|
||||
func PutStoreInt(store StateStore, id StateKey, val int64) error {
|
||||
return WriteState(store, id, fmt.Appendf(nil, "%d", val))
|
||||
}
|
||||
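// Illustrative sketch, not part of the vendored file: a minimal map-backed
// StateStore, useful for exercising WriteState, ReadStoreInt, and PutStoreInt
// in tests. The memStore type is hypothetical, not an upstream API.
//
//	type memStore struct{ m map[StateKey][]byte }
//
//	func (s *memStore) ReadState(id StateKey) ([]byte, error) {
//		b, ok := s.m[id]
//		if !ok {
//			return nil, ErrStateNotExist
//		}
//		return b, nil
//	}
//
//	func (s *memStore) WriteState(id StateKey, bs []byte) error {
//		if s.m == nil {
//			s.m = make(map[StateKey][]byte)
//		}
//		s.m[id] = append([]byte(nil), bs...)
//		return nil
//	}
//
//	st := &memStore{}
//	_ = PutStoreInt(st, "counter", 42)          // stores "42"
//	n, _ := ReadStoreInt(st, "counter")         // n == 42
//	_ = WriteState(st, "counter", []byte("42")) // no-op: value is unchanged
//	_ = n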
186
vendor/tailscale.com/ipn/store/awsstore/store_aws.go
generated
vendored
Normal file
186
vendor/tailscale.com/ipn/store/awsstore/store_aws.go
generated
vendored
Normal file
@@ -0,0 +1,186 @@
// Copyright (c) Tailscale Inc & AUTHORS
// SPDX-License-Identifier: BSD-3-Clause

//go:build linux && !ts_omit_aws

// Package awsstore contains an ipn.StateStore implementation using AWS SSM.
package awsstore

import (
	"context"
	"errors"
	"fmt"
	"regexp"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/aws/arn"
	"github.com/aws/aws-sdk-go-v2/config"
	"github.com/aws/aws-sdk-go-v2/service/ssm"
	ssmTypes "github.com/aws/aws-sdk-go-v2/service/ssm/types"
	"tailscale.com/ipn"
	"tailscale.com/ipn/store/mem"
	"tailscale.com/types/logger"
)

const (
	parameterNameRxStr = `^parameter(/.*)`
)

var parameterNameRx = regexp.MustCompile(parameterNameRxStr)

// awsSSMClient is an interface allowing us to mock the couple of
// API calls we are leveraging with the AWSStore provider.
type awsSSMClient interface {
	GetParameter(ctx context.Context,
		params *ssm.GetParameterInput,
		optFns ...func(*ssm.Options)) (*ssm.GetParameterOutput, error)

	PutParameter(ctx context.Context,
		params *ssm.PutParameterInput,
		optFns ...func(*ssm.Options)) (*ssm.PutParameterOutput, error)
}

// awsStore is an ipn.StateStore that uses the AWS SSM parameter store
// to persist the state.
type awsStore struct {
	ssmClient awsSSMClient
	ssmARN    arn.ARN

	memory mem.Store
}

// New returns a new ipn.StateStore using the AWS SSM storage
// location given by ssmARN.
//
// Note that we store the entire state in a single parameter
// key, so if the state grows above 8 KB, tailscaled can only
// store new state in memory, and restarting tailscaled can fail
// until you delete the state from the AWS Parameter Store.
func New(_ logger.Logf, ssmARN string) (ipn.StateStore, error) {
	return newStore(ssmARN, nil)
}

// newStore is New, but for tests. If client is non-nil, it's
// used instead of making one.
func newStore(ssmARN string, client awsSSMClient) (ipn.StateStore, error) {
	s := &awsStore{
		ssmClient: client,
	}

	var err error

	// Parse the ARN.
	if s.ssmARN, err = arn.Parse(ssmARN); err != nil {
		return nil, fmt.Errorf("unable to parse the ARN correctly: %v", err)
	}

	// Validate that the ARN corresponds to the SSM service.
	if s.ssmARN.Service != "ssm" {
		return nil, fmt.Errorf("invalid service %q, expected 'ssm'", s.ssmARN.Service)
	}

	// Validate that the ARN corresponds to a parameter store resource.
	if !parameterNameRx.MatchString(s.ssmARN.Resource) {
		return nil, fmt.Errorf("invalid resource %q, expected to match %v", s.ssmARN.Resource, parameterNameRxStr)
	}

	if s.ssmClient == nil {
		var cfg aws.Config
		if cfg, err = config.LoadDefaultConfig(
			context.TODO(),
			config.WithRegion(s.ssmARN.Region),
		); err != nil {
			return nil, err
		}
		s.ssmClient = ssm.NewFromConfig(cfg)
	}

	// Hydrate the in-memory cache with the current state, if any.
	if err := s.LoadState(); err != nil {
		return nil, err
	}
	return s, nil
}

// LoadState attempts to read the state from the AWS SSM parameter store key.
func (s *awsStore) LoadState() error {
	param, err := s.ssmClient.GetParameter(
		context.TODO(),
		&ssm.GetParameterInput{
			Name:           aws.String(s.ParameterName()),
			WithDecryption: aws.Bool(true),
		},
	)

	if err != nil {
		var pnf *ssmTypes.ParameterNotFound
		if errors.As(err, &pnf) {
			// Create the parameter, as it does not exist yet,
			// and return directly, as it is de facto empty.
			return s.persistState()
		}
		return err
	}

	// Load the content in-memory.
	return s.memory.LoadFromJSON([]byte(*param.Parameter.Value))
}

// ParameterName returns the parameter name extracted from
// the provided ARN.
func (s *awsStore) ParameterName() (name string) {
	values := parameterNameRx.FindStringSubmatch(s.ssmARN.Resource)
	if len(values) == 2 {
		name = values[1]
	}
	return
}

// String returns a description of the awsStore, including the ARN of the
// SSM parameter configured to store the state.
func (s *awsStore) String() string { return fmt.Sprintf("awsStore(%q)", s.ssmARN.String()) }

// ReadState implements the StateStore interface.
func (s *awsStore) ReadState(id ipn.StateKey) (bs []byte, err error) {
	return s.memory.ReadState(id)
}

// WriteState implements the StateStore interface.
func (s *awsStore) WriteState(id ipn.StateKey, bs []byte) (err error) {
	// Write the state in-memory.
	if err = s.memory.WriteState(id, bs); err != nil {
		return
	}

	// Persist the state in the AWS SSM parameter store.
	return s.persistState()
}

// persistState saves the state into the AWS SSM parameter store.
func (s *awsStore) persistState() error {
	// Generate JSON from the in-memory cache.
	bs, err := s.memory.ExportToJSON()
	if err != nil {
		return err
	}

	// Store in the AWS SSM parameter store.
	//
	// We use intelligent tiering so that when the state is below 4 KB it uses the Standard tier,
	// which is free. However, if it exceeds 4 KB, the parameter switches to the Advanced tier,
	// doubling the capacity to 8 KB, per the following docs:
	// https://aws.amazon.com/about-aws/whats-new/2019/08/aws-systems-manager-parameter-store-announces-intelligent-tiering-to-enable-automatic-parameter-tier-selection/
	_, err = s.ssmClient.PutParameter(
		context.TODO(),
		&ssm.PutParameterInput{
			Name:      aws.String(s.ParameterName()),
			Value:     aws.String(string(bs)),
			Overwrite: aws.Bool(true),
			Tier:      ssmTypes.ParameterTierIntelligentTiering,
			Type:      ssmTypes.ParameterTypeSecureString,
		},
	)
	return err
}
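
// exampleNewAWSStore is an illustrative sketch, not part of the upstream
// file. The ARN is a made-up placeholder: the resource portion must name an
// SSM parameter ("parameter/<name>"), and the serialized state must stay
// within the 8 KB Advanced-tier ceiling described above.
func exampleNewAWSStore() (ipn.StateStore, error) {
	const exampleARN = "arn:aws:ssm:us-east-1:123456789012:parameter/tailscale/state"
	// tailscaled normally reaches this store via the "arn:" prefix in
	// ipn/store; calling New directly with a no-op logger is equivalent.
	return New(func(string, ...any) {}, exampleARN)
}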
18
vendor/tailscale.com/ipn/store/awsstore/store_aws_stub.go
generated
vendored
Normal file
@@ -0,0 +1,18 @@
// Copyright (c) Tailscale Inc & AUTHORS
// SPDX-License-Identifier: BSD-3-Clause

//go:build !linux || ts_omit_aws

package awsstore

import (
	"fmt"
	"runtime"

	"tailscale.com/ipn"
	"tailscale.com/types/logger"
)

func New(logger.Logf, string) (ipn.StateStore, error) {
	return nil, fmt.Errorf("AWS store is not supported on %v", runtime.GOOS)
}
139
vendor/tailscale.com/ipn/store/kubestore/store_kube.go
generated
vendored
Normal file
@@ -0,0 +1,139 @@
// Copyright (c) Tailscale Inc & AUTHORS
// SPDX-License-Identifier: BSD-3-Clause

// Package kubestore contains an ipn.StateStore implementation using Kubernetes Secrets.
package kubestore

import (
	"context"
	"fmt"
	"net"
	"os"
	"strings"
	"time"

	"tailscale.com/ipn"
	"tailscale.com/kube/kubeapi"
	"tailscale.com/kube/kubeclient"
	"tailscale.com/types/logger"
)

// Store is an ipn.StateStore that uses a Kubernetes Secret for persistence.
type Store struct {
	client     kubeclient.Client
	canPatch   bool
	secretName string
}

// New returns a new Store that persists to the named Secret.
func New(_ logger.Logf, secretName string) (*Store, error) {
	c, err := kubeclient.New()
	if err != nil {
		return nil, err
	}
	if os.Getenv("TS_KUBERNETES_READ_API_SERVER_ADDRESS_FROM_ENV") == "true" {
		// Derive the API server address from the environment variables.
		c.SetURL(fmt.Sprintf("https://%s:%s", os.Getenv("KUBERNETES_SERVICE_HOST"), os.Getenv("KUBERNETES_SERVICE_PORT_HTTPS")))
	}
	canPatch, _, err := c.CheckSecretPermissions(context.Background(), secretName)
	if err != nil {
		return nil, err
	}
	return &Store{
		client:     c,
		canPatch:   canPatch,
		secretName: secretName,
	}, nil
}

func (s *Store) SetDialer(d func(ctx context.Context, network, address string) (net.Conn, error)) {
	s.client.SetDialer(d)
}

func (s *Store) String() string { return "kube.Store" }

// ReadState implements the StateStore interface.
func (s *Store) ReadState(id ipn.StateKey) ([]byte, error) {
	ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
	defer cancel()

	secret, err := s.client.GetSecret(ctx, s.secretName)
	if err != nil {
		if st, ok := err.(*kubeapi.Status); ok && st.Code == 404 {
			return nil, ipn.ErrStateNotExist
		}
		return nil, err
	}
	b, ok := secret.Data[sanitizeKey(id)]
	if !ok {
		return nil, ipn.ErrStateNotExist
	}
	return b, nil
}

func sanitizeKey(k ipn.StateKey) string {
	// The only valid characters in a Kubernetes secret key are alphanumeric,
	// '-', '_', and '.'; everything else is replaced with '_'.
	return strings.Map(func(r rune) rune {
		if r >= 'a' && r <= 'z' || r >= 'A' && r <= 'Z' || r >= '0' && r <= '9' || r == '-' || r == '_' || r == '.' {
			return r
		}
		return '_'
	}, string(k))
}
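
// exampleSanitizeKeys is an illustrative sketch, not part of the upstream
// file. It shows how state keys are mangled into valid Secret data keys:
// '/' (as used by ipn.CurrentProfileKey) and any other disallowed rune
// become '_'. The sample keys come from the ipn package.
func exampleSanitizeKeys() []string {
	keys := []ipn.StateKey{
		"_current-profile",   // already valid, unchanged
		"_current/user-1234", // '/' is not allowed, becomes '_'
	}
	out := make([]string, 0, len(keys))
	for _, k := range keys {
		out = append(out, sanitizeKey(k)) // e.g. "_current_user-1234"
	}
	return out
}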

// WriteState implements the StateStore interface.
func (s *Store) WriteState(id ipn.StateKey, bs []byte) error {
	ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
	defer cancel()

	secret, err := s.client.GetSecret(ctx, s.secretName)
	if err != nil {
		if kubeclient.IsNotFoundErr(err) {
			return s.client.CreateSecret(ctx, &kubeapi.Secret{
				TypeMeta: kubeapi.TypeMeta{
					APIVersion: "v1",
					Kind:       "Secret",
				},
				ObjectMeta: kubeapi.ObjectMeta{
					Name: s.secretName,
				},
				Data: map[string][]byte{
					sanitizeKey(id): bs,
				},
			})
		}
		return err
	}
	if s.canPatch {
		if len(secret.Data) == 0 { // if the user has pre-created a blank Secret
			m := []kubeclient.JSONPatch{
				{
					Op:    "add",
					Path:  "/data",
					Value: map[string][]byte{sanitizeKey(id): bs},
				},
			}
			if err := s.client.JSONPatchSecret(ctx, s.secretName, m); err != nil {
				return fmt.Errorf("error patching Secret %s with a /data field: %v", s.secretName, err)
			}
			return nil
		}
		m := []kubeclient.JSONPatch{
			{
				Op:    "add",
				Path:  "/data/" + sanitizeKey(id),
				Value: bs,
			},
		}
		if err := s.client.JSONPatchSecret(ctx, s.secretName, m); err != nil {
			return fmt.Errorf("error patching Secret %s with a /data/%s field: %v", s.secretName, sanitizeKey(id), err)
		}
		return nil
	}
	secret.Data[sanitizeKey(id)] = bs
	return s.client.UpdateSecret(ctx, secret)
}
70
vendor/tailscale.com/ipn/store/mem/store_mem.go
generated
vendored
Normal file
@@ -0,0 +1,70 @@
// Copyright (c) Tailscale Inc & AUTHORS
// SPDX-License-Identifier: BSD-3-Clause

// Package mem provides an in-memory ipn.StateStore implementation.
package mem

import (
	"bytes"
	"encoding/json"
	"sync"

	"tailscale.com/ipn"
	"tailscale.com/types/logger"
)

// New returns a new Store.
func New(logger.Logf, string) (ipn.StateStore, error) {
	return new(Store), nil
}

// Store is an ipn.StateStore that keeps state in memory only.
type Store struct {
	mu sync.Mutex
	// +checklocks:mu
	cache map[ipn.StateKey][]byte
}

func (s *Store) String() string { return "mem.Store" }

// ReadState implements the StateStore interface.
func (s *Store) ReadState(id ipn.StateKey) ([]byte, error) {
	s.mu.Lock()
	defer s.mu.Unlock()
	bs, ok := s.cache[id]
	if !ok {
		return nil, ipn.ErrStateNotExist
	}
	return bs, nil
}

// WriteState implements the StateStore interface.
func (s *Store) WriteState(id ipn.StateKey, bs []byte) error {
	s.mu.Lock()
	defer s.mu.Unlock()
	if s.cache == nil {
		s.cache = map[ipn.StateKey][]byte{}
	}
	s.cache[id] = bytes.Clone(bs)
	return nil
}

// LoadFromJSON attempts to unmarshal JSON content into the
// in-memory cache.
func (s *Store) LoadFromJSON(data []byte) error {
	s.mu.Lock()
	defer s.mu.Unlock()
	return json.Unmarshal(data, &s.cache)
}

// ExportToJSON exports the content of the cache to
// a JSON-formatted []byte.
func (s *Store) ExportToJSON() ([]byte, error) {
	s.mu.Lock()
	defer s.mu.Unlock()
	if len(s.cache) == 0 {
		// Avoid "null" serialization.
		return []byte("{}"), nil
	}
	return json.MarshalIndent(s.cache, "", " ")
}
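
// exampleRoundTrip is an illustrative sketch, not part of the upstream file.
// It shows the JSON round-trip that awsstore relies on: state written through
// WriteState can be exported as a single JSON document and later reloaded
// into a fresh Store. The key and value are made-up examples.
func exampleRoundTrip() (*Store, error) {
	var src Store
	if err := src.WriteState("_example-key", []byte("hello")); err != nil {
		return nil, err
	}
	// []byte values are base64-encoded in the JSON, e.g. "_example-key": "aGVsbG8=".
	blob, err := src.ExportToJSON()
	if err != nil {
		return nil, err
	}
	var dst Store
	if err := dst.LoadFromJSON(blob); err != nil {
		return nil, err
	}
	return &dst, nil
}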
18
vendor/tailscale.com/ipn/store/store_aws.go
generated
vendored
Normal file
@@ -0,0 +1,18 @@
// Copyright (c) Tailscale Inc & AUTHORS
// SPDX-License-Identifier: BSD-3-Clause

//go:build (ts_aws || (linux && (arm64 || amd64))) && !ts_omit_aws

package store

import (
	"tailscale.com/ipn/store/awsstore"
)

func init() {
	registerAvailableExternalStores = append(registerAvailableExternalStores, registerAWSStore)
}

func registerAWSStore() {
	Register("arn:", awsstore.New)
}
25
vendor/tailscale.com/ipn/store/store_kube.go
generated
vendored
Normal file
@@ -0,0 +1,25 @@
// Copyright (c) Tailscale Inc & AUTHORS
// SPDX-License-Identifier: BSD-3-Clause

//go:build (ts_kube || (linux && (arm64 || amd64))) && !ts_omit_kube

package store

import (
	"strings"

	"tailscale.com/ipn"
	"tailscale.com/ipn/store/kubestore"
	"tailscale.com/types/logger"
)

func init() {
	registerAvailableExternalStores = append(registerAvailableExternalStores, registerKubeStore)
}

func registerKubeStore() {
	Register("kube:", func(logf logger.Logf, path string) (ipn.StateStore, error) {
		secretName := strings.TrimPrefix(path, "kube:")
		return kubestore.New(logf, secretName)
	})
}
188
vendor/tailscale.com/ipn/store/stores.go
generated
vendored
Normal file
@@ -0,0 +1,188 @@
// Copyright (c) Tailscale Inc & AUTHORS
// SPDX-License-Identifier: BSD-3-Clause

// Package store provides various implementations of ipn.StateStore.
package store

import (
	"bytes"
	"encoding/json"
	"fmt"
	"os"
	"path/filepath"
	"runtime"
	"strings"
	"sync"

	"tailscale.com/atomicfile"
	"tailscale.com/ipn"
	"tailscale.com/ipn/store/mem"
	"tailscale.com/paths"
	"tailscale.com/types/logger"
	"tailscale.com/util/mak"
)

// Provider returns a StateStore for the provided path.
// The arg is of the form "prefix:rest", where prefix was previously registered with Register.
type Provider func(logf logger.Logf, arg string) (ipn.StateStore, error)

var regOnce sync.Once

var registerAvailableExternalStores []func()

func registerDefaultStores() {
	Register("mem:", mem.New)

	for _, f := range registerAvailableExternalStores {
		f()
	}
}

var knownStores map[string]Provider

// New returns a StateStore based on the provided arg
// and registered stores.
// The arg is of the form "prefix:rest", where prefix was previously
// registered with Register.
//
// By default the following stores are registered:
//
//   - if the string begins with "mem:", the suffix
//     is ignored and an in-memory store is used.
//   - (Linux-only) if the string begins with "arn:",
//     the suffix is an AWS ARN for an SSM parameter.
//   - (Linux-only) if the string begins with "kube:",
//     the suffix is a Kubernetes Secret name.
//   - In all other cases, the path is treated as a filepath.
func New(logf logger.Logf, path string) (ipn.StateStore, error) {
	regOnce.Do(registerDefaultStores)
	for prefix, sf := range knownStores {
		if strings.HasPrefix(path, prefix) {
			// We can't strip the prefix here as some NewStoreFunc (like arn:)
			// expect the prefix.
			return sf(logf, path)
		}
	}
	if runtime.GOOS == "windows" {
		path = TryWindowsAppDataMigration(logf, path)
	}
	return NewFileStore(logf, path)
}
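
// exampleSelectStore is an illustrative sketch, not part of the upstream
// file. It shows how the prefix decides which backend New picks; the
// arguments are made-up placeholders.
func exampleSelectStore(logf logger.Logf) {
	// In-memory store; everything after "mem:" is ignored.
	_, _ = New(logf, "mem:")
	// AWS SSM parameter store (Linux-only builds with the awsstore registered).
	_, _ = New(logf, "arn:aws:ssm:us-east-1:123456789012:parameter/tailscale/state")
	// Kubernetes Secret named "tailscale-state" (Linux-only builds).
	_, _ = New(logf, "kube:tailscale-state")
	// Anything else is treated as a file path for a JSON FileStore.
	_, _ = New(logf, "/var/lib/tailscale/tailscaled.state")
}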
// Register registers a prefix to be used for
// New. It panics if the prefix is empty, or if the
// prefix is already registered.
// The provided fn is called with the path passed to New;
// the prefix is not stripped.
func Register(prefix string, fn Provider) {
	if len(prefix) == 0 {
		panic("prefix is empty")
	}
	if _, ok := knownStores[prefix]; ok {
		panic(fmt.Sprintf("%q already registered", prefix))
	}
	mak.Set(&knownStores, prefix, fn)
}
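
// exampleRegisterCustomStore is an illustrative sketch, not part of the
// upstream file. A hypothetical "etcd:" prefix is registered before New is
// first asked to resolve it; the provider still receives the full,
// unstripped path.
func exampleRegisterCustomStore() {
	Register("etcd:", func(logf logger.Logf, path string) (ipn.StateStore, error) {
		// A real provider would dial its backend here; this sketch just
		// falls back to an in-memory store.
		return mem.New(logf, path)
	})
}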
// TryWindowsAppDataMigration attempts to copy the Windows state file
// from its old location to the new location. (Issue 2856)
//
// Tailscale 1.14 and before stored state under %LocalAppData%
// (usually "C:\WINDOWS\system32\config\systemprofile\AppData\Local"
// when tailscaled.exe is running as a non-user system service).
// However, it is frequently cleared for almost any reason: Windows
// updates, System Restore, even various System Cleaner utilities.
//
// Returns a string of the path to use for the state file.
// This will be a fallback %LocalAppData% path if migration fails,
// or a %ProgramData% path otherwise.
func TryWindowsAppDataMigration(logf logger.Logf, path string) string {
	if path != paths.DefaultTailscaledStateFile() {
		// If they're specifying a non-default path, just trust that they know
		// what they are doing.
		return path
	}
	oldFile := paths.LegacyStateFilePath()
	return paths.TryConfigFileMigration(logf, oldFile, path)
}

// FileStore is a StateStore that uses a JSON file for persistence.
type FileStore struct {
	path string

	mu    sync.RWMutex
	cache map[ipn.StateKey][]byte
}

// Path returns the path that NewFileStore was called with.
func (s *FileStore) Path() string { return s.path }

func (s *FileStore) String() string { return fmt.Sprintf("FileStore(%q)", s.path) }

// NewFileStore returns a new file store that persists to path.
func NewFileStore(logf logger.Logf, path string) (ipn.StateStore, error) {
	// We unconditionally call this to ensure that our perms are correct.
	if err := paths.MkStateDir(filepath.Dir(path)); err != nil {
		return nil, fmt.Errorf("creating state directory: %w", err)
	}

	bs, err := os.ReadFile(path)

	// Treat an empty file as a missing file.
	// (https://github.com/tailscale/tailscale/issues/895#issuecomment-723255589)
	if err == nil && len(bs) == 0 {
		logf("store.NewFileStore(%q): file empty; treating it like a missing file [warning]", path)
		err = os.ErrNotExist
	}

	if err != nil {
		if os.IsNotExist(err) {
			// Write out an initial file, to verify that we can write
			// to the path.
			if err = atomicfile.WriteFile(path, []byte("{}"), 0600); err != nil {
				return nil, err
			}
			return &FileStore{
				path:  path,
				cache: map[ipn.StateKey][]byte{},
			}, nil
		}
		return nil, err
	}

	ret := &FileStore{
		path:  path,
		cache: map[ipn.StateKey][]byte{},
	}
	if err := json.Unmarshal(bs, &ret.cache); err != nil {
		return nil, err
	}

	return ret, nil
}

// ReadState implements the StateStore interface.
func (s *FileStore) ReadState(id ipn.StateKey) ([]byte, error) {
	s.mu.RLock()
	defer s.mu.RUnlock()
	bs, ok := s.cache[id]
	if !ok {
		return nil, ipn.ErrStateNotExist
	}
	return bs, nil
}

// WriteState implements the StateStore interface.
func (s *FileStore) WriteState(id ipn.StateKey, bs []byte) error {
	s.mu.Lock()
	defer s.mu.Unlock()
	if bytes.Equal(s.cache[id], bs) {
		return nil
	}
	s.cache[id] = bytes.Clone(bs)
	bs, err := json.MarshalIndent(s.cache, "", " ")
	if err != nil {
		return err
	}
	return atomicfile.WriteFile(s.path, bs, 0600)
}
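
// exampleFileStore is an illustrative sketch, not part of the upstream file.
// The path and value are made-up placeholders. NewFileStore creates the state
// directory and an initial "{}" file if needed; each WriteState that actually
// changes a value rewrites the whole JSON file atomically, while identical
// writes are skipped before touching the file.
func exampleFileStore(logf logger.Logf) error {
	st, err := NewFileStore(logf, "/tmp/example-tailscaled.state")
	if err != nil {
		return err
	}
	if err := st.WriteState(ipn.CurrentProfileStateKey, []byte("profile-0001")); err != nil {
		return err
	}
	// A second identical write is a no-op.
	return st.WriteState(ipn.CurrentProfileStateKey, []byte("profile-0001"))
}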