Update dependencies
This commit is contained in:
65
vendor/tailscale.com/ipn/ipnlocal/autoupdate.go
generated
vendored
Normal file
65
vendor/tailscale.com/ipn/ipnlocal/autoupdate.go
generated
vendored
Normal file
@@ -0,0 +1,65 @@
|
||||
// Copyright (c) Tailscale Inc & AUTHORS
|
||||
// SPDX-License-Identifier: BSD-3-Clause
|
||||
|
||||
//go:build linux || windows
|
||||
|
||||
package ipnlocal
|
||||
|
||||
import (
|
||||
"context"
|
||||
"time"
|
||||
|
||||
"tailscale.com/clientupdate"
|
||||
"tailscale.com/ipn"
|
||||
"tailscale.com/version"
|
||||
)
|
||||
|
||||
func (b *LocalBackend) stopOfflineAutoUpdate() {
|
||||
if b.offlineAutoUpdateCancel != nil {
|
||||
b.logf("offline auto-update: stopping update checks")
|
||||
b.offlineAutoUpdateCancel()
|
||||
b.offlineAutoUpdateCancel = nil
|
||||
}
|
||||
}
|
||||
|
||||
func (b *LocalBackend) maybeStartOfflineAutoUpdate(prefs ipn.PrefsView) {
|
||||
if !prefs.AutoUpdate().Apply.EqualBool(true) {
|
||||
return
|
||||
}
|
||||
// AutoUpdate.Apply field in prefs can only be true for platforms that
|
||||
// support auto-updates. But check it here again, just in case.
|
||||
if !clientupdate.CanAutoUpdate() {
|
||||
return
|
||||
}
|
||||
// On macsys, auto-updates are managed by Sparkle.
|
||||
if version.IsMacSysExt() {
|
||||
return
|
||||
}
|
||||
|
||||
if b.offlineAutoUpdateCancel != nil {
|
||||
// Already running.
|
||||
return
|
||||
}
|
||||
ctx, cancel := context.WithCancel(context.Background())
|
||||
b.offlineAutoUpdateCancel = cancel
|
||||
|
||||
b.logf("offline auto-update: starting update checks")
|
||||
go b.offlineAutoUpdate(ctx)
|
||||
}
|
||||
|
||||
const offlineAutoUpdateCheckPeriod = time.Hour
|
||||
|
||||
func (b *LocalBackend) offlineAutoUpdate(ctx context.Context) {
|
||||
t := time.NewTicker(offlineAutoUpdateCheckPeriod)
|
||||
defer t.Stop()
|
||||
for {
|
||||
select {
|
||||
case <-ctx.Done():
|
||||
return
|
||||
case <-t.C:
|
||||
}
|
||||
if err := b.startAutoUpdate("offline auto-update"); err != nil {
|
||||
b.logf("offline auto-update: failed: %v", err)
|
||||
}
|
||||
}
|
||||
}
|
||||
18
vendor/tailscale.com/ipn/ipnlocal/autoupdate_disabled.go
generated
vendored
Normal file
18
vendor/tailscale.com/ipn/ipnlocal/autoupdate_disabled.go
generated
vendored
Normal file
@@ -0,0 +1,18 @@
|
||||
// Copyright (c) Tailscale Inc & AUTHORS
|
||||
// SPDX-License-Identifier: BSD-3-Clause
|
||||
|
||||
//go:build !(linux || windows)
|
||||
|
||||
package ipnlocal
|
||||
|
||||
import (
|
||||
"tailscale.com/ipn"
|
||||
)
|
||||
|
||||
func (b *LocalBackend) stopOfflineAutoUpdate() {
|
||||
// Not supported on this platform.
|
||||
}
|
||||
|
||||
func (b *LocalBackend) maybeStartOfflineAutoUpdate(prefs ipn.PrefsView) {
|
||||
// Not supported on this platform.
|
||||
}
|
||||
30
vendor/tailscale.com/ipn/ipnlocal/breaktcp_darwin.go
generated
vendored
Normal file
30
vendor/tailscale.com/ipn/ipnlocal/breaktcp_darwin.go
generated
vendored
Normal file
@@ -0,0 +1,30 @@
|
||||
// Copyright (c) Tailscale Inc & AUTHORS
|
||||
// SPDX-License-Identifier: BSD-3-Clause
|
||||
|
||||
package ipnlocal
|
||||
|
||||
import (
|
||||
"log"
|
||||
|
||||
"golang.org/x/sys/unix"
|
||||
)
|
||||
|
||||
func init() {
|
||||
breakTCPConns = breakTCPConnsDarwin
|
||||
}
|
||||
|
||||
func breakTCPConnsDarwin() error {
|
||||
var matched int
|
||||
for fd := 0; fd < 1000; fd++ {
|
||||
_, err := unix.GetsockoptTCPConnectionInfo(fd, unix.IPPROTO_TCP, unix.TCP_CONNECTION_INFO)
|
||||
if err == nil {
|
||||
matched++
|
||||
err = unix.Close(fd)
|
||||
log.Printf("debug: closed TCP fd %v: %v", fd, err)
|
||||
}
|
||||
}
|
||||
if matched == 0 {
|
||||
log.Printf("debug: no TCP connections found")
|
||||
}
|
||||
return nil
|
||||
}
|
||||
30
vendor/tailscale.com/ipn/ipnlocal/breaktcp_linux.go
generated
vendored
Normal file
30
vendor/tailscale.com/ipn/ipnlocal/breaktcp_linux.go
generated
vendored
Normal file
@@ -0,0 +1,30 @@
|
||||
// Copyright (c) Tailscale Inc & AUTHORS
|
||||
// SPDX-License-Identifier: BSD-3-Clause
|
||||
|
||||
package ipnlocal
|
||||
|
||||
import (
|
||||
"log"
|
||||
|
||||
"golang.org/x/sys/unix"
|
||||
)
|
||||
|
||||
func init() {
|
||||
breakTCPConns = breakTCPConnsLinux
|
||||
}
|
||||
|
||||
func breakTCPConnsLinux() error {
|
||||
var matched int
|
||||
for fd := 0; fd < 1000; fd++ {
|
||||
_, err := unix.GetsockoptTCPInfo(fd, unix.IPPROTO_TCP, unix.TCP_INFO)
|
||||
if err == nil {
|
||||
matched++
|
||||
err = unix.Close(fd)
|
||||
log.Printf("debug: closed TCP fd %v: %v", fd, err)
|
||||
}
|
||||
}
|
||||
if matched == 0 {
|
||||
log.Printf("debug: no TCP connections found")
|
||||
}
|
||||
return nil
|
||||
}
|
||||
591
vendor/tailscale.com/ipn/ipnlocal/c2n.go
generated
vendored
Normal file
591
vendor/tailscale.com/ipn/ipnlocal/c2n.go
generated
vendored
Normal file
@@ -0,0 +1,591 @@
|
||||
// Copyright (c) Tailscale Inc & AUTHORS
|
||||
// SPDX-License-Identifier: BSD-3-Clause
|
||||
|
||||
package ipnlocal
|
||||
|
||||
import (
|
||||
"crypto/x509"
|
||||
"encoding/json"
|
||||
"encoding/pem"
|
||||
"errors"
|
||||
"fmt"
|
||||
"io"
|
||||
"net"
|
||||
"net/http"
|
||||
"os"
|
||||
"os/exec"
|
||||
"path"
|
||||
"path/filepath"
|
||||
"runtime"
|
||||
"sort"
|
||||
"strconv"
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
"github.com/kortschak/wol"
|
||||
"tailscale.com/clientupdate"
|
||||
"tailscale.com/envknob"
|
||||
"tailscale.com/ipn"
|
||||
"tailscale.com/net/sockstats"
|
||||
"tailscale.com/posture"
|
||||
"tailscale.com/tailcfg"
|
||||
"tailscale.com/util/clientmetric"
|
||||
"tailscale.com/util/goroutines"
|
||||
"tailscale.com/util/set"
|
||||
"tailscale.com/util/syspolicy"
|
||||
"tailscale.com/version"
|
||||
"tailscale.com/version/distro"
|
||||
)
|
||||
|
||||
// c2nHandlers maps an HTTP method and URI path (without query parameters) to
|
||||
// its handler. The exact method+path match is preferred, but if no entry
|
||||
// exists for that, a map entry with an empty method is used as a fallback.
|
||||
var c2nHandlers = map[methodAndPath]c2nHandler{
|
||||
// Debug.
|
||||
req("/echo"): handleC2NEcho,
|
||||
req("/debug/goroutines"): handleC2NDebugGoroutines,
|
||||
req("/debug/prefs"): handleC2NDebugPrefs,
|
||||
req("/debug/metrics"): handleC2NDebugMetrics,
|
||||
req("/debug/component-logging"): handleC2NDebugComponentLogging,
|
||||
req("/debug/logheap"): handleC2NDebugLogHeap,
|
||||
|
||||
// PPROF - We only expose a subset of typical pprof endpoints for security.
|
||||
req("/debug/pprof/heap"): handleC2NPprof,
|
||||
req("/debug/pprof/allocs"): handleC2NPprof,
|
||||
|
||||
req("POST /logtail/flush"): handleC2NLogtailFlush,
|
||||
req("POST /sockstats"): handleC2NSockStats,
|
||||
|
||||
// Check TLS certificate status.
|
||||
req("GET /tls-cert-status"): handleC2NTLSCertStatus,
|
||||
|
||||
// SSH
|
||||
req("/ssh/usernames"): handleC2NSSHUsernames,
|
||||
|
||||
// Auto-updates.
|
||||
req("GET /update"): handleC2NUpdateGet,
|
||||
req("POST /update"): handleC2NUpdatePost,
|
||||
|
||||
// Wake-on-LAN.
|
||||
req("POST /wol"): handleC2NWoL,
|
||||
|
||||
// Device posture.
|
||||
req("GET /posture/identity"): handleC2NPostureIdentityGet,
|
||||
|
||||
// App Connectors.
|
||||
req("GET /appconnector/routes"): handleC2NAppConnectorDomainRoutesGet,
|
||||
|
||||
// Linux netfilter.
|
||||
req("POST /netfilter-kind"): handleC2NSetNetfilterKind,
|
||||
}
|
||||
|
||||
type c2nHandler func(*LocalBackend, http.ResponseWriter, *http.Request)
|
||||
|
||||
type methodAndPath struct {
|
||||
method string // empty string means fallback
|
||||
path string // Request.URL.Path (without query string)
|
||||
}
|
||||
|
||||
func req(s string) methodAndPath {
|
||||
if m, p, ok := strings.Cut(s, " "); ok {
|
||||
return methodAndPath{m, p}
|
||||
}
|
||||
return methodAndPath{"", s}
|
||||
}
|
||||
|
||||
// c2nHandlerPaths is all the set of paths from c2nHandlers, without their HTTP methods.
|
||||
// It's used to detect requests with a non-matching method.
|
||||
var c2nHandlerPaths = set.Set[string]{}
|
||||
|
||||
func init() {
|
||||
for k := range c2nHandlers {
|
||||
c2nHandlerPaths.Add(k.path)
|
||||
}
|
||||
}
|
||||
|
||||
func (b *LocalBackend) handleC2N(w http.ResponseWriter, r *http.Request) {
|
||||
// First try to match by both method and path,
|
||||
if h, ok := c2nHandlers[methodAndPath{r.Method, r.URL.Path}]; ok {
|
||||
h(b, w, r)
|
||||
return
|
||||
}
|
||||
// Then try to match by just path.
|
||||
if h, ok := c2nHandlers[methodAndPath{path: r.URL.Path}]; ok {
|
||||
h(b, w, r)
|
||||
return
|
||||
}
|
||||
if c2nHandlerPaths.Contains(r.URL.Path) {
|
||||
http.Error(w, "bad method", http.StatusMethodNotAllowed)
|
||||
} else {
|
||||
http.Error(w, "unknown c2n path", http.StatusBadRequest)
|
||||
}
|
||||
}
|
||||
|
||||
func writeJSON(w http.ResponseWriter, v any) {
|
||||
w.Header().Set("Content-Type", "application/json")
|
||||
json.NewEncoder(w).Encode(v)
|
||||
}
|
||||
|
||||
func handleC2NEcho(b *LocalBackend, w http.ResponseWriter, r *http.Request) {
|
||||
// Test handler.
|
||||
body, _ := io.ReadAll(r.Body)
|
||||
w.Write(body)
|
||||
}
|
||||
|
||||
func handleC2NLogtailFlush(b *LocalBackend, w http.ResponseWriter, r *http.Request) {
|
||||
if b.TryFlushLogs() {
|
||||
w.WriteHeader(http.StatusNoContent)
|
||||
} else {
|
||||
http.Error(w, "no log flusher wired up", http.StatusInternalServerError)
|
||||
}
|
||||
}
|
||||
|
||||
func handleC2NDebugGoroutines(_ *LocalBackend, w http.ResponseWriter, r *http.Request) {
|
||||
w.Header().Set("Content-Type", "text/plain")
|
||||
w.Write(goroutines.ScrubbedGoroutineDump(true))
|
||||
}
|
||||
|
||||
func handleC2NDebugPrefs(b *LocalBackend, w http.ResponseWriter, r *http.Request) {
|
||||
writeJSON(w, b.Prefs())
|
||||
}
|
||||
|
||||
func handleC2NDebugMetrics(_ *LocalBackend, w http.ResponseWriter, r *http.Request) {
|
||||
w.Header().Set("Content-Type", "text/plain")
|
||||
clientmetric.WritePrometheusExpositionFormat(w)
|
||||
}
|
||||
|
||||
func handleC2NDebugComponentLogging(b *LocalBackend, w http.ResponseWriter, r *http.Request) {
|
||||
component := r.FormValue("component")
|
||||
secs, _ := strconv.Atoi(r.FormValue("secs"))
|
||||
if secs == 0 {
|
||||
secs -= 1
|
||||
}
|
||||
until := b.clock.Now().Add(time.Duration(secs) * time.Second)
|
||||
err := b.SetComponentDebugLogging(component, until)
|
||||
var res struct {
|
||||
Error string `json:",omitempty"`
|
||||
}
|
||||
if err != nil {
|
||||
res.Error = err.Error()
|
||||
}
|
||||
writeJSON(w, res)
|
||||
}
|
||||
|
||||
var c2nLogHeap func(http.ResponseWriter, *http.Request) // non-nil on most platforms (c2n_pprof.go)
|
||||
|
||||
func handleC2NDebugLogHeap(b *LocalBackend, w http.ResponseWriter, r *http.Request) {
|
||||
if c2nLogHeap == nil {
|
||||
// Not implemented on platforms trying to optimize for binary size or
|
||||
// reduced memory usage.
|
||||
http.Error(w, "not implemented", http.StatusNotImplemented)
|
||||
return
|
||||
}
|
||||
c2nLogHeap(w, r)
|
||||
}
|
||||
|
||||
var c2nPprof func(http.ResponseWriter, *http.Request, string) // non-nil on most platforms (c2n_pprof.go)
|
||||
|
||||
func handleC2NPprof(b *LocalBackend, w http.ResponseWriter, r *http.Request) {
|
||||
if c2nPprof == nil {
|
||||
// Not implemented on platforms trying to optimize for binary size or
|
||||
// reduced memory usage.
|
||||
http.Error(w, "not implemented", http.StatusNotImplemented)
|
||||
return
|
||||
}
|
||||
_, profile := path.Split(r.URL.Path)
|
||||
c2nPprof(w, r, profile)
|
||||
}
|
||||
|
||||
func handleC2NSSHUsernames(b *LocalBackend, w http.ResponseWriter, r *http.Request) {
|
||||
var req tailcfg.C2NSSHUsernamesRequest
|
||||
if r.Method == "POST" {
|
||||
if err := json.NewDecoder(r.Body).Decode(&req); err != nil {
|
||||
http.Error(w, err.Error(), http.StatusBadRequest)
|
||||
return
|
||||
}
|
||||
}
|
||||
res, err := b.getSSHUsernames(&req)
|
||||
if err != nil {
|
||||
http.Error(w, err.Error(), 500)
|
||||
return
|
||||
}
|
||||
writeJSON(w, res)
|
||||
}
|
||||
|
||||
func handleC2NSockStats(b *LocalBackend, w http.ResponseWriter, r *http.Request) {
|
||||
w.Header().Set("Content-Type", "text/plain")
|
||||
if b.sockstatLogger == nil {
|
||||
http.Error(w, "no sockstatLogger", http.StatusInternalServerError)
|
||||
return
|
||||
}
|
||||
b.sockstatLogger.Flush()
|
||||
fmt.Fprintf(w, "logid: %s\n", b.sockstatLogger.LogID())
|
||||
fmt.Fprintf(w, "debug info: %v\n", sockstats.DebugInfo())
|
||||
}
|
||||
|
||||
// handleC2NAppConnectorDomainRoutesGet handles returning the domains
|
||||
// that the app connector is responsible for, as well as the resolved
|
||||
// IP addresses for each domain. If the node is not configured as
|
||||
// an app connector, an empty map is returned.
|
||||
func handleC2NAppConnectorDomainRoutesGet(b *LocalBackend, w http.ResponseWriter, r *http.Request) {
|
||||
b.logf("c2n: GET /appconnector/routes received")
|
||||
|
||||
var res tailcfg.C2NAppConnectorDomainRoutesResponse
|
||||
if b.appConnector == nil {
|
||||
w.Header().Set("Content-Type", "application/json")
|
||||
json.NewEncoder(w).Encode(res)
|
||||
return
|
||||
}
|
||||
|
||||
res.Domains = b.appConnector.DomainRoutes()
|
||||
|
||||
w.Header().Set("Content-Type", "application/json")
|
||||
json.NewEncoder(w).Encode(res)
|
||||
}
|
||||
|
||||
func handleC2NSetNetfilterKind(b *LocalBackend, w http.ResponseWriter, r *http.Request) {
|
||||
b.logf("c2n: POST /netfilter-kind received")
|
||||
|
||||
if version.OS() != "linux" {
|
||||
http.Error(w, "netfilter kind only settable on linux", http.StatusNotImplemented)
|
||||
}
|
||||
|
||||
kind := r.FormValue("kind")
|
||||
b.logf("c2n: switching netfilter to %s", kind)
|
||||
|
||||
_, err := b.EditPrefs(&ipn.MaskedPrefs{
|
||||
NetfilterKindSet: true,
|
||||
Prefs: ipn.Prefs{
|
||||
NetfilterKind: kind,
|
||||
},
|
||||
})
|
||||
if err != nil {
|
||||
http.Error(w, err.Error(), http.StatusInternalServerError)
|
||||
return
|
||||
}
|
||||
|
||||
b.authReconfig()
|
||||
|
||||
w.WriteHeader(http.StatusNoContent)
|
||||
}
|
||||
|
||||
func handleC2NUpdateGet(b *LocalBackend, w http.ResponseWriter, r *http.Request) {
|
||||
b.logf("c2n: GET /update received")
|
||||
|
||||
res := b.newC2NUpdateResponse()
|
||||
res.Started = b.c2nUpdateStarted()
|
||||
|
||||
w.Header().Set("Content-Type", "application/json")
|
||||
json.NewEncoder(w).Encode(res)
|
||||
}
|
||||
|
||||
func handleC2NUpdatePost(b *LocalBackend, w http.ResponseWriter, r *http.Request) {
|
||||
b.logf("c2n: POST /update received")
|
||||
res := b.newC2NUpdateResponse()
|
||||
defer func() {
|
||||
if res.Err != "" {
|
||||
b.logf("c2n: POST /update failed: %s", res.Err)
|
||||
}
|
||||
w.Header().Set("Content-Type", "application/json")
|
||||
json.NewEncoder(w).Encode(res)
|
||||
}()
|
||||
|
||||
if !res.Enabled {
|
||||
res.Err = "not enabled"
|
||||
return
|
||||
}
|
||||
if !res.Supported {
|
||||
res.Err = "not supported"
|
||||
return
|
||||
}
|
||||
|
||||
// Do not update if we have active inbound SSH connections. Control can set
|
||||
// force=true query parameter to override this.
|
||||
if r.FormValue("force") != "true" && b.sshServer != nil && b.sshServer.NumActiveConns() > 0 {
|
||||
res.Err = "not updating due to active SSH connections"
|
||||
return
|
||||
}
|
||||
|
||||
if err := b.startAutoUpdate("c2n"); err != nil {
|
||||
res.Err = err.Error()
|
||||
return
|
||||
}
|
||||
res.Started = true
|
||||
}
|
||||
|
||||
func handleC2NPostureIdentityGet(b *LocalBackend, w http.ResponseWriter, r *http.Request) {
|
||||
b.logf("c2n: GET /posture/identity received")
|
||||
|
||||
res := tailcfg.C2NPostureIdentityResponse{}
|
||||
|
||||
// Only collect posture identity if enabled on the client,
|
||||
// this will first check syspolicy, MDM settings like Registry
|
||||
// on Windows or defaults on macOS. If they are not set, it falls
|
||||
// back to the cli-flag, `--posture-checking`.
|
||||
choice, err := syspolicy.GetPreferenceOption(syspolicy.PostureChecking)
|
||||
if err != nil {
|
||||
b.logf(
|
||||
"c2n: failed to read PostureChecking from syspolicy, returning default from CLI: %s; got error: %s",
|
||||
b.Prefs().PostureChecking(),
|
||||
err,
|
||||
)
|
||||
}
|
||||
|
||||
if choice.ShouldEnable(b.Prefs().PostureChecking()) {
|
||||
sns, err := posture.GetSerialNumbers(b.logf)
|
||||
if err != nil {
|
||||
http.Error(w, err.Error(), http.StatusInternalServerError)
|
||||
return
|
||||
}
|
||||
res.SerialNumbers = sns
|
||||
|
||||
// TODO(tailscale/corp#21371, 2024-07-10): once this has landed in a stable release
|
||||
// and looks good in client metrics, remove this parameter and always report MAC
|
||||
// addresses.
|
||||
if r.FormValue("hwaddrs") == "true" {
|
||||
res.IfaceHardwareAddrs, err = posture.GetHardwareAddrs()
|
||||
if err != nil {
|
||||
b.logf("c2n: GetHardwareAddrs returned error: %v", err)
|
||||
}
|
||||
}
|
||||
} else {
|
||||
res.PostureDisabled = true
|
||||
}
|
||||
|
||||
w.Header().Set("Content-Type", "application/json")
|
||||
json.NewEncoder(w).Encode(res)
|
||||
}
|
||||
|
||||
func (b *LocalBackend) newC2NUpdateResponse() tailcfg.C2NUpdateResponse {
|
||||
// If NewUpdater does not return an error, we can update the installation.
|
||||
//
|
||||
// Note that we create the Updater solely to check for errors; we do not
|
||||
// invoke it here. For this purpose, it is ok to pass it a zero Arguments.
|
||||
prefs := b.Prefs().AutoUpdate()
|
||||
return tailcfg.C2NUpdateResponse{
|
||||
Enabled: envknob.AllowsRemoteUpdate() || prefs.Apply.EqualBool(true),
|
||||
Supported: clientupdate.CanAutoUpdate() && !version.IsMacSysExt(),
|
||||
}
|
||||
}
|
||||
|
||||
func (b *LocalBackend) c2nUpdateStarted() bool {
|
||||
b.mu.Lock()
|
||||
defer b.mu.Unlock()
|
||||
return b.c2nUpdateStatus.started
|
||||
}
|
||||
|
||||
func (b *LocalBackend) setC2NUpdateStarted(v bool) {
|
||||
b.mu.Lock()
|
||||
defer b.mu.Unlock()
|
||||
b.c2nUpdateStatus.started = v
|
||||
}
|
||||
|
||||
func (b *LocalBackend) trySetC2NUpdateStarted() bool {
|
||||
b.mu.Lock()
|
||||
defer b.mu.Unlock()
|
||||
if b.c2nUpdateStatus.started {
|
||||
return false
|
||||
}
|
||||
b.c2nUpdateStatus.started = true
|
||||
return true
|
||||
}
|
||||
|
||||
// findCmdTailscale looks for the cmd/tailscale that corresponds to the
|
||||
// currently running cmd/tailscaled. It's up to the caller to verify that the
|
||||
// two match, but this function does its best to find the right one. Notably, it
|
||||
// doesn't use $PATH for security reasons.
|
||||
func findCmdTailscale() (string, error) {
|
||||
self, err := os.Executable()
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
var ts string
|
||||
switch runtime.GOOS {
|
||||
case "linux":
|
||||
if self == "/usr/sbin/tailscaled" || self == "/usr/bin/tailscaled" {
|
||||
ts = "/usr/bin/tailscale"
|
||||
}
|
||||
if self == "/usr/local/sbin/tailscaled" || self == "/usr/local/bin/tailscaled" {
|
||||
ts = "/usr/local/bin/tailscale"
|
||||
}
|
||||
switch distro.Get() {
|
||||
case distro.QNAP:
|
||||
// The volume under /share/ where qpkg are installed is not
|
||||
// predictable. But the rest of the path is.
|
||||
ok, err := filepath.Match("/share/*/.qpkg/Tailscale/tailscaled", self)
|
||||
if err == nil && ok {
|
||||
ts = filepath.Join(filepath.Dir(self), "tailscale")
|
||||
}
|
||||
case distro.Unraid:
|
||||
if self == "/usr/local/emhttp/plugins/tailscale/bin/tailscaled" {
|
||||
ts = "/usr/local/emhttp/plugins/tailscale/bin/tailscale"
|
||||
}
|
||||
}
|
||||
case "windows":
|
||||
ts = filepath.Join(filepath.Dir(self), "tailscale.exe")
|
||||
case "freebsd":
|
||||
if self == "/usr/local/bin/tailscaled" {
|
||||
ts = "/usr/local/bin/tailscale"
|
||||
}
|
||||
default:
|
||||
return "", fmt.Errorf("unsupported OS %v", runtime.GOOS)
|
||||
}
|
||||
if ts != "" && regularFileExists(ts) {
|
||||
return ts, nil
|
||||
}
|
||||
return "", errors.New("tailscale executable not found in expected place")
|
||||
}
|
||||
|
||||
func tailscaleUpdateCmd(cmdTS string) *exec.Cmd {
|
||||
defaultCmd := exec.Command(cmdTS, "update", "--yes")
|
||||
if runtime.GOOS != "linux" {
|
||||
return defaultCmd
|
||||
}
|
||||
if _, err := exec.LookPath("systemd-run"); err != nil {
|
||||
return defaultCmd
|
||||
}
|
||||
|
||||
// When systemd-run is available, use it to run the update command. This
|
||||
// creates a new temporary unit separate from the tailscaled unit. When
|
||||
// tailscaled is restarted during the update, systemd won't kill this
|
||||
// temporary update unit, which could cause unexpected breakage.
|
||||
//
|
||||
// We want to use a few optional flags:
|
||||
// * --wait, to block the update command until completion (added in systemd 232)
|
||||
// * --pipe, to collect stdout/stderr (added in systemd 235)
|
||||
// * --collect, to clean up failed runs from memory (added in systemd 236)
|
||||
//
|
||||
// We need to check the version of systemd to figure out if those flags are
|
||||
// available.
|
||||
//
|
||||
// The output will look like:
|
||||
//
|
||||
// systemd 255 (255.7-1-arch)
|
||||
// +PAM +AUDIT ... other feature flags ...
|
||||
systemdVerOut, err := exec.Command("systemd-run", "--version").Output()
|
||||
if err != nil {
|
||||
return defaultCmd
|
||||
}
|
||||
parts := strings.Fields(string(systemdVerOut))
|
||||
if len(parts) < 2 || parts[0] != "systemd" {
|
||||
return defaultCmd
|
||||
}
|
||||
systemdVer, err := strconv.Atoi(parts[1])
|
||||
if err != nil {
|
||||
return defaultCmd
|
||||
}
|
||||
if systemdVer >= 236 {
|
||||
return exec.Command("systemd-run", "--wait", "--pipe", "--collect", cmdTS, "update", "--yes")
|
||||
} else if systemdVer >= 235 {
|
||||
return exec.Command("systemd-run", "--wait", "--pipe", cmdTS, "update", "--yes")
|
||||
} else if systemdVer >= 232 {
|
||||
return exec.Command("systemd-run", "--wait", cmdTS, "update", "--yes")
|
||||
} else {
|
||||
return exec.Command("systemd-run", cmdTS, "update", "--yes")
|
||||
}
|
||||
}
|
||||
|
||||
func regularFileExists(path string) bool {
|
||||
fi, err := os.Stat(path)
|
||||
return err == nil && fi.Mode().IsRegular()
|
||||
}
|
||||
|
||||
func handleC2NWoL(b *LocalBackend, w http.ResponseWriter, r *http.Request) {
|
||||
r.ParseForm()
|
||||
var macs []net.HardwareAddr
|
||||
for _, macStr := range r.Form["mac"] {
|
||||
mac, err := net.ParseMAC(macStr)
|
||||
if err != nil {
|
||||
http.Error(w, "bad 'mac' param", http.StatusBadRequest)
|
||||
return
|
||||
}
|
||||
macs = append(macs, mac)
|
||||
}
|
||||
var res struct {
|
||||
SentTo []string
|
||||
Errors []string
|
||||
}
|
||||
st := b.sys.NetMon.Get().InterfaceState()
|
||||
if st == nil {
|
||||
res.Errors = append(res.Errors, "no interface state")
|
||||
writeJSON(w, &res)
|
||||
return
|
||||
}
|
||||
var password []byte // TODO(bradfitz): support? does anything use WoL passwords?
|
||||
for _, mac := range macs {
|
||||
for ifName, ips := range st.InterfaceIPs {
|
||||
for _, ip := range ips {
|
||||
if ip.Addr().IsLoopback() || ip.Addr().Is6() {
|
||||
continue
|
||||
}
|
||||
local := &net.UDPAddr{
|
||||
IP: ip.Addr().AsSlice(),
|
||||
Port: 0,
|
||||
}
|
||||
remote := &net.UDPAddr{
|
||||
IP: net.IPv4bcast,
|
||||
Port: 0,
|
||||
}
|
||||
if err := wol.Wake(mac, password, local, remote); err != nil {
|
||||
res.Errors = append(res.Errors, err.Error())
|
||||
} else {
|
||||
res.SentTo = append(res.SentTo, ifName)
|
||||
}
|
||||
break // one per interface is enough
|
||||
}
|
||||
}
|
||||
}
|
||||
sort.Strings(res.SentTo)
|
||||
writeJSON(w, &res)
|
||||
}
|
||||
|
||||
// handleC2NTLSCertStatus returns info about the last TLS certificate issued for the
|
||||
// provided domain. This can be called by the controlplane to clean up DNS TXT
|
||||
// records when they're no longer needed by LetsEncrypt.
|
||||
//
|
||||
// It does not kick off a cert fetch or async refresh. It only reports anything
|
||||
// that's already sitting on disk, and only reports metadata about the public
|
||||
// cert (stuff that'd be the in CT logs anyway).
|
||||
func handleC2NTLSCertStatus(b *LocalBackend, w http.ResponseWriter, r *http.Request) {
|
||||
cs, err := b.getCertStore()
|
||||
if err != nil {
|
||||
http.Error(w, err.Error(), http.StatusInternalServerError)
|
||||
return
|
||||
}
|
||||
|
||||
domain := r.FormValue("domain")
|
||||
if domain == "" {
|
||||
http.Error(w, "no 'domain'", http.StatusBadRequest)
|
||||
return
|
||||
}
|
||||
|
||||
ret := &tailcfg.C2NTLSCertInfo{}
|
||||
pair, err := getCertPEMCached(cs, domain, b.clock.Now())
|
||||
ret.Valid = err == nil
|
||||
if err != nil {
|
||||
ret.Error = err.Error()
|
||||
if errors.Is(err, errCertExpired) {
|
||||
ret.Expired = true
|
||||
} else if errors.Is(err, ipn.ErrStateNotExist) {
|
||||
ret.Missing = true
|
||||
ret.Error = "no certificate"
|
||||
}
|
||||
} else {
|
||||
block, _ := pem.Decode(pair.CertPEM)
|
||||
if block == nil {
|
||||
ret.Error = "invalid PEM"
|
||||
ret.Valid = false
|
||||
} else {
|
||||
cert, err := x509.ParseCertificate(block.Bytes)
|
||||
if err != nil {
|
||||
ret.Error = fmt.Sprintf("invalid certificate: %v", err)
|
||||
ret.Valid = false
|
||||
} else {
|
||||
ret.NotBefore = cert.NotBefore.UTC().Format(time.RFC3339)
|
||||
ret.NotAfter = cert.NotAfter.UTC().Format(time.RFC3339)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
writeJSON(w, ret)
|
||||
}
|
||||
45
vendor/tailscale.com/ipn/ipnlocal/c2n_pprof.go
generated
vendored
Normal file
45
vendor/tailscale.com/ipn/ipnlocal/c2n_pprof.go
generated
vendored
Normal file
@@ -0,0 +1,45 @@
|
||||
// Copyright (c) Tailscale Inc & AUTHORS
|
||||
// SPDX-License-Identifier: BSD-3-Clause
|
||||
|
||||
//go:build !js && !wasm
|
||||
|
||||
package ipnlocal
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"net/http"
|
||||
"runtime"
|
||||
"runtime/pprof"
|
||||
"strconv"
|
||||
)
|
||||
|
||||
func init() {
|
||||
c2nLogHeap = func(w http.ResponseWriter, r *http.Request) {
|
||||
// Support same optional gc parameter as net/http/pprof:
|
||||
if gc, _ := strconv.Atoi(r.FormValue("gc")); gc > 0 {
|
||||
runtime.GC()
|
||||
}
|
||||
pprof.WriteHeapProfile(w)
|
||||
}
|
||||
|
||||
c2nPprof = func(w http.ResponseWriter, r *http.Request, profile string) {
|
||||
w.Header().Set("X-Content-Type-Options", "nosniff")
|
||||
p := pprof.Lookup(string(profile))
|
||||
if p == nil {
|
||||
http.Error(w, "Unknown profile", http.StatusNotFound)
|
||||
return
|
||||
}
|
||||
gc, _ := strconv.Atoi(r.FormValue("gc"))
|
||||
if profile == "heap" && gc > 0 {
|
||||
runtime.GC()
|
||||
}
|
||||
debug, _ := strconv.Atoi(r.FormValue("debug"))
|
||||
if debug != 0 {
|
||||
w.Header().Set("Content-Type", "text/plain; charset=utf-8")
|
||||
} else {
|
||||
w.Header().Set("Content-Type", "application/octet-stream")
|
||||
w.Header().Set("Content-Disposition", fmt.Sprintf(`attachment; filename="%s"`, profile))
|
||||
}
|
||||
p.WriteTo(w, debug)
|
||||
}
|
||||
}
|
||||
731
vendor/tailscale.com/ipn/ipnlocal/cert.go
generated
vendored
Normal file
731
vendor/tailscale.com/ipn/ipnlocal/cert.go
generated
vendored
Normal file
@@ -0,0 +1,731 @@
|
||||
// Copyright (c) Tailscale Inc & AUTHORS
|
||||
// SPDX-License-Identifier: BSD-3-Clause
|
||||
|
||||
//go:build !js
|
||||
|
||||
package ipnlocal
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"context"
|
||||
"crypto"
|
||||
"crypto/ecdsa"
|
||||
"crypto/elliptic"
|
||||
"crypto/rand"
|
||||
"crypto/rsa"
|
||||
"crypto/tls"
|
||||
"crypto/x509"
|
||||
"crypto/x509/pkix"
|
||||
"encoding/json"
|
||||
"encoding/pem"
|
||||
"errors"
|
||||
"fmt"
|
||||
"io"
|
||||
"log"
|
||||
randv2 "math/rand/v2"
|
||||
"net"
|
||||
"os"
|
||||
"path/filepath"
|
||||
"runtime"
|
||||
"slices"
|
||||
"strings"
|
||||
"sync"
|
||||
"time"
|
||||
|
||||
"github.com/tailscale/golang-x-crypto/acme"
|
||||
"tailscale.com/atomicfile"
|
||||
"tailscale.com/envknob"
|
||||
"tailscale.com/hostinfo"
|
||||
"tailscale.com/ipn"
|
||||
"tailscale.com/ipn/ipnstate"
|
||||
"tailscale.com/ipn/store"
|
||||
"tailscale.com/ipn/store/mem"
|
||||
"tailscale.com/types/logger"
|
||||
"tailscale.com/util/testenv"
|
||||
"tailscale.com/version"
|
||||
"tailscale.com/version/distro"
|
||||
)
|
||||
|
||||
// Process-wide cache. (A new *Handler is created per connection,
|
||||
// effectively per request)
|
||||
var (
|
||||
// acmeMu guards all ACME operations, so concurrent requests
|
||||
// for certs don't slam ACME. The first will go through and
|
||||
// populate the on-disk cache and the rest should use that.
|
||||
acmeMu sync.Mutex
|
||||
|
||||
renewMu sync.Mutex // lock order: acmeMu before renewMu
|
||||
renewCertAt = map[string]time.Time{}
|
||||
)
|
||||
|
||||
// certDir returns (creating if needed) the directory in which cached
|
||||
// cert keypairs are stored.
|
||||
func (b *LocalBackend) certDir() (string, error) {
|
||||
d := b.TailscaleVarRoot()
|
||||
|
||||
// As a workaround for Synology DSM6 not having a "var" directory, use the
|
||||
// app's "etc" directory (on a small partition) to hold certs at least.
|
||||
// See https://github.com/tailscale/tailscale/issues/4060#issuecomment-1186592251
|
||||
if d == "" && runtime.GOOS == "linux" && distro.Get() == distro.Synology && distro.DSMVersion() == 6 {
|
||||
d = "/var/packages/Tailscale/etc" // base; we append "certs" below
|
||||
}
|
||||
if d == "" {
|
||||
return "", errors.New("no TailscaleVarRoot")
|
||||
}
|
||||
full := filepath.Join(d, "certs")
|
||||
if err := os.MkdirAll(full, 0700); err != nil {
|
||||
return "", err
|
||||
}
|
||||
return full, nil
|
||||
}
|
||||
|
||||
var acmeDebug = envknob.RegisterBool("TS_DEBUG_ACME")
|
||||
|
||||
// GetCertPEM gets the TLSCertKeyPair for domain, either from cache or via the
|
||||
// ACME process. ACME process is used for new domain certs, existing expired
|
||||
// certs or existing certs that should get renewed due to upcoming expiry.
|
||||
//
|
||||
// If a cert is expired, it will be renewed synchronously otherwise it will be
|
||||
// renewed asynchronously.
|
||||
func (b *LocalBackend) GetCertPEM(ctx context.Context, domain string) (*TLSCertKeyPair, error) {
|
||||
return b.GetCertPEMWithValidity(ctx, domain, 0)
|
||||
}
|
||||
|
||||
// GetCertPEMWithValidity gets the TLSCertKeyPair for domain, either from cache
|
||||
// or via the ACME process. ACME process is used for new domain certs, existing
|
||||
// expired certs or existing certs that should get renewed sooner than
|
||||
// minValidity.
|
||||
//
|
||||
// If a cert is expired, or expires sooner than minValidity, it will be renewed
|
||||
// synchronously. Otherwise it will be renewed asynchronously.
|
||||
func (b *LocalBackend) GetCertPEMWithValidity(ctx context.Context, domain string, minValidity time.Duration) (*TLSCertKeyPair, error) {
|
||||
if !validLookingCertDomain(domain) {
|
||||
return nil, errors.New("invalid domain")
|
||||
}
|
||||
logf := logger.WithPrefix(b.logf, fmt.Sprintf("cert(%q): ", domain))
|
||||
now := b.clock.Now()
|
||||
traceACME := func(v any) {
|
||||
if !acmeDebug() {
|
||||
return
|
||||
}
|
||||
j, _ := json.MarshalIndent(v, "", "\t")
|
||||
log.Printf("acme %T: %s", v, j)
|
||||
}
|
||||
|
||||
cs, err := b.getCertStore()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
if pair, err := getCertPEMCached(cs, domain, now); err == nil {
|
||||
// If we got here, we have a valid unexpired cert.
|
||||
// Check whether we should start an async renewal.
|
||||
shouldRenew, err := b.shouldStartDomainRenewal(cs, domain, now, pair, minValidity)
|
||||
if err != nil {
|
||||
logf("error checking for certificate renewal: %v", err)
|
||||
// Renewal check failed, but the current cert is valid and not
|
||||
// expired, so it's safe to return.
|
||||
return pair, nil
|
||||
}
|
||||
if !shouldRenew {
|
||||
return pair, nil
|
||||
}
|
||||
if minValidity == 0 {
|
||||
logf("starting async renewal")
|
||||
// Start renewal in the background, return current valid cert.
|
||||
go b.getCertPEM(context.Background(), cs, logf, traceACME, domain, now, minValidity)
|
||||
return pair, nil
|
||||
}
|
||||
// If the caller requested a specific validity duration, fall through
|
||||
// to synchronous renewal to fulfill that.
|
||||
logf("starting sync renewal")
|
||||
}
|
||||
|
||||
pair, err := b.getCertPEM(ctx, cs, logf, traceACME, domain, now, minValidity)
|
||||
if err != nil {
|
||||
logf("getCertPEM: %v", err)
|
||||
return nil, err
|
||||
}
|
||||
return pair, nil
|
||||
}
|
||||
|
||||
// shouldStartDomainRenewal reports whether the domain's cert should be renewed
|
||||
// based on the current time, the cert's expiry, and the ARI check.
|
||||
func (b *LocalBackend) shouldStartDomainRenewal(cs certStore, domain string, now time.Time, pair *TLSCertKeyPair, minValidity time.Duration) (bool, error) {
|
||||
if minValidity != 0 {
|
||||
cert, err := pair.parseCertificate()
|
||||
if err != nil {
|
||||
return false, fmt.Errorf("parsing certificate: %w", err)
|
||||
}
|
||||
return cert.NotAfter.Sub(now) < minValidity, nil
|
||||
}
|
||||
renewMu.Lock()
|
||||
defer renewMu.Unlock()
|
||||
if renewAt, ok := renewCertAt[domain]; ok {
|
||||
return now.After(renewAt), nil
|
||||
}
|
||||
|
||||
renewTime, err := b.domainRenewalTimeByARI(cs, pair)
|
||||
if err != nil {
|
||||
// Log any ARI failure and fall back to checking for renewal by expiry.
|
||||
b.logf("acme: ARI check failed: %v; falling back to expiry-based check", err)
|
||||
renewTime, err = b.domainRenewalTimeByExpiry(pair)
|
||||
if err != nil {
|
||||
return false, err
|
||||
}
|
||||
}
|
||||
|
||||
renewCertAt[domain] = renewTime
|
||||
return now.After(renewTime), nil
|
||||
}
|
||||
|
||||
func (b *LocalBackend) domainRenewed(domain string) {
|
||||
renewMu.Lock()
|
||||
defer renewMu.Unlock()
|
||||
delete(renewCertAt, domain)
|
||||
}
|
||||
|
||||
func (b *LocalBackend) domainRenewalTimeByExpiry(pair *TLSCertKeyPair) (time.Time, error) {
|
||||
cert, err := pair.parseCertificate()
|
||||
if err != nil {
|
||||
return time.Time{}, fmt.Errorf("parsing certificate: %w", err)
|
||||
}
|
||||
|
||||
certLifetime := cert.NotAfter.Sub(cert.NotBefore)
|
||||
if certLifetime < 0 {
|
||||
return time.Time{}, fmt.Errorf("negative certificate lifetime %v", certLifetime)
|
||||
}
|
||||
|
||||
// Per https://github.com/tailscale/tailscale/issues/8204, check
|
||||
// whether we're more than 2/3 of the way through the certificate's
|
||||
// lifetime, which is the officially-recommended best practice by Let's
|
||||
// Encrypt.
|
||||
renewalDuration := certLifetime * 2 / 3
|
||||
renewAt := cert.NotBefore.Add(renewalDuration)
|
||||
return renewAt, nil
|
||||
}
|
||||
|
||||
func (b *LocalBackend) domainRenewalTimeByARI(cs certStore, pair *TLSCertKeyPair) (time.Time, error) {
|
||||
var blocks []*pem.Block
|
||||
rest := pair.CertPEM
|
||||
for len(rest) > 0 {
|
||||
var block *pem.Block
|
||||
block, rest = pem.Decode(rest)
|
||||
if block == nil {
|
||||
return time.Time{}, fmt.Errorf("parsing certificate PEM")
|
||||
}
|
||||
blocks = append(blocks, block)
|
||||
}
|
||||
if len(blocks) < 1 {
|
||||
return time.Time{}, fmt.Errorf("could not parse certificate chain from certStore, got %d PEM block(s)", len(blocks))
|
||||
}
|
||||
ac, err := acmeClient(cs)
|
||||
if err != nil {
|
||||
return time.Time{}, err
|
||||
}
|
||||
ctx, cancel := context.WithTimeout(b.ctx, 5*time.Second)
|
||||
defer cancel()
|
||||
ri, err := ac.FetchRenewalInfo(ctx, blocks[0].Bytes)
|
||||
if err != nil {
|
||||
return time.Time{}, fmt.Errorf("failed to fetch renewal info from ACME server: %w", err)
|
||||
}
|
||||
if acmeDebug() {
|
||||
b.logf("acme: ARI response: %+v", ri)
|
||||
}
|
||||
|
||||
// Select a random time in the suggested window and renew if that time has
|
||||
// passed. Time is randomized per recommendation in
|
||||
// https://datatracker.ietf.org/doc/draft-ietf-acme-ari/
|
||||
start, end := ri.SuggestedWindow.Start, ri.SuggestedWindow.End
|
||||
renewTime := start.Add(randv2.N(end.Sub(start)))
|
||||
return renewTime, nil
|
||||
}
|
||||
|
||||
// certStore provides a way to perist and retrieve TLS certificates.
|
||||
// As of 2023-02-01, we use store certs in directories on disk everywhere
|
||||
// except on Kubernetes, where we use the state store.
|
||||
type certStore interface {
|
||||
// Read returns the cert and key for domain, if they exist and are valid
|
||||
// for now. If they're expired, it returns errCertExpired.
|
||||
// If they don't exist, it returns ipn.ErrStateNotExist.
|
||||
Read(domain string, now time.Time) (*TLSCertKeyPair, error)
|
||||
// WriteCert writes the cert for domain.
|
||||
WriteCert(domain string, cert []byte) error
|
||||
// WriteKey writes the key for domain.
|
||||
WriteKey(domain string, key []byte) error
|
||||
// ACMEKey returns the value previously stored via WriteACMEKey.
|
||||
// It is a PEM encoded ECDSA key.
|
||||
ACMEKey() ([]byte, error)
|
||||
// WriteACMEKey stores the provided PEM encoded ECDSA key.
|
||||
WriteACMEKey([]byte) error
|
||||
}
|
||||
|
||||
var errCertExpired = errors.New("cert expired")
|
||||
|
||||
var testX509Roots *x509.CertPool // set non-nil by tests
|
||||
|
||||
func (b *LocalBackend) getCertStore() (certStore, error) {
|
||||
switch b.store.(type) {
|
||||
case *store.FileStore:
|
||||
case *mem.Store:
|
||||
default:
|
||||
if hostinfo.GetEnvType() == hostinfo.Kubernetes {
|
||||
// We're running in Kubernetes with a custom StateStore,
|
||||
// use that instead of the cert directory.
|
||||
// TODO(maisem): expand this to other environments?
|
||||
return certStateStore{StateStore: b.store}, nil
|
||||
}
|
||||
}
|
||||
dir, err := b.certDir()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if testX509Roots != nil && !testenv.InTest() {
|
||||
panic("use of test hook outside of tests")
|
||||
}
|
||||
return certFileStore{dir: dir, testRoots: testX509Roots}, nil
|
||||
}
|
||||
|
||||
// certFileStore implements certStore by storing the cert & key files in the named directory.
|
||||
type certFileStore struct {
|
||||
dir string
|
||||
|
||||
// This field allows a test to override the CA root(s) for certificate
|
||||
// verification. If nil the default system pool is used.
|
||||
testRoots *x509.CertPool
|
||||
}
|
||||
|
||||
const acmePEMName = "acme-account.key.pem"
|
||||
|
||||
func (f certFileStore) ACMEKey() ([]byte, error) {
|
||||
pemName := filepath.Join(f.dir, acmePEMName)
|
||||
v, err := os.ReadFile(pemName)
|
||||
if err != nil {
|
||||
if os.IsNotExist(err) {
|
||||
return nil, ipn.ErrStateNotExist
|
||||
}
|
||||
return nil, err
|
||||
}
|
||||
return v, nil
|
||||
}
|
||||
|
||||
func (f certFileStore) WriteACMEKey(b []byte) error {
|
||||
pemName := filepath.Join(f.dir, acmePEMName)
|
||||
return atomicfile.WriteFile(pemName, b, 0600)
|
||||
}
|
||||
|
||||
func (f certFileStore) Read(domain string, now time.Time) (*TLSCertKeyPair, error) {
|
||||
certPEM, err := os.ReadFile(certFile(f.dir, domain))
|
||||
if err != nil {
|
||||
if os.IsNotExist(err) {
|
||||
return nil, ipn.ErrStateNotExist
|
||||
}
|
||||
return nil, err
|
||||
}
|
||||
keyPEM, err := os.ReadFile(keyFile(f.dir, domain))
|
||||
if err != nil {
|
||||
if os.IsNotExist(err) {
|
||||
return nil, ipn.ErrStateNotExist
|
||||
}
|
||||
return nil, err
|
||||
}
|
||||
if !validCertPEM(domain, keyPEM, certPEM, f.testRoots, now) {
|
||||
return nil, errCertExpired
|
||||
}
|
||||
return &TLSCertKeyPair{CertPEM: certPEM, KeyPEM: keyPEM, Cached: true}, nil
|
||||
}
|
||||
|
||||
func (f certFileStore) WriteCert(domain string, cert []byte) error {
|
||||
return atomicfile.WriteFile(certFile(f.dir, domain), cert, 0644)
|
||||
}
|
||||
|
||||
func (f certFileStore) WriteKey(domain string, key []byte) error {
|
||||
return atomicfile.WriteFile(keyFile(f.dir, domain), key, 0600)
|
||||
}
|
||||
|
||||
// certStateStore implements certStore by storing the cert & key files in an ipn.StateStore.
|
||||
type certStateStore struct {
|
||||
ipn.StateStore
|
||||
|
||||
// This field allows a test to override the CA root(s) for certificate
|
||||
// verification. If nil the default system pool is used.
|
||||
testRoots *x509.CertPool
|
||||
}
|
||||
|
||||
func (s certStateStore) Read(domain string, now time.Time) (*TLSCertKeyPair, error) {
|
||||
certPEM, err := s.ReadState(ipn.StateKey(domain + ".crt"))
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
keyPEM, err := s.ReadState(ipn.StateKey(domain + ".key"))
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if !validCertPEM(domain, keyPEM, certPEM, s.testRoots, now) {
|
||||
return nil, errCertExpired
|
||||
}
|
||||
return &TLSCertKeyPair{CertPEM: certPEM, KeyPEM: keyPEM, Cached: true}, nil
|
||||
}
|
||||
|
||||
func (s certStateStore) WriteCert(domain string, cert []byte) error {
|
||||
return ipn.WriteState(s.StateStore, ipn.StateKey(domain+".crt"), cert)
|
||||
}
|
||||
|
||||
func (s certStateStore) WriteKey(domain string, key []byte) error {
|
||||
return ipn.WriteState(s.StateStore, ipn.StateKey(domain+".key"), key)
|
||||
}
|
||||
|
||||
func (s certStateStore) ACMEKey() ([]byte, error) {
|
||||
return s.ReadState(ipn.StateKey(acmePEMName))
|
||||
}
|
||||
|
||||
func (s certStateStore) WriteACMEKey(key []byte) error {
|
||||
return ipn.WriteState(s.StateStore, ipn.StateKey(acmePEMName), key)
|
||||
}
|
||||
|
||||
// TLSCertKeyPair is a TLS public and private key, and whether they were obtained
|
||||
// from cache or freshly obtained.
|
||||
type TLSCertKeyPair struct {
|
||||
CertPEM []byte // public key, in PEM form
|
||||
KeyPEM []byte // private key, in PEM form
|
||||
Cached bool // whether result came from cache
|
||||
}
|
||||
|
||||
func (kp TLSCertKeyPair) parseCertificate() (*x509.Certificate, error) {
|
||||
block, _ := pem.Decode(kp.CertPEM)
|
||||
if block == nil {
|
||||
return nil, fmt.Errorf("error parsing certificate PEM")
|
||||
}
|
||||
if block.Type != "CERTIFICATE" {
|
||||
return nil, fmt.Errorf("PEM block is %q, not a CERTIFICATE", block.Type)
|
||||
}
|
||||
return x509.ParseCertificate(block.Bytes)
|
||||
}
|
||||
|
||||
func keyFile(dir, domain string) string { return filepath.Join(dir, domain+".key") }
|
||||
func certFile(dir, domain string) string { return filepath.Join(dir, domain+".crt") }
|
||||
|
||||
// getCertPEMCached returns a non-nil keyPair if a cached keypair for domain
|
||||
// exists on disk in dir that is valid at the provided now time.
|
||||
//
|
||||
// If the keypair is expired, it returns errCertExpired.
|
||||
// If the keypair doesn't exist, it returns ipn.ErrStateNotExist.
|
||||
func getCertPEMCached(cs certStore, domain string, now time.Time) (p *TLSCertKeyPair, err error) {
|
||||
if !validLookingCertDomain(domain) {
|
||||
// Before we read files from disk using it, validate it's halfway
|
||||
// reasonable looking.
|
||||
return nil, fmt.Errorf("invalid domain %q", domain)
|
||||
}
|
||||
return cs.Read(domain, now)
|
||||
}
|
||||
|
||||
func (b *LocalBackend) getCertPEM(ctx context.Context, cs certStore, logf logger.Logf, traceACME func(any), domain string, now time.Time, minValidity time.Duration) (*TLSCertKeyPair, error) {
|
||||
acmeMu.Lock()
|
||||
defer acmeMu.Unlock()
|
||||
|
||||
// In case this method was triggered multiple times in parallel (when
|
||||
// serving incoming requests), check whether one of the other goroutines
|
||||
// already renewed the cert before us.
|
||||
if p, err := getCertPEMCached(cs, domain, now); err == nil {
|
||||
// shouldStartDomainRenewal caches its result so it's OK to call this
|
||||
// frequently.
|
||||
shouldRenew, err := b.shouldStartDomainRenewal(cs, domain, now, p, minValidity)
|
||||
if err != nil {
|
||||
logf("error checking for certificate renewal: %v", err)
|
||||
} else if !shouldRenew {
|
||||
return p, nil
|
||||
}
|
||||
} else if !errors.Is(err, ipn.ErrStateNotExist) && !errors.Is(err, errCertExpired) {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
ac, err := acmeClient(cs)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
a, err := ac.GetReg(ctx, "" /* pre-RFC param */)
|
||||
switch {
|
||||
case err == nil:
|
||||
// Great, already registered.
|
||||
logf("already had ACME account.")
|
||||
case err == acme.ErrNoAccount:
|
||||
a, err = ac.Register(ctx, new(acme.Account), acme.AcceptTOS)
|
||||
if err == acme.ErrAccountAlreadyExists {
|
||||
// Potential race. Double check.
|
||||
a, err = ac.GetReg(ctx, "" /* pre-RFC param */)
|
||||
}
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("acme.Register: %w", err)
|
||||
}
|
||||
logf("registered ACME account.")
|
||||
traceACME(a)
|
||||
default:
|
||||
return nil, fmt.Errorf("acme.GetReg: %w", err)
|
||||
|
||||
}
|
||||
if a.Status != acme.StatusValid {
|
||||
return nil, fmt.Errorf("unexpected ACME account status %q", a.Status)
|
||||
}
|
||||
|
||||
// Before hitting LetsEncrypt, see if this is a domain that Tailscale will do DNS challenges for.
|
||||
st := b.StatusWithoutPeers()
|
||||
if err := checkCertDomain(st, domain); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
order, err := ac.AuthorizeOrder(ctx, []acme.AuthzID{{Type: "dns", Value: domain}})
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
traceACME(order)
|
||||
|
||||
for _, aurl := range order.AuthzURLs {
|
||||
az, err := ac.GetAuthorization(ctx, aurl)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
traceACME(az)
|
||||
for _, ch := range az.Challenges {
|
||||
if ch.Type == "dns-01" {
|
||||
rec, err := ac.DNS01ChallengeRecord(ch.Token)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
key := "_acme-challenge." + domain
|
||||
|
||||
// Do a best-effort lookup to see if we've already created this DNS name
|
||||
// in a previous attempt. Don't burn too much time on it, though. Worst
|
||||
// case we ask the server to create something that already exists.
|
||||
var resolver net.Resolver
|
||||
lookupCtx, lookupCancel := context.WithTimeout(ctx, 500*time.Millisecond)
|
||||
txts, _ := resolver.LookupTXT(lookupCtx, key)
|
||||
lookupCancel()
|
||||
if slices.Contains(txts, rec) {
|
||||
logf("TXT record already existed")
|
||||
} else {
|
||||
logf("starting SetDNS call...")
|
||||
err = b.SetDNS(ctx, key, rec)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("SetDNS %q => %q: %w", key, rec, err)
|
||||
}
|
||||
logf("did SetDNS")
|
||||
}
|
||||
|
||||
chal, err := ac.Accept(ctx, ch)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("Accept: %v", err)
|
||||
}
|
||||
traceACME(chal)
|
||||
break
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
orderURI := order.URI
|
||||
order, err = ac.WaitOrder(ctx, orderURI)
|
||||
if err != nil {
|
||||
if ctx.Err() != nil {
|
||||
return nil, ctx.Err()
|
||||
}
|
||||
if oe, ok := err.(*acme.OrderError); ok {
|
||||
logf("acme: WaitOrder: OrderError status %q", oe.Status)
|
||||
} else {
|
||||
logf("acme: WaitOrder error: %v", err)
|
||||
}
|
||||
return nil, err
|
||||
}
|
||||
traceACME(order)
|
||||
|
||||
certPrivKey, err := ecdsa.GenerateKey(elliptic.P256(), rand.Reader)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
var privPEM bytes.Buffer
|
||||
if err := encodeECDSAKey(&privPEM, certPrivKey); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if err := cs.WriteKey(domain, privPEM.Bytes()); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
csr, err := certRequest(certPrivKey, domain, nil)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
logf("requesting cert...")
|
||||
der, _, err := ac.CreateOrderCert(ctx, order.FinalizeURL, csr, true)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("CreateOrder: %v", err)
|
||||
}
|
||||
logf("got cert")
|
||||
|
||||
var certPEM bytes.Buffer
|
||||
for _, b := range der {
|
||||
pb := &pem.Block{Type: "CERTIFICATE", Bytes: b}
|
||||
if err := pem.Encode(&certPEM, pb); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
}
|
||||
if err := cs.WriteCert(domain, certPEM.Bytes()); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
b.domainRenewed(domain)
|
||||
|
||||
return &TLSCertKeyPair{CertPEM: certPEM.Bytes(), KeyPEM: privPEM.Bytes()}, nil
|
||||
}
|
||||
|
||||
// certRequest generates a CSR for the given common name cn and optional SANs.
|
||||
func certRequest(key crypto.Signer, cn string, ext []pkix.Extension, san ...string) ([]byte, error) {
|
||||
req := &x509.CertificateRequest{
|
||||
Subject: pkix.Name{CommonName: cn},
|
||||
DNSNames: san,
|
||||
ExtraExtensions: ext,
|
||||
}
|
||||
return x509.CreateCertificateRequest(rand.Reader, req, key)
|
||||
}
|
||||
|
||||
func encodeECDSAKey(w io.Writer, key *ecdsa.PrivateKey) error {
|
||||
b, err := x509.MarshalECPrivateKey(key)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
pb := &pem.Block{Type: "EC PRIVATE KEY", Bytes: b}
|
||||
return pem.Encode(w, pb)
|
||||
}
|
||||
|
||||
// parsePrivateKey is a copy of x/crypto/acme's parsePrivateKey.
|
||||
//
|
||||
// Attempt to parse the given private key DER block. OpenSSL 0.9.8 generates
|
||||
// PKCS#1 private keys by default, while OpenSSL 1.0.0 generates PKCS#8 keys.
|
||||
// OpenSSL ecparam generates SEC1 EC private keys for ECDSA. We try all three.
|
||||
//
|
||||
// Inspired by parsePrivateKey in crypto/tls/tls.go.
|
||||
func parsePrivateKey(der []byte) (crypto.Signer, error) {
|
||||
if key, err := x509.ParsePKCS1PrivateKey(der); err == nil {
|
||||
return key, nil
|
||||
}
|
||||
if key, err := x509.ParsePKCS8PrivateKey(der); err == nil {
|
||||
switch key := key.(type) {
|
||||
case *rsa.PrivateKey:
|
||||
return key, nil
|
||||
case *ecdsa.PrivateKey:
|
||||
return key, nil
|
||||
default:
|
||||
return nil, errors.New("acme/autocert: unknown private key type in PKCS#8 wrapping")
|
||||
}
|
||||
}
|
||||
if key, err := x509.ParseECPrivateKey(der); err == nil {
|
||||
return key, nil
|
||||
}
|
||||
|
||||
return nil, errors.New("acme/autocert: failed to parse private key")
|
||||
}
|
||||
|
||||
func acmeKey(cs certStore) (crypto.Signer, error) {
|
||||
if v, err := cs.ACMEKey(); err == nil {
|
||||
priv, _ := pem.Decode(v)
|
||||
if priv == nil || !strings.Contains(priv.Type, "PRIVATE") {
|
||||
return nil, errors.New("acme/autocert: invalid account key found in cache")
|
||||
}
|
||||
return parsePrivateKey(priv.Bytes)
|
||||
} else if !errors.Is(err, ipn.ErrStateNotExist) {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
privKey, err := ecdsa.GenerateKey(elliptic.P256(), rand.Reader)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
var pemBuf bytes.Buffer
|
||||
if err := encodeECDSAKey(&pemBuf, privKey); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if err := cs.WriteACMEKey(pemBuf.Bytes()); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return privKey, nil
|
||||
}
|
||||
|
||||
func acmeClient(cs certStore) (*acme.Client, error) {
|
||||
key, err := acmeKey(cs)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("acmeKey: %w", err)
|
||||
}
|
||||
// Note: if we add support for additional ACME providers (other than
|
||||
// LetsEncrypt), we should make sure that they support ARI extension (see
|
||||
// shouldStartDomainRenewalARI).
|
||||
return &acme.Client{
|
||||
Key: key,
|
||||
UserAgent: "tailscaled/" + version.Long(),
|
||||
}, nil
|
||||
}
|
||||
|
||||
// validCertPEM reports whether the given certificate is valid for domain at now.
|
||||
//
|
||||
// If roots != nil, it is used instead of the system root pool. This is meant
|
||||
// to support testing, and production code should pass roots == nil.
|
||||
func validCertPEM(domain string, keyPEM, certPEM []byte, roots *x509.CertPool, now time.Time) bool {
|
||||
if len(keyPEM) == 0 || len(certPEM) == 0 {
|
||||
return false
|
||||
}
|
||||
tlsCert, err := tls.X509KeyPair(certPEM, keyPEM)
|
||||
if err != nil {
|
||||
return false
|
||||
}
|
||||
|
||||
var leaf *x509.Certificate
|
||||
intermediates := x509.NewCertPool()
|
||||
for i, certDER := range tlsCert.Certificate {
|
||||
cert, err := x509.ParseCertificate(certDER)
|
||||
if err != nil {
|
||||
return false
|
||||
}
|
||||
if i == 0 {
|
||||
leaf = cert
|
||||
} else {
|
||||
intermediates.AddCert(cert)
|
||||
}
|
||||
}
|
||||
if leaf == nil {
|
||||
return false
|
||||
}
|
||||
_, err = leaf.Verify(x509.VerifyOptions{
|
||||
DNSName: domain,
|
||||
CurrentTime: now,
|
||||
Roots: roots,
|
||||
Intermediates: intermediates,
|
||||
})
|
||||
return err == nil
|
||||
}
|
||||
|
||||
// validLookingCertDomain reports whether name looks like a valid domain name that
|
||||
// we might be able to get a cert for.
|
||||
//
|
||||
// It's a light check primarily for double checking before it's used
|
||||
// as part of a filesystem path. The actual validation happens in checkCertDomain.
|
||||
func validLookingCertDomain(name string) bool {
|
||||
if name == "" ||
|
||||
strings.Contains(name, "..") ||
|
||||
strings.ContainsAny(name, ":/\\\x00") ||
|
||||
!strings.Contains(name, ".") {
|
||||
return false
|
||||
}
|
||||
return true
|
||||
}
|
||||
|
||||
func checkCertDomain(st *ipnstate.Status, domain string) error {
|
||||
if domain == "" {
|
||||
return errors.New("missing domain name")
|
||||
}
|
||||
for _, d := range st.CertDomains {
|
||||
if d == domain {
|
||||
return nil
|
||||
}
|
||||
}
|
||||
if len(st.CertDomains) == 0 {
|
||||
return errors.New("your Tailscale account does not support getting TLS certs")
|
||||
}
|
||||
return fmt.Errorf("invalid domain %q; must be one of %q", domain, st.CertDomains)
|
||||
}
|
||||
30
vendor/tailscale.com/ipn/ipnlocal/cert_js.go
generated
vendored
Normal file
30
vendor/tailscale.com/ipn/ipnlocal/cert_js.go
generated
vendored
Normal file
@@ -0,0 +1,30 @@
|
||||
// Copyright (c) Tailscale Inc & AUTHORS
|
||||
// SPDX-License-Identifier: BSD-3-Clause
|
||||
|
||||
package ipnlocal
|
||||
|
||||
import (
|
||||
"context"
|
||||
"errors"
|
||||
"time"
|
||||
)
|
||||
|
||||
type TLSCertKeyPair struct {
|
||||
CertPEM, KeyPEM []byte
|
||||
}
|
||||
|
||||
func (b *LocalBackend) GetCertPEM(ctx context.Context, domain string) (*TLSCertKeyPair, error) {
|
||||
return nil, errors.New("not implemented for js/wasm")
|
||||
}
|
||||
|
||||
var errCertExpired = errors.New("cert expired")
|
||||
|
||||
type certStore interface{}
|
||||
|
||||
func getCertPEMCached(cs certStore, domain string, now time.Time) (p *TLSCertKeyPair, err error) {
|
||||
return nil, errors.New("not implemented for js/wasm")
|
||||
}
|
||||
|
||||
func (b *LocalBackend) getCertStore() (certStore, error) {
|
||||
return nil, errors.New("not implemented for js/wasm")
|
||||
}
|
||||
370
vendor/tailscale.com/ipn/ipnlocal/drive.go
generated
vendored
Normal file
370
vendor/tailscale.com/ipn/ipnlocal/drive.go
generated
vendored
Normal file
@@ -0,0 +1,370 @@
|
||||
// Copyright (c) Tailscale Inc & AUTHORS
|
||||
// SPDX-License-Identifier: BSD-3-Clause
|
||||
|
||||
package ipnlocal
|
||||
|
||||
import (
|
||||
"cmp"
|
||||
"fmt"
|
||||
"os"
|
||||
"slices"
|
||||
|
||||
"tailscale.com/drive"
|
||||
"tailscale.com/ipn"
|
||||
"tailscale.com/tailcfg"
|
||||
"tailscale.com/types/netmap"
|
||||
"tailscale.com/types/views"
|
||||
)
|
||||
|
||||
const (
|
||||
// DriveLocalPort is the port on which the Taildrive listens for location
|
||||
// connections on quad 100.
|
||||
DriveLocalPort = 8080
|
||||
)
|
||||
|
||||
// DriveSharingEnabled reports whether sharing to remote nodes via Taildrive is
|
||||
// enabled. This is currently based on checking for the drive:share node
|
||||
// attribute.
|
||||
func (b *LocalBackend) DriveSharingEnabled() bool {
|
||||
b.mu.Lock()
|
||||
defer b.mu.Unlock()
|
||||
return b.driveSharingEnabledLocked()
|
||||
}
|
||||
|
||||
func (b *LocalBackend) driveSharingEnabledLocked() bool {
|
||||
return b.netMap != nil && b.netMap.SelfNode.HasCap(tailcfg.NodeAttrsTaildriveShare)
|
||||
}
|
||||
|
||||
// DriveAccessEnabled reports whether accessing Taildrive shares on remote nodes
|
||||
// is enabled. This is currently based on checking for the drive:access node
|
||||
// attribute.
|
||||
func (b *LocalBackend) DriveAccessEnabled() bool {
|
||||
b.mu.Lock()
|
||||
defer b.mu.Unlock()
|
||||
return b.driveAccessEnabledLocked()
|
||||
}
|
||||
|
||||
func (b *LocalBackend) driveAccessEnabledLocked() bool {
|
||||
return b.netMap != nil && b.netMap.SelfNode.HasCap(tailcfg.NodeAttrsTaildriveAccess)
|
||||
}
|
||||
|
||||
// DriveSetServerAddr tells Taildrive to use the given address for connecting
|
||||
// to the drive.FileServer that's exposing local files as an unprivileged
|
||||
// user.
|
||||
func (b *LocalBackend) DriveSetServerAddr(addr string) error {
|
||||
fs, ok := b.sys.DriveForRemote.GetOK()
|
||||
if !ok {
|
||||
return drive.ErrDriveNotEnabled
|
||||
}
|
||||
|
||||
fs.SetFileServerAddr(addr)
|
||||
return nil
|
||||
}
|
||||
|
||||
// DriveSetShare adds the given share if no share with that name exists, or
|
||||
// replaces the existing share if one with the same name already exists. To
|
||||
// avoid potential incompatibilities across file systems, share names are
|
||||
// limited to alphanumeric characters and the underscore _.
|
||||
func (b *LocalBackend) DriveSetShare(share *drive.Share) error {
|
||||
var err error
|
||||
share.Name, err = drive.NormalizeShareName(share.Name)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
b.mu.Lock()
|
||||
shares, err := b.driveSetShareLocked(share)
|
||||
b.mu.Unlock()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
b.driveNotifyShares(shares)
|
||||
return nil
|
||||
}
|
||||
|
||||
func (b *LocalBackend) driveSetShareLocked(share *drive.Share) (views.SliceView[*drive.Share, drive.ShareView], error) {
|
||||
existingShares := b.pm.prefs.DriveShares()
|
||||
|
||||
fs, ok := b.sys.DriveForRemote.GetOK()
|
||||
if !ok {
|
||||
return existingShares, drive.ErrDriveNotEnabled
|
||||
}
|
||||
|
||||
addedShare := false
|
||||
var shares []*drive.Share
|
||||
for _, existing := range existingShares.All() {
|
||||
if existing.Name() != share.Name {
|
||||
if !addedShare && existing.Name() > share.Name {
|
||||
// Add share in order
|
||||
shares = append(shares, share)
|
||||
addedShare = true
|
||||
}
|
||||
shares = append(shares, existing.AsStruct())
|
||||
}
|
||||
}
|
||||
if !addedShare {
|
||||
shares = append(shares, share)
|
||||
}
|
||||
|
||||
err := b.driveSetSharesLocked(shares)
|
||||
if err != nil {
|
||||
return existingShares, err
|
||||
}
|
||||
fs.SetShares(shares)
|
||||
|
||||
return b.pm.prefs.DriveShares(), nil
|
||||
}
|
||||
|
||||
// DriveRenameShare renames the share at old name to new name. To avoid
|
||||
// potential incompatibilities across file systems, the new share name is
|
||||
// limited to alphanumeric characters and the underscore _.
|
||||
// Any of the following will result in an error.
|
||||
// - no share found under old name
|
||||
// - new share name contains disallowed characters
|
||||
// - share already exists under new name
|
||||
func (b *LocalBackend) DriveRenameShare(oldName, newName string) error {
|
||||
var err error
|
||||
newName, err = drive.NormalizeShareName(newName)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
b.mu.Lock()
|
||||
shares, err := b.driveRenameShareLocked(oldName, newName)
|
||||
b.mu.Unlock()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
b.driveNotifyShares(shares)
|
||||
return nil
|
||||
}
|
||||
|
||||
func (b *LocalBackend) driveRenameShareLocked(oldName, newName string) (views.SliceView[*drive.Share, drive.ShareView], error) {
|
||||
existingShares := b.pm.prefs.DriveShares()
|
||||
|
||||
fs, ok := b.sys.DriveForRemote.GetOK()
|
||||
if !ok {
|
||||
return existingShares, drive.ErrDriveNotEnabled
|
||||
}
|
||||
|
||||
found := false
|
||||
var shares []*drive.Share
|
||||
for _, existing := range existingShares.All() {
|
||||
if existing.Name() == newName {
|
||||
return existingShares, os.ErrExist
|
||||
}
|
||||
if existing.Name() == oldName {
|
||||
share := existing.AsStruct()
|
||||
share.Name = newName
|
||||
shares = append(shares, share)
|
||||
found = true
|
||||
} else {
|
||||
shares = append(shares, existing.AsStruct())
|
||||
}
|
||||
}
|
||||
|
||||
if !found {
|
||||
return existingShares, os.ErrNotExist
|
||||
}
|
||||
|
||||
slices.SortFunc(shares, drive.CompareShares)
|
||||
err := b.driveSetSharesLocked(shares)
|
||||
if err != nil {
|
||||
return existingShares, err
|
||||
}
|
||||
fs.SetShares(shares)
|
||||
|
||||
return b.pm.prefs.DriveShares(), nil
|
||||
}
|
||||
|
||||
// DriveRemoveShare removes the named share. Share names are forced to
|
||||
// lowercase.
|
||||
func (b *LocalBackend) DriveRemoveShare(name string) error {
|
||||
// Force all share names to lowercase to avoid potential incompatibilities
|
||||
// with clients that don't support case-sensitive filenames.
|
||||
var err error
|
||||
name, err = drive.NormalizeShareName(name)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
b.mu.Lock()
|
||||
shares, err := b.driveRemoveShareLocked(name)
|
||||
b.mu.Unlock()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
b.driveNotifyShares(shares)
|
||||
return nil
|
||||
}
|
||||
|
||||
func (b *LocalBackend) driveRemoveShareLocked(name string) (views.SliceView[*drive.Share, drive.ShareView], error) {
|
||||
existingShares := b.pm.prefs.DriveShares()
|
||||
|
||||
fs, ok := b.sys.DriveForRemote.GetOK()
|
||||
if !ok {
|
||||
return existingShares, drive.ErrDriveNotEnabled
|
||||
}
|
||||
|
||||
found := false
|
||||
var shares []*drive.Share
|
||||
for _, existing := range existingShares.All() {
|
||||
if existing.Name() != name {
|
||||
shares = append(shares, existing.AsStruct())
|
||||
} else {
|
||||
found = true
|
||||
}
|
||||
}
|
||||
|
||||
if !found {
|
||||
return existingShares, os.ErrNotExist
|
||||
}
|
||||
|
||||
err := b.driveSetSharesLocked(shares)
|
||||
if err != nil {
|
||||
return existingShares, err
|
||||
}
|
||||
fs.SetShares(shares)
|
||||
|
||||
return b.pm.prefs.DriveShares(), nil
|
||||
}
|
||||
|
||||
func (b *LocalBackend) driveSetSharesLocked(shares []*drive.Share) error {
|
||||
prefs := b.pm.prefs.AsStruct()
|
||||
prefs.ApplyEdits(&ipn.MaskedPrefs{
|
||||
Prefs: ipn.Prefs{
|
||||
DriveShares: shares,
|
||||
},
|
||||
DriveSharesSet: true,
|
||||
})
|
||||
return b.pm.setPrefsNoPermCheck(prefs.View())
|
||||
}
|
||||
|
||||
// driveNotifyShares notifies IPN bus listeners (e.g. Mac Application process)
|
||||
// about the latest list of shares, if and only if the shares have changed since
|
||||
// the last time we notified.
|
||||
func (b *LocalBackend) driveNotifyShares(shares views.SliceView[*drive.Share, drive.ShareView]) {
|
||||
b.lastNotifiedDriveSharesMu.Lock()
|
||||
defer b.lastNotifiedDriveSharesMu.Unlock()
|
||||
if b.lastNotifiedDriveShares != nil && driveShareViewsEqual(b.lastNotifiedDriveShares, shares) {
|
||||
// shares are unchanged since last notification, don't bother notifying
|
||||
return
|
||||
}
|
||||
b.lastNotifiedDriveShares = &shares
|
||||
|
||||
// Ensures shares is not nil to distinguish "no shares" from "not notifying shares"
|
||||
if shares.IsNil() {
|
||||
shares = views.SliceOfViews(make([]*drive.Share, 0))
|
||||
}
|
||||
b.send(ipn.Notify{DriveShares: shares})
|
||||
}
|
||||
|
||||
// driveNotifyCurrentSharesLocked sends an ipn.Notify if the current set of
|
||||
// shares has changed since the last notification.
|
||||
func (b *LocalBackend) driveNotifyCurrentSharesLocked() {
|
||||
var shares views.SliceView[*drive.Share, drive.ShareView]
|
||||
if b.driveSharingEnabledLocked() {
|
||||
// Only populate shares if sharing is enabled.
|
||||
shares = b.pm.prefs.DriveShares()
|
||||
}
|
||||
|
||||
// Do the below on a goroutine to avoid deadlocking on b.mu in b.send().
|
||||
go b.driveNotifyShares(shares)
|
||||
}
|
||||
|
||||
func driveShareViewsEqual(a *views.SliceView[*drive.Share, drive.ShareView], b views.SliceView[*drive.Share, drive.ShareView]) bool {
|
||||
if a == nil {
|
||||
return false
|
||||
}
|
||||
|
||||
if a.Len() != b.Len() {
|
||||
return false
|
||||
}
|
||||
|
||||
for i := range a.Len() {
|
||||
if !drive.ShareViewsEqual(a.At(i), b.At(i)) {
|
||||
return false
|
||||
}
|
||||
}
|
||||
|
||||
return true
|
||||
}
|
||||
|
||||
// DriveGetShares gets the current list of Taildrive shares, sorted by name.
|
||||
func (b *LocalBackend) DriveGetShares() views.SliceView[*drive.Share, drive.ShareView] {
|
||||
b.mu.Lock()
|
||||
defer b.mu.Unlock()
|
||||
|
||||
return b.pm.prefs.DriveShares()
|
||||
}
|
||||
|
||||
// updateDrivePeersLocked sets all applicable peers from the netmap as Taildrive
|
||||
// remotes.
|
||||
func (b *LocalBackend) updateDrivePeersLocked(nm *netmap.NetworkMap) {
|
||||
fs, ok := b.sys.DriveForLocal.GetOK()
|
||||
if !ok {
|
||||
return
|
||||
}
|
||||
|
||||
var driveRemotes []*drive.Remote
|
||||
if b.driveAccessEnabledLocked() {
|
||||
// Only populate peers if access is enabled, otherwise leave blank.
|
||||
driveRemotes = b.driveRemotesFromPeers(nm)
|
||||
}
|
||||
|
||||
fs.SetRemotes(b.netMap.Domain, driveRemotes, b.newDriveTransport())
|
||||
}
|
||||
|
||||
func (b *LocalBackend) driveRemotesFromPeers(nm *netmap.NetworkMap) []*drive.Remote {
|
||||
driveRemotes := make([]*drive.Remote, 0, len(nm.Peers))
|
||||
for _, p := range nm.Peers {
|
||||
peerID := p.ID()
|
||||
url := fmt.Sprintf("%s/%s", peerAPIBase(nm, p), taildrivePrefix[1:])
|
||||
driveRemotes = append(driveRemotes, &drive.Remote{
|
||||
Name: p.DisplayName(false),
|
||||
URL: url,
|
||||
Available: func() bool {
|
||||
// Peers are available to Taildrive if:
|
||||
// - They are online
|
||||
// - They are allowed to share at least one folder with us
|
||||
b.mu.Lock()
|
||||
latestNetMap := b.netMap
|
||||
b.mu.Unlock()
|
||||
|
||||
idx, found := slices.BinarySearchFunc(latestNetMap.Peers, peerID, func(candidate tailcfg.NodeView, id tailcfg.NodeID) int {
|
||||
return cmp.Compare(candidate.ID(), id)
|
||||
})
|
||||
if !found {
|
||||
return false
|
||||
}
|
||||
|
||||
peer := latestNetMap.Peers[idx]
|
||||
|
||||
// Exclude offline peers.
|
||||
// TODO(oxtoacart): for some reason, this correctly
|
||||
// catches when a node goes from offline to online,
|
||||
// but not the other way around...
|
||||
online := peer.Online()
|
||||
if online == nil || !*online {
|
||||
return false
|
||||
}
|
||||
|
||||
// Check that the peer is allowed to share with us.
|
||||
addresses := peer.Addresses()
|
||||
for i := range addresses.Len() {
|
||||
addr := addresses.At(i)
|
||||
capsMap := b.PeerCaps(addr.Addr())
|
||||
if capsMap.HasCapability(tailcfg.PeerCapabilityTaildriveSharer) {
|
||||
return true
|
||||
}
|
||||
}
|
||||
|
||||
return false
|
||||
},
|
||||
})
|
||||
}
|
||||
return driveRemotes
|
||||
}
|
||||
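As an aside (not part of the vendored file): a minimal, self-contained sketch of the ordered-insert approach that driveSetShareLocked uses above, shown with plain strings instead of drive.Share values. The helper name insertSorted and the example share names are hypothetical; names are assumed to be normalized and already sorted.

// insert_sorted_sketch.go
package main

import "fmt"

// insertSorted inserts name into the sorted slice names, replacing any
// existing entry with the same value, and keeps the slice sorted.
func insertSorted(names []string, name string) []string {
	var out []string
	added := false
	for _, existing := range names {
		if existing != name {
			if !added && existing > name {
				out = append(out, name) // add the new entry in order
				added = true
			}
			out = append(out, existing)
		}
	}
	if !added {
		out = append(out, name) // new entry sorts last (or list was empty)
	}
	return out
}

func main() {
	fmt.Println(insertSorted([]string{"docs", "media"}, "code")) // [code docs media]
}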
226
vendor/tailscale.com/ipn/ipnlocal/expiry.go
generated
vendored
Normal file
@@ -0,0 +1,226 @@
|
||||
// Copyright (c) Tailscale Inc & AUTHORS
|
||||
// SPDX-License-Identifier: BSD-3-Clause
|
||||
|
||||
package ipnlocal
|
||||
|
||||
import (
|
||||
"time"
|
||||
|
||||
"tailscale.com/syncs"
|
||||
"tailscale.com/tailcfg"
|
||||
"tailscale.com/tstime"
|
||||
"tailscale.com/types/key"
|
||||
"tailscale.com/types/logger"
|
||||
"tailscale.com/types/netmap"
|
||||
)
|
||||
|
||||
// For extra defense-in-depth, when we're testing expired nodes we check
|
||||
// ControlTime against this 'epoch' (set to the approximate time that this code
|
||||
// was written) such that if control (or Headscale, etc.) sends a ControlTime
|
||||
// that's sufficiently far in the past, we can safely ignore it.
|
||||
var flagExpiredPeersEpoch = time.Unix(1673373066, 0)
|
||||
|
||||
// If the offset between the current time and the time received from control is
|
||||
// larger than this, we store an offset in our expiryManager to adjust future
|
||||
// clock timings.
|
||||
const minClockDelta = 1 * time.Minute
|
||||
|
||||
// expiryManager tracks the state of expired nodes and the delta from the
|
||||
// current clock time to the time returned from control, and allows mutating a
|
||||
// netmap to mark peers as expired based on the current delta-adjusted time.
|
||||
type expiryManager struct {
|
||||
// previouslyExpired stores nodes that have already expired so we can
|
||||
// only log on state transitions.
|
||||
previouslyExpired map[tailcfg.StableNodeID]bool
|
||||
|
||||
// clockDelta stores the delta between the current time and the time
|
||||
// received from control such that:
|
||||
// time.Now().Add(clockDelta) == MapResponse.ControlTime
|
||||
clockDelta syncs.AtomicValue[time.Duration]
|
||||
|
||||
logf logger.Logf
|
||||
clock tstime.Clock
|
||||
}
|
||||
|
||||
func newExpiryManager(logf logger.Logf) *expiryManager {
|
||||
return &expiryManager{
|
||||
previouslyExpired: map[tailcfg.StableNodeID]bool{},
|
||||
logf: logf,
|
||||
clock: tstime.StdClock{},
|
||||
}
|
||||
}
|
||||
|
||||
// onControlTime is called whenever we receive a new timestamp from the control
|
||||
// server to store the delta.
|
||||
func (em *expiryManager) onControlTime(t time.Time) {
|
||||
localNow := em.clock.Now()
|
||||
delta := t.Sub(localNow)
|
||||
if delta.Abs() > minClockDelta {
|
||||
em.logf("[v1] netmap: flagExpiredPeers: setting clock delta to %v", delta)
|
||||
em.clockDelta.Store(delta)
|
||||
} else {
|
||||
em.clockDelta.Store(0)
|
||||
}
|
||||
}
|
||||
|
||||
// flagExpiredPeers updates mapRes.Peers, mutating all peers that have expired,
|
||||
// taking into account any clock skew detected by using the ControlTime field
|
||||
// in the MapResponse. We don't actually remove expired peers from the Peers
|
||||
// array; instead, we clear some fields of the Node object, and set
|
||||
// Node.Expired so other parts of the codebase can provide more clear error
|
||||
// messages when attempting to e.g. ping an expired node.
|
||||
//
|
||||
// The localNow time should be the output of time.Now for the local system; it
|
||||
// will be adjusted by any stored clock skew from ControlTime.
|
||||
//
|
||||
// This is additionally a defense-in-depth against something going wrong with
|
||||
// control such that we start seeing expired peers with a valid Endpoints or
|
||||
// DERP field.
|
||||
//
|
||||
// This function is safe to call concurrently with onControlTime but not
|
||||
// concurrently with any other call to flagExpiredPeers.
|
||||
func (em *expiryManager) flagExpiredPeers(netmap *netmap.NetworkMap, localNow time.Time) {
|
||||
// Adjust our current time by any saved delta to adjust for clock skew.
|
||||
controlNow := localNow.Add(em.clockDelta.Load())
|
||||
if controlNow.Before(flagExpiredPeersEpoch) {
|
||||
em.logf("netmap: flagExpiredPeers: [unexpected] delta-adjusted current time is before hardcoded epoch; skipping")
|
||||
return
|
||||
}
|
||||
|
||||
for i, peer := range netmap.Peers {
|
||||
// Nodes that don't expire have KeyExpiry set to the zero time;
|
||||
// skip those and peers that are already marked as expired
|
||||
// (e.g. from control).
|
||||
if peer.KeyExpiry().IsZero() || peer.KeyExpiry().After(controlNow) {
|
||||
delete(em.previouslyExpired, peer.StableID())
|
||||
continue
|
||||
} else if peer.Expired() {
|
||||
continue
|
||||
}
|
||||
|
||||
if !em.previouslyExpired[peer.StableID()] {
|
||||
em.logf("[v1] netmap: flagExpiredPeers: clearing expired peer %v", peer.StableID())
|
||||
em.previouslyExpired[peer.StableID()] = true
|
||||
}
|
||||
|
||||
mut := peer.AsStruct()
|
||||
|
||||
// Actually mark the node as expired
|
||||
mut.Expired = true
|
||||
|
||||
// Control clears the Endpoints and DERP fields of expired
|
||||
// nodes; do so here as well. The Expired bool is the correct
|
||||
// thing to set, but this replicates the previous behaviour.
|
||||
//
|
||||
// NOTE: this is insufficient to actually break connectivity,
|
||||
// since we discover endpoints via DERP, and due to DERP return
|
||||
// path optimization.
|
||||
mut.Endpoints = nil
|
||||
mut.DERP = ""
|
||||
|
||||
// Defense-in-depth: break the node's public key as well, in
|
||||
// case something tries to communicate.
|
||||
mut.Key = key.NodePublicWithBadOldPrefix(peer.Key())
|
||||
|
||||
netmap.Peers[i] = mut.View()
|
||||
}
|
||||
}
|
||||
|
||||
// nextPeerExpiry returns the time that the next node in the netmap expires
|
||||
// (including the self node), based on their KeyExpiry. It skips nodes that are
|
||||
// already marked as Expired. If there are no nodes expiring in the future,
|
||||
// then the zero Time will be returned.
|
||||
//
|
||||
// The localNow time should be the output of time.Now for the local system; it
|
||||
// will be adjusted by any stored clock skew from ControlTime.
|
||||
//
|
||||
// This function is safe to call concurrently with other methods of this expiryManager.
|
||||
func (em *expiryManager) nextPeerExpiry(nm *netmap.NetworkMap, localNow time.Time) time.Time {
|
||||
if nm == nil {
|
||||
return time.Time{}
|
||||
}
|
||||
|
||||
controlNow := localNow.Add(em.clockDelta.Load())
|
||||
if controlNow.Before(flagExpiredPeersEpoch) {
|
||||
em.logf("netmap: nextPeerExpiry: [unexpected] delta-adjusted current time is before hardcoded epoch; skipping")
|
||||
return time.Time{}
|
||||
}
|
||||
|
||||
var nextExpiry time.Time // zero if none
|
||||
for _, peer := range nm.Peers {
|
||||
if peer.KeyExpiry().IsZero() {
|
||||
continue // tagged node
|
||||
} else if peer.Expired() {
|
||||
// Peer already expired; Expired is set by the
|
||||
// flagExpiredPeers function, above.
|
||||
continue
|
||||
} else if peer.KeyExpiry().Before(controlNow) {
|
||||
// This peer already expired, and peer.Expired
|
||||
// isn't set for some reason. Skip this node.
|
||||
continue
|
||||
}
|
||||
|
||||
// nextExpiry being zero is a sentinel that we haven't yet set
|
||||
// an expiry; otherwise, only update if this node's expiry is
|
||||
// sooner than the currently-stored one (since we want the
|
||||
// soonest-occurring expiry time).
|
||||
if nextExpiry.IsZero() || peer.KeyExpiry().Before(nextExpiry) {
|
||||
nextExpiry = peer.KeyExpiry()
|
||||
}
|
||||
}
|
||||
|
||||
// Ensure that we also fire this timer if our own node key expires.
|
||||
if nm.SelfNode.Valid() {
|
||||
selfExpiry := nm.SelfNode.KeyExpiry()
|
||||
|
||||
if selfExpiry.IsZero() {
|
||||
// No expiry for self node
|
||||
} else if selfExpiry.Before(controlNow) {
|
||||
// Self node already expired; we don't want to return a
|
||||
// time in the past, so skip this.
|
||||
} else if nextExpiry.IsZero() || selfExpiry.Before(nextExpiry) {
|
||||
// Self node expires after now, but before the soonest
|
||||
// peer in the netmap; update our next expiry to this
|
||||
// time.
|
||||
nextExpiry = selfExpiry
|
||||
}
|
||||
}
|
||||
|
||||
// As an additional defense in depth, never return a time that is
|
||||
// before the current time from the perspective of the local system
|
||||
// (since timers with a zero or negative duration will fire
|
||||
// immediately and can cause unnecessary reconfigurations).
|
||||
//
|
||||
// This can happen if the local clock is running fast; for example:
|
||||
// localTime = 2pm
|
||||
// controlTime = 1pm (real time)
|
||||
// nextExpiry = 1:30pm (real time)
|
||||
//
|
||||
// In the above case, we'd return a nextExpiry of 1:30pm while the
|
||||
// current clock reads 2pm; in this case, setting a timer for
|
||||
// nextExpiry.Sub(now) would result in a negative duration and a timer
|
||||
// that fired immediately.
|
||||
//
|
||||
// In this particular edge-case, return an expiry time 30 seconds after
|
||||
// the local time so that any timers created based on this expiry won't
|
||||
// fire too quickly.
|
||||
//
|
||||
// The alternative would be to do all comparisons in local time,
|
||||
// unadjusted for clock skew, but that doesn't handle cases where the
|
||||
// local clock is "fixed" between netmap updates.
|
||||
if !nextExpiry.IsZero() && nextExpiry.Before(localNow) {
|
||||
em.logf("netmap: nextPeerExpiry: skipping nextExpiry %q before local time %q due to clock skew",
|
||||
nextExpiry.UTC().Format(time.RFC3339),
|
||||
localNow.UTC().Format(time.RFC3339))
|
||||
return localNow.Add(30 * time.Second)
|
||||
}
|
||||
|
||||
return nextExpiry
|
||||
}
|
||||
|
||||
// ControlNow estimates the current time on the control server, calculated as
|
||||
// localNow + the delta between local and control server clocks as recorded
|
||||
// when the LocalBackend last received a time message from the control server.
|
||||
func (b *LocalBackend) ControlNow(localNow time.Time) time.Time {
|
||||
return localNow.Add(b.em.clockDelta.Load())
|
||||
}
|
||||
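As an aside (not part of the vendored file): a minimal sketch of the clock-delta adjustment that onControlTime records and flagExpiredPeers/ControlNow apply above. The timestamps are hypothetical; the point is that adding the stored delta to a local reading recovers the control server's view of "now".

// clock_delta_sketch.go
package main

import (
	"fmt"
	"time"
)

func main() {
	// Local clock runs an hour fast relative to control.
	localNow := time.Date(2023, 1, 10, 14, 0, 0, 0, time.UTC)
	controlTime := time.Date(2023, 1, 10, 13, 0, 0, 0, time.UTC)

	// Delta recorded when the control server last reported its time,
	// chosen so that localNow.Add(delta) == controlTime.
	delta := controlTime.Sub(localNow) // -1h

	// Later, adjust a fresh local reading to estimate control's "now".
	controlNow := localNow.Add(delta)
	fmt.Println(controlNow.Equal(controlTime)) // true
}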
7366
vendor/tailscale.com/ipn/ipnlocal/local.go
generated
vendored
Normal file
File diff suppressed because it is too large
1474
vendor/tailscale.com/ipn/ipnlocal/network-lock.go
generated
vendored
Normal file
File diff suppressed because it is too large
1279
vendor/tailscale.com/ipn/ipnlocal/peerapi.go
generated
vendored
Normal file
File diff suppressed because it is too large
20
vendor/tailscale.com/ipn/ipnlocal/peerapi_h2c.go
generated
vendored
Normal file
@@ -0,0 +1,20 @@
|
||||
// Copyright (c) Tailscale Inc & AUTHORS
|
||||
// SPDX-License-Identifier: BSD-3-Clause
|
||||
|
||||
//go:build !ios && !android && !js
|
||||
|
||||
package ipnlocal
|
||||
|
||||
import (
|
||||
"net/http"
|
||||
|
||||
"golang.org/x/net/http2"
|
||||
"golang.org/x/net/http2/h2c"
|
||||
)
|
||||
|
||||
func init() {
|
||||
addH2C = func(s *http.Server) {
|
||||
h2s := &http2.Server{}
|
||||
s.Handler = h2c.NewHandler(s.Handler, h2s)
|
||||
}
|
||||
}
|
||||
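As an aside (not part of the vendored file): a minimal usage sketch of the same h2c wrapping that the addH2C hook above applies, so a plain http.Server can speak HTTP/2 over cleartext. The address and handler here are hypothetical.

// h2c_usage_sketch.go
package main

import (
	"io"
	"log"
	"net/http"

	"golang.org/x/net/http2"
	"golang.org/x/net/http2/h2c"
)

func main() {
	mux := http.NewServeMux()
	mux.HandleFunc("/", func(w http.ResponseWriter, r *http.Request) {
		io.WriteString(w, "served over "+r.Proto+"\n")
	})
	s := &http.Server{Addr: ":8080", Handler: mux}
	// Wrap the existing handler so h2c upgrades are handled, as addH2C does.
	s.Handler = h2c.NewHandler(s.Handler, &http2.Server{})
	log.Fatal(s.ListenAndServe())
}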
30
vendor/tailscale.com/ipn/ipnlocal/peerapi_macios_ext.go
generated
vendored
Normal file
@@ -0,0 +1,30 @@
|
||||
// Copyright (c) Tailscale Inc & AUTHORS
|
||||
// SPDX-License-Identifier: BSD-3-Clause
|
||||
|
||||
//go:build ts_macext && (darwin || ios)
|
||||
|
||||
package ipnlocal
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"net"
|
||||
"net/netip"
|
||||
|
||||
"tailscale.com/net/netmon"
|
||||
"tailscale.com/net/netns"
|
||||
)
|
||||
|
||||
func init() {
|
||||
initListenConfig = initListenConfigNetworkExtension
|
||||
}
|
||||
|
||||
// initListenConfigNetworkExtension configures nc for listening on IP
|
||||
// through the iOS/macOS Network/System Extension (Packet Tunnel
|
||||
// Provider) sandbox.
|
||||
func initListenConfigNetworkExtension(nc *net.ListenConfig, ip netip.Addr, st *netmon.State, tunIfName string) error {
|
||||
tunIf, ok := st.Interface[tunIfName]
|
||||
if !ok {
|
||||
return fmt.Errorf("no interface with name %q", tunIfName)
|
||||
}
|
||||
return netns.SetListenConfigInterfaceIndex(nc, tunIf.Index)
|
||||
}
|
||||
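As an aside (not part of the vendored file): a small standard-library sketch of the interface lookup that initListenConfigNetworkExtension performs above, resolving a tunnel interface's index by name before binding a listener to it. The interface name "utun3" is hypothetical.

// iface_index_sketch.go
package main

import (
	"fmt"
	"net"
)

func main() {
	ifi, err := net.InterfaceByName("utun3")
	if err != nil {
		fmt.Println("no such interface:", err)
		return
	}
	fmt.Println("interface index:", ifi.Index)
}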
825
vendor/tailscale.com/ipn/ipnlocal/profiles.go
generated
vendored
Normal file
@@ -0,0 +1,825 @@
|
||||
// Copyright (c) Tailscale Inc & AUTHORS
|
||||
// SPDX-License-Identifier: BSD-3-Clause
|
||||
|
||||
package ipnlocal
|
||||
|
||||
import (
|
||||
"cmp"
|
||||
"crypto/rand"
|
||||
"encoding/json"
|
||||
"errors"
|
||||
"fmt"
|
||||
"runtime"
|
||||
"slices"
|
||||
"strings"
|
||||
|
||||
"tailscale.com/clientupdate"
|
||||
"tailscale.com/envknob"
|
||||
"tailscale.com/health"
|
||||
"tailscale.com/ipn"
|
||||
"tailscale.com/tailcfg"
|
||||
"tailscale.com/types/logger"
|
||||
"tailscale.com/util/clientmetric"
|
||||
)
|
||||
|
||||
var debug = envknob.RegisterBool("TS_DEBUG_PROFILES")
|
||||
|
||||
// profileManager is a wrapper around an [ipn.StateStore] that manages
|
||||
// multiple profiles and the current profile.
|
||||
//
|
||||
// It is not safe for concurrent use.
|
||||
type profileManager struct {
|
||||
goos string // used for TestProfileManagementWindows
|
||||
store ipn.StateStore
|
||||
logf logger.Logf
|
||||
health *health.Tracker
|
||||
|
||||
currentUserID ipn.WindowsUserID
|
||||
knownProfiles map[ipn.ProfileID]*ipn.LoginProfile // always non-nil
|
||||
currentProfile *ipn.LoginProfile // always non-nil
|
||||
prefs ipn.PrefsView // always Valid.
|
||||
}
|
||||
|
||||
func (pm *profileManager) dlogf(format string, args ...any) {
|
||||
if !debug() {
|
||||
return
|
||||
}
|
||||
pm.logf(format, args...)
|
||||
}
|
||||
|
||||
func (pm *profileManager) WriteState(id ipn.StateKey, val []byte) error {
|
||||
return ipn.WriteState(pm.store, id, val)
|
||||
}
|
||||
|
||||
// CurrentUserID returns the current user ID. It is only non-empty on
|
||||
// Windows where we have a multi-user system.
|
||||
func (pm *profileManager) CurrentUserID() ipn.WindowsUserID {
|
||||
return pm.currentUserID
|
||||
}
|
||||
|
||||
// SetCurrentUserID sets the current user ID and switches to that user's default (last used) profile.
|
||||
// If the specified user does not have a default profile, or the default profile could not be loaded,
|
||||
// it creates a new one and switches to it. The uid is only non-empty on Windows where we have a multi-user system.
|
||||
func (pm *profileManager) SetCurrentUserID(uid ipn.WindowsUserID) {
|
||||
if pm.currentUserID == uid {
|
||||
return
|
||||
}
|
||||
pm.currentUserID = uid
|
||||
if err := pm.SwitchToDefaultProfile(); err != nil {
|
||||
// SetCurrentUserID should never fail and must always switch to the
|
||||
// user's default profile or create a new profile for the current user.
|
||||
// Until we implement multi-user support and the new permission model,
|
||||
// and remove the concept of the "current user" completely, we must ensure
|
||||
// that when SetCurrentUserID exits, the profile in pm.currentProfile
|
||||
// is either an existing profile owned by the user, or a new, empty profile.
|
||||
pm.logf("%q's default profile cannot be used; creating a new one: %v", uid, err)
|
||||
pm.NewProfileForUser(uid)
|
||||
}
|
||||
}
|
||||
|
||||
// DefaultUserProfileID returns [ipn.ProfileID] of the default (last used) profile for the specified user,
|
||||
// or an empty string if the specified user does not have a default profile.
|
||||
func (pm *profileManager) DefaultUserProfileID(uid ipn.WindowsUserID) ipn.ProfileID {
|
||||
// Read the CurrentProfileKey from the store which stores
|
||||
// the selected profile for the specified user.
|
||||
b, err := pm.store.ReadState(ipn.CurrentProfileKey(string(uid)))
|
||||
pm.dlogf("DefaultUserProfileID: ReadState(%q) = %v, %v", string(uid), len(b), err)
|
||||
if err == ipn.ErrStateNotExist || len(b) == 0 {
|
||||
if runtime.GOOS == "windows" {
|
||||
pm.dlogf("DefaultUserProfileID: windows: migrating from legacy preferences")
|
||||
profile, err := pm.migrateFromLegacyPrefs(uid, false)
|
||||
if err == nil {
|
||||
return profile.ID
|
||||
}
|
||||
pm.logf("failed to migrate from legacy preferences: %v", err)
|
||||
}
|
||||
return ""
|
||||
}
|
||||
|
||||
pk := ipn.StateKey(string(b))
|
||||
prof := pm.findProfileByKey(pk)
|
||||
if prof == nil {
|
||||
pm.dlogf("DefaultUserProfileID: no profile found for key: %q", pk)
|
||||
return ""
|
||||
}
|
||||
return prof.ID
|
||||
}
|
||||
|
||||
// checkProfileAccess returns an [errProfileAccessDenied] if the current user
|
||||
// does not have access to the specified profile.
|
||||
func (pm *profileManager) checkProfileAccess(profile *ipn.LoginProfile) error {
|
||||
if pm.currentUserID != "" && profile.LocalUserID != pm.currentUserID {
|
||||
return errProfileAccessDenied
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// allProfiles returns all profiles accessible to the current user.
|
||||
// The returned profiles are sorted by Name.
|
||||
func (pm *profileManager) allProfiles() (out []*ipn.LoginProfile) {
|
||||
for _, p := range pm.knownProfiles {
|
||||
if pm.checkProfileAccess(p) == nil {
|
||||
out = append(out, p)
|
||||
}
|
||||
}
|
||||
slices.SortFunc(out, func(a, b *ipn.LoginProfile) int {
|
||||
return cmp.Compare(a.Name, b.Name)
|
||||
})
|
||||
return out
|
||||
}
|
||||
|
||||
// matchingProfiles is like [profileManager.allProfiles], but returns only profiles
|
||||
// matching the given predicate.
|
||||
func (pm *profileManager) matchingProfiles(f func(*ipn.LoginProfile) bool) (out []*ipn.LoginProfile) {
|
||||
all := pm.allProfiles()
|
||||
out = all[:0]
|
||||
for _, p := range all {
|
||||
if f(p) {
|
||||
out = append(out, p)
|
||||
}
|
||||
}
|
||||
return out
|
||||
}
|
||||
|
||||
// findMatchingProfiles returns all profiles accessible to the current user
|
||||
// that represent the same node/user as prefs.
|
||||
// The returned profiles are sorted by Name.
|
||||
func (pm *profileManager) findMatchingProfiles(prefs ipn.PrefsView) []*ipn.LoginProfile {
|
||||
return pm.matchingProfiles(func(p *ipn.LoginProfile) bool {
|
||||
return p.ControlURL == prefs.ControlURL() &&
|
||||
(p.UserProfile.ID == prefs.Persist().UserProfile().ID ||
|
||||
p.NodeID == prefs.Persist().NodeID())
|
||||
})
|
||||
}
|
||||
|
||||
// ProfileIDForName returns the profile ID for the profile with the
|
||||
// given name. It returns "" if no such profile exists among profiles
|
||||
// accessible to the current user.
|
||||
func (pm *profileManager) ProfileIDForName(name string) ipn.ProfileID {
|
||||
p := pm.findProfileByName(name)
|
||||
if p == nil {
|
||||
return ""
|
||||
}
|
||||
return p.ID
|
||||
}
|
||||
|
||||
func (pm *profileManager) findProfileByName(name string) *ipn.LoginProfile {
|
||||
out := pm.matchingProfiles(func(p *ipn.LoginProfile) bool {
|
||||
return p.Name == name
|
||||
})
|
||||
if len(out) == 0 {
|
||||
return nil
|
||||
}
|
||||
if len(out) > 1 {
|
||||
pm.logf("[unexpected] multiple profiles with the same name")
|
||||
}
|
||||
return out[0]
|
||||
}
|
||||
|
||||
func (pm *profileManager) findProfileByKey(key ipn.StateKey) *ipn.LoginProfile {
|
||||
out := pm.matchingProfiles(func(p *ipn.LoginProfile) bool {
|
||||
return p.Key == key
|
||||
})
|
||||
if len(out) == 0 {
|
||||
return nil
|
||||
}
|
||||
if len(out) > 1 {
|
||||
pm.logf("[unexpected] multiple profiles with the same key")
|
||||
}
|
||||
return out[0]
|
||||
}
|
||||
|
||||
func (pm *profileManager) setUnattendedModeAsConfigured() error {
|
||||
if pm.goos != "windows" {
|
||||
return nil
|
||||
}
|
||||
|
||||
if pm.currentProfile.Key != "" && pm.prefs.ForceDaemon() {
|
||||
return pm.WriteState(ipn.ServerModeStartKey, []byte(pm.currentProfile.Key))
|
||||
} else {
|
||||
return pm.WriteState(ipn.ServerModeStartKey, nil)
|
||||
}
|
||||
}
|
||||
|
||||
// Reset unloads the current profile, if any.
|
||||
func (pm *profileManager) Reset() {
|
||||
pm.currentUserID = ""
|
||||
pm.NewProfile()
|
||||
}
|
||||
|
||||
// SetPrefs sets the current profile's prefs to the provided value.
|
||||
// It also saves the prefs to the [ipn.StateStore]. It stores a copy of the
|
||||
// provided prefs, which may be accessed via [profileManager.CurrentPrefs].
|
||||
//
|
||||
// The [ipn.NetworkProfile] stores additional information about the tailnet the user
|
||||
// is logged into so that we can keep track of things like their domain name
|
||||
// across user switches to disambiguate the same account but a different tailnet.
|
||||
func (pm *profileManager) SetPrefs(prefsIn ipn.PrefsView, np ipn.NetworkProfile) error {
|
||||
cp := pm.currentProfile
|
||||
if persist := prefsIn.Persist(); !persist.Valid() || persist.NodeID() == "" || persist.UserProfile().LoginName == "" {
|
||||
// We don't know anything about this profile, so ignore it for now.
|
||||
return pm.setProfilePrefsNoPermCheck(pm.currentProfile, prefsIn.AsStruct().View())
|
||||
}
|
||||
|
||||
// Check if we already have an existing profile that matches the user/node.
|
||||
if existing := pm.findMatchingProfiles(prefsIn); len(existing) > 0 {
|
||||
// We already have a profile for this user/node; reuse it. Also
|
||||
// clean up any other duplicate profiles.
|
||||
cp = existing[0]
|
||||
existing = existing[1:]
|
||||
for _, p := range existing {
|
||||
// Clear the state.
|
||||
if err := pm.store.WriteState(p.Key, nil); err != nil {
|
||||
// We couldn't delete the state, so keep the profile around.
|
||||
continue
|
||||
}
|
||||
// Remove the profile, knownProfiles will be persisted
|
||||
// in [profileManager.setProfilePrefs] below.
|
||||
delete(pm.knownProfiles, p.ID)
|
||||
}
|
||||
}
|
||||
pm.currentProfile = cp
|
||||
if err := pm.SetProfilePrefs(cp, prefsIn, np); err != nil {
|
||||
return err
|
||||
}
|
||||
return pm.setProfileAsUserDefault(cp)
|
||||
|
||||
}
|
||||
|
||||
// SetProfilePrefs is like [profileManager.SetPrefs], but sets prefs for the specified [ipn.LoginProfile]
|
||||
// which is not necessarily the [profileManager.CurrentProfile]. It returns an [errProfileAccessDenied]
|
||||
// if the specified profile is not accessible by the current user.
|
||||
func (pm *profileManager) SetProfilePrefs(lp *ipn.LoginProfile, prefsIn ipn.PrefsView, np ipn.NetworkProfile) error {
|
||||
if err := pm.checkProfileAccess(lp); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
// An empty profile.ID indicates that the profile is new, the node info wasn't available,
|
||||
// and it hasn't been persisted yet. We'll generate both an ID and [ipn.StateKey]
|
||||
// once the information is available and needs to be persisted.
|
||||
if lp.ID == "" {
|
||||
if persist := prefsIn.Persist(); persist.Valid() && persist.NodeID() != "" && persist.UserProfile().LoginName != "" {
|
||||
// Generate an ID and [ipn.StateKey] now that we have the node info.
|
||||
lp.ID, lp.Key = newUnusedID(pm.knownProfiles)
|
||||
}
|
||||
|
||||
// Set the current user as the profile owner, unless the current user ID does
|
||||
// not represent a specific user, or the profile is already owned by a different user.
|
||||
// It is only relevant on Windows where we have a multi-user system.
|
||||
if lp.LocalUserID == "" && pm.currentUserID != "" {
|
||||
lp.LocalUserID = pm.currentUserID
|
||||
}
|
||||
}
|
||||
|
||||
var up tailcfg.UserProfile
|
||||
if persist := prefsIn.Persist(); persist.Valid() {
|
||||
up = persist.UserProfile()
|
||||
if up.DisplayName == "" {
|
||||
up.DisplayName = up.LoginName
|
||||
}
|
||||
lp.NodeID = persist.NodeID()
|
||||
} else {
|
||||
lp.NodeID = ""
|
||||
}
|
||||
|
||||
if prefsIn.ProfileName() != "" {
|
||||
lp.Name = prefsIn.ProfileName()
|
||||
} else {
|
||||
lp.Name = up.LoginName
|
||||
}
|
||||
lp.ControlURL = prefsIn.ControlURL()
|
||||
lp.UserProfile = up
|
||||
lp.NetworkProfile = np
|
||||
|
||||
// An empty profile.ID indicates that the node info is not available yet,
|
||||
// and the profile doesn't need to be saved on disk.
|
||||
if lp.ID != "" {
|
||||
pm.knownProfiles[lp.ID] = lp
|
||||
if err := pm.writeKnownProfiles(); err != nil {
|
||||
return err
|
||||
}
|
||||
// Clone prefsIn and create a read-only view as a safety measure to
|
||||
// prevent accidental preference mutations, both externally and internally.
|
||||
if err := pm.setProfilePrefsNoPermCheck(lp, prefsIn.AsStruct().View()); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func newUnusedID(knownProfiles map[ipn.ProfileID]*ipn.LoginProfile) (ipn.ProfileID, ipn.StateKey) {
|
||||
var idb [2]byte
|
||||
for {
|
||||
rand.Read(idb[:])
|
||||
id := ipn.ProfileID(fmt.Sprintf("%x", idb))
|
||||
if _, ok := knownProfiles[id]; ok {
|
||||
continue
|
||||
}
|
||||
return id, ipn.StateKey("profile-" + id)
|
||||
}
|
||||
}
|
||||
|
||||
// setProfilePrefsNoPermCheck sets the profile's prefs to the provided value.
|
||||
// If the profile has the [ipn.LoginProfile.Key] set, it saves the prefs to the
|
||||
// [ipn.StateStore] under that key. It returns an error if the profile is non-current
|
||||
// and does not have its Key set, or if the prefs could not be saved.
|
||||
// The method does not perform any additional checks on the specified
|
||||
// profile, such as verifying the caller's access rights or checking
|
||||
// if another profile for the same node already exists.
|
||||
func (pm *profileManager) setProfilePrefsNoPermCheck(profile *ipn.LoginProfile, clonedPrefs ipn.PrefsView) error {
|
||||
isCurrentProfile := pm.currentProfile == profile
|
||||
if isCurrentProfile {
|
||||
pm.prefs = clonedPrefs
|
||||
pm.updateHealth()
|
||||
}
|
||||
if profile.Key != "" {
|
||||
if err := pm.writePrefsToStore(profile.Key, clonedPrefs); err != nil {
|
||||
return err
|
||||
}
|
||||
} else if !isCurrentProfile {
|
||||
return errors.New("cannot set prefs for a non-current in-memory profile")
|
||||
}
|
||||
if isCurrentProfile {
|
||||
return pm.setUnattendedModeAsConfigured()
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// setPrefsNoPermCheck is like [profileManager.setProfilePrefsNoPermCheck], but sets the current profile's prefs.
|
||||
func (pm *profileManager) setPrefsNoPermCheck(clonedPrefs ipn.PrefsView) error {
|
||||
return pm.setProfilePrefsNoPermCheck(pm.currentProfile, clonedPrefs)
|
||||
}
|
||||
|
||||
func (pm *profileManager) writePrefsToStore(key ipn.StateKey, prefs ipn.PrefsView) error {
|
||||
if key == "" {
|
||||
return nil
|
||||
}
|
||||
if err := pm.WriteState(key, prefs.ToBytes()); err != nil {
|
||||
pm.logf("WriteState(%q): %v", key, err)
|
||||
return err
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// Profiles returns the list of known profiles accessible to the current user.
|
||||
func (pm *profileManager) Profiles() []ipn.LoginProfile {
|
||||
allProfiles := pm.allProfiles()
|
||||
out := make([]ipn.LoginProfile, len(allProfiles))
|
||||
for i, p := range allProfiles {
|
||||
out[i] = *p
|
||||
}
|
||||
return out
|
||||
}
|
||||
|
||||
// ProfileByID returns a profile with the given id, if it is accessible to the current user.
|
||||
// If the profile exists but is not accessible to the current user, it returns an [errProfileAccessDenied].
|
||||
// If the profile does not exist, it returns an [errProfileNotFound].
|
||||
func (pm *profileManager) ProfileByID(id ipn.ProfileID) (ipn.LoginProfile, error) {
|
||||
kp, err := pm.profileByIDNoPermCheck(id)
|
||||
if err != nil {
|
||||
return ipn.LoginProfile{}, err
|
||||
}
|
||||
if err := pm.checkProfileAccess(kp); err != nil {
|
||||
return ipn.LoginProfile{}, err
|
||||
}
|
||||
return *kp, nil
|
||||
}
|
||||
|
||||
// profileByIDNoPermCheck is like [profileManager.ProfileByID], but it doesn't
|
||||
// check user's access rights to the profile.
|
||||
func (pm *profileManager) profileByIDNoPermCheck(id ipn.ProfileID) (*ipn.LoginProfile, error) {
|
||||
if id == pm.currentProfile.ID {
|
||||
return pm.currentProfile, nil
|
||||
}
|
||||
kp, ok := pm.knownProfiles[id]
|
||||
if !ok {
|
||||
return nil, errProfileNotFound
|
||||
}
|
||||
return kp, nil
|
||||
}
|
||||
|
||||
// ProfilePrefs returns preferences for a profile with the given id.
|
||||
// If the profile exists but is not accessible to the current user, it returns an [errProfileAccessDenied].
|
||||
// If the profile does not exist, it returns an [errProfileNotFound].
|
||||
func (pm *profileManager) ProfilePrefs(id ipn.ProfileID) (ipn.PrefsView, error) {
|
||||
kp, err := pm.profileByIDNoPermCheck(id)
|
||||
if err != nil {
|
||||
return ipn.PrefsView{}, errProfileNotFound
|
||||
}
|
||||
if err := pm.checkProfileAccess(kp); err != nil {
|
||||
return ipn.PrefsView{}, err
|
||||
}
|
||||
return pm.profilePrefs(kp)
|
||||
}
|
||||
|
||||
func (pm *profileManager) profilePrefs(p *ipn.LoginProfile) (ipn.PrefsView, error) {
|
||||
if p.ID == pm.currentProfile.ID {
|
||||
return pm.prefs, nil
|
||||
}
|
||||
return pm.loadSavedPrefs(p.Key)
|
||||
}
|
||||
|
||||
// SwitchProfile switches to the profile with the given id.
|
||||
// If the profile exists but is not accessible to the current user, it returns an [errProfileAccessDenied].
|
||||
// If the profile does not exist, it returns an [errProfileNotFound].
|
||||
func (pm *profileManager) SwitchProfile(id ipn.ProfileID) error {
|
||||
metricSwitchProfile.Add(1)
|
||||
|
||||
kp, ok := pm.knownProfiles[id]
|
||||
if !ok {
|
||||
return errProfileNotFound
|
||||
}
|
||||
if pm.currentProfile != nil && kp.ID == pm.currentProfile.ID && pm.prefs.Valid() {
|
||||
return nil
|
||||
}
|
||||
|
||||
if err := pm.checkProfileAccess(kp); err != nil {
|
||||
return fmt.Errorf("%w: profile %q is not accessible to the current user", err, id)
|
||||
}
|
||||
prefs, err := pm.loadSavedPrefs(kp.Key)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
pm.prefs = prefs
|
||||
pm.updateHealth()
|
||||
pm.currentProfile = kp
|
||||
return pm.setProfileAsUserDefault(kp)
|
||||
}
|
||||
|
||||
// SwitchToDefaultProfile switches to the default (last used) profile for the current user.
|
||||
// It creates a new one and switches to it if the current user does not have a default profile,
|
||||
// or returns an error if the default profile is inaccessible or could not be loaded.
|
||||
func (pm *profileManager) SwitchToDefaultProfile() error {
|
||||
if id := pm.DefaultUserProfileID(pm.currentUserID); id != "" {
|
||||
return pm.SwitchProfile(id)
|
||||
}
|
||||
pm.NewProfileForUser(pm.currentUserID)
|
||||
return nil
|
||||
}
|
||||
|
||||
// setProfileAsUserDefault sets the specified profile as the default for the current user.
|
||||
// It returns an [errProfileAccessDenied] if the specified profile is not accessible to the current user.
|
||||
func (pm *profileManager) setProfileAsUserDefault(profile *ipn.LoginProfile) error {
|
||||
if profile.Key == "" {
|
||||
// The profile has not been persisted yet; ignore it for now.
|
||||
return nil
|
||||
}
|
||||
if err := pm.checkProfileAccess(profile); err != nil {
|
||||
return errProfileAccessDenied
|
||||
}
|
||||
k := ipn.CurrentProfileKey(string(pm.currentUserID))
|
||||
return pm.WriteState(k, []byte(profile.Key))
|
||||
}
|
||||
|
||||
func (pm *profileManager) loadSavedPrefs(key ipn.StateKey) (ipn.PrefsView, error) {
|
||||
bs, err := pm.store.ReadState(key)
|
||||
if err == ipn.ErrStateNotExist || len(bs) == 0 {
|
||||
return defaultPrefs, nil
|
||||
}
|
||||
if err != nil {
|
||||
return ipn.PrefsView{}, err
|
||||
}
|
||||
savedPrefs := ipn.NewPrefs()
|
||||
if err := ipn.PrefsFromBytes(bs, savedPrefs); err != nil {
|
||||
return ipn.PrefsView{}, fmt.Errorf("parsing saved prefs: %v", err)
|
||||
}
|
||||
pm.logf("using backend prefs for %q: %v", key, savedPrefs.Pretty())
|
||||
|
||||
// Ignore any old stored preferences for https://login.tailscale.com
|
||||
// as the control server that would override the new default of
|
||||
// controlplane.tailscale.com.
|
||||
if savedPrefs.ControlURL != "" &&
|
||||
savedPrefs.ControlURL != ipn.DefaultControlURL &&
|
||||
ipn.IsLoginServerSynonym(savedPrefs.ControlURL) {
|
||||
savedPrefs.ControlURL = ""
|
||||
}
|
||||
// Before
|
||||
// https://github.com/tailscale/tailscale/pull/11814/commits/1613b18f8280c2bce786980532d012c9f0454fa2#diff-314ba0d799f70c8998940903efb541e511f352b39a9eeeae8d475c921d66c2ac
|
||||
// prefs could set AutoUpdate.Apply=true via EditPrefs or tailnet
|
||||
// auto-update defaults. After that change, such a value is "invalid" and
|
||||
// causes any EditPrefs calls to fail (other than disabling auto-updates).
|
||||
//
|
||||
// Reset AutoUpdate.Apply if we detect such invalid prefs.
|
||||
if savedPrefs.AutoUpdate.Apply.EqualBool(true) && !clientupdate.CanAutoUpdate() {
|
||||
savedPrefs.AutoUpdate.Apply.Clear()
|
||||
}
|
||||
|
||||
return savedPrefs.View(), nil
|
||||
}
|
||||
|
||||
// CurrentProfile returns the current LoginProfile.
|
||||
// The value may be zero if the profile is not persisted.
|
||||
func (pm *profileManager) CurrentProfile() ipn.LoginProfile {
|
||||
return *pm.currentProfile
|
||||
}
|
||||
|
||||
// errProfileNotFound is returned by methods that accept a ProfileID
|
||||
// when the specified profile does not exist.
|
||||
var errProfileNotFound = errors.New("profile not found")
|
||||
|
||||
// errProfileAccessDenied is returned by methods that accept a ProfileID
|
||||
// when the current user does not have access to the specified profile.
|
||||
// It is used temporarily until we implement access checks based on the
|
||||
// caller's identity in tailscale/corp#18342.
|
||||
var errProfileAccessDenied = errors.New("profile access denied")
|
||||
|
||||
// DeleteProfile removes the profile with the given id. It returns
|
||||
// [errProfileNotFound] if the profile does not exist, or an
|
||||
// [errProfileAccessDenied] if the specified profile is not accessible
|
||||
// to the current user.
|
||||
// If the profile is the current profile, it is the equivalent of
|
||||
// calling [profileManager.NewProfile] followed by [profileManager.DeleteProfile](id).
|
||||
// This is useful for deleting the last profile. In other cases, it is
|
||||
// recommended to call [profileManager.SwitchProfile] first.
|
||||
func (pm *profileManager) DeleteProfile(id ipn.ProfileID) error {
|
||||
metricDeleteProfile.Add(1)
|
||||
if id == pm.currentProfile.ID {
|
||||
return pm.deleteCurrentProfile()
|
||||
}
|
||||
kp, ok := pm.knownProfiles[id]
|
||||
if !ok {
|
||||
return errProfileNotFound
|
||||
}
|
||||
if err := pm.checkProfileAccess(kp); err != nil {
|
||||
return err
|
||||
}
|
||||
return pm.deleteProfileNoPermCheck(kp)
|
||||
}
|
||||
|
||||
func (pm *profileManager) deleteCurrentProfile() error {
|
||||
if err := pm.checkProfileAccess(pm.currentProfile); err != nil {
|
||||
return err
|
||||
}
|
||||
if pm.currentProfile.ID == "" {
|
||||
// Deleting the in-memory only new profile, just create a new one.
|
||||
pm.NewProfile()
|
||||
return nil
|
||||
}
|
||||
return pm.deleteProfileNoPermCheck(pm.currentProfile)
|
||||
}
|
||||
|
||||
// deleteProfileNoPermCheck is like [profileManager.DeleteProfile],
|
||||
// but it doesn't check user's access rights to the profile.
|
||||
func (pm *profileManager) deleteProfileNoPermCheck(profile *ipn.LoginProfile) error {
|
||||
if profile.ID == pm.currentProfile.ID {
|
||||
pm.NewProfile()
|
||||
}
|
||||
if err := pm.WriteState(profile.Key, nil); err != nil {
|
||||
return err
|
||||
}
|
||||
delete(pm.knownProfiles, profile.ID)
|
||||
return pm.writeKnownProfiles()
|
||||
}
|
||||
|
||||
// DeleteAllProfilesForUser removes all known profiles accessible to the current user
|
||||
// and switches to a new, empty profile.
|
||||
func (pm *profileManager) DeleteAllProfilesForUser() error {
|
||||
metricDeleteAllProfile.Add(1)
|
||||
|
||||
currentProfileDeleted := false
|
||||
writeKnownProfiles := func() error {
|
||||
if currentProfileDeleted || pm.currentProfile.ID == "" {
|
||||
pm.NewProfile()
|
||||
}
|
||||
return pm.writeKnownProfiles()
|
||||
}
|
||||
|
||||
for _, kp := range pm.knownProfiles {
|
||||
if pm.checkProfileAccess(kp) != nil {
|
||||
// Skip profiles we don't have access to.
|
||||
continue
|
||||
}
|
||||
if err := pm.WriteState(kp.Key, nil); err != nil {
|
||||
// Write to remove references to profiles we've already deleted, but
|
||||
// return the original error.
|
||||
writeKnownProfiles()
|
||||
return err
|
||||
}
|
||||
delete(pm.knownProfiles, kp.ID)
|
||||
if kp.ID == pm.currentProfile.ID {
|
||||
currentProfileDeleted = true
|
||||
}
|
||||
}
|
||||
return writeKnownProfiles()
|
||||
}
|
||||
|
||||
func (pm *profileManager) writeKnownProfiles() error {
|
||||
b, err := json.Marshal(pm.knownProfiles)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
return pm.WriteState(ipn.KnownProfilesStateKey, b)
|
||||
}
|
||||
|
||||
func (pm *profileManager) updateHealth() {
|
||||
if !pm.prefs.Valid() {
|
||||
return
|
||||
}
|
||||
pm.health.SetAutoUpdatePrefs(pm.prefs.AutoUpdate().Check, pm.prefs.AutoUpdate().Apply)
|
||||
}
|
||||
|
||||
// NewProfile creates and switches to a new unnamed profile. The new profile is
|
||||
// not persisted until [profileManager.SetPrefs] is called with a logged-in user.
|
||||
func (pm *profileManager) NewProfile() {
|
||||
pm.NewProfileForUser(pm.currentUserID)
|
||||
}
|
||||
|
||||
// NewProfileForUser is like [profileManager.NewProfile], but it switches to the
|
||||
// specified user and sets that user as the profile owner for the new profile.
|
||||
func (pm *profileManager) NewProfileForUser(uid ipn.WindowsUserID) {
|
||||
pm.currentUserID = uid
|
||||
|
||||
metricNewProfile.Add(1)
|
||||
|
||||
pm.prefs = defaultPrefs
|
||||
pm.updateHealth()
|
||||
pm.currentProfile = &ipn.LoginProfile{LocalUserID: uid}
|
||||
}
|
||||
|
||||
// newProfileWithPrefs creates a new profile with the specified prefs and assigns
|
||||
// the specified uid as the profile owner. If switchNow is true, it switches to the
|
||||
// newly created profile immediately. It returns the newly created profile on success,
|
||||
// or an error on failure.
|
||||
func (pm *profileManager) newProfileWithPrefs(uid ipn.WindowsUserID, prefs ipn.PrefsView, switchNow bool) (*ipn.LoginProfile, error) {
|
||||
metricNewProfile.Add(1)
|
||||
|
||||
profile := &ipn.LoginProfile{LocalUserID: uid}
|
||||
if err := pm.SetProfilePrefs(profile, prefs, ipn.NetworkProfile{}); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if switchNow {
|
||||
pm.currentProfile = profile
|
||||
pm.prefs = prefs.AsStruct().View()
|
||||
pm.updateHealth()
|
||||
if err := pm.setProfileAsUserDefault(profile); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
}
|
||||
return profile, nil
|
||||
}
|
||||
|
||||
// defaultPrefs is the default prefs for a new profile. This initializes before
|
||||
// even this package's init() so do not rely on other parts of the system being
|
||||
// fully initialized here (for example, syspolicy will not be available on
|
||||
// Apple platforms).
|
||||
var defaultPrefs = func() ipn.PrefsView {
|
||||
prefs := ipn.NewPrefs()
|
||||
prefs.LoggedOut = true
|
||||
prefs.WantRunning = false
|
||||
|
||||
return prefs.View()
|
||||
}()
|
||||
|
||||
// Store returns the [ipn.StateStore] used by the [profileManager].
|
||||
func (pm *profileManager) Store() ipn.StateStore {
|
||||
return pm.store
|
||||
}
|
||||
|
||||
// CurrentPrefs returns a read-only view of the current prefs.
|
||||
// The returned view is always valid.
|
||||
func (pm *profileManager) CurrentPrefs() ipn.PrefsView {
|
||||
return pm.prefs
|
||||
}
|
||||
|
||||
// ReadStartupPrefsForTest reads the startup prefs from disk. It is only used for testing.
|
||||
func ReadStartupPrefsForTest(logf logger.Logf, store ipn.StateStore) (ipn.PrefsView, error) {
|
||||
ht := new(health.Tracker) // in tests, don't care about the health status
|
||||
pm, err := newProfileManager(store, logf, ht)
|
||||
if err != nil {
|
||||
return ipn.PrefsView{}, err
|
||||
}
|
||||
return pm.CurrentPrefs(), nil
|
||||
}
|
||||
|
||||
// newProfileManager creates a new [profileManager] using the provided [ipn.StateStore].
|
||||
// It also loads the list of known profiles from the store.
|
||||
func newProfileManager(store ipn.StateStore, logf logger.Logf, health *health.Tracker) (*profileManager, error) {
|
||||
return newProfileManagerWithGOOS(store, logf, health, envknob.GOOS())
|
||||
}
|
||||
|
||||
func readAutoStartKey(store ipn.StateStore, goos string) (ipn.StateKey, error) {
|
||||
startKey := ipn.CurrentProfileStateKey
|
||||
if goos == "windows" {
|
||||
// When tailscaled runs on Windows it is not typically run unattended.
|
||||
// So we can't use the profile mechanism to load the profile at startup.
|
||||
startKey = ipn.ServerModeStartKey
|
||||
}
|
||||
autoStartKey, err := store.ReadState(startKey)
|
||||
if err != nil && err != ipn.ErrStateNotExist {
|
||||
return "", fmt.Errorf("calling ReadState on state store: %w", err)
|
||||
}
|
||||
return ipn.StateKey(autoStartKey), nil
|
||||
}
|
||||
|
||||
func readKnownProfiles(store ipn.StateStore) (map[ipn.ProfileID]*ipn.LoginProfile, error) {
|
||||
var knownProfiles map[ipn.ProfileID]*ipn.LoginProfile
|
||||
prfB, err := store.ReadState(ipn.KnownProfilesStateKey)
|
||||
switch err {
|
||||
case nil:
|
||||
if err := json.Unmarshal(prfB, &knownProfiles); err != nil {
|
||||
return nil, fmt.Errorf("unmarshaling known profiles: %w", err)
|
||||
}
|
||||
case ipn.ErrStateNotExist:
|
||||
knownProfiles = make(map[ipn.ProfileID]*ipn.LoginProfile)
|
||||
default:
|
||||
return nil, fmt.Errorf("calling ReadState on state store: %w", err)
|
||||
}
|
||||
return knownProfiles, nil
|
||||
}
|
||||
|
||||
func newProfileManagerWithGOOS(store ipn.StateStore, logf logger.Logf, ht *health.Tracker, goos string) (*profileManager, error) {
|
||||
logf = logger.WithPrefix(logf, "pm: ")
|
||||
stateKey, err := readAutoStartKey(store, goos)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
knownProfiles, err := readKnownProfiles(store)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
pm := &profileManager{
|
||||
goos: goos,
|
||||
store: store,
|
||||
knownProfiles: knownProfiles,
|
||||
logf: logf,
|
||||
health: ht,
|
||||
}
|
||||
|
||||
if stateKey != "" {
|
||||
for _, v := range knownProfiles {
|
||||
if v.Key == stateKey {
|
||||
pm.currentProfile = v
|
||||
}
|
||||
}
|
||||
if pm.currentProfile == nil {
|
||||
if suf, ok := strings.CutPrefix(string(stateKey), "user-"); ok {
|
||||
pm.currentUserID = ipn.WindowsUserID(suf)
|
||||
}
|
||||
pm.NewProfile()
|
||||
} else {
|
||||
pm.currentUserID = pm.currentProfile.LocalUserID
|
||||
}
|
||||
prefs, err := pm.loadSavedPrefs(stateKey)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if err := pm.setProfilePrefsNoPermCheck(pm.currentProfile, prefs); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
// Most platform behavior is controlled by the goos parameter, however
|
||||
// some behavior is implied by build tag and fails when run on Windows,
|
||||
// so we explicitly avoid that behavior when running on Windows.
|
||||
// Specifically this reaches down into legacy preference loading that is
|
||||
// specialized by profiles_windows.go and fails in tests on an invalid
|
||||
// uid passed in from the unix tests. The uid's used for Windows tests
|
||||
// and runtime must be valid Windows security identifier structures.
|
||||
} else if len(knownProfiles) == 0 && goos != "windows" && runtime.GOOS != "windows" {
|
||||
// No known profiles, try a migration.
|
||||
pm.dlogf("no known profiles; trying to migrate from legacy prefs")
|
||||
if _, err := pm.migrateFromLegacyPrefs(pm.currentUserID, true); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
} else {
|
||||
pm.NewProfile()
|
||||
}
|
||||
|
||||
return pm, nil
|
||||
}
|
||||
|
||||
func (pm *profileManager) migrateFromLegacyPrefs(uid ipn.WindowsUserID, switchNow bool) (*ipn.LoginProfile, error) {
|
||||
metricMigration.Add(1)
|
||||
sentinel, prefs, err := pm.loadLegacyPrefs(uid)
|
||||
if err != nil {
|
||||
metricMigrationError.Add(1)
|
||||
return nil, fmt.Errorf("load legacy prefs: %w", err)
|
||||
}
|
||||
pm.dlogf("loaded legacy preferences; sentinel=%q", sentinel)
|
||||
profile, err := pm.newProfileWithPrefs(uid, prefs, switchNow)
|
||||
if err != nil {
|
||||
metricMigrationError.Add(1)
|
||||
return nil, fmt.Errorf("migrating _daemon profile: %w", err)
|
||||
}
|
||||
pm.completeMigration(sentinel)
|
||||
pm.dlogf("completed legacy preferences migration with sentinel=%q", sentinel)
|
||||
metricMigrationSuccess.Add(1)
|
||||
return profile, nil
|
||||
}
|
||||
|
||||
func (pm *profileManager) requiresBackfill() bool {
|
||||
return pm != nil &&
|
||||
pm.currentProfile != nil &&
|
||||
pm.currentProfile.NetworkProfile.RequiresBackfill()
|
||||
}
|
||||
|
||||
var (
|
||||
metricNewProfile = clientmetric.NewCounter("profiles_new")
|
||||
metricSwitchProfile = clientmetric.NewCounter("profiles_switch")
|
||||
metricDeleteProfile = clientmetric.NewCounter("profiles_delete")
|
||||
metricDeleteAllProfile = clientmetric.NewCounter("profiles_delete_all")
|
||||
|
||||
metricMigration = clientmetric.NewCounter("profiles_migration")
|
||||
metricMigrationError = clientmetric.NewCounter("profiles_migration_error")
|
||||
metricMigrationSuccess = clientmetric.NewCounter("profiles_migration_success")
|
||||
)
|
||||
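As an aside (not part of the vendored file): a minimal sketch of the retry loop used by newUnusedID above, drawing short random hex IDs until one does not collide with an existing key. The helper name newUnusedKey and the example map are hypothetical.

// unused_id_sketch.go
package main

import (
	"crypto/rand"
	"fmt"
)

// newUnusedKey returns a random 4-hex-digit ID not present in used.
func newUnusedKey(used map[string]bool) string {
	var b [2]byte
	for {
		rand.Read(b[:])
		id := fmt.Sprintf("%x", b)
		if used[id] {
			continue // collision: draw again
		}
		return id
	}
}

func main() {
	used := map[string]bool{"abcd": true}
	fmt.Println(newUnusedKey(used))
}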
35
vendor/tailscale.com/ipn/ipnlocal/profiles_notwindows.go
generated
vendored
Normal file
@@ -0,0 +1,35 @@
|
||||
// Copyright (c) Tailscale Inc & AUTHORS
|
||||
// SPDX-License-Identifier: BSD-3-Clause
|
||||
|
||||
//go:build !windows
|
||||
|
||||
package ipnlocal
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"runtime"
|
||||
|
||||
"tailscale.com/ipn"
|
||||
"tailscale.com/version"
|
||||
)
|
||||
|
||||
func (pm *profileManager) loadLegacyPrefs(ipn.WindowsUserID) (string, ipn.PrefsView, error) {
|
||||
k := ipn.LegacyGlobalDaemonStateKey
|
||||
switch {
|
||||
case runtime.GOOS == "ios", version.IsSandboxedMacOS():
|
||||
k = "ipn-go-bridge"
|
||||
case runtime.GOOS == "android":
|
||||
k = "ipn-android"
|
||||
}
|
||||
prefs, err := pm.loadSavedPrefs(k)
|
||||
if err != nil {
|
||||
return "", ipn.PrefsView{}, fmt.Errorf("calling ReadState on state store: %w", err)
|
||||
}
|
||||
pm.logf("migrating %q profile to new format", k)
|
||||
return "", prefs, nil
|
||||
}
|
||||
|
||||
func (pm *profileManager) completeMigration(migrationSentinel string) {
|
||||
// Do not delete the old state key, as we may be downgraded to an
|
||||
// older version that still relies on it.
|
||||
}
|
||||
79
vendor/tailscale.com/ipn/ipnlocal/profiles_windows.go
generated
vendored
Normal file
@@ -0,0 +1,79 @@
|
||||
// Copyright (c) Tailscale Inc & AUTHORS
|
||||
// SPDX-License-Identifier: BSD-3-Clause
|
||||
|
||||
package ipnlocal
|
||||
|
||||
import (
|
||||
"errors"
|
||||
"fmt"
|
||||
"io/fs"
|
||||
"os"
|
||||
"os/user"
|
||||
"path/filepath"
|
||||
|
||||
"tailscale.com/atomicfile"
|
||||
"tailscale.com/ipn"
|
||||
"tailscale.com/util/winutil/policy"
|
||||
)
|
||||
|
||||
const (
|
||||
legacyPrefsFile = "prefs"
|
||||
legacyPrefsMigrationSentinelFile = "_migrated-to-profiles"
|
||||
legacyPrefsExt = ".conf"
|
||||
)
|
||||
|
||||
var errAlreadyMigrated = errors.New("profile migration already completed")
|
||||
|
||||
func legacyPrefsDir(uid ipn.WindowsUserID) (string, error) {
|
||||
// TODO(aaron): Ideally we'd have the impersonation token for the pipe's
|
||||
// client and use it to call SHGetKnownFolderPath, thus yielding the correct
|
||||
// path without having to make gross assumptions about directory names.
|
||||
usr, err := user.LookupId(string(uid))
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
if usr.HomeDir == "" {
|
||||
return "", fmt.Errorf("user %q does not have a home directory", uid)
|
||||
}
|
||||
userLegacyPrefsDir := filepath.Join(usr.HomeDir, "AppData", "Local", "Tailscale")
|
||||
return userLegacyPrefsDir, nil
|
||||
}
|
||||
|
||||
func (pm *profileManager) loadLegacyPrefs(uid ipn.WindowsUserID) (string, ipn.PrefsView, error) {
|
||||
userLegacyPrefsDir, err := legacyPrefsDir(uid)
|
||||
if err != nil {
|
||||
pm.dlogf("no legacy preferences directory for %q: %v", uid, err)
|
||||
return "", ipn.PrefsView{}, err
|
||||
}
|
||||
|
||||
migrationSentinel := filepath.Join(userLegacyPrefsDir, legacyPrefsMigrationSentinelFile+legacyPrefsExt)
|
||||
// verify that migration sentinel is not present
|
||||
_, err = os.Stat(migrationSentinel)
|
||||
if err == nil {
|
||||
pm.dlogf("migration sentinel %q already exists", migrationSentinel)
|
||||
return "", ipn.PrefsView{}, errAlreadyMigrated
|
||||
}
|
||||
if !os.IsNotExist(err) {
|
||||
pm.dlogf("os.Stat(%q) = %v", migrationSentinel, err)
|
||||
return "", ipn.PrefsView{}, err
|
||||
}
|
||||
|
||||
prefsPath := filepath.Join(userLegacyPrefsDir, legacyPrefsFile+legacyPrefsExt)
|
||||
prefs, err := ipn.LoadPrefsWindows(prefsPath)
|
||||
pm.dlogf("ipn.LoadPrefs(%q) = %v, %v", prefsPath, prefs, err)
|
||||
if errors.Is(err, fs.ErrNotExist) {
|
||||
return "", ipn.PrefsView{}, errAlreadyMigrated
|
||||
}
|
||||
if err != nil {
|
||||
return "", ipn.PrefsView{}, err
|
||||
}
|
||||
|
||||
prefs.ControlURL = policy.SelectControlURL(defaultPrefs.ControlURL(), prefs.ControlURL)
|
||||
|
||||
pm.logf("migrating Windows profile to new format")
|
||||
return migrationSentinel, prefs.View(), nil
|
||||
}
|
||||
|
||||
func (pm *profileManager) completeMigration(migrationSentinel string) {
|
||||
atomicfile.WriteFile(migrationSentinel, []byte{}, 0600)
|
||||
}
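
// exampleLegacyPrefsPaths is an illustrative sketch, not part of the vendored
// upstream file: it prints the files that loadLegacyPrefs above consults for a
// user with a hypothetical home directory of C:\Users\alice.
func exampleLegacyPrefsPaths() {
	dir := filepath.Join(`C:\Users\alice`, "AppData", "Local", "Tailscale")
	fmt.Println(filepath.Join(dir, legacyPrefsFile+legacyPrefsExt))                  // ...\prefs.conf
	fmt.Println(filepath.Join(dir, legacyPrefsMigrationSentinelFile+legacyPrefsExt)) // ...\_migrated-to-profiles.conf
}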
|
||||
939
vendor/tailscale.com/ipn/ipnlocal/serve.go
generated
vendored
Normal file
@@ -0,0 +1,939 @@
|
||||
// Copyright (c) Tailscale Inc & AUTHORS
|
||||
// SPDX-License-Identifier: BSD-3-Clause
|
||||
|
||||
package ipnlocal
|
||||
|
||||
import (
|
||||
"context"
|
||||
"crypto/sha256"
|
||||
"crypto/tls"
|
||||
"encoding/hex"
|
||||
"encoding/json"
|
||||
"errors"
|
||||
"fmt"
|
||||
"io"
|
||||
"mime"
|
||||
"net"
|
||||
"net/http"
|
||||
"net/http/httputil"
|
||||
"net/netip"
|
||||
"net/url"
|
||||
"os"
|
||||
"path"
|
||||
"slices"
|
||||
"strconv"
|
||||
"strings"
|
||||
"sync"
|
||||
"sync/atomic"
|
||||
"time"
|
||||
"unicode/utf8"
|
||||
|
||||
"golang.org/x/net/http2"
|
||||
"tailscale.com/ipn"
|
||||
"tailscale.com/logtail/backoff"
|
||||
"tailscale.com/net/netutil"
|
||||
"tailscale.com/syncs"
|
||||
"tailscale.com/tailcfg"
|
||||
"tailscale.com/types/lazy"
|
||||
"tailscale.com/types/logger"
|
||||
"tailscale.com/util/ctxkey"
|
||||
"tailscale.com/util/mak"
|
||||
"tailscale.com/version"
|
||||
)
|
||||
|
||||
const (
|
||||
contentTypeHeader = "Content-Type"
|
||||
grpcBaseContentType = "application/grpc"
|
||||
)
|
||||
|
||||
// ErrETagMismatch signals that the given
|
||||
// If-Match header does not match with the
|
||||
// current etag of a resource.
|
||||
var ErrETagMismatch = errors.New("etag mismatch")
|
||||
|
||||
var serveHTTPContextKey ctxkey.Key[*serveHTTPContext]
|
||||
|
||||
type serveHTTPContext struct {
|
||||
SrcAddr netip.AddrPort
|
||||
DestPort uint16
|
||||
|
||||
// provides funnel-specific context, nil if not funneled
|
||||
Funnel *funnelFlow
|
||||
}
|
||||
|
||||
// funnelFlow represents a funneled connection initiated via IngressPeer
|
||||
// to Host.
|
||||
type funnelFlow struct {
|
||||
Host string
|
||||
IngressPeer tailcfg.NodeView
|
||||
}
|
||||
|
||||
// localListener is the state of host-level net.Listen for a specific (Tailscale IP, port)
// combination. If there are two Tailscale IPs (v4 and v6) and three ports being served,
// then there will be six of these active and looping in their Run method.
//
// This is not used in userspace-networking mode.
//
// localListener is used by tailscale serve (TCP only), the built-in web client and Taildrive.
// Most serve traffic and peer traffic for the web client are intercepted by netstack.
// This listener exists purely for connections from the machine itself, as those go via the kernel,
// so we need to be in the kernel's listening/routing tables.
|
||||
type localListener struct {
|
||||
b *LocalBackend
|
||||
ap netip.AddrPort
|
||||
ctx context.Context // valid while listener is desired
|
||||
cancel context.CancelFunc // for ctx, to close listener
|
||||
logf logger.Logf
|
||||
bo *backoff.Backoff // for retrying failed Listen calls
|
||||
|
||||
handler func(net.Conn) error // handler for inbound connections
|
||||
closeListener syncs.AtomicValue[func() error] // Listener's Close method, if any
|
||||
}
|
||||
|
||||
func (b *LocalBackend) newServeListener(ctx context.Context, ap netip.AddrPort, logf logger.Logf) *localListener {
|
||||
ctx, cancel := context.WithCancel(ctx)
|
||||
return &localListener{
|
||||
b: b,
|
||||
ap: ap,
|
||||
ctx: ctx,
|
||||
cancel: cancel,
|
||||
logf: logf,
|
||||
|
||||
handler: func(conn net.Conn) error {
|
||||
srcAddr := conn.RemoteAddr().(*net.TCPAddr).AddrPort()
|
||||
handler := b.tcpHandlerForServe(ap.Port(), srcAddr, nil)
|
||||
if handler == nil {
|
||||
b.logf("[unexpected] local-serve: no handler for %v to port %v", srcAddr, ap.Port())
|
||||
conn.Close()
|
||||
return nil
|
||||
}
|
||||
return handler(conn)
|
||||
},
|
||||
bo: backoff.NewBackoff("serve-listener", logf, 30*time.Second),
|
||||
}
|
||||
|
||||
}
|
||||
|
||||
// Close cancels the context and closes the listener, if any.
|
||||
func (s *localListener) Close() error {
|
||||
s.cancel()
|
||||
if close, ok := s.closeListener.LoadOk(); ok {
|
||||
s.closeListener.Store(nil)
|
||||
close()
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// Run starts a net.Listen for the localListener's address and port.
|
||||
// If unable to listen, it retries with exponential backoff.
|
||||
// Listen is retried until the context is canceled.
|
||||
func (s *localListener) Run() {
|
||||
for {
|
||||
ip := s.ap.Addr()
|
||||
ipStr := ip.String()
|
||||
|
||||
var lc net.ListenConfig
|
||||
if initListenConfig != nil {
|
||||
// On macOS, this sets the lc.Control hook to
|
||||
// setsockopt the interface index to bind to. This is
|
||||
// required by the network sandbox to allow binding to
|
||||
// a specific interface. Without this hook, the system
|
||||
// chooses a default interface to bind to.
|
||||
if err := initListenConfig(&lc, ip, s.b.prevIfState, s.b.dialer.TUNName()); err != nil {
|
||||
s.logf("localListener failed to init listen config %v, backing off: %v", s.ap, err)
|
||||
s.bo.BackOff(s.ctx, err)
|
||||
continue
|
||||
}
|
||||
// On macOS (AppStore or macsys), if we're binding to a privileged port:
if version.IsSandboxedMacOS() && s.ap.Port() < 1024 {
// On macOS, we need to bind to ""/all-interfaces due to
// the network sandbox. Ideally we would only bind to the
// Tailscale interface, but macOS errors out if we try
// to listen on privileged ports binding only to a specific
// interface. (#6364)
|
||||
ipStr = ""
|
||||
}
|
||||
}
|
||||
|
||||
tcp4or6 := "tcp4"
|
||||
if ip.Is6() {
|
||||
tcp4or6 = "tcp6"
|
||||
}
|
||||
|
||||
// while we were backing off and trying again, the context got canceled
|
||||
// so don't bind, just return, because otherwise there will be no way
|
||||
// to close this listener
|
||||
if s.ctx.Err() != nil {
|
||||
s.logf("localListener context closed before binding")
|
||||
return
|
||||
}
|
||||
|
||||
ln, err := lc.Listen(s.ctx, tcp4or6, net.JoinHostPort(ipStr, fmt.Sprint(s.ap.Port())))
|
||||
if err != nil {
|
||||
if s.shouldWarnAboutListenError(err) {
|
||||
s.logf("localListener failed to listen on %v, backing off: %v", s.ap, err)
|
||||
}
|
||||
s.bo.BackOff(s.ctx, err)
|
||||
continue
|
||||
}
|
||||
s.closeListener.Store(ln.Close)
|
||||
|
||||
s.logf("listening on %v", s.ap)
|
||||
err = s.handleListenersAccept(ln)
|
||||
if s.ctx.Err() != nil {
|
||||
// context canceled, we're done
|
||||
return
|
||||
}
|
||||
if err != nil {
|
||||
s.logf("localListener accept error, retrying: %v", err)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func (s *localListener) shouldWarnAboutListenError(err error) bool {
|
||||
if !s.b.sys.NetMon.Get().InterfaceState().HasIP(s.ap.Addr()) {
|
||||
// Machine likely doesn't have IPv6 enabled (or the IP is still being
|
||||
// assigned). No need to warn. Notably, WSL2 (Issue 6303).
|
||||
return false
|
||||
}
|
||||
// TODO(bradfitz): check errors.Is(err, syscall.EADDRNOTAVAIL) etc? Let's
|
||||
// see what happens in practice.
|
||||
return true
|
||||
}
|
||||
|
||||
// handleListenersAccept accepts connections for the Listener. It calls the
|
||||
// handler in a new goroutine for each accepted connection. This is used to
|
||||
// handle local "tailscale serve" and web client traffic originating from the
|
||||
// machine itself.
|
||||
func (s *localListener) handleListenersAccept(ln net.Listener) error {
|
||||
for {
|
||||
conn, err := ln.Accept()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
go s.handler(conn)
|
||||
}
|
||||
}
|
||||
|
||||
// updateServeTCPPortNetMapAddrListenersLocked starts a net.Listen for configured
|
||||
// Serve ports on all the node's addresses.
|
||||
// Existing Listeners are closed if port no longer in incoming ports list.
|
||||
//
|
||||
// b.mu must be held.
|
||||
func (b *LocalBackend) updateServeTCPPortNetMapAddrListenersLocked(ports []uint16) {
|
||||
// close existing listeners where port
|
||||
// is no longer in incoming ports list
|
||||
for ap, sl := range b.serveListeners {
|
||||
if !slices.Contains(ports, ap.Port()) {
|
||||
b.logf("closing listener %v", ap)
|
||||
sl.Close()
|
||||
delete(b.serveListeners, ap)
|
||||
}
|
||||
}
|
||||
|
||||
nm := b.netMap
|
||||
if nm == nil {
|
||||
b.logf("netMap is nil")
|
||||
return
|
||||
}
|
||||
if !nm.SelfNode.Valid() {
|
||||
b.logf("netMap SelfNode is nil")
|
||||
return
|
||||
}
|
||||
|
||||
addrs := nm.GetAddresses()
|
||||
for i := range addrs.Len() {
|
||||
a := addrs.At(i)
|
||||
for _, p := range ports {
|
||||
addrPort := netip.AddrPortFrom(a.Addr(), p)
|
||||
if _, ok := b.serveListeners[addrPort]; ok {
|
||||
continue // already listening
|
||||
}
|
||||
|
||||
sl := b.newServeListener(context.Background(), addrPort, b.logf)
|
||||
mak.Set(&b.serveListeners, addrPort, sl)
|
||||
|
||||
go sl.Run()
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// SetServeConfig establishes or replaces the current serve config.
|
||||
// ETag is an optional parameter to enforce Optimistic Concurrency Control.
|
||||
// If it is an empty string, then the config will be overwritten.
|
||||
func (b *LocalBackend) SetServeConfig(config *ipn.ServeConfig, etag string) error {
|
||||
b.mu.Lock()
|
||||
defer b.mu.Unlock()
|
||||
return b.setServeConfigLocked(config, etag)
|
||||
}
|
||||
|
||||
func (b *LocalBackend) setServeConfigLocked(config *ipn.ServeConfig, etag string) error {
|
||||
prefs := b.pm.CurrentPrefs()
|
||||
if config.IsFunnelOn() && prefs.ShieldsUp() {
|
||||
return errors.New("Unable to turn on Funnel while shields-up is enabled")
|
||||
}
|
||||
if b.isConfigLocked_Locked() {
|
||||
return errors.New("can't reconfigure tailscaled when using a config file; config file is locked")
|
||||
}
|
||||
|
||||
nm := b.netMap
|
||||
if nm == nil {
|
||||
return errors.New("netMap is nil")
|
||||
}
|
||||
if !nm.SelfNode.Valid() {
|
||||
return errors.New("netMap SelfNode is nil")
|
||||
}
|
||||
|
||||
// If etag is present, check that it has
|
||||
// not changed from the last config.
|
||||
prevConfig := b.serveConfig
|
||||
if etag != "" {
|
||||
// Note that we marshal b.serveConfig
|
||||
// and not use b.lastServeConfJSON as that might
|
||||
// be a Go nil value, which produces a different
|
||||
// checksum from a JSON "null" value.
|
||||
prevBytes, err := json.Marshal(prevConfig)
|
||||
if err != nil {
|
||||
return fmt.Errorf("error encoding previous config: %w", err)
|
||||
}
|
||||
sum := sha256.Sum256(prevBytes)
|
||||
previousEtag := hex.EncodeToString(sum[:])
|
||||
if etag != previousEtag {
|
||||
return ErrETagMismatch
|
||||
}
|
||||
}
|
||||
|
||||
var bs []byte
|
||||
if config != nil {
|
||||
j, err := json.Marshal(config)
|
||||
if err != nil {
|
||||
return fmt.Errorf("encoding serve config: %w", err)
|
||||
}
|
||||
bs = j
|
||||
}
|
||||
|
||||
profileID := b.pm.CurrentProfile().ID
|
||||
confKey := ipn.ServeConfigKey(profileID)
|
||||
if err := b.store.WriteState(confKey, bs); err != nil {
|
||||
return fmt.Errorf("writing ServeConfig to StateStore: %w", err)
|
||||
}
|
||||
|
||||
b.setTCPPortsInterceptedFromNetmapAndPrefsLocked(b.pm.CurrentPrefs())
|
||||
|
||||
// clean up and close all previously open foreground sessions
|
||||
// if the current ServeConfig has overwritten them.
|
||||
if prevConfig.Valid() {
|
||||
has := func(string) bool { return false }
|
||||
if b.serveConfig.Valid() {
|
||||
has = b.serveConfig.Foreground().Contains
|
||||
}
|
||||
prevConfig.Foreground().Range(func(k string, v ipn.ServeConfigView) (cont bool) {
|
||||
if !has(k) {
|
||||
for _, sess := range b.notifyWatchers {
|
||||
if sess.sessionID == k {
|
||||
sess.cancel()
|
||||
}
|
||||
}
|
||||
}
|
||||
return true
|
||||
})
|
||||
}
|
||||
|
||||
return nil
|
||||
}
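
// exampleServeConfigETag is an illustrative sketch, not part of the vendored
// upstream file: it shows how a caller could derive the etag that the
// optimistic-concurrency check above compares against, namely the hex-encoded
// SHA-256 of the JSON encoding of the serve config it last observed.
func exampleServeConfigETag(b *LocalBackend) (string, error) {
	prev := b.ServeConfig() // snapshot of the config the caller last saw
	bs, err := json.Marshal(prev)
	if err != nil {
		return "", err
	}
	sum := sha256.Sum256(bs)
	return hex.EncodeToString(sum[:]), nil
}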
|
||||
|
||||
// ServeConfig provides a view of the current serve mappings.
|
||||
// If serving is not configured, the returned view is not Valid.
|
||||
func (b *LocalBackend) ServeConfig() ipn.ServeConfigView {
|
||||
b.mu.Lock()
|
||||
defer b.mu.Unlock()
|
||||
return b.serveConfig
|
||||
}
|
||||
|
||||
// DeleteForegroundSession deletes a ServeConfig's foreground session
|
||||
// in the LocalBackend if it exists. It also ensures check, delete, and
|
||||
// set operations happen within the same mutex lock to avoid any races.
|
||||
func (b *LocalBackend) DeleteForegroundSession(sessionID string) error {
|
||||
b.mu.Lock()
|
||||
defer b.mu.Unlock()
|
||||
if !b.serveConfig.Valid() || !b.serveConfig.Foreground().Contains(sessionID) {
|
||||
return nil
|
||||
}
|
||||
sc := b.serveConfig.AsStruct()
|
||||
delete(sc.Foreground, sessionID)
|
||||
return b.setServeConfigLocked(sc, "")
|
||||
}
|
||||
|
||||
// HandleIngressTCPConn handles a TCP connection initiated by the ingressPeer
|
||||
// proxied to the local node over the PeerAPI.
|
||||
// Target represents the destination HostPort of the conn.
|
||||
// srcAddr represents the source AddrPort and not that of the ingressPeer.
|
||||
// getConnOrReset is a callback to get the connection, or reset if the connection
|
||||
// is no longer available.
|
||||
// sendRST is a callback to send a TCP RST to the ingressPeer indicating that
|
||||
// the connection was not accepted.
|
||||
func (b *LocalBackend) HandleIngressTCPConn(ingressPeer tailcfg.NodeView, target ipn.HostPort, srcAddr netip.AddrPort, getConnOrReset func() (net.Conn, bool), sendRST func()) {
|
||||
b.mu.Lock()
|
||||
sc := b.serveConfig
|
||||
b.mu.Unlock()
|
||||
|
||||
// TODO(maisem,bradfitz): make this not alloc for every conn.
|
||||
logf := logger.WithPrefix(b.logf, "handleIngress: ")
|
||||
|
||||
if !sc.Valid() {
|
||||
logf("got ingress conn w/o serveConfig; rejecting")
|
||||
sendRST()
|
||||
return
|
||||
}
|
||||
|
||||
if !sc.HasFunnelForTarget(target) {
|
||||
logf("got ingress conn for unconfigured %q; rejecting", target)
|
||||
sendRST()
|
||||
return
|
||||
}
|
||||
|
||||
host, port, err := net.SplitHostPort(string(target))
|
||||
if err != nil {
|
||||
logf("got ingress conn for bad target %q; rejecting", target)
|
||||
sendRST()
|
||||
return
|
||||
}
|
||||
port16, err := strconv.ParseUint(port, 10, 16)
|
||||
if err != nil {
|
||||
logf("got ingress conn for bad target %q; rejecting", target)
|
||||
sendRST()
|
||||
return
|
||||
}
|
||||
dport := uint16(port16)
|
||||
if b.getTCPHandlerForFunnelFlow != nil {
|
||||
handler := b.getTCPHandlerForFunnelFlow(srcAddr, dport)
|
||||
if handler != nil {
|
||||
c, ok := getConnOrReset()
|
||||
if !ok {
|
||||
logf("getConn didn't complete from %v to port %v", srcAddr, dport)
|
||||
return
|
||||
}
|
||||
handler(c)
|
||||
return
|
||||
}
|
||||
}
|
||||
handler := b.tcpHandlerForServe(dport, srcAddr, &funnelFlow{
|
||||
Host: host,
|
||||
IngressPeer: ingressPeer,
|
||||
})
|
||||
if handler == nil {
|
||||
logf("[unexpected] no matching ingress serve handler for %v to port %v", srcAddr, dport)
|
||||
sendRST()
|
||||
return
|
||||
}
|
||||
c, ok := getConnOrReset()
|
||||
if !ok {
|
||||
logf("getConn didn't complete from %v to port %v", srcAddr, dport)
|
||||
return
|
||||
}
|
||||
handler(c)
|
||||
}
|
||||
|
||||
// tcpHandlerForServe returns a handler for a TCP connection to be served via
|
||||
// the ipn.ServeConfig. The funnelFlow can be nil if this is not a funneled
|
||||
// connection.
|
||||
func (b *LocalBackend) tcpHandlerForServe(dport uint16, srcAddr netip.AddrPort, f *funnelFlow) (handler func(net.Conn) error) {
|
||||
b.mu.Lock()
|
||||
sc := b.serveConfig
|
||||
b.mu.Unlock()
|
||||
|
||||
if !sc.Valid() {
|
||||
return nil
|
||||
}
|
||||
|
||||
tcph, ok := sc.FindTCP(dport)
|
||||
if !ok {
|
||||
return nil
|
||||
}
|
||||
|
||||
if tcph.HTTPS() || tcph.HTTP() {
|
||||
hs := &http.Server{
|
||||
Handler: http.HandlerFunc(b.serveWebHandler),
|
||||
BaseContext: func(_ net.Listener) context.Context {
|
||||
return serveHTTPContextKey.WithValue(context.Background(), &serveHTTPContext{
|
||||
Funnel: f,
|
||||
SrcAddr: srcAddr,
|
||||
DestPort: dport,
|
||||
})
|
||||
},
|
||||
}
|
||||
if tcph.HTTPS() {
|
||||
hs.TLSConfig = &tls.Config{
|
||||
GetCertificate: b.getTLSServeCertForPort(dport),
|
||||
}
|
||||
return func(c net.Conn) error {
|
||||
return hs.ServeTLS(netutil.NewOneConnListener(c, nil), "", "")
|
||||
}
|
||||
}
|
||||
|
||||
return func(c net.Conn) error {
|
||||
return hs.Serve(netutil.NewOneConnListener(c, nil))
|
||||
}
|
||||
}
|
||||
|
||||
if backDst := tcph.TCPForward(); backDst != "" {
|
||||
return func(conn net.Conn) error {
|
||||
defer conn.Close()
|
||||
ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
|
||||
backConn, err := b.dialer.SystemDial(ctx, "tcp", backDst)
|
||||
cancel()
|
||||
if err != nil {
|
||||
b.logf("localbackend: failed to TCP proxy port %v (from %v) to %s: %v", dport, srcAddr, backDst, err)
|
||||
return nil
|
||||
}
|
||||
defer backConn.Close()
|
||||
if sni := tcph.TerminateTLS(); sni != "" {
|
||||
conn = tls.Server(conn, &tls.Config{
|
||||
GetCertificate: func(hi *tls.ClientHelloInfo) (*tls.Certificate, error) {
|
||||
ctx, cancel := context.WithTimeout(context.Background(), time.Minute)
|
||||
defer cancel()
|
||||
pair, err := b.GetCertPEM(ctx, sni)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
cert, err := tls.X509KeyPair(pair.CertPEM, pair.KeyPEM)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return &cert, nil
|
||||
},
|
||||
})
|
||||
}
|
||||
|
||||
// TODO(bradfitz): do the RegisterIPPortIdentity and
|
||||
// UnregisterIPPortIdentity stuff that netstack does
|
||||
errc := make(chan error, 1)
|
||||
go func() {
|
||||
_, err := io.Copy(backConn, conn)
|
||||
errc <- err
|
||||
}()
|
||||
go func() {
|
||||
_, err := io.Copy(conn, backConn)
|
||||
errc <- err
|
||||
}()
|
||||
return <-errc
|
||||
}
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func (b *LocalBackend) getServeHandler(r *http.Request) (_ ipn.HTTPHandlerView, at string, ok bool) {
|
||||
var z ipn.HTTPHandlerView // zero value
|
||||
|
||||
hostname := r.Host
|
||||
if r.TLS == nil {
|
||||
tcd := "." + b.Status().CurrentTailnet.MagicDNSSuffix
|
||||
if host, _, err := net.SplitHostPort(hostname); err == nil {
|
||||
hostname = host
|
||||
}
|
||||
if !strings.HasSuffix(hostname, tcd) {
|
||||
hostname += tcd
|
||||
}
|
||||
} else {
|
||||
hostname = r.TLS.ServerName
|
||||
}
|
||||
|
||||
sctx, ok := serveHTTPContextKey.ValueOk(r.Context())
|
||||
if !ok {
|
||||
b.logf("[unexpected] localbackend: no serveHTTPContext in request")
|
||||
return z, "", false
|
||||
}
|
||||
wsc, ok := b.webServerConfig(hostname, sctx.DestPort)
|
||||
if !ok {
|
||||
return z, "", false
|
||||
}
|
||||
|
||||
if h, ok := wsc.Handlers().GetOk(r.URL.Path); ok {
|
||||
return h, r.URL.Path, true
|
||||
}
|
||||
pth := path.Clean(r.URL.Path)
|
||||
for {
|
||||
withSlash := pth + "/"
|
||||
if h, ok := wsc.Handlers().GetOk(withSlash); ok {
|
||||
return h, withSlash, true
|
||||
}
|
||||
if h, ok := wsc.Handlers().GetOk(pth); ok {
|
||||
return h, pth, true
|
||||
}
|
||||
if pth == "/" {
|
||||
return z, "", false
|
||||
}
|
||||
pth = path.Dir(pth)
|
||||
}
|
||||
}
|
||||
|
||||
// proxyHandlerForBackend creates a new HTTP reverse proxy for a particular backend that
|
||||
// we serve requests for. `backend` is an HTTPHandler.Proxy string (URL, host:port, or just port).
|
||||
func (b *LocalBackend) proxyHandlerForBackend(backend string) (http.Handler, error) {
|
||||
targetURL, insecure := expandProxyArg(backend)
|
||||
u, err := url.Parse(targetURL)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("invalid url %s: %w", targetURL, err)
|
||||
}
|
||||
p := &reverseProxy{
|
||||
logf: b.logf,
|
||||
url: u,
|
||||
insecure: insecure,
|
||||
backend: backend,
|
||||
lb: b,
|
||||
}
|
||||
return p, nil
|
||||
}
|
||||
|
||||
// reverseProxy is a proxy that forwards a request to a backend host
// (preconfigured via ipn.ServeConfig). If the host is configured with the
// https+insecure prefix, the connection between the proxy and the backend will
// be over insecure TLS. If the backend host has an http prefix and the incoming
// request has an application/grpc content type header, the connection will be
// over h2c. Otherwise the standard Go http transport is used.
|
||||
type reverseProxy struct {
|
||||
logf logger.Logf
|
||||
url *url.URL
|
||||
// insecure tracks whether the connection to an https backend should be
|
||||
// insecure (i.e because we cannot verify its CA).
|
||||
insecure bool
|
||||
backend string
|
||||
lb *LocalBackend
|
||||
httpTransport lazy.SyncValue[*http.Transport] // transport for non-h2c backends
|
||||
h2cTransport lazy.SyncValue[*http2.Transport] // transport for h2c backends
|
||||
// closed tracks whether proxy is closed/currently closing.
|
||||
closed atomic.Bool
|
||||
}
|
||||
|
||||
// close ensures that any open backend connections get closed.
|
||||
func (rp *reverseProxy) close() {
|
||||
rp.closed.Store(true)
|
||||
if h2cT := rp.h2cTransport.Get(func() *http2.Transport {
|
||||
return nil
|
||||
}); h2cT != nil {
|
||||
h2cT.CloseIdleConnections()
|
||||
}
|
||||
if httpTransport := rp.httpTransport.Get(func() *http.Transport {
|
||||
return nil
|
||||
}); httpTransport != nil {
|
||||
httpTransport.CloseIdleConnections()
|
||||
}
|
||||
}
|
||||
|
||||
func (rp *reverseProxy) ServeHTTP(w http.ResponseWriter, r *http.Request) {
|
||||
if closed := rp.closed.Load(); closed {
|
||||
rp.logf("received a request for a proxy that's being closed or has been closed")
|
||||
http.Error(w, "proxy is closed", http.StatusServiceUnavailable)
|
||||
return
|
||||
}
|
||||
p := &httputil.ReverseProxy{Rewrite: func(r *httputil.ProxyRequest) {
|
||||
oldOutPath := r.Out.URL.Path
|
||||
r.SetURL(rp.url)
|
||||
|
||||
// If the mount point matches the request path exactly, the outbound
// request URL was set to the empty string in serveWebHandler, which
// would have resulted in the outbound path being set to <proxy path>
// + '/' in SetURL. In that case, if the proxy path was set, we
// want to send the request to the <proxy path> (without the '/').
|
||||
if oldOutPath == "" && rp.url.Path != "" {
|
||||
r.Out.URL.Path = rp.url.Path
|
||||
r.Out.URL.RawPath = rp.url.RawPath
|
||||
}
|
||||
|
||||
r.Out.Host = r.In.Host
|
||||
addProxyForwardedHeaders(r)
|
||||
rp.lb.addTailscaleIdentityHeaders(r)
|
||||
}}
|
||||
|
||||
// There is no way to autodetect h2c as per RFC 9113
|
||||
// https://datatracker.ietf.org/doc/html/rfc9113#name-starting-http-2.
|
||||
// However, we assume that the http:// proxy prefix in combination with the
// protocol being HTTP/2 is sufficient to detect h2c for our needs. Only use this for
// gRPC to fix a known problem of plaintext gRPC backends.
|
||||
if rp.shouldProxyViaH2C(r) {
|
||||
rp.logf("received a proxy request for plaintext gRPC")
|
||||
p.Transport = rp.getH2CTransport()
|
||||
} else {
|
||||
p.Transport = rp.getTransport()
|
||||
}
|
||||
p.ServeHTTP(w, r)
|
||||
}
|
||||
|
||||
// getTransport returns the Transport used for regular (non-GRPC) requests
|
||||
// to the backend. The Transport gets created lazily, at most once.
|
||||
func (rp *reverseProxy) getTransport() *http.Transport {
|
||||
return rp.httpTransport.Get(func() *http.Transport {
|
||||
return &http.Transport{
|
||||
DialContext: rp.lb.dialer.SystemDial,
|
||||
TLSClientConfig: &tls.Config{
|
||||
InsecureSkipVerify: rp.insecure,
|
||||
},
|
||||
// Values for the following parameters have been copied from http.DefaultTransport.
|
||||
ForceAttemptHTTP2: true,
|
||||
MaxIdleConns: 100,
|
||||
IdleConnTimeout: 90 * time.Second,
|
||||
TLSHandshakeTimeout: 10 * time.Second,
|
||||
ExpectContinueTimeout: 1 * time.Second,
|
||||
}
|
||||
})
|
||||
}
|
||||
|
||||
// getH2CTransport returns the Transport used for GRPC requests to the backend.
|
||||
// The Transport gets created lazily, at most once.
|
||||
func (rp *reverseProxy) getH2CTransport() *http2.Transport {
|
||||
return rp.h2cTransport.Get(func() *http2.Transport {
|
||||
return &http2.Transport{
|
||||
AllowHTTP: true,
|
||||
DialTLSContext: func(ctx context.Context, network string, addr string, _ *tls.Config) (net.Conn, error) {
|
||||
return rp.lb.dialer.SystemDial(ctx, "tcp", rp.url.Host)
|
||||
},
|
||||
}
|
||||
})
|
||||
}
|
||||
|
||||
// This is not a generally reliable way to determine whether a request is
// for an h2c server, but it is sufficient for our particular use case.
|
||||
func (rp *reverseProxy) shouldProxyViaH2C(r *http.Request) bool {
|
||||
contentType := r.Header.Get(contentTypeHeader)
|
||||
return r.ProtoMajor == 2 && strings.HasPrefix(rp.backend, "http://") && isGRPCContentType(contentType)
|
||||
}
|
||||
|
||||
// isGRPCContentType accepts an HTTP request's content type header value and determines
|
||||
// whether this is gRPC content. grpc-go considers a value that equals
|
||||
// application/grpc or has a prefix of application/grpc+ or application/grpc; a
|
||||
// valid grpc content type header.
|
||||
// https://github.com/grpc/grpc-go/blob/v1.60.0-dev/internal/grpcutil/method.go#L41-L78
|
||||
func isGRPCContentType(contentType string) bool {
|
||||
s, ok := strings.CutPrefix(contentType, grpcBaseContentType)
|
||||
return ok && (len(s) == 0 || s[0] == '+' || s[0] == ';')
|
||||
}
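
// exampleIsGRPCContentType is an illustrative sketch, not part of the vendored
// upstream file: it shows which Content-Type values the check above accepts.
func exampleIsGRPCContentType() {
	for _, ct := range []string{
		"application/grpc",       // true: exact match
		"application/grpc+proto", // true: '+' suffix
		"application/grpc;p=1",   // true: ';' suffix
		"application/grpcweb",    // false: other suffix byte
		"application/json",       // false: different base type
	} {
		fmt.Printf("%-26s %v\n", ct, isGRPCContentType(ct))
	}
}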
|
||||
|
||||
func addProxyForwardedHeaders(r *httputil.ProxyRequest) {
|
||||
r.Out.Header.Set("X-Forwarded-Host", r.In.Host)
|
||||
if r.In.TLS != nil {
|
||||
r.Out.Header.Set("X-Forwarded-Proto", "https")
|
||||
}
|
||||
if c, ok := serveHTTPContextKey.ValueOk(r.Out.Context()); ok {
|
||||
r.Out.Header.Set("X-Forwarded-For", c.SrcAddr.Addr().String())
|
||||
}
|
||||
}
|
||||
|
||||
func (b *LocalBackend) addTailscaleIdentityHeaders(r *httputil.ProxyRequest) {
|
||||
// Clear any incoming values squatting in the headers.
|
||||
r.Out.Header.Del("Tailscale-User-Login")
|
||||
r.Out.Header.Del("Tailscale-User-Name")
|
||||
r.Out.Header.Del("Tailscale-User-Profile-Pic")
|
||||
r.Out.Header.Del("Tailscale-Funnel-Request")
|
||||
r.Out.Header.Del("Tailscale-Headers-Info")
|
||||
|
||||
c, ok := serveHTTPContextKey.ValueOk(r.Out.Context())
|
||||
if !ok {
|
||||
return
|
||||
}
|
||||
if c.Funnel != nil {
|
||||
r.Out.Header.Set("Tailscale-Funnel-Request", "?1")
|
||||
return
|
||||
}
|
||||
node, user, ok := b.WhoIs("tcp", c.SrcAddr)
|
||||
if !ok {
|
||||
return // traffic from outside of Tailnet (funneled or local machine)
|
||||
}
|
||||
if node.IsTagged() {
|
||||
// 2023-06-14: Not setting identity headers for tagged nodes.
|
||||
// Only currently set for nodes with user identities.
|
||||
return
|
||||
}
|
||||
r.Out.Header.Set("Tailscale-User-Login", encTailscaleHeaderValue(user.LoginName))
|
||||
r.Out.Header.Set("Tailscale-User-Name", encTailscaleHeaderValue(user.DisplayName))
|
||||
r.Out.Header.Set("Tailscale-User-Profile-Pic", user.ProfilePicURL)
|
||||
r.Out.Header.Set("Tailscale-Headers-Info", "https://tailscale.com/s/serve-headers")
|
||||
}
|
||||
|
||||
// encTailscaleHeaderValue cleans or encodes as necessary v, to be suitable in
|
||||
// an HTTP header value. See
|
||||
// https://github.com/tailscale/tailscale/issues/11603.
|
||||
//
|
||||
// If v is not a valid UTF-8 string, it returns an empty string.
|
||||
// If v is a valid ASCII string, it returns v unmodified.
|
||||
// If v is a valid UTF-8 string with non-ASCII characters, it returns a
|
||||
// RFC 2047 Q-encoded string.
|
||||
func encTailscaleHeaderValue(v string) string {
|
||||
if !utf8.ValidString(v) {
|
||||
return ""
|
||||
}
|
||||
return mime.QEncoding.Encode("utf-8", v)
|
||||
}
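
// exampleEncTailscaleHeaderValue is an illustrative sketch, not part of the
// vendored upstream file: plain ASCII passes through unchanged, non-ASCII
// UTF-8 becomes an RFC 2047 Q-encoded word, and invalid UTF-8 is dropped.
func exampleEncTailscaleHeaderValue() {
	fmt.Println(encTailscaleHeaderValue("alice@example.com")) // "alice@example.com"
	fmt.Println(encTailscaleHeaderValue("Jürgen"))            // a Q-encoded word like "=?utf-8?q?J=C3=BCrgen?="
	fmt.Println(encTailscaleHeaderValue("\xff\xfe"))          // "" (not valid UTF-8)
}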
|
||||
|
||||
// serveWebHandler is an http.HandlerFunc that maps incoming requests to the
// correct serve handler and mount point.
|
||||
func (b *LocalBackend) serveWebHandler(w http.ResponseWriter, r *http.Request) {
|
||||
h, mountPoint, ok := b.getServeHandler(r)
|
||||
if !ok {
|
||||
http.NotFound(w, r)
|
||||
return
|
||||
}
|
||||
if s := h.Text(); s != "" {
|
||||
w.Header().Set("Content-Type", "text/plain; charset=utf-8")
|
||||
io.WriteString(w, s)
|
||||
return
|
||||
}
|
||||
if v := h.Path(); v != "" {
|
||||
b.serveFileOrDirectory(w, r, v, mountPoint)
|
||||
return
|
||||
}
|
||||
if v := h.Proxy(); v != "" {
|
||||
p, ok := b.serveProxyHandlers.Load(v)
|
||||
if !ok {
|
||||
http.Error(w, "unknown proxy destination", http.StatusInternalServerError)
|
||||
return
|
||||
}
|
||||
h := p.(http.Handler)
|
||||
// Trim the mount point from the URL path before proxying. (#6571)
|
||||
if r.URL.Path != "/" {
|
||||
h = http.StripPrefix(strings.TrimSuffix(mountPoint, "/"), h)
|
||||
}
|
||||
h.ServeHTTP(w, r)
|
||||
return
|
||||
}
|
||||
|
||||
http.Error(w, "empty handler", 500)
|
||||
}
|
||||
|
||||
func (b *LocalBackend) serveFileOrDirectory(w http.ResponseWriter, r *http.Request, fileOrDir, mountPoint string) {
|
||||
fi, err := os.Stat(fileOrDir)
|
||||
if err != nil {
|
||||
if os.IsNotExist(err) {
|
||||
http.NotFound(w, r)
|
||||
return
|
||||
}
|
||||
b.logf("error calling stat on %s: %v", fileOrDir, err)
|
||||
http.Error(w, "an error occurred reading the file or directory", 500)
|
||||
return
|
||||
}
|
||||
if fi.Mode().IsRegular() {
|
||||
if mountPoint != r.URL.Path {
|
||||
http.NotFound(w, r)
|
||||
return
|
||||
}
|
||||
f, err := os.Open(fileOrDir)
|
||||
if err != nil {
|
||||
b.logf("error opening %s: %v", fileOrDir, err)
|
||||
http.Error(w, "an error occurred reading the file or directory", 500)
|
||||
return
|
||||
}
|
||||
defer f.Close()
|
||||
http.ServeContent(w, r, path.Base(mountPoint), fi.ModTime(), f)
|
||||
return
|
||||
}
|
||||
if !fi.IsDir() {
|
||||
http.Error(w, "not a file or directory", 500)
|
||||
return
|
||||
}
|
||||
if len(r.URL.Path) < len(mountPoint) && r.URL.Path+"/" == mountPoint {
|
||||
http.Redirect(w, r, mountPoint, http.StatusFound)
|
||||
return
|
||||
}
|
||||
|
||||
var fs http.Handler = http.FileServer(http.Dir(fileOrDir))
|
||||
if mountPoint != "/" {
|
||||
fs = http.StripPrefix(strings.TrimSuffix(mountPoint, "/"), fs)
|
||||
}
|
||||
fs.ServeHTTP(&fixLocationHeaderResponseWriter{
|
||||
ResponseWriter: w,
|
||||
mountPoint: mountPoint,
|
||||
}, r)
|
||||
}
|
||||
|
||||
// fixLocationHeaderResponseWriter is an http.ResponseWriter wrapper that, upon
|
||||
// flushing HTTP headers, prefixes any Location header with the mount point.
|
||||
type fixLocationHeaderResponseWriter struct {
|
||||
http.ResponseWriter
|
||||
mountPoint string
|
||||
fixOnce sync.Once // guards call to fix
|
||||
}
|
||||
|
||||
func (w *fixLocationHeaderResponseWriter) fix() {
|
||||
h := w.ResponseWriter.Header()
|
||||
if v := h.Get("Location"); v != "" {
|
||||
h.Set("Location", w.mountPoint+v)
|
||||
}
|
||||
}
|
||||
|
||||
func (w *fixLocationHeaderResponseWriter) WriteHeader(code int) {
|
||||
w.fixOnce.Do(w.fix)
|
||||
w.ResponseWriter.WriteHeader(code)
|
||||
}
|
||||
|
||||
func (w *fixLocationHeaderResponseWriter) Write(p []byte) (int, error) {
|
||||
w.fixOnce.Do(w.fix)
|
||||
return w.ResponseWriter.Write(p)
|
||||
}
|
||||
|
||||
// expandProxyArg returns a URL from s, where s can be of form:
|
||||
//
|
||||
// * port number ("8080")
|
||||
// * host:port ("localhost:8080")
|
||||
// * full URL ("http://localhost:8080", in which case it's returned unchanged)
|
||||
// * insecure TLS ("https+insecure://127.0.0.1:4430")
|
||||
func expandProxyArg(s string) (targetURL string, insecureSkipVerify bool) {
|
||||
if s == "" {
|
||||
return "", false
|
||||
}
|
||||
if strings.HasPrefix(s, "http://") || strings.HasPrefix(s, "https://") {
|
||||
return s, false
|
||||
}
|
||||
if rest, ok := strings.CutPrefix(s, "https+insecure://"); ok {
|
||||
return "https://" + rest, true
|
||||
}
|
||||
if allNumeric(s) {
|
||||
return "http://127.0.0.1:" + s, false
|
||||
}
|
||||
return "http://" + s, false
|
||||
}
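
// exampleExpandProxyArg is an illustrative sketch, not part of the vendored
// upstream file: it shows how the expansion rules above apply to a few
// representative inputs.
func exampleExpandProxyArg() {
	for _, s := range []string{
		"8080",                            // bare port
		"localhost:8080",                  // host:port
		"https://example.com",             // full URL, returned unchanged
		"https+insecure://127.0.0.1:4430", // skip TLS verification
	} {
		u, insecure := expandProxyArg(s)
		fmt.Printf("%-35q -> %q (insecureSkipVerify=%v)\n", s, u, insecure)
	}
}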
|
||||
|
||||
func allNumeric(s string) bool {
|
||||
for i := range len(s) {
|
||||
if s[i] < '0' || s[i] > '9' {
|
||||
return false
|
||||
}
|
||||
}
|
||||
return s != ""
|
||||
}
|
||||
|
||||
func (b *LocalBackend) webServerConfig(hostname string, port uint16) (c ipn.WebServerConfigView, ok bool) {
|
||||
key := ipn.HostPort(fmt.Sprintf("%s:%v", hostname, port))
|
||||
|
||||
b.mu.Lock()
|
||||
defer b.mu.Unlock()
|
||||
|
||||
if !b.serveConfig.Valid() {
|
||||
return c, false
|
||||
}
|
||||
return b.serveConfig.FindWeb(key)
|
||||
}
|
||||
|
||||
func (b *LocalBackend) getTLSServeCertForPort(port uint16) func(hi *tls.ClientHelloInfo) (*tls.Certificate, error) {
|
||||
return func(hi *tls.ClientHelloInfo) (*tls.Certificate, error) {
|
||||
if hi == nil || hi.ServerName == "" {
|
||||
return nil, errors.New("no SNI ServerName")
|
||||
}
|
||||
_, ok := b.webServerConfig(hi.ServerName, port)
|
||||
if !ok {
|
||||
return nil, errors.New("no webserver configured for name/port")
|
||||
}
|
||||
|
||||
ctx, cancel := context.WithTimeout(context.Background(), time.Minute)
|
||||
defer cancel()
|
||||
pair, err := b.GetCertPEM(ctx, hi.ServerName)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
cert, err := tls.X509KeyPair(pair.CertPEM, pair.KeyPEM)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return &cert, nil
|
||||
}
|
||||
}
|
||||
232
vendor/tailscale.com/ipn/ipnlocal/ssh.go
generated
vendored
Normal file
@@ -0,0 +1,232 @@
|
||||
// Copyright (c) Tailscale Inc & AUTHORS
|
||||
// SPDX-License-Identifier: BSD-3-Clause
|
||||
|
||||
//go:build linux || (darwin && !ios) || freebsd || openbsd
|
||||
|
||||
package ipnlocal
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"crypto/ecdsa"
|
||||
"crypto/ed25519"
|
||||
"crypto/elliptic"
|
||||
"crypto/rand"
|
||||
"crypto/rsa"
|
||||
"crypto/x509"
|
||||
"encoding/pem"
|
||||
"errors"
|
||||
"fmt"
|
||||
"os"
|
||||
"os/exec"
|
||||
"path/filepath"
|
||||
"runtime"
|
||||
"slices"
|
||||
"strings"
|
||||
"sync"
|
||||
|
||||
"github.com/tailscale/golang-x-crypto/ssh"
|
||||
"go4.org/mem"
|
||||
"tailscale.com/tailcfg"
|
||||
"tailscale.com/util/lineread"
|
||||
"tailscale.com/util/mak"
|
||||
)
|
||||
|
||||
// keyTypes are the SSH key types that we either try to read from the
|
||||
// system's OpenSSH keys or try to generate for ourselves when not
|
||||
// running as root.
|
||||
var keyTypes = []string{"rsa", "ecdsa", "ed25519"}
|
||||
|
||||
// getSSHUsernames discovers and returns the list of usernames that are
|
||||
// potential Tailscale SSH user targets.
|
||||
//
|
||||
// Invariant: must not be called with b.mu held.
|
||||
func (b *LocalBackend) getSSHUsernames(req *tailcfg.C2NSSHUsernamesRequest) (*tailcfg.C2NSSHUsernamesResponse, error) {
|
||||
res := new(tailcfg.C2NSSHUsernamesResponse)
|
||||
if !b.tailscaleSSHEnabled() {
|
||||
return res, nil
|
||||
}
|
||||
|
||||
max := 10
|
||||
if req != nil && req.Max != 0 {
|
||||
max = req.Max
|
||||
}
|
||||
|
||||
add := func(u string) {
|
||||
if req != nil && req.Exclude[u] {
|
||||
return
|
||||
}
|
||||
switch u {
|
||||
case "nobody", "daemon", "sync":
|
||||
return
|
||||
}
|
||||
if slices.Contains(res.Usernames, u) {
|
||||
return
|
||||
}
|
||||
if len(res.Usernames) > max {
|
||||
// Enough for a hint.
|
||||
return
|
||||
}
|
||||
res.Usernames = append(res.Usernames, u)
|
||||
}
|
||||
|
||||
if opUser := b.operatorUserName(); opUser != "" {
|
||||
add(opUser)
|
||||
}
|
||||
|
||||
// Check popular usernames and see if they exist with a real shell.
|
||||
switch runtime.GOOS {
|
||||
case "darwin":
|
||||
out, err := exec.Command("dscl", ".", "list", "/Users").Output()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
lineread.Reader(bytes.NewReader(out), func(line []byte) error {
|
||||
line = bytes.TrimSpace(line)
|
||||
if len(line) == 0 || line[0] == '_' {
|
||||
return nil
|
||||
}
|
||||
add(string(line))
|
||||
return nil
|
||||
})
|
||||
default:
|
||||
lineread.File("/etc/passwd", func(line []byte) error {
|
||||
line = bytes.TrimSpace(line)
|
||||
if len(line) == 0 || line[0] == '#' || line[0] == '_' {
|
||||
return nil
|
||||
}
|
||||
if mem.HasSuffix(mem.B(line), mem.S("/nologin")) ||
|
||||
mem.HasSuffix(mem.B(line), mem.S("/false")) {
|
||||
return nil
|
||||
}
|
||||
colon := bytes.IndexByte(line, ':')
|
||||
if colon != -1 {
|
||||
add(string(line[:colon]))
|
||||
}
|
||||
return nil
|
||||
})
|
||||
}
|
||||
return res, nil
|
||||
}
|
||||
|
||||
func (b *LocalBackend) GetSSH_HostKeys() (keys []ssh.Signer, err error) {
|
||||
var existing map[string]ssh.Signer
|
||||
if os.Geteuid() == 0 {
|
||||
existing = b.getSystemSSH_HostKeys()
|
||||
}
|
||||
return b.getTailscaleSSH_HostKeys(existing)
|
||||
}
|
||||
|
||||
// getTailscaleSSH_HostKeys returns the three (rsa, ecdsa, ed25519) SSH host
|
||||
// keys, reusing the provided ones in existing if present in the map.
|
||||
func (b *LocalBackend) getTailscaleSSH_HostKeys(existing map[string]ssh.Signer) (keys []ssh.Signer, err error) {
|
||||
var keyDir string // lazily initialized $TAILSCALE_VAR/ssh dir.
|
||||
for _, typ := range keyTypes {
|
||||
if s, ok := existing[typ]; ok {
|
||||
keys = append(keys, s)
|
||||
continue
|
||||
}
|
||||
if keyDir == "" {
|
||||
root := b.TailscaleVarRoot()
|
||||
if root == "" {
|
||||
return nil, errors.New("no var root for ssh keys")
|
||||
}
|
||||
keyDir = filepath.Join(root, "ssh")
|
||||
if err := os.MkdirAll(keyDir, 0700); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
}
|
||||
hostKey, err := b.hostKeyFileOrCreate(keyDir, typ)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("error creating SSH host key type %q in %q: %w", typ, keyDir, err)
|
||||
}
|
||||
signer, err := ssh.ParsePrivateKey(hostKey)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("error parsing SSH host key type %q from %q: %w", typ, keyDir, err)
|
||||
}
|
||||
keys = append(keys, signer)
|
||||
}
|
||||
return keys, nil
|
||||
}
|
||||
|
||||
var keyGenMu sync.Mutex
|
||||
|
||||
func (b *LocalBackend) hostKeyFileOrCreate(keyDir, typ string) ([]byte, error) {
|
||||
keyGenMu.Lock()
|
||||
defer keyGenMu.Unlock()
|
||||
|
||||
path := filepath.Join(keyDir, "ssh_host_"+typ+"_key")
|
||||
v, err := os.ReadFile(path)
|
||||
if err == nil {
|
||||
return v, nil
|
||||
}
|
||||
if !os.IsNotExist(err) {
|
||||
return nil, err
|
||||
}
|
||||
var priv any
|
||||
switch typ {
|
||||
default:
|
||||
return nil, fmt.Errorf("unsupported key type %q", typ)
|
||||
case "ed25519":
|
||||
_, priv, err = ed25519.GenerateKey(rand.Reader)
|
||||
case "ecdsa":
|
||||
// curve is arbitrary. We pick whatever will at
|
||||
// least pacify clients as the actual encryption
|
||||
// doesn't matter: it's all over WireGuard anyway.
|
||||
curve := elliptic.P256()
|
||||
priv, err = ecdsa.GenerateKey(curve, rand.Reader)
|
||||
case "rsa":
|
||||
// keySize is arbitrary. We pick whatever will at
|
||||
// least pacify clients as the actual encryption
|
||||
// doesn't matter: it's all over WireGuard anyway.
|
||||
const keySize = 2048
|
||||
priv, err = rsa.GenerateKey(rand.Reader, keySize)
|
||||
}
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
mk, err := x509.MarshalPKCS8PrivateKey(priv)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
pemGen := pem.EncodeToMemory(&pem.Block{Type: "PRIVATE KEY", Bytes: mk})
|
||||
err = os.WriteFile(path, pemGen, 0700)
|
||||
return pemGen, err
|
||||
}
|
||||
|
||||
func (b *LocalBackend) getSystemSSH_HostKeys() (ret map[string]ssh.Signer) {
|
||||
for _, typ := range keyTypes {
|
||||
filename := "/etc/ssh/ssh_host_" + typ + "_key"
|
||||
hostKey, err := os.ReadFile(filename)
|
||||
if err != nil || len(bytes.TrimSpace(hostKey)) == 0 {
|
||||
continue
|
||||
}
|
||||
signer, err := ssh.ParsePrivateKey(hostKey)
|
||||
if err != nil {
|
||||
b.logf("warning: error reading host key %s: %v (generating one instead)", filename, err)
|
||||
continue
|
||||
}
|
||||
mak.Set(&ret, typ, signer)
|
||||
}
|
||||
return ret
|
||||
}
|
||||
|
||||
func (b *LocalBackend) getSSHHostKeyPublicStrings() ([]string, error) {
|
||||
signers, err := b.GetSSH_HostKeys()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
var keyStrings []string
|
||||
for _, signer := range signers {
|
||||
keyStrings = append(keyStrings, strings.TrimSpace(string(ssh.MarshalAuthorizedKey(signer.PublicKey()))))
|
||||
}
|
||||
return keyStrings, nil
|
||||
}
|
||||
|
||||
// tailscaleSSHEnabled reports whether Tailscale SSH is currently enabled based
|
||||
// on prefs. It returns false if there are no prefs set.
|
||||
func (b *LocalBackend) tailscaleSSHEnabled() bool {
|
||||
b.mu.Lock()
|
||||
defer b.mu.Unlock()
|
||||
p := b.pm.CurrentPrefs()
|
||||
return p.Valid() && p.RunSSH()
|
||||
}
|
||||
20
vendor/tailscale.com/ipn/ipnlocal/ssh_stub.go
generated
vendored
Normal file
@@ -0,0 +1,20 @@
|
||||
// Copyright (c) Tailscale Inc & AUTHORS
|
||||
// SPDX-License-Identifier: BSD-3-Clause
|
||||
|
||||
//go:build ios || (!linux && !darwin && !freebsd && !openbsd)
|
||||
|
||||
package ipnlocal
|
||||
|
||||
import (
|
||||
"errors"
|
||||
|
||||
"tailscale.com/tailcfg"
|
||||
)
|
||||
|
||||
func (b *LocalBackend) getSSHHostKeyPublicStrings() ([]string, error) {
|
||||
return nil, nil
|
||||
}
|
||||
|
||||
func (b *LocalBackend) getSSHUsernames(*tailcfg.C2NSSHUsernamesRequest) (*tailcfg.C2NSSHUsernamesResponse, error) {
|
||||
return nil, errors.New("not implemented")
|
||||
}
|
||||
35
vendor/tailscale.com/ipn/ipnlocal/taildrop.go
generated
vendored
Normal file
@@ -0,0 +1,35 @@
|
||||
// Copyright (c) Tailscale Inc & AUTHORS
|
||||
// SPDX-License-Identifier: BSD-3-Clause
|
||||
|
||||
package ipnlocal
|
||||
|
||||
import (
|
||||
"maps"
|
||||
"slices"
|
||||
"strings"
|
||||
|
||||
"tailscale.com/ipn"
|
||||
)
|
||||
|
||||
// UpdateOutgoingFiles updates b.outgoingFiles to reflect the given updates and
|
||||
// sends an ipn.Notify with the full list of outgoingFiles.
|
||||
func (b *LocalBackend) UpdateOutgoingFiles(updates map[string]*ipn.OutgoingFile) {
|
||||
b.mu.Lock()
|
||||
if b.outgoingFiles == nil {
|
||||
b.outgoingFiles = make(map[string]*ipn.OutgoingFile, len(updates))
|
||||
}
|
||||
maps.Copy(b.outgoingFiles, updates)
|
||||
outgoingFiles := make([]*ipn.OutgoingFile, 0, len(b.outgoingFiles))
|
||||
for _, file := range b.outgoingFiles {
|
||||
outgoingFiles = append(outgoingFiles, file)
|
||||
}
|
||||
b.mu.Unlock()
|
||||
slices.SortFunc(outgoingFiles, func(a, b *ipn.OutgoingFile) int {
|
||||
t := a.Started.Compare(b.Started)
|
||||
if t != 0 {
|
||||
return t
|
||||
}
|
||||
return strings.Compare(a.Name, b.Name)
|
||||
})
|
||||
b.send(ipn.Notify{OutgoingFiles: outgoingFiles})
|
||||
}
|
||||
205
vendor/tailscale.com/ipn/ipnlocal/web_client.go
generated
vendored
Normal file
@@ -0,0 +1,205 @@
|
||||
// Copyright (c) Tailscale Inc & AUTHORS
|
||||
// SPDX-License-Identifier: BSD-3-Clause
|
||||
|
||||
//go:build !ios && !android
|
||||
|
||||
package ipnlocal
|
||||
|
||||
import (
|
||||
"context"
|
||||
"encoding/json"
|
||||
"errors"
|
||||
"fmt"
|
||||
"io"
|
||||
"net"
|
||||
"net/http"
|
||||
"net/netip"
|
||||
"sync"
|
||||
"time"
|
||||
|
||||
"tailscale.com/client/tailscale"
|
||||
"tailscale.com/client/web"
|
||||
"tailscale.com/logtail/backoff"
|
||||
"tailscale.com/net/netutil"
|
||||
"tailscale.com/tailcfg"
|
||||
"tailscale.com/types/logger"
|
||||
"tailscale.com/util/mak"
|
||||
)
|
||||
|
||||
const webClientPort = web.ListenPort
|
||||
|
||||
// webClient holds state for the web interface for managing this
|
||||
// tailscale instance. The web interface is not used by default,
|
||||
// but initialized by calling LocalBackend.webClientGetOrInit.
|
||||
type webClient struct {
|
||||
mu sync.Mutex // protects webClient fields
|
||||
|
||||
server *web.Server // or nil, initialized lazily
|
||||
|
||||
// lc optionally specifies a LocalClient to use to connect
|
||||
// to the localapi for this tailscaled instance.
|
||||
// If nil, a default is used.
|
||||
lc *tailscale.LocalClient
|
||||
}
|
||||
|
||||
// ConfigureWebClient configures b.web prior to use.
|
||||
// Specifically, it sets b.web.lc to the provided LocalClient.
|
||||
// If provided as nil, b.web.lc is cleared out.
|
||||
func (b *LocalBackend) ConfigureWebClient(lc *tailscale.LocalClient) {
|
||||
b.webClient.mu.Lock()
|
||||
defer b.webClient.mu.Unlock()
|
||||
b.webClient.lc = lc
|
||||
}
|
||||
|
||||
// webClientGetOrInit gets or initializes the web server for managing
|
||||
// this tailscaled instance.
|
||||
// s is always non-nil if err is nil.
|
||||
func (b *LocalBackend) webClientGetOrInit() (s *web.Server, err error) {
|
||||
if !b.ShouldRunWebClient() {
|
||||
return nil, errors.New("web client not enabled for this device")
|
||||
}
|
||||
|
||||
b.webClient.mu.Lock()
|
||||
defer b.webClient.mu.Unlock()
|
||||
if b.webClient.server != nil {
|
||||
return b.webClient.server, nil
|
||||
}
|
||||
|
||||
b.logf("webClientGetOrInit: initializing web ui")
|
||||
if b.webClient.server, err = web.NewServer(web.ServerOpts{
|
||||
Mode: web.ManageServerMode,
|
||||
LocalClient: b.webClient.lc,
|
||||
Logf: b.logf,
|
||||
NewAuthURL: b.newWebClientAuthURL,
|
||||
WaitAuthURL: b.waitWebClientAuthURL,
|
||||
}); err != nil {
|
||||
return nil, fmt.Errorf("web.NewServer: %w", err)
|
||||
}
|
||||
|
||||
b.logf("webClientGetOrInit: started web ui")
|
||||
return b.webClient.server, nil
|
||||
}
|
||||
|
||||
// WebClientShutdown shuts down any running b.webClient servers and
|
||||
// clears out b.webClient state (besides the b.webClient.lc field,
|
||||
// which is left untouched because it's required for future web startups).
|
||||
// WebClientShutdown obtains the b.mu lock.
|
||||
func (b *LocalBackend) webClientShutdown() {
|
||||
b.mu.Lock()
|
||||
for ap, ln := range b.webClientListeners {
|
||||
ln.Close()
|
||||
delete(b.webClientListeners, ap)
|
||||
}
|
||||
b.mu.Unlock()
|
||||
|
||||
b.webClient.mu.Lock() // webClient struct uses its own mutex
|
||||
server := b.webClient.server
|
||||
b.webClient.server = nil
|
||||
b.webClient.mu.Unlock() // release lock before shutdown
|
||||
if server != nil {
|
||||
server.Shutdown()
|
||||
b.logf("WebClientShutdown: shut down web ui")
|
||||
}
|
||||
}
|
||||
|
||||
// handleWebClientConn serves web client requests.
|
||||
func (b *LocalBackend) handleWebClientConn(c net.Conn) error {
|
||||
webServer, err := b.webClientGetOrInit()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
s := http.Server{Handler: webServer}
|
||||
return s.Serve(netutil.NewOneConnListener(c, nil))
|
||||
}
|
||||
|
||||
// updateWebClientListenersLocked creates listeners on the web client port (5252)
|
||||
// for each of the local device's Tailscale IP addresses. This is needed to properly
|
||||
// route local traffic when using kernel networking mode.
|
||||
func (b *LocalBackend) updateWebClientListenersLocked() {
|
||||
if b.netMap == nil {
|
||||
return
|
||||
}
|
||||
|
||||
addrs := b.netMap.GetAddresses()
|
||||
for i := range addrs.Len() {
|
||||
addrPort := netip.AddrPortFrom(addrs.At(i).Addr(), webClientPort)
|
||||
if _, ok := b.webClientListeners[addrPort]; ok {
|
||||
continue // already listening
|
||||
}
|
||||
|
||||
sl := b.newWebClientListener(context.Background(), addrPort, b.logf)
|
||||
mak.Set(&b.webClientListeners, addrPort, sl)
|
||||
|
||||
go sl.Run()
|
||||
}
|
||||
}
|
||||
|
||||
// newWebClientListener returns a listener for local connections to the built-in web client
|
||||
// used to manage this Tailscale instance.
|
||||
func (b *LocalBackend) newWebClientListener(ctx context.Context, ap netip.AddrPort, logf logger.Logf) *localListener {
|
||||
ctx, cancel := context.WithCancel(ctx)
|
||||
return &localListener{
|
||||
b: b,
|
||||
ap: ap,
|
||||
ctx: ctx,
|
||||
cancel: cancel,
|
||||
logf: logf,
|
||||
|
||||
handler: b.handleWebClientConn,
|
||||
bo: backoff.NewBackoff("webclient-listener", logf, 30*time.Second),
|
||||
}
|
||||
}
|
||||
|
||||
// newWebClientAuthURL talks to the control server to create a new auth
|
||||
// URL that can be used to validate a browser session to manage this
|
||||
// tailscaled instance via the web client.
|
||||
func (b *LocalBackend) newWebClientAuthURL(ctx context.Context, src tailcfg.NodeID) (*tailcfg.WebClientAuthResponse, error) {
|
||||
return b.doWebClientNoiseRequest(ctx, "", src)
|
||||
}
|
||||
|
||||
// waitWebClientAuthURL connects to the control server and blocks
|
||||
// until the associated auth URL has been completed by its user,
|
||||
// or until ctx is canceled.
|
||||
func (b *LocalBackend) waitWebClientAuthURL(ctx context.Context, id string, src tailcfg.NodeID) (*tailcfg.WebClientAuthResponse, error) {
|
||||
return b.doWebClientNoiseRequest(ctx, id, src)
|
||||
}
|
||||
|
||||
// doWebClientNoiseRequest handles making the "/machine/webclient"
|
||||
// noise requests to the control server for web client user auth.
|
||||
//
|
||||
// It either creates a new control auth URL or waits for an existing
|
||||
// one to be completed, based on the presence or absence of the
|
||||
// provided id value.
|
||||
func (b *LocalBackend) doWebClientNoiseRequest(ctx context.Context, id string, src tailcfg.NodeID) (*tailcfg.WebClientAuthResponse, error) {
|
||||
nm := b.NetMap()
|
||||
if nm == nil || !nm.SelfNode.Valid() {
|
||||
return nil, errors.New("[unexpected] no self node")
|
||||
}
|
||||
dst := nm.SelfNode.ID()
|
||||
var noiseURL string
|
||||
if id != "" {
|
||||
noiseURL = fmt.Sprintf("https://unused/machine/webclient/wait/%d/to/%d/%s", src, dst, id)
|
||||
} else {
|
||||
noiseURL = fmt.Sprintf("https://unused/machine/webclient/init/%d/to/%d", src, dst)
|
||||
}
|
||||
|
||||
req, err := http.NewRequestWithContext(ctx, "POST", noiseURL, nil)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
resp, err := b.DoNoiseRequest(req)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
body, _ := io.ReadAll(resp.Body)
|
||||
resp.Body.Close()
|
||||
if resp.StatusCode != http.StatusOK {
|
||||
return nil, fmt.Errorf("failed request: %s", body)
|
||||
}
|
||||
var authResp *tailcfg.WebClientAuthResponse
|
||||
if err := json.Unmarshal(body, &authResp); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return authResp, nil
|
||||
}
|
||||
30
vendor/tailscale.com/ipn/ipnlocal/web_client_stub.go
generated
vendored
Normal file
@@ -0,0 +1,30 @@
|
||||
// Copyright (c) Tailscale Inc & AUTHORS
|
||||
// SPDX-License-Identifier: BSD-3-Clause
|
||||
|
||||
//go:build ios || android
|
||||
|
||||
package ipnlocal
|
||||
|
||||
import (
|
||||
"errors"
|
||||
"net"
|
||||
|
||||
"tailscale.com/client/tailscale"
|
||||
)
|
||||
|
||||
const webClientPort = 5252
|
||||
|
||||
type webClient struct{}
|
||||
|
||||
func (b *LocalBackend) ConfigureWebClient(lc *tailscale.LocalClient) {}
|
||||
|
||||
func (b *LocalBackend) webClientGetOrInit() error {
|
||||
return errors.New("not implemented")
|
||||
}
|
||||
|
||||
func (b *LocalBackend) webClientShutdown() {}
|
||||
|
||||
func (b *LocalBackend) handleWebClientConn(c net.Conn) error {
|
||||
return errors.New("not implemented")
|
||||
}
|
||||
func (b *LocalBackend) updateWebClientListenersLocked() {}