This commit is contained in:
2026-02-19 10:07:43 +00:00
parent 007438e372
commit 6e637ecf77
1763 changed files with 60820 additions and 279516 deletions

55
vendor/tailscale.com/net/netcheck/captiveportal.go generated vendored Normal file
View File

@@ -0,0 +1,55 @@
// Copyright (c) Tailscale Inc & AUTHORS
// SPDX-License-Identifier: BSD-3-Clause
//go:build !ts_omit_captiveportal
package netcheck
import (
"context"
"time"
"tailscale.com/net/captivedetection"
"tailscale.com/tailcfg"
)
// init registers startCaptivePortalDetection as the netcheck captive
// portal hook. This file is excluded (and the hook left unset) when
// building with the ts_omit_captiveportal tag; callers of the hook
// must therefore check whether it is set before invoking it.
func init() {
	hookStartCaptivePortalDetection.Set(startCaptivePortalDetection)
}
// startCaptivePortalDetection schedules a captive portal probe to run
// after the client's captive portal delay, writing its result into
// rs.report.CaptivePortal. It returns a channel that is closed when the
// probe finishes (or is cancelled) and a stop function that cancels a
// not-yet-started probe.
func startCaptivePortalDetection(ctx context.Context, rs *reportState, dm *tailcfg.DERPMap, preferredDERP int) (done <-chan struct{}, stop func()) {
	c := rs.c

	// NOTE(andrew): we can't simply add this goroutine to the
	// `NewWaitGroupChan` below, since we don't wait for that
	// waitgroup to finish when exiting this function and thus get
	// a data race.
	doneCh := make(chan struct{})
	timer := time.AfterFunc(c.captivePortalDelay(), func() {
		defer close(doneCh)
		detector := captivedetection.NewDetector(c.logf)
		rs.report.CaptivePortal.Set(detector.Detect(ctx, c.NetMon, dm, preferredDERP))
	})

	cancel := func() {
		// Don't cancel our captive portal check if we're
		// explicitly doing a verbose netcheck.
		if c.Verbose {
			return
		}
		// Timer.Stop reports true only when it prevented the
		// callback from running, in which case we must close the
		// signal channel ourselves; otherwise the callback is (or
		// will be) running and closes the channel via its defer.
		if timer.Stop() {
			close(doneCh)
		}
	}
	return doneCh, cancel
}

View File

@@ -23,15 +23,18 @@ import (
"syscall"
"time"
"tailscale.com/derp"
"tailscale.com/derp/derphttp"
"tailscale.com/envknob"
"tailscale.com/net/captivedetection"
"tailscale.com/feature"
"tailscale.com/feature/buildfeatures"
"tailscale.com/hostinfo"
"tailscale.com/net/dnscache"
"tailscale.com/net/neterror"
"tailscale.com/net/netmon"
"tailscale.com/net/netns"
"tailscale.com/net/ping"
"tailscale.com/net/portmapper"
"tailscale.com/net/portmapper/portmappertype"
"tailscale.com/net/sockstats"
"tailscale.com/net/stun"
"tailscale.com/syncs"
@@ -213,7 +216,7 @@ type Client struct {
// PortMapper, if non-nil, is used for portmap queries.
// If nil, portmap discovery is not done.
PortMapper *portmapper.Client // lazily initialized on first use
PortMapper portmappertype.Client
// UseDNSCache controls whether this client should use a
// *dnscache.Resolver to resolve DERP hostnames, when no IP address is
@@ -232,7 +235,7 @@ type Client struct {
testEnoughRegions int
testCaptivePortalDelay time.Duration
mu sync.Mutex // guards following
mu syncs.Mutex // guards following
nextFull bool // do a full region scan, even if last != nil
prev map[time.Time]*Report // some previous reports
last *Report // most recent report
@@ -448,7 +451,7 @@ func makeProbePlan(dm *tailcfg.DERPMap, ifState *netmon.State, last *Report, pre
// restoration back to the home DERP on the next full netcheck ~5 minutes later
// - which is highly disruptive when it causes shifts in geo routed subnet
// routers. By always including the home DERP in the incremental netcheck, we
// ensure that the home DERP is always probed, even if it observed a recenet
// ensure that the home DERP is always probed, even if it observed a recent
// poor latency sample. This inclusion enables the latency history checks in
// home DERP selection to still take effect.
// planContainsHome indicates whether the home DERP has been added to the probePlan,
@@ -594,7 +597,7 @@ type reportState struct {
stopProbeCh chan struct{}
waitPortMap sync.WaitGroup
mu sync.Mutex
mu syncs.Mutex
report *Report // to be returned by GetReport
inFlight map[stun.TxID]func(netip.AddrPort) // called without c.mu held
gotEP4 netip.AddrPort
@@ -728,7 +731,7 @@ func (rs *reportState) probePortMapServices() {
res, err := rs.c.PortMapper.Probe(context.Background())
if err != nil {
if !errors.Is(err, portmapper.ErrGatewayRange) {
if !errors.Is(err, portmappertype.ErrGatewayRange) {
// "skipping portmap; gateway range likely lacks support"
// is not very useful, and too spammy on cloud systems.
// If there are other errors, we want to log those.
@@ -752,6 +755,7 @@ func newReport() *Report {
// GetReportOpts contains options that can be passed to GetReport. Unless
// specified, all fields are optional and can be left as their zero value.
// At most one of OnlyTCP443 or OnlySTUN may be set.
type GetReportOpts struct {
// GetLastDERPActivity is a callback that, if provided, should return
// the absolute time that the calling code last communicated with a
@@ -764,6 +768,8 @@ type GetReportOpts struct {
// OnlyTCP443 constrains netcheck reporting to measurements over TCP port
// 443.
OnlyTCP443 bool
// OnlySTUN constrains netcheck reporting to STUN measurements over UDP.
OnlySTUN bool
}
// getLastDERPActivity calls o.GetLastDERPActivity if both o and
@@ -781,6 +787,8 @@ func (c *Client) SetForcePreferredDERP(region int) {
c.ForcePreferredDERP = region
}
var hookStartCaptivePortalDetection feature.Hook[func(ctx context.Context, rs *reportState, dm *tailcfg.DERPMap, preferredDERP int) (<-chan struct{}, func())]
// GetReport gets a report. The 'opts' argument is optional and can be nil.
// Callers are discouraged from passing a ctx with an arbitrary deadline as this
// may cause GetReport to return prematurely before all reporting methods have
@@ -789,6 +797,13 @@ func (c *Client) SetForcePreferredDERP(region int) {
//
// It may not be called concurrently with itself.
func (c *Client) GetReport(ctx context.Context, dm *tailcfg.DERPMap, opts *GetReportOpts) (_ *Report, reterr error) {
onlySTUN := false
if opts != nil && opts.OnlySTUN {
if opts.OnlyTCP443 {
return nil, errors.New("netcheck: only one of OnlySTUN or OnlyTCP443 may be set in opts")
}
onlySTUN = true
}
defer func() {
if reterr != nil {
metricNumGetReportError.Add(1)
@@ -863,7 +878,10 @@ func (c *Client) GetReport(ctx context.Context, dm *tailcfg.DERPMap, opts *GetRe
c.curState = nil
}()
if runtime.GOOS == "js" || runtime.GOOS == "tamago" {
if runtime.GOOS == "js" || runtime.GOOS == "tamago" || (runtime.GOOS == "plan9" && hostinfo.IsInVM86()) {
if onlySTUN {
return nil, errors.New("platform is restricted to HTTP, but OnlySTUN is set in opts")
}
if err := c.runHTTPOnlyChecks(ctx, last, rs, dm); err != nil {
return nil, err
}
@@ -895,38 +913,9 @@ func (c *Client) GetReport(ctx context.Context, dm *tailcfg.DERPMap, opts *GetRe
// it's unnecessary.
captivePortalDone := syncs.ClosedChan()
captivePortalStop := func() {}
if !rs.incremental {
// NOTE(andrew): we can't simply add this goroutine to the
// `NewWaitGroupChan` below, since we don't wait for that
// waitgroup to finish when exiting this function and thus get
// a data race.
ch := make(chan struct{})
captivePortalDone = ch
tmr := time.AfterFunc(c.captivePortalDelay(), func() {
defer close(ch)
d := captivedetection.NewDetector(c.logf)
found := d.Detect(ctx, c.NetMon, dm, preferredDERP)
rs.report.CaptivePortal.Set(found)
})
captivePortalStop = func() {
// Don't cancel our captive portal check if we're
// explicitly doing a verbose netcheck.
if c.Verbose {
return
}
if tmr.Stop() {
// Stopped successfully; need to close the
// signal channel ourselves.
close(ch)
return
}
// Did not stop; do nothing and it'll finish by itself
// and close the signal channel.
}
if buildfeatures.HasCaptivePortal && !rs.incremental && !onlySTUN {
start := hookStartCaptivePortalDetection.Get()
captivePortalDone, captivePortalStop = start(ctx, rs, dm, preferredDERP)
}
wg := syncs.NewWaitGroupChan()
@@ -969,13 +958,13 @@ func (c *Client) GetReport(ctx context.Context, dm *tailcfg.DERPMap, opts *GetRe
rs.stopTimers()
// Try HTTPS and ICMP latency check if all STUN probes failed due to
// UDP presumably being blocked.
// UDP presumably being blocked, and we are not constrained to only STUN.
// TODO: this should be moved into the probePlan, using probeProto probeHTTPS.
if !rs.anyUDP() && ctx.Err() == nil {
if !rs.anyUDP() && ctx.Err() == nil && !onlySTUN {
var wg sync.WaitGroup
var need []*tailcfg.DERPRegion
for rid, reg := range dm.Regions {
if !rs.haveRegionLatency(rid) && regionHasDERPNode(reg) {
if !rs.haveRegionLatency(rid) && regionHasDERPNode(reg) && !reg.Avoid && !reg.NoMeasureNoHome {
need = append(need, reg)
}
}
@@ -1004,9 +993,9 @@ func (c *Client) GetReport(ctx context.Context, dm *tailcfg.DERPMap, opts *GetRe
c.logf("[v1] netcheck: measuring HTTPS latency of %v (%d): %v", reg.RegionCode, reg.RegionID, err)
} else {
rs.mu.Lock()
if l, ok := rs.report.RegionLatency[reg.RegionID]; !ok {
if latency, ok := rs.report.RegionLatency[reg.RegionID]; !ok {
mak.Set(&rs.report.RegionLatency, reg.RegionID, d)
} else if l >= d {
} else if latency >= d {
rs.report.RegionLatency[reg.RegionID] = d
}
// We set these IPv4 and IPv6 but they're not really used
@@ -1045,7 +1034,7 @@ func (c *Client) finishAndStoreReport(rs *reportState, dm *tailcfg.DERPMap) *Rep
}
// runHTTPOnlyChecks is the netcheck done by environments that can
// only do HTTP requests, such as ws/wasm.
// only do HTTP requests, such as js/wasm.
func (c *Client) runHTTPOnlyChecks(ctx context.Context, last *Report, rs *reportState, dm *tailcfg.DERPMap) error {
var regions []*tailcfg.DERPRegion
if rs.incremental && last != nil {
@@ -1057,9 +1046,25 @@ func (c *Client) runHTTPOnlyChecks(ctx context.Context, last *Report, rs *report
}
if len(regions) == 0 {
for _, dr := range dm.Regions {
if dr.NoMeasureNoHome {
continue
}
regions = append(regions, dr)
}
}
if len(regions) == 1 && hostinfo.IsInVM86() {
// If we only have 1 region and we're probably in a
// network-limited v86 environment, don't actually probe it. Just fake
// some results.
rg := regions[0]
if len(rg.Nodes) > 0 {
node := rg.Nodes[0]
rs.addNodeLatency(node, netip.AddrPort{}, 999*time.Millisecond)
return nil
}
}
c.logf("running HTTP-only netcheck against %v regions", len(regions))
var wg sync.WaitGroup
@@ -1068,7 +1073,6 @@ func (c *Client) runHTTPOnlyChecks(ctx context.Context, last *Report, rs *report
continue
}
wg.Add(1)
rg := rg
go func() {
defer wg.Done()
node := rg.Nodes[0]
@@ -1188,6 +1192,10 @@ func (c *Client) measureAllICMPLatency(ctx context.Context, rs *reportState, nee
if len(need) == 0 {
return nil
}
if runtime.GOOS == "plan9" {
// ICMP isn't implemented.
return nil
}
ctx, done := context.WithTimeout(ctx, icmpProbeTimeout)
defer done()
@@ -1206,9 +1214,9 @@ func (c *Client) measureAllICMPLatency(ctx context.Context, rs *reportState, nee
} else if ok {
c.logf("[v1] ICMP latency of %v (%d): %v", reg.RegionCode, reg.RegionID, d)
rs.mu.Lock()
if l, ok := rs.report.RegionLatency[reg.RegionID]; !ok {
if latency, ok := rs.report.RegionLatency[reg.RegionID]; !ok {
mak.Set(&rs.report.RegionLatency, reg.RegionID, d)
} else if l >= d {
} else if latency >= d {
rs.report.RegionLatency[reg.RegionID] = d
}
@@ -1337,6 +1345,15 @@ const (
// even without receiving a STUN response.
// Note: must remain higher than the derp package frameReceiveRecordRate
PreferredDERPFrameTime = 8 * time.Second
// PreferredDERPKeepAliveTimeout is 2x the DERP Keep Alive timeout. If there
// is no latency data to make judgements from, but we have heard from our
// current DERP region inside of 2x the KeepAlive window, don't switch DERP
// regions yet, keep the current region. This prevents region flapping /
// home DERP removal during short periods of packet loss where the DERP TCP
// connection may itself naturally recover.
// TODO(raggi): expose shared time bounds from the DERP package rather than
// duplicating them here.
PreferredDERPKeepAliveTimeout = 2 * derp.KeepAlive
)
// addReportHistoryAndSetPreferredDERP adds r to the set of recent Reports
@@ -1421,13 +1438,10 @@ func (c *Client) addReportHistoryAndSetPreferredDERP(rs *reportState, r *Report,
// the STUN probe) since we started the netcheck, or in the past 2s, as
// another signal for "this region is still working".
heardFromOldRegionRecently := false
prevRegionLastHeard := rs.opts.getLastDERPActivity(prevDERP)
if changingPreferred {
if lastHeard := rs.opts.getLastDERPActivity(prevDERP); !lastHeard.IsZero() {
now := c.timeNow()
heardFromOldRegionRecently = lastHeard.After(rs.start)
heardFromOldRegionRecently = heardFromOldRegionRecently || lastHeard.After(now.Add(-PreferredDERPFrameTime))
}
heardFromOldRegionRecently = prevRegionLastHeard.After(rs.start)
heardFromOldRegionRecently = heardFromOldRegionRecently || prevRegionLastHeard.After(now.Add(-PreferredDERPFrameTime))
}
// The old region is accessible if we've heard from it via a non-STUN
@@ -1454,17 +1468,20 @@ func (c *Client) addReportHistoryAndSetPreferredDERP(rs *reportState, r *Report,
// If the forced DERP region probed successfully, or has recent traffic,
// use it.
_, haveLatencySample := r.RegionLatency[c.ForcePreferredDERP]
var recentActivity bool
if lastHeard := rs.opts.getLastDERPActivity(c.ForcePreferredDERP); !lastHeard.IsZero() {
now := c.timeNow()
recentActivity = lastHeard.After(rs.start)
recentActivity = recentActivity || lastHeard.After(now.Add(-PreferredDERPFrameTime))
}
lastHeard := rs.opts.getLastDERPActivity(c.ForcePreferredDERP)
recentActivity := lastHeard.After(rs.start)
recentActivity = recentActivity || lastHeard.After(now.Add(-PreferredDERPFrameTime))
if haveLatencySample || recentActivity {
r.PreferredDERP = c.ForcePreferredDERP
}
}
// If there was no latency data to make judgements on, but there is an
// active DERP connection that has at least been doing KeepAlive recently,
// keep it, rather than dropping it.
if r.PreferredDERP == 0 && prevRegionLastHeard.After(now.Add(-PreferredDERPKeepAliveTimeout)) {
r.PreferredDERP = prevDERP
}
}
func updateLatency(m map[int]time.Duration, regionID int, d time.Duration) {

View File

@@ -13,7 +13,6 @@ import (
"tailscale.com/net/stun"
"tailscale.com/types/logger"
"tailscale.com/types/nettype"
"tailscale.com/util/multierr"
)
// Standalone creates the necessary UDP sockets on the given bindAddr and starts
@@ -62,7 +61,7 @@ func (c *Client) Standalone(ctx context.Context, bindAddr string) error {
// If both v4 and v6 failed, report an error, otherwise let one succeed.
if len(errs) == 2 {
return multierr.New(errs...)
return errors.Join(errs...)
}
return nil
}