Update dependencies

vendor/tailscale.com/net/netcheck/netcheck.go (generated, vendored): 214 lines changed
@@ -23,7 +23,6 @@ import (
 	"syscall"
 	"time"
 
-	"github.com/tcnksm/go-httpstat"
 	"tailscale.com/derp/derphttp"
 	"tailscale.com/envknob"
 	"tailscale.com/net/captivedetection"
@@ -85,13 +84,14 @@ const (
 
 // Report contains the result of a single netcheck.
 type Report struct {
-	UDP         bool // a UDP STUN round trip completed
-	IPv6        bool // an IPv6 STUN round trip completed
-	IPv4        bool // an IPv4 STUN round trip completed
-	IPv6CanSend bool // an IPv6 packet was able to be sent
-	IPv4CanSend bool // an IPv4 packet was able to be sent
-	OSHasIPv6   bool // could bind a socket to ::1
-	ICMPv4      bool // an ICMPv4 round trip completed
+	Now time.Time // the time the report was run
+
+	UDP         bool // a UDP STUN round trip completed
+	IPv6        bool // an IPv6 STUN round trip completed
+	IPv4        bool // an IPv4 STUN round trip completed
+	IPv6CanSend bool // an IPv6 packet was able to be sent
+	IPv4CanSend bool // an IPv4 packet was able to be sent
+	OSHasIPv6   bool // could bind a socket to ::1
+	ICMPv4      bool // an ICMPv4 round trip completed
 
 	// MappingVariesByDestIP is whether STUN results depend on which
 	// STUN server you're talking to (on IPv4).
@@ -172,25 +172,14 @@ func (r *Report) Clone() *Report {
 		return nil
 	}
 	r2 := *r
-	r2.RegionLatency = cloneDurationMap(r2.RegionLatency)
-	r2.RegionV4Latency = cloneDurationMap(r2.RegionV4Latency)
-	r2.RegionV6Latency = cloneDurationMap(r2.RegionV6Latency)
+	r2.RegionLatency = maps.Clone(r2.RegionLatency)
+	r2.RegionV4Latency = maps.Clone(r2.RegionV4Latency)
+	r2.RegionV6Latency = maps.Clone(r2.RegionV6Latency)
+	r2.GlobalV4Counters = maps.Clone(r2.GlobalV4Counters)
+	r2.GlobalV6Counters = maps.Clone(r2.GlobalV6Counters)
 	return &r2
 }
 
-func cloneDurationMap(m map[int]time.Duration) map[int]time.Duration {
-	if m == nil {
-		return nil
-	}
-	m2 := make(map[int]time.Duration, len(m))
-	for k, v := range m {
-		m2[k] = v
-	}
-	return m2
-}
-
 // Client generates Reports describing the result of both passive and active
 // network configuration probing. It provides two different modes of report, a
 // full report (see MakeNextReportFull) and a more lightweight incremental
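The hand-rolled cloneDurationMap helper above is replaced by maps.Clone from the Go standard library (Go 1.21+), which has the same nil-in, nil-out shallow-copy behavior for any map type. A minimal standalone sketch of the equivalence:

package main

import (
	"fmt"
	"maps"
	"time"
)

func main() {
	// maps.Clone preserves nil, matching the removed helper: cloning a nil
	// map yields a nil map rather than an empty one.
	var nilMap map[int]time.Duration
	fmt.Println(maps.Clone(nilMap) == nil) // true

	// The clone is shallow: top-level entries are independent copies.
	m := map[int]time.Duration{1: 20 * time.Millisecond}
	m2 := maps.Clone(m)
	m2[1] = 99 * time.Millisecond
	fmt.Println(m[1], m2[1]) // 20ms 99ms
}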
@@ -235,6 +224,10 @@ type Client struct {
 	// If false, the default net.Resolver will be used, with no caching.
 	UseDNSCache bool
 
+	// if non-zero, force this DERP region to be preferred in all reports where
+	// the DERP is found to be reachable.
+	ForcePreferredDERP int
+
 	// For tests
 	testEnoughRegions      int
 	testCaptivePortalDelay time.Duration
@@ -391,10 +384,14 @@ type probePlan map[string][]probe
 // sortRegions returns the regions of dm first sorted
 // from fastest to slowest (based on the 'last' report),
 // ending in regions that have no data.
-func sortRegions(dm *tailcfg.DERPMap, last *Report) (prev []*tailcfg.DERPRegion) {
+func sortRegions(dm *tailcfg.DERPMap, last *Report, preferredDERP int) (prev []*tailcfg.DERPRegion) {
 	prev = make([]*tailcfg.DERPRegion, 0, len(dm.Regions))
 	for _, reg := range dm.Regions {
-		if reg.Avoid {
+		if reg.NoMeasureNoHome {
+			continue
+		}
+		// include an otherwise avoided region if it is the current preferred region
+		if reg.Avoid && reg.RegionID != preferredDERP {
 			continue
 		}
 		prev = append(prev, reg)
@@ -419,9 +416,19 @@ func sortRegions(dm *tailcfg.DERPMap, last *Report) (prev []*tailcfg.DERPRegion)
 // a full report, all regions are scanned.)
 const numIncrementalRegions = 3
 
-// makeProbePlan generates the probe plan for a DERPMap, given the most
-// recent report and whether IPv6 is configured on an interface.
-func makeProbePlan(dm *tailcfg.DERPMap, ifState *netmon.State, last *Report) (plan probePlan) {
+// makeProbePlan generates the probe plan for a DERPMap, given the most recent
+// report and the current home DERP. preferredDERP is passed independently of
+// last (report) because last is currently nil'd to indicate a desire for a full
+// netcheck.
+//
+// TODO(raggi,jwhited): refactor the callers and this function to be more clear
+// about full vs. incremental netchecks, and remove the need for the history
+// hiding. This was avoided in an incremental change due to exactly this kind of
+// distant coupling.
+// TODO(raggi): change "preferred DERP" from a historical report to "home DERP",
+// meaning whichever DERP the current home connection uses; this would further
+// reduce flap events.
+func makeProbePlan(dm *tailcfg.DERPMap, ifState *netmon.State, last *Report, preferredDERP int) (plan probePlan) {
 	if last == nil || len(last.RegionLatency) == 0 {
 		return makeProbePlanInitial(dm, ifState)
 	}
@@ -432,9 +439,34 @@ func makeProbePlan(dm *tailcfg.DERPMap, ifState *netmon.State, last *Report) (pl
 	had4 := len(last.RegionV4Latency) > 0
 	had6 := len(last.RegionV6Latency) > 0
 	hadBoth := have6if && had4 && had6
-	for ri, reg := range sortRegions(dm, last) {
-		if ri == numIncrementalRegions {
-			break
+	// #13969 ensure that the home region is always probed.
+	// If a netcheck has unstable latency, such as a user with large amounts of
+	// bufferbloat or a highly congested connection, there are cases where a full
+	// netcheck may observe a one-off high latency to the current home DERP. Prior
+	// to the forced inclusion of the home DERP, an incremental netcheck following
+	// such an event could cause a home DERP move, with restoration back to the
+	// home DERP on the next full netcheck ~5 minutes later, which is highly
+	// disruptive when it causes shifts in geo routed subnet routers. By always
+	// including the home DERP in the incremental netcheck, we ensure the home
+	// DERP is probed even if it observed a recent poor latency sample, so the
+	// latency history checks in home DERP selection can still take effect.
+	// planContainsHome indicates whether the home DERP has been added to the
+	// probePlan; if there is no prior home, there is no home to additionally
+	// include.
+	planContainsHome := preferredDERP == 0
+	for ri, reg := range sortRegions(dm, last, preferredDERP) {
+		regIsHome := reg.RegionID == preferredDERP
+		if ri >= numIncrementalRegions {
+			// planned at least numIncrementalRegions regions and that includes
+			// the last home region (or there was none); plan complete.
+			if planContainsHome {
+				break
+			}
+			// planned at least numIncrementalRegions regions, but not yet the
+			// home region; if this is not the home region, skip it.
+			if !regIsHome {
+				continue
+			}
 		}
 		var p4, p6 []probe
 		do4 := have4if
@@ -445,7 +477,7 @@ func makeProbePlan(dm *tailcfg.DERPMap, ifState *netmon.State, last *Report) (pl
 		tries := 1
 		isFastestTwo := ri < 2
 
-		if isFastestTwo {
+		if isFastestTwo || regIsHome {
 			tries = 2
 		} else if hadBoth {
 			// For dual stack machines, make the 3rd & slower nodes alternate
@@ -456,14 +488,15 @@ func makeProbePlan(dm *tailcfg.DERPMap, ifState *netmon.State, last *Report) (pl
 				do4, do6 = false, true
 			}
 		}
-		if !isFastestTwo && !had6 {
+		if !regIsHome && !isFastestTwo && !had6 {
 			do6 = false
 		}
 
-		if reg.RegionID == last.PreferredDERP {
+		if regIsHome {
 			// But if we already had a DERP home, try extra hard to
 			// make sure it's there so we don't flip flop around.
 			tries = 4
+			planContainsHome = true
 		}
 
 		for try := 0; try < tries; try++ {
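The interaction between the region quota and the forced home-region inclusion is easier to follow outside the diff. Below is a standalone sketch of the same selection rule, using a simplified region type (hypothetical, for illustration only; the real code builds per-node probe lists rather than a region slice):

package main

import "fmt"

// region is a stand-in for tailcfg.DERPRegion in this sketch.
type region struct{ ID int }

const numIncrementalRegions = 3

// pickIncrementalRegions mirrors the probe-plan loop above: take the fastest
// numIncrementalRegions regions, but keep scanning past that cutoff until the
// home region (preferredDERP) has been included, so that a one-off bad latency
// sample cannot evict the home DERP from an incremental netcheck.
func pickIncrementalRegions(sorted []region, preferredDERP int) []region {
	var picked []region
	planContainsHome := preferredDERP == 0 // no prior home: nothing to force in
	for ri, reg := range sorted {
		regIsHome := reg.ID == preferredDERP
		if ri >= numIncrementalRegions {
			if planContainsHome {
				break // quota met and home already planned
			}
			if !regIsHome {
				continue // past the quota, only the home region may be added
			}
		}
		if regIsHome {
			planContainsHome = true
		}
		picked = append(picked, reg)
	}
	return picked
}

func main() {
	sorted := []region{{1}, {2}, {3}, {4}, {5}} // fastest to slowest
	fmt.Println(pickIncrementalRegions(sorted, 5)) // [{1} {2} {3} {5}]
	fmt.Println(pickIncrementalRegions(sorted, 0)) // [{1} {2} {3}]
}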
@@ -503,7 +536,7 @@ func makeProbePlanInitial(dm *tailcfg.DERPMap, ifState *netmon.State) (plan prob
 	plan = make(probePlan)
 
 	for _, reg := range dm.Regions {
-		if len(reg.Nodes) == 0 {
+		if reg.NoMeasureNoHome || len(reg.Nodes) == 0 {
 			continue
 		}
 
@@ -742,6 +775,12 @@ func (o *GetReportOpts) getLastDERPActivity(region int) time.Time {
 	return o.GetLastDERPActivity(region)
 }
 
+func (c *Client) SetForcePreferredDERP(region int) {
+	c.mu.Lock()
+	defer c.mu.Unlock()
+	c.ForcePreferredDERP = region
+}
+
 // GetReport gets a report. The 'opts' argument is optional and can be nil.
 // Callers are discouraged from passing a ctx with an arbitrary deadline as this
 // may cause GetReport to return prematurely before all reporting methods have
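SetForcePreferredDERP takes the client mutex and records the override; it takes effect on subsequent reports, and a zero region clears it, since all of the force logic is gated on a non-zero value. A minimal caller sketch, assuming only a constructed netcheck.Client with a logger; the tailscaled wiring that drives this is not shown:

package main

import (
	"log"

	"tailscale.com/net/netcheck"
)

func main() {
	c := &netcheck.Client{Logf: log.Printf}
	// Prefer DERP region 2 in subsequent reports whenever it is reachable.
	c.SetForcePreferredDERP(2)
	// A zero region clears the override.
	c.SetForcePreferredDERP(0)
}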
@@ -788,9 +827,10 @@ func (c *Client) GetReport(ctx context.Context, dm *tailcfg.DERPMap, opts *GetRe
 	c.curState = rs
 	last := c.last
 
-	// Even if we're doing a non-incremental update, we may want to try our
-	// preferred DERP region for captive portal detection. Save that, if we
-	// have it.
+	// Extract preferredDERP from the last report, if available. It is used in
+	// captive portal detection and DERP flap suppression. Ideally this would be
+	// the currently active home DERP rather than the last report's preferred
+	// DERP, but only the latter is presently available.
 	var preferredDERP int
 	if last != nil {
 		preferredDERP = last.PreferredDERP
@@ -847,7 +887,7 @@ func (c *Client) GetReport(ctx context.Context, dm *tailcfg.DERPMap, opts *GetRe
 
 	var plan probePlan
 	if opts == nil || !opts.OnlyTCP443 {
-		plan = makeProbePlan(dm, ifState, last)
+		plan = makeProbePlan(dm, ifState, last, preferredDERP)
 	}
 
 	// If we're doing a full probe, also check for a captive portal. We
@@ -1061,10 +1101,11 @@ func (c *Client) runHTTPOnlyChecks(ctx context.Context, last *Report, rs *report
 	return nil
 }
 
 // measureHTTPSLatency measures HTTP request latency to the DERP region, but
 // only returns success if an HTTPS request to the region succeeds.
 func (c *Client) measureHTTPSLatency(ctx context.Context, reg *tailcfg.DERPRegion) (time.Duration, netip.Addr, error) {
 	metricHTTPSend.Add(1)
-	var result httpstat.Result
-	ctx, cancel := context.WithTimeout(httpstat.WithHTTPStat(ctx, &result), httpsProbeTimeout)
+	ctx, cancel := context.WithTimeout(ctx, httpsProbeTimeout)
 	defer cancel()
 
 	var ip netip.Addr
@@ -1072,6 +1113,8 @@ func (c *Client) measureHTTPSLatency(ctx context.Context, reg *tailcfg.DERPRegio
 	dc := derphttp.NewNetcheckClient(c.logf, c.NetMon)
 	defer dc.Close()
 
+	// DialRegionTLS may dial multiple times if a node is not available; as such,
+	// it does not have stable timing to measure.
 	tlsConn, tcpConn, node, err := dc.DialRegionTLS(ctx, reg)
 	if err != nil {
 		return 0, ip, err
@@ -1089,6 +1132,8 @@ func (c *Client) measureHTTPSLatency(ctx context.Context, reg *tailcfg.DERPRegio
 	connc := make(chan *tls.Conn, 1)
 	connc <- tlsConn
 
+	// Make an HTTP request to measure, as this enables us to account for MITM
+	// overhead in e.g. corp environments that have HTTP MITM in front of DERP.
 	tr := &http.Transport{
 		DialContext: func(ctx context.Context, network, addr string) (net.Conn, error) {
 			return nil, errors.New("unexpected DialContext dial")
@@ -1104,12 +1149,17 @@ func (c *Client) measureHTTPSLatency(ctx context.Context, reg *tailcfg.DERPRegio
 	}
 	hc := &http.Client{Transport: tr}
 
+	// This is the request that will be measured; the request and response
+	// should be small enough to fit into a single packet each way unless the
+	// connection has already become unstable.
 	req, err := http.NewRequestWithContext(ctx, "GET", "https://"+node.HostName+"/derp/latency-check", nil)
 	if err != nil {
 		return 0, ip, err
 	}
 
+	startTime := c.timeNow()
 	resp, err := hc.Do(req)
+	reqDur := c.timeNow().Sub(startTime)
 	if err != nil {
 		return 0, ip, err
 	}
@@ -1126,11 +1176,12 @@ func (c *Client) measureHTTPSLatency(ctx context.Context, reg *tailcfg.DERPRegio
 	if err != nil {
 		return 0, ip, err
 	}
-	result.End(c.timeNow())
 
-	// TODO: decide best timing heuristic here.
-	// Maybe the server should return the tcpinfo_rtt?
-	return result.ServerProcessing, ip, nil
+	// Return the request duration, not the connection duration, as this is the
+	// best approximation of the RTT latency to the node. (The connection setup
+	// performs happy-eyeballs and TLS, so its timing carries additional
+	// overheads.)
+	return reqDur, ip, nil
 }
 
 func (c *Client) measureAllICMPLatency(ctx context.Context, rs *reportState, need []*tailcfg.DERPRegion) error {
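The measurement pattern in this function, dialing first and then timing only the HTTP exchange over the already-established connection, can be shown self-contained. This sketch runs against a local httptest server rather than DERP, and reuses the same one-shot channel trick the patch uses to hand a pre-dialed connection to the transport:

package main

import (
	"context"
	"errors"
	"fmt"
	"io"
	"net"
	"net/http"
	"net/http/httptest"
	"time"
)

func main() {
	srv := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		io.WriteString(w, "ok")
	}))
	defer srv.Close()

	// Dial ahead of time: connection setup (happy-eyeballs and TLS in the
	// real code) has unstable timing, so it is excluded from the measurement.
	conn, err := net.Dial("tcp", srv.Listener.Addr().String())
	if err != nil {
		panic(err)
	}
	connc := make(chan net.Conn, 1)
	connc <- conn

	tr := &http.Transport{
		DialContext: func(context.Context, string, string) (net.Conn, error) {
			select {
			case c := <-connc:
				return c, nil
			default:
				return nil, errors.New("unexpected second dial")
			}
		},
	}
	hc := &http.Client{Transport: tr}

	req, err := http.NewRequest("GET", srv.URL, nil)
	if err != nil {
		panic(err)
	}

	// Time only the request/response exchange, as the patch does with
	// c.timeNow() around hc.Do(req).
	start := time.Now()
	resp, err := hc.Do(req)
	reqDur := time.Since(start)
	if err != nil {
		panic(err)
	}
	io.Copy(io.Discard, resp.Body)
	resp.Body.Close()
	fmt.Println("request RTT approximation:", reqDur)
}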
@@ -1182,17 +1233,19 @@ func (c *Client) measureICMPLatency(ctx context.Context, reg *tailcfg.DERPRegion
 	// Try pinging the first node in the region
 	node := reg.Nodes[0]
 
-	// Get the IPAddr by asking for the UDP address that we would use for
-	// STUN and then using that IP.
-	//
-	// TODO(andrew-d): this is a bit ugly
-	nodeAddr := c.nodeAddr(ctx, node, probeIPv4)
-	if !nodeAddr.IsValid() {
+	if node.STUNPort < 0 {
+		// If STUN is disabled on a node, interpret that as meaning don't measure latency.
+		return 0, false, nil
+	}
+	const unusedPort = 0
+	stunAddrPort, ok := c.nodeAddrPort(ctx, node, unusedPort, probeIPv4)
+	if !ok {
 		return 0, false, fmt.Errorf("no address for node %v (v4-for-icmp)", node.Name)
 	}
+	ip := stunAddrPort.Addr()
 	addr := &net.IPAddr{
-		IP:   net.IP(nodeAddr.Addr().AsSlice()),
-		Zone: nodeAddr.Addr().Zone(),
+		IP:   net.IP(ip.AsSlice()),
+		Zone: ip.Zone(),
 	}
 
 	// Use the unique node.Name field as the packet data to reduce the
@@ -1236,6 +1289,9 @@ func (c *Client) logConciseReport(r *Report, dm *tailcfg.DERPMap) {
 	if r.CaptivePortal != "" {
 		fmt.Fprintf(w, " captiveportal=%v", r.CaptivePortal)
 	}
+	if c.ForcePreferredDERP != 0 {
+		fmt.Fprintf(w, " force=%v", c.ForcePreferredDERP)
+	}
 	fmt.Fprintf(w, " derp=%v", r.PreferredDERP)
 	if r.PreferredDERP != 0 {
 		fmt.Fprintf(w, " derpdist=")
@@ -1297,6 +1353,7 @@ func (c *Client) addReportHistoryAndSetPreferredDERP(rs *reportState, r *Report,
 		c.prev = map[time.Time]*Report{}
 	}
 	now := c.timeNow()
+	r.Now = now.UTC()
 	c.prev[now] = r
 	c.last = r
 
@@ -1393,6 +1450,21 @@ func (c *Client) addReportHistoryAndSetPreferredDERP(rs *reportState, r *Report,
 		// which undoes any region change we made above.
 		r.PreferredDERP = prevDERP
 	}
+	if c.ForcePreferredDERP != 0 {
+		// If the forced DERP region probed successfully, or has recent traffic,
+		// use it.
+		_, haveLatencySample := r.RegionLatency[c.ForcePreferredDERP]
+		var recentActivity bool
+		if lastHeard := rs.opts.getLastDERPActivity(c.ForcePreferredDERP); !lastHeard.IsZero() {
+			now := c.timeNow()
+			recentActivity = lastHeard.After(rs.start)
+			recentActivity = recentActivity || lastHeard.After(now.Add(-PreferredDERPFrameTime))
+		}
+
+		if haveLatencySample || recentActivity {
+			r.PreferredDERP = c.ForcePreferredDERP
+		}
+	}
 }
 
 func updateLatency(m map[int]time.Duration, regionID int, d time.Duration) {
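The forced region is accepted on either of two signals: a latency sample for it in this report, or DERP traffic seen recently enough. A standalone sketch of that predicate follows; preferredDERPFrameTime stands in for the package's PreferredDERPFrameTime constant, and the 8s value is an assumption of this sketch:

package main

import (
	"fmt"
	"time"
)

// preferredDERPFrameTime stands in for netcheck.PreferredDERPFrameTime;
// the 8s value is assumed here, not taken from the patch.
const preferredDERPFrameTime = 8 * time.Second

// forcedRegionUsable mirrors the acceptance test in the patch: use the forced
// region if this report has a latency sample for it, or if the DERP connection
// showed traffic since the report started or within the last keep-alive frame.
func forcedRegionUsable(haveLatencySample bool, lastHeard, reportStart, now time.Time) bool {
	var recentActivity bool
	if !lastHeard.IsZero() {
		recentActivity = lastHeard.After(reportStart) ||
			lastHeard.After(now.Add(-preferredDERPFrameTime))
	}
	return haveLatencySample || recentActivity
}

func main() {
	now := time.Now()
	start := now.Add(-5 * time.Second)
	fmt.Println(forcedRegionUsable(false, now.Add(-2*time.Second), start, now)) // true: traffic during this report
	fmt.Println(forcedRegionUsable(false, time.Time{}, start, now))             // false: no sample, no traffic
}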
@@ -1438,8 +1510,8 @@ func (rs *reportState) runProbe(ctx context.Context, dm *tailcfg.DERPMap, probe
 		return
 	}
 
-	addr := c.nodeAddr(ctx, node, probe.proto)
-	if !addr.IsValid() {
+	addr, ok := c.nodeAddrPort(ctx, node, node.STUNPort, probe.proto)
+	if !ok {
 		c.logf("netcheck.runProbe: named node %q has no %v address", probe.node, probe.proto)
 		return
 	}
@@ -1488,12 +1560,20 @@ func (rs *reportState) runProbe(ctx context.Context, dm *tailcfg.DERPMap, probe
 		c.vlogf("sent to %v", addr)
 	}
 
-// proto is 4 or 6
-// If it returns nil, the node is skipped.
-func (c *Client) nodeAddr(ctx context.Context, n *tailcfg.DERPNode, proto probeProto) (ap netip.AddrPort) {
-	port := cmp.Or(n.STUNPort, 3478)
+// nodeAddrPort returns the IP:port to send a STUN query to for a given node.
+//
+// The provided port should be n.STUNPort, which may be negative to disable STUN.
+// If STUN is disabled for this node, it returns ok=false.
+// The port parameter is separate so the ICMP caller can provide a fake value.
+//
+// proto is [probeIPv4] or [probeIPv6].
+func (c *Client) nodeAddrPort(ctx context.Context, n *tailcfg.DERPNode, port int, proto probeProto) (_ netip.AddrPort, ok bool) {
+	var zero netip.AddrPort
 	if port < 0 || port > 1<<16-1 {
-		return
+		return zero, false
 	}
+	if port == 0 {
+		port = 3478
+	}
 	if n.STUNTestIP != "" {
 		ip, err := netip.ParseAddr(n.STUNTestIP)
@@ -1506,7 +1586,7 @@ func (c *Client) nodeAddr(ctx context.Context, n *tailcfg.DERPNode, proto probeP
 		if proto == probeIPv6 && ip.Is4() {
 			return
 		}
-		return netip.AddrPortFrom(ip, uint16(port))
+		return netip.AddrPortFrom(ip, uint16(port)), true
 	}
 
 	switch proto {
@@ -1514,20 +1594,20 @@ func (c *Client) nodeAddr(ctx context.Context, n *tailcfg.DERPNode, proto probeP
 		if n.IPv4 != "" {
 			ip, _ := netip.ParseAddr(n.IPv4)
 			if !ip.Is4() {
-				return
+				return zero, false
 			}
-			return netip.AddrPortFrom(ip, uint16(port))
+			return netip.AddrPortFrom(ip, uint16(port)), true
 		}
 	case probeIPv6:
 		if n.IPv6 != "" {
 			ip, _ := netip.ParseAddr(n.IPv6)
 			if !ip.Is6() {
-				return
+				return zero, false
 			}
-			return netip.AddrPortFrom(ip, uint16(port))
+			return netip.AddrPortFrom(ip, uint16(port)), true
 		}
 	default:
-		return
+		return zero, false
 	}
 
 	// The default lookup function if we don't set UseDNSCache is to use net.DefaultResolver.
@@ -1569,13 +1649,13 @@ func (c *Client) nodeAddr(ctx context.Context, n *tailcfg.DERPNode, proto probeP
 	addrs, err := lookupIPAddr(ctx, n.HostName)
 	for _, a := range addrs {
 		if (a.Is4() && probeIsV4) || (a.Is6() && !probeIsV4) {
-			return netip.AddrPortFrom(a, uint16(port))
+			return netip.AddrPortFrom(a, uint16(port)), true
 		}
 	}
 	if err != nil {
 		c.logf("netcheck: DNS lookup error for %q (node %q region %v): %v", n.HostName, n.Name, n.RegionID, err)
 	}
-	return
+	return zero, false
 }
 
 func regionHasDERPNode(r *tailcfg.DERPRegion) bool {
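The port handling in nodeAddrPort encodes three cases: a negative port disables STUN for the node, zero selects the default STUN port 3478, and any other in-range value is used as-is. A standalone sketch of just that logic:

package main

import "fmt"

// stunPort mirrors the nodeAddrPort port handling above: negative disables
// STUN, zero means the default STUN port 3478, anything else in range is
// used verbatim.
func stunPort(port int) (p uint16, ok bool) {
	if port < 0 || port > 1<<16-1 {
		return 0, false
	}
	if port == 0 {
		port = 3478
	}
	return uint16(port), true
}

func main() {
	fmt.Println(stunPort(-1))   // 0 false: STUN disabled for this node
	fmt.Println(stunPort(0))    // 3478 true: default port
	fmt.Println(stunPort(3479)) // 3479 true: explicit port
}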