Update dependencies

bluepython508
2025-04-09 01:00:12 +01:00
parent f0641ffd6e
commit 5a9cfc022c
882 changed files with 68930 additions and 24201 deletions

View File

@@ -11,6 +11,7 @@ import (
"net"
"net/http"
"runtime"
"strconv"
"strings"
"sync"
"syscall"
@@ -23,6 +24,7 @@ import (
 // Detector checks whether the system is behind a captive portal.
 type Detector struct {
+	clock func() time.Time
 	// httpClient is the HTTP client that is used for captive portal detection. It is configured
 	// to not follow redirects, have a short timeout and no keep-alive.
@@ -52,6 +54,13 @@ func NewDetector(logf logger.Logf) *Detector {
 	return d
 }
 
+func (d *Detector) Now() time.Time {
+	if d.clock != nil {
+		return d.clock()
+	}
+	return time.Now()
+}
+
 // Timeout is the timeout for captive portal detection requests. Because the captive portal intercepting our requests
 // is usually located on the LAN, this is a relatively short timeout.
 const Timeout = 3 * time.Second
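
The new clock field makes the detector's notion of time injectable: Now() returns the stubbed value when clock is set and falls back to time.Now() otherwise. A minimal sketch of how a test in the same package could pin the clock, using only the field and method added above (the test name is made up):

	func TestNowUsesInjectedClock(t *testing.T) {
		fixed := time.Date(2025, 4, 9, 0, 0, 0, 0, time.UTC)
		d := &Detector{clock: func() time.Time { return fixed }}
		if got := d.Now(); !got.Equal(fixed) {
			t.Fatalf("Now() = %v, want %v", got, fixed)
		}
	}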
@@ -136,26 +145,31 @@ func interfaceNameDoesNotNeedCaptiveDetection(ifName string, goos string) bool {
 func (d *Detector) detectOnInterface(ctx context.Context, ifIndex int, endpoints []Endpoint) bool {
 	defer d.httpClient.CloseIdleConnections()
 
-	d.logf("[v2] %d available captive portal detection endpoints: %v", len(endpoints), endpoints)
+	use := min(len(endpoints), 5)
+	endpoints = endpoints[:use]
+	d.logf("[v2] %d available captive portal detection endpoints; trying %v", len(endpoints), use)
 
 	// We try to detect the captive portal more quickly by making requests to multiple endpoints concurrently.
 	var wg sync.WaitGroup
 	resultCh := make(chan bool, len(endpoints))
 
-	for i, e := range endpoints {
-		if i >= 5 {
-			// Try a maximum of 5 endpoints, break out (returning false) if we run out of attempts.
-			break
-		}
+	// Once any goroutine detects a captive portal, we shut down the others.
+	ctx, cancel := context.WithCancel(ctx)
+	defer cancel()
+
+	for _, e := range endpoints {
 		wg.Add(1)
 		go func(endpoint Endpoint) {
 			defer wg.Done()
 			found, err := d.verifyCaptivePortalEndpoint(ctx, endpoint, ifIndex)
 			if err != nil {
-				d.logf("[v1] checkCaptivePortalEndpoint failed with endpoint %v: %v", endpoint, err)
+				if ctx.Err() == nil {
+					d.logf("[v1] checkCaptivePortalEndpoint failed with endpoint %v: %v", endpoint, err)
+				}
 				return
 			}
 			if found {
+				cancel() // one match is good enough
 				resultCh <- true
 			}
 		}(e)
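
This hunk turns the probe loop into a first-result-wins fan-out: the result channel is buffered to len(endpoints) so no goroutine ever blocks on send, cancel() tears down the remaining probes once one endpoint confirms a portal, and the ctx.Err() check keeps cancellation-induced failures out of the logs. The same shape, reduced to a self-contained sketch (firstTrue is a name invented for illustration; it assumes the context and sync imports and Go 1.18+ generics, and is not the detector's actual code):

	// firstTrue fans out check across items and reports whether any call
	// returned true. The first success cancels the context shared by the
	// remaining calls.
	func firstTrue[T any](ctx context.Context, items []T, check func(context.Context, T) bool) bool {
		ctx, cancel := context.WithCancel(ctx)
		defer cancel()
		results := make(chan bool, len(items)) // buffered so senders never block
		var wg sync.WaitGroup
		for _, it := range items {
			wg.Add(1)
			go func(it T) {
				defer wg.Done()
				if check(ctx, it) {
					cancel() // one match is enough; stop the others
					results <- true
				}
			}(it)
		}
		go func() { wg.Wait(); close(results) }()
		for r := range results {
			if r {
				return true
			}
		}
		return false
	}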
@@ -182,10 +196,16 @@ func (d *Detector) verifyCaptivePortalEndpoint(ctx context.Context, e Endpoint,
 	ctx, cancel := context.WithTimeout(ctx, Timeout)
 	defer cancel()
 
-	req, err := http.NewRequestWithContext(ctx, "GET", e.URL.String(), nil)
+	u := *e.URL
+	v := u.Query()
+	v.Add("t", strconv.Itoa(int(d.Now().Unix())))
+	u.RawQuery = v.Encode()
+	req, err := http.NewRequestWithContext(ctx, "GET", u.String(), nil)
 	if err != nil {
 		return false, err
 	}
+	req.Header.Set("Cache-Control", "no-cache, no-store, must-revalidate, no-transform, max-age=0")
 
 	// Attach the Tailscale challenge header if the endpoint supports it. Not all captive portal detection endpoints
 	// support this, so we only attach it if the endpoint does.
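
The cache-busting change copies the endpoint's URL by value (u := *e.URL) so the shared Endpoint is never mutated, then appends a t=<unix-seconds> query parameter and a strict Cache-Control header so intermediaries cannot answer from cache. Standalone, the URL transformation looks like this (the hostname is a placeholder; assumes the fmt, net/url, strconv, and time imports):

	u, _ := url.Parse("http://derp1.example.invalid/generate_204")
	q := u.Query()
	q.Add("t", strconv.Itoa(int(time.Now().Unix())))
	u.RawQuery = q.Encode()
	fmt.Println(u.String()) // e.g. http://derp1.example.invalid/generate_204?t=1744156812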

View File

@@ -89,7 +89,7 @@ func availableEndpoints(derpMap *tailcfg.DERPMap, preferredDERPRegionID int, log
 	// Use the DERP IPs as captive portal detection endpoints. Using IPs is better than hostnames
 	// because they do not depend on DNS resolution.
 	for _, region := range derpMap.Regions {
-		if region.Avoid {
+		if region.Avoid || region.NoMeasureNoHome {
 			continue
 		}
 		for _, node := range region.Nodes {
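
Judging by the tailcfg field name, NoMeasureNoHome marks DERP regions that clients should neither latency-measure nor pick as a home region; widening the filter here keeps captive portal probes off those regions as well, for the same reason Avoid-flagged regions are skipped.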