Update dependencies
238  vendor/tailscale.com/wgengine/capture/capture.go  (generated) (vendored)
@@ -1,238 +0,0 @@
// Copyright (c) Tailscale Inc & AUTHORS
// SPDX-License-Identifier: BSD-3-Clause

// Package capture formats packet logging into a debug pcap stream.
package capture

import (
    "bytes"
    "context"
    "encoding/binary"
    "io"
    "net/http"
    "sync"
    "time"

    _ "embed"

    "tailscale.com/net/packet"
    "tailscale.com/util/set"
)

//go:embed ts-dissector.lua
var DissectorLua string

// Callback describes a function which is called to
// record packets when debugging packet-capture.
// Such callbacks must not take ownership of the
// provided data slice: it may only copy out of it
// within the lifetime of the function.
type Callback func(Path, time.Time, []byte, packet.CaptureMeta)

var bufferPool = sync.Pool{
    New: func() any {
        return new(bytes.Buffer)
    },
}

const flushPeriod = 100 * time.Millisecond

func writePcapHeader(w io.Writer) {
    binary.Write(w, binary.LittleEndian, uint32(0xA1B2C3D4)) // pcap magic number
    binary.Write(w, binary.LittleEndian, uint16(2))          // version major
    binary.Write(w, binary.LittleEndian, uint16(4))          // version minor
    binary.Write(w, binary.LittleEndian, uint32(0))          // this zone
    binary.Write(w, binary.LittleEndian, uint32(0))          // zone significant figures
    binary.Write(w, binary.LittleEndian, uint32(65535))      // max packet len
    binary.Write(w, binary.LittleEndian, uint32(147))        // link-layer ID - USER0
}

func writePktHeader(w *bytes.Buffer, when time.Time, length int) {
    s := when.Unix()
    us := when.UnixMicro() - (s * 1000000)

    binary.Write(w, binary.LittleEndian, uint32(s))      // timestamp in seconds
    binary.Write(w, binary.LittleEndian, uint32(us))     // timestamp microseconds
    binary.Write(w, binary.LittleEndian, uint32(length)) // length present
    binary.Write(w, binary.LittleEndian, uint32(length)) // total length
}

// Path describes where in the data path the packet was captured.
type Path uint8

// Valid Path values.
const (
    // FromLocal indicates the packet was logged as it traversed the FromLocal path:
    // i.e.: A packet from the local system into the TUN.
    FromLocal Path = 0
    // FromPeer indicates the packet was logged upon reception from a remote peer.
    FromPeer Path = 1
    // SynthesizedToLocal indicates the packet was generated from within tailscaled,
    // and is being routed to the local machine's network stack.
    SynthesizedToLocal Path = 2
    // SynthesizedToPeer indicates the packet was generated from within tailscaled,
    // and is being routed to a remote Wireguard peer.
    SynthesizedToPeer Path = 3

    // PathDisco indicates the packet is information about a disco frame.
    PathDisco Path = 254
)

// New creates a new capture sink.
func New() *Sink {
    ctx, c := context.WithCancel(context.Background())
    return &Sink{
        ctx:       ctx,
        ctxCancel: c,
    }
}

// Sink handles callbacks with packets to be logged,
// formatting them into a pcap stream which is mirrored to
// all registered outputs.
type Sink struct {
    ctx       context.Context
    ctxCancel context.CancelFunc

    mu         sync.Mutex
    outputs    set.HandleSet[io.Writer]
    flushTimer *time.Timer // or nil if none running
}

// RegisterOutput connects an output to this sink, which
// will be written to with a pcap stream as packets are logged.
// A function is returned which unregisters the output when
// called.
//
// If w implements io.Closer, it will be closed upon error
// or when the sink is closed. If w implements http.Flusher,
// it will be flushed periodically.
func (s *Sink) RegisterOutput(w io.Writer) (unregister func()) {
    select {
    case <-s.ctx.Done():
        return func() {}
    default:
    }

    writePcapHeader(w)
    s.mu.Lock()
    hnd := s.outputs.Add(w)
    s.mu.Unlock()

    return func() {
        s.mu.Lock()
        defer s.mu.Unlock()
        delete(s.outputs, hnd)
    }
}

// NumOutputs returns the number of outputs registered with the sink.
func (s *Sink) NumOutputs() int {
    s.mu.Lock()
    defer s.mu.Unlock()
    return len(s.outputs)
}

// Close shuts down the sink. Future calls to LogPacket
// are ignored, and any registered output that implements
// io.Closer is closed.
func (s *Sink) Close() error {
    s.ctxCancel()
    s.mu.Lock()
    defer s.mu.Unlock()
    if s.flushTimer != nil {
        s.flushTimer.Stop()
        s.flushTimer = nil
    }

    for _, o := range s.outputs {
        if o, ok := o.(io.Closer); ok {
            o.Close()
        }
    }
    s.outputs = nil
    return nil
}

// WaitCh returns a channel which blocks until
// the sink is closed.
func (s *Sink) WaitCh() <-chan struct{} {
    return s.ctx.Done()
}

func customDataLen(meta packet.CaptureMeta) int {
    length := 4
    if meta.DidSNAT {
        length += meta.OriginalSrc.Addr().BitLen() / 8
    }
    if meta.DidDNAT {
        length += meta.OriginalDst.Addr().BitLen() / 8
    }
    return length
}

// LogPacket is called to insert a packet into the capture.
//
// This function does not take ownership of the provided data slice.
func (s *Sink) LogPacket(path Path, when time.Time, data []byte, meta packet.CaptureMeta) {
    select {
    case <-s.ctx.Done():
        return
    default:
    }

    extraLen := customDataLen(meta)
    b := bufferPool.Get().(*bytes.Buffer)
    b.Reset()
    b.Grow(16 + extraLen + len(data)) // 16b pcap header + len(metadata) + len(payload)
    defer bufferPool.Put(b)

    writePktHeader(b, when, len(data)+extraLen)

    // Custom tailscale debugging data
    binary.Write(b, binary.LittleEndian, uint16(path))
    if meta.DidSNAT {
        binary.Write(b, binary.LittleEndian, uint8(meta.OriginalSrc.Addr().BitLen()/8))
        b.Write(meta.OriginalSrc.Addr().AsSlice())
    } else {
        binary.Write(b, binary.LittleEndian, uint8(0)) // SNAT addr len == 0
    }
    if meta.DidDNAT {
        binary.Write(b, binary.LittleEndian, uint8(meta.OriginalDst.Addr().BitLen()/8))
        b.Write(meta.OriginalDst.Addr().AsSlice())
    } else {
        binary.Write(b, binary.LittleEndian, uint8(0)) // DNAT addr len == 0
    }

    b.Write(data)

    s.mu.Lock()
    defer s.mu.Unlock()

    var hadError []set.Handle
    for hnd, o := range s.outputs {
        if _, err := o.Write(b.Bytes()); err != nil {
            hadError = append(hadError, hnd)
            continue
        }
    }
    for _, hnd := range hadError {
        if o, ok := s.outputs[hnd].(io.Closer); ok {
            o.Close()
        }
        delete(s.outputs, hnd)
    }

    if s.flushTimer == nil {
        s.flushTimer = time.AfterFunc(flushPeriod, func() {
            s.mu.Lock()
            defer s.mu.Unlock()
            for _, o := range s.outputs {
                if f, ok := o.(http.Flusher); ok {
                    f.Flush()
                }
            }
            s.flushTimer = nil
        })
    }
}
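For context, a minimal sketch (not part of the diff) of how this now-removed capture.Sink API was driven; the output path and packet bytes are hypothetical:

    s := capture.New()
    f, err := os.Create("tailscale-debug.pcap") // hypothetical output file
    if err != nil {
        log.Fatal(err)
    }
    unregister := s.RegisterOutput(f) // pcap file header is written immediately
    defer unregister()
    defer s.Close()

    // Sink copies the packet out synchronously, so the caller keeps
    // ownership of the slice after LogPacket returns.
    pkt := []byte{0x45, 0x00 /* ...rest of an IPv4 packet... */}
    s.LogPacket(capture.FromLocal, time.Now(), pkt, packet.CaptureMeta{})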
169  vendor/tailscale.com/wgengine/capture/ts-dissector.lua  (generated) (vendored)
@@ -1,169 +0,0 @@
function hasbit(x, p)
    return x % (p + p) >= p
end

tsdebug_ll = Proto("tsdebug", "Tailscale debug")
PATH = ProtoField.string("tsdebug.PATH","PATH", base.ASCII)
SNAT_IP_4 = ProtoField.ipv4("tsdebug.SNAT_IP_4", "Pre-NAT Source IPv4 address")
SNAT_IP_6 = ProtoField.ipv6("tsdebug.SNAT_IP_6", "Pre-NAT Source IPv6 address")
DNAT_IP_4 = ProtoField.ipv4("tsdebug.DNAT_IP_4", "Pre-NAT Dest IPv4 address")
DNAT_IP_6 = ProtoField.ipv6("tsdebug.DNAT_IP_6", "Pre-NAT Dest IPv6 address")
tsdebug_ll.fields = {PATH, SNAT_IP_4, SNAT_IP_6, DNAT_IP_4, DNAT_IP_6}

function tsdebug_ll.dissector(buffer, pinfo, tree)
    pinfo.cols.protocol = tsdebug_ll.name
    packet_length = buffer:len()
    local offset = 0
    local subtree = tree:add(tsdebug_ll, buffer(), "Tailscale packet")

    -- Get path UINT16
    local path_id = buffer:range(offset, 2):le_uint()
    if path_id == 0 then subtree:add(PATH, "FromLocal")
    elseif path_id == 1 then subtree:add(PATH, "FromPeer")
    elseif path_id == 2 then subtree:add(PATH, "Synthesized (Inbound / ToLocal)")
    elseif path_id == 3 then subtree:add(PATH, "Synthesized (Outbound / ToPeer)")
    elseif path_id == 254 then subtree:add(PATH, "Disco frame")
    end
    offset = offset + 2

    -- Get SNAT address
    local snat_addr_len = buffer:range(offset, 1):le_uint()
    if snat_addr_len == 4 then subtree:add(SNAT_IP_4, buffer:range(offset + 1, snat_addr_len))
    elseif snat_addr_len > 0 then subtree:add(SNAT_IP_6, buffer:range(offset + 1, snat_addr_len))
    end
    offset = offset + 1 + snat_addr_len

    -- Get DNAT address
    local dnat_addr_len = buffer:range(offset, 1):le_uint()
    if dnat_addr_len == 4 then subtree:add(DNAT_IP_4, buffer:range(offset + 1, dnat_addr_len))
    elseif dnat_addr_len > 0 then subtree:add(DNAT_IP_6, buffer:range(offset + 1, dnat_addr_len))
    end
    offset = offset + 1 + dnat_addr_len

    -- Handover rest of data to lower-level dissector
    local data_buffer = buffer:range(offset, packet_length-offset):tvb()
    if path_id == 254 then
        Dissector.get("tsdisco"):call(data_buffer, pinfo, tree)
    else
        Dissector.get("ip"):call(data_buffer, pinfo, tree)
    end
end

-- Install the dissector on link-layer ID 147 (User-defined protocol 0)
local eth_table = DissectorTable.get("wtap_encap")
eth_table:add(wtap.USER0, tsdebug_ll)


local ts_dissectors = DissectorTable.new("ts.proto", "Tailscale-specific dissectors", ftypes.STRING, base.NONE)


--
-- DISCO metadata dissector
--
tsdisco_meta = Proto("tsdisco", "Tailscale DISCO metadata")
DISCO_IS_DERP = ProtoField.bool("tsdisco.IS_DERP","From DERP")
DISCO_SRC_IP_4 = ProtoField.ipv4("tsdisco.SRC_IP_4", "Source IPv4 address")
DISCO_SRC_IP_6 = ProtoField.ipv6("tsdisco.SRC_IP_6", "Source IPv6 address")
DISCO_SRC_PORT = ProtoField.uint16("tsdisco.SRC_PORT","Source port", base.DEC)
DISCO_DERP_PUB = ProtoField.bytes("tsdisco.DERP_PUB", "DERP public key", base.SPACE)
tsdisco_meta.fields = {DISCO_IS_DERP, DISCO_SRC_PORT, DISCO_DERP_PUB, DISCO_SRC_IP_4, DISCO_SRC_IP_6}

function tsdisco_meta.dissector(buffer, pinfo, tree)
    pinfo.cols.protocol = tsdisco_meta.name
    packet_length = buffer:len()
    local offset = 0
    local subtree = tree:add(tsdisco_meta, buffer(), "DISCO metadata")

    -- Parse flags
    local from_derp = hasbit(buffer(offset, 1):le_uint(), 1) -- p is the bit's value: p=1 tests flag bit 0 (p=0 would divide by zero in hasbit)
    subtree:add(DISCO_IS_DERP, from_derp) -- Flag bit 0
    offset = offset + 1
    -- Parse DERP public key
    if from_derp then
        subtree:add(DISCO_DERP_PUB, buffer(offset, 32))
    end
    offset = offset + 32

    -- Parse source port
    subtree:add(DISCO_SRC_PORT, buffer:range(offset, 2):le_uint())
    offset = offset + 2

    -- Parse source address
    local addr_len = buffer:range(offset, 2):le_uint()
    offset = offset + 2
    if addr_len == 4 then subtree:add(DISCO_SRC_IP_4, buffer:range(offset, addr_len))
    else subtree:add(DISCO_SRC_IP_6, buffer:range(offset, addr_len))
    end
    offset = offset + addr_len

    -- Handover to the actual disco frame dissector
    offset = offset + 2 -- skip over payload len
    local data_buffer = buffer:range(offset, packet_length-offset):tvb()
    Dissector.get("disco"):call(data_buffer, pinfo, tree)
end

ts_dissectors:add(1, tsdisco_meta)

--
-- DISCO frame dissector
--
tsdisco_frame = Proto("disco", "Tailscale DISCO frame")
DISCO_TYPE = ProtoField.string("disco.TYPE", "Message type", base.ASCII)
DISCO_VERSION = ProtoField.uint8("disco.VERSION","Protocol version", base.DEC)
DISCO_TXID = ProtoField.bytes("disco.TXID", "Transaction ID", base.SPACE)
DISCO_NODEKEY = ProtoField.bytes("disco.NODE_KEY", "Node key", base.SPACE)
DISCO_PONG_SRC = ProtoField.ipv6("disco.PONG_SRC", "Pong source")
DISCO_PONG_SRC_PORT = ProtoField.uint16("disco.PONG_SRC_PORT","Source port", base.DEC)
DISCO_UNKNOWN = ProtoField.bytes("disco.UNKNOWN_DATA", "Trailing data", base.SPACE)
tsdisco_frame.fields = {DISCO_TYPE, DISCO_VERSION, DISCO_TXID, DISCO_NODEKEY, DISCO_PONG_SRC, DISCO_PONG_SRC_PORT, DISCO_UNKNOWN}

function tsdisco_frame.dissector(buffer, pinfo, tree)
    packet_length = buffer:len()
    local offset = 0
    local subtree = tree:add(tsdisco_frame, buffer(), "DISCO frame")

    -- Message type
    local message_type = buffer(offset, 1):le_uint()
    offset = offset + 1
    if message_type == 1 then subtree:add(DISCO_TYPE, "Ping")
    elseif message_type == 2 then subtree:add(DISCO_TYPE, "Pong")
    elseif message_type == 3 then subtree:add(DISCO_TYPE, "Call me maybe")
    end

    -- Message version
    local message_version = buffer(offset, 1):le_uint()
    offset = offset + 1
    subtree:add(DISCO_VERSION, message_version)

    -- TXID (Ping / Pong)
    if message_type == 1 or message_type == 2 then
        subtree:add(DISCO_TXID, buffer(offset, 12))
        offset = offset + 12
    end

    -- NodeKey (Ping)
    if message_type == 1 then
        subtree:add(DISCO_NODEKEY, buffer(offset, 32))
        offset = offset + 32
    end

    -- Src (Pong)
    if message_type == 2 then
        subtree:add(DISCO_PONG_SRC, buffer:range(offset, 16))
        offset = offset + 16
    end
    -- Src port (Pong)
    if message_type == 2 then
        subtree:add(DISCO_PONG_SRC_PORT, buffer(offset, 2):le_uint())
        offset = offset + 2
    end

    -- TODO(tom): Parse CallMeMaybe.MyNumber

    local trailing = buffer:range(offset, packet_length-offset)
    if trailing:len() > 0 then
        subtree:add(DISCO_UNKNOWN, trailing)
    end
end

ts_dissectors:add(2, tsdisco_frame)
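To inspect a capture produced by the Sink above, the dissector can be loaded into Wireshark for a single session (a standard Wireshark flag; the file names are illustrative):

    wireshark -X lua_script:ts-dissector.lua tailscale-debug.pcap

The script registers itself on link-layer type 147 (USER0), matching the link-layer ID that writePcapHeader writes into the pcap file header.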
51  vendor/tailscale.com/wgengine/filter/filter.go  (generated) (vendored)
@@ -24,6 +24,7 @@ import (
    "tailscale.com/types/views"
    "tailscale.com/util/mak"
    "tailscale.com/util/slicesx"
    "tailscale.com/util/usermetric"
    "tailscale.com/wgengine/filter/filtertype"
)

@@ -202,16 +203,17 @@ func New(matches []Match, capTest CapTestFunc, localNets, logIPs *netipx.IPSet,
    }

    f := &Filter{
        logf:     logf,
        matches4: matchesFamily(matches, netip.Addr.Is4),
        matches6: matchesFamily(matches, netip.Addr.Is6),
        cap4:     capMatchesFunc(matches, netip.Addr.Is4),
        cap6:     capMatchesFunc(matches, netip.Addr.Is6),
        local4:   ipset.FalseContainsIPFunc(),
        local6:   ipset.FalseContainsIPFunc(),
        logIPs4:  ipset.FalseContainsIPFunc(),
        logIPs6:  ipset.FalseContainsIPFunc(),
        state:    state,
        logf:        logf,
        matches4:    matchesFamily(matches, netip.Addr.Is4),
        matches6:    matchesFamily(matches, netip.Addr.Is6),
        cap4:        capMatchesFunc(matches, netip.Addr.Is4),
        cap6:        capMatchesFunc(matches, netip.Addr.Is6),
        local4:      ipset.FalseContainsIPFunc(),
        local6:      ipset.FalseContainsIPFunc(),
        logIPs4:     ipset.FalseContainsIPFunc(),
        logIPs6:     ipset.FalseContainsIPFunc(),
        state:       state,
        srcIPHasCap: capTest,
    }
    if localNets != nil {
        p := localNets.Prefixes()

@@ -409,7 +411,7 @@ func (f *Filter) ShieldsUp() bool { return f.shieldsUp }
// Tailscale peer.
func (f *Filter) RunIn(q *packet.Parsed, rf RunFlags) Response {
    dir := in
    r := f.pre(q, rf, dir)
    r, _ := f.pre(q, rf, dir)
    if r == Accept || r == Drop {
        // already logged
        return r

@@ -430,16 +432,16 @@ func (f *Filter) RunIn(q *packet.Parsed, rf RunFlags) Response {

// RunOut determines whether this node is allowed to send q to a
// Tailscale peer.
func (f *Filter) RunOut(q *packet.Parsed, rf RunFlags) Response {
func (f *Filter) RunOut(q *packet.Parsed, rf RunFlags) (Response, usermetric.DropReason) {
    dir := out
    r := f.pre(q, rf, dir)
    r, reason := f.pre(q, rf, dir)
    if r == Accept || r == Drop {
        // already logged
        return r
        return r, reason
    }
    r, why := f.runOut(q)
    f.logRateLimit(rf, q, dir, r, why)
    return r
    return r, ""
}

var unknownProtoStringCache sync.Map // ipproto.Proto -> string

@@ -609,33 +611,38 @@ var gcpDNSAddr = netaddr.IPv4(169, 254, 169, 254)

// pre runs the direction-agnostic filter logic. dir is only used for
// logging.
func (f *Filter) pre(q *packet.Parsed, rf RunFlags, dir direction) Response {
func (f *Filter) pre(q *packet.Parsed, rf RunFlags, dir direction) (Response, usermetric.DropReason) {
    if len(q.Buffer()) == 0 {
        // wireguard keepalive packet, always permit.
        return Accept
        return Accept, ""
    }
    if len(q.Buffer()) < 20 {
        f.logRateLimit(rf, q, dir, Drop, "too short")
        return Drop
        return Drop, usermetric.ReasonTooShort
    }

    if q.IPProto == ipproto.Unknown {
        f.logRateLimit(rf, q, dir, Drop, "unknown proto")
        return Drop, usermetric.ReasonUnknownProtocol
    }

    if q.Dst.Addr().IsMulticast() {
        f.logRateLimit(rf, q, dir, Drop, "multicast")
        return Drop
        return Drop, usermetric.ReasonMulticast
    }
    if q.Dst.Addr().IsLinkLocalUnicast() && q.Dst.Addr() != gcpDNSAddr {
        f.logRateLimit(rf, q, dir, Drop, "link-local-unicast")
        return Drop
        return Drop, usermetric.ReasonLinkLocalUnicast
    }

    if q.IPProto == ipproto.Fragment {
        // Fragments after the first always need to be passed through.
        // Very small fragments are considered Junk by Parsed.
        f.logRateLimit(rf, q, dir, Accept, "fragment")
        return Accept
        return Accept, ""
    }

    return noVerdict
    return noVerdict, ""
}

// loggingAllowed reports whether p can appear in logs at all.
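A sketch of how a caller might consume the new (Response, DropReason) shape; the surrounding wiring is an assumption, only the names come from this diff:

    resp, reason := f.RunOut(q, rf)
    if resp == filter.Drop && reason != "" {
        // e.g. feed a labeled drop counter, in the style of
        // reg.DroppedPacketsOutbound() keyed by usermetric.DropLabels{Reason: reason}.
    }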
3  vendor/tailscale.com/wgengine/magicsock/debughttp.go  (generated) (vendored)
@@ -102,8 +102,7 @@ func (c *Conn) ServeHTTPDebug(w http.ResponseWriter, r *http.Request) {
    sort.Slice(ent, func(i, j int) bool { return ent[i].pub.Less(ent[j].pub) })

    peers := map[key.NodePublic]tailcfg.NodeView{}
    for i := range c.peers.Len() {
        p := c.peers.At(i)
    for _, p := range c.peers.All() {
        peers[p.Key()] = p
    }

58  vendor/tailscale.com/wgengine/magicsock/derp.go  (generated) (vendored)
@@ -64,10 +64,30 @@ func (c *Conn) removeDerpPeerRoute(peer key.NodePublic, regionID int, dc *derpht
// addDerpPeerRoute adds a DERP route entry, noting that peer was seen
// on DERP node derpID, at least on the connection identified by dc.
// See issue 150 for details.
func (c *Conn) addDerpPeerRoute(peer key.NodePublic, derpID int, dc *derphttp.Client) {
func (c *Conn) addDerpPeerRoute(peer key.NodePublic, regionID int, dc *derphttp.Client) {
    c.mu.Lock()
    defer c.mu.Unlock()
    mak.Set(&c.derpRoute, peer, derpRoute{derpID, dc})
    mak.Set(&c.derpRoute, peer, derpRoute{regionID, dc})
}

// fallbackDERPRegionForPeer returns the DERP region ID we might be able to use
// to contact peer, learned from observing recent DERP traffic from them.
//
// This is used as a fallback when we receive a packet from a peer
// over DERP but don't know that peer's home DERP or any UDP endpoints.
// This is particularly useful for large one-way nodes (such as hello.ts.net)
// that don't actively reach out to other nodes, so don't need to be told
// the DERP home of peers. They can instead learn the DERP home upon getting the
// first connection.
//
// This can also help nodes recover from a slow or misbehaving control plane.
func (c *Conn) fallbackDERPRegionForPeer(peer key.NodePublic) (regionID int) {
    c.mu.Lock()
    defer c.mu.Unlock()
    if dr, ok := c.derpRoute[peer]; ok {
        return dr.regionID
    }
    return 0
}

// activeDerp contains fields for an active DERP connection.

@@ -158,10 +178,10 @@ func (c *Conn) maybeSetNearestDERP(report *netcheck.Report) (preferredDERP int)
    } else {
        connectedToControl = c.health.GetInPollNetMap()
    }
    c.mu.Lock()
    myDerp := c.myDerp
    c.mu.Unlock()
    if !connectedToControl {
        c.mu.Lock()
        myDerp := c.myDerp
        c.mu.Unlock()
        if myDerp != 0 {
            metricDERPHomeNoChangeNoControl.Add(1)
            return myDerp

@@ -178,6 +198,11 @@
        // one.
        preferredDERP = c.pickDERPFallback()
    }
    if preferredDERP != myDerp {
        c.logf(
            "magicsock: home DERP changing from derp-%d [%dms] to derp-%d [%dms]",
            c.myDerp, report.RegionLatency[myDerp].Milliseconds(), preferredDERP, report.RegionLatency[preferredDERP].Milliseconds())
    }
    if !c.setNearestDERP(preferredDERP) {
        preferredDERP = 0
    }

@@ -627,7 +652,7 @@ func (c *Conn) runDerpReader(ctx context.Context, regionID int, dc *derphttp.Cli
        // Do nothing.
    case derp.PeerGoneReasonNotHere:
        metricRecvDiscoDERPPeerNotHere.Add(1)
        c.logf("[unexpected] magicsock: derp-%d does not know about peer %s, removing route",
        c.logf("magicsock: derp-%d does not know about peer %s, removing route",
            regionID, key.NodePublic(m.Peer).ShortString())
    default:
        metricRecvDiscoDERPPeerGoneUnknown.Add(1)

@@ -644,9 +669,10 @@
}

type derpWriteRequest struct {
    addr   netip.AddrPort
    pubKey key.NodePublic
    b      []byte // copied; ownership passed to receiver
    addr    netip.AddrPort
    pubKey  key.NodePublic
    b       []byte // copied; ownership passed to receiver
    isDisco bool
}

// runDerpWriter runs in a goroutine for the life of a DERP

@@ -668,8 +694,12 @@ func (c *Conn) runDerpWriter(ctx context.Context, dc *derphttp.Client, ch <-chan
        if err != nil {
            c.logf("magicsock: derp.Send(%v): %v", wr.addr, err)
            metricSendDERPError.Add(1)
        } else {
            metricSendDERP.Add(1)
            if !wr.isDisco {
                c.metrics.outboundPacketsDroppedErrors.Add(1)
            }
        } else if !wr.isDisco {
            c.metrics.outboundPacketsDERPTotal.Add(1)
            c.metrics.outboundBytesDERPTotal.Add(int64(len(wr.b)))
        }
    }
}

@@ -690,7 +720,6 @@ func (c *connBind) receiveDERP(buffs [][]byte, sizes []int, eps []conn.Endpoint)
            // No data read occurred. Wait for another packet.
            continue
        }
        metricRecvDataDERP.Add(1)
        sizes[0] = n
        eps[0] = ep
        return 1, nil

@@ -728,8 +757,11 @@ func (c *Conn) processDERPReadResult(dm derpReadResult, b []byte) (n int, ep *en

    ep.noteRecvActivity(ipp, mono.Now())
    if stats := c.stats.Load(); stats != nil {
        stats.UpdateRxPhysical(ep.nodeAddr, ipp, dm.n)
        stats.UpdateRxPhysical(ep.nodeAddr, ipp, 1, dm.n)
    }

    c.metrics.inboundPacketsDERPTotal.Add(1)
    c.metrics.inboundBytesDERPTotal.Add(int64(n))
    return n, ep
}

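A condensed sketch of the round trip these hunks enable (names from the diff; control flow abridged): the DERP reader records the region a peer was last seen on, and the send path reaches back through that region when nothing else is known, encoded as a magic-IP AddrPort the same way regular DERP homes are:

    // receive path (runDerpReader): remember where the peer reached us.
    c.addDerpPeerRoute(peerKey, regionID, dc)

    // send path (endpoint.send; shown in full in endpoint.go below):
    if rid := de.c.fallbackDERPRegionForPeer(de.publicKey); rid != 0 {
        derpAddr = netip.AddrPortFrom(tailcfg.DerpMagicIPAddr, uint16(rid))
    }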
57  vendor/tailscale.com/wgengine/magicsock/endpoint.go  (generated) (vendored)
@@ -9,6 +9,7 @@ import (
    "encoding/binary"
    "errors"
    "fmt"
    "iter"
    "math"
    "math/rand/v2"
    "net"

@@ -20,7 +21,6 @@ import (
    "sync/atomic"
    "time"

    xmaps "golang.org/x/exp/maps"
    "golang.org/x/net/ipv4"
    "golang.org/x/net/ipv6"
    "tailscale.com/disco"

@@ -33,6 +33,7 @@ import (
    "tailscale.com/types/logger"
    "tailscale.com/util/mak"
    "tailscale.com/util/ringbuffer"
    "tailscale.com/util/slicesx"
)

var mtuProbePingSizesV4 []int

@@ -586,7 +587,7 @@ func (de *endpoint) addrForWireGuardSendLocked(now mono.Time) (udpAddr netip.Add
    needPing := len(de.endpointState) > 1 && now.Sub(oldestPing) > wireguardPingInterval

    if !udpAddr.IsValid() {
        candidates := xmaps.Keys(de.endpointState)
        candidates := slicesx.MapKeys(de.endpointState)

        // Randomly select an address to use until we retrieve latency information
        // and give it a short trustBestAddrUntil time so we avoid flapping between

@@ -947,7 +948,15 @@ func (de *endpoint) send(buffs [][]byte) error {
    de.mu.Unlock()

    if !udpAddr.IsValid() && !derpAddr.IsValid() {
        return errNoUDPOrDERP
        // Make a last ditch effort to see if we have a DERP route for them. If
        // they contacted us over DERP and we don't know their UDP endpoints or
        // their DERP home, we can at least assume they're reachable over the
        // DERP they used to contact us.
        if rid := de.c.fallbackDERPRegionForPeer(de.publicKey); rid != 0 {
            derpAddr = netip.AddrPortFrom(tailcfg.DerpMagicIPAddr, uint16(rid))
        } else {
            return errNoUDPOrDERP
        }
    }
    var err error
    if udpAddr.IsValid() {

@@ -960,26 +969,40 @@ func (de *endpoint) send(buffs [][]byte) error {
        de.noteBadEndpoint(udpAddr)
    }

    var txBytes int
    for _, b := range buffs {
        txBytes += len(b)
    }

    switch {
    case udpAddr.Addr().Is4():
        de.c.metrics.outboundPacketsIPv4Total.Add(int64(len(buffs)))
        de.c.metrics.outboundBytesIPv4Total.Add(int64(txBytes))
    case udpAddr.Addr().Is6():
        de.c.metrics.outboundPacketsIPv6Total.Add(int64(len(buffs)))
        de.c.metrics.outboundBytesIPv6Total.Add(int64(txBytes))
    }

    // TODO(raggi): needs updating for accuracy, as in error conditions we may have partial sends.
    if stats := de.c.stats.Load(); err == nil && stats != nil {
        var txBytes int
        for _, b := range buffs {
            txBytes += len(b)
        }
        stats.UpdateTxPhysical(de.nodeAddr, udpAddr, txBytes)
        stats.UpdateTxPhysical(de.nodeAddr, udpAddr, len(buffs), txBytes)
    }
}
if derpAddr.IsValid() {
    allOk := true
    var txBytes int
    for _, buff := range buffs {
        ok, _ := de.c.sendAddr(derpAddr, de.publicKey, buff)
        if stats := de.c.stats.Load(); stats != nil {
            stats.UpdateTxPhysical(de.nodeAddr, derpAddr, len(buff))
        }
        const isDisco = false
        ok, _ := de.c.sendAddr(derpAddr, de.publicKey, buff, isDisco)
        txBytes += len(buff)
        if !ok {
            allOk = false
        }
    }

    if stats := de.c.stats.Load(); stats != nil {
        stats.UpdateTxPhysical(de.nodeAddr, derpAddr, len(buffs), txBytes)
    }
    if allOk {
        return nil
    }

@@ -1344,7 +1367,7 @@ func (de *endpoint) updateFromNode(n tailcfg.NodeView, heartbeatDisabled bool, p
    })
    de.resetLocked()
}
if n.DERP() == "" {
if n.HomeDERP() == 0 {
    if de.derpAddr.IsValid() {
        de.debugUpdates.Add(EndpointChange{
            When: time.Now(),

@@ -1354,7 +1377,7 @@
    }
    de.derpAddr = netip.AddrPort{}
} else {
    newDerp, _ := netip.ParseAddrPort(n.DERP())
    newDerp := netip.AddrPortFrom(tailcfg.DerpMagicIPAddr, uint16(n.HomeDERP()))
    if de.derpAddr != newDerp {
        de.debugUpdates.Add(EndpointChange{
            When: time.Now(),

@@ -1370,20 +1393,18 @@
}

func (de *endpoint) setEndpointsLocked(eps interface {
    Len() int
    At(i int) netip.AddrPort
    All() iter.Seq2[int, netip.AddrPort]
}) {
    for _, st := range de.endpointState {
        st.index = indexSentinelDeleted // assume deleted until updated in next loop
    }

    var newIpps []netip.AddrPort
    for i := range eps.Len() {
    for i, ipp := range eps.All() {
        if i > math.MaxInt16 {
            // Seems unlikely.
            break
        }
        ipp := eps.At(i)
        if !ipp.IsValid() {
            de.c.logf("magicsock: bogus netmap endpoint from %v", eps)
            continue

293  vendor/tailscale.com/wgengine/magicsock/magicsock.go  (generated) (vendored)
@@ -10,17 +10,18 @@ import (
    "bytes"
    "context"
    "errors"
    "expvar"
    "fmt"
    "io"
    "net"
    "net/netip"
    "reflect"
    "runtime"
    "slices"
    "strconv"
    "strings"
    "sync"
    "sync/atomic"
    "syscall"
    "time"

    "github.com/tailscale/wireguard-go/conn"

@@ -59,9 +60,7 @@ import (
    "tailscale.com/util/ringbuffer"
    "tailscale.com/util/set"
    "tailscale.com/util/testenv"
    "tailscale.com/util/uniq"
    "tailscale.com/util/usermetric"
    "tailscale.com/wgengine/capture"
    "tailscale.com/wgengine/wgint"
)

@@ -80,6 +79,58 @@ const (
    socketBufferSize = 7 << 20
)

// Path is a label indicating the type of path a packet took.
type Path string

const (
    PathDirectIPv4 Path = "direct_ipv4"
    PathDirectIPv6 Path = "direct_ipv6"
    PathDERP       Path = "derp"
)

type pathLabel struct {
    // Path indicates the path that the packet took:
    // - direct_ipv4
    // - direct_ipv6
    // - derp
    Path Path
}

// metrics in wgengine contains the usermetrics counters for magicsock. It
// is, however, a bit special: all the metrics are labeled, but looking up
// a metric every time we need to record it has overhead, and involves
// a lock in MultiLabelMap. The metrics are therefore instead created with
// wgengine and the underlying expvar.Int is stored to be used directly.
type metrics struct {
    // inboundPacketsTotal is the total number of inbound packets received,
    // labeled by the path the packet took.
    inboundPacketsIPv4Total expvar.Int
    inboundPacketsIPv6Total expvar.Int
    inboundPacketsDERPTotal expvar.Int

    // inboundBytesTotal is the total number of inbound bytes received,
    // labeled by the path the packet took.
    inboundBytesIPv4Total expvar.Int
    inboundBytesIPv6Total expvar.Int
    inboundBytesDERPTotal expvar.Int

    // outboundPacketsTotal is the total number of outbound packets sent,
    // labeled by the path the packet took.
    outboundPacketsIPv4Total expvar.Int
    outboundPacketsIPv6Total expvar.Int
    outboundPacketsDERPTotal expvar.Int

    // outboundBytesTotal is the total number of outbound bytes sent,
    // labeled by the path the packet took.
    outboundBytesIPv4Total expvar.Int
    outboundBytesIPv6Total expvar.Int
    outboundBytesDERPTotal expvar.Int

    // outboundPacketsDroppedErrors is the total number of outbound packets
    // dropped due to errors.
    outboundPacketsDroppedErrors expvar.Int
}

// A Conn routes UDP packets and actively manages a list of its endpoints.
type Conn struct {
    // This block mirrors the contents and field order of the Options

@@ -126,6 +177,10 @@ type Conn struct {
    // port mappings from NAT devices.
    portMapper *portmapper.Client

    // portMapperLogfUnregister is the function to call to unregister
    // the portmapper log limiter.
    portMapperLogfUnregister func()

    // derpRecvCh is used by receiveDERP to read DERP messages.
    // It must have buffer size > 0; see issue 3736.
    derpRecvCh chan derpReadResult

@@ -186,7 +241,7 @@ type Conn struct {
    stats atomic.Pointer[connstats.Statistics]

    // captureHook, if non-nil, is the pcap logging callback when capturing.
    captureHook syncs.AtomicValue[capture.Callback]
    captureHook syncs.AtomicValue[packet.CaptureCallback]

    // discoPrivate is the private naclbox key used for active
    // discovery traffic. It is always present, and immutable.

@@ -312,15 +367,18 @@ type Conn struct {
    // wireguard state by its public key. If nil, it's not used.
    getPeerByKey func(key.NodePublic) (_ wgint.Peer, ok bool)

    // lastEPERMRebind tracks the last time a rebind was performed
    // after experiencing a syscall.EPERM.
    lastEPERMRebind syncs.AtomicValue[time.Time]
    // lastErrRebind tracks the last time a rebind was performed after
    // experiencing a write error, and is used to throttle the rate of rebinds.
    lastErrRebind syncs.AtomicValue[time.Time]

    // staticEndpoints are user set endpoints that this node should
    // advertise amongst its wireguard endpoints. It is user's
    // responsibility to ensure that traffic from these endpoints is routed
    // to the node.
    staticEndpoints views.Slice[netip.AddrPort]

    // metrics contains the metrics for the magicsock instance.
    metrics *metrics
}

// SetDebugLoggingEnabled controls whether spammy debug logging is enabled.

@@ -478,10 +536,15 @@ func NewConn(opts Options) (*Conn, error) {
    c.idleFunc = opts.IdleFunc
    c.testOnlyPacketListener = opts.TestOnlyPacketListener
    c.noteRecvActivity = opts.NoteRecvActivity

    // Don't log the same log messages possibly every few seconds in our
    // portmapper.
    portmapperLogf := logger.WithPrefix(c.logf, "portmapper: ")
    portmapperLogf, c.portMapperLogfUnregister = netmon.LinkChangeLogLimiter(portmapperLogf, opts.NetMon)
    portMapOpts := &portmapper.DebugKnobs{
        DisableAll: func() bool { return opts.DisablePortMapper || c.onlyTCP443.Load() },
    }
    c.portMapper = portmapper.NewClient(logger.WithPrefix(c.logf, "portmapper: "), opts.NetMon, portMapOpts, opts.ControlKnobs, c.onPortMapChanged)
    c.portMapper = portmapper.NewClient(portmapperLogf, opts.NetMon, portMapOpts, opts.ControlKnobs, c.onPortMapChanged)
    c.portMapper.SetGatewayLookupFunc(opts.NetMon.GatewayAndSelfIP)
    c.netMon = opts.NetMon
    c.health = opts.HealthTracker

@@ -503,6 +566,8 @@ func NewConn(opts Options) (*Conn, error) {
        UseDNSCache: true,
    }

    c.metrics = registerMetrics(opts.Metrics)

    if d4, err := c.listenRawDisco("ip4"); err == nil {
        c.logf("[v1] using BPF disco receiver for IPv4")
        c.closeDisco4 = d4

@@ -520,11 +585,85 @@ func NewConn(opts Options) (*Conn, error) {
    return c, nil
}

// registerMetrics wires up the metrics for wgengine; instead of
// registering the label metric directly, the underlying expvar is exposed.
// See metrics for more info.
func registerMetrics(reg *usermetric.Registry) *metrics {
    pathDirectV4 := pathLabel{Path: PathDirectIPv4}
    pathDirectV6 := pathLabel{Path: PathDirectIPv6}
    pathDERP := pathLabel{Path: PathDERP}
    inboundPacketsTotal := usermetric.NewMultiLabelMapWithRegistry[pathLabel](
        reg,
        "tailscaled_inbound_packets_total",
        "counter",
        "Counts the number of packets received from other peers",
    )
    inboundBytesTotal := usermetric.NewMultiLabelMapWithRegistry[pathLabel](
        reg,
        "tailscaled_inbound_bytes_total",
        "counter",
        "Counts the number of bytes received from other peers",
    )
    outboundPacketsTotal := usermetric.NewMultiLabelMapWithRegistry[pathLabel](
        reg,
        "tailscaled_outbound_packets_total",
        "counter",
        "Counts the number of packets sent to other peers",
    )
    outboundBytesTotal := usermetric.NewMultiLabelMapWithRegistry[pathLabel](
        reg,
        "tailscaled_outbound_bytes_total",
        "counter",
        "Counts the number of bytes sent to other peers",
    )
    outboundPacketsDroppedErrors := reg.DroppedPacketsOutbound()

    m := new(metrics)

    // Map clientmetrics to the usermetric counters.
    metricRecvDataPacketsIPv4.Register(&m.inboundPacketsIPv4Total)
    metricRecvDataPacketsIPv6.Register(&m.inboundPacketsIPv6Total)
    metricRecvDataPacketsDERP.Register(&m.inboundPacketsDERPTotal)
    metricSendUDP.Register(&m.outboundPacketsIPv4Total)
    metricSendUDP.Register(&m.outboundPacketsIPv6Total)
    metricSendDERP.Register(&m.outboundPacketsDERPTotal)

    inboundPacketsTotal.Set(pathDirectV4, &m.inboundPacketsIPv4Total)
    inboundPacketsTotal.Set(pathDirectV6, &m.inboundPacketsIPv6Total)
    inboundPacketsTotal.Set(pathDERP, &m.inboundPacketsDERPTotal)

    inboundBytesTotal.Set(pathDirectV4, &m.inboundBytesIPv4Total)
    inboundBytesTotal.Set(pathDirectV6, &m.inboundBytesIPv6Total)
    inboundBytesTotal.Set(pathDERP, &m.inboundBytesDERPTotal)

    outboundPacketsTotal.Set(pathDirectV4, &m.outboundPacketsIPv4Total)
    outboundPacketsTotal.Set(pathDirectV6, &m.outboundPacketsIPv6Total)
    outboundPacketsTotal.Set(pathDERP, &m.outboundPacketsDERPTotal)

    outboundBytesTotal.Set(pathDirectV4, &m.outboundBytesIPv4Total)
    outboundBytesTotal.Set(pathDirectV6, &m.outboundBytesIPv6Total)
    outboundBytesTotal.Set(pathDERP, &m.outboundBytesDERPTotal)

    outboundPacketsDroppedErrors.Set(usermetric.DropLabels{Reason: usermetric.ReasonError}, &m.outboundPacketsDroppedErrors)

    return m
}

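The point of handing out the underlying expvar.Int is that the hot path can skip the MultiLabelMap lookup (and its lock) entirely; a condensed view of the pattern, using names from this diff:

    // Cold path, once at startup: bind the label to a concrete counter.
    inboundPacketsTotal.Set(pathLabel{Path: PathDERP}, &m.inboundPacketsDERPTotal)

    // Hot path, per packet: a plain atomic add, no map lookup or lock.
    c.metrics.inboundPacketsDERPTotal.Add(1)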
// deregisterMetrics unregisters the underlying usermetrics expvar counters
// from clientmetrics.
func deregisterMetrics(m *metrics) {
    metricRecvDataPacketsIPv4.UnregisterAll()
    metricRecvDataPacketsIPv6.UnregisterAll()
    metricRecvDataPacketsDERP.UnregisterAll()
    metricSendUDP.UnregisterAll()
    metricSendDERP.UnregisterAll()
}

// InstallCaptureHook installs a callback which is called to
// log debug information into the pcap stream. This function
// can be called with a nil argument to uninstall the capture
// hook.
func (c *Conn) InstallCaptureHook(cb capture.Callback) {
func (c *Conn) InstallCaptureHook(cb packet.CaptureCallback) {
    c.captureHook.Store(cb)
}

@@ -988,8 +1127,8 @@ func (c *Conn) determineEndpoints(ctx context.Context) ([]tailcfg.Endpoint, erro
    // re-run.
    eps = c.endpointTracker.update(time.Now(), eps)

    for i := range c.staticEndpoints.Len() {
        addAddr(c.staticEndpoints.At(i), tailcfg.EndpointExplicitConf)
    for _, ep := range c.staticEndpoints.All() {
        addAddr(ep, tailcfg.EndpointExplicitConf)
    }

    if localAddr := c.pconn4.LocalAddr(); localAddr.IP.IsUnspecified() {

@@ -1078,8 +1217,13 @@ func (c *Conn) networkDown() bool { return !c.networkUp.Load() }
// Send implements conn.Bind.
//
// See https://pkg.go.dev/golang.zx2c4.com/wireguard/conn#Bind.Send
func (c *Conn) Send(buffs [][]byte, ep conn.Endpoint) error {
func (c *Conn) Send(buffs [][]byte, ep conn.Endpoint) (err error) {
    n := int64(len(buffs))
    defer func() {
        if err != nil {
            c.metrics.outboundPacketsDroppedErrors.Add(n)
        }
    }()
    metricSendData.Add(n)
    if c.networkDown() {
        metricSendDataNetworkDown.Add(n)

@@ -1122,7 +1266,7 @@ func (c *Conn) sendUDPBatch(addr netip.AddrPort, buffs [][]byte) (sent bool, err
        c.logf("magicsock: %s", errGSO.Error())
        err = errGSO.RetryErr
    } else {
        _ = c.maybeRebindOnError(runtime.GOOS, err)
        c.maybeRebindOnError(err)
    }
}
return err == nil, err

@@ -1130,48 +1274,44 @@

// sendUDP sends UDP packet b to ipp.
// See sendAddr's docs on the return value meanings.
func (c *Conn) sendUDP(ipp netip.AddrPort, b []byte) (sent bool, err error) {
func (c *Conn) sendUDP(ipp netip.AddrPort, b []byte, isDisco bool) (sent bool, err error) {
    if runtime.GOOS == "js" {
        return false, errNoUDP
    }
    sent, err = c.sendUDPStd(ipp, b)
    if err != nil {
        metricSendUDPError.Add(1)
        _ = c.maybeRebindOnError(runtime.GOOS, err)
        c.maybeRebindOnError(err)
    } else {
        if sent {
            metricSendUDP.Add(1)
        if sent && !isDisco {
            switch {
            case ipp.Addr().Is4():
                c.metrics.outboundPacketsIPv4Total.Add(1)
                c.metrics.outboundBytesIPv4Total.Add(int64(len(b)))
            case ipp.Addr().Is6():
                c.metrics.outboundPacketsIPv6Total.Add(1)
                c.metrics.outboundBytesIPv6Total.Add(int64(len(b)))
            }
        }
    }
    return
}

// maybeRebindOnError performs a rebind and restun if the error is defined and
// any conditionals are met.
func (c *Conn) maybeRebindOnError(os string, err error) bool {
    switch {
    case errors.Is(err, syscall.EPERM):
        why := "operation-not-permitted-rebind"
        switch os {
        // We currently will only rebind and restun on a syscall.EPERM if it is experienced
        // on a client running darwin.
        // TODO(charlotte, raggi): expand os options if required.
        case "darwin":
            // TODO(charlotte): implement a backoff, so we don't end up in a rebind loop for persistent
            // EPERMs.
            if c.lastEPERMRebind.Load().Before(time.Now().Add(-5 * time.Second)) {
                c.logf("magicsock: performing %q", why)
                c.lastEPERMRebind.Store(time.Now())
                c.Rebind()
                go c.ReSTUN(why)
                return true
            }
        default:
            c.logf("magicsock: not performing %q", why)
            return false
        }
// maybeRebindOnError performs a rebind and restun if the error is one that is
// known to be healed by a rebind, and the rebind is not throttled.
func (c *Conn) maybeRebindOnError(err error) {
    ok, reason := shouldRebind(err)
    if !ok {
        return
    }

    if c.lastErrRebind.Load().Before(time.Now().Add(-5 * time.Second)) {
        c.logf("magicsock: performing rebind due to %q", reason)
        c.Rebind()
        go c.ReSTUN(reason)
    } else {
        c.logf("magicsock: not performing %q rebind due to throttle", reason)
    }
    return false
}

// sendUDPNetcheck sends b via UDP to addr. It is used exclusively by netcheck.

@@ -1225,9 +1365,9 @@ func (c *Conn) sendUDPStd(addr netip.AddrPort, b []byte) (sent bool, err error)
// An example of when they might be different: sending to an
// IPv6 address when the local machine doesn't have IPv6 support
// returns (false, nil); it's not an error, but nothing was sent.
func (c *Conn) sendAddr(addr netip.AddrPort, pubKey key.NodePublic, b []byte) (sent bool, err error) {
func (c *Conn) sendAddr(addr netip.AddrPort, pubKey key.NodePublic, b []byte, isDisco bool) (sent bool, err error) {
    if addr.Addr() != tailcfg.DerpMagicIPAddr {
        return c.sendUDP(addr, b)
        return c.sendUDP(addr, b, isDisco)
    }

    regionID := int(addr.Port())

@@ -1248,7 +1388,7 @@
    case <-c.donec:
        metricSendDERPErrorClosed.Add(1)
        return false, errConnClosed
    case ch <- derpWriteRequest{addr, pubKey, pkt}:
    case ch <- derpWriteRequest{addr, pubKey, pkt, isDisco}:
        metricSendDERPQueued.Add(1)
        return true, nil
    default:

@@ -1278,19 +1418,24 @@ func (c *Conn) putReceiveBatch(batch *receiveBatch) {
    c.receiveBatchPool.Put(batch)
}

// receiveIPv4 creates an IPv4 ReceiveFunc reading from c.pconn4.
func (c *Conn) receiveIPv4() conn.ReceiveFunc {
    return c.mkReceiveFunc(&c.pconn4, c.health.ReceiveFuncStats(health.ReceiveIPv4), metricRecvDataIPv4)
    return c.mkReceiveFunc(&c.pconn4, c.health.ReceiveFuncStats(health.ReceiveIPv4),
        &c.metrics.inboundPacketsIPv4Total,
        &c.metrics.inboundBytesIPv4Total,
    )
}

// receiveIPv6 creates an IPv6 ReceiveFunc reading from c.pconn6.
func (c *Conn) receiveIPv6() conn.ReceiveFunc {
    return c.mkReceiveFunc(&c.pconn6, c.health.ReceiveFuncStats(health.ReceiveIPv6), metricRecvDataIPv6)
    return c.mkReceiveFunc(&c.pconn6, c.health.ReceiveFuncStats(health.ReceiveIPv6),
        &c.metrics.inboundPacketsIPv6Total,
        &c.metrics.inboundBytesIPv6Total,
    )
}

// mkReceiveFunc creates a ReceiveFunc reading from ruc.
// The provided healthItem and metric are updated if non-nil.
func (c *Conn) mkReceiveFunc(ruc *RebindingUDPConn, healthItem *health.ReceiveFuncStats, metric *clientmetric.Metric) conn.ReceiveFunc {
// The provided healthItem and metrics are updated if non-nil.
func (c *Conn) mkReceiveFunc(ruc *RebindingUDPConn, healthItem *health.ReceiveFuncStats, packetMetric, bytesMetric *expvar.Int) conn.ReceiveFunc {
    // epCache caches an IPPort->endpoint for hot flows.
    var epCache ippEndpointCache

@@ -1327,8 +1472,11 @@ func (c *Conn) mkReceiveFunc(ruc *RebindingUDPConn, healthItem *health.ReceiveFu
    }
    ipp := msg.Addr.(*net.UDPAddr).AddrPort()
    if ep, ok := c.receiveIP(msg.Buffers[0][:msg.N], ipp, &epCache); ok {
        if metric != nil {
            metric.Add(1)
        if packetMetric != nil {
            packetMetric.Add(1)
        }
        if bytesMetric != nil {
            bytesMetric.Add(int64(msg.N))
        }
        eps[i] = ep
        sizes[i] = msg.N

@@ -1384,7 +1532,7 @@ func (c *Conn) receiveIP(b []byte, ipp netip.AddrPort, cache *ippEndpointCache)
    ep.lastRecvUDPAny.StoreAtomic(now)
    ep.noteRecvActivity(ipp, now)
    if stats := c.stats.Load(); stats != nil {
        stats.UpdateRxPhysical(ep.nodeAddr, ipp, len(b))
        stats.UpdateRxPhysical(ep.nodeAddr, ipp, 1, len(b))
    }
    return ep, true
}

@@ -1438,7 +1586,8 @@ func (c *Conn) sendDiscoMessage(dst netip.AddrPort, dstKey key.NodePublic, dstDi

    box := di.sharedKey.Seal(m.AppendMarshal(nil))
    pkt = append(pkt, box...)
    sent, err = c.sendAddr(dst, dstKey, pkt)
    const isDisco = true
    sent, err = c.sendAddr(dst, dstKey, pkt, isDisco)
    if sent {
        if logLevel == discoLog || (logLevel == discoVerboseLog && debugDisco()) {
            node := "?"

@@ -1568,7 +1717,7 @@ func (c *Conn) handleDiscoMessage(msg []byte, src netip.AddrPort, derpNodeSrc ke
    // Emit information about the disco frame into the pcap stream
    // if a capture hook is installed.
    if cb := c.captureHook.Load(); cb != nil {
        cb(capture.PathDisco, time.Now(), disco.ToPCAPFrame(src, derpNodeSrc, payload), packet.CaptureMeta{})
        cb(packet.PathDisco, time.Now(), disco.ToPCAPFrame(src, derpNodeSrc, payload), packet.CaptureMeta{})
    }

    dm, err := disco.Parse(payload)

@@ -2196,10 +2345,7 @@ func devPanicf(format string, a ...any) {

func (c *Conn) logEndpointCreated(n tailcfg.NodeView) {
    c.logf("magicsock: created endpoint key=%s: disco=%s; %v", n.Key().ShortString(), n.DiscoKey().ShortString(), logger.ArgWriter(func(w *bufio.Writer) {
        const derpPrefix = "127.3.3.40:"
        if strings.HasPrefix(n.DERP(), derpPrefix) {
            ipp, _ := netip.ParseAddrPort(n.DERP())
            regionID := int(ipp.Port())
        if regionID := n.HomeDERP(); regionID != 0 {
            code := c.derpRegionCodeLocked(regionID)
            if code != "" {
                code = "(" + code + ")"

@@ -2207,16 +2353,14 @@ func (c *Conn) logEndpointCreated(n tailcfg.NodeView) {
            fmt.Fprintf(w, "derp=%v%s ", regionID, code)
        }

        for i := range n.AllowedIPs().Len() {
            a := n.AllowedIPs().At(i)
        for _, a := range n.AllowedIPs().All() {
            if a.IsSingleIP() {
                fmt.Fprintf(w, "aip=%v ", a.Addr())
            } else {
                fmt.Fprintf(w, "aip=%v ", a)
            }
        }
        for i := range n.Endpoints().Len() {
            ep := n.Endpoints().At(i)
        for _, ep := range n.Endpoints().All() {
            fmt.Fprintf(w, "ep=%v ", ep)
        }
    }))

@@ -2346,6 +2490,7 @@ func (c *Conn) Close() error {
    }
    c.stopPeriodicReSTUNTimerLocked()
    c.portMapper.Close()
    c.portMapperLogfUnregister()

    c.peerMap.forEachEndpoint(func(ep *endpoint) {
        ep.stopAndReset()

@@ -2377,6 +2522,8 @@ func (c *Conn) Close() error {
        pinger.Close()
    }

    deregisterMetrics(c.metrics)

    return nil
}

@@ -2525,7 +2672,7 @@ func (c *Conn) bindSocket(ruc *RebindingUDPConn, network string, curPortFate cur
    }
    ports = append(ports, 0)
    // Remove duplicates. (All duplicates are consecutive.)
    uniq.ModifySlice(&ports)
    ports = slices.Compact(ports)

    if debugBindSocket() {
        c.logf("magicsock: bindSocket: candidate ports: %+v", ports)

@@ -2860,6 +3007,14 @@ func (c *Conn) DebugPickNewDERP() error {
    return errors.New("too few regions")
}

func (c *Conn) DebugForcePreferDERP(n int) {
    c.mu.Lock()
    defer c.mu.Unlock()

    c.logf("magicsock: [debug] force preferred DERP set to: %d", n)
    c.netChecker.SetForcePreferredDERP(n)
}

// portableTrySetSocketBuffer sets SO_SNDBUF and SO_RECVBUF on pconn to socketBufferSize,
// logging an error if it occurs.
func portableTrySetSocketBuffer(pconn nettype.PacketConn, logf logger.Logf) {

@@ -2930,17 +3085,17 @@ var (
    metricSendDERPErrorChan   = clientmetric.NewCounter("magicsock_send_derp_error_chan")
    metricSendDERPErrorClosed = clientmetric.NewCounter("magicsock_send_derp_error_closed")
    metricSendDERPErrorQueue  = clientmetric.NewCounter("magicsock_send_derp_error_queue")
    metricSendUDP             = clientmetric.NewCounter("magicsock_send_udp")
    metricSendUDP             = clientmetric.NewAggregateCounter("magicsock_send_udp")
    metricSendUDPError        = clientmetric.NewCounter("magicsock_send_udp_error")
    metricSendDERP            = clientmetric.NewCounter("magicsock_send_derp")
    metricSendDERP            = clientmetric.NewAggregateCounter("magicsock_send_derp")
    metricSendDERPError       = clientmetric.NewCounter("magicsock_send_derp_error")

    // Data packets (non-disco)
    metricSendData            = clientmetric.NewCounter("magicsock_send_data")
    metricSendDataNetworkDown = clientmetric.NewCounter("magicsock_send_data_network_down")
    metricRecvDataDERP        = clientmetric.NewCounter("magicsock_recv_data_derp")
    metricRecvDataIPv4        = clientmetric.NewCounter("magicsock_recv_data_ipv4")
    metricRecvDataIPv6        = clientmetric.NewCounter("magicsock_recv_data_ipv6")
    metricRecvDataPacketsDERP = clientmetric.NewAggregateCounter("magicsock_recv_data_derp")
    metricRecvDataPacketsIPv4 = clientmetric.NewAggregateCounter("magicsock_recv_data_ipv4")
    metricRecvDataPacketsIPv6 = clientmetric.NewAggregateCounter("magicsock_recv_data_ipv6")

    // Disco packets
    metricSendDiscoUDP = clientmetric.NewCounter("magicsock_disco_send_udp")

31  vendor/tailscale.com/wgengine/magicsock/magicsock_notplan9.go  (generated) (vendored) (new file)
@@ -0,0 +1,31 @@
// Copyright (c) Tailscale Inc & AUTHORS
// SPDX-License-Identifier: BSD-3-Clause

//go:build !plan9

package magicsock

import (
    "errors"
    "syscall"
)

// shouldRebind reports whether the error is one that is known to be healed
// by a rebind, and if so also returns a reason string for the rebind.
func shouldRebind(err error) (ok bool, reason string) {
    switch {
    // EPIPE/ENOTCONN are common errors when a send fails due to a closed
    // socket. There is some platform and version inconsistency in which
    // error is returned, but the meaning is the same.
    case errors.Is(err, syscall.EPIPE), errors.Is(err, syscall.ENOTCONN):
        return true, "broken-pipe"

    // EPERM is typically caused by EDR software, and has been observed to be
    // transient; it seems that some versions of some EDR lose track of sockets
    // at times and return EPERM, but reconnects will establish appropriate
    // rights associated with a new socket.
    case errors.Is(err, syscall.EPERM):
        return true, "operation-not-permitted"
    }
    return false, ""
}
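A quick illustration (hypothetical caller, not in the diff) of how the classification composes with wrapped errors, since errors.Is unwraps %w chains:

    err := fmt.Errorf("write udp 100.64.0.1:41641: %w", syscall.EPERM)
    if ok, reason := shouldRebind(err); ok {
        log.Printf("magicsock: would rebind: %s", reason) // "operation-not-permitted"
    }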
12  vendor/tailscale.com/wgengine/magicsock/magicsock_plan9.go  (generated) (vendored) (new file)
@@ -0,0 +1,12 @@
// Copyright (c) Tailscale Inc & AUTHORS
// SPDX-License-Identifier: BSD-3-Clause

//go:build plan9

package magicsock

// shouldRebind reports whether the error is one that is known to be healed
// by a rebind, and if so also returns a reason string for the rebind.
func shouldRebind(err error) (ok bool, reason string) {
    return false, ""
}
1  vendor/tailscale.com/wgengine/netstack/gro/gro.go  (generated) (vendored)
@@ -6,6 +6,7 @@ package gro

import (
    "bytes"

    "github.com/tailscale/wireguard-go/tun"
    "gvisor.dev/gvisor/pkg/buffer"
    "gvisor.dev/gvisor/pkg/tcpip"

53  vendor/tailscale.com/wgengine/netstack/link_endpoint.go  (generated) (vendored)
@@ -16,19 +16,27 @@ import (
)

type queue struct {
// TODO(jwhited): evaluate performance with mu as Mutex and/or alternative
// non-channel buffer.
c chan *stack.PacketBuffer
mu sync.RWMutex // mu guards closed
// TODO(jwhited): evaluate performance with a non-channel buffer.
c chan *stack.PacketBuffer

closeOnce sync.Once
closedCh chan struct{}

mu sync.RWMutex
closed bool
}

func (q *queue) Close() {
q.closeOnce.Do(func() {
close(q.closedCh)
})

q.mu.Lock()
defer q.mu.Unlock()
if !q.closed {
close(q.c)
if q.closed {
return
}
close(q.c)
q.closed = true
}

@@ -51,26 +59,27 @@ func (q *queue) ReadContext(ctx context.Context) *stack.PacketBuffer {
}

func (q *queue) Write(pkt *stack.PacketBuffer) tcpip.Error {
// q holds the PacketBuffer.
q.mu.RLock()
defer q.mu.RUnlock()
if q.closed {
return &tcpip.ErrClosedForSend{}
}

wrote := false
select {
case q.c <- pkt.IncRef():
wrote = true
default:
// TODO(jwhited): reconsider/count
pkt.DecRef()
}

if wrote {
return nil
case <-q.closedCh:
pkt.DecRef()
return &tcpip.ErrClosedForSend{}
}
return &tcpip.ErrNoBufferSpace{}
}

func (q *queue) Drain() int {
c := 0
for pkt := range q.c {
pkt.DecRef()
c++
}
return c
}

func (q *queue) Num() int {
@@ -107,7 +116,8 @@ func newLinkEndpoint(size int, mtu uint32, linkAddr tcpip.LinkAddress, supported
le := &linkEndpoint{
supportedGRO: supportedGRO,
q: &queue{
c: make(chan *stack.PacketBuffer, size),
c: make(chan *stack.PacketBuffer, size),
closedCh: make(chan struct{}),
},
mtu: mtu,
linkAddr: linkAddr,
@@ -164,12 +174,7 @@ func (l *linkEndpoint) ReadContext(ctx context.Context) *stack.PacketBuffer {

// Drain removes all outbound packets from the channel and counts them.
func (l *linkEndpoint) Drain() int {
c := 0
for pkt := l.Read(); pkt != nil; pkt = l.Read() {
pkt.DecRef()
c++
}
return c
return l.q.Drain()
}

// NumQueued returns the number of packets queued for outbound.

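The queue rework above splits shutdown into two signals: Close first closes closedCh via closeOnce, without holding mu, so that a Write blocked on the channel send wakes up, fails with ErrClosedForSend, and releases its read lock; only then does Close take the write lock, set closed, and close c. A self-contained sketch of the same pattern, with hypothetical names (closableQueue, Item, errClosedForSend) standing in for the real types:

package queuesketch

import (
	"errors"
	"sync"
)

type Item struct{}

var errClosedForSend = errors.New("closed for send")

type closableQueue struct {
	c         chan *Item
	closeOnce sync.Once
	closedCh  chan struct{} // closed exactly once, to wake writers blocked on c

	mu     sync.RWMutex
	closed bool // guarded by mu; true once c has been closed
}

// Close closes closedCh before taking mu: a Write blocked in the select
// below wakes, returns, and drops its RLock, letting Close acquire the
// write lock and close c exactly once.
func (q *closableQueue) Close() {
	q.closeOnce.Do(func() { close(q.closedCh) })
	q.mu.Lock()
	defer q.mu.Unlock()
	if q.closed {
		return
	}
	close(q.c)
	q.closed = true
}

// Write blocks until the item is queued or the queue is closed.
func (q *closableQueue) Write(it *Item) error {
	q.mu.RLock()
	defer q.mu.RUnlock()
	if q.closed {
		return errClosedForSend
	}
	select {
	case q.c <- it:
		return nil
	case <-q.closedCh:
		return errClosedForSend
	}
}
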
148
vendor/tailscale.com/wgengine/netstack/netstack.go
generated
vendored
@@ -32,7 +32,6 @@ import (
"gvisor.dev/gvisor/pkg/tcpip/transport/tcp"
"gvisor.dev/gvisor/pkg/tcpip/transport/udp"
"gvisor.dev/gvisor/pkg/waiter"
"tailscale.com/drive"
"tailscale.com/envknob"
"tailscale.com/ipn/ipnlocal"
"tailscale.com/metrics"
@@ -51,6 +50,7 @@ import (
"tailscale.com/types/netmap"
"tailscale.com/types/nettype"
"tailscale.com/util/clientmetric"
"tailscale.com/util/set"
"tailscale.com/version"
"tailscale.com/wgengine"
"tailscale.com/wgengine/filter"
@@ -174,19 +174,18 @@ type Impl struct {
// It can only be set before calling Start.
ProcessSubnets bool

ipstack *stack.Stack
linkEP *linkEndpoint
tundev *tstun.Wrapper
e wgengine.Engine
pm *proxymap.Mapper
mc *magicsock.Conn
logf logger.Logf
dialer *tsdial.Dialer
ctx context.Context // alive until Close
ctxCancel context.CancelFunc // called on Close
lb *ipnlocal.LocalBackend // or nil
dns *dns.Manager
driveForLocal drive.FileSystemForLocal // or nil
ipstack *stack.Stack
linkEP *linkEndpoint
tundev *tstun.Wrapper
e wgengine.Engine
pm *proxymap.Mapper
mc *magicsock.Conn
logf logger.Logf
dialer *tsdial.Dialer
ctx context.Context // alive until Close
ctxCancel context.CancelFunc // called on Close
lb *ipnlocal.LocalBackend // or nil
dns *dns.Manager

// loopbackPort, if non-nil, will enable Impl to loop back (dnat to
// <address-family-loopback>:loopbackPort) TCP & UDP flows originally
@@ -202,6 +201,8 @@ type Impl struct {
// updates.
atomicIsLocalIPFunc syncs.AtomicValue[func(netip.Addr) bool]

atomicIsVIPServiceIPFunc syncs.AtomicValue[func(netip.Addr) bool]

// forwardDialFunc, if non-nil, is the net.Dialer.DialContext-style
// function that is used to make outgoing connections when forwarding a
// TCP connection to another host (e.g. in subnet router mode).
@@ -288,7 +289,7 @@ func setTCPBufSizes(ipstack *stack.Stack) error {
}

// Create creates and populates a new Impl.
func Create(logf logger.Logf, tundev *tstun.Wrapper, e wgengine.Engine, mc *magicsock.Conn, dialer *tsdial.Dialer, dns *dns.Manager, pm *proxymap.Mapper, driveForLocal drive.FileSystemForLocal) (*Impl, error) {
func Create(logf logger.Logf, tundev *tstun.Wrapper, e wgengine.Engine, mc *magicsock.Conn, dialer *tsdial.Dialer, dns *dns.Manager, pm *proxymap.Mapper) (*Impl, error) {
if mc == nil {
return nil, errors.New("nil magicsock.Conn")
}
@@ -316,16 +317,19 @@ func Create(logf logger.Logf, tundev *tstun.Wrapper, e wgengine.Engine, mc *magi
if tcpipErr != nil {
return nil, fmt.Errorf("could not enable TCP SACK: %v", tcpipErr)
}
if runtime.GOOS == "windows" {
// See https://github.com/tailscale/tailscale/issues/9707
// Windows w/RACK performs poorly. ACKs do not appear to be handled in a
// timely manner, leading to spurious retransmissions and a reduced
// congestion window.
tcpRecoveryOpt := tcpip.TCPRecovery(0)
tcpipErr = ipstack.SetTransportProtocolOption(tcp.ProtocolNumber, &tcpRecoveryOpt)
if tcpipErr != nil {
return nil, fmt.Errorf("could not disable TCP RACK: %v", tcpipErr)
}
// See https://github.com/tailscale/tailscale/issues/9707
// gVisor's RACK performs poorly. ACKs do not appear to be handled in a
// timely manner, leading to spurious retransmissions and a reduced
// congestion window.
tcpRecoveryOpt := tcpip.TCPRecovery(0)
tcpipErr = ipstack.SetTransportProtocolOption(tcp.ProtocolNumber, &tcpRecoveryOpt)
if tcpipErr != nil {
return nil, fmt.Errorf("could not disable TCP RACK: %v", tcpipErr)
}
cubicOpt := tcpip.CongestionControlOption("cubic")
tcpipErr = ipstack.SetTransportProtocolOption(tcp.ProtocolNumber, &cubicOpt)
if tcpipErr != nil {
return nil, fmt.Errorf("could not set cubic congestion control: %v", tcpipErr)
}
err := setTCPBufSizes(ipstack)
if err != nil {
@@ -382,7 +386,6 @@ func Create(logf logger.Logf, tundev *tstun.Wrapper, e wgengine.Engine, mc *magi
connsInFlightByClient: make(map[netip.Addr]int),
packetsInFlight: make(map[stack.TransportEndpointID]struct{}),
dns: dns,
driveForLocal: driveForLocal,
}
loopbackPort, ok := envknob.LookupInt("TS_DEBUG_NETSTACK_LOOPBACK_PORT")
if ok && loopbackPort >= 0 && loopbackPort <= math.MaxUint16 {
@@ -390,6 +393,7 @@ func Create(logf logger.Logf, tundev *tstun.Wrapper, e wgengine.Engine, mc *magi
}
ns.ctx, ns.ctxCancel = context.WithCancel(context.Background())
ns.atomicIsLocalIPFunc.Store(ipset.FalseContainsIPFunc())
ns.atomicIsVIPServiceIPFunc.Store(ipset.FalseContainsIPFunc())
ns.tundev.PostFilterPacketInboundFromWireGuard = ns.injectInbound
ns.tundev.PreFilterPacketOutboundToWireGuardNetstackIntercept = ns.handleLocalPackets
stacksForMetrics.Store(ns, struct{}{})
@@ -404,6 +408,14 @@ func (ns *Impl) Close() error {
return nil
}

// SetTransportProtocolOption forwards to the underlying
// [stack.Stack.SetTransportProtocolOption]. Callers are responsible for
// ensuring that the options are valid, compatible and appropriate for their use
// case. Compatibility may change at any version.
func (ns *Impl) SetTransportProtocolOption(transport tcpip.TransportProtocolNumber, option tcpip.SettableTransportProtocolOption) tcpip.Error {
return ns.ipstack.SetTransportProtocolOption(transport, option)
}

// A single process might have several netstacks running at the same time.
// Exported clientmetric counters will have a sum of counters of all of them.
var stacksForMetrics syncs.Map[*Impl, struct{}]

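The new exported SetTransportProtocolOption is a thin passthrough to gVisor's stack.Stack. A hedged usage sketch, assuming ns is a started *Impl and reusing the tcpip.TCPSACKEnabled option type that Create itself uses:

sackOpt := tcpip.TCPSACKEnabled(true)
if tcpipErr := ns.SetTransportProtocolOption(tcp.ProtocolNumber, &sackOpt); tcpipErr != nil {
	logf("netstack: could not enable TCP SACK: %v", tcpipErr) // logf is an assumed logger
}
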
@@ -535,7 +547,7 @@ func (ns *Impl) wrapTCPProtocolHandler(h protocolHandlerFunc) protocolHandlerFun

// Dynamically reconfigure ns's subnet addresses as needed for
// outbound traffic.
if !ns.isLocalIP(localIP) {
if !ns.isLocalIP(localIP) && !ns.isVIPServiceIP(localIP) {
ns.addSubnetAddress(localIP)
}

@@ -623,11 +635,19 @@ var v4broadcast = netaddr.IPv4(255, 255, 255, 255)
// address slice views.
func (ns *Impl) UpdateNetstackIPs(nm *netmap.NetworkMap) {
var selfNode tailcfg.NodeView
var serviceAddrSet set.Set[netip.Addr]
if nm != nil {
vipServiceIPMap := nm.GetVIPServiceIPMap()
serviceAddrSet = make(set.Set[netip.Addr], len(vipServiceIPMap)*2)
for _, addrs := range vipServiceIPMap {
serviceAddrSet.AddSlice(addrs)
}
ns.atomicIsLocalIPFunc.Store(ipset.NewContainsIPFunc(nm.GetAddresses()))
ns.atomicIsVIPServiceIPFunc.Store(serviceAddrSet.Contains)
selfNode = nm.SelfNode
} else {
ns.atomicIsLocalIPFunc.Store(ipset.FalseContainsIPFunc())
ns.atomicIsVIPServiceIPFunc.Store(ipset.FalseContainsIPFunc())
}

oldPfx := make(map[netip.Prefix]bool)
@@ -646,18 +666,21 @@ func (ns *Impl) UpdateNetstackIPs(nm *netmap.NetworkMap) {
newPfx := make(map[netip.Prefix]bool)

if selfNode.Valid() {
for i := range selfNode.Addresses().Len() {
p := selfNode.Addresses().At(i)
for _, p := range selfNode.Addresses().All() {
newPfx[p] = true
}
if ns.ProcessSubnets {
for i := range selfNode.AllowedIPs().Len() {
p := selfNode.AllowedIPs().At(i)
for _, p := range selfNode.AllowedIPs().All() {
newPfx[p] = true
}
}
}

for addr := range serviceAddrSet {
p := netip.PrefixFrom(addr, addr.BitLen())
newPfx[p] = true
}

pfxToAdd := make(map[netip.Prefix]bool)
for p := range newPfx {
if !oldPfx[p] {
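Each VIP service address above is installed as a host route by wrapping it in a single-IP prefix with netip.PrefixFrom(addr, addr.BitLen()). A worked example of why BitLen yields /32 for IPv4 and /128 for IPv6 (addresses are illustrative):

a4 := netip.MustParseAddr("100.100.100.100")
fmt.Println(netip.PrefixFrom(a4, a4.BitLen())) // 100.100.100.100/32, since BitLen is 32

a6 := netip.MustParseAddr("fd7a:115c:a1e0::1")
fmt.Println(netip.PrefixFrom(a6, a6.BitLen())) // fd7a:115c:a1e0::1/128, since BitLen is 128
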
@@ -820,6 +843,27 @@ func (ns *Impl) DialContextTCP(ctx context.Context, ipp netip.AddrPort) (*gonet.
return gonet.DialContextTCP(ctx, ns.ipstack, remoteAddress, ipType)
}

// DialContextTCPWithBind creates a new gonet.TCPConn connected to the specified
// remoteAddress with its local address bound to localAddr on an available port.
func (ns *Impl) DialContextTCPWithBind(ctx context.Context, localAddr netip.Addr, remoteAddr netip.AddrPort) (*gonet.TCPConn, error) {
remoteAddress := tcpip.FullAddress{
NIC: nicID,
Addr: tcpip.AddrFromSlice(remoteAddr.Addr().AsSlice()),
Port: remoteAddr.Port(),
}
localAddress := tcpip.FullAddress{
NIC: nicID,
Addr: tcpip.AddrFromSlice(localAddr.AsSlice()),
}
var ipType tcpip.NetworkProtocolNumber
if remoteAddr.Addr().Is4() {
ipType = ipv4.ProtocolNumber
} else {
ipType = ipv6.ProtocolNumber
}
return gonet.DialTCPWithBind(ctx, ns.ipstack, localAddress, remoteAddress, ipType)
}

func (ns *Impl) DialContextUDP(ctx context.Context, ipp netip.AddrPort) (*gonet.UDPConn, error) {
remoteAddress := &tcpip.FullAddress{
NIC: nicID,
@@ -836,6 +880,28 @@ func (ns *Impl) DialContextUDP(ctx context.Context, ipp netip.AddrPort) (*gonet.
return gonet.DialUDP(ns.ipstack, nil, remoteAddress, ipType)
}

// DialContextUDPWithBind creates a new gonet.UDPConn connected to remoteAddr,
// with its local address bound to localAddr on an available port.
func (ns *Impl) DialContextUDPWithBind(ctx context.Context, localAddr netip.Addr, remoteAddr netip.AddrPort) (*gonet.UDPConn, error) {
remoteAddress := &tcpip.FullAddress{
NIC: nicID,
Addr: tcpip.AddrFromSlice(remoteAddr.Addr().AsSlice()),
Port: remoteAddr.Port(),
}
localAddress := &tcpip.FullAddress{
NIC: nicID,
Addr: tcpip.AddrFromSlice(localAddr.AsSlice()),
}
var ipType tcpip.NetworkProtocolNumber
if remoteAddr.Addr().Is4() {
ipType = ipv4.ProtocolNumber
} else {
ipType = ipv6.ProtocolNumber
}

return gonet.DialUDP(ns.ipstack, localAddress, remoteAddress, ipType)
}

// getInjectInboundBuffsSizes returns packet memory and a sizes slice for usage
// when calling tstun.Wrapper.InjectInboundPacketBuffer(). These are sized with
// consideration for MTU and GSO support on ns.linkEP. They should be recycled

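A usage sketch for the two bind-aware dialers added above; the addresses are illustrative, and ns is assumed to be a started *Impl with ctx a live context:

local := netip.MustParseAddr("100.64.0.2")
remote := netip.MustParseAddrPort("100.64.0.7:443")

tcpConn, err := ns.DialContextTCPWithBind(ctx, local, remote)
if err != nil {
	return err
}
defer tcpConn.Close()

udpConn, err := ns.DialContextUDPWithBind(ctx, local, remote)
if err != nil {
	return err
}
defer udpConn.Close()
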
@@ -957,6 +1023,12 @@ func (ns *Impl) isLocalIP(ip netip.Addr) bool {
return ns.atomicIsLocalIPFunc.Load()(ip)
}

// isVIPServiceIP reports whether ip is an IP address that's
// assigned to a VIP service.
func (ns *Impl) isVIPServiceIP(ip netip.Addr) bool {
return ns.atomicIsVIPServiceIPFunc.Load()(ip)
}

func (ns *Impl) peerAPIPortAtomic(ip netip.Addr) *atomic.Uint32 {
if ip.Is4() {
return &ns.peerapiPort4Atomic
@@ -973,6 +1045,7 @@ func (ns *Impl) shouldProcessInbound(p *packet.Parsed, t *tstun.Wrapper) bool {
// Handle incoming peerapi connections in netstack.
dstIP := p.Dst.Addr()
isLocal := ns.isLocalIP(dstIP)
isService := ns.isVIPServiceIP(dstIP)

// Handle TCP connection to the Tailscale IP(s) in some cases:
if ns.lb != nil && p.IPProto == ipproto.TCP && isLocal {
@@ -995,6 +1068,19 @@ func (ns *Impl) shouldProcessInbound(p *packet.Parsed, t *tstun.Wrapper) bool {
return true
}
}
if isService {
if p.IsEchoRequest() {
return true
}
if ns.lb != nil && p.IPProto == ipproto.TCP {
// An assumption must hold for this to work: when tun mode is on for a
// service, its tcp and web are not set. This is enforced in
// b.setServeConfigLocked.
if ns.lb.ShouldInterceptVIPServiceTCPPort(p.Dst) {
return true
}
}
return false
}
if p.IPVersion == 6 && !isLocal && viaRange.Contains(dstIP) {
return ns.lb != nil && ns.lb.ShouldHandleViaIP(dstIP)
}

17
vendor/tailscale.com/wgengine/pendopen.go
generated
vendored
@@ -8,6 +8,7 @@ import (
"net/netip"
"runtime"
"strings"
"sync"
"time"

"github.com/gaissmai/bart"
@@ -15,7 +16,6 @@ import (
"tailscale.com/net/packet"
"tailscale.com/net/tstun"
"tailscale.com/types/ipproto"
"tailscale.com/types/lazy"
"tailscale.com/util/mak"
"tailscale.com/wgengine/filter"
)
@@ -91,7 +91,7 @@ func (e *userspaceEngine) trackOpenPreFilterIn(pp *packet.Parsed, t *tstun.Wrapp

var (
appleIPRange = netip.MustParsePrefix("17.0.0.0/8")
canonicalIPs = lazy.SyncFunc(func() (checkIPFunc func(netip.Addr) bool) {
canonicalIPs = sync.OnceValue(func() (checkIPFunc func(netip.Addr) bool) {
// https://bgp.he.net/AS41231#_prefixes
t := &bart.Table[bool]{}
for _, s := range strings.Fields(`
@@ -198,7 +198,7 @@ func (e *userspaceEngine) onOpenTimeout(flow flowtrack.Tuple) {
e.logf("open-conn-track: timeout opening %v; peer node %v running pre-0.100", flow, n.Key().ShortString())
return
}
if n.DERP() == "" {
if n.HomeDERP() == 0 {
e.logf("open-conn-track: timeout opening %v; peer node %v not connected to any DERP relay", flow, n.Key().ShortString())
return
}
@@ -207,8 +207,7 @@ func (e *userspaceEngine) onOpenTimeout(flow flowtrack.Tuple) {
ps, found := e.getPeerStatusLite(n.Key())
if !found {
onlyZeroRoute := true // whether peerForIP returned n only because its /0 route matched
for i := range n.AllowedIPs().Len() {
r := n.AllowedIPs().At(i)
for _, r := range n.AllowedIPs().All() {
if r.Bits() != 0 && r.Contains(flow.DstAddr()) {
onlyZeroRoute = false
break
@@ -240,15 +239,15 @@ func (e *userspaceEngine) onOpenTimeout(flow flowtrack.Tuple) {
if n.IsWireGuardOnly() {
online = "wg"
} else {
if v := n.Online(); v != nil {
if *v {
if v, ok := n.Online().GetOk(); ok {
if v {
online = "yes"
} else {
online = "no"
}
}
if n.LastSeen() != nil && online != "yes" {
online += fmt.Sprintf(", lastseen=%v", durFmt(*n.LastSeen()))
if lastSeen, ok := n.LastSeen().GetOk(); ok && online != "yes" {
online += fmt.Sprintf(", lastseen=%v", durFmt(lastSeen))
}
}
e.logf("open-conn-track: timeout opening %v to node %v; online=%v, lastRecv=%v",

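This hunk is one of several in the commit that replace index-based iteration over views.Slice values with the All iterator, usable with Go's range-over-func. A minimal before/after sketch, assuming vs is a views.Slice[netip.Prefix]:

// before: index-based access via Len and At
for i := range vs.Len() {
	p := vs.At(i)
	_ = p
}

// after: All yields (index, value) pairs, so the value is the second item
for _, p := range vs.All() {
	_ = p
}
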
52
vendor/tailscale.com/wgengine/router/router_linux.go
generated
vendored
@@ -32,6 +32,8 @@ import (
"tailscale.com/version/distro"
)

var getDistroFunc = distro.Get

const (
netfilterOff = preftype.NetfilterOff
netfilterNoDivert = preftype.NetfilterNoDivert
@@ -222,7 +224,7 @@ func busyboxParseVersion(output string) (major, minor, patch int, err error) {
}

func useAmbientCaps() bool {
if distro.Get() != distro.Synology {
if getDistroFunc() != distro.Synology {
return false
}
return distro.DSMVersion() >= 7
@@ -438,7 +440,7 @@ func (r *linuxRouter) Set(cfg *Config) error {

// Issue 11405: enable IP forwarding on gokrazy.
advertisingRoutes := len(cfg.SubnetRoutes) > 0
if distro.Get() == distro.Gokrazy && advertisingRoutes {
if getDistroFunc() == distro.Gokrazy && advertisingRoutes {
r.enableIPForwarding()
}

@@ -1181,7 +1183,9 @@ var (
tailscaleRouteTable = newRouteTable("tailscale", 52)
)

// ipRules are the policy routing rules that Tailscale uses.
// baseIPRules are the policy routing rules that Tailscale uses, when not
// running on a UBNT device.
//
// The priority is the value represented here added to r.ipPolicyPrefBase,
// which is usually 5200.
//
@@ -1196,7 +1200,7 @@ var (
// and 'ip rule' implementations (including busybox), don't support
// checking for the lack of a fwmark, only the presence. The technique
// below works even on very old kernels.
var ipRules = []netlink.Rule{
var baseIPRules = []netlink.Rule{
// Packets from us, tagged with our fwmark, first try the kernel's
// main routing table.
{
@@ -1232,6 +1236,34 @@ var ipRules = []netlink.Rule{
// usual rules (pref 32766 and 32767, ie. main and default).
}

// ubntIPRules are the policy routing rules that Tailscale uses, when running
// on a UBNT device.
//
// The priority is the value represented here added to
// r.ipPolicyPrefBase, which is usually 5200.
//
// This represents an experiment that will be used to gather more information.
// If this goes well, Tailscale may opt to use this for all of Linux.
var ubntIPRules = []netlink.Rule{
// non-fwmark packets fall through to the usual rules (pref 32766 and 32767,
// ie. main and default).
{
Priority: 70,
Invert: true,
Mark: linuxfw.TailscaleBypassMarkNum,
Table: tailscaleRouteTable.Num,
},
}

// ipRules returns the appropriate list of ip rules to be used by Tailscale. See
// comments on baseIPRules and ubntIPRules for more details.
func ipRules() []netlink.Rule {
if getDistroFunc() == distro.UBNT {
return ubntIPRules
}
return baseIPRules
}

// justAddIPRules adds policy routing rules without deleting any first.
func (r *linuxRouter) justAddIPRules() error {
if !r.ipRuleAvailable {
@@ -1243,7 +1275,7 @@ func (r *linuxRouter) justAddIPRules() error {
var errAcc error
for _, family := range r.addrFamilies() {

for _, ru := range ipRules {
for _, ru := range ipRules() {
// Note: r is a value type here; safe to mutate it.
ru.Family = family.netlinkInt()
if ru.Mark != 0 {
@@ -1272,7 +1304,7 @@ func (r *linuxRouter) addIPRulesWithIPCommand() error {
rg := newRunGroup(nil, r.cmd)

for _, family := range r.addrFamilies() {
for _, rule := range ipRules {
for _, rule := range ipRules() {
args := []string{
"ip", family.dashArg(),
"rule", "add",
@@ -1320,7 +1352,7 @@ func (r *linuxRouter) delIPRules() error {
}
var errAcc error
for _, family := range r.addrFamilies() {
for _, ru := range ipRules {
for _, ru := range ipRules() {
// Note: r is a value type here; safe to mutate it.
// When deleting rules, we want to be a bit specific (mention which
// table we were routing to) but not *too* specific (fwmarks, etc).
@@ -1363,7 +1395,7 @@ func (r *linuxRouter) delIPRulesWithIPCommand() error {
// That leaves us some flexibility to change these values in later
// versions without having ongoing hacks for every possible
// combination.
for _, rule := range ipRules {
for _, rule := range ipRules() {
args := []string{
"ip", family.dashArg(),
"rule", "del",
@@ -1500,7 +1532,7 @@ func normalizeCIDR(cidr netip.Prefix) string {
// platformCanNetfilter reports whether the current distro/environment supports
// running iptables/nftables commands.
func platformCanNetfilter() bool {
switch distro.Get() {
switch getDistroFunc() {
case distro.Synology:
// Synology doesn't support iptables or nftables. Attempting to run it
// just blocks for a long time while it logs about failures.
@@ -1526,7 +1558,7 @@ func cleanUp(logf logger.Logf, interfaceName string) {
// of the config file being present as well as a policy rule with a specific
// priority (2000 + 1 - first interface mwan3 manages) and non-zero mark.
func checkOpenWRTUsingMWAN3() (bool, error) {
if distro.Get() != distro.OpenWrt {
if getDistroFunc() != distro.OpenWrt {
return false, nil
}

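The getDistroFunc variable introduced at the top of this file is a test seam: production code keeps calling distro.Get through it, while tests can substitute a fake. A sketch of a test exercising the new ipRules switch (the test name is hypothetical):

func TestIPRulesOnUBNT(t *testing.T) {
	old := getDistroFunc
	getDistroFunc = func() distro.Distro { return distro.UBNT }
	t.Cleanup(func() { getDistroFunc = old })

	// On UBNT the experimental single-rule set should be returned.
	if got := ipRules(); len(got) != len(ubntIPRules) {
		t.Fatalf("got %d rules; want %d", len(got), len(ubntIPRules))
	}
}
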
14
vendor/tailscale.com/wgengine/userspace.go
generated
vendored
@@ -51,7 +51,6 @@ import (
"tailscale.com/util/testenv"
"tailscale.com/util/usermetric"
"tailscale.com/version"
"tailscale.com/wgengine/capture"
"tailscale.com/wgengine/filter"
"tailscale.com/wgengine/magicsock"
"tailscale.com/wgengine/netlog"
@@ -852,8 +851,7 @@ func (e *userspaceEngine) updateActivityMapsLocked(trackNodes []key.NodePublic,
// hasOverlap checks if there is an IPPrefix which is common amongst the two
// provided slices.
func hasOverlap(aips, rips views.Slice[netip.Prefix]) bool {
for i := range aips.Len() {
aip := aips.At(i)
for _, aip := range aips.All() {
if views.SliceContains(rips, aip) {
return true
}
@@ -1236,7 +1234,7 @@ func (e *userspaceEngine) linkChange(delta *netmon.ChangeDelta) {
// and Apple platforms.
if changed {
switch runtime.GOOS {
case "linux", "android", "ios", "darwin":
case "linux", "android", "ios", "darwin", "openbsd":
e.wgLock.Lock()
dnsCfg := e.lastDNSConfig
e.wgLock.Unlock()
@@ -1329,9 +1327,9 @@ func (e *userspaceEngine) mySelfIPMatchingFamily(dst netip.Addr) (src netip.Addr
if addrs.Len() == 0 {
return zero, errors.New("no self address in netmap")
}
for i := range addrs.Len() {
if a := addrs.At(i); a.IsSingleIP() && a.Addr().BitLen() == dst.BitLen() {
return a.Addr(), nil
for _, p := range addrs.All() {
if p.IsSingleIP() && p.Addr().BitLen() == dst.BitLen() {
return p.Addr(), nil
}
}
return zero, errors.New("no self address in netmap matching address family")
@@ -1595,7 +1593,7 @@ var (
metricNumMinorChanges = clientmetric.NewCounter("wgengine_minor_changes")
)

func (e *userspaceEngine) InstallCaptureHook(cb capture.Callback) {
func (e *userspaceEngine) InstallCaptureHook(cb packet.CaptureCallback) {
e.tundev.InstallCaptureHook(cb)
e.magicConn.InstallCaptureHook(cb)
}

4
vendor/tailscale.com/wgengine/watchdog.go
generated
vendored
@@ -17,10 +17,10 @@ import (
"tailscale.com/envknob"
"tailscale.com/ipn/ipnstate"
"tailscale.com/net/dns"
"tailscale.com/net/packet"
"tailscale.com/tailcfg"
"tailscale.com/types/key"
"tailscale.com/types/netmap"
"tailscale.com/wgengine/capture"
"tailscale.com/wgengine/filter"
"tailscale.com/wgengine/router"
"tailscale.com/wgengine/wgcfg"
@@ -162,7 +162,7 @@ func (e *watchdogEngine) Done() <-chan struct{} {
return e.wrap.Done()
}

func (e *watchdogEngine) InstallCaptureHook(cb capture.Callback) {
func (e *watchdogEngine) InstallCaptureHook(cb packet.CaptureCallback) {
e.wrap.InstallCaptureHook(cb)
}

32
vendor/tailscale.com/wgengine/wgcfg/nmcfg/nmcfg.go
generated
vendored
@@ -40,8 +40,7 @@ func cidrIsSubnet(node tailcfg.NodeView, cidr netip.Prefix) bool {
if !cidr.IsSingleIP() {
return true
}
for i := range node.Addresses().Len() {
selfCIDR := node.Addresses().At(i)
for _, selfCIDR := range node.Addresses().All() {
if cidr == selfCIDR {
return false
}
@@ -82,11 +81,11 @@ func WGCfg(nm *netmap.NetworkMap, logf logger.Logf, flags netmap.WGConfigFlags,

// Logging buffers
skippedUnselected := new(bytes.Buffer)
skippedIPs := new(bytes.Buffer)
skippedSubnets := new(bytes.Buffer)
skippedExpired := new(bytes.Buffer)

for _, peer := range nm.Peers {
if peer.DiscoKey().IsZero() && peer.DERP() == "" && !peer.IsWireGuardOnly() {
if peer.DiscoKey().IsZero() && peer.HomeDERP() == 0 && !peer.IsWireGuardOnly() {
// Peer predates both DERP and active discovery; we cannot
// communicate with it.
logf("[v1] wgcfg: skipped peer %s, doesn't offer DERP or disco", peer.Key().ShortString())
@@ -96,7 +95,16 @@ func WGCfg(nm *netmap.NetworkMap, logf logger.Logf, flags netmap.WGConfigFlags,
// anyway, since control intentionally breaks node keys for
// expired peers so that we can't discover endpoints via DERP.
if peer.Expired() {
logf("[v1] wgcfg: skipped expired peer %s", peer.Key().ShortString())
if skippedExpired.Len() >= 1<<10 {
if !bytes.HasSuffix(skippedExpired.Bytes(), []byte("...")) {
skippedExpired.WriteString("...")
}
} else {
if skippedExpired.Len() > 0 {
skippedExpired.WriteString(", ")
}
fmt.Fprintf(skippedExpired, "%s/%v", peer.StableID(), peer.Key().ShortString())
}
continue
}

@@ -107,11 +115,10 @@ func WGCfg(nm *netmap.NetworkMap, logf logger.Logf, flags netmap.WGConfigFlags,
cpeer := &cfg.Peers[len(cfg.Peers)-1]

didExitNodeWarn := false
cpeer.V4MasqAddr = peer.SelfNodeV4MasqAddrForThisPeer()
cpeer.V6MasqAddr = peer.SelfNodeV6MasqAddrForThisPeer()
cpeer.V4MasqAddr = peer.SelfNodeV4MasqAddrForThisPeer().Clone()
cpeer.V6MasqAddr = peer.SelfNodeV6MasqAddrForThisPeer().Clone()
cpeer.IsJailed = peer.IsJailed()
for i := range peer.AllowedIPs().Len() {
allowedIP := peer.AllowedIPs().At(i)
for _, allowedIP := range peer.AllowedIPs().All() {
if allowedIP.Bits() == 0 && peer.StableID() != exitNode {
if didExitNodeWarn {
// Don't log about both the IPv4 /0 and IPv6 /0.
@@ -139,12 +146,11 @@ func WGCfg(nm *netmap.NetworkMap, logf logger.Logf, flags netmap.WGConfigFlags,
if skippedUnselected.Len() > 0 {
logf("[v1] wgcfg: skipped unselected default routes from: %s", skippedUnselected.Bytes())
}
if skippedIPs.Len() > 0 {
logf("[v1] wgcfg: skipped node IPs: %s", skippedIPs)
}
if skippedSubnets.Len() > 0 {
logf("[v1] wgcfg: did not accept subnet routes: %s", skippedSubnets)
}

if skippedExpired.Len() > 0 {
logf("[v1] wgcfg: skipped expired peer: %s", skippedExpired)
}
return cfg, nil
}

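The expired-peer branch above batches skipped peers into a single buffer capped at 1 KiB, terminated with one "..." marker, so a large netmap cannot flood the log with one line per peer. The same pattern factored into a standalone helper for illustration (appendCapped is hypothetical, not part of the package):

// appendCapped appends a comma-separated entry to buf until it reaches
// 1 KiB, after which it appends a single "..." marker and drops
// further entries.
func appendCapped(buf *bytes.Buffer, entry string) {
	if buf.Len() >= 1<<10 {
		if !bytes.HasSuffix(buf.Bytes(), []byte("...")) {
			buf.WriteString("...")
		}
		return
	}
	if buf.Len() > 0 {
		buf.WriteString(", ")
	}
	buf.WriteString(entry)
}
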
4
vendor/tailscale.com/wgengine/wgengine.go
generated
vendored
@@ -11,10 +11,10 @@ import (

"tailscale.com/ipn/ipnstate"
"tailscale.com/net/dns"
"tailscale.com/net/packet"
"tailscale.com/tailcfg"
"tailscale.com/types/key"
"tailscale.com/types/netmap"
"tailscale.com/wgengine/capture"
"tailscale.com/wgengine/filter"
"tailscale.com/wgengine/router"
"tailscale.com/wgengine/wgcfg"
@@ -129,5 +129,5 @@ type Engine interface {
// InstallCaptureHook registers a function to be called to capture
// packets traversing the data path. The hook can be uninstalled by
// calling this function with a nil value.
InstallCaptureHook(capture.Callback)
InstallCaptureHook(packet.CaptureCallback)
}

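With the hook type now defined in tailscale.com/net/packet rather than wgengine/capture, installing and removing a capture hook looks like the sketch below. The parameter list mirrors the old capture.Callback signature (path, timestamp, packet bytes, metadata); the packet.CapturePath type name is an assumption based on that move:

e.InstallCaptureHook(func(path packet.CapturePath, when time.Time, data []byte, meta packet.CaptureMeta) {
	// Callbacks must not retain data beyond the call; copy it if needed.
	log.Printf("capture: %d bytes at %v on path %v", len(data), when, path)
})

// Uninstall by installing a nil hook.
e.InstallCaptureHook(nil)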