Update dependencies
vendor/tailscale.com/wgengine/netstack/gro/gro.go (generated, vendored): 1 changed line
@@ -6,6 +6,7 @@ package gro
 import (
 	"bytes"
 
 	"github.com/tailscale/wireguard-go/tun"
 	"gvisor.dev/gvisor/pkg/buffer"
 	"gvisor.dev/gvisor/pkg/tcpip"

vendor/tailscale.com/wgengine/netstack/link_endpoint.go (generated, vendored): 53 changed lines
@@ -16,19 +16,27 @@ import (
 )
 
 type queue struct {
-	// TODO(jwhited): evaluate performance with mu as Mutex and/or alternative
-	// non-channel buffer.
-	c      chan *stack.PacketBuffer
-	mu     sync.RWMutex // mu guards closed
+	// TODO(jwhited): evaluate performance with a non-channel buffer.
+	c chan *stack.PacketBuffer
+
+	closeOnce sync.Once
+	closedCh  chan struct{}
+
+	mu     sync.RWMutex
 	closed bool
 }
 
 func (q *queue) Close() {
+	q.closeOnce.Do(func() {
+		close(q.closedCh)
+	})
+
 	q.mu.Lock()
 	defer q.mu.Unlock()
-	if !q.closed {
-		close(q.c)
+	if q.closed {
+		return
 	}
+	close(q.c)
 	q.closed = true
 }
 
@@ -51,26 +59,27 @@ func (q *queue) ReadContext(ctx context.Context) *stack.PacketBuffer {
 }
 
 func (q *queue) Write(pkt *stack.PacketBuffer) tcpip.Error {
 	// q holds the PacketBuffer.
 	q.mu.RLock()
 	defer q.mu.RUnlock()
 	if q.closed {
 		return &tcpip.ErrClosedForSend{}
 	}
 
-	wrote := false
 	select {
 	case q.c <- pkt.IncRef():
-		wrote = true
-	default:
-		// TODO(jwhited): reconsider/count
-		pkt.DecRef()
-	}
-
-	if wrote {
 		return nil
+	case <-q.closedCh:
+		pkt.DecRef()
+		return &tcpip.ErrClosedForSend{}
 	}
-	return &tcpip.ErrNoBufferSpace{}
 }
 
+func (q *queue) Drain() int {
+	c := 0
+	for pkt := range q.c {
+		pkt.DecRef()
+		c++
+	}
+	return c
+}
+
 func (q *queue) Num() int {
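
Note on the queue rework above: Write moves from drop-on-full (the old `default:` branch and `ErrNoBufferSpace` return) to block-until-space-or-close. The `closeOnce`/`closedCh` pair is what makes that safe: Close closes `closedCh` before taking the write lock, so a writer parked in the `select` while holding the read lock is woken by the second case and releases the lock, letting Close proceed. A minimal self-contained sketch of the same pattern (illustrative names, not the vendored API):

```go
package main

import (
	"errors"
	"fmt"
	"sync"
)

var errClosedForSend = errors.New("closed for send")

// closableQueue mirrors the vendored queue: a bounded channel plus a
// closedCh that is closed exactly once to wake writers blocked on a
// full buffer.
type closableQueue[T any] struct {
	c         chan T
	closeOnce sync.Once
	closedCh  chan struct{}

	mu     sync.RWMutex
	closed bool
}

func newClosableQueue[T any](size int) *closableQueue[T] {
	return &closableQueue[T]{
		c:        make(chan T, size),
		closedCh: make(chan struct{}),
	}
}

func (q *closableQueue[T]) Write(v T) error {
	q.mu.RLock()
	defer q.mu.RUnlock()
	if q.closed {
		return errClosedForSend
	}
	select {
	case q.c <- v: // blocks while the buffer is full
		return nil
	case <-q.closedCh: // Close wakes writers parked on a full buffer
		return errClosedForSend
	}
}

func (q *closableQueue[T]) Close() {
	// Signal first: close(q.c) alone cannot interrupt a blocked send,
	// and taking mu.Lock() first would deadlock against a writer that
	// holds the read lock while parked in select.
	q.closeOnce.Do(func() { close(q.closedCh) })
	q.mu.Lock()
	defer q.mu.Unlock()
	if q.closed {
		return
	}
	close(q.c)
	q.closed = true
}

func main() {
	q := newClosableQueue[int](1)
	_ = q.Write(1)          // fills the one-slot buffer
	go q.Close()            // unblocks the write below
	fmt.Println(q.Write(2)) // closed for send
}
```

Note also that the new `queue.Drain` ranges over `q.c`, which returns only once `close(q.c)` has run; it is evidently intended to be called after Close, unlike the old `linkEndpoint.Drain` loop (two hunks below) that relied on a nil-returning Read.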
@@ -107,7 +116,8 @@ func newLinkEndpoint(size int, mtu uint32, linkAddr tcpip.LinkAddress, supported
 	le := &linkEndpoint{
 		supportedGRO: supportedGRO,
 		q: &queue{
-			c: make(chan *stack.PacketBuffer, size),
+			c:        make(chan *stack.PacketBuffer, size),
+			closedCh: make(chan struct{}),
 		},
 		mtu:      mtu,
 		linkAddr: linkAddr,
@@ -164,12 +174,7 @@ func (l *linkEndpoint) ReadContext(ctx context.Context) *stack.PacketBuffer {
 
 // Drain removes all outbound packets from the channel and counts them.
 func (l *linkEndpoint) Drain() int {
-	c := 0
-	for pkt := l.Read(); pkt != nil; pkt = l.Read() {
-		pkt.DecRef()
-		c++
-	}
-	return c
+	return l.q.Drain()
 }
 
 // NumQueued returns the number of packets queued for outbound.

vendor/tailscale.com/wgengine/netstack/netstack.go (generated, vendored): 148 changed lines
@@ -32,7 +32,6 @@ import (
 	"gvisor.dev/gvisor/pkg/tcpip/transport/tcp"
 	"gvisor.dev/gvisor/pkg/tcpip/transport/udp"
 	"gvisor.dev/gvisor/pkg/waiter"
-	"tailscale.com/drive"
 	"tailscale.com/envknob"
 	"tailscale.com/ipn/ipnlocal"
 	"tailscale.com/metrics"
@@ -51,6 +50,7 @@ import (
 	"tailscale.com/types/netmap"
 	"tailscale.com/types/nettype"
 	"tailscale.com/util/clientmetric"
+	"tailscale.com/util/set"
 	"tailscale.com/version"
 	"tailscale.com/wgengine"
 	"tailscale.com/wgengine/filter"
@@ -174,19 +174,18 @@ type Impl struct {
 	// It can only be set before calling Start.
 	ProcessSubnets bool
 
-	ipstack       *stack.Stack
-	linkEP        *linkEndpoint
-	tundev        *tstun.Wrapper
-	e             wgengine.Engine
-	pm            *proxymap.Mapper
-	mc            *magicsock.Conn
-	logf          logger.Logf
-	dialer        *tsdial.Dialer
-	ctx           context.Context          // alive until Close
-	ctxCancel     context.CancelFunc       // called on Close
-	lb            *ipnlocal.LocalBackend   // or nil
-	dns           *dns.Manager
-	driveForLocal drive.FileSystemForLocal // or nil
+	ipstack   *stack.Stack
+	linkEP    *linkEndpoint
+	tundev    *tstun.Wrapper
+	e         wgengine.Engine
+	pm        *proxymap.Mapper
+	mc        *magicsock.Conn
+	logf      logger.Logf
+	dialer    *tsdial.Dialer
+	ctx       context.Context        // alive until Close
+	ctxCancel context.CancelFunc     // called on Close
+	lb        *ipnlocal.LocalBackend // or nil
+	dns       *dns.Manager
 
 	// loopbackPort, if non-nil, will enable Impl to loop back (dnat to
 	// <address-family-loopback>:loopbackPort) TCP & UDP flows originally
@@ -202,6 +201,8 @@ type Impl struct {
 	// updates.
 	atomicIsLocalIPFunc syncs.AtomicValue[func(netip.Addr) bool]
 
+	atomicIsVIPServiceIPFunc syncs.AtomicValue[func(netip.Addr) bool]
+
 	// forwardDialFunc, if non-nil, is the net.Dialer.DialContext-style
 	// function that is used to make outgoing connections when forwarding a
 	// TCP connection to another host (e.g. in subnet router mode).
@@ -288,7 +289,7 @@ func setTCPBufSizes(ipstack *stack.Stack) error {
 }
 
 // Create creates and populates a new Impl.
-func Create(logf logger.Logf, tundev *tstun.Wrapper, e wgengine.Engine, mc *magicsock.Conn, dialer *tsdial.Dialer, dns *dns.Manager, pm *proxymap.Mapper, driveForLocal drive.FileSystemForLocal) (*Impl, error) {
+func Create(logf logger.Logf, tundev *tstun.Wrapper, e wgengine.Engine, mc *magicsock.Conn, dialer *tsdial.Dialer, dns *dns.Manager, pm *proxymap.Mapper) (*Impl, error) {
 	if mc == nil {
 		return nil, errors.New("nil magicsock.Conn")
 	}
@@ -316,16 +317,19 @@ func Create(logf logger.Logf, tundev *tstun.Wrapper, e wgengine.Engine, mc *magi
 	if tcpipErr != nil {
 		return nil, fmt.Errorf("could not enable TCP SACK: %v", tcpipErr)
 	}
-	if runtime.GOOS == "windows" {
-		// See https://github.com/tailscale/tailscale/issues/9707
-		// Windows w/RACK performs poorly. ACKs do not appear to be handled in a
-		// timely manner, leading to spurious retransmissions and a reduced
-		// congestion window.
-		tcpRecoveryOpt := tcpip.TCPRecovery(0)
-		tcpipErr = ipstack.SetTransportProtocolOption(tcp.ProtocolNumber, &tcpRecoveryOpt)
-		if tcpipErr != nil {
-			return nil, fmt.Errorf("could not disable TCP RACK: %v", tcpipErr)
-		}
+	// See https://github.com/tailscale/tailscale/issues/9707
+	// gVisor's RACK performs poorly. ACKs do not appear to be handled in a
+	// timely manner, leading to spurious retransmissions and a reduced
+	// congestion window.
+	tcpRecoveryOpt := tcpip.TCPRecovery(0)
+	tcpipErr = ipstack.SetTransportProtocolOption(tcp.ProtocolNumber, &tcpRecoveryOpt)
+	if tcpipErr != nil {
+		return nil, fmt.Errorf("could not disable TCP RACK: %v", tcpipErr)
+	}
+	cubicOpt := tcpip.CongestionControlOption("cubic")
+	tcpipErr = ipstack.SetTransportProtocolOption(tcp.ProtocolNumber, &cubicOpt)
+	if tcpipErr != nil {
+		return nil, fmt.Errorf("could not set cubic congestion control: %v", tcpipErr)
 	}
 	err := setTCPBufSizes(ipstack)
 	if err != nil {
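
The RACK disable is now unconditional (the comment's "Windows w/RACK" becomes "gVisor's RACK"), and CUBIC is selected explicitly. `tcpip.CongestionControlOption` is a string option; gVisor's TCP currently registers "reno" and "cubic". A hedged, standalone sketch of setting and reading the option back, standing in for the stack that Create builds above:

```go
package main

import (
	"fmt"

	"gvisor.dev/gvisor/pkg/tcpip"
	"gvisor.dev/gvisor/pkg/tcpip/stack"
	"gvisor.dev/gvisor/pkg/tcpip/transport/tcp"
)

func main() {
	// A minimal stack with just TCP registered.
	ipstack := stack.New(stack.Options{
		TransportProtocols: []stack.TransportProtocolFactory{tcp.NewProtocol},
	})
	cubicOpt := tcpip.CongestionControlOption("cubic")
	if tcpipErr := ipstack.SetTransportProtocolOption(tcp.ProtocolNumber, &cubicOpt); tcpipErr != nil {
		fmt.Println("could not set cubic:", tcpipErr)
		return
	}
	// Read the option back to confirm what the stack will use.
	var cc tcpip.CongestionControlOption
	if tcpipErr := ipstack.TransportProtocolOption(tcp.ProtocolNumber, &cc); tcpipErr != nil {
		fmt.Println("could not read congestion control:", tcpipErr)
		return
	}
	fmt.Println("congestion control:", string(cc)) // cubic
}
```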
@@ -382,7 +386,6 @@ func Create(logf logger.Logf, tundev *tstun.Wrapper, e wgengine.Engine, mc *magi
 		connsInFlightByClient: make(map[netip.Addr]int),
 		packetsInFlight:       make(map[stack.TransportEndpointID]struct{}),
 		dns:                   dns,
-		driveForLocal:         driveForLocal,
 	}
 	loopbackPort, ok := envknob.LookupInt("TS_DEBUG_NETSTACK_LOOPBACK_PORT")
 	if ok && loopbackPort >= 0 && loopbackPort <= math.MaxUint16 {
@@ -390,6 +393,7 @@ func Create(logf logger.Logf, tundev *tstun.Wrapper, e wgengine.Engine, mc *magi
 	}
 	ns.ctx, ns.ctxCancel = context.WithCancel(context.Background())
 	ns.atomicIsLocalIPFunc.Store(ipset.FalseContainsIPFunc())
+	ns.atomicIsVIPServiceIPFunc.Store(ipset.FalseContainsIPFunc())
 	ns.tundev.PostFilterPacketInboundFromWireGuard = ns.injectInbound
 	ns.tundev.PreFilterPacketOutboundToWireGuardNetstackIntercept = ns.handleLocalPackets
 	stacksForMetrics.Store(ns, struct{}{})
@@ -404,6 +408,14 @@ func (ns *Impl) Close() error {
 	return nil
 }
 
+// SetTransportProtocolOption forwards to the underlying
+// [stack.Stack.SetTransportProtocolOption]. Callers are responsible for
+// ensuring that the options are valid, compatible and appropriate for their use
+// case. Compatibility may change at any version.
+func (ns *Impl) SetTransportProtocolOption(transport tcpip.TransportProtocolNumber, option tcpip.SettableTransportProtocolOption) tcpip.Error {
+	return ns.ipstack.SetTransportProtocolOption(transport, option)
+}
+
 // A single process might have several netstacks running at the same time.
 // Exported clientmetric counters will have a sum of counters of all of them.
 var stacksForMetrics syncs.Map[*Impl, struct{}]
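
With SetTransportProtocolOption exported, embedders can retune gVisor TCP after Create without access to the unexported ipstack. A hedged sketch (package and function names are illustrative; per the doc comment, whether a given option is appropriate is the caller's responsibility):

```go
package tuning // illustrative package name

import (
	"fmt"

	"gvisor.dev/gvisor/pkg/tcpip"
	"gvisor.dev/gvisor/pkg/tcpip/transport/tcp"

	"tailscale.com/wgengine/netstack"
)

// reenableRACK flips RACK-based loss detection back on, undoing the
// TCPRecovery(0) setting that Create now applies unconditionally.
func reenableRACK(ns *netstack.Impl) error {
	opt := tcpip.TCPRecovery(tcpip.TCPRACKLossDetection)
	if tcpipErr := ns.SetTransportProtocolOption(tcp.ProtocolNumber, &opt); tcpipErr != nil {
		return fmt.Errorf("could not re-enable TCP RACK: %v", tcpipErr)
	}
	return nil
}
```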
@@ -535,7 +547,7 @@ func (ns *Impl) wrapTCPProtocolHandler(h protocolHandlerFunc) protocolHandlerFun
 
 		// Dynamically reconfigure ns's subnet addresses as needed for
 		// outbound traffic.
-		if !ns.isLocalIP(localIP) {
+		if !ns.isLocalIP(localIP) && !ns.isVIPServiceIP(localIP) {
 			ns.addSubnetAddress(localIP)
 		}
 
@@ -623,11 +635,19 @@ var v4broadcast = netaddr.IPv4(255, 255, 255, 255)
 // address slice views.
 func (ns *Impl) UpdateNetstackIPs(nm *netmap.NetworkMap) {
 	var selfNode tailcfg.NodeView
+	var serviceAddrSet set.Set[netip.Addr]
 	if nm != nil {
+		vipServiceIPMap := nm.GetVIPServiceIPMap()
+		serviceAddrSet = make(set.Set[netip.Addr], len(vipServiceIPMap)*2)
+		for _, addrs := range vipServiceIPMap {
+			serviceAddrSet.AddSlice(addrs)
+		}
 		ns.atomicIsLocalIPFunc.Store(ipset.NewContainsIPFunc(nm.GetAddresses()))
+		ns.atomicIsVIPServiceIPFunc.Store(serviceAddrSet.Contains)
 		selfNode = nm.SelfNode
 	} else {
 		ns.atomicIsLocalIPFunc.Store(ipset.FalseContainsIPFunc())
+		ns.atomicIsVIPServiceIPFunc.Store(ipset.FalseContainsIPFunc())
 	}
 
 	oldPfx := make(map[netip.Prefix]bool)
@@ -646,18 +666,21 @@ func (ns *Impl) UpdateNetstackIPs(nm *netmap.NetworkMap) {
 	newPfx := make(map[netip.Prefix]bool)
 
 	if selfNode.Valid() {
-		for i := range selfNode.Addresses().Len() {
-			p := selfNode.Addresses().At(i)
+		for _, p := range selfNode.Addresses().All() {
 			newPfx[p] = true
 		}
 		if ns.ProcessSubnets {
-			for i := range selfNode.AllowedIPs().Len() {
-				p := selfNode.AllowedIPs().At(i)
+			for _, p := range selfNode.AllowedIPs().All() {
 				newPfx[p] = true
 			}
 		}
 	}
 
+	for addr := range serviceAddrSet {
+		p := netip.PrefixFrom(addr, addr.BitLen())
+		newPfx[p] = true
+	}
+
 	pfxToAdd := make(map[netip.Prefix]bool)
 	for p := range newPfx {
 		if !oldPfx[p] {
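
Each VIP service address becomes a host route here: netip.PrefixFrom(addr, addr.BitLen()) yields a /32 for IPv4 and a /128 for IPv6 (the len(vipServiceIPMap)*2 capacity hint presumably assumes one address of each family per service). For example:

```go
package main

import (
	"fmt"
	"net/netip"
)

func main() {
	// Illustrative addresses only.
	v4 := netip.MustParseAddr("100.100.100.100")
	v6 := netip.MustParseAddr("fd7a:115c:a1e0::1")
	fmt.Println(netip.PrefixFrom(v4, v4.BitLen())) // 100.100.100.100/32
	fmt.Println(netip.PrefixFrom(v6, v6.BitLen())) // fd7a:115c:a1e0::1/128
}
```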
@@ -820,6 +843,27 @@ func (ns *Impl) DialContextTCP(ctx context.Context, ipp netip.AddrPort) (*gonet.
 	return gonet.DialContextTCP(ctx, ns.ipstack, remoteAddress, ipType)
 }
 
+// DialContextTCPWithBind creates a new gonet.TCPConn connected to the specified
+// remoteAddress with its local address bound to localAddr on an available port.
+func (ns *Impl) DialContextTCPWithBind(ctx context.Context, localAddr netip.Addr, remoteAddr netip.AddrPort) (*gonet.TCPConn, error) {
+	remoteAddress := tcpip.FullAddress{
+		NIC:  nicID,
+		Addr: tcpip.AddrFromSlice(remoteAddr.Addr().AsSlice()),
+		Port: remoteAddr.Port(),
+	}
+	localAddress := tcpip.FullAddress{
+		NIC:  nicID,
+		Addr: tcpip.AddrFromSlice(localAddr.AsSlice()),
+	}
+	var ipType tcpip.NetworkProtocolNumber
+	if remoteAddr.Addr().Is4() {
+		ipType = ipv4.ProtocolNumber
+	} else {
+		ipType = ipv6.ProtocolNumber
+	}
+	return gonet.DialTCPWithBind(ctx, ns.ipstack, localAddress, remoteAddress, ipType)
+}
+
 func (ns *Impl) DialContextUDP(ctx context.Context, ipp netip.AddrPort) (*gonet.UDPConn, error) {
 	remoteAddress := &tcpip.FullAddress{
 		NIC:  nicID,
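
Usage sketch for the new bind-aware dialers (package name, function name, and addresses are illustrative; DialContextUDPWithBind in the next hunk is analogous). gonet.TCPConn satisfies net.Conn:

```go
package dialing // illustrative package name

import (
	"context"
	"fmt"
	"net/netip"

	"tailscale.com/wgengine/netstack"
)

// dialPinned dials a peer over the netstack while pinning the local
// side of the connection to one of this node's addresses.
func dialPinned(ctx context.Context, ns *netstack.Impl) error {
	local := netip.MustParseAddr("100.64.0.1")
	remote := netip.MustParseAddrPort("100.64.0.2:443")
	conn, err := ns.DialContextTCPWithBind(ctx, local, remote)
	if err != nil {
		return fmt.Errorf("dial: %w", err)
	}
	defer conn.Close()
	fmt.Println("bound local address:", conn.LocalAddr())
	return nil
}
```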
@@ -836,6 +880,28 @@ func (ns *Impl) DialContextUDP(ctx context.Context, ipp netip.AddrPort) (*gonet.
 	return gonet.DialUDP(ns.ipstack, nil, remoteAddress, ipType)
 }
 
+// DialContextUDPWithBind creates a new gonet.UDPConn. Connected to remoteAddr.
+// With its local address bound to localAddr on an available port.
+func (ns *Impl) DialContextUDPWithBind(ctx context.Context, localAddr netip.Addr, remoteAddr netip.AddrPort) (*gonet.UDPConn, error) {
+	remoteAddress := &tcpip.FullAddress{
+		NIC:  nicID,
+		Addr: tcpip.AddrFromSlice(remoteAddr.Addr().AsSlice()),
+		Port: remoteAddr.Port(),
+	}
+	localAddress := &tcpip.FullAddress{
+		NIC:  nicID,
+		Addr: tcpip.AddrFromSlice(localAddr.AsSlice()),
+	}
+	var ipType tcpip.NetworkProtocolNumber
+	if remoteAddr.Addr().Is4() {
+		ipType = ipv4.ProtocolNumber
+	} else {
+		ipType = ipv6.ProtocolNumber
+	}
+
+	return gonet.DialUDP(ns.ipstack, localAddress, remoteAddress, ipType)
+}
+
 // getInjectInboundBuffsSizes returns packet memory and a sizes slice for usage
 // when calling tstun.Wrapper.InjectInboundPacketBuffer(). These are sized with
 // consideration for MTU and GSO support on ns.linkEP. They should be recycled
@@ -957,6 +1023,12 @@ func (ns *Impl) isLocalIP(ip netip.Addr) bool {
 	return ns.atomicIsLocalIPFunc.Load()(ip)
 }
 
+// isVIPServiceIP reports whether ip is an IP address that's
+// assigned to a VIP service.
+func (ns *Impl) isVIPServiceIP(ip netip.Addr) bool {
+	return ns.atomicIsVIPServiceIPFunc.Load()(ip)
+}
+
 func (ns *Impl) peerAPIPortAtomic(ip netip.Addr) *atomic.Uint32 {
 	if ip.Is4() {
 		return &ns.peerapiPort4Atomic
@@ -973,6 +1045,7 @@ func (ns *Impl) shouldProcessInbound(p *packet.Parsed, t *tstun.Wrapper) bool {
 	// Handle incoming peerapi connections in netstack.
 	dstIP := p.Dst.Addr()
 	isLocal := ns.isLocalIP(dstIP)
+	isService := ns.isVIPServiceIP(dstIP)
 
 	// Handle TCP connection to the Tailscale IP(s) in some cases:
 	if ns.lb != nil && p.IPProto == ipproto.TCP && isLocal {
@@ -995,6 +1068,19 @@ func (ns *Impl) shouldProcessInbound(p *packet.Parsed, t *tstun.Wrapper) bool {
 			return true
 		}
 	}
+	if isService {
+		if p.IsEchoRequest() {
+			return true
+		}
+		if ns.lb != nil && p.IPProto == ipproto.TCP {
+			// An assumption holds for this to work: when tun mode is on for a service,
+			// its tcp and web are not set. This is enforced in b.setServeConfigLocked.
+			if ns.lb.ShouldInterceptVIPServiceTCPPort(p.Dst) {
+				return true
+			}
+		}
+		return false
+	}
 	if p.IPVersion == 6 && !isLocal && viaRange.Contains(dstIP) {
 		return ns.lb != nil && ns.lb.ShouldHandleViaIP(dstIP)
 	}