Update dependencies

bluepython508
2024-11-01 17:33:34 +00:00
parent 033ac0b400
commit 5cdfab398d
3596 changed files with 1033483 additions and 259 deletions

746
vendor/tailscale.com/control/controlclient/auto.go generated vendored Normal file

@@ -0,0 +1,746 @@
// Copyright (c) Tailscale Inc & AUTHORS
// SPDX-License-Identifier: BSD-3-Clause
package controlclient
import (
"context"
"errors"
"fmt"
"net/http"
"sync"
"sync/atomic"
"time"
"tailscale.com/logtail/backoff"
"tailscale.com/net/sockstats"
"tailscale.com/tailcfg"
"tailscale.com/tstime"
"tailscale.com/types/key"
"tailscale.com/types/logger"
"tailscale.com/types/netmap"
"tailscale.com/types/persist"
"tailscale.com/types/structs"
"tailscale.com/util/execqueue"
)
type LoginGoal struct {
_ structs.Incomparable
flags LoginFlags // flags to use when logging in
url string // auth url that needs to be visited
}
var _ Client = (*Auto)(nil)
// waitUnpause waits until either the client is unpaused or the Auto client is
// shut down. It reports whether the client should keep running (i.e. it's not
// closed).
func (c *Auto) waitUnpause(routineLogName string) (keepRunning bool) {
c.mu.Lock()
if !c.paused || c.closed {
defer c.mu.Unlock()
return !c.closed
}
unpaused := c.unpausedChanLocked()
c.mu.Unlock()
c.logf("%s: awaiting unpause", routineLogName)
return <-unpaused
}
// updateRoutine is responsible for informing the server of worthy changes to
// our local state. It runs in its own goroutine.
func (c *Auto) updateRoutine() {
defer close(c.updateDone)
bo := backoff.NewBackoff("updateRoutine", c.logf, 30*time.Second)
// lastUpdateGenInformed is the value of c.lastUpdateGen that we've successfully
// informed the server of.
var lastUpdateGenInformed updateGen
for {
if !c.waitUnpause("updateRoutine") {
c.logf("updateRoutine: exiting")
return
}
c.mu.Lock()
gen := c.lastUpdateGen
ctx := c.mapCtx
needUpdate := gen > 0 && gen != lastUpdateGenInformed && c.loggedIn
c.mu.Unlock()
if !needUpdate {
// Nothing to do, wait for a signal.
select {
case <-ctx.Done():
continue
case <-c.updateCh:
continue
}
}
t0 := c.clock.Now()
err := c.direct.SendUpdate(ctx)
d := time.Since(t0).Round(time.Millisecond)
if err != nil {
if ctx.Err() == nil {
c.direct.logf("lite map update error after %v: %v", d, err)
}
bo.BackOff(ctx, err)
continue
}
bo.BackOff(ctx, nil)
c.direct.logf("[v1] successful lite map update in %v", d)
lastUpdateGenInformed = gen
}
}
// atomicGen is an atomic int64 generator. It is used to generate monotonically
// increasing numbers for updateGen.
var atomicGen atomic.Int64
func nextUpdateGen() updateGen {
return updateGen(atomicGen.Add(1))
}
// updateGen is a monotonically increasing number that represents a particular
// update to the local state.
type updateGen int64
// Auto connects to a tailcontrol server for a node.
// It's a concrete implementation of the Client interface.
type Auto struct {
direct *Direct // our interface to the server APIs
clock tstime.Clock
logf logger.Logf
closed bool
updateCh chan struct{} // readable when we should inform the server of a change
observer Observer // called to update Client status; always non-nil
observerQueue execqueue.ExecQueue
unregisterHealthWatch func()
mu sync.Mutex // mutex guards the following fields
wantLoggedIn bool // whether the user wants to be logged in per last method call
urlToVisit string // the last url we were told to visit
expiry time.Time
// lastUpdateGen is the gen of the last update worth sending to the server.
lastUpdateGen updateGen
paused bool // whether we should stop making HTTP requests
unpauseWaiters []chan bool // chans that get sent true (once) on wake, or false on Shutdown
loggedIn bool // true if currently logged in
loginGoal *LoginGoal // non-nil if some login activity is desired
inMapPoll bool // true once we get the first MapResponse in a stream; false when HTTP response ends
state State // TODO(bradfitz): delete this, make it computed by method from other state
authCtx context.Context // context used for auth requests
mapCtx context.Context // context used for netmap and update requests
authCancel func() // cancel authCtx
mapCancel func() // cancel mapCtx
authDone chan struct{} // when closed, authRoutine is done
mapDone chan struct{} // when closed, mapRoutine is done
updateDone chan struct{} // when closed, updateRoutine is done
}
// New creates and starts a new Auto.
func New(opts Options) (*Auto, error) {
c, err := NewNoStart(opts)
if c != nil {
c.Start()
}
return c, err
}
// NewNoStart creates a new Auto, but without calling Start on it.
func NewNoStart(opts Options) (_ *Auto, err error) {
direct, err := NewDirect(opts)
if err != nil {
return nil, err
}
defer func() {
if err != nil {
direct.Close()
}
}()
if opts.Observer == nil {
return nil, errors.New("missing required Options.Observer")
}
if opts.Logf == nil {
opts.Logf = func(fmt string, args ...any) {}
}
if opts.Clock == nil {
opts.Clock = tstime.StdClock{}
}
c := &Auto{
direct: direct,
clock: opts.Clock,
logf: opts.Logf,
updateCh: make(chan struct{}, 1),
authDone: make(chan struct{}),
mapDone: make(chan struct{}),
updateDone: make(chan struct{}),
observer: opts.Observer,
}
c.authCtx, c.authCancel = context.WithCancel(context.Background())
c.authCtx = sockstats.WithSockStats(c.authCtx, sockstats.LabelControlClientAuto, opts.Logf)
c.mapCtx, c.mapCancel = context.WithCancel(context.Background())
c.mapCtx = sockstats.WithSockStats(c.mapCtx, sockstats.LabelControlClientAuto, opts.Logf)
c.unregisterHealthWatch = opts.HealthTracker.RegisterWatcher(direct.ReportHealthChange)
return c, nil
}
// SetPaused controls whether HTTP activity should be paused.
//
// The client can be paused and unpaused repeatedly, unlike Start and Shutdown, which can only be used once.
func (c *Auto) SetPaused(paused bool) {
c.mu.Lock()
defer c.mu.Unlock()
if paused == c.paused || c.closed {
return
}
c.logf("setPaused(%v)", paused)
c.paused = paused
if paused {
c.cancelMapCtxLocked()
c.cancelAuthCtxLocked()
return
}
for _, ch := range c.unpauseWaiters {
ch <- true
}
c.unpauseWaiters = nil
}
// Start starts the client's goroutines.
//
// It should only be called for clients created by NewNoStart.
func (c *Auto) Start() {
go c.authRoutine()
go c.mapRoutine()
go c.updateRoutine()
}
// updateControl sends a new OmitPeers, non-streaming map request (to just send
// Hostinfo/Netinfo/Endpoints info, while keeping an existing streaming response
// open).
//
// It should be called whenever there's something new to tell the server.
func (c *Auto) updateControl() {
gen := nextUpdateGen()
c.mu.Lock()
if gen < c.lastUpdateGen {
// This update is out of date.
c.mu.Unlock()
return
}
c.lastUpdateGen = gen
c.mu.Unlock()
select {
case c.updateCh <- struct{}{}:
default:
}
}
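// nextUpdateGen and updateControl together implement a coalescing idiom: a
// size-1 buffered channel absorbs bursts of "something changed" signals, and
// the monotonically increasing generation lets the consumer skip work that a
// later signal has already superseded. The standalone sketch below shows the
// same idiom in isolation; it is illustrative only and nothing in this
// package calls it.
func exampleCoalescedSignal() (signal func(), wait func() (newWork bool)) {
	notify := make(chan struct{}, 1)
	var gen, handled atomic.Int64
	signal = func() {
		gen.Add(1)
		select {
		case notify <- struct{}{}: // wake an idle consumer
		default: // a wake-up is already pending; coalesce with it
		}
	}
	wait = func() (newWork bool) {
		<-notify
		g := gen.Load()
		if g == handled.Load() {
			return false // already handled everything up to this generation
		}
		handled.Store(g)
		return true // caller sends one update covering all changes up to g
	}
	return signal, wait
}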
// cancelAuthCtxLocked is like cancelAuthCtx, but assumes the caller holds c.mu.
func (c *Auto) cancelAuthCtxLocked() {
if c.authCancel != nil {
c.authCancel()
}
if !c.closed {
c.authCtx, c.authCancel = context.WithCancel(context.Background())
c.authCtx = sockstats.WithSockStats(c.authCtx, sockstats.LabelControlClientAuto, c.logf)
}
}
// cancelMapCtxLocked is like cancelMapCtx, but assumes the caller holds c.mu.
func (c *Auto) cancelMapCtxLocked() {
if c.mapCancel != nil {
c.mapCancel()
}
if !c.closed {
c.mapCtx, c.mapCancel = context.WithCancel(context.Background())
c.mapCtx = sockstats.WithSockStats(c.mapCtx, sockstats.LabelControlClientAuto, c.logf)
}
}
// restartMap cancels the existing mapPoll and liteUpdates, and then starts a
// new one.
func (c *Auto) restartMap() {
c.mu.Lock()
c.cancelMapCtxLocked()
synced := c.inMapPoll
c.mu.Unlock()
c.logf("[v1] restartMap: synced=%v", synced)
c.updateControl()
}
func (c *Auto) authRoutine() {
defer close(c.authDone)
bo := backoff.NewBackoff("authRoutine", c.logf, 30*time.Second)
for {
if !c.waitUnpause("authRoutine") {
c.logf("authRoutine: exiting")
return
}
c.mu.Lock()
goal := c.loginGoal
ctx := c.authCtx
if goal != nil {
c.logf("[v1] authRoutine: %s; wantLoggedIn=%v", c.state, true)
} else {
c.logf("[v1] authRoutine: %s; goal=nil paused=%v", c.state, c.paused)
}
c.mu.Unlock()
report := func(err error, msg string) {
c.logf("[v1] %s: %v", msg, err)
// don't send status updates for context errors,
// since context cancelation is always on purpose.
if ctx.Err() == nil {
c.sendStatus("authRoutine-report", err, "", nil)
}
}
if goal == nil {
c.direct.health.SetAuthRoutineInError(nil)
// Wait for user to Login or Logout.
<-ctx.Done()
c.logf("[v1] authRoutine: context done.")
continue
}
c.mu.Lock()
c.urlToVisit = goal.url
if goal.url != "" {
c.state = StateURLVisitRequired
} else {
c.state = StateAuthenticating
}
c.mu.Unlock()
var url string
var err error
var f string
if goal.url != "" {
url, err = c.direct.WaitLoginURL(ctx, goal.url)
f = "WaitLoginURL"
} else {
url, err = c.direct.TryLogin(ctx, goal.flags)
f = "TryLogin"
}
if err != nil {
c.direct.health.SetAuthRoutineInError(err)
report(err, f)
bo.BackOff(ctx, err)
continue
}
if url != "" {
// goal.url ought to be empty here. However, not all control servers
// get this right, and logging about it here just generates noise.
//
// TODO(bradfitz): I don't follow that comment. Our own testcontrol
// used by tstest/integration hits this path, in fact.
if c.direct.panicOnUse {
panic("tainted client")
}
c.mu.Lock()
c.urlToVisit = url
c.loginGoal = &LoginGoal{
flags: LoginDefault,
url: url,
}
c.state = StateURLVisitRequired
c.mu.Unlock()
c.sendStatus("authRoutine-url", err, url, nil)
if goal.url == url {
// The server sent us the same URL we already tried, so back off
// to avoid a busy loop.
bo.BackOff(ctx, errors.New("login URL not changing"))
} else {
bo.BackOff(ctx, nil)
}
continue
}
// success
c.direct.health.SetAuthRoutineInError(nil)
c.mu.Lock()
c.urlToVisit = ""
c.loggedIn = true
c.loginGoal = nil
c.state = StateAuthenticated
c.mu.Unlock()
c.sendStatus("authRoutine-success", nil, "", nil)
c.restartMap()
bo.BackOff(ctx, nil)
}
}
// ExpiryForTests returns the credential expiration time, or the zero value if
// the expiration time isn't known. It's used in tests only.
func (c *Auto) ExpiryForTests() time.Time {
c.mu.Lock()
defer c.mu.Unlock()
return c.expiry
}
// DirectForTest returns the underlying direct client object.
// It's used in tests only.
func (c *Auto) DirectForTest() *Direct {
return c.direct
}
// unpausedChanLocked returns a new channel that gets sent true when the
// client is unpaused, or false on Auto.Shutdown.
//
// c.mu must be held.
func (c *Auto) unpausedChanLocked() <-chan bool {
unpaused := make(chan bool, 1)
c.unpauseWaiters = append(c.unpauseWaiters, unpaused)
return unpaused
}
// mapRoutineState is the state of Auto.mapRoutine while it's running.
type mapRoutineState struct {
c *Auto
bo *backoff.Backoff
}
var _ NetmapDeltaUpdater = mapRoutineState{}
func (mrs mapRoutineState) UpdateFullNetmap(nm *netmap.NetworkMap) {
c := mrs.c
c.mu.Lock()
ctx := c.mapCtx
c.inMapPoll = true
if c.loggedIn {
c.state = StateSynchronized
}
c.expiry = nm.Expiry
stillAuthed := c.loggedIn
c.logf("[v1] mapRoutine: netmap received: %s", c.state)
c.mu.Unlock()
if stillAuthed {
c.sendStatus("mapRoutine-got-netmap", nil, "", nm)
}
// Reset the backoff timer if we got a netmap.
mrs.bo.BackOff(ctx, nil)
}
func (mrs mapRoutineState) UpdateNetmapDelta(muts []netmap.NodeMutation) bool {
c := mrs.c
c.mu.Lock()
goodState := c.loggedIn && c.inMapPoll
ndu, canDelta := c.observer.(NetmapDeltaUpdater)
c.mu.Unlock()
if !goodState || !canDelta {
return false
}
ctx, cancel := context.WithTimeout(c.mapCtx, 2*time.Second)
defer cancel()
var ok bool
err := c.observerQueue.RunSync(ctx, func() {
ok = ndu.UpdateNetmapDelta(muts)
})
return err == nil && ok
}
// mapRoutine is responsible for keeping a read-only streaming connection to the
// control server, and keeping the netmap up to date.
func (c *Auto) mapRoutine() {
defer close(c.mapDone)
mrs := mapRoutineState{
c: c,
bo: backoff.NewBackoff("mapRoutine", c.logf, 30*time.Second),
}
for {
if !c.waitUnpause("mapRoutine") {
c.logf("mapRoutine: exiting")
return
}
c.mu.Lock()
c.logf("[v1] mapRoutine: %s", c.state)
loggedIn := c.loggedIn
ctx := c.mapCtx
c.mu.Unlock()
report := func(err error, msg string) {
c.logf("[v1] %s: %v", msg, err)
err = fmt.Errorf("%s: %w", msg, err)
// don't send status updates for context errors,
// since context cancelation is always on purpose.
if ctx.Err() == nil {
c.sendStatus("mapRoutine1", err, "", nil)
}
}
if !loggedIn {
// Wait for something interesting to happen
c.mu.Lock()
c.inMapPoll = false
c.mu.Unlock()
<-ctx.Done()
c.logf("[v1] mapRoutine: context done.")
continue
}
c.direct.health.SetOutOfPollNetMap()
err := c.direct.PollNetMap(ctx, mrs)
c.direct.health.SetOutOfPollNetMap()
c.mu.Lock()
c.inMapPoll = false
if c.state == StateSynchronized {
c.state = StateAuthenticated
}
paused := c.paused
c.mu.Unlock()
if paused {
mrs.bo.BackOff(ctx, nil)
c.logf("mapRoutine: paused")
} else {
mrs.bo.BackOff(ctx, err)
report(err, "PollNetMap")
}
}
}
func (c *Auto) AuthCantContinue() bool {
if c == nil {
return true
}
c.mu.Lock()
defer c.mu.Unlock()
return !c.loggedIn && (c.loginGoal == nil || c.loginGoal.url != "")
}
func (c *Auto) SetHostinfo(hi *tailcfg.Hostinfo) {
if hi == nil {
panic("nil Hostinfo")
}
if !c.direct.SetHostinfo(hi) {
// No changes. Don't log.
return
}
// Send new Hostinfo to server
c.updateControl()
}
func (c *Auto) SetNetInfo(ni *tailcfg.NetInfo) {
if ni == nil {
panic("nil NetInfo")
}
if !c.direct.SetNetInfo(ni) {
return
}
// Send new NetInfo to server
c.updateControl()
}
// SetTKAHead updates the TKA head hash that map-request infrastructure sends.
func (c *Auto) SetTKAHead(headHash string) {
if !c.direct.SetTKAHead(headHash) {
return
}
// Send new TKAHead to server
c.updateControl()
}
// sendStatus must not be called with c.mu held.
func (c *Auto) sendStatus(who string, err error, url string, nm *netmap.NetworkMap) {
c.mu.Lock()
if c.closed {
c.mu.Unlock()
return
}
state := c.state
loggedIn := c.loggedIn
inMapPoll := c.inMapPoll
c.mu.Unlock()
c.logf("[v1] sendStatus: %s: %v", who, state)
var p persist.PersistView
if nm != nil && loggedIn && inMapPoll {
p = c.direct.GetPersist()
} else {
// don't send netmap status, as it's misleading when we're
// not logged in.
nm = nil
}
new := Status{
URL: url,
Persist: p,
NetMap: nm,
Err: err,
state: state,
}
// Launch a new goroutine to avoid blocking the caller while the observer
// does its thing, which may result in a call back into the client.
c.observerQueue.Add(func() {
c.observer.SetControlClientStatus(c, new)
})
}
func (c *Auto) Login(flags LoginFlags) {
c.logf("client.Login(%v)", flags)
c.mu.Lock()
defer c.mu.Unlock()
if c.closed {
return
}
if c.direct != nil && c.direct.panicOnUse {
panic("tainted client")
}
c.wantLoggedIn = true
c.loginGoal = &LoginGoal{
flags: flags,
}
c.cancelMapCtxLocked()
c.cancelAuthCtxLocked()
}
var ErrClientClosed = errors.New("client closed")
func (c *Auto) Logout(ctx context.Context) error {
c.logf("client.Logout()")
c.mu.Lock()
c.wantLoggedIn = false
c.loginGoal = nil
closed := c.closed
if c.direct != nil && c.direct.panicOnUse {
panic("tainted client")
}
c.mu.Unlock()
if closed {
return ErrClientClosed
}
if err := c.direct.TryLogout(ctx); err != nil {
return err
}
c.mu.Lock()
c.loggedIn = false
c.state = StateNotAuthenticated
c.cancelAuthCtxLocked()
c.cancelMapCtxLocked()
c.mu.Unlock()
c.sendStatus("authRoutine-wantout", nil, "", nil)
return nil
}
func (c *Auto) SetExpirySooner(ctx context.Context, expiry time.Time) error {
return c.direct.SetExpirySooner(ctx, expiry)
}
// UpdateEndpoints sets the client's discovered endpoints and sends
// them to the control server if they've changed.
//
// It does not retain the provided slice.
func (c *Auto) UpdateEndpoints(endpoints []tailcfg.Endpoint) {
changed := c.direct.SetEndpoints(endpoints)
if changed {
c.updateControl()
}
}
func (c *Auto) Shutdown() {
c.mu.Lock()
if c.closed {
c.mu.Unlock()
return
}
c.logf("client.Shutdown ...")
direct := c.direct
c.closed = true
c.observerQueue.Shutdown()
c.cancelAuthCtxLocked()
c.cancelMapCtxLocked()
for _, w := range c.unpauseWaiters {
w <- false
}
c.unpauseWaiters = nil
c.mu.Unlock()
c.unregisterHealthWatch()
<-c.authDone
<-c.mapDone
<-c.updateDone
if direct != nil {
direct.Close()
}
ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
defer cancel()
c.observerQueue.Wait(ctx)
c.logf("Client.Shutdown done.")
}
// TestOnlyNodePublicKey returns the node public key currently in use.
// It is used exclusively in tests.
func (c *Auto) TestOnlyNodePublicKey() key.NodePublic {
priv := c.direct.GetPersist()
return priv.PrivateNodeKey().Public()
}
func (c *Auto) TestOnlySetAuthKey(authkey string) {
c.direct.mu.Lock()
defer c.direct.mu.Unlock()
c.direct.authKey = authkey
}
func (c *Auto) TestOnlyTimeNow() time.Time {
return c.clock.Now()
}
// SetDNS sends the SetDNSRequest request to the control plane server,
// requesting a DNS record be created or updated.
func (c *Auto) SetDNS(ctx context.Context, req *tailcfg.SetDNSRequest) error {
return c.direct.SetDNS(ctx, req)
}
func (c *Auto) DoNoiseRequest(req *http.Request) (*http.Response, error) {
return c.direct.DoNoiseRequest(req)
}
// GetSingleUseNoiseRoundTripper returns a RoundTripper that can only be used
// once (and must be used once) to make a single HTTP request over the noise
// channel to the coordination server.
//
// In addition to the RoundTripper, it returns the HTTP/2 channel's early noise
// payload, if any.
func (c *Auto) GetSingleUseNoiseRoundTripper(ctx context.Context) (http.RoundTripper, *tailcfg.EarlyNoise, error) {
return c.direct.GetSingleUseNoiseRoundTripper(ctx)
}

90
vendor/tailscale.com/control/controlclient/client.go generated vendored Normal file

@@ -0,0 +1,90 @@
// Copyright (c) Tailscale Inc & AUTHORS
// SPDX-License-Identifier: BSD-3-Clause
// Package controlclient implements the client for the Tailscale
// control plane.
//
// It handles authentication, port picking, and collects the local
// network configuration.
package controlclient
import (
"context"
"tailscale.com/tailcfg"
)
// LoginFlags is a bitmask of options to change the behavior of Client.Login
// and LocalBackend.
type LoginFlags int
const (
LoginDefault = LoginFlags(0)
LoginInteractive = LoginFlags(1 << iota) // force user login and key refresh
LoginEphemeral // set RegisterRequest.Ephemeral
// LocalBackendStartKeyOSNeutral instructs NewLocalBackend to start the
// LocalBackend without any OS-dependent StateStore StartKey behavior.
//
// See https://github.com/tailscale/tailscale/issues/6973.
LocalBackendStartKeyOSNeutral
)
// Client represents a client connection to the control server.
// Currently this is done through a pair of polling https requests in
// the Auto client, but that might change eventually.
//
// The Client must be comparable as it is used by the Observer to detect stale
// clients.
type Client interface {
// Shutdown closes this session, which should not be used any further
// afterwards.
Shutdown()
// Login begins an interactive or non-interactive login process.
// Client will eventually call the Status callback with either a
// LoginFinished flag (on success) or an auth URL (if further
// interaction is needed). It merely sets the process in motion,
// and doesn't wait for it to complete.
Login(LoginFlags)
// Logout starts a synchronous logout process. It doesn't return
// until the logout operation has been completed.
Logout(context.Context) error
// SetPaused pauses or unpauses the controlclient activity as much
// as possible, without losing its internal state, to minimize
// unnecessary network activity.
// TODO: It might be better to simply shutdown the controlclient and
// make a new one when it's time to unpause.
SetPaused(bool)
// AuthCantContinue returns whether authentication is blocked. If it
// is, you either need to visit the auth URL (previously sent in a
// Status callback) or call the Login function appropriately.
// TODO: this probably belongs in the Status itself instead.
AuthCantContinue() bool
// SetHostinfo changes the Hostinfo structure that will be sent in
// subsequent node registration requests.
// TODO: a server-side change would let us simply upload this
// in a separate http request. It has nothing to do with the rest of
// the state machine.
SetHostinfo(*tailcfg.Hostinfo)
// SetNetInfo changes the NetInfo structure that will be sent in
// subsequent node registration requests.
// TODO: a server-side change would let us simply upload this
// in a separate http request. It has nothing to do with the rest of
// the state machine.
SetNetInfo(*tailcfg.NetInfo)
// SetTKAHead changes the TKA head hash value that will be sent in
// subsequent netmap requests.
SetTKAHead(headHash string)
// UpdateEndpoints changes the Endpoint structure that will be sent
// in subsequent node registration requests.
// TODO: a server-side change would let us simply upload this
// in a separate http request. It has nothing to do with the rest of
// the state machine.
UpdateEndpoints(endpoints []tailcfg.Endpoint)
}
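// The sketch below is a minimal no-op implementation of Client, the kind of
// stub a test fake might start from. It is illustrative only and nothing in
// this package uses it; note that implementations must be comparable, which
// a pointer receiver satisfies.
type nopClient struct{}

var _ Client = (*nopClient)(nil)

func (*nopClient) Shutdown()                          {}
func (*nopClient) Login(LoginFlags)                   {}
func (*nopClient) Logout(context.Context) error       { return nil }
func (*nopClient) SetPaused(bool)                     {}
func (*nopClient) AuthCantContinue() bool             { return false }
func (*nopClient) SetHostinfo(*tailcfg.Hostinfo)      {}
func (*nopClient) SetNetInfo(*tailcfg.NetInfo)        {}
func (*nopClient) SetTKAHead(string)                  {}
func (*nopClient) UpdateEndpoints([]tailcfg.Endpoint) {}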
// UserVisibleError is an error that should be shown to users.
type UserVisibleError string
func (e UserVisibleError) Error() string { return string(e) }
func (e UserVisibleError) UserVisibleError() string { return string(e) }

1699
vendor/tailscale.com/control/controlclient/direct.go generated vendored Normal file

File diff suppressed because it is too large

832
vendor/tailscale.com/control/controlclient/map.go generated vendored Normal file

@@ -0,0 +1,832 @@
// Copyright (c) Tailscale Inc & AUTHORS
// SPDX-License-Identifier: BSD-3-Clause
package controlclient
import (
"cmp"
"context"
"encoding/json"
"fmt"
"maps"
"net"
"reflect"
"runtime"
"runtime/debug"
"slices"
"sort"
"strconv"
"sync"
"time"
"tailscale.com/control/controlknobs"
"tailscale.com/envknob"
"tailscale.com/tailcfg"
"tailscale.com/tstime"
"tailscale.com/types/key"
"tailscale.com/types/logger"
"tailscale.com/types/netmap"
"tailscale.com/types/ptr"
"tailscale.com/types/views"
"tailscale.com/util/clientmetric"
"tailscale.com/util/mak"
"tailscale.com/util/set"
"tailscale.com/wgengine/filter"
)
// mapSession holds the state over a long-polled "map" request to the
// control plane.
//
// It accepts incremental tailcfg.MapResponse values to
// netMapForResponse and returns fully inflated NetworkMaps, filling
// in the omitted data implicit from prior MapResponse values from
// within the same session (the same long-poll HTTP response to the
// one MapRequest).
type mapSession struct {
// Immutable fields.
netmapUpdater NetmapUpdater // called on changes (in addition to the optional hooks below)
controlKnobs *controlknobs.Knobs // or nil
privateNodeKey key.NodePrivate
publicNodeKey key.NodePublic
logf logger.Logf
vlogf logger.Logf
machinePubKey key.MachinePublic
altClock tstime.Clock // if nil, regular time is used
cancel context.CancelFunc // always non-nil, shuts down caller's base long poll context
// sessionAliveCtx is a Background-based context that's alive for the
// duration of the mapSession that we own the lifetime of. It's closed by
// sessionAliveCtxClose.
sessionAliveCtx context.Context
sessionAliveCtxClose context.CancelFunc // closes sessionAliveCtx
// Optional hooks, guaranteed non-nil (set to no-op funcs) by the
// newMapSession constructor. They must be overridden if desired
// before the mapSession is used.
// onDebug specifies what to do with a *tailcfg.Debug message.
onDebug func(context.Context, *tailcfg.Debug) error
// onSelfNodeChanged is called before the NetmapUpdater if the self node was
// changed.
onSelfNodeChanged func(*netmap.NetworkMap)
// Fields storing state over the course of multiple MapResponses.
lastPrintMap time.Time
lastNode tailcfg.NodeView
lastCapSet set.Set[tailcfg.NodeCapability]
peers map[tailcfg.NodeID]*tailcfg.NodeView // pointer to view (oddly). same pointers as sortedPeers.
sortedPeers []*tailcfg.NodeView // same pointers as peers, but sorted by Node.ID
lastDNSConfig *tailcfg.DNSConfig
lastDERPMap *tailcfg.DERPMap
lastUserProfile map[tailcfg.UserID]tailcfg.UserProfile
lastPacketFilterRules views.Slice[tailcfg.FilterRule] // concatenation of all namedPacketFilters
namedPacketFilters map[string]views.Slice[tailcfg.FilterRule]
lastParsedPacketFilter []filter.Match
lastSSHPolicy *tailcfg.SSHPolicy
collectServices bool
lastDomain string
lastDomainAuditLogID string
lastHealth []string
lastPopBrowserURL string
lastTKAInfo *tailcfg.TKAInfo
lastNetmapSummary string // from NetworkMap.VeryConcise
lastMaxExpiry time.Duration
}
// newMapSession returns a mostly unconfigured new mapSession.
//
// Modify its optional fields on the returned value before use.
//
// It must have its Close method called to release resources.
func newMapSession(privateNodeKey key.NodePrivate, nu NetmapUpdater, controlKnobs *controlknobs.Knobs) *mapSession {
ms := &mapSession{
netmapUpdater: nu,
controlKnobs: controlKnobs,
privateNodeKey: privateNodeKey,
publicNodeKey: privateNodeKey.Public(),
lastDNSConfig: new(tailcfg.DNSConfig),
lastUserProfile: map[tailcfg.UserID]tailcfg.UserProfile{},
// Non-nil no-op defaults, to be optionally overridden by the caller.
logf: logger.Discard,
vlogf: logger.Discard,
cancel: func() {},
onDebug: func(context.Context, *tailcfg.Debug) error { return nil },
onSelfNodeChanged: func(*netmap.NetworkMap) {},
}
ms.sessionAliveCtx, ms.sessionAliveCtxClose = context.WithCancel(context.Background())
return ms
}
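// A typical lifecycle for a mapSession: construct it, override the optional
// hooks, feed it each non-KeepAlive MapResponse from the long poll, and Close
// it when the poll ends. The sketch below is illustrative only; the real
// caller lives in the Direct client's map-polling code.
func exampleMapSessionLoop(ctx context.Context, priv key.NodePrivate, nu NetmapUpdater, responses <-chan *tailcfg.MapResponse, logf logger.Logf) error {
	ms := newMapSession(priv, nu, nil /* no controlknobs.Knobs */)
	defer ms.Close()
	ms.logf = logf
	ms.onSelfNodeChanged = func(nm *netmap.NetworkMap) {
		logf("self node changed; key expiry now %v", nm.Expiry)
	}
	for resp := range responses {
		// KeepAlive-only responses carry no state and are handled by the
		// caller; everything else flows through here.
		if err := ms.HandleNonKeepAliveMapResponse(ctx, resp); err != nil {
			return err
		}
	}
	return nil
}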
// occasionallyPrintSummary logs the summary at most once every 5 minutes. The
// summary is the NetworkMap.VeryConcise result from the last received map response.
func (ms *mapSession) occasionallyPrintSummary(summary string) {
// Occasionally print the netmap header.
// This is handy for debugging, and our logs processing
// pipeline depends on it. (TODO: Remove this dependency.)
now := ms.clock().Now()
if now.Sub(ms.lastPrintMap) < 5*time.Minute {
return
}
ms.lastPrintMap = now
ms.logf("[v1] new network map (periodic):\n%s", summary)
}
func (ms *mapSession) clock() tstime.Clock {
return cmp.Or[tstime.Clock](ms.altClock, tstime.StdClock{})
}
func (ms *mapSession) Close() {
ms.sessionAliveCtxClose()
}
// HandleNonKeepAliveMapResponse handles a non-KeepAlive MapResponse (full or
// incremental).
//
// All fields that are valid on a KeepAlive MapResponse have already been
// handled.
//
// TODO(bradfitz): make this handle all fields later. For now (2023-08-20) this
// is [re]factoring progress enough.
func (ms *mapSession) HandleNonKeepAliveMapResponse(ctx context.Context, resp *tailcfg.MapResponse) error {
if debug := resp.Debug; debug != nil {
if err := ms.onDebug(ctx, debug); err != nil {
return err
}
}
if DevKnob.StripEndpoints() {
for _, p := range resp.Peers {
p.Endpoints = nil
}
for _, p := range resp.PeersChanged {
p.Endpoints = nil
}
}
// For responses that mutate the self node, check for updated nodeAttrs.
if resp.Node != nil {
if DevKnob.StripCaps() {
resp.Node.Capabilities = nil
resp.Node.CapMap = nil
}
// If the server is old and is still sending us Capabilities instead of
// CapMap, convert it to CapMap early so the rest of the client code can
// work only in terms of CapMap.
for _, c := range resp.Node.Capabilities {
if _, ok := resp.Node.CapMap[c]; !ok {
mak.Set(&resp.Node.CapMap, c, nil)
}
}
ms.controlKnobs.UpdateFromNodeAttributes(resp.Node.CapMap)
}
// Call Node.InitDisplayNames on any changed nodes.
initDisplayNames(cmp.Or(resp.Node.View(), ms.lastNode), resp)
ms.patchifyPeersChanged(resp)
ms.updateStateFromResponse(resp)
if ms.tryHandleIncrementally(resp) {
ms.occasionallyPrintSummary(ms.lastNetmapSummary)
return nil
}
// We have to rebuild the whole netmap (lots of garbage & work downstream of
// our UpdateFullNetmap call). This is the part we tried to avoid but
// some field mutations (especially rare ones) aren't yet handled.
if runtime.GOOS == "ios" {
// Memory is tight on iOS. Free what we can while we
// can before this potential burst of in-use memory.
debug.FreeOSMemory()
}
nm := ms.netmap()
ms.lastNetmapSummary = nm.VeryConcise()
ms.occasionallyPrintSummary(ms.lastNetmapSummary)
// If the self node changed, we might need to update persist.
if resp.Node != nil {
ms.onSelfNodeChanged(nm)
}
ms.netmapUpdater.UpdateFullNetmap(nm)
return nil
}
func (ms *mapSession) tryHandleIncrementally(res *tailcfg.MapResponse) bool {
if ms.controlKnobs != nil && ms.controlKnobs.DisableDeltaUpdates.Load() {
return false
}
nud, ok := ms.netmapUpdater.(NetmapDeltaUpdater)
if !ok {
return false
}
mutations, ok := netmap.MutationsFromMapResponse(res, time.Now())
if ok && len(mutations) > 0 {
return nud.UpdateNetmapDelta(mutations)
}
return ok
}
// updateStats are some stats from updateStateFromResponse, primarily for
// testing. It's meant to be cheap enough to always compute, though. It doesn't
// allocate.
type updateStats struct {
allNew bool
added int
removed int
changed int
}
// updateStateFromResponse updates ms from resp. It takes ownership of resp.
func (ms *mapSession) updateStateFromResponse(resp *tailcfg.MapResponse) {
ms.updatePeersStateFromResponse(resp)
if resp.Node != nil {
ms.lastNode = resp.Node.View()
capSet := set.Set[tailcfg.NodeCapability]{}
for _, c := range resp.Node.Capabilities {
capSet.Add(c)
}
for c := range resp.Node.CapMap {
capSet.Add(c)
}
ms.lastCapSet = capSet
}
for _, up := range resp.UserProfiles {
ms.lastUserProfile[up.ID] = up
}
// TODO(bradfitz): clean up old user profiles? maybe not worth it.
if dm := resp.DERPMap; dm != nil {
ms.vlogf("netmap: new map contains DERP map")
// Zero-valued fields in a DERPMap mean that we're not changing
// anything and are using the previous value(s).
if ldm := ms.lastDERPMap; ldm != nil {
if dm.Regions == nil {
dm.Regions = ldm.Regions
dm.OmitDefaultRegions = ldm.OmitDefaultRegions
}
if dm.HomeParams == nil {
dm.HomeParams = ldm.HomeParams
} else if oldhh := ldm.HomeParams; oldhh != nil {
// Propagate sub-fields of HomeParams
hh := dm.HomeParams
if hh.RegionScore == nil {
hh.RegionScore = oldhh.RegionScore
}
}
}
ms.lastDERPMap = dm
}
var packetFilterChanged bool
// Older way, one big blob:
if pf := resp.PacketFilter; pf != nil {
packetFilterChanged = true
mak.Set(&ms.namedPacketFilters, "base", views.SliceOf(pf))
}
// Newer way, named chunks:
if m := resp.PacketFilters; m != nil {
packetFilterChanged = true
if v, ok := m["*"]; ok && v == nil {
ms.namedPacketFilters = nil
}
for k, v := range m {
if k == "*" {
continue
}
if v != nil {
mak.Set(&ms.namedPacketFilters, k, views.SliceOf(v))
} else {
delete(ms.namedPacketFilters, k)
}
}
}
if packetFilterChanged {
var concat []tailcfg.FilterRule
for _, v := range slices.Sorted(maps.Keys(ms.namedPacketFilters)) {
concat = ms.namedPacketFilters[v].AppendTo(concat)
}
ms.lastPacketFilterRules = views.SliceOf(concat)
var err error
ms.lastParsedPacketFilter, err = filter.MatchesFromFilterRules(concat)
if err != nil {
ms.logf("parsePacketFilter: %v", err)
}
}
if c := resp.DNSConfig; c != nil {
ms.lastDNSConfig = c
}
if p := resp.SSHPolicy; p != nil {
ms.lastSSHPolicy = p
}
if v, ok := resp.CollectServices.Get(); ok {
ms.collectServices = v
}
if resp.Domain != "" {
ms.lastDomain = resp.Domain
}
if resp.DomainDataPlaneAuditLogID != "" {
ms.lastDomainAuditLogID = resp.DomainDataPlaneAuditLogID
}
if resp.Health != nil {
ms.lastHealth = resp.Health
}
if resp.TKAInfo != nil {
ms.lastTKAInfo = resp.TKAInfo
}
if resp.MaxKeyDuration > 0 {
ms.lastMaxExpiry = resp.MaxKeyDuration
}
}
var (
patchDERPRegion = clientmetric.NewCounter("controlclient_patch_derp")
patchEndpoints = clientmetric.NewCounter("controlclient_patch_endpoints")
patchCap = clientmetric.NewCounter("controlclient_patch_capver")
patchKey = clientmetric.NewCounter("controlclient_patch_key")
patchDiscoKey = clientmetric.NewCounter("controlclient_patch_discokey")
patchOnline = clientmetric.NewCounter("controlclient_patch_online")
patchLastSeen = clientmetric.NewCounter("controlclient_patch_lastseen")
patchKeyExpiry = clientmetric.NewCounter("controlclient_patch_keyexpiry")
patchCapMap = clientmetric.NewCounter("controlclient_patch_capmap")
patchKeySignature = clientmetric.NewCounter("controlclient_patch_keysig")
patchifiedPeer = clientmetric.NewCounter("controlclient_patchified_peer")
patchifiedPeerEqual = clientmetric.NewCounter("controlclient_patchified_peer_equal")
)
// updatePeersStateFromResponse updates ms.peers and ms.sortedPeers from resp. It takes ownership of resp.
func (ms *mapSession) updatePeersStateFromResponse(resp *tailcfg.MapResponse) (stats updateStats) {
defer func() {
if stats.removed > 0 || stats.added > 0 {
ms.rebuildSorted()
}
}()
if ms.peers == nil {
ms.peers = make(map[tailcfg.NodeID]*tailcfg.NodeView)
}
if len(resp.Peers) > 0 {
// Not delta encoded.
stats.allNew = true
keep := make(map[tailcfg.NodeID]bool, len(resp.Peers))
for _, n := range resp.Peers {
keep[n.ID] = true
if vp, ok := ms.peers[n.ID]; ok {
stats.changed++
*vp = n.View()
} else {
stats.added++
ms.peers[n.ID] = ptr.To(n.View())
}
}
for id := range ms.peers {
if !keep[id] {
stats.removed++
delete(ms.peers, id)
}
}
// Peers precludes all other delta operations so just return.
return
}
for _, id := range resp.PeersRemoved {
if _, ok := ms.peers[id]; ok {
delete(ms.peers, id)
stats.removed++
}
}
for _, n := range resp.PeersChanged {
if vp, ok := ms.peers[n.ID]; ok {
stats.changed++
*vp = n.View()
} else {
stats.added++
ms.peers[n.ID] = ptr.To(n.View())
}
}
for nodeID, seen := range resp.PeerSeenChange {
if vp, ok := ms.peers[nodeID]; ok {
mut := vp.AsStruct()
if seen {
mut.LastSeen = ptr.To(clock.Now())
} else {
mut.LastSeen = nil
}
*vp = mut.View()
stats.changed++
}
}
for nodeID, online := range resp.OnlineChange {
if vp, ok := ms.peers[nodeID]; ok {
mut := vp.AsStruct()
mut.Online = ptr.To(online)
*vp = mut.View()
stats.changed++
}
}
for _, pc := range resp.PeersChangedPatch {
vp, ok := ms.peers[pc.NodeID]
if !ok {
continue
}
stats.changed++
mut := vp.AsStruct()
if pc.DERPRegion != 0 {
mut.DERP = fmt.Sprintf("%s:%v", tailcfg.DerpMagicIP, pc.DERPRegion)
patchDERPRegion.Add(1)
}
if pc.Cap != 0 {
mut.Cap = pc.Cap
patchCap.Add(1)
}
if pc.Endpoints != nil {
mut.Endpoints = pc.Endpoints
patchEndpoints.Add(1)
}
if pc.Key != nil {
mut.Key = *pc.Key
patchKey.Add(1)
}
if pc.DiscoKey != nil {
mut.DiscoKey = *pc.DiscoKey
patchDiscoKey.Add(1)
}
if v := pc.Online; v != nil {
mut.Online = ptr.To(*v)
patchOnline.Add(1)
}
if v := pc.LastSeen; v != nil {
mut.LastSeen = ptr.To(*v)
patchLastSeen.Add(1)
}
if v := pc.KeyExpiry; v != nil {
mut.KeyExpiry = *v
patchKeyExpiry.Add(1)
}
if v := pc.KeySignature; v != nil {
mut.KeySignature = v
patchKeySignature.Add(1)
}
if v := pc.CapMap; v != nil {
mut.CapMap = v
patchCapMap.Add(1)
}
*vp = mut.View()
}
return
}
// rebuildSorted rebuilds ms.sortedPeers from ms.peers. It should be called
// after any additions or removals from peers.
func (ms *mapSession) rebuildSorted() {
if ms.sortedPeers == nil {
ms.sortedPeers = make([]*tailcfg.NodeView, 0, len(ms.peers))
} else {
if len(ms.sortedPeers) > len(ms.peers) {
clear(ms.sortedPeers[len(ms.peers):])
}
ms.sortedPeers = ms.sortedPeers[:0]
}
for _, p := range ms.peers {
ms.sortedPeers = append(ms.sortedPeers, p)
}
sort.Slice(ms.sortedPeers, func(i, j int) bool {
return ms.sortedPeers[i].ID() < ms.sortedPeers[j].ID()
})
}
func (ms *mapSession) addUserProfile(nm *netmap.NetworkMap, userID tailcfg.UserID) {
if userID == 0 {
return
}
if _, dup := nm.UserProfiles[userID]; dup {
// Already populated it from a previous peer.
return
}
if up, ok := ms.lastUserProfile[userID]; ok {
nm.UserProfiles[userID] = up
}
}
var debugPatchifyPeer = envknob.RegisterBool("TS_DEBUG_PATCHIFY_PEER")
// patchifyPeersChanged mutates resp to promote PeersChanged entries to PeersChangedPatch
// when possible.
func (ms *mapSession) patchifyPeersChanged(resp *tailcfg.MapResponse) {
filtered := resp.PeersChanged[:0]
for _, n := range resp.PeersChanged {
if p, ok := ms.patchifyPeer(n); ok {
patchifiedPeer.Add(1)
if debugPatchifyPeer() {
patchj, _ := json.Marshal(p)
ms.logf("debug: patchifyPeer[ID=%v]: %s", n.ID, patchj)
}
if p != nil {
resp.PeersChangedPatch = append(resp.PeersChangedPatch, p)
} else {
patchifiedPeerEqual.Add(1)
}
} else {
filtered = append(filtered, n)
}
}
resp.PeersChanged = filtered
if len(resp.PeersChanged) == 0 {
resp.PeersChanged = nil
}
}
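// patchifyPeersChanged above uses the "filter in place" idiom: the filtered
// result reuses the backing array of the input slice (resp.PeersChanged[:0]),
// so no new allocation is made. A standalone sketch of that idiom,
// illustrative only:
func exampleFilterInPlace(nums []int, keep func(int) bool) []int {
	out := nums[:0] // shares nums's backing array
	for _, n := range nums {
		if keep(n) {
			out = append(out, n)
		}
	}
	return out // nums's prefix is now overwritten with the kept elements
}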
var nodeFields = sync.OnceValue(getNodeFields)
// getNodeFields returns the field names of tailcfg.Node.
func getNodeFields() []string {
rt := reflect.TypeFor[tailcfg.Node]()
ret := make([]string, rt.NumField())
for i := range rt.NumField() {
ret[i] = rt.Field(i).Name
}
return ret
}
// patchifyPeer returns a *tailcfg.PeerChange from the session's existing copy of
// the n.ID Node to n.
//
// It returns ok=false if a patch can't be made, (V, ok) on a delta, or (nil,
// true) if all the fields were identical (a zero change).
func (ms *mapSession) patchifyPeer(n *tailcfg.Node) (_ *tailcfg.PeerChange, ok bool) {
was, ok := ms.peers[n.ID]
if !ok {
return nil, false
}
return peerChangeDiff(*was, n)
}
// peerChangeDiff returns the difference from 'was' to 'n', if possible.
//
// It returns (nil, true) if the fields were identical.
func peerChangeDiff(was tailcfg.NodeView, n *tailcfg.Node) (_ *tailcfg.PeerChange, ok bool) {
var ret *tailcfg.PeerChange
pc := func() *tailcfg.PeerChange {
if ret == nil {
ret = new(tailcfg.PeerChange)
}
return ret
}
for _, field := range nodeFields() {
switch field {
default:
// The whole point of using reflect in this function is to panic
// here in tests if we forget to handle a new field.
panic("unhandled field: " + field)
case "computedHostIfDifferent", "ComputedName", "ComputedNameWithHost":
// Caller's responsibility to have populated these.
continue
case "DataPlaneAuditLogID":
// Not sent for peers.
case "Capabilities":
// Deprecated; see https://github.com/tailscale/tailscale/issues/11508
// And it was never sent by any known control server.
case "ID":
if was.ID() != n.ID {
return nil, false
}
case "StableID":
if was.StableID() != n.StableID {
return nil, false
}
case "Name":
if was.Name() != n.Name {
return nil, false
}
case "User":
if was.User() != n.User {
return nil, false
}
case "Sharer":
if was.Sharer() != n.Sharer {
return nil, false
}
case "Key":
if was.Key() != n.Key {
pc().Key = ptr.To(n.Key)
}
case "KeyExpiry":
if !was.KeyExpiry().Equal(n.KeyExpiry) {
pc().KeyExpiry = ptr.To(n.KeyExpiry)
}
case "KeySignature":
if !was.KeySignature().Equal(n.KeySignature) {
pc().KeySignature = slices.Clone(n.KeySignature)
}
case "Machine":
if was.Machine() != n.Machine {
return nil, false
}
case "DiscoKey":
if was.DiscoKey() != n.DiscoKey {
pc().DiscoKey = ptr.To(n.DiscoKey)
}
case "Addresses":
if !views.SliceEqual(was.Addresses(), views.SliceOf(n.Addresses)) {
return nil, false
}
case "AllowedIPs":
if !views.SliceEqual(was.AllowedIPs(), views.SliceOf(n.AllowedIPs)) {
return nil, false
}
case "Endpoints":
if !views.SliceEqual(was.Endpoints(), views.SliceOf(n.Endpoints)) {
pc().Endpoints = slices.Clone(n.Endpoints)
}
case "DERP":
if was.DERP() != n.DERP {
ip, portStr, err := net.SplitHostPort(n.DERP)
if err != nil || ip != "127.3.3.40" {
return nil, false
}
port, err := strconv.Atoi(portStr)
if err != nil || port < 1 || port > 65535 {
return nil, false
}
pc().DERPRegion = port
}
case "Hostinfo":
if !was.Hostinfo().Valid() && !n.Hostinfo.Valid() {
continue
}
if !was.Hostinfo().Valid() || !n.Hostinfo.Valid() {
return nil, false
}
if !was.Hostinfo().Equal(n.Hostinfo) {
return nil, false
}
case "Created":
if !was.Created().Equal(n.Created) {
return nil, false
}
case "Cap":
if was.Cap() != n.Cap {
pc().Cap = n.Cap
}
case "CapMap":
if len(n.CapMap) != was.CapMap().Len() {
if n.CapMap == nil {
pc().CapMap = make(tailcfg.NodeCapMap)
} else {
pc().CapMap = maps.Clone(n.CapMap)
}
break
}
was.CapMap().Range(func(k tailcfg.NodeCapability, v views.Slice[tailcfg.RawMessage]) bool {
nv, ok := n.CapMap[k]
if !ok || !views.SliceEqual(v, views.SliceOf(nv)) {
pc().CapMap = maps.Clone(n.CapMap)
return false
}
return true
})
case "Tags":
if !views.SliceEqual(was.Tags(), views.SliceOf(n.Tags)) {
return nil, false
}
case "PrimaryRoutes":
if !views.SliceEqual(was.PrimaryRoutes(), views.SliceOf(n.PrimaryRoutes)) {
return nil, false
}
case "Online":
wasOnline := was.Online()
if n.Online != nil && wasOnline != nil && *n.Online != *wasOnline {
pc().Online = ptr.To(*n.Online)
}
case "LastSeen":
wasSeen := was.LastSeen()
if n.LastSeen != nil && wasSeen != nil && !wasSeen.Equal(*n.LastSeen) {
pc().LastSeen = ptr.To(*n.LastSeen)
}
case "MachineAuthorized":
if was.MachineAuthorized() != n.MachineAuthorized {
return nil, false
}
case "UnsignedPeerAPIOnly":
if was.UnsignedPeerAPIOnly() != n.UnsignedPeerAPIOnly {
return nil, false
}
case "IsWireGuardOnly":
if was.IsWireGuardOnly() != n.IsWireGuardOnly {
return nil, false
}
case "IsJailed":
if was.IsJailed() != n.IsJailed {
return nil, false
}
case "Expired":
if was.Expired() != n.Expired {
return nil, false
}
case "SelfNodeV4MasqAddrForThisPeer":
va, vb := was.SelfNodeV4MasqAddrForThisPeer(), n.SelfNodeV4MasqAddrForThisPeer
if va == nil && vb == nil {
continue
}
if va == nil || vb == nil || *va != *vb {
return nil, false
}
case "SelfNodeV6MasqAddrForThisPeer":
va, vb := was.SelfNodeV6MasqAddrForThisPeer(), n.SelfNodeV6MasqAddrForThisPeer
if va == nil && vb == nil {
continue
}
if va == nil || vb == nil || *va != *vb {
return nil, false
}
case "ExitNodeDNSResolvers":
va, vb := was.ExitNodeDNSResolvers(), views.SliceOfViews(n.ExitNodeDNSResolvers)
if va.Len() != vb.Len() {
return nil, false
}
for i := range va.Len() {
if !va.At(i).Equal(vb.At(i)) {
return nil, false
}
}
}
}
if ret != nil {
ret.NodeID = n.ID
}
return ret, true
}
// netmap returns a fully populated NetworkMap from the last state seen from
// a call to updateStateFromResponse, filling in omitted
// information from prior MapResponse values.
func (ms *mapSession) netmap() *netmap.NetworkMap {
peerViews := make([]tailcfg.NodeView, len(ms.sortedPeers))
for i, vp := range ms.sortedPeers {
peerViews[i] = *vp
}
nm := &netmap.NetworkMap{
NodeKey: ms.publicNodeKey,
PrivateKey: ms.privateNodeKey,
MachineKey: ms.machinePubKey,
Peers: peerViews,
UserProfiles: make(map[tailcfg.UserID]tailcfg.UserProfile),
Domain: ms.lastDomain,
DomainAuditLogID: ms.lastDomainAuditLogID,
DNS: *ms.lastDNSConfig,
PacketFilter: ms.lastParsedPacketFilter,
PacketFilterRules: ms.lastPacketFilterRules,
SSHPolicy: ms.lastSSHPolicy,
CollectServices: ms.collectServices,
DERPMap: ms.lastDERPMap,
ControlHealth: ms.lastHealth,
TKAEnabled: ms.lastTKAInfo != nil && !ms.lastTKAInfo.Disabled,
MaxKeyDuration: ms.lastMaxExpiry,
}
if ms.lastTKAInfo != nil && ms.lastTKAInfo.Head != "" {
if err := nm.TKAHead.UnmarshalText([]byte(ms.lastTKAInfo.Head)); err != nil {
ms.logf("error unmarshalling TKAHead: %v", err)
nm.TKAEnabled = false
}
}
if node := ms.lastNode; node.Valid() {
nm.SelfNode = node
nm.Expiry = node.KeyExpiry()
nm.Name = node.Name()
nm.AllCaps = ms.lastCapSet
}
ms.addUserProfile(nm, nm.User())
for _, peer := range peerViews {
ms.addUserProfile(nm, peer.Sharer())
ms.addUserProfile(nm, peer.User())
}
if DevKnob.ForceProxyDNS() {
nm.DNS.Proxied = true
}
return nm
}

406
vendor/tailscale.com/control/controlclient/noise.go generated vendored Normal file

@@ -0,0 +1,406 @@
// Copyright (c) Tailscale Inc & AUTHORS
// SPDX-License-Identifier: BSD-3-Clause
package controlclient
import (
"bytes"
"cmp"
"context"
"encoding/json"
"errors"
"math"
"net/http"
"net/url"
"sync"
"time"
"golang.org/x/net/http2"
"tailscale.com/control/controlhttp"
"tailscale.com/envknob"
"tailscale.com/health"
"tailscale.com/internal/noiseconn"
"tailscale.com/net/dnscache"
"tailscale.com/net/netmon"
"tailscale.com/net/tsdial"
"tailscale.com/tailcfg"
"tailscale.com/tstime"
"tailscale.com/types/key"
"tailscale.com/types/logger"
"tailscale.com/util/mak"
"tailscale.com/util/multierr"
"tailscale.com/util/singleflight"
"tailscale.com/util/testenv"
)
// NoiseClient provides an http.Client to connect to tailcontrol over
// the ts2021 protocol.
type NoiseClient struct {
// Client is an HTTP client to talk to the coordination server.
// It automatically makes a new Noise connection as needed.
// It does not support node key proofs. To do that, call
// noiseClient.getConn instead to make a connection.
*http.Client
// h2t is the HTTP/2 transport we use a bit to create new
// *http2.ClientConns. We don't use its connection pool and we don't use its
// dialing. We use it for exactly one reason: its idle timeout that can only
// be configured via the HTTP/1 config. And then we call NewClientConn (with
// an existing Noise connection) on the http2.Transport which sets up an
// http2.ClientConn using that idle timeout from an http1.Transport.
h2t *http2.Transport
// sfDial ensures that two concurrent requests for a noise connection only
// produce one shared one between the two callers.
sfDial singleflight.Group[struct{}, *noiseconn.Conn]
dialer *tsdial.Dialer
dnsCache *dnscache.Resolver
privKey key.MachinePrivate
serverPubKey key.MachinePublic
host string // the host part of serverURL
httpPort string // the default port to dial
httpsPort string // the fallback Noise-over-https port or empty if none
// dialPlan optionally returns a ControlDialPlan previously received
// from the control server; either the function or the return value can
// be nil.
dialPlan func() *tailcfg.ControlDialPlan
logf logger.Logf
netMon *netmon.Monitor
health *health.Tracker
// mu only protects the following variables.
mu sync.Mutex
closed bool
last *noiseconn.Conn // or nil
nextID int
connPool map[int]*noiseconn.Conn // active connections not yet closed; see noiseconn.Conn.Close
}
// NoiseOpts contains options for the NewNoiseClient function. All fields are
// required unless otherwise specified.
type NoiseOpts struct {
// PrivKey is this node's private key.
PrivKey key.MachinePrivate
// ServerPubKey is the public key of the server.
ServerPubKey key.MachinePublic
// ServerURL is the URL of the server to connect to.
ServerURL string
// Dialer's SystemDial function is used to connect to the server.
Dialer *tsdial.Dialer
// DNSCache is the caching Resolver to use to connect to the server.
//
// This field can be nil.
DNSCache *dnscache.Resolver
// Logf is the log function to use. This field can be nil.
Logf logger.Logf
// NetMon is the network monitor that, if set, will be used to get the
// network interface state. This field can be nil; if so, the current
// state will be looked up dynamically.
NetMon *netmon.Monitor
// HealthTracker, if non-nil, is the health tracker to use.
HealthTracker *health.Tracker
// DialPlan, if set, is a function that should return an explicit plan
// on how to connect to the server.
DialPlan func() *tailcfg.ControlDialPlan
}
// controlIsPlaintext is whether we should assume that the controlplane is only accessible
// over plaintext HTTP (as the first hop, before the ts2021 encryption begins).
// This is used by some tests which don't have a real TLS certificate.
var controlIsPlaintext = envknob.RegisterBool("TS_CONTROL_IS_PLAINTEXT_HTTP")
// NewNoiseClient returns a new NoiseClient for the provided server and machine key.
// serverURL is of the form https://<host>:<port> (no trailing slash).
//
// netMon may be nil, if non-nil it's used to do faster interface lookups.
// dialPlan may be nil
func NewNoiseClient(opts NoiseOpts) (*NoiseClient, error) {
u, err := url.Parse(opts.ServerURL)
if err != nil {
return nil, err
}
var httpPort string
var httpsPort string
if port := u.Port(); port != "" {
// If there is an explicit port specified, trust the scheme and hope for the best
if u.Scheme == "http" {
httpPort = port
httpsPort = "443"
if (testenv.InTest() || controlIsPlaintext()) && (u.Hostname() == "127.0.0.1" || u.Hostname() == "localhost") {
httpsPort = ""
}
} else {
httpPort = "80"
httpsPort = port
}
} else {
// Otherwise, use the standard ports
httpPort = "80"
httpsPort = "443"
}
np := &NoiseClient{
serverPubKey: opts.ServerPubKey,
privKey: opts.PrivKey,
host: u.Hostname(),
httpPort: httpPort,
httpsPort: httpsPort,
dialer: opts.Dialer,
dnsCache: opts.DNSCache,
dialPlan: opts.DialPlan,
logf: opts.Logf,
netMon: opts.NetMon,
health: opts.HealthTracker,
}
// Create the HTTP/2 Transport using a net/http.Transport
// (which only does HTTP/1) because it's the only way to
// configure certain properties on the http2.Transport. But we
// never actually use the net/http.Transport for any HTTP/1
// requests.
h2Transport, err := http2.ConfigureTransports(&http.Transport{
IdleConnTimeout: time.Minute,
})
if err != nil {
return nil, err
}
np.h2t = h2Transport
np.Client = &http.Client{Transport: np}
return np, nil
}
// GetSingleUseRoundTripper returns a RoundTripper that can only be used once
// (and must be used once) to make a single HTTP request over the noise channel
// to the coordination server.
//
// In addition to the RoundTripper, it returns the HTTP/2 channel's early noise
// payload, if any.
func (nc *NoiseClient) GetSingleUseRoundTripper(ctx context.Context) (http.RoundTripper, *tailcfg.EarlyNoise, error) {
for tries := 0; tries < 3; tries++ {
conn, err := nc.getConn(ctx)
if err != nil {
return nil, nil, err
}
ok, earlyPayloadMaybeNil, err := conn.ReserveNewRequest(ctx)
if err != nil {
return nil, nil, err
}
if ok {
return conn, earlyPayloadMaybeNil, nil
}
}
return nil, nil, errors.New("[unexpected] failed to reserve a request on a connection")
}
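// Usage sketch for GetSingleUseRoundTripper (illustrative only): reserve a
// request slot on the underlying Noise connection, then perform exactly one
// HTTP request with the returned RoundTripper.
func exampleSingleUseRequest(ctx context.Context, nc *NoiseClient, path string) (*http.Response, error) {
	rt, earlyNoise, err := nc.GetSingleUseRoundTripper(ctx)
	if err != nil {
		return nil, err
	}
	_ = earlyNoise // when non-nil, this is data the server sent before the HTTP/2 stream opened
	req, err := http.NewRequestWithContext(ctx, "GET", "https://"+nc.host+path, nil)
	if err != nil {
		return nil, err
	}
	return rt.RoundTrip(req)
}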
// contextErr is an error that wraps another error and is used to indicate that
// the error was because a context expired.
type contextErr struct {
err error
}
func (e contextErr) Error() string {
return e.err.Error()
}
func (e contextErr) Unwrap() error {
return e.err
}
// getConn returns a noiseconn.Conn that can be used to make requests to the
// coordination server. It may return a cached connection or create a new one.
// Dials are singleflighted, so concurrent calls to getConn may only dial once.
// As such, context values may not be respected as there are no guarantees that
// the context passed to getConn is the same as the context passed to dial.
func (nc *NoiseClient) getConn(ctx context.Context) (*noiseconn.Conn, error) {
nc.mu.Lock()
if last := nc.last; last != nil && last.CanTakeNewRequest() {
nc.mu.Unlock()
return last, nil
}
nc.mu.Unlock()
for {
// We singleflight the dial to avoid making multiple connections; however,
// that means that we can't simply cancel the dial if the context is
// canceled. Instead, we have to additionally check that the context
// which was canceled is our context and retry if our context is still
// valid.
conn, err, _ := nc.sfDial.Do(struct{}{}, func() (*noiseconn.Conn, error) {
c, err := nc.dial(ctx)
if err != nil {
if ctx.Err() != nil {
return nil, contextErr{ctx.Err()}
}
return nil, err
}
return c, nil
})
var ce contextErr
if err == nil || !errors.As(err, &ce) {
return conn, err
}
if ctx.Err() == nil {
// The dial failed because of a context error, but our context
// is still valid. Retry.
continue
}
// The dial failed because our context was canceled. Return the
// underlying error.
return nil, ce.Unwrap()
}
}
func (nc *NoiseClient) RoundTrip(req *http.Request) (*http.Response, error) {
ctx := req.Context()
conn, err := nc.getConn(ctx)
if err != nil {
return nil, err
}
return conn.RoundTrip(req)
}
// connClosed removes the connection with the provided ID from the pool
// of active connections.
func (nc *NoiseClient) connClosed(id int) {
nc.mu.Lock()
defer nc.mu.Unlock()
conn := nc.connPool[id]
if conn != nil {
delete(nc.connPool, id)
if nc.last == conn {
nc.last = nil
}
}
}
// Close closes all the underlying noise connections.
// It is a no-op and returns nil if the connection is already closed.
func (nc *NoiseClient) Close() error {
nc.mu.Lock()
nc.closed = true
conns := nc.connPool
nc.connPool = nil
nc.mu.Unlock()
var errors []error
for _, c := range conns {
if err := c.Close(); err != nil {
errors = append(errors, err)
}
}
return multierr.New(errors...)
}
// dial opens a new connection to tailcontrol, fetching the server noise key
// if not cached.
func (nc *NoiseClient) dial(ctx context.Context) (*noiseconn.Conn, error) {
nc.mu.Lock()
connID := nc.nextID
nc.nextID++
nc.mu.Unlock()
if tailcfg.CurrentCapabilityVersion > math.MaxUint16 {
// Panic, because a test should have started failing several
// thousand version numbers before getting to this point.
panic("capability version is too high to fit in the wire protocol")
}
var dialPlan *tailcfg.ControlDialPlan
if nc.dialPlan != nil {
dialPlan = nc.dialPlan()
}
// If we have a dial plan, then set our timeout as slightly longer than
// the maximum amount of time contained therein; we assume that
// explicit instructions on timeouts are more useful than a single
// hard-coded timeout.
//
// The default value of 5 is chosen so that, when there's no dial plan,
// we retain the previous behaviour of 10 seconds end-to-end timeout.
timeoutSec := 5.0
if dialPlan != nil {
for _, c := range dialPlan.Candidates {
if v := c.DialStartDelaySec + c.DialTimeoutSec; v > timeoutSec {
timeoutSec = v
}
}
}
// After we establish a connection, we need some time to actually
// upgrade it into a Noise connection. With a ballpark worst-case RTT
// of 1000ms, give ourselves an extra 5 seconds to complete the
// handshake.
timeoutSec += 5
// Be extremely defensive and ensure that the timeout is in the range
// [5, 60] seconds (e.g. if we accidentally get a negative number).
if timeoutSec > 60 {
timeoutSec = 60
} else if timeoutSec < 5 {
timeoutSec = 5
}
timeout := time.Duration(timeoutSec * float64(time.Second))
ctx, cancel := context.WithTimeout(ctx, timeout)
defer cancel()
clientConn, err := (&controlhttp.Dialer{
Hostname: nc.host,
HTTPPort: nc.httpPort,
HTTPSPort: cmp.Or(nc.httpsPort, controlhttp.NoPort),
MachineKey: nc.privKey,
ControlKey: nc.serverPubKey,
ProtocolVersion: uint16(tailcfg.CurrentCapabilityVersion),
Dialer: nc.dialer.SystemDial,
DNSCache: nc.dnsCache,
DialPlan: dialPlan,
Logf: nc.logf,
NetMon: nc.netMon,
HealthTracker: nc.health,
Clock: tstime.StdClock{},
}).Dial(ctx)
if err != nil {
return nil, err
}
ncc, err := noiseconn.New(clientConn.Conn, nc.h2t, connID, nc.connClosed)
if err != nil {
return nil, err
}
nc.mu.Lock()
if nc.closed {
nc.mu.Unlock()
ncc.Close() // Needs to be called without holding the lock.
return nil, errors.New("noise client closed")
}
defer nc.mu.Unlock()
mak.Set(&nc.connPool, connID, ncc)
nc.last = ncc
return ncc, nil
}
// post does a POST to the control server at the given path, JSON-encoding body.
// The provided nodeKey is an optional load balancing hint.
func (nc *NoiseClient) post(ctx context.Context, path string, nodeKey key.NodePublic, body any) (*http.Response, error) {
jbody, err := json.Marshal(body)
if err != nil {
return nil, err
}
req, err := http.NewRequestWithContext(ctx, "POST", "https://"+nc.host+path, bytes.NewReader(jbody))
if err != nil {
return nil, err
}
addLBHeader(req, nodeKey)
req.Header.Set("Content-Type", "application/json")
conn, err := nc.getConn(ctx)
if err != nil {
return nil, err
}
return conn.RoundTrip(req)
}

42
vendor/tailscale.com/control/controlclient/sign.go generated vendored Normal file

@@ -0,0 +1,42 @@
// Copyright (c) Tailscale Inc & AUTHORS
// SPDX-License-Identifier: BSD-3-Clause
package controlclient
import (
"crypto"
"errors"
"fmt"
"time"
"tailscale.com/tailcfg"
"tailscale.com/types/key"
)
var (
errNoCertStore = errors.New("no certificate store")
errCertificateNotConfigured = errors.New("no certificate subject configured")
errUnsupportedSignatureVersion = errors.New("unsupported signature version")
)
// HashRegisterRequest generates the hash required to sign or verify a
// tailcfg.RegisterRequest.
func HashRegisterRequest(
version tailcfg.SignatureType, ts time.Time, serverURL string, deviceCert []byte,
serverPubKey, machinePubKey key.MachinePublic) ([]byte, error) {
h := crypto.SHA256.New()
// hash.Hash.Write never returns an error, so we don't check for one here.
switch version {
case tailcfg.SignatureV1:
fmt.Fprintf(h, "%s%s%s%s%s",
ts.UTC().Format(time.RFC3339), serverURL, deviceCert, serverPubKey.ShortString(), machinePubKey.ShortString())
case tailcfg.SignatureV2:
fmt.Fprintf(h, "%s%s%s%s%s",
ts.UTC().Format(time.RFC3339), serverURL, deviceCert, serverPubKey, machinePubKey)
default:
return nil, errUnsupportedSignatureVersion
}
return h.Sum(nil), nil
}
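// Usage sketch (illustrative only): the signer and the verifier must hash
// identical inputs, so the timestamp written into the RegisterRequest is the
// same value passed to HashRegisterRequest. The certificate bytes and keys
// are placeholders supplied by the caller.
func exampleRegisterRequestHash(req *tailcfg.RegisterRequest, serverURL string, deviceCert []byte, serverPubKey, machinePubKey key.MachinePublic) ([]byte, error) {
	ts := time.Now().UTC()
	req.Timestamp = &ts
	return HashRegisterRequest(tailcfg.SignatureV2, ts, serverURL, deviceCert, serverPubKey, machinePubKey)
}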


@@ -0,0 +1,205 @@
// Copyright (c) Tailscale Inc & AUTHORS
// SPDX-License-Identifier: BSD-3-Clause
//go:build windows
// darwin,cgo is also supported by certstore but untested, so it is not enabled.
package controlclient
import (
"crypto"
"crypto/rsa"
"crypto/x509"
"errors"
"fmt"
"sync"
"time"
"github.com/tailscale/certstore"
"tailscale.com/tailcfg"
"tailscale.com/types/key"
"tailscale.com/util/syspolicy"
)
var getMachineCertificateSubjectOnce struct {
sync.Once
v string // Subject of machine certificate to search for
}
// getMachineCertificateSubject returns the exact name of a Subject that needs
// to be present in an identity's certificate chain to sign a RegisterRequest,
// formatted as per pkix.Name.String(). The Subject may be that of the identity
// itself, an intermediate CA or the root CA.
//
// If getMachineCertificateSubject() returns "" then no lookup will occur and
// each RegisterRequest will be unsigned.
//
// Example: "CN=Tailscale Inc Test Root CA,OU=Tailscale Inc Test Certificate Authority,O=Tailscale Inc,ST=ON,C=CA"
func getMachineCertificateSubject() string {
getMachineCertificateSubjectOnce.Do(func() {
getMachineCertificateSubjectOnce.v, _ = syspolicy.GetString(syspolicy.MachineCertificateSubject, "")
})
return getMachineCertificateSubjectOnce.v
}
var (
errNoMatch = errors.New("no matching certificate")
errBadRequest = errors.New("malformed request")
)
func isSupportedCertificate(cert *x509.Certificate) bool {
return cert.PublicKeyAlgorithm == x509.RSA
}
func isSubjectInChain(subject string, chain []*x509.Certificate) bool {
if len(chain) == 0 || chain[0] == nil {
return false
}
for _, c := range chain {
if c == nil {
continue
}
if c.Subject.String() == subject {
return true
}
}
return false
}
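// selectIdentityFromSlice returns the identity from ids whose certificate
// chain contains the given subject, whose leaf certificate uses a supported
// (RSA) key and is valid at time now, and whose leaf was issued most
// recently. If no identity qualifies, it returns nil values.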
func selectIdentityFromSlice(subject string, ids []certstore.Identity, now time.Time) (certstore.Identity, []*x509.Certificate) {
var bestCandidate struct {
id certstore.Identity
chain []*x509.Certificate
}
for _, id := range ids {
chain, err := id.CertificateChain()
if err != nil {
continue
}
if len(chain) < 1 {
continue
}
if !isSupportedCertificate(chain[0]) {
continue
}
if now.Before(chain[0].NotBefore) || now.After(chain[0].NotAfter) {
// Certificate is not valid at this time
continue
}
if !isSubjectInChain(subject, chain) {
continue
}
// Select the most recently issued certificate. If there is a tie, pick
// one arbitrarily.
if len(bestCandidate.chain) > 0 && bestCandidate.chain[0].NotBefore.After(chain[0].NotBefore) {
continue
}
bestCandidate.id = id
bestCandidate.chain = chain
}
return bestCandidate.id, bestCandidate.chain
}
// findIdentity locates an identity from the Windows or Darwin certificate
// store. It returns an identity with a matching Subject anywhere in its
// certificate chain, so it is possible to search for the leaf certificate,
// intermediate CA or root CA; if several identities match, the one with the
// most recently issued leaf certificate is chosen. If err is nil then the
// returned identity will never be nil (if no identity is found, the error
// errNoMatch will be returned). If an identity is returned then its
// certificate chain is also returned.
func findIdentity(subject string, st certstore.Store) (certstore.Identity, []*x509.Certificate, error) {
ids, err := st.Identities()
if err != nil {
return nil, nil, err
}
selected, chain := selectIdentityFromSlice(subject, ids, clock.Now())
for _, id := range ids {
if id != selected {
id.Close()
}
}
if selected == nil {
return nil, nil, errNoMatch
}
return selected, chain, nil
}
// signRegisterRequest looks for a suitable machine identity from the local
// system certificate store, and if one is found, signs the RegisterRequest
// using that identity's private key. In addition to the signature, the full
// certificate chain is included so that the control server can validate the
// certificate from a copy of the root CA's certificate.
func signRegisterRequest(req *tailcfg.RegisterRequest, serverURL string, serverPubKey, machinePubKey key.MachinePublic) (err error) {
defer func() {
if err != nil {
err = fmt.Errorf("signRegisterRequest: %w", err)
}
}()
if req.Timestamp == nil {
return errBadRequest
}
machineCertificateSubject := getMachineCertificateSubject()
if machineCertificateSubject == "" {
return errCertificateNotConfigured
}
st, err := certstore.Open(certstore.System)
if err != nil {
return fmt.Errorf("open cert store: %w", err)
}
defer st.Close()
id, chain, err := findIdentity(machineCertificateSubject, st)
if err != nil {
return fmt.Errorf("find identity: %w", err)
}
defer id.Close()
signer, err := id.Signer()
if err != nil {
return fmt.Errorf("create signer: %w", err)
}
cl := 0
for _, c := range chain {
cl += len(c.Raw)
}
req.DeviceCert = make([]byte, 0, cl)
for _, c := range chain {
req.DeviceCert = append(req.DeviceCert, c.Raw...)
}
req.SignatureType = tailcfg.SignatureV2
h, err := HashRegisterRequest(req.SignatureType, req.Timestamp.UTC(), serverURL, req.DeviceCert, serverPubKey, machinePubKey)
if err != nil {
return fmt.Errorf("hash: %w", err)
}
req.Signature, err = signer.Sign(nil, h, &rsa.PSSOptions{
SaltLength: rsa.PSSSaltLengthEqualsHash,
Hash: crypto.SHA256,
})
if err != nil {
return fmt.Errorf("sign: %w", err)
}
return nil
}
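Because req.DeviceCert is simply the chain's raw DER certificates concatenated, the receiving side can split it back apart with x509.ParseCertificates. Below is a hedged sketch of that decoding step; deviceChainFromRequest is hypothetical, and how the control server actually anchors trust in the chain is out of scope here.

func deviceChainFromRequest(req *tailcfg.RegisterRequest) ([]*x509.Certificate, error) {
	certs, err := x509.ParseCertificates(req.DeviceCert)
	if err != nil {
		return nil, fmt.Errorf("parse device cert chain: %w", err)
	}
	if len(certs) == 0 {
		return nil, errors.New("empty device cert chain")
	}
	// certs[0] is the leaf whose key produced req.Signature; the remaining
	// certificates let the verifier walk up to a CA it already trusts.
	return certs, nil
}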

16
vendor/tailscale.com/control/controlclient/sign_unsupported.go generated vendored Normal file
View File

@@ -0,0 +1,16 @@
// Copyright (c) Tailscale Inc & AUTHORS
// SPDX-License-Identifier: BSD-3-Clause

//go:build !windows

package controlclient

import (
	"tailscale.com/tailcfg"
	"tailscale.com/types/key"
)

// signRegisterRequest on unsupported platforms always returns errNoCertStore.
func signRegisterRequest(req *tailcfg.RegisterRequest, serverURL string, serverPubKey, machinePubKey key.MachinePublic) error {
	return errNoCertStore
}
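The errNoCertStore and errCertificateNotConfigured sentinels exist so callers can treat "this node can't sign" as a normal, non-fatal condition. The helper below is a hypothetical caller pattern, not a copy of how this package's real caller handles these errors; it assumes the errors and tailscale.com/types/logger imports.

func maybeSignRegisterRequest(logf logger.Logf, req *tailcfg.RegisterRequest,
	serverURL string, serverPubKey, machinePubKey key.MachinePublic) {
	switch err := signRegisterRequest(req, serverURL, serverPubKey, machinePubKey); {
	case err == nil:
		// Signed successfully; req.Signature and req.DeviceCert are populated.
	case errors.Is(err, errNoCertStore), errors.Is(err, errCertificateNotConfigured):
		// Signing simply isn't available on this platform or node; proceed unsigned.
	default:
		logf("signRegisterRequest: %v", err)
	}
}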

125
vendor/tailscale.com/control/controlclient/status.go generated vendored Normal file
View File

@@ -0,0 +1,125 @@
// Copyright (c) Tailscale Inc & AUTHORS
// SPDX-License-Identifier: BSD-3-Clause

package controlclient

import (
	"encoding/json"
	"fmt"
	"reflect"

	"tailscale.com/types/netmap"
	"tailscale.com/types/persist"
	"tailscale.com/types/structs"
)
// State is the high-level state of the client. It is used only in
// unit tests for proper sequencing; don't depend on it anywhere else.
//
// TODO(apenwarr): eliminate the state, as it's now obsolete.
//
// apenwarr: Historical note: controlclient.Auto was originally
// intended to be the state machine for the whole tailscale client, but that
// turned out to not be the right abstraction layer, and it moved to
// ipn.Backend. Since ipn.Backend now has a state machine, it would be
// much better if controlclient could be a simple stateless API. But the
// current server-side API (two interlocking polling https calls) makes that
// very hard to implement. A server side API change could untangle this and
// remove all the statefulness.
type State int
const (
StateNew = State(iota)
StateNotAuthenticated
StateAuthenticating
StateURLVisitRequired
StateAuthenticated
StateSynchronized // connected and received map update
)
func (s State) AppendText(b []byte) ([]byte, error) {
return append(b, s.String()...), nil
}
func (s State) MarshalText() ([]byte, error) {
return []byte(s.String()), nil
}
func (s State) String() string {
switch s {
case StateNew:
return "state:new"
case StateNotAuthenticated:
return "state:not-authenticated"
case StateAuthenticating:
return "state:authenticating"
case StateURLVisitRequired:
return "state:url-visit-required"
case StateAuthenticated:
return "state:authenticated"
case StateSynchronized:
return "state:synchronized"
default:
return fmt.Sprintf("state:unknown:%d", int(s))
}
}
type Status struct {
_ structs.Incomparable
// Err, if non-nil, is an error that occurred while logging in.
//
// If it's of type UserVisibleError then it's meant to be shown to users in
// their Tailscale client. Otherwise it's just logged to tailscaled's logs.
Err error
// URL, if non-empty, is the interactive URL to visit to finish logging in.
URL string
// NetMap is the latest server-pushed state of the tailnet network.
NetMap *netmap.NetworkMap
// Persist, when Valid, is the locally persisted configuration.
//
// TODO(bradfitz,maisem): clarify this.
Persist persist.PersistView
// state is the internal state. It should not be exposed outside this
// package, but we have some automated tests elsewhere that need to
// use it via the StateForTest accessor.
// TODO(apenwarr): Unexport or remove these.
state State
}
// LoginFinished reports whether the controlclient is in its "StateAuthenticated"
// state where it's in a happy register state but not yet in a map poll.
//
// TODO(bradfitz): delete this and everything around Status.state.
func (s *Status) LoginFinished() bool { return s.state == StateAuthenticated }
// StateForTest returns the internal state of s for tests only.
func (s *Status) StateForTest() State { return s.state }
// SetStateForTest sets the internal state of s for tests only.
func (s *Status) SetStateForTest(state State) { s.state = state }
// Equal reports whether s and s2 are equal.
func (s *Status) Equal(s2 *Status) bool {
if s == nil && s2 == nil {
return true
}
return s != nil && s2 != nil &&
s.Err == s2.Err &&
s.URL == s2.URL &&
s.state == s2.state &&
reflect.DeepEqual(s.Persist, s2.Persist) &&
reflect.DeepEqual(s.NetMap, s2.NetMap)
}
func (s Status) String() string {
b, err := json.MarshalIndent(s, "", "\t")
if err != nil {
panic(err)
}
return s.state.String() + " " + string(b)
}
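As a closing illustration, here is a sketch of how code receiving these Status values might branch on them. describeStatus is invented for the example and is not part of the package; the tailscale.com/types/logger import is assumed.

func describeStatus(logf logger.Logf, st *Status) {
	switch {
	case st.Err != nil:
		logf("login error: %v", st.Err)
	case st.URL != "":
		logf("visit %s to finish logging in", st.URL)
	case st.LoginFinished():
		logf("authenticated, waiting for first netmap")
	case st.NetMap != nil:
		logf("synchronized with %d peers", len(st.NetMap.Peers))
	default:
		logf("status: %v", st)
	}
}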