This commit is contained in:
2026-02-19 10:07:43 +00:00
parent 007438e372
commit 6e637ecf77
1763 changed files with 60820 additions and 279516 deletions

6
vendor/tailscale.com/util/eventbus/assets/event.html generated vendored Normal file
View File

@@ -0,0 +1,6 @@
<!-- Fragment rendered per monitored event and pushed over the websocket.
     hx-swap-oob="afterbegin" prepends it to the #monitor list on the page. -->
<li id="monitor" hx-swap-oob="afterbegin">
<details>
<summary>{{.Count}}: {{.Type}} from {{.Event.From.Name}}, {{len .Event.To}} recipients</summary>
{{.Event.Event}}
</details>
</li>

Binary file not shown.

Binary file not shown.

97
vendor/tailscale.com/util/eventbus/assets/main.html generated vendored Normal file
View File

@@ -0,0 +1,97 @@
<!-- Main event-bus debug page. Rendered by httpDebugger.ServeHTTP with an
     `info` struct that embeds *Debugger and carries Clients and Types maps. -->
<!DOCTYPE html>
<html>
<head>
<script src="bus/htmx.min.js"></script>
<script src="bus/htmx-websocket.min.js"></script>
<link rel="stylesheet" href="bus/style.css">
</head>
<body hx-ext="ws">
<h1>Event bus</h1>
<section>
<h2>General</h2>
<!-- PublishQueue is a Debugger method; shown only when non-empty. -->
{{with $.PublishQueue}}
{{len .}} pending
{{end}}
<button hx-post="bus/monitor" hx-swap="outerHTML">Monitor all events</button>
</section>
<section>
<h2>Clients</h2>
<table>
<thead>
<tr>
<th>Name</th>
<th>Publishing</th>
<th>Subscribing</th>
<th>Pending</th>
</tr>
</thead>
<!-- One row per client; Publish/Subscribe are sorted []reflect.Type. -->
{{range .Clients}}
<tr id="{{.Name}}">
<td>{{.Name}}</td>
<td class="list">
<ul>
{{range .Publish}}
<li><a href="#{{.}}">{{.}}</a></li>
{{end}}
</ul>
</td>
<td class="list">
<ul>
{{range .Subscribe}}
<li><a href="#{{.}}">{{.}}</a></li>
{{end}}
</ul>
</td>
<td>
{{len ($.SubscribeQueue .Client)}}
</td>
</tr>
{{end}}
</table>
</section>
<section>
<h2>Types</h2>
<!-- One section per event type, cross-linked with the client rows above. -->
{{range .Types}}
<section id="{{.}}">
<h3>{{.Name}}</h3>
<h4>Definition</h4>
<code>{{prettyPrintStruct .}}</code>
<h4>Published by:</h4>
{{if len (.Publish)}}
<ul>
{{range .Publish}}
<li><a href="#{{.Name}}">{{.Name}}</a></li>
{{end}}
</ul>
{{else}}
<ul>
<li>No publishers.</li>
</ul>
{{end}}
<h4>Received by:</h4>
{{if len (.Subscribe)}}
<ul>
{{range .Subscribe}}
<li><a href="#{{.Name}}">{{.Name}}</a></li>
{{end}}
</ul>
{{else}}
<ul>
<li>No subscribers.</li>
</ul>
{{end}}
</section>
{{end}}
</section>
</body>
</html>

View File

@@ -0,0 +1,5 @@
<!-- Monitoring fragment: swapped in for the "Monitor all events" button.
     ws-connect opens the websocket that streams event.html fragments into
     the #monitor list; the button swaps the full page back in to stop. -->
<div>
<ul id="monitor" ws-connect="bus/monitor">
</ul>
<button hx-get="bus" hx-target="body">Stop monitoring</button>
</div>

90
vendor/tailscale.com/util/eventbus/assets/style.css generated vendored Normal file
View File

@@ -0,0 +1,90 @@
/* CSS reset, thanks Josh Comeau: https://www.joshwcomeau.com/css/custom-css-reset/ */
*, *::before, *::after { box-sizing: border-box; }
* { margin: 0; }
input, button, textarea, select { font: inherit; }
p, h1, h2, h3, h4, h5, h6 { overflow-wrap: break-word; }
p { text-wrap: pretty; }
h1, h2, h3, h4, h5, h6 { text-wrap: balance; }
#root, #__next { isolation: isolate; }
body {
line-height: 1.5;
-webkit-font-smoothing: antialiased;
}
img, picture, video, canvas, svg {
display: block;
max-width: 100%;
}
/* Local styling begins */
body {
padding: 12px;
}
div {
width: 100%;
}
section {
  display: flex;
  flex-direction: column;
  /* was `flex-gap`, which is not a CSS property and is ignored by
     browsers; `gap` is the standard property for flex-item spacing. */
  gap: 6px;
  align-items: flex-start;
  padding: 12px 0;
}
section > * {
margin-left: 24px;
}
section > h2, section > h3 {
margin-left: 0;
padding-bottom: 6px;
padding-top: 12px;
}
details {
padding-bottom: 12px;
}
table {
table-layout: fixed;
width: calc(100% - 48px);
border-collapse: collapse;
border: 1px solid black;
}
th, td {
padding: 12px;
border: 1px solid black;
}
td.list {
vertical-align: top;
}
ul {
list-style: none;
}
td ul {
margin: 0;
padding: 0;
}
code {
padding: 12px;
white-space: pre;
}
#monitor {
width: calc(100% - 48px);
resize: vertical;
padding: 12px;
overflow: scroll;
height: 15lh;
border: 1px inset;
min-height: 1em;
display: flex;
flex-direction: column-reverse;
}

345
vendor/tailscale.com/util/eventbus/bus.go generated vendored Normal file
View File

@@ -0,0 +1,345 @@
// Copyright (c) Tailscale Inc & AUTHORS
// SPDX-License-Identifier: BSD-3-Clause
package eventbus
import (
"context"
"log"
"reflect"
"slices"
"tailscale.com/syncs"
"tailscale.com/types/logger"
"tailscale.com/util/set"
)
// PublishedEvent is a bus event paired with the client that published
// it, as delivered to publish-side debug hooks and queue snapshots.
type PublishedEvent struct {
	Event any     // the event value itself
	From  *Client // client whose publisher sent the event
}

// RoutedEvent is a bus event annotated with its publisher and the
// clients it was routed to, as delivered to route-debug hooks.
type RoutedEvent struct {
	Event any
	From  *Client
	To    []*Client
}
// Bus is an event bus that distributes published events to interested
// subscribers.
type Bus struct {
	router     *worker                    // runs pump, the routing loop
	write      chan PublishedEvent        // publishers send events into the bus here
	snapshot   chan chan []PublishedEvent // debug requests for the pending publish queue
	routeDebug hook[RoutedEvent]          // run for each routed event when active
	logf       logger.Logf

	topicsMu syncs.Mutex // guards topics
	topics   map[reflect.Type][]*subscribeState

	// Used for introspection/debugging only, not in the normal event
	// publishing path.
	clientsMu syncs.Mutex
	clients   set.Set[*Client]
}
// New returns a new bus with default options. It is equivalent to
// calling [NewWithOptions] with zero [BusOptions].
func New() *Bus { return NewWithOptions(BusOptions{}) }

// NewWithOptions returns a new [Bus] with the specified [BusOptions].
// Use [Bus.Client] to construct clients on the bus.
// Use [Publish] to make event publishers.
// Use [Subscribe] and [SubscribeFunc] to make event subscribers.
func NewWithOptions(opts BusOptions) *Bus {
	b := &Bus{
		logf:     opts.logger(),
		write:    make(chan PublishedEvent),
		snapshot: make(chan chan []PublishedEvent),
		topics:   map[reflect.Type][]*subscribeState{},
		clients:  set.Set[*Client]{},
	}
	// Start the routing goroutine; it owns event distribution.
	b.router = runWorker(b.pump)
	return b
}
// BusOptions are optional parameters for a [Bus]. A zero value is ready for
// use and provides defaults as described.
type BusOptions struct {
	// Logf, if non-nil, is used for debug logs emitted by the bus and clients,
	// publishers, and subscribers under its care. If it is nil, logs are sent
	// to [log.Printf].
	Logf logger.Logf
}

// logger returns the configured log function, or [log.Printf] when
// none was provided.
func (o BusOptions) logger() logger.Logf {
	if o.Logf != nil {
		return o.Logf
	}
	return log.Printf
}
// Client returns a new client with no subscriptions. Use [Subscribe]
// to receive events, and [Publish] to emit events.
//
// The client's name is used only for debugging, to tell humans what
// piece of code a publisher/subscriber belongs to. Aim for something
// short but unique, for example "kernel-route-monitor" or "taildrop",
// not "watcher".
func (b *Bus) Client(name string) *Client {
	c := &Client{
		name: name,
		bus:  b,
		pub:  set.Set[publisher]{},
	}
	// Track the client for introspection/debugging.
	b.clientsMu.Lock()
	b.clients.Add(c)
	b.clientsMu.Unlock()
	return c
}
// Debugger returns the debugging facility for the bus.
func (b *Bus) Debugger() *Debugger {
	return &Debugger{b}
}
// Close closes the bus. It implicitly closes all clients, publishers and
// subscribers attached to the bus.
//
// Close blocks until the bus is fully shut down. The bus is
// permanently unusable after closing.
func (b *Bus) Close() {
	// Stop the routing goroutine first, then tear down every client.
	b.router.StopAndWait()
	b.clientsMu.Lock()
	defer b.clientsMu.Unlock()
	for client := range b.clients {
		client.Close()
	}
	b.clients = nil
}
// pump is the bus's routing loop, run on the router worker goroutine.
// It moves events from b.write into a bounded local queue and fans
// each queued event out to every subscribeState registered for its
// concrete type. It returns when ctx is canceled.
func (b *Bus) pump(ctx context.Context) {
	// Limit how many published events we can buffer in the PublishedEvent queue.
	//
	// Subscribers have unbounded DeliveredEvent queues (see tailscale/tailscale#18020),
	// so this queue doesn't need to be unbounded. Keeping it bounded may also help
	// catch cases where subscribers stop pumping events completely, such as due to a bug
	// in [subscribeState.pump], [Subscriber.dispatch], or [SubscriberFunc.dispatch]).
	const maxPublishedEvents = 16
	vals := queue[PublishedEvent]{capacity: maxPublishedEvents}
	// acceptCh returns b.write while the local queue has space, and nil
	// (which blocks forever in a select) once it is full, applying
	// backpressure to publishers.
	acceptCh := func() chan PublishedEvent {
		if vals.Full() {
			return nil
		}
		return b.write
	}
	for {
		// Drain all pending events. Note that while we're draining
		// events into subscriber queues, we continue to
		// opportunistically accept more incoming events, if we have
		// queue space for it.
		for !vals.Empty() {
			val := vals.Peek()
			dests := b.dest(reflect.TypeOf(val.Event))
			if b.routeDebug.active() {
				// Only materialize the recipient list when a
				// route-debug hook is attached.
				clients := make([]*Client, len(dests))
				for i := range len(dests) {
					clients[i] = dests[i].client
				}
				b.routeDebug.run(RoutedEvent{
					Event: val.Event,
					From:  val.From,
					To:    clients,
				})
			}
			for _, d := range dests {
				evt := DeliveredEvent{
					Event: val.Event,
					From:  val.From,
					To:    d.client,
				}
			deliverOne:
				for {
					select {
					case d.write <- evt:
						break deliverOne
					case <-d.closed():
						// Queue closed, don't block but continue
						// delivering to others.
						break deliverOne
					case in := <-acceptCh():
						vals.Add(in)
						in.From.publishDebug.run(in)
					case <-ctx.Done():
						return
					case ch := <-b.snapshot:
						ch <- vals.Snapshot()
					}
				}
			}
			vals.Drop()
		}
		// Inbound queue empty, wait for at least 1 work item before
		// resuming.
		for vals.Empty() {
			select {
			case <-ctx.Done():
				return
			case in := <-b.write:
				vals.Add(in)
				in.From.publishDebug.run(in)
			case ch := <-b.snapshot:
				// Nothing queued; report that to the snapshotter.
				ch <- nil
			}
		}
	}
}
// logger returns a [logger.Logf] to which logs related to bus activity should be written.
func (b *Bus) logger() logger.Logf { return b.logf }

// dest returns the subscriber states registered for events of type t.
func (b *Bus) dest(t reflect.Type) []*subscribeState {
	b.topicsMu.Lock()
	defer b.topicsMu.Unlock()
	return b.topics[t]
}
// shouldPublish reports whether an event of type t would be observed
// by anyone: a route-debug hook, or at least one subscriber of t.
func (b *Bus) shouldPublish(t reflect.Type) bool {
	if b.routeDebug.active() {
		return true
	}
	b.topicsMu.Lock()
	n := len(b.topics[t])
	b.topicsMu.Unlock()
	return n > 0
}
// listClients returns the current set of clients as a slice, in
// unspecified order.
func (b *Bus) listClients() []*Client {
	b.clientsMu.Lock()
	defer b.clientsMu.Unlock()
	return b.clients.Slice()
}

// snapshotPublishQueue asks the router goroutine for a copy of the
// currently pending publish queue, for debugging. It returns nil if
// the router has already shut down.
func (b *Bus) snapshotPublishQueue() []PublishedEvent {
	resp := make(chan []PublishedEvent)
	select {
	case b.snapshot <- resp:
		return <-resp
	case <-b.router.Done():
		return nil
	}
}
// subscribe registers q to receive events of type t. The returned
// cancel function removes the registration.
func (b *Bus) subscribe(t reflect.Type, q *subscribeState) (cancel func()) {
	b.topicsMu.Lock()
	defer b.topicsMu.Unlock()
	b.topics[t] = append(b.topics[t], q)
	return func() {
		b.unsubscribe(t, q)
	}
}

// unsubscribe removes q from the subscribers of type t, if present.
func (b *Bus) unsubscribe(t reflect.Type, q *subscribeState) {
	b.topicsMu.Lock()
	defer b.topicsMu.Unlock()
	// Topic slices are accessed by pump without holding a lock, so we
	// have to replace the entire slice when unsubscribing.
	// Unsubscribing should be infrequent enough that this won't
	// matter.
	i := slices.Index(b.topics[t], q)
	if i < 0 {
		return
	}
	b.topics[t] = slices.Delete(slices.Clone(b.topics[t]), i, i+1)
}
// A worker runs a worker goroutine and helps coordinate its shutdown.
type worker struct {
	ctx     context.Context    // passed to the worker fn; canceled by stop
	stop    context.CancelFunc // requests shutdown
	stopped chan struct{}      // closed when the goroutine has exited
}
// runWorker creates a worker goroutine running fn. The context passed
// to fn is canceled by [worker.Stop].
func runWorker(fn func(context.Context)) *worker {
	ctx, cancel := context.WithCancel(context.Background())
	w := &worker{
		ctx:     ctx,
		stop:    cancel,
		stopped: make(chan struct{}),
	}
	go w.run(fn)
	return w
}
// run executes fn, closing the stopped channel when it returns.
func (w *worker) run(fn func(context.Context)) {
	defer close(w.stopped)
	fn(w.ctx)
}

// Stop signals the worker goroutine to shut down.
func (w *worker) Stop() { w.stop() }

// Done returns a channel that is closed when the worker goroutine
// exits.
func (w *worker) Done() <-chan struct{} { return w.stopped }

// Wait waits until the worker goroutine has exited.
func (w *worker) Wait() { <-w.stopped }

// StopAndWait signals the worker goroutine to shut down, then waits
// for it to exit.
func (w *worker) StopAndWait() {
	w.stop()
	<-w.stopped
}
// stopFlag is a value that can be watched for a notification. The
// zero value is ready for use.
//
// The flag is notified by running [stopFlag.Stop]. Stop can be called
// multiple times. Upon the first call to Stop, [stopFlag.Done] is
// closed, all pending [stopFlag.Wait] calls return, and future Wait
// calls return immediately.
//
// A stopFlag can only notify once, and is intended for use as a
// one-way shutdown signal that's lighter than a cancellable
// context.Context.
type stopFlag struct {
	// guards the lazy construction of stopped, and the value of
	// alreadyStopped.
	mu             syncs.Mutex
	stopped        chan struct{} // created lazily by Stop or Done
	alreadyStopped bool          // true once Stop has run
}
// Stop notifies the flag. The first call closes the channel returned
// by Done; later calls are no-ops.
func (s *stopFlag) Stop() {
	s.mu.Lock()
	defer s.mu.Unlock()
	if s.alreadyStopped {
		return
	}
	s.alreadyStopped = true
	if s.stopped == nil {
		// Done was never called; create the channel so it can be closed.
		s.stopped = make(chan struct{})
	}
	close(s.stopped)
}

// Done returns a channel that is closed once the flag is stopped,
// lazily constructing the channel if needed.
func (s *stopFlag) Done() <-chan struct{} {
	s.mu.Lock()
	defer s.mu.Unlock()
	if s.stopped == nil {
		s.stopped = make(chan struct{})
	}
	return s.stopped
}

// Wait blocks until the flag has been stopped.
func (s *stopFlag) Wait() {
	<-s.Done()
}

182
vendor/tailscale.com/util/eventbus/client.go generated vendored Normal file
View File

@@ -0,0 +1,182 @@
// Copyright (c) Tailscale Inc & AUTHORS
// SPDX-License-Identifier: BSD-3-Clause
package eventbus
import (
"reflect"
"tailscale.com/syncs"
"tailscale.com/types/logger"
"tailscale.com/util/set"
)
// A Client can publish and subscribe to events on its attached
// bus. See [Publish] to publish events, and [Subscribe] to receive
// events.
//
// Subscribers that share the same client receive events one at a
// time, in the order they were published.
type Client struct {
	name         string
	bus          *Bus
	publishDebug hook[PublishedEvent] // run for each event published via this client

	mu   syncs.Mutex
	pub  set.Set[publisher]
	sub  *subscribeState // Lazily created on first subscribe
	stop stopFlag        // signaled on Close
}
// Name returns the client's debugging name.
func (c *Client) Name() string { return c.name }

// logger returns the log function of the client's bus.
func (c *Client) logger() logger.Logf { return c.bus.logger() }
// Close closes the client. It implicitly closes all publishers and
// subscribers obtained from this client.
func (c *Client) Close() {
	// Swap the state out under the lock, then do the actual closing
	// without holding it.
	c.mu.Lock()
	pub, sub := c.pub, c.sub
	c.pub, c.sub = nil, nil
	c.mu.Unlock()

	if sub != nil {
		sub.close()
	}
	for p := range pub {
		p.Close()
	}
	c.stop.Stop()
}
// isClosed reports whether Close has already run. Callers are
// expected to hold c.mu (Subscribe/addPublisher do).
func (c *Client) isClosed() bool { return c.pub == nil && c.sub == nil }

// Done returns a channel that is closed when [Client.Close] is called.
// The channel is closed after all the publishers and subscribers governed by
// the client have been closed.
func (c *Client) Done() <-chan struct{} { return c.stop.Done() }

// snapshotSubscribeQueue returns a copy of the client's pending
// subscribe queue, for debugging.
func (c *Client) snapshotSubscribeQueue() []DeliveredEvent {
	// NOTE(review): peekSubscribeState may return nil if the client never
	// subscribed; this assumes snapshotQueue tolerates a nil receiver — confirm.
	return c.peekSubscribeState().snapshotQueue()
}

// peekSubscribeState returns the client's subscribe state without
// creating it; it may return nil.
func (c *Client) peekSubscribeState() *subscribeState {
	c.mu.Lock()
	defer c.mu.Unlock()
	return c.sub
}
// publishTypes returns the event types for which this client has
// obtained a publisher.
func (c *Client) publishTypes() []reflect.Type {
	c.mu.Lock()
	defer c.mu.Unlock()
	types := make([]reflect.Type, 0, len(c.pub))
	for p := range c.pub {
		types = append(types, p.publishType())
	}
	return types
}
// subscribeTypes returns the event types this client subscribes to.
func (c *Client) subscribeTypes() []reflect.Type {
	return c.peekSubscribeState().subscribeTypes()
}

// subscribeState returns the client's subscribe state, creating it on
// first use.
func (c *Client) subscribeState() *subscribeState {
	c.mu.Lock()
	defer c.mu.Unlock()
	return c.subscribeStateLocked()
}

// subscribeStateLocked is subscribeState with c.mu already held.
func (c *Client) subscribeStateLocked() *subscribeState {
	if c.sub == nil {
		c.sub = newSubscribeState(c)
	}
	return c.sub
}
// addPublisher records pub as owned by this client. It panics if the
// client is already closed.
func (c *Client) addPublisher(pub publisher) {
	c.mu.Lock()
	defer c.mu.Unlock()
	if c.isClosed() {
		panic("cannot Publish on a closed client")
	}
	c.pub.Add(pub)
}

// deletePublisher removes pub from the client's publisher set.
func (c *Client) deletePublisher(pub publisher) {
	c.mu.Lock()
	defer c.mu.Unlock()
	c.pub.Delete(pub)
}

// addSubscriber registers s with the bus for events of type t.
func (c *Client) addSubscriber(t reflect.Type, s *subscribeState) {
	c.bus.subscribe(t, s)
}

// deleteSubscriber removes s's registration for type t from the bus.
func (c *Client) deleteSubscriber(t reflect.Type, s *subscribeState) {
	c.bus.unsubscribe(t, s)
}

// publish returns the bus's write channel, into which publishers send
// their events.
func (c *Client) publish() chan<- PublishedEvent {
	return c.bus.write
}

// shouldPublish reports whether an event of type t would be observed
// by a publish-debug hook or by the bus (a route hook or subscriber).
func (c *Client) shouldPublish(t reflect.Type) bool {
	return c.publishDebug.active() || c.bus.shouldPublish(t)
}
// Subscribe requests delivery of events of type T through the given client.
// It panics if c already has a subscriber for type T, or if c is closed.
func Subscribe[T any](c *Client) *Subscriber[T] {
	// Hold the client lock throughout the subscription process so that a caller
	// attempting to subscribe on a closed client will get a useful diagnostic
	// instead of a random panic from inside the subscriber plumbing.
	c.mu.Lock()
	defer c.mu.Unlock()
	// The caller should not race subscriptions with close, give them a useful
	// diagnostic at the call site.
	if c.isClosed() {
		panic("cannot Subscribe on a closed client")
	}
	state := c.subscribeStateLocked()
	sub := newSubscriber[T](state, logfForCaller(c.logger()))
	state.addSubscriber(sub)
	return sub
}
// SubscribeFunc is like [Subscribe], but calls the provided func for each
// event of type T.
//
// A SubscriberFunc calls f synchronously from the client's goroutine.
// This means the callback must not block for an extended period of time,
// as this will block the subscriber and slow event processing for all
// subscriptions on c.
func SubscribeFunc[T any](c *Client, f func(T)) *SubscriberFunc[T] {
	c.mu.Lock()
	defer c.mu.Unlock()
	// The caller should not race subscriptions with close, give them a useful
	// diagnostic at the call site.
	if c.isClosed() {
		panic("cannot SubscribeFunc on a closed client")
	}
	state := c.subscribeStateLocked()
	sub := newSubscriberFunc[T](state, f, logfForCaller(c.logger()))
	state.addSubscriber(sub)
	return sub
}
// Publish returns a publisher for event type T using the given client.
// It panics if c is closed.
func Publish[T any](c *Client) *Publisher[T] {
	// addPublisher performs the closed-client check under c.mu.
	p := newPublisher[T](c)
	c.addPublisher(p)
	return p
}

242
vendor/tailscale.com/util/eventbus/debug.go generated vendored Normal file
View File

@@ -0,0 +1,242 @@
// Copyright (c) Tailscale Inc & AUTHORS
// SPDX-License-Identifier: BSD-3-Clause
package eventbus
import (
"cmp"
"fmt"
"path/filepath"
"reflect"
"runtime"
"slices"
"strings"
"sync/atomic"
"time"
"tailscale.com/syncs"
"tailscale.com/types/logger"
)
// slowSubscriberTimeout is a timeout after which a subscriber that does not
// accept a pending event will be flagged as being slow.
// (Not referenced in this file; presumably used by the subscriber
// implementation — confirm.)
const slowSubscriberTimeout = 5 * time.Second

// A Debugger offers access to a bus's privileged introspection and
// debugging facilities.
//
// The debugger's functionality is intended for humans and their tools
// to examine and troubleshoot bus clients, and should not be used in
// normal codepaths.
//
// In particular, the debugger provides access to information that is
// deliberately withheld from bus clients to encourage more robust and
// maintainable code - for example, the sender of an event, or the
// event streams of other clients. Please don't use the debugger to
// circumvent these restrictions for purposes other than debugging.
type Debugger struct {
	bus *Bus // the bus being inspected
}
// Clients returns a list of all clients attached to the bus, sorted
// by name.
func (d *Debugger) Clients() []*Client {
	clients := d.bus.listClients()
	slices.SortFunc(clients, func(x, y *Client) int {
		return strings.Compare(x.Name(), y.Name())
	})
	return clients
}
// PublishQueue returns the contents of the publish queue.
//
// The publish queue contains events that have been accepted by the
// bus from Publish() calls, but have not yet been routed to relevant
// subscribers.
//
// This queue is expected to be almost empty in normal operation. A
// full publish queue indicates that a slow subscriber downstream is
// causing backpressure and stalling the bus.
func (d *Debugger) PublishQueue() []PublishedEvent {
	return d.bus.snapshotPublishQueue()
}
// checkClient verifies that client is attached to the same bus as the
// Debugger, and panics if not. Every Debugger method that takes a
// *Client calls this before touching the client's internals.
func (d *Debugger) checkClient(client *Client) {
	if client.bus != d.bus {
		// Include the client name, and don't blame "SubscribeQueue":
		// the old message named that one method even though checkClient
		// is shared by every per-client Debugger method.
		panic(fmt.Errorf("eventbus: Debugger given client %q belonging to a different bus", client.Name()))
	}
}
// SubscribeQueue returns the contents of the given client's subscribe
// queue.
//
// The subscribe queue contains events that are to be delivered to the
// client, but haven't yet been handed off to the relevant
// [Subscriber].
//
// This queue is expected to be almost empty in normal operation. A
// full subscribe queue indicates that the client is accepting events
// too slowly, and may be causing the rest of the bus to stall.
func (d *Debugger) SubscribeQueue(client *Client) []DeliveredEvent {
	d.checkClient(client)
	return client.snapshotSubscribeQueue()
}
// WatchBus streams information about all events passing through the
// bus.
//
// Monitored events are delivered in the bus's global publication
// order (see "Concurrency properties" in the package docs).
//
// The caller must consume monitoring events promptly to avoid
// stalling the bus (see "Expected subscriber behavior" in the package
// docs).
func (d *Debugger) WatchBus() *Subscriber[RoutedEvent] {
	return newMonitor(d.bus.routeDebug.add)
}

// WatchPublish streams information about all events published by the
// given client.
//
// Monitored events are delivered in the bus's global publication
// order (see "Concurrency properties" in the package docs).
//
// The caller must consume monitoring events promptly to avoid
// stalling the bus (see "Expected subscriber behavior" in the package
// docs).
func (d *Debugger) WatchPublish(client *Client) *Subscriber[PublishedEvent] {
	d.checkClient(client)
	return newMonitor(client.publishDebug.add)
}

// WatchSubscribe streams information about all events received by the
// given client.
//
// Monitored events are delivered in the bus's global publication
// order (see "Concurrency properties" in the package docs).
//
// The caller must consume monitoring events promptly to avoid
// stalling the bus (see "Expected subscriber behavior" in the package
// docs).
func (d *Debugger) WatchSubscribe(client *Client) *Subscriber[DeliveredEvent] {
	d.checkClient(client)
	return newMonitor(client.subscribeState().debug.add)
}
// PublishTypes returns the list of types being published by client.
//
// The returned types are those for which the client has obtained a
// [Publisher]. The client may not have ever sent the type in
// question.
func (d *Debugger) PublishTypes(client *Client) []reflect.Type {
	d.checkClient(client)
	return client.publishTypes()
}

// SubscribeTypes returns the list of types being subscribed to by
// client.
//
// The returned types are those for which the client has obtained a
// [Subscriber]. The client may not have ever received the type in
// question, and there may not be any publishers of the type.
func (d *Debugger) SubscribeTypes(client *Client) []reflect.Type {
	d.checkClient(client)
	return client.subscribeTypes()
}
// A hook collects hook functions that can be run as a group.
type hook[T any] struct {
	syncs.Mutex             // guards fns
	fns         []hookFn[T] // registered functions, run in registration order
}

// hookID generates process-wide unique IDs for registered hook
// functions, so they can be removed individually.
var hookID atomic.Uint64
// add registers fn to be called when the hook is run. Returns an
// unregistration function that removes fn from the hook when called.
func (h *hook[T]) add(fn func(T)) (remove func()) {
	id := hookID.Add(1)
	h.Lock()
	h.fns = append(h.fns, hookFn[T]{ID: id, Fn: fn})
	h.Unlock()
	return func() { h.remove(id) }
}
// remove removes the function with the given ID from the hook.
func (h *hook[T]) remove(id uint64) {
	h.Lock()
	defer h.Unlock()
	h.fns = slices.DeleteFunc(h.fns, func(f hookFn[T]) bool { return f.ID == id })
}

// active reports whether any functions are registered with the
// hook. This can be used to skip expensive work when the hook is
// inactive.
func (h *hook[T]) active() bool {
	h.Lock()
	defer h.Unlock()
	return len(h.fns) > 0
}

// run calls all registered functions with the value v. The hook lock
// is held for the duration, so fns must not re-enter the hook.
func (h *hook[T]) run(v T) {
	h.Lock()
	defer h.Unlock()
	for _, fn := range h.fns {
		fn.Fn(v)
	}
}
// hookFn is a registered hook function together with the unique ID
// used to remove it.
type hookFn[T any] struct {
	ID uint64
	Fn func(T)
}

// DebugEvent is a representation of an event used for debug clients.
type DebugEvent struct {
	Count int      // sequence number assigned by the monitor
	Type  string   // event type name
	From  string   // publishing client name
	To    []string // recipient client names
	Event any      // the event value itself
}

// DebugTopics provides the JSON encoding as a wrapper for a collection of [DebugTopic].
type DebugTopics struct {
	Topics []DebugTopic
}

// DebugTopic provides the JSON encoding of publishers and subscribers for a
// given topic.
type DebugTopic struct {
	Name        string
	Publisher   string
	Subscribers []string
}
// logfForCaller returns a [logger.Logf] that prefixes its output with the
// package, filename, and line number of the caller's caller.
// If logf == nil, it returns [logger.Discard].
// If the caller location could not be determined, it returns logf unmodified.
func logfForCaller(logf logger.Logf) logger.Logf {
	if logf == nil {
		return logger.Discard
	}
	// Depth 2 is deliberate: see the comment. Adding an intermediate
	// helper between the true caller and this function would break it.
	pc, fpath, line, _ := runtime.Caller(2) // +1 for my caller, +1 for theirs
	if f := runtime.FuncForPC(pc); f != nil {
		return logger.WithPrefix(logf, fmt.Sprintf("%s %s:%d: ", funcPackageName(f.Name()), filepath.Base(fpath), line))
	}
	return logf
}
// funcPackageName reduces a fully qualified function name, as returned
// by [runtime.Func.Name], to its package import path: everything after
// the final "/" up to (but excluding) the first "." is kept, along with
// all path components before it.
func funcPackageName(funcName string) string {
	// Dots inside path components (e.g. "tailscale.com") must survive,
	// so only dots strictly after the last slash are trimmed.
	limit := max(strings.LastIndex(funcName, "/"), 0)
	for dot := strings.LastIndex(funcName, "."); dot > limit; dot = strings.LastIndex(funcName, ".") {
		funcName = funcName[:dot]
	}
	return funcName
}

240
vendor/tailscale.com/util/eventbus/debughttp.go generated vendored Normal file
View File

@@ -0,0 +1,240 @@
// Copyright (c) Tailscale Inc & AUTHORS
// SPDX-License-Identifier: BSD-3-Clause
//go:build !ios && !android && !ts_omit_debugeventbus
package eventbus
import (
"bytes"
"cmp"
"embed"
"fmt"
"html/template"
"io"
"io/fs"
"log"
"net/http"
"path/filepath"
"reflect"
"slices"
"strings"
"sync"
"github.com/coder/websocket"
"tailscale.com/tsweb"
)
// httpDebugger wraps a [Debugger] with the HTTP handlers for the
// event bus debug UI.
type httpDebugger struct {
	*Debugger
}

// RegisterHTTP attaches the event bus debug UI and its static assets
// to td under the "bus" path.
func (d *Debugger) RegisterHTTP(td *tsweb.DebugHandler) {
	dh := httpDebugger{d}
	td.Handle("bus", "Event bus", dh)
	td.HandleSilent("bus/monitor", http.HandlerFunc(dh.serveMonitor))
	td.HandleSilent("bus/style.css", serveStatic("style.css"))
	td.HandleSilent("bus/htmx.min.js", serveStatic("htmx.min.js.gz"))
	td.HandleSilent("bus/htmx-websocket.min.js", serveStatic("htmx-websocket.min.js.gz"))
}
//go:embed assets/*.html
var templatesSrc embed.FS

// templates lazily parses the embedded HTML templates once, adding the
// prettyPrintStruct helper used by main.html.
var templates = sync.OnceValue(func() *template.Template {
	d, err := fs.Sub(templatesSrc, "assets")
	if err != nil {
		panic(fmt.Errorf("getting eventbus debughttp templates subdir: %w", err))
	}
	ret := template.New("").Funcs(map[string]any{
		"prettyPrintStruct": prettyPrintStruct,
	})
	return template.Must(ret.ParseFS(d, "*"))
})

// static holds the stylesheet and pre-gzipped htmx assets, fetched by
// the go:generate step below.
//go:generate go run fetch-htmx.go
//go:embed assets/*.css assets/*.min.js.gz
var static embed.FS
// serveStatic returns a handler that serves the named file from the
// embedded assets directory. The Content-Type (and, for pre-gzipped
// files, Content-Encoding) is derived from the name's suffix; names
// with an unrecognized suffix report 404.
func serveStatic(name string) http.Handler {
	return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		switch {
		case strings.HasSuffix(name, ".css"):
			w.Header().Set("Content-Type", "text/css")
		case strings.HasSuffix(name, ".min.js.gz"):
			// Asset is stored pre-gzipped; the client decompresses.
			w.Header().Set("Content-Type", "text/javascript")
			w.Header().Set("Content-Encoding", "gzip")
		case strings.HasSuffix(name, ".js"):
			w.Header().Set("Content-Type", "text/javascript")
		default:
			http.Error(w, "not found", http.StatusNotFound)
			return
		}
		// embed.FS paths must be slash-separated (io/fs.ValidPath), but
		// filepath.Join uses the OS separator and would produce
		// `assets\name` on Windows, which embed.FS cannot open.
		// Normalize with ToSlash.
		f, err := static.Open(filepath.ToSlash(filepath.Join("assets", name)))
		if err != nil {
			http.Error(w, fmt.Sprintf("opening asset: %v", err), http.StatusInternalServerError)
			return
		}
		defer f.Close()
		if _, err := io.Copy(w, f); err != nil {
			http.Error(w, fmt.Sprintf("serving asset: %v", err), http.StatusInternalServerError)
			return
		}
	})
}
// render executes the template name + ".html" against data, writing
// the result to w. Render errors are logged and reported to the
// client as a 500.
func render(w http.ResponseWriter, name string, data any) {
	if err := templates().ExecuteTemplate(w, name+".html", data); err != nil {
		wrapped := fmt.Errorf("rendering template: %v", err)
		log.Print(wrapped)
		http.Error(w, wrapped.Error(), http.StatusInternalServerError)
	}
}
// ServeHTTP renders the main debug page: a cross-referenced view of
// every client and every event type on the bus, built from the
// Debugger's introspection methods and rendered with main.html.
func (h httpDebugger) ServeHTTP(w http.ResponseWriter, r *http.Request) {
	// clientInfo pairs a client with its sorted publish/subscribe types.
	type clientInfo struct {
		*Client
		Publish   []reflect.Type
		Subscribe []reflect.Type
	}
	// typeInfo pairs an event type with the clients on each side of it.
	type typeInfo struct {
		reflect.Type
		Publish   []*Client
		Subscribe []*Client
	}
	// info is the template's dot; embedding *Debugger exposes methods
	// like PublishQueue and SubscribeQueue to the template.
	type info struct {
		*Debugger
		Clients map[string]*clientInfo
		Types   map[string]*typeInfo
	}
	data := info{
		Debugger: h.Debugger,
		Clients:  map[string]*clientInfo{},
		Types:    map[string]*typeInfo{},
	}
	// getTypeInfo returns the (lazily created) entry for t, keyed by
	// the type's short name.
	getTypeInfo := func(t reflect.Type) *typeInfo {
		if data.Types[t.Name()] == nil {
			data.Types[t.Name()] = &typeInfo{
				Type: t,
			}
		}
		return data.Types[t.Name()]
	}
	for _, c := range h.Clients() {
		ci := &clientInfo{
			Client:    c,
			Publish:   h.PublishTypes(c),
			Subscribe: h.SubscribeTypes(c),
		}
		slices.SortFunc(ci.Publish, func(a, b reflect.Type) int { return cmp.Compare(a.Name(), b.Name()) })
		slices.SortFunc(ci.Subscribe, func(a, b reflect.Type) int { return cmp.Compare(a.Name(), b.Name()) })
		data.Clients[c.Name()] = ci
		// Build the reverse index: type -> publishing/subscribing clients.
		for _, t := range ci.Publish {
			ti := getTypeInfo(t)
			ti.Publish = append(ti.Publish, c)
		}
		for _, t := range ci.Subscribe {
			ti := getTypeInfo(t)
			ti.Subscribe = append(ti.Subscribe, c)
		}
	}
	render(w, "main", data)
}
// serveMonitor serves the event monitor. Plain requests receive the
// HTML fragment that opens a websocket back to this path; websocket
// upgrade requests are handed to serveMonitorStream.
func (h httpDebugger) serveMonitor(w http.ResponseWriter, r *http.Request) {
	if r.Header.Get("Upgrade") != "websocket" {
		render(w, "monitor", nil)
		return
	}
	h.serveMonitorStream(w, r)
}
// serveMonitorStream upgrades the request to a websocket and streams
// every event routed by the bus as a rendered event.html fragment,
// until the client disconnects, the request context ends, or the
// monitor subscription closes.
func (h httpDebugger) serveMonitorStream(w http.ResponseWriter, r *http.Request) {
	conn, err := websocket.Accept(w, r, nil)
	if err != nil {
		return
	}
	defer conn.CloseNow()
	// We never read application data; CloseRead returns a context that
	// ends when the peer closes or errors.
	wsCtx := conn.CloseRead(r.Context())
	mon := h.WatchBus()
	defer mon.Close()
	i := 0 // per-connection event counter, shown as {{.Count}}
	for {
		select {
		case <-r.Context().Done():
			return
		case <-wsCtx.Done():
			return
		case <-mon.Done():
			return
		case event := <-mon.Events():
			msg, err := conn.Writer(r.Context(), websocket.MessageText)
			if err != nil {
				return
			}
			data := map[string]any{
				"Count": i,
				"Type":  reflect.TypeOf(event.Event),
				"Event": event,
			}
			i++
			if err := templates().ExecuteTemplate(msg, "event.html", data); err != nil {
				log.Println(err)
				return
			}
			if err := msg.Close(); err != nil {
				return
			}
		}
	}
}
// prettyPrintStruct renders a struct type as an indented,
// Go-declaration-like listing of its exported visible fields,
// recursing into struct-typed fields. Non-struct types render as
// their [reflect.Type.String].
func prettyPrintStruct(t reflect.Type) string {
	if t.Kind() != reflect.Struct {
		return t.String()
	}
	var rec func(io.Writer, int, reflect.Type)
	rec = func(out io.Writer, indent int, t reflect.Type) {
		ind := strings.Repeat("  ", indent)
		fmt.Fprintf(out, "%s", t.String())
		fs := collectFields(t)
		if len(fs) > 0 {
			io.WriteString(out, " {\n")
			for _, f := range fs {
				fmt.Fprintf(out, "%s  %s ", ind, f.Name)
				if f.Type.Kind() == reflect.Struct {
					// Nested struct: recurse with deeper indentation.
					rec(out, indent+1, f.Type)
				} else {
					fmt.Fprint(out, f.Type)
				}
				io.WriteString(out, "\n")
			}
			fmt.Fprintf(out, "%s}", ind)
		}
	}
	var ret bytes.Buffer
	rec(&ret, 0, t)
	return ret.String()
}
func collectFields(t reflect.Type) (ret []reflect.StructField) {
for _, f := range reflect.VisibleFields(t) {
if !f.IsExported() {
continue
}
ret = append(ret, f)
}
return ret
}

10
vendor/tailscale.com/util/eventbus/debughttp_off.go generated vendored Normal file
View File

@@ -0,0 +1,10 @@
// Copyright (c) Tailscale Inc & AUTHORS
// SPDX-License-Identifier: BSD-3-Clause
//go:build ios || android || ts_omit_debugeventbus
package eventbus
// tswebDebugHandler stands in for *tsweb.DebugHandler so that this
// build-tag variant avoids importing tsweb entirely.
type tswebDebugHandler = any // actually *tsweb.DebugHandler; any to avoid import tsweb with ts_omit_debugeventbus

// RegisterHTTP is a no-op on builds without the eventbus debug UI
// (ios, android, or ts_omit_debugeventbus).
func (*Debugger) RegisterHTTP(td tswebDebugHandler) {}

102
vendor/tailscale.com/util/eventbus/doc.go generated vendored Normal file
View File

@@ -0,0 +1,102 @@
// Copyright (c) Tailscale Inc & AUTHORS
// SPDX-License-Identifier: BSD-3-Clause
// Package eventbus provides an in-process event bus.
//
// An event bus connects publishers of typed events with subscribers
// interested in those events. Typically, there is one global event
// bus per process.
//
// # Usage
//
// To send or receive events, first use [Bus.Client] to register with
// the bus. Clients should register with a human-readable name that
// identifies the code using the client, to aid in debugging.
//
// To publish events, use [Publish] on a Client to get a typed
// publisher for your event type, then call [Publisher.Publish] as
// needed. If your event is expensive to construct, you can optionally
// use [Publisher.ShouldPublish] to skip the work if nobody is
// listening for the event.
//
// To receive events, use [Subscribe] to get a typed subscriber for
// each event type you're interested in. Receive the events themselves
// by selecting over all your [Subscriber.Events] channels, as well as
// [Subscriber.Done] for shutdown notifications.
//
// # Concurrency properties
//
// The bus serializes all published events across all publishers, and
// preserves that ordering when delivering to subscribers that are
// attached to the same Client. In more detail:
//
// - An event is published to the bus at some instant between the
// start and end of the call to [Publisher.Publish].
// - Two events cannot be published at the same instant, and so are
// totally ordered by their publication time. Given two events E1
// and E2, either E1 happens before E2, or E2 happens before E1.
// - Clients dispatch events to their Subscribers in publication
// order: if E1 happens before E2, the client always delivers E1
// before E2.
// - Clients do not synchronize subscriptions with each other: given
// clients C1 and C2, both subscribed to events E1 and E2, C1 may
// deliver both E1 and E2 before C2 delivers E1.
//
// Less formally: there is one true timeline of all published events.
// If you make a Client and subscribe to events, you will receive
// events one at a time, in the same order as the one true
// timeline. You will "skip over" events you didn't subscribe to, but
// your view of the world always moves forward in time, never
// backwards, and you will observe events in the same order as
// everyone else.
//
// However, you cannot assume that what your client sees as "now" is
// the same as what other clients see. They may be further behind you in
// working through the timeline, or running ahead of you. This means
// you should be careful about reaching out to another component
// directly after receiving an event, as its view of the world may not
// yet (or ever) be exactly consistent with yours.
//
// To make your code more testable and understandable, you should try
// to structure it following the actor model: you have some local
// state over which you have authority, but your only way to interact
// with state elsewhere in the program is to receive and process
// events coming from elsewhere, or to emit events of your own.
//
// # Expected subscriber behavior
//
// Subscribers are expected to promptly receive their events on
// [Subscriber.Events]. The bus has a small, fixed amount of internal
// buffering, meaning that a slow subscriber will eventually cause
// backpressure and block publication of all further events.
//
// In general, you should receive from your subscriber(s) in a loop,
// and only do fast state updates within that loop. Any heavier work
// should be offloaded to another goroutine.
//
// Causing publishers to block from backpressure is considered a bug
// in the slow subscriber causing the backpressure, and should be
// addressed there. Publishers should assume that Publish will not
// block for extended periods of time, and should not make exceptional
// effort to behave gracefully if they do get blocked.
//
// These blocking semantics are provisional and subject to
// change. Please speak up if this causes development pain, so that we
// can adapt the semantics to better suit our needs.
//
// # Debugging facilities
//
// The [Debugger], obtained through [Bus.Debugger], provides
// introspection facilities to monitor events flowing through the bus,
// and inspect publisher and subscriber state.
//
// Additionally, a debug command exists for monitoring the eventbus:
//
// tailscale debug daemon-bus-events
//
// # Testing facilities
//
// Helpers for testing code with the eventbus can be found in:
//
// eventbus/eventbustest
package eventbus

54
vendor/tailscale.com/util/eventbus/monitor.go generated vendored Normal file
View File

@@ -0,0 +1,54 @@
// Copyright (c) Tailscale Inc & AUTHORS
// SPDX-License-Identifier: BSD-3-Clause
package eventbus
import "tailscale.com/syncs"
// A Monitor monitors the execution of a goroutine processing events from a
// [Client], allowing the caller to block until it is complete. The zero
// value is valid: its Close and Wait methods return immediately, and its
// Done method returns an already-closed channel.
type Monitor struct {
	// These fields are immutable after initialization.
	cli  *Client         // client whose goroutine is monitored; nil in the zero value
	done <-chan struct{} // closed when the monitored goroutine returns; nil in the zero value
}
// Close closes the client associated with m and blocks until the
// monitored goroutine has finished. On a zero Monitor it returns
// immediately without doing anything.
func (m Monitor) Close() {
	if c := m.cli; c != nil {
		c.Close()
		<-m.done
	}
}
// Wait blocks until the goroutine monitored by m has finished executing,
// without closing the associated client. Wait may be called any number of
// times and from multiple goroutines concurrently; a zero Monitor's Wait
// returns immediately.
func (m Monitor) Wait() {
	if m.done != nil {
		<-m.done
	}
}
// Done returns a channel that is closed once the monitored goroutine has
// finished executing. For a zero Monitor it returns a channel that is
// already closed.
func (m Monitor) Done() <-chan struct{} {
	if ch := m.done; ch != nil {
		return ch
	}
	return syncs.ClosedChan()
}
// Monitor executes f in a new goroutine attended by a [Monitor]. The caller
// is responsible for waiting for the goroutine to complete, by calling either
// [Monitor.Close] or [Monitor.Wait].
func (c *Client) Monitor(f func(*Client)) Monitor {
	done := make(chan struct{})
	// done is closed when f returns; Monitor.Wait/Done/Close observe it.
	go func() { defer close(done); f(c) }()
	return Monitor{cli: c, done: done}
}

74
vendor/tailscale.com/util/eventbus/publish.go generated vendored Normal file
View File

@@ -0,0 +1,74 @@
// Copyright (c) Tailscale Inc & AUTHORS
// SPDX-License-Identifier: BSD-3-Clause
package eventbus
import (
"reflect"
)
// publisher is a uniformly typed wrapper around Publisher[T], so that
// debugging facilities can look at active publishers.
type publisher interface {
	// publishType reports the concrete event type T that the
	// underlying Publisher[T] emits.
	publishType() reflect.Type
	// Close closes the underlying publisher.
	Close()
}
// A Publisher publishes typed events on a bus.
type Publisher[T any] struct {
	client *Client  // the client through which events are published
	stop   stopFlag // stopped by Close; unblocks pending Publish calls
}
// newPublisher returns a Publisher[T] that publishes through client c.
func newPublisher[T any](c *Client) *Publisher[T] {
	return &Publisher[T]{client: c}
}
// Close closes the publisher.
//
// Calls to Publish after Close silently do nothing.
//
// If the Bus or Client from which the Publisher was created is closed,
// the Publisher is implicitly closed and does not need to be closed
// separately.
func (p *Publisher[T]) Close() {
	// Just unblocks any active calls to Publish, no other
	// synchronization needed.
	p.stop.Stop()
	// Remove this publisher from the client's bookkeeping.
	p.client.deletePublisher(p)
}
// publishType implements part of the publisher interface.
func (p *Publisher[T]) publishType() reflect.Type {
	return reflect.TypeFor[T]()
}
// Publish publishes event v on the bus. It is a no-op if the publisher
// has been closed, and may block if the bus is applying backpressure.
func (p *Publisher[T]) Publish(v T) {
	// Check for just a stopped publisher or bus before trying to
	// write, so that once closed Publish consistently does nothing.
	select {
	case <-p.stop.Done():
		return
	default:
	}

	evt := PublishedEvent{
		Event: v,
		From:  p.client,
	}
	// Hand the event to the bus, but abandon the send if the
	// publisher is closed while blocked.
	select {
	case p.client.publish() <- evt:
	case <-p.stop.Done():
	}
}
// ShouldPublish reports whether anyone is subscribed to the events
// that this publisher emits.
//
// ShouldPublish can be used to skip expensive event construction if
// nobody seems to care. Publishers must not assume that someone will
// definitely receive an event if ShouldPublish returns true: the
// answer is inherently racy, as subscriptions may change at any time.
func (p *Publisher[T]) ShouldPublish() bool {
	return p.client.shouldPublish(reflect.TypeFor[T]())
}

85
vendor/tailscale.com/util/eventbus/queue.go generated vendored Normal file
View File

@@ -0,0 +1,85 @@
// Copyright (c) Tailscale Inc & AUTHORS
// SPDX-License-Identifier: BSD-3-Clause
package eventbus
import (
"slices"
)
// queue is an ordered FIFO queue of length up to capacity,
// if capacity is non-zero. Otherwise it is unbounded.
//
// The zero value is an empty, unbounded queue.
type queue[T any] struct {
	vals     []T // live elements are vals[start:len(vals)]
	start    int // index of the first live element in vals
	capacity int // zero means unbounded
}

// canAppend reports whether a value can be appended to q.vals without
// shifting values around.
func (q *queue[T]) canAppend() bool {
	return q.capacity == 0 || cap(q.vals) < q.capacity || len(q.vals) < cap(q.vals)
}

// Full reports whether the queue is at capacity. An unbounded queue
// is never full.
func (q *queue[T]) Full() bool {
	return q.start == 0 && !q.canAppend()
}

// Empty reports whether the queue contains no values.
func (q *queue[T]) Empty() bool {
	return q.start == len(q.vals)
}

// Len reports the number of values currently in the queue.
func (q *queue[T]) Len() int {
	return len(q.vals) - q.start
}

// Add adds v to the end of the queue. It panics if the queue is full;
// callers must check Full before adding to a bounded queue.
func (q *queue[T]) Add(v T) {
	if !q.canAppend() {
		if q.start == 0 {
			panic("Add on a full queue")
		}
		// Slide remaining values back to the start of the array,
		// and clear the vacated tail so dropped values can be GC'd.
		n := copy(q.vals, q.vals[q.start:])
		clear(q.vals[n:])
		q.vals = q.vals[:n]
		q.start = 0
	}
	q.vals = append(q.vals, v)
}

// Peek returns the first value in the queue, without removing it from
// the queue, or the zero value of T if the queue is empty.
func (q *queue[T]) Peek() T {
	if q.Empty() {
		var zero T
		return zero
	}
	return q.vals[q.start]
}

// Drop discards the first value in the queue, if any.
func (q *queue[T]) Drop() {
	if q.Empty() {
		return
	}
	var zero T
	q.vals[q.start] = zero // allow GC of the dropped value
	q.start++
	if q.Empty() {
		// Reset cursor to start of array, it's free to do.
		q.start = 0
		q.vals = q.vals[:0]
	}
}

// Snapshot returns a copy of the queue's contents, oldest first.
func (q *queue[T]) Snapshot() []T {
	return slices.Clone(q.vals[q.start:])
}

356
vendor/tailscale.com/util/eventbus/subscribe.go generated vendored Normal file
View File

@@ -0,0 +1,356 @@
// Copyright (c) Tailscale Inc & AUTHORS
// SPDX-License-Identifier: BSD-3-Clause
package eventbus
import (
"context"
"fmt"
"reflect"
"runtime"
"time"
"tailscale.com/syncs"
"tailscale.com/types/logger"
"tailscale.com/util/cibuild"
)
// DeliveredEvent is an event as delivered to a subscribing client,
// annotated with the publishing and receiving clients.
type DeliveredEvent struct {
	Event any     // the event value itself
	From  *Client // the client that published the event
	To    *Client // the client the event is delivered to
}
// subscriber is a uniformly typed wrapper around Subscriber[T], so
// that debugging facilities can look at active subscribers.
type subscriber interface {
	// subscribeType reports the event type this subscriber receives.
	subscribeType() reflect.Type

	// dispatch is a function that dispatches the head value in vals to
	// a subscriber, while also handling stop and incoming queue write
	// events.
	//
	// dispatch exists because of the strongly typed Subscriber[T]
	// wrapper around subscriptions: within the bus events are boxed in an
	// 'any', and need to be unpacked to their full type before delivery
	// to the subscriber. This involves writing to a strongly-typed
	// channel, so subscribeState cannot handle that dispatch by itself -
	// but if that strongly typed send blocks, we also need to keep
	// processing other potential sources of wakeups, which is how we end
	// up at this awkward type signature and sharing of internal state
	// through dispatch.
	dispatch(ctx context.Context, vals *queue[DeliveredEvent], acceptCh func() chan DeliveredEvent, snapshot chan chan []DeliveredEvent) bool

	// Close closes the subscriber and unregisters it from its client.
	Close()
}
// subscribeState handles dispatching of events received from a Bus.
type subscribeState struct {
	client *Client

	dispatcher *worker                    // runs the pump loop
	write      chan DeliveredEvent        // inbound events to be queued and dispatched
	snapshot   chan chan []DeliveredEvent // requests for a copy of the pending queue
	debug      hook[DeliveredEvent]       // optional debug hook, run after each delivery

	outputsMu syncs.Mutex
	outputs   map[reflect.Type]subscriber // live subscribers, keyed by event type
}
// newSubscribeState returns a subscribeState for client c and starts
// its dispatch worker.
func newSubscribeState(c *Client) *subscribeState {
	ss := &subscribeState{
		client:   c,
		write:    make(chan DeliveredEvent),
		snapshot: make(chan chan []DeliveredEvent),
		outputs:  make(map[reflect.Type]subscriber),
	}
	ss.dispatcher = runWorker(ss.pump)
	return ss
}
// pump is the dispatch loop for s, run on the dispatcher worker. It
// accepts events from the bus on s.write, queues them, and delivers
// them in publication order to the matching subscriber. It returns
// when ctx is canceled, either directly or observed by a subscriber's
// dispatch.
func (s *subscribeState) pump(ctx context.Context) {
	var vals queue[DeliveredEvent]
	// acceptCh returns s.write while there is room to queue another
	// event, or nil (disabling that select case) when the queue is
	// full, so that a stalled subscriber stops accepting new events.
	acceptCh := func() chan DeliveredEvent {
		if vals.Full() {
			return nil
		}
		return s.write
	}
	for {
		if !vals.Empty() {
			val := vals.Peek()
			sub := s.subscriberFor(val.Event)
			if sub == nil {
				// Raced with unsubscribe.
				vals.Drop()
				continue
			}
			if !sub.dispatch(ctx, &vals, acceptCh, s.snapshot) {
				return
			}
			// Notify the debug hook (if any) of the completed delivery.
			if s.debug.active() {
				s.debug.run(DeliveredEvent{
					Event: val.Event,
					From:  val.From,
					To:    s.client,
				})
			}
		} else {
			// Keep the cases in this select in sync with
			// Subscriber.dispatch and SubscriberFunc.dispatch below.
			// The only difference should be that this select doesn't deliver
			// queued values to anyone, and unconditionally accepts new values.
			select {
			case val := <-s.write:
				vals.Add(val)
			case <-ctx.Done():
				return
			case ch := <-s.snapshot:
				ch <- vals.Snapshot()
			}
		}
	}
}
// snapshotQueue returns a copy of the events currently pending in s,
// or nil if s is nil or its dispatcher has already shut down.
func (s *subscribeState) snapshotQueue() []DeliveredEvent {
	if s == nil {
		return nil
	}
	reply := make(chan []DeliveredEvent)
	select {
	case <-s.dispatcher.Done():
		// Pump is gone; nobody will answer the request.
		return nil
	case s.snapshot <- reply:
		return <-reply
	}
}
// subscribeTypes returns the event types s is currently subscribed to,
// in unspecified order. A nil s has no subscriptions.
func (s *subscribeState) subscribeTypes() []reflect.Type {
	if s == nil {
		return nil
	}
	s.outputsMu.Lock()
	defer s.outputsMu.Unlock()
	types := make([]reflect.Type, 0, len(s.outputs))
	for typ := range s.outputs {
		types = append(types, typ)
	}
	return types
}
// addSubscriber registers sub to receive events of its declared type.
// It panics if a subscriber for that type is already registered.
func (s *subscribeState) addSubscriber(sub subscriber) {
	t := sub.subscribeType()
	s.outputsMu.Lock()
	defer s.outputsMu.Unlock()
	if _, dup := s.outputs[t]; dup {
		panic(fmt.Errorf("double subscription for event %s", t))
	}
	s.outputs[t] = sub
	s.client.addSubscriber(t, s)
}
// deleteSubscriber removes the subscriber for event type t, both from
// s and from the owning client's bookkeeping.
func (s *subscribeState) deleteSubscriber(t reflect.Type) {
	s.outputsMu.Lock()
	defer s.outputsMu.Unlock()
	delete(s.outputs, t)
	s.client.deleteSubscriber(t, s)
}
// subscriberFor returns the subscriber registered for val's dynamic
// type, or nil if there is none (e.g. after an unsubscribe).
func (s *subscribeState) subscriberFor(val any) subscriber {
	s.outputsMu.Lock()
	defer s.outputsMu.Unlock()
	return s.outputs[reflect.TypeOf(val)]
}
// close closes the subscribeState. It implicitly closes all Subscribers
// linked to this state, and any pending events are discarded.
func (s *subscribeState) close() {
	// Stop the pump first, so no further events are dispatched.
	s.dispatcher.StopAndWait()
	var subs map[reflect.Type]subscriber
	// Detach the subscriber map under the lock, but close the
	// subscribers outside it: Subscriber.Close calls back into
	// deleteSubscriber, which takes outputsMu and would deadlock.
	s.outputsMu.Lock()
	subs, s.outputs = s.outputs, nil
	s.outputsMu.Unlock()
	for _, sub := range subs {
		sub.Close()
	}
}
// closed returns a channel that is closed when the dispatch worker has
// stopped.
func (s *subscribeState) closed() <-chan struct{} {
	return s.dispatcher.Done()
}
// A Subscriber delivers one type of event from a [Client].
// Events are sent to the [Subscriber.Events] channel.
type Subscriber[T any] struct {
	stop       stopFlag    // stopped by Close; observed via Done
	read       chan T      // delivery channel, exposed via Events
	unregister func()      // removes this subscriber from its subscribeState
	logf       logger.Logf
	slow       *time.Timer // used to detect slow subscriber service
}
// newSubscriber returns a Subscriber[T] wired to the subscribe state r.
func newSubscriber[T any](r *subscribeState, logf logger.Logf) *Subscriber[T] {
	// The timer must exist so dispatch can Reset it, but it should not
	// be running until then.
	slowTimer := time.NewTimer(0)
	slowTimer.Stop()
	return &Subscriber[T]{
		read:       make(chan T),
		unregister: func() { r.deleteSubscriber(reflect.TypeFor[T]()) },
		logf:       logf,
		slow:       slowTimer,
	}
}
// newMonitor returns a Subscriber[T] whose values are pushed by the
// attach callback rather than pulled from a bus queue.
// NOTE(review): appears to back the debug monitoring hooks — confirm
// at callers.
func newMonitor[T any](attach func(fn func(T)) (cancel func())) *Subscriber[T] {
	ret := &Subscriber[T]{
		read: make(chan T, 100), // arbitrary, large
	}
	ret.unregister = attach(ret.monitor)
	return ret
}
// subscribeType implements part of the subscriber interface.
func (s *Subscriber[T]) subscribeType() reflect.Type {
	return reflect.TypeFor[T]()
}
// monitor delivers debugEvent to the subscriber's channel, abandoning
// the delivery if the subscriber is closed first.
func (s *Subscriber[T]) monitor(debugEvent T) {
	select {
	case s.read <- debugEvent:
	case <-s.stop.Done():
	}
}
// dispatch implements part of the subscriber interface. It delivers the
// head of vals on s.read while continuing to accept incoming events and
// snapshot requests, and reports whether the pump loop should keep
// running (false on context cancellation).
func (s *Subscriber[T]) dispatch(ctx context.Context, vals *queue[DeliveredEvent], acceptCh func() chan DeliveredEvent, snapshot chan chan []DeliveredEvent) bool {
	t := vals.Peek().Event.(T)
	start := time.Now()
	s.slow.Reset(slowSubscriberTimeout)
	defer s.slow.Stop()
	for {
		// Keep the cases in this select in sync with subscribeState.pump
		// above. The only difference should be that this select
		// delivers a value on s.read.
		select {
		case s.read <- t:
			vals.Drop()
			return true
		case val := <-acceptCh():
			vals.Add(val)
		case <-ctx.Done():
			return false
		case ch := <-snapshot:
			ch <- vals.Snapshot()
		case <-s.slow.C:
			// Complain (repeatedly) while delivery is taking too long.
			s.logf("subscriber for %T is slow (%v elapsed)", t, time.Since(start))
			s.slow.Reset(slowSubscriberTimeout)
		}
	}
}
// Events returns a channel on which the subscriber's events are
// delivered. The same channel is returned on every call.
func (s *Subscriber[T]) Events() <-chan T {
	return s.read
}
// Done returns a channel that is closed when the subscriber is
// closed, whether explicitly via [Subscriber.Close] or implicitly by
// its client or bus shutting down.
func (s *Subscriber[T]) Done() <-chan struct{} {
	return s.stop.Done()
}
// Close closes the Subscriber, indicating the caller no longer wishes
// to receive this event type. After Close, receives on
// [Subscriber.Events] block forever.
//
// If the Bus from which the Subscriber was created is closed,
// the Subscriber is implicitly closed and does not need to be closed
// separately.
func (s *Subscriber[T]) Close() {
	s.stop.Stop() // unblock receivers
	s.unregister()
}
// A SubscriberFunc delivers one type of event from a [Client].
// Events are forwarded synchronously to a function provided at construction.
type SubscriberFunc[T any] struct {
	stop       stopFlag    // stopped by Close
	read       func(T)     // the callback invoked for each delivered event
	unregister func()      // removes this subscriber from its subscribeState
	logf       logger.Logf
	slow       *time.Timer // used to detect slow subscriber service
}
// newSubscriberFunc returns a SubscriberFunc[T] that forwards events to
// f, wired to the subscribe state r.
func newSubscriberFunc[T any](r *subscribeState, f func(T), logf logger.Logf) *SubscriberFunc[T] {
	// The timer must exist so dispatch can Reset it, but it should not
	// be running until then.
	slowTimer := time.NewTimer(0)
	slowTimer.Stop()
	return &SubscriberFunc[T]{
		read:       f,
		unregister: func() { r.deleteSubscriber(reflect.TypeFor[T]()) },
		logf:       logf,
		slow:       slowTimer,
	}
}
// Close closes the SubscriberFunc, indicating the caller no longer wishes to
// receive this event type. After Close, no further events will be passed to
// the callback.
//
// NOTE(review): a callback already in flight (started by dispatch) may
// still complete after Close returns; Close does not wait for it.
//
// If the [Bus] from which s was created is closed, s is implicitly closed and
// does not need to be closed separately.
func (s *SubscriberFunc[T]) Close() { s.stop.Stop(); s.unregister() }
// subscribeType implements part of the subscriber interface, reporting
// the event type T this subscriber receives.
func (s *SubscriberFunc[T]) subscribeType() reflect.Type { return reflect.TypeFor[T]() }
// dispatch implements part of the subscriber interface. It runs the
// callback on the head of vals in a fresh goroutine and, while waiting
// for it to finish, keeps accepting incoming events and snapshot
// requests. It reports whether the pump loop should keep running
// (false on context cancellation).
func (s *SubscriberFunc[T]) dispatch(ctx context.Context, vals *queue[DeliveredEvent], acceptCh func() chan DeliveredEvent, snapshot chan chan []DeliveredEvent) bool {
	t := vals.Peek().Event.(T)
	callDone := make(chan struct{})
	// Run the callback concurrently so this loop can keep servicing
	// queue writes and snapshot requests while it executes.
	go s.runCallback(t, callDone)
	start := time.Now()
	s.slow.Reset(slowSubscriberTimeout)
	defer s.slow.Stop()

	// Keep the cases in this select in sync with subscribeState.pump
	// above. The only difference should be that this select
	// delivers a value by calling s.read.
	for {
		select {
		case <-callDone:
			vals.Drop()
			return true
		case val := <-acceptCh():
			vals.Add(val)
		case <-ctx.Done():
			// Wait for the callback to be complete, but not forever.
			s.slow.Reset(5 * slowSubscriberTimeout)
			select {
			case <-s.slow.C:
				s.logf("giving up on subscriber for %T after %v at close", t, time.Since(start))
				if cibuild.On() {
					// In CI, dump all goroutine stacks to help
					// diagnose the hung callback.
					all := make([]byte, 2<<20)
					n := runtime.Stack(all, true)
					s.logf("goroutine stacks:\n%s", all[:n])
				}
			case <-callDone:
			}
			return false
		case ch := <-snapshot:
			ch <- vals.Snapshot()
		case <-s.slow.C:
			// Complain (repeatedly) while the callback runs long.
			s.logf("subscriber for %T is slow (%v elapsed)", t, time.Since(start))
			s.slow.Reset(slowSubscriberTimeout)
		}
	}
}
// runCallback invokes the callback on v and closes ch when it returns.
// This should be run in a goroutine; ch is closed even if the callback
// panics, so dispatch is not left blocked.
func (s *SubscriberFunc[T]) runCallback(v T, ch chan struct{}) {
	defer close(ch)
	s.read(v)
}