Update dependencies

bluepython508
2024-11-01 17:33:34 +00:00
parent 033ac0b400
commit 5cdfab398d
3596 changed files with 1033483 additions and 259 deletions

2
vendor/tailscale.com/.gitattributes generated vendored Normal file

@@ -0,0 +1,2 @@
go.mod filter=go-mod
*.go diff=golang

51
vendor/tailscale.com/.gitignore generated vendored Normal file

@@ -0,0 +1,51 @@
# Binaries for programs and plugins
*~
*.tmp
*.exe
*.dll
*.so
*.dylib
*.spk
cmd/tailscale/tailscale
cmd/tailscaled/tailscaled
ssh/tailssh/testcontainers/tailscaled
# Test binary, built with `go test -c`
*.test
# Output of the go coverage tool, specifically when used with LiteIDE
*.out
# Dependency directories (remove the comment below to include it)
# vendor/
# direnv config, this may be different for other people so it's probably safer
# to make this nonspecific.
.envrc
# Ignore personal VS Code settings
.vscode/
# Support personal project-specific GOPATH
.gopath/
# Ignore nix build result path
/result
# Ignore direnv nix-shell environment cache
.direnv/
# Ignore web client node modules
.vite/
client/web/node_modules
client/web/build/assets
/gocross
/dist
# Ignore xcode userstate and workspace data
*.xcuserstate
*.xcworkspacedata
/tstest/tailmac/bin
/tstest/tailmac/build

104
vendor/tailscale.com/.golangci.yml generated vendored Normal file

@@ -0,0 +1,104 @@
linters:
# Don't enable any linters by default; just the ones that we explicitly
# enable in the list below.
disable-all: true
enable:
- bidichk
- gofmt
- goimports
- govet
- misspell
- revive
# Configuration for how we run golangci-lint
run:
timeout: 5m
issues:
# Excluding configuration per-path, per-linter, per-text and per-source
exclude-rules:
# These are forks of an upstream package and thus are exempt from stylistic
# changes that would make pulling in upstream changes harder.
- path: tempfork/.*\.go
text: "File is not `gofmt`-ed with `-s` `-r 'interface{} -> any'`"
- path: util/singleflight/.*\.go
text: "File is not `gofmt`-ed with `-s` `-r 'interface{} -> any'`"
# Per-linter settings are contained in this top-level key
linters-settings:
# Enable all rules by default; we don't use invisible unicode runes.
bidichk:
gofmt:
rewrite-rules:
- pattern: 'interface{}'
replacement: 'any'
goimports:
govet:
# Matches what we use in corp as of 2023-12-07
enable:
- asmdecl
- assign
- atomic
- bools
- buildtag
- cgocall
- copylocks
- deepequalerrors
- errorsas
- framepointer
- httpresponse
- ifaceassert
- loopclosure
- lostcancel
- nilfunc
- nilness
- printf
- reflectvaluecompare
- shift
- sigchanyzer
- sortslice
- stdmethods
- stringintconv
- structtag
- testinggoroutine
- tests
- unmarshal
- unreachable
- unsafeptr
- unusedresult
settings:
printf:
# List of print function names to check (in addition to default)
funcs:
- github.com/tailscale/tailscale/types/logger.Discard
# NOTE(andrew-d): this doesn't currently work because the printf
# analyzer doesn't support type declarations
#- github.com/tailscale/tailscale/types/logger.Logf
misspell:
revive:
enable-all-rules: false
ignore-generated-header: true
rules:
- name: atomic
- name: context-keys-type
- name: defer
arguments: [[
# Calling 'recover' at the time a defer is registered (i.e. "defer recover()") has no effect.
"immediate-recover",
# Calling 'recover' outside of a deferred function has no effect
"recover",
# Returning values from a deferred function has no effect
"return",
]]
- name: duplicated-imports
- name: errorf
- name: string-of-int
- name: time-equal
- name: unconditional-recursion
- name: useless-break
- name: waitgroup-by-value

1
vendor/tailscale.com/ALPINE.txt generated vendored Normal file

@@ -0,0 +1 @@
3.18

17
vendor/tailscale.com/AUTHORS generated vendored Normal file

@@ -0,0 +1,17 @@
# This is the official list of Tailscale
# authors for copyright purposes.
#
# Names should be added to this file as one of
# Organization's name
# Individual's name <submission email address>
# Individual's name <submission email address> <email2> <emailN>
#
# Please keep the list sorted.
#
# You do not need to add entries to this list, and we don't actively
# populate this list. If you do want to be acknowledged explicitly as
# a copyright holder, though, then please send a PR referencing your
# earlier contributions and clarifying whether it's you or your
# company that owns the rights to your contribution.
Tailscale Inc.

1
vendor/tailscale.com/CODEOWNERS generated vendored Normal file

@@ -0,0 +1 @@
/tailcfg/ @tailscale/control-protocol-owners

135
vendor/tailscale.com/CODE_OF_CONDUCT.md generated vendored Normal file

@@ -0,0 +1,135 @@
# Contributor Covenant Code of Conduct
## Our Pledge
We as members, contributors, and leaders pledge to make participation
in our community a harassment-free experience for everyone, regardless
of age, body size, visible or invisible disability, ethnicity, sex
characteristics, gender identity and expression, level of experience,
education, socio-economic status, nationality, personal appearance,
race, religion, or sexual identity and orientation.
We pledge to act and interact in ways that contribute to an open,
welcoming, diverse, inclusive, and healthy community.
## Our Standards
Examples of behavior that contributes to a positive environment for
our community include:
* Demonstrating empathy and kindness toward other people
* Being respectful of differing opinions, viewpoints, and experiences
* Giving and gracefully accepting constructive feedback
* Accepting responsibility and apologizing to those affected by our
mistakes, and learning from the experience
* Focusing on what is best not just for us as individuals, but for the
overall community
Examples of unacceptable behavior include:
* The use of sexualized language or imagery, and sexual attention or
advances of any kind
* Trolling, insulting or derogatory comments, and personal or
political attacks
* Public or private harassment
* Publishing others' private information, such as a physical or email
address, without their explicit permission
* Other conduct which could reasonably be considered inappropriate in
a professional setting
## Enforcement Responsibilities
Community leaders are responsible for clarifying and enforcing our
standards of acceptable behavior and will take appropriate and fair
corrective action in response to any behavior that they deem
inappropriate, threatening, offensive, or harmful.
Community leaders have the right and responsibility to remove, edit,
or reject comments, commits, code, wiki edits, issues, and other
contributions that are not aligned to this Code of Conduct, and will
communicate reasons for moderation decisions when appropriate.
## Scope
This Code of Conduct applies within all community spaces, and also
applies when an individual is officially representing the community in
public spaces. Examples of representing our community include using an
official e-mail address, posting via an official social media account,
or acting as an appointed representative at an online or offline
event.
## Enforcement
Instances of abusive, harassing, or otherwise unacceptable behavior
may be reported to the community leaders responsible for enforcement
at [info@tailscale.com](mailto:info@tailscale.com). All complaints
will be reviewed and investigated promptly and fairly.
All community leaders are obligated to respect the privacy and
security of the reporter of any incident.
## Enforcement Guidelines
Community leaders will follow these Community Impact Guidelines in
determining the consequences for any action they deem in violation of
this Code of Conduct:
### 1. Correction
**Community Impact**: Use of inappropriate language or other behavior
deemed unprofessional or unwelcome in the community.
**Consequence**: A private, written warning from community leaders,
providing clarity around the nature of the violation and an
explanation of why the behavior was inappropriate. A public apology
may be requested.
### 2. Warning
**Community Impact**: A violation through a single incident or series
of actions.
**Consequence**: A warning with consequences for continued
behavior. No interaction with the people involved, including
unsolicited interaction with those enforcing the Code of Conduct, for
a specified period of time. This includes avoiding interactions in
community spaces as well as external channels like social
media. Violating these terms may lead to a temporary or permanent ban.
### 3. Temporary Ban
**Community Impact**: A serious violation of community standards,
including sustained inappropriate behavior.
**Consequence**: A temporary ban from any sort of interaction or
public communication with the community for a specified period of
time. No public or private interaction with the people involved,
including unsolicited interaction with those enforcing the Code of
Conduct, is allowed during this period. Violating these terms may lead
to a permanent ban.
### 4. Permanent Ban
**Community Impact**: Demonstrating a pattern of violation of
community standards, including sustained inappropriate behavior,
harassment of an individual, or aggression toward or disparagement of
classes of individuals.
**Consequence**: A permanent ban from any sort of public interaction
within the community.
## Attribution
This Code of Conduct is adapted from the [Contributor
Covenant][homepage], version 2.0, available at
https://www.contributor-covenant.org/version/2/0/code_of_conduct.html.
Community Impact Guidelines were inspired by [Mozilla's code of
conduct enforcement ladder](https://github.com/mozilla/diversity).
[homepage]: https://www.contributor-covenant.org
For answers to common questions about this code of conduct, see the
FAQ at https://www.contributor-covenant.org/faq. Translations are
available at https://www.contributor-covenant.org/translations.

71
vendor/tailscale.com/Dockerfile generated vendored Normal file

@@ -0,0 +1,71 @@
# Copyright (c) Tailscale Inc & AUTHORS
# SPDX-License-Identifier: BSD-3-Clause
# Note that this Dockerfile is currently NOT used to build any of the published
# Tailscale container images and may have drifted from the image build mechanism
# we use.
# Tailscale images are currently built using https://github.com/tailscale/mkctr,
# and the build script can be found in ./build_docker.sh.
#
#
# This Dockerfile includes all the tailscale binaries.
#
# To build the Dockerfile:
#
# $ docker build -t tailscale/tailscale .
#
# To run the tailscaled agent:
#
# $ docker run -d --name=tailscaled -v /var/lib:/var/lib -v /dev/net/tun:/dev/net/tun --network=host --privileged tailscale/tailscale tailscaled
#
# To then log in:
#
# $ docker exec tailscaled tailscale up
#
# To see status:
#
# $ docker exec tailscaled tailscale status
FROM golang:1.23-alpine AS build-env
WORKDIR /go/src/tailscale
COPY go.mod go.sum ./
RUN go mod download
# Pre-build some stuff before the following COPY line invalidates the Docker cache.
RUN go install \
github.com/aws/aws-sdk-go-v2/aws \
github.com/aws/aws-sdk-go-v2/config \
gvisor.dev/gvisor/pkg/tcpip/adapters/gonet \
gvisor.dev/gvisor/pkg/tcpip/stack \
golang.org/x/crypto/ssh \
golang.org/x/crypto/acme \
github.com/coder/websocket \
github.com/mdlayher/netlink
COPY . .
# see build_docker.sh
ARG VERSION_LONG=""
ENV VERSION_LONG=$VERSION_LONG
ARG VERSION_SHORT=""
ENV VERSION_SHORT=$VERSION_SHORT
ARG VERSION_GIT_HASH=""
ENV VERSION_GIT_HASH=$VERSION_GIT_HASH
ARG TARGETARCH
RUN GOARCH=$TARGETARCH go install -ldflags="\
-X tailscale.com/version.longStamp=$VERSION_LONG \
-X tailscale.com/version.shortStamp=$VERSION_SHORT \
-X tailscale.com/version.gitCommitStamp=$VERSION_GIT_HASH" \
-v ./cmd/tailscale ./cmd/tailscaled ./cmd/containerboot
FROM alpine:3.18
RUN apk add --no-cache ca-certificates iptables iproute2 ip6tables
COPY --from=build-env /go/bin/* /usr/local/bin/
# For compat with the previous run.sh, although ideally you should be
# using build_docker.sh which sets an entrypoint for the image.
RUN mkdir /tailscale && ln -s /usr/local/bin/containerboot /tailscale/run.sh

5
vendor/tailscale.com/Dockerfile.base generated vendored Normal file

@@ -0,0 +1,5 @@
# Copyright (c) Tailscale Inc & AUTHORS
# SPDX-License-Identifier: BSD-3-Clause
FROM alpine:3.18
RUN apk add --no-cache ca-certificates iptables iproute2 ip6tables iputils

28
vendor/tailscale.com/LICENSE generated vendored Normal file

@@ -0,0 +1,28 @@
BSD 3-Clause License
Copyright (c) 2020 Tailscale Inc & AUTHORS.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
1. Redistributions of source code must retain the above copyright notice, this
list of conditions and the following disclaimer.
2. Redistributions in binary form must reproduce the above copyright notice,
this list of conditions and the following disclaimer in the documentation
and/or other materials provided with the distribution.
3. Neither the name of the copyright holder nor the names of its
contributors may be used to endorse or promote products derived from
this software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

129
vendor/tailscale.com/Makefile generated vendored Normal file

@@ -0,0 +1,129 @@
IMAGE_REPO ?= tailscale/tailscale
SYNO_ARCH ?= "x86_64"
SYNO_DSM ?= "7"
TAGS ?= "latest"
PLATFORM ?= "flyio" ## flyio==linux/amd64. Set to "" to build all platforms.
vet: ## Run go vet
./tool/go vet ./...
tidy: ## Run go mod tidy
./tool/go mod tidy
lint: ## Run golangci-lint
./tool/go run github.com/golangci/golangci-lint/cmd/golangci-lint run
updatedeps: ## Update depaware deps
# depaware (via x/tools/go/packages) shells back to "go", so make sure the "go"
# it finds in its $$PATH is the right one.
PATH="$$(./tool/go env GOROOT)/bin:$$PATH" ./tool/go run github.com/tailscale/depaware --update \
tailscale.com/cmd/tailscaled \
tailscale.com/cmd/tailscale \
tailscale.com/cmd/derper \
tailscale.com/cmd/k8s-operator \
tailscale.com/cmd/stund
depaware: ## Run depaware checks
# depaware (via x/tools/go/packages) shells back to "go", so make sure the "go"
# it finds in its $$PATH is the right one.
PATH="$$(./tool/go env GOROOT)/bin:$$PATH" ./tool/go run github.com/tailscale/depaware --check \
tailscale.com/cmd/tailscaled \
tailscale.com/cmd/tailscale \
tailscale.com/cmd/derper \
tailscale.com/cmd/k8s-operator \
tailscale.com/cmd/stund
buildwindows: ## Build tailscale CLI for windows/amd64
GOOS=windows GOARCH=amd64 ./tool/go install tailscale.com/cmd/tailscale tailscale.com/cmd/tailscaled
build386: ## Build tailscale CLI for linux/386
GOOS=linux GOARCH=386 ./tool/go install tailscale.com/cmd/tailscale tailscale.com/cmd/tailscaled
buildlinuxarm: ## Build tailscale CLI for linux/arm
GOOS=linux GOARCH=arm ./tool/go install tailscale.com/cmd/tailscale tailscale.com/cmd/tailscaled
buildwasm: ## Build tailscale CLI for js/wasm
GOOS=js GOARCH=wasm ./tool/go install ./cmd/tsconnect/wasm ./cmd/tailscale/cli
buildplan9:
GOOS=plan9 GOARCH=amd64 ./tool/go install ./cmd/tailscale ./cmd/tailscaled
buildlinuxloong64: ## Build tailscale CLI for linux/loong64
GOOS=linux GOARCH=loong64 ./tool/go install tailscale.com/cmd/tailscale tailscale.com/cmd/tailscaled
buildmultiarchimage: ## Build (and optionally push) multiarch docker image
./build_docker.sh
check: staticcheck vet depaware buildwindows build386 buildlinuxarm buildwasm ## Perform basic checks and compilation tests
staticcheck: ## Run staticcheck.io checks
./tool/go run honnef.co/go/tools/cmd/staticcheck -- $$(./tool/go list ./... | grep -v tempfork)
kube-generate-all: kube-generate-deepcopy ## Refresh generated files for Tailscale Kubernetes Operator
./tool/go generate ./cmd/k8s-operator
# Tailscale operator watches Connector custom resources in a Kubernetes cluster
# and caches them locally. Caching is done implicitly by controller-runtime
# library (the middleware used by Tailscale operator to create kube control
# loops). When a Connector resource is GET/LIST-ed from within our control loop,
# the request goes through the cache. To ensure that cache contents don't get
# modified by control loops, controller-runtime deep copies the requested
# object. In order for this to work, Connector must implement deep copy
# functionality so we autogenerate it here.
# https://github.com/kubernetes-sigs/controller-runtime/blob/v0.16.3/pkg/cache/internal/cache_reader.go#L86-L89
kube-generate-deepcopy: ## Refresh generated deepcopy functionality for Tailscale kube API types
./scripts/kube-deepcopy.sh
spk: ## Build synology package for ${SYNO_ARCH} architecture and ${SYNO_DSM} DSM version
./tool/go run ./cmd/dist build synology/dsm${SYNO_DSM}/${SYNO_ARCH}
spkall: ## Build synology packages for all architectures and DSM versions
./tool/go run ./cmd/dist build synology
pushspk: spk ## Push and install synology package on ${SYNO_HOST} host
echo "Pushing SPK to root@${SYNO_HOST} (env var SYNO_HOST) ..."
scp tailscale.spk root@${SYNO_HOST}:
ssh root@${SYNO_HOST} /usr/syno/bin/synopkg install tailscale.spk
publishdevimage: ## Build and publish tailscale image to location specified by ${REPO}
@test -n "${REPO}" || (echo "REPO=... required; e.g. REPO=ghcr.io/${USER}/tailscale" && exit 1)
@test "${REPO}" != "tailscale/tailscale" || (echo "REPO=... must not be tailscale/tailscale" && exit 1)
@test "${REPO}" != "ghcr.io/tailscale/tailscale" || (echo "REPO=... must not be ghcr.io/tailscale/tailscale" && exit 1)
@test "${REPO}" != "tailscale/k8s-operator" || (echo "REPO=... must not be tailscale/k8s-operator" && exit 1)
@test "${REPO}" != "ghcr.io/tailscale/k8s-operator" || (echo "REPO=... must not be ghcr.io/tailscale/k8s-operator" && exit 1)
TAGS="${TAGS}" REPOS=${REPO} PLATFORM=${PLATFORM} PUSH=true TARGET=client ./build_docker.sh
publishdevoperator: ## Build and publish k8s-operator image to location specified by ${REPO}
@test -n "${REPO}" || (echo "REPO=... required; e.g. REPO=ghcr.io/${USER}/tailscale" && exit 1)
@test "${REPO}" != "tailscale/tailscale" || (echo "REPO=... must not be tailscale/tailscale" && exit 1)
@test "${REPO}" != "ghcr.io/tailscale/tailscale" || (echo "REPO=... must not be ghcr.io/tailscale/tailscale" && exit 1)
@test "${REPO}" != "tailscale/k8s-operator" || (echo "REPO=... must not be tailscale/k8s-operator" && exit 1)
@test "${REPO}" != "ghcr.io/tailscale/k8s-operator" || (echo "REPO=... must not be ghcr.io/tailscale/k8s-operator" && exit 1)
TAGS="${TAGS}" REPOS=${REPO} PLATFORM=${PLATFORM} PUSH=true TARGET=operator ./build_docker.sh
publishdevnameserver: ## Build and publish k8s-nameserver image to location specified by ${REPO}
@test -n "${REPO}" || (echo "REPO=... required; e.g. REPO=ghcr.io/${USER}/tailscale" && exit 1)
@test "${REPO}" != "tailscale/tailscale" || (echo "REPO=... must not be tailscale/tailscale" && exit 1)
@test "${REPO}" != "ghcr.io/tailscale/tailscale" || (echo "REPO=... must not be ghcr.io/tailscale/tailscale" && exit 1)
@test "${REPO}" != "tailscale/k8s-nameserver" || (echo "REPO=... must not be tailscale/k8s-nameserver" && exit 1)
@test "${REPO}" != "ghcr.io/tailscale/k8s-nameserver" || (echo "REPO=... must not be ghcr.io/tailscale/k8s-nameserver" && exit 1)
TAGS="${TAGS}" REPOS=${REPO} PLATFORM=${PLATFORM} PUSH=true TARGET=k8s-nameserver ./build_docker.sh
.PHONY: sshintegrationtest
sshintegrationtest: ## Run the SSH integration tests in various Docker containers
@GOOS=linux GOARCH=amd64 ./tool/go test -tags integrationtest -c ./ssh/tailssh -o ssh/tailssh/testcontainers/tailssh.test && \
GOOS=linux GOARCH=amd64 ./tool/go build -o ssh/tailssh/testcontainers/tailscaled ./cmd/tailscaled && \
echo "Testing on ubuntu:focal" && docker build --build-arg="BASE=ubuntu:focal" -t ssh-ubuntu-focal ssh/tailssh/testcontainers && \
echo "Testing on ubuntu:jammy" && docker build --build-arg="BASE=ubuntu:jammy" -t ssh-ubuntu-jammy ssh/tailssh/testcontainers && \
echo "Testing on ubuntu:mantic" && docker build --build-arg="BASE=ubuntu:mantic" -t ssh-ubuntu-mantic ssh/tailssh/testcontainers && \
echo "Testing on ubuntu:noble" && docker build --build-arg="BASE=ubuntu:noble" -t ssh-ubuntu-noble ssh/tailssh/testcontainers && \
echo "Testing on alpine:latest" && docker build --build-arg="BASE=alpine:latest" -t ssh-alpine-latest ssh/tailssh/testcontainers
help: ## Show this help
@echo "\nSpecify a command. The choices are:\n"
@grep -hE '^[0-9a-zA-Z_-]+:.*?## .*$$' ${MAKEFILE_LIST} | awk 'BEGIN {FS = ":.*?## "}; {printf " \033[0;36m%-20s\033[m %s\n", $$1, $$2}'
@echo ""
.PHONY: help
.DEFAULT_GOAL := help
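# Example (illustrative): the SYNO_* defaults above can be overridden per
# invocation, e.g.:
#   make spk SYNO_ARCH=x86_64 SYNO_DSM=7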

24
vendor/tailscale.com/PATENTS generated vendored Normal file

@@ -0,0 +1,24 @@
Additional IP Rights Grant (Patents)
"This implementation" means the copyrightable works distributed by
Tailscale Inc. as part of the Tailscale project.
Tailscale Inc. hereby grants to You a perpetual, worldwide,
non-exclusive, no-charge, royalty-free, irrevocable (except as stated
in this section) patent license to make, have made, use, offer to
sell, sell, import, transfer and otherwise run, modify and propagate
the contents of this implementation of Tailscale, where such license
applies only to those patent claims, both currently owned or
controlled by Tailscale Inc. and acquired in the future, licensable
by Tailscale Inc. that are necessarily infringed by this
implementation of Tailscale. This grant does not include claims that
would be infringed only as a consequence of further modification of
this implementation. If you or your agent or exclusive licensee
institute or order or agree to the institution of patent litigation
against any entity (including a cross-claim or counterclaim in a
lawsuit) alleging that this implementation of Tailscale or any code
incorporated within this implementation of Tailscale constitutes
direct or contributory patent infringement, or inducement of patent
infringement, then any patent rights granted to you under this License
for this implementation of Tailscale shall terminate as of the date
such litigation is filed.

88
vendor/tailscale.com/README.md generated vendored Normal file

@@ -0,0 +1,88 @@
# Tailscale
https://tailscale.com
Private WireGuard® networks made easy
## Overview
This repository contains the majority of Tailscale's open source code.
Notably, it includes the `tailscaled` daemon and
the `tailscale` CLI tool. The `tailscaled` daemon runs on Linux, Windows,
[macOS](https://tailscale.com/kb/1065/macos-variants/), and to varying degrees
on FreeBSD and OpenBSD. The Tailscale iOS and Android apps use this repo's
code, but this repo doesn't contain the mobile GUI code.
Other [Tailscale repos](https://github.com/orgs/tailscale/repositories) of note:
* the Android app is at https://github.com/tailscale/tailscale-android
* the Synology package is at https://github.com/tailscale/tailscale-synology
* the QNAP package is at https://github.com/tailscale/tailscale-qpkg
* the Chocolatey packaging is at https://github.com/tailscale/tailscale-chocolatey
For background on which parts of Tailscale are open source and why,
see [https://tailscale.com/opensource/](https://tailscale.com/opensource/).
## Using
We serve packages for a variety of distros and platforms at
[https://pkgs.tailscale.com](https://pkgs.tailscale.com/).
## Other clients
The [macOS, iOS, and Windows clients](https://tailscale.com/download)
use the code in this repository but additionally include small GUI
wrappers. The GUI wrappers on non-open source platforms are themselves
not open source.
## Building
We always require the latest Go release, currently Go 1.23. (While we build
releases with our [Go fork](https://github.com/tailscale/go/), its use is not
required.)
```
go install tailscale.com/cmd/tailscale{,d}
```
If you're packaging Tailscale for distribution, use `build_dist.sh`
instead, to burn commit IDs and version info into the binaries:
```
./build_dist.sh tailscale.com/cmd/tailscale
./build_dist.sh tailscale.com/cmd/tailscaled
```
If your distro has conventions that preclude the use of
`build_dist.sh`, please do the equivalent of what it does in your
distro's way, so that bug reports contain useful version information.
## Bugs
Please file any issues about this code or the hosted service on
[the issue tracker](https://github.com/tailscale/tailscale/issues).
## Contributing
PRs welcome! But please file bugs. Commit messages should [reference
bugs](https://docs.github.com/en/github/writing-on-github/autolinked-references-and-urls).
We require [Developer Certificate of
Origin](https://en.wikipedia.org/wiki/Developer_Certificate_of_Origin)
`Signed-off-by` lines in commits.
See `git log` for our commit message style. It's basically the same as
[Go's style](https://github.com/golang/go/wiki/CommitMessage).
## About Us
[Tailscale](https://tailscale.com/) is primarily developed by the
people at https://github.com/orgs/tailscale/people. For other contributors,
see:
* https://github.com/tailscale/tailscale/graphs/contributors
* https://github.com/tailscale/tailscale-android/graphs/contributors
## Legal
WireGuard is a registered trademark of Jason A. Donenfeld.

8
vendor/tailscale.com/SECURITY.md generated vendored Normal file

@@ -0,0 +1,8 @@
# Security Policy
## Reporting a Vulnerability
You can report vulnerabilities privately to
[security@tailscale.com](mailto:security@tailscale.com). Tailscale
staff will triage the issue, and work with you on a coordinated
disclosure timeline.

1
vendor/tailscale.com/VERSION.txt generated vendored Normal file

@@ -0,0 +1 @@
1.76.3

2
vendor/tailscale.com/api.md generated vendored Normal file

@@ -0,0 +1,2 @@
> [!IMPORTANT]
> The Tailscale API documentation has moved to https://tailscale.com/api

596
vendor/tailscale.com/appc/appconnector.go generated vendored Normal file

@@ -0,0 +1,596 @@
// Copyright (c) Tailscale Inc & AUTHORS
// SPDX-License-Identifier: BSD-3-Clause
// Package appc implements App Connectors.
// An AppConnector provides DNS domain oriented routing of traffic. An App
// Connector becomes a DNS server for a peer, authoritative for the set of
// configured domains. DNS resolution of the target domain triggers dynamic
// publication of routes to ensure that traffic to the domain is routed through
// the App Connector.
package appc
import (
"context"
"fmt"
"net/netip"
"slices"
"strings"
"sync"
"time"
xmaps "golang.org/x/exp/maps"
"golang.org/x/net/dns/dnsmessage"
"tailscale.com/types/logger"
"tailscale.com/types/views"
"tailscale.com/util/clientmetric"
"tailscale.com/util/dnsname"
"tailscale.com/util/execqueue"
"tailscale.com/util/mak"
"tailscale.com/util/slicesx"
)
// rateLogger responds to calls to update by adding a count for the current period and
// calling the callback if any previous period has finished since update was last called
type rateLogger struct {
interval time.Duration
start time.Time
periodStart time.Time
periodCount int64
now func() time.Time
callback func(int64, time.Time, int64)
}
func (rl *rateLogger) currentIntervalStart(now time.Time) time.Time {
millisSince := now.Sub(rl.start).Milliseconds() % rl.interval.Milliseconds()
return now.Add(-(time.Duration(millisSince)) * time.Millisecond)
}
func (rl *rateLogger) update(numRoutes int64) {
now := rl.now()
periodEnd := rl.periodStart.Add(rl.interval)
if periodEnd.Before(now) {
if rl.periodCount != 0 {
rl.callback(rl.periodCount, rl.periodStart, numRoutes)
}
rl.periodCount = 0
rl.periodStart = rl.currentIntervalStart(now)
}
rl.periodCount++
}
func newRateLogger(now func() time.Time, interval time.Duration, callback func(int64, time.Time, int64)) *rateLogger {
nowTime := now()
return &rateLogger{
callback: callback,
now: now,
interval: interval,
start: nowTime,
periodStart: nowTime,
}
}
// RouteAdvertiser is an interface that allows the AppConnector to advertise
// newly discovered routes that need to be served through the AppConnector.
type RouteAdvertiser interface {
// AdvertiseRoute adds one or more route advertisements skipping any that
// are already advertised.
AdvertiseRoute(...netip.Prefix) error
// UnadvertiseRoute removes any matching route advertisements.
UnadvertiseRoute(...netip.Prefix) error
}
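// Illustrative sketch only (not part of the upstream API): a minimal
// in-memory RouteAdvertiser that records the advertised prefixes. In
// tailscaled the real advertiser updates the local node configuration so
// that the new routes are served through this node.
type memoryRouteAdvertiser struct {
mu sync.Mutex
routes map[netip.Prefix]bool
}
func (m *memoryRouteAdvertiser) AdvertiseRoute(prefixes ...netip.Prefix) error {
m.mu.Lock()
defer m.mu.Unlock()
if m.routes == nil {
m.routes = make(map[netip.Prefix]bool)
}
for _, p := range prefixes {
m.routes[p] = true // duplicates are skipped implicitly: the prefix is the map key
}
return nil
}
func (m *memoryRouteAdvertiser) UnadvertiseRoute(prefixes ...netip.Prefix) error {
m.mu.Lock()
defer m.mu.Unlock()
for _, p := range prefixes {
delete(m.routes, p)
}
return nil
}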
var (
metricStoreRoutesRateBuckets = []int64{1, 2, 3, 4, 5, 10, 100, 1000}
metricStoreRoutesNBuckets = []int64{1, 2, 3, 4, 5, 10, 100, 1000, 10000}
metricStoreRoutesRate []*clientmetric.Metric
metricStoreRoutesN []*clientmetric.Metric
)
func initMetricStoreRoutes() {
for _, n := range metricStoreRoutesRateBuckets {
metricStoreRoutesRate = append(metricStoreRoutesRate, clientmetric.NewCounter(fmt.Sprintf("appc_store_routes_rate_%d", n)))
}
metricStoreRoutesRate = append(metricStoreRoutesRate, clientmetric.NewCounter("appc_store_routes_rate_over"))
for _, n := range metricStoreRoutesNBuckets {
metricStoreRoutesN = append(metricStoreRoutesN, clientmetric.NewCounter(fmt.Sprintf("appc_store_routes_n_routes_%d", n)))
}
metricStoreRoutesN = append(metricStoreRoutesN, clientmetric.NewCounter("appc_store_routes_n_routes_over"))
}
func recordMetric(val int64, buckets []int64, metrics []*clientmetric.Metric) {
if len(buckets) < 1 {
return
}
// Find the first bucket whose value is >= val, or len(buckets) if none match.
// For bucket values of 1, 10, 100: 0-1 goes to [0], 2-10 goes to [1], 11-100 goes to [2], and 101+ goes to [3].
bucket, _ := slices.BinarySearch(buckets, val)
metrics[bucket].Add(1)
}
func metricStoreRoutes(rate, nRoutes int64) {
if len(metricStoreRoutesRate) == 0 {
initMetricStoreRoutes()
}
recordMetric(rate, metricStoreRoutesRateBuckets, metricStoreRoutesRate)
recordMetric(nRoutes, metricStoreRoutesNBuckets, metricStoreRoutesN)
}
// RouteInfo is a data structure used to persist the in-memory state of an AppConnector
// so that we can know, even after a restart, which routes came from ACLs and which were
// learned from domains.
type RouteInfo struct {
// Control is the routes from the 'routes' section of an app connector acl.
Control []netip.Prefix `json:",omitempty"`
// Domains are the routes discovered by observing DNS lookups for configured domains.
Domains map[string][]netip.Addr `json:",omitempty"`
// Wildcards are the configured DNS lookup domains to observe. When a DNS query matches Wildcards,
// its result is added to Domains.
Wildcards []string `json:",omitempty"`
}
// AppConnector is an implementation of an App Connector that runs as a
// subsystem inside a tailscale node. On the control plane side, App Connector
// routing is configured in terms of domains rather than IP addresses.
// The AppConnector's responsibility inside tailscaled is to apply the routing
// and domain configuration supplied in the map response.
// DNS requests for configured domains are observed. If the domains resolve to
// routes not yet served by the AppConnector the local node configuration is
// updated to advertise the new route.
type AppConnector struct {
logf logger.Logf
routeAdvertiser RouteAdvertiser
// storeRoutesFunc will be called to persist routes if it is not nil.
storeRoutesFunc func(*RouteInfo) error
// mu guards the fields that follow
mu sync.Mutex
// domains is a map of lower case domain names with no trailing dot, to an
// ordered list of resolved IP addresses.
domains map[string][]netip.Addr
// controlRoutes is the list of routes that were last supplied by control.
controlRoutes []netip.Prefix
// wildcards is the list of domain strings that match subdomains.
wildcards []string
// queue provides ordering for update operations
queue execqueue.ExecQueue
writeRateMinute *rateLogger
writeRateDay *rateLogger
}
// NewAppConnector creates a new AppConnector.
func NewAppConnector(logf logger.Logf, routeAdvertiser RouteAdvertiser, routeInfo *RouteInfo, storeRoutesFunc func(*RouteInfo) error) *AppConnector {
ac := &AppConnector{
logf: logger.WithPrefix(logf, "appc: "),
routeAdvertiser: routeAdvertiser,
storeRoutesFunc: storeRoutesFunc,
}
if routeInfo != nil {
ac.domains = routeInfo.Domains
ac.wildcards = routeInfo.Wildcards
ac.controlRoutes = routeInfo.Control
}
ac.writeRateMinute = newRateLogger(time.Now, time.Minute, func(c int64, s time.Time, l int64) {
ac.logf("routeInfo write rate: %d in minute starting at %v (%d routes)", c, s, l)
metricStoreRoutes(c, l)
})
ac.writeRateDay = newRateLogger(time.Now, 24*time.Hour, func(c int64, s time.Time, l int64) {
ac.logf("routeInfo write rate: %d in 24 hours starting at %v (%d routes)", c, s, l)
})
return ac
}
// ShouldStoreRoutes returns true if the appconnector was created with the controlknob on
// and is storing its discovered routes persistently.
func (e *AppConnector) ShouldStoreRoutes() bool {
return e.storeRoutesFunc != nil
}
// storeRoutesLocked takes the current state of the AppConnector and persists it
func (e *AppConnector) storeRoutesLocked() error {
if !e.ShouldStoreRoutes() {
return nil
}
// log write rate and write size
numRoutes := int64(len(e.controlRoutes))
for _, rs := range e.domains {
numRoutes += int64(len(rs))
}
e.writeRateMinute.update(numRoutes)
e.writeRateDay.update(numRoutes)
return e.storeRoutesFunc(&RouteInfo{
Control: e.controlRoutes,
Domains: e.domains,
Wildcards: e.wildcards,
})
}
// ClearRoutes removes all route state from the AppConnector.
func (e *AppConnector) ClearRoutes() error {
e.mu.Lock()
defer e.mu.Unlock()
e.controlRoutes = nil
e.domains = nil
e.wildcards = nil
return e.storeRoutesLocked()
}
// UpdateDomainsAndRoutes starts an asynchronous update of the configuration
// given the new domains and routes.
func (e *AppConnector) UpdateDomainsAndRoutes(domains []string, routes []netip.Prefix) {
e.queue.Add(func() {
// Add the new routes first.
e.updateRoutes(routes)
e.updateDomains(domains)
})
}
// UpdateDomains asynchronously replaces the current set of configured domains
// with the supplied set of domains. Domains must not contain a trailing dot,
// and should be lower case. If a domain begins with a '*.' label, it
// matches all subdomains of that domain.
func (e *AppConnector) UpdateDomains(domains []string) {
e.queue.Add(func() {
e.updateDomains(domains)
})
}
// Wait waits for the currently scheduled asynchronous configuration changes to
// complete.
func (e *AppConnector) Wait(ctx context.Context) {
e.queue.Wait(ctx)
}
func (e *AppConnector) updateDomains(domains []string) {
e.mu.Lock()
defer e.mu.Unlock()
var oldDomains map[string][]netip.Addr
oldDomains, e.domains = e.domains, make(map[string][]netip.Addr, len(domains))
e.wildcards = e.wildcards[:0]
for _, d := range domains {
d = strings.ToLower(d)
if len(d) == 0 {
continue
}
if strings.HasPrefix(d, "*.") {
e.wildcards = append(e.wildcards, d[2:])
continue
}
e.domains[d] = oldDomains[d]
delete(oldDomains, d)
}
// Ensure that still-live wildcard addresses are preserved as well.
for d, addrs := range oldDomains {
for _, wc := range e.wildcards {
if dnsname.HasSuffix(d, wc) {
e.domains[d] = addrs
delete(oldDomains, d)
break
}
}
}
// Everything left in oldDomains is a domain we're no longer tracking,
// and if we are storing route info we can unadvertise its routes.
if e.ShouldStoreRoutes() {
toRemove := []netip.Prefix{}
for _, addrs := range oldDomains {
for _, a := range addrs {
toRemove = append(toRemove, netip.PrefixFrom(a, a.BitLen()))
}
}
if err := e.routeAdvertiser.UnadvertiseRoute(toRemove...); err != nil {
e.logf("failed to unadvertise routes on domain removal: %v: %v: %v", xmaps.Keys(oldDomains), toRemove, err)
}
}
e.logf("handling domains: %v and wildcards: %v", xmaps.Keys(e.domains), e.wildcards)
}
// updateRoutes merges the supplied routes into the currently configured routes. The routes supplied
// by control for UpdateRoutes are supplemental to the routes discovered by DNS resolution, but are
// also more often whole ranges. UpdateRoutes will remove any single address routes that are now
// covered by new ranges.
func (e *AppConnector) updateRoutes(routes []netip.Prefix) {
e.mu.Lock()
defer e.mu.Unlock()
// If there was no change since the last update, no work to do.
if slices.Equal(e.controlRoutes, routes) {
return
}
if err := e.routeAdvertiser.AdvertiseRoute(routes...); err != nil {
e.logf("failed to advertise routes: %v: %v", routes, err)
return
}
var toRemove []netip.Prefix
// If we're storing routes and know e.controlRoutes is a good
// representation of what should be in AdvertisedRoutes we can stop
// advertising routes that used to be in e.controlRoutes but are not
// in routes.
if e.ShouldStoreRoutes() {
toRemove = routesWithout(e.controlRoutes, routes)
}
nextRoute:
for _, r := range routes {
for _, addr := range e.domains {
for _, a := range addr {
if r.Contains(a) && netip.PrefixFrom(a, a.BitLen()) != r {
pfx := netip.PrefixFrom(a, a.BitLen())
toRemove = append(toRemove, pfx)
continue nextRoute
}
}
}
}
if err := e.routeAdvertiser.UnadvertiseRoute(toRemove...); err != nil {
e.logf("failed to unadvertise routes: %v: %v", toRemove, err)
}
e.controlRoutes = routes
if err := e.storeRoutesLocked(); err != nil {
e.logf("failed to store route info: %v", err)
}
}
// Domains returns the currently configured domain list.
func (e *AppConnector) Domains() views.Slice[string] {
e.mu.Lock()
defer e.mu.Unlock()
return views.SliceOf(xmaps.Keys(e.domains))
}
// DomainRoutes returns a map of domains to resolved IP
// addresses.
func (e *AppConnector) DomainRoutes() map[string][]netip.Addr {
e.mu.Lock()
defer e.mu.Unlock()
drCopy := make(map[string][]netip.Addr)
for k, v := range e.domains {
drCopy[k] = append(drCopy[k], v...)
}
return drCopy
}
// ObserveDNSResponse is a callback invoked by the DNS resolver when a DNS
// response is being returned over the PeerAPI. The response is parsed and
// matched against the configured domains, if matched the routeAdvertiser is
// advised to advertise the discovered route.
func (e *AppConnector) ObserveDNSResponse(res []byte) {
var p dnsmessage.Parser
if _, err := p.Start(res); err != nil {
return
}
if err := p.SkipAllQuestions(); err != nil {
return
}
// cnameChain tracks a chain of CNAMEs for a given query in order to reverse
// a CNAME chain back to the original query for flattening. The keys are
// CNAME record targets, and the value is the name the record answers, so
// for www.example.com CNAME example.com, the map would contain
// ["example.com"] = "www.example.com".
var cnameChain map[string]string
// addressRecords is a list of address records found in the response.
var addressRecords map[string][]netip.Addr
for {
h, err := p.AnswerHeader()
if err == dnsmessage.ErrSectionDone {
break
}
if err != nil {
return
}
if h.Class != dnsmessage.ClassINET {
if err := p.SkipAnswer(); err != nil {
return
}
continue
}
switch h.Type {
case dnsmessage.TypeCNAME, dnsmessage.TypeA, dnsmessage.TypeAAAA:
default:
if err := p.SkipAnswer(); err != nil {
return
}
continue
}
domain := strings.TrimSuffix(strings.ToLower(h.Name.String()), ".")
if len(domain) == 0 {
continue
}
if h.Type == dnsmessage.TypeCNAME {
res, err := p.CNAMEResource()
if err != nil {
return
}
cname := strings.TrimSuffix(strings.ToLower(res.CNAME.String()), ".")
if len(cname) == 0 {
continue
}
mak.Set(&cnameChain, cname, domain)
continue
}
switch h.Type {
case dnsmessage.TypeA:
r, err := p.AResource()
if err != nil {
return
}
addr := netip.AddrFrom4(r.A)
mak.Set(&addressRecords, domain, append(addressRecords[domain], addr))
case dnsmessage.TypeAAAA:
r, err := p.AAAAResource()
if err != nil {
return
}
addr := netip.AddrFrom16(r.AAAA)
mak.Set(&addressRecords, domain, append(addressRecords[domain], addr))
default:
if err := p.SkipAnswer(); err != nil {
return
}
continue
}
}
e.mu.Lock()
defer e.mu.Unlock()
for domain, addrs := range addressRecords {
domain, isRouted := e.findRoutedDomainLocked(domain, cnameChain)
// neither the domain nor any CNAME in its chain is routed; skip it
if !isRouted {
continue
}
// advertise each address we have learned for the routed domain, that
// was not already known.
var toAdvertise []netip.Prefix
for _, addr := range addrs {
if !e.isAddrKnownLocked(domain, addr) {
toAdvertise = append(toAdvertise, netip.PrefixFrom(addr, addr.BitLen()))
}
}
if len(toAdvertise) > 0 {
e.logf("[v2] observed new routes for %s: %s", domain, toAdvertise)
e.scheduleAdvertisement(domain, toAdvertise...)
}
}
}
// findRoutedDomainLocked starts from the given domain that resolved to an
// address and checks whether it, or any domain in the CNAME chain that led
// to it, is a routed domain. It returns the routed domain name and a bool
// indicating whether a routed domain was found.
// e.mu must be held.
func (e *AppConnector) findRoutedDomainLocked(domain string, cnameChain map[string]string) (string, bool) {
var isRouted bool
for {
_, isRouted = e.domains[domain]
if isRouted {
break
}
// match wildcard domains
for _, wc := range e.wildcards {
if dnsname.HasSuffix(domain, wc) {
e.domains[domain] = nil
isRouted = true
break
}
}
next, ok := cnameChain[domain]
if !ok {
break
}
domain = next
}
return domain, isRouted
}
// isAddrKnownLocked returns true if the address is known to be associated with
// the given domain. Known domain tables are updated for covered routes to speed
// up future matches.
// e.mu must be held.
func (e *AppConnector) isAddrKnownLocked(domain string, addr netip.Addr) bool {
if e.hasDomainAddrLocked(domain, addr) {
return true
}
for _, route := range e.controlRoutes {
if route.Contains(addr) {
// record the new address associated with the domain for faster matching in subsequent
// requests and for diagnostic records.
e.addDomainAddrLocked(domain, addr)
return true
}
}
return false
}
// scheduleAdvertisement schedules an advertisement of the given address
// associated with the given domain.
func (e *AppConnector) scheduleAdvertisement(domain string, routes ...netip.Prefix) {
e.queue.Add(func() {
if err := e.routeAdvertiser.AdvertiseRoute(routes...); err != nil {
e.logf("failed to advertise routes for %s: %v: %v", domain, routes, err)
return
}
e.mu.Lock()
defer e.mu.Unlock()
for _, route := range routes {
if !route.IsSingleIP() {
continue
}
addr := route.Addr()
if !e.hasDomainAddrLocked(domain, addr) {
e.addDomainAddrLocked(domain, addr)
e.logf("[v2] advertised route for %v: %v", domain, addr)
}
}
if err := e.storeRoutesLocked(); err != nil {
e.logf("failed to store route info: %v", err)
}
})
}
// hasDomainAddrLocked returns true if the address has been observed in a
// resolution of domain.
func (e *AppConnector) hasDomainAddrLocked(domain string, addr netip.Addr) bool {
_, ok := slices.BinarySearchFunc(e.domains[domain], addr, compareAddr)
return ok
}
// addDomainAddrLocked adds the address to the list of addresses resolved for
// domain and ensures the list remains sorted. Does not attempt to deduplicate.
func (e *AppConnector) addDomainAddrLocked(domain string, addr netip.Addr) {
e.domains[domain] = append(e.domains[domain], addr)
slices.SortFunc(e.domains[domain], compareAddr)
}
func compareAddr(l, r netip.Addr) int {
return l.Compare(r)
}
// routesWithout returns a without b where a and b
// are unsorted slices of netip.Prefix
func routesWithout(a, b []netip.Prefix) []netip.Prefix {
m := make(map[netip.Prefix]bool, len(b))
for _, p := range b {
m[p] = true
}
return slicesx.Filter(make([]netip.Prefix, 0, len(a)), a, func(p netip.Prefix) bool {
return !m[p]
})
}
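// Illustrative only: one way a caller might wire this package together. A nil
// routeInfo and nil storeRoutesFunc mean nothing is persisted
// (ShouldStoreRoutes reports false), and the "*." prefix marks a wildcard
// matching all subdomains, as handled by updateDomains above. The domain
// names are placeholders.
func exampleNewAppConnector(logf logger.Logf, adv RouteAdvertiser) *AppConnector {
ac := NewAppConnector(logf, adv, nil, nil)
ac.UpdateDomains([]string{"example.com", "*.internal.example.com"})
return ac
}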

27
vendor/tailscale.com/assert_ts_toolchain_match.go generated vendored Normal file

@@ -0,0 +1,27 @@
// Copyright (c) Tailscale Inc & AUTHORS
// SPDX-License-Identifier: BSD-3-Clause
//go:build tailscale_go
package tailscaleroot
import (
"fmt"
"os"
"strings"
)
func init() {
tsRev, ok := tailscaleToolchainRev()
if !ok {
panic("binary built with tailscale_go build tag but failed to read build info or find tailscale.toolchain.rev in build info")
}
want := strings.TrimSpace(GoToolchainRev)
if tsRev != want {
if os.Getenv("TS_PERMIT_TOOLCHAIN_MISMATCH") == "1" {
fmt.Fprintf(os.Stderr, "tailscale.toolchain.rev = %q, want %q; but ignoring due to TS_PERMIT_TOOLCHAIN_MISMATCH=1\n", tsRev, want)
return
}
panic(fmt.Sprintf("binary built with tailscale_go build tag but Go toolchain %q doesn't match github.com/tailscale/tailscale expected value %q; override this failure with TS_PERMIT_TOOLCHAIN_MISMATCH=1", tsRev, want))
}
}

51
vendor/tailscale.com/atomicfile/atomicfile.go generated vendored Normal file

@@ -0,0 +1,51 @@
// Copyright (c) Tailscale Inc & AUTHORS
// SPDX-License-Identifier: BSD-3-Clause
// Package atomicfile contains code related to writing to filesystems
// atomically.
//
// This package should be considered internal; its API is not stable.
package atomicfile // import "tailscale.com/atomicfile"
import (
"fmt"
"os"
"path/filepath"
"runtime"
)
// WriteFile writes data to filename+some suffix, then renames it into filename.
// The perm argument is ignored on Windows. If the target filename already
// exists but is not a regular file, WriteFile returns an error.
func WriteFile(filename string, data []byte, perm os.FileMode) (err error) {
fi, err := os.Stat(filename)
if err == nil && !fi.Mode().IsRegular() {
return fmt.Errorf("%s already exists and is not a regular file", filename)
}
f, err := os.CreateTemp(filepath.Dir(filename), filepath.Base(filename)+".tmp")
if err != nil {
return err
}
tmpName := f.Name()
defer func() {
if err != nil {
f.Close()
os.Remove(tmpName)
}
}()
if _, err := f.Write(data); err != nil {
return err
}
if runtime.GOOS != "windows" {
if err := f.Chmod(perm); err != nil {
return err
}
}
if err := f.Sync(); err != nil {
return err
}
if err := f.Close(); err != nil {
return err
}
return os.Rename(tmpName, filename)
}
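// Illustrative only: a hypothetical caller atomically replacing a small state
// file. The path and contents are placeholders; as documented above, the
// permission bits are ignored on Windows.
func exampleWriteState(path string, state []byte) error {
return WriteFile(path, state, 0o600)
}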

52
vendor/tailscale.com/build_dist.sh generated vendored Normal file

@@ -0,0 +1,52 @@
#!/usr/bin/env sh
#
# Runs `go build` with flags configured for binary distribution. All
# it does differently from `go build` is burn git commit and version
# information into the binaries, so that we can track down user
# issues.
#
# If you're packaging Tailscale for a distro, please consider using
# this script, or executing equivalent commands in your
# distro-specific build system.
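#
# Example invocations (illustrative; argument handling is below):
#   ./build_dist.sh tailscale.com/cmd/tailscale
#   ./build_dist.sh --extra-small tailscale.com/cmd/tailscaled
#   eval "$(./build_dist.sh shellvars)"   # load the VERSION_* variables into the current shell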
set -eu
go="go"
if [ -n "${TS_USE_TOOLCHAIN:-}" ]; then
go="./tool/go"
fi
eval `CGO_ENABLED=0 GOOS=$($go env GOHOSTOS) GOARCH=$($go env GOHOSTARCH) $go run ./cmd/mkversion`
if [ "$1" = "shellvars" ]; then
cat <<EOF
VERSION_MINOR="$VERSION_MINOR"
VERSION_SHORT="$VERSION_SHORT"
VERSION_LONG="$VERSION_LONG"
VERSION_GIT_HASH="$VERSION_GIT_HASH"
EOF
exit 0
fi
tags=""
ldflags="-X tailscale.com/version.longStamp=${VERSION_LONG} -X tailscale.com/version.shortStamp=${VERSION_SHORT}"
# build_dist.sh arguments must precede go build arguments.
while [ "$#" -gt 1 ]; do
case "$1" in
--extra-small)
shift
ldflags="$ldflags -w -s"
tags="${tags:+$tags,}ts_omit_aws,ts_omit_bird,ts_omit_tap,ts_omit_kube,ts_omit_completion"
;;
--box)
shift
tags="${tags:+$tags,}ts_include_cli"
;;
*)
break
;;
esac
done
exec $go build ${tags:+-tags=$tags} -ldflags "$ldflags" "$@"

84
vendor/tailscale.com/build_docker.sh generated vendored Normal file

@@ -0,0 +1,84 @@
#!/usr/bin/env sh
#
# This script builds Tailscale container images using
# github.com/tailscale/mkctr.
# By default the images will be tagged with the current version and git
# hash of this repository as produced by ./cmd/mkversion.
# This is the image build mechanism used to build the official Tailscale
# container images.
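#
# Example (illustrative): build the client image and push it to a personal
# repository by overriding the defaults set below:
#   TARGET=client REPOS=ghcr.io/$USER/tailscale PUSH=true ./build_docker.sh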
set -eu
# Use the "go" binary from the "tool" directory (which is github.com/tailscale/go)
export PATH="$PWD"/tool:"$PATH"
eval "$(./build_dist.sh shellvars)"
DEFAULT_TARGET="client"
DEFAULT_TAGS="v${VERSION_SHORT},v${VERSION_MINOR}"
DEFAULT_BASE="tailscale/alpine-base:3.18"
PUSH="${PUSH:-false}"
TARGET="${TARGET:-${DEFAULT_TARGET}}"
TAGS="${TAGS:-${DEFAULT_TAGS}}"
BASE="${BASE:-${DEFAULT_BASE}}"
PLATFORM="${PLATFORM:-}" # default to all platforms
case "$TARGET" in
client)
DEFAULT_REPOS="tailscale/tailscale"
REPOS="${REPOS:-${DEFAULT_REPOS}}"
go run github.com/tailscale/mkctr \
--gopaths="\
tailscale.com/cmd/tailscale:/usr/local/bin/tailscale, \
tailscale.com/cmd/tailscaled:/usr/local/bin/tailscaled, \
tailscale.com/cmd/containerboot:/usr/local/bin/containerboot" \
--ldflags="\
-X tailscale.com/version.longStamp=${VERSION_LONG} \
-X tailscale.com/version.shortStamp=${VERSION_SHORT} \
-X tailscale.com/version.gitCommitStamp=${VERSION_GIT_HASH}" \
--base="${BASE}" \
--tags="${TAGS}" \
--gotags="ts_kube,ts_package_container" \
--repos="${REPOS}" \
--push="${PUSH}" \
--target="${PLATFORM}" \
/usr/local/bin/containerboot
;;
operator)
DEFAULT_REPOS="tailscale/k8s-operator"
REPOS="${REPOS:-${DEFAULT_REPOS}}"
go run github.com/tailscale/mkctr \
--gopaths="tailscale.com/cmd/k8s-operator:/usr/local/bin/operator" \
--ldflags="\
-X tailscale.com/version.longStamp=${VERSION_LONG} \
-X tailscale.com/version.shortStamp=${VERSION_SHORT} \
-X tailscale.com/version.gitCommitStamp=${VERSION_GIT_HASH}" \
--base="${BASE}" \
--tags="${TAGS}" \
--repos="${REPOS}" \
--push="${PUSH}" \
--target="${PLATFORM}" \
/usr/local/bin/operator
;;
k8s-nameserver)
DEFAULT_REPOS="tailscale/k8s-nameserver"
REPOS="${REPOS:-${DEFAULT_REPOS}}"
go run github.com/tailscale/mkctr \
--gopaths="tailscale.com/cmd/k8s-nameserver:/usr/local/bin/k8s-nameserver" \
--ldflags=" \
-X tailscale.com/version.longStamp=${VERSION_LONG} \
-X tailscale.com/version.shortStamp=${VERSION_SHORT} \
-X tailscale.com/version.gitCommitStamp=${VERSION_GIT_HASH}" \
--base="${BASE}" \
--tags="${TAGS}" \
--repos="${REPOS}" \
--push="${PUSH}" \
--target="${PLATFORM}" \
/usr/local/bin/k8s-nameserver
;;
*)
echo "unknown target: $TARGET"
exit 1
;;
esac

521
vendor/tailscale.com/client/tailscale/acl.go generated vendored Normal file

@@ -0,0 +1,521 @@
// Copyright (c) Tailscale Inc & AUTHORS
// SPDX-License-Identifier: BSD-3-Clause
//go:build go1.19
package tailscale
import (
"bytes"
"context"
"encoding/json"
"fmt"
"net/http"
"net/netip"
)
// ACLRow defines a rule that grants access by a set of users or groups to a set
// of servers and ports.
// Only one of Src/Dst or Users/Ports may be specified.
type ACLRow struct {
Action string `json:"action,omitempty"` // valid values: "accept"
Proto string `json:"proto,omitempty"` // protocol
Users []string `json:"users,omitempty"` // old name for src
Ports []string `json:"ports,omitempty"` // old name for dst
Src []string `json:"src,omitempty"`
Dst []string `json:"dst,omitempty"`
}
// ACLTest defines a test for your ACLs to prevent accidental exposure or
// revoking of access to key servers and ports. Only one of Src or User may be
// specified, and only one of Allow/Accept may be specified.
type ACLTest struct {
Src string `json:"src,omitempty"` // source
User string `json:"user,omitempty"` // old name for source
Proto string `json:"proto,omitempty"` // protocol
Accept []string `json:"accept,omitempty"` // expected destination ip:port that user can access
Deny []string `json:"deny,omitempty"` // expected destination ip:port that user cannot access
Allow []string `json:"allow,omitempty"` // old name for accept
}
// NodeAttrGrant defines additional string attributes that apply to specific devices.
type NodeAttrGrant struct {
// Target specifies which nodes the attributes apply to. The nodes can be a
// tag (tag:server), user (alice@example.com), group (group:kids), or *.
Target []string `json:"target,omitempty"`
// Attr are the attributes to set on Target(s).
Attr []string `json:"attr,omitempty"`
}
// ACLDetails contains all the details for an ACL.
type ACLDetails struct {
Tests []ACLTest `json:"tests,omitempty"`
ACLs []ACLRow `json:"acls,omitempty"`
Groups map[string][]string `json:"groups,omitempty"`
TagOwners map[string][]string `json:"tagowners,omitempty"`
Hosts map[string]string `json:"hosts,omitempty"`
NodeAttrs []NodeAttrGrant `json:"nodeAttrs,omitempty"`
}
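// Illustrative only: a small ACLDetails value showing how the fields above
// fit together. The group, tag, and destination syntax follows Tailscale's
// published policy-file conventions; the specific names and addresses are
// placeholders.
var exampleACLDetails = ACLDetails{
Groups: map[string][]string{"group:eng": {"alice@example.com"}},
TagOwners: map[string][]string{"tag:server": {"group:eng"}},
Hosts: map[string]string{"db": "100.101.102.103"},
ACLs: []ACLRow{{
Action: "accept",
Src: []string{"group:eng"},
Dst: []string{"tag:server:22,443"},
}},
Tests: []ACLTest{{
Src: "alice@example.com",
Accept: []string{"100.101.102.103:22"},
}},
}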
// ACL contains an ACLDetails and metadata.
type ACL struct {
ACL ACLDetails
ETag string // to check with version on server
}
// ACLHuJSON contains the HuJSON string of the ACL and metadata.
type ACLHuJSON struct {
ACL string
Warnings []string
ETag string // to check with version on server
}
// ACL makes a call to the Tailscale server to get a JSON-parsed version of the ACL.
// The JSON-parsed version of the ACL contains no comments as proper JSON does not support
// comments.
func (c *Client) ACL(ctx context.Context) (acl *ACL, err error) {
// Format return errors to be descriptive.
defer func() {
if err != nil {
err = fmt.Errorf("tailscale.ACL: %w", err)
}
}()
path := fmt.Sprintf("%s/api/v2/tailnet/%s/acl", c.baseURL(), c.tailnet)
req, err := http.NewRequestWithContext(ctx, "GET", path, nil)
if err != nil {
return nil, err
}
req.Header.Set("Accept", "application/json")
b, resp, err := c.sendRequest(req)
if err != nil {
return nil, err
}
// If status code was not successful, return the error.
// TODO: Change the check for the StatusCode to include other 2XX success codes.
if resp.StatusCode != http.StatusOK {
return nil, handleErrorResponse(b, resp)
}
// Otherwise, try to decode the response.
var aclDetails ACLDetails
if err = json.Unmarshal(b, &aclDetails); err != nil {
return nil, err
}
acl = &ACL{
ACL: aclDetails,
ETag: resp.Header.Get("ETag"),
}
return acl, nil
}
// ACLHuJSON makes a call to the Tailscale server to get the ACL HuJSON and returns
// it as a string.
// HuJSON is JSON with a few modifications to make it more human-friendly. The primary
// changes are allowing comments and trailing commas. See the following links for more info:
// https://tailscale.com/s/acl-format
// https://github.com/tailscale/hujson
func (c *Client) ACLHuJSON(ctx context.Context) (acl *ACLHuJSON, err error) {
// Format return errors to be descriptive.
defer func() {
if err != nil {
err = fmt.Errorf("tailscale.ACLHuJSON: %w", err)
}
}()
path := fmt.Sprintf("%s/api/v2/tailnet/%s/acl?details=1", c.baseURL(), c.tailnet)
req, err := http.NewRequestWithContext(ctx, "GET", path, nil)
if err != nil {
return nil, err
}
req.Header.Set("Accept", "application/hujson")
b, resp, err := c.sendRequest(req)
if err != nil {
return nil, err
}
if resp.StatusCode != http.StatusOK {
return nil, handleErrorResponse(b, resp)
}
data := struct {
ACL []byte `json:"acl"`
Warnings []string `json:"warnings"`
}{}
if err := json.Unmarshal(b, &data); err != nil {
return nil, err
}
acl = &ACLHuJSON{
ACL: string(data.ACL),
Warnings: data.Warnings,
ETag: resp.Header.Get("ETag"),
}
return acl, nil
}
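// Illustrative only: a hypothetical caller fetching the tailnet policy file
// with the method above and printing the raw HuJSON. It assumes a Client
// configured elsewhere in this package with a tailnet and API credentials.
func examplePrintPolicy(ctx context.Context, c *Client) error {
acl, err := c.ACLHuJSON(ctx)
if err != nil {
return err
}
fmt.Println(acl.ACL)
for _, w := range acl.Warnings {
fmt.Println("warning:", w)
}
return nil
}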
// ACLTestFailureSummary specifies a user for which ACL tests
// failed and the related user-friendly error messages.
//
// ACLTestFailureSummary specifies the JSON format sent to the
// JavaScript client to be rendered in the HTML.
type ACLTestFailureSummary struct {
// User is the source ("src") value of the ACL test that failed.
// The name "user" is a legacy holdover from the original naming and
// is kept for compatibility, but it may also contain any value
// that's valid in an ACL test "src" field.
User string `json:"user,omitempty"`
Errors []string `json:"errors,omitempty"`
Warnings []string `json:"warnings,omitempty"`
}
// ACLTestError is ErrResponse but with an extra field to account for ACLTestFailureSummary.
type ACLTestError struct {
ErrResponse
Data []ACLTestFailureSummary `json:"data"`
}
func (e ACLTestError) Error() string {
return fmt.Sprintf("%s, Data: %+v", e.ErrResponse.Error(), e.Data)
}
func (c *Client) aclPOSTRequest(ctx context.Context, body []byte, avoidCollisions bool, etag, acceptHeader string) ([]byte, string, error) {
path := fmt.Sprintf("%s/api/v2/tailnet/%s/acl", c.baseURL(), c.tailnet)
req, err := http.NewRequestWithContext(ctx, "POST", path, bytes.NewBuffer(body))
if err != nil {
return nil, "", err
}
if avoidCollisions {
req.Header.Set("If-Match", etag)
}
req.Header.Set("Accept", acceptHeader)
req.Header.Set("Content-Type", "application/hujson")
b, resp, err := c.sendRequest(req)
if err != nil {
return nil, "", err
}
// If status code was not successful, return the error.
// TODO: Change the check for the StatusCode to include other 2XX success codes.
if resp.StatusCode != http.StatusOK {
// check if test error
var ate ACLTestError
if err := json.Unmarshal(b, &ate); err != nil {
return nil, "", err
}
ate.Status = resp.StatusCode
return nil, "", ate
}
return b, resp.Header.Get("ETag"), nil
}
// SetACL sends a POST request to update the ACL according to the provided ACL object. If
// `avoidCollisions` is true, it will use the ETag obtained in the GET request in an If-Match
// header to check if the previously obtained ACL was the latest version and that no updates
// were missed.
//
// Returns error with status code 412 if the ETag is mismatched and avoidCollisions is set to true.
// Returns error if ACL has tests that fail.
// Returns error if there are other errors with the ACL.
func (c *Client) SetACL(ctx context.Context, acl ACL, avoidCollisions bool) (res *ACL, err error) {
// Format return errors to be descriptive.
defer func() {
if err != nil {
err = fmt.Errorf("tailscale.SetACL: %w", err)
}
}()
postData, err := json.Marshal(acl.ACL)
if err != nil {
return nil, err
}
b, etag, err := c.aclPOSTRequest(ctx, postData, avoidCollisions, acl.ETag, "application/json")
if err != nil {
return nil, err
}
// Otherwise, try to decode the response.
var aclDetails ACLDetails
if err = json.Unmarshal(b, &aclDetails); err != nil {
return nil, err
}
res = &ACL{
ACL: aclDetails,
ETag: etag,
}
return res, nil
}
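// exampleSetACLRoundTrip is a hypothetical usage sketch, not part of the
// upstream file. It assumes the ACL getter earlier in this file is named ACL
// and that it records the ETag from the GET response, which SetACL then
// echoes in an If-Match header when avoidCollisions is true.
func exampleSetACLRoundTrip(ctx context.Context, c *Client) error {
	current, err := c.ACL(ctx) // GET the current policy and its ETag
	if err != nil {
		return err
	}
	// ... modify current.ACL locally here ...
	// With avoidCollisions=true the POST carries If-Match: current.ETag, so
	// it fails (412-style error) if the policy changed on the server since
	// the GET above.
	_, err = c.SetACL(ctx, *current, true)
	return err
}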
// SetACLHuJSON sends a POST request to update the ACL according to the provided ACL object. If
// `avoidCollisions` is true, it will use the ETag obtained in the GET request in an If-Match
// header to check if the previously obtained ACL was the latest version and that no updates
// were missed.
//
// Returns error with status code 412 if the ETag is mismatched and avoidCollisions is set to true.
// Returns error if the HuJSON is invalid.
// Returns error if ACL has tests that fail.
// Returns error if there are other errors with the ACL.
func (c *Client) SetACLHuJSON(ctx context.Context, acl ACLHuJSON, avoidCollisions bool) (res *ACLHuJSON, err error) {
// Format return errors to be descriptive.
defer func() {
if err != nil {
err = fmt.Errorf("tailscale.SetACLHuJSON: %w", err)
}
}()
postData := []byte(acl.ACL)
b, etag, err := c.aclPOSTRequest(ctx, postData, avoidCollisions, acl.ETag, "application/hujson")
if err != nil {
return nil, err
}
res = &ACLHuJSON{
ACL: string(b),
ETag: etag,
}
return res, nil
}
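// exampleSetACLHuJSONRoundTrip is a hypothetical sketch, not part of the
// upstream file. It shows the HuJSON variant of the same round trip: the
// policy travels as a raw HuJSON string, so comments and trailing commas in
// the tailnet policy file survive the edit.
func exampleSetACLHuJSONRoundTrip(ctx context.Context, c *Client) error {
	current, err := c.ACLHuJSON(ctx)
	if err != nil {
		return err
	}
	current.ACL += "\n// edited via the API\n"
	_, err = c.SetACLHuJSON(ctx, *current, true) // If-Match: current.ETag
	return err
}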
// UserRuleMatch specifies the source users/groups/hosts that a rule targets
// and the destination ports that they can access.
// LineNumber is only useful for requests provided in HuJSON form.
// While JSON requests will have LineNumber, the value is not useful.
type UserRuleMatch struct {
Users []string `json:"users"`
Ports []string `json:"ports"`
LineNumber int `json:"lineNumber"`
// Via is the list of targets through which Users can access Ports.
// See https://tailscale.com/kb/1378/via for more information.
Via []string `json:"via,omitempty"`
// Postures is a list of posture policies that are
// associated with this match. The rules can be looked
// up in the ACLPreviewResponse parent struct.
// The source of the list is from srcPosture on
// an ACL or Grant rule:
// https://tailscale.com/kb/1288/device-posture#posture-conditions
Postures []string `json:"postures"`
}
// ACLPreviewResponse is the response type of previewACLPostRequest
type ACLPreviewResponse struct {
Matches []UserRuleMatch `json:"matches"` // ACL rules that match the specified user or ipport.
Type string `json:"type"` // The request type: currently only "user" or "ipport".
PreviewFor string `json:"previewFor"` // A specific user or ipport.
// Postures is a map of postures and associated rules that apply
// to this preview.
// For more details about the posture mapping, see:
// https://tailscale.com/kb/1288/device-posture#postures
Postures map[string][]string `json:"postures,omitempty"`
}
// ACLPreview is the response type of PreviewACLForUser, PreviewACLForIPPort, PreviewACLHuJSONForUser, and PreviewACLHuJSONForIPPort
type ACLPreview struct {
Matches []UserRuleMatch `json:"matches"`
User string `json:"user,omitempty"` // Filled if response of PreviewACLForUser or PreviewACLHuJSONForUser
IPPort string `json:"ipport,omitempty"` // Filled if response of PreviewACLForIPPort or PreviewACLHuJSONForIPPort
// Postures is a map of postures and associated rules that apply
// to this preview.
// For more details about the posture mapping, see:
// https://tailscale.com/kb/1288/device-posture#postures
Postures map[string][]string `json:"postures,omitempty"`
}
func (c *Client) previewACLPostRequest(ctx context.Context, body []byte, previewType string, previewFor string) (res *ACLPreviewResponse, err error) {
path := fmt.Sprintf("%s/api/v2/tailnet/%s/acl/preview", c.baseURL(), c.tailnet)
req, err := http.NewRequestWithContext(ctx, "POST", path, bytes.NewBuffer(body))
if err != nil {
return nil, err
}
q := req.URL.Query()
q.Add("type", previewType)
q.Add("previewFor", previewFor)
req.URL.RawQuery = q.Encode()
req.Header.Set("Content-Type", "application/hujson")
c.setAuth(req)
b, resp, err := c.sendRequest(req)
if err != nil {
return nil, err
}
// If status code was not successful, return the error.
// TODO: Change the check for the StatusCode to include other 2XX success codes.
if resp.StatusCode != http.StatusOK {
return nil, handleErrorResponse(b, resp)
}
if err = json.Unmarshal(b, &res); err != nil {
return nil, err
}
return res, nil
}
// PreviewACLForUser determines what rules match a given ACL for a user.
// The ACL can be a locally modified or clean ACL obtained from server.
//
// Returns ACLPreview on success with matches in a slice. If there are no matches,
// the call is still successful but Matches will be an empty slice.
// Returns error if the provided ACL is invalid.
func (c *Client) PreviewACLForUser(ctx context.Context, acl ACL, user string) (res *ACLPreview, err error) {
// Format return errors to be descriptive.
defer func() {
if err != nil {
err = fmt.Errorf("tailscale.PreviewACLForUser: %w", err)
}
}()
postData, err := json.Marshal(acl.ACL)
if err != nil {
return nil, err
}
b, err := c.previewACLPostRequest(ctx, postData, "user", user)
if err != nil {
return nil, err
}
return &ACLPreview{
Matches: b.Matches,
User: b.PreviewFor,
Postures: b.Postures,
}, nil
}
// PreviewACLForIPPort determines what rules match a given ACL for an ipport.
// The ACL can be a locally modified or clean ACL obtained from server.
//
// Returns ACLPreview on success with matches in a slice. If there are no matches,
// the call is still successful but Matches will be an empty slice.
// Returns error if the provided ACL is invalid.
func (c *Client) PreviewACLForIPPort(ctx context.Context, acl ACL, ipport netip.AddrPort) (res *ACLPreview, err error) {
// Format return errors to be descriptive.
defer func() {
if err != nil {
err = fmt.Errorf("tailscale.PreviewACLForIPPort: %w", err)
}
}()
postData, err := json.Marshal(acl.ACL)
if err != nil {
return nil, err
}
b, err := c.previewACLPostRequest(ctx, postData, "ipport", ipport.String())
if err != nil {
return nil, err
}
return &ACLPreview{
Matches: b.Matches,
IPPort: b.PreviewFor,
Postures: b.Postures,
}, nil
}
// PreviewACLHuJSONForUser determines what rules match a given ACL for a user.
// The ACL can be a locally modified or clean ACL obtained from server.
//
// Returns ACLPreview on success with matches in a slice. If there are no matches,
// the call is still successful but Matches will be an empty slice.
// Returns error if the provided ACL is invalid.
func (c *Client) PreviewACLHuJSONForUser(ctx context.Context, acl ACLHuJSON, user string) (res *ACLPreview, err error) {
// Format return errors to be descriptive.
defer func() {
if err != nil {
err = fmt.Errorf("tailscale.PreviewACLHuJSONForUser: %w", err)
}
}()
postData := []byte(acl.ACL)
b, err := c.previewACLPostRequest(ctx, postData, "user", user)
if err != nil {
return nil, err
}
return &ACLPreview{
Matches: b.Matches,
User: b.PreviewFor,
Postures: b.Postures,
}, nil
}
// PreviewACLHuJSONForIPPort determines what rules match a given ACL for an ipport.
// The ACL can be a locally modified or clean ACL obtained from server.
//
// Returns ACLPreview on success with matches in a slice. If there are no matches,
// the call is still successful but Matches will be an empty slice.
// Returns error if the provided ACL is invalid.
func (c *Client) PreviewACLHuJSONForIPPort(ctx context.Context, acl ACLHuJSON, ipport string) (res *ACLPreview, err error) {
// Format return errors to be descriptive.
defer func() {
if err != nil {
err = fmt.Errorf("tailscale.PreviewACLHuJSONForIPPort: %w", err)
}
}()
postData := []byte(acl.ACL)
b, err := c.previewACLPostRequest(ctx, postData, "ipport", ipport)
if err != nil {
return nil, err
}
return &ACLPreview{
Matches: b.Matches,
IPPort: b.PreviewFor,
Postures: b.Postures,
}, nil
}
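// examplePreviewACL is a hypothetical sketch, not part of the upstream file.
// It previews a locally edited policy against both a user and an ip:port
// before saving it; the user and address below are placeholders.
func examplePreviewACL(ctx context.Context, c *Client, draft ACL) error {
	byUser, err := c.PreviewACLForUser(ctx, draft, "alice@example.com")
	if err != nil {
		return err
	}
	byPort, err := c.PreviewACLForIPPort(ctx, draft, netip.MustParseAddrPort("100.64.0.1:22"))
	if err != nil {
		return err
	}
	// An empty Matches slice means the preview succeeded but nothing in the
	// draft policy grants the previewed user or ip:port any access.
	_ = len(byUser.Matches) + len(byPort.Matches)
	return nil
}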
// ValidateACLJSON takes in the given source and destination (in this situation,
// it is assumed that you are checking whether the source can connect to destination)
// and creates an ACLTest from that. It then sends the ACLTest to the control api acl
// validate endpoint, where the test is run. It returns a nil ACLTestError pointer if
// no test errors occur.
func (c *Client) ValidateACLJSON(ctx context.Context, source, dest string) (testErr *ACLTestError, err error) {
// Format return errors to be descriptive.
defer func() {
if err != nil {
err = fmt.Errorf("tailscale.ValidateACLJSON: %w", err)
}
}()
tests := []ACLTest{{User: source, Allow: []string{dest}}}
postData, err := json.Marshal(tests)
if err != nil {
return nil, err
}
path := fmt.Sprintf("%s/api/v2/tailnet/%s/acl/validate", c.baseURL(), c.tailnet)
req, err := http.NewRequestWithContext(ctx, "POST", path, bytes.NewBuffer(postData))
if err != nil {
return nil, err
}
req.Header.Set("Content-Type", "application/json")
c.setAuth(req)
b, resp, err := c.sendRequest(req)
if err != nil {
return nil, err
}
if resp.StatusCode != http.StatusOK {
return nil, fmt.Errorf("control api responded with %d status code", resp.StatusCode)
}
	// The test ran with no failures.
if len(b) == 0 {
return nil, nil
}
var res ACLTestError
// The test returned errors.
if err = json.Unmarshal(b, &res); err != nil {
// failed to unmarshal
return nil, err
}
return &res, nil
}
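// exampleValidateACL is a hypothetical sketch, not part of the upstream file.
// A nil *ACLTestError together with a nil error means the src->dst check
// passed; the source and destination strings below are placeholders.
func exampleValidateACL(ctx context.Context, c *Client) error {
	testErr, err := c.ValidateACLJSON(ctx, "alice@example.com", "tag:prod:443")
	if err != nil {
		return err // transport or API error
	}
	if testErr != nil {
		return fmt.Errorf("ACL test failed: %v", testErr.Data)
	}
	return nil
}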

View File

@@ -0,0 +1,78 @@
// Copyright (c) Tailscale Inc & AUTHORS
// SPDX-License-Identifier: BSD-3-Clause
// Package apitype contains types for the Tailscale LocalAPI and control plane API.
package apitype
import (
"tailscale.com/tailcfg"
"tailscale.com/types/dnstype"
)
// LocalAPIHost is the Host header value used by the LocalAPI.
const LocalAPIHost = "local-tailscaled.sock"
// WhoIsResponse is the JSON type returned by tailscaled debug server's /whois?ip=$IP handler.
// In successful whois responses, Node and UserProfile are never nil.
type WhoIsResponse struct {
Node *tailcfg.Node
UserProfile *tailcfg.UserProfile
// CapMap is a map of capabilities to their values.
// See tailcfg.PeerCapMap and tailcfg.PeerCapability for details.
CapMap tailcfg.PeerCapMap
}
// FileTarget is a node to which files can be sent, along with the PeerAPI
// URL base to use when sending them.
type FileTarget struct {
Node *tailcfg.Node
// PeerAPI is the http://ip:port URL base of the node's PeerAPI,
// without any path (not even a single slash).
PeerAPIURL string
}
type WaitingFile struct {
Name string
Size int64
}
// SetPushDeviceTokenRequest is the body POSTed to the LocalAPI endpoint /set-device-token.
type SetPushDeviceTokenRequest struct {
// PushDeviceToken is the iOS/macOS APNs device token (and any future Android equivalent).
PushDeviceToken string
}
// ReloadConfigResponse is the response to a LocalAPI reload-config request.
//
// There are three possible outcomes: (false, "") if no config mode in use,
// (true, "") on success, or (false, "error message") on failure.
type ReloadConfigResponse struct {
Reloaded bool // whether the config was reloaded
Err string // any error message
}
// ExitNodeSuggestionResponse is the response to a LocalAPI suggest-exit-node GET request.
// It returns the StableNodeID, name, and location of a suggested exit node for the client making the request.
type ExitNodeSuggestionResponse struct {
ID tailcfg.StableNodeID
Name string
Location tailcfg.LocationView `json:",omitempty"`
}
// DNSOSConfig mimics dns.OSConfig without forcing us to import the entire dns package
// into the CLI.
type DNSOSConfig struct {
Nameservers []string
SearchDomains []string
MatchDomains []string
}
// DNSQueryResponse is the response to a DNS query request sent via LocalAPI.
type DNSQueryResponse struct {
// Bytes is the raw DNS response bytes.
Bytes []byte
// Resolvers is the list of resolvers that the forwarder deemed able to resolve the query.
Resolvers []*dnstype.Resolver
}

View File

@@ -0,0 +1,19 @@
// Copyright (c) Tailscale Inc & AUTHORS
// SPDX-License-Identifier: BSD-3-Clause
package apitype
type DNSConfig struct {
Resolvers []DNSResolver `json:"resolvers"`
FallbackResolvers []DNSResolver `json:"fallbackResolvers"`
Routes map[string][]DNSResolver `json:"routes"`
Domains []string `json:"domains"`
Nameservers []string `json:"nameservers"`
Proxied bool `json:"proxied"`
TempCorpIssue13969 string `json:"TempCorpIssue13969,omitempty"`
}
type DNSResolver struct {
Addr string `json:"addr"`
BootstrapResolution []string `json:"bootstrapResolution,omitempty"`
}

288
vendor/tailscale.com/client/tailscale/devices.go generated vendored Normal file
View File

@@ -0,0 +1,288 @@
// Copyright (c) Tailscale Inc & AUTHORS
// SPDX-License-Identifier: BSD-3-Clause
//go:build go1.19
package tailscale
import (
"bytes"
"context"
"encoding/json"
"fmt"
"log"
"net/http"
"net/url"
"tailscale.com/types/opt"
)
type GetDevicesResponse struct {
Devices []*Device `json:"devices"`
}
type DerpRegion struct {
Preferred bool `json:"preferred,omitempty"`
LatencyMilliseconds float64 `json:"latencyMs"`
}
type ClientConnectivity struct {
Endpoints []string `json:"endpoints"`
DERP string `json:"derp"`
MappingVariesByDestIP opt.Bool `json:"mappingVariesByDestIP"`
// DERPLatency is mapped by region name (e.g. "New York City", "Seattle").
DERPLatency map[string]DerpRegion `json:"latency"`
ClientSupports map[string]opt.Bool `json:"clientSupports"`
}
type Device struct {
// Addresses is a list of the devices's Tailscale IP addresses.
// It's currently just 1 element, the 100.x.y.z Tailscale IP.
Addresses []string `json:"addresses"`
DeviceID string `json:"id"`
NodeID string `json:"nodeId"`
User string `json:"user"`
Name string `json:"name"`
Hostname string `json:"hostname"`
ClientVersion string `json:"clientVersion"` // Empty for external devices.
UpdateAvailable bool `json:"updateAvailable"` // Empty for external devices.
OS string `json:"os"`
Tags []string `json:"tags"`
Created string `json:"created"` // Empty for external devices.
LastSeen string `json:"lastSeen"`
KeyExpiryDisabled bool `json:"keyExpiryDisabled"`
Expires string `json:"expires"`
Authorized bool `json:"authorized"`
IsExternal bool `json:"isExternal"`
MachineKey string `json:"machineKey"` // Empty for external devices.
NodeKey string `json:"nodeKey"`
// BlocksIncomingConnections is configured via the device's
// Tailscale client preferences. This field is only reported
// to the API starting with Tailscale 1.3.x clients.
BlocksIncomingConnections bool `json:"blocksIncomingConnections"`
// The following fields are not included by default:
// EnabledRoutes are the previously-approved subnet routes
// (e.g. "192.168.4.16/24", "10.5.2.4/32").
EnabledRoutes []string `json:"enabledRoutes"` // Empty for external devices.
// AdvertisedRoutes are the subnets (both enabled and not enabled)
// being requested from the node.
AdvertisedRoutes []string `json:"advertisedRoutes"` // Empty for external devices.
ClientConnectivity *ClientConnectivity `json:"clientConnectivity"`
// PostureIdentity contains extra identifiers collected from the device when
// the tailnet has the device posture identification features enabled. If
	// Tailscale has attempted to collect this from the device but it has not
// opted in, PostureIdentity will have Disabled=true.
PostureIdentity *DevicePostureIdentity `json:"postureIdentity"`
}
type DevicePostureIdentity struct {
Disabled bool `json:"disabled,omitempty"`
SerialNumbers []string `json:"serialNumbers,omitempty"`
}
// DeviceFieldsOpts determines which fields should be returned in the response.
//
// Please only use DeviceAllFields and DeviceDefaultFields.
// Other DeviceFieldsOpts are not supported.
//
// TODO: Support other DeviceFieldsOpts.
// In the future, users should be able to create their own DeviceFieldsOpts
// as valid arguments by setting the fields they want returned to a "non-nil"
// value. For example, DeviceFieldsOpts{NodeID: "true"} should only return NodeIDs.
type DeviceFieldsOpts Device
func (d *DeviceFieldsOpts) addFieldsToQueryParameter() string {
if d == DeviceDefaultFields || d == nil {
return "default"
}
if d == DeviceAllFields {
return "all"
}
return ""
}
var (
DeviceAllFields = &DeviceFieldsOpts{}
// DeviceDefaultFields specifies that the following fields are returned:
// Addresses, NodeID, User, Name, Hostname, ClientVersion, UpdateAvailable,
// OS, Created, LastSeen, KeyExpiryDisabled, Expires, Authorized, IsExternal
// MachineKey, NodeKey, BlocksIncomingConnections.
DeviceDefaultFields = &DeviceFieldsOpts{}
)
// Devices retrieves the list of devices for a tailnet.
//
// See the Device structure for the list of fields hidden for external devices.
// The optional fields parameter specifies which fields of the devices to return; currently
// only DeviceDefaultFields (equivalent to nil) and DeviceAllFields are supported.
// Other values are currently undefined.
func (c *Client) Devices(ctx context.Context, fields *DeviceFieldsOpts) (deviceList []*Device, err error) {
defer func() {
if err != nil {
err = fmt.Errorf("tailscale.Devices: %w", err)
}
}()
path := fmt.Sprintf("%s/api/v2/tailnet/%s/devices", c.baseURL(), c.tailnet)
req, err := http.NewRequestWithContext(ctx, "GET", path, nil)
if err != nil {
return nil, err
}
// Add fields.
fieldStr := fields.addFieldsToQueryParameter()
q := req.URL.Query()
q.Add("fields", fieldStr)
req.URL.RawQuery = q.Encode()
b, resp, err := c.sendRequest(req)
if err != nil {
return nil, err
}
// If status code was not successful, return the error.
// TODO: Change the check for the StatusCode to include other 2XX success codes.
if resp.StatusCode != http.StatusOK {
return nil, handleErrorResponse(b, resp)
}
var devices GetDevicesResponse
err = json.Unmarshal(b, &devices)
return devices.Devices, err
}
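// exampleListDevices is a hypothetical sketch, not part of the upstream file.
// It lists devices with all fields so that EnabledRoutes (normally omitted)
// is populated.
func exampleListDevices(ctx context.Context, c *Client) error {
	devices, err := c.Devices(ctx, DeviceAllFields)
	if err != nil {
		return err
	}
	for _, d := range devices {
		log.Printf("%s (%s) routes=%v", d.Name, d.DeviceID, d.EnabledRoutes)
	}
	return nil
}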
// Device retrieves the details for a specific device.
//
// See the Device structure for the list of fields hidden for an external device.
// The optional fields parameter specifies which fields of the devices to return; currently
// only DeviceDefaultFields (equivalent to nil) and DeviceAllFields are supported.
// Other values are currently undefined.
func (c *Client) Device(ctx context.Context, deviceID string, fields *DeviceFieldsOpts) (device *Device, err error) {
defer func() {
if err != nil {
err = fmt.Errorf("tailscale.Device: %w", err)
}
}()
path := fmt.Sprintf("%s/api/v2/device/%s", c.baseURL(), deviceID)
req, err := http.NewRequestWithContext(ctx, "GET", path, nil)
if err != nil {
return nil, err
}
// Add fields.
fieldStr := fields.addFieldsToQueryParameter()
q := req.URL.Query()
q.Add("fields", fieldStr)
req.URL.RawQuery = q.Encode()
b, resp, err := c.sendRequest(req)
if err != nil {
return nil, err
}
// If status code was not successful, return the error.
// TODO: Change the check for the StatusCode to include other 2XX success codes.
if resp.StatusCode != http.StatusOK {
return nil, handleErrorResponse(b, resp)
}
err = json.Unmarshal(b, &device)
return device, err
}
// DeleteDevice deletes the specified device from the Client's tailnet.
// NOTE: Only devices that belong to the Client's tailnet can be deleted.
// Deleting external devices is not supported.
func (c *Client) DeleteDevice(ctx context.Context, deviceID string) (err error) {
defer func() {
if err != nil {
err = fmt.Errorf("tailscale.DeleteDevice: %w", err)
}
}()
path := fmt.Sprintf("%s/api/v2/device/%s", c.baseURL(), url.PathEscape(deviceID))
req, err := http.NewRequestWithContext(ctx, "DELETE", path, nil)
if err != nil {
return err
}
b, resp, err := c.sendRequest(req)
if err != nil {
return err
}
log.Printf("RESP: %di, path: %s", resp.StatusCode, path)
// If status code was not successful, return the error.
// TODO: Change the check for the StatusCode to include other 2XX success codes.
if resp.StatusCode != http.StatusOK {
return handleErrorResponse(b, resp)
}
return nil
}
// AuthorizeDevice marks a device as authorized.
func (c *Client) AuthorizeDevice(ctx context.Context, deviceID string) error {
return c.SetAuthorized(ctx, deviceID, true)
}
// SetAuthorized marks a device as authorized or not.
func (c *Client) SetAuthorized(ctx context.Context, deviceID string, authorized bool) error {
params := &struct {
Authorized bool `json:"authorized"`
}{Authorized: authorized}
data, err := json.Marshal(params)
if err != nil {
return err
}
path := fmt.Sprintf("%s/api/v2/device/%s/authorized", c.baseURL(), url.PathEscape(deviceID))
req, err := http.NewRequestWithContext(ctx, "POST", path, bytes.NewBuffer(data))
if err != nil {
return err
}
b, resp, err := c.sendRequest(req)
if err != nil {
return err
}
// If status code was not successful, return the error.
// TODO: Change the check for the StatusCode to include other 2XX success codes.
if resp.StatusCode != http.StatusOK {
return handleErrorResponse(b, resp)
}
return nil
}
// SetTags updates the ACL tags on a device.
func (c *Client) SetTags(ctx context.Context, deviceID string, tags []string) error {
params := &struct {
Tags []string `json:"tags"`
}{Tags: tags}
data, err := json.Marshal(params)
if err != nil {
return err
}
path := fmt.Sprintf("%s/api/v2/device/%s/tags", c.baseURL(), url.PathEscape(deviceID))
req, err := http.NewRequestWithContext(ctx, "POST", path, bytes.NewBuffer(data))
if err != nil {
return err
}
b, resp, err := c.sendRequest(req)
if err != nil {
return err
}
// If status code was not successful, return the error.
// TODO: Change the check for the StatusCode to include other 2XX success codes.
if resp.StatusCode != http.StatusOK {
return handleErrorResponse(b, resp)
}
return nil
}

233
vendor/tailscale.com/client/tailscale/dns.go generated vendored Normal file
View File

@@ -0,0 +1,233 @@
// Copyright (c) Tailscale Inc & AUTHORS
// SPDX-License-Identifier: BSD-3-Clause
//go:build go1.19
package tailscale
import (
"bytes"
"context"
"encoding/json"
"fmt"
"net/http"
"tailscale.com/client/tailscale/apitype"
)
// DNSNameServers is returned when retrieving the list of nameservers.
// It is also the structure provided when setting nameservers.
type DNSNameServers struct {
DNS []string `json:"dns"` // DNS name servers
}
// DNSNameServersPostResponse is returned when setting the list of DNS nameservers.
//
// It includes the MagicDNS status since nameservers changes may affect MagicDNS.
type DNSNameServersPostResponse struct {
DNS []string `json:"dns"` // DNS name servers
MagicDNS bool `json:"magicDNS"` // whether MagicDNS is active for this tailnet (enabled + has fallback nameservers)
}
// DNSSearchPaths is the list of search paths for a given domain.
type DNSSearchPaths struct {
SearchPaths []string `json:"searchPaths"` // DNS search paths
}
// DNSPreferences is the preferences set for a given tailnet.
//
// It includes MagicDNS which can be turned on or off. To enable MagicDNS,
// there must be at least one nameserver. When all nameservers are removed,
// MagicDNS is disabled.
type DNSPreferences struct {
MagicDNS bool `json:"magicDNS"` // whether MagicDNS is active for this tailnet (enabled + has fallback nameservers)
}
func (c *Client) dnsGETRequest(ctx context.Context, endpoint string) ([]byte, error) {
path := fmt.Sprintf("%s/api/v2/tailnet/%s/dns/%s", c.baseURL(), c.tailnet, endpoint)
req, err := http.NewRequestWithContext(ctx, "GET", path, nil)
if err != nil {
return nil, err
}
b, resp, err := c.sendRequest(req)
if err != nil {
return nil, err
}
// If status code was not successful, return the error.
// TODO: Change the check for the StatusCode to include other 2XX success codes.
if resp.StatusCode != http.StatusOK {
return nil, handleErrorResponse(b, resp)
}
return b, nil
}
func (c *Client) dnsPOSTRequest(ctx context.Context, endpoint string, postData any) ([]byte, error) {
path := fmt.Sprintf("%s/api/v2/tailnet/%s/dns/%s", c.baseURL(), c.tailnet, endpoint)
data, err := json.Marshal(&postData)
if err != nil {
return nil, err
}
	req, err := http.NewRequestWithContext(ctx, "POST", path, bytes.NewBuffer(data))
	if err != nil {
		return nil, err
	}
	req.Header.Set("Content-Type", "application/json")
b, resp, err := c.sendRequest(req)
if err != nil {
return nil, err
}
// If status code was not successful, return the error.
// TODO: Change the check for the StatusCode to include other 2XX success codes.
if resp.StatusCode != http.StatusOK {
return nil, handleErrorResponse(b, resp)
}
return b, nil
}
// DNSConfig retrieves the DNSConfig settings for a domain.
func (c *Client) DNSConfig(ctx context.Context) (cfg *apitype.DNSConfig, err error) {
// Format return errors to be descriptive.
defer func() {
if err != nil {
err = fmt.Errorf("tailscale.DNSConfig: %w", err)
}
}()
b, err := c.dnsGETRequest(ctx, "config")
if err != nil {
return nil, err
}
var dnsResp apitype.DNSConfig
err = json.Unmarshal(b, &dnsResp)
return &dnsResp, err
}
func (c *Client) SetDNSConfig(ctx context.Context, cfg apitype.DNSConfig) (resp *apitype.DNSConfig, err error) {
// Format return errors to be descriptive.
defer func() {
if err != nil {
err = fmt.Errorf("tailscale.SetDNSConfig: %w", err)
}
}()
var dnsResp apitype.DNSConfig
b, err := c.dnsPOSTRequest(ctx, "config", cfg)
if err != nil {
return nil, err
}
err = json.Unmarshal(b, &dnsResp)
return &dnsResp, err
}
// NameServers retrieves the list of nameservers set for a domain.
func (c *Client) NameServers(ctx context.Context) (nameservers []string, err error) {
// Format return errors to be descriptive.
defer func() {
if err != nil {
err = fmt.Errorf("tailscale.NameServers: %w", err)
}
}()
b, err := c.dnsGETRequest(ctx, "nameservers")
if err != nil {
return nil, err
}
var dnsResp DNSNameServers
err = json.Unmarshal(b, &dnsResp)
return dnsResp.DNS, err
}
// SetNameServers sets the list of nameservers for a tailnet to the list provided
// by the user.
//
// It returns the new list of nameservers and the MagicDNS status in case it was
// affected by the change. For example, removing all nameservers will turn off
// MagicDNS.
func (c *Client) SetNameServers(ctx context.Context, nameservers []string) (dnsResp *DNSNameServersPostResponse, err error) {
defer func() {
if err != nil {
err = fmt.Errorf("tailscale.SetNameServers: %w", err)
}
}()
dnsReq := DNSNameServers{DNS: nameservers}
b, err := c.dnsPOSTRequest(ctx, "nameservers", dnsReq)
if err != nil {
return nil, err
}
err = json.Unmarshal(b, &dnsResp)
return dnsResp, err
}
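// exampleSetNameServers is a hypothetical sketch, not part of the upstream
// file. It replaces the tailnet's nameservers and checks whether MagicDNS is
// still active afterwards, since nameserver changes can affect it.
func exampleSetNameServers(ctx context.Context, c *Client) error {
	resp, err := c.SetNameServers(ctx, []string{"1.1.1.1", "8.8.8.8"})
	if err != nil {
		return err
	}
	if !resp.MagicDNS {
		fmt.Println("MagicDNS is now off for this tailnet")
	}
	return nil
}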
// DNSPreferences retrieves the DNS preferences set for a tailnet.
//
// It returns the status of MagicDNS.
func (c *Client) DNSPreferences(ctx context.Context) (dnsResp *DNSPreferences, err error) {
// Format return errors to be descriptive.
defer func() {
if err != nil {
err = fmt.Errorf("tailscale.DNSPreferences: %w", err)
}
}()
b, err := c.dnsGETRequest(ctx, "preferences")
if err != nil {
return nil, err
}
err = json.Unmarshal(b, &dnsResp)
return dnsResp, err
}
// SetDNSPreferences sets the DNS preferences for a tailnet.
//
// MagicDNS can only be enabled when there is at least one nameserver provided.
// When all nameservers are removed, MagicDNS is disabled and will stay disabled,
// unless explicitly enabled by a user again.
func (c *Client) SetDNSPreferences(ctx context.Context, magicDNS bool) (dnsResp *DNSPreferences, err error) {
defer func() {
if err != nil {
err = fmt.Errorf("tailscale.SetDNSPreferences: %w", err)
}
}()
dnsReq := DNSPreferences{MagicDNS: magicDNS}
b, err := c.dnsPOSTRequest(ctx, "preferences", dnsReq)
if err != nil {
return
}
err = json.Unmarshal(b, &dnsResp)
return dnsResp, err
}
// SearchPaths retrieves the list of searchpaths set for a tailnet.
func (c *Client) SearchPaths(ctx context.Context) (searchpaths []string, err error) {
defer func() {
if err != nil {
err = fmt.Errorf("tailscale.SearchPaths: %w", err)
}
}()
b, err := c.dnsGETRequest(ctx, "searchpaths")
if err != nil {
return nil, err
}
var dnsResp *DNSSearchPaths
err = json.Unmarshal(b, &dnsResp)
return dnsResp.SearchPaths, err
}
// SetSearchPaths sets the list of searchpaths for a tailnet.
func (c *Client) SetSearchPaths(ctx context.Context, searchpaths []string) (newSearchPaths []string, err error) {
defer func() {
if err != nil {
err = fmt.Errorf("tailscale.SetSearchPaths: %w", err)
}
}()
dnsReq := DNSSearchPaths{SearchPaths: searchpaths}
b, err := c.dnsPOSTRequest(ctx, "searchpaths", dnsReq)
if err != nil {
return nil, err
}
var dnsResp DNSSearchPaths
err = json.Unmarshal(b, &dnsResp)
return dnsResp.SearchPaths, err
}

166
vendor/tailscale.com/client/tailscale/keys.go generated vendored Normal file
View File

@@ -0,0 +1,166 @@
// Copyright (c) Tailscale Inc & AUTHORS
// SPDX-License-Identifier: BSD-3-Clause
package tailscale
import (
"bytes"
"context"
"encoding/json"
"fmt"
"net/http"
"time"
)
// Key represents a Tailscale API or auth key.
type Key struct {
ID string `json:"id"`
Created time.Time `json:"created"`
Expires time.Time `json:"expires"`
Capabilities KeyCapabilities `json:"capabilities"`
}
// KeyCapabilities are the capabilities of a Key.
type KeyCapabilities struct {
Devices KeyDeviceCapabilities `json:"devices,omitempty"`
}
// KeyDeviceCapabilities are the device-related capabilities of a Key.
type KeyDeviceCapabilities struct {
Create KeyDeviceCreateCapabilities `json:"create"`
}
// KeyDeviceCreateCapabilities are the device creation capabilities of a Key.
type KeyDeviceCreateCapabilities struct {
Reusable bool `json:"reusable"`
Ephemeral bool `json:"ephemeral"`
Preauthorized bool `json:"preauthorized"`
Tags []string `json:"tags,omitempty"`
}
// Keys returns the list of keys for the current user.
func (c *Client) Keys(ctx context.Context) ([]string, error) {
path := fmt.Sprintf("%s/api/v2/tailnet/%s/keys", c.baseURL(), c.tailnet)
req, err := http.NewRequestWithContext(ctx, "GET", path, nil)
if err != nil {
return nil, err
}
b, resp, err := c.sendRequest(req)
if err != nil {
return nil, err
}
if resp.StatusCode != http.StatusOK {
return nil, handleErrorResponse(b, resp)
}
var keys struct {
Keys []*Key `json:"keys"`
}
if err := json.Unmarshal(b, &keys); err != nil {
return nil, err
}
ret := make([]string, 0, len(keys.Keys))
for _, k := range keys.Keys {
ret = append(ret, k.ID)
}
return ret, nil
}
// CreateKey creates a new key for the current user. Currently, only auth keys
// can be created. It returns the secret key itself, which cannot be retrieved again
// later, and the key metadata.
//
// To create a key with a specific expiry, use CreateKeyWithExpiry.
func (c *Client) CreateKey(ctx context.Context, caps KeyCapabilities) (keySecret string, keyMeta *Key, _ error) {
return c.CreateKeyWithExpiry(ctx, caps, 0)
}
// CreateKeyWithExpiry is like CreateKey, but allows specifying an expiration time.
//
// The time is truncated to a whole number of seconds. If zero, that means no expiration.
func (c *Client) CreateKeyWithExpiry(ctx context.Context, caps KeyCapabilities, expiry time.Duration) (keySecret string, keyMeta *Key, _ error) {
	// convert expiry to a whole number of seconds
expirySeconds := int64(expiry.Seconds())
if expirySeconds < 0 {
return "", nil, fmt.Errorf("expiry must be positive")
}
if expirySeconds == 0 && expiry != 0 {
return "", nil, fmt.Errorf("non-zero expiry must be at least one second")
}
keyRequest := struct {
Capabilities KeyCapabilities `json:"capabilities"`
ExpirySeconds int64 `json:"expirySeconds,omitempty"`
	}{caps, expirySeconds}
bs, err := json.Marshal(keyRequest)
if err != nil {
return "", nil, err
}
path := fmt.Sprintf("%s/api/v2/tailnet/%s/keys", c.baseURL(), c.tailnet)
req, err := http.NewRequestWithContext(ctx, "POST", path, bytes.NewReader(bs))
if err != nil {
return "", nil, err
}
b, resp, err := c.sendRequest(req)
if err != nil {
return "", nil, err
}
if resp.StatusCode != http.StatusOK {
return "", nil, handleErrorResponse(b, resp)
}
var key struct {
Key
Secret string `json:"key"`
}
if err := json.Unmarshal(b, &key); err != nil {
return "", nil, err
}
return key.Secret, &key.Key, nil
}
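// exampleCreateAuthKey is a hypothetical sketch, not part of the upstream
// file. It mints a reusable, pre-authorized, tagged auth key that expires in
// 24 hours; the secret is only returned once, so callers must store it.
func exampleCreateAuthKey(ctx context.Context, c *Client) (string, error) {
	caps := KeyCapabilities{
		Devices: KeyDeviceCapabilities{
			Create: KeyDeviceCreateCapabilities{
				Reusable:      true,
				Preauthorized: true,
				Tags:          []string{"tag:ci"},
			},
		},
	}
	secret, meta, err := c.CreateKeyWithExpiry(ctx, caps, 24*time.Hour)
	if err != nil {
		return "", err
	}
	_ = meta // ID, Created, and Expires can be persisted for bookkeeping
	return secret, nil
}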
// Key returns the metadata for the given key ID. Currently, capabilities are
// only returned for auth keys; API keys only return general metadata.
func (c *Client) Key(ctx context.Context, id string) (*Key, error) {
path := fmt.Sprintf("%s/api/v2/tailnet/%s/keys/%s", c.baseURL(), c.tailnet, id)
req, err := http.NewRequestWithContext(ctx, "GET", path, nil)
if err != nil {
return nil, err
}
b, resp, err := c.sendRequest(req)
if err != nil {
return nil, err
}
if resp.StatusCode != http.StatusOK {
return nil, handleErrorResponse(b, resp)
}
var key Key
if err := json.Unmarshal(b, &key); err != nil {
return nil, err
}
return &key, nil
}
// DeleteKey deletes the key with the given ID.
func (c *Client) DeleteKey(ctx context.Context, id string) error {
path := fmt.Sprintf("%s/api/v2/tailnet/%s/keys/%s", c.baseURL(), c.tailnet, id)
req, err := http.NewRequestWithContext(ctx, "DELETE", path, nil)
if err != nil {
return err
}
b, resp, err := c.sendRequest(req)
if err != nil {
return err
}
if resp.StatusCode != http.StatusOK {
return handleErrorResponse(b, resp)
}
return nil
}

1663
vendor/tailscale.com/client/tailscale/localclient.go generated vendored Normal file

File diff suppressed because it is too large

View File

@@ -0,0 +1,10 @@
// Copyright (c) Tailscale Inc & AUTHORS
// SPDX-License-Identifier: BSD-3-Clause
//go:build !go1.23
package tailscale
func init() {
you_need_Go_1_23_to_compile_Tailscale()
}

95
vendor/tailscale.com/client/tailscale/routes.go generated vendored Normal file
View File

@@ -0,0 +1,95 @@
// Copyright (c) Tailscale Inc & AUTHORS
// SPDX-License-Identifier: BSD-3-Clause
//go:build go1.19
package tailscale
import (
"bytes"
"context"
"encoding/json"
"fmt"
"net/http"
"net/netip"
)
// Routes contains the lists of subnet routes that are currently advertised by a device,
// as well as the subnets that are enabled to be routed by the device.
type Routes struct {
AdvertisedRoutes []netip.Prefix `json:"advertisedRoutes"`
EnabledRoutes []netip.Prefix `json:"enabledRoutes"`
}
// Routes retrieves the list of subnet routes that have been enabled for a device.
// The routes that are returned are not necessarily advertised by the device;
// they have only been preapproved.
func (c *Client) Routes(ctx context.Context, deviceID string) (routes *Routes, err error) {
defer func() {
if err != nil {
err = fmt.Errorf("tailscale.Routes: %w", err)
}
}()
path := fmt.Sprintf("%s/api/v2/device/%s/routes", c.baseURL(), deviceID)
req, err := http.NewRequestWithContext(ctx, "GET", path, nil)
if err != nil {
return nil, err
}
b, resp, err := c.sendRequest(req)
if err != nil {
return nil, err
}
// If status code was not successful, return the error.
// TODO: Change the check for the StatusCode to include other 2XX success codes.
if resp.StatusCode != http.StatusOK {
return nil, handleErrorResponse(b, resp)
}
var sr Routes
err = json.Unmarshal(b, &sr)
return &sr, err
}
type postRoutesParams struct {
Routes []netip.Prefix `json:"routes"`
}
// SetRoutes updates the list of subnets that are enabled for a device.
// Subnets must be parsable by net/netip.ParsePrefix.
// Subnets do not have to be currently advertised by a device, they may be pre-enabled.
// Returns the updated list of enabled and advertised subnet routes in a *Routes object.
func (c *Client) SetRoutes(ctx context.Context, deviceID string, subnets []netip.Prefix) (routes *Routes, err error) {
defer func() {
if err != nil {
err = fmt.Errorf("tailscale.SetRoutes: %w", err)
}
}()
params := &postRoutesParams{Routes: subnets}
data, err := json.Marshal(params)
if err != nil {
return nil, err
}
path := fmt.Sprintf("%s/api/v2/device/%s/routes", c.baseURL(), deviceID)
req, err := http.NewRequestWithContext(ctx, "POST", path, bytes.NewBuffer(data))
if err != nil {
return nil, err
}
b, resp, err := c.sendRequest(req)
if err != nil {
return nil, err
}
// If status code was not successful, return the error.
// TODO: Change the check for the StatusCode to include other 2XX success codes.
if resp.StatusCode != http.StatusOK {
return nil, handleErrorResponse(b, resp)
}
var srr *Routes
if err := json.Unmarshal(b, &srr); err != nil {
return nil, err
}
return srr, err
}
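// exampleEnableRoute is a hypothetical sketch, not part of the upstream file.
// It enables a single subnet route for a device; the prefix must parse with
// netip.ParsePrefix and is a placeholder here.
func exampleEnableRoute(ctx context.Context, c *Client, deviceID string) error {
	pfx, err := netip.ParsePrefix("192.168.4.0/24")
	if err != nil {
		return err
	}
	routes, err := c.SetRoutes(ctx, deviceID, []netip.Prefix{pfx})
	if err != nil {
		return err
	}
	fmt.Printf("enabled=%v advertised=%v\n", routes.EnabledRoutes, routes.AdvertisedRoutes)
	return nil
}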

42
vendor/tailscale.com/client/tailscale/tailnet.go generated vendored Normal file
View File

@@ -0,0 +1,42 @@
// Copyright (c) Tailscale Inc & AUTHORS
// SPDX-License-Identifier: BSD-3-Clause
//go:build go1.19
package tailscale
import (
"context"
"fmt"
"net/http"
"net/url"
"tailscale.com/util/httpm"
)
// TailnetDeleteRequest handles sending a DELETE request for a tailnet to control.
func (c *Client) TailnetDeleteRequest(ctx context.Context, tailnetID string) (err error) {
defer func() {
if err != nil {
err = fmt.Errorf("tailscale.DeleteTailnet: %w", err)
}
}()
path := fmt.Sprintf("%s/api/v2/tailnet/%s", c.baseURL(), url.PathEscape(string(tailnetID)))
req, err := http.NewRequestWithContext(ctx, httpm.DELETE, path, nil)
if err != nil {
return err
}
c.setAuth(req)
b, resp, err := c.sendRequest(req)
if err != nil {
return err
}
if resp.StatusCode != http.StatusOK {
return handleErrorResponse(b, resp)
}
return nil
}

157
vendor/tailscale.com/client/tailscale/tailscale.go generated vendored Normal file
View File

@@ -0,0 +1,157 @@
// Copyright (c) Tailscale Inc & AUTHORS
// SPDX-License-Identifier: BSD-3-Clause
//go:build go1.19
// Package tailscale contains Go clients for the Tailscale LocalAPI and
// Tailscale control plane API.
//
// Warning: this package is in development and makes no API compatibility
// promises as of 2022-04-29. It is subject to change at any time.
package tailscale
import (
"encoding/json"
"errors"
"fmt"
"io"
"net/http"
)
// I_Acknowledge_This_API_Is_Unstable must be set true to use this package
// for now. It was added 2022-04-29 when it was moved to this git repo
// and will be removed when the public API has settled.
//
// TODO(bradfitz): remove this after we're happy with the public API.
var I_Acknowledge_This_API_Is_Unstable = false
// TODO: use url.PathEscape() for deviceID and tailnets when constructing requests.
const defaultAPIBase = "https://api.tailscale.com"
// maxSize is the maximum read size (10MB) of responses from the server.
const maxReadSize = 10 << 20
// Client makes API calls to the Tailscale control plane API server.
//
// Use NewClient to instantiate one. Exported fields should be set before
// the client is used and not changed thereafter.
type Client struct {
// tailnet is the globally unique identifier for a Tailscale network, such
// as "example.com" or "user@gmail.com".
tailnet string
// auth is the authentication method to use for this client.
// nil means none, which generally won't work, but won't crash.
auth AuthMethod
// BaseURL optionally specifies an alternate API server to use.
// If empty, "https://api.tailscale.com" is used.
BaseURL string
// HTTPClient optionally specifies an alternate HTTP client to use.
// If nil, http.DefaultClient is used.
HTTPClient *http.Client
}
func (c *Client) httpClient() *http.Client {
if c.HTTPClient != nil {
return c.HTTPClient
}
return http.DefaultClient
}
func (c *Client) baseURL() string {
if c.BaseURL != "" {
return c.BaseURL
}
return defaultAPIBase
}
// AuthMethod is the interface for API authentication methods.
//
// Most users will use AuthKey.
type AuthMethod interface {
modifyRequest(req *http.Request)
}
// APIKey is an AuthMethod for NewClient that authenticates requests
// using an authkey.
type APIKey string
func (ak APIKey) modifyRequest(req *http.Request) {
req.SetBasicAuth(string(ak), "")
}
func (c *Client) setAuth(r *http.Request) {
if c.auth != nil {
c.auth.modifyRequest(r)
}
}
// NewClient is a convenience method for instantiating a new Client.
//
// tailnet is the globally unique identifier for a Tailscale network, such
// as "example.com" or "user@gmail.com".
// If the returned client's HTTPClient field is nil, http.DefaultClient is used.
// The client defaults to "https://api.tailscale.com" as its BaseURL; set the
// BaseURL field to change it.
func NewClient(tailnet string, auth AuthMethod) *Client {
return &Client{
tailnet: tailnet,
auth: auth,
}
}
func (c *Client) Tailnet() string { return c.tailnet }
// Do sends a raw HTTP request, after adding any authentication headers.
func (c *Client) Do(req *http.Request) (*http.Response, error) {
if !I_Acknowledge_This_API_Is_Unstable {
return nil, errors.New("use of Client without setting I_Acknowledge_This_API_Is_Unstable")
}
c.setAuth(req)
return c.httpClient().Do(req)
}
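// exampleNewClient is a hypothetical sketch, not part of the upstream file.
// The unstable-API acknowledgement must be set before any request is made;
// Do and sendRequest refuse to send anything otherwise. The tailnet name and
// key are placeholders.
func exampleNewClient() *Client {
	I_Acknowledge_This_API_Is_Unstable = true
	return NewClient("example.com", APIKey("tskey-api-REDACTED"))
}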
// sendRequest adds the authentication key to the request and sends it. It
// receives the response and reads up to 10MB of it.
func (c *Client) sendRequest(req *http.Request) ([]byte, *http.Response, error) {
if !I_Acknowledge_This_API_Is_Unstable {
return nil, nil, errors.New("use of Client without setting I_Acknowledge_This_API_Is_Unstable")
}
c.setAuth(req)
resp, err := c.httpClient().Do(req)
if err != nil {
return nil, resp, err
}
defer resp.Body.Close()
// Read response. Limit the response to 10MB.
body := io.LimitReader(resp.Body, maxReadSize+1)
b, err := io.ReadAll(body)
if len(b) > maxReadSize {
err = errors.New("API response too large")
}
return b, resp, err
}
// ErrResponse is the HTTP error returned by the Tailscale server.
type ErrResponse struct {
Status int
Message string
}
func (e ErrResponse) Error() string {
return fmt.Sprintf("Status: %d, Message: %q", e.Status, e.Message)
}
// handleErrorResponse decodes the error message from the server and returns
// an ErrResponse from it.
func handleErrorResponse(b []byte, resp *http.Response) error {
var errResp ErrResponse
if err := json.Unmarshal(b, &errResp); err != nil {
return err
}
errResp.Status = resp.StatusCode
return errResp
}

131
vendor/tailscale.com/client/web/assets.go generated vendored Normal file
View File

@@ -0,0 +1,131 @@
// Copyright (c) Tailscale Inc & AUTHORS
// SPDX-License-Identifier: BSD-3-Clause
package web
import (
"io"
"io/fs"
"log"
"net/http"
"net/http/httputil"
"net/url"
"os"
"os/exec"
"path/filepath"
"strings"
"time"
prebuilt "github.com/tailscale/web-client-prebuilt"
)
var start = time.Now()
func assetsHandler(devMode bool) (_ http.Handler, cleanup func()) {
if devMode {
// When in dev mode, proxy asset requests to the Vite dev server.
cleanup := startDevServer()
return devServerProxy(), cleanup
}
fsys := prebuilt.FS()
return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
path := strings.TrimPrefix(r.URL.Path, "/")
f, err := openPrecompressedFile(w, r, path, fsys)
if err != nil {
// Rewrite request to just fetch index.html and let
// the frontend router handle it.
r = r.Clone(r.Context())
path = "index.html"
f, err = openPrecompressedFile(w, r, path, fsys)
}
if f == nil {
http.Error(w, err.Error(), http.StatusNotFound)
return
}
defer f.Close()
// fs.File does not claim to implement Seeker, but in practice it does.
fSeeker, ok := f.(io.ReadSeeker)
if !ok {
http.Error(w, "Not seekable", http.StatusInternalServerError)
return
}
if strings.HasPrefix(path, "assets/") {
// Aggressively cache static assets, since we cache-bust our assets with
// hashed filenames.
w.Header().Set("Cache-Control", "public, max-age=31535996")
w.Header().Set("Vary", "Accept-Encoding")
}
http.ServeContent(w, r, path, start, fSeeker)
}), nil
}
func openPrecompressedFile(w http.ResponseWriter, r *http.Request, path string, fs fs.FS) (fs.File, error) {
if f, err := fs.Open(path + ".gz"); err == nil {
w.Header().Set("Content-Encoding", "gzip")
return f, nil
}
return fs.Open(path) // fallback
}
// startDevServer starts the JS dev server that does on-demand rebuilding
// and serving of web client JS and CSS resources.
func startDevServer() (cleanup func()) {
root := gitRootDir()
webClientPath := filepath.Join(root, "client", "web")
yarn := filepath.Join(root, "tool", "yarn")
node := filepath.Join(root, "tool", "node")
vite := filepath.Join(webClientPath, "node_modules", ".bin", "vite")
log.Printf("installing JavaScript deps using %s...", yarn)
out, err := exec.Command(yarn, "--non-interactive", "-s", "--cwd", webClientPath, "install").CombinedOutput()
if err != nil {
log.Fatalf("error running tailscale web's yarn install: %v, %s", err, out)
}
log.Printf("starting JavaScript dev server...")
cmd := exec.Command(node, vite)
cmd.Dir = webClientPath
cmd.Stdout = os.Stdout
cmd.Stderr = os.Stderr
if err := cmd.Start(); err != nil {
log.Fatalf("Starting JS dev server: %v", err)
}
log.Printf("JavaScript dev server running as pid %d", cmd.Process.Pid)
return func() {
cmd.Process.Signal(os.Interrupt)
err := cmd.Wait()
log.Printf("JavaScript dev server exited: %v", err)
}
}
// devServerProxy returns a reverse proxy to the vite dev server.
func devServerProxy() *httputil.ReverseProxy {
// We use Vite to develop on the web client.
// Vite starts up its own local server for development,
// which we proxy requests to from Server.ServeHTTP.
// Here we set up the proxy to Vite's server.
handleErr := func(w http.ResponseWriter, r *http.Request, err error) {
w.Header().Set("Content-Type", "text/plain")
w.WriteHeader(http.StatusBadGateway)
w.Write([]byte("The web client development server isn't running. " +
"Run `./tool/yarn --cwd client/web start` from " +
"the repo root to start the development server."))
w.Write([]byte("\n\nError: " + err.Error()))
}
viteTarget, _ := url.Parse("http://127.0.0.1:4000")
devProxy := httputil.NewSingleHostReverseProxy(viteTarget)
devProxy.ErrorHandler = handleErr
return devProxy
}
func gitRootDir() string {
top, err := exec.Command("git", "rev-parse", "--show-toplevel").Output()
if err != nil {
log.Fatalf("failed to find git top level (not in corp git?): %v", err)
}
return strings.TrimSpace(string(top))
}

339
vendor/tailscale.com/client/web/auth.go generated vendored Normal file
View File

@@ -0,0 +1,339 @@
// Copyright (c) Tailscale Inc & AUTHORS
// SPDX-License-Identifier: BSD-3-Clause
package web
import (
"context"
"crypto/rand"
"encoding/base64"
"errors"
"fmt"
"net/http"
"net/url"
"slices"
"strings"
"time"
"tailscale.com/client/tailscale/apitype"
"tailscale.com/ipn/ipnstate"
"tailscale.com/tailcfg"
)
const (
sessionCookieName = "TS-Web-Session"
sessionCookieExpiry = time.Hour * 24 * 30 // 30 days
)
// browserSession holds data about a user's browser session
// on the full management web client.
type browserSession struct {
// ID is the unique identifier for the session.
// It is passed in the user's "TS-Web-Session" browser cookie.
ID string
SrcNode tailcfg.NodeID
SrcUser tailcfg.UserID
AuthID string // from tailcfg.WebClientAuthResponse
AuthURL string // from tailcfg.WebClientAuthResponse
Created time.Time
Authenticated bool
}
// isAuthorized reports true if the given session is authorized
// to be used by its associated user to access the full management
// web client.
//
// isAuthorized is true only when s.Authenticated is true (i.e.
// the user has authenticated the session) and the session is not
// expired.
// 2023-10-05: Sessions expire by default 30 days after creation.
func (s *browserSession) isAuthorized(now time.Time) bool {
switch {
case s == nil:
return false
case !s.Authenticated:
return false // awaiting auth
case s.isExpired(now):
return false // expired
}
return true
}
// isExpired reports true if s is expired.
// 2023-10-05: Sessions expire by default 30 days after creation.
func (s *browserSession) isExpired(now time.Time) bool {
return !s.Created.IsZero() && now.After(s.expires())
}
// expires reports when the given session expires.
func (s *browserSession) expires() time.Time {
return s.Created.Add(sessionCookieExpiry)
}
var (
errNoSession = errors.New("no-browser-session")
errNotUsingTailscale = errors.New("not-using-tailscale")
errTaggedRemoteSource = errors.New("tagged-remote-source")
errTaggedLocalSource = errors.New("tagged-local-source")
errNotOwner = errors.New("not-owner")
)
// getSession retrieves the browser session associated with the request,
// if one exists.
//
// An error is returned in any of the following cases:
//
// - (errNotUsingTailscale) The request was not made over tailscale.
//
// - (errNoSession) The request does not have a session.
//
// - (errTaggedRemoteSource) The source is remote (another node) and tagged.
// Users must use their own user-owned devices to manage other nodes'
// web clients.
//
// - (errTaggedLocalSource) The source is local (the same node) and tagged.
// Tagged nodes can only be remotely managed, allowing ACLs to dictate
// access to web clients.
//
// - (errNotOwner) The source is not the owner of this client (if the
// client is user-owned). Only the owner is allowed to manage the
// node via the web client.
//
// If no error is returned, the browserSession is always non-nil.
// getSession does not check whether the session has been
// authorized by the user. Callers can use browserSession.isAuthorized.
//
// The WhoIsResponse is always populated, with a non-nil Node and UserProfile,
// unless getSession reports errNotUsingTailscale.
func (s *Server) getSession(r *http.Request) (*browserSession, *apitype.WhoIsResponse, *ipnstate.Status, error) {
whoIs, whoIsErr := s.lc.WhoIs(r.Context(), r.RemoteAddr)
status, statusErr := s.lc.StatusWithoutPeers(r.Context())
switch {
case whoIsErr != nil:
return nil, nil, status, errNotUsingTailscale
case statusErr != nil:
return nil, whoIs, nil, statusErr
case status.Self == nil:
return nil, whoIs, status, errors.New("missing self node in tailscale status")
case whoIs.Node.IsTagged() && whoIs.Node.StableID == status.Self.ID:
return nil, whoIs, status, errTaggedLocalSource
case whoIs.Node.IsTagged():
return nil, whoIs, status, errTaggedRemoteSource
case !status.Self.IsTagged() && status.Self.UserID != whoIs.UserProfile.ID:
return nil, whoIs, status, errNotOwner
}
srcNode := whoIs.Node.ID
srcUser := whoIs.UserProfile.ID
cookie, err := r.Cookie(sessionCookieName)
if errors.Is(err, http.ErrNoCookie) {
return nil, whoIs, status, errNoSession
} else if err != nil {
return nil, whoIs, status, err
}
v, ok := s.browserSessions.Load(cookie.Value)
if !ok {
return nil, whoIs, status, errNoSession
}
session := v.(*browserSession)
if session.SrcNode != srcNode || session.SrcUser != srcUser {
// In this case the browser cookie is associated with another tailscale node.
// Maybe the source browser's machine was logged out and then back in as a different node.
// Return errNoSession because there is no session for this user.
return nil, whoIs, status, errNoSession
} else if session.isExpired(s.timeNow()) {
// Session expired, remove from session map and return errNoSession.
s.browserSessions.Delete(session.ID)
return nil, whoIs, status, errNoSession
}
return session, whoIs, status, nil
}
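// exampleRequireSession is a hypothetical sketch, not part of the upstream
// file. It shows how a handler might combine getSession and isAuthorized
// before serving management endpoints.
func (s *Server) exampleRequireSession(w http.ResponseWriter, r *http.Request) bool {
	session, _, _, err := s.getSession(r)
	if err != nil {
		http.Error(w, err.Error(), http.StatusUnauthorized)
		return false
	}
	if !session.isAuthorized(s.timeNow()) {
		http.Error(w, "session pending auth or expired", http.StatusUnauthorized)
		return false
	}
	return true
}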
// newSession creates a new session associated with the given source user/node,
// and stores it back to the session cache. Creating of a new session includes
// generating a new auth URL from the control server.
func (s *Server) newSession(ctx context.Context, src *apitype.WhoIsResponse) (*browserSession, error) {
sid, err := s.newSessionID()
if err != nil {
return nil, err
}
session := &browserSession{
ID: sid,
SrcNode: src.Node.ID,
SrcUser: src.UserProfile.ID,
Created: s.timeNow(),
}
if s.controlSupportsCheckMode(ctx) {
// control supports check mode, so get a new auth URL and return.
a, err := s.newAuthURL(ctx, src.Node.ID)
if err != nil {
return nil, err
}
session.AuthID = a.ID
session.AuthURL = a.URL
} else {
// control does not support check mode, so there is no additional auth we can do.
session.Authenticated = true
}
s.browserSessions.Store(sid, session)
return session, nil
}
// controlSupportsCheckMode returns whether the current control server supports web client check mode, to verify a user's identity.
// We assume that only "tailscale.com" control servers support check mode.
// This allows the web client to be used with non-standard control servers.
// If an error occurs getting the control URL, this method returns true to fail closed.
//
// TODO(juanfont/headscale#1623): adjust or remove this when headscale supports check mode.
func (s *Server) controlSupportsCheckMode(ctx context.Context) bool {
prefs, err := s.lc.GetPrefs(ctx)
if err != nil {
return true
}
controlURL, err := url.Parse(prefs.ControlURLOrDefault())
if err != nil {
return true
}
return strings.HasSuffix(controlURL.Host, ".tailscale.com")
}
// awaitUserAuth blocks until the given session auth has been completed
// by the user on the control server, then updates the session cache upon
// completion. An error is returned if control auth failed for any reason.
func (s *Server) awaitUserAuth(ctx context.Context, session *browserSession) error {
if session.isAuthorized(s.timeNow()) {
return nil // already authorized
}
a, err := s.waitAuthURL(ctx, session.AuthID, session.SrcNode)
if err != nil {
// Clean up the session. Doing this on any error from control
// server to avoid the user getting stuck with a bad session
// cookie.
s.browserSessions.Delete(session.ID)
return err
}
if a.Complete {
session.Authenticated = a.Complete
s.browserSessions.Store(session.ID, session)
}
return nil
}
func (s *Server) newSessionID() (string, error) {
raw := make([]byte, 16)
for range 5 {
if _, err := rand.Read(raw); err != nil {
return "", err
}
cookie := "ts-web-" + base64.RawURLEncoding.EncodeToString(raw)
if _, ok := s.browserSessions.Load(cookie); !ok {
return cookie, nil
}
}
return "", errors.New("too many collisions generating new session; please refresh page")
}
// peerCapabilities holds information about what a source
// peer is allowed to edit via the web UI.
//
// map value is true if the peer can edit the given feature.
// Only capFeatures included in validCaps will be included.
type peerCapabilities map[capFeature]bool
// canEdit is true if the peerCapabilities grant edit access
// to the given feature.
func (p peerCapabilities) canEdit(feature capFeature) bool {
if p == nil {
return false
}
if p[capFeatureAll] {
return true
}
return p[feature]
}
// isEmpty is true if p is either nil or has no capabilities
// with value true.
func (p peerCapabilities) isEmpty() bool {
if p == nil {
return true
}
for _, v := range p {
		if v {
return false
}
}
return true
}
type capFeature string
const (
// The following values should not be edited.
// New caps can be added, but existing ones should not be changed,
// as these exact values are used by users in tailnet policy files.
//
// IMPORTANT: When adding a new cap, also update validCaps slice below.
capFeatureAll capFeature = "*" // grants peer management of all features
capFeatureSSH capFeature = "ssh" // grants peer SSH server management
capFeatureSubnets capFeature = "subnets" // grants peer subnet routes management
capFeatureExitNodes capFeature = "exitnodes" // grants peer ability to advertise-as and use exit nodes
capFeatureAccount capFeature = "account" // grants peer ability to turn on auto updates and log out of node
)
// validCaps contains the list of valid capabilities used in the web client.
// Any capabilities included in a peer's grants that do not fall into this
// list will be ignored.
var validCaps []capFeature = []capFeature{
capFeatureAll,
capFeatureSSH,
capFeatureSubnets,
capFeatureExitNodes,
capFeatureAccount,
}
type capRule struct {
CanEdit []string `json:"canEdit,omitempty"` // list of features peer is allowed to edit
}
// toPeerCapabilities parses out the web ui capabilities from the
// given whois response.
func toPeerCapabilities(status *ipnstate.Status, whois *apitype.WhoIsResponse) (peerCapabilities, error) {
if whois == nil || status == nil {
return peerCapabilities{}, nil
}
if whois.Node.IsTagged() {
// We don't allow management *from* tagged nodes, so ignore caps.
// The web client auth flow relies on having a true user identity
// that can be verified through login.
return peerCapabilities{}, nil
}
if !status.Self.IsTagged() {
// User owned nodes are only ever manageable by the owner.
if status.Self.UserID != whois.UserProfile.ID {
return peerCapabilities{}, nil
} else {
return peerCapabilities{capFeatureAll: true}, nil // owner can edit all features
}
}
// For tagged nodes, we actually look at the granted capabilities.
caps := peerCapabilities{}
rules, err := tailcfg.UnmarshalCapJSON[capRule](whois.CapMap, tailcfg.PeerCapabilityWebUI)
if err != nil {
return nil, fmt.Errorf("failed to unmarshal capability: %v", err)
}
for _, c := range rules {
for _, f := range c.CanEdit {
cap := capFeature(strings.ToLower(f))
if slices.Contains(validCaps, cap) {
caps[cap] = true
}
}
}
return caps, nil
}

28
vendor/tailscale.com/client/web/index.html generated vendored Normal file

@@ -0,0 +1,28 @@
<!doctype html>
<html class="bg-gray-50">
<head>
<title>Tailscale</title>
<meta charset="utf-8" />
<meta name="viewport" content="width=device-width, initial-scale=1" />
<link rel="shortcut icon" href="data:image/png;base64,iVBORw0KGgoAAAANSUhEUgAAACAAAAAgCAQAAADZc7J/AAAABGdBTUEAALGPC/xhBQAAACBjSFJNAAB6JgAAgIQAAPoAAACA6AAAdTAAAOpgAAA6mAAAF3CculE8AAAAAmJLR0QA/4ePzL8AAAAHdElNRQflAx4QGA4EvmzDAAAA30lEQVRIx2NgGAWMCKa8JKM4A8Ovt88ekyLCDGOoyDBJMjExMbFy8zF8/EKsCAMDE8yAPyIwFps48SJIBpAL4AZwvoSx/r0lXgQpDN58EWL5x/7/H+vL20+JFxluQKVe5b3Ke5V+0kQQCamfoYKBg4GDwUKI8d0BYkWQkrLKewYBKPPDHUFiRaiZkBgmwhj/F5IgggyUJ6i8V3mv0kCayDAAeEsklXqGAgYGhgV3CnGrwVciYSYk0kokhgS44/JxqqFpiYSZbEgskd4dEBRk1GD4wdB5twKXmlHAwMDAAACdEZau06NQUwAAACV0RVh0ZGF0ZTpjcmVhdGUAMjAyMC0wNy0xNVQxNTo1Mzo0MCswMDowMCVXsDIAAAAldEVYdGRhdGU6bW9kaWZ5ADIwMjAtMDctMTVUMTU6NTM6NDArMDA6MDBUCgiOAAAAAElFTkSuQmCC" />
<link rel="stylesheet" type="text/css" href="/src/index.css" />
<link rel="preload" as="font" href="/src/assets/fonts/Inter.var.latin.woff2" type="font/woff2" crossorigin />
</head>
<body class="px-2">
<noscript>
<p class="mb-2">You need to enable Javascript to access the Tailscale web client.</p>
<p>If you need any help, feel free to <a href="mailto:support+webclient@tailscale.com" class="link">contact us</a>.</p>
</noscript>
<script type="module" src="/src/index.tsx"></script>
<script>
// if this script is changed, also change hash in web.go
window.addEventListener("load", () => {
if (!window.Tailscale) {
const rootEl = document.createElement("p")
rootEl.innerHTML = 'Tailscale web interface is unavailable.';
document.body.append(rootEl)
}
})
</script>
</body>
</html>

81
vendor/tailscale.com/client/web/package.json generated vendored Normal file

@@ -0,0 +1,81 @@
{
"name": "webclient",
"version": "0.0.1",
"license": "BSD-3-Clause",
"engines": {
"node": "18.20.4",
"yarn": "1.22.19"
},
"type": "module",
"private": true,
"dependencies": {
"@radix-ui/react-collapsible": "^1.0.3",
"@radix-ui/react-dialog": "^1.0.5",
"@radix-ui/react-popover": "^1.0.6",
"classnames": "^2.3.1",
"react": "^18.2.0",
"react-dom": "^18.2.0",
"swr": "^2.2.4",
"wouter": "^2.11.0",
"zustand": "^4.4.7"
},
"devDependencies": {
"@types/node": "^18.16.1",
"@types/react": "^18.0.20",
"@types/react-dom": "^18.0.6",
"@vitejs/plugin-react-swc": "^3.6.0",
"autoprefixer": "^10.4.15",
"eslint": "^8.23.1",
"eslint-config-react-app": "^7.0.1",
"eslint-plugin-curly-quotes": "^1.0.4",
"jsdom": "^23.0.1",
"postcss": "^8.4.31",
"prettier": "^2.5.1",
"prettier-plugin-organize-imports": "^3.2.2",
"tailwindcss": "^3.3.3",
"typescript": "^5.3.3",
"vite": "^5.1.7",
"vite-plugin-svgr": "^4.2.0",
"vite-tsconfig-paths": "^3.5.0",
"vitest": "^1.3.1"
},
"resolutions": {
"@typescript-eslint/eslint-plugin": "^6.2.1",
"@typescript-eslint/parser": "^6.2.1"
},
"scripts": {
"build": "vite build",
"start": "vite",
"lint": "tsc --noEmit && eslint 'src/**/*.{ts,tsx,js,jsx}'",
"test": "vitest",
"format": "prettier --write 'src/**/*.{ts,tsx}'",
"format-check": "prettier --check 'src/**/*.{ts,tsx}'"
},
"eslintConfig": {
"extends": [
"react-app"
],
"plugins": [
"curly-quotes",
"react-hooks"
],
"rules": {
"curly-quotes/no-straight-quotes": "warn",
"react-hooks/rules-of-hooks": "error",
"react-hooks/exhaustive-deps": "error"
},
"settings": {
"projectRoot": "client/web/package.json"
}
},
"prettier": {
"semi": false,
"printWidth": 80
},
"postcss": {
"plugins": {
"tailwindcss": {},
"autoprefixer": {}
}
}
}

127
vendor/tailscale.com/client/web/qnap.go generated vendored Normal file

@@ -0,0 +1,127 @@
// Copyright (c) Tailscale Inc & AUTHORS
// SPDX-License-Identifier: BSD-3-Clause
// qnap.go contains handlers and logic, such as authentication,
// that is specific to running the web client on QNAP.
package web
import (
"crypto/tls"
"encoding/xml"
"errors"
"fmt"
"io"
"log"
"net/http"
"net/url"
)
// authorizeQNAP authenticates the logged-in QNAP user and verifies that they
// are authorized to use the web client.
// If the user is not authorized to use the client, an error is returned.
func authorizeQNAP(r *http.Request) (authorized bool, err error) {
_, resp, err := qnapAuthn(r)
if err != nil {
return false, err
}
if resp.IsAdmin == 0 {
return false, errors.New("user is not an admin")
}
return true, nil
}
type qnapAuthResponse struct {
AuthPassed int `xml:"authPassed"`
IsAdmin int `xml:"isAdmin"`
AuthSID string `xml:"authSid"`
ErrorValue int `xml:"errorValue"`
}
func qnapAuthn(r *http.Request) (string, *qnapAuthResponse, error) {
user, err := r.Cookie("NAS_USER")
if err != nil {
return "", nil, err
}
token, err := r.Cookie("qtoken")
if err == nil {
return qnapAuthnQtoken(r, user.Value, token.Value)
}
sid, err := r.Cookie("NAS_SID")
if err == nil {
return qnapAuthnSid(r, user.Value, sid.Value)
}
return "", nil, fmt.Errorf("not authenticated by any mechanism")
}
// qnapAuthnURL returns the auth URL to use by inferring where the UI is
// running based on the request URL. This is necessary because QNAP has so
// many options, see https://github.com/tailscale/tailscale/issues/7108
// and https://github.com/tailscale/tailscale/issues/6903
func qnapAuthnURL(requestUrl string, query url.Values) string {
in, err := url.Parse(requestUrl)
scheme := ""
host := ""
if err != nil || in.Scheme == "" {
log.Printf("Cannot parse QNAP login URL %v", err)
// try localhost and hope for the best
scheme = "http"
host = "localhost"
} else {
scheme = in.Scheme
host = in.Host
}
u := url.URL{
Scheme: scheme,
Host: host,
Path: "/cgi-bin/authLogin.cgi",
RawQuery: query.Encode(),
}
return u.String()
}
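// Illustrative sketch, not part of the vendored file (hypothetical host and
// function name): given an absolute request URL, the login CGI is reached on
// the same scheme and host the UI was served from; only the path and query
// are replaced.
func exampleQNAPAuthnURL() string {
	q := url.Values{"user": {"admin"}}
	return qnapAuthnURL("https://nas.local:5001/cgi-bin/tailscale/index.cgi", q)
	// returns "https://nas.local:5001/cgi-bin/authLogin.cgi?user=admin"
}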
func qnapAuthnQtoken(r *http.Request, user, token string) (string, *qnapAuthResponse, error) {
query := url.Values{
"qtoken": []string{token},
"user": []string{user},
}
return qnapAuthnFinish(user, qnapAuthnURL(r.URL.String(), query))
}
func qnapAuthnSid(r *http.Request, user, sid string) (string, *qnapAuthResponse, error) {
query := url.Values{
"sid": []string{sid},
}
return qnapAuthnFinish(user, qnapAuthnURL(r.URL.String(), query))
}
func qnapAuthnFinish(user, url string) (string, *qnapAuthResponse, error) {
// QNAP Force HTTPS mode uses a self-signed certificate. Even importing
// the QNAP root CA isn't enough, the cert doesn't have a usable CN nor
// SAN. See https://github.com/tailscale/tailscale/issues/6903
tr := &http.Transport{
TLSClientConfig: &tls.Config{InsecureSkipVerify: true},
}
client := &http.Client{Transport: tr}
resp, err := client.Get(url)
if err != nil {
return "", nil, err
}
defer resp.Body.Close()
out, err := io.ReadAll(resp.Body)
if err != nil {
return "", nil, err
}
authResp := &qnapAuthResponse{}
if err := xml.Unmarshal(out, authResp); err != nil {
return "", nil, err
}
if authResp.AuthPassed == 0 {
return "", nil, fmt.Errorf("not authenticated")
}
return user, authResp, nil
}

85
vendor/tailscale.com/client/web/styles.json generated vendored Normal file

@@ -0,0 +1,85 @@
{
"colors": {
"transparent": "transparent",
"current": "currentColor",
"white": "rgb(var(--color-white) / <alpha-value>)",
"gray": {
"0": "rgb(var(--color-gray-0) / <alpha-value>)",
"50": "rgb(var(--color-gray-50) / <alpha-value>)",
"100": "rgb(var(--color-gray-100) / <alpha-value>)",
"200": "rgb(var(--color-gray-200) / <alpha-value>)",
"300": "rgb(var(--color-gray-300) / <alpha-value>)",
"400": "rgb(var(--color-gray-400) / <alpha-value>)",
"500": "rgb(var(--color-gray-500) / <alpha-value>)",
"600": "rgb(var(--color-gray-600) / <alpha-value>)",
"700": "rgb(var(--color-gray-700) / <alpha-value>)",
"800": "rgb(var(--color-gray-800) / <alpha-value>)",
"900": "rgb(var(--color-gray-900) / <alpha-value>)"
},
"blue": {
"0": "rgb(var(--color-blue-0) / <alpha-value>)",
"50": "rgb(var(--color-blue-50) / <alpha-value>)",
"100": "rgb(var(--color-blue-100) / <alpha-value>)",
"200": "rgb(var(--color-blue-200) / <alpha-value>)",
"300": "rgb(var(--color-blue-300) / <alpha-value>)",
"400": "rgb(var(--color-blue-400) / <alpha-value>)",
"500": "rgb(var(--color-blue-500) / <alpha-value>)",
"600": "rgb(var(--color-blue-600) / <alpha-value>)",
"700": "rgb(var(--color-blue-700) / <alpha-value>)",
"800": "rgb(var(--color-blue-800) / <alpha-value>)",
"900": "rgb(var(--color-blue-900) / <alpha-value>)"
},
"green": {
"0": "rgb(var(--color-green-0) / <alpha-value>)",
"50": "rgb(var(--color-green-50) / <alpha-value>)",
"100": "rgb(var(--color-green-100) / <alpha-value>)",
"200": "rgb(var(--color-green-200) / <alpha-value>)",
"300": "rgb(var(--color-green-300) / <alpha-value>)",
"400": "rgb(var(--color-green-400) / <alpha-value>)",
"500": "rgb(var(--color-green-500) / <alpha-value>)",
"600": "rgb(var(--color-green-600) / <alpha-value>)",
"700": "rgb(var(--color-green-700) / <alpha-value>)",
"800": "rgb(var(--color-green-800) / <alpha-value>)",
"900": "rgb(var(--color-green-900) / <alpha-value>)"
},
"red": {
"0": "rgb(var(--color-red-0) / <alpha-value>)",
"50": "rgb(var(--color-red-50) / <alpha-value>)",
"100": "rgb(var(--color-red-100) / <alpha-value>)",
"200": "rgb(var(--color-red-200) / <alpha-value>)",
"300": "rgb(var(--color-red-300) / <alpha-value>)",
"400": "rgb(var(--color-red-400) / <alpha-value>)",
"500": "rgb(var(--color-red-500) / <alpha-value>)",
"600": "rgb(var(--color-red-600) / <alpha-value>)",
"700": "rgb(var(--color-red-700) / <alpha-value>)",
"800": "rgb(var(--color-red-800) / <alpha-value>)",
"900": "rgb(var(--color-red-900) / <alpha-value>)"
},
"yellow": {
"0": "rgb(var(--color-yellow-0) / <alpha-value>)",
"50": "rgb(var(--color-yellow-50) / <alpha-value>)",
"100": "rgb(var(--color-yellow-100) / <alpha-value>)",
"200": "rgb(var(--color-yellow-200) / <alpha-value>)",
"300": "rgb(var(--color-yellow-300) / <alpha-value>)",
"400": "rgb(var(--color-yellow-400) / <alpha-value>)",
"500": "rgb(var(--color-yellow-500) / <alpha-value>)",
"600": "rgb(var(--color-yellow-600) / <alpha-value>)",
"700": "rgb(var(--color-yellow-700) / <alpha-value>)",
"800": "rgb(var(--color-yellow-800) / <alpha-value>)",
"900": "rgb(var(--color-yellow-900) / <alpha-value>)"
},
"orange": {
"0": "rgb(var(--color-orange-0) / <alpha-value>)",
"50": "rgb(var(--color-orange-50) / <alpha-value>)",
"100": "rgb(var(--color-orange-100) / <alpha-value>)",
"200": "rgb(var(--color-orange-200) / <alpha-value>)",
"300": "rgb(var(--color-orange-300) / <alpha-value>)",
"400": "rgb(var(--color-orange-400) / <alpha-value>)",
"500": "rgb(var(--color-orange-500) / <alpha-value>)",
"600": "rgb(var(--color-orange-600) / <alpha-value>)",
"700": "rgb(var(--color-orange-700) / <alpha-value>)",
"800": "rgb(var(--color-orange-800) / <alpha-value>)",
"900": "rgb(var(--color-orange-900) / <alpha-value>)"
}
}
}

59
vendor/tailscale.com/client/web/synology.go generated vendored Normal file

@@ -0,0 +1,59 @@
// Copyright (c) Tailscale Inc & AUTHORS
// SPDX-License-Identifier: BSD-3-Clause
// synology.go contains handlers and logic, such as authentication,
// that is specific to running the web client on Synology.
package web
import (
"errors"
"fmt"
"net/http"
"os/exec"
"strings"
"tailscale.com/util/groupmember"
)
// authorizeSynology authenticates the logged-in Synology user and verifies
// that they are authorized to use the web client.
// If the user is authenticated, but not authorized to use the client, an error is returned.
func authorizeSynology(r *http.Request) (authorized bool, err error) {
if !hasSynoToken(r) {
return false, nil
}
// authenticate the Synology user
cmd := exec.Command("/usr/syno/synoman/webman/modules/authenticate.cgi")
out, err := cmd.CombinedOutput()
if err != nil {
return false, fmt.Errorf("auth: %v: %s", err, out)
}
user := strings.TrimSpace(string(out))
// check if the user is in the administrators group
isAdmin, err := groupmember.IsMemberOfGroup("administrators", user)
if err != nil {
return false, err
}
if !isAdmin {
return false, errors.New("not a member of administrators group")
}
return true, nil
}
// hasSynoToken returns true if the request includes a SynoToken used for Synology auth.
func hasSynoToken(r *http.Request) bool {
if r.Header.Get("X-Syno-Token") != "" {
return true
}
if r.URL.Query().Get("SynoToken") != "" {
return true
}
if r.Method == "POST" && r.FormValue("SynoToken") != "" {
return true
}
return false
}
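// Illustrative sketch, not part of the vendored file (hypothetical token value
// and function name): the token is accepted from the X-Syno-Token header, the
// SynoToken query parameter, or a POST form value.
func exampleHasSynoToken() bool {
	r, _ := http.NewRequest("GET", "https://nas.local:5001/?SynoToken=abc123", nil)
	return hasSynoToken(r) // true: token supplied as a query parameter
}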

117
vendor/tailscale.com/client/web/tailwind.config.js generated vendored Normal file

@@ -0,0 +1,117 @@
import plugin from "tailwindcss/plugin"
import styles from "./styles.json"
const config = {
theme: {
screens: {
sm: "420px",
md: "768px",
lg: "1024px",
},
fontFamily: {
sans: [
"Inter",
"-apple-system",
"BlinkMacSystemFont",
"Helvetica",
"Arial",
"sans-serif",
],
mono: [
"SFMono-Regular",
"SFMono Regular",
"Consolas",
"Liberation Mono",
"Menlo",
"Courier",
"monospace",
],
},
fontWeight: {
normal: "400",
medium: "500",
semibold: "600",
bold: "700",
},
colors: styles.colors,
extend: {
colors: {
...styles.colors,
"bg-app": "var(--color-bg-app)",
"bg-menu-item-hover": "var(--color-bg-menu-item-hover)",
"border-base": "var(--color-border-base)",
"text-base": "var(--color-text-base)",
"text-muted": "var(--color-text-muted)",
"text-disabled": "var(--color-text-disabled)",
"text-primary": "var(--color-text-primary)",
"text-warning": "var(--color-text-warning)",
"text-danger": "var(--color-text-danger)",
},
borderColor: {
DEFAULT: "var(--color-border-base)",
},
boxShadow: {
dialog: "0 10px 40px rgba(0,0,0,0.12), 0 0 16px rgba(0,0,0,0.08)",
form: "0 1px 1px rgba(0, 0, 0, 0.04)",
soft: "0 4px 12px 0 rgba(0, 0, 0, 0.03)",
popover:
"0 0 0 1px rgba(136, 152, 170, 0.1), 0 15px 35px 0 rgba(49, 49, 93, 0.1), 0 5px 15px 0 rgba(0, 0, 0, 0.08)",
},
animation: {
"scale-in": "scale-in 120ms cubic-bezier(0.16, 1, 0.3, 1)",
"scale-out": "scale-out 120ms cubic-bezier(0.16, 1, 0.3, 1)",
},
transformOrigin: {
"radix-hovercard": "var(--radix-hover-card-content-transform-origin)",
"radix-popover": "var(--radix-popover-content-transform-origin)",
"radix-tooltip": "var(--radix-tooltip-content-transform-origin)",
},
keyframes: {
"scale-in": {
"0%": {
transform: "scale(0.94)",
opacity: "0",
},
"100%": {
transform: "scale(1)",
opacity: "1",
},
},
"scale-out": {
"0%": {
transform: "scale(1)",
opacity: "1",
},
"100%": {
transform: "scale(0.94)",
opacity: "0",
},
},
},
},
},
plugins: [
plugin(function ({ addVariant }) {
addVariant("state-open", [
"&[data-state=“open”]",
"[data-state=“open”] &",
])
addVariant("state-closed", [
"&[data-state=“closed”]",
"[data-state=“closed”] &",
])
addVariant("state-delayed-open", [
"&[data-state=“delayed-open”]",
"[data-state=“delayed-open”] &",
])
addVariant("state-active", ["&[data-state=“active”]"])
addVariant("state-inactive", ["&[data-state=“inactive”]"])
}),
],
content: ["./src/**/*.html", "./src/**/*.{ts,tsx}", "./index.html"],
}
export default config

18
vendor/tailscale.com/client/web/tsconfig.json generated vendored Normal file

@@ -0,0 +1,18 @@
{
"compilerOptions": {
"baseUrl": ".",
"target": "ES2017",
"module": "ES2020",
"strict": true,
"sourceMap": true,
"skipLibCheck": true,
"isolatedModules": true,
"moduleResolution": "node",
"forceConsistentCasingInFileNames": true,
"allowSyntheticDefaultImports": true,
"jsx": "react",
"types": ["vite-plugin-svgr/client", "vite/client"]
},
"include": ["src/**/*"],
"exclude": ["node_modules"]
}

57
vendor/tailscale.com/client/web/vite.config.ts generated vendored Normal file

@@ -0,0 +1,57 @@
/// <reference types="vitest" />
import { createLogger, defineConfig } from "vite"
import svgr from "vite-plugin-svgr"
import paths from "vite-tsconfig-paths"
// Use a custom logger that filters out Vite's logging of server URLs, since
// they are an attractive nuisance (we run a proxy in front of Vite, and the
// tailscale web client should be accessed through that).
// Unfortunately there's no option to disable this logging, so the best we can
// do is to ignore calls from a specific function.
const filteringLogger = createLogger(undefined, { allowClearScreen: false })
const originalInfoLog = filteringLogger.info
filteringLogger.info = (...args) => {
if (new Error("ignored").stack?.includes("printServerUrls")) {
return
}
originalInfoLog.apply(filteringLogger, args)
}
// https://vitejs.dev/config/
export default defineConfig({
base: "./",
plugins: [
paths(),
svgr(),
],
build: {
outDir: "build",
sourcemap: false,
},
esbuild: {
logOverride: {
// Silence a warning about `this` being undefined in ESM when at the
// top-level. The way JSX is transpiled causes this to happen, but it
// isn't a problem.
// See: https://github.com/vitejs/vite/issues/8644
"this-is-undefined-in-esm": "silent",
},
},
server: {
// This needs to be 127.0.0.1 instead of localhost, because of how our
// Go proxy connects to it.
host: "127.0.0.1",
// If you change the port, be sure to update the proxy in assets.go too.
port: 4000,
},
test: {
exclude: ["**/node_modules/**", "**/dist/**"],
testTimeout: 20000,
environment: "jsdom",
deps: {
inline: ["date-fns", /\.wasm\?url$/],
},
},
clearScreen: false,
customLogger: filteringLogger,
})

1347
vendor/tailscale.com/client/web/web.go generated vendored Normal file

File diff suppressed because it is too large

5434
vendor/tailscale.com/client/web/yarn.lock generated vendored Normal file

File diff suppressed because it is too large

1216
vendor/tailscale.com/clientupdate/clientupdate.go generated vendored Normal file

File diff suppressed because it is too large


@@ -0,0 +1,20 @@
// Copyright (c) Tailscale Inc & AUTHORS
// SPDX-License-Identifier: BSD-3-Clause
//go:build (linux && !android) || windows
package clientupdate
import (
"context"
"tailscale.com/clientupdate/distsign"
)
func (up *Updater) downloadURLToFile(pathSrc, fileDst string) (ret error) {
c, err := distsign.NewClient(up.Logf, up.PkgsAddr)
if err != nil {
return err
}
return c.Download(context.Background(), pathSrc, fileDst)
}


@@ -0,0 +1,10 @@
// Copyright (c) Tailscale Inc & AUTHORS
// SPDX-License-Identifier: BSD-3-Clause
//go:build !((linux && !android) || windows)
package clientupdate
func (up *Updater) downloadURLToFile(pathSrc, fileDst string) (ret error) {
panic("unreachable")
}


@@ -0,0 +1,10 @@
// Copyright (c) Tailscale Inc & AUTHORS
// SPDX-License-Identifier: BSD-3-Clause
//go:build !windows
package clientupdate
func (up *Updater) updateWindows() error {
panic("unreachable")
}


@@ -0,0 +1,225 @@
// Copyright (c) Tailscale Inc & AUTHORS
// SPDX-License-Identifier: BSD-3-Clause
// Windows-specific stuff that can't go in clientupdate.go because it needs
// x/sys/windows.
package clientupdate
import (
"errors"
"fmt"
"io"
"os"
"os/exec"
"path"
"path/filepath"
"runtime"
"strings"
"github.com/google/uuid"
"golang.org/x/sys/windows"
"tailscale.com/util/winutil"
"tailscale.com/util/winutil/authenticode"
)
const (
// winMSIEnv is the environment variable that, if set, is the MSI file for
// the update command to install. It's passed like this so we can stop the
// tailscale.exe process from running before the msiexec process runs and
// tries to overwrite ourselves.
winMSIEnv = "TS_UPDATE_WIN_MSI"
// winExePathEnv is the environment variable that is set along with
// winMSIEnv and carries the full path of the calling tailscale.exe binary.
// It is used to re-launch the GUI process (tailscale-ipn.exe) after
// install is complete.
winExePathEnv = "TS_UPDATE_WIN_EXE_PATH"
)
func makeSelfCopy() (origPathExe, tmpPathExe string, err error) {
selfExe, err := os.Executable()
if err != nil {
return "", "", err
}
f, err := os.Open(selfExe)
if err != nil {
return "", "", err
}
defer f.Close()
f2, err := os.CreateTemp("", "tailscale-updater-*.exe")
if err != nil {
return "", "", err
}
if err := markTempFileWindows(f2.Name()); err != nil {
return "", "", err
}
if _, err := io.Copy(f2, f); err != nil {
f2.Close()
return "", "", err
}
return selfExe, f2.Name(), f2.Close()
}
func markTempFileWindows(name string) error {
name16 := windows.StringToUTF16Ptr(name)
return windows.MoveFileEx(name16, nil, windows.MOVEFILE_DELAY_UNTIL_REBOOT)
}
const certSubjectTailscale = "Tailscale Inc."
func verifyAuthenticode(path string) error {
return authenticode.Verify(path, certSubjectTailscale)
}
func (up *Updater) updateWindows() error {
if msi := os.Getenv(winMSIEnv); msi != "" {
// stdout/stderr from this part of the install could be lost since the
// parent tailscaled is replaced. Create a temp log file to have some
// output to debug with in case update fails.
close, err := up.switchOutputToFile()
if err != nil {
up.Logf("failed to create log file for installation: %v; proceeding with existing outputs", err)
} else {
defer close.Close()
}
up.Logf("installing %v ...", msi)
if err := up.installMSI(msi); err != nil {
up.Logf("MSI install failed: %v", err)
return err
}
up.Logf("success.")
return nil
}
if !winutil.IsCurrentProcessElevated() {
return errors.New(`update must be run as Administrator
you can run the command prompt as Administrator one of these ways:
* right-click cmd.exe, select 'Run as administrator'
* press Windows+x, then press a
* press Windows+r, type in "cmd", then press Ctrl+Shift+Enter`)
}
ver, err := requestedTailscaleVersion(up.Version, up.Track)
if err != nil {
return err
}
arch := runtime.GOARCH
if arch == "386" {
arch = "x86"
}
if !up.confirm(ver) {
return nil
}
tsDir := filepath.Join(os.Getenv("ProgramData"), "Tailscale")
msiDir := filepath.Join(tsDir, "MSICache")
if fi, err := os.Stat(tsDir); err != nil {
return fmt.Errorf("expected %s to exist, got stat error: %w", tsDir, err)
} else if !fi.IsDir() {
return fmt.Errorf("expected %s to be a directory; got %v", tsDir, fi.Mode())
}
if err := os.MkdirAll(msiDir, 0700); err != nil {
return err
}
up.cleanupOldDownloads(filepath.Join(msiDir, "*.msi"))
pkgsPath := fmt.Sprintf("%s/tailscale-setup-%s-%s.msi", up.Track, ver, arch)
msiTarget := filepath.Join(msiDir, path.Base(pkgsPath))
if err := up.downloadURLToFile(pkgsPath, msiTarget); err != nil {
return err
}
up.Logf("verifying MSI authenticode...")
if err := verifyAuthenticode(msiTarget); err != nil {
return fmt.Errorf("authenticode verification of %s failed: %w", msiTarget, err)
}
up.Logf("authenticode verification succeeded")
up.Logf("making tailscale.exe copy to switch to...")
up.cleanupOldDownloads(filepath.Join(os.TempDir(), "tailscale-updater-*.exe"))
selfOrig, selfCopy, err := makeSelfCopy()
if err != nil {
return err
}
defer os.Remove(selfCopy)
up.Logf("running tailscale.exe copy for final install...")
cmd := exec.Command(selfCopy, "update")
cmd.Env = append(os.Environ(), winMSIEnv+"="+msiTarget, winExePathEnv+"="+selfOrig)
cmd.Stdout = up.Stderr
cmd.Stderr = up.Stderr
cmd.Stdin = os.Stdin
if err := cmd.Start(); err != nil {
return err
}
// Once it's started, exit ourselves, so the binary is free
// to be replaced.
os.Exit(0)
panic("unreachable")
}
func (up *Updater) installMSI(msi string) error {
var err error
for tries := 0; tries < 2; tries++ {
cmd := exec.Command("msiexec.exe", "/i", filepath.Base(msi), "/quiet", "/norestart", "/qn")
cmd.Dir = filepath.Dir(msi)
cmd.Stdout = up.Stdout
cmd.Stderr = up.Stderr
cmd.Stdin = os.Stdin
err = cmd.Run()
if err == nil {
break
}
up.Logf("Install attempt failed: %v", err)
uninstallVersion := up.currentVersion
if v := os.Getenv("TS_DEBUG_UNINSTALL_VERSION"); v != "" {
uninstallVersion = v
}
// Assume it's a downgrade, which msiexec won't permit. Uninstall our current version first.
up.Logf("Uninstalling current version %q for downgrade...", uninstallVersion)
cmd = exec.Command("msiexec.exe", "/x", msiUUIDForVersion(uninstallVersion), "/norestart", "/qn")
cmd.Stdout = up.Stdout
cmd.Stderr = up.Stderr
cmd.Stdin = os.Stdin
err = cmd.Run()
up.Logf("msiexec uninstall: %v", err)
}
return err
}
func msiUUIDForVersion(ver string) string {
arch := runtime.GOARCH
if arch == "386" {
arch = "x86"
}
track, err := versionToTrack(ver)
if err != nil {
track = UnstableTrack
}
msiURL := fmt.Sprintf("https://pkgs.tailscale.com/%s/tailscale-setup-%s-%s.msi", track, ver, arch)
return "{" + strings.ToUpper(uuid.NewSHA1(uuid.NameSpaceURL, []byte(msiURL)).String()) + "}"
}
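// Illustrative sketch, not part of the vendored file (the URL and function
// name are assumptions for the example): uuid.NewSHA1 produces a name-based
// (SHA-1, version 5) UUID, so any party that knows the track, version, and
// architecture can recompute the same MSI ProductCode without consulting the
// registry.
func exampleMSIProductCode() string {
	u := "https://pkgs.tailscale.com/stable/tailscale-setup-1.0.0-amd64.msi"
	return "{" + strings.ToUpper(uuid.NewSHA1(uuid.NameSpaceURL, []byte(u)).String()) + "}"
}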
func (up *Updater) switchOutputToFile() (io.Closer, error) {
var logFilePath string
exePath, err := os.Executable()
if err != nil {
logFilePath = filepath.Join(os.TempDir(), "tailscale-updater.log")
} else {
logFilePath = strings.TrimSuffix(exePath, ".exe") + ".log"
}
up.Logf("writing update output to %q", logFilePath)
logFile, err := os.Create(logFilePath)
if err != nil {
return nil, err
}
up.Logf = func(m string, args ...any) {
fmt.Fprintf(logFile, m+"\n", args...)
}
up.Stdout = logFile
up.Stderr = logFile
return logFile, nil
}

486
vendor/tailscale.com/clientupdate/distsign/distsign.go generated vendored Normal file

@@ -0,0 +1,486 @@
// Copyright (c) Tailscale Inc & AUTHORS
// SPDX-License-Identifier: BSD-3-Clause
// Package distsign implements signature and validation of arbitrary
// distributable files.
//
// There are 3 parties in this exchange:
// - builder, which creates files, signs them with signing keys and publishes
// to server
// - server, which distributes public signing keys, files and signatures
// - client, which downloads files and signatures from server, and validates
// the signatures
//
// There are 2 types of keys:
// - signing keys, that sign individual distributable files on the builder
// - root keys, that sign signing keys and are kept offline
//
// root keys -(sign)-> signing keys -(sign)-> files
//
// All keys are asymmetric Ed25519 key pairs.
//
// The server serves static files under some known prefix. The kinds of files are:
// - distsign.pub - bundle of PEM-encoded public signing keys
// - distsign.pub.sig - signature of distsign.pub using one of the root keys
// - $file - any distributable file
// - $file.sig - signature of $file using any of the signing keys
//
// The root public keys are baked into the client software at compile time.
// These keys are long-lived and prove the validity of current signing keys
// from distsign.pub. To rotate root keys, a new client release must be
// published; they are not rotated dynamically. There are multiple root keys in
// different locations specifically to allow this rotation without using the
// discarded root key for any new signatures.
//
// The signing public keys are fetched by the client dynamically before every
// download and can be rotated more readily, assuming that most deployed
// clients trust the root keys used to issue fresh signing keys.
package distsign
import (
"context"
"crypto/ed25519"
"crypto/rand"
"encoding/binary"
"encoding/pem"
"errors"
"fmt"
"hash"
"io"
"log"
"net/http"
"net/url"
"os"
"time"
"github.com/hdevalence/ed25519consensus"
"golang.org/x/crypto/blake2s"
"tailscale.com/net/tshttpproxy"
"tailscale.com/types/logger"
"tailscale.com/util/httpm"
"tailscale.com/util/must"
)
const (
pemTypeRootPrivate = "ROOT PRIVATE KEY"
pemTypeRootPublic = "ROOT PUBLIC KEY"
pemTypeSigningPrivate = "SIGNING PRIVATE KEY"
pemTypeSigningPublic = "SIGNING PUBLIC KEY"
downloadSizeLimit = 1 << 29 // 512MB
signingKeysSizeLimit = 1 << 20 // 1MB
signatureSizeLimit = ed25519.SignatureSize
)
// RootKey is a root key used to sign signing keys.
type RootKey struct {
k ed25519.PrivateKey
}
// GenerateRootKey generates a new root key pair and encodes it as PEM.
func GenerateRootKey() (priv, pub []byte, err error) {
pub, priv, err = ed25519.GenerateKey(rand.Reader)
if err != nil {
return nil, nil, err
}
return pem.EncodeToMemory(&pem.Block{
Type: pemTypeRootPrivate,
Bytes: []byte(priv),
}), pem.EncodeToMemory(&pem.Block{
Type: pemTypeRootPublic,
Bytes: []byte(pub),
}), nil
}
// ParseRootKey parses the PEM-encoded private root key. The key must be in the
// same format as returned by GenerateRootKey.
func ParseRootKey(privKey []byte) (*RootKey, error) {
k, err := parsePrivateKey(privKey, pemTypeRootPrivate)
if err != nil {
return nil, fmt.Errorf("failed to parse root key: %w", err)
}
return &RootKey{k: k}, nil
}
// SignSigningKeys signs the bundle of public signing keys. The bundle must be
// a sequence of PEM blocks joined with newlines.
func (r *RootKey) SignSigningKeys(pubBundle []byte) ([]byte, error) {
if _, err := ParseSigningKeyBundle(pubBundle); err != nil {
return nil, err
}
return ed25519.Sign(r.k, pubBundle), nil
}
// SigningKey is a signing key used to sign packages.
type SigningKey struct {
k ed25519.PrivateKey
}
// GenerateSigningKey generates a new signing key pair and encodes it as PEM.
func GenerateSigningKey() (priv, pub []byte, err error) {
pub, priv, err = ed25519.GenerateKey(rand.Reader)
if err != nil {
return nil, nil, err
}
return pem.EncodeToMemory(&pem.Block{
Type: pemTypeSigningPrivate,
Bytes: []byte(priv),
}), pem.EncodeToMemory(&pem.Block{
Type: pemTypeSigningPublic,
Bytes: []byte(pub),
}), nil
}
// ParseSigningKey parses the PEM-encoded private signing key. The key must be
// in the same format as returned by GenerateSigningKey.
func ParseSigningKey(privKey []byte) (*SigningKey, error) {
k, err := parsePrivateKey(privKey, pemTypeSigningPrivate)
if err != nil {
return nil, fmt.Errorf("failed to parse root key: %w", err)
}
return &SigningKey{k: k}, nil
}
// SignPackageHash signs the hash and the length of a package. Use PackageHash
// to compute the inputs.
func (s *SigningKey) SignPackageHash(hash []byte, len int64) ([]byte, error) {
if len <= 0 {
return nil, fmt.Errorf("package length must be positive, got %d", len)
}
msg := binary.LittleEndian.AppendUint64(hash, uint64(len))
return ed25519.Sign(s.k, msg), nil
}
// PackageHash is a hash.Hash that counts the number of bytes written. Use it
// to get the hash and length inputs to SigningKey.SignPackageHash.
type PackageHash struct {
hash.Hash
len int64
}
// NewPackageHash returns an initialized PackageHash using BLAKE2s.
func NewPackageHash() *PackageHash {
h, err := blake2s.New256(nil)
if err != nil {
// Should never happen with a nil key passed to blake2s.
panic(err)
}
return &PackageHash{Hash: h}
}
func (ph *PackageHash) Write(b []byte) (int, error) {
ph.len += int64(len(b))
return ph.Hash.Write(b)
}
// Reset the PackageHash to its initial state.
func (ph *PackageHash) Reset() {
ph.len = 0
ph.Hash.Reset()
}
// Len returns the total number of bytes written.
func (ph *PackageHash) Len() int64 { return ph.len }
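// Illustrative sketch, not part of the vendored file (hypothetical function
// name and payload): a full sign/verify round trip over an in-memory payload
// using the exported API in this file. VerifyAny is defined further below.
func exampleSignAndVerify() (bool, error) {
	privPEM, pubPEM, err := GenerateSigningKey()
	if err != nil {
		return false, err
	}
	sk, err := ParseSigningKey(privPEM)
	if err != nil {
		return false, err
	}
	pubs, err := ParseSigningKeyBundle(pubPEM)
	if err != nil {
		return false, err
	}
	ph := NewPackageHash()
	io.WriteString(ph, "example package contents")
	sig, err := sk.SignPackageHash(ph.Sum(nil), ph.Len())
	if err != nil {
		return false, err
	}
	// Reconstruct the signed message the same way SignPackageHash does:
	// the BLAKE2s hash followed by the little-endian encoded length.
	msg := binary.LittleEndian.AppendUint64(ph.Sum(nil), uint64(ph.Len()))
	return VerifyAny(pubs, msg, sig), nil
}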
// Client downloads and validates files from a distribution server.
type Client struct {
logf logger.Logf
roots []ed25519.PublicKey
pkgsAddr *url.URL
}
// NewClient returns a new client for the distribution server located at pkgsAddr,
// and uses embedded root keys from the roots/ subdirectory of this package.
func NewClient(logf logger.Logf, pkgsAddr string) (*Client, error) {
if logf == nil {
logf = log.Printf
}
u, err := url.Parse(pkgsAddr)
if err != nil {
return nil, fmt.Errorf("invalid pkgsAddr %q: %w", pkgsAddr, err)
}
return &Client{logf: logf, roots: roots(), pkgsAddr: u}, nil
}
func (c *Client) url(path string) string {
return c.pkgsAddr.JoinPath(path).String()
}
// Download fetches a file at path srcPath from pkgsAddr passed in NewClient.
// The file is downloaded to dstPath and its signature is validated using the
// embedded root keys. Download returns an error if anything goes wrong with
// the actual file download or with signature validation.
func (c *Client) Download(ctx context.Context, srcPath, dstPath string) error {
// Always fetch a fresh signing key.
sigPub, err := c.signingKeys()
if err != nil {
return err
}
srcURL := c.url(srcPath)
sigURL := srcURL + ".sig"
c.logf("Downloading %q", srcURL)
dstPathUnverified := dstPath + ".unverified"
hash, len, err := c.download(ctx, srcURL, dstPathUnverified, downloadSizeLimit)
if err != nil {
return err
}
c.logf("Downloading %q", sigURL)
sig, err := fetch(sigURL, signatureSizeLimit)
if err != nil {
// Best-effort clean up of downloaded package.
os.Remove(dstPathUnverified)
return err
}
msg := binary.LittleEndian.AppendUint64(hash, uint64(len))
if !VerifyAny(sigPub, msg, sig) {
// Best-effort clean up of downloaded package.
os.Remove(dstPathUnverified)
return fmt.Errorf("signature %q for file %q does not validate with the current release signing key; either you are under attack, or attempting to download an old version of Tailscale which was signed with an older signing key", sigURL, srcURL)
}
c.logf("Signature OK")
if err := os.Rename(dstPathUnverified, dstPath); err != nil {
return fmt.Errorf("failed to move %q to %q after signature validation", dstPathUnverified, dstPath)
}
return nil
}
// ValidateLocalBinary fetches the latest signature associated with the binary
// at srcURLPath and uses it to validate the file located on disk via
// localFilePath. ValidateLocalBinary returns an error if anything goes wrong
// with the signature download or with signature validation.
func (c *Client) ValidateLocalBinary(srcURLPath, localFilePath string) error {
// Always fetch a fresh signing key.
sigPub, err := c.signingKeys()
if err != nil {
return err
}
srcURL := c.url(srcURLPath)
sigURL := srcURL + ".sig"
localFile, err := os.Open(localFilePath)
if err != nil {
return err
}
defer localFile.Close()
h := NewPackageHash()
_, err = io.Copy(h, localFile)
if err != nil {
return err
}
hash, hashLen := h.Sum(nil), h.Len()
c.logf("Downloading %q", sigURL)
sig, err := fetch(sigURL, signatureSizeLimit)
if err != nil {
return err
}
msg := binary.LittleEndian.AppendUint64(hash, uint64(hashLen))
if !VerifyAny(sigPub, msg, sig) {
return fmt.Errorf("signature %q for file %q does not validate with the current release signing key; either you are under attack, or attempting to download an old version of Tailscale which was signed with an older signing key", sigURL, localFilePath)
}
c.logf("Signature OK")
return nil
}
// signingKeys fetches current signing keys from the server and validates them
// against the roots. Should be called before validation of any downloaded file
// to get the fresh keys.
func (c *Client) signingKeys() ([]ed25519.PublicKey, error) {
keyURL := c.url("distsign.pub")
sigURL := keyURL + ".sig"
raw, err := fetch(keyURL, signingKeysSizeLimit)
if err != nil {
return nil, err
}
sig, err := fetch(sigURL, signatureSizeLimit)
if err != nil {
return nil, err
}
if !VerifyAny(c.roots, raw, sig) {
return nil, fmt.Errorf("signature %q for key %q does not validate with any known root key; either you are under attack, or running a very old version of Tailscale with outdated root keys", sigURL, keyURL)
}
keys, err := ParseSigningKeyBundle(raw)
if err != nil {
return nil, fmt.Errorf("cannot parse signing key bundle from %q: %w", keyURL, err)
}
return keys, nil
}
// fetch reads the response body from url into memory, up to limit bytes.
func fetch(url string, limit int64) ([]byte, error) {
resp, err := http.Get(url)
if err != nil {
return nil, err
}
defer resp.Body.Close()
return io.ReadAll(io.LimitReader(resp.Body, limit))
}
// download writes the response body of url into a local file at dst, up to
// limit bytes. On success, it returns the BLAKE2s hash of the file and its length in bytes.
func (c *Client) download(ctx context.Context, url, dst string, limit int64) ([]byte, int64, error) {
tr := http.DefaultTransport.(*http.Transport).Clone()
tr.Proxy = tshttpproxy.ProxyFromEnvironment
defer tr.CloseIdleConnections()
hc := &http.Client{Transport: tr}
quickCtx, cancel := context.WithTimeout(ctx, 30*time.Second)
defer cancel()
headReq := must.Get(http.NewRequestWithContext(quickCtx, httpm.HEAD, url, nil))
res, err := hc.Do(headReq)
if err != nil {
return nil, 0, err
}
if res.StatusCode != http.StatusOK {
return nil, 0, fmt.Errorf("HEAD %q: %v", url, res.Status)
}
if res.ContentLength <= 0 {
return nil, 0, fmt.Errorf("HEAD %q: unexpected Content-Length %v", url, res.ContentLength)
}
c.logf("Download size: %v", res.ContentLength)
dlReq := must.Get(http.NewRequestWithContext(ctx, httpm.GET, url, nil))
dlRes, err := hc.Do(dlReq)
if err != nil {
return nil, 0, err
}
defer dlRes.Body.Close()
// TODO(bradfitz): resume from existing partial file on disk
if dlRes.StatusCode != http.StatusOK {
return nil, 0, fmt.Errorf("GET %q: %v", url, dlRes.Status)
}
of, err := os.Create(dst)
if err != nil {
return nil, 0, err
}
defer of.Close()
pw := &progressWriter{total: res.ContentLength, logf: c.logf}
h := NewPackageHash()
n, err := io.Copy(io.MultiWriter(of, h, pw), io.LimitReader(dlRes.Body, limit))
if err != nil {
return nil, n, err
}
if n != res.ContentLength {
return nil, n, fmt.Errorf("GET %q: downloaded %v, want %v", url, n, res.ContentLength)
}
if err := dlRes.Body.Close(); err != nil {
return nil, n, err
}
if err := of.Close(); err != nil {
return nil, n, err
}
pw.print()
return h.Sum(nil), h.Len(), nil
}
type progressWriter struct {
done int64
total int64
lastPrint time.Time
logf logger.Logf
}
func (pw *progressWriter) Write(p []byte) (n int, err error) {
pw.done += int64(len(p))
if time.Since(pw.lastPrint) > 2*time.Second {
pw.print()
}
return len(p), nil
}
func (pw *progressWriter) print() {
pw.lastPrint = time.Now()
pw.logf("Downloaded %v/%v (%.1f%%)", pw.done, pw.total, float64(pw.done)/float64(pw.total)*100)
}
func parsePrivateKey(data []byte, typeTag string) (ed25519.PrivateKey, error) {
b, rest := pem.Decode(data)
if b == nil {
return nil, errors.New("failed to decode PEM data")
}
if len(rest) > 0 {
return nil, errors.New("trailing PEM data")
}
if b.Type != typeTag {
return nil, fmt.Errorf("PEM type is %q, want %q", b.Type, typeTag)
}
if len(b.Bytes) != ed25519.PrivateKeySize {
return nil, errors.New("private key has incorrect length for an Ed25519 private key")
}
return ed25519.PrivateKey(b.Bytes), nil
}
// ParseSigningKeyBundle parses the bundle of PEM-encoded public signing keys.
func ParseSigningKeyBundle(bundle []byte) ([]ed25519.PublicKey, error) {
return parsePublicKeyBundle(bundle, pemTypeSigningPublic)
}
// ParseRootKeyBundle parses the bundle of PEM-encoded public root keys.
func ParseRootKeyBundle(bundle []byte) ([]ed25519.PublicKey, error) {
return parsePublicKeyBundle(bundle, pemTypeRootPublic)
}
func parsePublicKeyBundle(bundle []byte, typeTag string) ([]ed25519.PublicKey, error) {
var keys []ed25519.PublicKey
for len(bundle) > 0 {
pub, rest, err := parsePublicKey(bundle, typeTag)
if err != nil {
return nil, err
}
keys = append(keys, pub)
bundle = rest
}
if len(keys) == 0 {
return nil, errors.New("no signing keys found in the bundle")
}
return keys, nil
}
func parseSinglePublicKey(data []byte, typeTag string) (ed25519.PublicKey, error) {
pub, rest, err := parsePublicKey(data, typeTag)
if err != nil {
return nil, err
}
if len(rest) > 0 {
return nil, errors.New("trailing PEM data")
}
return pub, err
}
func parsePublicKey(data []byte, typeTag string) (pub ed25519.PublicKey, rest []byte, retErr error) {
b, rest := pem.Decode(data)
if b == nil {
return nil, nil, errors.New("failed to decode PEM data")
}
if b.Type != typeTag {
return nil, nil, fmt.Errorf("PEM type is %q, want %q", b.Type, typeTag)
}
if len(b.Bytes) != ed25519.PublicKeySize {
return nil, nil, errors.New("public key has incorrect length for an Ed25519 public key")
}
return ed25519.PublicKey(b.Bytes), rest, nil
}
// VerifyAny verifies whether sig is valid for msg using any of the keys.
// VerifyAny will panic if any of the keys have the wrong size for Ed25519.
func VerifyAny(keys []ed25519.PublicKey, msg, sig []byte) bool {
for _, k := range keys {
if ed25519consensus.Verify(k, msg, sig) {
return true
}
}
return false
}
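// Illustrative sketch, not part of the vendored file (hypothetical function
// name): the other half of the chain described in the package comment, where a
// root key signs the signing key bundle and the signature is checked against
// the parsed root public keys.
func exampleRootSignsSigningKeys() (bool, error) {
	rootPriv, rootPub, err := GenerateRootKey()
	if err != nil {
		return false, err
	}
	rk, err := ParseRootKey(rootPriv)
	if err != nil {
		return false, err
	}
	_, signingPub, err := GenerateSigningKey()
	if err != nil {
		return false, err
	}
	bundleSig, err := rk.SignSigningKeys(signingPub)
	if err != nil {
		return false, err
	}
	rootPubs, err := ParseRootKeyBundle(rootPub)
	if err != nil {
		return false, err
	}
	return VerifyAny(rootPubs, signingPub, bundleSig), nil
}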

54
vendor/tailscale.com/clientupdate/distsign/roots.go generated vendored Normal file

@@ -0,0 +1,54 @@
// Copyright (c) Tailscale Inc & AUTHORS
// SPDX-License-Identifier: BSD-3-Clause
package distsign
import (
"crypto/ed25519"
"embed"
"errors"
"fmt"
"path"
"path/filepath"
"sync"
)
//go:embed roots
var rootsFS embed.FS
var roots = sync.OnceValue(func() []ed25519.PublicKey {
roots, err := parseRoots()
if err != nil {
panic(err)
}
return roots
})
func parseRoots() ([]ed25519.PublicKey, error) {
files, err := rootsFS.ReadDir("roots")
if err != nil {
return nil, err
}
var keys []ed25519.PublicKey
for _, f := range files {
if !f.Type().IsRegular() {
continue
}
if filepath.Ext(f.Name()) != ".pem" {
continue
}
raw, err := rootsFS.ReadFile(path.Join("roots", f.Name()))
if err != nil {
return nil, err
}
key, err := parseSinglePublicKey(raw, pemTypeRootPublic)
if err != nil {
return nil, fmt.Errorf("parsing root key %q: %w", f.Name(), err)
}
keys = append(keys, key)
}
if len(keys) == 0 {
return nil, errors.New("no embedded root keys, please check clientupdate/distsign/roots/")
}
return keys, nil
}


@@ -0,0 +1,3 @@
-----BEGIN ROOT PUBLIC KEY-----
Psrabv2YNiEDhPlnLVSMtB5EKACm7zxvKxfvYD4i7X8=
-----END ROOT PUBLIC KEY-----


@@ -0,0 +1,3 @@
-----BEGIN ROOT PUBLIC KEY-----
ZjjKhUHBtLNRSO1dhOTjrXJGJ8lDe1594WM2XDuheVQ=
-----END ROOT PUBLIC KEY-----

408
vendor/tailscale.com/control/controlbase/conn.go generated vendored Normal file

@@ -0,0 +1,408 @@
// Copyright (c) Tailscale Inc & AUTHORS
// SPDX-License-Identifier: BSD-3-Clause
// Package controlbase implements the base transport of the Tailscale
// 2021 control protocol.
//
// The base transport implements Noise IK, instantiated with
// Curve25519, ChaCha20Poly1305 and BLAKE2s.
package controlbase
import (
"crypto/cipher"
"encoding/binary"
"fmt"
"net"
"sync"
"time"
"golang.org/x/crypto/blake2s"
chp "golang.org/x/crypto/chacha20poly1305"
"tailscale.com/types/key"
)
const (
// maxMessageSize is the maximum size of a protocol frame on the
// wire, including header and payload.
maxMessageSize = 4096
// maxCiphertextSize is the maximum amount of ciphertext bytes
// that one protocol frame can carry, after framing.
maxCiphertextSize = maxMessageSize - 3
// maxPlaintextSize is the maximum amount of plaintext bytes that
// one protocol frame can carry, after encryption and framing.
maxPlaintextSize = maxCiphertextSize - chp.Overhead
)
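// In concrete terms (worked from the constants above): with the 3-byte frame
// header and ChaCha20-Poly1305's 16-byte tag, maxCiphertextSize is
// 4096-3 = 4093 bytes and maxPlaintextSize is 4093-16 = 4077 bytes per frame.
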
// A Conn is a secured Noise connection. It implements the net.Conn
// interface, with the unusual trait that any write error (including a
// SetWriteDeadline induced i/o timeout) causes all future writes to
// fail.
type Conn struct {
conn net.Conn
version uint16
peer key.MachinePublic
handshakeHash [blake2s.Size]byte
rx rxState
tx txState
}
// rxState is all the Conn state that Read uses.
type rxState struct {
sync.Mutex
cipher cipher.AEAD
nonce nonce
buf *maxMsgBuffer // or nil when reads exhausted
n int // number of valid bytes in buf
next int // offset of next undecrypted packet
plaintext []byte // slice into buf of decrypted bytes
hdrBuf [headerLen]byte // small buffer used when buf is nil
}
// txState is all the Conn state that Write uses.
type txState struct {
sync.Mutex
cipher cipher.AEAD
nonce nonce
err error // records the first partial write error for all future calls
}
// ProtocolVersion returns the protocol version that was used to
// establish this Conn.
func (c *Conn) ProtocolVersion() int {
return int(c.version)
}
// HandshakeHash returns the Noise handshake hash for the connection,
// which can be used to bind other messages to this connection
// (i.e. to ensure that the message wasn't replayed from a different
// connection).
func (c *Conn) HandshakeHash() [blake2s.Size]byte {
return c.handshakeHash
}
// Peer returns the peer's long-term public key.
func (c *Conn) Peer() key.MachinePublic {
return c.peer
}
// readNLocked reads into c.rx.buf until buf contains at least total
// bytes. Returns a slice of the total bytes in rxBuf, or an
// error if fewer than total bytes are available.
//
// It may be called with a nil c.rx.buf only if total == headerLen.
//
// On success, c.rx.buf will be non-nil.
func (c *Conn) readNLocked(total int) ([]byte, error) {
if total > maxMessageSize {
return nil, errReadTooBig{total}
}
for {
if total <= c.rx.n {
return c.rx.buf[:total], nil
}
var n int
var err error
if c.rx.buf == nil {
if c.rx.n != 0 || total != headerLen {
panic("unexpected")
}
// Optimization to reduce memory usage.
// Most connections are blocked forever waiting for
// a read, so we don't want c.rx.buf to be allocated until
// we know there's data to read. Instead, when we're
// waiting for data to arrive here, read into the
// 3 byte hdrBuf:
n, err = c.conn.Read(c.rx.hdrBuf[:])
if n > 0 {
c.rx.buf = getMaxMsgBuffer()
copy(c.rx.buf[:], c.rx.hdrBuf[:n])
}
} else {
n, err = c.conn.Read(c.rx.buf[c.rx.n:])
}
c.rx.n += n
if err != nil {
return nil, err
}
}
}
// decryptLocked decrypts msg (which is header+ciphertext) in-place
// and sets c.rx.plaintext to the decrypted bytes.
func (c *Conn) decryptLocked(msg []byte) (err error) {
if msgType := msg[0]; msgType != msgTypeRecord {
return fmt.Errorf("received message with unexpected type %d, want %d", msgType, msgTypeRecord)
}
// We don't check the length field here, because the caller
// already did in order to figure out how big the msg slice should
// be.
ciphertext := msg[headerLen:]
if !c.rx.nonce.Valid() {
return errCipherExhausted{}
}
c.rx.plaintext, err = c.rx.cipher.Open(ciphertext[:0], c.rx.nonce[:], ciphertext, nil)
c.rx.nonce.Increment()
if err != nil {
// Once a decryption has failed, our Conn is no longer
// synchronized with our peer. Nuke the cipher state to be
// safe, so that no further decryptions are attempted. Future
// read attempts will return net.ErrClosed.
c.rx.cipher = nil
}
return err
}
// encryptLocked encrypts plaintext into buf (including the
// packet header) and returns a slice of the ciphertext, or an error
// if the cipher is exhausted (i.e. can no longer be used safely).
func (c *Conn) encryptLocked(plaintext []byte, buf *maxMsgBuffer) ([]byte, error) {
if !c.tx.nonce.Valid() {
// Received 2^64-1 messages on this cipher state. Connection
// is no longer usable.
return nil, errCipherExhausted{}
}
buf[0] = msgTypeRecord
binary.BigEndian.PutUint16(buf[1:headerLen], uint16(len(plaintext)+chp.Overhead))
ret := c.tx.cipher.Seal(buf[:headerLen], c.tx.nonce[:], plaintext, nil)
c.tx.nonce.Increment()
return ret, nil
}
// wholeMessageLocked returns a slice of one whole Noise transport
// message from c.rx.buf, if one whole message is available, and
// advances the read state to the next Noise message in the
// buffer. Returns nil without advancing read state if there isn't one
// whole message in c.rx.buf.
func (c *Conn) wholeMessageLocked() []byte {
available := c.rx.n - c.rx.next
if available < headerLen {
return nil
}
bs := c.rx.buf[c.rx.next:c.rx.n]
totalSize := headerLen + int(binary.BigEndian.Uint16(bs[1:3]))
if len(bs) < totalSize {
return nil
}
c.rx.next += totalSize
return bs[:totalSize]
}
// decryptOneLocked decrypts one Noise transport message, reading from
// c.conn as needed, and sets c.rx.plaintext to point to the decrypted
// bytes. c.rx.plaintext is only valid if err == nil.
func (c *Conn) decryptOneLocked() error {
c.rx.plaintext = nil
// Fast path: do we have one whole ciphertext frame buffered
// already?
if bs := c.wholeMessageLocked(); bs != nil {
return c.decryptLocked(bs)
}
if c.rx.next != 0 {
// To simplify the read logic, move the remainder of the
// buffered bytes back to the head of the buffer, so we can
// grow it without worrying about wraparound.
c.rx.n = copy(c.rx.buf[:], c.rx.buf[c.rx.next:c.rx.n])
c.rx.next = 0
}
// Return our buffer to the pool if it's empty, lest we be
// blocked in a long Read call, reading the 3 byte header. We
// don't want to keep that buffer unnecessarily alive.
if c.rx.n == 0 && c.rx.next == 0 && c.rx.buf != nil {
bufPool.Put(c.rx.buf)
c.rx.buf = nil
}
bs, err := c.readNLocked(headerLen)
if err != nil {
return err
}
// The rest of the header (besides the length field) gets verified
// in decryptLocked, not here.
messageLen := headerLen + int(binary.BigEndian.Uint16(bs[1:3]))
bs, err = c.readNLocked(messageLen)
if err != nil {
return err
}
c.rx.next = len(bs)
return c.decryptLocked(bs)
}
// Read implements io.Reader.
func (c *Conn) Read(bs []byte) (int, error) {
c.rx.Lock()
defer c.rx.Unlock()
if c.rx.cipher == nil {
return 0, net.ErrClosed
}
// If no plaintext is buffered, decrypt incoming frames until we
// have some plaintext. Zero-byte Noise frames are allowed in this
// protocol, which is why we have to loop here rather than decrypt
// a single additional frame.
for len(c.rx.plaintext) == 0 {
if err := c.decryptOneLocked(); err != nil {
return 0, err
}
}
n := copy(bs, c.rx.plaintext)
c.rx.plaintext = c.rx.plaintext[n:]
// Lose slice's underlying array pointer to unneeded memory so
// GC can collect more.
if len(c.rx.plaintext) == 0 {
c.rx.plaintext = nil
}
return n, nil
}
// Write implements io.Writer.
func (c *Conn) Write(bs []byte) (n int, err error) {
c.tx.Lock()
defer c.tx.Unlock()
if c.tx.err != nil {
return 0, c.tx.err
}
defer func() {
if err != nil {
// All write errors are fatal for this conn, so clear the
// cipher state whenever an error happens.
c.tx.cipher = nil
}
if c.tx.err == nil {
// Only set c.tx.err if not nil so that we can return one
// error on the first failure, and a different one for
// subsequent calls. See the error handling around Write
// below for why.
c.tx.err = err
}
}()
if c.tx.cipher == nil {
return 0, net.ErrClosed
}
buf := getMaxMsgBuffer()
defer bufPool.Put(buf)
var sent int
for len(bs) > 0 {
toSend := bs
if len(toSend) > maxPlaintextSize {
toSend = bs[:maxPlaintextSize]
}
bs = bs[len(toSend):]
ciphertext, err := c.encryptLocked(toSend, buf)
if err != nil {
return sent, err
}
if _, err := c.conn.Write(ciphertext); err != nil {
// Return the raw error on the Write that actually
// failed. For future writes, return that error wrapped in
// a desync error.
c.tx.err = errPartialWrite{err}
return sent, err
}
sent += len(toSend)
}
return sent, nil
}
// Close implements io.Closer.
func (c *Conn) Close() error {
closeErr := c.conn.Close() // unblocks any waiting reads or writes
// Remove references to live cipher state. Strictly speaking this
// is unnecessary, but we want to try and hand the active cipher
// state to the garbage collector promptly, to preserve perfect
// forward secrecy as much as we can.
c.rx.Lock()
c.rx.cipher = nil
c.rx.Unlock()
c.tx.Lock()
c.tx.cipher = nil
c.tx.Unlock()
return closeErr
}
func (c *Conn) LocalAddr() net.Addr { return c.conn.LocalAddr() }
func (c *Conn) RemoteAddr() net.Addr { return c.conn.RemoteAddr() }
func (c *Conn) SetDeadline(t time.Time) error { return c.conn.SetDeadline(t) }
func (c *Conn) SetReadDeadline(t time.Time) error { return c.conn.SetReadDeadline(t) }
func (c *Conn) SetWriteDeadline(t time.Time) error { return c.conn.SetWriteDeadline(t) }
// errCipherExhausted is the error returned when we run out of nonces
// on a cipher.
type errCipherExhausted struct{}
func (errCipherExhausted) Error() string {
return "cipher exhausted, no more nonces available for current key"
}
func (errCipherExhausted) Timeout() bool { return false }
func (errCipherExhausted) Temporary() bool { return false }
// errPartialWrite is the error returned when the cipher state has
// become unusable due to a past partial write.
type errPartialWrite struct {
err error
}
func (e errPartialWrite) Error() string {
return fmt.Sprintf("cipher state desynchronized due to partial write (%v)", e.err)
}
func (e errPartialWrite) Unwrap() error { return e.err }
func (e errPartialWrite) Temporary() bool { return false }
func (e errPartialWrite) Timeout() bool { return false }
// errReadTooBig is the error returned when the peer sent an
// unacceptably large Noise frame.
type errReadTooBig struct {
requested int
}
func (e errReadTooBig) Error() string {
return fmt.Sprintf("requested read of %d bytes exceeds max allowed Noise frame size", e.requested)
}
func (e errReadTooBig) Temporary() bool {
// permanent error because this error only occurs when our peer
// sends us a frame so large we're unwilling to ever decode it.
return false
}
func (e errReadTooBig) Timeout() bool { return false }
type nonce [chp.NonceSize]byte
func (n *nonce) Valid() bool {
return binary.BigEndian.Uint32(n[:4]) == 0 && binary.BigEndian.Uint64(n[4:]) != invalidNonce
}
func (n *nonce) Increment() {
if !n.Valid() {
panic("increment of invalid nonce")
}
binary.BigEndian.PutUint64(n[4:], 1+binary.BigEndian.Uint64(n[4:]))
}
type maxMsgBuffer [maxMessageSize]byte
// bufPool holds the temporary buffers for Conn.Read & Write.
var bufPool = &sync.Pool{
New: func() any {
return new(maxMsgBuffer)
},
}
func getMaxMsgBuffer() *maxMsgBuffer {
return bufPool.Get().(*maxMsgBuffer)
}

494
vendor/tailscale.com/control/controlbase/handshake.go generated vendored Normal file

@@ -0,0 +1,494 @@
// Copyright (c) Tailscale Inc & AUTHORS
// SPDX-License-Identifier: BSD-3-Clause
package controlbase
import (
"context"
"crypto/cipher"
"encoding/binary"
"errors"
"fmt"
"hash"
"io"
"net"
"strconv"
"time"
"go4.org/mem"
"golang.org/x/crypto/blake2s"
chp "golang.org/x/crypto/chacha20poly1305"
"golang.org/x/crypto/curve25519"
"golang.org/x/crypto/hkdf"
"tailscale.com/types/key"
)
const (
// protocolName is the name of the specific instantiation of Noise
// that the control protocol uses. This string's value is fixed by
// the Noise spec, and shouldn't be changed unless we're updating
// the control protocol to use a different Noise instance.
protocolName = "Noise_IK_25519_ChaChaPoly_BLAKE2s"
// protocolVersion is the version of the control protocol that
// Client will use when initiating a handshake.
//protocolVersion uint16 = 1
// protocolVersionPrefix is the name portion of the protocol
// name+version string that gets mixed into the handshake as a
// prologue.
//
// This mixing verifies that both clients agree that they're
// executing the control protocol at a specific version that
// matches the advertised version in the cleartext packet header.
protocolVersionPrefix = "Tailscale Control Protocol v"
invalidNonce = ^uint64(0)
)
func protocolVersionPrologue(version uint16) []byte {
ret := make([]byte, 0, len(protocolVersionPrefix)+5) // 5 bytes is enough to encode all possible version numbers.
ret = append(ret, protocolVersionPrefix...)
return strconv.AppendUint(ret, uint64(version), 10)
}
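// For example, protocolVersionPrologue(1) yields the bytes
// "Tailscale Control Protocol v1". Both sides mix this prologue into the
// handshake hash, so a disagreement over the advertised version causes the
// handshake to fail rather than silently proceed.
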
// HandshakeContinuation upgrades a net.Conn to a Conn. The net.Conn
// is assumed to have already sent the client>server handshake
// initiation message.
type HandshakeContinuation func(context.Context, net.Conn) (*Conn, error)
// ClientDeferred initiates a control client handshake, returning the
// initial message to send to the server and a continuation to
// finalize the handshake.
//
// ClientDeferred is split in this way for RTT reduction: we run this
// protocol after negotiating a protocol switch from HTTP/HTTPS. If we
// completely serialized the negotiation followed by the handshake,
// we'd pay an extra RTT to transmit the handshake initiation after
// protocol switching. By splitting the handshake into an initial
// message and a continuation, we can embed the handshake initiation
// into the HTTP protocol switching request and avoid a bit of delay.
func ClientDeferred(machineKey key.MachinePrivate, controlKey key.MachinePublic, protocolVersion uint16) (initialHandshake []byte, continueHandshake HandshakeContinuation, err error) {
var s symmetricState
s.Initialize()
// prologue
s.MixHash(protocolVersionPrologue(protocolVersion))
// <- s
// ...
s.MixHash(controlKey.UntypedBytes())
// -> e, es, s, ss
init := mkInitiationMessage(protocolVersion)
machineEphemeral := key.NewMachine()
machineEphemeralPub := machineEphemeral.Public()
copy(init.EphemeralPub(), machineEphemeralPub.UntypedBytes())
s.MixHash(machineEphemeralPub.UntypedBytes())
cipher, err := s.MixDH(machineEphemeral, controlKey)
if err != nil {
return nil, nil, fmt.Errorf("computing es: %w", err)
}
machineKeyPub := machineKey.Public()
s.EncryptAndHash(cipher, init.MachinePub(), machineKeyPub.UntypedBytes())
cipher, err = s.MixDH(machineKey, controlKey)
if err != nil {
return nil, nil, fmt.Errorf("computing ss: %w", err)
}
s.EncryptAndHash(cipher, init.Tag(), nil) // empty message payload
cont := func(ctx context.Context, conn net.Conn) (*Conn, error) {
return continueClientHandshake(ctx, conn, &s, machineKey, machineEphemeral, controlKey, protocolVersion)
}
return init[:], cont, nil
}
// Client wraps ClientDeferred and immediately invokes the returned
// continuation with conn.
//
// This is a helper for when you don't need the fancy
// continuation-style handshake, and just want to synchronously
// upgrade a net.Conn to a secure transport.
func Client(ctx context.Context, conn net.Conn, machineKey key.MachinePrivate, controlKey key.MachinePublic, protocolVersion uint16) (*Conn, error) {
init, cont, err := ClientDeferred(machineKey, controlKey, protocolVersion)
if err != nil {
return nil, err
}
if _, err := conn.Write(init); err != nil {
return nil, err
}
return cont(ctx, conn)
}
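// Illustrative sketch, not part of the vendored file (the dialed address,
// protocol version, and function name are assumptions): upgrading a plain
// net.Conn to a secured controlbase.Conn with the synchronous helper above.
func exampleClientUpgrade(ctx context.Context, controlPub key.MachinePublic) (*Conn, error) {
	nc, err := net.Dial("tcp", "control.example.com:443")
	if err != nil {
		return nil, err
	}
	machineKey := key.NewMachine() // fresh machine identity, for the sketch only
	return Client(ctx, nc, machineKey, controlPub, 1)
}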
func continueClientHandshake(ctx context.Context, conn net.Conn, s *symmetricState, machineKey, machineEphemeral key.MachinePrivate, controlKey key.MachinePublic, protocolVersion uint16) (*Conn, error) {
// No matter what, this function can only run once per s. Ensure
// attempted reuse causes a panic.
defer func() {
s.finished = true
}()
if deadline, ok := ctx.Deadline(); ok {
if err := conn.SetDeadline(deadline); err != nil {
return nil, fmt.Errorf("setting conn deadline: %w", err)
}
defer func() {
conn.SetDeadline(time.Time{})
}()
}
// Read in the payload and look for errors/protocol violations from the server.
var resp responseMessage
if _, err := io.ReadFull(conn, resp.Header()); err != nil {
return nil, fmt.Errorf("reading response header: %w", err)
}
if resp.Type() != msgTypeResponse {
if resp.Type() != msgTypeError {
return nil, fmt.Errorf("unexpected response message type %d", resp.Type())
}
msg := make([]byte, resp.Length())
if _, err := io.ReadFull(conn, msg); err != nil {
return nil, err
}
return nil, fmt.Errorf("server error: %q", msg)
}
if resp.Length() != len(resp.Payload()) {
return nil, fmt.Errorf("wrong length %d received for handshake response", resp.Length())
}
if _, err := io.ReadFull(conn, resp.Payload()); err != nil {
return nil, err
}
// <- e, ee, se
controlEphemeralPub := key.MachinePublicFromRaw32(mem.B(resp.EphemeralPub()))
s.MixHash(controlEphemeralPub.UntypedBytes())
if _, err := s.MixDH(machineEphemeral, controlEphemeralPub); err != nil {
return nil, fmt.Errorf("computing ee: %w", err)
}
cipher, err := s.MixDH(machineKey, controlEphemeralPub)
if err != nil {
return nil, fmt.Errorf("computing se: %w", err)
}
if err := s.DecryptAndHash(cipher, nil, resp.Tag()); err != nil {
return nil, fmt.Errorf("decrypting payload: %w", err)
}
c1, c2, err := s.Split()
if err != nil {
return nil, fmt.Errorf("finalizing handshake: %w", err)
}
c := &Conn{
conn: conn,
version: protocolVersion,
peer: controlKey,
handshakeHash: s.h,
tx: txState{
cipher: c1,
},
rx: rxState{
cipher: c2,
},
}
return c, nil
}
// Server initiates a control server handshake, returning the resulting
// control connection.
//
// optionalInit can be the client's initial handshake message as
// returned by ClientDeferred, or nil in which case the initial
// message is read from conn.
//
// The context deadline, if any, covers the entire handshaking
// process.
func Server(ctx context.Context, conn net.Conn, controlKey key.MachinePrivate, optionalInit []byte) (*Conn, error) {
if deadline, ok := ctx.Deadline(); ok {
if err := conn.SetDeadline(deadline); err != nil {
return nil, fmt.Errorf("setting conn deadline: %w", err)
}
defer func() {
conn.SetDeadline(time.Time{})
}()
}
// Deliberately does not support formatting, so that we don't echo
// attacker-controlled input back to them.
sendErr := func(msg string) error {
if len(msg) >= 1<<16 {
msg = msg[:1<<16-1] // keep the length representable in the uint16 header field
}
var hdr [headerLen]byte
hdr[0] = msgTypeError
binary.BigEndian.PutUint16(hdr[1:3], uint16(len(msg)))
if _, err := conn.Write(hdr[:]); err != nil {
return fmt.Errorf("sending %q error to client: %w", msg, err)
}
if _, err := io.WriteString(conn, msg); err != nil {
return fmt.Errorf("sending %q error to client: %w", msg, err)
}
return fmt.Errorf("refused client handshake: %q", msg)
}
var s symmetricState
s.Initialize()
var init initiationMessage
if optionalInit != nil {
if len(optionalInit) != len(init) {
return nil, sendErr("wrong handshake initiation size")
}
copy(init[:], optionalInit)
} else if _, err := io.ReadFull(conn, init.Header()); err != nil {
return nil, err
}
// Just a rename to make it more obvious what the value is. In the
// current implementation we don't need to block any protocol
// versions at this layer, it's safe to let the handshake proceed
// and then let the caller make decisions based on the agreed-upon
// protocol version.
clientVersion := init.Version()
if init.Type() != msgTypeInitiation {
return nil, sendErr("unexpected handshake message type")
}
if init.Length() != len(init.Payload()) {
return nil, sendErr("wrong handshake initiation length")
}
// if optionalInit was provided, we have the payload already.
if optionalInit == nil {
if _, err := io.ReadFull(conn, init.Payload()); err != nil {
return nil, err
}
}
// prologue. Can only do this once we at least think the client is
// handshaking using a supported version.
s.MixHash(protocolVersionPrologue(clientVersion))
// <- s
// ...
controlKeyPub := controlKey.Public()
s.MixHash(controlKeyPub.UntypedBytes())
// -> e, es, s, ss
machineEphemeralPub := key.MachinePublicFromRaw32(mem.B(init.EphemeralPub()))
s.MixHash(machineEphemeralPub.UntypedBytes())
cipher, err := s.MixDH(controlKey, machineEphemeralPub)
if err != nil {
return nil, fmt.Errorf("computing es: %w", err)
}
var machineKeyBytes [32]byte
if err := s.DecryptAndHash(cipher, machineKeyBytes[:], init.MachinePub()); err != nil {
return nil, fmt.Errorf("decrypting machine key: %w", err)
}
machineKey := key.MachinePublicFromRaw32(mem.B(machineKeyBytes[:]))
cipher, err = s.MixDH(controlKey, machineKey)
if err != nil {
return nil, fmt.Errorf("computing ss: %w", err)
}
if err := s.DecryptAndHash(cipher, nil, init.Tag()); err != nil {
return nil, fmt.Errorf("decrypting initiation tag: %w", err)
}
// <- e, ee, se
resp := mkResponseMessage()
controlEphemeral := key.NewMachine()
controlEphemeralPub := controlEphemeral.Public()
copy(resp.EphemeralPub(), controlEphemeralPub.UntypedBytes())
s.MixHash(controlEphemeralPub.UntypedBytes())
if _, err := s.MixDH(controlEphemeral, machineEphemeralPub); err != nil {
return nil, fmt.Errorf("computing ee: %w", err)
}
cipher, err = s.MixDH(controlEphemeral, machineKey)
if err != nil {
return nil, fmt.Errorf("computing se: %w", err)
}
s.EncryptAndHash(cipher, resp.Tag(), nil) // empty message payload
c1, c2, err := s.Split()
if err != nil {
return nil, fmt.Errorf("finalizing handshake: %w", err)
}
if _, err := conn.Write(resp[:]); err != nil {
return nil, err
}
c := &Conn{
conn: conn,
version: clientVersion,
peer: machineKey,
handshakeHash: s.h,
tx: txState{
cipher: c2,
},
rx: rxState{
cipher: c1,
},
}
return c, nil
}
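// Server-side usage sketch: accept a connection and run the handshake,
// passing a nil optionalInit so the initiation is read from the wire.
// ln and controlPriv are caller-supplied placeholders:
//
//	netConn, err := ln.Accept()
//	if err != nil {
//		return err
//	}
//	cbConn, err := controlbase.Server(ctx, netConn, controlPriv, nil)
//	if err != nil {
//		return err
//	}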
// symmetricState contains the state of an in-flight handshake.
type symmetricState struct {
finished bool
h [blake2s.Size]byte // hash of currently-processed handshake state
ck [blake2s.Size]byte // chaining key used to construct session keys at the end of the handshake
}
func (s *symmetricState) checkFinished() {
if s.finished {
panic("attempted to use symmetricState after Split was called")
}
}
// Initialize sets s to the initial handshake state, prior to
// processing any handshake messages.
func (s *symmetricState) Initialize() {
s.checkFinished()
s.h = blake2s.Sum256([]byte(protocolName))
s.ck = s.h
}
// MixHash updates s.h to be BLAKE2s(s.h || data), where || is
// concatenation.
func (s *symmetricState) MixHash(data []byte) {
s.checkFinished()
h := newBLAKE2s()
h.Write(s.h[:])
h.Write(data)
h.Sum(s.h[:0])
}
// MixDH updates s.ck with the result of X25519(priv, pub) and returns
// a singleUseCHP that can be used to encrypt or decrypt handshake
// data.
//
// MixDH corresponds to MixKey(X25519(...)) in the spec. Implementing
// it as a single function allows for strongly-typed arguments that
// reduce the risk of error in the caller (e.g. invoking X25519 with
// two private keys, or two public keys), and thus producing the wrong
// calculation.
func (s *symmetricState) MixDH(priv key.MachinePrivate, pub key.MachinePublic) (*singleUseCHP, error) {
s.checkFinished()
keyData, err := curve25519.X25519(priv.UntypedBytes(), pub.UntypedBytes())
if err != nil {
return nil, fmt.Errorf("computing X25519: %w", err)
}
r := hkdf.New(newBLAKE2s, keyData, s.ck[:], nil)
if _, err := io.ReadFull(r, s.ck[:]); err != nil {
return nil, fmt.Errorf("extracting ck: %w", err)
}
var k [chp.KeySize]byte
if _, err := io.ReadFull(r, k[:]); err != nil {
return nil, fmt.Errorf("extracting k: %w", err)
}
return newSingleUseCHP(k), nil
}
// EncryptAndHash encrypts plaintext into ciphertext (which must be
// the correct size to hold the encrypted plaintext) using cipher and
// mixes the resulting ciphertext into s.h.
func (s *symmetricState) EncryptAndHash(cipher *singleUseCHP, ciphertext, plaintext []byte) {
s.checkFinished()
if len(ciphertext) != len(plaintext)+chp.Overhead {
panic("ciphertext is wrong size for given plaintext")
}
ret := cipher.Seal(ciphertext[:0], plaintext, s.h[:])
s.MixHash(ret)
}
// DecryptAndHash decrypts the given ciphertext into plaintext (which
// must be the correct size to hold the decrypted ciphertext) using
// cipher. If decryption is successful, it mixes the ciphertext into
// s.h.
func (s *symmetricState) DecryptAndHash(cipher *singleUseCHP, plaintext, ciphertext []byte) error {
s.checkFinished()
if len(ciphertext) != len(plaintext)+chp.Overhead {
return errors.New("plaintext is wrong size for given ciphertext")
}
if _, err := cipher.Open(plaintext[:0], ciphertext, s.h[:]); err != nil {
return err
}
s.MixHash(ciphertext)
return nil
}
// Split returns two ChaCha20Poly1305 ciphers with keys derived from
// the current handshake state. Methods on s cannot be used again
// after calling Split.
func (s *symmetricState) Split() (c1, c2 cipher.AEAD, err error) {
s.finished = true
var k1, k2 [chp.KeySize]byte
r := hkdf.New(newBLAKE2s, nil, s.ck[:], nil)
if _, err := io.ReadFull(r, k1[:]); err != nil {
return nil, nil, fmt.Errorf("extracting k1: %w", err)
}
if _, err := io.ReadFull(r, k2[:]); err != nil {
return nil, nil, fmt.Errorf("extracting k2: %w", err)
}
c1, err = chp.New(k1[:])
if err != nil {
return nil, nil, fmt.Errorf("constructing AEAD c1: %w", err)
}
c2, err = chp.New(k2[:])
if err != nil {
return nil, nil, fmt.Errorf("constructing AEAD c2: %w", err)
}
return c1, c2, nil
}
// newBLAKE2s returns a hash.Hash implementing BLAKE2s, or panics on
// error.
func newBLAKE2s() hash.Hash {
h, err := blake2s.New256(nil)
if err != nil {
// Should never happen, errors only happen when using BLAKE2s
// in MAC mode with a key.
panic(err)
}
return h
}
// newCHP returns a cipher.AEAD implementing ChaCha20Poly1305, or
// panics on error.
func newCHP(key [chp.KeySize]byte) cipher.AEAD {
aead, err := chp.New(key[:])
if err != nil {
// Can only happen if we passed a key of the wrong length. The
// function signature prevents that.
panic(err)
}
return aead
}
// singleUseCHP is an instance of ChaCha20Poly1305 that can be used
// only once, either for encrypting or decrypting, but not both. The
// chosen operation is always executed with an all-zeros
// nonce. Subsequent calls to either Seal or Open panic.
type singleUseCHP struct {
c cipher.AEAD
}
func newSingleUseCHP(key [chp.KeySize]byte) *singleUseCHP {
return &singleUseCHP{newCHP(key)}
}
func (c *singleUseCHP) Seal(dst, plaintext, additionalData []byte) []byte {
if c.c == nil {
panic("Attempted reuse of singleUseAEAD")
}
cipher := c.c
c.c = nil
var nonce [chp.NonceSize]byte
return cipher.Seal(dst, nonce[:], plaintext, additionalData)
}
func (c *singleUseCHP) Open(dst, ciphertext, additionalData []byte) ([]byte, error) {
if c.c == nil {
panic("Attempted reuse of singleUseAEAD")
}
cipher := c.c
c.c = nil
var nonce [chp.NonceSize]byte
return cipher.Open(dst, nonce[:], ciphertext, additionalData)
}

87
vendor/tailscale.com/control/controlbase/messages.go generated vendored Normal file
View File

@@ -0,0 +1,87 @@
// Copyright (c) Tailscale Inc & AUTHORS
// SPDX-License-Identifier: BSD-3-Clause
package controlbase
import "encoding/binary"
const (
// msgTypeInitiation frames carry a Noise IK handshake initiation message.
msgTypeInitiation = 1
// msgTypeResponse frames carry a Noise IK handshake response message.
msgTypeResponse = 2
// msgTypeError frames carry an unauthenticated human-readable
// error message.
//
// Errors reported in this message type must be treated as public
// hints only. They are not encrypted or authenticated, and so can
// be seen and tampered with on the wire.
msgTypeError = 3
// msgTypeRecord frames carry session data bytes.
msgTypeRecord = 4
// headerLen is the size of the header on all messages except msgTypeInitiation.
headerLen = 3
// initiationHeaderLen is the size of the header on all msgTypeInitiation messages.
initiationHeaderLen = 5
)
// initiationMessage is the protocol message sent from a client
// machine to a control server.
//
// 2b: protocol version
// 1b: message type (0x01)
// 2b: payload length (96)
// 32b: client ephemeral public key (cleartext)
// 48b: client machine public key (encrypted)
// 16b: message tag (authenticates the whole message)
type initiationMessage [101]byte
func mkInitiationMessage(protocolVersion uint16) initiationMessage {
var ret initiationMessage
binary.BigEndian.PutUint16(ret[:2], protocolVersion)
ret[2] = msgTypeInitiation
binary.BigEndian.PutUint16(ret[3:5], uint16(len(ret.Payload())))
return ret
}
func (m *initiationMessage) Header() []byte { return m[:initiationHeaderLen] }
func (m *initiationMessage) Payload() []byte { return m[initiationHeaderLen:] }
func (m *initiationMessage) Version() uint16 { return binary.BigEndian.Uint16(m[:2]) }
func (m *initiationMessage) Type() byte { return m[2] }
func (m *initiationMessage) Length() int { return int(binary.BigEndian.Uint16(m[3:5])) }
func (m *initiationMessage) EphemeralPub() []byte {
return m[initiationHeaderLen : initiationHeaderLen+32]
}
func (m *initiationMessage) MachinePub() []byte {
return m[initiationHeaderLen+32 : initiationHeaderLen+32+48]
}
func (m *initiationMessage) Tag() []byte { return m[initiationHeaderLen+32+48:] }
// responseMessage is the protocol message sent from a control server
// to a client machine.
//
// 1b: message type (0x02)
// 2b: payload length (48)
// 32b: control ephemeral public key (cleartext)
// 16b: message tag (authenticates the whole message)
type responseMessage [51]byte
func mkResponseMessage() responseMessage {
var ret responseMessage
ret[0] = msgTypeResponse
binary.BigEndian.PutUint16(ret[1:], uint16(len(ret.Payload())))
return ret
}
func (m *responseMessage) Header() []byte { return m[:headerLen] }
func (m *responseMessage) Payload() []byte { return m[headerLen:] }
func (m *responseMessage) Type() byte { return m[0] }
func (m *responseMessage) Length() int { return int(binary.BigEndian.Uint16(m[1:3])) }
func (m *responseMessage) EphemeralPub() []byte { return m[headerLen : headerLen+32] }
func (m *responseMessage) Tag() []byte { return m[headerLen+32:] }
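// Size cross-check (assuming chp.Overhead is ChaCha20-Poly1305's standard
// 16-byte tag): the encrypted machine key is 32+16 = 48 bytes, so an
// initiation is 5 (header) + 32 + 48 + 16 = 101 bytes, matching
// len(initiationMessage), and a response is 3 + 32 + 16 = 51 bytes,
// matching len(responseMessage).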

746
vendor/tailscale.com/control/controlclient/auto.go generated vendored Normal file
View File

@@ -0,0 +1,746 @@
// Copyright (c) Tailscale Inc & AUTHORS
// SPDX-License-Identifier: BSD-3-Clause
package controlclient
import (
"context"
"errors"
"fmt"
"net/http"
"sync"
"sync/atomic"
"time"
"tailscale.com/logtail/backoff"
"tailscale.com/net/sockstats"
"tailscale.com/tailcfg"
"tailscale.com/tstime"
"tailscale.com/types/key"
"tailscale.com/types/logger"
"tailscale.com/types/netmap"
"tailscale.com/types/persist"
"tailscale.com/types/structs"
"tailscale.com/util/execqueue"
)
type LoginGoal struct {
_ structs.Incomparable
flags LoginFlags // flags to use when logging in
url string // auth url that needs to be visited
}
var _ Client = (*Auto)(nil)
// waitUnpause waits until either the client is unpaused or the Auto client is
// shut down. It reports whether the client should keep running (i.e. it's not
// closed).
func (c *Auto) waitUnpause(routineLogName string) (keepRunning bool) {
c.mu.Lock()
if !c.paused || c.closed {
defer c.mu.Unlock()
return !c.closed
}
unpaused := c.unpausedChanLocked()
c.mu.Unlock()
c.logf("%s: awaiting unpause", routineLogName)
return <-unpaused
}
// updateRoutine is responsible for informing the server of worthy changes to
// our local state. It runs in its own goroutine.
func (c *Auto) updateRoutine() {
defer close(c.updateDone)
bo := backoff.NewBackoff("updateRoutine", c.logf, 30*time.Second)
// lastUpdateGenInformed is the value of lastUpdateAt that we've successfully
// informed the server of.
var lastUpdateGenInformed updateGen
for {
if !c.waitUnpause("updateRoutine") {
c.logf("updateRoutine: exiting")
return
}
c.mu.Lock()
gen := c.lastUpdateGen
ctx := c.mapCtx
needUpdate := gen > 0 && gen != lastUpdateGenInformed && c.loggedIn
c.mu.Unlock()
if !needUpdate {
// Nothing to do, wait for a signal.
select {
case <-ctx.Done():
continue
case <-c.updateCh:
continue
}
}
t0 := c.clock.Now()
err := c.direct.SendUpdate(ctx)
d := time.Since(t0).Round(time.Millisecond)
if err != nil {
if ctx.Err() == nil {
c.direct.logf("lite map update error after %v: %v", d, err)
}
bo.BackOff(ctx, err)
continue
}
bo.BackOff(ctx, nil)
c.direct.logf("[v1] successful lite map update in %v", d)
lastUpdateGenInformed = gen
}
}
// atomicGen is an atomic int64 generator. It is used to generate monotonically
// increasing numbers for updateGen.
var atomicGen atomic.Int64
func nextUpdateGen() updateGen {
return updateGen(atomicGen.Add(1))
}
// updateGen is a monotonically increasing number that represents a particular
// update to the local state.
type updateGen int64
// Auto connects to a tailcontrol server for a node.
// It's a concrete implementation of the Client interface.
type Auto struct {
direct *Direct // our interface to the server APIs
clock tstime.Clock
logf logger.Logf
closed bool
updateCh chan struct{} // readable when we should inform the server of a change
observer Observer // called to update Client status; always non-nil
observerQueue execqueue.ExecQueue
unregisterHealthWatch func()
mu sync.Mutex // mutex guards the following fields
wantLoggedIn bool // whether the user wants to be logged in per last method call
urlToVisit string // the last url we were told to visit
expiry time.Time
// lastUpdateGen is the gen of last update we had an update worth sending to
// the server.
lastUpdateGen updateGen
paused bool // whether we should stop making HTTP requests
unpauseWaiters []chan bool // chans that gets sent true (once) on wake, or false on Shutdown
loggedIn bool // true if currently logged in
loginGoal *LoginGoal // non-nil if some login activity is desired
inMapPoll bool // true once we get the first MapResponse in a stream; false when HTTP response ends
state State // TODO(bradfitz): delete this, make it computed by method from other state
authCtx context.Context // context used for auth requests
mapCtx context.Context // context used for netmap and update requests
authCancel func() // cancel authCtx
mapCancel func() // cancel mapCtx
authDone chan struct{} // when closed, authRoutine is done
mapDone chan struct{} // when closed, mapRoutine is done
updateDone chan struct{} // when closed, updateRoutine is done
}
// New creates and starts a new Auto.
func New(opts Options) (*Auto, error) {
c, err := NewNoStart(opts)
if c != nil {
c.Start()
}
return c, err
}
// NewNoStart creates a new Auto, but without calling Start on it.
func NewNoStart(opts Options) (_ *Auto, err error) {
direct, err := NewDirect(opts)
if err != nil {
return nil, err
}
defer func() {
if err != nil {
direct.Close()
}
}()
if opts.Observer == nil {
return nil, errors.New("missing required Options.Observer")
}
if opts.Logf == nil {
opts.Logf = func(fmt string, args ...any) {}
}
if opts.Clock == nil {
opts.Clock = tstime.StdClock{}
}
c := &Auto{
direct: direct,
clock: opts.Clock,
logf: opts.Logf,
updateCh: make(chan struct{}, 1),
authDone: make(chan struct{}),
mapDone: make(chan struct{}),
updateDone: make(chan struct{}),
observer: opts.Observer,
}
c.authCtx, c.authCancel = context.WithCancel(context.Background())
c.authCtx = sockstats.WithSockStats(c.authCtx, sockstats.LabelControlClientAuto, opts.Logf)
c.mapCtx, c.mapCancel = context.WithCancel(context.Background())
c.mapCtx = sockstats.WithSockStats(c.mapCtx, sockstats.LabelControlClientAuto, opts.Logf)
c.unregisterHealthWatch = opts.HealthTracker.RegisterWatcher(direct.ReportHealthChange)
return c, nil
}
// SetPaused controls whether HTTP activity should be paused.
//
// The client can be paused and unpaused repeatedly, unlike Start and Shutdown, which can only be used once.
func (c *Auto) SetPaused(paused bool) {
c.mu.Lock()
defer c.mu.Unlock()
if paused == c.paused || c.closed {
return
}
c.logf("setPaused(%v)", paused)
c.paused = paused
if paused {
c.cancelMapCtxLocked()
c.cancelAuthCtxLocked()
return
}
for _, ch := range c.unpauseWaiters {
ch <- true
}
c.unpauseWaiters = nil
}
// Start starts the client's goroutines.
//
// It should only be called for clients created by NewNoStart.
func (c *Auto) Start() {
go c.authRoutine()
go c.mapRoutine()
go c.updateRoutine()
}
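// Minimal lifecycle sketch. Options is declared in direct.go (not shown
// here), so only the Observer and Logf fields referenced above are set;
// myObserver and the remaining configuration (server URL, keys, etc.)
// are placeholders a real caller must supply:
//
//	c, err := controlclient.New(controlclient.Options{
//		Observer: myObserver, // receives SetControlClientStatus callbacks
//		Logf:     log.Printf,
//	})
//	if err != nil {
//		return err
//	}
//	c.Login(controlclient.LoginDefault)
//	// ...use the client; it reports state via the Observer...
//	c.Shutdown()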
// updateControl sends a new OmitPeers, non-streaming map request (to just send
// Hostinfo/Netinfo/Endpoints info, while keeping an existing streaming response
// open).
//
// It should be called whenever there's something new to tell the server.
func (c *Auto) updateControl() {
gen := nextUpdateGen()
c.mu.Lock()
if gen < c.lastUpdateGen {
// This update is out of date.
c.mu.Unlock()
return
}
c.lastUpdateGen = gen
c.mu.Unlock()
select {
case c.updateCh <- struct{}{}:
default:
}
}
// cancelAuthCtxLocked is like cancelAuthCtx, but assumes the caller holds c.mu.
func (c *Auto) cancelAuthCtxLocked() {
if c.authCancel != nil {
c.authCancel()
}
if !c.closed {
c.authCtx, c.authCancel = context.WithCancel(context.Background())
c.authCtx = sockstats.WithSockStats(c.authCtx, sockstats.LabelControlClientAuto, c.logf)
}
}
// cancelMapCtxLocked is like cancelMapCtx, but assumes the caller holds c.mu.
func (c *Auto) cancelMapCtxLocked() {
if c.mapCancel != nil {
c.mapCancel()
}
if !c.closed {
c.mapCtx, c.mapCancel = context.WithCancel(context.Background())
c.mapCtx = sockstats.WithSockStats(c.mapCtx, sockstats.LabelControlClientAuto, c.logf)
}
}
// restartMap cancels the existing mapPoll and liteUpdates, and then starts a
// new one.
func (c *Auto) restartMap() {
c.mu.Lock()
c.cancelMapCtxLocked()
synced := c.inMapPoll
c.mu.Unlock()
c.logf("[v1] restartMap: synced=%v", synced)
c.updateControl()
}
func (c *Auto) authRoutine() {
defer close(c.authDone)
bo := backoff.NewBackoff("authRoutine", c.logf, 30*time.Second)
for {
if !c.waitUnpause("authRoutine") {
c.logf("authRoutine: exiting")
return
}
c.mu.Lock()
goal := c.loginGoal
ctx := c.authCtx
if goal != nil {
c.logf("[v1] authRoutine: %s; wantLoggedIn=%v", c.state, true)
} else {
c.logf("[v1] authRoutine: %s; goal=nil paused=%v", c.state, c.paused)
}
c.mu.Unlock()
report := func(err error, msg string) {
c.logf("[v1] %s: %v", msg, err)
// don't send status updates for context errors,
// since context cancelation is always on purpose.
if ctx.Err() == nil {
c.sendStatus("authRoutine-report", err, "", nil)
}
}
if goal == nil {
c.direct.health.SetAuthRoutineInError(nil)
// Wait for user to Login or Logout.
<-ctx.Done()
c.logf("[v1] authRoutine: context done.")
continue
}
c.mu.Lock()
c.urlToVisit = goal.url
if goal.url != "" {
c.state = StateURLVisitRequired
} else {
c.state = StateAuthenticating
}
c.mu.Unlock()
var url string
var err error
var f string
if goal.url != "" {
url, err = c.direct.WaitLoginURL(ctx, goal.url)
f = "WaitLoginURL"
} else {
url, err = c.direct.TryLogin(ctx, goal.flags)
f = "TryLogin"
}
if err != nil {
c.direct.health.SetAuthRoutineInError(err)
report(err, f)
bo.BackOff(ctx, err)
continue
}
if url != "" {
// goal.url ought to be empty here. However, not all control servers
// get this right, and logging about it here just generates noise.
//
// TODO(bradfitz): I don't follow that comment. Our own testcontrol
// used by tstest/integration hits this path, in fact.
if c.direct.panicOnUse {
panic("tainted client")
}
c.mu.Lock()
c.urlToVisit = url
c.loginGoal = &LoginGoal{
flags: LoginDefault,
url: url,
}
c.state = StateURLVisitRequired
c.mu.Unlock()
c.sendStatus("authRoutine-url", err, url, nil)
if goal.url == url {
// The server sent us the same URL we already tried,
// backoff to avoid a busy loop.
bo.BackOff(ctx, errors.New("login URL not changing"))
} else {
bo.BackOff(ctx, nil)
}
continue
}
// success
c.direct.health.SetAuthRoutineInError(nil)
c.mu.Lock()
c.urlToVisit = ""
c.loggedIn = true
c.loginGoal = nil
c.state = StateAuthenticated
c.mu.Unlock()
c.sendStatus("authRoutine-success", nil, "", nil)
c.restartMap()
bo.BackOff(ctx, nil)
}
}
// ExpiryForTests returns the credential expiration time, or the zero value if
// the expiration time isn't known. It's used in tests only.
func (c *Auto) ExpiryForTests() time.Time {
c.mu.Lock()
defer c.mu.Unlock()
return c.expiry
}
// DirectForTest returns the underlying direct client object.
// It's used in tests only.
func (c *Auto) DirectForTest() *Direct {
return c.direct
}
// unpausedChanLocked returns a new channel that is sent true (once)
// when the client is unpaused, or false on Auto.Shutdown.
//
// c.mu must be held
func (c *Auto) unpausedChanLocked() <-chan bool {
unpaused := make(chan bool, 1)
c.unpauseWaiters = append(c.unpauseWaiters, unpaused)
return unpaused
}
// mapRoutineState is the state of Auto.mapRoutine while it's running.
type mapRoutineState struct {
c *Auto
bo *backoff.Backoff
}
var _ NetmapDeltaUpdater = mapRoutineState{}
func (mrs mapRoutineState) UpdateFullNetmap(nm *netmap.NetworkMap) {
c := mrs.c
c.mu.Lock()
ctx := c.mapCtx
c.inMapPoll = true
if c.loggedIn {
c.state = StateSynchronized
}
c.expiry = nm.Expiry
stillAuthed := c.loggedIn
c.logf("[v1] mapRoutine: netmap received: %s", c.state)
c.mu.Unlock()
if stillAuthed {
c.sendStatus("mapRoutine-got-netmap", nil, "", nm)
}
// Reset the backoff timer if we got a netmap.
mrs.bo.BackOff(ctx, nil)
}
func (mrs mapRoutineState) UpdateNetmapDelta(muts []netmap.NodeMutation) bool {
c := mrs.c
c.mu.Lock()
goodState := c.loggedIn && c.inMapPoll
ndu, canDelta := c.observer.(NetmapDeltaUpdater)
c.mu.Unlock()
if !goodState || !canDelta {
return false
}
ctx, cancel := context.WithTimeout(c.mapCtx, 2*time.Second)
defer cancel()
var ok bool
err := c.observerQueue.RunSync(ctx, func() {
ok = ndu.UpdateNetmapDelta(muts)
})
return err == nil && ok
}
// mapRoutine is responsible for keeping a read-only streaming connection to the
// control server, and keeping the netmap up to date.
func (c *Auto) mapRoutine() {
defer close(c.mapDone)
mrs := mapRoutineState{
c: c,
bo: backoff.NewBackoff("mapRoutine", c.logf, 30*time.Second),
}
for {
if !c.waitUnpause("mapRoutine") {
c.logf("mapRoutine: exiting")
return
}
c.mu.Lock()
c.logf("[v1] mapRoutine: %s", c.state)
loggedIn := c.loggedIn
ctx := c.mapCtx
c.mu.Unlock()
report := func(err error, msg string) {
c.logf("[v1] %s: %v", msg, err)
err = fmt.Errorf("%s: %w", msg, err)
// don't send status updates for context errors,
// since context cancelation is always on purpose.
if ctx.Err() == nil {
c.sendStatus("mapRoutine1", err, "", nil)
}
}
if !loggedIn {
// Wait for something interesting to happen
c.mu.Lock()
c.inMapPoll = false
c.mu.Unlock()
<-ctx.Done()
c.logf("[v1] mapRoutine: context done.")
continue
}
c.direct.health.SetOutOfPollNetMap()
err := c.direct.PollNetMap(ctx, mrs)
c.direct.health.SetOutOfPollNetMap()
c.mu.Lock()
c.inMapPoll = false
if c.state == StateSynchronized {
c.state = StateAuthenticated
}
paused := c.paused
c.mu.Unlock()
if paused {
mrs.bo.BackOff(ctx, nil)
c.logf("mapRoutine: paused")
} else {
mrs.bo.BackOff(ctx, err)
report(err, "PollNetMap")
}
}
}
func (c *Auto) AuthCantContinue() bool {
if c == nil {
return true
}
c.mu.Lock()
defer c.mu.Unlock()
return !c.loggedIn && (c.loginGoal == nil || c.loginGoal.url != "")
}
func (c *Auto) SetHostinfo(hi *tailcfg.Hostinfo) {
if hi == nil {
panic("nil Hostinfo")
}
if !c.direct.SetHostinfo(hi) {
// No changes. Don't log.
return
}
// Send new Hostinfo to server
c.updateControl()
}
func (c *Auto) SetNetInfo(ni *tailcfg.NetInfo) {
if ni == nil {
panic("nil NetInfo")
}
if !c.direct.SetNetInfo(ni) {
return
}
// Send new NetInfo to server
c.updateControl()
}
// SetTKAHead updates the TKA head hash that map-request infrastructure sends.
func (c *Auto) SetTKAHead(headHash string) {
if !c.direct.SetTKAHead(headHash) {
return
}
// Send new TKAHead to server
c.updateControl()
}
// sendStatus must not be called while holding c.mu.
func (c *Auto) sendStatus(who string, err error, url string, nm *netmap.NetworkMap) {
c.mu.Lock()
if c.closed {
c.mu.Unlock()
return
}
state := c.state
loggedIn := c.loggedIn
inMapPoll := c.inMapPoll
c.mu.Unlock()
c.logf("[v1] sendStatus: %s: %v", who, state)
var p persist.PersistView
if nm != nil && loggedIn && inMapPoll {
p = c.direct.GetPersist()
} else {
// don't send netmap status, as it's misleading when we're
// not logged in.
nm = nil
}
new := Status{
URL: url,
Persist: p,
NetMap: nm,
Err: err,
state: state,
}
// Launch a new goroutine to avoid blocking the caller while the observer
// does its thing, which may result in a call back into the client.
c.observerQueue.Add(func() {
c.observer.SetControlClientStatus(c, new)
})
}
func (c *Auto) Login(flags LoginFlags) {
c.logf("client.Login(%v)", flags)
c.mu.Lock()
defer c.mu.Unlock()
if c.closed {
return
}
if c.direct != nil && c.direct.panicOnUse {
panic("tainted client")
}
c.wantLoggedIn = true
c.loginGoal = &LoginGoal{
flags: flags,
}
c.cancelMapCtxLocked()
c.cancelAuthCtxLocked()
}
var ErrClientClosed = errors.New("client closed")
func (c *Auto) Logout(ctx context.Context) error {
c.logf("client.Logout()")
c.mu.Lock()
c.wantLoggedIn = false
c.loginGoal = nil
closed := c.closed
if c.direct != nil && c.direct.panicOnUse {
panic("tainted client")
}
c.mu.Unlock()
if closed {
return ErrClientClosed
}
if err := c.direct.TryLogout(ctx); err != nil {
return err
}
c.mu.Lock()
c.loggedIn = false
c.state = StateNotAuthenticated
c.cancelAuthCtxLocked()
c.cancelMapCtxLocked()
c.mu.Unlock()
c.sendStatus("authRoutine-wantout", nil, "", nil)
return nil
}
func (c *Auto) SetExpirySooner(ctx context.Context, expiry time.Time) error {
return c.direct.SetExpirySooner(ctx, expiry)
}
// UpdateEndpoints sets the client's discovered endpoints and sends
// them to the control server if they've changed.
//
// It does not retain the provided slice.
func (c *Auto) UpdateEndpoints(endpoints []tailcfg.Endpoint) {
changed := c.direct.SetEndpoints(endpoints)
if changed {
c.updateControl()
}
}
func (c *Auto) Shutdown() {
c.mu.Lock()
if c.closed {
c.mu.Unlock()
return
}
c.logf("client.Shutdown ...")
direct := c.direct
c.closed = true
c.observerQueue.Shutdown()
c.cancelAuthCtxLocked()
c.cancelMapCtxLocked()
for _, w := range c.unpauseWaiters {
w <- false
}
c.unpauseWaiters = nil
c.mu.Unlock()
c.unregisterHealthWatch()
<-c.authDone
<-c.mapDone
<-c.updateDone
if direct != nil {
direct.Close()
}
ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
defer cancel()
c.observerQueue.Wait(ctx)
c.logf("Client.Shutdown done.")
}
// TestOnlyNodePublicKey returns the node public key currently in use.
// It's used exclusively in tests.
func (c *Auto) TestOnlyNodePublicKey() key.NodePublic {
priv := c.direct.GetPersist()
return priv.PrivateNodeKey().Public()
}
func (c *Auto) TestOnlySetAuthKey(authkey string) {
c.direct.mu.Lock()
defer c.direct.mu.Unlock()
c.direct.authKey = authkey
}
func (c *Auto) TestOnlyTimeNow() time.Time {
return c.clock.Now()
}
// SetDNS sends the SetDNSRequest request to the control plane server,
// requesting a DNS record be created or updated.
func (c *Auto) SetDNS(ctx context.Context, req *tailcfg.SetDNSRequest) error {
return c.direct.SetDNS(ctx, req)
}
func (c *Auto) DoNoiseRequest(req *http.Request) (*http.Response, error) {
return c.direct.DoNoiseRequest(req)
}
// GetSingleUseNoiseRoundTripper returns a RoundTripper that can only be used
// once (and must be used once) to make a single HTTP request over the noise
// channel to the coordination server.
//
// In addition to the RoundTripper, it returns the HTTP/2 channel's early noise
// payload, if any.
func (c *Auto) GetSingleUseNoiseRoundTripper(ctx context.Context) (http.RoundTripper, *tailcfg.EarlyNoise, error) {
return c.direct.GetSingleUseNoiseRoundTripper(ctx)
}

90
vendor/tailscale.com/control/controlclient/client.go generated vendored Normal file
View File

@@ -0,0 +1,90 @@
// Copyright (c) Tailscale Inc & AUTHORS
// SPDX-License-Identifier: BSD-3-Clause
// Package controlclient implements the client for the Tailscale
// control plane.
//
// It handles authentication and port picking, and collects the local
// network configuration.
package controlclient
import (
"context"
"tailscale.com/tailcfg"
)
// LoginFlags is a bitmask of options to change the behavior of Client.Login
// and LocalBackend.
type LoginFlags int
const (
LoginDefault = LoginFlags(0)
LoginInteractive = LoginFlags(1 << iota) // force user login and key refresh
LoginEphemeral // set RegisterRequest.Ephemeral
// LocalBackendStartKeyOSNeutral instructs NewLocalBackend to start the
// LocalBackend without any OS-dependent StateStore StartKey behavior.
//
// See https://github.com/tailscale/tailscale/issues/6973.
LocalBackendStartKeyOSNeutral
)
// Client represents a client connection to the control server.
// Currently this is done through a pair of polling https requests in
// the Auto client, but that might change eventually.
//
// The Client must be comparable as it is used by the Observer to detect stale
// clients.
type Client interface {
// Shutdown closes this session, which should not be used any further
// afterwards.
Shutdown()
// Login begins an interactive or non-interactive login process.
// Client will eventually call the Status callback with either a
// LoginFinished flag (on success) or an auth URL (if further
// interaction is needed). It merely sets the process in motion,
// and doesn't wait for it to complete.
Login(LoginFlags)
// Logout starts a synchronous logout process. It doesn't return
// until the logout operation has been completed.
Logout(context.Context) error
// SetPaused pauses or unpauses the controlclient activity as much
// as possible, without losing its internal state, to minimize
// unnecessary network activity.
// TODO: It might be better to simply shutdown the controlclient and
// make a new one when it's time to unpause.
SetPaused(bool)
// AuthCantContinue returns whether authentication is blocked. If it
// is, you either need to visit the auth URL (previously sent in a
// Status callback) or call the Login function appropriately.
// TODO: this probably belongs in the Status itself instead.
AuthCantContinue() bool
// SetHostinfo changes the Hostinfo structure that will be sent in
// subsequent node registration requests.
// TODO: a server-side change would let us simply upload this
// in a separate http request. It has nothing to do with the rest of
// the state machine.
SetHostinfo(*tailcfg.Hostinfo)
// SetNetInfo changes the NetInfo structure that will be sent in
// subsequent node registration requests.
// TODO: a server-side change would let us simply upload this
// in a separate http request. It has nothing to do with the rest of
// the state machine.
SetNetInfo(*tailcfg.NetInfo)
// SetTKAHead changes the TKA head hash value that will be sent in
// subsequent netmap requests.
SetTKAHead(headHash string)
// UpdateEndpoints changes the Endpoint structure that will be sent
// in subsequent node registration requests.
// TODO: a server-side change would let us simply upload this
// in a separate http request. It has nothing to do with the rest of
// the state machine.
UpdateEndpoints(endpoints []tailcfg.Endpoint)
}
// UserVisibleError is an error that should be shown to users.
type UserVisibleError string
func (e UserVisibleError) Error() string { return string(e) }
func (e UserVisibleError) UserVisibleError() string { return string(e) }

1699
vendor/tailscale.com/control/controlclient/direct.go generated vendored Normal file

File diff suppressed because it is too large Load Diff

832
vendor/tailscale.com/control/controlclient/map.go generated vendored Normal file
View File

@@ -0,0 +1,832 @@
// Copyright (c) Tailscale Inc & AUTHORS
// SPDX-License-Identifier: BSD-3-Clause
package controlclient
import (
"cmp"
"context"
"encoding/json"
"fmt"
"maps"
"net"
"reflect"
"runtime"
"runtime/debug"
"slices"
"sort"
"strconv"
"sync"
"time"
"tailscale.com/control/controlknobs"
"tailscale.com/envknob"
"tailscale.com/tailcfg"
"tailscale.com/tstime"
"tailscale.com/types/key"
"tailscale.com/types/logger"
"tailscale.com/types/netmap"
"tailscale.com/types/ptr"
"tailscale.com/types/views"
"tailscale.com/util/clientmetric"
"tailscale.com/util/mak"
"tailscale.com/util/set"
"tailscale.com/wgengine/filter"
)
// mapSession holds the state over a long-polled "map" request to the
// control plane.
//
// It accepts incremental tailcfg.MapResponse values to
// netMapForResponse and returns fully inflated NetworkMaps, filling
// in the omitted data implicit from prior MapResponse values from
// within the same session (the same long-poll HTTP response to the
// one MapRequest).
type mapSession struct {
// Immutable fields.
netmapUpdater NetmapUpdater // called on changes (in addition to the optional hooks below)
controlKnobs *controlknobs.Knobs // or nil
privateNodeKey key.NodePrivate
publicNodeKey key.NodePublic
logf logger.Logf
vlogf logger.Logf
machinePubKey key.MachinePublic
altClock tstime.Clock // if nil, regular time is used
cancel context.CancelFunc // always non-nil, shuts down caller's base long poll context
// sessionAliveCtx is a Background-based context that's alive for the
// duration of the mapSession that we own the lifetime of. It's closed by
// sessionAliveCtxClose.
sessionAliveCtx context.Context
sessionAliveCtxClose context.CancelFunc // closes sessionAliveCtx
// Optional hooks, guaranteed non-nil (set to no-op funcs) by the
// newMapSession constructor. They must be overridden if desired
// before the mapSession is used.
// onDebug specifies what to do with a *tailcfg.Debug message.
onDebug func(context.Context, *tailcfg.Debug) error
// onSelfNodeChanged is called before the NetmapUpdater if the self node was
// changed.
onSelfNodeChanged func(*netmap.NetworkMap)
// Fields storing state over the course of multiple MapResponses.
lastPrintMap time.Time
lastNode tailcfg.NodeView
lastCapSet set.Set[tailcfg.NodeCapability]
peers map[tailcfg.NodeID]*tailcfg.NodeView // pointer to view (oddly). same pointers as sortedPeers.
sortedPeers []*tailcfg.NodeView // same pointers as peers, but sorted by Node.ID
lastDNSConfig *tailcfg.DNSConfig
lastDERPMap *tailcfg.DERPMap
lastUserProfile map[tailcfg.UserID]tailcfg.UserProfile
lastPacketFilterRules views.Slice[tailcfg.FilterRule] // concatenation of all namedPacketFilters
namedPacketFilters map[string]views.Slice[tailcfg.FilterRule]
lastParsedPacketFilter []filter.Match
lastSSHPolicy *tailcfg.SSHPolicy
collectServices bool
lastDomain string
lastDomainAuditLogID string
lastHealth []string
lastPopBrowserURL string
lastTKAInfo *tailcfg.TKAInfo
lastNetmapSummary string // from NetworkMap.VeryConcise
lastMaxExpiry time.Duration
}
// newMapSession returns a mostly unconfigured new mapSession.
//
// Modify its optional fields on the returned value before use.
//
// It must have its Close method called to release resources.
func newMapSession(privateNodeKey key.NodePrivate, nu NetmapUpdater, controlKnobs *controlknobs.Knobs) *mapSession {
ms := &mapSession{
netmapUpdater: nu,
controlKnobs: controlKnobs,
privateNodeKey: privateNodeKey,
publicNodeKey: privateNodeKey.Public(),
lastDNSConfig: new(tailcfg.DNSConfig),
lastUserProfile: map[tailcfg.UserID]tailcfg.UserProfile{},
// Non-nil no-op defaults, to be optionally overridden by the caller.
logf: logger.Discard,
vlogf: logger.Discard,
cancel: func() {},
onDebug: func(context.Context, *tailcfg.Debug) error { return nil },
onSelfNodeChanged: func(*netmap.NetworkMap) {},
}
ms.sessionAliveCtx, ms.sessionAliveCtxClose = context.WithCancel(context.Background())
return ms
}
// occasionallyPrintSummary logs summary at most once every 5 minutes. The
// summary is the Netmap.VeryConcise result from the last received map response.
func (ms *mapSession) occasionallyPrintSummary(summary string) {
// Occasionally print the netmap header.
// This is handy for debugging, and our logs processing
// pipeline depends on it. (TODO: Remove this dependency.)
now := ms.clock().Now()
if now.Sub(ms.lastPrintMap) < 5*time.Minute {
return
}
ms.lastPrintMap = now
ms.logf("[v1] new network map (periodic):\n%s", summary)
}
func (ms *mapSession) clock() tstime.Clock {
return cmp.Or[tstime.Clock](ms.altClock, tstime.StdClock{})
}
func (ms *mapSession) Close() {
ms.sessionAliveCtxClose()
}
// HandleNonKeepAliveMapResponse handles a non-KeepAlive MapResponse (full or
// incremental).
//
// All fields that are valid on a KeepAlive MapResponse have already been
// handled.
//
// TODO(bradfitz): make this handle all fields later. For now (2023-08-20) this
// is [re]factoring progress enough.
func (ms *mapSession) HandleNonKeepAliveMapResponse(ctx context.Context, resp *tailcfg.MapResponse) error {
if debug := resp.Debug; debug != nil {
if err := ms.onDebug(ctx, debug); err != nil {
return err
}
}
if DevKnob.StripEndpoints() {
for _, p := range resp.Peers {
p.Endpoints = nil
}
for _, p := range resp.PeersChanged {
p.Endpoints = nil
}
}
// For responses that mutate the self node, check for updated nodeAttrs.
if resp.Node != nil {
if DevKnob.StripCaps() {
resp.Node.Capabilities = nil
resp.Node.CapMap = nil
}
// If the server is old and is still sending us Capabilities instead of
// CapMap, convert it to CapMap early so the rest of the client code can
// work only in terms of CapMap.
for _, c := range resp.Node.Capabilities {
if _, ok := resp.Node.CapMap[c]; !ok {
mak.Set(&resp.Node.CapMap, c, nil)
}
}
ms.controlKnobs.UpdateFromNodeAttributes(resp.Node.CapMap)
}
// Call Node.InitDisplayNames on any changed nodes.
initDisplayNames(cmp.Or(resp.Node.View(), ms.lastNode), resp)
ms.patchifyPeersChanged(resp)
ms.updateStateFromResponse(resp)
if ms.tryHandleIncrementally(resp) {
ms.occasionallyPrintSummary(ms.lastNetmapSummary)
return nil
}
// We have to rebuild the whole netmap (lots of garbage & work downstream of
// our UpdateFullNetmap call). This is the part we tried to avoid but
// some field mutations (especially rare ones) aren't yet handled.
if runtime.GOOS == "ios" {
// Memory is tight on iOS. Free what we can while we
// can before this potential burst of in-use memory.
debug.FreeOSMemory()
}
nm := ms.netmap()
ms.lastNetmapSummary = nm.VeryConcise()
ms.occasionallyPrintSummary(ms.lastNetmapSummary)
// If the self node changed, we might need to update persist.
if resp.Node != nil {
ms.onSelfNodeChanged(nm)
}
ms.netmapUpdater.UpdateFullNetmap(nm)
return nil
}
func (ms *mapSession) tryHandleIncrementally(res *tailcfg.MapResponse) bool {
if ms.controlKnobs != nil && ms.controlKnobs.DisableDeltaUpdates.Load() {
return false
}
nud, ok := ms.netmapUpdater.(NetmapDeltaUpdater)
if !ok {
return false
}
mutations, ok := netmap.MutationsFromMapResponse(res, time.Now())
if ok && len(mutations) > 0 {
return nud.UpdateNetmapDelta(mutations)
}
return ok
}
// updateStats are some stats from updateStateFromResponse, primarily for
// testing. It's meant to be cheap enough to always compute, though. It doesn't
// allocate.
type updateStats struct {
allNew bool
added int
removed int
changed int
}
// updateStateFromResponse updates ms from res. It takes ownership of res.
func (ms *mapSession) updateStateFromResponse(resp *tailcfg.MapResponse) {
ms.updatePeersStateFromResponse(resp)
if resp.Node != nil {
ms.lastNode = resp.Node.View()
capSet := set.Set[tailcfg.NodeCapability]{}
for _, c := range resp.Node.Capabilities {
capSet.Add(c)
}
for c := range resp.Node.CapMap {
capSet.Add(c)
}
ms.lastCapSet = capSet
}
for _, up := range resp.UserProfiles {
ms.lastUserProfile[up.ID] = up
}
// TODO(bradfitz): clean up old user profiles? maybe not worth it.
if dm := resp.DERPMap; dm != nil {
ms.vlogf("netmap: new map contains DERP map")
// Zero-valued fields in a DERPMap mean that we're not changing
// anything and are using the previous value(s).
if ldm := ms.lastDERPMap; ldm != nil {
if dm.Regions == nil {
dm.Regions = ldm.Regions
dm.OmitDefaultRegions = ldm.OmitDefaultRegions
}
if dm.HomeParams == nil {
dm.HomeParams = ldm.HomeParams
} else if oldhh := ldm.HomeParams; oldhh != nil {
// Propagate sub-fields of HomeParams
hh := dm.HomeParams
if hh.RegionScore == nil {
hh.RegionScore = oldhh.RegionScore
}
}
}
ms.lastDERPMap = dm
}
var packetFilterChanged bool
// Older way, one big blob:
if pf := resp.PacketFilter; pf != nil {
packetFilterChanged = true
mak.Set(&ms.namedPacketFilters, "base", views.SliceOf(pf))
}
// Newer way, named chunks:
if m := resp.PacketFilters; m != nil {
packetFilterChanged = true
if v, ok := m["*"]; ok && v == nil {
ms.namedPacketFilters = nil
}
for k, v := range m {
if k == "*" {
continue
}
if v != nil {
mak.Set(&ms.namedPacketFilters, k, views.SliceOf(v))
} else {
delete(ms.namedPacketFilters, k)
}
}
}
if packetFilterChanged {
var concat []tailcfg.FilterRule
for _, v := range slices.Sorted(maps.Keys(ms.namedPacketFilters)) {
concat = ms.namedPacketFilters[v].AppendTo(concat)
}
ms.lastPacketFilterRules = views.SliceOf(concat)
var err error
ms.lastParsedPacketFilter, err = filter.MatchesFromFilterRules(concat)
if err != nil {
ms.logf("parsePacketFilter: %v", err)
}
}
if c := resp.DNSConfig; c != nil {
ms.lastDNSConfig = c
}
if p := resp.SSHPolicy; p != nil {
ms.lastSSHPolicy = p
}
if v, ok := resp.CollectServices.Get(); ok {
ms.collectServices = v
}
if resp.Domain != "" {
ms.lastDomain = resp.Domain
}
if resp.DomainDataPlaneAuditLogID != "" {
ms.lastDomainAuditLogID = resp.DomainDataPlaneAuditLogID
}
if resp.Health != nil {
ms.lastHealth = resp.Health
}
if resp.TKAInfo != nil {
ms.lastTKAInfo = resp.TKAInfo
}
if resp.MaxKeyDuration > 0 {
ms.lastMaxExpiry = resp.MaxKeyDuration
}
}
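// Worked example of the named packet-filter merge above (baseRules is a
// placeholder []tailcfg.FilterRule): a response carrying
//
//	PacketFilters: map[string][]tailcfg.FilterRule{
//		"*":    nil,       // reset: drop all previously received chunks
//		"base": baseRules, // then (re)install this chunk
//	}
//
// clears namedPacketFilters and leaves only "base"; the surviving chunks are
// then concatenated in sorted key order to rebuild lastPacketFilterRules.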
var (
patchDERPRegion = clientmetric.NewCounter("controlclient_patch_derp")
patchEndpoints = clientmetric.NewCounter("controlclient_patch_endpoints")
patchCap = clientmetric.NewCounter("controlclient_patch_capver")
patchKey = clientmetric.NewCounter("controlclient_patch_key")
patchDiscoKey = clientmetric.NewCounter("controlclient_patch_discokey")
patchOnline = clientmetric.NewCounter("controlclient_patch_online")
patchLastSeen = clientmetric.NewCounter("controlclient_patch_lastseen")
patchKeyExpiry = clientmetric.NewCounter("controlclient_patch_keyexpiry")
patchCapMap = clientmetric.NewCounter("controlclient_patch_capmap")
patchKeySignature = clientmetric.NewCounter("controlclient_patch_keysig")
patchifiedPeer = clientmetric.NewCounter("controlclient_patchified_peer")
patchifiedPeerEqual = clientmetric.NewCounter("controlclient_patchified_peer_equal")
)
// updatePeersStateFromResponse updates ms.peers and ms.sortedPeers from res. It takes ownership of res.
func (ms *mapSession) updatePeersStateFromResponse(resp *tailcfg.MapResponse) (stats updateStats) {
defer func() {
if stats.removed > 0 || stats.added > 0 {
ms.rebuildSorted()
}
}()
if ms.peers == nil {
ms.peers = make(map[tailcfg.NodeID]*tailcfg.NodeView)
}
if len(resp.Peers) > 0 {
// Not delta encoded.
stats.allNew = true
keep := make(map[tailcfg.NodeID]bool, len(resp.Peers))
for _, n := range resp.Peers {
keep[n.ID] = true
if vp, ok := ms.peers[n.ID]; ok {
stats.changed++
*vp = n.View()
} else {
stats.added++
ms.peers[n.ID] = ptr.To(n.View())
}
}
for id := range ms.peers {
if !keep[id] {
stats.removed++
delete(ms.peers, id)
}
}
// Peers precludes all other delta operations so just return.
return
}
for _, id := range resp.PeersRemoved {
if _, ok := ms.peers[id]; ok {
delete(ms.peers, id)
stats.removed++
}
}
for _, n := range resp.PeersChanged {
if vp, ok := ms.peers[n.ID]; ok {
stats.changed++
*vp = n.View()
} else {
stats.added++
ms.peers[n.ID] = ptr.To(n.View())
}
}
for nodeID, seen := range resp.PeerSeenChange {
if vp, ok := ms.peers[nodeID]; ok {
mut := vp.AsStruct()
if seen {
mut.LastSeen = ptr.To(clock.Now())
} else {
mut.LastSeen = nil
}
*vp = mut.View()
stats.changed++
}
}
for nodeID, online := range resp.OnlineChange {
if vp, ok := ms.peers[nodeID]; ok {
mut := vp.AsStruct()
mut.Online = ptr.To(online)
*vp = mut.View()
stats.changed++
}
}
for _, pc := range resp.PeersChangedPatch {
vp, ok := ms.peers[pc.NodeID]
if !ok {
continue
}
stats.changed++
mut := vp.AsStruct()
if pc.DERPRegion != 0 {
mut.DERP = fmt.Sprintf("%s:%v", tailcfg.DerpMagicIP, pc.DERPRegion)
patchDERPRegion.Add(1)
}
if pc.Cap != 0 {
mut.Cap = pc.Cap
patchCap.Add(1)
}
if pc.Endpoints != nil {
mut.Endpoints = pc.Endpoints
patchEndpoints.Add(1)
}
if pc.Key != nil {
mut.Key = *pc.Key
patchKey.Add(1)
}
if pc.DiscoKey != nil {
mut.DiscoKey = *pc.DiscoKey
patchDiscoKey.Add(1)
}
if v := pc.Online; v != nil {
mut.Online = ptr.To(*v)
patchOnline.Add(1)
}
if v := pc.LastSeen; v != nil {
mut.LastSeen = ptr.To(*v)
patchLastSeen.Add(1)
}
if v := pc.KeyExpiry; v != nil {
mut.KeyExpiry = *v
patchKeyExpiry.Add(1)
}
if v := pc.KeySignature; v != nil {
mut.KeySignature = v
patchKeySignature.Add(1)
}
if v := pc.CapMap; v != nil {
mut.CapMap = v
patchCapMap.Add(1)
}
*vp = mut.View()
}
return
}
// rebuildSorted rebuilds ms.sortedPeers from ms.peers. It should be called
// after any additions or removals from peers.
func (ms *mapSession) rebuildSorted() {
if ms.sortedPeers == nil {
ms.sortedPeers = make([]*tailcfg.NodeView, 0, len(ms.peers))
} else {
if len(ms.sortedPeers) > len(ms.peers) {
clear(ms.sortedPeers[len(ms.peers):])
}
ms.sortedPeers = ms.sortedPeers[:0]
}
for _, p := range ms.peers {
ms.sortedPeers = append(ms.sortedPeers, p)
}
sort.Slice(ms.sortedPeers, func(i, j int) bool {
return ms.sortedPeers[i].ID() < ms.sortedPeers[j].ID()
})
}
func (ms *mapSession) addUserProfile(nm *netmap.NetworkMap, userID tailcfg.UserID) {
if userID == 0 {
return
}
if _, dup := nm.UserProfiles[userID]; dup {
// Already populated it from a previous peer.
return
}
if up, ok := ms.lastUserProfile[userID]; ok {
nm.UserProfiles[userID] = up
}
}
var debugPatchifyPeer = envknob.RegisterBool("TS_DEBUG_PATCHIFY_PEER")
// patchifyPeersChanged mutates resp to promote PeersChanged entries to PeersChangedPatch
// when possible.
func (ms *mapSession) patchifyPeersChanged(resp *tailcfg.MapResponse) {
filtered := resp.PeersChanged[:0]
for _, n := range resp.PeersChanged {
if p, ok := ms.patchifyPeer(n); ok {
patchifiedPeer.Add(1)
if debugPatchifyPeer() {
patchj, _ := json.Marshal(p)
ms.logf("debug: patchifyPeer[ID=%v]: %s", n.ID, patchj)
}
if p != nil {
resp.PeersChangedPatch = append(resp.PeersChangedPatch, p)
} else {
patchifiedPeerEqual.Add(1)
}
} else {
filtered = append(filtered, n)
}
}
resp.PeersChanged = filtered
if len(resp.PeersChanged) == 0 {
resp.PeersChanged = nil
}
}
var nodeFields = sync.OnceValue(getNodeFields)
// getNodeFields returns the field names of tailcfg.Node.
func getNodeFields() []string {
rt := reflect.TypeFor[tailcfg.Node]()
ret := make([]string, rt.NumField())
for i := range rt.NumField() {
ret[i] = rt.Field(i).Name
}
return ret
}
// patchifyPeer returns a *tailcfg.PeerChange of the session's existing copy of
// the n.ID Node to n.
//
// It returns ok=false if a patch can't be made, (V, ok) on a delta, or (nil,
// true) if all the fields were identical (a zero change).
func (ms *mapSession) patchifyPeer(n *tailcfg.Node) (_ *tailcfg.PeerChange, ok bool) {
was, ok := ms.peers[n.ID]
if !ok {
return nil, false
}
return peerChangeDiff(*was, n)
}
// peerChangeDiff returns the difference from 'was' to 'n', if possible.
//
// It returns (nil, true) if the fields were identical.
func peerChangeDiff(was tailcfg.NodeView, n *tailcfg.Node) (_ *tailcfg.PeerChange, ok bool) {
var ret *tailcfg.PeerChange
pc := func() *tailcfg.PeerChange {
if ret == nil {
ret = new(tailcfg.PeerChange)
}
return ret
}
for _, field := range nodeFields() {
switch field {
default:
// The whole point of using reflect in this function is to panic
// here in tests if we forget to handle a new field.
panic("unhandled field: " + field)
case "computedHostIfDifferent", "ComputedName", "ComputedNameWithHost":
// Caller's responsibility to have populated these.
continue
case "DataPlaneAuditLogID":
// Not sent for peers.
case "Capabilities":
// Deprecated; see https://github.com/tailscale/tailscale/issues/11508
// And it was never sent by any known control server.
case "ID":
if was.ID() != n.ID {
return nil, false
}
case "StableID":
if was.StableID() != n.StableID {
return nil, false
}
case "Name":
if was.Name() != n.Name {
return nil, false
}
case "User":
if was.User() != n.User {
return nil, false
}
case "Sharer":
if was.Sharer() != n.Sharer {
return nil, false
}
case "Key":
if was.Key() != n.Key {
pc().Key = ptr.To(n.Key)
}
case "KeyExpiry":
if !was.KeyExpiry().Equal(n.KeyExpiry) {
pc().KeyExpiry = ptr.To(n.KeyExpiry)
}
case "KeySignature":
if !was.KeySignature().Equal(n.KeySignature) {
pc().KeySignature = slices.Clone(n.KeySignature)
}
case "Machine":
if was.Machine() != n.Machine {
return nil, false
}
case "DiscoKey":
if was.DiscoKey() != n.DiscoKey {
pc().DiscoKey = ptr.To(n.DiscoKey)
}
case "Addresses":
if !views.SliceEqual(was.Addresses(), views.SliceOf(n.Addresses)) {
return nil, false
}
case "AllowedIPs":
if !views.SliceEqual(was.AllowedIPs(), views.SliceOf(n.AllowedIPs)) {
return nil, false
}
case "Endpoints":
if !views.SliceEqual(was.Endpoints(), views.SliceOf(n.Endpoints)) {
pc().Endpoints = slices.Clone(n.Endpoints)
}
case "DERP":
if was.DERP() != n.DERP {
ip, portStr, err := net.SplitHostPort(n.DERP)
if err != nil || ip != "127.3.3.40" {
return nil, false
}
port, err := strconv.Atoi(portStr)
if err != nil || port < 1 || port > 65535 {
return nil, false
}
pc().DERPRegion = port
}
case "Hostinfo":
if !was.Hostinfo().Valid() && !n.Hostinfo.Valid() {
continue
}
if !was.Hostinfo().Valid() || !n.Hostinfo.Valid() {
return nil, false
}
if !was.Hostinfo().Equal(n.Hostinfo) {
return nil, false
}
case "Created":
if !was.Created().Equal(n.Created) {
return nil, false
}
case "Cap":
if was.Cap() != n.Cap {
pc().Cap = n.Cap
}
case "CapMap":
if len(n.CapMap) != was.CapMap().Len() {
if n.CapMap == nil {
pc().CapMap = make(tailcfg.NodeCapMap)
} else {
pc().CapMap = maps.Clone(n.CapMap)
}
break
}
was.CapMap().Range(func(k tailcfg.NodeCapability, v views.Slice[tailcfg.RawMessage]) bool {
nv, ok := n.CapMap[k]
if !ok || !views.SliceEqual(v, views.SliceOf(nv)) {
pc().CapMap = maps.Clone(n.CapMap)
return false
}
return true
})
case "Tags":
if !views.SliceEqual(was.Tags(), views.SliceOf(n.Tags)) {
return nil, false
}
case "PrimaryRoutes":
if !views.SliceEqual(was.PrimaryRoutes(), views.SliceOf(n.PrimaryRoutes)) {
return nil, false
}
case "Online":
wasOnline := was.Online()
if n.Online != nil && wasOnline != nil && *n.Online != *wasOnline {
pc().Online = ptr.To(*n.Online)
}
case "LastSeen":
wasSeen := was.LastSeen()
if n.LastSeen != nil && wasSeen != nil && !wasSeen.Equal(*n.LastSeen) {
pc().LastSeen = ptr.To(*n.LastSeen)
}
case "MachineAuthorized":
if was.MachineAuthorized() != n.MachineAuthorized {
return nil, false
}
case "UnsignedPeerAPIOnly":
if was.UnsignedPeerAPIOnly() != n.UnsignedPeerAPIOnly {
return nil, false
}
case "IsWireGuardOnly":
if was.IsWireGuardOnly() != n.IsWireGuardOnly {
return nil, false
}
case "IsJailed":
if was.IsJailed() != n.IsJailed {
return nil, false
}
case "Expired":
if was.Expired() != n.Expired {
return nil, false
}
case "SelfNodeV4MasqAddrForThisPeer":
va, vb := was.SelfNodeV4MasqAddrForThisPeer(), n.SelfNodeV4MasqAddrForThisPeer
if va == nil && vb == nil {
continue
}
if va == nil || vb == nil || *va != *vb {
return nil, false
}
case "SelfNodeV6MasqAddrForThisPeer":
va, vb := was.SelfNodeV6MasqAddrForThisPeer(), n.SelfNodeV6MasqAddrForThisPeer
if va == nil && vb == nil {
continue
}
if va == nil || vb == nil || *va != *vb {
return nil, false
}
case "ExitNodeDNSResolvers":
va, vb := was.ExitNodeDNSResolvers(), views.SliceOfViews(n.ExitNodeDNSResolvers)
if va.Len() != vb.Len() {
return nil, false
}
for i := range va.Len() {
if !va.At(i).Equal(vb.At(i)) {
return nil, false
}
}
}
}
if ret != nil {
ret.NodeID = n.ID
}
return ret, true
}
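// Worked example: control encodes a peer's DERP home as tailcfg.DerpMagicIP
// plus the region ID as the port, so a peer whose DERP field changed to
// "127.3.3.40:10" diffs down to a PeerChange with only NodeID and
// DERPRegion: 10 set, and the much larger PeersChanged entry can be dropped.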
// netmap returns a fully populated NetworkMap from the last state seen from
// a call to updateStateFromResponse, filling in omitted
// information from prior MapResponse values.
func (ms *mapSession) netmap() *netmap.NetworkMap {
peerViews := make([]tailcfg.NodeView, len(ms.sortedPeers))
for i, vp := range ms.sortedPeers {
peerViews[i] = *vp
}
nm := &netmap.NetworkMap{
NodeKey: ms.publicNodeKey,
PrivateKey: ms.privateNodeKey,
MachineKey: ms.machinePubKey,
Peers: peerViews,
UserProfiles: make(map[tailcfg.UserID]tailcfg.UserProfile),
Domain: ms.lastDomain,
DomainAuditLogID: ms.lastDomainAuditLogID,
DNS: *ms.lastDNSConfig,
PacketFilter: ms.lastParsedPacketFilter,
PacketFilterRules: ms.lastPacketFilterRules,
SSHPolicy: ms.lastSSHPolicy,
CollectServices: ms.collectServices,
DERPMap: ms.lastDERPMap,
ControlHealth: ms.lastHealth,
TKAEnabled: ms.lastTKAInfo != nil && !ms.lastTKAInfo.Disabled,
MaxKeyDuration: ms.lastMaxExpiry,
}
if ms.lastTKAInfo != nil && ms.lastTKAInfo.Head != "" {
if err := nm.TKAHead.UnmarshalText([]byte(ms.lastTKAInfo.Head)); err != nil {
ms.logf("error unmarshalling TKAHead: %v", err)
nm.TKAEnabled = false
}
}
if node := ms.lastNode; node.Valid() {
nm.SelfNode = node
nm.Expiry = node.KeyExpiry()
nm.Name = node.Name()
nm.AllCaps = ms.lastCapSet
}
ms.addUserProfile(nm, nm.User())
for _, peer := range peerViews {
ms.addUserProfile(nm, peer.Sharer())
ms.addUserProfile(nm, peer.User())
}
if DevKnob.ForceProxyDNS() {
nm.DNS.Proxied = true
}
return nm
}

406
vendor/tailscale.com/control/controlclient/noise.go generated vendored Normal file
View File

@@ -0,0 +1,406 @@
// Copyright (c) Tailscale Inc & AUTHORS
// SPDX-License-Identifier: BSD-3-Clause
package controlclient
import (
"bytes"
"cmp"
"context"
"encoding/json"
"errors"
"math"
"net/http"
"net/url"
"sync"
"time"
"golang.org/x/net/http2"
"tailscale.com/control/controlhttp"
"tailscale.com/envknob"
"tailscale.com/health"
"tailscale.com/internal/noiseconn"
"tailscale.com/net/dnscache"
"tailscale.com/net/netmon"
"tailscale.com/net/tsdial"
"tailscale.com/tailcfg"
"tailscale.com/tstime"
"tailscale.com/types/key"
"tailscale.com/types/logger"
"tailscale.com/util/mak"
"tailscale.com/util/multierr"
"tailscale.com/util/singleflight"
"tailscale.com/util/testenv"
)
// NoiseClient provides a http.Client to connect to tailcontrol over
// the ts2021 protocol.
type NoiseClient struct {
// Client is an HTTP client to talk to the coordination server.
// It automatically makes a new Noise connection as needed.
// It does not support node key proofs. To do that, call
// noiseClient.getConn instead to make a connection.
*http.Client
// h2t is the HTTP/2 transport we use solely to create new
// *http2.ClientConns. We don't use its connection pool and we don't use its
// dialing. We use it for exactly one reason: its idle timeout that can only
// be configured via the HTTP/1 config. And then we call NewClientConn (with
// an existing Noise connection) on the http2.Transport which sets up an
// http2.ClientConn using that idle timeout from an http1.Transport.
h2t *http2.Transport
// sfDial ensures that two concurrent requests for a noise connection only
// produce one shared one between the two callers.
sfDial singleflight.Group[struct{}, *noiseconn.Conn]
dialer *tsdial.Dialer
dnsCache *dnscache.Resolver
privKey key.MachinePrivate
serverPubKey key.MachinePublic
host string // the host part of serverURL
httpPort string // the default port to dial
httpsPort string // the fallback Noise-over-https port or empty if none
// dialPlan optionally returns a ControlDialPlan previously received
// from the control server; either the function or the return value can
// be nil.
dialPlan func() *tailcfg.ControlDialPlan
logf logger.Logf
netMon *netmon.Monitor
health *health.Tracker
// mu only protects the following variables.
mu sync.Mutex
closed bool
last *noiseconn.Conn // or nil
nextID int
connPool map[int]*noiseconn.Conn // active connections not yet closed; see noiseconn.Conn.Close
}
// NoiseOpts contains options for the NewNoiseClient function. All fields are
// required unless otherwise specified.
type NoiseOpts struct {
// PrivKey is this node's private key.
PrivKey key.MachinePrivate
// ServerPubKey is the public key of the server.
ServerPubKey key.MachinePublic
// ServerURL is the URL of the server to connect to.
ServerURL string
// Dialer's SystemDial function is used to connect to the server.
Dialer *tsdial.Dialer
// DNSCache is the caching Resolver to use to connect to the server.
//
// This field can be nil.
DNSCache *dnscache.Resolver
// Logf is the log function to use. This field can be nil.
Logf logger.Logf
// NetMon is the network monitor that, if set, will be used to get the
// network interface state. This field can be nil; if so, the current
// state will be looked up dynamically.
NetMon *netmon.Monitor
// HealthTracker, if non-nil, is the health tracker to use.
HealthTracker *health.Tracker
// DialPlan, if set, is a function that should return an explicit plan
// on how to connect to the server.
DialPlan func() *tailcfg.ControlDialPlan
}
// controlIsPlaintext is whether we should assume that the controlplane is only accessible
// over plaintext HTTP (as the first hop, before the ts2021 encryption begins).
// This is used by some tests which don't have a real TLS certificate.
var controlIsPlaintext = envknob.RegisterBool("TS_CONTROL_IS_PLAINTEXT_HTTP")
// NewNoiseClient returns a new NoiseClient for the provided server and machine key.
// serverURL is of the form https://<host>:<port> (no trailing slash).
//
// netMon may be nil; if non-nil, it's used to do faster interface lookups.
// dialPlan may be nil.
func NewNoiseClient(opts NoiseOpts) (*NoiseClient, error) {
u, err := url.Parse(opts.ServerURL)
if err != nil {
return nil, err
}
var httpPort string
var httpsPort string
if port := u.Port(); port != "" {
// If there is an explicit port specified, trust the scheme and hope for the best
if u.Scheme == "http" {
httpPort = port
httpsPort = "443"
if (testenv.InTest() || controlIsPlaintext()) && (u.Hostname() == "127.0.0.1" || u.Hostname() == "localhost") {
httpsPort = ""
}
} else {
httpPort = "80"
httpsPort = port
}
} else {
// Otherwise, use the standard ports
httpPort = "80"
httpsPort = "443"
}
np := &NoiseClient{
serverPubKey: opts.ServerPubKey,
privKey: opts.PrivKey,
host: u.Hostname(),
httpPort: httpPort,
httpsPort: httpsPort,
dialer: opts.Dialer,
dnsCache: opts.DNSCache,
dialPlan: opts.DialPlan,
logf: opts.Logf,
netMon: opts.NetMon,
health: opts.HealthTracker,
}
// Create the HTTP/2 Transport using a net/http.Transport
// (which only does HTTP/1) because it's the only way to
// configure certain properties on the http2.Transport. But we
// never actually use the net/http.Transport for any HTTP/1
// requests.
h2Transport, err := http2.ConfigureTransports(&http.Transport{
IdleConnTimeout: time.Minute,
})
if err != nil {
return nil, err
}
np.h2t = h2Transport
np.Client = &http.Client{Transport: np}
return np, nil
}
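// Example (illustrative sketch, not upstream API): constructing a NoiseClient.
// The *tsdial.Dialer, the server's machine public key, and the log function are
// assumed to come from the caller's existing setup; the server URL shown is a
// placeholder.
func exampleNewNoiseClient(dialer *tsdial.Dialer, serverPub key.MachinePublic, logf logger.Logf) (*NoiseClient, error) {
	return NewNoiseClient(NoiseOpts{
		PrivKey:      key.NewMachine(), // this node's machine private key
		ServerPubKey: serverPub,
		ServerURL:    "https://controlplane.example.com",
		Dialer:       dialer,
		Logf:         logf,
	})
}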
// GetSingleUseRoundTripper returns a RoundTripper that can only be used once
// (and must be used once) to make a single HTTP request over the noise channel
// to the coordination server.
//
// In addition to the RoundTripper, it returns the HTTP/2 channel's early noise
// payload, if any.
func (nc *NoiseClient) GetSingleUseRoundTripper(ctx context.Context) (http.RoundTripper, *tailcfg.EarlyNoise, error) {
for tries := 0; tries < 3; tries++ {
conn, err := nc.getConn(ctx)
if err != nil {
return nil, nil, err
}
ok, earlyPayloadMaybeNil, err := conn.ReserveNewRequest(ctx)
if err != nil {
return nil, nil, err
}
if ok {
return conn, earlyPayloadMaybeNil, nil
}
}
return nil, nil, errors.New("[unexpected] failed to reserve a request on a connection")
}
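// Example (illustrative sketch): issuing one request over a dedicated Noise
// connection. The request construction and any use of the early-noise payload
// are assumed to be handled by the caller.
func exampleSingleUseRequest(ctx context.Context, nc *NoiseClient, req *http.Request) (*http.Response, error) {
	rt, earlyNoise, err := nc.GetSingleUseRoundTripper(ctx)
	if err != nil {
		return nil, err
	}
	_ = earlyNoise // may be nil; carries the server's early payload when present
	return rt.RoundTrip(req.WithContext(ctx))
}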
// contextErr is an error that wraps another error and is used to indicate that
// the error was because a context expired.
type contextErr struct {
err error
}
func (e contextErr) Error() string {
return e.err.Error()
}
func (e contextErr) Unwrap() error {
return e.err
}
// getConn returns a noiseconn.Conn that can be used to make requests to the
// coordination server. It may return a cached connection or create a new one.
// Dials are singleflighted, so concurrent calls to getConn may only dial once.
// As such, context values may not be respected as there are no guarantees that
// the context passed to getConn is the same as the context passed to dial.
func (nc *NoiseClient) getConn(ctx context.Context) (*noiseconn.Conn, error) {
nc.mu.Lock()
if last := nc.last; last != nil && last.CanTakeNewRequest() {
nc.mu.Unlock()
return last, nil
}
nc.mu.Unlock()
for {
// We singleflight the dial to avoid making multiple connections; however,
// that means that we can't simply cancel the dial if the context is
// canceled. Instead, we have to additionally check that the context
// which was canceled is our context and retry if our context is still
// valid.
conn, err, _ := nc.sfDial.Do(struct{}{}, func() (*noiseconn.Conn, error) {
c, err := nc.dial(ctx)
if err != nil {
if ctx.Err() != nil {
return nil, contextErr{ctx.Err()}
}
return nil, err
}
return c, nil
})
var ce contextErr
if err == nil || !errors.As(err, &ce) {
return conn, err
}
if ctx.Err() == nil {
// The dial failed because of a context error, but our context
// is still valid. Retry.
continue
}
// The dial failed because our context was canceled. Return the
// underlying error.
return nil, ce.Unwrap()
}
}
func (nc *NoiseClient) RoundTrip(req *http.Request) (*http.Response, error) {
ctx := req.Context()
conn, err := nc.getConn(ctx)
if err != nil {
return nil, err
}
return conn.RoundTrip(req)
}
// connClosed removes the connection with the provided ID from the pool
// of active connections.
func (nc *NoiseClient) connClosed(id int) {
nc.mu.Lock()
defer nc.mu.Unlock()
conn := nc.connPool[id]
if conn != nil {
delete(nc.connPool, id)
if nc.last == conn {
nc.last = nil
}
}
}
// Close closes all the underlying noise connections.
// It is a no-op and returns nil if the connection is already closed.
func (nc *NoiseClient) Close() error {
nc.mu.Lock()
nc.closed = true
conns := nc.connPool
nc.connPool = nil
nc.mu.Unlock()
var errors []error
for _, c := range conns {
if err := c.Close(); err != nil {
errors = append(errors, err)
}
}
return multierr.New(errors...)
}
// dial opens a new connection to tailcontrol, fetching the server noise key
// if not cached.
func (nc *NoiseClient) dial(ctx context.Context) (*noiseconn.Conn, error) {
nc.mu.Lock()
connID := nc.nextID
nc.nextID++
nc.mu.Unlock()
if tailcfg.CurrentCapabilityVersion > math.MaxUint16 {
// Panic, because a test should have started failing several
// thousand version numbers before getting to this point.
panic("capability version is too high to fit in the wire protocol")
}
var dialPlan *tailcfg.ControlDialPlan
if nc.dialPlan != nil {
dialPlan = nc.dialPlan()
}
// If we have a dial plan, then set our timeout as slightly longer than
// the maximum amount of time contained therein; we assume that
// explicit instructions on timeouts are more useful than a single
// hard-coded timeout.
//
// The default value of 5 is chosen so that, when there's no dial plan,
// we retain the previous behaviour of 10 seconds end-to-end timeout.
timeoutSec := 5.0
if dialPlan != nil {
for _, c := range dialPlan.Candidates {
if v := c.DialStartDelaySec + c.DialTimeoutSec; v > timeoutSec {
timeoutSec = v
}
}
}
// After we establish a connection, we need some time to actually
// upgrade it into a Noise connection. With a ballpark worst-case RTT
// of 1000ms, give ourselves an extra 5 seconds to complete the
// handshake.
timeoutSec += 5
// Be extremely defensive and ensure that the timeout is in the range
// [5, 60] seconds (e.g. if we accidentally get a negative number).
if timeoutSec > 60 {
timeoutSec = 60
} else if timeoutSec < 5 {
timeoutSec = 5
}
timeout := time.Duration(timeoutSec * float64(time.Second))
ctx, cancel := context.WithTimeout(ctx, timeout)
defer cancel()
clientConn, err := (&controlhttp.Dialer{
Hostname: nc.host,
HTTPPort: nc.httpPort,
HTTPSPort: cmp.Or(nc.httpsPort, controlhttp.NoPort),
MachineKey: nc.privKey,
ControlKey: nc.serverPubKey,
ProtocolVersion: uint16(tailcfg.CurrentCapabilityVersion),
Dialer: nc.dialer.SystemDial,
DNSCache: nc.dnsCache,
DialPlan: dialPlan,
Logf: nc.logf,
NetMon: nc.netMon,
HealthTracker: nc.health,
Clock: tstime.StdClock{},
}).Dial(ctx)
if err != nil {
return nil, err
}
ncc, err := noiseconn.New(clientConn.Conn, nc.h2t, connID, nc.connClosed)
if err != nil {
return nil, err
}
nc.mu.Lock()
if nc.closed {
nc.mu.Unlock()
ncc.Close() // Needs to be called without holding the lock.
return nil, errors.New("noise client closed")
}
defer nc.mu.Unlock()
mak.Set(&nc.connPool, connID, ncc)
nc.last = ncc
return ncc, nil
}
// post does a POST to the control server at the given path, JSON-encoding body.
// The provided nodeKey is an optional load balancing hint.
func (nc *NoiseClient) post(ctx context.Context, path string, nodeKey key.NodePublic, body any) (*http.Response, error) {
jbody, err := json.Marshal(body)
if err != nil {
return nil, err
}
req, err := http.NewRequestWithContext(ctx, "POST", "https://"+nc.host+path, bytes.NewReader(jbody))
if err != nil {
return nil, err
}
addLBHeader(req, nodeKey)
req.Header.Set("Content-Type", "application/json")
conn, err := nc.getConn(ctx)
if err != nil {
return nil, err
}
return conn.RoundTrip(req)
}

42
vendor/tailscale.com/control/controlclient/sign.go generated vendored Normal file
View File

@@ -0,0 +1,42 @@
// Copyright (c) Tailscale Inc & AUTHORS
// SPDX-License-Identifier: BSD-3-Clause
package controlclient
import (
"crypto"
"errors"
"fmt"
"time"
"tailscale.com/tailcfg"
"tailscale.com/types/key"
)
var (
errNoCertStore = errors.New("no certificate store")
errCertificateNotConfigured = errors.New("no certificate subject configured")
errUnsupportedSignatureVersion = errors.New("unsupported signature version")
)
// HashRegisterRequest generates the hash required to sign or verify a
// tailcfg.RegisterRequest.
func HashRegisterRequest(
version tailcfg.SignatureType, ts time.Time, serverURL string, deviceCert []byte,
serverPubKey, machinePubKey key.MachinePublic) ([]byte, error) {
h := crypto.SHA256.New()
// hash.Hash.Write never returns an error, so we don't check for one here.
switch version {
case tailcfg.SignatureV1:
fmt.Fprintf(h, "%s%s%s%s%s",
ts.UTC().Format(time.RFC3339), serverURL, deviceCert, serverPubKey.ShortString(), machinePubKey.ShortString())
case tailcfg.SignatureV2:
fmt.Fprintf(h, "%s%s%s%s%s",
ts.UTC().Format(time.RFC3339), serverURL, deviceCert, serverPubKey, machinePubKey)
default:
return nil, errUnsupportedSignatureVersion
}
return h.Sum(nil), nil
}
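// Example (illustrative sketch): hashing the fields of a register request with
// the V2 scheme before signing. The device certificate and both machine public
// keys are assumed to be supplied by the caller.
func exampleHashRegisterRequestV2(serverURL string, deviceCert []byte, serverPub, machinePub key.MachinePublic) ([]byte, error) {
	return HashRegisterRequest(tailcfg.SignatureV2, time.Now(), serverURL, deviceCert, serverPub, machinePub)
}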

View File

@@ -0,0 +1,205 @@
// Copyright (c) Tailscale Inc & AUTHORS
// SPDX-License-Identifier: BSD-3-Clause
//go:build windows
// darwin,cgo is also supported by certstore but untested, so it is not enabled.
package controlclient
import (
"crypto"
"crypto/rsa"
"crypto/x509"
"errors"
"fmt"
"sync"
"time"
"github.com/tailscale/certstore"
"tailscale.com/tailcfg"
"tailscale.com/types/key"
"tailscale.com/util/syspolicy"
)
var getMachineCertificateSubjectOnce struct {
sync.Once
v string // Subject of machine certificate to search for
}
// getMachineCertificateSubject returns the exact name of a Subject that needs
// to be present in an identity's certificate chain to sign a RegisterRequest,
// formatted as per pkix.Name.String(). The Subject may be that of the identity
// itself, an intermediate CA or the root CA.
//
// If getMachineCertificateSubject() returns "" then no lookup will occur and
// each RegisterRequest will be unsigned.
//
// Example: "CN=Tailscale Inc Test Root CA,OU=Tailscale Inc Test Certificate Authority,O=Tailscale Inc,ST=ON,C=CA"
func getMachineCertificateSubject() string {
getMachineCertificateSubjectOnce.Do(func() {
getMachineCertificateSubjectOnce.v, _ = syspolicy.GetString(syspolicy.MachineCertificateSubject, "")
})
return getMachineCertificateSubjectOnce.v
}
var (
errNoMatch = errors.New("no matching certificate")
errBadRequest = errors.New("malformed request")
)
func isSupportedCertificate(cert *x509.Certificate) bool {
return cert.PublicKeyAlgorithm == x509.RSA
}
func isSubjectInChain(subject string, chain []*x509.Certificate) bool {
if len(chain) == 0 || chain[0] == nil {
return false
}
for _, c := range chain {
if c == nil {
continue
}
if c.Subject.String() == subject {
return true
}
}
return false
}
func selectIdentityFromSlice(subject string, ids []certstore.Identity, now time.Time) (certstore.Identity, []*x509.Certificate) {
var bestCandidate struct {
id certstore.Identity
chain []*x509.Certificate
}
for _, id := range ids {
chain, err := id.CertificateChain()
if err != nil {
continue
}
if len(chain) < 1 {
continue
}
if !isSupportedCertificate(chain[0]) {
continue
}
if now.Before(chain[0].NotBefore) || now.After(chain[0].NotAfter) {
// Certificate is not valid at this time
continue
}
if !isSubjectInChain(subject, chain) {
continue
}
// Select the most recently issued certificate. If there is a tie, pick
// one arbitrarily.
if len(bestCandidate.chain) > 0 && bestCandidate.chain[0].NotBefore.After(chain[0].NotBefore) {
continue
}
bestCandidate.id = id
bestCandidate.chain = chain
}
return bestCandidate.id, bestCandidate.chain
}
// findIdentity locates an identity from the Windows or Darwin certificate
// store. It returns an identity whose certificate chain contains a matching
// Subject (preferring the most recently issued match), so it is possible to
// search for the leaf certificate, intermediate CA or root CA. If err is nil
// then the returned identity will
// never be nil (if no identity is found, the error errNoMatch will be
// returned). If an identity is returned then its certificate chain is also
// returned.
func findIdentity(subject string, st certstore.Store) (certstore.Identity, []*x509.Certificate, error) {
ids, err := st.Identities()
if err != nil {
return nil, nil, err
}
selected, chain := selectIdentityFromSlice(subject, ids, clock.Now())
for _, id := range ids {
if id != selected {
id.Close()
}
}
if selected == nil {
return nil, nil, errNoMatch
}
return selected, chain, nil
}
// signRegisterRequest looks for a suitable machine identity from the local
// system certificate store, and if one is found, signs the RegisterRequest
// using that identity's public key. In addition to the signature, the full
// certificate chain is included so that the control server can validate the
// certificate from a copy of the root CA's certificate.
func signRegisterRequest(req *tailcfg.RegisterRequest, serverURL string, serverPubKey, machinePubKey key.MachinePublic) (err error) {
defer func() {
if err != nil {
err = fmt.Errorf("signRegisterRequest: %w", err)
}
}()
if req.Timestamp == nil {
return errBadRequest
}
machineCertificateSubject := getMachineCertificateSubject()
if machineCertificateSubject == "" {
return errCertificateNotConfigured
}
st, err := certstore.Open(certstore.System)
if err != nil {
return fmt.Errorf("open cert store: %w", err)
}
defer st.Close()
id, chain, err := findIdentity(machineCertificateSubject, st)
if err != nil {
return fmt.Errorf("find identity: %w", err)
}
defer id.Close()
signer, err := id.Signer()
if err != nil {
return fmt.Errorf("create signer: %w", err)
}
cl := 0
for _, c := range chain {
cl += len(c.Raw)
}
req.DeviceCert = make([]byte, 0, cl)
for _, c := range chain {
req.DeviceCert = append(req.DeviceCert, c.Raw...)
}
req.SignatureType = tailcfg.SignatureV2
h, err := HashRegisterRequest(req.SignatureType, req.Timestamp.UTC(), serverURL, req.DeviceCert, serverPubKey, machinePubKey)
if err != nil {
return fmt.Errorf("hash: %w", err)
}
req.Signature, err = signer.Sign(nil, h, &rsa.PSSOptions{
SaltLength: rsa.PSSSaltLengthEqualsHash,
Hash: crypto.SHA256,
})
if err != nil {
return fmt.Errorf("sign: %w", err)
}
return nil
}

View File

@@ -0,0 +1,16 @@
// Copyright (c) Tailscale Inc & AUTHORS
// SPDX-License-Identifier: BSD-3-Clause
//go:build !windows
package controlclient
import (
"tailscale.com/tailcfg"
"tailscale.com/types/key"
)
// signRegisterRequest on non-supported platforms always returns errNoCertStore.
func signRegisterRequest(req *tailcfg.RegisterRequest, serverURL string, serverPubKey, machinePubKey key.MachinePublic) error {
return errNoCertStore
}

125
vendor/tailscale.com/control/controlclient/status.go generated vendored Normal file
View File

@@ -0,0 +1,125 @@
// Copyright (c) Tailscale Inc & AUTHORS
// SPDX-License-Identifier: BSD-3-Clause
package controlclient
import (
"encoding/json"
"fmt"
"reflect"
"tailscale.com/types/netmap"
"tailscale.com/types/persist"
"tailscale.com/types/structs"
)
// State is the high-level state of the client. It is used only in
// unit tests for proper sequencing; don't depend on it anywhere else.
//
// TODO(apenwarr): eliminate the state, as it's now obsolete.
//
// apenwarr: Historical note: controlclient.Auto was originally
// intended to be the state machine for the whole tailscale client, but that
// turned out to not be the right abstraction layer, and it moved to
// ipn.Backend. Since ipn.Backend now has a state machine, it would be
// much better if controlclient could be a simple stateless API. But the
// current server-side API (two interlocking polling https calls) makes that
// very hard to implement. A server side API change could untangle this and
// remove all the statefulness.
type State int
const (
StateNew = State(iota)
StateNotAuthenticated
StateAuthenticating
StateURLVisitRequired
StateAuthenticated
StateSynchronized // connected and received map update
)
func (s State) AppendText(b []byte) ([]byte, error) {
return append(b, s.String()...), nil
}
func (s State) MarshalText() ([]byte, error) {
return []byte(s.String()), nil
}
func (s State) String() string {
switch s {
case StateNew:
return "state:new"
case StateNotAuthenticated:
return "state:not-authenticated"
case StateAuthenticating:
return "state:authenticating"
case StateURLVisitRequired:
return "state:url-visit-required"
case StateAuthenticated:
return "state:authenticated"
case StateSynchronized:
return "state:synchronized"
default:
return fmt.Sprintf("state:unknown:%d", int(s))
}
}
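// Example (illustrative sketch): State implements TextMarshaler via the value
// receiver above, so it JSON-encodes as its string form.
func exampleStateJSON() (string, error) {
	b, err := json.Marshal(StateSynchronized) // "state:synchronized"
	return string(b), err
}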
type Status struct {
_ structs.Incomparable
// Err, if non-nil, is an error that occurred while logging in.
//
// If it's of type UserVisibleError then it's meant to be shown to users in
// their Tailscale client. Otherwise it's just logged to tailscaled's logs.
Err error
// URL, if non-empty, is the interactive URL to visit to finish logging in.
URL string
// NetMap is the latest server-pushed state of the tailnet network.
NetMap *netmap.NetworkMap
// Persist, when Valid, is the locally persisted configuration.
//
// TODO(bradfitz,maisem): clarify this.
Persist persist.PersistView
// state is the internal state. It should not be exposed outside this
// package, but we have some automated tests elsewhere that need to
// use it via the StateForTest accessor.
// TODO(apenwarr): Unexport or remove these.
state State
}
// LoginFinished reports whether the controlclient is in its "StateAuthenticated"
// state where it's in a happy register state but not yet in a map poll.
//
// TODO(bradfitz): delete this and everything around Status.state.
func (s *Status) LoginFinished() bool { return s.state == StateAuthenticated }
// StateForTest returns the internal state of s for tests only.
func (s *Status) StateForTest() State { return s.state }
// SetStateForTest sets the internal state of s for tests only.
func (s *Status) SetStateForTest(state State) { s.state = state }
// Equal reports whether s and s2 are equal.
func (s *Status) Equal(s2 *Status) bool {
if s == nil && s2 == nil {
return true
}
return s != nil && s2 != nil &&
s.Err == s2.Err &&
s.URL == s2.URL &&
s.state == s2.state &&
reflect.DeepEqual(s.Persist, s2.Persist) &&
reflect.DeepEqual(s.NetMap, s2.NetMap)
}
func (s Status) String() string {
b, err := json.MarshalIndent(s, "", "\t")
if err != nil {
panic(err)
}
return s.state.String() + " " + string(b)
}

612
vendor/tailscale.com/control/controlhttp/client.go generated vendored Normal file
View File

@@ -0,0 +1,612 @@
// Copyright (c) Tailscale Inc & AUTHORS
// SPDX-License-Identifier: BSD-3-Clause
//go:build !js
// Package controlhttp implements the Tailscale 2021 control protocol
// base transport over HTTP.
//
// This tunnels the protocol in control/controlbase over HTTP with a
// variety of compatibility fallbacks for handling picky or deep
// inspecting proxies.
//
// In the happy path, a client makes a single cleartext HTTP request
// to the server, the server responds with 101 Switching Protocols,
// and the control base protocol takes place over plain TCP.
//
// In the compatibility path, the client does the above over HTTPS,
// resulting in double encryption (once for the control transport, and
// once for the outer TLS layer).
package controlhttp
import (
"context"
"crypto/tls"
"encoding/base64"
"errors"
"fmt"
"io"
"math"
"net"
"net/http"
"net/http/httptrace"
"net/netip"
"net/url"
"runtime"
"sort"
"sync/atomic"
"time"
"tailscale.com/control/controlbase"
"tailscale.com/envknob"
"tailscale.com/health"
"tailscale.com/net/dnscache"
"tailscale.com/net/dnsfallback"
"tailscale.com/net/netutil"
"tailscale.com/net/sockstats"
"tailscale.com/net/tlsdial"
"tailscale.com/net/tshttpproxy"
"tailscale.com/syncs"
"tailscale.com/tailcfg"
"tailscale.com/tstime"
"tailscale.com/util/multierr"
)
var stdDialer net.Dialer
// Dial connects to the HTTP server at this Dialer's Host:HTTPPort, requests to
// switch to the Tailscale control protocol, and returns an established control
// protocol connection.
//
// If Dial fails to connect using HTTP, it also tries to tunnel over TLS to the
// Dialer's Host:HTTPSPort as a compatibility fallback.
//
// The provided ctx is only used for the initial connection, until
// Dial returns. It does not affect the connection once established.
func (a *Dialer) Dial(ctx context.Context) (*ClientConn, error) {
if a.Hostname == "" {
return nil, errors.New("required Dialer.Hostname empty")
}
return a.dial(ctx)
}
func (a *Dialer) logf(format string, args ...any) {
if a.Logf != nil {
a.Logf(format, args...)
}
}
func (a *Dialer) getProxyFunc() func(*http.Request) (*url.URL, error) {
if a.proxyFunc != nil {
return a.proxyFunc
}
return tshttpproxy.ProxyFromEnvironment
}
// httpsFallbackDelay is how long we'll wait for a.HTTPPort to work before
// starting to try a.HTTPSPort.
func (a *Dialer) httpsFallbackDelay() time.Duration {
if v := a.testFallbackDelay; v != 0 {
return v
}
return 500 * time.Millisecond
}
var _ = envknob.RegisterBool("TS_USE_CONTROL_DIAL_PLAN") // to record at init time whether it's in use
func (a *Dialer) dial(ctx context.Context) (*ClientConn, error) {
// If we don't have a dial plan, just fall back to dialing the single
// host we know about.
useDialPlan := envknob.BoolDefaultTrue("TS_USE_CONTROL_DIAL_PLAN")
if !useDialPlan || a.DialPlan == nil || len(a.DialPlan.Candidates) == 0 {
return a.dialHost(ctx, netip.Addr{})
}
candidates := a.DialPlan.Candidates
// Otherwise, we try dialing per the plan. Store the highest priority
// in the list, so that if we get a connection to one of those
// candidates we can return quickly.
var highestPriority int = math.MinInt
for _, c := range candidates {
if c.Priority > highestPriority {
highestPriority = c.Priority
}
}
// This context allows us to cancel in-flight connections if we get a
// highest-priority connection before we're all done.
ctx, cancel := context.WithCancel(ctx)
defer cancel()
// Now, for each candidate, kick off a dial in parallel.
type dialResult struct {
conn *ClientConn
err error
addr netip.Addr
priority int
}
resultsCh := make(chan dialResult, len(candidates))
var pending atomic.Int32
pending.Store(int32(len(candidates)))
for _, c := range candidates {
go func(ctx context.Context, c tailcfg.ControlIPCandidate) {
var (
conn *ClientConn
err error
)
// Always send results back to our channel.
defer func() {
resultsCh <- dialResult{conn, err, c.IP, c.Priority}
if pending.Add(-1) == 0 {
close(resultsCh)
}
}()
// If non-zero, wait the configured start timeout
// before we do anything.
if c.DialStartDelaySec > 0 {
a.logf("[v2] controlhttp: waiting %.2f seconds before dialing %q @ %v", c.DialStartDelaySec, a.Hostname, c.IP)
tmr, tmrChannel := a.clock().NewTimer(time.Duration(c.DialStartDelaySec * float64(time.Second)))
defer tmr.Stop()
select {
case <-ctx.Done():
err = ctx.Err()
return
case <-tmrChannel:
}
}
// Now, create a sub-context with the given timeout and
// try dialing the provided host.
ctx, cancel := context.WithTimeout(ctx, time.Duration(c.DialTimeoutSec*float64(time.Second)))
defer cancel()
// This will dial, and the defer above sends it back to our parent.
a.logf("[v2] controlhttp: trying to dial %q @ %v", a.Hostname, c.IP)
conn, err = a.dialHost(ctx, c.IP)
}(ctx, c)
}
var results []dialResult
for res := range resultsCh {
// If we get a response that has the highest priority, we don't
// need to wait for any of the other connections to finish; we
// can just return this connection.
//
// TODO(andrew): we could make this better by keeping track of
// the highest remaining priority dynamically, instead of just
// checking for the highest total
if res.priority == highestPriority && res.conn != nil {
a.logf("[v1] controlhttp: high-priority success dialing %q @ %v from dial plan", a.Hostname, res.addr)
// Drain the channel and any existing connections in
// the background.
go func() {
for _, res := range results {
if res.conn != nil {
res.conn.Close()
}
}
for res := range resultsCh {
if res.conn != nil {
res.conn.Close()
}
}
if a.drainFinished != nil {
close(a.drainFinished)
}
}()
return res.conn, nil
}
// This isn't a highest-priority result, so just store it until
// we're done.
results = append(results, res)
}
// After we finish this function, close any remaining open connections.
defer func() {
for _, result := range results {
// Note: below, we nil out the returned connection (if
// any) in the slice so we don't close it.
if result.conn != nil {
result.conn.Close()
}
}
// We don't drain asynchronously after this point, so notify our
// channel when we return.
if a.drainFinished != nil {
close(a.drainFinished)
}
}()
// Sort by priority, then take the first non-error response.
sort.Slice(results, func(i, j int) bool {
// NOTE: intentionally inverted so that the highest priority
// item comes first
return results[i].priority > results[j].priority
})
var (
conn *ClientConn
errs []error
)
for i, result := range results {
if result.err != nil {
errs = append(errs, result.err)
continue
}
a.logf("[v1] controlhttp: succeeded dialing %q @ %v from dial plan", a.Hostname, result.addr)
conn = result.conn
results[i].conn = nil // so we don't close it in the defer
return conn, nil
}
merr := multierr.New(errs...)
// If we get here, then we didn't get anywhere with our dial plan; fall back to just using DNS.
a.logf("controlhttp: failed dialing using DialPlan, falling back to DNS; errs=%s", merr.Error())
return a.dialHost(ctx, netip.Addr{})
}
// The TS_FORCE_NOISE_443 envknob forces the controlclient noise dialer to
// always use port 443 HTTPS connections to the controlplane and not try the
// port 80 HTTP fast path.
//
// This is currently (2023-01-17) needed for Docker Desktop's "VPNKit" proxy
// that breaks port 80 for us post-Noise-handshake, causing us to never try port
// 443. Until one of Docker's proxy and/or this package's port 443 fallback is
// fixed, this is a workaround. It might also be useful for future debugging.
var forceNoise443 = envknob.RegisterBool("TS_FORCE_NOISE_443")
// forceNoise443 reports whether the controlclient noise dialer should always
// use HTTPS connections as its underlay connection (double crypto). This can
// be necessary when networks or middle boxes are messing with port 80.
func (d *Dialer) forceNoise443() bool {
if forceNoise443() {
return true
}
if d.HealthTracker.LastNoiseDialWasRecent() {
// If we dialed recently, assume there was a recent failure and fall
// back to HTTPS dials for the subsequent retries.
//
// This heuristic works around networks where port 80 is MITMed and
// appears to work for a bit post-Upgrade but then gets closed,
// such as seen in https://github.com/tailscale/tailscale/issues/13597.
d.logf("controlhttp: forcing port 443 dial due to recent noise dial")
return true
}
return false
}
func (d *Dialer) clock() tstime.Clock {
if d.Clock != nil {
return d.Clock
}
return tstime.StdClock{}
}
var debugNoiseDial = envknob.RegisterBool("TS_DEBUG_NOISE_DIAL")
// dialHost connects to the configured Dialer.Hostname and upgrades the
// connection into a controlbase.Conn.
//
// If optAddr is valid, then no DNS is used and the connection will be made to the
// provided address.
func (a *Dialer) dialHost(ctx context.Context, optAddr netip.Addr) (*ClientConn, error) {
// Create one shared context used by both port 80 and port 443 dials.
// If port 80 is still in flight when 443 returns, this deferred cancel
// will stop the port 80 dial.
ctx, cancel := context.WithCancel(ctx)
defer cancel()
ctx = sockstats.WithSockStats(ctx, sockstats.LabelControlClientDialer, a.logf)
// u80 and u443 are the URLs we'll try to hit over HTTP or HTTPS,
// respectively, in order to do the HTTP upgrade to a net.Conn over which
// we'll speak Noise.
u80 := &url.URL{
Scheme: "http",
Host: net.JoinHostPort(a.Hostname, strDef(a.HTTPPort, "80")),
Path: serverUpgradePath,
}
u443 := &url.URL{
Scheme: "https",
Host: net.JoinHostPort(a.Hostname, strDef(a.HTTPSPort, "443")),
Path: serverUpgradePath,
}
if a.HTTPSPort == NoPort {
u443 = nil
}
type tryURLRes struct {
u *url.URL // input (the URL conn+err are for/from)
conn *ClientConn // result (mutually exclusive with err)
err error
}
ch := make(chan tryURLRes) // must be unbuffered
try := func(u *url.URL) {
if debugNoiseDial() {
a.logf("trying noise dial (%v, %v) ...", u, optAddr)
}
cbConn, err := a.dialURL(ctx, u, optAddr)
if debugNoiseDial() {
a.logf("noise dial (%v, %v) = (%v, %v)", u, optAddr, cbConn, err)
}
select {
case ch <- tryURLRes{u, cbConn, err}:
case <-ctx.Done():
if cbConn != nil {
cbConn.Close()
}
}
}
forceTLS := a.forceNoise443()
// Start the plaintext HTTP attempt first, unless disabled by the envknob.
if !forceTLS || u443 == nil {
go try(u80)
}
// In case outbound port 80 is blocked or MITM'ed poorly, start a backup timer
// to dial port 443 if port 80 doesn't either succeed or fail quickly.
var try443Timer tstime.TimerController
if u443 != nil {
delay := a.httpsFallbackDelay()
if forceTLS {
delay = 0
}
try443Timer = a.clock().AfterFunc(delay, func() { try(u443) })
defer try443Timer.Stop()
}
var err80, err443 error
for {
select {
case <-ctx.Done():
return nil, fmt.Errorf("connection attempts aborted by context: %w", ctx.Err())
case res := <-ch:
if res.err == nil {
return res.conn, nil
}
switch res.u {
case u80:
// Connecting over plain HTTP failed; assume it's an HTTP proxy
// being difficult and see if we can get through over HTTPS.
err80 = res.err
// Stop the fallback timer and run it immediately. We don't use
// Timer.Reset(0) here because on AfterFuncs, that can run it
// again.
if try443Timer != nil && try443Timer.Stop() {
go try(u443)
} // else we lost the race and it started already which is what we want
case u443:
err443 = res.err
default:
panic("invalid")
}
if err80 != nil && err443 != nil {
return nil, fmt.Errorf("all connection attempts failed (HTTP: %v, HTTPS: %v)", err80, err443)
}
}
}
}
// dialURL attempts to connect to the given URL.
//
// If optAddr is valid, then no DNS is used and the connection will be made to the
// provided address.
func (a *Dialer) dialURL(ctx context.Context, u *url.URL, optAddr netip.Addr) (*ClientConn, error) {
init, cont, err := controlbase.ClientDeferred(a.MachineKey, a.ControlKey, a.ProtocolVersion)
if err != nil {
return nil, err
}
netConn, err := a.tryURLUpgrade(ctx, u, optAddr, init)
if err != nil {
return nil, err
}
cbConn, err := cont(ctx, netConn)
if err != nil {
netConn.Close()
return nil, err
}
return &ClientConn{
Conn: cbConn,
}, nil
}
// resolver returns a.DNSCache if non-nil or a new *dnscache.Resolver
// otherwise.
func (a *Dialer) resolver() *dnscache.Resolver {
if a.DNSCache != nil {
return a.DNSCache
}
return &dnscache.Resolver{
Forward: dnscache.Get().Forward,
LookupIPFallback: dnsfallback.MakeLookupFunc(a.logf, a.NetMon),
UseLastGood: true,
Logf: a.Logf, // not a.logf method; we want to propagate nil-ness
}
}
func isLoopback(a net.Addr) bool {
if ta, ok := a.(*net.TCPAddr); ok {
return ta.IP.IsLoopback()
}
return false
}
var macOSScreenTime = health.Register(&health.Warnable{
Code: "macos-screen-time",
Severity: health.SeverityHigh,
Title: "Tailscale blocked by Screen Time",
Text: func(args health.Args) string {
return "macOS Screen Time seems to be blocking Tailscale. Try disabling Screen Time in System Settings > Screen Time > Content & Privacy > Access to Web Content."
},
ImpactsConnectivity: true,
})
// tryURLUpgrade connects to u, and tries to upgrade it to a net.Conn.
//
// If optAddr is valid, then no DNS is used and the connection will be made to
// the provided address.
//
// Only the provided ctx is used, not a.ctx.
func (a *Dialer) tryURLUpgrade(ctx context.Context, u *url.URL, optAddr netip.Addr, init []byte) (_ net.Conn, retErr error) {
var dns *dnscache.Resolver
// If we were provided an address to dial, then create a resolver that just
// returns that value; otherwise, fall back to DNS.
if optAddr.IsValid() {
dns = &dnscache.Resolver{
SingleHostStaticResult: []netip.Addr{optAddr},
SingleHost: u.Hostname(),
Logf: a.Logf, // not a.logf method; we want to propagate nil-ness
}
} else {
dns = a.resolver()
}
var dialer dnscache.DialContextFunc
if a.Dialer != nil {
dialer = a.Dialer
} else {
dialer = stdDialer.DialContext
}
// On macOS, see if Screen Time is blocking things.
if runtime.GOOS == "darwin" {
var proxydIntercepted atomic.Bool // intercepted by macOS webfilterproxyd
origDialer := dialer
dialer = func(ctx context.Context, network, address string) (net.Conn, error) {
c, err := origDialer(ctx, network, address)
if err != nil {
return nil, err
}
if isLoopback(c.LocalAddr()) && isLoopback(c.RemoteAddr()) {
proxydIntercepted.Store(true)
}
return c, nil
}
defer func() {
if retErr != nil && proxydIntercepted.Load() {
a.HealthTracker.SetUnhealthy(macOSScreenTime, nil)
retErr = fmt.Errorf("macOS Screen Time is blocking network access: %w", retErr)
} else {
a.HealthTracker.SetHealthy(macOSScreenTime)
}
}()
}
tr := http.DefaultTransport.(*http.Transport).Clone()
defer tr.CloseIdleConnections()
tr.Proxy = a.getProxyFunc()
tshttpproxy.SetTransportGetProxyConnectHeader(tr)
tr.DialContext = dnscache.Dialer(dialer, dns)
// Disable HTTP2, since h2 can't do protocol switching.
tr.TLSClientConfig.NextProtos = []string{}
tr.TLSNextProto = map[string]func(string, *tls.Conn) http.RoundTripper{}
tr.TLSClientConfig = tlsdial.Config(a.Hostname, a.HealthTracker, tr.TLSClientConfig)
if !tr.TLSClientConfig.InsecureSkipVerify {
panic("unexpected") // should be set by tlsdial.Config
}
verify := tr.TLSClientConfig.VerifyConnection
if verify == nil {
panic("unexpected") // should be set by tlsdial.Config
}
// Demote all cert verification errors to log messages. We don't actually
// care about the TLS security (because we just do the Noise crypto atop whatever
// connection we get, including HTTP port 80 plaintext) so this permits
// middleboxes to MITM their users. All they'll see is some Noise.
tr.TLSClientConfig.VerifyConnection = func(cs tls.ConnectionState) error {
if err := verify(cs); err != nil && a.Logf != nil && !a.omitCertErrorLogging {
a.Logf("warning: TLS cert verificication for %q failed: %v", a.Hostname, err)
}
return nil // regardless
}
tr.DialTLSContext = dnscache.TLSDialer(dialer, dns, tr.TLSClientConfig)
tr.DisableCompression = true
// (mis)use httptrace to extract the underlying net.Conn from the
// transport. The transport handles 101 Switching Protocols correctly,
// such that the Conn will not be reused or kept alive by the transport
// once the response has been handed back from RoundTrip.
//
// In theory, the machinery of net/http should make it such that
// the trace callback happens-before we get the response, but
// there's no promise of that. So, to make sure, we use a buffered
// channel as a synchronization step to avoid data races.
//
// Note that even though we're able to extract a net.Conn via this
// mechanism, we must still keep using the eventual resp.Body to
// read from, because it includes a buffer we can't get rid of. If
// the server never sends any data after sending the HTTP
// response, we could get away with it, but violating this
// assumption leads to very mysterious transport errors (lockups,
// unexpected EOFs...), and we're bound to forget someday and
// introduce a protocol optimization at a higher level that starts
// eagerly transmitting from the server.
var lastConn syncs.AtomicValue[net.Conn]
trace := httptrace.ClientTrace{
// Even though we only make a single HTTP request which should
// require a single connection, the context (with the attached
// trace configuration) might be used by our custom dialer to
// make other HTTP requests (e.g. BootstrapDNS). We only care
// about the last connection made, which should be the one to
// the control server.
GotConn: func(info httptrace.GotConnInfo) {
lastConn.Store(info.Conn)
},
}
ctx = httptrace.WithClientTrace(ctx, &trace)
req := &http.Request{
Method: "POST",
URL: u,
Header: http.Header{
"Upgrade": []string{upgradeHeaderValue},
"Connection": []string{"upgrade"},
handshakeHeaderName: []string{base64.StdEncoding.EncodeToString(init)},
},
}
req = req.WithContext(ctx)
resp, err := tr.RoundTrip(req)
if err != nil {
return nil, err
}
if resp.StatusCode != http.StatusSwitchingProtocols {
return nil, fmt.Errorf("unexpected HTTP response: %s", resp.Status)
}
// From here on, the underlying net.Conn is ours to use, but there
// is still a read buffer attached to it within resp.Body. So, we
// must direct I/O through resp.Body, but we can still use the
// underlying net.Conn for stuff like deadlines.
switchedConn := lastConn.Load()
if switchedConn == nil {
resp.Body.Close()
return nil, fmt.Errorf("httptrace didn't provide a connection")
}
if next := resp.Header.Get("Upgrade"); next != upgradeHeaderValue {
resp.Body.Close()
return nil, fmt.Errorf("server switched to unexpected protocol %q", next)
}
rwc, ok := resp.Body.(io.ReadWriteCloser)
if !ok {
resp.Body.Close()
return nil, errors.New("http Transport did not provide a writable body")
}
return netutil.NewAltReadWriteCloserConn(rwc, switchedConn), nil
}

View File

@@ -0,0 +1,17 @@
// Copyright (c) Tailscale Inc & AUTHORS
// SPDX-License-Identifier: BSD-3-Clause
package controlhttp
import (
"tailscale.com/control/controlbase"
)
// ClientConn is a Tailscale control client as returned by the Dialer.
//
// It's effectively just a *controlbase.Conn (which it embeds) with
// optional metadata.
type ClientConn struct {
// Conn is the noise connection.
*controlbase.Conn
}

61
vendor/tailscale.com/control/controlhttp/client_js.go generated vendored Normal file
View File

@@ -0,0 +1,61 @@
// Copyright (c) Tailscale Inc & AUTHORS
// SPDX-License-Identifier: BSD-3-Clause
package controlhttp
import (
"context"
"encoding/base64"
"errors"
"net"
"net/url"
"github.com/coder/websocket"
"tailscale.com/control/controlbase"
"tailscale.com/net/wsconn"
)
// Variant of Dial that tunnels the request over WebSockets, since we cannot do
// bi-directional communication over an HTTP connection when in JS.
func (d *Dialer) Dial(ctx context.Context) (*ClientConn, error) {
if d.Hostname == "" {
return nil, errors.New("required Dialer.Hostname empty")
}
init, cont, err := controlbase.ClientDeferred(d.MachineKey, d.ControlKey, d.ProtocolVersion)
if err != nil {
return nil, err
}
wsScheme := "wss"
host := d.Hostname
// If using a custom control server (on a non-standard port), prefer that.
// This mirrors the port selection in NewNoiseClient from noise.go.
if d.HTTPPort != "" && d.HTTPPort != "80" && d.HTTPSPort == "443" {
wsScheme = "ws"
host = net.JoinHostPort(host, d.HTTPPort)
}
wsURL := &url.URL{
Scheme: wsScheme,
Host: host,
Path: serverUpgradePath,
// Can't set HTTP headers on the websocket request, so we have to send
// the handshake via a query parameter instead.
RawQuery: url.Values{
handshakeHeaderName: []string{base64.StdEncoding.EncodeToString(init)},
}.Encode(),
}
wsConn, _, err := websocket.Dial(ctx, wsURL.String(), &websocket.DialOptions{
Subprotocols: []string{upgradeHeaderValue},
})
if err != nil {
return nil, err
}
netConn := wsconn.NetConn(context.Background(), wsConn, websocket.MessageBinary, wsURL.String())
cbConn, err := cont(ctx, netConn)
if err != nil {
netConn.Close()
return nil, err
}
return &ClientConn{Conn: cbConn}, nil
}

116
vendor/tailscale.com/control/controlhttp/constants.go generated vendored Normal file
View File

@@ -0,0 +1,116 @@
// Copyright (c) Tailscale Inc & AUTHORS
// SPDX-License-Identifier: BSD-3-Clause
package controlhttp
import (
"net/http"
"net/url"
"time"
"tailscale.com/health"
"tailscale.com/net/dnscache"
"tailscale.com/net/netmon"
"tailscale.com/tailcfg"
"tailscale.com/tstime"
"tailscale.com/types/key"
"tailscale.com/types/logger"
)
const (
// upgradeHeader is the value of the Upgrade HTTP header used to
// indicate the Tailscale control protocol.
upgradeHeaderValue = "tailscale-control-protocol"
// handshakeHeaderName is the HTTP request header that can
// optionally contain base64-encoded initial handshake
// payload, to save an RTT.
handshakeHeaderName = "X-Tailscale-Handshake"
// serverUpgradePath is where the server-side HTTP handler to
// do the protocol switch is located.
serverUpgradePath = "/ts2021"
)
// NoPort is a sentinel value for Dialer.HTTPSPort to indicate that HTTPS
// should not be tried on any port. It exists primarily for some localhost
// tests where the control plane only runs on HTTP.
const NoPort = "none"
// Dialer contains configuration on how to dial the Tailscale control server.
type Dialer struct {
// Hostname is the hostname to connect to, with no port number.
//
// This field is required.
Hostname string
// MachineKey contains the current machine's private key.
//
// This field is required.
MachineKey key.MachinePrivate
// ControlKey contains the expected public key for the control server.
//
// This field is required.
ControlKey key.MachinePublic
// ProtocolVersion is the expected protocol version to negotiate.
//
// This field is required.
ProtocolVersion uint16
// HTTPPort is the port number to use when making a HTTP connection.
//
// If not specified, this defaults to port 80.
HTTPPort string
// HTTPSPort is the port number to use when making a HTTPS connection.
//
// If not specified, this defaults to port 443.
//
// If "none" (NoPort), HTTPS is disabled.
HTTPSPort string
// Dialer is the dialer used to make outbound connections.
//
// If not specified, this defaults to net.Dialer.DialContext.
Dialer dnscache.DialContextFunc
// DNSCache is the caching Resolver used by this Dialer.
//
// If not specified, a new Resolver is created per attempt.
DNSCache *dnscache.Resolver
// Logf, if set, is a logging function to use; if unset, logs are
// dropped.
Logf logger.Logf
NetMon *netmon.Monitor
// HealthTracker, if non-nil, is the health tracker to use.
HealthTracker *health.Tracker
// DialPlan, if set, contains instructions from the control server on
// how to connect to it. If present, we will try the methods in this
// plan before falling back to DNS.
DialPlan *tailcfg.ControlDialPlan
proxyFunc func(*http.Request) (*url.URL, error) // or nil
// For tests only
drainFinished chan struct{}
omitCertErrorLogging bool
testFallbackDelay time.Duration
// Clock, if non-nil, overrides the clock to use.
// If nil, tstime.StdClock is used.
// This exists primarily for tests.
Clock tstime.Clock
}
func strDef(v1, v2 string) string {
if v1 != "" {
return v1
}
return v2
}
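// Example (illustrative sketch): the minimal Dialer configuration for a direct
// connection, assuming the caller already holds the machine key pair and the
// control server's public key. Calling Dial on the result performs the upgrade.
func exampleDialer(host string, machineKey key.MachinePrivate, controlKey key.MachinePublic) *Dialer {
	return &Dialer{
		Hostname:        host,
		MachineKey:      machineKey,
		ControlKey:      controlKey,
		ProtocolVersion: uint16(tailcfg.CurrentCapabilityVersion),
	}
}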

217
vendor/tailscale.com/control/controlhttp/server.go generated vendored Normal file
View File

@@ -0,0 +1,217 @@
// Copyright (c) Tailscale Inc & AUTHORS
// SPDX-License-Identifier: BSD-3-Clause
//go:build !ios
package controlhttp
import (
"context"
"encoding/base64"
"errors"
"fmt"
"io"
"net"
"net/http"
"strings"
"time"
"github.com/coder/websocket"
"tailscale.com/control/controlbase"
"tailscale.com/net/netutil"
"tailscale.com/net/wsconn"
"tailscale.com/types/key"
)
// AcceptHTTP upgrades the HTTP request given by w and r into a Tailscale
// control protocol base transport connection.
//
// AcceptHTTP always writes an HTTP response to w. The caller must not attempt
// their own response after calling AcceptHTTP.
//
// earlyWrite optionally specifies a func to write to the noise connection
// (encrypted). It receives the negotiated version and a writer to write to, if
// desired.
func AcceptHTTP(ctx context.Context, w http.ResponseWriter, r *http.Request, private key.MachinePrivate, earlyWrite func(protocolVersion int, w io.Writer) error) (*controlbase.Conn, error) {
return acceptHTTP(ctx, w, r, private, earlyWrite)
}
func acceptHTTP(ctx context.Context, w http.ResponseWriter, r *http.Request, private key.MachinePrivate, earlyWrite func(protocolVersion int, w io.Writer) error) (_ *controlbase.Conn, retErr error) {
next := strings.ToLower(r.Header.Get("Upgrade"))
if next == "" {
http.Error(w, "missing next protocol", http.StatusBadRequest)
return nil, errors.New("no next protocol in HTTP request")
}
if next == "websocket" {
return acceptWebsocket(ctx, w, r, private)
}
if next != upgradeHeaderValue {
http.Error(w, "unknown next protocol", http.StatusBadRequest)
return nil, fmt.Errorf("client requested unhandled next protocol %q", next)
}
initB64 := r.Header.Get(handshakeHeaderName)
if initB64 == "" {
http.Error(w, "missing Tailscale handshake header", http.StatusBadRequest)
return nil, errors.New("no tailscale handshake header in HTTP request")
}
init, err := base64.StdEncoding.DecodeString(initB64)
if err != nil {
http.Error(w, "invalid tailscale handshake header", http.StatusBadRequest)
return nil, fmt.Errorf("decoding base64 handshake header: %v", err)
}
hijacker, ok := w.(http.Hijacker)
if !ok {
http.Error(w, "make request over HTTP/1", http.StatusBadRequest)
return nil, errors.New("can't hijack client connection")
}
w.Header().Set("Upgrade", upgradeHeaderValue)
w.Header().Set("Connection", "upgrade")
w.WriteHeader(http.StatusSwitchingProtocols)
conn, brw, err := hijacker.Hijack()
if err != nil {
return nil, fmt.Errorf("hijacking client connection: %w", err)
}
defer func() {
if retErr != nil {
conn.Close()
}
}()
if err := brw.Flush(); err != nil {
return nil, fmt.Errorf("flushing hijacked HTTP buffer: %w", err)
}
conn = netutil.NewDrainBufConn(conn, brw.Reader)
cwc := newWriteCorkingConn(conn)
nc, err := controlbase.Server(ctx, cwc, private, init)
if err != nil {
return nil, fmt.Errorf("noise handshake failed: %w", err)
}
if earlyWrite != nil {
if deadline, ok := ctx.Deadline(); ok {
if err := conn.SetDeadline(deadline); err != nil {
return nil, fmt.Errorf("setting conn deadline: %w", err)
}
defer conn.SetDeadline(time.Time{})
}
if err := earlyWrite(nc.ProtocolVersion(), nc); err != nil {
return nil, err
}
}
if err := cwc.uncork(); err != nil {
return nil, err
}
return nc, nil
}
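// Example (illustrative sketch): wiring AcceptHTTP into an HTTP server at
// serverUpgradePath. How the resulting *controlbase.Conn is consumed is left
// to the caller via the handle callback.
func exampleUpgradeHandler(private key.MachinePrivate, handle func(*controlbase.Conn)) http.Handler {
	return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		conn, err := AcceptHTTP(r.Context(), w, r, private, nil)
		if err != nil {
			return // AcceptHTTP has already written an HTTP error response
		}
		go handle(conn)
	})
}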
// acceptWebsocket upgrades a WebSocket connection (from a client that cannot
// speak HTTP) to a Tailscale control protocol base transport connection.
func acceptWebsocket(ctx context.Context, w http.ResponseWriter, r *http.Request, private key.MachinePrivate) (*controlbase.Conn, error) {
c, err := websocket.Accept(w, r, &websocket.AcceptOptions{
Subprotocols: []string{upgradeHeaderValue},
OriginPatterns: []string{"*"},
// Disable compression because we transmit Noise messages that are not
// compressible.
// Additionally, Safari has a broken implementation of compression
// (see https://github.com/nhooyr/websocket/issues/218) that makes
// enabling it actively harmful.
CompressionMode: websocket.CompressionDisabled,
})
if err != nil {
return nil, fmt.Errorf("Could not accept WebSocket connection %v", err)
}
if c.Subprotocol() != upgradeHeaderValue {
c.Close(websocket.StatusPolicyViolation, "client must speak the control subprotocol")
return nil, fmt.Errorf("Unexpected subprotocol %q", c.Subprotocol())
}
if err := r.ParseForm(); err != nil {
c.Close(websocket.StatusPolicyViolation, "Could not parse parameters")
return nil, fmt.Errorf("parse query parameters: %v", err)
}
initB64 := r.Form.Get(handshakeHeaderName)
if initB64 == "" {
c.Close(websocket.StatusPolicyViolation, "missing Tailscale handshake parameter")
return nil, errors.New("no tailscale handshake parameter in HTTP request")
}
init, err := base64.StdEncoding.DecodeString(initB64)
if err != nil {
c.Close(websocket.StatusPolicyViolation, "invalid tailscale handshake parameter")
return nil, fmt.Errorf("decoding base64 handshake parameter: %v", err)
}
conn := wsconn.NetConn(ctx, c, websocket.MessageBinary, r.RemoteAddr)
nc, err := controlbase.Server(ctx, conn, private, init)
if err != nil {
conn.Close()
return nil, fmt.Errorf("noise handshake failed: %w", err)
}
return nc, nil
}
// corkConn is a net.Conn wrapper that initially buffers all writes until uncork
// is called. If the conn is corked and a Read occurs, the Read will flush any
// buffered (corked) write.
//
// Until uncorked, Read/Write/uncork may not be called concurrently.
//
// Deadlines still work, but a corked write ignores deadlines until a Read or
// uncork goes to do that Write.
//
// Use newWriteCorkingConn to create one.
type corkConn struct {
net.Conn
corked bool
buf []byte // corked data
}
func newWriteCorkingConn(c net.Conn) *corkConn {
return &corkConn{Conn: c, corked: true}
}
func (c *corkConn) Write(b []byte) (int, error) {
if c.corked {
c.buf = append(c.buf, b...)
return len(b), nil
}
return c.Conn.Write(b)
}
func (c *corkConn) Read(b []byte) (int, error) {
if c.corked {
if err := c.flush(); err != nil {
return 0, err
}
}
return c.Conn.Read(b)
}
// uncork flushes any buffered data and uncorks the connection so future Writes
// don't buffer. It may not be called concurrently with reads or writes and
// may only be called once.
func (c *corkConn) uncork() error {
if !c.corked {
panic("usage error; uncork called twice") // worth panicking to catch misuse
}
err := c.flush()
c.corked = false
return err
}
func (c *corkConn) flush() error {
if len(c.buf) == 0 {
return nil
}
_, err := c.Conn.Write(c.buf)
c.buf = nil
return err
}

View File

@@ -0,0 +1,191 @@
// Copyright (c) Tailscale Inc & AUTHORS
// SPDX-License-Identifier: BSD-3-Clause
// Package controlknobs contains client options configurable from control which can be turned on
// or off. The ability to turn options on and off is for rolling out features incrementally.
package controlknobs
import (
"sync/atomic"
"tailscale.com/syncs"
"tailscale.com/tailcfg"
"tailscale.com/types/opt"
)
// Knobs is the set of knobs that the control plane's coordination server can
// adjust at runtime.
type Knobs struct {
// DisableUPnP indicates whether to attempt UPnP mapping.
DisableUPnP atomic.Bool
// KeepFullWGConfig is whether we should disable the lazy wireguard
// programming and instead give WireGuard the full netmap always, even for
// idle peers.
KeepFullWGConfig atomic.Bool
// RandomizeClientPort is whether control says we should randomize
// the client port.
RandomizeClientPort atomic.Bool
// OneCGNAT is whether the node should make one big CGNAT route
// in the OS rather than one /32 per peer.
OneCGNAT syncs.AtomicValue[opt.Bool]
// ForceBackgroundSTUN forces netcheck STUN queries to keep
// running in magicsock, even when idle.
ForceBackgroundSTUN atomic.Bool
// DisableDeltaUpdates is whether the node should not process
// incremental (delta) netmap updates and should treat all netmap
// changes as "full" ones as tailscaled did in 1.48.x and earlier.
DisableDeltaUpdates atomic.Bool
// PeerMTUEnable is whether the node should do peer path MTU discovery.
PeerMTUEnable atomic.Bool
// DisableDNSForwarderTCPRetries is whether the DNS forwarder should
// skip retrying truncated queries over TCP.
DisableDNSForwarderTCPRetries atomic.Bool
// SilentDisco is whether the node should suppress disco heartbeats to its
// peers.
SilentDisco atomic.Bool
// LinuxForceIPTables is whether the node should use iptables for Linux
// netfiltering, unless overridden by the user.
LinuxForceIPTables atomic.Bool
// LinuxForceNfTables is whether the node should use nftables for Linux
// netfiltering, unless overridden by the user.
LinuxForceNfTables atomic.Bool
// SeamlessKeyRenewal is whether to enable the alpha functionality of
// renewing node keys without breaking connections.
// http://go/seamless-key-renewal
SeamlessKeyRenewal atomic.Bool
// ProbeUDPLifetime is whether the node should probe UDP path lifetime on
// the tail end of an active direct connection in magicsock.
ProbeUDPLifetime atomic.Bool
// AppCStoreRoutes is whether the node should store RouteInfo to StateStore
// if it's an app connector.
AppCStoreRoutes atomic.Bool
// UserDialUseRoutes is whether tsdial.Dialer.UserDial should use routes to determine
// how to dial the destination address. When true, it also makes the DNS forwarder
// use UserDial instead of SystemDial when dialing resolvers.
UserDialUseRoutes atomic.Bool
// DisableSplitDNSWhenNoCustomResolvers indicates that the node's DNS manager
// should not adopt a split DNS configuration even though the Config of the
// resolver only contains routes that do not specify custom resolver(s), hence
// all DNS queries can be safely sent to the upstream DNS resolver and the
// node's DNS forwarder doesn't need to handle all DNS traffic.
// This is for now (2024-06-06) an iOS-specific battery life optimization,
// and this knob allows us to disable the optimization remotely if needed.
DisableSplitDNSWhenNoCustomResolvers atomic.Bool
// DisableLocalDNSOverrideViaNRPT indicates that the node's DNS manager should not
// create a default (catch-all) Windows NRPT rule when "Override local DNS" is enabled.
// Without this rule, Windows 8.1 and newer devices issue parallel DNS requests to DNS servers
// associated with all network adapters, even when "Override local DNS" is enabled and/or
// a Mullvad exit node is being used, resulting in DNS leaks.
// We began creating this rule on 2024-06-14, and this knob
// allows us to disable the new behavior remotely if needed.
DisableLocalDNSOverrideViaNRPT atomic.Bool
// DisableCryptorouting indicates that the node should not use the
// magicsock crypto routing feature.
DisableCryptorouting atomic.Bool
// DisableCaptivePortalDetection is whether the node should not perform captive portal detection
// automatically when the network state changes.
DisableCaptivePortalDetection atomic.Bool
}
// UpdateFromNodeAttributes updates k (if non-nil) based on the provided self
// node attributes (Node.Capabilities).
func (k *Knobs) UpdateFromNodeAttributes(capMap tailcfg.NodeCapMap) {
if k == nil {
return
}
has := capMap.Contains
var (
keepFullWG = has(tailcfg.NodeAttrDebugDisableWGTrim)
disableUPnP = has(tailcfg.NodeAttrDisableUPnP)
randomizeClientPort = has(tailcfg.NodeAttrRandomizeClientPort)
disableDeltaUpdates = has(tailcfg.NodeAttrDisableDeltaUpdates)
oneCGNAT opt.Bool
forceBackgroundSTUN = has(tailcfg.NodeAttrDebugForceBackgroundSTUN)
peerMTUEnable = has(tailcfg.NodeAttrPeerMTUEnable)
dnsForwarderDisableTCPRetries = has(tailcfg.NodeAttrDNSForwarderDisableTCPRetries)
silentDisco = has(tailcfg.NodeAttrSilentDisco)
forceIPTables = has(tailcfg.NodeAttrLinuxMustUseIPTables)
forceNfTables = has(tailcfg.NodeAttrLinuxMustUseNfTables)
seamlessKeyRenewal = has(tailcfg.NodeAttrSeamlessKeyRenewal)
probeUDPLifetime = has(tailcfg.NodeAttrProbeUDPLifetime)
appCStoreRoutes = has(tailcfg.NodeAttrStoreAppCRoutes)
userDialUseRoutes = has(tailcfg.NodeAttrUserDialUseRoutes)
disableSplitDNSWhenNoCustomResolvers = has(tailcfg.NodeAttrDisableSplitDNSWhenNoCustomResolvers)
disableLocalDNSOverrideViaNRPT = has(tailcfg.NodeAttrDisableLocalDNSOverrideViaNRPT)
disableCryptorouting = has(tailcfg.NodeAttrDisableMagicSockCryptoRouting)
disableCaptivePortalDetection = has(tailcfg.NodeAttrDisableCaptivePortalDetection)
)
if has(tailcfg.NodeAttrOneCGNATEnable) {
oneCGNAT.Set(true)
} else if has(tailcfg.NodeAttrOneCGNATDisable) {
oneCGNAT.Set(false)
}
k.KeepFullWGConfig.Store(keepFullWG)
k.DisableUPnP.Store(disableUPnP)
k.RandomizeClientPort.Store(randomizeClientPort)
k.OneCGNAT.Store(oneCGNAT)
k.ForceBackgroundSTUN.Store(forceBackgroundSTUN)
k.DisableDeltaUpdates.Store(disableDeltaUpdates)
k.PeerMTUEnable.Store(peerMTUEnable)
k.DisableDNSForwarderTCPRetries.Store(dnsForwarderDisableTCPRetries)
k.SilentDisco.Store(silentDisco)
k.LinuxForceIPTables.Store(forceIPTables)
k.LinuxForceNfTables.Store(forceNfTables)
k.SeamlessKeyRenewal.Store(seamlessKeyRenewal)
k.ProbeUDPLifetime.Store(probeUDPLifetime)
k.AppCStoreRoutes.Store(appCStoreRoutes)
k.UserDialUseRoutes.Store(userDialUseRoutes)
k.DisableSplitDNSWhenNoCustomResolvers.Store(disableSplitDNSWhenNoCustomResolvers)
k.DisableLocalDNSOverrideViaNRPT.Store(disableLocalDNSOverrideViaNRPT)
k.DisableCryptorouting.Store(disableCryptorouting)
k.DisableCaptivePortalDetection.Store(disableCaptivePortalDetection)
}
// AsDebugJSON returns k as something that can be marshalled with json.Marshal
// for debug.
func (k *Knobs) AsDebugJSON() map[string]any {
if k == nil {
return nil
}
return map[string]any{
"DisableUPnP": k.DisableUPnP.Load(),
"KeepFullWGConfig": k.KeepFullWGConfig.Load(),
"RandomizeClientPort": k.RandomizeClientPort.Load(),
"OneCGNAT": k.OneCGNAT.Load(),
"ForceBackgroundSTUN": k.ForceBackgroundSTUN.Load(),
"DisableDeltaUpdates": k.DisableDeltaUpdates.Load(),
"PeerMTUEnable": k.PeerMTUEnable.Load(),
"DisableDNSForwarderTCPRetries": k.DisableDNSForwarderTCPRetries.Load(),
"SilentDisco": k.SilentDisco.Load(),
"LinuxForceIPTables": k.LinuxForceIPTables.Load(),
"LinuxForceNfTables": k.LinuxForceNfTables.Load(),
"SeamlessKeyRenewal": k.SeamlessKeyRenewal.Load(),
"ProbeUDPLifetime": k.ProbeUDPLifetime.Load(),
"AppCStoreRoutes": k.AppCStoreRoutes.Load(),
"UserDialUseRoutes": k.UserDialUseRoutes.Load(),
"DisableSplitDNSWhenNoCustomResolvers": k.DisableSplitDNSWhenNoCustomResolvers.Load(),
"DisableLocalDNSOverrideViaNRPT": k.DisableLocalDNSOverrideViaNRPT.Load(),
"DisableCryptorouting": k.DisableCryptorouting.Load(),
"DisableCaptivePortalDetection": k.DisableCaptivePortalDetection.Load(),
}
}

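As a rough illustration of how the knobs above are driven by control, here is a minimal, hypothetical sketch (not part of the vendored code). It assumes the usual tailscale.com/control/controlknobs import path and that tailcfg.NodeCapMap can be built as a plain map literal keyed by node capabilities; only the two listed attributes are "sent" by control in this example.

package main

import (
	"fmt"

	"tailscale.com/control/controlknobs"
	"tailscale.com/tailcfg"
)

func main() {
	var k controlknobs.Knobs

	// Pretend control sent these self-node attributes.
	capMap := tailcfg.NodeCapMap{
		tailcfg.NodeAttrDisableUPnP:    nil,
		tailcfg.NodeAttrOneCGNATEnable: nil,
	}
	k.UpdateFromNodeAttributes(capMap)

	fmt.Println(k.DisableUPnP.Load())         // true
	fmt.Println(k.OneCGNAT.Load())            // "true" (an opt.Bool)
	fmt.Println(k.RandomizeClientPort.Load()) // false: attribute not present
}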
61
vendor/tailscale.com/derp/README.md generated vendored Normal file
View File

@@ -0,0 +1,61 @@
# DERP
This directory (and subdirectories) contain the DERP code. The server itself is
in `../cmd/derper`.
DERP is a packet relay system (client and servers) where peers are addressed
using WireGuard public keys instead of IP addresses.
It relays two types of packets:
* "Disco" discovery messages (see `../disco`) as the a side channel during [NAT
traversal](https://tailscale.com/blog/how-nat-traversal-works/).
* Encrypted WireGuard packets as the fallback of last resort when UDP is blocked
or NAT traversal fails.
## DERP Map
Each client receives a "[DERP
Map](https://pkg.go.dev/tailscale.com/tailcfg#DERPMap)" from the coordination
server describing the DERP servers the client should try to use.
The client picks its "DERP home" based on latency. This is done to keep
costs low by avoiding cloud load balancers (pricey) or anycast, which would
necessarily require server-side routing between DERP regions.
Clients pick their DERP home and report it to the coordination server, which
shares it with all the peers in the tailnet. When a peer wants to send a packet
and it doesn't already have a WireGuard session open, it sends disco messages
(some direct, and some over DERP), trying to do the NAT traversal. The client
will make connections to multiple DERP regions as needed. Only the DERP home
region connection needs to be alive forever.
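For orientation, a one-region DERP map expressed with the `tailcfg` types might look roughly like the sketch below (illustrative only, not a complete or authoritative configuration; real maps come from the coordination server):

package example // illustrative sketch, not part of this repo

import "tailscale.com/tailcfg"

// One region (ID 900) with two meshed nodes. Clients measure latency to each
// region and pick the lowest-latency one as their DERP home.
var exampleDERPMap = &tailcfg.DERPMap{
	Regions: map[int]*tailcfg.DERPRegion{
		900: {
			RegionID:   900,
			RegionCode: "example",
			RegionName: "Example Region",
			Nodes: []*tailcfg.DERPNode{
				{Name: "900a", RegionID: 900, HostName: "derp900a.example.com"},
				{Name: "900b", RegionID: 900, HostName: "derp900b.example.com"},
			},
		},
	},
}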
## DERP Regions
Tailscale runs 1 or more DERP nodes (instances of `cmd/derper`) in various
geographic regions to make sure users have low latency to their DERP home.
Regions generally have multiple nodes "meshed" (routing to each
other) together for redundancy: it allows for cloud failures or upgrades without
kicking users out to a higher-latency region. Instead, clients will reconnect to
the next node in the region. Each node in the region is required to be meshed
with every other node in the region and forward packets to the other nodes in
the region. Packets are forwarded only one hop within the region. There is no
routing between regions. The assumption is that the mesh TCP connections are
over a VPC that's very fast, low latency, and not charged per byte. The
coordination server assigns the list of nodes in a region as a function of the
tailnet, so all clients within a tailnet should generally be connected to the same
node and not require forwarding. Only after a failure do clients of a particular
tailnet get split between nodes in a region and require inter-node forwarding. But
over time it balances back out. There's also an admin-only DERP frame type to
close a particular client's TCP connection, forcing it to reconnect to its
primary, if the operator wants things to balance out sooner.
(Using the `(*derphttp.Client).ClosePeer` method, as used by Tailscale's
internal rarely-used `cmd/derpprune` maintenance tool)
We generally run a minimum of three nodes in a region not for quorum reasons
(there's no voting) but just because two is too uncomfortably few for cascading
failure reasons: if you're running two nodes at 51% load (CPU, memory, etc) and
then one fails, that makes the second one fail. With three or more nodes, you
can run each node a bit hotter.

256
vendor/tailscale.com/derp/derp.go generated vendored Normal file
View File

@@ -0,0 +1,256 @@
// Copyright (c) Tailscale Inc & AUTHORS
// SPDX-License-Identifier: BSD-3-Clause
// Package derp implements the Designated Encrypted Relay for Packets (DERP)
// protocol.
//
// DERP routes packets to clients using curve25519 keys as addresses.
//
// DERP is used by Tailscale nodes to proxy encrypted WireGuard
// packets through the Tailscale cloud servers when a direct path
// cannot be found or opened. DERP is a last resort. Both sides
// behind very aggressive NATs, firewalls, no IPv6, etc.? Well, DERP.
package derp
import (
"bufio"
"encoding/binary"
"errors"
"fmt"
"io"
"time"
)
// MaxPacketSize is the maximum size of a packet sent over DERP.
// (This only includes the data bytes visible to magicsock, not
// including its on-wire framing overhead)
const MaxPacketSize = 64 << 10
// magic is the DERP magic number, sent in the frameServerKey frame
// upon initial connection.
const magic = "DERP🔑" // 8 bytes: 0x44 45 52 50 f0 9f 94 91
const (
nonceLen = 24
frameHeaderLen = 1 + 4 // frameType byte + 4 byte length
keyLen = 32
maxInfoLen = 1 << 20
keepAlive = 60 * time.Second
)
// ProtocolVersion is bumped whenever there's a wire-incompatible change.
// - version 1 (zero on wire): consistent box headers, in use by employee dev nodes a bit
// - version 2: received packets have src addrs in frameRecvPacket at beginning
const ProtocolVersion = 2
// frameType is the one byte frame type at the beginning of the frame
// header. The second field is a big-endian uint32 describing the
// length of the remaining frame (not including the initial 5 bytes).
type frameType byte
/*
Protocol flow:
Login:
* client connects
* server sends frameServerKey
* client sends frameClientInfo
* server sends frameServerInfo
Steady state:
* server occasionally sends frameKeepAlive (or framePing)
* client responds to any framePing with a framePong
* client sends frameSendPacket
* server then sends frameRecvPacket to recipient
*/
const (
frameServerKey = frameType(0x01) // 8B magic + 32B public key + (0+ bytes future use)
frameClientInfo = frameType(0x02) // 32B pub key + 24B nonce + naclbox(json)
frameServerInfo = frameType(0x03) // 24B nonce + naclbox(json)
frameSendPacket = frameType(0x04) // 32B dest pub key + packet bytes
frameForwardPacket = frameType(0x0a) // 32B src pub key + 32B dst pub key + packet bytes
frameRecvPacket = frameType(0x05) // v0/1: packet bytes, v2: 32B src pub key + packet bytes
frameKeepAlive = frameType(0x06) // no payload, no-op (to be replaced with ping/pong)
frameNotePreferred = frameType(0x07) // 1 byte payload: 0x01 or 0x00 for whether this is client's home node
// framePeerGone is sent from server to client to signal that
// a previous sender is no longer connected. That is, if A
// sent to B, and then if A disconnects, the server sends
// framePeerGone to B so B can forget that a reverse path
// exists on that connection to get back to A. It is also sent
// if A tries to send a CallMeMaybe to B and the server has no
// record of B (which currently would only happen if there was
// a bug).
framePeerGone = frameType(0x08) // 32B pub key of peer that's gone + 1 byte reason
// framePeerPresent is like framePeerGone, but for other members of the DERP
// region when they're meshed up together.
//
// The message is at least 32 bytes (the public key of the peer that's
// connected). If there are at least 18 bytes remaining after that, it's the
// 16 byte IP + 2 byte BE uint16 port of the client. If there's another byte
// remaining after that, it's a PeerPresentFlags byte.
// While current servers send 51 bytes, old servers will send fewer, and newer
// servers might send more.
framePeerPresent = frameType(0x09)
// frameWatchConns is how one DERP node in a regional mesh
// subscribes to the others in the region.
// There's no payload. If the sender doesn't have permission, the connection
// is closed. Otherwise, the client is initially flooded with
// framePeerPresent for all connected nodes, and then a stream of
// framePeerPresent & framePeerGone has peers connect and disconnect.
frameWatchConns = frameType(0x10)
// frameClosePeer is a privileged frame type (requires the
// mesh key for now) that closes the provided peer's
// connection. (To be used for cluster load balancing
// purposes, when clients end up on a non-ideal node)
frameClosePeer = frameType(0x11) // 32B pub key of peer to close.
framePing = frameType(0x12) // 8 byte ping payload, to be echoed back in framePong
framePong = frameType(0x13) // 8 byte payload, the contents of the ping being replied to
// frameHealth is sent from server to client to tell the client
// if their connection is unhealthy somehow. Currently the only unhealthy state
// is whether the connection is detected as a duplicate.
// The entire frame body is the text of the error message. An empty message
// clears the error state.
frameHealth = frameType(0x14)
// frameRestarting is sent from server to client for the
// server to declare that it's restarting. Payload is two big
// endian uint32 durations in milliseconds: when to reconnect,
// and how long to try total. See ServerRestartingMessage docs for
// more details on how the client should interpret them.
frameRestarting = frameType(0x15)
)
// PeerGoneReasonType is a one byte reason code explaining why a
// server does not have a path to the requested destination.
type PeerGoneReasonType byte
const (
PeerGoneReasonDisconnected = PeerGoneReasonType(0x00) // peer disconnected from this server
PeerGoneReasonNotHere = PeerGoneReasonType(0x01) // server doesn't know about this peer, unexpected
PeerGoneReasonMeshConnBroke = PeerGoneReasonType(0xf0) // invented by Client.RunWatchConnectionLoop on disconnect; not sent on the wire
)
// PeerPresentFlags is an optional byte of bit flags sent after a framePeerPresent message.
//
// For a modern server, the value should always be non-zero. If the value is zero,
// that means the server doesn't support this field.
type PeerPresentFlags byte
// PeerPresentFlags bits.
const (
PeerPresentIsRegular = 1 << 0
PeerPresentIsMeshPeer = 1 << 1
PeerPresentIsProber = 1 << 2
)
var bin = binary.BigEndian
func writeUint32(bw *bufio.Writer, v uint32) error {
var b [4]byte
bin.PutUint32(b[:], v)
// Writing a byte at a time is a bit silly,
// but it causes b not to escape,
// which more than pays for the silliness.
for _, c := range &b {
err := bw.WriteByte(c)
if err != nil {
return err
}
}
return nil
}
func readUint32(br *bufio.Reader) (uint32, error) {
var b [4]byte
// Reading a byte at a time is a bit silly,
// but it causes b not to escape,
// which more than pays for the silliness.
for i := range &b {
c, err := br.ReadByte()
if err != nil {
return 0, err
}
b[i] = c
}
return bin.Uint32(b[:]), nil
}
func readFrameTypeHeader(br *bufio.Reader, wantType frameType) (frameLen uint32, err error) {
gotType, frameLen, err := readFrameHeader(br)
if err == nil && wantType != gotType {
err = fmt.Errorf("bad frame type 0x%X, want 0x%X", gotType, wantType)
}
return frameLen, err
}
func readFrameHeader(br *bufio.Reader) (t frameType, frameLen uint32, err error) {
tb, err := br.ReadByte()
if err != nil {
return 0, 0, err
}
frameLen, err = readUint32(br)
if err != nil {
return 0, 0, err
}
return frameType(tb), frameLen, nil
}
// readFrame reads a frame header and then reads its payload into
// b[:frameLen].
//
// If the frame header length is greater than maxSize, readFrame returns
// an error after reading the frame header.
//
// If the frame is less than maxSize but greater than len(b), len(b)
// bytes are read, err will be io.ErrShortBuffer, and frameLen and t
// will both be set. That is, callers need to explicitly handle when
// they get more data than expected.
func readFrame(br *bufio.Reader, maxSize uint32, b []byte) (t frameType, frameLen uint32, err error) {
t, frameLen, err = readFrameHeader(br)
if err != nil {
return 0, 0, err
}
if frameLen > maxSize {
return 0, 0, fmt.Errorf("frame header size %d exceeds reader limit of %d", frameLen, maxSize)
}
n, err := io.ReadFull(br, b[:min(frameLen, uint32(len(b)))])
if err != nil {
return 0, 0, err
}
remain := frameLen - uint32(n)
if remain > 0 {
if _, err := io.CopyN(io.Discard, br, int64(remain)); err != nil {
return 0, 0, err
}
err = io.ErrShortBuffer
}
return t, frameLen, err
}
func writeFrameHeader(bw *bufio.Writer, t frameType, frameLen uint32) error {
if err := bw.WriteByte(byte(t)); err != nil {
return err
}
return writeUint32(bw, frameLen)
}
// writeFrame writes a complete frame & flushes it.
func writeFrame(bw *bufio.Writer, t frameType, b []byte) error {
if len(b) > 10<<20 {
return errors.New("unreasonably large frame write")
}
if err := writeFrameHeader(bw, t, uint32(len(b))); err != nil {
return err
}
if _, err := bw.Write(b); err != nil {
return err
}
return bw.Flush()
}

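To make the framing described in derp.go above concrete, here is a standalone sketch (not part of the package) that hand-assembles a frameSendPacket frame: a 1-byte type, a 4-byte big-endian length of the rest of the frame, then the 32-byte destination key and the packet bytes. The helper name is hypothetical.

package example

import "encoding/binary"

// sendPacketFrame assembles the on-wire bytes of a frameSendPacket (0x04):
// 5-byte header (type + big-endian remaining length), 32-byte destination
// public key, then the packet payload.
func sendPacketFrame(destKey [32]byte, payload []byte) []byte {
	const frameSendPacket = 0x04
	frameLen := uint32(len(destKey) + len(payload))

	buf := make([]byte, 0, 5+int(frameLen))
	buf = append(buf, frameSendPacket)                 // 1B frame type
	buf = binary.BigEndian.AppendUint32(buf, frameLen) // 4B remaining length
	buf = append(buf, destKey[:]...)                   // 32B dest public key
	buf = append(buf, payload...)                      // packet bytes
	return buf
}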
673
vendor/tailscale.com/derp/derp_client.go generated vendored Normal file
View File

@@ -0,0 +1,673 @@
// Copyright (c) Tailscale Inc & AUTHORS
// SPDX-License-Identifier: BSD-3-Clause
package derp
import (
"bufio"
"encoding/binary"
"encoding/json"
"errors"
"fmt"
"io"
"net/netip"
"sync"
"time"
"go4.org/mem"
"golang.org/x/time/rate"
"tailscale.com/syncs"
"tailscale.com/tstime"
"tailscale.com/types/key"
"tailscale.com/types/logger"
)
// Client is a DERP client.
type Client struct {
serverKey key.NodePublic // of the DERP server; not a machine or node key
privateKey key.NodePrivate
publicKey key.NodePublic // of privateKey
logf logger.Logf
nc Conn
br *bufio.Reader
meshKey string
canAckPings bool
isProber bool
wmu sync.Mutex // hold while writing to bw
bw *bufio.Writer
rate *rate.Limiter // if non-nil, rate limiter to use
// Owned by Recv:
peeked int // bytes to discard on next Recv
readErr syncs.AtomicValue[error] // sticky (set by Recv)
clock tstime.Clock
}
// ClientOpt is an option passed to NewClient.
type ClientOpt interface {
update(*clientOpt)
}
type clientOptFunc func(*clientOpt)
func (f clientOptFunc) update(o *clientOpt) { f(o) }
// clientOpt are the options passed to newClient.
type clientOpt struct {
MeshKey string
ServerPub key.NodePublic
CanAckPings bool
IsProber bool
}
// MeshKey returns a ClientOpt to pass to the DERP server during connect to get
// access to join the mesh.
//
// An empty key means to not use a mesh key.
func MeshKey(key string) ClientOpt { return clientOptFunc(func(o *clientOpt) { o.MeshKey = key }) }
// IsProber returns a ClientOpt to pass to the DERP server during connect to
// declare that this client is a prober.
func IsProber(v bool) ClientOpt { return clientOptFunc(func(o *clientOpt) { o.IsProber = v }) }
// ServerPublicKey returns a ClientOpt to declare that the server's DERP public key is known.
// If key is the zero value, the returned ClientOpt is a no-op.
func ServerPublicKey(key key.NodePublic) ClientOpt {
return clientOptFunc(func(o *clientOpt) { o.ServerPub = key })
}
// CanAckPings returns a ClientOpt to set whether it advertises to the
// server that it's capable of acknowledging ping requests.
func CanAckPings(v bool) ClientOpt {
return clientOptFunc(func(o *clientOpt) { o.CanAckPings = v })
}
func NewClient(privateKey key.NodePrivate, nc Conn, brw *bufio.ReadWriter, logf logger.Logf, opts ...ClientOpt) (*Client, error) {
var opt clientOpt
for _, o := range opts {
if o == nil {
return nil, errors.New("nil ClientOpt")
}
o.update(&opt)
}
return newClient(privateKey, nc, brw, logf, opt)
}
func newClient(privateKey key.NodePrivate, nc Conn, brw *bufio.ReadWriter, logf logger.Logf, opt clientOpt) (*Client, error) {
c := &Client{
privateKey: privateKey,
publicKey: privateKey.Public(),
logf: logf,
nc: nc,
br: brw.Reader,
bw: brw.Writer,
meshKey: opt.MeshKey,
canAckPings: opt.CanAckPings,
isProber: opt.IsProber,
clock: tstime.StdClock{},
}
if opt.ServerPub.IsZero() {
if err := c.recvServerKey(); err != nil {
return nil, fmt.Errorf("derp.Client: failed to receive server key: %v", err)
}
} else {
c.serverKey = opt.ServerPub
}
if err := c.sendClientKey(); err != nil {
return nil, fmt.Errorf("derp.Client: failed to send client key: %v", err)
}
return c, nil
}
func (c *Client) PublicKey() key.NodePublic { return c.publicKey }
func (c *Client) recvServerKey() error {
var buf [40]byte
t, flen, err := readFrame(c.br, 1<<10, buf[:])
if err == io.ErrShortBuffer {
// For future-proofing, allow server to send more in its greeting.
err = nil
}
if err != nil {
return err
}
if flen < uint32(len(buf)) || t != frameServerKey || string(buf[:len(magic)]) != magic {
return errors.New("invalid server greeting")
}
c.serverKey = key.NodePublicFromRaw32(mem.B(buf[len(magic):]))
return nil
}
func (c *Client) parseServerInfo(b []byte) (*serverInfo, error) {
const maxLength = nonceLen + maxInfoLen
fl := len(b)
if fl < nonceLen {
return nil, fmt.Errorf("short serverInfo frame")
}
if fl > maxLength {
return nil, fmt.Errorf("long serverInfo frame")
}
msg, ok := c.privateKey.OpenFrom(c.serverKey, b)
if !ok {
return nil, fmt.Errorf("failed to open naclbox from server key %s", c.serverKey)
}
info := new(serverInfo)
if err := json.Unmarshal(msg, info); err != nil {
return nil, fmt.Errorf("invalid JSON: %v", err)
}
return info, nil
}
type clientInfo struct {
// MeshKey optionally specifies a pre-shared key used by
// trusted clients. It's required to subscribe to the
// connection list & forward packets. It's empty for regular
// users.
MeshKey string `json:"meshKey,omitempty"`
// Version is the DERP protocol version that the client was built with.
// See the ProtocolVersion const.
Version int `json:"version,omitempty"`
// CanAckPings is whether the client declares it's able to ack
// pings.
CanAckPings bool
// IsProber is whether this client is a prober.
IsProber bool `json:",omitempty"`
}
func (c *Client) sendClientKey() error {
msg, err := json.Marshal(clientInfo{
Version: ProtocolVersion,
MeshKey: c.meshKey,
CanAckPings: c.canAckPings,
IsProber: c.isProber,
})
if err != nil {
return err
}
msgbox := c.privateKey.SealTo(c.serverKey, msg)
buf := make([]byte, 0, keyLen+len(msgbox))
buf = c.publicKey.AppendTo(buf)
buf = append(buf, msgbox...)
return writeFrame(c.bw, frameClientInfo, buf)
}
// ServerPublicKey returns the server's public key.
func (c *Client) ServerPublicKey() key.NodePublic { return c.serverKey }
// Send sends a packet to the Tailscale node identified by dstKey.
//
// It is an error if the packet is larger than 64KB.
func (c *Client) Send(dstKey key.NodePublic, pkt []byte) error { return c.send(dstKey, pkt) }
func (c *Client) send(dstKey key.NodePublic, pkt []byte) (ret error) {
defer func() {
if ret != nil {
ret = fmt.Errorf("derp.Send: %w", ret)
}
}()
if len(pkt) > MaxPacketSize {
return fmt.Errorf("packet too big: %d", len(pkt))
}
c.wmu.Lock()
defer c.wmu.Unlock()
if c.rate != nil {
pktLen := frameHeaderLen + key.NodePublicRawLen + len(pkt)
if !c.rate.AllowN(c.clock.Now(), pktLen) {
return nil // drop
}
}
if err := writeFrameHeader(c.bw, frameSendPacket, uint32(key.NodePublicRawLen+len(pkt))); err != nil {
return err
}
if _, err := c.bw.Write(dstKey.AppendTo(nil)); err != nil {
return err
}
if _, err := c.bw.Write(pkt); err != nil {
return err
}
return c.bw.Flush()
}
func (c *Client) ForwardPacket(srcKey, dstKey key.NodePublic, pkt []byte) (err error) {
defer func() {
if err != nil {
err = fmt.Errorf("derp.ForwardPacket: %w", err)
}
}()
if len(pkt) > MaxPacketSize {
return fmt.Errorf("packet too big: %d", len(pkt))
}
c.wmu.Lock()
defer c.wmu.Unlock()
timer := c.clock.AfterFunc(5*time.Second, c.writeTimeoutFired)
defer timer.Stop()
if err := writeFrameHeader(c.bw, frameForwardPacket, uint32(keyLen*2+len(pkt))); err != nil {
return err
}
if _, err := c.bw.Write(srcKey.AppendTo(nil)); err != nil {
return err
}
if _, err := c.bw.Write(dstKey.AppendTo(nil)); err != nil {
return err
}
if _, err := c.bw.Write(pkt); err != nil {
return err
}
return c.bw.Flush()
}
func (c *Client) writeTimeoutFired() { c.nc.Close() }
func (c *Client) SendPing(data [8]byte) error {
return c.sendPingOrPong(framePing, data)
}
func (c *Client) SendPong(data [8]byte) error {
return c.sendPingOrPong(framePong, data)
}
func (c *Client) sendPingOrPong(typ frameType, data [8]byte) error {
c.wmu.Lock()
defer c.wmu.Unlock()
if err := writeFrameHeader(c.bw, typ, 8); err != nil {
return err
}
if _, err := c.bw.Write(data[:]); err != nil {
return err
}
return c.bw.Flush()
}
// NotePreferred sends a packet that tells the server whether this
// client is the user's preferred server. This is only used in the
// server for stats.
func (c *Client) NotePreferred(preferred bool) (err error) {
defer func() {
if err != nil {
err = fmt.Errorf("derp.NotePreferred: %v", err)
}
}()
c.wmu.Lock()
defer c.wmu.Unlock()
if err := writeFrameHeader(c.bw, frameNotePreferred, 1); err != nil {
return err
}
var b byte = 0x00
if preferred {
b = 0x01
}
if err := c.bw.WriteByte(b); err != nil {
return err
}
return c.bw.Flush()
}
// WatchConnectionChanges sends a request to subscribe to the peer's connection list.
// It's a fatal error if the client wasn't created using MeshKey.
func (c *Client) WatchConnectionChanges() error {
c.wmu.Lock()
defer c.wmu.Unlock()
if err := writeFrameHeader(c.bw, frameWatchConns, 0); err != nil {
return err
}
return c.bw.Flush()
}
// ClosePeer asks the server to close target's TCP connection.
// It's a fatal error if the client wasn't created using MeshKey.
func (c *Client) ClosePeer(target key.NodePublic) error {
c.wmu.Lock()
defer c.wmu.Unlock()
return writeFrame(c.bw, frameClosePeer, target.AppendTo(nil))
}
// ReceivedMessage represents a type returned by Client.Recv. Unless
// otherwise documented, the returned message aliases the byte slice
// provided to Recv and thus the message is only as good as that
// buffer, which is up to the caller.
type ReceivedMessage interface {
msg()
}
// ReceivedPacket is a ReceivedMessage representing an incoming packet.
type ReceivedPacket struct {
Source key.NodePublic
// Data is the received packet bytes. It aliases the memory
// passed to Client.Recv.
Data []byte
}
func (ReceivedPacket) msg() {}
// PeerGoneMessage is a ReceivedMessage that indicates that the client
// identified by the underlying public key is not connected to this
// server.
//
// It has only historically been sent by the server when the client
// connection count decremented from 1 to 0 and not from e.g. 2 to 1.
// See https://github.com/tailscale/tailscale/issues/13566 for details.
type PeerGoneMessage struct {
Peer key.NodePublic
Reason PeerGoneReasonType
}
func (PeerGoneMessage) msg() {}
// PeerPresentMessage is a ReceivedMessage that indicates that the client is
// connected to the server. (Only used by trusted mesh clients)
//
// It will be sent to client watchers for every new connection from a client,
// even if the client's already connected with that public key.
// See https://github.com/tailscale/tailscale/issues/13566 for PeerPresentMessage
// and PeerGoneMessage not being 1:1.
type PeerPresentMessage struct {
// Key is the public key of the client.
Key key.NodePublic
// IPPort is the remote IP and port of the client.
IPPort netip.AddrPort
// Flags is a bitmask of info about the client.
Flags PeerPresentFlags
}
func (PeerPresentMessage) msg() {}
// ServerInfoMessage is sent by the server upon first connect.
type ServerInfoMessage struct {
// TokenBucketBytesPerSecond is how many bytes per second the
// server says it will accept, including all framing bytes.
//
// Zero means unspecified. There might be a limit, but the
// client need not try to respect it.
TokenBucketBytesPerSecond int
// TokenBucketBytesBurst is how many bytes the server will
// allow to burst, temporarily violating
// TokenBucketBytesPerSecond.
//
// Zero means unspecified. There might be a limit, but the
// client need not try to respect it.
TokenBucketBytesBurst int
}
func (ServerInfoMessage) msg() {}
// PingMessage is a request from a client or server to reply to the
// other side with a PongMessage with the given payload.
type PingMessage [8]byte
func (PingMessage) msg() {}
// PongMessage is a reply to a PingMessage from a client or server
// with the payload sent previously in a PingMessage.
type PongMessage [8]byte
func (PongMessage) msg() {}
// KeepAliveMessage is a one-way empty message from server to client, just to
// keep the connection alive. It's like a PingMessage, but doesn't solicit
// a reply from the client.
type KeepAliveMessage struct{}
func (KeepAliveMessage) msg() {}
// HealthMessage is a one-way message from server to client, declaring the
// connection health state.
type HealthMessage struct {
// Problem, if non-empty, is a description of why the connection
// is unhealthy.
//
// The empty string means the connection is healthy again.
//
// The default condition is healthy, so the server doesn't
// broadcast a HealthMessage until a problem exists.
Problem string
}
func (HealthMessage) msg() {}
// ServerRestartingMessage is a one-way message from server to client,
// advertising that the server is restarting.
type ServerRestartingMessage struct {
// ReconnectIn is an advisory duration that the client should wait
// before attempting to reconnect. It might be zero.
// It exists for the server to smear out the reconnects.
ReconnectIn time.Duration
// TryFor is an advisory duration for how long the client
// should attempt to reconnect before giving up and proceeding
// with its normal connection failure logic. The interval
// between retries is undefined for now.
// A server should not send a TryFor duration more than a few
// seconds.
TryFor time.Duration
}
func (ServerRestartingMessage) msg() {}
// Recv reads a message from the DERP server.
//
// The returned message may alias memory owned by the Client; it
// should only be accessed until the next call to Client.Recv.
//
// Once Recv returns an error, the Client is dead forever.
func (c *Client) Recv() (m ReceivedMessage, err error) {
return c.recvTimeout(120 * time.Second)
}
func (c *Client) recvTimeout(timeout time.Duration) (m ReceivedMessage, err error) {
readErr := c.readErr.Load()
if readErr != nil {
return nil, readErr
}
defer func() {
if err != nil {
err = fmt.Errorf("derp.Recv: %w", err)
c.readErr.Store(err)
}
}()
for {
c.nc.SetReadDeadline(time.Now().Add(timeout))
// Discard any peeked bytes from a previous Recv call.
if c.peeked != 0 {
if n, err := c.br.Discard(c.peeked); err != nil || n != c.peeked {
// Documented to never fail, but might as well check.
return nil, fmt.Errorf("bufio.Reader.Discard(%d bytes): got %v, %v", c.peeked, n, err)
}
c.peeked = 0
}
t, n, err := readFrameHeader(c.br)
if err != nil {
return nil, err
}
if n > 1<<20 {
return nil, fmt.Errorf("unexpectedly large frame of %d bytes returned", n)
}
var b []byte // frame payload (past the 5 byte header)
// If the frame fits in our bufio.Reader buffer, just use it.
// In practice it's 4KB (from derphttp.Client's bufio.NewReader(httpConn)) and
// in practice, WireGuard packets (and thus DERP frames) are under 1.5KB.
// So this is the common path.
if int(n) <= c.br.Size() {
b, err = c.br.Peek(int(n))
c.peeked = int(n)
} else {
// But if for some reason we read a large DERP message (which isn't necessarily
// a WireGuard packet), then just allocate memory for it.
// TODO(bradfitz): use a pool if large frames ever happen in practice.
b = make([]byte, n)
_, err = io.ReadFull(c.br, b)
}
if err != nil {
return nil, err
}
switch t {
default:
continue
case frameServerInfo:
// Server sends this at start-up. Currently unused.
// Just has a JSON message saying "version: 2",
// but the protocol seems extensible enough as-is without
// needing to wait an RTT to discover the version at startup.
// We'd prefer to give the connection to the client (magicsock)
// to start writing as soon as possible.
si, err := c.parseServerInfo(b)
if err != nil {
return nil, fmt.Errorf("invalid server info frame: %v", err)
}
sm := ServerInfoMessage{
TokenBucketBytesPerSecond: si.TokenBucketBytesPerSecond,
TokenBucketBytesBurst: si.TokenBucketBytesBurst,
}
c.setSendRateLimiter(sm)
return sm, nil
case frameKeepAlive:
// A one-way keep-alive message that doesn't require an acknowledgement.
// This predated framePing/framePong.
return KeepAliveMessage{}, nil
case framePeerGone:
if n < keyLen {
c.logf("[unexpected] dropping short peerGone frame from DERP server")
continue
}
// Backward compatibility for the older peerGone without reason byte
reason := PeerGoneReasonDisconnected
if n > keyLen {
reason = PeerGoneReasonType(b[keyLen])
}
pg := PeerGoneMessage{
Peer: key.NodePublicFromRaw32(mem.B(b[:keyLen])),
Reason: reason,
}
return pg, nil
case framePeerPresent:
remain := b
chunk, remain, ok := cutLeadingN(remain, keyLen)
if !ok {
c.logf("[unexpected] dropping short peerPresent frame from DERP server")
continue
}
var msg PeerPresentMessage
msg.Key = key.NodePublicFromRaw32(mem.B(chunk))
const ipLen = 16
const portLen = 2
chunk, remain, ok = cutLeadingN(remain, ipLen+portLen)
if !ok {
// Older server which didn't send the IP.
return msg, nil
}
msg.IPPort = netip.AddrPortFrom(
netip.AddrFrom16([16]byte(chunk[:ipLen])).Unmap(),
binary.BigEndian.Uint16(chunk[ipLen:]),
)
chunk, _, ok = cutLeadingN(remain, 1)
if !ok {
// Older server which doesn't send PeerPresentFlags.
return msg, nil
}
msg.Flags = PeerPresentFlags(chunk[0])
return msg, nil
case frameRecvPacket:
var rp ReceivedPacket
if n < keyLen {
c.logf("[unexpected] dropping short packet from DERP server")
continue
}
rp.Source = key.NodePublicFromRaw32(mem.B(b[:keyLen]))
rp.Data = b[keyLen:n]
return rp, nil
case framePing:
var pm PingMessage
if n < 8 {
c.logf("[unexpected] dropping short ping frame")
continue
}
copy(pm[:], b[:])
return pm, nil
case framePong:
var pm PongMessage
if n < 8 {
c.logf("[unexpected] dropping short ping frame")
continue
}
copy(pm[:], b[:])
return pm, nil
case frameHealth:
return HealthMessage{Problem: string(b[:])}, nil
case frameRestarting:
var m ServerRestartingMessage
if n < 8 {
c.logf("[unexpected] dropping short server restarting frame")
continue
}
m.ReconnectIn = time.Duration(binary.BigEndian.Uint32(b[0:4])) * time.Millisecond
m.TryFor = time.Duration(binary.BigEndian.Uint32(b[4:8])) * time.Millisecond
return m, nil
}
}
}
func (c *Client) setSendRateLimiter(sm ServerInfoMessage) {
c.wmu.Lock()
defer c.wmu.Unlock()
if sm.TokenBucketBytesPerSecond == 0 {
c.rate = nil
} else {
c.rate = rate.NewLimiter(
rate.Limit(sm.TokenBucketBytesPerSecond),
sm.TokenBucketBytesBurst)
}
}
// LocalAddr returns the TCP connection's local address.
//
// If the client is broken in some previously detectable way, it
// returns an error.
func (c *Client) LocalAddr() (netip.AddrPort, error) {
readErr, _ := c.readErr.Load().(error)
if readErr != nil {
return netip.AddrPort{}, readErr
}
if c.nc == nil {
return netip.AddrPort{}, errors.New("nil conn")
}
a := c.nc.LocalAddr()
if a == nil {
return netip.AddrPort{}, errors.New("nil addr")
}
return netip.ParseAddrPort(a.String())
}
func cutLeadingN(b []byte, n int) (chunk, remain []byte, ok bool) {
if len(b) >= n {
return b[:n], b[n:], true
}
return nil, b, false
}

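As a sketch of how the ReceivedMessage types above are typically consumed (hypothetical helper, not part of the package; an established *derp.Client is usually obtained via derphttp):

package example

import (
	"tailscale.com/derp"
	"tailscale.com/types/key"
)

// recvLoop drains messages from an already-connected client. handlePacket is a
// hypothetical callback; note that ReceivedPacket.Data aliases Recv's internal
// buffer and must be copied if retained past the next Recv call.
func recvLoop(c *derp.Client, handlePacket func(src key.NodePublic, payload []byte)) error {
	for {
		m, err := c.Recv()
		if err != nil {
			return err // once Recv fails, the Client is dead
		}
		switch m := m.(type) {
		case derp.ReceivedPacket:
			handlePacket(m.Source, m.Data)
		case derp.PingMessage:
			// Echo the 8-byte payload back so the server knows we're alive.
			if err := c.SendPong([8]byte(m)); err != nil {
				return err
			}
		}
	}
}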
2261
vendor/tailscale.com/derp/derp_server.go generated vendored Normal file

File diff suppressed because it is too large Load Diff

13
vendor/tailscale.com/derp/derp_server_default.go generated vendored Normal file
View File

@@ -0,0 +1,13 @@
// Copyright (c) Tailscale Inc & AUTHORS
// SPDX-License-Identifier: BSD-3-Clause
//go:build !linux
package derp
import "context"
func (c *sclient) startStatsLoop(ctx context.Context) {
// Nothing to do
return
}

89
vendor/tailscale.com/derp/derp_server_linux.go generated vendored Normal file
View File

@@ -0,0 +1,89 @@
// Copyright (c) Tailscale Inc & AUTHORS
// SPDX-License-Identifier: BSD-3-Clause
package derp
import (
"context"
"crypto/tls"
"net"
"time"
"tailscale.com/net/tcpinfo"
)
func (c *sclient) startStatsLoop(ctx context.Context) {
// Get the RTT initially to verify it's supported.
conn := c.tcpConn()
if conn == nil {
c.s.tcpRtt.Add("non-tcp", 1)
return
}
if _, err := tcpinfo.RTT(conn); err != nil {
c.logf("error fetching initial RTT: %v", err)
c.s.tcpRtt.Add("error", 1)
return
}
const statsInterval = 10 * time.Second
// Don't launch a goroutine; use a timer instead.
var gatherStats func()
gatherStats = func() {
// Do nothing if the context is finished.
if ctx.Err() != nil {
return
}
// Reschedule ourselves when this stats gathering is finished.
defer c.s.clock.AfterFunc(statsInterval, gatherStats)
// Gather TCP RTT information.
rtt, err := tcpinfo.RTT(conn)
if err == nil {
c.s.tcpRtt.Add(durationToLabel(rtt), 1)
}
// TODO(andrew): more metrics?
}
// Kick off the initial timer.
c.s.clock.AfterFunc(statsInterval, gatherStats)
}
// tcpConn attempts to get the underlying *net.TCPConn from this client's
// Conn; if it cannot, then it will return nil.
func (c *sclient) tcpConn() *net.TCPConn {
nc := c.nc
for {
switch v := nc.(type) {
case *net.TCPConn:
return v
case *tls.Conn:
nc = v.NetConn()
default:
return nil
}
}
}
func durationToLabel(dur time.Duration) string {
switch {
case dur <= 10*time.Millisecond:
return "10ms"
case dur <= 20*time.Millisecond:
return "20ms"
case dur <= 50*time.Millisecond:
return "50ms"
case dur <= 100*time.Millisecond:
return "100ms"
case dur <= 150*time.Millisecond:
return "150ms"
case dur <= 250*time.Millisecond:
return "250ms"
case dur <= 500*time.Millisecond:
return "500ms"
default:
return "inf"
}
}

1143
vendor/tailscale.com/derp/derphttp/derphttp_client.go generated vendored Normal file

File diff suppressed because it is too large Load Diff

108
vendor/tailscale.com/derp/derphttp/derphttp_server.go generated vendored Normal file
View File

@@ -0,0 +1,108 @@
// Copyright (c) Tailscale Inc & AUTHORS
// SPDX-License-Identifier: BSD-3-Clause
package derphttp
import (
"fmt"
"log"
"net/http"
"strings"
"tailscale.com/derp"
)
// fastStartHeader is the header (with value "1") that signals to the HTTP
// server that the DERP HTTP client does not want the HTTP 101 response
// headers and it will begin writing & reading the DERP protocol immediately
// following its HTTP request.
const fastStartHeader = "Derp-Fast-Start"
// Handler returns an http.Handler to be mounted at /derp, serving s.
func Handler(s *derp.Server) http.Handler {
return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
// These are installed both here and in cmd/derper. The check here
// catches both cmd/derper run with DERP disabled (STUN only mode) as
// well as DERP being run in tests with derphttp.Handler directly,
// as netcheck still assumes this replies.
switch r.URL.Path {
case "/derp/probe", "/derp/latency-check":
ProbeHandler(w, r)
return
}
up := strings.ToLower(r.Header.Get("Upgrade"))
if up != "websocket" && up != "derp" {
if up != "" {
log.Printf("Weird upgrade: %q", up)
}
http.Error(w, "DERP requires connection upgrade", http.StatusUpgradeRequired)
return
}
fastStart := r.Header.Get(fastStartHeader) == "1"
h, ok := w.(http.Hijacker)
if !ok {
http.Error(w, "HTTP does not support general TCP support", 500)
return
}
netConn, conn, err := h.Hijack()
if err != nil {
log.Printf("Hijack failed: %v", err)
http.Error(w, "HTTP does not support general TCP support", 500)
return
}
if !fastStart {
pubKey := s.PublicKey()
fmt.Fprintf(conn, "HTTP/1.1 101 Switching Protocols\r\n"+
"Upgrade: DERP\r\n"+
"Connection: Upgrade\r\n"+
"Derp-Version: %v\r\n"+
"Derp-Public-Key: %s\r\n\r\n",
derp.ProtocolVersion,
pubKey.UntypedHexString())
}
s.Accept(r.Context(), netConn, conn, netConn.RemoteAddr().String())
})
}
// ProbeHandler is the endpoint that clients without UDP access (including js/wasm) hit to measure
// DERP latency, as a replacement for UDP STUN queries.
func ProbeHandler(w http.ResponseWriter, r *http.Request) {
switch r.Method {
case "HEAD", "GET":
w.Header().Set("Access-Control-Allow-Origin", "*")
default:
http.Error(w, "bogus probe method", http.StatusMethodNotAllowed)
}
}
// ServeNoContent generates the /generate_204 response used by Tailscale's
// captive portal detection.
func ServeNoContent(w http.ResponseWriter, r *http.Request) {
if challenge := r.Header.Get(NoContentChallengeHeader); challenge != "" {
badChar := strings.IndexFunc(challenge, func(r rune) bool {
return !isChallengeChar(r)
}) != -1
if len(challenge) <= 64 && !badChar {
w.Header().Set(NoContentResponseHeader, "response "+challenge)
}
}
w.WriteHeader(http.StatusNoContent)
}
func isChallengeChar(c rune) bool {
// Semi-randomly chosen as a limited set of valid characters
return ('a' <= c && c <= 'z') || ('A' <= c && c <= 'Z') ||
('0' <= c && c <= '9') ||
c == '.' || c == '-' || c == '_'
}
const (
NoContentChallengeHeader = "X-Tailscale-Challenge"
NoContentResponseHeader = "X-Tailscale-Response"
)

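A rough sketch of how these handlers might be mounted, loosely following what cmd/derper does (the derp.NewServer signature and the listen address are assumptions, not taken from this diff):

package main

import (
	"log"
	"net/http"

	"tailscale.com/derp"
	"tailscale.com/derp/derphttp"
	"tailscale.com/types/key"
)

func main() {
	s := derp.NewServer(key.NewNode(), log.Printf) // assumed constructor

	mux := http.NewServeMux()
	mux.Handle("/derp", derphttp.Handler(s))
	mux.HandleFunc("/derp/probe", derphttp.ProbeHandler)
	mux.HandleFunc("/derp/latency-check", derphttp.ProbeHandler)
	mux.HandleFunc("/generate_204", derphttp.ServeNoContent)

	log.Fatal(http.ListenAndServe(":3340", mux))
}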
177
vendor/tailscale.com/derp/derphttp/mesh_client.go generated vendored Normal file
View File

@@ -0,0 +1,177 @@
// Copyright (c) Tailscale Inc & AUTHORS
// SPDX-License-Identifier: BSD-3-Clause
package derphttp
import (
"context"
"sync"
"time"
"tailscale.com/derp"
"tailscale.com/types/key"
"tailscale.com/types/logger"
)
var retryInterval = 5 * time.Second
// testHookWatchLookConnectResult, if non-nil for tests, is called by RunWatchConnectionLoop
// with the connect result. If it returns false, the loop ends.
var testHookWatchLookConnectResult func(connectError error, wasSelfConnect bool) (keepRunning bool)
// RunWatchConnectionLoop loops until ctx is done, sending
// WatchConnectionChanges and subscribing to connection changes.
//
// If the server's public key is ignoreServerKey, RunWatchConnectionLoop
// returns.
//
// Otherwise, the add and remove funcs are called as clients come & go.
// Note that add is called for every new connection and remove is only
// called for the final disconnection. See https://github.com/tailscale/tailscale/issues/13566.
// This behavior will likely change. Callers should do their own accounting
// and dup suppression as needed.
//
// infoLogf, if non-nil, is the logger to write periodic status updates about
// how many peers are on the server. Error log output is set to the c's logger,
// regardless of infoLogf's value.
//
// To force RunWatchConnectionLoop to return quickly, its ctx needs to be
// closed, and c itself needs to be closed.
//
// It is a fatal error to call this on an already-started Client without having
// initialized Client.WatchConnectionChanges to true.
//
// If the DERP connection breaks and reconnects, remove will be called for all
// previously seen peers, with Reason set to PeerGoneReasonMeshConnBroke. Those
// clients are likely still connected and their add message will appear after
// reconnect.
func (c *Client) RunWatchConnectionLoop(ctx context.Context, ignoreServerKey key.NodePublic, infoLogf logger.Logf, add func(derp.PeerPresentMessage), remove func(derp.PeerGoneMessage)) {
if !c.WatchConnectionChanges {
if c.isStarted() {
panic("invalid use of RunWatchConnectionLoop on already-started Client without setting Client.RunWatchConnectionLoop")
}
c.WatchConnectionChanges = true
}
if infoLogf == nil {
infoLogf = logger.Discard
}
logf := c.logf
const statusInterval = 10 * time.Second
var (
mu sync.Mutex
present = map[key.NodePublic]bool{}
loggedConnected = false
)
clear := func() {
mu.Lock()
defer mu.Unlock()
if len(present) == 0 {
return
}
logf("reconnected; clearing %d forwarding mappings", len(present))
for k := range present {
remove(derp.PeerGoneMessage{Peer: k, Reason: derp.PeerGoneReasonMeshConnBroke})
}
present = map[key.NodePublic]bool{}
}
lastConnGen := 0
lastStatus := c.clock.Now()
logConnectedLocked := func() {
if loggedConnected {
return
}
infoLogf("connected; %d peers", len(present))
loggedConnected = true
}
const logConnectedDelay = 200 * time.Millisecond
timer := c.clock.AfterFunc(2*time.Second, func() {
mu.Lock()
defer mu.Unlock()
logConnectedLocked()
})
defer timer.Stop()
updatePeer := func(k key.NodePublic, isPresent bool) {
mu.Lock()
defer mu.Unlock()
if isPresent {
present[k] = true
if !loggedConnected {
timer.Reset(logConnectedDelay)
}
} else {
// If we got a peerGone message, that means the initial connection's
// flood of peerPresent messages is done, so we can log already:
logConnectedLocked()
delete(present, k)
}
}
sleep := func(d time.Duration) {
t, tChannel := c.clock.NewTimer(d)
select {
case <-ctx.Done():
t.Stop()
case <-tChannel:
}
}
for ctx.Err() == nil {
// Make sure we're connected before calling s.ServerPublicKey.
_, _, err := c.connect(ctx, "RunWatchConnectionLoop")
if err != nil {
if f := testHookWatchLookConnectResult; f != nil && !f(err, false) {
return
}
logf("mesh connect: %v", err)
sleep(retryInterval)
continue
}
selfConnect := c.ServerPublicKey() == ignoreServerKey
if f := testHookWatchLookConnectResult; f != nil && !f(err, selfConnect) {
return
}
if selfConnect {
logf("detected self-connect; ignoring host")
return
}
for {
m, connGen, err := c.RecvDetail()
if err != nil {
clear()
logf("Recv: %v", err)
sleep(retryInterval)
break
}
if connGen != lastConnGen {
lastConnGen = connGen
clear()
}
switch m := m.(type) {
case derp.PeerPresentMessage:
add(m)
updatePeer(m.Key, true)
case derp.PeerGoneMessage:
switch m.Reason {
case derp.PeerGoneReasonDisconnected:
// Normal case, log nothing
case derp.PeerGoneReasonNotHere:
logf("Recv: peer %s not connected to %s",
key.NodePublic(m.Peer).ShortString(), c.ServerPublicKey().ShortString())
default:
logf("Recv: peer %s not at server %s for unknown reason %v",
key.NodePublic(m.Peer).ShortString(), c.ServerPublicKey().ShortString(), m.Reason)
}
remove(m)
updatePeer(m.Peer, false)
default:
continue
}
if now := c.clock.Now(); now.Sub(lastStatus) > statusInterval {
lastStatus = now
infoLogf("%d peers", len(present))
}
}
}
}

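A hypothetical caller of RunWatchConnectionLoop might wire the callbacks like this (sketch only; it assumes an already-configured *derphttp.Client with a mesh key, and does none of the dup-suppression the docs above recommend beyond a naive map):

package example

import (
	"context"
	"log"

	"tailscale.com/derp"
	"tailscale.com/derp/derphttp"
	"tailscale.com/types/key"
)

func watchPeers(ctx context.Context, c *derphttp.Client, self key.NodePublic) {
	present := map[key.NodePublic]bool{} // naive accounting; see the caveats above

	add := func(m derp.PeerPresentMessage) {
		present[m.Key] = true
		log.Printf("peer present: %s from %s", m.Key.ShortString(), m.IPPort)
	}
	remove := func(m derp.PeerGoneMessage) {
		delete(present, m.Peer)
		log.Printf("peer gone: %s (reason %d)", m.Peer.ShortString(), m.Reason)
	}

	// Blocks until ctx is done (and the client is closed), or until a
	// self-connect to the ignored server key is detected.
	c.RunWatchConnectionLoop(ctx, self, log.Printf, add, remove)
}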
32
vendor/tailscale.com/derp/derphttp/websocket.go generated vendored Normal file
View File

@@ -0,0 +1,32 @@
// Copyright (c) Tailscale Inc & AUTHORS
// SPDX-License-Identifier: BSD-3-Clause
//go:build linux || js
package derphttp
import (
"context"
"log"
"net"
"github.com/coder/websocket"
"tailscale.com/net/wsconn"
)
func init() {
dialWebsocketFunc = dialWebsocket
}
func dialWebsocket(ctx context.Context, urlStr string) (net.Conn, error) {
c, res, err := websocket.Dial(ctx, urlStr, &websocket.DialOptions{
Subprotocols: []string{"derp"},
})
if err != nil {
log.Printf("websocket Dial: %v, %+v", err, res)
return nil, err
}
log.Printf("websocket: connected to %v", urlStr)
netConn := wsconn.NetConn(context.Background(), c, websocket.MessageBinary, urlStr)
return netConn, nil
}

33
vendor/tailscale.com/derp/dropreason_string.go generated vendored Normal file
View File

@@ -0,0 +1,33 @@
// Copyright (c) Tailscale Inc & AUTHORS
// SPDX-License-Identifier: BSD-3-Clause
// Code generated by "stringer -type=dropReason -trimprefix=dropReason"; DO NOT EDIT.
package derp
import "strconv"
func _() {
// An "invalid array index" compiler error signifies that the constant values have changed.
// Re-run the stringer command to generate them again.
var x [1]struct{}
_ = x[dropReasonUnknownDest-0]
_ = x[dropReasonUnknownDestOnFwd-1]
_ = x[dropReasonGoneDisconnected-2]
_ = x[dropReasonQueueHead-3]
_ = x[dropReasonQueueTail-4]
_ = x[dropReasonWriteError-5]
_ = x[dropReasonDupClient-6]
_ = x[numDropReasons-7]
}
const _dropReason_name = "UnknownDestUnknownDestOnFwdGoneDisconnectedQueueHeadQueueTailWriteErrorDupClientnumDropReasons"
var _dropReason_index = [...]uint8{0, 11, 27, 43, 52, 61, 71, 80, 94}
func (i dropReason) String() string {
if i < 0 || i >= dropReason(len(_dropReason_index)-1) {
return "dropReason(" + strconv.FormatInt(int64(i), 10) + ")"
}
return _dropReason_name[_dropReason_index[i]:_dropReason_index[i+1]]
}

272
vendor/tailscale.com/disco/disco.go generated vendored Normal file
View File

@@ -0,0 +1,272 @@
// Copyright (c) Tailscale Inc & AUTHORS
// SPDX-License-Identifier: BSD-3-Clause
// Package disco contains the discovery message types.
//
// A discovery message is:
//
// Header:
//
// magic [6]byte // “TS💬” (0x54 53 f0 9f 92 ac)
// senderDiscoPub [32]byte // nacl public key
// nonce [24]byte
//
// The recipient then decrypts the bytes following (the nacl box)
// and then the inner payload structure is:
//
// messageType byte (the MessageType constants below)
// messageVersion byte (0 for now; but always ignore bytes at the end)
// message-payload [...]byte
package disco
import (
"encoding/binary"
"errors"
"fmt"
"net"
"net/netip"
"go4.org/mem"
"tailscale.com/types/key"
)
// Magic is the 6 byte header of all discovery messages.
const Magic = "TS💬" // 6 bytes: 0x54 53 f0 9f 92 ac
const keyLen = 32
// NonceLen is the length of the nonces used by nacl box.
const NonceLen = 24
type MessageType byte
const (
TypePing = MessageType(0x01)
TypePong = MessageType(0x02)
TypeCallMeMaybe = MessageType(0x03)
)
const v0 = byte(0)
var errShort = errors.New("short message")
// LooksLikeDiscoWrapper reports whether p looks like it's a packet
// containing an encrypted disco message.
func LooksLikeDiscoWrapper(p []byte) bool {
if len(p) < len(Magic)+keyLen+NonceLen {
return false
}
return string(p[:len(Magic)]) == Magic
}
// Source returns the slice of p that represents the
// disco public key source, and whether p looks like
// a disco message.
func Source(p []byte) (src []byte, ok bool) {
if !LooksLikeDiscoWrapper(p) {
return nil, false
}
return p[len(Magic):][:keyLen], true
}
// Parse parses the encrypted part of the message from inside the
// nacl box.
func Parse(p []byte) (Message, error) {
if len(p) < 2 {
return nil, errShort
}
t, ver, p := MessageType(p[0]), p[1], p[2:]
switch t {
case TypePing:
return parsePing(ver, p)
case TypePong:
return parsePong(ver, p)
case TypeCallMeMaybe:
return parseCallMeMaybe(ver, p)
default:
return nil, fmt.Errorf("unknown message type 0x%02x", byte(t))
}
}
// Message is a discovery message.
type Message interface {
// AppendMarshal appends the message's marshaled representation.
AppendMarshal([]byte) []byte
}
// MessageHeaderLen is the length of a message header, 2 bytes for type and version.
const MessageHeaderLen = 2
// appendMsgHeader appends two bytes (for t and ver) and then also
// dataLen bytes to b, returning the appended slice in all. The
// returned data slice is a subslice of all with just dataLen bytes of
// where the caller will fill in the data.
func appendMsgHeader(b []byte, t MessageType, ver uint8, dataLen int) (all, data []byte) {
// TODO: optimize this?
all = append(b, make([]byte, dataLen+2)...)
all[len(b)] = byte(t)
all[len(b)+1] = ver
data = all[len(b)+2:]
return
}
type Ping struct {
// TxID is a random client-generated per-ping transaction ID.
TxID [12]byte
// NodeKey is allegedly the ping sender's wireguard public key.
// Old clients (~1.16.0 and earlier) don't send this field.
// It shouldn't be trusted by itself, but can be combined with
// netmap data to reduce the discokey:nodekey relation from 1:N to
// 1:1.
NodeKey key.NodePublic
// Padding is the number of 0 bytes at the end of the
// message. (It's used to probe path MTU.)
Padding int
}
// PingLen is the length of a marshalled ping message, without the message
// header or padding.
const PingLen = 12 + key.NodePublicRawLen
func (m *Ping) AppendMarshal(b []byte) []byte {
dataLen := 12
hasKey := !m.NodeKey.IsZero()
if hasKey {
dataLen += key.NodePublicRawLen
}
ret, d := appendMsgHeader(b, TypePing, v0, dataLen+m.Padding)
n := copy(d, m.TxID[:])
if hasKey {
m.NodeKey.AppendTo(d[:n])
}
return ret
}
func parsePing(ver uint8, p []byte) (m *Ping, err error) {
if len(p) < 12 {
return nil, errShort
}
m = new(Ping)
m.Padding = len(p)
p = p[copy(m.TxID[:], p):]
m.Padding -= 12
// Deliberately lax on longer-than-expected messages, for future
// compatibility.
if len(p) >= key.NodePublicRawLen {
m.NodeKey = key.NodePublicFromRaw32(mem.B(p[:key.NodePublicRawLen]))
m.Padding -= key.NodePublicRawLen
}
return m, nil
}
// CallMeMaybe is a message sent only over DERP to request that the recipient try
// to open up a magicsock path back to the sender.
//
// The sender should've already sent UDP packets to the peer to open
// up the stateful firewall mappings inbound.
//
// The recipient may choose to not open a path back, if it's already
// happy with its path. But usually it will.
type CallMeMaybe struct {
// MyNumber is what the peer believes its endpoints are.
//
// Prior to Tailscale 1.4, the endpoints were exchanged purely
// between nodes and the control server.
//
// Starting with Tailscale 1.4, clients advertise their endpoints.
// Older clients won't use this, but newer clients should
// use any endpoints in here that aren't included from control.
//
// Control might have sent stale endpoints if the client was idle
// before contacting us. In that case, the client likely did a STUN
// request immediately before sending the CallMeMaybe to recreate
// their NAT port mapping, and that new good endpoint is included
// in this field, but might not yet be in control's endpoints.
// (And in the future, control will stop distributing endpoints
// when clients are suitably new.)
MyNumber []netip.AddrPort
}
const epLength = 16 + 2 // 16 byte IP address + 2 byte port
func (m *CallMeMaybe) AppendMarshal(b []byte) []byte {
ret, p := appendMsgHeader(b, TypeCallMeMaybe, v0, epLength*len(m.MyNumber))
for _, ipp := range m.MyNumber {
a := ipp.Addr().As16()
copy(p[:], a[:])
binary.BigEndian.PutUint16(p[16:], ipp.Port())
p = p[epLength:]
}
return ret
}
func parseCallMeMaybe(ver uint8, p []byte) (m *CallMeMaybe, err error) {
m = new(CallMeMaybe)
if len(p)%epLength != 0 || ver != 0 || len(p) == 0 {
return m, nil
}
m.MyNumber = make([]netip.AddrPort, 0, len(p)/epLength)
for len(p) > 0 {
var a [16]byte
copy(a[:], p)
m.MyNumber = append(m.MyNumber, netip.AddrPortFrom(
netip.AddrFrom16(a).Unmap(),
binary.BigEndian.Uint16(p[16:18])))
p = p[epLength:]
}
return m, nil
}
// Pong is a response to a Ping.
//
// It includes the sender's source IP + port, so it's effectively a
// STUN response.
type Pong struct {
TxID [12]byte
Src netip.AddrPort // 18 bytes (16+2) on the wire; v4-mapped ipv6 for IPv4
}
// pongLen is the length of a marshalled pong message, without the message
// header or padding.
const pongLen = 12 + 16 + 2
func (m *Pong) AppendMarshal(b []byte) []byte {
ret, d := appendMsgHeader(b, TypePong, v0, pongLen)
d = d[copy(d, m.TxID[:]):]
ip16 := m.Src.Addr().As16()
d = d[copy(d, ip16[:]):]
binary.BigEndian.PutUint16(d, m.Src.Port())
return ret
}
func parsePong(ver uint8, p []byte) (m *Pong, err error) {
if len(p) < pongLen {
return nil, errShort
}
m = new(Pong)
copy(m.TxID[:], p)
p = p[12:]
srcIP, _ := netip.AddrFromSlice(net.IP(p[:16]))
p = p[16:]
port := binary.BigEndian.Uint16(p)
m.Src = netip.AddrPortFrom(srcIP.Unmap(), port)
return m, nil
}
// MessageSummary returns a short summary of m for logging purposes.
func MessageSummary(m Message) string {
switch m := m.(type) {
case *Ping:
return fmt.Sprintf("ping tx=%x padding=%v", m.TxID[:6], m.Padding)
case *Pong:
return fmt.Sprintf("pong tx=%x", m.TxID[:6])
case *CallMeMaybe:
return "call-me-maybe"
default:
return fmt.Sprintf("%#v", m)
}
}

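A small sketch of building a disco Ping payload and parsing it back (illustrative only). This is just the inner, post-decryption message body; on the wire it would be sealed in a nacl box after the "TS💬" + sender key + nonce header described in the package docs above.

package example

import (
	"fmt"

	"tailscale.com/disco"
)

func roundTripPing() error {
	ping := &disco.Ping{TxID: [12]byte{1, 2, 3}}
	wire := ping.AppendMarshal(nil) // 2-byte header (type, version) + 12-byte TxID

	m, err := disco.Parse(wire)
	if err != nil {
		return err
	}
	got, ok := m.(*disco.Ping)
	if !ok {
		return fmt.Errorf("unexpected message type %T", m)
	}
	fmt.Println(disco.MessageSummary(got)) // "ping tx=010203000000 padding=0"
	return nil
}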
17
vendor/tailscale.com/disco/disco_fuzzer.go generated vendored Normal file
View File

@@ -0,0 +1,17 @@
// Copyright (c) Tailscale Inc & AUTHORS
// SPDX-License-Identifier: BSD-3-Clause
//go:build gofuzz
package disco
func Fuzz(data []byte) int {
m, _ := Parse(data)
newBytes := m.AppendMarshal(data)
parsedMarshall, _ := Parse(newBytes)
if m != parsedMarshall {
panic("Parsing error")
}
return 1
}

40
vendor/tailscale.com/disco/pcap.go generated vendored Normal file
View File

@@ -0,0 +1,40 @@
// Copyright (c) Tailscale Inc & AUTHORS
// SPDX-License-Identifier: BSD-3-Clause
package disco
import (
"bytes"
"encoding/binary"
"net/netip"
"tailscale.com/tailcfg"
"tailscale.com/types/key"
)
// ToPCAPFrame marshals the bytes for a pcap record that describe a disco frame.
//
// Warning: Alloc garbage. Acceptable while capturing.
func ToPCAPFrame(src netip.AddrPort, derpNodeSrc key.NodePublic, payload []byte) []byte {
var (
b bytes.Buffer
flag uint8
)
b.Grow(128) // Most disco frames will probably be smaller than this.
if src.Addr() == tailcfg.DerpMagicIPAddr {
flag |= 0x01
}
b.WriteByte(flag) // 1b: flag
derpSrc := derpNodeSrc.Raw32()
b.Write(derpSrc[:]) // 32b: derp public key
binary.Write(&b, binary.LittleEndian, uint16(src.Port())) // 2b: port
addr, _ := src.Addr().MarshalBinary()
binary.Write(&b, binary.LittleEndian, uint16(len(addr))) // 2b: len(addr)
b.Write(addr) // Xb: addr
binary.Write(&b, binary.LittleEndian, uint16(len(payload))) // 2b: len(payload)
b.Write(payload) // Xb: payload
return b.Bytes()
}

79
vendor/tailscale.com/doctor/doctor.go generated vendored Normal file
View File

@@ -0,0 +1,79 @@
// Copyright (c) Tailscale Inc & AUTHORS
// SPDX-License-Identifier: BSD-3-Clause
// Package doctor contains more in-depth healthchecks that can be run to aid in
// diagnosing Tailscale issues.
package doctor
import (
"context"
"sync"
"tailscale.com/types/logger"
)
// Check is the interface defining a singular check.
//
// A check should log information that it gathers using the provided log
// function, and should attempt to make as much progress as possible in error
// conditions.
type Check interface {
// Name should return a name describing this check, in lower-kebab-case
// (i.e. "my-check", not "MyCheck" or "my_check").
Name() string
// Run executes the check, logging diagnostic information to the
// provided logger function.
Run(context.Context, logger.Logf) error
}
// RunChecks runs a list of checks in parallel, and logs any returned errors
// after all checks have returned.
func RunChecks(ctx context.Context, log logger.Logf, checks ...Check) {
if len(checks) == 0 {
return
}
type namedErr struct {
name string
err error
}
errs := make(chan namedErr, len(checks))
var wg sync.WaitGroup
wg.Add(len(checks))
for _, check := range checks {
go func(c Check) {
defer wg.Done()
plog := logger.WithPrefix(log, c.Name()+": ")
errs <- namedErr{
name: c.Name(),
err: c.Run(ctx, plog),
}
}(check)
}
wg.Wait()
close(errs)
for n := range errs {
if n.err == nil {
continue
}
log("check %s: %v", n.name, n.err)
}
}
// CheckFunc creates a Check from a name and a function.
func CheckFunc(name string, run func(context.Context, logger.Logf) error) Check {
return checkFunc{name, run}
}
type checkFunc struct {
name string
run func(context.Context, logger.Logf) error
}
func (c checkFunc) Name() string { return c.name }
func (c checkFunc) Run(ctx context.Context, log logger.Logf) error { return c.run(ctx, log) }
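
A minimal usage sketch of the API above, wiring CheckFunc and RunChecks to the standard library logger; the "hello" check is invented purely for illustration.

package main

import (
	"context"
	"log"
	"time"

	"tailscale.com/doctor"
	"tailscale.com/types/logger"
)

func main() {
	ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
	defer cancel()

	// An invented check; real checks live in subpackages such as
	// doctor/routetable and doctor/ethtool.
	hello := doctor.CheckFunc("hello", func(_ context.Context, logf logger.Logf) error {
		logf("hello from a diagnostic check")
		return nil
	})

	// RunChecks runs all checks in parallel, prefixes each check's log lines
	// with its name, and reports any errors once every check has finished.
	doctor.RunChecks(ctx, log.Printf, hello)
}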

23
vendor/tailscale.com/doctor/ethtool/ethtool.go generated vendored Normal file
View File

@@ -0,0 +1,23 @@
// Copyright (c) Tailscale Inc & AUTHORS
// SPDX-License-Identifier: BSD-3-Clause
// Package ethtool provides a doctor.Check that prints diagnostic information
// obtained from the 'ethtool' utility on the current system.
package ethtool
import (
"context"
"tailscale.com/types/logger"
)
// Check implements the doctor.Check interface.
type Check struct{}
func (Check) Name() string {
return "ethtool"
}
func (Check) Run(_ context.Context, logf logger.Logf) error {
return ethtoolImpl(logf)
}

78
vendor/tailscale.com/doctor/ethtool/ethtool_linux.go generated vendored Normal file
View File

@@ -0,0 +1,78 @@
// Copyright (c) Tailscale Inc & AUTHORS
// SPDX-License-Identifier: BSD-3-Clause
package ethtool
import (
"net/netip"
"sort"
"github.com/safchain/ethtool"
"tailscale.com/net/netmon"
"tailscale.com/types/logger"
"tailscale.com/util/set"
)
func ethtoolImpl(logf logger.Logf) error {
et, err := ethtool.NewEthtool()
if err != nil {
logf("could not create ethtool: %v", err)
return nil
}
defer et.Close()
netmon.ForeachInterface(func(iface netmon.Interface, _ []netip.Prefix) {
ilogf := logger.WithPrefix(logf, iface.Name+": ")
features, err := et.Features(iface.Name)
if err == nil {
enabled := []string{}
for feature, value := range features {
if value {
enabled = append(enabled, feature)
}
}
sort.Strings(enabled)
ilogf("features: %v", enabled)
} else {
ilogf("features: error: %v", err)
}
stats, err := et.Stats(iface.Name)
if err == nil {
printStats(ilogf, stats)
} else {
ilogf("stats: error: %v", err)
}
})
return nil
}
// Stats that should be printed if non-zero
var nonzeroStats = set.SetOf([]string{
// AWS ENA driver statistics; see:
// https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/monitoring-network-performance-ena.html
"bw_in_allowance_exceeded",
"bw_out_allowance_exceeded",
"conntrack_allowance_exceeded",
"linklocal_allowance_exceeded",
"pps_allowance_exceeded",
})
// Stats that should be printed if zero
var zeroStats = set.SetOf([]string{
// AWS ENA driver statistics; see:
// https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/monitoring-network-performance-ena.html
"conntrack_allowance_available",
})
func printStats(logf logger.Logf, stats map[string]uint64) {
for name, value := range stats {
if value != 0 && nonzeroStats.Contains(name) {
logf("stats: warning: %s = %d > 0", name, value)
}
if value == 0 && zeroStats.Contains(name) {
logf("stats: warning: %s = %d == 0", name, value)
}
}
}
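
To make the thresholding above concrete, here is a small standalone sketch of the same warn-if-nonzero / warn-if-zero logic over a sample stats map; the counter values are invented and stand in for what et.Stats(iface.Name) would return.

package main

import (
	"log"

	"tailscale.com/util/set"
)

func main() {
	// Counters that indicate trouble when above zero (an AWS ENA allowance
	// was exceeded and packets were dropped)...
	warnIfNonzero := set.SetOf([]string{"pps_allowance_exceeded", "bw_out_allowance_exceeded"})
	// ...and counters that indicate trouble once they reach zero.
	warnIfZero := set.SetOf([]string{"conntrack_allowance_available"})

	stats := map[string]uint64{ // invented sample values
		"pps_allowance_exceeded":        12,
		"bw_out_allowance_exceeded":     0,
		"conntrack_allowance_available": 0,
	}
	for name, value := range stats {
		if value != 0 && warnIfNonzero.Contains(name) {
			log.Printf("stats: warning: %s = %d > 0", name, value)
		}
		if value == 0 && warnIfZero.Contains(name) {
			log.Printf("stats: warning: %s = %d == 0", name, value)
		}
	}
}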

17
vendor/tailscale.com/doctor/ethtool/ethtool_other.go generated vendored Normal file
View File

@@ -0,0 +1,17 @@
// Copyright (c) Tailscale Inc & AUTHORS
// SPDX-License-Identifier: BSD-3-Clause
//go:build !linux
package ethtool
import (
"runtime"
"tailscale.com/types/logger"
)
func ethtoolImpl(logf logger.Logf) error {
logf("unsupported on %s/%s", runtime.GOOS, runtime.GOARCH)
return nil
}

59
vendor/tailscale.com/doctor/permissions/permissions.go generated vendored Normal file
View File

@@ -0,0 +1,59 @@
// Copyright (c) Tailscale Inc & AUTHORS
// SPDX-License-Identifier: BSD-3-Clause
// Package permissions provides a doctor.Check that prints the process
// permissions for the running process.
package permissions
import (
"context"
"fmt"
"os/user"
"strings"
"golang.org/x/exp/constraints"
"tailscale.com/types/logger"
)
// Check implements the doctor.Check interface.
type Check struct{}
func (Check) Name() string {
return "permissions"
}
func (Check) Run(_ context.Context, logf logger.Logf) error {
return permissionsImpl(logf)
}
//lint:ignore U1000 used in non-windows implementations.
func formatUserID[T constraints.Integer](id T) string {
idStr := fmt.Sprint(id)
if uu, err := user.LookupId(idStr); err != nil {
return idStr + "(<unknown>)"
} else {
return fmt.Sprintf("%s(%q)", idStr, uu.Username)
}
}
//lint:ignore U1000 used in non-windows implementations.
func formatGroupID[T constraints.Integer](id T) string {
idStr := fmt.Sprint(id)
if g, err := user.LookupGroupId(idStr); err != nil {
return idStr + "(<unknown>)"
} else {
return fmt.Sprintf("%s(%q)", idStr, g.Name)
}
}
//lint:ignore U1000 used in non-windows implementations.
func formatGroups[T constraints.Integer](groups []T) string {
var buf strings.Builder
for i, group := range groups {
if i > 0 {
buf.WriteByte(',')
}
buf.WriteString(formatGroupID(group))
}
return buf.String()
}

View File

@@ -0,0 +1,23 @@
// Copyright (c) Tailscale Inc & AUTHORS
// SPDX-License-Identifier: BSD-3-Clause
//go:build darwin || freebsd || openbsd
package permissions
import (
"golang.org/x/sys/unix"
"tailscale.com/types/logger"
)
func permissionsImpl(logf logger.Logf) error {
groups, _ := unix.Getgroups()
logf("uid=%s euid=%s gid=%s egid=%s groups=%s",
formatUserID(unix.Getuid()),
formatUserID(unix.Geteuid()),
formatGroupID(unix.Getgid()),
formatGroupID(unix.Getegid()),
formatGroups(groups),
)
return nil
}

View File

@@ -0,0 +1,62 @@
// Copyright (c) Tailscale Inc & AUTHORS
// SPDX-License-Identifier: BSD-3-Clause
//go:build linux
package permissions
import (
"fmt"
"strings"
"unsafe"
"golang.org/x/sys/unix"
"tailscale.com/types/logger"
)
func permissionsImpl(logf logger.Logf) error {
// NOTE: getresuid and getresgid never fail unless passed an
// invalid address.
var ruid, euid, suid uint64
unix.Syscall(unix.SYS_GETRESUID,
uintptr(unsafe.Pointer(&ruid)),
uintptr(unsafe.Pointer(&euid)),
uintptr(unsafe.Pointer(&suid)),
)
var rgid, egid, sgid uint64
unix.Syscall(unix.SYS_GETRESGID,
uintptr(unsafe.Pointer(&rgid)),
uintptr(unsafe.Pointer(&egid)),
uintptr(unsafe.Pointer(&sgid)),
)
groups, _ := unix.Getgroups()
var buf strings.Builder
fmt.Fprintf(&buf, "ruid=%s euid=%s suid=%s rgid=%s egid=%s sgid=%s groups=%s",
formatUserID(ruid), formatUserID(euid), formatUserID(suid),
formatGroupID(rgid), formatGroupID(egid), formatGroupID(sgid),
formatGroups(groups),
)
// Get process capabilities
var (
capHeader = unix.CapUserHeader{
Version: unix.LINUX_CAPABILITY_VERSION_3,
Pid: 0, // 0 means 'ourselves'
}
capData unix.CapUserData
)
if err := unix.Capget(&capHeader, &capData); err != nil {
fmt.Fprintf(&buf, " caperr=%v", err)
} else {
fmt.Fprintf(&buf, " cap_effective=%08x cap_permitted=%08x cap_inheritable=%08x",
capData.Effective, capData.Permitted, capData.Inheritable,
)
}
logf("%s", buf.String())
return nil
}

View File

@@ -0,0 +1,17 @@
// Copyright (c) Tailscale Inc & AUTHORS
// SPDX-License-Identifier: BSD-3-Clause
//go:build !(linux || darwin || freebsd || openbsd)
package permissions
import (
"runtime"
"tailscale.com/types/logger"
)
func permissionsImpl(logf logger.Logf) error {
logf("unsupported on %s/%s", runtime.GOOS, runtime.GOARCH)
return nil
}

34
vendor/tailscale.com/doctor/routetable/routetable.go generated vendored Normal file
View File

@@ -0,0 +1,34 @@
// Copyright (c) Tailscale Inc & AUTHORS
// SPDX-License-Identifier: BSD-3-Clause
// Package routetable provides a doctor.Check that dumps the current system's
// route table to the log.
package routetable
import (
"context"
"tailscale.com/net/routetable"
"tailscale.com/types/logger"
)
// MaxRoutes is the maximum number of routes that will be displayed.
const MaxRoutes = 1000
// Check implements the doctor.Check interface.
type Check struct{}
func (Check) Name() string {
return "routetable"
}
func (Check) Run(_ context.Context, logf logger.Logf) error {
rs, err := routetable.Get(MaxRoutes)
if err != nil {
return err
}
for _, r := range rs {
logf("%s", r)
}
return nil
}

44
vendor/tailscale.com/drive/drive_clone.go generated vendored Normal file
View File

@@ -0,0 +1,44 @@
// Copyright (c) Tailscale Inc & AUTHORS
// SPDX-License-Identifier: BSD-3-Clause
// Code generated by tailscale.com/cmd/cloner; DO NOT EDIT.
package drive
// Clone makes a deep copy of Share.
// The result aliases no memory with the original.
func (src *Share) Clone() *Share {
if src == nil {
return nil
}
dst := new(Share)
*dst = *src
dst.BookmarkData = append(src.BookmarkData[:0:0], src.BookmarkData...)
return dst
}
// A compilation failure here means this code must be regenerated, with the command at the top of this file.
var _ShareCloneNeedsRegeneration = Share(struct {
Name string
Path string
As string
BookmarkData []byte
}{})
// Clone duplicates src into dst and reports whether it succeeded.
// To succeed, <src, dst> must be of types <*T, *T> or <*T, **T>,
// where T is one of Share.
func Clone(dst, src any) bool {
switch src := src.(type) {
case *Share:
switch dst := dst.(type) {
case *Share:
*dst = *src.Clone()
return true
case **Share:
*dst = src.Clone()
return true
}
}
return false
}

75
vendor/tailscale.com/drive/drive_view.go generated vendored Normal file
View File

@@ -0,0 +1,75 @@
// Copyright (c) Tailscale Inc & AUTHORS
// SPDX-License-Identifier: BSD-3-Clause
// Code generated by tailscale/cmd/viewer; DO NOT EDIT.
package drive
import (
"encoding/json"
"errors"
"tailscale.com/types/views"
)
//go:generate go run tailscale.com/cmd/cloner -clonefunc=true -type=Share
// View returns a readonly view of Share.
func (p *Share) View() ShareView {
return ShareView{ж: p}
}
// ShareView provides a read-only view over Share.
//
// Its methods should only be called if `Valid()` returns true.
type ShareView struct {
// ж is the underlying mutable value, named with a hard-to-type
// character that looks pointy like a pointer.
// It is named distinctively to make you think of how dangerous it is to escape
// to callers. You must not let callers be able to mutate it.
ж *Share
}
// Valid reports whether underlying value is non-nil.
func (v ShareView) Valid() bool { return v.ж != nil }
// AsStruct returns a clone of the underlying value which aliases no memory with
// the original.
func (v ShareView) AsStruct() *Share {
if v.ж == nil {
return nil
}
return v.ж.Clone()
}
func (v ShareView) MarshalJSON() ([]byte, error) { return json.Marshal(v.ж) }
func (v *ShareView) UnmarshalJSON(b []byte) error {
if v.ж != nil {
return errors.New("already initialized")
}
if len(b) == 0 {
return nil
}
var x Share
if err := json.Unmarshal(b, &x); err != nil {
return err
}
v.ж = &x
return nil
}
func (v ShareView) Name() string { return v.ж.Name }
func (v ShareView) Path() string { return v.ж.Path }
func (v ShareView) As() string { return v.ж.As }
func (v ShareView) BookmarkData() views.ByteSlice[[]byte] {
return views.ByteSliceOf(v.ж.BookmarkData)
}
// A compilation failure here means this code must be regenerated, with the command at the top of this file.
var _ShareViewNeedsRegeneration = Share(struct {
Name string
Path string
As string
BookmarkData []byte
}{})
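
A brief usage sketch of the generated view pattern above, assuming only the exported Share and ShareView API shown in this package; the share name and path are invented.

package main

import (
	"fmt"

	"tailscale.com/drive"
)

func main() {
	s := &drive.Share{
		Name: "docs",
		Path: "/home/user/docs", // invented path
	}

	// ShareView is a read-only wrapper: callers can read fields but cannot
	// mutate the underlying Share through it.
	v := s.View()
	fmt.Println(v.Valid(), v.Name(), v.Path())

	// AsStruct returns a deep copy, so edits to the copy don't affect s.
	clone := v.AsStruct()
	clone.Name = "docs-copy"
	fmt.Println(s.Name, clone.Name) // docs docs-copy
}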

37
vendor/tailscale.com/drive/local.go generated vendored Normal file
View File

@@ -0,0 +1,37 @@
// Copyright (c) Tailscale Inc & AUTHORS
// SPDX-License-Identifier: BSD-3-Clause
// Package drive provides a filesystem that allows sharing folders between
// Tailscale nodes using WebDAV. The actual implementation of the core Taildrive
// functionality lives in package driveimpl. These packages are separated to
// allow users of Taildrive to refer to the interfaces without having a hard
// dependency on Taildrive, so that programs which don't actually use Taildrive can
// avoid its transitive dependencies.
package drive
import (
"net"
"net/http"
)
// Remote represents a remote Taildrive node.
type Remote struct {
Name string
URL string
Available func() bool
}
// FileSystemForLocal is the Taildrive filesystem exposed to local clients. It
// provides a unified WebDAV interface to remote Taildrive shares on other nodes.
type FileSystemForLocal interface {
// HandleConn handles connections from local WebDAV clients
HandleConn(conn net.Conn, remoteAddr net.Addr) error
// SetRemotes sets the complete set of remotes on the given tailnet domain
// using a map of name -> url. If transport is specified, that transport
// will be used to connect to these remotes.
SetRemotes(domain string, remotes []*Remote, transport http.RoundTripper)
// Close() stops serving the WebDAV content
Close() error
}

133
vendor/tailscale.com/drive/remote.go generated vendored Normal file
View File

@@ -0,0 +1,133 @@
// Copyright (c) Tailscale Inc & AUTHORS
// SPDX-License-Identifier: BSD-3-Clause
package drive
//go:generate go run tailscale.com/cmd/viewer --type=Share --clonefunc
import (
"bytes"
"errors"
"net/http"
"regexp"
"strings"
)
var (
// DisallowShareAs forcibly disables sharing as a specific user, only used
// for testing.
DisallowShareAs = false
ErrDriveNotEnabled = errors.New("Taildrive not enabled")
	ErrInvalidShareName = errors.New("Share names may only contain the letters a-z, numbers 0-9, underscore _, parentheses (), or spaces")
)
var (
shareNameRegex = regexp.MustCompile(`^[a-z0-9_\(\) ]+$`)
)
// AllowShareAs reports whether sharing files as a specific user is allowed.
func AllowShareAs() bool {
return !DisallowShareAs && doAllowShareAs()
}
// Share configures a folder to be shared through drive.
type Share struct {
// Name is how this share appears on remote nodes.
Name string `json:"name,omitempty"`
// Path is the path to the directory on this machine that's being shared.
Path string `json:"path,omitempty"`
// As is the UNIX or Windows username of the local account used for this
// share. File read/write permissions are enforced based on this username.
// Can be left blank to use the default value of "whoever is running the
// Tailscale GUI".
As string `json:"who,omitempty"`
// BookmarkData contains security-scoped bookmark data for the Sandboxed
// Mac application. The Sandboxed Mac application gains permission to
// access the Share's folder as a result of a user selecting it in a file
// picker. In order to retain access to it across restarts, it needs to
// hold on to a security-scoped bookmark. That bookmark is stored here. See
// https://developer.apple.com/documentation/security/app_sandbox/accessing_files_from_the_macos_app_sandbox#4144043
BookmarkData []byte `json:"bookmarkData,omitempty"`
}
func ShareViewsEqual(a, b ShareView) bool {
if !a.Valid() && !b.Valid() {
return true
}
if !a.Valid() || !b.Valid() {
return false
}
return a.Name() == b.Name() && a.Path() == b.Path() && a.As() == b.As() && a.BookmarkData().Equal(b.ж.BookmarkData)
}
func SharesEqual(a, b *Share) bool {
if a == nil && b == nil {
return true
}
if a == nil || b == nil {
return false
}
return a.Name == b.Name && a.Path == b.Path && a.As == b.As && bytes.Equal(a.BookmarkData, b.BookmarkData)
}
func CompareShares(a, b *Share) int {
if a == nil && b == nil {
return 0
}
if a == nil {
return -1
}
if b == nil {
return 1
}
return strings.Compare(a.Name, b.Name)
}
// FileSystemForRemote is the drive filesystem exposed to remote nodes. It
// provides a unified WebDAV interface to local directories that have been
// shared.
type FileSystemForRemote interface {
// SetFileServerAddr sets the address of the file server to which we
	// should proxy. This is used on platforms like Windows and sandboxed
	// macOS, where we can't spawn user-specific sub-processes and instead
// rely on the UI application that's already running as an unprivileged
// user to access the filesystem for us.
//
// Note that this includes both the file server's secret token and its
// address, delimited by a pipe |.
SetFileServerAddr(addr string)
// SetShares sets the complete set of shares exposed by this node. If
// AllowShareAs() reports true, we will use one subprocess per user to
// access the filesystem (see userServer). Otherwise, we will use the file
// server configured via SetFileServerAddr.
SetShares(shares []*Share)
// ServeHTTPWithPerms behaves like the similar method from http.Handler but
// also accepts a Permissions map that captures the permissions of the
// connecting node.
ServeHTTPWithPerms(permissions Permissions, w http.ResponseWriter, r *http.Request)
// Close() stops serving the WebDAV content
Close() error
}
// NormalizeShareName normalizes the given share name and returns an error if
// it contains any disallowed characters.
func NormalizeShareName(name string) (string, error) {
// Force all share names to lowercase to avoid potential incompatibilities
// with clients that don't support case-sensitive filenames.
name = strings.ToLower(name)
// Trim whitespace
name = strings.TrimSpace(name)
if !shareNameRegex.MatchString(name) {
return "", ErrInvalidShareName
}
return name, nil
}
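
A short sketch of how a caller might use NormalizeShareName and the comparison helpers above; the share names and path are invented.

package main

import (
	"fmt"

	"tailscale.com/drive"
)

func main() {
	// Valid names are trimmed and forced to lowercase.
	name, err := drive.NormalizeShareName("  My Docs  ")
	fmt.Println(name, err) // "my docs" <nil>

	// Characters outside the allowed set (here a slash) are rejected.
	_, err = drive.NormalizeShareName("bad/name")
	fmt.Println(err) // prints the ErrInvalidShareName message

	a := &drive.Share{Name: "my docs", Path: "/srv/docs"}
	b := a.Clone()
	fmt.Println(drive.SharesEqual(a, b), drive.CompareShares(a, b)) // true 0
}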

13
vendor/tailscale.com/drive/remote_nonunix.go generated vendored Normal file
View File

@@ -0,0 +1,13 @@
// Copyright (c) Tailscale Inc & AUTHORS
// SPDX-License-Identifier: BSD-3-Clause
//go:build !unix
package drive
func doAllowShareAs() bool {
// On non-UNIX platforms, we use the GUI application (e.g. Windows taskbar
// icon) to access the filesystem as whatever unprivileged user is running
// the GUI app, so we cannot allow sharing as a different user.
return false
}

65
vendor/tailscale.com/drive/remote_permissions.go generated vendored Normal file
View File

@@ -0,0 +1,65 @@
// Copyright (c) Tailscale Inc & AUTHORS
// SPDX-License-Identifier: BSD-3-Clause
package drive
import (
"encoding/json"
"fmt"
)
type Permission uint8
const (
PermissionNone Permission = iota
PermissionReadOnly
PermissionReadWrite
)
const (
accessReadOnly = "ro"
accessReadWrite = "rw"
wildcardShare = "*"
)
// Permissions represents the set of permissions for a given principal to a
// set of shares.
type Permissions map[string]Permission
type grant struct {
Shares []string
Access string
}
// ParsePermissions builds a Permissions map from a list of raw grants.
func ParsePermissions(rawGrants [][]byte) (Permissions, error) {
permissions := make(Permissions)
for _, rawGrant := range rawGrants {
var g grant
err := json.Unmarshal(rawGrant, &g)
if err != nil {
return nil, fmt.Errorf("unmarshal raw grants %s: %v", rawGrant, err)
}
for _, share := range g.Shares {
existingPermission := permissions[share]
permission := PermissionReadOnly
if g.Access == accessReadWrite {
permission = PermissionReadWrite
}
if permission > existingPermission {
permissions[share] = permission
}
}
}
return permissions, nil
}
func (p Permissions) For(share string) Permission {
specific := p[share]
wildcard := p[wildcardShare]
if specific > wildcard {
return specific
}
return wildcard
}
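
A worked sketch of the grant format consumed by ParsePermissions above; the JSON grants and share names are invented for illustration.

package main

import (
	"fmt"

	"tailscale.com/drive"
)

func main() {
	// Raw grants as they might arrive from policy: a read-write grant for a
	// specific share plus a read-only wildcard grant.
	rawGrants := [][]byte{
		[]byte(`{"Shares": ["docs"], "Access": "rw"}`),
		[]byte(`{"Shares": ["*"], "Access": "ro"}`),
	}

	perms, err := drive.ParsePermissions(rawGrants)
	if err != nil {
		panic(err)
	}

	// The specific grant wins where it is stronger; everything else falls
	// back to the wildcard.
	fmt.Println(perms.For("docs") == drive.PermissionReadWrite) // true
	fmt.Println(perms.For("music") == drive.PermissionReadOnly) // true
}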

Some files were not shown because too many files have changed in this diff.