diff --git a/.gitignore b/.gitignore index e8462a28bb56d..47d2bbe959ae1 100644 --- a/.gitignore +++ b/.gitignore @@ -44,4 +44,8 @@ client/web/build/assets /gocross /dist -.DS_Store +# Ignore xcode userstate and workspace data +*.xcuserstate +*.xcworkspacedata +/tstest/tailmac/bin +/tstest/tailmac/build diff --git a/Dockerfile b/Dockerfile index 8166000eda6fc..4ad3d88d9577a 100644 --- a/Dockerfile +++ b/Dockerfile @@ -27,7 +27,7 @@ # $ docker exec tailscaled tailscale status -FROM golang:1.22-alpine AS build-env +FROM golang:1.23-alpine AS build-env WORKDIR /go/src/tailscale diff --git a/Makefile b/Makefile index eae85f0cb19e0..98c3d36cc1c9e 100644 --- a/Makefile +++ b/Makefile @@ -117,7 +117,8 @@ sshintegrationtest: ## Run the SSH integration tests in various Docker container echo "Testing on ubuntu:focal" && docker build --build-arg="BASE=ubuntu:focal" -t ssh-ubuntu-focal ssh/tailssh/testcontainers && \ echo "Testing on ubuntu:jammy" && docker build --build-arg="BASE=ubuntu:jammy" -t ssh-ubuntu-jammy ssh/tailssh/testcontainers && \ echo "Testing on ubuntu:mantic" && docker build --build-arg="BASE=ubuntu:mantic" -t ssh-ubuntu-mantic ssh/tailssh/testcontainers && \ - echo "Testing on ubuntu:noble" && docker build --build-arg="BASE=ubuntu:noble" -t ssh-ubuntu-noble ssh/tailssh/testcontainers + echo "Testing on ubuntu:noble" && docker build --build-arg="BASE=ubuntu:noble" -t ssh-ubuntu-noble ssh/tailssh/testcontainers && \ + echo "Testing on alpine:latest" && docker build --build-arg="BASE=alpine:latest" -t ssh-alpine-latest ssh/tailssh/testcontainers help: ## Show this help @echo "\nSpecify a command. The choices are:\n" diff --git a/README.md b/README.md index 458ff79ea98e1..4627d9780f0b5 100644 --- a/README.md +++ b/README.md @@ -37,7 +37,7 @@ not open source. ## Building -We always require the latest Go release, currently Go 1.22. (While we build +We always require the latest Go release, currently Go 1.23. 
(While we build releases with our [Go fork](https://github.com/tailscale/go/), its use is not required.) diff --git a/VERSION.txt b/VERSION.txt index 22d6771a47d93..dc87e8af82f69 100644 --- a/VERSION.txt +++ b/VERSION.txt @@ -1 +1 @@ -1.72.1 +1.74.0 diff --git a/client/tailscale/acl.go b/client/tailscale/acl.go index fc672ded881b4..8d8bdfc86baf1 100644 --- a/client/tailscale/acl.go +++ b/client/tailscale/acl.go @@ -19,6 +19,7 @@ import ( // Only one of Src/Dst or Users/Ports may be specified. type ACLRow struct { Action string `json:"action,omitempty"` // valid values: "accept" + Proto string `json:"proto,omitempty"` // protocol Users []string `json:"users,omitempty"` // old name for src Ports []string `json:"ports,omitempty"` // old name for dst Src []string `json:"src,omitempty"` @@ -31,6 +32,7 @@ type ACLRow struct { type ACLTest struct { Src string `json:"src,omitempty"` // source User string `json:"user,omitempty"` // old name for source + Proto string `json:"proto,omitempty"` // protocol Accept []string `json:"accept,omitempty"` // expected destination ip:port that user can access Deny []string `json:"deny,omitempty"` // expected destination ip:port that user cannot access diff --git a/client/tailscale/apitype/apitype.go b/client/tailscale/apitype/apitype.go index 1fcd70a51a6f7..81879aac31ded 100644 --- a/client/tailscale/apitype/apitype.go +++ b/client/tailscale/apitype/apitype.go @@ -57,3 +57,11 @@ type ExitNodeSuggestionResponse struct { Name string Location tailcfg.LocationView `json:",omitempty"` } + +// DNSOSConfig mimics dns.OSConfig without forcing us to import the entire dns package +// into the CLI. 
+type DNSOSConfig struct { + Nameservers []string + SearchDomains []string + MatchDomains []string +} diff --git a/client/tailscale/localclient.go b/client/tailscale/localclient.go index 6f27e56b86fc4..29e28a1549a7d 100644 --- a/client/tailscale/localclient.go +++ b/client/tailscale/localclient.go @@ -353,6 +353,12 @@ func (lc *LocalClient) DaemonMetrics(ctx context.Context) ([]byte, error) { return lc.get200(ctx, "/localapi/v0/metrics") } +// UserMetrics returns the user metrics in +// the Prometheus text exposition format. +func (lc *LocalClient) UserMetrics(ctx context.Context) ([]byte, error) { + return lc.get200(ctx, "/localapi/v0/usermetrics") +} + // IncrementCounter increments the value of a Tailscale daemon's counter // metric by the given delta. If the metric has yet to exist, a new counter // metric is created and initialized to delta. @@ -807,6 +813,18 @@ func (lc *LocalClient) EditPrefs(ctx context.Context, mp *ipn.MaskedPrefs) (*ipn return decodeJSON[*ipn.Prefs](body) } +func (lc *LocalClient) GetDNSOSConfig(ctx context.Context) (*apitype.DNSOSConfig, error) { + body, err := lc.get200(ctx, "/localapi/v0/dns-osconfig") + if err != nil { + return nil, err + } + var osCfg apitype.DNSOSConfig + if err := json.Unmarshal(body, &osCfg); err != nil { + return nil, fmt.Errorf("invalid dns.OSConfig: %w", err) + } + return &osCfg, nil +} + // StartLoginInteractive starts an interactive login. 
func (lc *LocalClient) StartLoginInteractive(ctx context.Context) error { _, err := lc.send(ctx, "POST", "/localapi/v0/login-interactive", http.StatusNoContent, nil) diff --git a/client/tailscale/required_version.go b/client/tailscale/required_version.go index ff15fc78a0f79..d6bca1c6d8ff9 100644 --- a/client/tailscale/required_version.go +++ b/client/tailscale/required_version.go @@ -1,10 +1,10 @@ // Copyright (c) Tailscale Inc & AUTHORS // SPDX-License-Identifier: BSD-3-Clause -//go:build !go1.21 +//go:build !go1.23 package tailscale func init() { - you_need_Go_1_21_to_compile_Tailscale() + you_need_Go_1_23_to_compile_Tailscale() } diff --git a/client/web/web.go b/client/web/web.go index 9f9465db97c48..b914070b25af0 100644 --- a/client/web/web.go +++ b/client/web/web.go @@ -283,6 +283,12 @@ func (s *Server) serve(w http.ResponseWriter, r *http.Request) { } } + if r.URL.Path == "/metrics" { + r.URL.Path = "/api/local/v0/usermetrics" + s.proxyRequestToLocalAPI(w, r) + return + } + if strings.HasPrefix(r.URL.Path, "/api/") { switch { case r.URL.Path == "/api/auth" && r.Method == httpm.GET: diff --git a/client/web/yarn.lock b/client/web/yarn.lock index fb96d8eeb4298..2c8fca5e53e9d 100644 --- a/client/web/yarn.lock +++ b/client/web/yarn.lock @@ -5382,9 +5382,9 @@ wrappy@1: integrity sha512-l4Sp/DRseor9wL6EvV2+TuQn63dMkPjZ/sp9XkghTEbV9KlPS1xUsZ3u7/IQO4wxtcFB4bgpQPRcR3QCvezPcQ== ws@^8.14.2: - version "8.14.2" - resolved "https://registry.yarnpkg.com/ws/-/ws-8.14.2.tgz#6c249a806eb2db7a20d26d51e7709eab7b2e6c7f" - integrity sha512-wEBG1ftX4jcglPxgFCMJmZ2PLtSbJ2Peg6TmpJFTbe9GZYOQCDPdMYu/Tm0/bGZkw8paZnJY45J4K2PZrLYq8g== + version "8.17.1" + resolved "https://registry.yarnpkg.com/ws/-/ws-8.17.1.tgz#9293da530bb548febc95371d90f9c878727d919b" + integrity sha512-6XQFvXTkbfUOZOKKILFG1PDK2NDQs4azKQl26T0YS5CxqWLgXajbPZ+h4gZekJyRqFU8pvnbAbbs/3TgRPy+GQ== xml-name-validator@^5.0.0: version "5.0.0" diff --git a/cmd/cloner/cloner.go b/cmd/cloner/cloner.go index 
db354af3be273..a1ffc30feafb2 100644 --- a/cmd/cloner/cloner.go +++ b/cmd/cloner/cloner.go @@ -47,7 +47,7 @@ func main() { it := codegen.NewImportTracker(pkg.Types) buf := new(bytes.Buffer) for _, typeName := range typeNames { - typ, ok := namedTypes[typeName] + typ, ok := namedTypes[typeName].(*types.Named) if !ok { log.Fatalf("could not find type %s", typeName) } @@ -115,7 +115,7 @@ func gen(buf *bytes.Buffer, it *codegen.ImportTracker, typ *types.Named) { if !codegen.ContainsPointers(ft) || codegen.HasNoClone(t.Tag(i)) { continue } - if named, _ := ft.(*types.Named); named != nil { + if named, _ := codegen.NamedTypeOf(ft); named != nil { if codegen.IsViewType(ft) { writef("dst.%s = src.%s", fname, fname) continue @@ -161,7 +161,7 @@ func gen(buf *bytes.Buffer, it *codegen.ImportTracker, typ *types.Named) { case *types.Pointer: base := ft.Elem() hasPtrs := codegen.ContainsPointers(base) - if named, _ := base.(*types.Named); named != nil && hasPtrs { + if named, _ := codegen.NamedTypeOf(base); named != nil && hasPtrs { writef("dst.%s = src.%s.Clone()", fname, fname) continue } diff --git a/cmd/containerboot/kube.go b/cmd/containerboot/kube.go index 0415f7d021fed..ec2d3ef1236d4 100644 --- a/cmd/containerboot/kube.go +++ b/cmd/containerboot/kube.go @@ -15,14 +15,15 @@ import ( "net/netip" "os" - "tailscale.com/kube" + "tailscale.com/kube/kubeapi" + "tailscale.com/kube/kubeclient" "tailscale.com/tailcfg" ) // storeDeviceID writes deviceID to 'device_id' data field of the named // Kubernetes Secret. 
func storeDeviceID(ctx context.Context, secretName string, deviceID tailcfg.StableNodeID) error { - s := &kube.Secret{ + s := &kubeapi.Secret{ Data: map[string][]byte{ "device_id": []byte(deviceID), }, @@ -42,7 +43,7 @@ func storeDeviceEndpoints(ctx context.Context, secretName string, fqdn string, a return err } - s := &kube.Secret{ + s := &kubeapi.Secret{ Data: map[string][]byte{ "device_fqdn": []byte(fqdn), "device_ips": deviceIPs, @@ -55,14 +56,14 @@ func storeDeviceEndpoints(ctx context.Context, secretName string, fqdn string, a // secret. No-op if there is no authkey in the secret. func deleteAuthKey(ctx context.Context, secretName string) error { // m is a JSON Patch data structure, see https://jsonpatch.com/ or RFC 6902. - m := []kube.JSONPatch{ + m := []kubeclient.JSONPatch{ { Op: "remove", Path: "/data/authkey", }, } if err := kc.JSONPatchSecret(ctx, secretName, m); err != nil { - if s, ok := err.(*kube.Status); ok && s.Code == http.StatusUnprocessableEntity { + if s, ok := err.(*kubeapi.Status); ok && s.Code == http.StatusUnprocessableEntity { // This is kubernetes-ese for "the field you asked to // delete already doesn't exist", aka no-op. return nil @@ -72,7 +73,7 @@ func deleteAuthKey(ctx context.Context, secretName string) error { return nil } -var kc kube.Client +var kc kubeclient.Client // setupKube is responsible for doing any necessary configuration and checks to // ensure that tailscale state storage and authentication mechanism will work on @@ -88,12 +89,12 @@ func (cfg *settings) setupKube(ctx context.Context) error { cfg.KubernetesCanPatch = canPatch s, err := kc.GetSecret(ctx, cfg.KubeSecret) - if err != nil && kube.IsNotFoundErr(err) && !canCreate { + if err != nil && kubeclient.IsNotFoundErr(err) && !canCreate { return fmt.Errorf("Tailscale state Secret %s does not exist and we don't have permissions to create it. 
"+ "If you intend to store tailscale state elsewhere than a Kubernetes Secret, "+ "you can explicitly set TS_KUBE_SECRET env var to an empty string. "+ "Else ensure that RBAC is set up that allows the service account associated with this installation to create Secrets.", cfg.KubeSecret) - } else if err != nil && !kube.IsNotFoundErr(err) { + } else if err != nil && !kubeclient.IsNotFoundErr(err) { return fmt.Errorf("Getting Tailscale state Secret %s: %v", cfg.KubeSecret, err) } @@ -128,10 +129,10 @@ func initKubeClient(root string) { if root != "/" { // If we are running in a test, we need to set the root path to the fake // service account directory. - kube.SetRootPathForTesting(root) + kubeclient.SetRootPathForTesting(root) } var err error - kc, err = kube.New() + kc, err = kubeclient.New() if err != nil { log.Fatalf("Error creating kube client: %v", err) } diff --git a/cmd/containerboot/kube_test.go b/cmd/containerboot/kube_test.go index 1f030959fba5c..1a5730548838f 100644 --- a/cmd/containerboot/kube_test.go +++ b/cmd/containerboot/kube_test.go @@ -11,7 +11,8 @@ import ( "testing" "github.com/google/go-cmp/cmp" - "tailscale.com/kube" + "tailscale.com/kube/kubeapi" + "tailscale.com/kube/kubeclient" ) func TestSetupKube(t *testing.T) { @@ -20,7 +21,7 @@ func TestSetupKube(t *testing.T) { cfg *settings wantErr bool wantCfg *settings - kc kube.Client + kc kubeclient.Client }{ { name: "TS_AUTHKEY set, state Secret exists", @@ -28,11 +29,11 @@ func TestSetupKube(t *testing.T) { AuthKey: "foo", KubeSecret: "foo", }, - kc: &kube.FakeClient{ + kc: &kubeclient.FakeClient{ CheckSecretPermissionsImpl: func(context.Context, string) (bool, bool, error) { return false, false, nil }, - GetSecretImpl: func(context.Context, string) (*kube.Secret, error) { + GetSecretImpl: func(context.Context, string) (*kubeapi.Secret, error) { return nil, nil }, }, @@ -47,12 +48,12 @@ func TestSetupKube(t *testing.T) { AuthKey: "foo", KubeSecret: "foo", }, - kc: &kube.FakeClient{ + kc: 
&kubeclient.FakeClient{ CheckSecretPermissionsImpl: func(context.Context, string) (bool, bool, error) { return false, true, nil }, - GetSecretImpl: func(context.Context, string) (*kube.Secret, error) { - return nil, &kube.Status{Code: 404} + GetSecretImpl: func(context.Context, string) (*kubeapi.Secret, error) { + return nil, &kubeapi.Status{Code: 404} }, }, wantCfg: &settings{ @@ -66,12 +67,12 @@ func TestSetupKube(t *testing.T) { AuthKey: "foo", KubeSecret: "foo", }, - kc: &kube.FakeClient{ + kc: &kubeclient.FakeClient{ CheckSecretPermissionsImpl: func(context.Context, string) (bool, bool, error) { return false, false, nil }, - GetSecretImpl: func(context.Context, string) (*kube.Secret, error) { - return nil, &kube.Status{Code: 404} + GetSecretImpl: func(context.Context, string) (*kubeapi.Secret, error) { + return nil, &kubeapi.Status{Code: 404} }, }, wantCfg: &settings{ @@ -86,12 +87,12 @@ func TestSetupKube(t *testing.T) { AuthKey: "foo", KubeSecret: "foo", }, - kc: &kube.FakeClient{ + kc: &kubeclient.FakeClient{ CheckSecretPermissionsImpl: func(context.Context, string) (bool, bool, error) { return false, false, nil }, - GetSecretImpl: func(context.Context, string) (*kube.Secret, error) { - return nil, &kube.Status{Code: 403} + GetSecretImpl: func(context.Context, string) (*kubeapi.Secret, error) { + return nil, &kubeapi.Status{Code: 403} }, }, wantCfg: &settings{ @@ -110,7 +111,7 @@ func TestSetupKube(t *testing.T) { AuthKey: "foo", KubeSecret: "foo", }, - kc: &kube.FakeClient{ + kc: &kubeclient.FakeClient{ CheckSecretPermissionsImpl: func(context.Context, string) (bool, bool, error) { return false, false, errors.New("broken") }, @@ -126,12 +127,12 @@ func TestSetupKube(t *testing.T) { wantCfg: &settings{ KubeSecret: "foo", }, - kc: &kube.FakeClient{ + kc: &kubeclient.FakeClient{ CheckSecretPermissionsImpl: func(context.Context, string) (bool, bool, error) { return false, true, nil }, - GetSecretImpl: func(context.Context, string) (*kube.Secret, error) { - 
return nil, &kube.Status{Code: 404} + GetSecretImpl: func(context.Context, string) (*kubeapi.Secret, error) { + return nil, &kubeapi.Status{Code: 404} }, }, }, @@ -144,12 +145,12 @@ func TestSetupKube(t *testing.T) { wantCfg: &settings{ KubeSecret: "foo", }, - kc: &kube.FakeClient{ + kc: &kubeclient.FakeClient{ CheckSecretPermissionsImpl: func(context.Context, string) (bool, bool, error) { return false, false, nil }, - GetSecretImpl: func(context.Context, string) (*kube.Secret, error) { - return &kube.Secret{}, nil + GetSecretImpl: func(context.Context, string) (*kubeapi.Secret, error) { + return &kubeapi.Secret{}, nil }, }, }, @@ -158,12 +159,12 @@ func TestSetupKube(t *testing.T) { cfg: &settings{ KubeSecret: "foo", }, - kc: &kube.FakeClient{ + kc: &kubeclient.FakeClient{ CheckSecretPermissionsImpl: func(context.Context, string) (bool, bool, error) { return false, false, nil }, - GetSecretImpl: func(context.Context, string) (*kube.Secret, error) { - return &kube.Secret{Data: map[string][]byte{"authkey": []byte("foo")}}, nil + GetSecretImpl: func(context.Context, string) (*kubeapi.Secret, error) { + return &kubeapi.Secret{Data: map[string][]byte{"authkey": []byte("foo")}}, nil }, }, wantCfg: &settings{ @@ -176,12 +177,12 @@ func TestSetupKube(t *testing.T) { cfg: &settings{ KubeSecret: "foo", }, - kc: &kube.FakeClient{ + kc: &kubeclient.FakeClient{ CheckSecretPermissionsImpl: func(context.Context, string) (bool, bool, error) { return true, false, nil }, - GetSecretImpl: func(context.Context, string) (*kube.Secret, error) { - return &kube.Secret{Data: map[string][]byte{"authkey": []byte("foo")}}, nil + GetSecretImpl: func(context.Context, string) (*kubeapi.Secret, error) { + return &kubeapi.Secret{Data: map[string][]byte{"authkey": []byte("foo")}}, nil }, }, wantCfg: &settings{ diff --git a/cmd/derper/depaware.txt b/cmd/derper/depaware.txt index 1492c4ebb4721..2f6f160c8f960 100644 --- a/cmd/derper/depaware.txt +++ b/cmd/derper/depaware.txt @@ -52,7 +52,7 @@ 
tailscale.com/cmd/derper dependencies: (generated by github.com/tailscale/depawa W github.com/tailscale/go-winio/internal/stringbuffer from github.com/tailscale/go-winio/internal/fs W github.com/tailscale/go-winio/pkg/guid from github.com/tailscale/go-winio+ L 💣 github.com/tailscale/netlink from tailscale.com/util/linuxfw - L 💣 github.com/vishvananda/netlink/nl from github.com/tailscale/netlink + L 💣 github.com/tailscale/netlink/nl from github.com/tailscale/netlink L github.com/vishvananda/netns from github.com/tailscale/netlink+ github.com/x448/float16 from github.com/fxamacker/cbor/v2 💣 go4.org/mem from tailscale.com/client/tailscale+ @@ -99,6 +99,7 @@ tailscale.com/cmd/derper dependencies: (generated by github.com/tailscale/depawa tailscale.com/hostinfo from tailscale.com/net/netmon+ tailscale.com/ipn from tailscale.com/client/tailscale tailscale.com/ipn/ipnstate from tailscale.com/client/tailscale+ + tailscale.com/kube/kubetypes from tailscale.com/envknob tailscale.com/metrics from tailscale.com/cmd/derper+ tailscale.com/net/dnscache from tailscale.com/derp/derphttp tailscale.com/net/ktimeout from tailscale.com/cmd/derper @@ -163,6 +164,7 @@ tailscale.com/cmd/derper dependencies: (generated by github.com/tailscale/depawa tailscale.com/util/syspolicy from tailscale.com/ipn tailscale.com/util/syspolicy/internal from tailscale.com/util/syspolicy/setting tailscale.com/util/syspolicy/setting from tailscale.com/util/syspolicy + tailscale.com/util/usermetric from tailscale.com/health tailscale.com/util/vizerror from tailscale.com/tailcfg+ W 💣 tailscale.com/util/winutil from tailscale.com/hostinfo+ W 💣 tailscale.com/util/winutil/winenv from tailscale.com/hostinfo+ @@ -175,14 +177,15 @@ tailscale.com/cmd/derper dependencies: (generated by github.com/tailscale/depawa golang.org/x/crypto/blake2b from golang.org/x/crypto/argon2+ golang.org/x/crypto/blake2s from tailscale.com/tka golang.org/x/crypto/chacha20 from golang.org/x/crypto/chacha20poly1305 - 
golang.org/x/crypto/chacha20poly1305 from crypto/tls + golang.org/x/crypto/chacha20poly1305 from crypto/tls+ golang.org/x/crypto/cryptobyte from crypto/ecdsa+ golang.org/x/crypto/cryptobyte/asn1 from crypto/ecdsa+ golang.org/x/crypto/curve25519 from golang.org/x/crypto/nacl/box+ - golang.org/x/crypto/hkdf from crypto/tls + golang.org/x/crypto/hkdf from crypto/tls+ golang.org/x/crypto/nacl/box from tailscale.com/types/key golang.org/x/crypto/nacl/secretbox from golang.org/x/crypto/nacl/box golang.org/x/crypto/salsa20/salsa from golang.org/x/crypto/nacl/box+ + golang.org/x/crypto/sha3 from crypto/internal/mlkem768+ W golang.org/x/exp/constraints from tailscale.com/util/winutil golang.org/x/exp/maps from tailscale.com/util/syspolicy/setting L golang.org/x/net/bpf from github.com/mdlayher/netlink+ @@ -256,6 +259,7 @@ tailscale.com/cmd/derper dependencies: (generated by github.com/tailscale/depawa io from bufio+ io/fs from crypto/x509+ io/ioutil from github.com/mitchellh/go-ps+ + iter from maps+ log from expvar+ log/internal from log maps from tailscale.com/ipn+ @@ -271,7 +275,7 @@ tailscale.com/cmd/derper dependencies: (generated by github.com/tailscale/depawa net/http from expvar+ net/http/httptrace from net/http+ net/http/internal from net/http - net/http/pprof from tailscale.com/tsweb+ + net/http/pprof from tailscale.com/tsweb net/netip from go4.org/netipx+ net/textproto from golang.org/x/net/http/httpguts+ net/url from crypto/x509+ @@ -300,3 +304,4 @@ tailscale.com/cmd/derper dependencies: (generated by github.com/tailscale/depawa unicode from bytes+ unicode/utf16 from crypto/x509+ unicode/utf8 from bufio+ + unique from net/netip diff --git a/cmd/k8s-operator/connector.go b/cmd/k8s-operator/connector.go index 4586dfdbfb297..016166b4cda29 100644 --- a/cmd/k8s-operator/connector.go +++ b/cmd/k8s-operator/connector.go @@ -26,6 +26,7 @@ import ( "sigs.k8s.io/controller-runtime/pkg/reconcile" tsoperator "tailscale.com/k8s-operator" tsapi 
"tailscale.com/k8s-operator/apis/v1alpha1" + "tailscale.com/kube/kubetypes" "tailscale.com/tstime" "tailscale.com/util/clientmetric" "tailscale.com/util/set" @@ -61,11 +62,11 @@ type ConnectorReconciler struct { var ( // gaugeConnectorResources tracks the overall number of Connectors currently managed by this operator instance. - gaugeConnectorResources = clientmetric.NewGauge("k8s_connector_resources") + gaugeConnectorResources = clientmetric.NewGauge(kubetypes.MetricConnectorResourceCount) // gaugeConnectorSubnetRouterResources tracks the number of Connectors managed by this operator instance that are subnet routers. - gaugeConnectorSubnetRouterResources = clientmetric.NewGauge("k8s_connector_subnetrouter_resources") + gaugeConnectorSubnetRouterResources = clientmetric.NewGauge(kubetypes.MetricConnectorWithSubnetRouterCount) // gaugeConnectorExitNodeResources tracks the number of Connectors currently managed by this operator instance that are exit nodes. - gaugeConnectorExitNodeResources = clientmetric.NewGauge("k8s_connector_exitnode_resources") + gaugeConnectorExitNodeResources = clientmetric.NewGauge(kubetypes.MetricConnectorWithExitNodeCount) ) func (a *ConnectorReconciler) Reconcile(ctx context.Context, req reconcile.Request) (res reconcile.Result, err error) { diff --git a/cmd/k8s-operator/connector_test.go b/cmd/k8s-operator/connector_test.go index 8a7a5dd535e08..01c60bc9e0fc4 100644 --- a/cmd/k8s-operator/connector_test.go +++ b/cmd/k8s-operator/connector_test.go @@ -16,6 +16,7 @@ import ( "k8s.io/apimachinery/pkg/types" "sigs.k8s.io/controller-runtime/pkg/client/fake" tsapi "tailscale.com/k8s-operator/apis/v1alpha1" + "tailscale.com/kube/kubetypes" "tailscale.com/tstest" "tailscale.com/util/mak" ) @@ -74,6 +75,7 @@ func TestConnector(t *testing.T) { hostname: "test-connector", isExitNode: true, subnetRoutes: "10.40.0.0/14", + app: kubetypes.AppConnector, } expectEqual(t, fc, expectedSecret(t, fc, opts), nil) expectEqual(t, fc, expectedSTS(t, fc, opts), 
removeHashAnnotation) @@ -169,6 +171,7 @@ func TestConnector(t *testing.T) { parentType: "connector", subnetRoutes: "10.40.0.0/14", hostname: "test-connector", + app: kubetypes.AppConnector, } expectEqual(t, fc, expectedSecret(t, fc, opts), nil) expectEqual(t, fc, expectedSTS(t, fc, opts), removeHashAnnotation) @@ -254,6 +257,7 @@ func TestConnectorWithProxyClass(t *testing.T) { hostname: "test-connector", isExitNode: true, subnetRoutes: "10.40.0.0/14", + app: kubetypes.AppConnector, } expectEqual(t, fc, expectedSecret(t, fc, opts), nil) expectEqual(t, fc, expectedSTS(t, fc, opts), removeHashAnnotation) diff --git a/cmd/k8s-operator/depaware.txt b/cmd/k8s-operator/depaware.txt index 45048e52ed533..9c0e8dd0eb858 100644 --- a/cmd/k8s-operator/depaware.txt +++ b/cmd/k8s-operator/depaware.txt @@ -143,7 +143,7 @@ tailscale.com/cmd/k8s-operator dependencies: (generated by github.com/tailscale/ github.com/gorilla/csrf from tailscale.com/client/web github.com/gorilla/securecookie from github.com/gorilla/csrf github.com/hdevalence/ed25519consensus from tailscale.com/clientupdate/distsign+ - L 💣 github.com/illarion/gonotify from tailscale.com/net/dns + L 💣 github.com/illarion/gonotify/v2 from tailscale.com/net/dns github.com/imdario/mergo from k8s.io/client-go/tools/clientcmd L github.com/insomniacslk/dhcp/dhcpv4 from tailscale.com/net/tstun L github.com/insomniacslk/dhcp/iana from github.com/insomniacslk/dhcp/dhcpv4 @@ -171,7 +171,7 @@ tailscale.com/cmd/k8s-operator dependencies: (generated by github.com/tailscale/ L 💣 github.com/mdlayher/netlink/nlenc from github.com/jsimonetti/rtnetlink+ L github.com/mdlayher/netlink/nltest from github.com/google/nftables L github.com/mdlayher/sdnotify from tailscale.com/util/systemd - L 💣 github.com/mdlayher/socket from github.com/mdlayher/netlink + L 💣 github.com/mdlayher/socket from github.com/mdlayher/netlink+ github.com/miekg/dns from tailscale.com/net/dns/recursive 💣 github.com/mitchellh/go-ps from tailscale.com/safesocket 
github.com/modern-go/concurrent from github.com/json-iterator/go @@ -216,6 +216,7 @@ tailscale.com/cmd/k8s-operator dependencies: (generated by github.com/tailscale/ github.com/tailscale/goupnp/ssdp from github.com/tailscale/goupnp github.com/tailscale/hujson from tailscale.com/ipn/conffile L 💣 github.com/tailscale/netlink from tailscale.com/net/routetable+ + L 💣 github.com/tailscale/netlink/nl from github.com/tailscale/netlink github.com/tailscale/peercred from tailscale.com/ipn/ipnauth github.com/tailscale/web-client-prebuilt from tailscale.com/client/web 💣 github.com/tailscale/wireguard-go/conn from github.com/tailscale/wireguard-go/device+ @@ -231,7 +232,6 @@ tailscale.com/cmd/k8s-operator dependencies: (generated by github.com/tailscale/ github.com/tcnksm/go-httpstat from tailscale.com/net/netcheck L github.com/u-root/uio/rand from github.com/insomniacslk/dhcp/dhcpv4 L github.com/u-root/uio/uio from github.com/insomniacslk/dhcp/dhcpv4+ - L 💣 github.com/vishvananda/netlink/nl from github.com/tailscale/netlink L github.com/vishvananda/netns from github.com/tailscale/netlink+ github.com/x448/float16 from github.com/fxamacker/cbor/v2 go.uber.org/multierr from go.uber.org/zap+ @@ -314,7 +314,7 @@ tailscale.com/cmd/k8s-operator dependencies: (generated by github.com/tailscale/ gvisor.dev/gvisor/pkg/tcpip/ports from gvisor.dev/gvisor/pkg/tcpip/stack+ gvisor.dev/gvisor/pkg/tcpip/seqnum from gvisor.dev/gvisor/pkg/tcpip/header+ 💣 gvisor.dev/gvisor/pkg/tcpip/stack from gvisor.dev/gvisor/pkg/tcpip/adapters/gonet+ - gvisor.dev/gvisor/pkg/tcpip/stack/gro from tailscale.com/wgengine/netstack + gvisor.dev/gvisor/pkg/tcpip/stack/gro from tailscale.com/wgengine/netstack/gro gvisor.dev/gvisor/pkg/tcpip/transport from gvisor.dev/gvisor/pkg/tcpip/transport/icmp+ gvisor.dev/gvisor/pkg/tcpip/transport/icmp from tailscale.com/wgengine/netstack gvisor.dev/gvisor/pkg/tcpip/transport/internal/network from gvisor.dev/gvisor/pkg/tcpip/transport/icmp+ @@ -690,7 +690,9 @@ 
tailscale.com/cmd/k8s-operator dependencies: (generated by github.com/tailscale/ tailscale.com/k8s-operator/sessionrecording/spdy from tailscale.com/k8s-operator/sessionrecording tailscale.com/k8s-operator/sessionrecording/tsrecorder from tailscale.com/k8s-operator/sessionrecording+ tailscale.com/k8s-operator/sessionrecording/ws from tailscale.com/k8s-operator/sessionrecording - tailscale.com/kube from tailscale.com/cmd/k8s-operator+ + tailscale.com/kube/kubeapi from tailscale.com/ipn/store/kubestore+ + tailscale.com/kube/kubeclient from tailscale.com/ipn/store/kubestore + tailscale.com/kube/kubetypes from tailscale.com/cmd/k8s-operator+ tailscale.com/licenses from tailscale.com/client/web tailscale.com/log/filelogger from tailscale.com/logpolicy tailscale.com/log/sockstatlog from tailscale.com/ipn/ipnlocal @@ -754,6 +756,7 @@ tailscale.com/cmd/k8s-operator dependencies: (generated by github.com/tailscale/ tailscale.com/tstime from tailscale.com/cmd/k8s-operator+ tailscale.com/tstime/mono from tailscale.com/net/tstun+ tailscale.com/tstime/rate from tailscale.com/derp+ + tailscale.com/tsweb/varz from tailscale.com/util/usermetric tailscale.com/types/appctype from tailscale.com/ipn/ipnlocal tailscale.com/types/dnstype from tailscale.com/ipn/ipnlocal+ tailscale.com/types/empty from tailscale.com/ipn+ @@ -795,7 +798,7 @@ tailscale.com/cmd/k8s-operator dependencies: (generated by github.com/tailscale/ 💣 tailscale.com/util/osdiag from tailscale.com/ipn/localapi W 💣 tailscale.com/util/osdiag/internal/wsc from tailscale.com/util/osdiag tailscale.com/util/osshare from tailscale.com/ipn/ipnlocal - tailscale.com/util/osuser from tailscale.com/ipn/ipnlocal+ + tailscale.com/util/osuser from tailscale.com/ipn/ipnlocal tailscale.com/util/progresstracking from tailscale.com/ipn/localapi tailscale.com/util/race from tailscale.com/net/dns/resolver tailscale.com/util/racebuild from tailscale.com/logpolicy @@ -812,6 +815,7 @@ tailscale.com/cmd/k8s-operator dependencies: (generated by 
github.com/tailscale/ tailscale.com/util/testenv from tailscale.com/control/controlclient+ tailscale.com/util/truncate from tailscale.com/logtail tailscale.com/util/uniq from tailscale.com/ipn/ipnlocal+ + tailscale.com/util/usermetric from tailscale.com/health+ tailscale.com/util/vizerror from tailscale.com/tailcfg+ 💣 tailscale.com/util/winutil from tailscale.com/clientupdate+ W 💣 tailscale.com/util/winutil/authenticode from tailscale.com/clientupdate+ @@ -828,6 +832,7 @@ tailscale.com/cmd/k8s-operator dependencies: (generated by github.com/tailscale/ 💣 tailscale.com/wgengine/magicsock from tailscale.com/ipn/ipnlocal+ tailscale.com/wgengine/netlog from tailscale.com/wgengine tailscale.com/wgengine/netstack from tailscale.com/tsnet + tailscale.com/wgengine/netstack/gro from tailscale.com/net/tstun+ tailscale.com/wgengine/router from tailscale.com/ipn/ipnlocal+ tailscale.com/wgengine/wgcfg from tailscale.com/ipn/ipnlocal+ tailscale.com/wgengine/wgcfg/nmcfg from tailscale.com/ipn/ipnlocal @@ -846,8 +851,9 @@ tailscale.com/cmd/k8s-operator dependencies: (generated by github.com/tailscale/ golang.org/x/crypto/hkdf from crypto/tls+ golang.org/x/crypto/nacl/box from tailscale.com/types/key golang.org/x/crypto/nacl/secretbox from golang.org/x/crypto/nacl/box - golang.org/x/crypto/poly1305 from github.com/tailscale/wireguard-go/device+ + golang.org/x/crypto/poly1305 from github.com/tailscale/wireguard-go/device golang.org/x/crypto/salsa20/salsa from golang.org/x/crypto/nacl/box+ + golang.org/x/crypto/sha3 from crypto/internal/mlkem768+ golang.org/x/exp/constraints from github.com/dblohm7/wingoes/pe+ golang.org/x/exp/maps from sigs.k8s.io/controller-runtime/pkg/cache+ golang.org/x/exp/slices from tailscale.com/cmd/k8s-operator+ @@ -949,6 +955,7 @@ tailscale.com/cmd/k8s-operator dependencies: (generated by github.com/tailscale/ io from archive/tar+ io/fs from archive/tar+ io/ioutil from github.com/aws/aws-sdk-go-v2/aws/protocol/query+ + iter from go/ast+ log from expvar+ 
log/internal from log+ log/slog from github.com/go-logr/logr+ @@ -986,7 +993,7 @@ tailscale.com/cmd/k8s-operator dependencies: (generated by github.com/tailscale/ runtime/pprof from net/http/pprof+ runtime/trace from net/http/pprof slices from encoding/base32+ - sort from archive/tar+ + sort from compress/flate+ strconv from archive/tar+ strings from archive/tar+ sync from archive/tar+ @@ -999,3 +1006,4 @@ tailscale.com/cmd/k8s-operator dependencies: (generated by github.com/tailscale/ unicode from bytes+ unicode/utf16 from crypto/x509+ unicode/utf8 from bufio+ + unique from net/netip diff --git a/cmd/k8s-operator/deploy/chart/templates/deployment.yaml b/cmd/k8s-operator/deploy/chart/templates/deployment.yaml index 28fda135df071..c428d5d1e751e 100644 --- a/cmd/k8s-operator/deploy/chart/templates/deployment.yaml +++ b/cmd/k8s-operator/deploy/chart/templates/deployment.yaml @@ -79,8 +79,8 @@ spec: value: {{ .Values.proxyConfig.firewallMode }} {{- if .Values.proxyConfig.defaultProxyClass }} - name: PROXY_DEFAULT_CLASS - {{- end }} value: {{ .Values.proxyConfig.defaultProxyClass }} + {{- end }} {{- with .Values.operatorConfig.extraEnv }} {{- toYaml . 
| nindent 12 }} {{- end }} diff --git a/cmd/k8s-operator/deploy/chart/templates/operator-rbac.yaml b/cmd/k8s-operator/deploy/chart/templates/operator-rbac.yaml index 1a18464397e0a..16dcae4903515 100644 --- a/cmd/k8s-operator/deploy/chart/templates/operator-rbac.yaml +++ b/cmd/k8s-operator/deploy/chart/templates/operator-rbac.yaml @@ -14,10 +14,10 @@ metadata: rules: - apiGroups: [""] resources: ["events", "services", "services/status"] - verbs: ["*"] + verbs: ["create","delete","deletecollection","get","list","patch","update","watch"] - apiGroups: ["networking.k8s.io"] resources: ["ingresses", "ingresses/status"] - verbs: ["*"] + verbs: ["create","delete","deletecollection","get","list","patch","update","watch"] - apiGroups: ["networking.k8s.io"] resources: ["ingressclasses"] verbs: ["get", "list", "watch"] @@ -27,6 +27,9 @@ rules: - apiGroups: ["tailscale.com"] resources: ["dnsconfigs", "dnsconfigs/status"] verbs: ["get", "list", "watch", "update"] +- apiGroups: ["tailscale.com"] + resources: ["recorders", "recorders/status"] + verbs: ["get", "list", "watch", "update"] --- apiVersion: rbac.authorization.k8s.io/v1 kind: ClusterRoleBinding @@ -49,13 +52,16 @@ metadata: rules: - apiGroups: [""] resources: ["secrets", "serviceaccounts", "configmaps"] - verbs: ["*"] + verbs: ["create","delete","deletecollection","get","list","patch","update","watch"] - apiGroups: ["apps"] resources: ["statefulsets", "deployments"] - verbs: ["*"] + verbs: ["create","delete","deletecollection","get","list","patch","update","watch"] - apiGroups: ["discovery.k8s.io"] resources: ["endpointslices"] verbs: ["get", "list", "watch"] +- apiGroups: ["rbac.authorization.k8s.io"] + resources: ["roles", "rolebindings"] + verbs: ["get", "create", "patch", "update", "list", "watch"] --- apiVersion: rbac.authorization.k8s.io/v1 kind: RoleBinding diff --git a/cmd/k8s-operator/deploy/chart/templates/proxy-rbac.yaml b/cmd/k8s-operator/deploy/chart/templates/proxy-rbac.yaml index 
31a034aaafe03..1c15c9119f971 100644 --- a/cmd/k8s-operator/deploy/chart/templates/proxy-rbac.yaml +++ b/cmd/k8s-operator/deploy/chart/templates/proxy-rbac.yaml @@ -15,7 +15,7 @@ metadata: rules: - apiGroups: [""] resources: ["secrets"] - verbs: ["*"] + verbs: ["create","delete","deletecollection","get","list","patch","update","watch"] --- apiVersion: rbac.authorization.k8s.io/v1 kind: RoleBinding diff --git a/cmd/k8s-operator/deploy/crds/tailscale.com_dnsconfigs.yaml b/cmd/k8s-operator/deploy/crds/tailscale.com_dnsconfigs.yaml index ddbee4af0e7da..13aee9b9e9ebf 100644 --- a/cmd/k8s-operator/deploy/crds/tailscale.com_dnsconfigs.yaml +++ b/cmd/k8s-operator/deploy/crds/tailscale.com_dnsconfigs.yaml @@ -89,14 +89,14 @@ spec: type: object properties: image: - description: Nameserver image. + description: Nameserver image. Defaults to tailscale/k8s-nameserver:unstable. type: object properties: repo: description: Repo defaults to tailscale/k8s-nameserver. type: string tag: - description: Tag defaults to operator's own tag. + description: Tag defaults to unstable. type: string status: description: |- diff --git a/cmd/k8s-operator/deploy/crds/tailscale.com_recorders.yaml b/cmd/k8s-operator/deploy/crds/tailscale.com_recorders.yaml new file mode 100644 index 0000000000000..2c4cf2f6bb95d --- /dev/null +++ b/cmd/k8s-operator/deploy/crds/tailscale.com_recorders.yaml @@ -0,0 +1,1705 @@ +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + annotations: + controller-gen.kubebuilder.io/version: v0.15.1-0.20240618033008-7824932b0cab + name: recorders.tailscale.com +spec: + group: tailscale.com + names: + kind: Recorder + listKind: RecorderList + plural: recorders + shortNames: + - rec + singular: recorder + scope: Cluster + versions: + - additionalPrinterColumns: + - description: Status of the deployed Recorder resources. 
+ jsonPath: .status.conditions[?(@.type == "RecorderReady")].reason + name: Status + type: string + - description: URL on which the UI is exposed if enabled. + jsonPath: .status.devices[?(@.url != "")].url + name: URL + type: string + name: v1alpha1 + schema: + openAPIV3Schema: + type: object + required: + - spec + properties: + apiVersion: + description: |- + APIVersion defines the versioned schema of this representation of an object. + Servers should convert recognized schemas to the latest internal value, and + may reject unrecognized values. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources + type: string + kind: + description: |- + Kind is a string value representing the REST resource this object represents. + Servers may infer this from the endpoint the client submits requests to. + Cannot be updated. + In CamelCase. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + type: string + metadata: + type: object + spec: + description: Spec describes the desired recorder instance. + type: object + properties: + enableUI: + description: |- + Set to true to enable the Recorder UI. The UI lists and plays recorded sessions. + The UI will be served at :443. Defaults to false. + Corresponds to --ui tsrecorder flag https://tailscale.com/kb/1246/tailscale-ssh-session-recording#deploy-a-recorder-node. + Required if S3 storage is not set up, to ensure that recordings are accessible. + type: boolean + statefulSet: + description: |- + Configuration parameters for the Recorder's StatefulSet. The operator + deploys a StatefulSet for each Recorder resource. + type: object + properties: + annotations: + description: |- + Annotations that will be added to the StatefulSet created for the Recorder. + Any Annotations specified here will be merged with the default annotations + applied to the StatefulSet by the operator. 
+ https://kubernetes.io/docs/concepts/overview/working-with-objects/annotations/#syntax-and-character-set + type: object + additionalProperties: + type: string + labels: + description: |- + Labels that will be added to the StatefulSet created for the Recorder. + Any labels specified here will be merged with the default labels applied + to the StatefulSet by the operator. + https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/#syntax-and-character-set + type: object + additionalProperties: + type: string + pod: + description: Configuration for pods created by the Recorder's StatefulSet. + type: object + properties: + affinity: + description: |- + Affinity rules for Recorder Pods. By default, the operator does not + apply any affinity rules. + https://kubernetes.io/docs/reference/kubernetes-api/workload-resources/pod-v1/#affinity + type: object + properties: + nodeAffinity: + description: Describes node affinity scheduling rules for the pod. + type: object + properties: + preferredDuringSchedulingIgnoredDuringExecution: + description: |- + The scheduler will prefer to schedule pods to nodes that satisfy + the affinity expressions specified by this field, but it may choose + a node that violates one or more of the expressions. The node that is + most preferred is the one with the greatest sum of weights, i.e. + for each node that meets all of the scheduling requirements (resource + request, requiredDuringScheduling affinity expressions, etc.), + compute a sum by iterating through the elements of this field and adding + "weight" to the sum if the node matches the corresponding matchExpressions; the + node(s) with the highest sum are the most preferred. + type: array + items: + description: |- + An empty preferred scheduling term matches all objects with implicit weight 0 + (i.e. it's a no-op). A null preferred scheduling term matches no objects (i.e. is also a no-op). 
+ type: object + required: + - preference + - weight + properties: + preference: + description: A node selector term, associated with the corresponding weight. + type: object + properties: + matchExpressions: + description: A list of node selector requirements by node's labels. + type: array + items: + description: |- + A node selector requirement is a selector that contains values, a key, and an operator + that relates the key and values. + type: object + required: + - key + - operator + properties: + key: + description: The label key that the selector applies to. + type: string + operator: + description: |- + Represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists, DoesNotExist. Gt, and Lt. + type: string + values: + description: |- + An array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. If the operator is Gt or Lt, the values + array must have a single element, which will be interpreted as an integer. + This array is replaced during a strategic merge patch. + type: array + items: + type: string + x-kubernetes-list-type: atomic + x-kubernetes-list-type: atomic + matchFields: + description: A list of node selector requirements by node's fields. + type: array + items: + description: |- + A node selector requirement is a selector that contains values, a key, and an operator + that relates the key and values. + type: object + required: + - key + - operator + properties: + key: + description: The label key that the selector applies to. + type: string + operator: + description: |- + Represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists, DoesNotExist. Gt, and Lt. + type: string + values: + description: |- + An array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. 
If the operator is Gt or Lt, the values + array must have a single element, which will be interpreted as an integer. + This array is replaced during a strategic merge patch. + type: array + items: + type: string + x-kubernetes-list-type: atomic + x-kubernetes-list-type: atomic + x-kubernetes-map-type: atomic + weight: + description: Weight associated with matching the corresponding nodeSelectorTerm, in the range 1-100. + type: integer + format: int32 + x-kubernetes-list-type: atomic + requiredDuringSchedulingIgnoredDuringExecution: + description: |- + If the affinity requirements specified by this field are not met at + scheduling time, the pod will not be scheduled onto the node. + If the affinity requirements specified by this field cease to be met + at some point during pod execution (e.g. due to an update), the system + may or may not try to eventually evict the pod from its node. + type: object + required: + - nodeSelectorTerms + properties: + nodeSelectorTerms: + description: Required. A list of node selector terms. The terms are ORed. + type: array + items: + description: |- + A null or empty node selector term matches no objects. The requirements of + them are ANDed. + The TopologySelectorTerm type implements a subset of the NodeSelectorTerm. + type: object + properties: + matchExpressions: + description: A list of node selector requirements by node's labels. + type: array + items: + description: |- + A node selector requirement is a selector that contains values, a key, and an operator + that relates the key and values. + type: object + required: + - key + - operator + properties: + key: + description: The label key that the selector applies to. + type: string + operator: + description: |- + Represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists, DoesNotExist. Gt, and Lt. + type: string + values: + description: |- + An array of string values. If the operator is In or NotIn, + the values array must be non-empty. 
If the operator is Exists or DoesNotExist, + the values array must be empty. If the operator is Gt or Lt, the values + array must have a single element, which will be interpreted as an integer. + This array is replaced during a strategic merge patch. + type: array + items: + type: string + x-kubernetes-list-type: atomic + x-kubernetes-list-type: atomic + matchFields: + description: A list of node selector requirements by node's fields. + type: array + items: + description: |- + A node selector requirement is a selector that contains values, a key, and an operator + that relates the key and values. + type: object + required: + - key + - operator + properties: + key: + description: The label key that the selector applies to. + type: string + operator: + description: |- + Represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists, DoesNotExist. Gt, and Lt. + type: string + values: + description: |- + An array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. If the operator is Gt or Lt, the values + array must have a single element, which will be interpreted as an integer. + This array is replaced during a strategic merge patch. + type: array + items: + type: string + x-kubernetes-list-type: atomic + x-kubernetes-list-type: atomic + x-kubernetes-map-type: atomic + x-kubernetes-list-type: atomic + x-kubernetes-map-type: atomic + podAffinity: + description: Describes pod affinity scheduling rules (e.g. co-locate this pod in the same node, zone, etc. as some other pod(s)). + type: object + properties: + preferredDuringSchedulingIgnoredDuringExecution: + description: |- + The scheduler will prefer to schedule pods to nodes that satisfy + the affinity expressions specified by this field, but it may choose + a node that violates one or more of the expressions. 
The node that is + most preferred is the one with the greatest sum of weights, i.e. + for each node that meets all of the scheduling requirements (resource + request, requiredDuringScheduling affinity expressions, etc.), + compute a sum by iterating through the elements of this field and adding + "weight" to the sum if the node has pods which matches the corresponding podAffinityTerm; the + node(s) with the highest sum are the most preferred. + type: array + items: + description: The weights of all of the matched WeightedPodAffinityTerm fields are added per-node to find the most preferred node(s) + type: object + required: + - podAffinityTerm + - weight + properties: + podAffinityTerm: + description: Required. A pod affinity term, associated with the corresponding weight. + type: object + required: + - topologyKey + properties: + labelSelector: + description: |- + A label query over a set of resources, in this case pods. + If it's null, this PodAffinityTerm matches with no Pods. + type: object + properties: + matchExpressions: + description: matchExpressions is a list of label selector requirements. The requirements are ANDed. + type: array + items: + description: |- + A label selector requirement is a selector that contains values, a key, and an operator that + relates the key and values. + type: object + required: + - key + - operator + properties: + key: + description: key is the label key that the selector applies to. + type: string + operator: + description: |- + operator represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: |- + values is an array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. This array is replaced during a strategic + merge patch. 
+ type: array + items: + type: string + x-kubernetes-list-type: atomic + x-kubernetes-list-type: atomic + matchLabels: + description: |- + matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, whose key field is "key", the + operator is "In", and the values array contains only "value". The requirements are ANDed. + type: object + additionalProperties: + type: string + x-kubernetes-map-type: atomic + matchLabelKeys: + description: |- + MatchLabelKeys is a set of pod label keys to select which pods will + be taken into consideration. The keys are used to lookup values from the + incoming pod labels, those key-value labels are merged with `labelSelector` as `key in (value)` + to select the group of existing pods which pods will be taken into consideration + for the incoming pod's pod (anti) affinity. Keys that don't exist in the incoming + pod labels will be ignored. The default value is empty. + The same key is forbidden to exist in both matchLabelKeys and labelSelector. + Also, matchLabelKeys cannot be set when labelSelector isn't set. + This is an alpha field and requires enabling MatchLabelKeysInPodAffinity feature gate. + type: array + items: + type: string + x-kubernetes-list-type: atomic + mismatchLabelKeys: + description: |- + MismatchLabelKeys is a set of pod label keys to select which pods will + be taken into consideration. The keys are used to lookup values from the + incoming pod labels, those key-value labels are merged with `labelSelector` as `key notin (value)` + to select the group of existing pods which pods will be taken into consideration + for the incoming pod's pod (anti) affinity. Keys that don't exist in the incoming + pod labels will be ignored. The default value is empty. + The same key is forbidden to exist in both mismatchLabelKeys and labelSelector. + Also, mismatchLabelKeys cannot be set when labelSelector isn't set. 
+ This is an alpha field and requires enabling MatchLabelKeysInPodAffinity feature gate. + type: array + items: + type: string + x-kubernetes-list-type: atomic + namespaceSelector: + description: |- + A label query over the set of namespaces that the term applies to. + The term is applied to the union of the namespaces selected by this field + and the ones listed in the namespaces field. + null selector and null or empty namespaces list means "this pod's namespace". + An empty selector ({}) matches all namespaces. + type: object + properties: + matchExpressions: + description: matchExpressions is a list of label selector requirements. The requirements are ANDed. + type: array + items: + description: |- + A label selector requirement is a selector that contains values, a key, and an operator that + relates the key and values. + type: object + required: + - key + - operator + properties: + key: + description: key is the label key that the selector applies to. + type: string + operator: + description: |- + operator represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: |- + values is an array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. This array is replaced during a strategic + merge patch. + type: array + items: + type: string + x-kubernetes-list-type: atomic + x-kubernetes-list-type: atomic + matchLabels: + description: |- + matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, whose key field is "key", the + operator is "In", and the values array contains only "value". The requirements are ANDed. 
+ type: object + additionalProperties: + type: string + x-kubernetes-map-type: atomic + namespaces: + description: |- + namespaces specifies a static list of namespace names that the term applies to. + The term is applied to the union of the namespaces listed in this field + and the ones selected by namespaceSelector. + null or empty namespaces list and null namespaceSelector means "this pod's namespace". + type: array + items: + type: string + x-kubernetes-list-type: atomic + topologyKey: + description: |- + This pod should be co-located (affinity) or not co-located (anti-affinity) with the pods matching + the labelSelector in the specified namespaces, where co-located is defined as running on a node + whose value of the label with key topologyKey matches that of any node on which any of the + selected pods is running. + Empty topologyKey is not allowed. + type: string + weight: + description: |- + weight associated with matching the corresponding podAffinityTerm, + in the range 1-100. + type: integer + format: int32 + x-kubernetes-list-type: atomic + requiredDuringSchedulingIgnoredDuringExecution: + description: |- + If the affinity requirements specified by this field are not met at + scheduling time, the pod will not be scheduled onto the node. + If the affinity requirements specified by this field cease to be met + at some point during pod execution (e.g. due to a pod label update), the + system may or may not try to eventually evict the pod from its node. + When there are multiple elements, the lists of nodes corresponding to each + podAffinityTerm are intersected, i.e. all terms must be satisfied. 
+ type: array + items: + description: |- + Defines a set of pods (namely those matching the labelSelector + relative to the given namespace(s)) that this pod should be + co-located (affinity) or not co-located (anti-affinity) with, + where co-located is defined as running on a node whose value of + the label with key matches that of any node on which + a pod of the set of pods is running + type: object + required: + - topologyKey + properties: + labelSelector: + description: |- + A label query over a set of resources, in this case pods. + If it's null, this PodAffinityTerm matches with no Pods. + type: object + properties: + matchExpressions: + description: matchExpressions is a list of label selector requirements. The requirements are ANDed. + type: array + items: + description: |- + A label selector requirement is a selector that contains values, a key, and an operator that + relates the key and values. + type: object + required: + - key + - operator + properties: + key: + description: key is the label key that the selector applies to. + type: string + operator: + description: |- + operator represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: |- + values is an array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. This array is replaced during a strategic + merge patch. + type: array + items: + type: string + x-kubernetes-list-type: atomic + x-kubernetes-list-type: atomic + matchLabels: + description: |- + matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, whose key field is "key", the + operator is "In", and the values array contains only "value". The requirements are ANDed. 
+ type: object + additionalProperties: + type: string + x-kubernetes-map-type: atomic + matchLabelKeys: + description: |- + MatchLabelKeys is a set of pod label keys to select which pods will + be taken into consideration. The keys are used to lookup values from the + incoming pod labels, those key-value labels are merged with `labelSelector` as `key in (value)` + to select the group of existing pods which pods will be taken into consideration + for the incoming pod's pod (anti) affinity. Keys that don't exist in the incoming + pod labels will be ignored. The default value is empty. + The same key is forbidden to exist in both matchLabelKeys and labelSelector. + Also, matchLabelKeys cannot be set when labelSelector isn't set. + This is an alpha field and requires enabling MatchLabelKeysInPodAffinity feature gate. + type: array + items: + type: string + x-kubernetes-list-type: atomic + mismatchLabelKeys: + description: |- + MismatchLabelKeys is a set of pod label keys to select which pods will + be taken into consideration. The keys are used to lookup values from the + incoming pod labels, those key-value labels are merged with `labelSelector` as `key notin (value)` + to select the group of existing pods which pods will be taken into consideration + for the incoming pod's pod (anti) affinity. Keys that don't exist in the incoming + pod labels will be ignored. The default value is empty. + The same key is forbidden to exist in both mismatchLabelKeys and labelSelector. + Also, mismatchLabelKeys cannot be set when labelSelector isn't set. + This is an alpha field and requires enabling MatchLabelKeysInPodAffinity feature gate. + type: array + items: + type: string + x-kubernetes-list-type: atomic + namespaceSelector: + description: |- + A label query over the set of namespaces that the term applies to. + The term is applied to the union of the namespaces selected by this field + and the ones listed in the namespaces field. 
+ null selector and null or empty namespaces list means "this pod's namespace". + An empty selector ({}) matches all namespaces. + type: object + properties: + matchExpressions: + description: matchExpressions is a list of label selector requirements. The requirements are ANDed. + type: array + items: + description: |- + A label selector requirement is a selector that contains values, a key, and an operator that + relates the key and values. + type: object + required: + - key + - operator + properties: + key: + description: key is the label key that the selector applies to. + type: string + operator: + description: |- + operator represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: |- + values is an array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. This array is replaced during a strategic + merge patch. + type: array + items: + type: string + x-kubernetes-list-type: atomic + x-kubernetes-list-type: atomic + matchLabels: + description: |- + matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, whose key field is "key", the + operator is "In", and the values array contains only "value". The requirements are ANDed. + type: object + additionalProperties: + type: string + x-kubernetes-map-type: atomic + namespaces: + description: |- + namespaces specifies a static list of namespace names that the term applies to. + The term is applied to the union of the namespaces listed in this field + and the ones selected by namespaceSelector. + null or empty namespaces list and null namespaceSelector means "this pod's namespace". 
+ type: array + items: + type: string + x-kubernetes-list-type: atomic + topologyKey: + description: |- + This pod should be co-located (affinity) or not co-located (anti-affinity) with the pods matching + the labelSelector in the specified namespaces, where co-located is defined as running on a node + whose value of the label with key topologyKey matches that of any node on which any of the + selected pods is running. + Empty topologyKey is not allowed. + type: string + x-kubernetes-list-type: atomic + podAntiAffinity: + description: Describes pod anti-affinity scheduling rules (e.g. avoid putting this pod in the same node, zone, etc. as some other pod(s)). + type: object + properties: + preferredDuringSchedulingIgnoredDuringExecution: + description: |- + The scheduler will prefer to schedule pods to nodes that satisfy + the anti-affinity expressions specified by this field, but it may choose + a node that violates one or more of the expressions. The node that is + most preferred is the one with the greatest sum of weights, i.e. + for each node that meets all of the scheduling requirements (resource + request, requiredDuringScheduling anti-affinity expressions, etc.), + compute a sum by iterating through the elements of this field and adding + "weight" to the sum if the node has pods which matches the corresponding podAffinityTerm; the + node(s) with the highest sum are the most preferred. + type: array + items: + description: The weights of all of the matched WeightedPodAffinityTerm fields are added per-node to find the most preferred node(s) + type: object + required: + - podAffinityTerm + - weight + properties: + podAffinityTerm: + description: Required. A pod affinity term, associated with the corresponding weight. + type: object + required: + - topologyKey + properties: + labelSelector: + description: |- + A label query over a set of resources, in this case pods. + If it's null, this PodAffinityTerm matches with no Pods. 
+ type: object + properties: + matchExpressions: + description: matchExpressions is a list of label selector requirements. The requirements are ANDed. + type: array + items: + description: |- + A label selector requirement is a selector that contains values, a key, and an operator that + relates the key and values. + type: object + required: + - key + - operator + properties: + key: + description: key is the label key that the selector applies to. + type: string + operator: + description: |- + operator represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: |- + values is an array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. This array is replaced during a strategic + merge patch. + type: array + items: + type: string + x-kubernetes-list-type: atomic + x-kubernetes-list-type: atomic + matchLabels: + description: |- + matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, whose key field is "key", the + operator is "In", and the values array contains only "value". The requirements are ANDed. + type: object + additionalProperties: + type: string + x-kubernetes-map-type: atomic + matchLabelKeys: + description: |- + MatchLabelKeys is a set of pod label keys to select which pods will + be taken into consideration. The keys are used to lookup values from the + incoming pod labels, those key-value labels are merged with `labelSelector` as `key in (value)` + to select the group of existing pods which pods will be taken into consideration + for the incoming pod's pod (anti) affinity. Keys that don't exist in the incoming + pod labels will be ignored. The default value is empty. + The same key is forbidden to exist in both matchLabelKeys and labelSelector. 
+ Also, matchLabelKeys cannot be set when labelSelector isn't set. + This is an alpha field and requires enabling MatchLabelKeysInPodAffinity feature gate. + type: array + items: + type: string + x-kubernetes-list-type: atomic + mismatchLabelKeys: + description: |- + MismatchLabelKeys is a set of pod label keys to select which pods will + be taken into consideration. The keys are used to lookup values from the + incoming pod labels, those key-value labels are merged with `labelSelector` as `key notin (value)` + to select the group of existing pods which pods will be taken into consideration + for the incoming pod's pod (anti) affinity. Keys that don't exist in the incoming + pod labels will be ignored. The default value is empty. + The same key is forbidden to exist in both mismatchLabelKeys and labelSelector. + Also, mismatchLabelKeys cannot be set when labelSelector isn't set. + This is an alpha field and requires enabling MatchLabelKeysInPodAffinity feature gate. + type: array + items: + type: string + x-kubernetes-list-type: atomic + namespaceSelector: + description: |- + A label query over the set of namespaces that the term applies to. + The term is applied to the union of the namespaces selected by this field + and the ones listed in the namespaces field. + null selector and null or empty namespaces list means "this pod's namespace". + An empty selector ({}) matches all namespaces. + type: object + properties: + matchExpressions: + description: matchExpressions is a list of label selector requirements. The requirements are ANDed. + type: array + items: + description: |- + A label selector requirement is a selector that contains values, a key, and an operator that + relates the key and values. + type: object + required: + - key + - operator + properties: + key: + description: key is the label key that the selector applies to. + type: string + operator: + description: |- + operator represents a key's relationship to a set of values. 
+ Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: |- + values is an array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. This array is replaced during a strategic + merge patch. + type: array + items: + type: string + x-kubernetes-list-type: atomic + x-kubernetes-list-type: atomic + matchLabels: + description: |- + matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, whose key field is "key", the + operator is "In", and the values array contains only "value". The requirements are ANDed. + type: object + additionalProperties: + type: string + x-kubernetes-map-type: atomic + namespaces: + description: |- + namespaces specifies a static list of namespace names that the term applies to. + The term is applied to the union of the namespaces listed in this field + and the ones selected by namespaceSelector. + null or empty namespaces list and null namespaceSelector means "this pod's namespace". + type: array + items: + type: string + x-kubernetes-list-type: atomic + topologyKey: + description: |- + This pod should be co-located (affinity) or not co-located (anti-affinity) with the pods matching + the labelSelector in the specified namespaces, where co-located is defined as running on a node + whose value of the label with key topologyKey matches that of any node on which any of the + selected pods is running. + Empty topologyKey is not allowed. + type: string + weight: + description: |- + weight associated with matching the corresponding podAffinityTerm, + in the range 1-100. 
+ type: integer + format: int32 + x-kubernetes-list-type: atomic + requiredDuringSchedulingIgnoredDuringExecution: + description: |- + If the anti-affinity requirements specified by this field are not met at + scheduling time, the pod will not be scheduled onto the node. + If the anti-affinity requirements specified by this field cease to be met + at some point during pod execution (e.g. due to a pod label update), the + system may or may not try to eventually evict the pod from its node. + When there are multiple elements, the lists of nodes corresponding to each + podAffinityTerm are intersected, i.e. all terms must be satisfied. + type: array + items: + description: |- + Defines a set of pods (namely those matching the labelSelector + relative to the given namespace(s)) that this pod should be + co-located (affinity) or not co-located (anti-affinity) with, + where co-located is defined as running on a node whose value of + the label with key matches that of any node on which + a pod of the set of pods is running + type: object + required: + - topologyKey + properties: + labelSelector: + description: |- + A label query over a set of resources, in this case pods. + If it's null, this PodAffinityTerm matches with no Pods. + type: object + properties: + matchExpressions: + description: matchExpressions is a list of label selector requirements. The requirements are ANDed. + type: array + items: + description: |- + A label selector requirement is a selector that contains values, a key, and an operator that + relates the key and values. + type: object + required: + - key + - operator + properties: + key: + description: key is the label key that the selector applies to. + type: string + operator: + description: |- + operator represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: |- + values is an array of string values. 
If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. This array is replaced during a strategic + merge patch. + type: array + items: + type: string + x-kubernetes-list-type: atomic + x-kubernetes-list-type: atomic + matchLabels: + description: |- + matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, whose key field is "key", the + operator is "In", and the values array contains only "value". The requirements are ANDed. + type: object + additionalProperties: + type: string + x-kubernetes-map-type: atomic + matchLabelKeys: + description: |- + MatchLabelKeys is a set of pod label keys to select which pods will + be taken into consideration. The keys are used to lookup values from the + incoming pod labels, those key-value labels are merged with `labelSelector` as `key in (value)` + to select the group of existing pods which pods will be taken into consideration + for the incoming pod's pod (anti) affinity. Keys that don't exist in the incoming + pod labels will be ignored. The default value is empty. + The same key is forbidden to exist in both matchLabelKeys and labelSelector. + Also, matchLabelKeys cannot be set when labelSelector isn't set. + This is an alpha field and requires enabling MatchLabelKeysInPodAffinity feature gate. + type: array + items: + type: string + x-kubernetes-list-type: atomic + mismatchLabelKeys: + description: |- + MismatchLabelKeys is a set of pod label keys to select which pods will + be taken into consideration. The keys are used to lookup values from the + incoming pod labels, those key-value labels are merged with `labelSelector` as `key notin (value)` + to select the group of existing pods which pods will be taken into consideration + for the incoming pod's pod (anti) affinity. Keys that don't exist in the incoming + pod labels will be ignored. 
The default value is empty. + The same key is forbidden to exist in both mismatchLabelKeys and labelSelector. + Also, mismatchLabelKeys cannot be set when labelSelector isn't set. + This is an alpha field and requires enabling MatchLabelKeysInPodAffinity feature gate. + type: array + items: + type: string + x-kubernetes-list-type: atomic + namespaceSelector: + description: |- + A label query over the set of namespaces that the term applies to. + The term is applied to the union of the namespaces selected by this field + and the ones listed in the namespaces field. + null selector and null or empty namespaces list means "this pod's namespace". + An empty selector ({}) matches all namespaces. + type: object + properties: + matchExpressions: + description: matchExpressions is a list of label selector requirements. The requirements are ANDed. + type: array + items: + description: |- + A label selector requirement is a selector that contains values, a key, and an operator that + relates the key and values. + type: object + required: + - key + - operator + properties: + key: + description: key is the label key that the selector applies to. + type: string + operator: + description: |- + operator represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: |- + values is an array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. This array is replaced during a strategic + merge patch. + type: array + items: + type: string + x-kubernetes-list-type: atomic + x-kubernetes-list-type: atomic + matchLabels: + description: |- + matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, whose key field is "key", the + operator is "In", and the values array contains only "value". 
The requirements are ANDed. + type: object + additionalProperties: + type: string + x-kubernetes-map-type: atomic + namespaces: + description: |- + namespaces specifies a static list of namespace names that the term applies to. + The term is applied to the union of the namespaces listed in this field + and the ones selected by namespaceSelector. + null or empty namespaces list and null namespaceSelector means "this pod's namespace". + type: array + items: + type: string + x-kubernetes-list-type: atomic + topologyKey: + description: |- + This pod should be co-located (affinity) or not co-located (anti-affinity) with the pods matching + the labelSelector in the specified namespaces, where co-located is defined as running on a node + whose value of the label with key topologyKey matches that of any node on which any of the + selected pods is running. + Empty topologyKey is not allowed. + type: string + x-kubernetes-list-type: atomic + annotations: + description: |- + Annotations that will be added to Recorder Pods. Any annotations + specified here will be merged with the default annotations applied to + the Pod by the operator. + https://kubernetes.io/docs/concepts/overview/working-with-objects/annotations/#syntax-and-character-set + type: object + additionalProperties: + type: string + container: + description: Configuration for the Recorder container running tailscale. + type: object + properties: + env: + description: |- + List of environment variables to set in the container. + https://kubernetes.io/docs/reference/kubernetes-api/workload-resources/pod-v1/#environment-variables + Note that environment variables provided here will take precedence + over Tailscale-specific environment variables set by the operator, + however running proxies with custom values for Tailscale environment + variables (i.e TS_USERSPACE) is not recommended and might break in + the future. 
+ type: array + items: + type: object + required: + - name + properties: + name: + description: Name of the environment variable. Must be a C_IDENTIFIER. + type: string + pattern: ^[-._a-zA-Z][-._a-zA-Z0-9]*$ + value: + description: |- + Variable references $(VAR_NAME) are expanded using the previously defined + environment variables in the container and any service environment + variables. If a variable cannot be resolved, the reference in the input + string will be unchanged. Double $$ are reduced to a single $, which + allows for escaping the $(VAR_NAME) syntax: i.e. "$$(VAR_NAME)" will + produce the string literal "$(VAR_NAME)". Escaped references will never + be expanded, regardless of whether the variable exists or not. Defaults + to "". + type: string + image: + description: |- + Container image name including tag. Defaults to docker.io/tailscale/tsrecorder + with the same tag as the operator, but the official images are also + available at ghcr.io/tailscale/tsrecorder. + https://kubernetes.io/docs/reference/kubernetes-api/workload-resources/pod-v1/#image + type: string + imagePullPolicy: + description: |- + Image pull policy. One of Always, Never, IfNotPresent. Defaults to Always. + https://kubernetes.io/docs/reference/kubernetes-api/workload-resources/pod-v1/#image + type: string + enum: + - Always + - Never + - IfNotPresent + resources: + description: |- + Container resource requirements. + By default, the operator does not apply any resource requirements. The + amount of resources required wil depend on the volume of recordings sent. + https://kubernetes.io/docs/reference/kubernetes-api/workload-resources/pod-v1/#resources + type: object + properties: + claims: + description: |- + Claims lists the names of resources, defined in spec.resourceClaims, + that are used by this container. + + This is an alpha field and requires enabling the + DynamicResourceAllocation feature gate. + + This field is immutable. It can only be set for containers. 
+ type: array + items: + description: ResourceClaim references one entry in PodSpec.ResourceClaims. + type: object + required: + - name + properties: + name: + description: |- + Name must match the name of one entry in pod.spec.resourceClaims of + the Pod where this field is used. It makes that resource available + inside a container. + type: string + x-kubernetes-list-map-keys: + - name + x-kubernetes-list-type: map + limits: + description: |- + Limits describes the maximum amount of compute resources allowed. + More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ + type: object + additionalProperties: + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + anyOf: + - type: integer + - type: string + x-kubernetes-int-or-string: true + requests: + description: |- + Requests describes the minimum amount of compute resources required. + If Requests is omitted for a container, it defaults to Limits if that is explicitly specified, + otherwise to an implementation-defined value. Requests cannot exceed Limits. + More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ + type: object + additionalProperties: + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + anyOf: + - type: integer + - type: string + x-kubernetes-int-or-string: true + securityContext: + description: |- + Container security context. By default, the operator does not apply any + container security context. + https://kubernetes.io/docs/reference/kubernetes-api/workload-resources/pod-v1/#security-context + type: object + properties: + allowPrivilegeEscalation: + description: |- + AllowPrivilegeEscalation controls whether a process can gain more + privileges than its parent process. This bool directly controls if + the no_new_privs flag will be set on the container process. 
+ AllowPrivilegeEscalation is true always when the container is: + 1) run as Privileged + 2) has CAP_SYS_ADMIN + Note that this field cannot be set when spec.os.name is windows. + type: boolean + appArmorProfile: + description: |- + appArmorProfile is the AppArmor options to use by this container. If set, this profile + overrides the pod's appArmorProfile. + Note that this field cannot be set when spec.os.name is windows. + type: object + required: + - type + properties: + localhostProfile: + description: |- + localhostProfile indicates a profile loaded on the node that should be used. + The profile must be preconfigured on the node to work. + Must match the loaded name of the profile. + Must be set if and only if type is "Localhost". + type: string + type: + description: |- + type indicates which kind of AppArmor profile will be applied. + Valid options are: + Localhost - a profile pre-loaded on the node. + RuntimeDefault - the container runtime's default profile. + Unconfined - no AppArmor enforcement. + type: string + capabilities: + description: |- + The capabilities to add/drop when running containers. + Defaults to the default set of capabilities granted by the container runtime. + Note that this field cannot be set when spec.os.name is windows. + type: object + properties: + add: + description: Added capabilities + type: array + items: + description: Capability represent POSIX capabilities type + type: string + x-kubernetes-list-type: atomic + drop: + description: Removed capabilities + type: array + items: + description: Capability represent POSIX capabilities type + type: string + x-kubernetes-list-type: atomic + privileged: + description: |- + Run container in privileged mode. + Processes in privileged containers are essentially equivalent to root on the host. + Defaults to false. + Note that this field cannot be set when spec.os.name is windows. 
+ type: boolean + procMount: + description: |- + procMount denotes the type of proc mount to use for the containers. + The default is DefaultProcMount which uses the container runtime defaults for + readonly paths and masked paths. + This requires the ProcMountType feature flag to be enabled. + Note that this field cannot be set when spec.os.name is windows. + type: string + readOnlyRootFilesystem: + description: |- + Whether this container has a read-only root filesystem. + Default is false. + Note that this field cannot be set when spec.os.name is windows. + type: boolean + runAsGroup: + description: |- + The GID to run the entrypoint of the container process. + Uses runtime default if unset. + May also be set in PodSecurityContext. If set in both SecurityContext and + PodSecurityContext, the value specified in SecurityContext takes precedence. + Note that this field cannot be set when spec.os.name is windows. + type: integer + format: int64 + runAsNonRoot: + description: |- + Indicates that the container must run as a non-root user. + If true, the Kubelet will validate the image at runtime to ensure that it + does not run as UID 0 (root) and fail to start the container if it does. + If unset or false, no such validation will be performed. + May also be set in PodSecurityContext. If set in both SecurityContext and + PodSecurityContext, the value specified in SecurityContext takes precedence. + type: boolean + runAsUser: + description: |- + The UID to run the entrypoint of the container process. + Defaults to user specified in image metadata if unspecified. + May also be set in PodSecurityContext. If set in both SecurityContext and + PodSecurityContext, the value specified in SecurityContext takes precedence. + Note that this field cannot be set when spec.os.name is windows. + type: integer + format: int64 + seLinuxOptions: + description: |- + The SELinux context to be applied to the container. 
+ If unspecified, the container runtime will allocate a random SELinux context for each + container. May also be set in PodSecurityContext. If set in both SecurityContext and + PodSecurityContext, the value specified in SecurityContext takes precedence. + Note that this field cannot be set when spec.os.name is windows. + type: object + properties: + level: + description: Level is SELinux level label that applies to the container. + type: string + role: + description: Role is a SELinux role label that applies to the container. + type: string + type: + description: Type is a SELinux type label that applies to the container. + type: string + user: + description: User is a SELinux user label that applies to the container. + type: string + seccompProfile: + description: |- + The seccomp options to use by this container. If seccomp options are + provided at both the pod & container level, the container options + override the pod options. + Note that this field cannot be set when spec.os.name is windows. + type: object + required: + - type + properties: + localhostProfile: + description: |- + localhostProfile indicates a profile defined in a file on the node should be used. + The profile must be preconfigured on the node to work. + Must be a descending path, relative to the kubelet's configured seccomp profile location. + Must be set if type is "Localhost". Must NOT be set for any other type. + type: string + type: + description: |- + type indicates which kind of seccomp profile will be applied. + Valid options are: + + Localhost - a profile defined in a file on the node should be used. + RuntimeDefault - the container runtime default profile should be used. + Unconfined - no profile should be applied. + type: string + windowsOptions: + description: |- + The Windows specific settings applied to all containers. + If unspecified, the options from the PodSecurityContext will be used. 
+ If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence. + Note that this field cannot be set when spec.os.name is linux. + type: object + properties: + gmsaCredentialSpec: + description: |- + GMSACredentialSpec is where the GMSA admission webhook + (https://github.com/kubernetes-sigs/windows-gmsa) inlines the contents of the + GMSA credential spec named by the GMSACredentialSpecName field. + type: string + gmsaCredentialSpecName: + description: GMSACredentialSpecName is the name of the GMSA credential spec to use. + type: string + hostProcess: + description: |- + HostProcess determines if a container should be run as a 'Host Process' container. + All of a Pod's containers must have the same effective HostProcess value + (it is not allowed to have a mix of HostProcess containers and non-HostProcess containers). + In addition, if HostProcess is true then HostNetwork must also be set to true. + type: boolean + runAsUserName: + description: |- + The UserName in Windows to run the entrypoint of the container process. + Defaults to the user specified in image metadata if unspecified. + May also be set in PodSecurityContext. If set in both SecurityContext and + PodSecurityContext, the value specified in SecurityContext takes precedence. + type: string + imagePullSecrets: + description: |- + Image pull Secrets for Recorder Pods. + https://kubernetes.io/docs/reference/kubernetes-api/workload-resources/pod-v1/#PodSpec + type: array + items: + description: |- + LocalObjectReference contains enough information to let you locate the + referenced object inside the same namespace. + type: object + properties: + name: + description: |- + Name of the referent. + This field is effectively required, but due to backwards compatibility is + allowed to be empty. Instances of this type with an empty value here are + almost certainly wrong. 
+ More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + type: string + default: "" + x-kubernetes-map-type: atomic + labels: + description: |- + Labels that will be added to Recorder Pods. Any labels specified here + will be merged with the default labels applied to the Pod by the operator. + https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/#syntax-and-character-set + type: object + additionalProperties: + type: string + nodeSelector: + description: |- + Node selector rules for Recorder Pods. By default, the operator does + not apply any node selector rules. + https://kubernetes.io/docs/reference/kubernetes-api/workload-resources/pod-v1/#scheduling + type: object + additionalProperties: + type: string + securityContext: + description: |- + Security context for Recorder Pods. By default, the operator does not + apply any Pod security context. + https://kubernetes.io/docs/reference/kubernetes-api/workload-resources/pod-v1/#security-context-2 + type: object + properties: + appArmorProfile: + description: |- + appArmorProfile is the AppArmor options to use by the containers in this pod. + Note that this field cannot be set when spec.os.name is windows. + type: object + required: + - type + properties: + localhostProfile: + description: |- + localhostProfile indicates a profile loaded on the node that should be used. + The profile must be preconfigured on the node to work. + Must match the loaded name of the profile. + Must be set if and only if type is "Localhost". + type: string + type: + description: |- + type indicates which kind of AppArmor profile will be applied. + Valid options are: + Localhost - a profile pre-loaded on the node. + RuntimeDefault - the container runtime's default profile. + Unconfined - no AppArmor enforcement. + type: string + fsGroup: + description: |- + A special supplemental group that applies to all containers in a pod. 
+ Some volume types allow the Kubelet to change the ownership of that volume + to be owned by the pod: + + 1. The owning GID will be the FSGroup + 2. The setgid bit is set (new files created in the volume will be owned by FSGroup) + 3. The permission bits are OR'd with rw-rw---- + + If unset, the Kubelet will not modify the ownership and permissions of any volume. + Note that this field cannot be set when spec.os.name is windows. + type: integer + format: int64 + fsGroupChangePolicy: + description: |- + fsGroupChangePolicy defines behavior of changing ownership and permission of the volume + before being exposed inside Pod. This field will only apply to + volume types which support fsGroup based ownership(and permissions). + It will have no effect on ephemeral volume types such as: secret, configmaps + and emptydir. + Valid values are "OnRootMismatch" and "Always". If not specified, "Always" is used. + Note that this field cannot be set when spec.os.name is windows. + type: string + runAsGroup: + description: |- + The GID to run the entrypoint of the container process. + Uses runtime default if unset. + May also be set in SecurityContext. If set in both SecurityContext and + PodSecurityContext, the value specified in SecurityContext takes precedence + for that container. + Note that this field cannot be set when spec.os.name is windows. + type: integer + format: int64 + runAsNonRoot: + description: |- + Indicates that the container must run as a non-root user. + If true, the Kubelet will validate the image at runtime to ensure that it + does not run as UID 0 (root) and fail to start the container if it does. + If unset or false, no such validation will be performed. + May also be set in SecurityContext. If set in both SecurityContext and + PodSecurityContext, the value specified in SecurityContext takes precedence. + type: boolean + runAsUser: + description: |- + The UID to run the entrypoint of the container process. 
+ Defaults to user specified in image metadata if unspecified. + May also be set in SecurityContext. If set in both SecurityContext and + PodSecurityContext, the value specified in SecurityContext takes precedence + for that container. + Note that this field cannot be set when spec.os.name is windows. + type: integer + format: int64 + seLinuxOptions: + description: |- + The SELinux context to be applied to all containers. + If unspecified, the container runtime will allocate a random SELinux context for each + container. May also be set in SecurityContext. If set in + both SecurityContext and PodSecurityContext, the value specified in SecurityContext + takes precedence for that container. + Note that this field cannot be set when spec.os.name is windows. + type: object + properties: + level: + description: Level is SELinux level label that applies to the container. + type: string + role: + description: Role is a SELinux role label that applies to the container. + type: string + type: + description: Type is a SELinux type label that applies to the container. + type: string + user: + description: User is a SELinux user label that applies to the container. + type: string + seccompProfile: + description: |- + The seccomp options to use by the containers in this pod. + Note that this field cannot be set when spec.os.name is windows. + type: object + required: + - type + properties: + localhostProfile: + description: |- + localhostProfile indicates a profile defined in a file on the node should be used. + The profile must be preconfigured on the node to work. + Must be a descending path, relative to the kubelet's configured seccomp profile location. + Must be set if type is "Localhost". Must NOT be set for any other type. + type: string + type: + description: |- + type indicates which kind of seccomp profile will be applied. + Valid options are: + + Localhost - a profile defined in a file on the node should be used. 
+ RuntimeDefault - the container runtime default profile should be used. + Unconfined - no profile should be applied. + type: string + supplementalGroups: + description: |- + A list of groups applied to the first process run in each container, in addition + to the container's primary GID, the fsGroup (if specified), and group memberships + defined in the container image for the uid of the container process. If unspecified, + no additional groups are added to any container. Note that group memberships + defined in the container image for the uid of the container process are still effective, + even if they are not included in this list. + Note that this field cannot be set when spec.os.name is windows. + type: array + items: + type: integer + format: int64 + x-kubernetes-list-type: atomic + sysctls: + description: |- + Sysctls hold a list of namespaced sysctls used for the pod. Pods with unsupported + sysctls (by the container runtime) might fail to launch. + Note that this field cannot be set when spec.os.name is windows. + type: array + items: + description: Sysctl defines a kernel parameter to be set + type: object + required: + - name + - value + properties: + name: + description: Name of a property to set + type: string + value: + description: Value of a property to set + type: string + x-kubernetes-list-type: atomic + windowsOptions: + description: |- + The Windows specific settings applied to all containers. + If unspecified, the options within a container's SecurityContext will be used. + If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence. + Note that this field cannot be set when spec.os.name is linux. + type: object + properties: + gmsaCredentialSpec: + description: |- + GMSACredentialSpec is where the GMSA admission webhook + (https://github.com/kubernetes-sigs/windows-gmsa) inlines the contents of the + GMSA credential spec named by the GMSACredentialSpecName field. 
+ type: string + gmsaCredentialSpecName: + description: GMSACredentialSpecName is the name of the GMSA credential spec to use. + type: string + hostProcess: + description: |- + HostProcess determines if a container should be run as a 'Host Process' container. + All of a Pod's containers must have the same effective HostProcess value + (it is not allowed to have a mix of HostProcess containers and non-HostProcess containers). + In addition, if HostProcess is true then HostNetwork must also be set to true. + type: boolean + runAsUserName: + description: |- + The UserName in Windows to run the entrypoint of the container process. + Defaults to the user specified in image metadata if unspecified. + May also be set in PodSecurityContext. If set in both SecurityContext and + PodSecurityContext, the value specified in SecurityContext takes precedence. + type: string + tolerations: + description: |- + Tolerations for Recorder Pods. By default, the operator does not apply + any tolerations. + https://kubernetes.io/docs/reference/kubernetes-api/workload-resources/pod-v1/#scheduling + type: array + items: + description: |- + The pod this Toleration is attached to tolerates any taint that matches + the triple using the matching operator . + type: object + properties: + effect: + description: |- + Effect indicates the taint effect to match. Empty means match all taint effects. + When specified, allowed values are NoSchedule, PreferNoSchedule and NoExecute. + type: string + key: + description: |- + Key is the taint key that the toleration applies to. Empty means match all taint keys. + If the key is empty, operator must be Exists; this combination means to match all values and all keys. + type: string + operator: + description: |- + Operator represents a key's relationship to the value. + Valid operators are Exists and Equal. Defaults to Equal. + Exists is equivalent to wildcard for value, so that a pod can + tolerate all taints of a particular category. 
+ type: string + tolerationSeconds: + description: |- + TolerationSeconds represents the period of time the toleration (which must be + of effect NoExecute, otherwise this field is ignored) tolerates the taint. By default, + it is not set, which means tolerate the taint forever (do not evict). Zero and + negative values will be treated as 0 (evict immediately) by the system. + type: integer + format: int64 + value: + description: |- + Value is the taint value the toleration matches to. + If the operator is Exists, the value should be empty, otherwise just a regular string. + type: string + storage: + description: |- + Configure where to store session recordings. By default, recordings will + be stored in a local ephemeral volume, and will not be persisted past the + lifetime of a specific pod. + type: object + properties: + s3: + description: |- + Configure an S3-compatible API for storage. Required if the UI is not + enabled, to ensure that recordings are accessible. + type: object + properties: + bucket: + description: |- + Bucket name to write to. The bucket is expected to be used solely for + recordings, as there is no stable prefix for written object names. + type: string + credentials: + description: |- + Configure environment variable credentials for managing objects in the + configured bucket. If not set, tsrecorder will try to acquire credentials + first from the file system and then the STS API. + type: object + properties: + secret: + description: |- + Use a Kubernetes Secret from the operator's namespace as the source of + credentials. + type: object + properties: + name: + description: |- + The name of a Kubernetes Secret in the operator's namespace that contains + credentials for writing to the configured bucket. Each key-value pair + from the secret's data will be mounted as an environment variable. It + should include keys for AWS_ACCESS_KEY_ID and AWS_SECRET_ACCESS_KEY if + using a static access key. 
+ type: string + endpoint: + description: S3-compatible endpoint, e.g. s3.us-east-1.amazonaws.com. + type: string + tags: + description: |- + Tags that the Tailscale device will be tagged with. Defaults to [tag:k8s]. + If you specify custom tags here, make sure you also make the operator + an owner of these tags. + See https://tailscale.com/kb/1236/kubernetes-operator/#setting-up-the-kubernetes-operator. + Tags cannot be changed once a Recorder node has been created. + Tag values must be in form ^tag:[a-zA-Z][a-zA-Z0-9-]*$. + type: array + items: + type: string + pattern: ^tag:[a-zA-Z][a-zA-Z0-9-]*$ + status: + description: |- + RecorderStatus describes the status of the recorder. This is set + and managed by the Tailscale operator. + type: object + properties: + conditions: + description: |- + List of status conditions to indicate the status of the Recorder. + Known condition types are `RecorderReady`. + type: array + items: + description: Condition contains details for one aspect of the current state of this API Resource. + type: object + required: + - lastTransitionTime + - message + - reason + - status + - type + properties: + lastTransitionTime: + description: |- + lastTransitionTime is the last time the condition transitioned from one status to another. + This should be when the underlying condition changed. If that is not known, then using the time when the API field changed is acceptable. + type: string + format: date-time + message: + description: |- + message is a human readable message indicating details about the transition. + This may be an empty string. + type: string + maxLength: 32768 + observedGeneration: + description: |- + observedGeneration represents the .metadata.generation that the condition was set based upon. + For instance, if .metadata.generation is currently 12, but the .status.conditions[x].observedGeneration is 9, the condition is out of date + with respect to the current state of the instance. 
+ type: integer + format: int64 + minimum: 0 + reason: + description: |- + reason contains a programmatic identifier indicating the reason for the condition's last transition. + Producers of specific condition types may define expected values and meanings for this field, + and whether the values are considered a guaranteed API. + The value should be a CamelCase string. + This field may not be empty. + type: string + maxLength: 1024 + minLength: 1 + pattern: ^[A-Za-z]([A-Za-z0-9_,:]*[A-Za-z0-9_])?$ + status: + description: status of the condition, one of True, False, Unknown. + type: string + enum: + - "True" + - "False" + - Unknown + type: + description: type of condition in CamelCase or in foo.example.com/CamelCase. + type: string + maxLength: 316 + pattern: ^([a-z0-9]([-a-z0-9]*[a-z0-9])?(\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*/)?(([A-Za-z0-9][-A-Za-z0-9_.]*)?[A-Za-z0-9])$ + x-kubernetes-list-map-keys: + - type + x-kubernetes-list-type: map + devices: + description: List of tailnet devices associated with the Recorder statefulset. + type: array + items: + type: object + required: + - hostname + properties: + hostname: + description: |- + Hostname is the fully qualified domain name of the device. + If MagicDNS is enabled in your tailnet, it is the MagicDNS name of the + node. + type: string + tailnetIPs: + description: |- + TailnetIPs is the set of tailnet IP addresses (both IPv4 and IPv6) + assigned to the device. + type: array + items: + type: string + url: + description: |- + URL where the UI is available if enabled for replaying recordings. This + will be an HTTPS MagicDNS URL. You must be connected to the same tailnet + as the recorder to access it. 
+ type: string + x-kubernetes-list-map-keys: + - hostname + x-kubernetes-list-type: map + served: true + storage: true + subresources: + status: {} diff --git a/cmd/k8s-operator/deploy/examples/recorder.yaml b/cmd/k8s-operator/deploy/examples/recorder.yaml new file mode 100644 index 0000000000000..24d24323b02c6 --- /dev/null +++ b/cmd/k8s-operator/deploy/examples/recorder.yaml @@ -0,0 +1,6 @@ +apiVersion: tailscale.com/v1alpha1 +kind: Recorder +metadata: + name: recorder +spec: + enableUI: true diff --git a/cmd/k8s-operator/deploy/manifests/operator.yaml b/cmd/k8s-operator/deploy/manifests/operator.yaml index 4633ba3a48a38..0929aff4c8149 100644 --- a/cmd/k8s-operator/deploy/manifests/operator.yaml +++ b/cmd/k8s-operator/deploy/manifests/operator.yaml @@ -333,13 +333,13 @@ spec: when a DNSConfig is applied. properties: image: - description: Nameserver image. + description: Nameserver image. Defaults to tailscale/k8s-nameserver:unstable. properties: repo: description: Repo defaults to tailscale/k8s-nameserver. type: string tag: - description: Tag defaults to operator's own tag. + description: Tag defaults to unstable. type: string type: object type: object @@ -2416,6 +2416,1712 @@ spec: subresources: status: {} --- +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + annotations: + controller-gen.kubebuilder.io/version: v0.15.1-0.20240618033008-7824932b0cab + name: recorders.tailscale.com +spec: + group: tailscale.com + names: + kind: Recorder + listKind: RecorderList + plural: recorders + shortNames: + - rec + singular: recorder + scope: Cluster + versions: + - additionalPrinterColumns: + - description: Status of the deployed Recorder resources. + jsonPath: .status.conditions[?(@.type == "RecorderReady")].reason + name: Status + type: string + - description: URL on which the UI is exposed if enabled. 
+ jsonPath: .status.devices[?(@.url != "")].url + name: URL + type: string + name: v1alpha1 + schema: + openAPIV3Schema: + properties: + apiVersion: + description: |- + APIVersion defines the versioned schema of this representation of an object. + Servers should convert recognized schemas to the latest internal value, and + may reject unrecognized values. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources + type: string + kind: + description: |- + Kind is a string value representing the REST resource this object represents. + Servers may infer this from the endpoint the client submits requests to. + Cannot be updated. + In CamelCase. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + type: string + metadata: + type: object + spec: + description: Spec describes the desired recorder instance. + properties: + enableUI: + description: |- + Set to true to enable the Recorder UI. The UI lists and plays recorded sessions. + The UI will be served at :443. Defaults to false. + Corresponds to --ui tsrecorder flag https://tailscale.com/kb/1246/tailscale-ssh-session-recording#deploy-a-recorder-node. + Required if S3 storage is not set up, to ensure that recordings are accessible. + type: boolean + statefulSet: + description: |- + Configuration parameters for the Recorder's StatefulSet. The operator + deploys a StatefulSet for each Recorder resource. + properties: + annotations: + additionalProperties: + type: string + description: |- + Annotations that will be added to the StatefulSet created for the Recorder. + Any Annotations specified here will be merged with the default annotations + applied to the StatefulSet by the operator. 
+ https://kubernetes.io/docs/concepts/overview/working-with-objects/annotations/#syntax-and-character-set + type: object + labels: + additionalProperties: + type: string + description: |- + Labels that will be added to the StatefulSet created for the Recorder. + Any labels specified here will be merged with the default labels applied + to the StatefulSet by the operator. + https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/#syntax-and-character-set + type: object + pod: + description: Configuration for pods created by the Recorder's StatefulSet. + properties: + affinity: + description: |- + Affinity rules for Recorder Pods. By default, the operator does not + apply any affinity rules. + https://kubernetes.io/docs/reference/kubernetes-api/workload-resources/pod-v1/#affinity + properties: + nodeAffinity: + description: Describes node affinity scheduling rules for the pod. + properties: + preferredDuringSchedulingIgnoredDuringExecution: + description: |- + The scheduler will prefer to schedule pods to nodes that satisfy + the affinity expressions specified by this field, but it may choose + a node that violates one or more of the expressions. The node that is + most preferred is the one with the greatest sum of weights, i.e. + for each node that meets all of the scheduling requirements (resource + request, requiredDuringScheduling affinity expressions, etc.), + compute a sum by iterating through the elements of this field and adding + "weight" to the sum if the node matches the corresponding matchExpressions; the + node(s) with the highest sum are the most preferred. + items: + description: |- + An empty preferred scheduling term matches all objects with implicit weight 0 + (i.e. it's a no-op). A null preferred scheduling term matches no objects (i.e. is also a no-op). + properties: + preference: + description: A node selector term, associated with the corresponding weight. 
+ properties: + matchExpressions: + description: A list of node selector requirements by node's labels. + items: + description: |- + A node selector requirement is a selector that contains values, a key, and an operator + that relates the key and values. + properties: + key: + description: The label key that the selector applies to. + type: string + operator: + description: |- + Represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists, DoesNotExist. Gt, and Lt. + type: string + values: + description: |- + An array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. If the operator is Gt or Lt, the values + array must have a single element, which will be interpreted as an integer. + This array is replaced during a strategic merge patch. + items: + type: string + type: array + x-kubernetes-list-type: atomic + required: + - key + - operator + type: object + type: array + x-kubernetes-list-type: atomic + matchFields: + description: A list of node selector requirements by node's fields. + items: + description: |- + A node selector requirement is a selector that contains values, a key, and an operator + that relates the key and values. + properties: + key: + description: The label key that the selector applies to. + type: string + operator: + description: |- + Represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists, DoesNotExist. Gt, and Lt. + type: string + values: + description: |- + An array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. If the operator is Gt or Lt, the values + array must have a single element, which will be interpreted as an integer. + This array is replaced during a strategic merge patch. 
+ items: + type: string + type: array + x-kubernetes-list-type: atomic + required: + - key + - operator + type: object + type: array + x-kubernetes-list-type: atomic + type: object + x-kubernetes-map-type: atomic + weight: + description: Weight associated with matching the corresponding nodeSelectorTerm, in the range 1-100. + format: int32 + type: integer + required: + - preference + - weight + type: object + type: array + x-kubernetes-list-type: atomic + requiredDuringSchedulingIgnoredDuringExecution: + description: |- + If the affinity requirements specified by this field are not met at + scheduling time, the pod will not be scheduled onto the node. + If the affinity requirements specified by this field cease to be met + at some point during pod execution (e.g. due to an update), the system + may or may not try to eventually evict the pod from its node. + properties: + nodeSelectorTerms: + description: Required. A list of node selector terms. The terms are ORed. + items: + description: |- + A null or empty node selector term matches no objects. The requirements of + them are ANDed. + The TopologySelectorTerm type implements a subset of the NodeSelectorTerm. + properties: + matchExpressions: + description: A list of node selector requirements by node's labels. + items: + description: |- + A node selector requirement is a selector that contains values, a key, and an operator + that relates the key and values. + properties: + key: + description: The label key that the selector applies to. + type: string + operator: + description: |- + Represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists, DoesNotExist. Gt, and Lt. + type: string + values: + description: |- + An array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. 
If the operator is Gt or Lt, the values + array must have a single element, which will be interpreted as an integer. + This array is replaced during a strategic merge patch. + items: + type: string + type: array + x-kubernetes-list-type: atomic + required: + - key + - operator + type: object + type: array + x-kubernetes-list-type: atomic + matchFields: + description: A list of node selector requirements by node's fields. + items: + description: |- + A node selector requirement is a selector that contains values, a key, and an operator + that relates the key and values. + properties: + key: + description: The label key that the selector applies to. + type: string + operator: + description: |- + Represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists, DoesNotExist. Gt, and Lt. + type: string + values: + description: |- + An array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. If the operator is Gt or Lt, the values + array must have a single element, which will be interpreted as an integer. + This array is replaced during a strategic merge patch. + items: + type: string + type: array + x-kubernetes-list-type: atomic + required: + - key + - operator + type: object + type: array + x-kubernetes-list-type: atomic + type: object + x-kubernetes-map-type: atomic + type: array + x-kubernetes-list-type: atomic + required: + - nodeSelectorTerms + type: object + x-kubernetes-map-type: atomic + type: object + podAffinity: + description: Describes pod affinity scheduling rules (e.g. co-locate this pod in the same node, zone, etc. as some other pod(s)). + properties: + preferredDuringSchedulingIgnoredDuringExecution: + description: |- + The scheduler will prefer to schedule pods to nodes that satisfy + the affinity expressions specified by this field, but it may choose + a node that violates one or more of the expressions. 
The node that is + most preferred is the one with the greatest sum of weights, i.e. + for each node that meets all of the scheduling requirements (resource + request, requiredDuringScheduling affinity expressions, etc.), + compute a sum by iterating through the elements of this field and adding + "weight" to the sum if the node has pods which matches the corresponding podAffinityTerm; the + node(s) with the highest sum are the most preferred. + items: + description: The weights of all of the matched WeightedPodAffinityTerm fields are added per-node to find the most preferred node(s) + properties: + podAffinityTerm: + description: Required. A pod affinity term, associated with the corresponding weight. + properties: + labelSelector: + description: |- + A label query over a set of resources, in this case pods. + If it's null, this PodAffinityTerm matches with no Pods. + properties: + matchExpressions: + description: matchExpressions is a list of label selector requirements. The requirements are ANDed. + items: + description: |- + A label selector requirement is a selector that contains values, a key, and an operator that + relates the key and values. + properties: + key: + description: key is the label key that the selector applies to. + type: string + operator: + description: |- + operator represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: |- + values is an array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. This array is replaced during a strategic + merge patch. + items: + type: string + type: array + x-kubernetes-list-type: atomic + required: + - key + - operator + type: object + type: array + x-kubernetes-list-type: atomic + matchLabels: + additionalProperties: + type: string + description: |- + matchLabels is a map of {key,value} pairs. 
A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, whose key field is "key", the + operator is "In", and the values array contains only "value". The requirements are ANDed. + type: object + type: object + x-kubernetes-map-type: atomic + matchLabelKeys: + description: |- + MatchLabelKeys is a set of pod label keys to select which pods will + be taken into consideration. The keys are used to lookup values from the + incoming pod labels, those key-value labels are merged with `labelSelector` as `key in (value)` + to select the group of existing pods which pods will be taken into consideration + for the incoming pod's pod (anti) affinity. Keys that don't exist in the incoming + pod labels will be ignored. The default value is empty. + The same key is forbidden to exist in both matchLabelKeys and labelSelector. + Also, matchLabelKeys cannot be set when labelSelector isn't set. + This is an alpha field and requires enabling MatchLabelKeysInPodAffinity feature gate. + items: + type: string + type: array + x-kubernetes-list-type: atomic + mismatchLabelKeys: + description: |- + MismatchLabelKeys is a set of pod label keys to select which pods will + be taken into consideration. The keys are used to lookup values from the + incoming pod labels, those key-value labels are merged with `labelSelector` as `key notin (value)` + to select the group of existing pods which pods will be taken into consideration + for the incoming pod's pod (anti) affinity. Keys that don't exist in the incoming + pod labels will be ignored. The default value is empty. + The same key is forbidden to exist in both mismatchLabelKeys and labelSelector. + Also, mismatchLabelKeys cannot be set when labelSelector isn't set. + This is an alpha field and requires enabling MatchLabelKeysInPodAffinity feature gate. 
+ items: + type: string + type: array + x-kubernetes-list-type: atomic + namespaceSelector: + description: |- + A label query over the set of namespaces that the term applies to. + The term is applied to the union of the namespaces selected by this field + and the ones listed in the namespaces field. + null selector and null or empty namespaces list means "this pod's namespace". + An empty selector ({}) matches all namespaces. + properties: + matchExpressions: + description: matchExpressions is a list of label selector requirements. The requirements are ANDed. + items: + description: |- + A label selector requirement is a selector that contains values, a key, and an operator that + relates the key and values. + properties: + key: + description: key is the label key that the selector applies to. + type: string + operator: + description: |- + operator represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: |- + values is an array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. This array is replaced during a strategic + merge patch. + items: + type: string + type: array + x-kubernetes-list-type: atomic + required: + - key + - operator + type: object + type: array + x-kubernetes-list-type: atomic + matchLabels: + additionalProperties: + type: string + description: |- + matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, whose key field is "key", the + operator is "In", and the values array contains only "value". The requirements are ANDed. + type: object + type: object + x-kubernetes-map-type: atomic + namespaces: + description: |- + namespaces specifies a static list of namespace names that the term applies to. 
+ The term is applied to the union of the namespaces listed in this field + and the ones selected by namespaceSelector. + null or empty namespaces list and null namespaceSelector means "this pod's namespace". + items: + type: string + type: array + x-kubernetes-list-type: atomic + topologyKey: + description: |- + This pod should be co-located (affinity) or not co-located (anti-affinity) with the pods matching + the labelSelector in the specified namespaces, where co-located is defined as running on a node + whose value of the label with key topologyKey matches that of any node on which any of the + selected pods is running. + Empty topologyKey is not allowed. + type: string + required: + - topologyKey + type: object + weight: + description: |- + weight associated with matching the corresponding podAffinityTerm, + in the range 1-100. + format: int32 + type: integer + required: + - podAffinityTerm + - weight + type: object + type: array + x-kubernetes-list-type: atomic + requiredDuringSchedulingIgnoredDuringExecution: + description: |- + If the affinity requirements specified by this field are not met at + scheduling time, the pod will not be scheduled onto the node. + If the affinity requirements specified by this field cease to be met + at some point during pod execution (e.g. due to a pod label update), the + system may or may not try to eventually evict the pod from its node. + When there are multiple elements, the lists of nodes corresponding to each + podAffinityTerm are intersected, i.e. all terms must be satisfied. 
+ items: + description: |- + Defines a set of pods (namely those matching the labelSelector + relative to the given namespace(s)) that this pod should be + co-located (affinity) or not co-located (anti-affinity) with, + where co-located is defined as running on a node whose value of + the label with key matches that of any node on which + a pod of the set of pods is running + properties: + labelSelector: + description: |- + A label query over a set of resources, in this case pods. + If it's null, this PodAffinityTerm matches with no Pods. + properties: + matchExpressions: + description: matchExpressions is a list of label selector requirements. The requirements are ANDed. + items: + description: |- + A label selector requirement is a selector that contains values, a key, and an operator that + relates the key and values. + properties: + key: + description: key is the label key that the selector applies to. + type: string + operator: + description: |- + operator represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: |- + values is an array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. This array is replaced during a strategic + merge patch. + items: + type: string + type: array + x-kubernetes-list-type: atomic + required: + - key + - operator + type: object + type: array + x-kubernetes-list-type: atomic + matchLabels: + additionalProperties: + type: string + description: |- + matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, whose key field is "key", the + operator is "In", and the values array contains only "value". The requirements are ANDed. 
+ type: object + type: object + x-kubernetes-map-type: atomic + matchLabelKeys: + description: |- + MatchLabelKeys is a set of pod label keys to select which pods will + be taken into consideration. The keys are used to lookup values from the + incoming pod labels, those key-value labels are merged with `labelSelector` as `key in (value)` + to select the group of existing pods which pods will be taken into consideration + for the incoming pod's pod (anti) affinity. Keys that don't exist in the incoming + pod labels will be ignored. The default value is empty. + The same key is forbidden to exist in both matchLabelKeys and labelSelector. + Also, matchLabelKeys cannot be set when labelSelector isn't set. + This is an alpha field and requires enabling MatchLabelKeysInPodAffinity feature gate. + items: + type: string + type: array + x-kubernetes-list-type: atomic + mismatchLabelKeys: + description: |- + MismatchLabelKeys is a set of pod label keys to select which pods will + be taken into consideration. The keys are used to lookup values from the + incoming pod labels, those key-value labels are merged with `labelSelector` as `key notin (value)` + to select the group of existing pods which pods will be taken into consideration + for the incoming pod's pod (anti) affinity. Keys that don't exist in the incoming + pod labels will be ignored. The default value is empty. + The same key is forbidden to exist in both mismatchLabelKeys and labelSelector. + Also, mismatchLabelKeys cannot be set when labelSelector isn't set. + This is an alpha field and requires enabling MatchLabelKeysInPodAffinity feature gate. + items: + type: string + type: array + x-kubernetes-list-type: atomic + namespaceSelector: + description: |- + A label query over the set of namespaces that the term applies to. + The term is applied to the union of the namespaces selected by this field + and the ones listed in the namespaces field. 
+ null selector and null or empty namespaces list means "this pod's namespace". + An empty selector ({}) matches all namespaces. + properties: + matchExpressions: + description: matchExpressions is a list of label selector requirements. The requirements are ANDed. + items: + description: |- + A label selector requirement is a selector that contains values, a key, and an operator that + relates the key and values. + properties: + key: + description: key is the label key that the selector applies to. + type: string + operator: + description: |- + operator represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: |- + values is an array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. This array is replaced during a strategic + merge patch. + items: + type: string + type: array + x-kubernetes-list-type: atomic + required: + - key + - operator + type: object + type: array + x-kubernetes-list-type: atomic + matchLabels: + additionalProperties: + type: string + description: |- + matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, whose key field is "key", the + operator is "In", and the values array contains only "value". The requirements are ANDed. + type: object + type: object + x-kubernetes-map-type: atomic + namespaces: + description: |- + namespaces specifies a static list of namespace names that the term applies to. + The term is applied to the union of the namespaces listed in this field + and the ones selected by namespaceSelector. + null or empty namespaces list and null namespaceSelector means "this pod's namespace". 
+ items: + type: string + type: array + x-kubernetes-list-type: atomic + topologyKey: + description: |- + This pod should be co-located (affinity) or not co-located (anti-affinity) with the pods matching + the labelSelector in the specified namespaces, where co-located is defined as running on a node + whose value of the label with key topologyKey matches that of any node on which any of the + selected pods is running. + Empty topologyKey is not allowed. + type: string + required: + - topologyKey + type: object + type: array + x-kubernetes-list-type: atomic + type: object + podAntiAffinity: + description: Describes pod anti-affinity scheduling rules (e.g. avoid putting this pod in the same node, zone, etc. as some other pod(s)). + properties: + preferredDuringSchedulingIgnoredDuringExecution: + description: |- + The scheduler will prefer to schedule pods to nodes that satisfy + the anti-affinity expressions specified by this field, but it may choose + a node that violates one or more of the expressions. The node that is + most preferred is the one with the greatest sum of weights, i.e. + for each node that meets all of the scheduling requirements (resource + request, requiredDuringScheduling anti-affinity expressions, etc.), + compute a sum by iterating through the elements of this field and adding + "weight" to the sum if the node has pods which matches the corresponding podAffinityTerm; the + node(s) with the highest sum are the most preferred. + items: + description: The weights of all of the matched WeightedPodAffinityTerm fields are added per-node to find the most preferred node(s) + properties: + podAffinityTerm: + description: Required. A pod affinity term, associated with the corresponding weight. + properties: + labelSelector: + description: |- + A label query over a set of resources, in this case pods. + If it's null, this PodAffinityTerm matches with no Pods. 
+ properties: + matchExpressions: + description: matchExpressions is a list of label selector requirements. The requirements are ANDed. + items: + description: |- + A label selector requirement is a selector that contains values, a key, and an operator that + relates the key and values. + properties: + key: + description: key is the label key that the selector applies to. + type: string + operator: + description: |- + operator represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: |- + values is an array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. This array is replaced during a strategic + merge patch. + items: + type: string + type: array + x-kubernetes-list-type: atomic + required: + - key + - operator + type: object + type: array + x-kubernetes-list-type: atomic + matchLabels: + additionalProperties: + type: string + description: |- + matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, whose key field is "key", the + operator is "In", and the values array contains only "value". The requirements are ANDed. + type: object + type: object + x-kubernetes-map-type: atomic + matchLabelKeys: + description: |- + MatchLabelKeys is a set of pod label keys to select which pods will + be taken into consideration. The keys are used to lookup values from the + incoming pod labels, those key-value labels are merged with `labelSelector` as `key in (value)` + to select the group of existing pods which pods will be taken into consideration + for the incoming pod's pod (anti) affinity. Keys that don't exist in the incoming + pod labels will be ignored. The default value is empty. + The same key is forbidden to exist in both matchLabelKeys and labelSelector. 
+ Also, matchLabelKeys cannot be set when labelSelector isn't set. + This is an alpha field and requires enabling MatchLabelKeysInPodAffinity feature gate. + items: + type: string + type: array + x-kubernetes-list-type: atomic + mismatchLabelKeys: + description: |- + MismatchLabelKeys is a set of pod label keys to select which pods will + be taken into consideration. The keys are used to lookup values from the + incoming pod labels, those key-value labels are merged with `labelSelector` as `key notin (value)` + to select the group of existing pods which pods will be taken into consideration + for the incoming pod's pod (anti) affinity. Keys that don't exist in the incoming + pod labels will be ignored. The default value is empty. + The same key is forbidden to exist in both mismatchLabelKeys and labelSelector. + Also, mismatchLabelKeys cannot be set when labelSelector isn't set. + This is an alpha field and requires enabling MatchLabelKeysInPodAffinity feature gate. + items: + type: string + type: array + x-kubernetes-list-type: atomic + namespaceSelector: + description: |- + A label query over the set of namespaces that the term applies to. + The term is applied to the union of the namespaces selected by this field + and the ones listed in the namespaces field. + null selector and null or empty namespaces list means "this pod's namespace". + An empty selector ({}) matches all namespaces. + properties: + matchExpressions: + description: matchExpressions is a list of label selector requirements. The requirements are ANDed. + items: + description: |- + A label selector requirement is a selector that contains values, a key, and an operator that + relates the key and values. + properties: + key: + description: key is the label key that the selector applies to. + type: string + operator: + description: |- + operator represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists and DoesNotExist. 
+ type: string + values: + description: |- + values is an array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. This array is replaced during a strategic + merge patch. + items: + type: string + type: array + x-kubernetes-list-type: atomic + required: + - key + - operator + type: object + type: array + x-kubernetes-list-type: atomic + matchLabels: + additionalProperties: + type: string + description: |- + matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, whose key field is "key", the + operator is "In", and the values array contains only "value". The requirements are ANDed. + type: object + type: object + x-kubernetes-map-type: atomic + namespaces: + description: |- + namespaces specifies a static list of namespace names that the term applies to. + The term is applied to the union of the namespaces listed in this field + and the ones selected by namespaceSelector. + null or empty namespaces list and null namespaceSelector means "this pod's namespace". + items: + type: string + type: array + x-kubernetes-list-type: atomic + topologyKey: + description: |- + This pod should be co-located (affinity) or not co-located (anti-affinity) with the pods matching + the labelSelector in the specified namespaces, where co-located is defined as running on a node + whose value of the label with key topologyKey matches that of any node on which any of the + selected pods is running. + Empty topologyKey is not allowed. + type: string + required: + - topologyKey + type: object + weight: + description: |- + weight associated with matching the corresponding podAffinityTerm, + in the range 1-100. 
+ format: int32 + type: integer + required: + - podAffinityTerm + - weight + type: object + type: array + x-kubernetes-list-type: atomic + requiredDuringSchedulingIgnoredDuringExecution: + description: |- + If the anti-affinity requirements specified by this field are not met at + scheduling time, the pod will not be scheduled onto the node. + If the anti-affinity requirements specified by this field cease to be met + at some point during pod execution (e.g. due to a pod label update), the + system may or may not try to eventually evict the pod from its node. + When there are multiple elements, the lists of nodes corresponding to each + podAffinityTerm are intersected, i.e. all terms must be satisfied. + items: + description: |- + Defines a set of pods (namely those matching the labelSelector + relative to the given namespace(s)) that this pod should be + co-located (affinity) or not co-located (anti-affinity) with, + where co-located is defined as running on a node whose value of + the label with key matches that of any node on which + a pod of the set of pods is running + properties: + labelSelector: + description: |- + A label query over a set of resources, in this case pods. + If it's null, this PodAffinityTerm matches with no Pods. + properties: + matchExpressions: + description: matchExpressions is a list of label selector requirements. The requirements are ANDed. + items: + description: |- + A label selector requirement is a selector that contains values, a key, and an operator that + relates the key and values. + properties: + key: + description: key is the label key that the selector applies to. + type: string + operator: + description: |- + operator represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: |- + values is an array of string values. If the operator is In or NotIn, + the values array must be non-empty. 
If the operator is Exists or DoesNotExist, + the values array must be empty. This array is replaced during a strategic + merge patch. + items: + type: string + type: array + x-kubernetes-list-type: atomic + required: + - key + - operator + type: object + type: array + x-kubernetes-list-type: atomic + matchLabels: + additionalProperties: + type: string + description: |- + matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, whose key field is "key", the + operator is "In", and the values array contains only "value". The requirements are ANDed. + type: object + type: object + x-kubernetes-map-type: atomic + matchLabelKeys: + description: |- + MatchLabelKeys is a set of pod label keys to select which pods will + be taken into consideration. The keys are used to lookup values from the + incoming pod labels, those key-value labels are merged with `labelSelector` as `key in (value)` + to select the group of existing pods which pods will be taken into consideration + for the incoming pod's pod (anti) affinity. Keys that don't exist in the incoming + pod labels will be ignored. The default value is empty. + The same key is forbidden to exist in both matchLabelKeys and labelSelector. + Also, matchLabelKeys cannot be set when labelSelector isn't set. + This is an alpha field and requires enabling MatchLabelKeysInPodAffinity feature gate. + items: + type: string + type: array + x-kubernetes-list-type: atomic + mismatchLabelKeys: + description: |- + MismatchLabelKeys is a set of pod label keys to select which pods will + be taken into consideration. The keys are used to lookup values from the + incoming pod labels, those key-value labels are merged with `labelSelector` as `key notin (value)` + to select the group of existing pods which pods will be taken into consideration + for the incoming pod's pod (anti) affinity. Keys that don't exist in the incoming + pod labels will be ignored. 
The default value is empty. + The same key is forbidden to exist in both mismatchLabelKeys and labelSelector. + Also, mismatchLabelKeys cannot be set when labelSelector isn't set. + This is an alpha field and requires enabling MatchLabelKeysInPodAffinity feature gate. + items: + type: string + type: array + x-kubernetes-list-type: atomic + namespaceSelector: + description: |- + A label query over the set of namespaces that the term applies to. + The term is applied to the union of the namespaces selected by this field + and the ones listed in the namespaces field. + null selector and null or empty namespaces list means "this pod's namespace". + An empty selector ({}) matches all namespaces. + properties: + matchExpressions: + description: matchExpressions is a list of label selector requirements. The requirements are ANDed. + items: + description: |- + A label selector requirement is a selector that contains values, a key, and an operator that + relates the key and values. + properties: + key: + description: key is the label key that the selector applies to. + type: string + operator: + description: |- + operator represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: |- + values is an array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. This array is replaced during a strategic + merge patch. + items: + type: string + type: array + x-kubernetes-list-type: atomic + required: + - key + - operator + type: object + type: array + x-kubernetes-list-type: atomic + matchLabels: + additionalProperties: + type: string + description: |- + matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, whose key field is "key", the + operator is "In", and the values array contains only "value". 
The requirements are ANDed. + type: object + type: object + x-kubernetes-map-type: atomic + namespaces: + description: |- + namespaces specifies a static list of namespace names that the term applies to. + The term is applied to the union of the namespaces listed in this field + and the ones selected by namespaceSelector. + null or empty namespaces list and null namespaceSelector means "this pod's namespace". + items: + type: string + type: array + x-kubernetes-list-type: atomic + topologyKey: + description: |- + This pod should be co-located (affinity) or not co-located (anti-affinity) with the pods matching + the labelSelector in the specified namespaces, where co-located is defined as running on a node + whose value of the label with key topologyKey matches that of any node on which any of the + selected pods is running. + Empty topologyKey is not allowed. + type: string + required: + - topologyKey + type: object + type: array + x-kubernetes-list-type: atomic + type: object + type: object + annotations: + additionalProperties: + type: string + description: |- + Annotations that will be added to Recorder Pods. Any annotations + specified here will be merged with the default annotations applied to + the Pod by the operator. + https://kubernetes.io/docs/concepts/overview/working-with-objects/annotations/#syntax-and-character-set + type: object + container: + description: Configuration for the Recorder container running tailscale. + properties: + env: + description: |- + List of environment variables to set in the container. + https://kubernetes.io/docs/reference/kubernetes-api/workload-resources/pod-v1/#environment-variables + Note that environment variables provided here will take precedence + over Tailscale-specific environment variables set by the operator, + however running proxies with custom values for Tailscale environment + variables (i.e TS_USERSPACE) is not recommended and might break in + the future. 
+ items: + properties: + name: + description: Name of the environment variable. Must be a C_IDENTIFIER. + pattern: ^[-._a-zA-Z][-._a-zA-Z0-9]*$ + type: string + value: + description: |- + Variable references $(VAR_NAME) are expanded using the previously defined + environment variables in the container and any service environment + variables. If a variable cannot be resolved, the reference in the input + string will be unchanged. Double $$ are reduced to a single $, which + allows for escaping the $(VAR_NAME) syntax: i.e. "$$(VAR_NAME)" will + produce the string literal "$(VAR_NAME)". Escaped references will never + be expanded, regardless of whether the variable exists or not. Defaults + to "". + type: string + required: + - name + type: object + type: array + image: + description: |- + Container image name including tag. Defaults to docker.io/tailscale/tsrecorder + with the same tag as the operator, but the official images are also + available at ghcr.io/tailscale/tsrecorder. + https://kubernetes.io/docs/reference/kubernetes-api/workload-resources/pod-v1/#image + type: string + imagePullPolicy: + description: |- + Image pull policy. One of Always, Never, IfNotPresent. Defaults to Always. + https://kubernetes.io/docs/reference/kubernetes-api/workload-resources/pod-v1/#image + enum: + - Always + - Never + - IfNotPresent + type: string + resources: + description: |- + Container resource requirements. + By default, the operator does not apply any resource requirements. The + amount of resources required wil depend on the volume of recordings sent. + https://kubernetes.io/docs/reference/kubernetes-api/workload-resources/pod-v1/#resources + properties: + claims: + description: |- + Claims lists the names of resources, defined in spec.resourceClaims, + that are used by this container. + + This is an alpha field and requires enabling the + DynamicResourceAllocation feature gate. + + This field is immutable. It can only be set for containers. 
+ items: + description: ResourceClaim references one entry in PodSpec.ResourceClaims. + properties: + name: + description: |- + Name must match the name of one entry in pod.spec.resourceClaims of + the Pod where this field is used. It makes that resource available + inside a container. + type: string + required: + - name + type: object + type: array + x-kubernetes-list-map-keys: + - name + x-kubernetes-list-type: map + limits: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: |- + Limits describes the maximum amount of compute resources allowed. + More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ + type: object + requests: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: |- + Requests describes the minimum amount of compute resources required. + If Requests is omitted for a container, it defaults to Limits if that is explicitly specified, + otherwise to an implementation-defined value. Requests cannot exceed Limits. + More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ + type: object + type: object + securityContext: + description: |- + Container security context. By default, the operator does not apply any + container security context. + https://kubernetes.io/docs/reference/kubernetes-api/workload-resources/pod-v1/#security-context + properties: + allowPrivilegeEscalation: + description: |- + AllowPrivilegeEscalation controls whether a process can gain more + privileges than its parent process. This bool directly controls if + the no_new_privs flag will be set on the container process. 
+ AllowPrivilegeEscalation is true always when the container is: + 1) run as Privileged + 2) has CAP_SYS_ADMIN + Note that this field cannot be set when spec.os.name is windows. + type: boolean + appArmorProfile: + description: |- + appArmorProfile is the AppArmor options to use by this container. If set, this profile + overrides the pod's appArmorProfile. + Note that this field cannot be set when spec.os.name is windows. + properties: + localhostProfile: + description: |- + localhostProfile indicates a profile loaded on the node that should be used. + The profile must be preconfigured on the node to work. + Must match the loaded name of the profile. + Must be set if and only if type is "Localhost". + type: string + type: + description: |- + type indicates which kind of AppArmor profile will be applied. + Valid options are: + Localhost - a profile pre-loaded on the node. + RuntimeDefault - the container runtime's default profile. + Unconfined - no AppArmor enforcement. + type: string + required: + - type + type: object + capabilities: + description: |- + The capabilities to add/drop when running containers. + Defaults to the default set of capabilities granted by the container runtime. + Note that this field cannot be set when spec.os.name is windows. + properties: + add: + description: Added capabilities + items: + description: Capability represent POSIX capabilities type + type: string + type: array + x-kubernetes-list-type: atomic + drop: + description: Removed capabilities + items: + description: Capability represent POSIX capabilities type + type: string + type: array + x-kubernetes-list-type: atomic + type: object + privileged: + description: |- + Run container in privileged mode. + Processes in privileged containers are essentially equivalent to root on the host. + Defaults to false. + Note that this field cannot be set when spec.os.name is windows. 
+ type: boolean + procMount: + description: |- + procMount denotes the type of proc mount to use for the containers. + The default is DefaultProcMount which uses the container runtime defaults for + readonly paths and masked paths. + This requires the ProcMountType feature flag to be enabled. + Note that this field cannot be set when spec.os.name is windows. + type: string + readOnlyRootFilesystem: + description: |- + Whether this container has a read-only root filesystem. + Default is false. + Note that this field cannot be set when spec.os.name is windows. + type: boolean + runAsGroup: + description: |- + The GID to run the entrypoint of the container process. + Uses runtime default if unset. + May also be set in PodSecurityContext. If set in both SecurityContext and + PodSecurityContext, the value specified in SecurityContext takes precedence. + Note that this field cannot be set when spec.os.name is windows. + format: int64 + type: integer + runAsNonRoot: + description: |- + Indicates that the container must run as a non-root user. + If true, the Kubelet will validate the image at runtime to ensure that it + does not run as UID 0 (root) and fail to start the container if it does. + If unset or false, no such validation will be performed. + May also be set in PodSecurityContext. If set in both SecurityContext and + PodSecurityContext, the value specified in SecurityContext takes precedence. + type: boolean + runAsUser: + description: |- + The UID to run the entrypoint of the container process. + Defaults to user specified in image metadata if unspecified. + May also be set in PodSecurityContext. If set in both SecurityContext and + PodSecurityContext, the value specified in SecurityContext takes precedence. + Note that this field cannot be set when spec.os.name is windows. + format: int64 + type: integer + seLinuxOptions: + description: |- + The SELinux context to be applied to the container. 
+ If unspecified, the container runtime will allocate a random SELinux context for each + container. May also be set in PodSecurityContext. If set in both SecurityContext and + PodSecurityContext, the value specified in SecurityContext takes precedence. + Note that this field cannot be set when spec.os.name is windows. + properties: + level: + description: Level is SELinux level label that applies to the container. + type: string + role: + description: Role is a SELinux role label that applies to the container. + type: string + type: + description: Type is a SELinux type label that applies to the container. + type: string + user: + description: User is a SELinux user label that applies to the container. + type: string + type: object + seccompProfile: + description: |- + The seccomp options to use by this container. If seccomp options are + provided at both the pod & container level, the container options + override the pod options. + Note that this field cannot be set when spec.os.name is windows. + properties: + localhostProfile: + description: |- + localhostProfile indicates a profile defined in a file on the node should be used. + The profile must be preconfigured on the node to work. + Must be a descending path, relative to the kubelet's configured seccomp profile location. + Must be set if type is "Localhost". Must NOT be set for any other type. + type: string + type: + description: |- + type indicates which kind of seccomp profile will be applied. + Valid options are: + + Localhost - a profile defined in a file on the node should be used. + RuntimeDefault - the container runtime default profile should be used. + Unconfined - no profile should be applied. + type: string + required: + - type + type: object + windowsOptions: + description: |- + The Windows specific settings applied to all containers. + If unspecified, the options from the PodSecurityContext will be used. 
+ If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence. + Note that this field cannot be set when spec.os.name is linux. + properties: + gmsaCredentialSpec: + description: |- + GMSACredentialSpec is where the GMSA admission webhook + (https://github.com/kubernetes-sigs/windows-gmsa) inlines the contents of the + GMSA credential spec named by the GMSACredentialSpecName field. + type: string + gmsaCredentialSpecName: + description: GMSACredentialSpecName is the name of the GMSA credential spec to use. + type: string + hostProcess: + description: |- + HostProcess determines if a container should be run as a 'Host Process' container. + All of a Pod's containers must have the same effective HostProcess value + (it is not allowed to have a mix of HostProcess containers and non-HostProcess containers). + In addition, if HostProcess is true then HostNetwork must also be set to true. + type: boolean + runAsUserName: + description: |- + The UserName in Windows to run the entrypoint of the container process. + Defaults to the user specified in image metadata if unspecified. + May also be set in PodSecurityContext. If set in both SecurityContext and + PodSecurityContext, the value specified in SecurityContext takes precedence. + type: string + type: object + type: object + type: object + imagePullSecrets: + description: |- + Image pull Secrets for Recorder Pods. + https://kubernetes.io/docs/reference/kubernetes-api/workload-resources/pod-v1/#PodSpec + items: + description: |- + LocalObjectReference contains enough information to let you locate the + referenced object inside the same namespace. + properties: + name: + default: "" + description: |- + Name of the referent. + This field is effectively required, but due to backwards compatibility is + allowed to be empty. Instances of this type with an empty value here are + almost certainly wrong. 
+ More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + type: string + type: object + x-kubernetes-map-type: atomic + type: array + labels: + additionalProperties: + type: string + description: |- + Labels that will be added to Recorder Pods. Any labels specified here + will be merged with the default labels applied to the Pod by the operator. + https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/#syntax-and-character-set + type: object + nodeSelector: + additionalProperties: + type: string + description: |- + Node selector rules for Recorder Pods. By default, the operator does + not apply any node selector rules. + https://kubernetes.io/docs/reference/kubernetes-api/workload-resources/pod-v1/#scheduling + type: object + securityContext: + description: |- + Security context for Recorder Pods. By default, the operator does not + apply any Pod security context. + https://kubernetes.io/docs/reference/kubernetes-api/workload-resources/pod-v1/#security-context-2 + properties: + appArmorProfile: + description: |- + appArmorProfile is the AppArmor options to use by the containers in this pod. + Note that this field cannot be set when spec.os.name is windows. + properties: + localhostProfile: + description: |- + localhostProfile indicates a profile loaded on the node that should be used. + The profile must be preconfigured on the node to work. + Must match the loaded name of the profile. + Must be set if and only if type is "Localhost". + type: string + type: + description: |- + type indicates which kind of AppArmor profile will be applied. + Valid options are: + Localhost - a profile pre-loaded on the node. + RuntimeDefault - the container runtime's default profile. + Unconfined - no AppArmor enforcement. + type: string + required: + - type + type: object + fsGroup: + description: |- + A special supplemental group that applies to all containers in a pod. 
+ Some volume types allow the Kubelet to change the ownership of that volume + to be owned by the pod: + + 1. The owning GID will be the FSGroup + 2. The setgid bit is set (new files created in the volume will be owned by FSGroup) + 3. The permission bits are OR'd with rw-rw---- + + If unset, the Kubelet will not modify the ownership and permissions of any volume. + Note that this field cannot be set when spec.os.name is windows. + format: int64 + type: integer + fsGroupChangePolicy: + description: |- + fsGroupChangePolicy defines behavior of changing ownership and permission of the volume + before being exposed inside Pod. This field will only apply to + volume types which support fsGroup based ownership(and permissions). + It will have no effect on ephemeral volume types such as: secret, configmaps + and emptydir. + Valid values are "OnRootMismatch" and "Always". If not specified, "Always" is used. + Note that this field cannot be set when spec.os.name is windows. + type: string + runAsGroup: + description: |- + The GID to run the entrypoint of the container process. + Uses runtime default if unset. + May also be set in SecurityContext. If set in both SecurityContext and + PodSecurityContext, the value specified in SecurityContext takes precedence + for that container. + Note that this field cannot be set when spec.os.name is windows. + format: int64 + type: integer + runAsNonRoot: + description: |- + Indicates that the container must run as a non-root user. + If true, the Kubelet will validate the image at runtime to ensure that it + does not run as UID 0 (root) and fail to start the container if it does. + If unset or false, no such validation will be performed. + May also be set in SecurityContext. If set in both SecurityContext and + PodSecurityContext, the value specified in SecurityContext takes precedence. + type: boolean + runAsUser: + description: |- + The UID to run the entrypoint of the container process. 
+ Defaults to user specified in image metadata if unspecified. + May also be set in SecurityContext. If set in both SecurityContext and + PodSecurityContext, the value specified in SecurityContext takes precedence + for that container. + Note that this field cannot be set when spec.os.name is windows. + format: int64 + type: integer + seLinuxOptions: + description: |- + The SELinux context to be applied to all containers. + If unspecified, the container runtime will allocate a random SELinux context for each + container. May also be set in SecurityContext. If set in + both SecurityContext and PodSecurityContext, the value specified in SecurityContext + takes precedence for that container. + Note that this field cannot be set when spec.os.name is windows. + properties: + level: + description: Level is SELinux level label that applies to the container. + type: string + role: + description: Role is a SELinux role label that applies to the container. + type: string + type: + description: Type is a SELinux type label that applies to the container. + type: string + user: + description: User is a SELinux user label that applies to the container. + type: string + type: object + seccompProfile: + description: |- + The seccomp options to use by the containers in this pod. + Note that this field cannot be set when spec.os.name is windows. + properties: + localhostProfile: + description: |- + localhostProfile indicates a profile defined in a file on the node should be used. + The profile must be preconfigured on the node to work. + Must be a descending path, relative to the kubelet's configured seccomp profile location. + Must be set if type is "Localhost". Must NOT be set for any other type. + type: string + type: + description: |- + type indicates which kind of seccomp profile will be applied. + Valid options are: + + Localhost - a profile defined in a file on the node should be used. + RuntimeDefault - the container runtime default profile should be used. 
+ Unconfined - no profile should be applied. + type: string + required: + - type + type: object + supplementalGroups: + description: |- + A list of groups applied to the first process run in each container, in addition + to the container's primary GID, the fsGroup (if specified), and group memberships + defined in the container image for the uid of the container process. If unspecified, + no additional groups are added to any container. Note that group memberships + defined in the container image for the uid of the container process are still effective, + even if they are not included in this list. + Note that this field cannot be set when spec.os.name is windows. + items: + format: int64 + type: integer + type: array + x-kubernetes-list-type: atomic + sysctls: + description: |- + Sysctls hold a list of namespaced sysctls used for the pod. Pods with unsupported + sysctls (by the container runtime) might fail to launch. + Note that this field cannot be set when spec.os.name is windows. + items: + description: Sysctl defines a kernel parameter to be set + properties: + name: + description: Name of a property to set + type: string + value: + description: Value of a property to set + type: string + required: + - name + - value + type: object + type: array + x-kubernetes-list-type: atomic + windowsOptions: + description: |- + The Windows specific settings applied to all containers. + If unspecified, the options within a container's SecurityContext will be used. + If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence. + Note that this field cannot be set when spec.os.name is linux. + properties: + gmsaCredentialSpec: + description: |- + GMSACredentialSpec is where the GMSA admission webhook + (https://github.com/kubernetes-sigs/windows-gmsa) inlines the contents of the + GMSA credential spec named by the GMSACredentialSpecName field. 
+ type: string + gmsaCredentialSpecName: + description: GMSACredentialSpecName is the name of the GMSA credential spec to use. + type: string + hostProcess: + description: |- + HostProcess determines if a container should be run as a 'Host Process' container. + All of a Pod's containers must have the same effective HostProcess value + (it is not allowed to have a mix of HostProcess containers and non-HostProcess containers). + In addition, if HostProcess is true then HostNetwork must also be set to true. + type: boolean + runAsUserName: + description: |- + The UserName in Windows to run the entrypoint of the container process. + Defaults to the user specified in image metadata if unspecified. + May also be set in PodSecurityContext. If set in both SecurityContext and + PodSecurityContext, the value specified in SecurityContext takes precedence. + type: string + type: object + type: object + tolerations: + description: |- + Tolerations for Recorder Pods. By default, the operator does not apply + any tolerations. + https://kubernetes.io/docs/reference/kubernetes-api/workload-resources/pod-v1/#scheduling + items: + description: |- + The pod this Toleration is attached to tolerates any taint that matches + the triple using the matching operator . + properties: + effect: + description: |- + Effect indicates the taint effect to match. Empty means match all taint effects. + When specified, allowed values are NoSchedule, PreferNoSchedule and NoExecute. + type: string + key: + description: |- + Key is the taint key that the toleration applies to. Empty means match all taint keys. + If the key is empty, operator must be Exists; this combination means to match all values and all keys. + type: string + operator: + description: |- + Operator represents a key's relationship to the value. + Valid operators are Exists and Equal. Defaults to Equal. + Exists is equivalent to wildcard for value, so that a pod can + tolerate all taints of a particular category. 
+ type: string + tolerationSeconds: + description: |- + TolerationSeconds represents the period of time the toleration (which must be + of effect NoExecute, otherwise this field is ignored) tolerates the taint. By default, + it is not set, which means tolerate the taint forever (do not evict). Zero and + negative values will be treated as 0 (evict immediately) by the system. + format: int64 + type: integer + value: + description: |- + Value is the taint value the toleration matches to. + If the operator is Exists, the value should be empty, otherwise just a regular string. + type: string + type: object + type: array + type: object + type: object + storage: + description: |- + Configure where to store session recordings. By default, recordings will + be stored in a local ephemeral volume, and will not be persisted past the + lifetime of a specific pod. + properties: + s3: + description: |- + Configure an S3-compatible API for storage. Required if the UI is not + enabled, to ensure that recordings are accessible. + properties: + bucket: + description: |- + Bucket name to write to. The bucket is expected to be used solely for + recordings, as there is no stable prefix for written object names. + type: string + credentials: + description: |- + Configure environment variable credentials for managing objects in the + configured bucket. If not set, tsrecorder will try to acquire credentials + first from the file system and then the STS API. + properties: + secret: + description: |- + Use a Kubernetes Secret from the operator's namespace as the source of + credentials. + properties: + name: + description: |- + The name of a Kubernetes Secret in the operator's namespace that contains + credentials for writing to the configured bucket. Each key-value pair + from the secret's data will be mounted as an environment variable. It + should include keys for AWS_ACCESS_KEY_ID and AWS_SECRET_ACCESS_KEY if + using a static access key. 
+ type: string + type: object + type: object + endpoint: + description: S3-compatible endpoint, e.g. s3.us-east-1.amazonaws.com. + type: string + type: object + type: object + tags: + description: |- + Tags that the Tailscale device will be tagged with. Defaults to [tag:k8s]. + If you specify custom tags here, make sure you also make the operator + an owner of these tags. + See https://tailscale.com/kb/1236/kubernetes-operator/#setting-up-the-kubernetes-operator. + Tags cannot be changed once a Recorder node has been created. + Tag values must be in form ^tag:[a-zA-Z][a-zA-Z0-9-]*$. + items: + pattern: ^tag:[a-zA-Z][a-zA-Z0-9-]*$ + type: string + type: array + type: object + status: + description: |- + RecorderStatus describes the status of the recorder. This is set + and managed by the Tailscale operator. + properties: + conditions: + description: |- + List of status conditions to indicate the status of the Recorder. + Known condition types are `RecorderReady`. + items: + description: Condition contains details for one aspect of the current state of this API Resource. + properties: + lastTransitionTime: + description: |- + lastTransitionTime is the last time the condition transitioned from one status to another. + This should be when the underlying condition changed. If that is not known, then using the time when the API field changed is acceptable. + format: date-time + type: string + message: + description: |- + message is a human readable message indicating details about the transition. + This may be an empty string. + maxLength: 32768 + type: string + observedGeneration: + description: |- + observedGeneration represents the .metadata.generation that the condition was set based upon. + For instance, if .metadata.generation is currently 12, but the .status.conditions[x].observedGeneration is 9, the condition is out of date + with respect to the current state of the instance. 
+ format: int64 + minimum: 0 + type: integer + reason: + description: |- + reason contains a programmatic identifier indicating the reason for the condition's last transition. + Producers of specific condition types may define expected values and meanings for this field, + and whether the values are considered a guaranteed API. + The value should be a CamelCase string. + This field may not be empty. + maxLength: 1024 + minLength: 1 + pattern: ^[A-Za-z]([A-Za-z0-9_,:]*[A-Za-z0-9_])?$ + type: string + status: + description: status of the condition, one of True, False, Unknown. + enum: + - "True" + - "False" + - Unknown + type: string + type: + description: type of condition in CamelCase or in foo.example.com/CamelCase. + maxLength: 316 + pattern: ^([a-z0-9]([-a-z0-9]*[a-z0-9])?(\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*/)?(([A-Za-z0-9][-A-Za-z0-9_.]*)?[A-Za-z0-9])$ + type: string + required: + - lastTransitionTime + - message + - reason + - status + - type + type: object + type: array + x-kubernetes-list-map-keys: + - type + x-kubernetes-list-type: map + devices: + description: List of tailnet devices associated with the Recorder statefulset. + items: + properties: + hostname: + description: |- + Hostname is the fully qualified domain name of the device. + If MagicDNS is enabled in your tailnet, it is the MagicDNS name of the + node. + type: string + tailnetIPs: + description: |- + TailnetIPs is the set of tailnet IP addresses (both IPv4 and IPv6) + assigned to the device. + items: + type: string + type: array + url: + description: |- + URL where the UI is available if enabled for replaying recordings. This + will be an HTTPS MagicDNS URL. You must be connected to the same tailnet + as the recorder to access it. 
+ type: string + required: + - hostname + type: object + type: array + x-kubernetes-list-map-keys: + - hostname + x-kubernetes-list-type: map + type: object + required: + - spec + type: object + served: true + storage: true + subresources: + status: {} +--- apiVersion: rbac.authorization.k8s.io/v1 kind: ClusterRole metadata: @@ -2428,14 +4134,28 @@ rules: - services - services/status verbs: - - '*' + - create + - delete + - deletecollection + - get + - list + - patch + - update + - watch - apiGroups: - networking.k8s.io resources: - ingresses - ingresses/status verbs: - - '*' + - create + - delete + - deletecollection + - get + - list + - patch + - update + - watch - apiGroups: - networking.k8s.io resources: @@ -2466,6 +4186,16 @@ rules: - list - watch - update + - apiGroups: + - tailscale.com + resources: + - recorders + - recorders/status + verbs: + - get + - list + - watch + - update --- apiVersion: rbac.authorization.k8s.io/v1 kind: ClusterRoleBinding @@ -2493,14 +4223,28 @@ rules: - serviceaccounts - configmaps verbs: - - '*' + - create + - delete + - deletecollection + - get + - list + - patch + - update + - watch - apiGroups: - apps resources: - statefulsets - deployments verbs: - - '*' + - create + - delete + - deletecollection + - get + - list + - patch + - update + - watch - apiGroups: - discovery.k8s.io resources: @@ -2509,6 +4253,18 @@ rules: - get - list - watch + - apiGroups: + - rbac.authorization.k8s.io + resources: + - roles + - rolebindings + verbs: + - get + - create + - patch + - update + - list + - watch --- apiVersion: rbac.authorization.k8s.io/v1 kind: Role @@ -2521,7 +4277,14 @@ rules: resources: - secrets verbs: - - '*' + - create + - delete + - deletecollection + - get + - list + - patch + - update + - watch --- apiVersion: rbac.authorization.k8s.io/v1 kind: RoleBinding diff --git a/cmd/k8s-operator/generate/main.go b/cmd/k8s-operator/generate/main.go index 8b1c96723edbd..539dad275d448 100644 --- a/cmd/k8s-operator/generate/main.go +++ 
b/cmd/k8s-operator/generate/main.go @@ -24,10 +24,12 @@ const ( connectorCRDPath = operatorDeploymentFilesPath + "/crds/tailscale.com_connectors.yaml" proxyClassCRDPath = operatorDeploymentFilesPath + "/crds/tailscale.com_proxyclasses.yaml" dnsConfigCRDPath = operatorDeploymentFilesPath + "/crds/tailscale.com_dnsconfigs.yaml" + recorderCRDPath = operatorDeploymentFilesPath + "/crds/tailscale.com_recorders.yaml" helmTemplatesPath = operatorDeploymentFilesPath + "/chart/templates" connectorCRDHelmTemplatePath = helmTemplatesPath + "/connector.yaml" proxyClassCRDHelmTemplatePath = helmTemplatesPath + "/proxyclass.yaml" dnsConfigCRDHelmTemplatePath = helmTemplatesPath + "/dnsconfig.yaml" + recorderCRDHelmTemplatePath = helmTemplatesPath + "/recorder.yaml" helmConditionalStart = "{{ if .Values.installCRDs -}}\n" helmConditionalEnd = "{{- end -}}" @@ -111,7 +113,7 @@ func main() { } } -// generate places tailscale.com CRDs (currently Connector, ProxyClass and DNSConfig) into +// generate places tailscale.com CRDs (currently Connector, ProxyClass, DNSConfig, Recorder) into // the Helm chart templates behind .Values.installCRDs=true condition (true by // default). 
func generate(baseDir string) error { @@ -137,28 +139,32 @@ func generate(baseDir string) error { } return nil } - if err := addCRDToHelm(connectorCRDPath, connectorCRDHelmTemplatePath); err != nil { - return fmt.Errorf("error adding Connector CRD to Helm templates: %w", err) - } - if err := addCRDToHelm(proxyClassCRDPath, proxyClassCRDHelmTemplatePath); err != nil { - return fmt.Errorf("error adding ProxyClass CRD to Helm templates: %w", err) - } - if err := addCRDToHelm(dnsConfigCRDPath, dnsConfigCRDHelmTemplatePath); err != nil { - return fmt.Errorf("error adding DNSConfig CRD to Helm templates: %w", err) + for _, crd := range []struct { + crdPath, templatePath string + }{ + {connectorCRDPath, connectorCRDHelmTemplatePath}, + {proxyClassCRDPath, proxyClassCRDHelmTemplatePath}, + {dnsConfigCRDPath, dnsConfigCRDHelmTemplatePath}, + {recorderCRDPath, recorderCRDHelmTemplatePath}, + } { + if err := addCRDToHelm(crd.crdPath, crd.templatePath); err != nil { + return fmt.Errorf("error adding %s CRD to Helm templates: %w", crd.crdPath, err) + } } return nil } func cleanup(baseDir string) error { log.Print("Cleaning up CRD from Helm templates") - if err := os.Remove(filepath.Join(baseDir, connectorCRDHelmTemplatePath)); err != nil && !os.IsNotExist(err) { - return fmt.Errorf("error cleaning up Connector CRD template: %w", err) - } - if err := os.Remove(filepath.Join(baseDir, proxyClassCRDHelmTemplatePath)); err != nil && !os.IsNotExist(err) { - return fmt.Errorf("error cleaning up ProxyClass CRD template: %w", err) - } - if err := os.Remove(filepath.Join(baseDir, dnsConfigCRDHelmTemplatePath)); err != nil && !os.IsNotExist(err) { - return fmt.Errorf("error cleaning up DNSConfig CRD template: %w", err) + for _, path := range []string{ + connectorCRDHelmTemplatePath, + proxyClassCRDHelmTemplatePath, + dnsConfigCRDHelmTemplatePath, + recorderCRDHelmTemplatePath, + } { + if err := os.Remove(filepath.Join(baseDir, path)); err != nil && !os.IsNotExist(err) { + return 
fmt.Errorf("error cleaning up %s: %w", path, err) + } } return nil } diff --git a/cmd/k8s-operator/generate/main_test.go b/cmd/k8s-operator/generate/main_test.go index febef67898d9c..d465cde7bfba1 100644 --- a/cmd/k8s-operator/generate/main_test.go +++ b/cmd/k8s-operator/generate/main_test.go @@ -59,6 +59,9 @@ func Test_generate(t *testing.T) { if !strings.Contains(installContentsWithCRD.String(), "name: dnsconfigs.tailscale.com") { t.Errorf("DNSConfig CRD not found in default chart install") } + if !strings.Contains(installContentsWithCRD.String(), "name: recorders.tailscale.com") { + t.Errorf("Recorder CRD not found in default chart install") + } // Test that CRDs can be excluded from Helm chart install installContentsWithoutCRD := bytes.NewBuffer([]byte{}) @@ -77,4 +80,7 @@ func Test_generate(t *testing.T) { if strings.Contains(installContentsWithoutCRD.String(), "name: dnsconfigs.tailscale.com") { t.Errorf("DNSConfig CRD found in chart install that should not contain a CRD") } + if strings.Contains(installContentsWithoutCRD.String(), "name: recorders.tailscale.com") { + t.Errorf("Recorder CRD found in chart install that should not contain a CRD") + } } diff --git a/cmd/k8s-operator/ingress.go b/cmd/k8s-operator/ingress.go index badc1e7a44355..700cf4be8a84f 100644 --- a/cmd/k8s-operator/ingress.go +++ b/cmd/k8s-operator/ingress.go @@ -23,6 +23,7 @@ import ( "sigs.k8s.io/controller-runtime/pkg/client" "sigs.k8s.io/controller-runtime/pkg/reconcile" "tailscale.com/ipn" + "tailscale.com/kube/kubetypes" "tailscale.com/types/opt" "tailscale.com/util/clientmetric" "tailscale.com/util/set" @@ -53,7 +54,7 @@ type IngressReconciler struct { var ( // gaugeIngressResources tracks the number of ingress resources that we're // currently managing. 
- gaugeIngressResources = clientmetric.NewGauge("k8s_ingress_resources") + gaugeIngressResources = clientmetric.NewGauge(kubetypes.MetricIngressResourceCount) ) func (a *IngressReconciler) Reconcile(ctx context.Context, req reconcile.Request) (_ reconcile.Result, err error) { diff --git a/cmd/k8s-operator/ingress_test.go b/cmd/k8s-operator/ingress_test.go index 10d109653d45c..8b18776b43c7b 100644 --- a/cmd/k8s-operator/ingress_test.go +++ b/cmd/k8s-operator/ingress_test.go @@ -17,6 +17,7 @@ import ( "sigs.k8s.io/controller-runtime/pkg/client/fake" "tailscale.com/ipn" tsapi "tailscale.com/k8s-operator/apis/v1alpha1" + "tailscale.com/kube/kubetypes" "tailscale.com/types/ptr" "tailscale.com/util/mak" ) @@ -93,6 +94,7 @@ func TestTailscaleIngress(t *testing.T) { namespace: "default", parentType: "ingress", hostname: "default-test", + app: kubetypes.AppIngressResource, } serveConfig := &ipn.ServeConfig{ TCP: map[uint16]*ipn.TCPPortHandler{443: {HTTPS: true}}, @@ -224,6 +226,7 @@ func TestTailscaleIngressWithProxyClass(t *testing.T) { namespace: "default", parentType: "ingress", hostname: "default-test", + app: kubetypes.AppIngressResource, } serveConfig := &ipn.ServeConfig{ TCP: map[uint16]*ipn.TCPPortHandler{443: {HTTPS: true}}, diff --git a/cmd/k8s-operator/nameserver.go b/cmd/k8s-operator/nameserver.go index 237fe6e825ec3..52577c929acea 100644 --- a/cmd/k8s-operator/nameserver.go +++ b/cmd/k8s-operator/nameserver.go @@ -28,6 +28,7 @@ import ( "sigs.k8s.io/yaml" tsoperator "tailscale.com/k8s-operator" tsapi "tailscale.com/k8s-operator/apis/v1alpha1" + "tailscale.com/kube/kubetypes" "tailscale.com/tstime" "tailscale.com/util/clientmetric" "tailscale.com/util/set" @@ -62,9 +63,7 @@ type NameserverReconciler struct { managedNameservers set.Slice[types.UID] // one or none } -var ( - gaugeNameserverResources = clientmetric.NewGauge("k8s_nameserver_resources") -) +var gaugeNameserverResources = clientmetric.NewGauge(kubetypes.MetricNameserverCount) func (a 
*NameserverReconciler) Reconcile(ctx context.Context, req reconcile.Request) (res reconcile.Result, err error) { logger := a.logger.With("dnsConfig", req.Name) diff --git a/cmd/k8s-operator/nameserver_test.go b/cmd/k8s-operator/nameserver_test.go index cb6bb7d0a808d..695710212e57b 100644 --- a/cmd/k8s-operator/nameserver_test.go +++ b/cmd/k8s-operator/nameserver_test.go @@ -33,7 +33,7 @@ func TestNameserverReconciler(t *testing.T) { }, Spec: tsapi.DNSConfigSpec{ Nameserver: &tsapi.Nameserver{ - Image: &tsapi.Image{ + Image: &tsapi.NameserverImage{ Repo: "test", Tag: "v0.0.1", }, diff --git a/cmd/k8s-operator/operator.go b/cmd/k8s-operator/operator.go index 18665bd8f028c..6fa1366cd38bf 100644 --- a/cmd/k8s-operator/operator.go +++ b/cmd/k8s-operator/operator.go @@ -22,6 +22,7 @@ import ( corev1 "k8s.io/api/core/v1" discoveryv1 "k8s.io/api/discovery/v1" networkingv1 "k8s.io/api/networking/v1" + rbacv1 "k8s.io/api/rbac/v1" "k8s.io/apimachinery/pkg/types" "k8s.io/client-go/rest" "sigs.k8s.io/controller-runtime/pkg/builder" @@ -39,6 +40,7 @@ import ( "tailscale.com/ipn" "tailscale.com/ipn/store/kubestore" tsapi "tailscale.com/k8s-operator/apis/v1alpha1" + "tailscale.com/kube/kubetypes" "tailscale.com/tsnet" "tailscale.com/tstime" "tailscale.com/types/logger" @@ -87,9 +89,9 @@ func main() { // https://tailscale.com/kb/1236/kubernetes-operator/?q=kubernetes#accessing-the-kubernetes-control-plane-using-an-api-server-proxy. 
mode := parseAPIProxyMode() if mode == apiserverProxyModeDisabled { - hostinfo.SetApp("k8s-operator") + hostinfo.SetApp(kubetypes.AppOperator) } else { - hostinfo.SetApp("k8s-operator-proxy") + hostinfo.SetApp(kubetypes.AppAPIServerProxy) } s, tsClient := initTSNet(zlog) @@ -240,6 +242,8 @@ func runReconcilers(opts reconcilerOpts) { &appsv1.StatefulSet{}: nsFilter, &appsv1.Deployment{}: nsFilter, &discoveryv1.EndpointSlice{}: nsFilter, + &rbacv1.Role{}: nsFilter, + &rbacv1.RoleBinding{}: nsFilter, }, }, Scheme: tsapi.GlobalScheme, @@ -300,10 +304,10 @@ func runReconcilers(opts reconcilerOpts) { Watches(&corev1.Service{}, svcHandlerForIngress). Watches(&tsapi.ProxyClass{}, proxyClassFilterForIngress). Complete(&IngressReconciler{ - ssr: ssr, - recorder: eventRecorder, - Client: mgr.GetClient(), - logger: opts.log.Named("ingress-reconciler"), + ssr: ssr, + recorder: eventRecorder, + Client: mgr.GetClient(), + logger: opts.log.Named("ingress-reconciler"), proxyDefaultClass: opts.proxyDefaultClass, }) if err != nil { @@ -388,6 +392,28 @@ func runReconcilers(opts reconcilerOpts) { if err != nil { startlog.Fatalf("could not create DNS records reconciler: %v", err) } + + // Recorder reconciler. + recorderFilter := handler.EnqueueRequestForOwner(mgr.GetScheme(), mgr.GetRESTMapper(), &tsapi.Recorder{}) + err = builder.ControllerManagedBy(mgr). + For(&tsapi.Recorder{}). + Watches(&appsv1.StatefulSet{}, recorderFilter). + Watches(&corev1.ServiceAccount{}, recorderFilter). + Watches(&corev1.Secret{}, recorderFilter). + Watches(&rbacv1.Role{}, recorderFilter). + Watches(&rbacv1.RoleBinding{}, recorderFilter). 
+ Complete(&RecorderReconciler{ + recorder: eventRecorder, + tsNamespace: opts.tailscaleNamespace, + Client: mgr.GetClient(), + l: opts.log.Named("recorder-reconciler"), + clock: tstime.DefaultClock{}, + tsClient: opts.tsClient, + }) + if err != nil { + startlog.Fatalf("could not create Recorder reconciler: %v", err) + } + startlog.Infof("Startup complete, operator running, version: %s", version.Long()) if err := mgr.Start(signals.SetupSignalHandler()); err != nil { startlog.Fatalf("could not start manager: %v", err) @@ -524,6 +550,7 @@ func dnsRecordsReconcilerIngressHandler(ns string, isDefaultLoadBalancer bool, c type tsClient interface { CreateKey(ctx context.Context, caps tailscale.KeyCapabilities) (string, *tailscale.Key, error) + Device(ctx context.Context, deviceID string, fields *tailscale.DeviceFieldsOpts) (*tailscale.Device, error) DeleteDevice(ctx context.Context, nodeStableID string) error } diff --git a/cmd/k8s-operator/operator_test.go b/cmd/k8s-operator/operator_test.go index 448db71e29504..8b08e9ffadec9 100644 --- a/cmd/k8s-operator/operator_test.go +++ b/cmd/k8s-operator/operator_test.go @@ -22,6 +22,7 @@ import ( "sigs.k8s.io/controller-runtime/pkg/client/fake" "sigs.k8s.io/controller-runtime/pkg/reconcile" tsapi "tailscale.com/k8s-operator/apis/v1alpha1" + "tailscale.com/kube/kubetypes" "tailscale.com/net/dns/resolvconffile" "tailscale.com/tstest" "tailscale.com/tstime" @@ -123,6 +124,7 @@ func TestLoadBalancerClass(t *testing.T) { parentType: "svc", hostname: "default-test", clusterTargetIP: "10.20.30.40", + app: kubetypes.AppIngressProxy, } expectEqual(t, fc, expectedSecret(t, fc, opts), nil) @@ -260,6 +262,7 @@ func TestTailnetTargetFQDNAnnotation(t *testing.T) { parentType: "svc", tailnetTargetFQDN: tailnetTargetFQDN, hostname: "default-test", + app: kubetypes.AppEgressProxy, } expectEqual(t, fc, expectedSecret(t, fc, o), nil) @@ -371,6 +374,7 @@ func TestTailnetTargetIPAnnotation(t *testing.T) { parentType: "svc", tailnetTargetIP: 
tailnetTargetIP, hostname: "default-test", + app: kubetypes.AppEgressProxy, } expectEqual(t, fc, expectedSecret(t, fc, o), nil) @@ -479,6 +483,7 @@ func TestAnnotations(t *testing.T) { parentType: "svc", hostname: "default-test", clusterTargetIP: "10.20.30.40", + app: kubetypes.AppIngressProxy, } expectEqual(t, fc, expectedSecret(t, fc, o), nil) @@ -584,6 +589,7 @@ func TestAnnotationIntoLB(t *testing.T) { parentType: "svc", hostname: "default-test", clusterTargetIP: "10.20.30.40", + app: kubetypes.AppIngressProxy, } expectEqual(t, fc, expectedSecret(t, fc, o), nil) @@ -713,6 +719,7 @@ func TestLBIntoAnnotation(t *testing.T) { parentType: "svc", hostname: "default-test", clusterTargetIP: "10.20.30.40", + app: kubetypes.AppIngressProxy, } expectEqual(t, fc, expectedSecret(t, fc, o), nil) @@ -852,6 +859,7 @@ func TestCustomHostname(t *testing.T) { parentType: "svc", hostname: "reindeer-flotilla", clusterTargetIP: "10.20.30.40", + app: kubetypes.AppIngressProxy, } expectEqual(t, fc, expectedSecret(t, fc, o), nil) @@ -964,6 +972,7 @@ func TestCustomPriorityClassName(t *testing.T) { hostname: "tailscale-critical", priorityClassName: "custom-priority-class-name", clusterTargetIP: "10.20.30.40", + app: kubetypes.AppIngressProxy, } expectEqual(t, fc, expectedSTS(t, fc, o), removeHashAnnotation) @@ -1032,6 +1041,7 @@ func TestProxyClassForService(t *testing.T) { parentType: "svc", hostname: "default-test", clusterTargetIP: "10.20.30.40", + app: kubetypes.AppIngressProxy, } expectEqual(t, fc, expectedSecret(t, fc, opts), nil) expectEqual(t, fc, expectedHeadlessService(shortName, "svc"), nil) @@ -1125,6 +1135,7 @@ func TestDefaultLoadBalancer(t *testing.T) { parentType: "svc", hostname: "default-test", clusterTargetIP: "10.20.30.40", + app: kubetypes.AppIngressProxy, } expectEqual(t, fc, expectedSTS(t, fc, o), removeHashAnnotation) @@ -1181,6 +1192,7 @@ func TestProxyFirewallMode(t *testing.T) { hostname: "default-test", firewallMode: "nftables", clusterTargetIP: 
"10.20.30.40", + app: kubetypes.AppIngressProxy, } expectEqual(t, fc, expectedSTS(t, fc, o), removeHashAnnotation) } @@ -1235,6 +1247,7 @@ func TestTailscaledConfigfileHash(t *testing.T) { hostname: "default-test", clusterTargetIP: "10.20.30.40", confFileHash: "e09bededa0379920141cbd0b0dbdf9b8b66545877f9e8397423f5ce3e1ba439e", + app: kubetypes.AppIngressProxy, } expectEqual(t, fc, expectedSTS(t, fc, o), nil) @@ -1529,6 +1542,7 @@ func Test_externalNameService(t *testing.T) { parentType: "svc", hostname: "default-test", clusterTargetDNS: "foo.com", + app: kubetypes.AppIngressProxy, } expectEqual(t, fc, expectedSecret(t, fc, opts), nil) diff --git a/cmd/k8s-operator/proxy.go b/cmd/k8s-operator/proxy.go index 3d092fe34775f..672f07b1f1608 100644 --- a/cmd/k8s-operator/proxy.go +++ b/cmd/k8s-operator/proxy.go @@ -23,7 +23,7 @@ import ( "tailscale.com/client/tailscale" "tailscale.com/client/tailscale/apitype" ksr "tailscale.com/k8s-operator/sessionrecording" - tskube "tailscale.com/kube" + "tailscale.com/kube/kubetypes" "tailscale.com/tailcfg" "tailscale.com/tsnet" "tailscale.com/util/clientmetric" @@ -31,11 +31,10 @@ import ( "tailscale.com/util/set" ) -var whoIsKey = ctxkey.New("", (*apitype.WhoIsResponse)(nil)) - var ( // counterNumRequestsproxies counts the number of API server requests proxied via this proxy. 
counterNumRequestsProxied = clientmetric.NewCounter("k8s_auth_proxy_requests_proxied") + whoIsKey = ctxkey.New("", (*apitype.WhoIsResponse)(nil)) ) type apiServerProxyMode int @@ -222,6 +221,12 @@ func (ap *apiserverProxy) serveExecWS(w http.ResponseWriter, r *http.Request) { } func (ap *apiserverProxy) execForProto(w http.ResponseWriter, r *http.Request, proto ksr.Protocol) { + const ( + podNameKey = "pod" + namespaceNameKey = "namespace" + upgradeHeaderKey = "Upgrade" + ) + who, err := ap.whoIs(r) if err != nil { ap.authError(w, err) @@ -246,7 +251,7 @@ func (ap *apiserverProxy) execForProto(w http.ResponseWriter, r *http.Request, p } wantsHeader := upgradeHeaderForProto[proto] - if h := r.Header.Get("Upgrade"); h != wantsHeader { + if h := r.Header.Get(upgradeHeaderKey); h != wantsHeader { msg := fmt.Sprintf("[unexpected] unable to verify that streaming protocol is %s, wants Upgrade header %q, got: %q", proto, wantsHeader, h) if failOpen { msg = msg + "; failure mode is 'fail open'; continuing session without recording." 
@@ -268,8 +273,8 @@ func (ap *apiserverProxy) execForProto(w http.ResponseWriter, r *http.Request, p Who: who, Addrs: addrs, FailOpen: failOpen, - Pod: r.PathValue("pod"), - Namespace: r.PathValue("namespace"), + Pod: r.PathValue(podNameKey), + Namespace: r.PathValue(namespaceNameKey), Log: ap.log, } h := ksr.New(opts) @@ -309,9 +314,11 @@ func (h *apiserverProxy) addImpersonationHeadersAsRequired(r *http.Request) { log.Printf("failed to add impersonation headers: " + err.Error()) } } + func (ap *apiserverProxy) whoIs(r *http.Request) (*apitype.WhoIsResponse, error) { return ap.lc.WhoIs(r.Context(), r.RemoteAddr) } + func (ap *apiserverProxy) authError(w http.ResponseWriter, err error) { ap.log.Errorf("failed to authenticate caller: %v", err) http.Error(w, "failed to authenticate caller", http.StatusInternalServerError) @@ -332,10 +339,10 @@ const ( func addImpersonationHeaders(r *http.Request, log *zap.SugaredLogger) error { log = log.With("remote", r.RemoteAddr) who := whoIsKey.Value(r.Context()) - rules, err := tailcfg.UnmarshalCapJSON[tskube.KubernetesCapRule](who.CapMap, tailcfg.PeerCapabilityKubernetes) + rules, err := tailcfg.UnmarshalCapJSON[kubetypes.KubernetesCapRule](who.CapMap, tailcfg.PeerCapabilityKubernetes) if len(rules) == 0 && err == nil { // Try the old capability name for backwards compatibility. 
- rules, err = tailcfg.UnmarshalCapJSON[tskube.KubernetesCapRule](who.CapMap, oldCapabilityName) + rules, err = tailcfg.UnmarshalCapJSON[kubetypes.KubernetesCapRule](who.CapMap, oldCapabilityName) } if err != nil { return fmt.Errorf("failed to unmarshal capability: %v", err) @@ -385,7 +392,7 @@ func determineRecorderConfig(who *apitype.WhoIsResponse) (failOpen bool, recorde return false, nil, errors.New("[unexpected] cannot determine caller") } failOpen = true - rules, err := tailcfg.UnmarshalCapJSON[tskube.KubernetesCapRule](who.CapMap, tailcfg.PeerCapabilityKubernetes) + rules, err := tailcfg.UnmarshalCapJSON[kubetypes.KubernetesCapRule](who.CapMap, tailcfg.PeerCapabilityKubernetes) if err != nil { return failOpen, nil, fmt.Errorf("failed to unmarshal Kubernetes capability: %w", err) } diff --git a/cmd/k8s-operator/sts.go b/cmd/k8s-operator/sts.go index 17cc047d000d1..1d87d6c5cef88 100644 --- a/cmd/k8s-operator/sts.go +++ b/cmd/k8s-operator/sts.go @@ -31,6 +31,7 @@ import ( "tailscale.com/ipn" tsoperator "tailscale.com/k8s-operator" tsapi "tailscale.com/k8s-operator/apis/v1alpha1" + "tailscale.com/kube/kubetypes" "tailscale.com/net/netutil" "tailscale.com/tailcfg" "tailscale.com/types/opt" @@ -342,7 +343,7 @@ func (a *tailscaleSTSReconciler) createOrGetSecret(ctx context.Context, logger * if len(tags) == 0 { tags = a.defaultTags } - authKey, err = a.newAuthKey(ctx, tags) + authKey, err = newAuthKey(ctx, a.tsClient, tags) if err != nil { return "", "", nil, err } @@ -418,6 +419,11 @@ func (a *tailscaleSTSReconciler) DeviceInfo(ctx context.Context, childLabels map if sec == nil { return "", "", nil, nil } + + return deviceInfo(sec) +} + +func deviceInfo(sec *corev1.Secret) (id tailcfg.StableNodeID, hostname string, ips []string, err error) { id = tailcfg.StableNodeID(sec.Data["device_id"]) if id == "" { return "", "", nil, nil @@ -441,7 +447,7 @@ func (a *tailscaleSTSReconciler) DeviceInfo(ctx context.Context, childLabels map return id, hostname, ips, nil } -func 
(a *tailscaleSTSReconciler) newAuthKey(ctx context.Context, tags []string) (string, error) { +func newAuthKey(ctx context.Context, tsClient tsClient, tags []string) (string, error) { caps := tailscale.KeyCapabilities{ Devices: tailscale.KeyDeviceCapabilities{ Create: tailscale.KeyDeviceCreateCapabilities{ @@ -452,7 +458,7 @@ func (a *tailscaleSTSReconciler) newAuthKey(ctx context.Context, tags []string) }, } - key, _, err := a.tsClient.CreateKey(ctx, caps) + key, _, err := tsClient.CreateKey(ctx, caps) if err != nil { return "", err } @@ -598,6 +604,18 @@ func (a *tailscaleSTSReconciler) reconcileSTS(ctx context.Context, logger *zap.S }, }) } + app, err := appInfoForProxy(sts) + if err != nil { + // No need to error out if now or in future we end up in a + // situation where app info cannot be determined for one of the + // many proxy configurations that the operator can produce. + logger.Error("[unexpected] unable to determine proxy type") + } else { + container.Env = append(container.Env, corev1.EnvVar{ + Name: "TS_INTERNAL_APP", + Value: app, + }) + } logger.Debugf("reconciling statefulset %s/%s", ss.GetNamespace(), ss.GetName()) if sts.ProxyClassName != "" { logger.Debugf("configuring proxy resources with ProxyClass %s", sts.ProxyClassName) @@ -611,6 +629,22 @@ func (a *tailscaleSTSReconciler) reconcileSTS(ctx context.Context, logger *zap.S return createOrUpdate(ctx, a.Client, a.operatorNamespace, ss, updateSS) } +func appInfoForProxy(cfg *tailscaleSTSConfig) (string, error) { + if cfg.ClusterTargetDNSName != "" || cfg.ClusterTargetIP != "" { + return kubetypes.AppIngressProxy, nil + } + if cfg.TailnetTargetFQDN != "" || cfg.TailnetTargetIP != "" { + return kubetypes.AppEgressProxy, nil + } + if cfg.ServeConfig != nil { + return kubetypes.AppIngressResource, nil + } + if cfg.Connector != nil { + return kubetypes.AppConnector, nil + } + return "", errors.New("unable to determine proxy type") +} + // mergeStatefulSetLabelsOrAnnots returns a map that contains all 
keys/values // present in 'custom' map as well as those keys/values from the current map // whose keys are present in the 'managed' map. The reason why this merge is diff --git a/cmd/k8s-operator/svc.go b/cmd/k8s-operator/svc.go index 73d3adf49565b..6b2ca3514bed6 100644 --- a/cmd/k8s-operator/svc.go +++ b/cmd/k8s-operator/svc.go @@ -25,6 +25,7 @@ import ( "sigs.k8s.io/controller-runtime/pkg/reconcile" tsoperator "tailscale.com/k8s-operator" tsapi "tailscale.com/k8s-operator/apis/v1alpha1" + "tailscale.com/kube/kubetypes" "tailscale.com/net/dns/resolvconffile" "tailscale.com/tstime" "tailscale.com/util/clientmetric" @@ -69,10 +70,10 @@ type ServiceReconciler struct { var ( // gaugeEgressProxies tracks the number of egress proxies that we're // currently managing. - gaugeEgressProxies = clientmetric.NewGauge("k8s_egress_proxies") + gaugeEgressProxies = clientmetric.NewGauge(kubetypes.MetricEgressProxyCount) // gaugeIngressProxies tracks the number of ingress proxies that we're // currently managing. 
- gaugeIngressProxies = clientmetric.NewGauge("k8s_ingress_proxies") + gaugeIngressProxies = clientmetric.NewGauge(kubetypes.MetricIngressProxyCount) ) func childResourceLabels(name, ns, typ string) map[string]string { @@ -327,7 +328,7 @@ func (a *ServiceReconciler) maybeProvision(ctx context.Context, logger *zap.Suga if err != nil { msg := fmt.Sprintf("failed to parse cluster IP: %v", err) tsoperator.SetServiceCondition(svc, tsapi.ProxyReady, metav1.ConditionFalse, reasonProxyFailed, msg, a.clock, logger) - return fmt.Errorf(msg) + return errors.New(msg) } for _, ip := range tsIPs { addr, err := netip.ParseAddr(ip) diff --git a/cmd/k8s-operator/testutils_test.go b/cmd/k8s-operator/testutils_test.go index f5f9ece2ba4a3..9e37d32a92cfb 100644 --- a/cmd/k8s-operator/testutils_test.go +++ b/cmd/k8s-operator/testutils_test.go @@ -9,6 +9,7 @@ import ( "context" "encoding/json" "net/netip" + "reflect" "strings" "sync" "testing" @@ -51,6 +52,7 @@ type configOpts struct { serveConfig *ipn.ServeConfig shouldEnableForwardingClusterTrafficViaIngress bool proxyClass string // configuration from the named ProxyClass should be applied to proxy resources + app string } func expectedSTS(t *testing.T, cl client.Client, opts configOpts) *appsv1.StatefulSet { @@ -142,6 +144,10 @@ func expectedSTS(t *testing.T, cl client.Client, opts configOpts) *appsv1.Statef volumes = append(volumes, corev1.Volume{Name: "serve-config", VolumeSource: corev1.VolumeSource{Secret: &corev1.SecretVolumeSource{SecretName: opts.secretName, Items: []corev1.KeyToPath{{Key: "serve-config", Path: "serve-config"}}}}}) tsContainer.VolumeMounts = append(tsContainer.VolumeMounts, corev1.VolumeMount{Name: "serve-config", ReadOnly: true, MountPath: "/etc/tailscaled"}) } + tsContainer.Env = append(tsContainer.Env, corev1.EnvVar{ + Name: "TS_INTERNAL_APP", + Value: opts.app, + }) ss := &appsv1.StatefulSet{ TypeMeta: metav1.TypeMeta{ Kind: "StatefulSet", @@ -224,6 +230,7 @@ func expectedSTSUserspace(t *testing.T, cl 
client.Client, opts configOpts) *apps {Name: "EXPERIMENTAL_TS_CONFIGFILE_PATH", Value: "/etc/tsconfig/tailscaled"}, {Name: "TS_EXPERIMENTAL_VERSIONED_CONFIG_DIR", Value: "/etc/tsconfig"}, {Name: "TS_SERVE_CONFIG", Value: "/etc/tailscaled/serve-config"}, + {Name: "TS_INTERNAL_APP", Value: opts.app}, }, ImagePullPolicy: "Always", VolumeMounts: []corev1.VolumeMount{ @@ -481,7 +488,7 @@ func expectEqual[T any, O ptrObject[T]](t *testing.T, client client.Client, want modifier(got) } if diff := cmp.Diff(got, want); diff != "" { - t.Fatalf("unexpected object (-got +want):\n%s", diff) + t.Fatalf("unexpected %s (-got +want):\n%s", reflect.TypeOf(want).Elem().Name(), diff) } } @@ -492,7 +499,7 @@ func expectMissing[T any, O ptrObject[T]](t *testing.T, client client.Client, ns Name: name, Namespace: ns, }, obj); !apierrors.IsNotFound(err) { - t.Fatalf("object %s/%s unexpectedly present, wanted missing", ns, name) + t.Fatalf("%s %s/%s unexpectedly present, wanted missing", reflect.TypeOf(obj).Elem().Name(), ns, name) } } @@ -586,6 +593,17 @@ func (c *fakeTSClient) CreateKey(ctx context.Context, caps tailscale.KeyCapabili return "secret-authkey", k, nil } +func (c *fakeTSClient) Device(ctx context.Context, deviceID string, fields *tailscale.DeviceFieldsOpts) (*tailscale.Device, error) { + return &tailscale.Device{ + DeviceID: deviceID, + Hostname: "test-device", + Addresses: []string{ + "1.2.3.4", + "::1", + }, + }, nil +} + func (c *fakeTSClient) DeleteDevice(ctx context.Context, deviceID string) error { c.Lock() defer c.Unlock() diff --git a/cmd/k8s-operator/tsrecorder.go b/cmd/k8s-operator/tsrecorder.go new file mode 100644 index 0000000000000..8c9ab236f62f9 --- /dev/null +++ b/cmd/k8s-operator/tsrecorder.go @@ -0,0 +1,375 @@ +// Copyright (c) Tailscale Inc & AUTHORS +// SPDX-License-Identifier: BSD-3-Clause + +//go:build !plan9 + +package main + +import ( + "context" + "encoding/json" + "fmt" + "net/http" + "slices" + "sync" + + "github.com/pkg/errors" + "go.uber.org/zap" + 
xslices "golang.org/x/exp/slices" + appsv1 "k8s.io/api/apps/v1" + corev1 "k8s.io/api/core/v1" + rbacv1 "k8s.io/api/rbac/v1" + apiequality "k8s.io/apimachinery/pkg/api/equality" + apierrors "k8s.io/apimachinery/pkg/api/errors" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/types" + "k8s.io/client-go/tools/record" + "sigs.k8s.io/controller-runtime/pkg/client" + "sigs.k8s.io/controller-runtime/pkg/reconcile" + "tailscale.com/client/tailscale" + tsoperator "tailscale.com/k8s-operator" + tsapi "tailscale.com/k8s-operator/apis/v1alpha1" + "tailscale.com/kube/kubetypes" + "tailscale.com/tailcfg" + "tailscale.com/tstime" + "tailscale.com/util/clientmetric" + "tailscale.com/util/set" +) + +const ( + reasonRecorderCreationFailed = "RecorderCreationFailed" + reasonRecorderCreated = "RecorderCreated" + reasonRecorderInvalid = "RecorderInvalid" + + currentProfileKey = "_current-profile" +) + +var gaugeRecorderResources = clientmetric.NewGauge(kubetypes.MetricRecorderCount) + +// RecorderReconciler syncs Recorder statefulsets with their definition in +// Recorder CRs. 
+type RecorderReconciler struct { + client.Client + l *zap.SugaredLogger + recorder record.EventRecorder + clock tstime.Clock + tsNamespace string + tsClient tsClient + + mu sync.Mutex // protects following + recorders set.Slice[types.UID] // for recorders gauge +} + +func (r *RecorderReconciler) logger(name string) *zap.SugaredLogger { + return r.l.With("Recorder", name) +} + +func (r *RecorderReconciler) Reconcile(ctx context.Context, req reconcile.Request) (_ reconcile.Result, err error) { + logger := r.logger(req.Name) + logger.Debugf("starting reconcile") + defer logger.Debugf("reconcile finished") + + tsr := new(tsapi.Recorder) + err = r.Get(ctx, req.NamespacedName, tsr) + if apierrors.IsNotFound(err) { + logger.Debugf("Recorder not found, assuming it was deleted") + return reconcile.Result{}, nil + } else if err != nil { + return reconcile.Result{}, fmt.Errorf("failed to get tailscale.com Recorder: %w", err) + } + if markedForDeletion(tsr) { + logger.Debugf("Recorder is being deleted, cleaning up resources") + ix := xslices.Index(tsr.Finalizers, FinalizerName) + if ix < 0 { + logger.Debugf("no finalizer, nothing to do") + return reconcile.Result{}, nil + } + + if done, err := r.maybeCleanup(ctx, tsr); err != nil { + return reconcile.Result{}, err + } else if !done { + logger.Debugf("Recorder resource cleanup not yet finished, will retry...") + return reconcile.Result{RequeueAfter: shortRequeue}, nil + } + + tsr.Finalizers = slices.Delete(tsr.Finalizers, ix, ix+1) + if err := r.Update(ctx, tsr); err != nil { + return reconcile.Result{}, err + } + return reconcile.Result{}, nil + } + + oldTSRStatus := tsr.Status.DeepCopy() + setStatusReady := func(tsr *tsapi.Recorder, status metav1.ConditionStatus, reason, message string) (reconcile.Result, error) { + tsoperator.SetRecorderCondition(tsr, tsapi.RecorderReady, status, reason, message, tsr.Generation, r.clock, logger) + if !apiequality.Semantic.DeepEqual(oldTSRStatus, tsr.Status) { + // An error encountered here 
should get returned by the Reconcile function. + if updateErr := r.Client.Status().Update(ctx, tsr); updateErr != nil { + err = errors.Wrap(err, updateErr.Error()) + } + } + return reconcile.Result{}, err + } + + if !slices.Contains(tsr.Finalizers, FinalizerName) { + // This log line is printed exactly once during initial provisioning, + // because once the finalizer is in place this block gets skipped. So, + // this is a nice place to log that the high level, multi-reconcile + // operation is underway. + logger.Infof("ensuring Recorder is set up") + tsr.Finalizers = append(tsr.Finalizers, FinalizerName) + if err := r.Update(ctx, tsr); err != nil { + logger.Errorf("error adding finalizer: %w", err) + return setStatusReady(tsr, metav1.ConditionFalse, reasonRecorderCreationFailed, reasonRecorderCreationFailed) + } + } + + if err := r.validate(tsr); err != nil { + logger.Errorf("error validating Recorder spec: %w", err) + message := fmt.Sprintf("Recorder is invalid: %s", err) + r.recorder.Eventf(tsr, corev1.EventTypeWarning, reasonRecorderInvalid, message) + return setStatusReady(tsr, metav1.ConditionFalse, reasonRecorderInvalid, message) + } + + if err = r.maybeProvision(ctx, tsr); err != nil { + logger.Errorf("error creating Recorder resources: %w", err) + message := fmt.Sprintf("failed creating Recorder: %s", err) + r.recorder.Eventf(tsr, corev1.EventTypeWarning, reasonRecorderCreationFailed, message) + return setStatusReady(tsr, metav1.ConditionFalse, reasonRecorderCreationFailed, message) + } + + logger.Info("Recorder resources synced") + return setStatusReady(tsr, metav1.ConditionTrue, reasonRecorderCreated, reasonRecorderCreated) +} + +func (r *RecorderReconciler) maybeProvision(ctx context.Context, tsr *tsapi.Recorder) error { + logger := r.logger(tsr.Name) + + r.mu.Lock() + r.recorders.Add(tsr.UID) + gaugeRecorderResources.Set(int64(r.recorders.Len())) + r.mu.Unlock() + + if err := r.ensureAuthSecretCreated(ctx, tsr); err != nil { + return fmt.Errorf("error 
creating secrets: %w", err) + } + // State secret is precreated so we can use the Recorder CR as its owner ref. + sec := tsrStateSecret(tsr, r.tsNamespace) + if _, err := createOrUpdate(ctx, r.Client, r.tsNamespace, sec, func(s *corev1.Secret) { + s.ObjectMeta.Labels = sec.ObjectMeta.Labels + s.ObjectMeta.Annotations = sec.ObjectMeta.Annotations + s.ObjectMeta.OwnerReferences = sec.ObjectMeta.OwnerReferences + }); err != nil { + return fmt.Errorf("error creating state Secret: %w", err) + } + sa := tsrServiceAccount(tsr, r.tsNamespace) + if _, err := createOrUpdate(ctx, r.Client, r.tsNamespace, sa, func(s *corev1.ServiceAccount) { + s.ObjectMeta.Labels = sa.ObjectMeta.Labels + s.ObjectMeta.Annotations = sa.ObjectMeta.Annotations + s.ObjectMeta.OwnerReferences = sa.ObjectMeta.OwnerReferences + }); err != nil { + return fmt.Errorf("error creating ServiceAccount: %w", err) + } + role := tsrRole(tsr, r.tsNamespace) + if _, err := createOrUpdate(ctx, r.Client, r.tsNamespace, role, func(r *rbacv1.Role) { + r.ObjectMeta.Labels = role.ObjectMeta.Labels + r.ObjectMeta.Annotations = role.ObjectMeta.Annotations + r.ObjectMeta.OwnerReferences = role.ObjectMeta.OwnerReferences + r.Rules = role.Rules + }); err != nil { + return fmt.Errorf("error creating Role: %w", err) + } + roleBinding := tsrRoleBinding(tsr, r.tsNamespace) + if _, err := createOrUpdate(ctx, r.Client, r.tsNamespace, roleBinding, func(r *rbacv1.RoleBinding) { + r.ObjectMeta.Labels = roleBinding.ObjectMeta.Labels + r.ObjectMeta.Annotations = roleBinding.ObjectMeta.Annotations + r.ObjectMeta.OwnerReferences = roleBinding.ObjectMeta.OwnerReferences + r.RoleRef = roleBinding.RoleRef + r.Subjects = roleBinding.Subjects + }); err != nil { + return fmt.Errorf("error creating RoleBinding: %w", err) + } + ss := tsrStatefulSet(tsr, r.tsNamespace) + if _, err := createOrUpdate(ctx, r.Client, r.tsNamespace, ss, func(s *appsv1.StatefulSet) { + s.ObjectMeta.Labels = ss.ObjectMeta.Labels + s.ObjectMeta.Annotations = 
ss.ObjectMeta.Annotations + s.ObjectMeta.OwnerReferences = ss.ObjectMeta.OwnerReferences + s.Spec = ss.Spec + }); err != nil { + return fmt.Errorf("error creating StatefulSet: %w", err) + } + + var devices []tsapi.TailnetDevice + + device, ok, err := r.getDeviceInfo(ctx, tsr.Name) + if err != nil { + return fmt.Errorf("failed to get device info: %w", err) + } + if !ok { + logger.Debugf("no Tailscale hostname known yet, waiting for Recorder pod to finish auth") + return nil + } + + devices = append(devices, device) + + tsr.Status.Devices = devices + + return nil +} + +// maybeCleanup just deletes the device from the tailnet. All the kubernetes +// resources linked to a Recorder will get cleaned up via owner references +// (which we can use because they are all in the same namespace). +func (r *RecorderReconciler) maybeCleanup(ctx context.Context, tsr *tsapi.Recorder) (bool, error) { + logger := r.logger(tsr.Name) + + id, _, ok, err := r.getNodeMetadata(ctx, tsr.Name) + if err != nil { + return false, err + } + if !ok { + logger.Debugf("state Secret %s-0 not found or does not contain node ID, continuing cleanup", tsr.Name) + r.mu.Lock() + r.recorders.Remove(tsr.UID) + gaugeRecorderResources.Set(int64(r.recorders.Len())) + r.mu.Unlock() + return true, nil + } + + logger.Debugf("deleting device %s from control", string(id)) + if err := r.tsClient.DeleteDevice(ctx, string(id)); err != nil { + errResp := &tailscale.ErrResponse{} + if ok := errors.As(err, errResp); ok && errResp.Status == http.StatusNotFound { + logger.Debugf("device %s not found, likely because it has already been deleted from control", string(id)) + } else { + return false, fmt.Errorf("error deleting device: %w", err) + } + } else { + logger.Debugf("device %s deleted from control", string(id)) + } + + // Unlike most log entries in the reconcile loop, this will get printed + // exactly once at the very end of cleanup, because the final step of + // cleanup removes the tailscale finalizer, which will make 
all future + // reconciles exit early. + logger.Infof("cleaned up Recorder resources") + r.mu.Lock() + r.recorders.Remove(tsr.UID) + gaugeRecorderResources.Set(int64(r.recorders.Len())) + r.mu.Unlock() + return true, nil +} + +func (r *RecorderReconciler) ensureAuthSecretCreated(ctx context.Context, tsr *tsapi.Recorder) error { + logger := r.logger(tsr.Name) + key := types.NamespacedName{ + Namespace: r.tsNamespace, + Name: tsr.Name, + } + if err := r.Get(ctx, key, &corev1.Secret{}); err == nil { + // No updates, already created the auth key. + logger.Debugf("auth Secret %s already exists", key.Name) + return nil + } else if !apierrors.IsNotFound(err) { + return err + } + + // Create the auth key Secret which is going to be used by the StatefulSet + // to authenticate with Tailscale. + logger.Debugf("creating authkey for new Recorder") + tags := tsr.Spec.Tags + if len(tags) == 0 { + tags = tsapi.Tags{"tag:k8s"} + } + authKey, err := newAuthKey(ctx, r.tsClient, tags.Stringify()) + if err != nil { + return err + } + + logger.Debug("creating a new Secret for the Recorder") + if err := r.Create(ctx, tsrAuthSecret(tsr, r.tsNamespace, authKey)); err != nil { + return err + } + + return nil +} + +func (r *RecorderReconciler) validate(tsr *tsapi.Recorder) error { + if !tsr.Spec.EnableUI && tsr.Spec.Storage.S3 == nil { + return errors.New("must either enable UI or use S3 storage to ensure recordings are accessible") + } + + return nil +} + +// getNodeMetadata returns 'ok == true' iff the node ID is found. The dnsName +// is expected to always be non-empty if the node ID is, but not required. 
+func (r *RecorderReconciler) getNodeMetadata(ctx context.Context, tsrName string) (id tailcfg.StableNodeID, dnsName string, ok bool, err error) { + secret := &corev1.Secret{ + ObjectMeta: metav1.ObjectMeta{ + Namespace: r.tsNamespace, + Name: fmt.Sprintf("%s-0", tsrName), + }, + } + if err := r.Get(ctx, client.ObjectKeyFromObject(secret), secret); err != nil { + if apierrors.IsNotFound(err) { + return "", "", false, nil + } + + return "", "", false, err + } + + // TODO(tomhjp): Should maybe use ipn to parse the following info instead. + currentProfile, ok := secret.Data[currentProfileKey] + if !ok { + return "", "", false, nil + } + profileBytes, ok := secret.Data[string(currentProfile)] + if !ok { + return "", "", false, nil + } + var profile profile + if err := json.Unmarshal(profileBytes, &profile); err != nil { + return "", "", false, fmt.Errorf("failed to extract node profile info from state Secret %s: %w", secret.Name, err) + } + + ok = profile.Config.NodeID != "" + return tailcfg.StableNodeID(profile.Config.NodeID), profile.Config.UserProfile.LoginName, ok, nil +} + +func (r *RecorderReconciler) getDeviceInfo(ctx context.Context, tsrName string) (d tsapi.TailnetDevice, ok bool, err error) { + nodeID, dnsName, ok, err := r.getNodeMetadata(ctx, tsrName) + if !ok || err != nil { + return tsapi.TailnetDevice{}, false, err + } + + // TODO(tomhjp): The profile info doesn't include addresses, which is why we + // need the API. Should we instead update the profile to include addresses? 
+ device, err := r.tsClient.Device(ctx, string(nodeID), nil) + if err != nil { + return tsapi.TailnetDevice{}, false, fmt.Errorf("failed to get device info from API: %w", err) + } + + d = tsapi.TailnetDevice{ + Hostname: device.Hostname, + TailnetIPs: device.Addresses, + } + if dnsName != "" { + d.URL = fmt.Sprintf("https://%s", dnsName) + } + + return d, true, nil +} + +type profile struct { + Config struct { + NodeID string `json:"NodeID"` + UserProfile struct { + LoginName string `json:"LoginName"` + } `json:"UserProfile"` + } `json:"Config"` +} + +func markedForDeletion(tsr *tsapi.Recorder) bool { + return !tsr.DeletionTimestamp.IsZero() +} diff --git a/cmd/k8s-operator/tsrecorder_specs.go b/cmd/k8s-operator/tsrecorder_specs.go new file mode 100644 index 0000000000000..4a74fb7e03442 --- /dev/null +++ b/cmd/k8s-operator/tsrecorder_specs.go @@ -0,0 +1,278 @@ +// Copyright (c) Tailscale Inc & AUTHORS +// SPDX-License-Identifier: BSD-3-Clause + +//go:build !plan9 + +package main + +import ( + "fmt" + + appsv1 "k8s.io/api/apps/v1" + corev1 "k8s.io/api/core/v1" + rbacv1 "k8s.io/api/rbac/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + tsapi "tailscale.com/k8s-operator/apis/v1alpha1" + "tailscale.com/types/ptr" + "tailscale.com/version" +) + +func tsrStatefulSet(tsr *tsapi.Recorder, namespace string) *appsv1.StatefulSet { + return &appsv1.StatefulSet{ + ObjectMeta: metav1.ObjectMeta{ + Name: tsr.Name, + Namespace: namespace, + Labels: labels("recorder", tsr.Name, tsr.Spec.StatefulSet.Labels), + OwnerReferences: tsrOwnerReference(tsr), + Annotations: tsr.Spec.StatefulSet.Annotations, + }, + Spec: appsv1.StatefulSetSpec{ + Replicas: ptr.To[int32](1), + Selector: &metav1.LabelSelector{ + MatchLabels: labels("recorder", tsr.Name, tsr.Spec.StatefulSet.Pod.Labels), + }, + Template: corev1.PodTemplateSpec{ + ObjectMeta: metav1.ObjectMeta{ + Name: tsr.Name, + Namespace: namespace, + Labels: labels("recorder", tsr.Name, tsr.Spec.StatefulSet.Pod.Labels), + Annotations: 
tsr.Spec.StatefulSet.Pod.Annotations, + }, + Spec: corev1.PodSpec{ + ServiceAccountName: tsr.Name, + Affinity: tsr.Spec.StatefulSet.Pod.Affinity, + SecurityContext: tsr.Spec.StatefulSet.Pod.SecurityContext, + ImagePullSecrets: tsr.Spec.StatefulSet.Pod.ImagePullSecrets, + NodeSelector: tsr.Spec.StatefulSet.Pod.NodeSelector, + Tolerations: tsr.Spec.StatefulSet.Pod.Tolerations, + Containers: []corev1.Container{ + { + Name: "recorder", + Image: func() string { + image := tsr.Spec.StatefulSet.Pod.Container.Image + if image == "" { + image = fmt.Sprintf("tailscale/tsrecorder:%s", selfVersionImageTag()) + } + + return image + }(), + ImagePullPolicy: tsr.Spec.StatefulSet.Pod.Container.ImagePullPolicy, + Resources: tsr.Spec.StatefulSet.Pod.Container.Resources, + SecurityContext: tsr.Spec.StatefulSet.Pod.Container.SecurityContext, + Env: env(tsr), + EnvFrom: func() []corev1.EnvFromSource { + if tsr.Spec.Storage.S3 == nil || tsr.Spec.Storage.S3.Credentials.Secret.Name == "" { + return nil + } + + return []corev1.EnvFromSource{{ + SecretRef: &corev1.SecretEnvSource{ + LocalObjectReference: corev1.LocalObjectReference{ + Name: tsr.Spec.Storage.S3.Credentials.Secret.Name, + }, + }, + }} + }(), + Command: []string{"/tsrecorder"}, + VolumeMounts: []corev1.VolumeMount{ + { + Name: "data", + MountPath: "/data", + ReadOnly: false, + }, + }, + }, + }, + Volumes: []corev1.Volume{ + { + Name: "data", + VolumeSource: corev1.VolumeSource{ + EmptyDir: &corev1.EmptyDirVolumeSource{}, + }, + }, + }, + }, + }, + }, + } +} + +func tsrServiceAccount(tsr *tsapi.Recorder, namespace string) *corev1.ServiceAccount { + return &corev1.ServiceAccount{ + ObjectMeta: metav1.ObjectMeta{ + Name: tsr.Name, + Namespace: namespace, + Labels: labels("recorder", tsr.Name, nil), + OwnerReferences: tsrOwnerReference(tsr), + }, + } +} + +func tsrRole(tsr *tsapi.Recorder, namespace string) *rbacv1.Role { + return &rbacv1.Role{ + ObjectMeta: metav1.ObjectMeta{ + Name: tsr.Name, + Namespace: namespace, + Labels: 
labels("recorder", tsr.Name, nil), + OwnerReferences: tsrOwnerReference(tsr), + }, + Rules: []rbacv1.PolicyRule{ + { + APIGroups: []string{""}, + Resources: []string{"secrets"}, + Verbs: []string{ + "get", + "patch", + "update", + }, + ResourceNames: []string{ + tsr.Name, // Contains the auth key. + fmt.Sprintf("%s-0", tsr.Name), // Contains the node state. + }, + }, + }, + } +} + +func tsrRoleBinding(tsr *tsapi.Recorder, namespace string) *rbacv1.RoleBinding { + return &rbacv1.RoleBinding{ + ObjectMeta: metav1.ObjectMeta{ + Name: tsr.Name, + Namespace: namespace, + Labels: labels("recorder", tsr.Name, nil), + OwnerReferences: tsrOwnerReference(tsr), + }, + Subjects: []rbacv1.Subject{ + { + Kind: "ServiceAccount", + Name: tsr.Name, + Namespace: namespace, + }, + }, + RoleRef: rbacv1.RoleRef{ + Kind: "Role", + Name: tsr.Name, + }, + } +} + +func tsrAuthSecret(tsr *tsapi.Recorder, namespace string, authKey string) *corev1.Secret { + return &corev1.Secret{ + ObjectMeta: metav1.ObjectMeta{ + Namespace: namespace, + Name: tsr.Name, + Labels: labels("recorder", tsr.Name, nil), + OwnerReferences: tsrOwnerReference(tsr), + }, + StringData: map[string]string{ + "authkey": authKey, + }, + } +} + +func tsrStateSecret(tsr *tsapi.Recorder, namespace string) *corev1.Secret { + return &corev1.Secret{ + ObjectMeta: metav1.ObjectMeta{ + Name: fmt.Sprintf("%s-0", tsr.Name), + Namespace: namespace, + Labels: labels("recorder", tsr.Name, nil), + OwnerReferences: tsrOwnerReference(tsr), + }, + } +} + +func env(tsr *tsapi.Recorder) []corev1.EnvVar { + envs := []corev1.EnvVar{ + { + Name: "TS_AUTHKEY", + ValueFrom: &corev1.EnvVarSource{ + SecretKeyRef: &corev1.SecretKeySelector{ + LocalObjectReference: corev1.LocalObjectReference{ + Name: tsr.Name, + }, + Key: "authkey", + }, + }, + }, + { + Name: "POD_NAME", + ValueFrom: &corev1.EnvVarSource{ + FieldRef: &corev1.ObjectFieldSelector{ + // Secret is named after the pod. 
+ FieldPath: "metadata.name", + }, + }, + }, + { + Name: "TS_STATE", + Value: "kube:$(POD_NAME)", + }, + { + Name: "TSRECORDER_HOSTNAME", + Value: "$(POD_NAME)", + }, + } + + for _, env := range tsr.Spec.StatefulSet.Pod.Container.Env { + envs = append(envs, corev1.EnvVar{ + Name: string(env.Name), + Value: env.Value, + }) + } + + if tsr.Spec.Storage.S3 != nil { + envs = append(envs, + corev1.EnvVar{ + Name: "TSRECORDER_DST", + Value: fmt.Sprintf("s3://%s", tsr.Spec.Storage.S3.Endpoint), + }, + corev1.EnvVar{ + Name: "TSRECORDER_BUCKET", + Value: tsr.Spec.Storage.S3.Bucket, + }, + ) + } else { + envs = append(envs, corev1.EnvVar{ + Name: "TSRECORDER_DST", + Value: "/data/recordings", + }) + } + + if tsr.Spec.EnableUI { + envs = append(envs, corev1.EnvVar{ + Name: "TSRECORDER_UI", + Value: "true", + }) + } + + return envs +} + +func labels(app, instance string, customLabels map[string]string) map[string]string { + l := make(map[string]string, len(customLabels)+3) + for k, v := range customLabels { + l[k] = v + } + + // ref: https://kubernetes.io/docs/concepts/overview/working-with-objects/common-labels/ + l["app.kubernetes.io/name"] = app + l["app.kubernetes.io/instance"] = instance + l["app.kubernetes.io/managed-by"] = "tailscale-operator" + + return l +} + +func tsrOwnerReference(owner metav1.Object) []metav1.OwnerReference { + return []metav1.OwnerReference{*metav1.NewControllerRef(owner, tsapi.SchemeGroupVersion.WithKind("Recorder"))} +} + +// selfVersionImageTag returns the container image tag of the running operator +// build. 
+func selfVersionImageTag() string { + meta := version.GetMeta() + var versionPrefix string + if meta.UnstableBranch { + versionPrefix = "unstable-" + } + return fmt.Sprintf("%sv%s", versionPrefix, meta.MajorMinorPatch) +} diff --git a/cmd/k8s-operator/tsrecorder_specs_test.go b/cmd/k8s-operator/tsrecorder_specs_test.go new file mode 100644 index 0000000000000..94a8a816c69f5 --- /dev/null +++ b/cmd/k8s-operator/tsrecorder_specs_test.go @@ -0,0 +1,143 @@ +// Copyright (c) Tailscale Inc & AUTHORS +// SPDX-License-Identifier: BSD-3-Clause + +//go:build !plan9 + +package main + +import ( + "testing" + + "github.com/google/go-cmp/cmp" + corev1 "k8s.io/api/core/v1" + "k8s.io/apimachinery/pkg/api/resource" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + tsapi "tailscale.com/k8s-operator/apis/v1alpha1" + "tailscale.com/types/ptr" +) + +func TestRecorderSpecs(t *testing.T) { + t.Run("ensure spec fields are passed through correctly", func(t *testing.T) { + tsr := &tsapi.Recorder{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test", + }, + Spec: tsapi.RecorderSpec{ + StatefulSet: tsapi.RecorderStatefulSet{ + Labels: map[string]string{ + "ss-label-key": "ss-label-value", + }, + Annotations: map[string]string{ + "ss-annotation-key": "ss-annotation-value", + }, + Pod: tsapi.RecorderPod{ + Labels: map[string]string{ + "pod-label-key": "pod-label-value", + }, + Annotations: map[string]string{ + "pod-annotation-key": "pod-annotation-value", + }, + Affinity: &corev1.Affinity{ + PodAffinity: &corev1.PodAffinity{ + RequiredDuringSchedulingIgnoredDuringExecution: []corev1.PodAffinityTerm{{ + LabelSelector: &metav1.LabelSelector{ + MatchLabels: map[string]string{ + "match-label": "match-value", + }, + }}, + }, + }, + }, + SecurityContext: &corev1.PodSecurityContext{ + RunAsUser: ptr.To[int64](1000), + }, + ImagePullSecrets: []corev1.LocalObjectReference{{ + Name: "img-pull", + }}, + NodeSelector: map[string]string{ + "some-node": "selector", + }, + Tolerations: []corev1.Toleration{{ + 
Key: "key", + Value: "value", + TolerationSeconds: ptr.To[int64](60), + }}, + Container: tsapi.RecorderContainer{ + Env: []tsapi.Env{{ + Name: "some_env", + Value: "env_value", + }}, + Image: "custom-image", + ImagePullPolicy: corev1.PullAlways, + SecurityContext: &corev1.SecurityContext{ + Capabilities: &corev1.Capabilities{ + Add: []corev1.Capability{ + "NET_ADMIN", + }, + }, + }, + Resources: corev1.ResourceRequirements{ + Limits: corev1.ResourceList{ + corev1.ResourceCPU: resource.MustParse("100m"), + }, + Requests: corev1.ResourceList{ + corev1.ResourceCPU: resource.MustParse("50m"), + }, + }, + }, + }, + }, + }, + } + + ss := tsrStatefulSet(tsr, tsNamespace) + + // StatefulSet-level. + if diff := cmp.Diff(ss.Annotations, tsr.Spec.StatefulSet.Annotations); diff != "" { + t.Errorf("(-got +want):\n%s", diff) + } + if diff := cmp.Diff(ss.Spec.Template.Annotations, tsr.Spec.StatefulSet.Pod.Annotations); diff != "" { + t.Errorf("(-got +want):\n%s", diff) + } + + // Pod-level. + if diff := cmp.Diff(ss.Labels, labels("recorder", "test", tsr.Spec.StatefulSet.Labels)); diff != "" { + t.Errorf("(-got +want):\n%s", diff) + } + if diff := cmp.Diff(ss.Spec.Template.Labels, labels("recorder", "test", tsr.Spec.StatefulSet.Pod.Labels)); diff != "" { + t.Errorf("(-got +want):\n%s", diff) + } + if diff := cmp.Diff(ss.Spec.Template.Spec.Affinity, tsr.Spec.StatefulSet.Pod.Affinity); diff != "" { + t.Errorf("(-got +want):\n%s", diff) + } + if diff := cmp.Diff(ss.Spec.Template.Spec.SecurityContext, tsr.Spec.StatefulSet.Pod.SecurityContext); diff != "" { + t.Errorf("(-got +want):\n%s", diff) + } + if diff := cmp.Diff(ss.Spec.Template.Spec.ImagePullSecrets, tsr.Spec.StatefulSet.Pod.ImagePullSecrets); diff != "" { + t.Errorf("(-got +want):\n%s", diff) + } + if diff := cmp.Diff(ss.Spec.Template.Spec.NodeSelector, tsr.Spec.StatefulSet.Pod.NodeSelector); diff != "" { + t.Errorf("(-got +want):\n%s", diff) + } + if diff := cmp.Diff(ss.Spec.Template.Spec.Tolerations, 
tsr.Spec.StatefulSet.Pod.Tolerations); diff != "" { + t.Errorf("(-got +want):\n%s", diff) + } + + // Container-level. + if diff := cmp.Diff(ss.Spec.Template.Spec.Containers[0].Env, env(tsr)); diff != "" { + t.Errorf("(-got +want):\n%s", diff) + } + if diff := cmp.Diff(ss.Spec.Template.Spec.Containers[0].Image, tsr.Spec.StatefulSet.Pod.Container.Image); diff != "" { + t.Errorf("(-got +want):\n%s", diff) + } + if diff := cmp.Diff(ss.Spec.Template.Spec.Containers[0].ImagePullPolicy, tsr.Spec.StatefulSet.Pod.Container.ImagePullPolicy); diff != "" { + t.Errorf("(-got +want):\n%s", diff) + } + if diff := cmp.Diff(ss.Spec.Template.Spec.Containers[0].SecurityContext, tsr.Spec.StatefulSet.Pod.Container.SecurityContext); diff != "" { + t.Errorf("(-got +want):\n%s", diff) + } + if diff := cmp.Diff(ss.Spec.Template.Spec.Containers[0].Resources, tsr.Spec.StatefulSet.Pod.Container.Resources); diff != "" { + t.Errorf("(-got +want):\n%s", diff) + } + }) +} diff --git a/cmd/k8s-operator/tsrecorder_test.go b/cmd/k8s-operator/tsrecorder_test.go new file mode 100644 index 0000000000000..cff7021051bdf --- /dev/null +++ b/cmd/k8s-operator/tsrecorder_test.go @@ -0,0 +1,162 @@ +// Copyright (c) Tailscale Inc & AUTHORS +// SPDX-License-Identifier: BSD-3-Clause + +//go:build !plan9 + +package main + +import ( + "context" + "encoding/json" + "testing" + + "github.com/google/go-cmp/cmp" + "go.uber.org/zap" + appsv1 "k8s.io/api/apps/v1" + corev1 "k8s.io/api/core/v1" + rbacv1 "k8s.io/api/rbac/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/client-go/tools/record" + "sigs.k8s.io/controller-runtime/pkg/client" + "sigs.k8s.io/controller-runtime/pkg/client/fake" + tsoperator "tailscale.com/k8s-operator" + tsapi "tailscale.com/k8s-operator/apis/v1alpha1" + "tailscale.com/tstest" +) + +const tsNamespace = "tailscale" + +func TestRecorder(t *testing.T) { + tsr := &tsapi.Recorder{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test", + Finalizers: []string{"tailscale.com/finalizer"}, + }, + } 
+ + fc := fake.NewClientBuilder(). + WithScheme(tsapi.GlobalScheme). + WithObjects(tsr). + WithStatusSubresource(tsr). + Build() + tsClient := &fakeTSClient{} + zl, _ := zap.NewDevelopment() + fr := record.NewFakeRecorder(1) + cl := tstest.NewClock(tstest.ClockOpts{}) + reconciler := &RecorderReconciler{ + tsNamespace: tsNamespace, + Client: fc, + tsClient: tsClient, + recorder: fr, + l: zl.Sugar(), + clock: cl, + } + + t.Run("invalid spec gives an error condition", func(t *testing.T) { + expectReconciled(t, reconciler, "", tsr.Name) + + msg := "Recorder is invalid: must either enable UI or use S3 storage to ensure recordings are accessible" + tsoperator.SetRecorderCondition(tsr, tsapi.RecorderReady, metav1.ConditionFalse, reasonRecorderInvalid, msg, 0, cl, zl.Sugar()) + expectEqual(t, fc, tsr, nil) + if expected := 0; reconciler.recorders.Len() != expected { + t.Fatalf("expected %d recorders, got %d", expected, reconciler.recorders.Len()) + } + expectRecorderResources(t, fc, tsr, false) + + expectedEvent := "Warning RecorderInvalid Recorder is invalid: must either enable UI or use S3 storage to ensure recordings are accessible" + expectEvents(t, fr, []string{expectedEvent}) + }) + + t.Run("observe Ready=true status condition for a valid spec", func(t *testing.T) { + tsr.Spec.EnableUI = true + mustUpdate(t, fc, "", "test", func(t *tsapi.Recorder) { + t.Spec = tsr.Spec + }) + + expectReconciled(t, reconciler, "", tsr.Name) + + tsoperator.SetRecorderCondition(tsr, tsapi.RecorderReady, metav1.ConditionTrue, reasonRecorderCreated, reasonRecorderCreated, 0, cl, zl.Sugar()) + expectEqual(t, fc, tsr, nil) + if expected := 1; reconciler.recorders.Len() != expected { + t.Fatalf("expected %d recorders, got %d", expected, reconciler.recorders.Len()) + } + expectRecorderResources(t, fc, tsr, true) + }) + + t.Run("populate node info in state secret, and see it appear in status", func(t *testing.T) { + bytes, err := json.Marshal(map[string]any{ + "Config": map[string]any{ + 
"NodeID": "nodeid-123", + "UserProfile": map[string]any{ + "LoginName": "test-0.example.ts.net", + }, + }, + }) + if err != nil { + t.Fatal(err) + } + + const key = "profile-abc" + mustUpdate(t, fc, tsNamespace, "test-0", func(s *corev1.Secret) { + s.Data = map[string][]byte{ + currentProfileKey: []byte(key), + key: bytes, + } + }) + + expectReconciled(t, reconciler, "", tsr.Name) + tsr.Status.Devices = []tsapi.TailnetDevice{ + { + Hostname: "test-device", + TailnetIPs: []string{"1.2.3.4", "::1"}, + URL: "https://test-0.example.ts.net", + }, + } + expectEqual(t, fc, tsr, nil) + }) + + t.Run("delete the Recorder and observe cleanup", func(t *testing.T) { + if err := fc.Delete(context.Background(), tsr); err != nil { + t.Fatal(err) + } + + expectReconciled(t, reconciler, "", tsr.Name) + + expectMissing[tsapi.Recorder](t, fc, "", tsr.Name) + if expected := 0; reconciler.recorders.Len() != expected { + t.Fatalf("expected %d recorders, got %d", expected, reconciler.recorders.Len()) + } + if diff := cmp.Diff(tsClient.deleted, []string{"nodeid-123"}); diff != "" { + t.Fatalf("unexpected deleted devices (-got +want):\n%s", diff) + } + // The fake client does not clean up objects whose owner has been + // deleted, so we can't test for the owned resources getting deleted. 
+ }) +} + +func expectRecorderResources(t *testing.T, fc client.WithWatch, tsr *tsapi.Recorder, shouldExist bool) { + t.Helper() + + auth := tsrAuthSecret(tsr, tsNamespace, "secret-authkey") + state := tsrStateSecret(tsr, tsNamespace) + role := tsrRole(tsr, tsNamespace) + roleBinding := tsrRoleBinding(tsr, tsNamespace) + serviceAccount := tsrServiceAccount(tsr, tsNamespace) + statefulSet := tsrStatefulSet(tsr, tsNamespace) + + if shouldExist { + expectEqual(t, fc, auth, nil) + expectEqual(t, fc, state, nil) + expectEqual(t, fc, role, nil) + expectEqual(t, fc, roleBinding, nil) + expectEqual(t, fc, serviceAccount, nil) + expectEqual(t, fc, statefulSet, nil) + } else { + expectMissing[corev1.Secret](t, fc, auth.Namespace, auth.Name) + expectMissing[corev1.Secret](t, fc, state.Namespace, state.Name) + expectMissing[rbacv1.Role](t, fc, role.Namespace, role.Name) + expectMissing[rbacv1.RoleBinding](t, fc, roleBinding.Namespace, roleBinding.Name) + expectMissing[corev1.ServiceAccount](t, fc, serviceAccount.Namespace, serviceAccount.Name) + expectMissing[appsv1.StatefulSet](t, fc, statefulSet.Namespace, statefulSet.Name) + } +} diff --git a/cmd/natc/natc.go b/cmd/natc/natc.go index 41166fd4afd74..df458755c42bc 100644 --- a/cmd/natc/natc.go +++ b/cmd/natc/natc.go @@ -489,6 +489,9 @@ type perPeerState struct { func (ps *perPeerState) domainForIP(ip netip.Addr) (_ string, ok bool) { ps.mu.Lock() defer ps.mu.Unlock() + if ps.addrToDomain == nil { + return "", false + } return ps.addrToDomain.Lookup(ip) } diff --git a/cmd/stund/depaware.txt b/cmd/stund/depaware.txt index 84fbe69b78270..09540c833a115 100644 --- a/cmd/stund/depaware.txt +++ b/cmd/stund/depaware.txt @@ -50,6 +50,7 @@ tailscale.com/cmd/stund dependencies: (generated by github.com/tailscale/depawar google.golang.org/protobuf/types/known/timestamppb from github.com/prometheus/client_golang/prometheus+ tailscale.com from tailscale.com/version tailscale.com/envknob from tailscale.com/tsweb+ + 
tailscale.com/kube/kubetypes from tailscale.com/envknob tailscale.com/metrics from tailscale.com/net/stunserver+ tailscale.com/net/netaddr from tailscale.com/net/tsaddr tailscale.com/net/stun from tailscale.com/net/stunserver @@ -81,14 +82,15 @@ tailscale.com/cmd/stund dependencies: (generated by github.com/tailscale/depawar tailscale.com/version/distro from tailscale.com/envknob golang.org/x/crypto/blake2b from golang.org/x/crypto/nacl/box golang.org/x/crypto/chacha20 from golang.org/x/crypto/chacha20poly1305 - golang.org/x/crypto/chacha20poly1305 from crypto/tls + golang.org/x/crypto/chacha20poly1305 from crypto/tls+ golang.org/x/crypto/cryptobyte from crypto/ecdsa+ golang.org/x/crypto/cryptobyte/asn1 from crypto/ecdsa+ golang.org/x/crypto/curve25519 from golang.org/x/crypto/nacl/box+ - golang.org/x/crypto/hkdf from crypto/tls + golang.org/x/crypto/hkdf from crypto/tls+ golang.org/x/crypto/nacl/box from tailscale.com/types/key golang.org/x/crypto/nacl/secretbox from golang.org/x/crypto/nacl/box golang.org/x/crypto/salsa20/salsa from golang.org/x/crypto/nacl/box+ + golang.org/x/crypto/sha3 from crypto/internal/mlkem768+ golang.org/x/net/dns/dnsmessage from net golang.org/x/net/http/httpguts from net/http golang.org/x/net/http/httpproxy from net/http @@ -153,6 +155,7 @@ tailscale.com/cmd/stund dependencies: (generated by github.com/tailscale/depawar io from bufio+ io/fs from crypto/x509+ io/ioutil from google.golang.org/protobuf/internal/impl + iter from maps+ log from expvar+ log/internal from log maps from tailscale.com/tailcfg+ @@ -168,7 +171,7 @@ tailscale.com/cmd/stund dependencies: (generated by github.com/tailscale/depawar net/http from expvar+ net/http/httptrace from net/http net/http/internal from net/http - net/http/pprof from tailscale.com/tsweb+ + net/http/pprof from tailscale.com/tsweb net/netip from go4.org/netipx+ net/textproto from golang.org/x/net/http/httpguts+ net/url from crypto/x509+ @@ -195,3 +198,4 @@ tailscale.com/cmd/stund dependencies: 
(generated by github.com/tailscale/depawar unicode from bytes+ unicode/utf16 from crypto/x509+ unicode/utf8 from bufio+ + unique from net/netip diff --git a/cmd/stunstamp/stunstamp.go b/cmd/stunstamp/stunstamp.go index 950fdc2cddb84..c3842e2e8b3be 100644 --- a/cmd/stunstamp/stunstamp.go +++ b/cmd/stunstamp/stunstamp.go @@ -53,7 +53,16 @@ var ( ) const ( - minInterval = time.Second + // maxTxJitter is the upper bounds for jitter introduced across probes + maxTXJitter = time.Millisecond * 400 + // minInterval is the minimum allowed probe interval/step + minInterval = time.Second * 10 + // txRxTimeout is the timeout value used for kernel timestamping loopback, + // and packet receive operations + txRxTimeout = time.Second * 2 + // maxBufferDuration is the maximum duration (maxBufferDuration / + // *flagInterval steps worth) of buffered data that can be held in memory + // before data loss occurs around prometheus unavailability. maxBufferDuration = time.Hour ) @@ -322,7 +331,7 @@ func measureSTUNRTT(conn io.ReadWriteCloser, _ string, dst netip.AddrPort) (rtt if !ok { return 0, fmt.Errorf("unexpected conn type: %T", conn) } - err = uconn.SetReadDeadline(time.Now().Add(time.Second * 2)) + err = uconn.SetReadDeadline(time.Now().Add(txRxTimeout)) if err != nil { return 0, fmt.Errorf("error setting read deadline: %w", err) } @@ -371,27 +380,6 @@ type nodeMeta struct { type measureFn func(conn io.ReadWriteCloser, hostname string, dst netip.AddrPort) (rtt time.Duration, err error) -// probe measures round trip time for the node described by meta over cf against -// dstPort. It may return a nil duration and nil error in the event of a -// timeout. A non-nil error indicates an unrecoverable or non-temporary error. 
-func probe(meta nodeMeta, cf *connAndMeasureFn, dstPort int) (*time.Duration, error) { - ua := &net.UDPAddr{ - IP: net.IP(meta.addr.AsSlice()), - Port: dstPort, - } - - time.Sleep(rand.N(200 * time.Millisecond)) // jitter across tx - rtt, err := cf.fn(cf.conn, meta.hostname, netip.AddrPortFrom(meta.addr, uint16(dstPort))) - if err != nil { - if isTemporaryOrTimeoutErr(err) { - log.Printf("temp error measuring RTT to %s(%s): %v", meta.hostname, ua.String(), err) - return nil, nil - } - return nil, err - } - return &rtt, nil -} - // nodeMetaFromDERPMap parses the provided DERP map in order to update nodeMeta // in the provided nodeMetaByAddr. It returns a slice of nodeMeta containing // the nodes that are no longer seen in the DERP map, but were previously held @@ -460,7 +448,7 @@ type connAndMeasureFn struct { // newConnAndMeasureFn returns a connAndMeasureFn or an error. It may return // nil for both if some combination of the supplied timestampSource, protocol, // or connStability is unsupported. 
-func newConnAndMeasureFn(source timestampSource, protocol protocol, stable connStability) (*connAndMeasureFn, error) { +func newConnAndMeasureFn(forDst netip.Addr, source timestampSource, protocol protocol, stable connStability) (*connAndMeasureFn, error) { info := getProtocolSupportInfo(protocol) if !info.stableConn && bool(stable) { return nil, nil @@ -493,8 +481,14 @@ func newConnAndMeasureFn(source timestampSource, protocol protocol, stable connS }, nil } case protocolICMP: - // TODO(jwhited): implement - return nil, nil + conn, err := getICMPConn(forDst, source) + if err != nil { + return nil, err + } + return &connAndMeasureFn{ + conn: conn, + fn: mkICMPMeasureFn(source), + }, nil case protocolHTTPS: localPort := 0 if stable { @@ -558,7 +552,7 @@ func getConns( if !ok { for _, source := range []timestampSource{timestampSourceUserspace, timestampSourceKernel} { var cf *connAndMeasureFn - cf, err = newConnAndMeasureFn(source, protocol, stableConn) + cf, err = newConnAndMeasureFn(addr, source, protocol, stableConn) if err != nil { return } @@ -569,7 +563,7 @@ func getConns( for _, source := range []timestampSource{timestampSourceUserspace, timestampSourceKernel} { var cf *connAndMeasureFn - cf, err = newConnAndMeasureFn(source, protocol, unstableConn) + cf, err = newConnAndMeasureFn(addr, source, protocol, unstableConn) if err != nil { return } @@ -605,16 +599,24 @@ func probeNodes(nodeMetaByAddr map[netip.Addr]nodeMeta, stableConns map[stableCo }, at: at, } - rtt, err := probe(meta, cf, dstPort) + time.Sleep(rand.N(maxTXJitter)) // jitter across tx + addrPort := netip.AddrPortFrom(meta.addr, uint16(dstPort)) + rtt, err := cf.fn(cf.conn, meta.hostname, addrPort) if err != nil { - select { - case <-doneCh: - return - case errCh <- err: - return + if isTemporaryOrTimeoutErr(err) { + r.rtt = nil + log.Printf("%s: temp error measuring RTT to %s(%s): %v", protocol, meta.hostname, addrPort, err) + } else { + select { + case <-doneCh: + return + case errCh <- 
fmt.Errorf("%s: %v", protocol, err): + return + } } + } else { + r.rtt = &rtt } - r.rtt = rtt select { case <-doneCh: case resultsCh <- r: @@ -953,13 +955,6 @@ func main() { log.Fatal("nothing to probe") } - // TODO(jwhited): remove protocol restriction - for k := range portsByProtocol { - if k != protocolSTUN && k != protocolHTTPS && k != protocolTCP { - log.Fatal("ICMP is not yet supported") - } - } - if len(*flagDERPMap) < 1 { log.Fatal("derp-map flag is unset") } diff --git a/cmd/stunstamp/stunstamp_default.go b/cmd/stunstamp/stunstamp_default.go index 36afdbb8fc044..a244d9aea6410 100644 --- a/cmd/stunstamp/stunstamp_default.go +++ b/cmd/stunstamp/stunstamp_default.go @@ -40,10 +40,26 @@ func getProtocolSupportInfo(p protocol) protocolSupportInfo { userspaceTS: false, stableConn: true, } + case protocolICMP: + return protocolSupportInfo{ + kernelTS: false, + userspaceTS: false, + stableConn: false, + } } return protocolSupportInfo{} } +func getICMPConn(forDst netip.Addr, source timestampSource) (io.ReadWriteCloser, error) { + return nil, errors.New("platform unsupported") +} + +func mkICMPMeasureFn(source timestampSource) measureFn { + return func(conn io.ReadWriteCloser, hostname string, dst netip.AddrPort) (rtt time.Duration, err error) { + return 0, errors.New("platform unsupported") + } +} + func setSOReuseAddr(fd uintptr) error { return nil } diff --git a/cmd/stunstamp/stunstamp_linux.go b/cmd/stunstamp/stunstamp_linux.go index e73d1ee3c59ea..387805feff2f1 100644 --- a/cmd/stunstamp/stunstamp_linux.go +++ b/cmd/stunstamp/stunstamp_linux.go @@ -10,17 +10,22 @@ import ( "errors" "fmt" "io" + "math" + "math/rand/v2" "net/netip" "syscall" "time" "github.com/mdlayher/socket" + "golang.org/x/net/icmp" + "golang.org/x/net/ipv4" + "golang.org/x/net/ipv6" "golang.org/x/sys/unix" "tailscale.com/net/stun" ) const ( - flags = unix.SOF_TIMESTAMPING_TX_SOFTWARE | // tx timestamp generation in device driver + timestampingFlags = unix.SOF_TIMESTAMPING_TX_SOFTWARE | // tx 
timestamp generation in device driver unix.SOF_TIMESTAMPING_RX_SOFTWARE | // rx timestamp generation in the kernel unix.SOF_TIMESTAMPING_SOFTWARE // report software timestamps ) @@ -35,7 +40,7 @@ func getUDPConnKernelTimestamp() (io.ReadWriteCloser, error) { if err != nil { return nil, err } - err = sconn.SetsockoptInt(unix.SOL_SOCKET, unix.SO_TIMESTAMPING_NEW, flags) + err = sconn.SetsockoptInt(unix.SOL_SOCKET, unix.SO_TIMESTAMPING_NEW, timestampingFlags) if err != nil { return nil, err } @@ -57,7 +62,128 @@ func parseTimestampFromCmsgs(oob []byte) (time.Time, error) { return time.Time{}, errors.New("failed to parse timestamp from cmsgs") } -func measureSTUNRTTKernel(conn io.ReadWriteCloser, hostname string, dst netip.AddrPort) (rtt time.Duration, err error) { +func mkICMPMeasureFn(source timestampSource) measureFn { + return func(conn io.ReadWriteCloser, hostname string, dst netip.AddrPort) (rtt time.Duration, err error) { + return measureICMPRTT(source, conn, hostname, dst) + } +} + +func measureICMPRTT(source timestampSource, conn io.ReadWriteCloser, _ string, dst netip.AddrPort) (rtt time.Duration, err error) { + sconn, ok := conn.(*socket.Conn) + if !ok { + return 0, fmt.Errorf("conn of unexpected type: %T", conn) + } + txBody := &icmp.Echo{ + // The kernel overrides this and routes appropriately so there is no + // point in setting or verifying. + ID: 0, + // Make this sufficiently random so that we do not account a late + // arriving reply in a future probe window. + Seq: int(rand.Int32N(math.MaxUint16)), + // Fingerprint ourselves. 
+ Data: []byte("stunstamp"), + } + txMsg := icmp.Message{ + Body: txBody, + } + var to unix.Sockaddr + if dst.Addr().Is4() { + txMsg.Type = ipv4.ICMPTypeEcho + to = &unix.SockaddrInet4{} + copy(to.(*unix.SockaddrInet4).Addr[:], dst.Addr().AsSlice()) + } else { + txMsg.Type = ipv6.ICMPTypeEchoRequest + to = &unix.SockaddrInet6{} + copy(to.(*unix.SockaddrInet6).Addr[:], dst.Addr().AsSlice()) + } + txBuf, err := txMsg.Marshal(nil) + if err != nil { + return 0, err + } + txAt := time.Now() + err = sconn.Sendto(context.Background(), txBuf, 0, to) + if err != nil { + return 0, fmt.Errorf("sendto error: %v", err) + } + + if source == timestampSourceKernel { + txCtx, txCancel := context.WithTimeout(context.Background(), txRxTimeout) + defer txCancel() + + buf := make([]byte, 1024) + oob := make([]byte, 1024) + + for { + n, oobn, _, _, err := sconn.Recvmsg(txCtx, buf, oob, unix.MSG_ERRQUEUE) + if err != nil { + return 0, fmt.Errorf("recvmsg (MSG_ERRQUEUE) error: %v", err) // don't wrap + } + + buf = buf[:n] + // Spin until we find the message we sent. We get the full packet + // looped including eth header so match against the tail. 
+ if n < len(txBuf) { + continue + } + txLoopedMsg, err := icmp.ParseMessage(txMsg.Type.Protocol(), buf[len(buf)-len(txBuf):]) + if err != nil { + continue + } + txLoopedBody, ok := txLoopedMsg.Body.(*icmp.Echo) + if !ok || txLoopedBody.Seq != txBody.Seq || txLoopedMsg.Code != txMsg.Code || + txLoopedMsg.Type != txLoopedMsg.Type || !bytes.Equal(txLoopedBody.Data, txBody.Data) { + continue + } + txAt, err = parseTimestampFromCmsgs(oob[:oobn]) + if err != nil { + return 0, fmt.Errorf("failed to get tx timestamp: %v", err) // don't wrap + } + break + } + } + + rxCtx, rxCancel := context.WithTimeout(context.Background(), txRxTimeout) + defer rxCancel() + + rxBuf := make([]byte, 1024) + oob := make([]byte, 1024) + for { + n, oobn, _, _, err := sconn.Recvmsg(rxCtx, rxBuf, oob, 0) + if err != nil { + return 0, fmt.Errorf("recvmsg error: %w", err) + } + rxAt := time.Now() + rxMsg, err := icmp.ParseMessage(txMsg.Type.Protocol(), rxBuf[:n]) + if err != nil { + continue + } + if txMsg.Type == ipv4.ICMPTypeEcho { + if rxMsg.Type != ipv4.ICMPTypeEchoReply { + continue + } + } else { + if rxMsg.Type != ipv6.ICMPTypeEchoReply { + continue + } + } + if rxMsg.Code != txMsg.Code { + continue + } + rxBody, ok := rxMsg.Body.(*icmp.Echo) + if !ok || rxBody.Seq != txBody.Seq || !bytes.Equal(rxBody.Data, txBody.Data) { + continue + } + if source == timestampSourceKernel { + rxAt, err = parseTimestampFromCmsgs(oob[:oobn]) + if err != nil { + return 0, fmt.Errorf("failed to get rx timestamp: %v", err) + } + } + return rxAt.Sub(txAt), nil + } +} + +func measureSTUNRTTKernel(conn io.ReadWriteCloser, _ string, dst netip.AddrPort) (rtt time.Duration, err error) { sconn, ok := conn.(*socket.Conn) if !ok { return 0, fmt.Errorf("conn of unexpected type: %T", conn) @@ -84,7 +210,7 @@ func measureSTUNRTTKernel(conn io.ReadWriteCloser, hostname string, dst netip.Ad return 0, fmt.Errorf("sendto error: %v", err) // don't wrap } - txCtx, txCancel := context.WithTimeout(context.Background(), 
time.Second*2) + txCtx, txCancel := context.WithTimeout(context.Background(), txRxTimeout) defer txCancel() buf := make([]byte, 1024) @@ -110,7 +236,7 @@ func measureSTUNRTTKernel(conn io.ReadWriteCloser, hostname string, dst netip.Ad break } - rxCtx, rxCancel := context.WithTimeout(context.Background(), time.Second*2) + rxCtx, rxCancel := context.WithTimeout(context.Background(), txRxTimeout) defer rxCancel() for { @@ -138,6 +264,23 @@ func measureSTUNRTTKernel(conn io.ReadWriteCloser, hostname string, dst netip.Ad } +func getICMPConn(forDst netip.Addr, source timestampSource) (io.ReadWriteCloser, error) { + domain := unix.AF_INET + proto := unix.IPPROTO_ICMP + if forDst.Is6() { + domain = unix.AF_INET6 + proto = unix.IPPROTO_ICMPV6 + } + conn, err := socket.Socket(domain, unix.SOCK_DGRAM, proto, "icmp", nil) + if err != nil { + return nil, err + } + if source == timestampSourceKernel { + err = conn.SetsockoptInt(unix.SOL_SOCKET, unix.SO_TIMESTAMPING_NEW, timestampingFlags) + } + return conn, err +} + func getProtocolSupportInfo(p protocol) protocolSupportInfo { switch p { case protocolSTUN: @@ -158,7 +301,12 @@ func getProtocolSupportInfo(p protocol) protocolSupportInfo { userspaceTS: false, stableConn: true, } - // TODO(jwhited): add ICMP + case protocolICMP: + return protocolSupportInfo{ + kernelTS: true, + userspaceTS: true, + stableConn: false, + } } return protocolSupportInfo{} } diff --git a/cmd/systray/README.md b/cmd/systray/README.md new file mode 100644 index 0000000000000..786434d130a43 --- /dev/null +++ b/cmd/systray/README.md @@ -0,0 +1,11 @@ +# systray + +The systray command is a minimal Tailscale systray application for Linux. +It is designed to provide quick access to common operations like profile switching +and exit node selection. + +## Supported platforms + +The `fyne.io/systray` package we use supports Windows, macOS, Linux, and many BSDs, +so the systray application will likely work for the most part on those platforms. 
+Notifications currently only work on Linux, as that is the main target. diff --git a/cmd/systray/logo.go b/cmd/systray/logo.go new file mode 100644 index 0000000000000..cd79c94a02ea4 --- /dev/null +++ b/cmd/systray/logo.go @@ -0,0 +1,220 @@ +// Copyright (c) Tailscale Inc & AUTHORS +// SPDX-License-Identifier: BSD-3-Clause + +//go:build cgo || !darwin + +package main + +import ( + "bytes" + "context" + "image/color" + "image/png" + "sync" + "time" + + "fyne.io/systray" + "github.com/fogleman/gg" +) + +// tsLogo represents the state of the 3x3 dot grid in the Tailscale logo. +// A 0 represents a gray dot, any other value is a white dot. +type tsLogo [9]byte + +var ( + // disconnected is all gray dots + disconnected = tsLogo{ + 0, 0, 0, + 0, 0, 0, + 0, 0, 0, + } + + // connected is the normal Tailscale logo + connected = tsLogo{ + 0, 0, 0, + 1, 1, 1, + 0, 1, 0, + } + + // loading is a special tsLogo value that is not meant to be rendered directly, + // but indicates that the loading animation should be shown. + loading = tsLogo{'l', 'o', 'a', 'd', 'i', 'n', 'g'} + + // loadingIcons are shown in sequence as an animated loading icon. + loadingLogos = []tsLogo{ + { + 0, 1, 1, + 1, 0, 1, + 0, 0, 1, + }, + { + 0, 1, 1, + 0, 0, 1, + 0, 1, 0, + }, + { + 0, 1, 1, + 0, 0, 0, + 0, 0, 1, + }, + { + 0, 0, 1, + 0, 1, 0, + 0, 0, 0, + }, + { + 0, 1, 0, + 0, 0, 0, + 0, 0, 0, + }, + { + 0, 0, 0, + 0, 0, 1, + 0, 0, 0, + }, + { + 0, 0, 0, + 0, 0, 0, + 0, 0, 0, + }, + { + 0, 0, 1, + 0, 0, 0, + 0, 0, 0, + }, + { + 0, 0, 0, + 0, 0, 0, + 1, 0, 0, + }, + { + 0, 0, 0, + 0, 0, 0, + 1, 1, 0, + }, + { + 0, 0, 0, + 1, 0, 0, + 1, 1, 0, + }, + { + 0, 0, 0, + 1, 1, 0, + 0, 1, 0, + }, + { + 0, 0, 0, + 1, 1, 0, + 0, 1, 1, + }, + { + 0, 0, 0, + 1, 1, 1, + 0, 0, 1, + }, + { + 0, 1, 0, + 0, 1, 1, + 1, 0, 1, + }, + } +) + +var ( + black = color.NRGBA{0, 0, 0, 255} + white = color.NRGBA{255, 255, 255, 255} + gray = color.NRGBA{255, 255, 255, 102} +) + +// render returns a PNG image of the logo. 
+func (logo tsLogo) render() *bytes.Buffer { + const radius = 25 + const borderUnits = 1 + dim := radius * (8 + borderUnits*2) + + dc := gg.NewContext(dim, dim) + dc.DrawRectangle(0, 0, float64(dim), float64(dim)) + dc.SetColor(black) + dc.Fill() + + for y := 0; y < 3; y++ { + for x := 0; x < 3; x++ { + px := (borderUnits + 1 + 3*x) * radius + py := (borderUnits + 1 + 3*y) * radius + col := white + if logo[y*3+x] == 0 { + col = gray + } + dc.DrawCircle(float64(px), float64(py), radius) + dc.SetColor(col) + dc.Fill() + } + } + + b := bytes.NewBuffer(nil) + png.Encode(b, dc.Image()) + return b +} + +// setAppIcon renders logo and sets it as the systray icon. +func setAppIcon(icon tsLogo) { + if icon == loading { + startLoadingAnimation() + } else { + stopLoadingAnimation() + systray.SetIcon(icon.render().Bytes()) + } +} + +var ( + loadingMu sync.Mutex // protects loadingCancel + + // loadingCancel stops the loading animation in the systray icon. + // This is nil if the animation is not currently active. + loadingCancel func() +) + +// startLoadingAnimation starts the animated loading icon in the system tray. +// The animation continues until [stopLoadingAnimation] is called. +// If the loading animation is already active, this func does nothing. +func startLoadingAnimation() { + loadingMu.Lock() + defer loadingMu.Unlock() + + if loadingCancel != nil { + // loading icon already displayed + return + } + + ctx := context.Background() + ctx, loadingCancel = context.WithCancel(ctx) + + go func() { + t := time.NewTicker(500 * time.Millisecond) + var i int + for { + select { + case <-ctx.Done(): + return + case <-t.C: + systray.SetIcon(loadingLogos[i].render().Bytes()) + i++ + if i >= len(loadingLogos) { + i = 0 + } + } + } + }() +} + +// stopLoadingAnimation stops the animated loading icon in the system tray. +// If the loading animation is not currently active, this func does nothing. 
+func stopLoadingAnimation() { + loadingMu.Lock() + defer loadingMu.Unlock() + + if loadingCancel != nil { + loadingCancel() + loadingCancel = nil + } +} diff --git a/cmd/systray/systray.go b/cmd/systray/systray.go new file mode 100644 index 0000000000000..aca38f627c65a --- /dev/null +++ b/cmd/systray/systray.go @@ -0,0 +1,258 @@ +// Copyright (c) Tailscale Inc & AUTHORS +// SPDX-License-Identifier: BSD-3-Clause + +//go:build cgo || !darwin + +// The systray command is a minimal Tailscale systray application for Linux. +package main + +import ( + "context" + "errors" + "fmt" + "io" + "log" + "os" + "strings" + "sync" + "time" + + "fyne.io/systray" + "github.com/atotto/clipboard" + dbus "github.com/godbus/dbus/v5" + "github.com/toqueteos/webbrowser" + "tailscale.com/client/tailscale" + "tailscale.com/ipn" + "tailscale.com/ipn/ipnstate" +) + +var ( + localClient tailscale.LocalClient + chState chan ipn.State // tailscale state changes + + appIcon *os.File +) + +func main() { + systray.Run(onReady, onExit) +} + +// Menu represents the systray menu, its items, and the current Tailscale state. +type Menu struct { + mu sync.Mutex // protects the entire Menu + status *ipnstate.Status + + connect *systray.MenuItem + disconnect *systray.MenuItem + + self *systray.MenuItem + more *systray.MenuItem + quit *systray.MenuItem + + eventCancel func() // cancel eventLoop +} + +func onReady() { + log.Printf("starting") + ctx := context.Background() + + setAppIcon(disconnected) + + // dbus wants a file path for notification icons, so copy to a temp file. + appIcon, _ = os.CreateTemp("", "tailscale-systray.png") + io.Copy(appIcon, connected.render()) + + chState = make(chan ipn.State, 1) + + status, err := localClient.Status(ctx) + if err != nil { + log.Print(err) + } + + menu := new(Menu) + menu.rebuild(status) + + go watchIPNBus(ctx) +} + +// rebuild the systray menu based on the current Tailscale state. 
+// +// We currently rebuild the entire menu because it is not easy to update the existing menu. +// You cannot iterate over the items in a menu, nor can you remove some items like separators. +// So for now we rebuild the whole thing, and can optimize this later if needed. +func (menu *Menu) rebuild(status *ipnstate.Status) { + menu.mu.Lock() + defer menu.mu.Unlock() + + if menu.eventCancel != nil { + menu.eventCancel() + } + menu.status = status + systray.ResetMenu() + + menu.connect = systray.AddMenuItem("Connect", "") + menu.disconnect = systray.AddMenuItem("Disconnect", "") + menu.disconnect.Hide() + systray.AddSeparator() + + if status != nil && status.Self != nil { + title := fmt.Sprintf("This Device: %s (%s)", status.Self.HostName, status.Self.TailscaleIPs[0]) + menu.self = systray.AddMenuItem(title, "") + } + systray.AddSeparator() + + menu.more = systray.AddMenuItem("More settings", "") + menu.more.Enable() + + menu.quit = systray.AddMenuItem("Quit", "Quit the app") + menu.quit.Enable() + + ctx := context.Background() + ctx, menu.eventCancel = context.WithCancel(ctx) + go menu.eventLoop(ctx) +} + +// eventLoop is the main event loop for handling click events on menu items +// and responding to Tailscale state changes. +// This method does not return until ctx.Done is closed. 
+func (menu *Menu) eventLoop(ctx context.Context) { + for { + select { + case <-ctx.Done(): + return + case state := <-chState: + switch state { + case ipn.Running: + setAppIcon(loading) + status, err := localClient.Status(ctx) + if err != nil { + log.Printf("error getting tailscale status: %v", err) + } + menu.rebuild(status) + setAppIcon(connected) + menu.connect.SetTitle("Connected") + menu.connect.Disable() + menu.disconnect.Show() + menu.disconnect.Enable() + case ipn.NoState, ipn.Stopped: + menu.connect.SetTitle("Connect") + menu.connect.Enable() + menu.disconnect.Hide() + setAppIcon(disconnected) + case ipn.Starting: + setAppIcon(loading) + } + case <-menu.connect.ClickedCh: + _, err := localClient.EditPrefs(ctx, &ipn.MaskedPrefs{ + Prefs: ipn.Prefs{ + WantRunning: true, + }, + WantRunningSet: true, + }) + if err != nil { + log.Print(err) + continue + } + + case <-menu.disconnect.ClickedCh: + _, err := localClient.EditPrefs(ctx, &ipn.MaskedPrefs{ + Prefs: ipn.Prefs{ + WantRunning: false, + }, + WantRunningSet: true, + }) + if err != nil { + log.Printf("disconnecting: %v", err) + continue + } + + case <-menu.self.ClickedCh: + copyTailscaleIP(menu.status.Self) + + case <-menu.more.ClickedCh: + webbrowser.Open("http://100.100.100.100/") + + case <-menu.quit.ClickedCh: + systray.Quit() + } + } +} + +// watchIPNBus subscribes to the tailscale event bus and sends state updates to chState. +// This method does not return. +func watchIPNBus(ctx context.Context) { + for { + if err := watchIPNBusInner(ctx); err != nil { + log.Println(err) + if errors.Is(err, context.Canceled) { + // If the context got canceled, we will never be able to + // reconnect to IPN bus, so exit the process. + log.Fatalf("watchIPNBus: %v", err) + } + } + // If our watch connection breaks, wait a bit before reconnecting. No + // reason to spam the logs if e.g. tailscaled is restarting or goes + // down. 
+ time.Sleep(3 * time.Second) + } +} + +func watchIPNBusInner(ctx context.Context) error { + watcher, err := localClient.WatchIPNBus(ctx, ipn.NotifyInitialState|ipn.NotifyNoPrivateKeys) + if err != nil { + return fmt.Errorf("watching ipn bus: %w", err) + } + defer watcher.Close() + for { + select { + case <-ctx.Done(): + return nil + default: + n, err := watcher.Next() + if err != nil { + return fmt.Errorf("ipnbus error: %w", err) + } + if n.State != nil { + chState <- *n.State + log.Printf("new state: %v", n.State) + } + } + } +} + +// copyTailscaleIP copies the first Tailscale IP of the given device to the clipboard +// and sends a notification with the copied value. +func copyTailscaleIP(device *ipnstate.PeerStatus) { + if device == nil || len(device.TailscaleIPs) == 0 { + return + } + name := strings.Split(device.DNSName, ".")[0] + ip := device.TailscaleIPs[0].String() + err := clipboard.WriteAll(ip) + if err != nil { + log.Printf("clipboard error: %v", err) + } + + sendNotification(fmt.Sprintf("Copied Address for %v", name), ip) +} + +// sendNotification sends a desktop notification with the given title and content. +func sendNotification(title, content string) { + conn, err := dbus.SessionBus() + if err != nil { + log.Printf("dbus: %v", err) + return + } + timeout := 3 * time.Second + obj := conn.Object("org.freedesktop.Notifications", "/org/freedesktop/Notifications") + call := obj.Call("org.freedesktop.Notifications.Notify", 0, "Tailscale", uint32(0), + appIcon.Name(), title, content, []string{}, map[string]dbus.Variant{}, int32(timeout.Milliseconds())) + if call.Err != nil { + log.Printf("dbus: %v", call.Err) + } +} + +func onExit() { + log.Printf("exiting") + os.Remove(appIcon.Name()) +} diff --git a/cmd/tailscale/cli/cli.go b/cmd/tailscale/cli/cli.go index efbdd3e40680a..864cf6903a6d0 100644 --- a/cmd/tailscale/cli/cli.go +++ b/cmd/tailscale/cli/cli.go @@ -187,6 +187,7 @@ change in the future. 
configureCmd, netcheckCmd, ipCmd, + dnsCmd, statusCmd, pingCmd, ncCmd, diff --git a/cmd/tailscale/cli/debug.go b/cmd/tailscale/cli/debug.go index f6d98a830191e..e98a9e0789657 100644 --- a/cmd/tailscale/cli/debug.go +++ b/cmd/tailscale/cli/debug.go @@ -22,6 +22,7 @@ import ( "os" "os/exec" "runtime" + "runtime/debug" "strconv" "strings" "time" @@ -319,9 +320,36 @@ var debugCmd = &ffcli.Command{ return fs })(), }, + { + Name: "resolve", + ShortUsage: "tailscale debug resolve ", + Exec: runDebugResolve, + ShortHelp: "Does a DNS lookup", + FlagSet: (func() *flag.FlagSet { + fs := newFlagSet("resolve") + fs.StringVar(&resolveArgs.net, "net", "ip", "network type to resolve (ip, ip4, ip6)") + return fs + })(), + }, + { + Name: "go-buildinfo", + ShortUsage: "tailscale debug go-buildinfo", + ShortHelp: "Prints Go's runtime/debug.BuildInfo", + Exec: runGoBuildInfo, + }, }, } +func runGoBuildInfo(ctx context.Context, args []string) error { + bi, ok := debug.ReadBuildInfo() + if !ok { + return errors.New("no Go build info") + } + e := json.NewEncoder(os.Stdout) + e.SetIndent("", "\t") + return e.Encode(bi) +} + var debugArgs struct { file string cpuSec int @@ -1167,3 +1195,26 @@ func runDebugDialTypes(ctx context.Context, args []string) error { fmt.Printf("%s", body) return nil } + +var resolveArgs struct { + net string // "ip", "ip4", "ip6"" +} + +func runDebugResolve(ctx context.Context, args []string) error { + if len(args) != 1 { + return errors.New("usage: tailscale debug resolve ") + } + + ctx, cancel := context.WithTimeout(ctx, 5*time.Second) + defer cancel() + + host := args[0] + ips, err := net.DefaultResolver.LookupIP(ctx, resolveArgs.net, host) + if err != nil { + return err + } + for _, ip := range ips { + fmt.Printf("%s\n", ip) + } + return nil +} diff --git a/cmd/tailscale/cli/dns-status.go b/cmd/tailscale/cli/dns-status.go new file mode 100644 index 0000000000000..0d59e4b9157f0 --- /dev/null +++ b/cmd/tailscale/cli/dns-status.go @@ -0,0 +1,242 @@ +// Copyright 
(c) Tailscale Inc & AUTHORS +// SPDX-License-Identifier: BSD-3-Clause + +package cli + +import ( + "context" + "fmt" + "maps" + "slices" + "strings" + + "tailscale.com/ipn" + "tailscale.com/types/netmap" +) + +// dnsStatusArgs are the arguments for the "dns status" subcommand. +var dnsStatusArgs struct { + all bool +} + +func runDNSStatus(ctx context.Context, args []string) error { + all := dnsStatusArgs.all + s, err := localClient.Status(ctx) + if err != nil { + return err + } + + prefs, err := localClient.GetPrefs(ctx) + if err != nil { + return err + } + enabledStr := "disabled.\n\n(Run 'tailscale set --accept-dns=true' to start sending DNS queries to the Tailscale DNS resolver)" + if prefs.CorpDNS { + enabledStr = "enabled.\n\nTailscale is configured to handle DNS queries on this device.\nRun 'tailscale set --accept-dns=false' to revert to your system default DNS resolver." + } + fmt.Print("\n") + fmt.Println("=== 'Use Tailscale DNS' status ===") + fmt.Print("\n") + fmt.Printf("Tailscale DNS: %s\n", enabledStr) + fmt.Print("\n") + fmt.Println("=== MagicDNS configuration ===") + fmt.Print("\n") + fmt.Println("This is the DNS configuration provided by the coordination server to this device.") + fmt.Print("\n") + if s.CurrentTailnet == nil { + fmt.Println("No tailnet information available; make sure you're logged in to a tailnet.") + return nil + } else if s.CurrentTailnet.MagicDNSEnabled { + fmt.Printf("MagicDNS: enabled tailnet-wide (suffix = %s)", s.CurrentTailnet.MagicDNSSuffix) + fmt.Print("\n\n") + fmt.Printf("Other devices in your tailnet can reach this device at %s\n", s.Self.DNSName) + } else { + fmt.Printf("MagicDNS: disabled tailnet-wide.\n") + } + fmt.Print("\n") + + netMap, err := fetchNetMap() + if err != nil { + fmt.Printf("Failed to fetch network map: %v\n", err) + return err + } + dnsConfig := netMap.DNS + fmt.Println("Resolvers (in preference order):") + if len(dnsConfig.Resolvers) == 0 { + fmt.Println(" (no resolvers configured, system default 
will be used: see 'System DNS configuration' below)") + } + for _, r := range dnsConfig.Resolvers { + fmt.Printf(" - %v", r.Addr) + if r.BootstrapResolution != nil { + fmt.Printf(" (bootstrap: %v)", r.BootstrapResolution) + } + fmt.Print("\n") + } + fmt.Print("\n") + fmt.Println("Split DNS Routes:") + if len(dnsConfig.Routes) == 0 { + fmt.Println(" (no routes configured: split DNS might not be in use)") + } + for _, k := range slices.Sorted(maps.Keys(dnsConfig.Routes)) { + v := dnsConfig.Routes[k] + for _, r := range v { + fmt.Printf(" - %-30s -> %v", k, r.Addr) + if r.BootstrapResolution != nil { + fmt.Printf(" (bootstrap: %v)", r.BootstrapResolution) + } + fmt.Print("\n") + } + } + fmt.Print("\n") + if all { + fmt.Println("Fallback Resolvers:") + if len(dnsConfig.FallbackResolvers) == 0 { + fmt.Println(" (no fallback resolvers configured)") + } + for i, r := range dnsConfig.FallbackResolvers { + fmt.Printf(" %d: %v\n", i, r) + } + fmt.Print("\n") + } + fmt.Println("Search Domains:") + if len(dnsConfig.Domains) == 0 { + fmt.Println(" (no search domains configured)") + } + domains := dnsConfig.Domains + slices.Sort(domains) + for _, r := range domains { + fmt.Printf(" - %v\n", r) + } + fmt.Print("\n") + if all { + fmt.Println("Nameservers IP Addresses:") + if len(dnsConfig.Nameservers) == 0 { + fmt.Println(" (none were provided)") + } + for _, r := range dnsConfig.Nameservers { + fmt.Printf(" - %v\n", r) + } + fmt.Print("\n") + fmt.Println("Certificate Domains:") + if len(dnsConfig.CertDomains) == 0 { + fmt.Println(" (no certificate domains are configured)") + } + for _, r := range dnsConfig.CertDomains { + fmt.Printf(" - %v\n", r) + } + fmt.Print("\n") + fmt.Println("Additional DNS Records:") + if len(dnsConfig.ExtraRecords) == 0 { + fmt.Println(" (no extra records are configured)") + } + for _, er := range dnsConfig.ExtraRecords { + if er.Type == "" { + fmt.Printf(" - %-50s -> %v\n", er.Name, er.Value) + } else { + fmt.Printf(" - [%s] %-50s -> %v\n", er.Type, 
er.Name, er.Value) + } + } + fmt.Print("\n") + fmt.Println("Filtered suffixes when forwarding DNS queries as an exit node:") + if len(dnsConfig.ExitNodeFilteredSet) == 0 { + fmt.Println(" (no suffixes are filtered)") + } + for _, s := range dnsConfig.ExitNodeFilteredSet { + fmt.Printf(" - %s\n", s) + } + fmt.Print("\n") + } + + fmt.Println("=== System DNS configuration ===") + fmt.Print("\n") + fmt.Println("This is the DNS configuration that Tailscale believes your operating system is using.\nTailscale may use this configuration if 'Override Local DNS' is disabled in the admin console,\nor if no resolvers are provided by the coordination server.") + fmt.Print("\n") + osCfg, err := localClient.GetDNSOSConfig(ctx) + if err != nil { + if strings.Contains(err.Error(), "not supported") { + // avoids showing the HTTP error code which would be odd here + fmt.Println(" (reading the system DNS configuration is not supported on this platform)") + } else { + fmt.Printf(" (failed to read system DNS configuration: %v)\n", err) + } + } else if osCfg == nil { + fmt.Println(" (no OS DNS configuration available)") + } else { + fmt.Println("Nameservers:") + if len(osCfg.Nameservers) == 0 { + fmt.Println(" (no nameservers found, DNS queries might fail\nunless the coordination server is providing a nameserver)") + } + for _, ns := range osCfg.Nameservers { + fmt.Printf(" - %v\n", ns) + } + fmt.Print("\n") + fmt.Println("Search domains:") + if len(osCfg.SearchDomains) == 0 { + fmt.Println(" (no search domains found)") + } + for _, sd := range osCfg.SearchDomains { + fmt.Printf(" - %v\n", sd) + } + if all { + fmt.Print("\n") + fmt.Println("Match domains:") + if len(osCfg.MatchDomains) == 0 { + fmt.Println(" (no match domains found)") + } + for _, md := range osCfg.MatchDomains { + fmt.Printf(" - %v\n", md) + } + } + } + fmt.Print("\n") + fmt.Println("[this is a preliminary version of this command; the output format may change in the future]") + return nil +} + +func fetchNetMap() 
(netMap *netmap.NetworkMap, err error) { + w, err := localClient.WatchIPNBus(context.Background(), ipn.NotifyInitialNetMap) + if err != nil { + return nil, err + } + defer w.Close() + notify, err := w.Next() + if err != nil { + return nil, err + } + if notify.NetMap == nil { + return nil, fmt.Errorf("no network map yet available, please try again later") + } + return notify.NetMap, nil +} + +func dnsStatusLongHelp() string { + return `The 'tailscale dns status' subcommand prints the current DNS status and configuration, including: + +- Whether the built-in DNS forwarder is enabled. +- The MagicDNS configuration provided by the coordination server. +- Details on which resolver(s) Tailscale believes the system is using by default. + +The --all flag can be used to output advanced debugging information, including fallback resolvers, nameservers, certificate domains, extra records, and the exit node filtered set. + +=== Contents of the MagicDNS configuration === + +The MagicDNS configuration is provided by the coordination server to the client and includes the following components: + +- MagicDNS enablement status: Indicates whether MagicDNS is enabled across the entire tailnet. + +- MagicDNS Suffix: The DNS suffix used for devices within your tailnet. + +- DNS Name: The DNS name that other devices in the tailnet can use to reach this device. + +- Resolvers: The preferred DNS resolver(s) to be used for resolving queries, in order of preference. If no resolvers are listed here, the system defaults are used. + +- Split DNS Routes: Custom DNS resolvers may be used to resolve hostnames in specific domains, this is also known as a 'Split DNS' configuration. The mapping of domains to their respective resolvers is provided here. + +- Certificate Domains: The DNS names for which the coordination server will assist in provisioning TLS certificates. + +- Extra Records: Additional DNS records that the coordination server might provide to the internal DNS resolver. 
+ +- Exit Node Filtered Set: DNS suffixes that the node, when acting as an exit node DNS proxy, will not answer. + +For more information about the DNS functionality built into Tailscale, refer to https://tailscale.com/kb/1054/dns.` +} diff --git a/cmd/tailscale/cli/dns.go b/cmd/tailscale/cli/dns.go new file mode 100644 index 0000000000000..2825556952521 --- /dev/null +++ b/cmd/tailscale/cli/dns.go @@ -0,0 +1,44 @@ +// Copyright (c) Tailscale Inc & AUTHORS +// SPDX-License-Identifier: BSD-3-Clause + +package cli + +import ( + "flag" + + "github.com/peterbourgon/ff/v3/ffcli" +) + +var dnsCmd = &ffcli.Command{ + Name: "dns", + ShortHelp: "Diagnose the internal DNS forwarder", + LongHelp: dnsCmdLongHelp(), + ShortUsage: "tailscale dns [flags]", + UsageFunc: usageFuncNoDefaultValues, + Subcommands: []*ffcli.Command{ + { + Name: "status", + ShortUsage: "tailscale dns status [--all]", + Exec: runDNSStatus, + ShortHelp: "Prints the current DNS status and configuration", + LongHelp: dnsStatusLongHelp(), + FlagSet: (func() *flag.FlagSet { + fs := newFlagSet("status") + fs.BoolVar(&dnsStatusArgs.all, "all", false, "outputs advanced debugging information (fallback resolvers, nameservers, cert domains, extra records, and exit node filtered set)") + return fs + })(), + }, + + // TODO: implement `tailscale query` here + + // TODO: implement `tailscale log` here + + // The above work is tracked in https://github.com/tailscale/tailscale/issues/13326 + }, +} + +func dnsCmdLongHelp() string { + return `The 'tailscale dns' subcommand provides tools for diagnosing the internal DNS forwarder (100.100.100.100). 
+ +For more information about the DNS functionality built into Tailscale, refer to https://tailscale.com/kb/1054/dns.` +} diff --git a/cmd/tailscale/cli/exitnode_test.go b/cmd/tailscale/cli/exitnode_test.go index 4f66fa7561a23..9d569a45a4615 100644 --- a/cmd/tailscale/cli/exitnode_test.go +++ b/cmd/tailscale/cli/exitnode_test.go @@ -135,7 +135,7 @@ func TestFilterFormatAndSortExitNodes(t *testing.T) { result := filterFormatAndSortExitNodes(ps, "") if res := cmp.Diff(result.Countries, want.Countries, cmpopts.IgnoreUnexported(key.NodePublic{})); res != "" { - t.Fatalf(res) + t.Fatal(res) } }) @@ -230,7 +230,7 @@ func TestFilterFormatAndSortExitNodes(t *testing.T) { result := filterFormatAndSortExitNodes(ps, "Pacific") if res := cmp.Diff(result.Countries, want.Countries, cmpopts.IgnoreUnexported(key.NodePublic{})); res != "" { - t.Fatalf(res) + t.Fatal(res) } }) } diff --git a/cmd/tailscale/depaware.txt b/cmd/tailscale/depaware.txt index ed43a51c4658e..d9432614fa96e 100644 --- a/cmd/tailscale/depaware.txt +++ b/cmd/tailscale/depaware.txt @@ -60,10 +60,10 @@ tailscale.com/cmd/tailscale dependencies: (generated by github.com/tailscale/dep github.com/tailscale/goupnp/soap from github.com/tailscale/goupnp+ github.com/tailscale/goupnp/ssdp from github.com/tailscale/goupnp L 💣 github.com/tailscale/netlink from tailscale.com/util/linuxfw + L 💣 github.com/tailscale/netlink/nl from github.com/tailscale/netlink github.com/tailscale/web-client-prebuilt from tailscale.com/client/web github.com/tcnksm/go-httpstat from tailscale.com/net/netcheck github.com/toqueteos/webbrowser from tailscale.com/cmd/tailscale/cli - L 💣 github.com/vishvananda/netlink/nl from github.com/tailscale/netlink L github.com/vishvananda/netns from github.com/tailscale/netlink+ github.com/x448/float16 from github.com/fxamacker/cbor/v2 💣 go4.org/mem from tailscale.com/client/tailscale+ @@ -98,8 +98,9 @@ tailscale.com/cmd/tailscale dependencies: (generated by github.com/tailscale/dep 
tailscale.com/internal/noiseconn from tailscale.com/cmd/tailscale/cli tailscale.com/ipn from tailscale.com/client/tailscale+ tailscale.com/ipn/ipnstate from tailscale.com/client/tailscale+ + tailscale.com/kube/kubetypes from tailscale.com/envknob tailscale.com/licenses from tailscale.com/client/web+ - tailscale.com/metrics from tailscale.com/derp + tailscale.com/metrics from tailscale.com/derp+ tailscale.com/net/captivedetection from tailscale.com/net/netcheck tailscale.com/net/dns/recursive from tailscale.com/net/dnsfallback tailscale.com/net/dnscache from tailscale.com/control/controlhttp+ @@ -132,13 +133,14 @@ tailscale.com/cmd/tailscale dependencies: (generated by github.com/tailscale/dep tailscale.com/tstime from tailscale.com/control/controlhttp+ tailscale.com/tstime/mono from tailscale.com/tstime/rate tailscale.com/tstime/rate from tailscale.com/cmd/tailscale/cli+ + tailscale.com/tsweb/varz from tailscale.com/util/usermetric tailscale.com/types/dnstype from tailscale.com/tailcfg tailscale.com/types/empty from tailscale.com/ipn tailscale.com/types/ipproto from tailscale.com/net/flowtrack+ tailscale.com/types/key from tailscale.com/client/tailscale+ tailscale.com/types/lazy from tailscale.com/util/testenv+ tailscale.com/types/logger from tailscale.com/client/web+ - tailscale.com/types/netmap from tailscale.com/ipn + tailscale.com/types/netmap from tailscale.com/ipn+ tailscale.com/types/nettype from tailscale.com/net/netcheck+ tailscale.com/types/opt from tailscale.com/client/tailscale+ tailscale.com/types/persist from tailscale.com/ipn @@ -173,6 +175,7 @@ tailscale.com/cmd/tailscale dependencies: (generated by github.com/tailscale/dep tailscale.com/util/syspolicy/setting from tailscale.com/util/syspolicy tailscale.com/util/testenv from tailscale.com/cmd/tailscale/cli tailscale.com/util/truncate from tailscale.com/cmd/tailscale/cli + tailscale.com/util/usermetric from tailscale.com/health tailscale.com/util/vizerror from tailscale.com/tailcfg+ 💣 
tailscale.com/util/winutil from tailscale.com/clientupdate+ W 💣 tailscale.com/util/winutil/authenticode from tailscale.com/clientupdate @@ -194,6 +197,7 @@ tailscale.com/cmd/tailscale dependencies: (generated by github.com/tailscale/dep golang.org/x/crypto/nacl/secretbox from golang.org/x/crypto/nacl/box golang.org/x/crypto/pbkdf2 from software.sslmate.com/src/go-pkcs12 golang.org/x/crypto/salsa20/salsa from golang.org/x/crypto/nacl/box+ + golang.org/x/crypto/sha3 from crypto/internal/mlkem768+ W golang.org/x/exp/constraints from github.com/dblohm7/wingoes/pe+ golang.org/x/exp/maps from tailscale.com/cmd/tailscale/cli+ golang.org/x/net/bpf from github.com/mdlayher/netlink+ @@ -283,6 +287,7 @@ tailscale.com/cmd/tailscale dependencies: (generated by github.com/tailscale/dep io from archive/tar+ io/fs from archive/tar+ io/ioutil from github.com/mitchellh/go-ps+ + iter from maps+ log from expvar+ log/internal from log maps from tailscale.com/clientupdate+ @@ -314,7 +319,7 @@ tailscale.com/cmd/tailscale dependencies: (generated by github.com/tailscale/dep regexp/syntax from regexp runtime/debug from github.com/coder/websocket/internal/xsync+ slices from tailscale.com/client/web+ - sort from archive/tar+ + sort from compress/flate+ strconv from archive/tar+ strings from archive/tar+ sync from archive/tar+ @@ -327,3 +332,4 @@ tailscale.com/cmd/tailscale dependencies: (generated by github.com/tailscale/dep unicode from bytes+ unicode/utf16 from crypto/x509+ unicode/utf8 from bufio+ + unique from net/netip diff --git a/cmd/tailscaled/depaware.txt b/cmd/tailscaled/depaware.txt index b67533cfb57dc..018e74fac7bae 100644 --- a/cmd/tailscaled/depaware.txt +++ b/cmd/tailscaled/depaware.txt @@ -115,7 +115,7 @@ tailscale.com/cmd/tailscaled dependencies: (generated by github.com/tailscale/de github.com/gorilla/csrf from tailscale.com/client/web github.com/gorilla/securecookie from github.com/gorilla/csrf github.com/hdevalence/ed25519consensus from 
tailscale.com/clientupdate/distsign+ - L 💣 github.com/illarion/gonotify from tailscale.com/net/dns + L 💣 github.com/illarion/gonotify/v2 from tailscale.com/net/dns L github.com/insomniacslk/dhcp/dhcpv4 from tailscale.com/net/tstun L github.com/insomniacslk/dhcp/iana from github.com/insomniacslk/dhcp/dhcpv4 L github.com/insomniacslk/dhcp/interfaces from github.com/insomniacslk/dhcp/dhcpv4 @@ -139,7 +139,7 @@ tailscale.com/cmd/tailscaled dependencies: (generated by github.com/tailscale/de L 💣 github.com/mdlayher/netlink/nlenc from github.com/jsimonetti/rtnetlink+ L github.com/mdlayher/netlink/nltest from github.com/google/nftables L github.com/mdlayher/sdnotify from tailscale.com/util/systemd - L 💣 github.com/mdlayher/socket from github.com/mdlayher/netlink + L 💣 github.com/mdlayher/socket from github.com/mdlayher/netlink+ github.com/miekg/dns from tailscale.com/net/dns/recursive 💣 github.com/mitchellh/go-ps from tailscale.com/safesocket L github.com/pierrec/lz4/v4 from github.com/u-root/uio/uio @@ -169,6 +169,7 @@ tailscale.com/cmd/tailscaled dependencies: (generated by github.com/tailscale/de github.com/tailscale/goupnp/ssdp from github.com/tailscale/goupnp github.com/tailscale/hujson from tailscale.com/ipn/conffile L 💣 github.com/tailscale/netlink from tailscale.com/net/routetable+ + L 💣 github.com/tailscale/netlink/nl from github.com/tailscale/netlink github.com/tailscale/peercred from tailscale.com/ipn/ipnauth github.com/tailscale/web-client-prebuilt from tailscale.com/client/web W 💣 github.com/tailscale/wf from tailscale.com/wf @@ -188,7 +189,6 @@ tailscale.com/cmd/tailscaled dependencies: (generated by github.com/tailscale/de LD github.com/u-root/u-root/pkg/termios from tailscale.com/ssh/tailssh L github.com/u-root/uio/rand from github.com/insomniacslk/dhcp/dhcpv4 L github.com/u-root/uio/uio from github.com/insomniacslk/dhcp/dhcpv4+ - L 💣 github.com/vishvananda/netlink/nl from github.com/tailscale/netlink L github.com/vishvananda/netns from 
github.com/tailscale/netlink+ github.com/x448/float16 from github.com/fxamacker/cbor/v2 💣 go4.org/mem from tailscale.com/client/tailscale+ @@ -225,7 +225,7 @@ tailscale.com/cmd/tailscaled dependencies: (generated by github.com/tailscale/de gvisor.dev/gvisor/pkg/tcpip/ports from gvisor.dev/gvisor/pkg/tcpip/stack+ gvisor.dev/gvisor/pkg/tcpip/seqnum from gvisor.dev/gvisor/pkg/tcpip/header+ 💣 gvisor.dev/gvisor/pkg/tcpip/stack from gvisor.dev/gvisor/pkg/tcpip/adapters/gonet+ - gvisor.dev/gvisor/pkg/tcpip/stack/gro from tailscale.com/wgengine/netstack + gvisor.dev/gvisor/pkg/tcpip/stack/gro from tailscale.com/wgengine/netstack/gro gvisor.dev/gvisor/pkg/tcpip/transport from gvisor.dev/gvisor/pkg/tcpip/transport/icmp+ gvisor.dev/gvisor/pkg/tcpip/transport/icmp from tailscale.com/wgengine/netstack gvisor.dev/gvisor/pkg/tcpip/transport/internal/network from gvisor.dev/gvisor/pkg/tcpip/transport/icmp+ @@ -279,7 +279,9 @@ tailscale.com/cmd/tailscaled dependencies: (generated by github.com/tailscale/de L tailscale.com/ipn/store/awsstore from tailscale.com/ipn/store L tailscale.com/ipn/store/kubestore from tailscale.com/ipn/store tailscale.com/ipn/store/mem from tailscale.com/ipn/ipnlocal+ - L tailscale.com/kube from tailscale.com/ipn/store/kubestore + L tailscale.com/kube/kubeapi from tailscale.com/ipn/store/kubestore+ + L tailscale.com/kube/kubeclient from tailscale.com/ipn/store/kubestore + tailscale.com/kube/kubetypes from tailscale.com/envknob tailscale.com/licenses from tailscale.com/client/web tailscale.com/log/filelogger from tailscale.com/logpolicy tailscale.com/log/sockstatlog from tailscale.com/ipn/ipnlocal @@ -343,7 +345,7 @@ tailscale.com/cmd/tailscaled dependencies: (generated by github.com/tailscale/de tailscale.com/tstime from tailscale.com/control/controlclient+ tailscale.com/tstime/mono from tailscale.com/net/tstun+ tailscale.com/tstime/rate from tailscale.com/derp+ - tailscale.com/tsweb/varz from tailscale.com/cmd/tailscaled + tailscale.com/tsweb/varz from 
tailscale.com/cmd/tailscaled+ tailscale.com/types/appctype from tailscale.com/ipn/ipnlocal tailscale.com/types/dnstype from tailscale.com/ipn/ipnlocal+ tailscale.com/types/empty from tailscale.com/ipn+ @@ -386,7 +388,7 @@ tailscale.com/cmd/tailscaled dependencies: (generated by github.com/tailscale/de 💣 tailscale.com/util/osdiag from tailscale.com/cmd/tailscaled+ W 💣 tailscale.com/util/osdiag/internal/wsc from tailscale.com/util/osdiag tailscale.com/util/osshare from tailscale.com/cmd/tailscaled+ - tailscale.com/util/osuser from tailscale.com/ipn/localapi+ + tailscale.com/util/osuser from tailscale.com/ipn/ipnlocal+ tailscale.com/util/progresstracking from tailscale.com/ipn/localapi tailscale.com/util/race from tailscale.com/net/dns/resolver tailscale.com/util/racebuild from tailscale.com/logpolicy @@ -403,6 +405,7 @@ tailscale.com/cmd/tailscaled dependencies: (generated by github.com/tailscale/de tailscale.com/util/testenv from tailscale.com/ipn/ipnlocal+ tailscale.com/util/truncate from tailscale.com/logtail tailscale.com/util/uniq from tailscale.com/ipn/ipnlocal+ + tailscale.com/util/usermetric from tailscale.com/health+ tailscale.com/util/vizerror from tailscale.com/tailcfg+ 💣 tailscale.com/util/winutil from tailscale.com/clientupdate+ W 💣 tailscale.com/util/winutil/authenticode from tailscale.com/clientupdate+ @@ -420,6 +423,7 @@ tailscale.com/cmd/tailscaled dependencies: (generated by github.com/tailscale/de 💣 tailscale.com/wgengine/magicsock from tailscale.com/ipn/ipnlocal+ tailscale.com/wgengine/netlog from tailscale.com/wgengine tailscale.com/wgengine/netstack from tailscale.com/cmd/tailscaled + tailscale.com/wgengine/netstack/gro from tailscale.com/net/tstun+ tailscale.com/wgengine/router from tailscale.com/cmd/tailscaled+ tailscale.com/wgengine/wgcfg from tailscale.com/ipn/ipnlocal+ tailscale.com/wgengine/wgcfg/nmcfg from tailscale.com/ipn/ipnlocal @@ -438,8 +442,9 @@ tailscale.com/cmd/tailscaled dependencies: (generated by github.com/tailscale/de 
golang.org/x/crypto/hkdf from crypto/tls+ golang.org/x/crypto/nacl/box from tailscale.com/types/key golang.org/x/crypto/nacl/secretbox from golang.org/x/crypto/nacl/box - golang.org/x/crypto/poly1305 from github.com/tailscale/wireguard-go/device+ + golang.org/x/crypto/poly1305 from github.com/tailscale/wireguard-go/device golang.org/x/crypto/salsa20/salsa from golang.org/x/crypto/nacl/box+ + golang.org/x/crypto/sha3 from crypto/internal/mlkem768+ LD golang.org/x/crypto/ssh from github.com/pkg/sftp+ golang.org/x/exp/constraints from github.com/dblohm7/wingoes/pe+ golang.org/x/exp/maps from tailscale.com/appc+ @@ -529,6 +534,7 @@ tailscale.com/cmd/tailscaled dependencies: (generated by github.com/tailscale/de io from archive/tar+ io/fs from archive/tar+ io/ioutil from github.com/aws/aws-sdk-go-v2/aws/protocol/query+ + iter from maps+ log from expvar+ log/internal from log LD log/syslog from tailscale.com/ssh/tailssh @@ -564,7 +570,7 @@ tailscale.com/cmd/tailscaled dependencies: (generated by github.com/tailscale/de runtime/pprof from net/http/pprof+ runtime/trace from net/http/pprof slices from tailscale.com/appc+ - sort from archive/tar+ + sort from compress/flate+ strconv from archive/tar+ strings from archive/tar+ sync from archive/tar+ @@ -577,3 +583,4 @@ tailscale.com/cmd/tailscaled dependencies: (generated by github.com/tailscale/de unicode from bytes+ unicode/utf16 from crypto/x509+ unicode/utf8 from bufio+ + unique from net/netip diff --git a/cmd/tailscaled/required_version.go b/cmd/tailscaled/required_version.go index 03ef740b040c5..3acb3d52e4d8c 100644 --- a/cmd/tailscaled/required_version.go +++ b/cmd/tailscaled/required_version.go @@ -1,10 +1,10 @@ // Copyright (c) Tailscale Inc & AUTHORS // SPDX-License-Identifier: BSD-3-Clause -//go:build !go1.21 +//go:build !go1.23 package main func init() { - you_need_Go_1_21_to_compile_Tailscale() + you_need_Go_1_23_to_compile_Tailscale() } diff --git a/cmd/tailscaled/tailscaled.go b/cmd/tailscaled/tailscaled.go 
index 6dbf6c98231de..eb53f4f15e157 100644 --- a/cmd/tailscaled/tailscaled.go +++ b/cmd/tailscaled/tailscaled.go @@ -1,7 +1,7 @@ // Copyright (c) Tailscale Inc & AUTHORS // SPDX-License-Identifier: BSD-3-Clause -//go:build go1.21 +//go:build go1.23 // The tailscaled program is the Tailscale client daemon. It's configured // and controlled via the tailscale CLI program. @@ -417,6 +417,10 @@ func run() (err error) { sys.Set(driveimpl.NewFileSystemForRemote(logf)) + if app := envknob.App(); app != "" { + hostinfo.SetApp(app) + } + return startIPNServer(context.Background(), logf, pol.PublicID, sys) } diff --git a/cmd/tl-longchain/tl-longchain.go b/cmd/tl-longchain/tl-longchain.go new file mode 100644 index 0000000000000..c92714505b8be --- /dev/null +++ b/cmd/tl-longchain/tl-longchain.go @@ -0,0 +1,93 @@ +// Copyright (c) Tailscale Inc & AUTHORS +// SPDX-License-Identifier: BSD-3-Clause + +// Program tl-longchain prints commands to re-sign Tailscale nodes that have +// long rotation signature chains. +// +// There is an implicit limit on the number of rotation signatures that can +// be chained before the signature becomes too long. This program helps +// tailnet admins to identify nodes that have signatures with long chains and +// prints commands to re-sign those node keys with a fresh direct signature. +// Commands are printed to stdout, while log messages are printed to stderr. +// +// Note that the Tailscale client this command is executed on must have +// ACL visibility to all other nodes to be able to see their signatures. 
+// https://tailscale.com/kb/1087/device-visibility +package main + +import ( + "context" + "flag" + "fmt" + "log" + "time" + + "tailscale.com/client/tailscale" + "tailscale.com/ipn/ipnstate" + "tailscale.com/tka" + "tailscale.com/types/key" +) + +var ( + flagSocket = flag.String("socket", "", "custom path to tailscaled socket") + maxRotations = flag.Int("rotations", 10, "number of rotation signatures before re-signing (max 16)") + showFiltered = flag.Bool("show-filtered", false, "include nodes with invalid signatures") +) + +func main() { + flag.Parse() + + lc := tailscale.LocalClient{Socket: *flagSocket} + if lc.Socket != "" { + lc.UseSocketOnly = true + } + + ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second) + defer cancel() + + st, err := lc.NetworkLockStatus(ctx) + if err != nil { + log.Fatalf("could not get Tailnet Lock status: %v", err) + } + if !st.Enabled { + log.Print("Tailnet Lock is not enabled") + return + } + print("Self", *st.NodeKey, *st.NodeKeySignature) + if len(st.VisiblePeers) > 0 { + log.Print("Visible peers with valid signatures:") + for _, peer := range st.VisiblePeers { + print(peerInfo(peer), peer.NodeKey, peer.NodeKeySignature) + } + } + if *showFiltered && len(st.FilteredPeers) > 0 { + log.Print("Visible peers with invalid signatures:") + for _, peer := range st.FilteredPeers { + print(peerInfo(peer), peer.NodeKey, peer.NodeKeySignature) + } + } +} + +// peerInfo returns a string with information about a peer. +func peerInfo(peer *ipnstate.TKAPeer) string { + return fmt.Sprintf("Peer %s (%s) nodeid=%s, current signature kind=%v", peer.Name, peer.TailscaleIPs[0], peer.StableID, peer.NodeKeySignature.SigKind) +} + +// print prints a message about a node key signature and a re-signing command if needed. 
+func print(info string, nodeKey key.NodePublic, sig tka.NodeKeySignature) { + if l := chainLength(sig); l > *maxRotations { + log.Printf("%s: chain length %d, printing command to re-sign", info, l) + wrapping, _ := sig.UnverifiedWrappingPublic() + fmt.Printf("tailscale lock sign %s %s\n", nodeKey, key.NLPublicFromEd25519Unsafe(wrapping).CLIString()) + } else { + log.Printf("%s: does not need re-signing", info) + } +} + +// chainLength returns the length of the rotation signature chain. +func chainLength(sig tka.NodeKeySignature) int { + if sig.SigKind != tka.SigRotation { + return 1 + } + return 1 + chainLength(*sig.Nested) +} diff --git a/cmd/tta/tta.go b/cmd/tta/tta.go index 6a676b0d20889..4a4c4a6beebfa 100644 --- a/cmd/tta/tta.go +++ b/cmd/tta/tta.go @@ -11,6 +11,7 @@ package main import ( + "bytes" "context" "errors" "flag" @@ -23,12 +24,15 @@ import ( "os" "os/exec" "regexp" + "strconv" "strings" "sync" "time" + "tailscale.com/atomicfile" "tailscale.com/client/tailscale" "tailscale.com/hostinfo" + "tailscale.com/util/mak" "tailscale.com/util/must" "tailscale.com/util/set" "tailscale.com/version/distro" @@ -70,6 +74,9 @@ func (rt *localClientRoundTripper) RoundTrip(req *http.Request) (*http.Response, } func main() { + var logBuf logBuffer + log.SetOutput(io.MultiWriter(os.Stderr, &logBuf)) + if distro.Get() == distro.Gokrazy { if !hostinfo.IsNATLabGuestVM() { // "Exiting immediately with status code 0 when the @@ -80,19 +87,31 @@ func main() { } flag.Parse() + debug := false if distro.Get() == distro.Gokrazy { - nsRx := regexp.MustCompile(`(?m)^nameserver (.*)`) - for t := time.Now(); time.Since(t) < 10*time.Second; time.Sleep(10 * time.Millisecond) { - all, _ := os.ReadFile("/etc/resolv.conf") - if nsRx.Match(all) { - break + cmdLine, _ := os.ReadFile("/proc/cmdline") + explicitNS := false + for _, s := range strings.Fields(string(cmdLine)) { + if ns, ok := strings.CutPrefix(s, "tta.nameserver="); ok { + err := atomicfile.WriteFile("/tmp/resolv.conf", 
[]byte("nameserver "+ns+"\n"), 0644) + log.Printf("Wrote /tmp/resolv.conf: %v", err) + explicitNS = true + continue + } + if v, ok := strings.CutPrefix(s, "tta.debug="); ok { + debug, _ = strconv.ParseBool(v) + continue + } + } + if !explicitNS { + nsRx := regexp.MustCompile(`(?m)^nameserver (.*)`) + for t := time.Now(); time.Since(t) < 10*time.Second; time.Sleep(10 * time.Millisecond) { + all, _ := os.ReadFile("/etc/resolv.conf") + if nsRx.Match(all) { + break + } } } - } - - logc, err := net.Dial("tcp", "9.9.9.9:124") - if err == nil { - log.SetOutput(logc) } log.Printf("Tailscale Test Agent running.") @@ -122,28 +141,11 @@ func main() { }) var hs http.Server hs.Handler = &serveMux - var ( - stMu sync.Mutex - newSet = set.Set[net.Conn]{} // conns in StateNew - ) - needConnCh := make(chan bool, 1) - hs.ConnState = func(c net.Conn, s http.ConnState) { - stMu.Lock() - defer stMu.Unlock() - oldLen := len(newSet) - switch s { - case http.StateNew: - newSet.Add(c) - default: - newSet.Delete(c) - } - if oldLen != 0 && len(newSet) == 0 { - select { - case needConnCh <- true: - default: - } - } + revSt := revDialState{ + needConnCh: make(chan bool, 1), + debug: debug, } + hs.ConnState = revSt.connState conns := make(chan net.Conn, 1) lcRP := httputil.NewSingleHostReverseProxy(must.Get(url.Parse("http://local-tailscaled.sock"))) @@ -163,11 +165,17 @@ func main() { serveCmd(w, "tailscale", "up", "--login-server=http://control.tailscale") }) ttaMux.HandleFunc("/fw", addFirewallHandler) - + ttaMux.HandleFunc("/logs", func(w http.ResponseWriter, r *http.Request) { + logBuf.mu.Lock() + defer logBuf.mu.Unlock() + w.Header().Set("Content-Type", "text/plain; charset=utf-8") + w.Write(logBuf.buf.Bytes()) + }) go hs.Serve(chanListener(conns)) // For doing agent operations locally from gokrazy: - // (e.g. with "wget -O - localhost:8123/fw") + // (e.g. 
with "wget -O - localhost:8123/fw" or "wget -O - localhost:8123/logs" + // to get early tta logs before the port 124 connection is established) go func() { err := http.ListenAndServe("127.0.0.1:8123", &ttaMux) if err != nil { @@ -175,26 +183,14 @@ func main() { } }() - var lastErr string - needConnCh <- true - for { - <-needConnCh - c, err := connect() - if err != nil { - s := err.Error() - if s != lastErr { - log.Printf("Connect failure: %v", s) - } - lastErr = s - time.Sleep(time.Second) - continue - } - conns <- c - } + revSt.runDialOutLoop(conns) } func connect() (net.Conn, error) { - c, err := net.Dial("tcp", *driverAddr) + var d net.Dialer + ctx, cancel := context.WithTimeout(context.Background(), 3*time.Second) + defer cancel() + c, err := d.DialContext(ctx, "tcp", *driverAddr) if err != nil { return nil, err } @@ -222,6 +218,100 @@ func (cl chanListener) Addr() net.Addr { } } +type revDialState struct { + needConnCh chan bool + debug bool + + mu sync.Mutex + newSet set.Set[net.Conn] // conns in StateNew + onNew map[net.Conn]func() +} + +func (s *revDialState) connState(c net.Conn, cs http.ConnState) { + s.mu.Lock() + defer s.mu.Unlock() + oldLen := len(s.newSet) + switch cs { + case http.StateNew: + if f, ok := s.onNew[c]; ok { + f() + delete(s.onNew, c) + } + s.newSet.Make() + s.newSet.Add(c) + default: + s.newSet.Delete(c) + } + s.vlogf("ConnState: %p now %v; newSet %v=>%v", c, s, oldLen, len(s.newSet)) + if len(s.newSet) < 2 { + select { + case s.needConnCh <- true: + default: + } + } +} + +func (s *revDialState) waitNeedConnect() { + for { + s.mu.Lock() + need := len(s.newSet) < 2 + s.mu.Unlock() + if need { + return + } + <-s.needConnCh + } +} + +func (s *revDialState) vlogf(format string, arg ...any) { + if !s.debug { + return + } + log.Printf(format, arg...) 
+} + +func (s *revDialState) runDialOutLoop(conns chan<- net.Conn) { + var lastErr string + connected := false + + for { + s.vlogf("[dial-driver] waiting need connect...") + s.waitNeedConnect() + s.vlogf("[dial-driver] connecting...") + t0 := time.Now() + c, err := connect() + if err != nil { + s := err.Error() + if s != lastErr { + log.Printf("[dial-driver] connect failure: %v", s) + } + lastErr = s + time.Sleep(time.Second) + continue + } + if !connected { + connected = true + log.Printf("Connected to %v", *driverAddr) + } + s.vlogf("[dial-driver] connected %v => %v after %v", c.LocalAddr(), c.RemoteAddr(), time.Since(t0)) + + inHTTP := make(chan struct{}) + s.mu.Lock() + mak.Set(&s.onNew, c, func() { close(inHTTP) }) + s.mu.Unlock() + + s.vlogf("[dial-driver] sending...") + conns <- c + s.vlogf("[dial-driver] sent; waiting") + select { + case <-inHTTP: + s.vlogf("[dial-driver] conn in HTTP") + case <-time.After(2 * time.Second): + s.vlogf("[dial-driver] timeout waiting for conn to be accepted into HTTP") + } + } +} + func addFirewallHandler(w http.ResponseWriter, r *http.Request) { if addFirewall == nil { http.Error(w, "firewall not supported", 500) @@ -236,3 +326,24 @@ func addFirewallHandler(w http.ResponseWriter, r *http.Request) { } var addFirewall func() error // set by fw_linux.go + +// logBuffer is a bytes.Buffer that is safe for concurrent use +// intended to capture early logs from the process, even if +// gokrazy's syslog streaming isn't working or yet working. +// It only captures the first 1MB of logs, as that's considered +// plenty for early debugging. At runtime, it's assumed that +// syslog log streaming is working. 
+type logBuffer struct { + mu sync.Mutex + buf bytes.Buffer +} + +func (lb *logBuffer) Write(p []byte) (n int, err error) { + lb.mu.Lock() + defer lb.mu.Unlock() + const maxSize = 1 << 20 // more than plenty; see type comment + if lb.buf.Len() > maxSize { + return len(p), nil + } + return lb.buf.Write(p) +} diff --git a/cmd/viewer/tests/tests.go b/cmd/viewer/tests/tests.go index 1f1ec05573624..14a4888615bc1 100644 --- a/cmd/viewer/tests/tests.go +++ b/cmd/viewer/tests/tests.go @@ -13,7 +13,7 @@ import ( "tailscale.com/types/views" ) -//go:generate go run tailscale.com/cmd/viewer --type=StructWithPtrs,StructWithoutPtrs,Map,StructWithSlices,OnlyGetClone,StructWithEmbedded,GenericIntStruct,GenericNoPtrsStruct,GenericCloneableStruct,StructWithContainers --clone-only-type=OnlyGetClone +//go:generate go run tailscale.com/cmd/viewer --type=StructWithPtrs,StructWithoutPtrs,Map,StructWithSlices,OnlyGetClone,StructWithEmbedded,GenericIntStruct,GenericNoPtrsStruct,GenericCloneableStruct,StructWithContainers,StructWithTypeAliasFields,GenericTypeAliasStruct --clone-only-type=OnlyGetClone type StructWithoutPtrs struct { Int int @@ -202,3 +202,34 @@ type StructWithContainers struct { CloneableMap MapContainer[int, *StructWithPtrs] CloneableGenericMap MapContainer[int, *GenericNoPtrsStruct[int]] } + +type ( + StructWithPtrsAlias = StructWithPtrs + StructWithoutPtrsAlias = StructWithoutPtrs + StructWithPtrsAliasView = StructWithPtrsView + StructWithoutPtrsAliasView = StructWithoutPtrsView +) + +type StructWithTypeAliasFields struct { + WithPtr StructWithPtrsAlias + WithoutPtr StructWithoutPtrsAlias + + WithPtrByPtr *StructWithPtrsAlias + WithoutPtrByPtr *StructWithoutPtrsAlias + + SliceWithPtrs []*StructWithPtrsAlias + SliceWithoutPtrs []*StructWithoutPtrsAlias + + MapWithPtrs map[string]*StructWithPtrsAlias + MapWithoutPtrs map[string]*StructWithoutPtrsAlias + + MapOfSlicesWithPtrs map[string][]*StructWithPtrsAlias + MapOfSlicesWithoutPtrs map[string][]*StructWithoutPtrsAlias +} + 
+type integer = constraints.Integer + +type GenericTypeAliasStruct[T integer, T2 views.ViewCloner[T2, V2], V2 views.StructView[T2]] struct { + NonCloneable T + Cloneable T2 +} diff --git a/cmd/viewer/tests/tests_clone.go b/cmd/viewer/tests/tests_clone.go index 53e6bacfb1eea..9131f5040c45d 100644 --- a/cmd/viewer/tests/tests_clone.go +++ b/cmd/viewer/tests/tests_clone.go @@ -441,3 +441,105 @@ var _StructWithContainersCloneNeedsRegeneration = StructWithContainers(struct { CloneableMap MapContainer[int, *StructWithPtrs] CloneableGenericMap MapContainer[int, *GenericNoPtrsStruct[int]] }{}) + +// Clone makes a deep copy of StructWithTypeAliasFields. +// The result aliases no memory with the original. +func (src *StructWithTypeAliasFields) Clone() *StructWithTypeAliasFields { + if src == nil { + return nil + } + dst := new(StructWithTypeAliasFields) + *dst = *src + dst.WithPtr = *src.WithPtr.Clone() + dst.WithPtrByPtr = src.WithPtrByPtr.Clone() + if dst.WithoutPtrByPtr != nil { + dst.WithoutPtrByPtr = ptr.To(*src.WithoutPtrByPtr) + } + if src.SliceWithPtrs != nil { + dst.SliceWithPtrs = make([]*StructWithPtrsAlias, len(src.SliceWithPtrs)) + for i := range dst.SliceWithPtrs { + if src.SliceWithPtrs[i] == nil { + dst.SliceWithPtrs[i] = nil + } else { + dst.SliceWithPtrs[i] = src.SliceWithPtrs[i].Clone() + } + } + } + if src.SliceWithoutPtrs != nil { + dst.SliceWithoutPtrs = make([]*StructWithoutPtrsAlias, len(src.SliceWithoutPtrs)) + for i := range dst.SliceWithoutPtrs { + if src.SliceWithoutPtrs[i] == nil { + dst.SliceWithoutPtrs[i] = nil + } else { + dst.SliceWithoutPtrs[i] = ptr.To(*src.SliceWithoutPtrs[i]) + } + } + } + if dst.MapWithPtrs != nil { + dst.MapWithPtrs = map[string]*StructWithPtrsAlias{} + for k, v := range src.MapWithPtrs { + if v == nil { + dst.MapWithPtrs[k] = nil + } else { + dst.MapWithPtrs[k] = v.Clone() + } + } + } + if dst.MapWithoutPtrs != nil { + dst.MapWithoutPtrs = map[string]*StructWithoutPtrsAlias{} + for k, v := range src.MapWithoutPtrs { + 
if v == nil { + dst.MapWithoutPtrs[k] = nil + } else { + dst.MapWithoutPtrs[k] = ptr.To(*v) + } + } + } + if dst.MapOfSlicesWithPtrs != nil { + dst.MapOfSlicesWithPtrs = map[string][]*StructWithPtrsAlias{} + for k := range src.MapOfSlicesWithPtrs { + dst.MapOfSlicesWithPtrs[k] = append([]*StructWithPtrsAlias{}, src.MapOfSlicesWithPtrs[k]...) + } + } + if dst.MapOfSlicesWithoutPtrs != nil { + dst.MapOfSlicesWithoutPtrs = map[string][]*StructWithoutPtrsAlias{} + for k := range src.MapOfSlicesWithoutPtrs { + dst.MapOfSlicesWithoutPtrs[k] = append([]*StructWithoutPtrsAlias{}, src.MapOfSlicesWithoutPtrs[k]...) + } + } + return dst +} + +// A compilation failure here means this code must be regenerated, with the command at the top of this file. +var _StructWithTypeAliasFieldsCloneNeedsRegeneration = StructWithTypeAliasFields(struct { + WithPtr StructWithPtrsAlias + WithoutPtr StructWithoutPtrsAlias + WithPtrByPtr *StructWithPtrsAlias + WithoutPtrByPtr *StructWithoutPtrsAlias + SliceWithPtrs []*StructWithPtrsAlias + SliceWithoutPtrs []*StructWithoutPtrsAlias + MapWithPtrs map[string]*StructWithPtrsAlias + MapWithoutPtrs map[string]*StructWithoutPtrsAlias + MapOfSlicesWithPtrs map[string][]*StructWithPtrsAlias + MapOfSlicesWithoutPtrs map[string][]*StructWithoutPtrsAlias +}{}) + +// Clone makes a deep copy of GenericTypeAliasStruct. +// The result aliases no memory with the original. +func (src *GenericTypeAliasStruct[T, T2, V2]) Clone() *GenericTypeAliasStruct[T, T2, V2] { + if src == nil { + return nil + } + dst := new(GenericTypeAliasStruct[T, T2, V2]) + *dst = *src + dst.Cloneable = src.Cloneable.Clone() + return dst +} + +// A compilation failure here means this code must be regenerated, with the command at the top of this file. 
+func _GenericTypeAliasStructCloneNeedsRegeneration[T integer, T2 views.ViewCloner[T2, V2], V2 views.StructView[T2]](GenericTypeAliasStruct[T, T2, V2]) { + _GenericTypeAliasStructCloneNeedsRegeneration(struct { + NonCloneable T + Cloneable T2 + }{}) +} diff --git a/cmd/viewer/tests/tests_view.go b/cmd/viewer/tests/tests_view.go index cf07dc663bf46..9c74c94261e08 100644 --- a/cmd/viewer/tests/tests_view.go +++ b/cmd/viewer/tests/tests_view.go @@ -14,7 +14,7 @@ import ( "tailscale.com/types/views" ) -//go:generate go run tailscale.com/cmd/cloner -clonefunc=false -type=StructWithPtrs,StructWithoutPtrs,Map,StructWithSlices,OnlyGetClone,StructWithEmbedded,GenericIntStruct,GenericNoPtrsStruct,GenericCloneableStruct,StructWithContainers +//go:generate go run tailscale.com/cmd/cloner -clonefunc=false -type=StructWithPtrs,StructWithoutPtrs,Map,StructWithSlices,OnlyGetClone,StructWithEmbedded,GenericIntStruct,GenericNoPtrsStruct,GenericCloneableStruct,StructWithContainers,StructWithTypeAliasFields,GenericTypeAliasStruct // View returns a readonly view of StructWithPtrs. func (p *StructWithPtrs) View() StructWithPtrsView { @@ -676,3 +676,164 @@ var _StructWithContainersViewNeedsRegeneration = StructWithContainers(struct { CloneableMap MapContainer[int, *StructWithPtrs] CloneableGenericMap MapContainer[int, *GenericNoPtrsStruct[int]] }{}) + +// View returns a readonly view of StructWithTypeAliasFields. +func (p *StructWithTypeAliasFields) View() StructWithTypeAliasFieldsView { + return StructWithTypeAliasFieldsView{ж: p} +} + +// StructWithTypeAliasFieldsView provides a read-only view over StructWithTypeAliasFields. +// +// Its methods should only be called if `Valid()` returns true. +type StructWithTypeAliasFieldsView struct { + // ж is the underlying mutable value, named with a hard-to-type + // character that looks pointy like a pointer. + // It is named distinctively to make you think of how dangerous it is to escape + // to callers. 
You must not let callers be able to mutate it. + ж *StructWithTypeAliasFields +} + +// Valid reports whether underlying value is non-nil. +func (v StructWithTypeAliasFieldsView) Valid() bool { return v.ж != nil } + +// AsStruct returns a clone of the underlying value which aliases no memory with +// the original. +func (v StructWithTypeAliasFieldsView) AsStruct() *StructWithTypeAliasFields { + if v.ж == nil { + return nil + } + return v.ж.Clone() +} + +func (v StructWithTypeAliasFieldsView) MarshalJSON() ([]byte, error) { return json.Marshal(v.ж) } + +func (v *StructWithTypeAliasFieldsView) UnmarshalJSON(b []byte) error { + if v.ж != nil { + return errors.New("already initialized") + } + if len(b) == 0 { + return nil + } + var x StructWithTypeAliasFields + if err := json.Unmarshal(b, &x); err != nil { + return err + } + v.ж = &x + return nil +} + +func (v StructWithTypeAliasFieldsView) WithPtr() StructWithPtrsView { return v.ж.WithPtr.View() } +func (v StructWithTypeAliasFieldsView) WithoutPtr() StructWithoutPtrsAlias { return v.ж.WithoutPtr } +func (v StructWithTypeAliasFieldsView) WithPtrByPtr() StructWithPtrsAliasView { + return v.ж.WithPtrByPtr.View() +} +func (v StructWithTypeAliasFieldsView) WithoutPtrByPtr() *StructWithoutPtrsAlias { + if v.ж.WithoutPtrByPtr == nil { + return nil + } + x := *v.ж.WithoutPtrByPtr + return &x +} + +func (v StructWithTypeAliasFieldsView) SliceWithPtrs() views.SliceView[*StructWithPtrsAlias, StructWithPtrsAliasView] { + return views.SliceOfViews[*StructWithPtrsAlias, StructWithPtrsAliasView](v.ж.SliceWithPtrs) +} +func (v StructWithTypeAliasFieldsView) SliceWithoutPtrs() views.SliceView[*StructWithoutPtrsAlias, StructWithoutPtrsAliasView] { + return views.SliceOfViews[*StructWithoutPtrsAlias, StructWithoutPtrsAliasView](v.ж.SliceWithoutPtrs) +} + +func (v StructWithTypeAliasFieldsView) MapWithPtrs() views.MapFn[string, *StructWithPtrsAlias, StructWithPtrsAliasView] { + return views.MapFnOf(v.ж.MapWithPtrs, func(t 
*StructWithPtrsAlias) StructWithPtrsAliasView { + return t.View() + }) +} + +func (v StructWithTypeAliasFieldsView) MapWithoutPtrs() views.MapFn[string, *StructWithoutPtrsAlias, StructWithoutPtrsAliasView] { + return views.MapFnOf(v.ж.MapWithoutPtrs, func(t *StructWithoutPtrsAlias) StructWithoutPtrsAliasView { + return t.View() + }) +} + +func (v StructWithTypeAliasFieldsView) MapOfSlicesWithPtrs() views.MapFn[string, []*StructWithPtrsAlias, views.SliceView[*StructWithPtrsAlias, StructWithPtrsAliasView]] { + return views.MapFnOf(v.ж.MapOfSlicesWithPtrs, func(t []*StructWithPtrsAlias) views.SliceView[*StructWithPtrsAlias, StructWithPtrsAliasView] { + return views.SliceOfViews[*StructWithPtrsAlias, StructWithPtrsAliasView](t) + }) +} + +func (v StructWithTypeAliasFieldsView) MapOfSlicesWithoutPtrs() views.MapFn[string, []*StructWithoutPtrsAlias, views.SliceView[*StructWithoutPtrsAlias, StructWithoutPtrsAliasView]] { + return views.MapFnOf(v.ж.MapOfSlicesWithoutPtrs, func(t []*StructWithoutPtrsAlias) views.SliceView[*StructWithoutPtrsAlias, StructWithoutPtrsAliasView] { + return views.SliceOfViews[*StructWithoutPtrsAlias, StructWithoutPtrsAliasView](t) + }) +} + +// A compilation failure here means this code must be regenerated, with the command at the top of this file. +var _StructWithTypeAliasFieldsViewNeedsRegeneration = StructWithTypeAliasFields(struct { + WithPtr StructWithPtrsAlias + WithoutPtr StructWithoutPtrsAlias + WithPtrByPtr *StructWithPtrsAlias + WithoutPtrByPtr *StructWithoutPtrsAlias + SliceWithPtrs []*StructWithPtrsAlias + SliceWithoutPtrs []*StructWithoutPtrsAlias + MapWithPtrs map[string]*StructWithPtrsAlias + MapWithoutPtrs map[string]*StructWithoutPtrsAlias + MapOfSlicesWithPtrs map[string][]*StructWithPtrsAlias + MapOfSlicesWithoutPtrs map[string][]*StructWithoutPtrsAlias +}{}) + +// View returns a readonly view of GenericTypeAliasStruct. 
+func (p *GenericTypeAliasStruct[T, T2, V2]) View() GenericTypeAliasStructView[T, T2, V2] { + return GenericTypeAliasStructView[T, T2, V2]{ж: p} +} + +// GenericTypeAliasStructView[T, T2, V2] provides a read-only view over GenericTypeAliasStruct[T, T2, V2]. +// +// Its methods should only be called if `Valid()` returns true. +type GenericTypeAliasStructView[T integer, T2 views.ViewCloner[T2, V2], V2 views.StructView[T2]] struct { + // ж is the underlying mutable value, named with a hard-to-type + // character that looks pointy like a pointer. + // It is named distinctively to make you think of how dangerous it is to escape + // to callers. You must not let callers be able to mutate it. + ж *GenericTypeAliasStruct[T, T2, V2] +} + +// Valid reports whether underlying value is non-nil. +func (v GenericTypeAliasStructView[T, T2, V2]) Valid() bool { return v.ж != nil } + +// AsStruct returns a clone of the underlying value which aliases no memory with +// the original. +func (v GenericTypeAliasStructView[T, T2, V2]) AsStruct() *GenericTypeAliasStruct[T, T2, V2] { + if v.ж == nil { + return nil + } + return v.ж.Clone() +} + +func (v GenericTypeAliasStructView[T, T2, V2]) MarshalJSON() ([]byte, error) { + return json.Marshal(v.ж) +} + +func (v *GenericTypeAliasStructView[T, T2, V2]) UnmarshalJSON(b []byte) error { + if v.ж != nil { + return errors.New("already initialized") + } + if len(b) == 0 { + return nil + } + var x GenericTypeAliasStruct[T, T2, V2] + if err := json.Unmarshal(b, &x); err != nil { + return err + } + v.ж = &x + return nil +} + +func (v GenericTypeAliasStructView[T, T2, V2]) NonCloneable() T { return v.ж.NonCloneable } +func (v GenericTypeAliasStructView[T, T2, V2]) Cloneable() V2 { return v.ж.Cloneable.View() } + +// A compilation failure here means this code must be regenerated, with the command at the top of this file. 
+func _GenericTypeAliasStructViewNeedsRegeneration[T integer, T2 views.ViewCloner[T2, V2], V2 views.StructView[T2]](GenericTypeAliasStruct[T, T2, V2]) { + _GenericTypeAliasStructViewNeedsRegeneration(struct { + NonCloneable T + Cloneable T2 + }{}) +} diff --git a/cmd/viewer/viewer.go b/cmd/viewer/viewer.go index 2e122a128e2c8..96223297b46e2 100644 --- a/cmd/viewer/viewer.go +++ b/cmd/viewer/viewer.go @@ -230,7 +230,7 @@ func genView(buf *bytes.Buffer, it *codegen.ImportTracker, typ *types.Named, thi writeTemplate("sliceField") } continue - case *types.Struct, *types.Named: + case *types.Struct: strucT := underlying args.FieldType = it.QualifiedName(fieldType) if codegen.ContainsPointers(strucT) { @@ -262,7 +262,7 @@ func genView(buf *bytes.Buffer, it *codegen.ImportTracker, typ *types.Named, thi mElem := m.Elem() var template string switch u := mElem.(type) { - case *types.Struct, *types.Named: + case *types.Struct, *types.Named, *types.Alias: strucT := u args.FieldType = it.QualifiedName(fieldType) if codegen.ContainsPointers(strucT) { @@ -281,7 +281,7 @@ func genView(buf *bytes.Buffer, it *codegen.ImportTracker, typ *types.Named, thi slice := u sElem := slice.Elem() switch x := sElem.(type) { - case *types.Basic, *types.Named: + case *types.Basic, *types.Named, *types.Alias: sElem := it.QualifiedName(sElem) args.MapValueView = fmt.Sprintf("views.Slice[%v]", sElem) args.MapValueType = sElem @@ -292,7 +292,7 @@ func genView(buf *bytes.Buffer, it *codegen.ImportTracker, typ *types.Named, thi template = "unsupportedField" if _, isIface := pElem.Underlying().(*types.Interface); !isIface { switch pElem.(type) { - case *types.Struct, *types.Named: + case *types.Struct, *types.Named, *types.Alias: ptrType := it.QualifiedName(ptr) viewType := appendNameSuffix(it.QualifiedName(pElem), "View") args.MapFn = fmt.Sprintf("views.SliceOfViews[%v,%v](t)", ptrType, viewType) @@ -313,7 +313,7 @@ func genView(buf *bytes.Buffer, it *codegen.ImportTracker, typ *types.Named, thi pElem 
:= ptr.Elem() if _, isIface := pElem.Underlying().(*types.Interface); !isIface { switch pElem.(type) { - case *types.Struct, *types.Named: + case *types.Struct, *types.Named, *types.Alias: args.MapValueType = it.QualifiedName(ptr) args.MapValueView = appendNameSuffix(it.QualifiedName(pElem), "View") args.MapFn = "t.View()" @@ -422,7 +422,7 @@ func viewTypeForValueType(typ types.Type) types.Type { func viewTypeForContainerType(typ types.Type) (*types.Named, *types.Func) { // The container type should be an instantiated generic type, // with its first type parameter specifying the element type. - containerType, ok := typ.(*types.Named) + containerType, ok := codegen.NamedTypeOf(typ) if !ok || containerType.TypeArgs().Len() == 0 { return nil, nil } @@ -435,7 +435,7 @@ func viewTypeForContainerType(typ types.Type) (*types.Named, *types.Func) { if !ok { return nil, nil } - containerViewGenericType, ok := containerViewTypeObj.Type().(*types.Named) + containerViewGenericType, ok := codegen.NamedTypeOf(containerViewTypeObj.Type()) if !ok || containerViewGenericType.TypeParams().Len() != containerType.TypeArgs().Len()+1 { return nil, nil } @@ -448,7 +448,7 @@ func viewTypeForContainerType(typ types.Type) (*types.Named, *types.Func) { } // ...and add the element view type. // For that, we need to first determine the named elem type... - elemType, ok := baseType(containerType.TypeArgs().At(containerType.TypeArgs().Len() - 1)).(*types.Named) + elemType, ok := codegen.NamedTypeOf(baseType(containerType.TypeArgs().At(containerType.TypeArgs().Len() - 1))) if !ok { return nil, nil } @@ -473,7 +473,7 @@ func viewTypeForContainerType(typ types.Type) (*types.Named, *types.Func) { } // If elemType is an instantiated generic type, instantiate the elemViewType as well. 
if elemTypeArgs := elemType.TypeArgs(); elemTypeArgs != nil { - elemViewType = must.Get(types.Instantiate(nil, elemViewType, collectTypes(elemTypeArgs), false)).(*types.Named) + elemViewType, _ = codegen.NamedTypeOf(must.Get(types.Instantiate(nil, elemViewType, collectTypes(elemTypeArgs), false))) } // And finally set the elemViewType as the last type argument. containerViewTypeArgs[len(containerViewTypeArgs)-1] = elemViewType @@ -567,7 +567,7 @@ func main() { if cloneOnlyType[typeName] { continue } - typ, ok := namedTypes[typeName] + typ, ok := namedTypes[typeName].(*types.Named) if !ok { log.Fatalf("could not find type %s", typeName) } diff --git a/cmd/vnet/run-krazy.sh b/cmd/vnet/run-krazy.sh index 4202521e04f3e..a55da6b953a0e 100755 --- a/cmd/vnet/run-krazy.sh +++ b/cmd/vnet/run-krazy.sh @@ -2,12 +2,18 @@ echo "Type 'C-a c' to enter monitor; q to quit." +# If the USE_V6 environment is set to 1, set the nameserver explicitly to. +EXTRA_ARG="" +if [ "$USE_V6" = "1" ]; then + EXTRA_ARG="tta.nameserver=2411::411" +fi + set -eux qemu-system-x86_64 -M microvm,isa-serial=off \ -m 1G \ -nodefaults -no-user-config -nographic \ -kernel $HOME/src/github.com/tailscale/gokrazy-kernel/vmlinuz \ - -append "console=hvc0 root=PARTUUID=60c24cc1-f3f9-427a-8199-76baa2d60001/PARTNROFF=1 ro init=/gokrazy/init panic=10 oops=panic pci=off nousb tsc=unstable clocksource=hpet tailscale-tta=1 tailscaled.env=TS_DEBUG_RAW_DISCO=1" \ + -append "console=hvc0 root=PARTUUID=60c24cc1-f3f9-427a-8199-76baa2d60001/PARTNROFF=1 ro init=/gokrazy/init panic=10 oops=panic pci=off nousb tsc=unstable clocksource=hpet tailscale-tta=1 tailscaled.env=TS_DEBUG_RAW_DISCO=1 ${EXTRA_ARG}" \ -drive id=blk0,file=$HOME/src/tailscale.com/gokrazy/natlabapp.img,format=raw \ -device virtio-blk-device,drive=blk0 \ -device virtio-rng-device \ diff --git a/cmd/vnet/vnet-main.go b/cmd/vnet/vnet-main.go index 99eb022a8f56e..1eb4f65ef2070 100644 --- a/cmd/vnet/vnet-main.go +++ b/cmd/vnet/vnet-main.go @@ -22,11 +22,15 @@ 
import ( ) var ( - listen = flag.String("listen", "/tmp/qemu.sock", "path to listen on") - nat = flag.String("nat", "easy", "type of NAT to use") - nat2 = flag.String("nat2", "hard", "type of NAT to use for second network") - portmap = flag.Bool("portmap", false, "enable portmapping") - dgram = flag.Bool("dgram", false, "enable datagram mode; for use with macOS Hypervisor.Framework and VZFileHandleNetworkDeviceAttachment") + listen = flag.String("listen", "/tmp/qemu.sock", "path to listen on") + nat = flag.String("nat", "easy", "type of NAT to use") + nat2 = flag.String("nat2", "hard", "type of NAT to use for second network") + portmap = flag.Bool("portmap", false, "enable portmapping; requires --v4") + dgram = flag.Bool("dgram", false, "enable datagram mode; for use with macOS Hypervisor.Framework and VZFileHandleNetworkDeviceAttachment") + blend = flag.Bool("blend", true, "blend reality (controlplane.tailscale.com and DERPs) into the virtual network") + pcapFile = flag.String("pcap", "", "if non-empty, filename to write pcap") + v4 = flag.Bool("v4", true, "enable IPv4") + v6 = flag.Bool("v6", true, "enable IPv6") ) func main() { @@ -57,9 +61,20 @@ func main() { } var c vnet.Config - node1 := c.AddNode(c.AddNetwork("2.1.1.1", "192.168.1.1/24", vnet.NAT(*nat))) + c.SetPCAPFile(*pcapFile) + c.SetBlendReality(*blend) + + var net1opt = []any{vnet.NAT(*nat)} + if *v4 { + net1opt = append(net1opt, "2.1.1.1", "192.168.1.1/24") + } + if *v6 { + net1opt = append(net1opt, "2000:52::1/64") + } + + node1 := c.AddNode(c.AddNetwork(net1opt...)) c.AddNode(c.AddNetwork("2.2.2.2", "10.2.0.1/16", vnet.NAT(*nat2))) - if *portmap { + if *portmap && *v4 { node1.Network().AddService(vnet.NATPMP) } @@ -68,8 +83,10 @@ func main() { log.Fatalf("newServer: %v", err) } - if err := s.PopulateDERPMapIPs(); err != nil { - log.Printf("warning: ignoring failure to populate DERP map: %v", err) + if *blend { + if err := s.PopulateDERPMapIPs(); err != nil { + log.Printf("warning: ignoring failure 
to populate DERP map: %v", err) + } } s.WriteStartingBanner(os.Stdout) @@ -85,6 +102,7 @@ func main() { http.ListenAndServe(":8080", rp) }() go func() { + var last string getStatus := func() { ctx, cancel := context.WithTimeout(context.Background(), 2*time.Second) defer cancel() @@ -93,7 +111,10 @@ func main() { log.Printf("NodeStatus: %v", err) return } - log.Printf("NodeStatus: %v", logger.AsJSON(st)) + if st.BackendState != last { + last = st.BackendState + log.Printf("NodeStatus: %v", logger.AsJSON(st)) + } } for { time.Sleep(5 * time.Second) diff --git a/control/controlclient/map.go b/control/controlclient/map.go index 436808995bf3e..7879122229e37 100644 --- a/control/controlclient/map.go +++ b/control/controlclient/map.go @@ -19,7 +19,6 @@ import ( "sync" "time" - xmaps "golang.org/x/exp/maps" "tailscale.com/control/controlknobs" "tailscale.com/envknob" "tailscale.com/tailcfg" @@ -313,10 +312,8 @@ func (ms *mapSession) updateStateFromResponse(resp *tailcfg.MapResponse) { } } if packetFilterChanged { - keys := xmaps.Keys(ms.namedPacketFilters) - sort.Strings(keys) var concat []tailcfg.FilterRule - for _, v := range keys { + for _, v := range slices.Sorted(maps.Keys(ms.namedPacketFilters)) { concat = ms.namedPacketFilters[v].AppendTo(concat) } ms.lastPacketFilterRules = views.SliceOf(concat) diff --git a/control/controlclient/sign_supported.go b/control/controlclient/sign_supported.go index 3f696dbcd38e8..0e3dd038e4ed7 100644 --- a/control/controlclient/sign_supported.go +++ b/control/controlclient/sign_supported.go @@ -38,7 +38,7 @@ var getMachineCertificateSubjectOnce struct { // Example: "CN=Tailscale Inc Test Root CA,OU=Tailscale Inc Test Certificate Authority,O=Tailscale Inc,ST=ON,C=CA" func getMachineCertificateSubject() string { getMachineCertificateSubjectOnce.Do(func() { - getMachineCertificateSubjectOnce.v, _ = syspolicy.GetString("MachineCertificateSubject", "") + getMachineCertificateSubjectOnce.v, _ = 
syspolicy.GetString(syspolicy.MachineCertificateSubject, "") }) return getMachineCertificateSubjectOnce.v diff --git a/derp/derp_server.go b/derp/derp_server.go index c033e42e7856e..f38ae66211f85 100644 --- a/derp/derp_server.go +++ b/derp/derp_server.go @@ -47,6 +47,7 @@ import ( "tailscale.com/types/key" "tailscale.com/types/logger" "tailscale.com/util/set" + "tailscale.com/util/slicesx" "tailscale.com/version" ) @@ -155,7 +156,7 @@ type Server struct { mu sync.Mutex closed bool netConns map[Conn]chan struct{} // chan is closed when conn closes - clients map[key.NodePublic]clientSet + clients map[key.NodePublic]*clientSet watchers set.Set[*sclient] // mesh peers // clientsMesh tracks all clients in the cluster, both locally // and to mesh peers. If the value is nil, that means the @@ -177,8 +178,6 @@ type Server struct { // clientSet represents 1 or more *sclients. // -// The two implementations are singleClient and *dupClientSet. -// // In the common case, client should only have one connection to the // DERP server for a given key. When they're connected multiple times, // we record their set of connections in dupClientSet and keep their @@ -194,26 +193,49 @@ type Server struct { // "health_error" frame to them that'll communicate to the end users // that they cloned a device key, and we'll also surface it in the // admin panel, etc. -type clientSet interface { - // ActiveClient returns the most recently added client to - // the set, as long as it hasn't been disabled, in which - // case it returns nil. - ActiveClient() *sclient - - // Len returns the number of clients in the set. - Len() int - - // ForeachClient calls f for each client in the set. - ForeachClient(f func(*sclient)) +type clientSet struct { + // activeClient holds the currently active connection for the set. It's nil + // if there are no connections or the connection is disabled. 
+ // + // A pointer to a clientSet can be held by peers for long periods of time + // without holding Server.mu to avoid mutex contention on Server.mu, only + // re-acquiring the mutex and checking the clients map if activeClient is + // nil. + activeClient atomic.Pointer[sclient] + + // dup is non-nil if there are multiple connections for the + // public key. It's nil in the common case of only one + // client being connected. + // + // dup is guarded by Server.mu. + dup *dupClientSet } -// singleClient is a clientSet of a single connection. -// This is the common case. -type singleClient struct{ c *sclient } +// Len returns the number of clients in s, which can be +// 0, 1 (the common case), or more (for buggy or transiently +// reconnecting clients). +func (s *clientSet) Len() int { + if s.dup != nil { + return len(s.dup.set) + } + if s.activeClient.Load() != nil { + return 1 + } + return 0 +} -func (s singleClient) ActiveClient() *sclient { return s.c } -func (s singleClient) Len() int { return 1 } -func (s singleClient) ForeachClient(f func(*sclient)) { f(s.c) } +// ForeachClient calls f for each client in the set. +// +// The Server.mu must be held. +func (s *clientSet) ForeachClient(f func(*sclient)) { + if s.dup != nil { + for c := range s.dup.set { + f(c) + } + } else if c := s.activeClient.Load(); c != nil { + f(c) + } +} // A dupClientSet is a clientSet of more than 1 connection. // @@ -224,11 +246,12 @@ func (s singleClient) ForeachClient(f func(*sclient)) { f(s.c) } // // All fields are guarded by Server.mu. type dupClientSet struct { - // set is the set of connected clients for sclient.key. + // set is the set of connected clients for sclient.key, + // including the clientSet's active one. set set.Set[*sclient] // last is the most recent addition to set, or nil if the most - // recent one has since disconnected and nobody else has send + // recent one has since disconnected and nobody else has sent // data since. 
last *sclient @@ -239,17 +262,15 @@ type dupClientSet struct { sendHistory []*sclient } -func (s *dupClientSet) ActiveClient() *sclient { - if s.last != nil && !s.last.isDisabled.Load() { - return s.last +func (s *clientSet) pickActiveClient() *sclient { + d := s.dup + if d == nil { + return s.activeClient.Load() } - return nil -} -func (s *dupClientSet) Len() int { return len(s.set) } -func (s *dupClientSet) ForeachClient(f func(*sclient)) { - for c := range s.set { - f(c) + if d.last != nil && !d.last.isDisabled.Load() { + return d.last } + return nil } // removeClient removes c from s and reports whether it was in s @@ -317,7 +338,7 @@ func NewServer(privateKey key.NodePrivate, logf logger.Logf) *Server { packetsRecvByKind: metrics.LabelMap{Label: "kind"}, packetsDroppedReason: metrics.LabelMap{Label: "reason"}, packetsDroppedType: metrics.LabelMap{Label: "type"}, - clients: map[key.NodePublic]clientSet{}, + clients: map[key.NodePublic]*clientSet{}, clientsMesh: map[key.NodePublic]PacketForwarder{}, netConns: map[Conn]chan struct{}{}, memSys0: ms.Sys, @@ -444,7 +465,7 @@ func (s *Server) IsClientConnectedForTest(k key.NodePublic) bool { if !ok { return false } - return x.ActiveClient() != nil + return x.activeClient.Load() != nil } // Accept adds a new connection to the server and serves it. @@ -534,37 +555,43 @@ func (s *Server) registerClient(c *sclient) { s.mu.Lock() defer s.mu.Unlock() - curSet := s.clients[c.key] - switch curSet := curSet.(type) { - case nil: - s.clients[c.key] = singleClient{c} + cs, ok := s.clients[c.key] + if !ok { c.debugLogf("register single client") - case singleClient: + cs = &clientSet{} + s.clients[c.key] = cs + } + was := cs.activeClient.Load() + if was == nil { + // Common case. 
+ } else { + was.isDup.Store(true) + c.isDup.Store(true) + } + + dup := cs.dup + if dup == nil && was != nil { s.dupClientKeys.Add(1) s.dupClientConns.Add(2) // both old and new count s.dupClientConnTotal.Add(1) - old := curSet.ActiveClient() - old.isDup.Store(true) - c.isDup.Store(true) - s.clients[c.key] = &dupClientSet{ - last: c, - set: set.Set[*sclient]{ - old: struct{}{}, - c: struct{}{}, - }, - sendHistory: []*sclient{old}, + dup = &dupClientSet{ + set: set.Of(c, was), + last: c, + sendHistory: []*sclient{was}, } + cs.dup = dup c.debugLogf("register duplicate client") - case *dupClientSet: + } else if dup != nil { s.dupClientConns.Add(1) // the gauge s.dupClientConnTotal.Add(1) // the counter - c.isDup.Store(true) - curSet.set.Add(c) - curSet.last = c - curSet.sendHistory = append(curSet.sendHistory, c) + dup.set.Add(c) + dup.last = c + dup.sendHistory = append(dup.sendHistory, c) c.debugLogf("register another duplicate client") } + cs.activeClient.Store(c) + if _, ok := s.clientsMesh[c.key]; !ok { s.clientsMesh[c.key] = nil // just for varz of total users in cluster } @@ -595,30 +622,47 @@ func (s *Server) unregisterClient(c *sclient) { s.mu.Lock() defer s.mu.Unlock() - set := s.clients[c.key] - switch set := set.(type) { - case nil: + set, ok := s.clients[c.key] + if !ok { c.logf("[unexpected]; clients map is empty") - case singleClient: + return + } + + dup := set.dup + if dup == nil { + // The common case. 
+ cur := set.activeClient.Load() + if cur == nil { + c.logf("[unexpected]; active client is nil") + return + } + if cur != c { + c.logf("[unexpected]; active client is not c") + return + } c.debugLogf("removed connection") + set.activeClient.Store(nil) delete(s.clients, c.key) if v, ok := s.clientsMesh[c.key]; ok && v == nil { delete(s.clientsMesh, c.key) s.notePeerGoneFromRegionLocked(c.key) } s.broadcastPeerStateChangeLocked(c.key, netip.AddrPort{}, 0, false) - case *dupClientSet: + } else { c.debugLogf("removed duplicate client") - if set.removeClient(c) { + if dup.removeClient(c) { s.dupClientConns.Add(-1) } else { c.logf("[unexpected]; dup client set didn't shrink") } - if set.Len() == 1 { + if dup.set.Len() == 1 { + // If we drop down to one connection, demote it down + // to a regular single client (a nil dup set). + set.dup = nil s.dupClientConns.Add(-1) // again; for the original one's s.dupClientKeys.Add(-1) var remain *sclient - for remain = range set.set { + for remain = range dup.set { break } if remain == nil { @@ -626,7 +670,10 @@ func (s *Server) unregisterClient(c *sclient) { } remain.isDisabled.Store(false) remain.isDup.Store(false) - s.clients[c.key] = singleClient{remain} + set.activeClient.Store(remain) + } else { + // Still a duplicate. Pick a winner. + set.activeClient.Store(set.pickActiveClient()) } } @@ -697,7 +744,7 @@ func (s *Server) addWatcher(c *sclient) { // Queue messages for each already-connected client. 
for peer, clientSet := range s.clients { - ac := clientSet.ActiveClient() + ac := clientSet.activeClient.Load() if ac == nil { continue } @@ -955,7 +1002,7 @@ func (c *sclient) handleFrameForwardPacket(ft frameType, fl uint32) error { s.mu.Lock() if set, ok := s.clients[dstKey]; ok { dstLen = set.Len() - dst = set.ActiveClient() + dst = set.activeClient.Load() } if dst != nil { s.notePeerSendLocked(srcKey, dst) @@ -1010,7 +1057,7 @@ func (c *sclient) handleFrameSendPacket(ft frameType, fl uint32) error { s.mu.Lock() if set, ok := s.clients[dstKey]; ok { dstLen = set.Len() - dst = set.ActiveClient() + dst = set.activeClient.Load() } if dst != nil { s.notePeerSendLocked(c.key, dst) @@ -1256,22 +1303,28 @@ func (s *Server) noteClientActivity(c *sclient) { s.mu.Lock() defer s.mu.Unlock() - ds, ok := s.clients[c.key].(*dupClientSet) + cs, ok := s.clients[c.key] if !ok { + return + } + dup := cs.dup + if dup == nil { // It became unduped in between the isDup fast path check above // and the mutex check. Nothing to do. return } if s.dupPolicy == lastWriterIsActive { - ds.last = c - } else if ds.last == nil { + dup.last = c + cs.activeClient.Store(c) + } else if dup.last == nil { // If we didn't have a primary, let the current // speaker be the primary. - ds.last = c + dup.last = c + cs.activeClient.Store(c) } - if sh := ds.sendHistory; len(sh) != 0 && sh[len(sh)-1] == c { + if slicesx.LastEqual(dup.sendHistory, c) { // The client c was the last client to make activity // in this set and it was already recorded. Nothing to // do. @@ -1281,10 +1334,13 @@ func (s *Server) noteClientActivity(c *sclient) { // If we saw this connection send previously, then consider // the group fighting and disable them all. 
if s.dupPolicy == disableFighters { - for _, prior := range ds.sendHistory { + for _, prior := range dup.sendHistory { if prior == c { - ds.ForeachClient(func(c *sclient) { + cs.ForeachClient(func(c *sclient) { c.isDisabled.Store(true) + if cs.activeClient.Load() == c { + cs.activeClient.Store(nil) + } }) break } @@ -1292,7 +1348,7 @@ func (s *Server) noteClientActivity(c *sclient) { } // Append this client to the list of clients who spoke last. - ds.sendHistory = append(ds.sendHistory, c) + dup.sendHistory = append(dup.sendHistory, c) } type serverInfo struct { @@ -1407,6 +1463,11 @@ func (s *Server) recvForwardPacket(br *bufio.Reader, frameLen uint32) (srcKey, d // sclient is a client connection to the server. // +// A node (a wireguard public key) can be connected multiple times to a DERP server +// and thus have multiple sclient instances. An sclient represents +// only one of these possibly multiple connections. See clientSet for the +// type that represents the set of all connections for a given key. +// // (The "s" prefix is to more explicitly distinguish it from Client in derp_client.go) type sclient struct { // Static after construction. 
diff --git a/derp/derp_test.go b/derp/derp_test.go index dde2054e65fcd..72de265529ad1 100644 --- a/derp/derp_test.go +++ b/derp/derp_test.go @@ -731,7 +731,7 @@ func pubAll(b byte) (ret key.NodePublic) { func TestForwarderRegistration(t *testing.T) { s := &Server{ - clients: make(map[key.NodePublic]clientSet), + clients: make(map[key.NodePublic]*clientSet), clientsMesh: map[key.NodePublic]PacketForwarder{}, } want := func(want map[key.NodePublic]PacketForwarder) { @@ -746,6 +746,11 @@ func TestForwarderRegistration(t *testing.T) { t.Errorf("counter = %v; want %v", got, want) } } + singleClient := func(c *sclient) *clientSet { + cs := &clientSet{} + cs.activeClient.Store(c) + return cs + } u1 := pubAll(1) u2 := pubAll(2) @@ -808,7 +813,7 @@ func TestForwarderRegistration(t *testing.T) { key: u1, logf: logger.Discard, } - s.clients[u1] = singleClient{u1c} + s.clients[u1] = singleClient(u1c) s.RemovePacketForwarder(u1, testFwd(100)) want(map[key.NodePublic]PacketForwarder{ u1: nil, @@ -828,7 +833,7 @@ func TestForwarderRegistration(t *testing.T) { // Now pretend u1 was already connected locally (so clientsMesh[u1] is nil), and then we heard // that they're also connected to a peer of ours. That shouldn't transition the forwarder // from nil to the new one, not a multiForwarder. 
- s.clients[u1] = singleClient{u1c} + s.clients[u1] = singleClient(u1c) s.clientsMesh[u1] = nil want(map[key.NodePublic]PacketForwarder{ u1: nil, @@ -860,7 +865,7 @@ func TestMultiForwarder(t *testing.T) { ctx, cancel := context.WithCancel(context.Background()) s := &Server{ - clients: make(map[key.NodePublic]clientSet), + clients: make(map[key.NodePublic]*clientSet), clientsMesh: map[key.NodePublic]PacketForwarder{}, } u := pubAll(1) @@ -1078,43 +1083,48 @@ func TestServerDupClients(t *testing.T) { } wantSingleClient := func(t *testing.T, want *sclient) { t.Helper() - switch s := s.clients[want.key].(type) { - case singleClient: - if s.c != want { - t.Error("wrong single client") - return - } - if want.isDup.Load() { + got, ok := s.clients[want.key] + if !ok { + t.Error("no clients for key") + return + } + if got.dup != nil { + t.Errorf("unexpected dup set for single client") + } + cur := got.activeClient.Load() + if cur != want { + t.Errorf("active client = %q; want %q", clientName[cur], clientName[want]) + } + if cur != nil { + if cur.isDup.Load() { t.Errorf("unexpected isDup on singleClient") } - if want.isDisabled.Load() { + if cur.isDisabled.Load() { t.Errorf("unexpected isDisabled on singleClient") } - case nil: - t.Error("no clients for key") - case *dupClientSet: - t.Error("unexpected multiple clients for key") } } wantNoClient := func(t *testing.T) { t.Helper() - switch s := s.clients[clientPub].(type) { - case nil: - // Good. 
+ _, ok := s.clients[clientPub] + if !ok { + // Good return - default: - t.Errorf("got %T; want empty", s) } + t.Errorf("got client; want empty") } wantDupSet := func(t *testing.T) *dupClientSet { t.Helper() - switch s := s.clients[clientPub].(type) { - case *dupClientSet: - return s - default: - t.Fatalf("wanted dup set; got %T", s) + cs, ok := s.clients[clientPub] + if !ok { + t.Fatal("no set for key; want dup set") return nil } + if cs.dup != nil { + return cs.dup + } + t.Fatalf("no dup set for key; want dup set") + return nil } wantActive := func(t *testing.T, want *sclient) { t.Helper() @@ -1123,7 +1133,7 @@ func TestServerDupClients(t *testing.T) { t.Error("no set for key") return } - got := set.ActiveClient() + got := set.activeClient.Load() if got != want { t.Errorf("active client = %q; want %q", clientName[got], clientName[want]) } diff --git a/drive/driveimpl/remote_impl.go b/drive/driveimpl/remote_impl.go index debdf8a367f41..7fd5d3325beb0 100644 --- a/drive/driveimpl/remote_impl.go +++ b/drive/driveimpl/remote_impl.go @@ -333,11 +333,15 @@ func (s *userServer) run() error { args = append(args, s.Name, s.Path) } var cmd *exec.Cmd - if s.canSudo() { + if su := s.canSU(); su != "" { s.logf("starting taildrive file server as user %q", s.username) - allArgs := []string{"-n", "-u", s.username, s.executable} - allArgs = append(allArgs, args...) - cmd = exec.Command("sudo", allArgs...) + // Quote and escape arguments. Use single quotes to prevent shell substitutions. + for i, arg := range args { + args[i] = "'" + strings.ReplaceAll(arg, "'", "'\"'\"'") + "'" + } + cmdString := fmt.Sprintf("%s %s", s.executable, strings.Join(args, " ")) + allArgs := []string{s.username, "-c", cmdString} + cmd = exec.Command(su, allArgs...) 
} else { // If we were root, we should have been able to sudo as a specific // user, but let's check just to make sure, since we never want to @@ -405,16 +409,28 @@ var writeMethods = map[string]bool{ "DELETE": true, } -// canSudo checks wether we can sudo -u the configured executable as the -// configured user by attempting to call the executable with the '-h' flag to -// print help. -func (s *userServer) canSudo() bool { - ctx, cancel := context.WithTimeout(context.Background(), 3*time.Second) - defer cancel() - if err := exec.CommandContext(ctx, "sudo", "-n", "-u", s.username, s.executable, "-h").Run(); err != nil { - return false +// canSU checks whether the current process can run su with the right username. +// If su can be run, this returns the path to the su command. +// If not, this returns the empty string "". +func (s *userServer) canSU() string { + su, err := exec.LookPath("su") + if err != nil { + s.logf("can't find su command: %v", err) + return "" + } + + // First try to execute su -c true to make sure we can su. 
+ err = exec.Command( + su, + s.username, + "-c", "true", + ).Run() + if err != nil { + s.logf("su check failed: %s", err) + return "" } - return true + + return su } // assertNotRoot returns an error if the current user has UID 0 or if we cannot diff --git a/envknob/envknob.go b/envknob/envknob.go index 8873f00773692..f1925ccf449b2 100644 --- a/envknob/envknob.go +++ b/envknob/envknob.go @@ -20,16 +20,18 @@ import ( "fmt" "io" "log" + "maps" "os" "path/filepath" "runtime" - "sort" + "slices" "strconv" "strings" "sync" "sync/atomic" "time" + "tailscale.com/kube/kubetypes" "tailscale.com/types/opt" "tailscale.com/version" "tailscale.com/version/distro" @@ -76,12 +78,7 @@ func LogCurrent(logf logf) { mu.Lock() defer mu.Unlock() - list := make([]string, 0, len(set)) - for k := range set { - list = append(list, k) - } - sort.Strings(list) - for _, k := range list { + for _, k := range slices.Sorted(maps.Keys(set)) { logf("envknob: %s=%q", k, set[k]) } } @@ -406,6 +403,19 @@ func SSHIgnoreTailnetPolicy() bool { return Bool("TS_DEBUG_SSH_IGNORE_TAILNET_PO // TKASkipSignatureCheck reports whether to skip node-key signature checking for development. func TKASkipSignatureCheck() bool { return Bool("TS_UNSAFE_SKIP_NKS_VERIFICATION") } +// App returns the tailscale app type of this instance, if set via +// TS_INTERNAL_APP env var. TS_INTERNAL_APP can be used to set app type for +// components that wrap tailscaled, such as containerboot. App type is intended +// to only be used to set known predefined app types, such as Tailscale +// Kubernetes Operator components. +func App() string { + a := os.Getenv("TS_INTERNAL_APP") + if a == kubetypes.AppConnector || a == kubetypes.AppEgressProxy || a == kubetypes.AppIngressProxy || a == kubetypes.AppIngressResource { + return a + } + return "" +} + // CrashOnUnexpected reports whether the Tailscale client should panic // on unexpected conditions. If TS_DEBUG_CRASH_ON_UNEXPECTED is set, that's // used. 
Otherwise the default value is true for unstable builds. diff --git a/flake.lock b/flake.lock index 5562579bf4ac7..8c4aa7dfc2c73 100644 --- a/flake.lock +++ b/flake.lock @@ -21,11 +21,11 @@ "systems": "systems" }, "locked": { - "lastModified": 1705309234, - "narHash": "sha256-uNRRNRKmJyCRC/8y1RqBkqWBLM034y4qN7EprSdmgyA=", + "lastModified": 1710146030, + "narHash": "sha256-SZ5L6eA7HJ/nmkzGG7/ISclqe6oZdOZTNoesiInkXPQ=", "owner": "numtide", "repo": "flake-utils", - "rev": "1ef2e671c3b0c19053962c07dbda38332dcebf26", + "rev": "b1d9ab70662946ef0850d488da1c9019f3a9752a", "type": "github" }, "original": { @@ -36,11 +36,11 @@ }, "nixpkgs": { "locked": { - "lastModified": 1707619277, - "narHash": "sha256-vKnYD5GMQbNQyyQm4wRlqi+5n0/F1hnvqSQgaBy4BqY=", + "lastModified": 1724748588, + "narHash": "sha256-NlpGA4+AIf1dKNq76ps90rxowlFXUsV9x7vK/mN37JM=", "owner": "NixOS", "repo": "nixpkgs", - "rev": "f3a93440fbfff8a74350f4791332a19282cc6dc8", + "rev": "a6292e34000dc93d43bccf78338770c1c5ec8a99", "type": "github" }, "original": { diff --git a/flake.nix b/flake.nix index b6c34e9c0c666..e6c6b1eac092b 100644 --- a/flake.nix +++ b/flake.nix @@ -40,7 +40,12 @@ }; }; - outputs = { self, nixpkgs, flake-utils, flake-compat }: let + outputs = { + self, + nixpkgs, + flake-utils, + flake-compat, + }: let # tailscaleRev is the git commit at which this flake was imported, # or the empty string when building from a local checkout of the # tailscale repo. @@ -62,36 +67,37 @@ # So really, this flake is for tailscale devs to dogfood with, if # you're an end user you should be prepared for this flake to not # build periodically. 
- tailscale = pkgs: pkgs.buildGo122Module rec { - name = "tailscale"; + tailscale = pkgs: + pkgs.buildGo123Module rec { + name = "tailscale"; - src = ./.; - vendorHash = pkgs.lib.fileContents ./go.mod.sri; - nativeBuildInputs = pkgs.lib.optionals pkgs.stdenv.isLinux [ pkgs.makeWrapper ]; - ldflags = ["-X tailscale.com/version.gitCommitStamp=${tailscaleRev}"]; - CGO_ENABLED = 0; - subPackages = [ "cmd/tailscale" "cmd/tailscaled" ]; - doCheck = false; + src = ./.; + vendorHash = pkgs.lib.fileContents ./go.mod.sri; + nativeBuildInputs = pkgs.lib.optionals pkgs.stdenv.isLinux [pkgs.makeWrapper]; + ldflags = ["-X tailscale.com/version.gitCommitStamp=${tailscaleRev}"]; + CGO_ENABLED = 0; + subPackages = ["cmd/tailscale" "cmd/tailscaled"]; + doCheck = false; - # NOTE: We strip the ${PORT} and $FLAGS because they are unset in the - # environment and cause issues (specifically the unset PORT). At some - # point, there should be a NixOS module that allows configuration of these - # things, but for now, we hardcode the default of port 41641 (taken from - # ./cmd/tailscaled/tailscaled.defaults). - postInstall = pkgs.lib.optionalString pkgs.stdenv.isLinux '' - wrapProgram $out/bin/tailscaled --prefix PATH : ${pkgs.lib.makeBinPath [ pkgs.iproute2 pkgs.iptables pkgs.getent pkgs.shadow ]} - wrapProgram $out/bin/tailscale --suffix PATH : ${pkgs.lib.makeBinPath [ pkgs.procps ]} + # NOTE: We strip the ${PORT} and $FLAGS because they are unset in the + # environment and cause issues (specifically the unset PORT). At some + # point, there should be a NixOS module that allows configuration of these + # things, but for now, we hardcode the default of port 41641 (taken from + # ./cmd/tailscaled/tailscaled.defaults). 
+ postInstall = pkgs.lib.optionalString pkgs.stdenv.isLinux '' + wrapProgram $out/bin/tailscaled --prefix PATH : ${pkgs.lib.makeBinPath [pkgs.iproute2 pkgs.iptables pkgs.getent pkgs.shadow]} + wrapProgram $out/bin/tailscale --suffix PATH : ${pkgs.lib.makeBinPath [pkgs.procps]} - sed -i \ - -e "s#/usr/sbin#$out/bin#" \ - -e "/^EnvironmentFile/d" \ - -e 's/''${PORT}/41641/' \ - -e 's/$FLAGS//' \ - ./cmd/tailscaled/tailscaled.service + sed -i \ + -e "s#/usr/sbin#$out/bin#" \ + -e "/^EnvironmentFile/d" \ + -e 's/''${PORT}/41641/' \ + -e 's/$FLAGS//' \ + ./cmd/tailscaled/tailscaled.service - install -D -m0444 -t $out/lib/systemd/system ./cmd/tailscaled/tailscaled.service - ''; - }; + install -D -m0444 -t $out/lib/systemd/system ./cmd/tailscaled/tailscaled.service + ''; + }; # This whole blob makes the tailscale package available for all # OS/CPU combos that nix supports, as well as a dev shell so that @@ -112,7 +118,7 @@ gotools graphviz perl - go_1_22 + go_1_23 yarn ]; }; @@ -120,4 +126,4 @@ in flake-utils.lib.eachDefaultSystem (system: flakeForSystem nixpkgs system); } -# nix-direnv cache busting line: sha256-M5e5dE1gGW3ly94r3SxCsBmVwbBmhVtaVDW691vxG/8= +# nix-direnv cache busting line: sha256-xO1DuLWi6/lpA9ubA2ZYVJM+CkVNA5IaVGZxX9my0j0= diff --git a/go.mod b/go.mod index 7002c3410a6ce..8c46faa6c1d44 100644 --- a/go.mod +++ b/go.mod @@ -1,13 +1,15 @@ module tailscale.com -go 1.22.0 +go 1.23 require ( filippo.io/mkcert v1.4.4 + fyne.io/systray v1.11.0 github.com/akutz/memconn v0.1.0 github.com/alexbrainman/sspi v0.0.0-20231016080023-1a75b4708caa github.com/andybalholm/brotli v1.1.0 github.com/anmitsu/go-shlex v0.0.0-20200514113438-38f4b401e2be + github.com/atotto/clipboard v0.1.4 github.com/aws/aws-sdk-go-v2 v1.24.1 github.com/aws/aws-sdk-go-v2/config v1.26.5 github.com/aws/aws-sdk-go-v2/feature/s3/manager v1.11.64 @@ -28,6 +30,7 @@ require ( github.com/dsnet/try v0.0.3 github.com/elastic/crd-ref-docs v0.0.12 github.com/evanw/esbuild v0.19.11 + github.com/fogleman/gg 
v1.3.0 github.com/frankban/quicktest v1.14.6 github.com/fxamacker/cbor/v2 v2.6.0 github.com/gaissmai/bart v0.11.1 @@ -45,7 +48,7 @@ require ( github.com/google/uuid v1.6.0 github.com/goreleaser/nfpm/v2 v2.33.1 github.com/hdevalence/ed25519consensus v0.2.0 - github.com/illarion/gonotify v1.0.1 + github.com/illarion/gonotify/v2 v2.0.3 github.com/inetaf/tcpproxy v0.0.0-20240214030015-3ce58045626c github.com/insomniacslk/dhcp v0.0.0-20231206064809-8c70d406f6d2 github.com/jellydator/ttlcache/v3 v3.1.0 @@ -78,17 +81,16 @@ require ( github.com/tailscale/goupnp v1.0.1-0.20210804011211-c64d0f06ea05 github.com/tailscale/hujson v0.0.0-20221223112325-20486734a56a github.com/tailscale/mkctr v0.0.0-20240628074852-17ca944da6ba - github.com/tailscale/netlink v1.1.1-0.20211101221916-cabfb018fe85 + github.com/tailscale/netlink v1.1.1-0.20240822203006-4d49adab4de7 github.com/tailscale/peercred v0.0.0-20240214030740-b535050b2aa4 github.com/tailscale/web-client-prebuilt v0.0.0-20240226180453-5db17b287bf1 github.com/tailscale/wf v0.0.0-20240214030419-6fbb0a674ee6 - github.com/tailscale/wireguard-go v0.0.0-20240731203015-71393c576b98 + github.com/tailscale/wireguard-go v0.0.0-20240905161824-799c1978fafc github.com/tailscale/xnet v0.0.0-20240729143630-8497ac4dab2e github.com/tc-hib/winres v0.2.1 github.com/tcnksm/go-httpstat v0.2.0 github.com/toqueteos/webbrowser v1.2.0 github.com/u-root/u-root v0.12.0 - github.com/vishvananda/netlink v1.2.1-beta.2 github.com/vishvananda/netns v0.0.4 go.uber.org/zap v1.27.0 go4.org/mem v0.0.0-20220726221520-4f986261bf13 @@ -107,7 +109,7 @@ require ( golang.zx2c4.com/wireguard/windows v0.5.3 gopkg.in/square/go-jose.v2 v2.6.0 gvisor.dev/gvisor v0.0.0-20240722211153-64c016c92987 - honnef.co/go/tools v0.4.6 + honnef.co/go/tools v0.5.1 k8s.io/api v0.30.3 k8s.io/apimachinery v0.30.3 k8s.io/apiserver v0.30.3 @@ -131,6 +133,7 @@ require ( github.com/go-logr/stdr v1.2.2 // indirect github.com/gobuffalo/flect v1.0.2 // indirect github.com/goccy/go-yaml v1.12.0 // 
indirect + github.com/golang/freetype v0.0.0-20170609003504-e2365dfdc4a0 // indirect github.com/google/gnostic-models v0.6.9-0.20230804172637-c7be7c783f49 // indirect github.com/google/pprof v0.0.0-20240409012703-83162a5b38cd // indirect github.com/gorilla/securecookie v1.1.2 // indirect @@ -151,7 +154,7 @@ require ( github.com/AlekSi/pointer v1.2.0 // indirect github.com/Antonboom/errname v0.1.9 // indirect github.com/Antonboom/nilnil v0.1.4 // indirect - github.com/BurntSushi/toml v1.3.2 // indirect + github.com/BurntSushi/toml v1.4.1-0.20240526193622-a339e1f7089c // indirect github.com/Djarvur/go-err113 v0.1.0 // indirect github.com/GaijinEntertainment/go-exhaustruct/v2 v2.3.0 // indirect github.com/Masterminds/goutils v1.1.1 // indirect diff --git a/go.mod.sri b/go.mod.sri index a44020130921e..4abb3c5165d2f 100644 --- a/go.mod.sri +++ b/go.mod.sri @@ -1 +1 @@ -sha256-M5e5dE1gGW3ly94r3SxCsBmVwbBmhVtaVDW691vxG/8= +sha256-xO1DuLWi6/lpA9ubA2ZYVJM+CkVNA5IaVGZxX9my0j0= diff --git a/go.sum b/go.sum index dbdb7168a9739..94ea0ff912694 100644 --- a/go.sum +++ b/go.sum @@ -46,6 +46,8 @@ filippo.io/edwards25519 v1.1.0 h1:FNf4tywRC1HmFuKW5xopWpigGjJKiJSV0Cqo0cJWDaA= filippo.io/edwards25519 v1.1.0/go.mod h1:BxyFTGdWcka3PhytdK4V28tE5sGfRvvvRV7EaN4VDT4= filippo.io/mkcert v1.4.4 h1:8eVbbwfVlaqUM7OwuftKc2nuYOoTDQWqsoXmzoXZdbc= filippo.io/mkcert v1.4.4/go.mod h1:VyvOchVuAye3BoUsPUOOofKygVwLV2KQMVFJNRq+1dA= +fyne.io/systray v1.11.0 h1:D9HISlxSkx+jHSniMBR6fCFOUjk1x/OOOJLa9lJYAKg= +fyne.io/systray v1.11.0/go.mod h1:RVwqP9nYMo7h5zViCBHri2FgjXF7H2cub7MAq4NSoLs= github.com/Abirdcfly/dupword v0.0.11 h1:z6v8rMETchZXUIuHxYNmlUAuKuB21PeaSymTed16wgU= github.com/Abirdcfly/dupword v0.0.11/go.mod h1:wH8mVGuf3CP5fsBTkfWwwwKTjDnVVCxtU8d8rgeVYXA= github.com/AlekSi/pointer v1.2.0 h1:glcy/gc4h8HnG2Z3ZECSzZ1IX1x2JxRVuDzaJwQE0+w= @@ -57,8 +59,8 @@ github.com/Antonboom/nilnil v0.1.4/go.mod h1:iOov/7gRcXkeEU+EMGpBu2ORih3iyVEiWje github.com/Azure/go-ansiterm v0.0.0-20230124172434-306776ec8161 
h1:L/gRVlceqvL25UVaW/CKtUDjefjrs0SPonmDGUVOYP0= github.com/Azure/go-ansiterm v0.0.0-20230124172434-306776ec8161/go.mod h1:xomTg63KZ2rFqZQzSB4Vz2SUXa1BpHTVz9L5PTmPC4E= github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU= -github.com/BurntSushi/toml v1.3.2 h1:o7IhLm0Msx3BaB+n3Ag7L8EVlByGnpq14C4YWiu/gL8= -github.com/BurntSushi/toml v1.3.2/go.mod h1:CxXYINrC8qIiEnFrOxCa7Jy5BFHlXnUU2pbicEuybxQ= +github.com/BurntSushi/toml v1.4.1-0.20240526193622-a339e1f7089c h1:pxW6RcqyfI9/kWtOwnv/G+AzdKuy2ZrqINhenH4HyNs= +github.com/BurntSushi/toml v1.4.1-0.20240526193622-a339e1f7089c/go.mod h1:ukJfTF/6rtPPRCnwkur4qwRxa8vTRFBF0uk2lLoLwho= github.com/BurntSushi/xgb v0.0.0-20160522181843-27f122750802/go.mod h1:IVnqGOEym/WlBOVXweHU+Q+/VP0lqqI8lqeDx9IjBqo= github.com/DataDog/zstd v1.4.5 h1:EndNeuB0l9syBZhut0wns3gV1hL8zX8LIu6ZiVHWLIQ= github.com/DataDog/zstd v1.4.5/go.mod h1:1jcaCB/ufaK+sKp1NBhlGmpz41jOoPQ35bpF36t7BBo= @@ -112,6 +114,8 @@ github.com/ashanbrown/forbidigo v1.5.1 h1:WXhzLjOlnuDYPYQo/eFlcFMi8X/kLfvWLYu6CS github.com/ashanbrown/forbidigo v1.5.1/go.mod h1:Y8j9jy9ZYAEHXdu723cUlraTqbzjKF1MUyfOKL+AjcU= github.com/ashanbrown/makezero v1.1.1 h1:iCQ87C0V0vSyO+M9E/FZYbu65auqH0lnsOkf5FcB28s= github.com/ashanbrown/makezero v1.1.1/go.mod h1:i1bJLCRSCHOcOa9Y6MyF2FTfMZMFdHvxKHxgO5Z1axI= +github.com/atotto/clipboard v0.1.4 h1:EH0zSVneZPSuFR11BlR9YppQTVDbh5+16AmcJi4g1z4= +github.com/atotto/clipboard v0.1.4/go.mod h1:ZY9tmq7sm5xIbd9bOK4onWV4S6X0u6GY7Vn0Yu86PYI= github.com/aws/aws-sdk-go-v2 v1.18.0/go.mod h1:uzbQtefpm44goOPmdKyAlXSNcwlRgF3ePWVW6EtJvvw= github.com/aws/aws-sdk-go-v2 v1.24.1 h1:xAojnj+ktS95YZlDf0zxWBkbFtymPeDP+rvUQIH3uAU= github.com/aws/aws-sdk-go-v2 v1.24.1/go.mod h1:LNh45Br1YAkEKaAqvmE1m8FUx6a5b/V0oAKV7of29b4= @@ -306,6 +310,8 @@ github.com/felixge/httpsnoop v1.0.4 h1:NFTV2Zj1bL4mc9sqWACXbQFVBBg2W3GPvqp8/ESS2 github.com/felixge/httpsnoop v1.0.4/go.mod h1:m8KPJKqk1gH5J9DgRY2ASl2lWCfGKXixSwevea8zH2U= github.com/firefart/nonamedreturns v1.0.4 
h1:abzI1p7mAEPYuR4A+VLKn4eNDOycjYo2phmY9sfv40Y= github.com/firefart/nonamedreturns v1.0.4/go.mod h1:TDhe/tjI1BXo48CmYbUduTV7BdIga8MAO/xbKdcVsGI= +github.com/fogleman/gg v1.3.0 h1:/7zJX8F6AaYQc57WQCyN9cAIz+4bCJGO9B+dyW29am8= +github.com/fogleman/gg v1.3.0/go.mod h1:R/bRT+9gY/C5z7JzPU0zXsXHKM4/ayA+zqcVNZzPa1k= github.com/frankban/quicktest v1.14.6 h1:7Xjx+VpznH+oBnejlPUj8oUpdxnVs4f8XU8WnHkI4W8= github.com/frankban/quicktest v1.14.6/go.mod h1:4ptaffx2x8+WTWXmUCuVU6aPUX1/Mz7zb5vbUoiM6w0= github.com/fsnotify/fsnotify v1.7.0 h1:8JEhPFa5W2WU7YfeZzPNqzMP6Lwt7L2715Ggo0nosvA= @@ -400,6 +406,8 @@ github.com/gofrs/flock v0.8.1/go.mod h1:F1TvTiK9OcQqauNUHlbJvyl9Qa1QvF/gOUDKA14j github.com/gogo/protobuf v1.1.1/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7atdtwQ= github.com/gogo/protobuf v1.3.2 h1:Ov1cvc58UF3b5XjBnZv7+opcTcQFZebYjWzi34vdm4Q= github.com/gogo/protobuf v1.3.2/go.mod h1:P1XiOD3dCwIKUDQYPy72D8LYyHL2YPYrpS2s69NZV8Q= +github.com/golang/freetype v0.0.0-20170609003504-e2365dfdc4a0 h1:DACJavvAHhabrF08vX0COfcOBJRhZ8lUbR+ZWIs0Y5g= +github.com/golang/freetype v0.0.0-20170609003504-e2365dfdc4a0/go.mod h1:E/TSTwGwJL78qG/PmXZO1EjYhfJinVAhrmmHX6Z8B9k= github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q= github.com/golang/groupcache v0.0.0-20190702054246-869f871628b6/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= github.com/golang/groupcache v0.0.0-20191227052852-215e87163ea7/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= @@ -562,8 +570,8 @@ github.com/hugelgupf/vmtest v0.0.0-20240102225328-693afabdd27f h1:ov45/OzrJG8EKb github.com/hugelgupf/vmtest v0.0.0-20240102225328-693afabdd27f/go.mod h1:JoDrYMZpDPYo6uH9/f6Peqms3zNNWT2XiGgioMOIGuI= github.com/ianlancetaylor/demangle v0.0.0-20181102032728-5e5cf60278f6/go.mod h1:aSSvb/t6k1mPoxDqO4vJh6VOCGPwU4O0C2/Eqndh1Sc= github.com/ianlancetaylor/demangle v0.0.0-20200824232613-28f6c0f3b639/go.mod h1:aSSvb/t6k1mPoxDqO4vJh6VOCGPwU4O0C2/Eqndh1Sc= 
-github.com/illarion/gonotify v1.0.1 h1:F1d+0Fgbq/sDWjj/r66ekjDG+IDeecQKUFH4wNwsoio= -github.com/illarion/gonotify v1.0.1/go.mod h1:zt5pmDofZpU1f8aqlK0+95eQhoEAn/d4G4B/FjVW4jE= +github.com/illarion/gonotify/v2 v2.0.3 h1:B6+SKPo/0Sw8cRJh1aLzNEeNVFfzE3c6N+o+vyxM+9A= +github.com/illarion/gonotify/v2 v2.0.3/go.mod h1:38oIJTgFqupkEydkkClkbL6i5lXV/bxdH9do5TALPEE= github.com/imdario/mergo v0.3.11/go.mod h1:jmQim1M+e3UYxmgPu/WyfjB3N3VflVyUjjjwH0dnCYA= github.com/imdario/mergo v0.3.16 h1:wwQJbIsHYGMUyLSPrEq1CT16AhnhNJQ51+4fdHUnCl4= github.com/imdario/mergo v0.3.16/go.mod h1:WBLT9ZmE3lPoWsEzCh9LPo3TiwVN+ZKEjmz+hD27ysY= @@ -930,16 +938,16 @@ github.com/tailscale/hujson v0.0.0-20221223112325-20486734a56a h1:SJy1Pu0eH1C29X github.com/tailscale/hujson v0.0.0-20221223112325-20486734a56a/go.mod h1:DFSS3NAGHthKo1gTlmEcSBiZrRJXi28rLNd/1udP1c8= github.com/tailscale/mkctr v0.0.0-20240628074852-17ca944da6ba h1:uNo1VCm/xg4alMkIKo8RWTKNx5y1otfVOcKbp+irkL4= github.com/tailscale/mkctr v0.0.0-20240628074852-17ca944da6ba/go.mod h1:DxnqIXBplij66U2ZkL688xy07q97qQ83P+TVueLiHq4= -github.com/tailscale/netlink v1.1.1-0.20211101221916-cabfb018fe85 h1:zrsUcqrG2uQSPhaUPjUQwozcRdDdSxxqhNgNZ3drZFk= -github.com/tailscale/netlink v1.1.1-0.20211101221916-cabfb018fe85/go.mod h1:NzVQi3Mleb+qzq8VmcWpSkcSYxXIg0DkI6XDzpVkhJ0= +github.com/tailscale/netlink v1.1.1-0.20240822203006-4d49adab4de7 h1:uFsXVBE9Qr4ZoF094vE6iYTLDl0qCiKzYXlL6UeWObU= +github.com/tailscale/netlink v1.1.1-0.20240822203006-4d49adab4de7/go.mod h1:NzVQi3Mleb+qzq8VmcWpSkcSYxXIg0DkI6XDzpVkhJ0= github.com/tailscale/peercred v0.0.0-20240214030740-b535050b2aa4 h1:Gz0rz40FvFVLTBk/K8UNAenb36EbDSnh+q7Z9ldcC8w= github.com/tailscale/peercred v0.0.0-20240214030740-b535050b2aa4/go.mod h1:phI29ccmHQBc+wvroosENp1IF9195449VDnFDhJ4rJU= github.com/tailscale/web-client-prebuilt v0.0.0-20240226180453-5db17b287bf1 h1:tdUdyPqJ0C97SJfjB9tW6EylTtreyee9C44de+UBG0g= github.com/tailscale/web-client-prebuilt v0.0.0-20240226180453-5db17b287bf1/go.mod 
h1:agQPE6y6ldqCOui2gkIh7ZMztTkIQKH049tv8siLuNQ= github.com/tailscale/wf v0.0.0-20240214030419-6fbb0a674ee6 h1:l10Gi6w9jxvinoiq15g8OToDdASBni4CyJOdHY1Hr8M= github.com/tailscale/wf v0.0.0-20240214030419-6fbb0a674ee6/go.mod h1:ZXRML051h7o4OcI0d3AaILDIad/Xw0IkXaHM17dic1Y= -github.com/tailscale/wireguard-go v0.0.0-20240731203015-71393c576b98 h1:RNpJrXfI5u6e+uzyIzvmnXbhmhdRkVf//90sMBH3lso= -github.com/tailscale/wireguard-go v0.0.0-20240731203015-71393c576b98/go.mod h1:BOm5fXUBFM+m9woLNBoxI9TaBXXhGNP50LX/TGIvGb4= +github.com/tailscale/wireguard-go v0.0.0-20240905161824-799c1978fafc h1:cezaQN9pvKVaw56Ma5qr/G646uKIYP0yQf+OyWN/okc= +github.com/tailscale/wireguard-go v0.0.0-20240905161824-799c1978fafc/go.mod h1:BOm5fXUBFM+m9woLNBoxI9TaBXXhGNP50LX/TGIvGb4= github.com/tailscale/xnet v0.0.0-20240729143630-8497ac4dab2e h1:zOGKqN5D5hHhiYUp091JqK7DPCqSARyUfduhGUY8Bek= github.com/tailscale/xnet v0.0.0-20240729143630-8497ac4dab2e/go.mod h1:orPd6JZXXRyuDusYilywte7k094d7dycXXU5YnWsrwg= github.com/tc-hib/winres v0.2.1 h1:YDE0FiP0VmtRaDn7+aaChp1KiF4owBiJa5l964l5ujA= @@ -980,8 +988,6 @@ github.com/uudashr/gocognit v1.0.6 h1:2Cgi6MweCsdB6kpcVQp7EW4U23iBFQWfTXiWlyp842 github.com/uudashr/gocognit v1.0.6/go.mod h1:nAIUuVBnYU7pcninia3BHOvQkpQCeO76Uscky5BOwcY= github.com/vbatts/tar-split v0.11.5 h1:3bHCTIheBm1qFTcgh9oPu+nNBtX+XJIupG/vacinCts= github.com/vbatts/tar-split v0.11.5/go.mod h1:yZbwRsSeGjusneWgA781EKej9HF8vme8okylkAeNKLk= -github.com/vishvananda/netlink v1.2.1-beta.2 h1:Llsql0lnQEbHj0I1OuKyp8otXp0r3q0mPkuhwHfStVs= -github.com/vishvananda/netlink v1.2.1-beta.2/go.mod h1:twkDnbuQxJYemMlGd4JFIcuhgX83tXhKS2B/PRMpOho= github.com/vishvananda/netns v0.0.0-20200728191858-db3c7e526aae/go.mod h1:DD4vA1DwXk04H54A1oHXtwZmA0grkVMdPxx/VGLCah0= github.com/vishvananda/netns v0.0.4 h1:Oeaw1EM2JMxD51g9uhtC0D7erkIjgmj8+JZc26m1YX8= github.com/vishvananda/netns v0.0.4/go.mod h1:SpkAiCQRtJ6TvvxPnOSyH3BMl6unz3xZlaprSwhNNJM= @@ -1504,8 +1510,8 @@ honnef.co/go/tools v0.0.0-20190523083050-ea95bdfd59fc/go.mod 
h1:rf3lG4BRIbNafJWh honnef.co/go/tools v0.0.1-2019.2.3/go.mod h1:a3bituU0lyd329TUQxRnasdCoJDkEUEAqEt0JzvZhAg= honnef.co/go/tools v0.0.1-2020.1.3/go.mod h1:X/FiERA/W4tHapMX5mGpAtMSVEeEUOyHaw9vFzvIQ3k= honnef.co/go/tools v0.0.1-2020.1.4/go.mod h1:X/FiERA/W4tHapMX5mGpAtMSVEeEUOyHaw9vFzvIQ3k= -honnef.co/go/tools v0.4.6 h1:oFEHCKeID7to/3autwsWfnuv69j3NsfcXbvJKuIcep8= -honnef.co/go/tools v0.4.6/go.mod h1:+rnGS1THNh8zMwnd2oVOTL9QF6vmfyG6ZXBULae2uc0= +honnef.co/go/tools v0.5.1 h1:4bH5o3b5ZULQ4UrBmP+63W9r7qIkqJClEA9ko5YKx+I= +honnef.co/go/tools v0.5.1/go.mod h1:e9irvo83WDG9/irijV44wr3tbhcFeRnfpVlRqVwpzMs= howett.net/plist v1.0.0 h1:7CrbWYbPPO/PyNy38b2EB/+gYbjCe2DXBxgtOOZbSQM= howett.net/plist v1.0.0/go.mod h1:lqaXoTrLY4hg8tnEzNru53gicrbv7rrk+2xJA/7hw9g= k8s.io/api v0.30.3 h1:ImHwK9DCsPA9uoU3rVh4QHAHHK5dTSv1nxJUapx8hoQ= diff --git a/go.toolchain.branch b/go.toolchain.branch index 34ae2acf3e857..47469a20ad6e9 100644 --- a/go.toolchain.branch +++ b/go.toolchain.branch @@ -1 +1 @@ -tailscale.go1.22 +tailscale.go1.23 diff --git a/go.toolchain.rev b/go.toolchain.rev index 7d064e9660851..cc32040724295 100644 --- a/go.toolchain.rev +++ b/go.toolchain.rev @@ -1 +1 @@ -22ef9eb38e9a2d21b4a45f7adc75addb05f3efb8 +0a7392ba4471f578e5160b6ea21def6ae8e4a072 diff --git a/gokrazy/build.go b/gokrazy/build.go index a8373d0d0d7e9..2392af0cb30e1 100644 --- a/gokrazy/build.go +++ b/gokrazy/build.go @@ -11,6 +11,7 @@ package main import ( "bytes" + "cmp" "encoding/json" "errors" "flag" @@ -29,6 +30,7 @@ import ( var ( app = flag.String("app", "tsapp", "appliance name; one of the subdirectories of gokrazy/") bucket = flag.String("bucket", "tskrazy-import", "S3 bucket to upload disk image to while making AMI") + goArch = flag.String("arch", cmp.Or(os.Getenv("GOARCH"), "amd64"), "GOARCH architecture to build for: arm64 or amd64") build = flag.Bool("build", false, "if true, just build locally and stop, without uploading") ) @@ -99,12 +101,12 @@ func buildImage() error { return err } if fi, err := 
os.Stat(filepath.Join(dir, *app)); err != nil || !fi.IsDir() { - return fmt.Errorf("in wrong directorg %v; no %q subdirectory found", dir, *app) + return fmt.Errorf("in wrong directory %v; no %q subdirectory found", dir, *app) } // Build the tsapp.img var buf bytes.Buffer cmd := exec.Command("go", "run", - "-exec=env GOOS=linux GOARCH=amd64 ", + "-exec=env GOOS=linux GOARCH="+*goArch+" ", "github.com/gokrazy/tools/cmd/gok", "--parent_dir="+dir, "--instance="+*app, @@ -250,9 +252,18 @@ func waitForImportSnapshot(importTaskID string) (snapID string, err error) { } func makeAMI(name, ebsSnapID string) (ami string, err error) { + var arch string + switch *goArch { + case "arm64": + arch = "arm64" + case "amd64": + arch = "x86_64" + default: + return "", fmt.Errorf("unknown arch %q", *goArch) + } out, err := exec.Command("aws", "ec2", "register-image", "--name", name, - "--architecture", "x86_64", + "--architecture", arch, "--root-device-name", "/dev/sda", "--ena-support", "--imds-support", "v2.0", diff --git a/gokrazy/go.mod b/gokrazy/go.mod index 8c898d3cab17e..0233f3e6d1796 100644 --- a/gokrazy/go.mod +++ b/gokrazy/go.mod @@ -1,6 +1,6 @@ module tailscale.com/gokrazy -go 1.22 +go 1.23.0 require github.com/gokrazy/tools v0.0.0-20240730192548-9f81add3a91e diff --git a/gokrazy/natlabapp.arm64/README.md b/gokrazy/natlabapp.arm64/README.md new file mode 100644 index 0000000000000..7fc9dd15901d8 --- /dev/null +++ b/gokrazy/natlabapp.arm64/README.md @@ -0,0 +1,6 @@ +# NATLab Linux test Appliance + +This is the definition of the NATLab Linux test appliance image. +It's similar to ../tsapp, but optimized for running in qemu in NATLab. + +See ../tsapp/README.md for more info. 
diff --git a/gokrazy/natlabapp.arm64/builddir/github.com/gokrazy/gokrazy/cmd/dhcp/go.mod b/gokrazy/natlabapp.arm64/builddir/github.com/gokrazy/gokrazy/cmd/dhcp/go.mod new file mode 100644 index 0000000000000..c56dede46ed65 --- /dev/null +++ b/gokrazy/natlabapp.arm64/builddir/github.com/gokrazy/gokrazy/cmd/dhcp/go.mod @@ -0,0 +1,18 @@ +module gokrazy/build/tsapp + +go 1.22.2 + +require ( + github.com/gokrazy/gokrazy v0.0.0-20240525065858-dedadaf38803 // indirect + github.com/google/gopacket v1.1.19 // indirect + github.com/google/renameio/v2 v2.0.0 // indirect + github.com/josharian/native v1.0.0 // indirect + github.com/mdlayher/packet v1.0.0 // indirect + github.com/mdlayher/socket v0.2.3 // indirect + github.com/rtr7/dhcp4 v0.0.0-20220302171438-18c84d089b46 // indirect + github.com/vishvananda/netlink v1.1.0 // indirect + github.com/vishvananda/netns v0.0.0-20211101163701-50045581ed74 // indirect + golang.org/x/net v0.23.0 // indirect + golang.org/x/sync v0.0.0-20210220032951-036812b2e83c // indirect + golang.org/x/sys v0.20.0 // indirect +) diff --git a/gokrazy/natlabapp.arm64/builddir/github.com/gokrazy/gokrazy/cmd/dhcp/go.sum b/gokrazy/natlabapp.arm64/builddir/github.com/gokrazy/gokrazy/cmd/dhcp/go.sum new file mode 100644 index 0000000000000..3cd002ae782b1 --- /dev/null +++ b/gokrazy/natlabapp.arm64/builddir/github.com/gokrazy/gokrazy/cmd/dhcp/go.sum @@ -0,0 +1,39 @@ +github.com/gokrazy/gokrazy v0.0.0-20240525065858-dedadaf38803 h1:gdGRW/wXHPJuZgZD931Lh75mdJfzEEXrL+Dvi97Ck3A= +github.com/gokrazy/gokrazy v0.0.0-20240525065858-dedadaf38803/go.mod h1:NHROeDlzn0icUl3f+tEYvGGpcyBDMsr3AvKLHOWRe5M= +github.com/google/gopacket v1.1.19 h1:ves8RnFZPGiFnTS0uPQStjwru6uO6h+nlr9j6fL7kF8= +github.com/google/gopacket v1.1.19/go.mod h1:iJ8V8n6KS+z2U1A8pUwu8bW5SyEMkXJB8Yo/Vo+TKTo= +github.com/google/renameio/v2 v2.0.0 h1:UifI23ZTGY8Tt29JbYFiuyIU3eX+RNFtUwefq9qAhxg= +github.com/google/renameio/v2 v2.0.0/go.mod h1:BtmJXm5YlszgC+TD4HOEEUFgkJP3nLxehU6hfe7jRt4= 
+github.com/josharian/native v1.0.0 h1:Ts/E8zCSEsG17dUqv7joXJFybuMLjQfWE04tsBODTxk= +github.com/josharian/native v1.0.0/go.mod h1:7X/raswPFr05uY3HiLlYeyQntB6OO7E/d2Cu7qoaN2w= +github.com/mdlayher/packet v1.0.0 h1:InhZJbdShQYt6XV2GPj5XHxChzOfhJJOMbvnGAmOfQ8= +github.com/mdlayher/packet v1.0.0/go.mod h1:eE7/ctqDhoiRhQ44ko5JZU2zxB88g+JH/6jmnjzPjOU= +github.com/mdlayher/socket v0.2.3 h1:XZA2X2TjdOwNoNPVPclRCURoX/hokBY8nkTmRZFEheM= +github.com/mdlayher/socket v0.2.3/go.mod h1:bz12/FozYNH/VbvC3q7TRIK/Y6dH1kCKsXaUeXi/FmY= +github.com/rtr7/dhcp4 v0.0.0-20220302171438-18c84d089b46 h1:3psQveH4RUiv5yc3p7kRySilf1nSXLQhAvJFwg4fgnE= +github.com/rtr7/dhcp4 v0.0.0-20220302171438-18c84d089b46/go.mod h1:Ng1F/s+z0zCMsbEFEneh+30LJa9DrTfmA+REbEqcTPk= +github.com/vishvananda/netlink v1.1.0 h1:1iyaYNBLmP6L0220aDnYQpo1QEV4t4hJ+xEEhhJH8j0= +github.com/vishvananda/netlink v1.1.0/go.mod h1:cTgwzPIzzgDAYoQrMm0EdrjRUBkTqKYppBueQtXaqoE= +github.com/vishvananda/netns v0.0.0-20191106174202-0a2b9b5464df/go.mod h1:JP3t17pCcGlemwknint6hfoeCVQrEMVwxRLRjXpq+BU= +github.com/vishvananda/netns v0.0.0-20211101163701-50045581ed74 h1:gga7acRE695APm9hlsSMoOoE65U4/TcqNj90mc69Rlg= +github.com/vishvananda/netns v0.0.0-20211101163701-50045581ed74/go.mod h1:DD4vA1DwXk04H54A1oHXtwZmA0grkVMdPxx/VGLCah0= +golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= +golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= +golang.org/x/lint v0.0.0-20200302205851-738671d3881b/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY= +golang.org/x/mod v0.1.1-0.20191105210325-c90efee705ee/go.mod h1:QqPTAvyqsEbceGzBzNggFXnrqF1CaUcvgkdR5Ot7KZg= +golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= +golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.23.0 
h1:7EYJ93RZ9vYSZAIb2x3lnuvqO5zneoD6IvWjuhfxjTs= +golang.org/x/net v0.23.0/go.mod h1:JKghWKKOSdJwpW2GEx0Ja7fmaKnMsbu+MWVZTokSYmg= +golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20210220032951-036812b2e83c h1:5KslGYwFpkhGh+Q16bwMP3cOontH8FOep7tGV86Y7SQ= +golang.org/x/sync v0.0.0-20210220032951-036812b2e83c/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190606203320-7fc4e5ec1444/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200217220822-9197077df867/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.20.0 h1:Od9JTbYCk261bKm4M/mw7AklTlFYIa0bIp9BgSm1S8Y= +golang.org/x/sys v0.20.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= +golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= +golang.org/x/tools v0.0.0-20200130002326-2f3ba24bd6e7/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= diff --git a/gokrazy/natlabapp.arm64/builddir/github.com/gokrazy/gokrazy/go.mod b/gokrazy/natlabapp.arm64/builddir/github.com/gokrazy/gokrazy/go.mod new file mode 100644 index 0000000000000..33656efeea7d7 --- /dev/null +++ b/gokrazy/natlabapp.arm64/builddir/github.com/gokrazy/gokrazy/go.mod @@ -0,0 +1,15 @@ +module gokrazy/build/tsapp + +go 1.22.2 + +require ( + github.com/gokrazy/gokrazy v0.0.0-20240802144848-676865a4e84f // indirect + github.com/gokrazy/internal v0.0.0-20240629150625-a0f1dee26ef5 // indirect + github.com/google/renameio/v2 v2.0.0 // indirect + github.com/kenshaw/evdev v0.1.0 // indirect + github.com/mdlayher/watchdog 
v0.0.0-20201005150459-8bdc4f41966b // indirect + github.com/spf13/pflag v1.0.5 // indirect + golang.org/x/sys v0.20.0 // indirect +) + +replace github.com/gokrazy/gokrazy => github.com/tailscale/gokrazy v0.0.0-20240812224643-6b21ddf64678 diff --git a/gokrazy/natlabapp.arm64/builddir/github.com/gokrazy/gokrazy/go.sum b/gokrazy/natlabapp.arm64/builddir/github.com/gokrazy/gokrazy/go.sum new file mode 100644 index 0000000000000..479eb1cef1ca7 --- /dev/null +++ b/gokrazy/natlabapp.arm64/builddir/github.com/gokrazy/gokrazy/go.sum @@ -0,0 +1,23 @@ +github.com/gokrazy/gokrazy v0.0.0-20240525065858-dedadaf38803 h1:gdGRW/wXHPJuZgZD931Lh75mdJfzEEXrL+Dvi97Ck3A= +github.com/gokrazy/gokrazy v0.0.0-20240525065858-dedadaf38803/go.mod h1:NHROeDlzn0icUl3f+tEYvGGpcyBDMsr3AvKLHOWRe5M= +github.com/gokrazy/internal v0.0.0-20240510165500-68dd68393b7a h1:FKeN678rNpKTpWRdFbAhYL9mWzPu57R5XPXCR3WmXdI= +github.com/gokrazy/internal v0.0.0-20240510165500-68dd68393b7a/go.mod h1:t3ZirVhcs9bH+fPAJuGh51rzT7sVCZ9yfXvszf0ZjF0= +github.com/gokrazy/internal v0.0.0-20240629150625-a0f1dee26ef5 h1:XDklMxV0pE5jWiNaoo5TzvWfqdoiRRScmr4ZtDzE4Uw= +github.com/gokrazy/internal v0.0.0-20240629150625-a0f1dee26ef5/go.mod h1:t3ZirVhcs9bH+fPAJuGh51rzT7sVCZ9yfXvszf0ZjF0= +github.com/google/renameio/v2 v2.0.0 h1:UifI23ZTGY8Tt29JbYFiuyIU3eX+RNFtUwefq9qAhxg= +github.com/google/renameio/v2 v2.0.0/go.mod h1:BtmJXm5YlszgC+TD4HOEEUFgkJP3nLxehU6hfe7jRt4= +github.com/kenshaw/evdev v0.1.0 h1:wmtceEOFfilChgdNT+c/djPJ2JineVsQ0N14kGzFRUo= +github.com/kenshaw/evdev v0.1.0/go.mod h1:B/fErKCihUyEobz0mjn2qQbHgyJKFQAxkXSvkeeA/Wo= +github.com/mdlayher/watchdog v0.0.0-20201005150459-8bdc4f41966b h1:7tUBfsEEBWfFeHOB7CUfoOamak+Gx/BlirfXyPk1WjI= +github.com/mdlayher/watchdog v0.0.0-20201005150459-8bdc4f41966b/go.mod h1:bmoJUS6qOA3uKFvF3KVuhf7mU1KQirzQMeHXtPyKEqg= +github.com/spf13/pflag v1.0.5 h1:iy+VFUOCP1a+8yFto/drg2CJ5u0yRoB7fZw3DKv/JXA= +github.com/spf13/pflag v1.0.5/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg= 
+github.com/tailscale/gokrazy v0.0.0-20240602215456-7b9b6bbf726a h1:7dnA8x14JihQmKbPr++Y5CCN/XSyDmOB6cXUxcIj6VQ= +github.com/tailscale/gokrazy v0.0.0-20240602215456-7b9b6bbf726a/go.mod h1:NHROeDlzn0icUl3f+tEYvGGpcyBDMsr3AvKLHOWRe5M= +github.com/tailscale/gokrazy v0.0.0-20240802144848-676865a4e84f h1:ZSAGWpgs+6dK2oIz5OR+HUul3oJbnhFn8YNgcZ3d9SQ= +github.com/tailscale/gokrazy v0.0.0-20240802144848-676865a4e84f/go.mod h1:+/WWMckeuQt+DG6690A6H8IgC+HpBFq2fmwRKcSbxdk= +github.com/tailscale/gokrazy v0.0.0-20240812224643-6b21ddf64678 h1:2B8/FbIRqmVgRUulQ4iu1EojniufComYe5Yj4BtIn1c= +github.com/tailscale/gokrazy v0.0.0-20240812224643-6b21ddf64678/go.mod h1:+/WWMckeuQt+DG6690A6H8IgC+HpBFq2fmwRKcSbxdk= +golang.org/x/sys v0.0.0-20201005065044-765f4ea38db3/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.20.0 h1:Od9JTbYCk261bKm4M/mw7AklTlFYIa0bIp9BgSm1S8Y= +golang.org/x/sys v0.20.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= diff --git a/gokrazy/natlabapp.arm64/builddir/github.com/gokrazy/kernel.arm64/go.mod b/gokrazy/natlabapp.arm64/builddir/github.com/gokrazy/kernel.arm64/go.mod new file mode 100644 index 0000000000000..fa6768435524e --- /dev/null +++ b/gokrazy/natlabapp.arm64/builddir/github.com/gokrazy/kernel.arm64/go.mod @@ -0,0 +1,5 @@ +module gokrazy/build/natlabapp.arm64 + +go 1.23.0 + +require github.com/gokrazy/kernel.arm64 v0.0.0-20240830035047-cdba87a9eb0e // indirect diff --git a/gokrazy/natlabapp.arm64/builddir/github.com/gokrazy/kernel.arm64/go.sum b/gokrazy/natlabapp.arm64/builddir/github.com/gokrazy/kernel.arm64/go.sum new file mode 100644 index 0000000000000..5084da5c5990c --- /dev/null +++ b/gokrazy/natlabapp.arm64/builddir/github.com/gokrazy/kernel.arm64/go.sum @@ -0,0 +1,2 @@ +github.com/gokrazy/kernel.arm64 v0.0.0-20240830035047-cdba87a9eb0e h1:D9QYleJ7CI4p7gpgUT1mPgAlWMi5au6yOiE8/qC5PhE= +github.com/gokrazy/kernel.arm64 v0.0.0-20240830035047-cdba87a9eb0e/go.mod h1:WWx72LXHEesuJxbopusRfSoKJQ6ffdwkT0DZditdrLo= diff --git 
a/gokrazy/natlabapp.arm64/builddir/github.com/gokrazy/serial-busybox/go.mod b/gokrazy/natlabapp.arm64/builddir/github.com/gokrazy/serial-busybox/go.mod new file mode 100644 index 0000000000000..de52e181b9c3c --- /dev/null +++ b/gokrazy/natlabapp.arm64/builddir/github.com/gokrazy/serial-busybox/go.mod @@ -0,0 +1,5 @@ +module gokrazy/build/tsapp + +go 1.22.2 + +require github.com/gokrazy/serial-busybox v0.0.0-20220918193710-d728912733ca // indirect diff --git a/gokrazy/natlabapp.arm64/builddir/github.com/gokrazy/serial-busybox/go.sum b/gokrazy/natlabapp.arm64/builddir/github.com/gokrazy/serial-busybox/go.sum new file mode 100644 index 0000000000000..8135f60c3e791 --- /dev/null +++ b/gokrazy/natlabapp.arm64/builddir/github.com/gokrazy/serial-busybox/go.sum @@ -0,0 +1,26 @@ +github.com/beevik/ntp v0.2.0/go.mod h1:hIHWr+l3+/clUnF44zdK+CWW7fO8dR5cIylAQ76NRpg= +github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= +github.com/gokrazy/gokrazy v0.0.0-20200501080617-f3445e01a904 h1:eqfH4A/LLgxv5RvqEXwVoFvfmpRa8+TokRjB5g6xBkk= +github.com/gokrazy/gokrazy v0.0.0-20200501080617-f3445e01a904/go.mod h1:pq6rGHqxMRPSaTXaCMzIZy0wLDusAJyoVNyNo05RLs0= +github.com/gokrazy/internal v0.0.0-20200407075822-660ad467b7c9 h1:x5jR/nNo4/kMSoNo/nwa2xbL7PN1an8S3oIn4OZJdec= +github.com/gokrazy/internal v0.0.0-20200407075822-660ad467b7c9/go.mod h1:LA5TQy7LcvYGQOy75tkrYkFUhbV2nl5qEBP47PSi2JA= +github.com/gokrazy/serial-busybox v0.0.0-20220918193710-d728912733ca h1:x0eSjuFy8qsRctVHeWm3EC474q3xm4h3OOOrYpcqyyA= +github.com/gokrazy/serial-busybox v0.0.0-20220918193710-d728912733ca/go.mod h1:OYcG5tSb+QrelmUOO4EZVUFcIHyyZb0QDbEbZFUp1TA= +github.com/google/go-cmp v0.4.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/gopacket v1.1.16/go.mod h1:UCLx9mCmAwsVbn6qQl1WIEt2SO7Nd2fD0th1TBAsqBw= +github.com/mdlayher/raw v0.0.0-20190303161257-764d452d77af/go.mod h1:rC/yE65s/DoHB6BzVOUBNYBGTg772JVytyAytffIZkY= +github.com/pmezard/go-difflib v1.0.0/go.mod 
h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= +github.com/rtr7/dhcp4 v0.0.0-20181120124042-778e8c2e24a5/go.mod h1:FwstIpm6vX98QgtR8KEwZcVjiRn2WP76LjXAHj84fK0= +github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= +github.com/stretchr/testify v1.5.1/go.mod h1:5W2xD1RspED5o8YsWQXVCued0rvSQ+mT+I5cxcmMvtA= +golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= +golang.org/x/crypto v0.0.0-20191206172530-e9b2fee46413/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= +golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= +golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200406155108-e3b113bbe6a4 h1:c1Sgqkh8v6ZxafNGG64r8C8UisIW2TKMJN8P86tKjr0= +golang.org/x/sys v0.0.0-20200406155108-e3b113bbe6a4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= +golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= +gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= diff --git a/gokrazy/natlabapp.arm64/builddir/github.com/tailscale/gokrazy-kernel/go.mod b/gokrazy/natlabapp.arm64/builddir/github.com/tailscale/gokrazy-kernel/go.mod new file mode 100644 index 0000000000000..ec4d9c64fc93e --- /dev/null +++ b/gokrazy/natlabapp.arm64/builddir/github.com/tailscale/gokrazy-kernel/go.mod @@ -0,0 +1,5 @@ +module gokrazy/build/tsapp + +go 1.22.2 + +require github.com/tailscale/gokrazy-kernel v0.0.0-20240728225134-3d23beabda2e // indirect diff --git 
a/gokrazy/natlabapp.arm64/builddir/github.com/tailscale/gokrazy-kernel/go.sum b/gokrazy/natlabapp.arm64/builddir/github.com/tailscale/gokrazy-kernel/go.sum new file mode 100644 index 0000000000000..d32d5460bf29c --- /dev/null +++ b/gokrazy/natlabapp.arm64/builddir/github.com/tailscale/gokrazy-kernel/go.sum @@ -0,0 +1,4 @@ +github.com/tailscale/gokrazy-kernel v0.0.0-20240530042707-3f95c886bcf2 h1:xzf+cMvBJBcA/Av7OTWBa0Tjrbfcy00TeatJeJt6zrY= +github.com/tailscale/gokrazy-kernel v0.0.0-20240530042707-3f95c886bcf2/go.mod h1:7Mth+m9bq2IHusSsexMNyupHWPL8RxwOuSvBlSGtgDY= +github.com/tailscale/gokrazy-kernel v0.0.0-20240728225134-3d23beabda2e h1:tyUUgeRPGHjCZWycRnhdx8Lx9DRkjl3WsVUxYMrVBOw= +github.com/tailscale/gokrazy-kernel v0.0.0-20240728225134-3d23beabda2e/go.mod h1:7Mth+m9bq2IHusSsexMNyupHWPL8RxwOuSvBlSGtgDY= diff --git a/gokrazy/natlabapp.arm64/builddir/tailscale.com/go.mod b/gokrazy/natlabapp.arm64/builddir/tailscale.com/go.mod new file mode 100644 index 0000000000000..7bdfd1e060e6c --- /dev/null +++ b/gokrazy/natlabapp.arm64/builddir/tailscale.com/go.mod @@ -0,0 +1,7 @@ +module gokrazy/build/tsapp + +go 1.23 + +replace tailscale.com => ../../../.. 
+ +require tailscale.com v0.0.0-00010101000000-000000000000 // indirect diff --git a/gokrazy/natlabapp.arm64/builddir/tailscale.com/go.sum b/gokrazy/natlabapp.arm64/builddir/tailscale.com/go.sum new file mode 100644 index 0000000000000..9123439ed88bf --- /dev/null +++ b/gokrazy/natlabapp.arm64/builddir/tailscale.com/go.sum @@ -0,0 +1,196 @@ +filippo.io/edwards25519 v1.1.0 h1:FNf4tywRC1HmFuKW5xopWpigGjJKiJSV0Cqo0cJWDaA= +filippo.io/edwards25519 v1.1.0/go.mod h1:BxyFTGdWcka3PhytdK4V28tE5sGfRvvvRV7EaN4VDT4= +github.com/anmitsu/go-shlex v0.0.0-20200514113438-38f4b401e2be h1:9AeTilPcZAjCFIImctFaOjnTIavg87rW78vTPkQqLI8= +github.com/anmitsu/go-shlex v0.0.0-20200514113438-38f4b401e2be/go.mod h1:ySMOLuWl6zY27l47sB3qLNK6tF2fkHG55UZxx8oIVo4= +github.com/aws/aws-sdk-go-v2 v1.24.1 h1:xAojnj+ktS95YZlDf0zxWBkbFtymPeDP+rvUQIH3uAU= +github.com/aws/aws-sdk-go-v2 v1.24.1/go.mod h1:LNh45Br1YAkEKaAqvmE1m8FUx6a5b/V0oAKV7of29b4= +github.com/aws/aws-sdk-go-v2/config v1.26.5 h1:lodGSevz7d+kkFJodfauThRxK9mdJbyutUxGq1NNhvw= +github.com/aws/aws-sdk-go-v2/config v1.26.5/go.mod h1:DxHrz6diQJOc9EwDslVRh84VjjrE17g+pVZXUeSxaDU= +github.com/aws/aws-sdk-go-v2/credentials v1.16.16 h1:8q6Rliyv0aUFAVtzaldUEcS+T5gbadPbWdV1WcAddK8= +github.com/aws/aws-sdk-go-v2/credentials v1.16.16/go.mod h1:UHVZrdUsv63hPXFo1H7c5fEneoVo9UXiz36QG1GEPi0= +github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.14.11 h1:c5I5iH+DZcH3xOIMlz3/tCKJDaHFwYEmxvlh2fAcFo8= +github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.14.11/go.mod h1:cRrYDYAMUohBJUtUnOhydaMHtiK/1NZ0Otc9lIb6O0Y= +github.com/aws/aws-sdk-go-v2/internal/configsources v1.2.10 h1:vF+Zgd9s+H4vOXd5BMaPWykta2a6Ih0AKLq/X6NYKn4= +github.com/aws/aws-sdk-go-v2/internal/configsources v1.2.10/go.mod h1:6BkRjejp/GR4411UGqkX8+wFMbFbqsUIimfK4XjOKR4= +github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.5.10 h1:nYPe006ktcqUji8S2mqXf9c/7NdiKriOwMvWQHgYztw= +github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.5.10/go.mod h1:6UV4SZkVvmODfXKql4LCbaZUpF7HO2BX38FgBf9ZOLw= 
+github.com/aws/aws-sdk-go-v2/internal/ini v1.7.2 h1:GrSw8s0Gs/5zZ0SX+gX4zQjRnRsMJDJ2sLur1gRBhEM= +github.com/aws/aws-sdk-go-v2/internal/ini v1.7.2/go.mod h1:6fQQgfuGmw8Al/3M2IgIllycxV7ZW7WCdVSqfBeUiCY= +github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.10.4 h1:/b31bi3YVNlkzkBrm9LfpaKoaYZUxIAj4sHfOTmLfqw= +github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.10.4/go.mod h1:2aGXHFmbInwgP9ZfpmdIfOELL79zhdNYNmReK8qDfdQ= +github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.10.10 h1:DBYTXwIGQSGs9w4jKm60F5dmCQ3EEruxdc0MFh+3EY4= +github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.10.10/go.mod h1:wohMUQiFdzo0NtxbBg0mSRGZ4vL3n0dKjLTINdcIino= +github.com/aws/aws-sdk-go-v2/service/ssm v1.44.7 h1:a8HvP/+ew3tKwSXqL3BCSjiuicr+XTU2eFYeogV9GJE= +github.com/aws/aws-sdk-go-v2/service/ssm v1.44.7/go.mod h1:Q7XIWsMo0JcMpI/6TGD6XXcXcV1DbTj6e9BKNntIMIM= +github.com/aws/aws-sdk-go-v2/service/sso v1.18.7 h1:eajuO3nykDPdYicLlP3AGgOyVN3MOlFmZv7WGTuJPow= +github.com/aws/aws-sdk-go-v2/service/sso v1.18.7/go.mod h1:+mJNDdF+qiUlNKNC3fxn74WWNN+sOiGOEImje+3ScPM= +github.com/aws/aws-sdk-go-v2/service/ssooidc v1.21.7 h1:QPMJf+Jw8E1l7zqhZmMlFw6w1NmfkfiSK8mS4zOx3BA= +github.com/aws/aws-sdk-go-v2/service/ssooidc v1.21.7/go.mod h1:ykf3COxYI0UJmxcfcxcVuz7b6uADi1FkiUz6Eb7AgM8= +github.com/aws/aws-sdk-go-v2/service/sts v1.26.7 h1:NzO4Vrau795RkUdSHKEwiR01FaGzGOH1EETJ+5QHnm0= +github.com/aws/aws-sdk-go-v2/service/sts v1.26.7/go.mod h1:6h2YuIoxaMSCFf5fi1EgZAwdfkGMgDY+DVfa61uLe4U= +github.com/aws/smithy-go v1.19.0 h1:KWFKQV80DpP3vJrrA9sVAHQ5gc2z8i4EzrLhLlWXcBM= +github.com/aws/smithy-go v1.19.0/go.mod h1:NukqUGpCZIILqqiV0NIjeFh24kd/FAa4beRb6nbIUPE= +github.com/bits-and-blooms/bitset v1.13.0 h1:bAQ9OPNFYbGHV6Nez0tmNI0RiEu7/hxlYJRUA0wFAVE= +github.com/bits-and-blooms/bitset v1.13.0/go.mod h1:7hO7Gc7Pp1vODcmWvKMRA9BNmbv6a/7QIWpPxHddWR8= +github.com/coder/websocket v1.8.12 h1:5bUXkEPPIbewrnkU8LTCLVaxi4N4J8ahufH2vlo4NAo= +github.com/coder/websocket v1.8.12/go.mod 
h1:LNVeNrXQZfe5qhS9ALED3uA+l5pPqvwXg3CKoDBB2gs= +github.com/coreos/go-iptables v0.7.1-0.20240112124308-65c67c9f46e6 h1:8h5+bWd7R6AYUslN6c6iuZWTKsKxUFDlpnmilO6R2n0= +github.com/coreos/go-iptables v0.7.1-0.20240112124308-65c67c9f46e6/go.mod h1:Qe8Bv2Xik5FyTXwgIbLAnv2sWSBmvWdFETJConOQ//Q= +github.com/creack/pty v1.1.21 h1:1/QdRyBaHHJP61QkWMXlOIBfsgdDeeKfK8SYVUWJKf0= +github.com/creack/pty v1.1.21/go.mod h1:MOBLtS5ELjhRRrroQr9kyvTxUAFNvYEK993ew/Vr4O4= +github.com/creack/pty v1.1.23 h1:4M6+isWdcStXEf15G/RbrMPOQj1dZ7HPZCGwE4kOeP0= +github.com/creack/pty v1.1.23/go.mod h1:08sCNb52WyoAwi2QDyzUCTgcvVFhUzewun7wtTfvcwE= +github.com/digitalocean/go-smbios v0.0.0-20180907143718-390a4f403a8e h1:vUmf0yezR0y7jJ5pceLHthLaYf4bA5T14B6q39S4q2Q= +github.com/digitalocean/go-smbios v0.0.0-20180907143718-390a4f403a8e/go.mod h1:YTIHhz/QFSYnu/EhlF2SpU2Uk+32abacUYA5ZPljz1A= +github.com/djherbis/times v1.6.0 h1:w2ctJ92J8fBvWPxugmXIv7Nz7Q3iDMKNx9v5ocVH20c= +github.com/djherbis/times v1.6.0/go.mod h1:gOHeRAz2h+VJNZ5Gmc/o7iD9k4wW7NMVqieYCY99oc0= +github.com/fxamacker/cbor/v2 v2.6.0 h1:sU6J2usfADwWlYDAFhZBQ6TnLFBHxgesMrQfQgk1tWA= +github.com/fxamacker/cbor/v2 v2.6.0/go.mod h1:pxXPTn3joSm21Gbwsv0w9OSA2y1HFR9qXEeXQVeNoDQ= +github.com/gaissmai/bart v0.11.1 h1:5Uv5XwsaFBRo4E5VBcb9TzY8B7zxFf+U7isDxqOrRfc= +github.com/gaissmai/bart v0.11.1/go.mod h1:KHeYECXQiBjTzQz/om2tqn3sZF1J7hw9m6z41ftj3fg= +github.com/go-json-experiment/json v0.0.0-20231102232822-2e55bd4e08b0 h1:ymLjT4f35nQbASLnvxEde4XOBL+Sn7rFuV+FOJqkljg= +github.com/go-json-experiment/json v0.0.0-20231102232822-2e55bd4e08b0/go.mod h1:6daplAwHHGbUGib4990V3Il26O0OC4aRyvewaaAihaA= +github.com/godbus/dbus/v5 v5.1.1-0.20230522191255-76236955d466 h1:sQspH8M4niEijh3PFscJRLDnkL547IeP7kpPe3uUhEg= +github.com/godbus/dbus/v5 v5.1.1-0.20230522191255-76236955d466/go.mod h1:ZiQxhyQ+bbbfxUKVvjfO498oPYvtYhZzycal3G/NHmU= +github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da h1:oI5xCqsCo564l8iNU+DwB5epxmsaqB+rhGL0m5jtYqE= +github.com/golang/groupcache 
v0.0.0-20210331224755-41bb18bfe9da/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= +github.com/google/btree v1.1.2 h1:xf4v41cLI2Z6FxbKm+8Bu+m8ifhj15JuZ9sa0jZCMUU= +github.com/google/btree v1.1.2/go.mod h1:qOPhT0dTNdNzV6Z/lhRX0YXUafgPLFUh+gZMl761Gm4= +github.com/google/nftables v0.2.1-0.20240414091927-5e242ec57806 h1:wG8RYIyctLhdFk6Vl1yPGtSRtwGpVkWyZww1OCil2MI= +github.com/google/nftables v0.2.1-0.20240414091927-5e242ec57806/go.mod h1:Beg6V6zZ3oEn0JuiUQ4wqwuyqqzasOltcoXPtgLbFp4= +github.com/google/uuid v1.6.0 h1:NIvaJDMOsjHA8n1jAhLSgzrAzy1Hgr+hNrb57e+94F0= +github.com/google/uuid v1.6.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= +github.com/gorilla/csrf v1.7.2 h1:oTUjx0vyf2T+wkrx09Trsev1TE+/EbDAeHtSTbtC2eI= +github.com/gorilla/csrf v1.7.2/go.mod h1:F1Fj3KG23WYHE6gozCmBAezKookxbIvUJT+121wTuLk= +github.com/gorilla/securecookie v1.1.2 h1:YCIWL56dvtr73r6715mJs5ZvhtnY73hBvEF8kXD8ePA= +github.com/gorilla/securecookie v1.1.2/go.mod h1:NfCASbcHqRSY+3a8tlWJwsQap2VX5pwzwo4h3eOamfo= +github.com/hdevalence/ed25519consensus v0.2.0 h1:37ICyZqdyj0lAZ8P4D1d1id3HqbbG1N3iBb1Tb4rdcU= +github.com/hdevalence/ed25519consensus v0.2.0/go.mod h1:w3BHWjwJbFU29IRHL1Iqkw3sus+7FctEyM4RqDxYNzo= +github.com/illarion/gonotify v1.0.1 h1:F1d+0Fgbq/sDWjj/r66ekjDG+IDeecQKUFH4wNwsoio= +github.com/illarion/gonotify v1.0.1/go.mod h1:zt5pmDofZpU1f8aqlK0+95eQhoEAn/d4G4B/FjVW4jE= +github.com/illarion/gonotify/v2 v2.0.2 h1:oDH5yvxq9oiQGWUeut42uShcWzOy/hsT9E7pvO95+kQ= +github.com/illarion/gonotify/v2 v2.0.2/go.mod h1:38oIJTgFqupkEydkkClkbL6i5lXV/bxdH9do5TALPEE= +github.com/insomniacslk/dhcp v0.0.0-20231206064809-8c70d406f6d2 h1:9K06NfxkBh25x56yVhWWlKFE8YpicaSfHwoV8SFbueA= +github.com/insomniacslk/dhcp v0.0.0-20231206064809-8c70d406f6d2/go.mod h1:3A9PQ1cunSDF/1rbTq99Ts4pVnycWg+vlPkfeD2NLFI= +github.com/jellydator/ttlcache/v3 v3.1.0 h1:0gPFG0IHHP6xyUyXq+JaD8fwkDCqgqwohXNJBcYE71g= +github.com/jellydator/ttlcache/v3 v3.1.0/go.mod h1:hi7MGFdMAwZna5n2tuvh63DvFLzVKySzCVW6+0gA2n4= 
+github.com/jmespath/go-jmespath v0.4.0 h1:BEgLn5cpjn8UN1mAw4NjwDrS35OdebyEtFe+9YPoQUg= +github.com/jmespath/go-jmespath v0.4.0/go.mod h1:T8mJZnbsbmF+m6zOOFylbeCJqk5+pHWvzYPziyZiYoo= +github.com/josharian/native v1.1.1-0.20230202152459-5c7d0dd6ab86 h1:elKwZS1OcdQ0WwEDBeqxKwb7WB62QX8bvZ/FJnVXIfk= +github.com/josharian/native v1.1.1-0.20230202152459-5c7d0dd6ab86/go.mod h1:aFAMtuldEgx/4q7iSGazk22+IcgvtiC+HIimFO9XlS8= +github.com/jsimonetti/rtnetlink v1.4.0 h1:Z1BF0fRgcETPEa0Kt0MRk3yV5+kF1FWTni6KUFKrq2I= +github.com/jsimonetti/rtnetlink v1.4.0/go.mod h1:5W1jDvWdnthFJ7fxYX1GMK07BUpI4oskfOqvPteYS6E= +github.com/kballard/go-shellquote v0.0.0-20180428030007-95032a82bc51 h1:Z9n2FFNUXsshfwJMBgNA0RU6/i7WVaAegv3PtuIHPMs= +github.com/kballard/go-shellquote v0.0.0-20180428030007-95032a82bc51/go.mod h1:CzGEWj7cYgsdH8dAjBGEr58BoE7ScuLd+fwFZ44+/x8= +github.com/klauspost/compress v1.17.4 h1:Ej5ixsIri7BrIjBkRZLTo6ghwrEtHFk7ijlczPW4fZ4= +github.com/klauspost/compress v1.17.4/go.mod h1:/dCuZOvVtNoHsyb+cuJD3itjs3NbnF6KH9zAO4BDxPM= +github.com/kortschak/wol v0.0.0-20200729010619-da482cc4850a h1:+RR6SqnTkDLWyICxS1xpjCi/3dhyV+TgZwA6Ww3KncQ= +github.com/kortschak/wol v0.0.0-20200729010619-da482cc4850a/go.mod h1:YTtCCM3ryyfiu4F7t8HQ1mxvp1UBdWM2r6Xa+nGWvDk= +github.com/kr/fs v0.1.0 h1:Jskdu9ieNAYnjxsi0LbQp1ulIKZV1LAFgK1tWhpZgl8= +github.com/kr/fs v0.1.0/go.mod h1:FFnZGqtBN9Gxj7eW1uZ42v5BccTP0vu6NEaFoC2HwRg= +github.com/mattn/go-colorable v0.1.13 h1:fFA4WZxdEF4tXPZVKMLwD8oUnCTTo08duU7wxecdEvA= +github.com/mattn/go-colorable v0.1.13/go.mod h1:7S9/ev0klgBDR4GtXTXX8a3vIGJpMovkB8vQcUbaXHg= +github.com/mattn/go-isatty v0.0.20 h1:xfD0iDuEKnDkl03q4limB+vH+GxLEtL/jb4xVJSWWEY= +github.com/mattn/go-isatty v0.0.20/go.mod h1:W+V8PltTTMOvKvAeJH7IuucS94S2C6jfK/D7dTCTo3Y= +github.com/mdlayher/genetlink v1.3.2 h1:KdrNKe+CTu+IbZnm/GVUMXSqBBLqcGpRDa0xkQy56gw= +github.com/mdlayher/genetlink v1.3.2/go.mod h1:tcC3pkCrPUGIKKsCsp0B3AdaaKuHtaxoJRz3cc+528o= +github.com/mdlayher/netlink v1.7.2 
h1:/UtM3ofJap7Vl4QWCPDGXY8d3GIY2UGSDbK+QWmY8/g= +github.com/mdlayher/netlink v1.7.2/go.mod h1:xraEF7uJbxLhc5fpHL4cPe221LI2bdttWlU+ZGLfQSw= +github.com/mdlayher/sdnotify v1.0.0 h1:Ma9XeLVN/l0qpyx1tNeMSeTjCPH6NtuD6/N9XdTlQ3c= +github.com/mdlayher/sdnotify v1.0.0/go.mod h1:HQUmpM4XgYkhDLtd+Uad8ZFK1T9D5+pNxnXQjCeJlGE= +github.com/mdlayher/socket v0.5.0 h1:ilICZmJcQz70vrWVes1MFera4jGiWNocSkykwwoy3XI= +github.com/mdlayher/socket v0.5.0/go.mod h1:WkcBFfvyG8QENs5+hfQPl1X6Jpd2yeLIYgrGFmJiJxI= +github.com/miekg/dns v1.1.58 h1:ca2Hdkz+cDg/7eNF6V56jjzuZ4aCAE+DbVkILdQWG/4= +github.com/miekg/dns v1.1.58/go.mod h1:Ypv+3b/KadlvW9vJfXOTf300O4UqaHFzFCuHz+rPkBY= +github.com/mitchellh/go-ps v1.0.0 h1:i6ampVEEF4wQFF+bkYfwYgY+F/uYJDktmvLPf7qIgjc= +github.com/mitchellh/go-ps v1.0.0/go.mod h1:J4lOc8z8yJs6vUwklHw2XEIiT4z4C40KtWVN3nvg8Pg= +github.com/peterbourgon/ff/v3 v3.4.0 h1:QBvM/rizZM1cB0p0lGMdmR7HxZeI/ZrBWB4DqLkMUBc= +github.com/peterbourgon/ff/v3 v3.4.0/go.mod h1:zjJVUhx+twciwfDl0zBcFzl4dW8axCRyXE/eKY9RztQ= +github.com/pierrec/lz4/v4 v4.1.21 h1:yOVMLb6qSIDP67pl/5F7RepeKYu/VmTyEXvuMI5d9mQ= +github.com/pierrec/lz4/v4 v4.1.21/go.mod h1:gZWDp/Ze/IJXGXf23ltt2EXimqmTUXEy0GFuRQyBid4= +github.com/pkg/sftp v1.13.6 h1:JFZT4XbOU7l77xGSpOdW+pwIMqP044IyjXX6FGyEKFo= +github.com/pkg/sftp v1.13.6/go.mod h1:tz1ryNURKu77RL+GuCzmoJYxQczL3wLNNpPWagdg4Qk= +github.com/safchain/ethtool v0.3.0 h1:gimQJpsI6sc1yIqP/y8GYgiXn/NjgvpM0RNoWLVVmP0= +github.com/safchain/ethtool v0.3.0/go.mod h1:SA9BwrgyAqNo7M+uaL6IYbxpm5wk3L7Mm6ocLW+CJUs= +github.com/skip2/go-qrcode v0.0.0-20200617195104-da1b6568686e h1:MRM5ITcdelLK2j1vwZ3Je0FKVCfqOLp5zO6trqMLYs0= +github.com/skip2/go-qrcode v0.0.0-20200617195104-da1b6568686e/go.mod h1:XV66xRDqSt+GTGFMVlhk3ULuV0y9ZmzeVGR4mloJI3M= +github.com/tailscale/golang-x-crypto v0.0.0-20240604161659-3fde5e568aa4 h1:rXZGgEa+k2vJM8xT0PoSKfVXwFGPQ3z3CJfmnHJkZZw= +github.com/tailscale/golang-x-crypto v0.0.0-20240604161659-3fde5e568aa4/go.mod h1:ikbF+YT089eInTp9f2vmvy4+ZVnW5hzX1q2WknxSprQ= 
+github.com/tailscale/goupnp v1.0.1-0.20210804011211-c64d0f06ea05 h1:4chzWmimtJPxRs2O36yuGRW3f9SYV+bMTTvMBI0EKio= +github.com/tailscale/goupnp v1.0.1-0.20210804011211-c64d0f06ea05/go.mod h1:PdCqy9JzfWMJf1H5UJW2ip33/d4YkoKN0r67yKH1mG8= +github.com/tailscale/hujson v0.0.0-20221223112325-20486734a56a h1:SJy1Pu0eH1C29XwJucQo73FrleVK6t4kYz4NVhp34Yw= +github.com/tailscale/hujson v0.0.0-20221223112325-20486734a56a/go.mod h1:DFSS3NAGHthKo1gTlmEcSBiZrRJXi28rLNd/1udP1c8= +github.com/tailscale/netlink v1.1.1-0.20211101221916-cabfb018fe85 h1:zrsUcqrG2uQSPhaUPjUQwozcRdDdSxxqhNgNZ3drZFk= +github.com/tailscale/netlink v1.1.1-0.20211101221916-cabfb018fe85/go.mod h1:NzVQi3Mleb+qzq8VmcWpSkcSYxXIg0DkI6XDzpVkhJ0= +github.com/tailscale/netlink v1.1.1-0.20240822203006-4d49adab4de7 h1:uFsXVBE9Qr4ZoF094vE6iYTLDl0qCiKzYXlL6UeWObU= +github.com/tailscale/netlink v1.1.1-0.20240822203006-4d49adab4de7/go.mod h1:NzVQi3Mleb+qzq8VmcWpSkcSYxXIg0DkI6XDzpVkhJ0= +github.com/tailscale/peercred v0.0.0-20240214030740-b535050b2aa4 h1:Gz0rz40FvFVLTBk/K8UNAenb36EbDSnh+q7Z9ldcC8w= +github.com/tailscale/peercred v0.0.0-20240214030740-b535050b2aa4/go.mod h1:phI29ccmHQBc+wvroosENp1IF9195449VDnFDhJ4rJU= +github.com/tailscale/web-client-prebuilt v0.0.0-20240226180453-5db17b287bf1 h1:tdUdyPqJ0C97SJfjB9tW6EylTtreyee9C44de+UBG0g= +github.com/tailscale/web-client-prebuilt v0.0.0-20240226180453-5db17b287bf1/go.mod h1:agQPE6y6ldqCOui2gkIh7ZMztTkIQKH049tv8siLuNQ= +github.com/tailscale/wireguard-go v0.0.0-20240705152531-2f5d148bcfe1 h1:ycpNCSYwzZ7x4G4ioPNtKQmIY0G/3o4pVf8wCZq6blY= +github.com/tailscale/wireguard-go v0.0.0-20240705152531-2f5d148bcfe1/go.mod h1:BOm5fXUBFM+m9woLNBoxI9TaBXXhGNP50LX/TGIvGb4= +github.com/tailscale/wireguard-go v0.0.0-20240731203015-71393c576b98 h1:RNpJrXfI5u6e+uzyIzvmnXbhmhdRkVf//90sMBH3lso= +github.com/tailscale/wireguard-go v0.0.0-20240731203015-71393c576b98/go.mod h1:BOm5fXUBFM+m9woLNBoxI9TaBXXhGNP50LX/TGIvGb4= +github.com/tailscale/xnet v0.0.0-20240117122442-62b9a7c569f9 
h1:81P7rjnikHKTJ75EkjppvbwUfKHDHYk6LJpO5PZy8pA= +github.com/tailscale/xnet v0.0.0-20240117122442-62b9a7c569f9/go.mod h1:orPd6JZXXRyuDusYilywte7k094d7dycXXU5YnWsrwg= +github.com/tailscale/xnet v0.0.0-20240729143630-8497ac4dab2e h1:zOGKqN5D5hHhiYUp091JqK7DPCqSARyUfduhGUY8Bek= +github.com/tailscale/xnet v0.0.0-20240729143630-8497ac4dab2e/go.mod h1:orPd6JZXXRyuDusYilywte7k094d7dycXXU5YnWsrwg= +github.com/tcnksm/go-httpstat v0.2.0 h1:rP7T5e5U2HfmOBmZzGgGZjBQ5/GluWUylujl0tJ04I0= +github.com/tcnksm/go-httpstat v0.2.0/go.mod h1:s3JVJFtQxtBEBC9dwcdTTXS9xFnM3SXAZwPG41aurT8= +github.com/toqueteos/webbrowser v1.2.0 h1:tVP/gpK69Fx+qMJKsLE7TD8LuGWPnEV71wBN9rrstGQ= +github.com/toqueteos/webbrowser v1.2.0/go.mod h1:XWoZq4cyp9WeUeak7w7LXRUQf1F1ATJMir8RTqb4ayM= +github.com/u-root/u-root v0.12.0 h1:K0AuBFriwr0w/PGS3HawiAw89e3+MU7ks80GpghAsNs= +github.com/u-root/u-root v0.12.0/go.mod h1:FYjTOh4IkIZHhjsd17lb8nYW6udgXdJhG1c0r6u0arI= +github.com/u-root/uio v0.0.0-20240118234441-a3c409a6018e h1:BA9O3BmlTmpjbvajAwzWx4Wo2TRVdpPXZEeemGQcajw= +github.com/u-root/uio v0.0.0-20240118234441-a3c409a6018e/go.mod h1:eLL9Nub3yfAho7qB0MzZizFhTU2QkLeoVsWdHtDW264= +github.com/vishvananda/netlink v1.2.1-beta.2 h1:Llsql0lnQEbHj0I1OuKyp8otXp0r3q0mPkuhwHfStVs= +github.com/vishvananda/netlink v1.2.1-beta.2/go.mod h1:twkDnbuQxJYemMlGd4JFIcuhgX83tXhKS2B/PRMpOho= +github.com/vishvananda/netns v0.0.4 h1:Oeaw1EM2JMxD51g9uhtC0D7erkIjgmj8+JZc26m1YX8= +github.com/vishvananda/netns v0.0.4/go.mod h1:SpkAiCQRtJ6TvvxPnOSyH3BMl6unz3xZlaprSwhNNJM= +github.com/x448/float16 v0.8.4 h1:qLwI1I70+NjRFUR3zs1JPUCgaCXSh3SW62uAKT1mSBM= +github.com/x448/float16 v0.8.4/go.mod h1:14CWIYCyZA/cWjXOioeEpHeN/83MdbZDRQHoFcYsOfg= +go4.org/mem v0.0.0-20220726221520-4f986261bf13 h1:CbZeCBZ0aZj8EfVgnqQcYZgf0lpZ3H9rmp5nkDTAst8= +go4.org/mem v0.0.0-20220726221520-4f986261bf13/go.mod h1:reUoABIJ9ikfM5sgtSF3Wushcza7+WeD01VB9Lirh3g= +go4.org/netipx v0.0.0-20231129151722-fdeea329fbba h1:0b9z3AuHCjxk0x/opv64kcgZLBseWJUpBw5I82+2U4M= +go4.org/netipx 
v0.0.0-20231129151722-fdeea329fbba/go.mod h1:PLyyIXexvUFg3Owu6p/WfdlivPbZJsZdgWZlrGope/Y= +golang.org/x/crypto v0.24.0 h1:mnl8DM0o513X8fdIkmyFE/5hTYxbwYOjDS/+rK6qpRI= +golang.org/x/crypto v0.24.0/go.mod h1:Z1PMYSOR5nyMcyAVAIQSKCDwalqy85Aqn1x3Ws4L5DM= +golang.org/x/crypto v0.25.0 h1:ypSNr+bnYL2YhwoMt2zPxHFmbAN1KZs/njMG3hxUp30= +golang.org/x/crypto v0.25.0/go.mod h1:T+wALwcMOSE0kXgUAnPAHqTLW+XHgcELELW8VaDgm/M= +golang.org/x/exp v0.0.0-20240119083558-1b970713d09a h1:Q8/wZp0KX97QFTc2ywcOE0YRjZPVIx+MXInMzdvQqcA= +golang.org/x/exp v0.0.0-20240119083558-1b970713d09a/go.mod h1:idGWGoKP1toJGkd5/ig9ZLuPcZBC3ewk7SzmH0uou08= +golang.org/x/net v0.26.0 h1:soB7SVo0PWrY4vPW/+ay0jKDNScG2X9wFeYlXIvJsOQ= +golang.org/x/net v0.26.0/go.mod h1:5YKkiSynbBIh3p6iOc/vibscux0x38BZDkn8sCUPxHE= +golang.org/x/net v0.27.0 h1:5K3Njcw06/l2y9vpGCSdcxWOYHOUk3dVNGDXN+FvAys= +golang.org/x/net v0.27.0/go.mod h1:dDi0PyhWNoiUOrAS8uXv/vnScO4wnHQO4mj9fn/RytE= +golang.org/x/oauth2 v0.16.0 h1:aDkGMBSYxElaoP81NpoUoz2oo2R2wHdZpGToUxfyQrQ= +golang.org/x/oauth2 v0.16.0/go.mod h1:hqZ+0LWXsiVoZpeld6jVt06P3adbS2Uu911W1SsJv2o= +golang.org/x/sync v0.7.0 h1:YsImfSBoP9QPYL0xyKJPq0gcaJdG3rInoqxTWbfQu9M= +golang.org/x/sync v0.7.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk= +golang.org/x/sys v0.21.0 h1:rF+pYz3DAGSQAxAu1CbC7catZg4ebC4UIeIhKxBZvws= +golang.org/x/sys v0.21.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= +golang.org/x/sys v0.22.0 h1:RI27ohtqKCnwULzJLqkv897zojh5/DwS/ENaMzUOaWI= +golang.org/x/sys v0.22.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= +golang.org/x/term v0.21.0 h1:WVXCp+/EBEHOj53Rvu+7KiT/iElMrO8ACK16SMZ3jaA= +golang.org/x/term v0.21.0/go.mod h1:ooXLefLobQVslOqselCNF4SxFAaoS6KujMbsGzSDmX0= +golang.org/x/term v0.22.0 h1:BbsgPEJULsl2fV/AT3v15Mjva5yXKQDyKf+TbDz7QJk= +golang.org/x/term v0.22.0/go.mod h1:F3qCibpT5AMpCRfhfT53vVJwhLtIVHhB9XDjfFvnMI4= +golang.org/x/text v0.16.0 h1:a94ExnEXNtEwYLGJSIUxnWoxoRz/ZcCsV63ROupILh4= +golang.org/x/text v0.16.0/go.mod 
h1:GhwF1Be+LQoKShO3cGOHzqOgRrGaYc9AvblQOmPVHnI= +golang.org/x/time v0.5.0 h1:o7cqy6amK/52YcAKIPlM3a+Fpj35zvRj2TP+e1xFSfk= +golang.org/x/time v0.5.0/go.mod h1:3BpzKBy/shNhVucY/MWOyx10tF3SFh9QdLuxbVysPQM= +gvisor.dev/gvisor v0.0.0-20240306221502-ee1e1f6070e3 h1:/8/t5pz/mgdRXhYOIeqqYhFAQLE4DDGegc0Y4ZjyFJM= +gvisor.dev/gvisor v0.0.0-20240306221502-ee1e1f6070e3/go.mod h1:NQHVAzMwvZ+Qe3ElSiHmq9RUm1MdNHpUZ52fiEqvn+0= +gvisor.dev/gvisor v0.0.0-20240722211153-64c016c92987 h1:TU8z2Lh3Bbq77w0t1eG8yRlLcNHzZu3x6mhoH2Mk0c8= +gvisor.dev/gvisor v0.0.0-20240722211153-64c016c92987/go.mod h1:sxc3Uvk/vHcd3tj7/DHVBoR5wvWT/MmRq2pj7HRJnwU= +k8s.io/client-go v0.30.1 h1:uC/Ir6A3R46wdkgCV3vbLyNOYyCJ8oZnjtJGKfytl/Q= +k8s.io/client-go v0.30.1/go.mod h1:wrAqLNs2trwiCH/wxxmT/x3hKVH9PuV0GGW0oDoHVqc= +k8s.io/client-go v0.30.3 h1:bHrJu3xQZNXIi8/MoxYtZBBWQQXwy16zqJwloXXfD3k= +k8s.io/client-go v0.30.3/go.mod h1:8d4pf8vYu665/kUbsxWAQ/JDBNWqfFeZnvFiVdmx89U= +nhooyr.io/websocket v1.8.10 h1:mv4p+MnGrLDcPlBoWsvPP7XCzTYMXP9F9eIGoKbgx7Q= +nhooyr.io/websocket v1.8.10/go.mod h1:rN9OFWIUwuxg4fR5tELlYC04bXYowCP9GX47ivo2l+c= +sigs.k8s.io/yaml v1.4.0 h1:Mk1wCc2gy/F0THH0TAp1QYyJNzRm2KCLy3o5ASXVI5E= +sigs.k8s.io/yaml v1.4.0/go.mod h1:Ejl7/uTz7PSA4eKMyQCUTnhZYNmLIl+5c2lQPGR2BPY= +software.sslmate.com/src/go-pkcs12 v0.4.0 h1:H2g08FrTvSFKUj+D309j1DPfk5APnIdAQAB8aEykJ5k= +software.sslmate.com/src/go-pkcs12 v0.4.0/go.mod h1:Qiz0EyvDRJjjxGyUQa2cCNZn/wMyzrRJ/qcDXOQazLI= diff --git a/gokrazy/natlabapp.arm64/config.json b/gokrazy/natlabapp.arm64/config.json new file mode 100644 index 0000000000000..2577f61a56d31 --- /dev/null +++ b/gokrazy/natlabapp.arm64/config.json @@ -0,0 +1,27 @@ +{ + "Hostname": "natlabapp", + "Update": { + "NoPassword": true + }, + "SerialConsole": "ttyS0,115200", + "GokrazyPackages": [ + "github.com/gokrazy/gokrazy/cmd/dhcp" + ], + "Packages": [ + "github.com/gokrazy/serial-busybox", + "tailscale.com/cmd/tailscale", + "tailscale.com/cmd/tailscaled", + "tailscale.com/cmd/tta" + ], + "PackageConfig": 
{ + "tailscale.com/cmd/tailscale": { + "ExtraFilePaths": { + "/usr": "usr-dir" + } + } + }, + "KernelPackage": "github.com/gokrazy/kernel.arm64", + "FirmwarePackage": "github.com/gokrazy/kernel.arm64", + "EEPROMPackage": "", + "InternalCompatibilityFlags": {} +} diff --git a/gokrazy/natlabapp.arm64/usr-dir.tar b/gokrazy/natlabapp.arm64/usr-dir.tar new file mode 100644 index 0000000000000..6ef6cfbfd3e1a Binary files /dev/null and b/gokrazy/natlabapp.arm64/usr-dir.tar differ diff --git a/gokrazy/natlabapp/builddir/tailscale.com/go.mod b/gokrazy/natlabapp/builddir/tailscale.com/go.mod index 6231e86de24f5..7bdfd1e060e6c 100644 --- a/gokrazy/natlabapp/builddir/tailscale.com/go.mod +++ b/gokrazy/natlabapp/builddir/tailscale.com/go.mod @@ -1,8 +1,6 @@ module gokrazy/build/tsapp -go 1.22.0 - -toolchain go1.22.2 +go 1.23 replace tailscale.com => ../../../.. diff --git a/gokrazy/natlabapp/builddir/tailscale.com/go.sum b/gokrazy/natlabapp/builddir/tailscale.com/go.sum index b3b73e2d0e764..9123439ed88bf 100644 --- a/gokrazy/natlabapp/builddir/tailscale.com/go.sum +++ b/gokrazy/natlabapp/builddir/tailscale.com/go.sum @@ -32,10 +32,14 @@ github.com/aws/smithy-go v1.19.0 h1:KWFKQV80DpP3vJrrA9sVAHQ5gc2z8i4EzrLhLlWXcBM= github.com/aws/smithy-go v1.19.0/go.mod h1:NukqUGpCZIILqqiV0NIjeFh24kd/FAa4beRb6nbIUPE= github.com/bits-and-blooms/bitset v1.13.0 h1:bAQ9OPNFYbGHV6Nez0tmNI0RiEu7/hxlYJRUA0wFAVE= github.com/bits-and-blooms/bitset v1.13.0/go.mod h1:7hO7Gc7Pp1vODcmWvKMRA9BNmbv6a/7QIWpPxHddWR8= +github.com/coder/websocket v1.8.12 h1:5bUXkEPPIbewrnkU8LTCLVaxi4N4J8ahufH2vlo4NAo= +github.com/coder/websocket v1.8.12/go.mod h1:LNVeNrXQZfe5qhS9ALED3uA+l5pPqvwXg3CKoDBB2gs= github.com/coreos/go-iptables v0.7.1-0.20240112124308-65c67c9f46e6 h1:8h5+bWd7R6AYUslN6c6iuZWTKsKxUFDlpnmilO6R2n0= github.com/coreos/go-iptables v0.7.1-0.20240112124308-65c67c9f46e6/go.mod h1:Qe8Bv2Xik5FyTXwgIbLAnv2sWSBmvWdFETJConOQ//Q= github.com/creack/pty v1.1.21 h1:1/QdRyBaHHJP61QkWMXlOIBfsgdDeeKfK8SYVUWJKf0= 
github.com/creack/pty v1.1.21/go.mod h1:MOBLtS5ELjhRRrroQr9kyvTxUAFNvYEK993ew/Vr4O4= +github.com/creack/pty v1.1.23 h1:4M6+isWdcStXEf15G/RbrMPOQj1dZ7HPZCGwE4kOeP0= +github.com/creack/pty v1.1.23/go.mod h1:08sCNb52WyoAwi2QDyzUCTgcvVFhUzewun7wtTfvcwE= github.com/digitalocean/go-smbios v0.0.0-20180907143718-390a4f403a8e h1:vUmf0yezR0y7jJ5pceLHthLaYf4bA5T14B6q39S4q2Q= github.com/digitalocean/go-smbios v0.0.0-20180907143718-390a4f403a8e/go.mod h1:YTIHhz/QFSYnu/EhlF2SpU2Uk+32abacUYA5ZPljz1A= github.com/djherbis/times v1.6.0 h1:w2ctJ92J8fBvWPxugmXIv7Nz7Q3iDMKNx9v5ocVH20c= @@ -64,6 +68,8 @@ github.com/hdevalence/ed25519consensus v0.2.0 h1:37ICyZqdyj0lAZ8P4D1d1id3HqbbG1N github.com/hdevalence/ed25519consensus v0.2.0/go.mod h1:w3BHWjwJbFU29IRHL1Iqkw3sus+7FctEyM4RqDxYNzo= github.com/illarion/gonotify v1.0.1 h1:F1d+0Fgbq/sDWjj/r66ekjDG+IDeecQKUFH4wNwsoio= github.com/illarion/gonotify v1.0.1/go.mod h1:zt5pmDofZpU1f8aqlK0+95eQhoEAn/d4G4B/FjVW4jE= +github.com/illarion/gonotify/v2 v2.0.2 h1:oDH5yvxq9oiQGWUeut42uShcWzOy/hsT9E7pvO95+kQ= +github.com/illarion/gonotify/v2 v2.0.2/go.mod h1:38oIJTgFqupkEydkkClkbL6i5lXV/bxdH9do5TALPEE= github.com/insomniacslk/dhcp v0.0.0-20231206064809-8c70d406f6d2 h1:9K06NfxkBh25x56yVhWWlKFE8YpicaSfHwoV8SFbueA= github.com/insomniacslk/dhcp v0.0.0-20231206064809-8c70d406f6d2/go.mod h1:3A9PQ1cunSDF/1rbTq99Ts4pVnycWg+vlPkfeD2NLFI= github.com/jellydator/ttlcache/v3 v3.1.0 h1:0gPFG0IHHP6xyUyXq+JaD8fwkDCqgqwohXNJBcYE71g= @@ -116,6 +122,8 @@ github.com/tailscale/hujson v0.0.0-20221223112325-20486734a56a h1:SJy1Pu0eH1C29X github.com/tailscale/hujson v0.0.0-20221223112325-20486734a56a/go.mod h1:DFSS3NAGHthKo1gTlmEcSBiZrRJXi28rLNd/1udP1c8= github.com/tailscale/netlink v1.1.1-0.20211101221916-cabfb018fe85 h1:zrsUcqrG2uQSPhaUPjUQwozcRdDdSxxqhNgNZ3drZFk= github.com/tailscale/netlink v1.1.1-0.20211101221916-cabfb018fe85/go.mod h1:NzVQi3Mleb+qzq8VmcWpSkcSYxXIg0DkI6XDzpVkhJ0= +github.com/tailscale/netlink v1.1.1-0.20240822203006-4d49adab4de7 
h1:uFsXVBE9Qr4ZoF094vE6iYTLDl0qCiKzYXlL6UeWObU= +github.com/tailscale/netlink v1.1.1-0.20240822203006-4d49adab4de7/go.mod h1:NzVQi3Mleb+qzq8VmcWpSkcSYxXIg0DkI6XDzpVkhJ0= github.com/tailscale/peercred v0.0.0-20240214030740-b535050b2aa4 h1:Gz0rz40FvFVLTBk/K8UNAenb36EbDSnh+q7Z9ldcC8w= github.com/tailscale/peercred v0.0.0-20240214030740-b535050b2aa4/go.mod h1:phI29ccmHQBc+wvroosENp1IF9195449VDnFDhJ4rJU= github.com/tailscale/web-client-prebuilt v0.0.0-20240226180453-5db17b287bf1 h1:tdUdyPqJ0C97SJfjB9tW6EylTtreyee9C44de+UBG0g= diff --git a/gokrazy/tsapp/builddir/tailscale.com/go.mod b/gokrazy/tsapp/builddir/tailscale.com/go.mod index 6231e86de24f5..7bdfd1e060e6c 100644 --- a/gokrazy/tsapp/builddir/tailscale.com/go.mod +++ b/gokrazy/tsapp/builddir/tailscale.com/go.mod @@ -1,8 +1,6 @@ module gokrazy/build/tsapp -go 1.22.0 - -toolchain go1.22.2 +go 1.23 replace tailscale.com => ../../../.. diff --git a/health/health.go b/health/health.go index 10549b523a008..7bb9d18e9ced7 100644 --- a/health/health.go +++ b/health/health.go @@ -8,6 +8,7 @@ package health import ( "context" "errors" + "expvar" "fmt" "maps" "net/http" @@ -25,6 +26,7 @@ import ( "tailscale.com/util/mak" "tailscale.com/util/multierr" "tailscale.com/util/set" + "tailscale.com/util/usermetric" "tailscale.com/version" ) @@ -1062,7 +1064,7 @@ func (t *Tracker) updateBuiltinWarnablesLocked() { _ = t.lastStreamedMapResponse _ = t.lastMapRequestHeard - shouldClearMagicsockWarnings := false + shouldClearMagicsockWarnings := true for i := range t.MagicSockReceiveFuncs { f := &t.MagicSockReceiveFuncs[i] if f.missing { @@ -1070,6 +1072,7 @@ func (t *Tracker) updateBuiltinWarnablesLocked() { ArgMagicsockFunctionName: f.name, }) shouldClearMagicsockWarnings = false + break } } if shouldClearMagicsockWarnings { @@ -1202,6 +1205,18 @@ func (t *Tracker) ReceiveFuncStats(which ReceiveFunc) *ReceiveFuncStats { } func (t *Tracker) doOnceInit() { + metricHealthMessage.Set(metricHealthMessageLabel{ + Type: "warning", + }, 
expvar.Func(func() any { + if t.nil() { + return 0 + } + t.mu.Lock() + defer t.mu.Unlock() + t.updateBuiltinWarnablesLocked() + return int64(len(t.stringsLocked())) + })) + for i := range t.MagicSockReceiveFuncs { f := &t.MagicSockReceiveFuncs[i] f.name = (ReceiveFunc(i)).String() @@ -1232,3 +1247,14 @@ func (t *Tracker) checkReceiveFuncsLocked() { f.missing = true } } + +type metricHealthMessageLabel struct { + // TODO: break down by warnable.severity as well? + Type string +} + +var metricHealthMessage = usermetric.NewMultiLabelMap[metricHealthMessageLabel]( + "tailscaled_health_messages", + "gauge", + "Number of health messages broken down by type.", +) diff --git a/ipn/ipnauth/actor.go b/ipn/ipnauth/actor.go new file mode 100644 index 0000000000000..db3192c9100ad --- /dev/null +++ b/ipn/ipnauth/actor.go @@ -0,0 +1,47 @@ +// Copyright (c) Tailscale Inc & AUTHORS +// SPDX-License-Identifier: BSD-3-Clause + +package ipnauth + +import ( + "tailscale.com/ipn" +) + +// Actor is any actor using the [ipnlocal.LocalBackend]. +// +// It typically represents a specific OS user, indicating that an operation +// is performed on behalf of this user, should be evaluated against their +// access rights, and performed in their security context when applicable. +type Actor interface { + // UserID returns an OS-specific UID of the user represented by the receiver, + // or "" if the actor does not represent a specific user on a multi-user system. + // As of 2024-08-27, it is only used on Windows. + UserID() ipn.WindowsUserID + // Username returns the user name associated with the receiver, + // or "" if the actor does not represent a specific user. + Username() (string, error) + + // IsLocalSystem reports whether the actor is the Windows' Local System account. + // + // Deprecated: this method exists for compatibility with the current (as of 2024-08-27) + // permission model and will be removed as we progress on tailscale/corp#18342. 
+ IsLocalSystem() bool + + // IsLocalAdmin reports whether the actor has administrative access to the + // local machine, for whatever that means with respect to the current OS. + // + // The operatorUID is only used on Unix-like platforms and specifies the ID + // of a local user (in the os/user.User.Uid string form) who is allowed to + // operate tailscaled without being root or using sudo. + // + // Deprecated: this method exists for compatibility with the current (as of 2024-08-27) + // permission model and will be removed as we progress on tailscale/corp#18342. + IsLocalAdmin(operatorUID string) bool +} + +// ActorCloser is an optional interface that might be implemented by an [Actor] +// that must be closed when done to release the resources. +type ActorCloser interface { + // Close releases resources associated with the receiver. + Close() error +} diff --git a/ipn/ipnlocal/dnsconfig_test.go b/ipn/ipnlocal/dnsconfig_test.go index 02db18ffee728..19d8e8b86b5ee 100644 --- a/ipn/ipnlocal/dnsconfig_test.go +++ b/ipn/ipnlocal/dnsconfig_test.go @@ -50,6 +50,7 @@ func TestDNSConfigForNetmap(t *testing.T) { tests := []struct { name string nm *netmap.NetworkMap + expired bool peers []tailcfg.NodeView os string // version.OS value; empty means linux cloud cloudenv.Cloud @@ -327,12 +328,31 @@ func TestDNSConfigForNetmap(t *testing.T) { Routes: map[dnsname.FQDN][]*dnstype.Resolver{}, }, }, + { + name: "self_expired", + nm: &netmap.NetworkMap{ + Name: "myname.net", + SelfNode: (&tailcfg.Node{ + Addresses: ipps("100.101.101.101"), + }).View(), + }, + expired: true, + peers: nodeViews([]*tailcfg.Node{ + { + ID: 1, + Name: "peera.net", + Addresses: ipps("100.102.0.1", "100.102.0.2", "fe75::1001", "fe75::1002"), + }, + }), + prefs: &ipn.Prefs{}, + want: &dns.Config{}, + }, } for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { verOS := cmp.Or(tt.os, "linux") var log tstest.MemLogger - got := dnsConfigForNetmap(tt.nm, peersMap(tt.peers), tt.prefs.View(), log.Logf, 
verOS) + got := dnsConfigForNetmap(tt.nm, peersMap(tt.peers), tt.prefs.View(), tt.expired, log.Logf, verOS) if !reflect.DeepEqual(got, tt.want) { gotj, _ := json.MarshalIndent(got, "", "\t") wantj, _ := json.MarshalIndent(tt.want, "", "\t") diff --git a/ipn/ipnlocal/drive.go b/ipn/ipnlocal/drive.go index 02d3f8d70d0cf..5dcd1af37da1c 100644 --- a/ipn/ipnlocal/drive.go +++ b/ipn/ipnlocal/drive.go @@ -243,12 +243,21 @@ func (b *LocalBackend) driveSetSharesLocked(shares []*drive.Share) error { }, DriveSharesSet: true, }) - return b.pm.setPrefsLocked(prefs.View()) + return b.pm.setPrefsNoPermCheck(prefs.View()) } // driveNotifyShares notifies IPN bus listeners (e.g. Mac Application process) -// about the latest list of shares. +// about the latest list of shares, if and only if the shares have changed since +// the last time we notified. func (b *LocalBackend) driveNotifyShares(shares views.SliceView[*drive.Share, drive.ShareView]) { + b.lastNotifiedDriveSharesMu.Lock() + defer b.lastNotifiedDriveSharesMu.Unlock() + if b.lastNotifiedDriveShares != nil && driveShareViewsEqual(b.lastNotifiedDriveShares, shares) { + // shares are unchanged since last notification, don't bother notifying + return + } + b.lastNotifiedDriveShares = &shares + // Ensures shares is not nil to distinguish "no shares" from "not notifying shares" if shares.IsNil() { shares = views.SliceOfViews(make([]*drive.Share, 0)) @@ -265,11 +274,8 @@ func (b *LocalBackend) driveNotifyCurrentSharesLocked() { shares = b.pm.prefs.DriveShares() } - lastNotified := b.lastNotifiedDriveShares.Load() - if lastNotified == nil || !driveShareViewsEqual(lastNotified, shares) { - // Do the below on a goroutine to avoid deadlocking on b.mu in b.send(). - go b.driveNotifyShares(shares) - } + // Do the below on a goroutine to avoid deadlocking on b.mu in b.send(). 
+ go b.driveNotifyShares(shares) } func driveShareViewsEqual(a *views.SliceView[*drive.Share, drive.ShareView], b views.SliceView[*drive.Share, drive.ShareView]) bool { diff --git a/ipn/ipnlocal/local.go b/ipn/ipnlocal/local.go index 2421d898d2eb8..dd64b80951025 100644 --- a/ipn/ipnlocal/local.go +++ b/ipn/ipnlocal/local.go @@ -106,6 +106,7 @@ import ( "tailscale.com/util/systemd" "tailscale.com/util/testenv" "tailscale.com/util/uniq" + "tailscale.com/util/usermetric" "tailscale.com/version" "tailscale.com/version/distro" "tailscale.com/wgengine" @@ -117,6 +118,9 @@ import ( "tailscale.com/wgengine/wgcfg/nmcfg" ) +var metricAdvertisedRoutes = usermetric.NewGauge( + "tailscaled_advertised_routes", "Number of advertised network routes (e.g. by a subnet router)") + var controlDebugFlags = getControlDebugFlags() func getControlDebugFlags() []string { @@ -157,6 +161,7 @@ func RegisterNewSSHServer(fn newSSHServerFunc) { type watchSession struct { ch chan *ipn.Notify sessionID string + cancel func() // call to signal that the session must be terminated } // LocalBackend is the glue between the major pieces of the Tailscale @@ -263,6 +268,7 @@ type LocalBackend struct { keyExpired bool authURL string // non-empty if not Running authURLTime time.Time // when the authURL was received from the control server + interact bool // indicates whether a user requested interactive login egg bool prevIfState *netmon.State peerAPIServer *peerAPIServer // or nil @@ -287,7 +293,7 @@ type LocalBackend struct { componentLogUntil map[string]componentLogState // c2nUpdateStatus is the status of c2n-triggered client update. c2nUpdateStatus updateStatus - currentUser ipnauth.WindowsToken + currentUser ipnauth.Actor selfUpdateProgress []ipnstate.UpdateProgress lastSelfUpdateState ipnstate.SelfUpdateStatus // capForcedNetfilter is the netfilter that control instructs Linux clients @@ -332,9 +338,12 @@ type LocalBackend struct { // Last ClientVersion received in MapResponse, guarded by mu. 
lastClientVersion *tailcfg.ClientVersion + // lastNotifiedDriveSharesMu guards lastNotifiedDriveShares + lastNotifiedDriveSharesMu sync.Mutex + // lastNotifiedDriveShares keeps track of the last set of shares that we // notified about. - lastNotifiedDriveShares atomic.Pointer[views.SliceView[*drive.Share, drive.ShareView]] + lastNotifiedDriveShares *views.SliceView[*drive.Share, drive.ShareView] // outgoingFiles keeps track of Taildrop outgoing files keyed to their OutgoingFile.ID outgoingFiles map[string]*ipn.OutgoingFile @@ -588,6 +597,15 @@ func (b *LocalBackend) SetComponentDebugLogging(component string, until time.Tim return nil } +// GetDNSOSConfig returns the base OS DNS configuration, as seen by the DNS manager. +func (b *LocalBackend) GetDNSOSConfig() (dns.OSConfig, error) { + manager, ok := b.sys.DNSManager.GetOK() + if !ok { + return dns.OSConfig{}, errors.New("DNS manager not available") + } + return manager.GetBaseConfig() +} + // GetComponentDebugLogging gets the time that component's debug logging is // enabled until, or the zero time if component's time is not currently // enabled. @@ -709,14 +727,26 @@ func (b *LocalBackend) linkChange(delta *netmon.ChangeDelta) { if delta.Major && shouldAutoExitNode() { b.refreshAutoExitNode = true } - // If the PAC-ness of the network changed, reconfig wireguard+route to - // add/remove subnets. + + var needReconfig bool + // If the network changed and we're using an exit node and allowing LAN access, we may need to reconfigure. + if delta.Major && b.pm.CurrentPrefs().ExitNodeID() != "" && b.pm.CurrentPrefs().ExitNodeAllowLANAccess() { + b.logf("linkChange: in state %v; updating LAN routes", b.state) + needReconfig = true + } + // If the PAC-ness of the network changed, reconfig wireguard+route to add/remove subnets. 
if hadPAC != ifst.HasPAC() { b.logf("linkChange: in state %v; PAC changed from %v->%v", b.state, hadPAC, ifst.HasPAC()) + needReconfig = true + } + if needReconfig { switch b.state { case ipn.NoState, ipn.Stopped: // Do nothing. default: + // TODO(raggi,tailscale/corp#22574): authReconfig should be refactored such that we can call the + // necessary operations here and avoid the need for asynchronous behavior that is racy and hard + // to test here, and do less extra work in these conditions. go b.authReconfig() } } @@ -1109,6 +1139,8 @@ func (b *LocalBackend) WhoIsNodeKey(k key.NodePublic) (n tailcfg.NodeView, u tai return n, u, false } +var debugWhoIs = envknob.RegisterBool("TS_DEBUG_WHOIS") + // WhoIs reports the node and user who owns the node with the given IP:port. // If the IP address is a Tailscale IP, the provided port may be 0. // @@ -1124,6 +1156,14 @@ func (b *LocalBackend) WhoIs(proto string, ipp netip.AddrPort) (n tailcfg.NodeVi b.mu.Lock() defer b.mu.Unlock() + failf := func(format string, args ...any) (tailcfg.NodeView, tailcfg.UserProfile, bool) { + if debugWhoIs() { + args = append([]any{proto, ipp}, args...) + b.logf("whois(%q, %v) :"+format, args...) 
+ } + return zero, u, false + } + nid, ok := b.nodeByAddr[ipp.Addr()] if !ok { var ip netip.Addr @@ -1144,15 +1184,15 @@ func (b *LocalBackend) WhoIs(proto string, ipp netip.AddrPort) (n tailcfg.NodeVi } } if !ok { - return zero, u, false + return failf("no IP found in ProxyMapper for %v", ipp) } nid, ok = b.nodeByAddr[ip] if !ok { - return zero, u, false + return failf("no node for proxymapped IP %v", ip) } } if b.netMap == nil { - return zero, u, false + return failf("no netmap") } n, ok = b.peers[nid] if !ok { @@ -1164,7 +1204,7 @@ func (b *LocalBackend) WhoIs(proto string, ipp netip.AddrPort) (n tailcfg.NodeVi } u, ok = b.netMap.UserProfiles[n.User()] if !ok { - return zero, u, false + return failf("no userprofile for node %v", n.Key()) } return n, u, true } @@ -1323,19 +1363,21 @@ func (b *LocalBackend) SetControlClientStatus(c controlclient.Client, st control prefs.Persist = st.Persist.AsStruct() } } - if st.URL != "" { - b.authURL = st.URL - b.authURLTime = b.clock.Now() - } - if (wasBlocked || b.seamlessRenewalEnabled()) && st.LoginFinished() { - // Interactive login finished successfully (URL visited). - // After an interactive login, the user always wants - // WantRunning. - if !prefs.WantRunning || prefs.LoggedOut { + if st.LoginFinished() { + if b.authURL != "" { + b.resetAuthURLLocked() + // Interactive login finished successfully (URL visited). + // After an interactive login, the user always wants + // WantRunning. + if !prefs.WantRunning { + prefs.WantRunning = true + prefsChanged = true + } + } + if prefs.LoggedOut { + prefs.LoggedOut = false prefsChanged = true } - prefs.WantRunning = true - prefs.LoggedOut = false } if shouldAutoExitNode() { // Re-evaluate exit node suggestion in case circumstances have changed. 
@@ -1452,7 +1494,7 @@ func (b *LocalBackend) SetControlClientStatus(c controlclient.Client, st control } if st.URL != "" { b.logf("Received auth URL: %.20v...", st.URL) - b.popBrowserAuthNow() + b.setAuthURL(st.URL) } b.stateMachine() // This is currently (2020-07-28) necessary; conditionally disabling it is fragile! @@ -1868,6 +1910,14 @@ func (b *LocalBackend) Start(opts ipn.Options) error { opts.AuthKey = v } + if b.state != ipn.Running && b.conf == nil && opts.AuthKey == "" { + sysak, _ := syspolicy.GetString(syspolicy.AuthKey, "") + if sysak != "" { + b.logf("Start: setting opts.AuthKey by syspolicy, len=%v", len(sysak)) + opts.AuthKey = strings.TrimSpace(sysak) + } + } + hostinfo := hostinfo.New() applyConfigToHostinfo(hostinfo, b.conf) hostinfo.BackendLogID = b.backendLogID.String() @@ -2089,9 +2139,7 @@ func (b *LocalBackend) updateFilterLocked(netMap *netmap.NetworkMap, prefs ipn.P } } if prefs.Valid() { - ar := prefs.AdvertiseRoutes() - for i := range ar.Len() { - r := ar.At(i) + for _, r := range prefs.AdvertiseRoutes().All() { if r.Bits() == 0 { // When offering a default route to the world, we // filter out locally reachable LANs, so that the @@ -2306,8 +2354,8 @@ func packetFilterPermitsUnlockedNodes(peers map[tailcfg.NodeID]tailcfg.NodeView, continue } numUnlocked++ - for i := range p.AllowedIPs().Len() { // not only addresses! - b.AddPrefix(p.AllowedIPs().At(i)) + for _, pfx := range p.AllowedIPs().All() { // not only addresses! 
+ b.AddPrefix(pfx) } } if numUnlocked == 0 { @@ -2609,7 +2657,15 @@ func (b *LocalBackend) WatchNotifications(ctx context.Context, mask ipn.NotifyWa } } - mak.Set(&b.notifyWatchers, sessionID, &watchSession{ch, sessionID}) + ctx, cancel := context.WithCancel(ctx) + defer cancel() + + session := &watchSession{ + ch: ch, + sessionID: sessionID, + cancel: cancel, + } + mak.Set(&b.notifyWatchers, sessionID, session) b.mu.Unlock() defer func() { @@ -2640,8 +2696,6 @@ func (b *LocalBackend) WatchNotifications(ctx context.Context, mask ipn.NotifyWa // request every 2 seconds. // TODO(bradfitz): plumb this further and only send a Notify on change. if mask&ipn.NotifyWatchEngineUpdates != 0 { - ctx, cancel := context.WithCancel(ctx) - defer cancel() go b.pollRequestEngineStatus(ctx) } @@ -2649,28 +2703,12 @@ func (b *LocalBackend) WatchNotifications(ctx context.Context, mask ipn.NotifyWa // TODO(marwan-at-work): streaming background logs? defer b.DeleteForegroundSession(sessionID) - var lastURLPop string // to dup suppress URL popups for { select { case <-ctx.Done(): return - case n, ok := <-ch: - // URLs flow into Notify.BrowseToURL via two means: - // 1. From MapResponse.PopBrowserURL, which already says they're dup - // suppressed if identical, and that's done by the controlclient, - // so this added later adds nothing. - // - // 2. From the controlclient auth routes, on register. This makes sure - // we don't tell clients (mac, windows, android) to pop the same URL - // multiple times. - if n != nil && n.BrowseToURL != nil { - if v := *n.BrowseToURL; v == lastURLPop { - n.BrowseToURL = nil - } else { - lastURLPop = v - } - } - if !ok || !fn(n) { + case n := <-ch: + if !fn(n) { return } } @@ -2800,20 +2838,52 @@ func (b *LocalBackend) sendFileNotify() { b.send(n) } -// popBrowserAuthNow shuts down the data plane and sends an auth URL -// to the connected frontend, if any. 
-func (b *LocalBackend) popBrowserAuthNow() { +// setAuthURL sets the authURL and triggers [LocalBackend.popBrowserAuthNow] if the URL has changed. +// This method is called when a new authURL is received from the control plane, meaning that either a user +// has started a new interactive login (e.g., by running `tailscale login` or clicking Login in the GUI), +// or the control plane was unable to authenticate this node non-interactively (e.g., due to key expiration). +// b.interact indicates whether an interactive login is in progress. +// If url is "", it is equivalent to calling [LocalBackend.resetAuthURLLocked] with b.mu held. +func (b *LocalBackend) setAuthURL(url string) { + var popBrowser, keyExpired bool + b.mu.Lock() - url := b.authURL - expired := b.keyExpired + switch { + case url == "": + b.resetAuthURLLocked() + case b.authURL != url: + b.authURL = url + b.authURLTime = b.clock.Now() + // Always open the browser if the URL has changed. + // This includes the transition from no URL -> some URL. + popBrowser = true + default: + // Otherwise, only open it if the user explicitly requests interactive login. + popBrowser = b.interact + } + keyExpired = b.keyExpired + // Consume the StartLoginInteractive call, if any, that caused the control + // plane to send us this URL. + b.interact = false b.mu.Unlock() - b.logf("popBrowserAuthNow: url=%v, key-expired=%v, seamless-key-renewal=%v", url != "", expired, b.seamlessRenewalEnabled()) + if popBrowser { + b.popBrowserAuthNow(url, keyExpired) + } +} + +// popBrowserAuthNow shuts down the data plane and sends an auth URL +// to the connected frontend, if any. +// keyExpired is the value of b.keyExpired upon entry and indicates +// whether the node's key has expired. +// It must not be called with b.mu held. 
+func (b *LocalBackend) popBrowserAuthNow(url string, keyExpired bool) { + b.logf("popBrowserAuthNow: url=%v, key-expired=%v, seamless-key-renewal=%v", url != "", keyExpired, b.seamlessRenewalEnabled()) // Deconfigure the local network data plane if: // - seamless key renewal is not enabled; // - key is expired (in which case tailnet connectivity is down anyway). - if !b.seamlessRenewalEnabled() || expired { + if !b.seamlessRenewalEnabled() || keyExpired { b.blockEngineUpdates(true) b.stopEngineAndWait() } @@ -3081,13 +3151,13 @@ func (b *LocalBackend) InServerMode() bool { return b.pm.CurrentPrefs().ForceDaemon() } -// CheckIPNConnectionAllowed returns an error if the identity in ci should not +// CheckIPNConnectionAllowed returns an error if the specified actor should not // be allowed to connect or make requests to the LocalAPI currently. // -// Currently (as of 2022-11-23), this is only used on Windows to check if -// we started in server mode and ci is from an identity other than the one -// that started the server. -func (b *LocalBackend) CheckIPNConnectionAllowed(ci *ipnauth.ConnIdentity) error { +// Currently (as of 2024-08-26), this is only used on Windows. +// We plan to remove it as part of the multi-user and unattended mode improvements +// as we progress on tailscale/corp#18342. +func (b *LocalBackend) CheckIPNConnectionAllowed(actor ipnauth.Actor) error { b.mu.Lock() defer b.mu.Unlock() serverModeUid := b.pm.CurrentUserID() @@ -3102,14 +3172,11 @@ func (b *LocalBackend) CheckIPNConnectionAllowed(ci *ipnauth.ConnIdentity) error // Always allow Windows SYSTEM user to connect, // even if Tailscale is currently being used by another user. 
- if tok, err := ci.WindowsToken(); err == nil { - defer tok.Close() - if tok.IsLocalSystem() { - return nil - } + if actor.IsLocalSystem() { + return nil } - uid := ci.WindowsUserID() + uid := actor.UserID() if uid == "" { return errors.New("empty user uid in connection identity") } @@ -3139,16 +3206,25 @@ func (b *LocalBackend) StartLoginInteractive(ctx context.Context) error { panic("LocalBackend.assertClient: b.cc == nil") } url := b.authURL + keyExpired := b.keyExpired timeSinceAuthURLCreated := b.clock.Since(b.authURLTime) - cc := b.cc - b.mu.Unlock() - b.logf("StartLoginInteractive: url=%v", url != "") - // Only use an authURL if it was sent down from control in the last // 6 days and 23 hours. Avoids using a stale URL that is no longer valid // server-side. Server-side URLs expire after 7 days. - if url != "" && timeSinceAuthURLCreated < ((7*24*time.Hour)-(1*time.Hour)) { - b.popBrowserAuthNow() + hasValidURL := url != "" && timeSinceAuthURLCreated < ((7*24*time.Hour)-(1*time.Hour)) + if !hasValidURL { + // A user wants to log in interactively, but we don't have a valid authURL. + // Set a flag to indicate that interactive login is in progress, forcing + // a BrowseToURL notification once the authURL becomes available. + b.interact = true + } + cc := b.cc + b.mu.Unlock() + + b.logf("StartLoginInteractive: url=%v", hasValidURL) + + if hasValidURL { + b.popBrowserAuthNow(url, keyExpired) } else { cc.Login(b.loginFlags | controlclient.LoginInteractive) } @@ -3288,18 +3364,14 @@ func (b *LocalBackend) shouldUploadServices() bool { // unattended mode. The user must disable unattended mode before the user can be // changed. // -// On non-multi-user systems, the token should be set to nil. +// On non-multi-user systems, the user should be set to nil. // -// SetCurrentUser returns the ipn.WindowsUserID associated with token +// SetCurrentUser returns the ipn.WindowsUserID associated with the user // when successful. 
-func (b *LocalBackend) SetCurrentUser(token ipnauth.WindowsToken) (ipn.WindowsUserID, error) { +func (b *LocalBackend) SetCurrentUser(actor ipnauth.Actor) (ipn.WindowsUserID, error) { var uid ipn.WindowsUserID - if token != nil { - var err error - uid, err = token.UID() - if err != nil { - return "", err - } + if actor != nil { + uid = actor.UserID() } unlock := b.lockAndGetUnlock() @@ -3308,13 +3380,11 @@ func (b *LocalBackend) SetCurrentUser(token ipnauth.WindowsToken) (ipn.WindowsUs if b.pm.CurrentUserID() == uid { return uid, nil } - if err := b.pm.SetCurrentUserID(uid); err != nil { - return uid, nil - } - if b.currentUser != nil { - b.currentUser.Close() + b.pm.SetCurrentUserID(uid) + if c, ok := b.currentUser.(ipnauth.ActorCloser); ok { + c.Close() } - b.currentUser = token + b.currentUser = actor b.resetForProfileChangeLockedOnEntry(unlock) return uid, nil } @@ -3971,7 +4041,7 @@ func (b *LocalBackend) authReconfig() { disableSubnetsIfPAC := nm.HasCap(tailcfg.NodeAttrDisableSubnetsIfPAC) userDialUseRoutes := nm.HasCap(tailcfg.NodeAttrUserDialUseRoutes) dohURL, dohURLOK := exitNodeCanProxyDNS(nm, b.peers, prefs.ExitNodeID()) - dcfg := dnsConfigForNetmap(nm, b.peers, prefs, b.logf, version.OS()) + dcfg := dnsConfigForNetmap(nm, b.peers, prefs, b.keyExpired, b.logf, version.OS()) // If the current node is an app connector, ensure the app connector machine is started b.reconfigAppConnectorLocked(nm, prefs) b.mu.Unlock() @@ -4071,10 +4141,23 @@ func shouldUseOneCGNATRoute(logf logger.Logf, controlKnobs *controlknobs.Knobs, // // The versionOS is a Tailscale-style version ("iOS", "macOS") and not // a runtime.GOOS. 
-func dnsConfigForNetmap(nm *netmap.NetworkMap, peers map[tailcfg.NodeID]tailcfg.NodeView, prefs ipn.PrefsView, logf logger.Logf, versionOS string) *dns.Config { +func dnsConfigForNetmap(nm *netmap.NetworkMap, peers map[tailcfg.NodeID]tailcfg.NodeView, prefs ipn.PrefsView, selfExpired bool, logf logger.Logf, versionOS string) *dns.Config { if nm == nil { return nil } + + // If the current node's key is expired, then we don't program any DNS + // configuration into the operating system. This ensures that if the + // DNS configuration specifies a DNS server that is only reachable over + // Tailscale, we don't break connectivity for the user. + // + // TODO(andrew-d): this also stops returning anything from quad-100; we + // could do the same thing as having "CorpDNS: false" and keep that but + // not program the OS? + if selfExpired { + return &dns.Config{} + } + dcfg := &dns.Config{ Routes: map[dnsname.FQDN][]*dnstype.Resolver{}, Hosts: map[dnsname.FQDN][]netip.Addr{}, @@ -4099,15 +4182,14 @@ func dnsConfigForNetmap(nm *netmap.NetworkMap, peers map[tailcfg.NodeID]tailcfg. return // TODO: propagate error? 
} var have4 bool - for i := range addrs.Len() { - if addrs.At(i).Addr().Is4() { + for _, addr := range addrs.All() { + if addr.Addr().Is4() { have4 = true break } } var ips []netip.Addr - for i := range addrs.Len() { - addr := addrs.At(i) + for _, addr := range addrs.All() { if selfV6Only { if addr.Addr().Is6() { ips = append(ips, addr.Addr()) @@ -4395,8 +4477,7 @@ func (b *LocalBackend) initPeerAPIListener() { b.peerAPIServer = ps isNetstack := b.sys.IsNetstack() - for i := range addrs.Len() { - a := addrs.At(i) + for i, a := range addrs.All() { var ln net.Listener var err error skipListen := i > 0 && isNetstack @@ -4602,6 +4683,9 @@ func (b *LocalBackend) routerConfig(cfg *wgcfg.Config, prefs ipn.PrefsView, oneC if slices.ContainsFunc(rs.LocalAddrs, tsaddr.PrefixIs4) { rs.Routes = append(rs.Routes, netip.PrefixFrom(tsaddr.TailscaleServiceIP(), 32)) } + if slices.ContainsFunc(rs.LocalAddrs, tsaddr.PrefixIs6) { + rs.Routes = append(rs.Routes, netip.PrefixFrom(tsaddr.TailscaleServiceIPv6(), 128)) + } return rs } @@ -4629,6 +4713,15 @@ func (b *LocalBackend) applyPrefsToHostinfoLocked(hi *tailcfg.Hostinfo, prefs ip hi.ShieldsUp = prefs.ShieldsUp() hi.AllowsUpdate = envknob.AllowsRemoteUpdate() || prefs.AutoUpdate().Apply.EqualBool(true) + // count routes without exit node routes + var routes int64 + for _, route := range hi.RoutableIPs { + if route.Bits() != 0 { + routes++ + } + } + metricAdvertisedRoutes.Set(float64(routes)) + var sshHostKeys []string if prefs.RunSSH() && envknob.CanSSHD() { // TODO(bradfitz): this is called with b.mu held. Not ideal. @@ -4683,8 +4776,7 @@ func (b *LocalBackend) enterStateLockedOnEntry(newState ipn.State, unlock unlock activeLogin := b.activeLogin authURL := b.authURL if newState == ipn.Running { - b.authURL = "" - b.authURLTime = time.Time{} + b.resetAuthURLLocked() // Start a captive portal detection loop if none has been // started. 
Create a new context if none is present, since it @@ -4966,7 +5058,7 @@ func (b *LocalBackend) resetControlClientLocked() controlclient.Client { return nil } - b.authURL = "" + b.resetAuthURLLocked() // When we clear the control client, stop any outstanding netmap expiry // timer; synthesizing a new netmap while we don't have a control @@ -4986,6 +5078,13 @@ func (b *LocalBackend) resetControlClientLocked() controlclient.Client { return prev } +// resetAuthURLLocked resets authURL, canceling any pending interactive login. +func (b *LocalBackend) resetAuthURLLocked() { + b.authURL = "" + b.authURLTime = time.Time{} + b.interact = false +} + // ResetForClientDisconnect resets the backend for GUI clients running // in interactive (non-headless) mode. This is currently used only by // Windows. This causes all state to be cleared, lest an unrelated user @@ -5007,12 +5106,13 @@ func (b *LocalBackend) ResetForClientDisconnect() { b.setNetMapLocked(nil) b.pm.Reset() if b.currentUser != nil { - b.currentUser.Close() + if c, ok := b.currentUser.(ipnauth.ActorCloser); ok { + c.Close() + } b.currentUser = nil } b.keyExpired = false - b.authURL = "" - b.authURLTime = time.Time{} + b.resetAuthURLLocked() b.activeLogin = "" b.resetDialPlan() b.setAtomicValuesFromPrefsLocked(ipn.PrefsView{}) @@ -5257,8 +5357,8 @@ func (b *LocalBackend) setNetMapLocked(nm *netmap.NetworkMap) { b.nodeByAddr[k] = 0 } addNode := func(n tailcfg.NodeView) { - for i := range n.Addresses().Len() { - if ipp := n.Addresses().At(i); ipp.IsSingleIP() { + for _, ipp := range n.Addresses().All() { + if ipp.IsSingleIP() { b.nodeByAddr[ipp.Addr()] = n.ID() } } @@ -6550,7 +6650,7 @@ func (b *LocalBackend) ResetAuth() error { if err := b.clearMachineKeyLocked(); err != nil { return err } - if err := b.pm.DeleteAllProfiles(); err != nil { + if err := b.pm.DeleteAllProfilesForUser(); err != nil { return err } b.resetDialPlan() // always reset if we're removing everything diff --git a/ipn/ipnlocal/local_test.go 
b/ipn/ipnlocal/local_test.go index b5c22e54be9b5..e4091ef02966e 100644 --- a/ipn/ipnlocal/local_test.go +++ b/ipn/ipnlocal/local_test.go @@ -1283,7 +1283,7 @@ func TestDNSConfigForNetmapForExitNodeConfigs(t *testing.T) { } prefs := &ipn.Prefs{ExitNodeID: tc.exitNode, CorpDNS: true} - got := dnsConfigForNetmap(nm, peersMap(tc.peers), prefs.View(), t.Logf, "") + got := dnsConfigForNetmap(nm, peersMap(tc.peers), prefs.View(), false, t.Logf, "") if !resolversEqual(t, got.DefaultResolvers, tc.wantDefaultResolvers) { t.Errorf("DefaultResolvers: got %#v, want %#v", got.DefaultResolvers, tc.wantDefaultResolvers) } @@ -2654,7 +2654,7 @@ func TestOnTailnetDefaultAutoUpdate(t *testing.T) { b.hostinfo.Container = tt.container p := ipn.NewPrefs() p.AutoUpdate.Apply = tt.before - if err := b.pm.setPrefsLocked(p.View()); err != nil { + if err := b.pm.setPrefsNoPermCheck(p.View()); err != nil { t.Fatal(err) } b.onTailnetDefaultAutoUpdate(tt.tailnetDefault) diff --git a/ipn/ipnlocal/network-lock.go b/ipn/ipnlocal/network-lock.go index 593c5493cde61..d20bf94eb971a 100644 --- a/ipn/ipnlocal/network-lock.go +++ b/ipn/ipnlocal/network-lock.go @@ -53,7 +53,7 @@ type tkaState struct { profile ipn.ProfileID authority *tka.Authority storage *tka.FS - filtered []ipnstate.TKAFilteredPeer + filtered []ipnstate.TKAPeer } // tkaFilterNetmapLocked checks the signatures on each node key, dropping @@ -99,7 +99,7 @@ func (b *LocalBackend) tkaFilterNetmapLocked(nm *netmap.NetworkMap) { // nm.Peers is ordered, so deletion must be order-preserving. 
if len(toDelete) > 0 || len(obsoleteByRotation) > 0 { peers := make([]tailcfg.NodeView, 0, len(nm.Peers)) - filtered := make([]ipnstate.TKAFilteredPeer, 0, len(toDelete)+len(obsoleteByRotation)) + filtered := make([]ipnstate.TKAPeer, 0, len(toDelete)+len(obsoleteByRotation)) for i, p := range nm.Peers { if !toDelete[i] && !obsoleteByRotation.Contains(p.Key()) { peers = append(peers, p) @@ -108,20 +108,7 @@ func (b *LocalBackend) tkaFilterNetmapLocked(nm *netmap.NetworkMap) { b.logf("Network lock is dropping peer %v(%v) due to key rotation", p.ID(), p.StableID()) } // Record information about the node we filtered out. - fp := ipnstate.TKAFilteredPeer{ - Name: p.Name(), - ID: p.ID(), - StableID: p.StableID(), - TailscaleIPs: make([]netip.Addr, p.Addresses().Len()), - NodeKey: p.Key(), - } - for i := range p.Addresses().Len() { - addr := p.Addresses().At(i) - if addr.IsSingleIP() && tsaddr.IsTailscaleIP(addr.Addr()) { - fp.TailscaleIPs[i] = addr.Addr() - } - } - filtered = append(filtered, fp) + filtered = append(filtered, tkaStateFromPeer(p)) } } nm.Peers = peers @@ -188,23 +175,24 @@ func (r *rotationTracker) addRotationDetails(np key.NodePublic, d *tka.RotationD // obsoleteKeys returns the set of node keys that are obsolete due to key rotation. func (r *rotationTracker) obsoleteKeys() set.Set[key.NodePublic] { for _, v := range r.byWrappingKey { + // Do not consider signatures for keys that have been marked as obsolete + // by another signature. + v = slices.DeleteFunc(v, func(rd sigRotationDetails) bool { + return r.obsolete.Contains(rd.np) + }) + if len(v) == 0 { + continue + } + // If there are multiple rotation signatures with the same wrapping // pubkey, we need to decide which one is the "latest", and keep it. 
// The signature with the largest number of previous keys is likely to - // be the latest, unless it has been marked as obsolete (rotated out) by - // another signature (which might happen in the future if we start - // compacting long rotated signature chains). + // be the latest. slices.SortStableFunc(v, func(a, b sigRotationDetails) int { - // Group all obsolete keys after non-obsolete keys. - if ao, bo := r.obsolete.Contains(a.np), r.obsolete.Contains(b.np); ao != bo { - if ao { - return 1 - } - return -1 - } // Sort by decreasing number of previous keys. return b.numPrevKeys - a.numPrevKeys }) + // If there are several signatures with the same number of previous // keys, we cannot determine which one is the latest, so all of them are // rejected for safety. @@ -255,7 +243,10 @@ func (b *LocalBackend) tkaSyncIfNeeded(nm *netmap.NetworkMap, prefs ipn.PrefsVie b.logf("tkaSyncIfNeeded: enabled=%v, head=%v", nm.TKAEnabled, nm.TKAHead) } - ourNodeKey := prefs.Persist().PublicNodeKey() + ourNodeKey, ok := prefs.Persist().PublicNodeKeyOK() + if !ok { + return errors.New("tkaSyncIfNeeded: no node key in prefs") + } isEnabled := b.tka != nil wantEnabled := nm.TKAEnabled @@ -543,11 +534,20 @@ func (b *LocalBackend) NetworkLockStatus() *ipnstate.NetworkLockStatus { } } - filtered := make([]*ipnstate.TKAFilteredPeer, len(b.tka.filtered)) + filtered := make([]*ipnstate.TKAPeer, len(b.tka.filtered)) for i := range len(filtered) { filtered[i] = b.tka.filtered[i].Clone() } + var visible []*ipnstate.TKAPeer + if b.netMap != nil { + visible = make([]*ipnstate.TKAPeer, len(b.netMap.Peers)) + for i, p := range b.netMap.Peers { + s := tkaStateFromPeer(p) + visible[i] = &s + } + } + stateID1, _ := b.tka.authority.StateIDs() return &ipnstate.NetworkLockStatus{ @@ -559,10 +559,32 @@ func (b *LocalBackend) NetworkLockStatus() *ipnstate.NetworkLockStatus { NodeKeySignature: nodeKeySignature, TrustedKeys: outKeys, FilteredPeers: filtered, + VisiblePeers: visible, StateID: stateID1, } } 
+func tkaStateFromPeer(p tailcfg.NodeView) ipnstate.TKAPeer { + fp := ipnstate.TKAPeer{ + Name: p.Name(), + ID: p.ID(), + StableID: p.StableID(), + TailscaleIPs: make([]netip.Addr, 0, p.Addresses().Len()), + NodeKey: p.Key(), + } + for i := range p.Addresses().Len() { + addr := p.Addresses().At(i) + if addr.IsSingleIP() && tsaddr.IsTailscaleIP(addr.Addr()) { + fp.TailscaleIPs = append(fp.TailscaleIPs, addr.Addr()) + } + } + var decoded tka.NodeKeySignature + if err := decoded.Unserialize(p.KeySignature().AsSlice()); err == nil { + fp.NodeKeySignature = decoded + } + return fp +} + // NetworkLockInit enables network-lock for the tailnet, with the tailnets' // key authority initialized to trust the provided keys. // diff --git a/ipn/ipnlocal/network-lock_test.go b/ipn/ipnlocal/network-lock_test.go index c3576dfb0c709..4b79136c81ea9 100644 --- a/ipn/ipnlocal/network-lock_test.go +++ b/ipn/ipnlocal/network-lock_test.go @@ -667,6 +667,31 @@ func TestTKAFilterNetmap(t *testing.T) { if diff := cmp.Diff(want, nm.Peers, nodePubComparer); diff != "" { t.Errorf("filtered netmap differs (-want, +got):\n%s", diff) } + + // Confirm that repeated rotation works correctly. 
+ for range 100 { + n5Rotated, n5RotatedSig = resign(n5nl, n5RotatedSig) + } + + n51, n51Sig := resign(n5nl, n5RotatedSig) + + nm = &netmap.NetworkMap{ + Peers: nodeViews([]*tailcfg.Node{ + {ID: 1, Key: n1.Public(), KeySignature: n1GoodSig.Serialize()}, + {ID: 5, Key: n5Rotated.Public(), KeySignature: n5RotatedSig}, // rotated + {ID: 51, Key: n51.Public(), KeySignature: n51Sig}, + }), + } + + b.tkaFilterNetmapLocked(nm) + + want = nodeViews([]*tailcfg.Node{ + {ID: 1, Key: n1.Public(), KeySignature: n1GoodSig.Serialize()}, + {ID: 51, Key: n51.Public(), KeySignature: n51Sig}, + }) + if diff := cmp.Diff(want, nm.Peers, nodePubComparer); diff != "" { + t.Errorf("filtered netmap differs (-want, +got):\n%s", diff) + } } func TestTKADisable(t *testing.T) { diff --git a/ipn/ipnlocal/profiles.go b/ipn/ipnlocal/profiles.go index 05286665edfe3..b13f921d66095 100644 --- a/ipn/ipnlocal/profiles.go +++ b/ipn/ipnlocal/profiles.go @@ -17,19 +17,19 @@ import ( "tailscale.com/envknob" "tailscale.com/health" "tailscale.com/ipn" + "tailscale.com/tailcfg" "tailscale.com/types/logger" "tailscale.com/util/clientmetric" ) -var errAlreadyMigrated = errors.New("profile migration already completed") - var debug = envknob.RegisterBool("TS_DEBUG_PROFILES") -// profileManager is a wrapper around a StateStore that manages +// profileManager is a wrapper around an [ipn.StateStore] that manages // multiple profiles and the current profile. // // It is not safe for concurrent use. type profileManager struct { + goos string // used for TestProfileManagementWindows store ipn.StateStore logf logger.Logf health *health.Tracker @@ -57,61 +57,68 @@ func (pm *profileManager) CurrentUserID() ipn.WindowsUserID { return pm.currentUserID } -// SetCurrentUserID sets the current user ID. The uid is only non-empty -// on Windows where we have a multi-user system. 
-func (pm *profileManager) SetCurrentUserID(uid ipn.WindowsUserID) error { +// SetCurrentUserID sets the current user ID and switches to that user's default (last used) profile. +// If the specified user does not have a default profile, or the default profile could not be loaded, +// it creates a new one and switches to it. The uid is only non-empty on Windows where we have a multi-user system. +func (pm *profileManager) SetCurrentUserID(uid ipn.WindowsUserID) { if pm.currentUserID == uid { - return nil + return } - prev := pm.currentUserID pm.currentUserID = uid - if uid == "" && prev != "" { - // This is a local user logout, or app shutdown. - // Clear the current profile. - pm.NewProfile() - return nil + if err := pm.SwitchToDefaultProfile(); err != nil { + // SetCurrentUserID should never fail and must always switch to the + // user's default profile or create a new profile for the current user. + // Until we implement multi-user support and the new permission model, + // and remove the concept of the "current user" completely, we must ensure + // that when SetCurrentUserID exits, the profile in pm.currentProfile + // is either an existing profile owned by the user, or a new, empty profile. + pm.logf("%q's default profile cannot be used; creating a new one: %v", uid, err) + pm.NewProfileForUser(uid) } +} +// DefaultUserProfileID returns [ipn.ProfileID] of the default (last used) profile for the specified user, +// or an empty string if the specified user does not have a default profile. +func (pm *profileManager) DefaultUserProfileID(uid ipn.WindowsUserID) ipn.ProfileID { // Read the CurrentProfileKey from the store which stores - // the selected profile for the current user. + // the selected profile for the specified user. 
b, err := pm.store.ReadState(ipn.CurrentProfileKey(string(uid))) - pm.dlogf("SetCurrentUserID: ReadState(%q) = %v, %v", string(uid), len(b), err) + pm.dlogf("DefaultUserProfileID: ReadState(%q) = %v, %v", string(uid), len(b), err) if err == ipn.ErrStateNotExist || len(b) == 0 { if runtime.GOOS == "windows" { - pm.dlogf("SetCurrentUserID: windows: migrating from legacy preferences") - if err := pm.migrateFromLegacyPrefs(); err != nil && !errors.Is(err, errAlreadyMigrated) { - return err + pm.dlogf("DefaultUserProfileID: windows: migrating from legacy preferences") + profile, err := pm.migrateFromLegacyPrefs(uid, false) + if err == nil { + return profile.ID } - } else { - pm.NewProfile() + pm.logf("failed to migrate from legacy preferences: %v", err) } - return nil + return "" } - // Now attempt to load the profile using the key we just read. pk := ipn.StateKey(string(b)) prof := pm.findProfileByKey(pk) if prof == nil { - pm.dlogf("SetCurrentUserID: no profile found for key: %q", pk) - pm.NewProfile() - return nil + pm.dlogf("DefaultUserProfileID: no profile found for key: %q", pk) + return "" } - prefs, err := pm.loadSavedPrefs(pk) - if err != nil { - pm.NewProfile() - return err + return prof.ID +} + +// checkProfileAccess returns an [errProfileAccessDenied] if the current user +// does not have access to the specified profile. +func (pm *profileManager) checkProfileAccess(profile *ipn.LoginProfile) error { + if pm.currentUserID != "" && profile.LocalUserID != pm.currentUserID { + return errProfileAccessDenied } - pm.currentProfile = prof - pm.prefs = prefs - pm.updateHealth() return nil } -// allProfiles returns all profiles that belong to the currentUserID. +// allProfiles returns all profiles accessible to the current user. // The returned profiles are sorted by Name. 
func (pm *profileManager) allProfiles() (out []*ipn.LoginProfile) { for _, p := range pm.knownProfiles { - if p.LocalUserID == pm.currentUserID { + if pm.checkProfileAccess(p) == nil { out = append(out, p) } } @@ -121,9 +128,8 @@ func (pm *profileManager) allProfiles() (out []*ipn.LoginProfile) { return out } -// matchingProfiles returns all profiles that match the given predicate and -// belong to the currentUserID. -// The returned profiles are sorted by Name. +// matchingProfiles is like [profileManager.allProfiles], but returns only profiles +// matching the given predicate. func (pm *profileManager) matchingProfiles(f func(*ipn.LoginProfile) bool) (out []*ipn.LoginProfile) { all := pm.allProfiles() out = all[:0] @@ -135,19 +141,20 @@ func (pm *profileManager) matchingProfiles(f func(*ipn.LoginProfile) bool) (out return out } -// findMatchinProfiles returns all profiles that represent the same node/user as -// prefs. +// findMatchingProfiles returns all profiles accessible to the current user +// that represent the same node/user as prefs. // The returned profiles are sorted by Name. -func (pm *profileManager) findMatchingProfiles(prefs *ipn.Prefs) []*ipn.LoginProfile { +func (pm *profileManager) findMatchingProfiles(prefs ipn.PrefsView) []*ipn.LoginProfile { return pm.matchingProfiles(func(p *ipn.LoginProfile) bool { - return p.ControlURL == prefs.ControlURL && - (p.UserProfile.ID == prefs.Persist.UserProfile.ID || - p.NodeID == prefs.Persist.NodeID) + return p.ControlURL == prefs.ControlURL() && + (p.UserProfile.ID == prefs.Persist().UserProfile().ID || + p.NodeID == prefs.Persist().NodeID()) }) } // ProfileIDForName returns the profile ID for the profile with the -// given name. It returns "" if no such profile exists. +// given name. It returns "" if no such profile exists among profiles +// accessible to the current user. 
func (pm *profileManager) ProfileIDForName(name string) ipn.ProfileID { p := pm.findProfileByName(name) if p == nil { @@ -164,7 +171,7 @@ func (pm *profileManager) findProfileByName(name string) *ipn.LoginProfile { return nil } if len(out) > 1 { - pm.logf("[unxpected] multiple profiles with the same name") + pm.logf("[unexpected] multiple profiles with the same name") } return out[0] } @@ -177,17 +184,17 @@ func (pm *profileManager) findProfileByKey(key ipn.StateKey) *ipn.LoginProfile { return nil } if len(out) > 1 { - pm.logf("[unxpected] multiple profiles with the same key") + pm.logf("[unexpected] multiple profiles with the same key") } return out[0] } func (pm *profileManager) setUnattendedModeAsConfigured() error { - if pm.currentUserID == "" { + if pm.goos != "windows" { return nil } - if pm.prefs.ForceDaemon() { + if pm.currentProfile.Key != "" && pm.prefs.ForceDaemon() { return pm.WriteState(ipn.ServerModeStartKey, []byte(pm.currentProfile.Key)) } else { return pm.WriteState(ipn.ServerModeStartKey, nil) @@ -201,26 +208,21 @@ func (pm *profileManager) Reset() { } // SetPrefs sets the current profile's prefs to the provided value. -// It also saves the prefs to the StateStore. It stores a copy of the -// provided prefs, which may be accessed via CurrentPrefs. +// It also saves the prefs to the [ipn.StateStore]. It stores a copy of the +// provided prefs, which may be accessed via [profileManager.CurrentPrefs]. // -// NetworkProfile stores additional information about the tailnet the user +// The [ipn.NetworkProfile] stores additional information about the tailnet the user // is logged into so that we can keep track of things like their domain name // across user switches to disambiguate the same account but a different tailnet. 
func (pm *profileManager) SetPrefs(prefsIn ipn.PrefsView, np ipn.NetworkProfile) error { - prefs := prefsIn.AsStruct() - newPersist := prefs.Persist - if newPersist == nil || newPersist.NodeID == "" || newPersist.UserProfile.LoginName == "" { + cp := pm.currentProfile + if persist := prefsIn.Persist(); !persist.Valid() || persist.NodeID() == "" || persist.UserProfile().LoginName == "" { // We don't know anything about this profile, so ignore it for now. - return pm.setPrefsLocked(prefs.View()) - } - up := newPersist.UserProfile - if up.DisplayName == "" { - up.DisplayName = up.LoginName + return pm.setProfilePrefsNoPermCheck(pm.currentProfile, prefsIn.AsStruct().View()) } - cp := pm.currentProfile + // Check if we already have an existing profile that matches the user/node. - if existing := pm.findMatchingProfiles(prefs); len(existing) > 0 { + if existing := pm.findMatchingProfiles(prefsIn); len(existing) > 0 { // We already have a profile for this user/node we should reuse it. Also // cleanup any other duplicate profiles. cp = existing[0] @@ -231,37 +233,76 @@ func (pm *profileManager) SetPrefs(prefsIn ipn.PrefsView, np ipn.NetworkProfile) // We couldn't delete the state, so keep the profile around. continue } - // Remove the profile, knownProfiles will be persisted below. + // Remove the profile, knownProfiles will be persisted + // in [profileManager.setProfilePrefs] below. delete(pm.knownProfiles, p.ID) } - } else if cp.ID == "" { - // We didn't have an existing profile, so create a new one. - cp.ID, cp.Key = newUnusedID(pm.knownProfiles) - cp.LocalUserID = pm.currentUserID - } else { - // This means that there was a force-reauth as a new node that - // we haven't seen before. 
- } - - if prefs.ProfileName != "" { - cp.Name = prefs.ProfileName - } else { - cp.Name = up.LoginName } - cp.ControlURL = prefs.ControlURL - cp.UserProfile = newPersist.UserProfile - cp.NodeID = newPersist.NodeID - cp.NetworkProfile = np - pm.knownProfiles[cp.ID] = cp pm.currentProfile = cp - if err := pm.writeKnownProfiles(); err != nil { + if err := pm.SetProfilePrefs(cp, prefsIn, np); err != nil { return err } - if err := pm.setAsUserSelectedProfileLocked(); err != nil { + return pm.setProfileAsUserDefault(cp) + +} + +// SetProfilePrefs is like [profileManager.SetPrefs], but sets prefs for the specified [ipn.LoginProfile] +// which is not necessarily the [profileManager.CurrentProfile]. It returns an [errProfileAccessDenied] +// if the specified profile is not accessible by the current user. +func (pm *profileManager) SetProfilePrefs(lp *ipn.LoginProfile, prefsIn ipn.PrefsView, np ipn.NetworkProfile) error { + if err := pm.checkProfileAccess(lp); err != nil { return err } - if err := pm.setPrefsLocked(prefs.View()); err != nil { - return err + + // An empty profile.ID indicates that the profile is new, the node info wasn't available, + // and it hasn't been persisted yet. We'll generate both an ID and [ipn.StateKey] + // once the information is available and needs to be persisted. + if lp.ID == "" { + if persist := prefsIn.Persist(); persist.Valid() && persist.NodeID() != "" && persist.UserProfile().LoginName != "" { + // Generate an ID and [ipn.StateKey] now that we have the node info. + lp.ID, lp.Key = newUnusedID(pm.knownProfiles) + } + + // Set the current user as the profile owner, unless the current user ID does + // not represent a specific user, or the profile is already owned by a different user. + // It is only relevant on Windows where we have a multi-user system. 
+ if lp.LocalUserID == "" && pm.currentUserID != "" { + lp.LocalUserID = pm.currentUserID + } + } + + var up tailcfg.UserProfile + if persist := prefsIn.Persist(); persist.Valid() { + up = persist.UserProfile() + if up.DisplayName == "" { + up.DisplayName = up.LoginName + } + lp.NodeID = persist.NodeID() + } else { + lp.NodeID = "" + } + + if prefsIn.ProfileName() != "" { + lp.Name = prefsIn.ProfileName() + } else { + lp.Name = up.LoginName + } + lp.ControlURL = prefsIn.ControlURL() + lp.UserProfile = up + lp.NetworkProfile = np + + // An empty profile.ID indicates that the node info is not available yet, + // and the profile doesn't need to be saved on disk. + if lp.ID != "" { + pm.knownProfiles[lp.ID] = lp + if err := pm.writeKnownProfiles(); err != nil { + return err + } + // Clone prefsIn and create a read-only view as a safety measure to + // prevent accidental preference mutations, both externally and internally. + if err := pm.setProfilePrefsNoPermCheck(lp, prefsIn.AsStruct().View()); err != nil { + return err + } } return nil } @@ -278,19 +319,35 @@ func newUnusedID(knownProfiles map[ipn.ProfileID]*ipn.LoginProfile) (ipn.Profile } } -// setPrefsLocked sets the current profile's prefs to the provided value. -// It also saves the prefs to the StateStore, if the current profile -// is not new. -func (pm *profileManager) setPrefsLocked(clonedPrefs ipn.PrefsView) error { - pm.prefs = clonedPrefs - pm.updateHealth() - if pm.currentProfile.ID == "" { - return nil +// setProfilePrefsNoPermCheck sets the profile's prefs to the provided value. +// If the profile has the [ipn.LoginProfile.Key] set, it saves the prefs to the +// [ipn.StateStore] under that key. It returns an error if the profile is non-current +// and does not have its Key set, or if the prefs could not be saved. 
+// The method does not perform any additional checks on the specified +// profile, such as verifying the caller's access rights or checking +// if another profile for the same node already exists. +func (pm *profileManager) setProfilePrefsNoPermCheck(profile *ipn.LoginProfile, clonedPrefs ipn.PrefsView) error { + isCurrentProfile := pm.currentProfile == profile + if isCurrentProfile { + pm.prefs = clonedPrefs + pm.updateHealth() } - if err := pm.writePrefsToStore(pm.currentProfile.Key, pm.prefs); err != nil { - return err + if profile.Key != "" { + if err := pm.writePrefsToStore(profile.Key, clonedPrefs); err != nil { + return err + } + } else if !isCurrentProfile { + return errors.New("cannot set prefs for a non-current in-memory profile") } - return pm.setUnattendedModeAsConfigured() + if isCurrentProfile { + return pm.setUnattendedModeAsConfigured() + } + return nil +} + +// setPrefsNoPermCheck is like [profileManager.setProfilePrefsNoPermCheck], but sets the current profile's prefs. +func (pm *profileManager) setPrefsNoPermCheck(clonedPrefs ipn.PrefsView) error { + return pm.setProfilePrefsNoPermCheck(pm.currentProfile, clonedPrefs) } func (pm *profileManager) writePrefsToStore(key ipn.StateKey, prefs ipn.PrefsView) error { @@ -304,18 +361,67 @@ func (pm *profileManager) writePrefsToStore(key ipn.StateKey, prefs ipn.PrefsVie return nil } -// Profiles returns the list of known profiles. +// Profiles returns the list of known profiles accessible to the current user. func (pm *profileManager) Profiles() []ipn.LoginProfile { allProfiles := pm.allProfiles() - out := make([]ipn.LoginProfile, 0, len(allProfiles)) - for _, p := range allProfiles { - out = append(out, *p) + out := make([]ipn.LoginProfile, len(allProfiles)) + for i, p := range allProfiles { + out[i] = *p } return out } +// ProfileByID returns a profile with the given id, if it is accessible to the current user. 
+// If the profile exists but is not accessible to the current user, it returns an [errProfileAccessDenied]. +// If the profile does not exist, it returns an [errProfileNotFound]. +func (pm *profileManager) ProfileByID(id ipn.ProfileID) (ipn.LoginProfile, error) { + kp, err := pm.profileByIDNoPermCheck(id) + if err != nil { + return ipn.LoginProfile{}, err + } + if err := pm.checkProfileAccess(kp); err != nil { + return ipn.LoginProfile{}, err + } + return *kp, nil +} + +// profileByIDNoPermCheck is like [profileManager.ProfileByID], but it doesn't +// check user's access rights to the profile. +func (pm *profileManager) profileByIDNoPermCheck(id ipn.ProfileID) (*ipn.LoginProfile, error) { + if id == pm.currentProfile.ID { + return pm.currentProfile, nil + } + kp, ok := pm.knownProfiles[id] + if !ok { + return nil, errProfileNotFound + } + return kp, nil +} + +// ProfilePrefs returns preferences for a profile with the given id. +// If the profile exists but is not accessible to the current user, it returns an [errProfileAccessDenied]. +// If the profile does not exist, it returns an [errProfileNotFound]. +func (pm *profileManager) ProfilePrefs(id ipn.ProfileID) (ipn.PrefsView, error) { + kp, err := pm.profileByIDNoPermCheck(id) + if err != nil { + return ipn.PrefsView{}, errProfileNotFound + } + if err := pm.checkProfileAccess(kp); err != nil { + return ipn.PrefsView{}, err + } + return pm.profilePrefs(kp) +} + +func (pm *profileManager) profilePrefs(p *ipn.LoginProfile) (ipn.PrefsView, error) { + if p.ID == pm.currentProfile.ID { + return pm.prefs, nil + } + return pm.loadSavedPrefs(p.Key) +} + // SwitchProfile switches to the profile with the given id. -// If the profile is not known, it returns an errProfileNotFound. +// If the profile exists but is not accessible to the current user, it returns an [errProfileAccessDenied]. +// If the profile does not exist, it returns an [errProfileNotFound]. 
func (pm *profileManager) SwitchProfile(id ipn.ProfileID) error { metricSwitchProfile.Add(1) @@ -323,12 +429,12 @@ func (pm *profileManager) SwitchProfile(id ipn.ProfileID) error { if !ok { return errProfileNotFound } - if pm.currentProfile != nil && kp.ID == pm.currentProfile.ID && pm.prefs.Valid() { return nil } - if kp.LocalUserID != pm.currentUserID { - return fmt.Errorf("profile %q is not owned by current user", id) + + if err := pm.checkProfileAccess(kp); err != nil { + return fmt.Errorf("%w: profile %q is not accessible to the current user", err, id) } prefs, err := pm.loadSavedPrefs(kp.Key) if err != nil { @@ -337,12 +443,32 @@ func (pm *profileManager) SwitchProfile(id ipn.ProfileID) error { pm.prefs = prefs pm.updateHealth() pm.currentProfile = kp - return pm.setAsUserSelectedProfileLocked() + return pm.setProfileAsUserDefault(kp) } -func (pm *profileManager) setAsUserSelectedProfileLocked() error { +// SwitchToDefaultProfile switches to the default (last used) profile for the current user. +// It creates a new one and switches to it if the current user does not have a default profile, +// or returns an error if the default profile is inaccessible or could not be loaded. +func (pm *profileManager) SwitchToDefaultProfile() error { + if id := pm.DefaultUserProfileID(pm.currentUserID); id != "" { + return pm.SwitchProfile(id) + } + pm.NewProfileForUser(pm.currentUserID) + return nil +} + +// setProfileAsUserDefault sets the specified profile as the default for the current user. +// It returns an [errProfileAccessDenied] if the specified profile is not accessible to the current user. +func (pm *profileManager) setProfileAsUserDefault(profile *ipn.LoginProfile) error { + if profile.Key == "" { + // The profile has not been persisted yet; ignore it for now. 
+ return nil + } + if err := pm.checkProfileAccess(profile); err != nil { + return errProfileAccessDenied + } k := ipn.CurrentProfileKey(string(pm.currentUserID)) - return pm.WriteState(k, []byte(pm.currentProfile.Key)) + return pm.WriteState(k, []byte(profile.Key)) } func (pm *profileManager) loadSavedPrefs(key ipn.StateKey) (ipn.PrefsView, error) { @@ -387,53 +513,94 @@ func (pm *profileManager) CurrentProfile() ipn.LoginProfile { return *pm.currentProfile } -// errProfileNotFound is returned by methods that accept a ProfileID. +// errProfileNotFound is returned by methods that accept a ProfileID +// when the specified profile does not exist. var errProfileNotFound = errors.New("profile not found") +// errProfileAccessDenied is returned by methods that accept a ProfileID +// when the current user does not have access to the specified profile. +// It is used temporarily until we implement access checks based on the +// caller's identity in tailscale/corp#18342. +var errProfileAccessDenied = errors.New("profile access denied") + // DeleteProfile removes the profile with the given id. It returns -// errProfileNotFound if the profile does not exist. +// [errProfileNotFound] if the profile does not exist, or an +// [errProfileAccessDenied] if the specified profile is not accessible +// to the current user. // If the profile is the current profile, it is the equivalent of -// calling NewProfile() followed by DeleteProfile(id). This is -// useful for deleting the last profile. In other cases, it is -// recommended to call SwitchProfile() first. +// calling [profileManager.NewProfile] followed by [profileManager.DeleteProfile](id). +// This is useful for deleting the last profile. In other cases, it is +// recommended to call [profileManager.SwitchProfile] first. func (pm *profileManager) DeleteProfile(id ipn.ProfileID) error { metricDeleteProfile.Add(1) - - if id == "" { - // Deleting the in-memory only new profile, just create a new one. 
- pm.NewProfile() - return nil + if id == pm.currentProfile.ID { + return pm.deleteCurrentProfile() } kp, ok := pm.knownProfiles[id] if !ok { return errProfileNotFound } - if kp.ID == pm.currentProfile.ID { + if err := pm.checkProfileAccess(kp); err != nil { + return err + } + return pm.deleteProfileNoPermCheck(kp) +} + +func (pm *profileManager) deleteCurrentProfile() error { + if err := pm.checkProfileAccess(pm.currentProfile); err != nil { + return err + } + if pm.currentProfile.ID == "" { + // Deleting the in-memory only new profile, just create a new one. pm.NewProfile() + return nil } - if err := pm.WriteState(kp.Key, nil); err != nil { + return pm.deleteProfileNoPermCheck(pm.currentProfile) +} + +// deleteProfileNoPermCheck is like [profileManager.DeleteProfile], +// but it doesn't check user's access rights to the profile. +func (pm *profileManager) deleteProfileNoPermCheck(profile *ipn.LoginProfile) error { + if profile.ID == pm.currentProfile.ID { + pm.NewProfile() + } + if err := pm.WriteState(profile.Key, nil); err != nil { return err } - delete(pm.knownProfiles, id) + delete(pm.knownProfiles, profile.ID) return pm.writeKnownProfiles() } -// DeleteAllProfiles removes all known profiles and switches to a new empty -// profile. -func (pm *profileManager) DeleteAllProfiles() error { +// DeleteAllProfilesForUser removes all known profiles accessible to the current user +// and switches to a new, empty profile. +func (pm *profileManager) DeleteAllProfilesForUser() error { metricDeleteAllProfile.Add(1) + currentProfileDeleted := false + writeKnownProfiles := func() error { + if currentProfileDeleted || pm.currentProfile.ID == "" { + pm.NewProfile() + } + return pm.writeKnownProfiles() + } + for _, kp := range pm.knownProfiles { + if pm.checkProfileAccess(kp) != nil { + // Skip profiles we don't have access to. 
+ continue + } if err := pm.WriteState(kp.Key, nil); err != nil { // Write to remove references to profiles we've already deleted, but // return the original error. - pm.writeKnownProfiles() + writeKnownProfiles() return err } delete(pm.knownProfiles, kp.ID) + if kp.ID == pm.currentProfile.ID { + currentProfileDeleted = true + } } - pm.NewProfile() - return pm.writeKnownProfiles() + return writeKnownProfiles() } func (pm *profileManager) writeKnownProfiles() error { @@ -452,13 +619,43 @@ func (pm *profileManager) updateHealth() { } // NewProfile creates and switches to a new unnamed profile. The new profile is -// not persisted until SetPrefs is called with a logged-in user. +// not persisted until [profileManager.SetPrefs] is called with a logged-in user. func (pm *profileManager) NewProfile() { + pm.NewProfileForUser(pm.currentUserID) +} + +// NewProfileForUser is like [profileManager.NewProfile], but it switches to the +// specified user and sets that user as the profile owner for the new profile. +func (pm *profileManager) NewProfileForUser(uid ipn.WindowsUserID) { + pm.currentUserID = uid + metricNewProfile.Add(1) pm.prefs = defaultPrefs pm.updateHealth() - pm.currentProfile = &ipn.LoginProfile{} + pm.currentProfile = &ipn.LoginProfile{LocalUserID: uid} +} + +// newProfileWithPrefs creates a new profile with the specified prefs and assigns +// the specified uid as the profile owner. If switchNow is true, it switches to the +// newly created profile immediately. It returns the newly created profile on success, +// or an error on failure. 
+func (pm *profileManager) newProfileWithPrefs(uid ipn.WindowsUserID, prefs ipn.PrefsView, switchNow bool) (*ipn.LoginProfile, error) { + metricNewProfile.Add(1) + + profile := &ipn.LoginProfile{LocalUserID: uid} + if err := pm.SetProfilePrefs(profile, prefs, ipn.NetworkProfile{}); err != nil { + return nil, err + } + if switchNow { + pm.currentProfile = profile + pm.prefs = prefs.AsStruct().View() + pm.updateHealth() + if err := pm.setProfileAsUserDefault(profile); err != nil { + return nil, err + } + } + return profile, nil } // defaultPrefs is the default prefs for a new profile. This initializes before @@ -473,7 +670,7 @@ var defaultPrefs = func() ipn.PrefsView { return prefs.View() }() -// Store returns the StateStore used by the ProfileManager. +// Store returns the [ipn.StateStore] used by the [profileManager]. func (pm *profileManager) Store() ipn.StateStore { return pm.store } @@ -494,8 +691,8 @@ func ReadStartupPrefsForTest(logf logger.Logf, store ipn.StateStore) (ipn.PrefsV return pm.CurrentPrefs(), nil } -// newProfileManager creates a new ProfileManager using the provided StateStore. -// It also loads the list of known profiles from the StateStore. +// newProfileManager creates a new [profileManager] using the provided [ipn.StateStore]. +// It also loads the list of known profiles from the store. 
func newProfileManager(store ipn.StateStore, logf logger.Logf, health *health.Tracker) (*profileManager, error) { return newProfileManagerWithGOOS(store, logf, health, envknob.GOOS()) } @@ -543,6 +740,7 @@ func newProfileManagerWithGOOS(store ipn.StateStore, logf logger.Logf, ht *healt } pm := &profileManager{ + goos: goos, store: store, knownProfiles: knownProfiles, logf: logf, @@ -567,7 +765,7 @@ func newProfileManagerWithGOOS(store ipn.StateStore, logf logger.Logf, ht *healt if err != nil { return nil, err } - if err := pm.setPrefsLocked(prefs); err != nil { + if err := pm.setProfilePrefsNoPermCheck(pm.currentProfile, prefs); err != nil { return nil, err } // Most platform behavior is controlled by the goos parameter, however @@ -580,7 +778,7 @@ func newProfileManagerWithGOOS(store ipn.StateStore, logf logger.Logf, ht *healt } else if len(knownProfiles) == 0 && goos != "windows" && runtime.GOOS != "windows" { // No known profiles, try a migration. pm.dlogf("no known profiles; trying to migrate from legacy prefs") - if err := pm.migrateFromLegacyPrefs(); err != nil { + if _, err := pm.migrateFromLegacyPrefs(pm.currentUserID, true); err != nil { return nil, err } } else { @@ -590,23 +788,23 @@ func newProfileManagerWithGOOS(store ipn.StateStore, logf logger.Logf, ht *healt return pm, nil } -func (pm *profileManager) migrateFromLegacyPrefs() error { +func (pm *profileManager) migrateFromLegacyPrefs(uid ipn.WindowsUserID, switchNow bool) (*ipn.LoginProfile, error) { metricMigration.Add(1) - pm.NewProfile() - sentinel, prefs, err := pm.loadLegacyPrefs() + sentinel, prefs, err := pm.loadLegacyPrefs(uid) if err != nil { metricMigrationError.Add(1) - return fmt.Errorf("load legacy prefs: %w", err) + return nil, fmt.Errorf("load legacy prefs: %w", err) } pm.dlogf("loaded legacy preferences; sentinel=%q", sentinel) - if err := pm.SetPrefs(prefs, ipn.NetworkProfile{}); err != nil { + profile, err := pm.newProfileWithPrefs(uid, prefs, switchNow) + if err != nil { 
metricMigrationError.Add(1) - return fmt.Errorf("migrating _daemon profile: %w", err) + return nil, fmt.Errorf("migrating _daemon profile: %w", err) } pm.completeMigration(sentinel) pm.dlogf("completed legacy preferences migration with sentinel=%q", sentinel) metricMigrationSuccess.Add(1) - return nil + return profile, nil } func (pm *profileManager) requiresBackfill() bool { diff --git a/ipn/ipnlocal/profiles_notwindows.go b/ipn/ipnlocal/profiles_notwindows.go index fc61d26713668..0ca8f439cf9f4 100644 --- a/ipn/ipnlocal/profiles_notwindows.go +++ b/ipn/ipnlocal/profiles_notwindows.go @@ -13,7 +13,7 @@ import ( "tailscale.com/version" ) -func (pm *profileManager) loadLegacyPrefs() (string, ipn.PrefsView, error) { +func (pm *profileManager) loadLegacyPrefs(ipn.WindowsUserID) (string, ipn.PrefsView, error) { k := ipn.LegacyGlobalDaemonStateKey switch { case runtime.GOOS == "ios", version.IsSandboxedMacOS(): diff --git a/ipn/ipnlocal/profiles_test.go b/ipn/ipnlocal/profiles_test.go index 01d49c2300082..73e4f6535387e 100644 --- a/ipn/ipnlocal/profiles_test.go +++ b/ipn/ipnlocal/profiles_test.go @@ -540,9 +540,7 @@ func TestProfileManagementWindows(t *testing.T) { { t.Logf("Set user1 as logged in user") - if err := pm.SetCurrentUserID(uid); err != nil { - t.Fatalf("can't set user id: %s", err) - } + pm.SetCurrentUserID(uid) checkProfiles(t) t.Logf("Save prefs for user1") wantProfiles["default"] = setPrefs(t, "default", false) @@ -576,9 +574,7 @@ func TestProfileManagementWindows(t *testing.T) { { t.Logf("Set user1 as current user") - if err := pm.SetCurrentUserID(uid); err != nil { - t.Fatal(err) - } + pm.SetCurrentUserID(uid) wantCurProfile = "test" } checkProfiles(t) diff --git a/ipn/ipnlocal/profiles_windows.go b/ipn/ipnlocal/profiles_windows.go index d98f4b5261315..c4beb22f9d42f 100644 --- a/ipn/ipnlocal/profiles_windows.go +++ b/ipn/ipnlocal/profiles_windows.go @@ -22,6 +22,8 @@ const ( legacyPrefsExt = ".conf" ) +var errAlreadyMigrated = errors.New("profile 
migration already completed") + func legacyPrefsDir(uid ipn.WindowsUserID) (string, error) { // TODO(aaron): Ideally we'd have the impersonation token for the pipe's // client and use it to call SHGetKnownFolderPath, thus yielding the correct @@ -37,10 +39,10 @@ func legacyPrefsDir(uid ipn.WindowsUserID) (string, error) { return userLegacyPrefsDir, nil } -func (pm *profileManager) loadLegacyPrefs() (string, ipn.PrefsView, error) { - userLegacyPrefsDir, err := legacyPrefsDir(pm.currentUserID) +func (pm *profileManager) loadLegacyPrefs(uid ipn.WindowsUserID) (string, ipn.PrefsView, error) { + userLegacyPrefsDir, err := legacyPrefsDir(uid) if err != nil { - pm.dlogf("no legacy preferences directory for %q: %v", pm.currentUserID, err) + pm.dlogf("no legacy preferences directory for %q: %v", uid, err) return "", ipn.PrefsView{}, err } diff --git a/ipn/ipnlocal/serve.go b/ipn/ipnlocal/serve.go index 9ad05a1961813..67d521f0968eb 100644 --- a/ipn/ipnlocal/serve.go +++ b/ipn/ipnlocal/serve.go @@ -331,7 +331,7 @@ func (b *LocalBackend) setServeConfigLocked(config *ipn.ServeConfig, etag string if !has(k) { for _, sess := range b.notifyWatchers { if sess.sessionID == k { - close(sess.ch) + sess.cancel() } } } diff --git a/ipn/ipnlocal/serve_test.go b/ipn/ipnlocal/serve_test.go index 626615484cb02..e43de17658ca7 100644 --- a/ipn/ipnlocal/serve_test.go +++ b/ipn/ipnlocal/serve_test.go @@ -251,6 +251,14 @@ func TestServeConfigForeground(t *testing.T) { t.Fatal(err) } + // Introduce a race between [LocalBackend] sending notifications + // and [LocalBackend.WatchNotifications] shutting down due to + // setting the serve config below. + const N = 1000 + for range N { + go b.send(ipn.Notify{}) + } + // Setting a new serve config should shut down WatchNotifications // whose session IDs are no longer found: session1 goes, session2 stays. 
err = b.SetServeConfig(&ipn.ServeConfig{ diff --git a/ipn/ipnlocal/state_test.go b/ipn/ipnlocal/state_test.go index d9ed608d86a7e..20dde81f14587 100644 --- a/ipn/ipnlocal/state_test.go +++ b/ipn/ipnlocal/state_test.go @@ -437,10 +437,13 @@ func TestStateMachine(t *testing.T) { // ask control to do anything. Instead backend will emit an event // indicating that the UI should browse to the given URL. t.Logf("\n\nLogin (interactive)") - notifies.expect(0) + notifies.expect(1) b.StartLoginInteractive(context.Background()) { + nn := notifies.drain(1) cc.assertCalls() + c.Assert(nn[0].BrowseToURL, qt.IsNotNil) + c.Assert(url1, qt.Equals, *nn[0].BrowseToURL) c.Assert(ipn.NeedsLogin, qt.Equals, b.State()) } @@ -450,11 +453,13 @@ func TestStateMachine(t *testing.T) { // the login URL expired. If they start another interactive login, // we must always get a *new* login URL first. t.Logf("\n\nLogin2 (interactive)") + b.authURLTime = time.Now().Add(-time.Hour * 24 * 7) // simulate URL expiration notifies.expect(0) b.StartLoginInteractive(context.Background()) { + notifies.drain(0) // backend asks control for another login sequence - cc.assertCalls() + cc.assertCalls("Login") c.Assert(ipn.NeedsLogin, qt.Equals, b.State()) } @@ -651,25 +656,55 @@ func TestStateMachine(t *testing.T) { c.Assert(ipn.NeedsLogin, qt.Equals, b.State()) } + // Explicitly set the ControlURL to avoid defaulting to [ipn.DefaultControlURL]. + // This prevents [LocalBackend] from using the production control server during tests + // and ensures that [LocalBackend.validPopBrowserURL] returns true for the + // fake interactive login URLs used below. Otherwise, we won't be receiving + // BrowseToURL notifications as expected. + // See tailscale/tailscale#11393. 
+ notifies.expect(1) + b.EditPrefs(&ipn.MaskedPrefs{ + ControlURLSet: true, + Prefs: ipn.Prefs{ + ControlURL: "https://localhost:1/", + }, + }) + notifies.drain(1) + + t.Logf("\n\nStartLoginInteractive3") b.StartLoginInteractive(context.Background()) - t.Logf("\n\nLoginFinished3") + // We've been logged out, and the previously created profile is now deleted. + // We're attempting an interactive login for the first time with the new profile, + // this should result in a call to the control server, which in turn should provide + // an interactive login URL to visit. + notifies.expect(2) + url3 := "https://localhost:1/3" + cc.send(nil, url3, false, nil) + { + nn := notifies.drain(2) + cc.assertCalls("Login") + c.Assert(nn[1].BrowseToURL, qt.IsNotNil) + c.Assert(*nn[1].BrowseToURL, qt.Equals, url3) + } + t.Logf("%q visited", url3) notifies.expect(3) cc.persist.UserProfile.LoginName = "user2" cc.persist.NodeID = "node2" cc.send(nil, "", true, &netmap.NetworkMap{ SelfNode: (&tailcfg.Node{MachineAuthorized: true}).View(), }) + t.Logf("\n\nLoginFinished3") { nn := notifies.drain(3) - cc.assertCalls("Login") c.Assert(nn[0].LoginFinished, qt.IsNotNil) c.Assert(nn[1].Prefs, qt.IsNotNil) c.Assert(nn[1].Prefs.Persist(), qt.IsNotNil) - c.Assert(nn[2].State, qt.IsNotNil) // Prefs after finishing the login, so LoginName updated. c.Assert(nn[1].Prefs.Persist().UserProfile().LoginName, qt.Equals, "user2") c.Assert(nn[1].Prefs.LoggedOut(), qt.IsFalse) + // If a user initiates an interactive login, they also expect WantRunning to become true. c.Assert(nn[1].Prefs.WantRunning(), qt.IsTrue) + c.Assert(nn[2].State, qt.IsNotNil) c.Assert(ipn.Starting, qt.Equals, *nn[2].State) } @@ -767,18 +802,12 @@ func TestStateMachine(t *testing.T) { // We want to try logging in as a different user, while Stopped. // First, start the login process (without logging out first). 
t.Logf("\n\nLoginDifferent") - notifies.expect(2) - b.EditPrefs(&ipn.MaskedPrefs{ - ControlURLSet: true, - Prefs: ipn.Prefs{ - ControlURL: "https://localhost:1/", - }, - }) + notifies.expect(1) b.StartLoginInteractive(context.Background()) - url3 := "https://localhost:1/3" - cc.send(nil, url3, false, nil) + url4 := "https://localhost:1/4" + cc.send(nil, url4, false, nil) { - nn := notifies.drain(2) + nn := notifies.drain(1) // It might seem like WantRunning should switch to true here, // but that would be risky since we already have a valid // user account. It might try to reconnect to the old account @@ -787,8 +816,8 @@ func TestStateMachine(t *testing.T) { // Because the login hasn't yet completed, the old login // is still valid, so it's correct that we stay paused. cc.assertCalls("Login") - c.Assert(nn[1].BrowseToURL, qt.IsNotNil) - c.Assert(*nn[1].BrowseToURL, qt.Equals, url3) + c.Assert(nn[0].BrowseToURL, qt.IsNotNil) + c.Assert(*nn[0].BrowseToURL, qt.Equals, url4) } // Now, let's complete the interactive login, using a different diff --git a/ipn/ipnserver/actor.go b/ipn/ipnserver/actor.go new file mode 100644 index 0000000000000..761c9816cab27 --- /dev/null +++ b/ipn/ipnserver/actor.go @@ -0,0 +1,188 @@ +// Copyright (c) Tailscale Inc & AUTHORS +// SPDX-License-Identifier: BSD-3-Clause + +package ipnserver + +import ( + "context" + "errors" + "fmt" + "net" + "os/exec" + "runtime" + "time" + + "tailscale.com/ipn" + "tailscale.com/ipn/ipnauth" + "tailscale.com/types/logger" + "tailscale.com/util/ctxkey" + "tailscale.com/util/osuser" + "tailscale.com/version" +) + +var _ ipnauth.Actor = (*actor)(nil) + +// actor implements [ipnauth.Actor] and provides additional functionality that is +// specific to the current (as of 2024-08-27) permission model. +// +// Deprecated: this type exists for compatibility reasons and will be removed as +// we progress on tailscale/corp#18342. 
+type actor struct { + logf logger.Logf + ci *ipnauth.ConnIdentity + + isLocalSystem bool // whether the actor is the Windows' Local System identity. +} + +func newActor(logf logger.Logf, c net.Conn) (*actor, error) { + ci, err := ipnauth.GetConnIdentity(logf, c) + if err != nil { + return nil, err + } + return &actor{logf: logf, ci: ci, isLocalSystem: connIsLocalSystem(ci)}, nil +} + +// IsLocalSystem implements [ipnauth.Actor]. +func (a *actor) IsLocalSystem() bool { + return a.isLocalSystem +} + +// IsLocalAdmin implements [ipnauth.Actor]. +func (a *actor) IsLocalAdmin(operatorUID string) bool { + return a.isLocalSystem || connIsLocalAdmin(a.logf, a.ci, operatorUID) +} + +// UserID implements [ipnauth.Actor]. +func (a *actor) UserID() ipn.WindowsUserID { + return a.ci.WindowsUserID() +} + +func (a *actor) pid() int { + return a.ci.Pid() +} + +// Username implements [ipnauth.Actor]. +func (a *actor) Username() (string, error) { + if a.ci == nil { + a.logf("[unexpected] missing ConnIdentity in ipnserver.actor") + return "", errors.New("missing ConnIdentity") + } + switch runtime.GOOS { + case "windows": + tok, err := a.ci.WindowsToken() + if err != nil { + return "", fmt.Errorf("get windows token: %w", err) + } + defer tok.Close() + return tok.Username() + case "darwin", "linux": + uid, ok := a.ci.Creds().UserID() + if !ok { + return "", errors.New("missing user ID") + } + u, err := osuser.LookupByUID(uid) + if err != nil { + return "", fmt.Errorf("lookup user: %w", err) + } + return u.Username, nil + default: + return "", errors.New("unsupported OS") + } +} + +type actorOrError struct { + actor *actor + err error +} + +func (a actorOrError) unwrap() (*actor, error) { + return a.actor, a.err +} + +var errNoActor = errors.New("connection actor not available") + +var actorKey = ctxkey.New("ipnserver.actor", actorOrError{err: errNoActor}) + +// contextWithActor returns a new context that carries the identity of the actor +// owning the other end of the [net.Conn]. 
It can be retrieved with [actorFromContext]. +func contextWithActor(ctx context.Context, logf logger.Logf, c net.Conn) context.Context { + actor, err := newActor(logf, c) + return actorKey.WithValue(ctx, actorOrError{actor: actor, err: err}) +} + +// actorFromContext returns an [actor] associated with ctx, +// or an error if the context does not carry an actor's identity. +func actorFromContext(ctx context.Context) (*actor, error) { + return actorKey.Value(ctx).unwrap() +} + +func connIsLocalSystem(ci *ipnauth.ConnIdentity) bool { + token, err := ci.WindowsToken() + return err == nil && token.IsLocalSystem() +} + +// connIsLocalAdmin reports whether the connected client has administrative +// access to the local machine, for whatever that means with respect to the +// current OS. +// +// This is useful because tailscaled itself always runs with elevated rights: +// we want to avoid privilege escalation for certain mutative operations. +func connIsLocalAdmin(logf logger.Logf, ci *ipnauth.ConnIdentity, operatorUID string) bool { + if ci == nil { + logf("[unexpected] missing ConnIdentity in LocalAPI Handler") + return false + } + switch runtime.GOOS { + case "windows": + tok, err := ci.WindowsToken() + if err != nil { + if !errors.Is(err, ipnauth.ErrNotImplemented) { + logf("ipnauth.ConnIdentity.WindowsToken() error: %v", err) + } + return false + } + defer tok.Close() + + return tok.IsElevated() + + case "darwin": + // Unknown, or at least unchecked on sandboxed macOS variants. Err on + // the side of less permissions. + // + // authorizeServeConfigForGOOSAndUserContext should not call + // connIsLocalAdmin on sandboxed variants anyway. + if version.IsSandboxedMacOS() { + return false + } + // This is a standalone tailscaled setup, use the same logic as on + // Linux. + fallthrough + case "linux": + uid, ok := ci.Creds().UserID() + if !ok { + return false + } + // root is always admin. 
+ if uid == "0" { + return true + } + // if non-root, must be operator AND able to execute "sudo tailscale". + if operatorUID != "" && uid != operatorUID { + return false + } + u, err := osuser.LookupByUID(uid) + if err != nil { + return false + } + // Short timeout just in case sudo hangs for some reason. + ctx, cancel := context.WithTimeout(context.Background(), 3*time.Second) + defer cancel() + if err := exec.CommandContext(ctx, "sudo", "--other-user="+u.Name, "--list", "tailscale").Run(); err != nil { + return false + } + return true + + default: + return false + } +} diff --git a/ipn/ipnserver/server.go b/ipn/ipnserver/server.go index fd2075ba8f4bb..73b5e82abee76 100644 --- a/ipn/ipnserver/server.go +++ b/ipn/ipnserver/server.go @@ -18,12 +18,10 @@ import ( "strings" "sync" "sync/atomic" - "time" "unicode" "tailscale.com/envknob" "tailscale.com/ipn" - "tailscale.com/ipn/ipnauth" "tailscale.com/ipn/ipnlocal" "tailscale.com/ipn/localapi" "tailscale.com/net/netmon" @@ -52,7 +50,7 @@ type Server struct { // lock order: mu, then LocalBackend.mu mu sync.Mutex lastUserID ipn.WindowsUserID // tracks last userid; on change, Reset state for paranoia - activeReqs map[*http.Request]*ipnauth.ConnIdentity + activeReqs map[*http.Request]*actor backendWaiter waiterSet // of LocalBackend waiters zeroReqWaiter waiterSet // of blockUntilZeroConnections waiters } @@ -75,7 +73,7 @@ func (s *Server) mustBackend() *ipnlocal.LocalBackend { type waiterSet set.HandleSet[context.CancelFunc] // add registers a new waiter in the set. -// It aquires mu to add the waiter, and does so again when cleanup is called to remove it. +// It acquires mu to add the waiter, and does so again when cleanup is called to remove it. // ready is closed when the waiter is ready (or ctx is done). 
func (s *waiterSet) add(mu *sync.Mutex, ctx context.Context) (ready <-chan struct{}, cleanup func()) { ctx, cancel := context.WithCancel(ctx) @@ -173,15 +171,13 @@ func (s *Server) serveHTTP(w http.ResponseWriter, r *http.Request) { return } - var ci *ipnauth.ConnIdentity - switch v := r.Context().Value(connIdentityContextKey{}).(type) { - case *ipnauth.ConnIdentity: - ci = v - case error: - http.Error(w, v.Error(), http.StatusUnauthorized) - return - case nil: - http.Error(w, "internal error: no connIdentityContextKey", http.StatusInternalServerError) + ci, err := actorFromContext(r.Context()) + if err != nil { + if errors.Is(err, errNoActor) { + http.Error(w, "internal error: "+err.Error(), http.StatusInternalServerError) + } else { + http.Error(w, err.Error(), http.StatusUnauthorized) + } return } @@ -199,9 +195,9 @@ func (s *Server) serveHTTP(w http.ResponseWriter, r *http.Request) { if strings.HasPrefix(r.URL.Path, "/localapi/") { lah := localapi.NewHandler(lb, s.logf, s.backendLogID) - lah.PermitRead, lah.PermitWrite = s.localAPIPermissions(ci) - lah.PermitCert = s.connCanFetchCerts(ci) - lah.ConnIdentity = ci + lah.PermitRead, lah.PermitWrite = ci.Permissions(lb.OperatorUserID()) + lah.PermitCert = ci.CanFetchCerts() + lah.Actor = ci lah.ServeHTTP(w, r) return } @@ -234,42 +230,28 @@ func (e inUseOtherUserError) Unwrap() error { return e.error } // The returned error, when non-nil, will be of type inUseOtherUserError. // // s.mu must be held. -func (s *Server) checkConnIdentityLocked(ci *ipnauth.ConnIdentity) error { +func (s *Server) checkConnIdentityLocked(ci *actor) error { // If clients are already connected, verify they're the same user. // This mostly matters on Windows at the moment. 
if len(s.activeReqs) > 0 { - var active *ipnauth.ConnIdentity + var active *actor for _, active = range s.activeReqs { break } if active != nil { - chkTok, err := ci.WindowsToken() - if err == nil { - defer chkTok.Close() - } else if !errors.Is(err, ipnauth.ErrNotImplemented) { - return err - } - // Always allow Windows SYSTEM user to connect, // even if Tailscale is currently being used by another user. - if chkTok != nil && chkTok.IsLocalSystem() { + if ci.IsLocalSystem() { return nil } - activeTok, err := active.WindowsToken() - if err == nil { - defer activeTok.Close() - } else if !errors.Is(err, ipnauth.ErrNotImplemented) { - return err - } - - if chkTok != nil && !chkTok.EqualUIDs(activeTok) { + if ci.UserID() != active.UserID() { var b strings.Builder b.WriteString("Tailscale already in use") - if username, err := activeTok.Username(); err == nil { + if username, err := active.Username(); err == nil { fmt.Fprintf(&b, " by %s", username) } - fmt.Fprintf(&b, ", pid %d", active.Pid()) + fmt.Fprintf(&b, ", pid %d", active.pid()) return inUseOtherUserError{errors.New(b.String())} } } @@ -285,11 +267,11 @@ func (s *Server) checkConnIdentityLocked(ci *ipnauth.ConnIdentity) error { // // This is primarily used for the Windows GUI, to block until one user's done // controlling the tailscaled process. -func (s *Server) blockWhileIdentityInUse(ctx context.Context, ci *ipnauth.ConnIdentity) error { +func (s *Server) blockWhileIdentityInUse(ctx context.Context, actor *actor) error { inUse := func() bool { s.mu.Lock() defer s.mu.Unlock() - _, ok := s.checkConnIdentityLocked(ci).(inUseOtherUserError) + _, ok := s.checkConnIdentityLocked(actor).(inUseOtherUserError) return ok } for inUse() { @@ -304,24 +286,28 @@ func (s *Server) blockWhileIdentityInUse(ctx context.Context, ci *ipnauth.ConnId return nil } -// localAPIPermissions returns the permissions for the given identity accessing -// the Tailscale local daemon API. -// -// s.mu must not be held. 
-func (s *Server) localAPIPermissions(ci *ipnauth.ConnIdentity) (read, write bool) { +// Permissions returns the actor's permissions for accessing +// the Tailscale local daemon API. The operatorUID is only used on +// Unix-like platforms and specifies the ID of a local user +// (in the os/user.User.Uid string form) who is allowed +// to operate tailscaled without being root or using sudo. +func (a *actor) Permissions(operatorUID string) (read, write bool) { switch envknob.GOOS() { case "windows": - s.mu.Lock() - defer s.mu.Unlock() - if s.checkConnIdentityLocked(ci) == nil { - return true, true - } - return false, false + // As of 2024-08-27, according to the current permission model, + // Windows users always have read/write access to the local API if + // they're allowed to connect. Whether a user is allowed to connect + // is determined by [Server.checkConnIdentityLocked] when adding a + // new connection in [Server.addActiveHTTPRequest]. Therefore, it's + // acceptable to permit read and write access without any additional + // checks here. Note that this permission model is being changed in + // tailscale/corp#18342. + return true, true case "js": return true, true } - if ci.IsUnixSock() { - return true, !ci.IsReadonlyConn(s.mustBackend().OperatorUserID(), logger.Discard) + if a.ci.IsUnixSock() { + return true, !a.ci.IsReadonlyConn(operatorUID, logger.Discard) } return false, false } @@ -349,19 +335,19 @@ func isAllDigit(s string) bool { return true } -// connCanFetchCerts reports whether ci is allowed to fetch HTTPS +// CanFetchCerts reports whether the actor is allowed to fetch HTTPS // certs from this server when it wouldn't otherwise be able to. // -// That is, this reports whether ci should grant additional -// capabilities over what the conn would otherwise be able to do. +// That is, this reports whether the actor should grant additional +// capabilities over what the actor would otherwise be able to do. 
// // For now this only returns true on Unix machines when // TS_PERMIT_CERT_UID is set the to the userid of the peer // connection. It's intended to give your non-root webserver access // (www-data, caddy, nginx, etc) to certs. -func (s *Server) connCanFetchCerts(ci *ipnauth.ConnIdentity) bool { - if ci.IsUnixSock() && ci.Creds() != nil { - connUID, ok := ci.Creds().UserID() +func (a *actor) CanFetchCerts() bool { + if a.ci.IsUnixSock() && a.ci.Creds() != nil { + connUID, ok := a.ci.Creds().UserID() if ok && connUID == userIDFromString(envknob.String("TS_PERMIT_CERT_UID")) { return true } @@ -371,12 +357,13 @@ func (s *Server) connCanFetchCerts(ci *ipnauth.ConnIdentity) bool { // addActiveHTTPRequest adds c to the server's list of active HTTP requests. // -// If the returned error may be of type inUseOtherUserError. +// It returns an error if the specified actor is not allowed to connect. +// The returned error may be of type [inUseOtherUserError]. // // onDone must be called when the HTTP request is done. 
-func (s *Server) addActiveHTTPRequest(req *http.Request, ci *ipnauth.ConnIdentity) (onDone func(), err error) { - if ci == nil { - return nil, errors.New("internal error: nil connIdentity") +func (s *Server) addActiveHTTPRequest(req *http.Request, actor *actor) (onDone func(), err error) { + if actor == nil { + return nil, errors.New("internal error: nil actor") } lb := s.mustBackend() @@ -394,25 +381,19 @@ func (s *Server) addActiveHTTPRequest(req *http.Request, ci *ipnauth.ConnIdentit s.mu.Lock() defer s.mu.Unlock() - if err := s.checkConnIdentityLocked(ci); err != nil { + if err := s.checkConnIdentityLocked(actor); err != nil { return nil, err } - mak.Set(&s.activeReqs, req, ci) + mak.Set(&s.activeReqs, req, actor) if len(s.activeReqs) == 1 { - token, err := ci.WindowsToken() - if err != nil { - if !errors.Is(err, ipnauth.ErrNotImplemented) { - s.logf("error obtaining access token: %v", err) - } - } else if !token.IsLocalSystem() { + if envknob.GOOS() == "windows" && !actor.IsLocalSystem() { // Tell the LocalBackend about the identity we're now running as, // unless its the SYSTEM user. That user is not a real account and // doesn't have a home directory. - uid, err := lb.SetCurrentUser(token) + uid, err := lb.SetCurrentUser(actor) if err != nil { - token.Close() return nil, err } if s.lastUserID != uid { @@ -488,10 +469,6 @@ func (s *Server) SetLocalBackend(lb *ipnlocal.LocalBackend) { // https://github.com/tailscale/tailscale/issues/6522 } -// connIdentityContextKey is the http.Request.Context's context.Value key for either an -// *ipnauth.ConnIdentity or an error. -type connIdentityContextKey struct{} - // Run runs the server, accepting connections from ln forever. // // If the context is done, the listener is closed. 
It is also the base context @@ -525,21 +502,9 @@ func (s *Server) Run(ctx context.Context, ln net.Listener) error { Handler: http.HandlerFunc(s.serveHTTP), BaseContext: func(_ net.Listener) context.Context { return ctx }, ConnContext: func(ctx context.Context, c net.Conn) context.Context { - ci, err := ipnauth.GetConnIdentity(s.logf, c) - if err != nil { - return context.WithValue(ctx, connIdentityContextKey{}, err) - } - return context.WithValue(ctx, connIdentityContextKey{}, ci) + return contextWithActor(ctx, s.logf, c) }, - // Localhost connections are cheap; so only do - // keep-alives for a short period of time, as these - // active connections lock the server into only serving - // that user. If the user has this page open, we don't - // want another switching user to be locked out for - // minutes. 5 seconds is enough to let browser hit - // favicon.ico and such. - IdleTimeout: 5 * time.Second, - ErrorLog: logger.StdLogger(logger.WithPrefix(s.logf, "ipnserver: ")), + ErrorLog: logger.StdLogger(logger.WithPrefix(s.logf, "ipnserver: ")), } if err := hs.Serve(ln); err != nil { if err := ctx.Err(); err != nil { diff --git a/ipn/ipnstate/ipnstate.go b/ipn/ipnstate/ipnstate.go index b38d75e5a304c..9f8bd34f61033 100644 --- a/ipn/ipnstate/ipnstate.go +++ b/ipn/ipnstate/ipnstate.go @@ -26,7 +26,7 @@ import ( "tailscale.com/version" ) -//go:generate go run tailscale.com/cmd/cloner -clonefunc=false -type=TKAFilteredPeer +//go:generate go run tailscale.com/cmd/cloner -clonefunc=false -type=TKAPeer // Status represents the entire state of the IPN network. type Status struct { @@ -94,15 +94,14 @@ type TKAKey struct { Votes uint } -// TKAFilteredPeer describes a peer which was removed from the netmap -// (i.e. no connectivity) because it failed tailnet lock -// checks. 
-type TKAFilteredPeer struct { - Name string // DNS - ID tailcfg.NodeID - StableID tailcfg.StableNodeID - TailscaleIPs []netip.Addr // Tailscale IP(s) assigned to this node - NodeKey key.NodePublic +// TKAPeer describes a peer and its network lock details. +type TKAPeer struct { + Name string // DNS + ID tailcfg.NodeID + StableID tailcfg.StableNodeID + TailscaleIPs []netip.Addr // Tailscale IP(s) assigned to this node + NodeKey key.NodePublic + NodeKeySignature tka.NodeKeySignature } // NetworkLockStatus represents whether network-lock is enabled, @@ -134,10 +133,14 @@ type NetworkLockStatus struct { // to network-lock. TrustedKeys []TKAKey + // VisiblePeers describes peers which are visible in the netmap that + // have valid Tailnet Lock signatures signatures. + VisiblePeers []*TKAPeer + // FilteredPeers describes peers which were removed from the netmap // (i.e. no connectivity) because they failed tailnet lock // checks. - FilteredPeers []*TKAFilteredPeer + FilteredPeers []*TKAPeer // StateID is a nonce associated with the network lock authority, // generated upon enablement. This field is not populated if the diff --git a/ipn/ipnstate/ipnstate_clone.go b/ipn/ipnstate/ipnstate_clone.go index 262daf3f2927b..20ae43c5fb73e 100644 --- a/ipn/ipnstate/ipnstate_clone.go +++ b/ipn/ipnstate/ipnstate_clone.go @@ -9,26 +9,29 @@ import ( "net/netip" "tailscale.com/tailcfg" + "tailscale.com/tka" "tailscale.com/types/key" ) -// Clone makes a deep copy of TKAFilteredPeer. +// Clone makes a deep copy of TKAPeer. // The result aliases no memory with the original. -func (src *TKAFilteredPeer) Clone() *TKAFilteredPeer { +func (src *TKAPeer) Clone() *TKAPeer { if src == nil { return nil } - dst := new(TKAFilteredPeer) + dst := new(TKAPeer) *dst = *src dst.TailscaleIPs = append(src.TailscaleIPs[:0:0], src.TailscaleIPs...) 
+ dst.NodeKeySignature = *src.NodeKeySignature.Clone() return dst } // A compilation failure here means this code must be regenerated, with the command at the top of this file. -var _TKAFilteredPeerCloneNeedsRegeneration = TKAFilteredPeer(struct { - Name string - ID tailcfg.NodeID - StableID tailcfg.StableNodeID - TailscaleIPs []netip.Addr - NodeKey key.NodePublic +var _TKAPeerCloneNeedsRegeneration = TKAPeer(struct { + Name string + ID tailcfg.NodeID + StableID tailcfg.StableNodeID + TailscaleIPs []netip.Addr + NodeKey key.NodePublic + NodeKeySignature tka.NodeKeySignature }{}) diff --git a/ipn/localapi/localapi.go b/ipn/localapi/localapi.go index 3be469193c441..01dc064cfda6e 100644 --- a/ipn/localapi/localapi.go +++ b/ipn/localapi/localapi.go @@ -23,7 +23,6 @@ import ( "net/netip" "net/url" "os" - "os/exec" "path" "runtime" "slices" @@ -60,9 +59,10 @@ import ( "tailscale.com/util/httpm" "tailscale.com/util/mak" "tailscale.com/util/osdiag" - "tailscale.com/util/osuser" "tailscale.com/util/progresstracking" "tailscale.com/util/rands" + "tailscale.com/util/testenv" + "tailscale.com/util/usermetric" "tailscale.com/version" "tailscale.com/wgengine/magicsock" ) @@ -98,6 +98,7 @@ var handler = map[string]localAPIHandler{ "derpmap": (*Handler).serveDERPMap, "dev-set-state-store": (*Handler).serveDevSetStateStore, "dial": (*Handler).serveDial, + "dns-osconfig": (*Handler).serveDNSOSConfig, "drive/fileserver-address": (*Handler).serveDriveServerAddr, "drive/shares": (*Handler).serveShares, "file-targets": (*Handler).serveFileTargets, @@ -141,6 +142,7 @@ var handler = map[string]localAPIHandler{ "update/install": (*Handler).serveUpdateInstall, "update/progress": (*Handler).serveUpdateProgress, "upload-client-metrics": (*Handler).serveUploadClientMetrics, + "usermetrics": (*Handler).serveUserMetrics, "watch-ipn-bus": (*Handler).serveWatchIPNBus, "whois": (*Handler).serveWhoIs, } @@ -180,12 +182,8 @@ type Handler struct { // cert fetching access. 
PermitCert bool - // ConnIdentity is the identity of the client connected to the Handler. - ConnIdentity *ipnauth.ConnIdentity - - // Test-only override for connIsLocalAdmin method. If non-nil, - // connIsLocalAdmin returns this value. - testConnIsLocalAdmin *bool + // Actor is the identity of the client connected to the Handler. + Actor ipnauth.Actor b *ipnlocal.LocalBackend logf logger.Logf @@ -571,6 +569,18 @@ func (h *Handler) serveMetrics(w http.ResponseWriter, r *http.Request) { clientmetric.WritePrometheusExpositionFormat(w) } +// TODO(kradalby): Remove this once we have landed on a final set of +// metrics to export to clients and consider the metrics stable. +var debugUsermetricsEndpoint = envknob.RegisterBool("TS_DEBUG_USER_METRICS") + +func (h *Handler) serveUserMetrics(w http.ResponseWriter, r *http.Request) { + if !testenv.InTest() && !debugUsermetricsEndpoint() { + http.Error(w, "usermetrics debug flag not enabled", http.StatusForbidden) + return + } + usermetric.Handler(w, r) +} + func (h *Handler) serveDebug(w http.ResponseWriter, r *http.Request) { if !h.PermitWrite { http.Error(w, "debug access denied", http.StatusForbidden) @@ -1050,7 +1060,7 @@ func authorizeServeConfigForGOOSAndUserContext(goos string, configIn *ipn.ServeC if !configIn.HasPathHandler() { return nil } - if h.connIsLocalAdmin() { + if h.Actor.IsLocalAdmin(h.b.OperatorUserID()) { return nil } switch goos { @@ -1066,104 +1076,6 @@ func authorizeServeConfigForGOOSAndUserContext(goos string, configIn *ipn.ServeC } -// connIsLocalAdmin reports whether the connected client has administrative -// access to the local machine, for whatever that means with respect to the -// current OS. -// -// This is useful because tailscaled itself always runs with elevated rights: -// we want to avoid privilege escalation for certain mutative operations. 
-func (h *Handler) connIsLocalAdmin() bool { - if h.testConnIsLocalAdmin != nil { - return *h.testConnIsLocalAdmin - } - if h.ConnIdentity == nil { - h.logf("[unexpected] missing ConnIdentity in LocalAPI Handler") - return false - } - switch runtime.GOOS { - case "windows": - tok, err := h.ConnIdentity.WindowsToken() - if err != nil { - if !errors.Is(err, ipnauth.ErrNotImplemented) { - h.logf("ipnauth.ConnIdentity.WindowsToken() error: %v", err) - } - return false - } - defer tok.Close() - - return tok.IsElevated() - - case "darwin": - // Unknown, or at least unchecked on sandboxed macOS variants. Err on - // the side of less permissions. - // - // authorizeServeConfigForGOOSAndUserContext should not call - // connIsLocalAdmin on sandboxed variants anyway. - if version.IsSandboxedMacOS() { - return false - } - // This is a standalone tailscaled setup, use the same logic as on - // Linux. - fallthrough - case "linux": - uid, ok := h.ConnIdentity.Creds().UserID() - if !ok { - return false - } - // root is always admin. - if uid == "0" { - return true - } - // if non-root, must be operator AND able to execute "sudo tailscale". - operatorUID := h.b.OperatorUserID() - if operatorUID != "" && uid != operatorUID { - return false - } - u, err := osuser.LookupByUID(uid) - if err != nil { - return false - } - // Short timeout just in case sudo hangs for some reason. 
- ctx, cancel := context.WithTimeout(context.Background(), 3*time.Second) - defer cancel() - if err := exec.CommandContext(ctx, "sudo", "--other-user="+u.Name, "--list", "tailscale").Run(); err != nil { - return false - } - return true - - default: - return false - } -} - -func (h *Handler) getUsername() (string, error) { - if h.ConnIdentity == nil { - h.logf("[unexpected] missing ConnIdentity in LocalAPI Handler") - return "", errors.New("missing ConnIdentity") - } - switch runtime.GOOS { - case "windows": - tok, err := h.ConnIdentity.WindowsToken() - if err != nil { - return "", fmt.Errorf("get windows token: %w", err) - } - defer tok.Close() - return tok.Username() - case "darwin", "linux": - uid, ok := h.ConnIdentity.Creds().UserID() - if !ok { - return "", errors.New("missing user ID") - } - u, err := osuser.LookupByUID(uid) - if err != nil { - return "", fmt.Errorf("lookup user: %w", err) - } - return u.Username, nil - default: - return "", errors.New("unsupported OS") - } -} - func (h *Handler) serveCheckIPForwarding(w http.ResponseWriter, r *http.Request) { if !h.PermitRead { http.Error(w, "IP forwarding check access denied", http.StatusForbidden) @@ -2796,6 +2708,44 @@ func (h *Handler) serveUpdateProgress(w http.ResponseWriter, r *http.Request) { json.NewEncoder(w).Encode(ups) } +// serveDNSOSConfig serves the current system DNS configuration as a JSON object, if +// supported by the OS. +func (h *Handler) serveDNSOSConfig(w http.ResponseWriter, r *http.Request) { + if r.Method != httpm.GET { + http.Error(w, "only GET allowed", http.StatusMethodNotAllowed) + return + } + // Require write access for privacy reasons. 
+ if !h.PermitWrite { + http.Error(w, "dns-osconfig dump access denied", http.StatusForbidden) + return + } + bCfg, err := h.b.GetDNSOSConfig() + if err != nil { + http.Error(w, err.Error(), http.StatusInternalServerError) + return + } + w.Header().Set("Content-Type", "application/json") + nameservers := make([]string, 0, len(bCfg.Nameservers)) + for _, ns := range bCfg.Nameservers { + nameservers = append(nameservers, ns.String()) + } + searchDomains := make([]string, 0, len(bCfg.SearchDomains)) + for _, sd := range bCfg.SearchDomains { + searchDomains = append(searchDomains, sd.WithoutTrailingDot()) + } + matchDomains := make([]string, 0, len(bCfg.MatchDomains)) + for _, md := range bCfg.MatchDomains { + matchDomains = append(matchDomains, md.WithoutTrailingDot()) + } + response := apitype.DNSOSConfig{ + Nameservers: nameservers, + SearchDomains: searchDomains, + MatchDomains: matchDomains, + } + json.NewEncoder(w).Encode(response) +} + // serveDriveServerAddr handles updates of the Taildrive file server address. 
func (h *Handler) serveDriveServerAddr(w http.ResponseWriter, r *http.Request) { if r.Method != "PUT" { @@ -2844,7 +2794,7 @@ func (h *Handler) serveShares(w http.ResponseWriter, r *http.Request) { } if drive.AllowShareAs() { // share as the connected user - username, err := h.getUsername() + username, err := h.Actor.Username() if err != nil { http.Error(w, err.Error(), http.StatusInternalServerError) return diff --git a/ipn/localapi/localapi_test.go b/ipn/localapi/localapi_test.go index 095ec5e56241d..5ec873b3bdee3 100644 --- a/ipn/localapi/localapi_test.go +++ b/ipn/localapi/localapi_test.go @@ -26,6 +26,7 @@ import ( "tailscale.com/client/tailscale/apitype" "tailscale.com/ipn" + "tailscale.com/ipn/ipnauth" "tailscale.com/ipn/ipnlocal" "tailscale.com/ipn/store/mem" "tailscale.com/tailcfg" @@ -38,6 +39,23 @@ import ( "tailscale.com/wgengine" ) +var _ ipnauth.Actor = (*testActor)(nil) + +type testActor struct { + uid ipn.WindowsUserID + name string + isLocalSystem bool + isLocalAdmin bool +} + +func (u *testActor) UserID() ipn.WindowsUserID { return u.uid } + +func (u *testActor) Username() (string, error) { return u.name, nil } + +func (u *testActor) IsLocalSystem() bool { return u.isLocalSystem } + +func (u *testActor) IsLocalAdmin(operatorUID string) bool { return u.isLocalAdmin } + func TestValidHost(t *testing.T) { tests := []struct { host string @@ -189,7 +207,7 @@ func TestWhoIsArgTypes(t *testing.T) { func TestShouldDenyServeConfigForGOOSAndUserContext(t *testing.T) { newHandler := func(connIsLocalAdmin bool) *Handler { - return &Handler{testConnIsLocalAdmin: &connIsLocalAdmin} + return &Handler{Actor: &testActor{isLocalAdmin: connIsLocalAdmin}, b: newTestLocalBackend(t)} } tests := []struct { name string diff --git a/ipn/store/kubestore/store_kube.go b/ipn/store/kubestore/store_kube.go index 0c90d06b3e5f3..00950bd3b2394 100644 --- a/ipn/store/kubestore/store_kube.go +++ b/ipn/store/kubestore/store_kube.go @@ -13,20 +13,21 @@ import ( "time" 
"tailscale.com/ipn" - "tailscale.com/kube" + "tailscale.com/kube/kubeapi" + "tailscale.com/kube/kubeclient" "tailscale.com/types/logger" ) // Store is an ipn.StateStore that uses a Kubernetes Secret for persistence. type Store struct { - client kube.Client + client kubeclient.Client canPatch bool secretName string } // New returns a new Store that persists to the named secret. func New(_ logger.Logf, secretName string) (*Store, error) { - c, err := kube.New() + c, err := kubeclient.New() if err != nil { return nil, err } @@ -58,7 +59,7 @@ func (s *Store) ReadState(id ipn.StateKey) ([]byte, error) { secret, err := s.client.GetSecret(ctx, s.secretName) if err != nil { - if st, ok := err.(*kube.Status); ok && st.Code == 404 { + if st, ok := err.(*kubeapi.Status); ok && st.Code == 404 { return nil, ipn.ErrStateNotExist } return nil, err @@ -88,13 +89,13 @@ func (s *Store) WriteState(id ipn.StateKey, bs []byte) error { secret, err := s.client.GetSecret(ctx, s.secretName) if err != nil { - if kube.IsNotFoundErr(err) { - return s.client.CreateSecret(ctx, &kube.Secret{ - TypeMeta: kube.TypeMeta{ + if kubeclient.IsNotFoundErr(err) { + return s.client.CreateSecret(ctx, &kubeapi.Secret{ + TypeMeta: kubeapi.TypeMeta{ APIVersion: "v1", Kind: "Secret", }, - ObjectMeta: kube.ObjectMeta{ + ObjectMeta: kubeapi.ObjectMeta{ Name: s.secretName, }, Data: map[string][]byte{ @@ -106,7 +107,7 @@ func (s *Store) WriteState(id ipn.StateKey, bs []byte) error { } if s.canPatch { if len(secret.Data) == 0 { // if user has pre-created a blank Secret - m := []kube.JSONPatch{ + m := []kubeclient.JSONPatch{ { Op: "add", Path: "/data", @@ -118,7 +119,7 @@ func (s *Store) WriteState(id ipn.StateKey, bs []byte) error { } return nil } - m := []kube.JSONPatch{ + m := []kubeclient.JSONPatch{ { Op: "add", Path: "/data/" + sanitizeKey(id), diff --git a/k8s-operator/api.md b/k8s-operator/api.md index 1b72df0f2720b..e96baa7902b16 100644 --- a/k8s-operator/api.md +++ b/k8s-operator/api.md @@ -14,6 +14,8 @@ - 
[DNSConfigList](#dnsconfiglist) - [ProxyClass](#proxyclass) - [ProxyClassList](#proxyclasslist) +- [Recorder](#recorder) +- [RecorderList](#recorderlist) @@ -236,6 +238,7 @@ _Appears in:_ _Appears in:_ - [Container](#container) +- [RecorderContainer](#recordercontainer) | Field | Description | Default | Validation | | --- | --- | --- | --- | @@ -258,23 +261,6 @@ _Appears in:_ -#### Image - - - - - - - -_Appears in:_ -- [Nameserver](#nameserver) - -| Field | Description | Default | Validation | -| --- | --- | --- | --- | -| `repo` _string_ | Repo defaults to tailscale/k8s-nameserver. | | | -| `tag` _string_ | Tag defaults to operator's own tag. | | | - - #### Metrics @@ -319,7 +305,24 @@ _Appears in:_ | Field | Description | Default | Validation | | --- | --- | --- | --- | -| `image` _[Image](#image)_ | Nameserver image. | | | +| `image` _[NameserverImage](#nameserverimage)_ | Nameserver image. Defaults to tailscale/k8s-nameserver:unstable. | | | + + +#### NameserverImage + + + + + + + +_Appears in:_ +- [Nameserver](#nameserver) + +| Field | Description | Default | Validation | +| --- | --- | --- | --- | +| `repo` _string_ | Repo defaults to tailscale/k8s-nameserver. | | | +| `tag` _string_ | Tag defaults to unstable. | | | #### NameserverStatus @@ -447,6 +450,145 @@ _Appears in:_ | `conditions` _[Condition](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.3/#condition-v1-meta) array_ | List of status conditions to indicate the status of the ProxyClass.
Known condition types are `ProxyClassReady`. | | | +#### Recorder + + + + + + + +_Appears in:_ +- [RecorderList](#recorderlist) + +| Field | Description | Default | Validation | +| --- | --- | --- | --- | +| `apiVersion` _string_ | `tailscale.com/v1alpha1` | | | +| `kind` _string_ | `Recorder` | | | +| `kind` _string_ | Kind is a string value representing the REST resource this object represents.
Servers may infer this from the endpoint the client submits requests to.
Cannot be updated.
In CamelCase.
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds | | | +| `apiVersion` _string_ | APIVersion defines the versioned schema of this representation of an object.
Servers should convert recognized schemas to the latest internal value, and
may reject unrecognized values.
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources | | | +| `metadata` _[ObjectMeta](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.3/#objectmeta-v1-meta)_ | Refer to Kubernetes API documentation for fields of `metadata`. | | | +| `spec` _[RecorderSpec](#recorderspec)_ | Spec describes the desired recorder instance. | | | +| `status` _[RecorderStatus](#recorderstatus)_ | RecorderStatus describes the status of the recorder. This is set
and managed by the Tailscale operator. | | | + + +#### RecorderContainer + + + + + + + +_Appears in:_ +- [RecorderPod](#recorderpod) + +| Field | Description | Default | Validation | +| --- | --- | --- | --- | +| `env` _[Env](#env) array_ | List of environment variables to set in the container.
https://kubernetes.io/docs/reference/kubernetes-api/workload-resources/pod-v1/#environment-variables
Note that environment variables provided here will take precedence
over Tailscale-specific environment variables set by the operator,
however running proxies with custom values for Tailscale environment
variables (i.e TS_USERSPACE) is not recommended and might break in
the future. | | | +| `image` _string_ | Container image name including tag. Defaults to docker.io/tailscale/tsrecorder
with the same tag as the operator, but the official images are also
available at ghcr.io/tailscale/tsrecorder.
https://kubernetes.io/docs/reference/kubernetes-api/workload-resources/pod-v1/#image | | | +| `imagePullPolicy` _[PullPolicy](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.3/#pullpolicy-v1-core)_ | Image pull policy. One of Always, Never, IfNotPresent. Defaults to Always.
https://kubernetes.io/docs/reference/kubernetes-api/workload-resources/pod-v1/#image | | Enum: [Always Never IfNotPresent]
| +| `resources` _[ResourceRequirements](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.3/#resourcerequirements-v1-core)_ | Container resource requirements.
By default, the operator does not apply any resource requirements. The
amount of resources required wil depend on the volume of recordings sent.
https://kubernetes.io/docs/reference/kubernetes-api/workload-resources/pod-v1/#resources | | | +| `securityContext` _[SecurityContext](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.3/#securitycontext-v1-core)_ | Container security context. By default, the operator does not apply any
container security context.
https://kubernetes.io/docs/reference/kubernetes-api/workload-resources/pod-v1/#security-context | | | + + +#### RecorderList + + + + + + + + + +| Field | Description | Default | Validation | +| --- | --- | --- | --- | +| `apiVersion` _string_ | `tailscale.com/v1alpha1` | | | +| `kind` _string_ | `RecorderList` | | | +| `kind` _string_ | Kind is a string value representing the REST resource this object represents.
Servers may infer this from the endpoint the client submits requests to.
Cannot be updated.
In CamelCase.
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds | | | +| `apiVersion` _string_ | APIVersion defines the versioned schema of this representation of an object.
Servers should convert recognized schemas to the latest internal value, and
may reject unrecognized values.
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources | | | +| `metadata` _[ListMeta](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.3/#listmeta-v1-meta)_ | Refer to Kubernetes API documentation for fields of `metadata`. | | | +| `items` _[Recorder](#recorder) array_ | | | | + + +#### RecorderPod + + + + + + + +_Appears in:_ +- [RecorderStatefulSet](#recorderstatefulset) + +| Field | Description | Default | Validation | +| --- | --- | --- | --- | +| `labels` _object (keys:string, values:string)_ | Labels that will be added to Recorder Pods. Any labels specified here
will be merged with the default labels applied to the Pod by the operator.
https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/#syntax-and-character-set | | | +| `annotations` _object (keys:string, values:string)_ | Annotations that will be added to Recorder Pods. Any annotations
specified here will be merged with the default annotations applied to
the Pod by the operator.
https://kubernetes.io/docs/concepts/overview/working-with-objects/annotations/#syntax-and-character-set | | | +| `affinity` _[Affinity](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.3/#affinity-v1-core)_ | Affinity rules for Recorder Pods. By default, the operator does not
apply any affinity rules.
https://kubernetes.io/docs/reference/kubernetes-api/workload-resources/pod-v1/#affinity | | | +| `container` _[RecorderContainer](#recordercontainer)_ | Configuration for the Recorder container running tailscale. | | | +| `securityContext` _[PodSecurityContext](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.3/#podsecuritycontext-v1-core)_ | Security context for Recorder Pods. By default, the operator does not
apply any Pod security context.
https://kubernetes.io/docs/reference/kubernetes-api/workload-resources/pod-v1/#security-context-2 | | | +| `imagePullSecrets` _[LocalObjectReference](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.3/#localobjectreference-v1-core) array_ | Image pull Secrets for Recorder Pods.
https://kubernetes.io/docs/reference/kubernetes-api/workload-resources/pod-v1/#PodSpec | | | +| `nodeSelector` _object (keys:string, values:string)_ | Node selector rules for Recorder Pods. By default, the operator does
not apply any node selector rules.
https://kubernetes.io/docs/reference/kubernetes-api/workload-resources/pod-v1/#scheduling | | | +| `tolerations` _[Toleration](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.3/#toleration-v1-core) array_ | Tolerations for Recorder Pods. By default, the operator does not apply
any tolerations.
https://kubernetes.io/docs/reference/kubernetes-api/workload-resources/pod-v1/#scheduling | | | + + +#### RecorderSpec + + + + + + + +_Appears in:_ +- [Recorder](#recorder) + +| Field | Description | Default | Validation | +| --- | --- | --- | --- | +| `statefulSet` _[RecorderStatefulSet](#recorderstatefulset)_ | Configuration parameters for the Recorder's StatefulSet. The operator
deploys a StatefulSet for each Recorder resource. | | | +| `tags` _[Tags](#tags)_ | Tags that the Tailscale device will be tagged with. Defaults to [tag:k8s].
If you specify custom tags here, make sure you also make the operator
an owner of these tags.
See https://tailscale.com/kb/1236/kubernetes-operator/#setting-up-the-kubernetes-operator.
Tags cannot be changed once a Recorder node has been created.
Tag values must be in form ^tag:[a-zA-Z][a-zA-Z0-9-]*$. | | Pattern: `^tag:[a-zA-Z][a-zA-Z0-9-]*$`
Type: string
| +| `enableUI` _boolean_ | Set to true to enable the Recorder UI. The UI lists and plays recorded sessions.
The UI will be served at :443. Defaults to false.
Corresponds to --ui tsrecorder flag https://tailscale.com/kb/1246/tailscale-ssh-session-recording#deploy-a-recorder-node.
Required if S3 storage is not set up, to ensure that recordings are accessible. | | | +| `storage` _[Storage](#storage)_ | Configure where to store session recordings. By default, recordings will
be stored in a local ephemeral volume, and will not be persisted past the
lifetime of a specific pod. | | | + + +#### RecorderStatefulSet + + + + + + + +_Appears in:_ +- [RecorderSpec](#recorderspec) + +| Field | Description | Default | Validation | +| --- | --- | --- | --- | +| `labels` _object (keys:string, values:string)_ | Labels that will be added to the StatefulSet created for the Recorder.
Any labels specified here will be merged with the default labels applied
to the StatefulSet by the operator.
https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/#syntax-and-character-set | | | +| `annotations` _object (keys:string, values:string)_ | Annotations that will be added to the StatefulSet created for the Recorder.
Any Annotations specified here will be merged with the default annotations
applied to the StatefulSet by the operator.
https://kubernetes.io/docs/concepts/overview/working-with-objects/annotations/#syntax-and-character-set | | | +| `pod` _[RecorderPod](#recorderpod)_ | Configuration for pods created by the Recorder's StatefulSet. | | | + + +#### RecorderStatus + + + + + + + +_Appears in:_ +- [Recorder](#recorder) + +| Field | Description | Default | Validation | +| --- | --- | --- | --- | +| `conditions` _[Condition](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.3/#condition-v1-meta) array_ | List of status conditions to indicate the status of the Recorder.
Known condition types are `RecorderReady`. | | | +| `devices` _[TailnetDevice](#tailnetdevice) array_ | List of tailnet devices associated with the Recorder statefulset. | | | + + #### Route _Underlying type:_ _string_ @@ -478,6 +620,56 @@ _Appears in:_ +#### S3 + + + + + + + +_Appears in:_ +- [Storage](#storage) + +| Field | Description | Default | Validation | +| --- | --- | --- | --- | +| `endpoint` _string_ | S3-compatible endpoint, e.g. s3.us-east-1.amazonaws.com. | | | +| `bucket` _string_ | Bucket name to write to. The bucket is expected to be used solely for
recordings, as there is no stable prefix for written object names. | | | +| `credentials` _[S3Credentials](#s3credentials)_ | Configure environment variable credentials for managing objects in the
configured bucket. If not set, tsrecorder will try to acquire credentials
first from the file system and then the STS API. | | | + + +#### S3Credentials + + + + + + + +_Appears in:_ +- [S3](#s3) + +| Field | Description | Default | Validation | +| --- | --- | --- | --- | +| `secret` _[S3Secret](#s3secret)_ | Use a Kubernetes Secret from the operator's namespace as the source of
credentials. | | | + + +#### S3Secret + + + + + + + +_Appears in:_ +- [S3Credentials](#s3credentials) + +| Field | Description | Default | Validation | +| --- | --- | --- | --- | +| `name` _string_ | The name of a Kubernetes Secret in the operator's namespace that contains
credentials for writing to the configured bucket. Each key-value pair
from the secret's data will be mounted as an environment variable. It
should include keys for AWS_ACCESS_KEY_ID and AWS_SECRET_ACCESS_KEY if
using a static access key. | | | + + #### StatefulSet @@ -496,6 +688,22 @@ _Appears in:_ | `pod` _[Pod](#pod)_ | Configuration for the proxy Pod. | | | +#### Storage + + + + + + + +_Appears in:_ +- [RecorderSpec](#recorderspec) + +| Field | Description | Default | Validation | +| --- | --- | --- | --- | +| `s3` _[S3](#s3)_ | Configure an S3-compatible API for storage. Required if the UI is not
enabled, to ensure that recordings are accessible. | | | + + #### SubnetRouter @@ -540,8 +748,27 @@ _Validation:_ _Appears in:_ - [ConnectorSpec](#connectorspec) +- [RecorderSpec](#recorderspec) + +#### TailnetDevice + + + + + + + +_Appears in:_ +- [RecorderStatus](#recorderstatus) + +| Field | Description | Default | Validation | +| --- | --- | --- | --- | +| `hostname` _string_ | Hostname is the fully qualified domain name of the device.
If MagicDNS is enabled in your tailnet, it is the MagicDNS name of the
node. | | | +| `tailnetIPs` _string array_ | TailnetIPs is the set of tailnet IP addresses (both IPv4 and IPv6)
assigned to the device. | | | +| `url` _string_ | URL where the UI is available if enabled for replaying recordings. This
will be an HTTPS MagicDNS URL. You must be connected to the same tailnet
as the recorder to access it. | | | + #### TailscaleConfig diff --git a/k8s-operator/apis/v1alpha1/register.go b/k8s-operator/apis/v1alpha1/register.go index 8c888ff05009b..b16bc7b7be333 100644 --- a/k8s-operator/apis/v1alpha1/register.go +++ b/k8s-operator/apis/v1alpha1/register.go @@ -49,7 +49,16 @@ func init() { // Adds the list of known types to api.Scheme. func addKnownTypes(scheme *runtime.Scheme) error { - scheme.AddKnownTypes(SchemeGroupVersion, &Connector{}, &ConnectorList{}, &ProxyClass{}, &ProxyClassList{}, &DNSConfig{}, &DNSConfigList{}) + scheme.AddKnownTypes(SchemeGroupVersion, + &Connector{}, + &ConnectorList{}, + &ProxyClass{}, + &ProxyClassList{}, + &DNSConfig{}, + &DNSConfigList{}, + &Recorder{}, + &RecorderList{}, + ) metav1.AddToGroupVersion(scheme, SchemeGroupVersion) return nil diff --git a/k8s-operator/apis/v1alpha1/types_connector.go b/k8s-operator/apis/v1alpha1/types_connector.go index c33ad3c393e70..87c44926b52bd 100644 --- a/k8s-operator/apis/v1alpha1/types_connector.go +++ b/k8s-operator/apis/v1alpha1/types_connector.go @@ -173,4 +173,5 @@ const ( ConnectorReady ConditionType = `ConnectorReady` ProxyClassready ConditionType = `ProxyClassReady` ProxyReady ConditionType = `TailscaleProxyReady` // a Tailscale-specific condition type for corev1.Service + RecorderReady ConditionType = `RecorderReady` ) diff --git a/k8s-operator/apis/v1alpha1/types_recorder.go b/k8s-operator/apis/v1alpha1/types_recorder.go new file mode 100644 index 0000000000000..f365ab3163965 --- /dev/null +++ b/k8s-operator/apis/v1alpha1/types_recorder.go @@ -0,0 +1,249 @@ +// Copyright (c) Tailscale Inc & AUTHORS +// SPDX-License-Identifier: BSD-3-Clause + +//go:build !plan9 + +package v1alpha1 + +import ( + corev1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" +) + +// +kubebuilder:object:root=true +// +kubebuilder:subresource:status +// +kubebuilder:resource:scope=Cluster,shortName=rec +// 
+kubebuilder:printcolumn:name="Status",type="string",JSONPath=`.status.conditions[?(@.type == "RecorderReady")].reason`,description="Status of the deployed Recorder resources." +// +kubebuilder:printcolumn:name="URL",type="string",JSONPath=`.status.devices[?(@.url != "")].url`,description="URL on which the UI is exposed if enabled." + +type Recorder struct { + metav1.TypeMeta `json:",inline"` + metav1.ObjectMeta `json:"metadata,omitempty"` + + // Spec describes the desired recorder instance. + Spec RecorderSpec `json:"spec"` + + // RecorderStatus describes the status of the recorder. This is set + // and managed by the Tailscale operator. + // +optional + Status RecorderStatus `json:"status"` +} + +// +kubebuilder:object:root=true + +type RecorderList struct { + metav1.TypeMeta `json:",inline"` + metav1.ListMeta `json:"metadata"` + + Items []Recorder `json:"items"` +} + +type RecorderSpec struct { + // Configuration parameters for the Recorder's StatefulSet. The operator + // deploys a StatefulSet for each Recorder resource. + // +optional + StatefulSet RecorderStatefulSet `json:"statefulSet"` + + // Tags that the Tailscale device will be tagged with. Defaults to [tag:k8s]. + // If you specify custom tags here, make sure you also make the operator + // an owner of these tags. + // See https://tailscale.com/kb/1236/kubernetes-operator/#setting-up-the-kubernetes-operator. + // Tags cannot be changed once a Recorder node has been created. + // Tag values must be in form ^tag:[a-zA-Z][a-zA-Z0-9-]*$. + // +optional + Tags Tags `json:"tags,omitempty"` + + // TODO(tomhjp): Support a hostname or hostname prefix field, depending on + // the plan for multiple replicas. + + // Set to true to enable the Recorder UI. The UI lists and plays recorded sessions. + // The UI will be served at :443. Defaults to false. + // Corresponds to --ui tsrecorder flag https://tailscale.com/kb/1246/tailscale-ssh-session-recording#deploy-a-recorder-node. 
+ // Required if S3 storage is not set up, to ensure that recordings are accessible. + // +optional + EnableUI bool `json:"enableUI,omitempty"` + + // Configure where to store session recordings. By default, recordings will + // be stored in a local ephemeral volume, and will not be persisted past the + // lifetime of a specific pod. + // +optional + Storage Storage `json:"storage,omitempty"` +} + +type RecorderStatefulSet struct { + // Labels that will be added to the StatefulSet created for the Recorder. + // Any labels specified here will be merged with the default labels applied + // to the StatefulSet by the operator. + // https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/#syntax-and-character-set + // +optional + Labels map[string]string `json:"labels,omitempty"` + + // Annotations that will be added to the StatefulSet created for the Recorder. + // Any Annotations specified here will be merged with the default annotations + // applied to the StatefulSet by the operator. + // https://kubernetes.io/docs/concepts/overview/working-with-objects/annotations/#syntax-and-character-set + // +optional + Annotations map[string]string `json:"annotations,omitempty"` + + // Configuration for pods created by the Recorder's StatefulSet. + // +optional + Pod RecorderPod `json:"pod,omitempty"` +} + +type RecorderPod struct { + // Labels that will be added to Recorder Pods. Any labels specified here + // will be merged with the default labels applied to the Pod by the operator. + // https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/#syntax-and-character-set + // +optional + Labels map[string]string `json:"labels,omitempty"` + + // Annotations that will be added to Recorder Pods. Any annotations + // specified here will be merged with the default annotations applied to + // the Pod by the operator. 
+ // https://kubernetes.io/docs/concepts/overview/working-with-objects/annotations/#syntax-and-character-set + // +optional + Annotations map[string]string `json:"annotations,omitempty"` + + // Affinity rules for Recorder Pods. By default, the operator does not + // apply any affinity rules. + // https://kubernetes.io/docs/reference/kubernetes-api/workload-resources/pod-v1/#affinity + // +optional + Affinity *corev1.Affinity `json:"affinity,omitempty"` + + // Configuration for the Recorder container running tailscale. + // +optional + Container RecorderContainer `json:"container,omitempty"` + + // Security context for Recorder Pods. By default, the operator does not + // apply any Pod security context. + // https://kubernetes.io/docs/reference/kubernetes-api/workload-resources/pod-v1/#security-context-2 + // +optional + SecurityContext *corev1.PodSecurityContext `json:"securityContext,omitempty"` + + // Image pull Secrets for Recorder Pods. + // https://kubernetes.io/docs/reference/kubernetes-api/workload-resources/pod-v1/#PodSpec + // +optional + ImagePullSecrets []corev1.LocalObjectReference `json:"imagePullSecrets,omitempty"` + + // Node selector rules for Recorder Pods. By default, the operator does + // not apply any node selector rules. + // https://kubernetes.io/docs/reference/kubernetes-api/workload-resources/pod-v1/#scheduling + // +optional + NodeSelector map[string]string `json:"nodeSelector,omitempty"` + + // Tolerations for Recorder Pods. By default, the operator does not apply + // any tolerations. + // https://kubernetes.io/docs/reference/kubernetes-api/workload-resources/pod-v1/#scheduling + // +optional + Tolerations []corev1.Toleration `json:"tolerations,omitempty"` +} + +type RecorderContainer struct { + // List of environment variables to set in the container. 
+ // https://kubernetes.io/docs/reference/kubernetes-api/workload-resources/pod-v1/#environment-variables + // Note that environment variables provided here will take precedence + // over Tailscale-specific environment variables set by the operator, + // however running proxies with custom values for Tailscale environment + // variables (i.e TS_USERSPACE) is not recommended and might break in + // the future. + // +optional + Env []Env `json:"env,omitempty"` + + // Container image name including tag. Defaults to docker.io/tailscale/tsrecorder + // with the same tag as the operator, but the official images are also + // available at ghcr.io/tailscale/tsrecorder. + // https://kubernetes.io/docs/reference/kubernetes-api/workload-resources/pod-v1/#image + // +optional + Image string `json:"image,omitempty"` + + // Image pull policy. One of Always, Never, IfNotPresent. Defaults to Always. + // https://kubernetes.io/docs/reference/kubernetes-api/workload-resources/pod-v1/#image + // +kubebuilder:validation:Enum=Always;Never;IfNotPresent + // +optional + ImagePullPolicy corev1.PullPolicy `json:"imagePullPolicy,omitempty"` + + // Container resource requirements. + // By default, the operator does not apply any resource requirements. The + // amount of resources required wil depend on the volume of recordings sent. + // https://kubernetes.io/docs/reference/kubernetes-api/workload-resources/pod-v1/#resources + // +optional + Resources corev1.ResourceRequirements `json:"resources,omitempty"` + + // Container security context. By default, the operator does not apply any + // container security context. + // https://kubernetes.io/docs/reference/kubernetes-api/workload-resources/pod-v1/#security-context + // +optional + SecurityContext *corev1.SecurityContext `json:"securityContext,omitempty"` +} + +type Storage struct { + // Configure an S3-compatible API for storage. Required if the UI is not + // enabled, to ensure that recordings are accessible. 
+ // +optional + S3 *S3 `json:"s3,omitempty"` +} + +type S3 struct { + // S3-compatible endpoint, e.g. s3.us-east-1.amazonaws.com. + Endpoint string `json:"endpoint,omitempty"` + + // Bucket name to write to. The bucket is expected to be used solely for + // recordings, as there is no stable prefix for written object names. + Bucket string `json:"bucket,omitempty"` + + // Configure environment variable credentials for managing objects in the + // configured bucket. If not set, tsrecorder will try to acquire credentials + // first from the file system and then the STS API. + // +optional + Credentials S3Credentials `json:"credentials,omitempty"` +} + +type S3Credentials struct { + // Use a Kubernetes Secret from the operator's namespace as the source of + // credentials. + // +optional + Secret S3Secret `json:"secret,omitempty"` +} + +type S3Secret struct { + // The name of a Kubernetes Secret in the operator's namespace that contains + // credentials for writing to the configured bucket. Each key-value pair + // from the secret's data will be mounted as an environment variable. It + // should include keys for AWS_ACCESS_KEY_ID and AWS_SECRET_ACCESS_KEY if + // using a static access key. + //+optional + Name string `json:"name,omitempty"` +} + +type RecorderStatus struct { + // List of status conditions to indicate the status of the Recorder. + // Known condition types are `RecorderReady`. + // +listType=map + // +listMapKey=type + // +optional + Conditions []metav1.Condition `json:"conditions,omitempty"` + + // List of tailnet devices associated with the Recorder statefulset. + // +listType=map + // +listMapKey=hostname + // +optional + Devices []TailnetDevice `json:"devices,omitempty"` +} + +type TailnetDevice struct { + // Hostname is the fully qualified domain name of the device. + // If MagicDNS is enabled in your tailnet, it is the MagicDNS name of the + // node. 
+ Hostname string `json:"hostname"` + + // TailnetIPs is the set of tailnet IP addresses (both IPv4 and IPv6) + // assigned to the device. + // +optional + TailnetIPs []string `json:"tailnetIPs,omitempty"` + + // URL where the UI is available if enabled for replaying recordings. This + // will be an HTTPS MagicDNS URL. You must be connected to the same tailnet + // as the recorder to access it. + // +optional + URL string `json:"url,omitempty"` +} diff --git a/k8s-operator/apis/v1alpha1/types_tsdnsconfig.go b/k8s-operator/apis/v1alpha1/types_tsdnsconfig.go index c5c2e78232242..60d212279f4f5 100644 --- a/k8s-operator/apis/v1alpha1/types_tsdnsconfig.go +++ b/k8s-operator/apis/v1alpha1/types_tsdnsconfig.go @@ -78,16 +78,16 @@ type DNSConfigSpec struct { } type Nameserver struct { - // Nameserver image. + // Nameserver image. Defaults to tailscale/k8s-nameserver:unstable. // +optional - Image *Image `json:"image,omitempty"` + Image *NameserverImage `json:"image,omitempty"` } -type Image struct { +type NameserverImage struct { // Repo defaults to tailscale/k8s-nameserver. // +optional Repo string `json:"repo,omitempty"` - // Tag defaults to operator's own tag. + // Tag defaults to unstable. // +optional Tag string `json:"tag,omitempty"` } diff --git a/k8s-operator/apis/v1alpha1/zz_generated.deepcopy.go b/k8s-operator/apis/v1alpha1/zz_generated.deepcopy.go index 648a6875b19be..5464f4e37bb48 100644 --- a/k8s-operator/apis/v1alpha1/zz_generated.deepcopy.go +++ b/k8s-operator/apis/v1alpha1/zz_generated.deepcopy.go @@ -271,21 +271,6 @@ func (in *Env) DeepCopy() *Env { return out } -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *Image) DeepCopyInto(out *Image) { - *out = *in -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Image. 
-func (in *Image) DeepCopy() *Image { - if in == nil { - return nil - } - out := new(Image) - in.DeepCopyInto(out) - return out -} - // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *Metrics) DeepCopyInto(out *Metrics) { *out = *in @@ -306,7 +291,7 @@ func (in *Nameserver) DeepCopyInto(out *Nameserver) { *out = *in if in.Image != nil { in, out := &in.Image, &out.Image - *out = new(Image) + *out = new(NameserverImage) **out = **in } } @@ -321,6 +306,21 @@ func (in *Nameserver) DeepCopy() *Nameserver { return out } +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *NameserverImage) DeepCopyInto(out *NameserverImage) { + *out = *in +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NameserverImage. +func (in *NameserverImage) DeepCopy() *NameserverImage { + if in == nil { + return nil + } + out := new(NameserverImage) + in.DeepCopyInto(out) + return out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *NameserverStatus) DeepCopyInto(out *NameserverStatus) { *out = *in @@ -515,6 +515,231 @@ func (in *ProxyClassStatus) DeepCopy() *ProxyClassStatus { return out } +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *Recorder) DeepCopyInto(out *Recorder) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + in.Status.DeepCopyInto(&out.Status) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Recorder. 
+func (in *Recorder) DeepCopy() *Recorder { + if in == nil { + return nil + } + out := new(Recorder) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *Recorder) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *RecorderContainer) DeepCopyInto(out *RecorderContainer) { + *out = *in + if in.Env != nil { + in, out := &in.Env, &out.Env + *out = make([]Env, len(*in)) + copy(*out, *in) + } + in.Resources.DeepCopyInto(&out.Resources) + if in.SecurityContext != nil { + in, out := &in.SecurityContext, &out.SecurityContext + *out = new(corev1.SecurityContext) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RecorderContainer. +func (in *RecorderContainer) DeepCopy() *RecorderContainer { + if in == nil { + return nil + } + out := new(RecorderContainer) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *RecorderList) DeepCopyInto(out *RecorderList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]Recorder, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RecorderList. +func (in *RecorderList) DeepCopy() *RecorderList { + if in == nil { + return nil + } + out := new(RecorderList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. 
+func (in *RecorderList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *RecorderPod) DeepCopyInto(out *RecorderPod) { + *out = *in + if in.Labels != nil { + in, out := &in.Labels, &out.Labels + *out = make(map[string]string, len(*in)) + for key, val := range *in { + (*out)[key] = val + } + } + if in.Annotations != nil { + in, out := &in.Annotations, &out.Annotations + *out = make(map[string]string, len(*in)) + for key, val := range *in { + (*out)[key] = val + } + } + if in.Affinity != nil { + in, out := &in.Affinity, &out.Affinity + *out = new(corev1.Affinity) + (*in).DeepCopyInto(*out) + } + in.Container.DeepCopyInto(&out.Container) + if in.SecurityContext != nil { + in, out := &in.SecurityContext, &out.SecurityContext + *out = new(corev1.PodSecurityContext) + (*in).DeepCopyInto(*out) + } + if in.ImagePullSecrets != nil { + in, out := &in.ImagePullSecrets, &out.ImagePullSecrets + *out = make([]corev1.LocalObjectReference, len(*in)) + copy(*out, *in) + } + if in.NodeSelector != nil { + in, out := &in.NodeSelector, &out.NodeSelector + *out = make(map[string]string, len(*in)) + for key, val := range *in { + (*out)[key] = val + } + } + if in.Tolerations != nil { + in, out := &in.Tolerations, &out.Tolerations + *out = make([]corev1.Toleration, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RecorderPod. +func (in *RecorderPod) DeepCopy() *RecorderPod { + if in == nil { + return nil + } + out := new(RecorderPod) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *RecorderSpec) DeepCopyInto(out *RecorderSpec) { + *out = *in + in.StatefulSet.DeepCopyInto(&out.StatefulSet) + if in.Tags != nil { + in, out := &in.Tags, &out.Tags + *out = make(Tags, len(*in)) + copy(*out, *in) + } + in.Storage.DeepCopyInto(&out.Storage) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RecorderSpec. +func (in *RecorderSpec) DeepCopy() *RecorderSpec { + if in == nil { + return nil + } + out := new(RecorderSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *RecorderStatefulSet) DeepCopyInto(out *RecorderStatefulSet) { + *out = *in + if in.Labels != nil { + in, out := &in.Labels, &out.Labels + *out = make(map[string]string, len(*in)) + for key, val := range *in { + (*out)[key] = val + } + } + if in.Annotations != nil { + in, out := &in.Annotations, &out.Annotations + *out = make(map[string]string, len(*in)) + for key, val := range *in { + (*out)[key] = val + } + } + in.Pod.DeepCopyInto(&out.Pod) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RecorderStatefulSet. +func (in *RecorderStatefulSet) DeepCopy() *RecorderStatefulSet { + if in == nil { + return nil + } + out := new(RecorderStatefulSet) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *RecorderStatus) DeepCopyInto(out *RecorderStatus) { + *out = *in + if in.Conditions != nil { + in, out := &in.Conditions, &out.Conditions + *out = make([]v1.Condition, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.Devices != nil { + in, out := &in.Devices, &out.Devices + *out = make([]TailnetDevice, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RecorderStatus. +func (in *RecorderStatus) DeepCopy() *RecorderStatus { + if in == nil { + return nil + } + out := new(RecorderStatus) + in.DeepCopyInto(out) + return out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in Routes) DeepCopyInto(out *Routes) { { @@ -534,6 +759,53 @@ func (in Routes) DeepCopy() Routes { return *out } +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *S3) DeepCopyInto(out *S3) { + *out = *in + out.Credentials = in.Credentials +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new S3. +func (in *S3) DeepCopy() *S3 { + if in == nil { + return nil + } + out := new(S3) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *S3Credentials) DeepCopyInto(out *S3Credentials) { + *out = *in + out.Secret = in.Secret +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new S3Credentials. +func (in *S3Credentials) DeepCopy() *S3Credentials { + if in == nil { + return nil + } + out := new(S3Credentials) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *S3Secret) DeepCopyInto(out *S3Secret) { + *out = *in +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new S3Secret. +func (in *S3Secret) DeepCopy() *S3Secret { + if in == nil { + return nil + } + out := new(S3Secret) + in.DeepCopyInto(out) + return out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *StatefulSet) DeepCopyInto(out *StatefulSet) { *out = *in @@ -568,6 +840,26 @@ func (in *StatefulSet) DeepCopy() *StatefulSet { return out } +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *Storage) DeepCopyInto(out *Storage) { + *out = *in + if in.S3 != nil { + in, out := &in.S3, &out.S3 + *out = new(S3) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Storage. +func (in *Storage) DeepCopy() *Storage { + if in == nil { + return nil + } + out := new(Storage) + in.DeepCopyInto(out) + return out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *SubnetRouter) DeepCopyInto(out *SubnetRouter) { *out = *in @@ -607,6 +899,26 @@ func (in Tags) DeepCopy() Tags { return *out } +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *TailnetDevice) DeepCopyInto(out *TailnetDevice) { + *out = *in + if in.TailnetIPs != nil { + in, out := &in.TailnetIPs, &out.TailnetIPs + *out = make([]string, len(*in)) + copy(*out, *in) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TailnetDevice. 
+func (in *TailnetDevice) DeepCopy() *TailnetDevice { + if in == nil { + return nil + } + out := new(TailnetDevice) + in.DeepCopyInto(out) + return out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *TailscaleConfig) DeepCopyInto(out *TailscaleConfig) { *out = *in diff --git a/k8s-operator/conditions.go b/k8s-operator/conditions.go index 23539fe616f32..322d1eb349b0e 100644 --- a/k8s-operator/conditions.go +++ b/k8s-operator/conditions.go @@ -63,6 +63,14 @@ func RemoveServiceCondition(svc *corev1.Service, conditionType tsapi.ConditionTy }) } +// SetRecorderCondition ensures that Recorder status has a condition with the +// given attributes. LastTransitionTime gets set every time condition's status +// changes. +func SetRecorderCondition(tsr *tsapi.Recorder, conditionType tsapi.ConditionType, status metav1.ConditionStatus, reason, message string, gen int64, clock tstime.Clock, logger *zap.SugaredLogger) { + conds := updateCondition(tsr.Status.Conditions, conditionType, status, reason, message, gen, clock, logger) + tsr.Status.Conditions = conds +} + func updateCondition(conds []metav1.Condition, conditionType tsapi.ConditionType, status metav1.ConditionStatus, reason, message string, gen int64, clock tstime.Clock, logger *zap.SugaredLogger) []metav1.Condition { newCondition := metav1.Condition{ Type: string(conditionType), diff --git a/k8s-operator/sessionrecording/hijacker.go b/k8s-operator/sessionrecording/hijacker.go index 2e7ec75980bac..f8ef951d415f0 100644 --- a/k8s-operator/sessionrecording/hijacker.go +++ b/k8s-operator/sessionrecording/hijacker.go @@ -15,6 +15,7 @@ import ( "io" "net" "net/http" + "net/http/httptrace" "net/netip" "strings" @@ -126,7 +127,10 @@ func (h *Hijacker) Hijack() (net.Conn, *bufio.ReadWriter, error) { func (h *Hijacker) setUpRecording(ctx context.Context, conn net.Conn) (net.Conn, error) { const ( // https://docs.asciinema.org/manual/asciicast/v2/ - 
asciicastv2 = 2 + asciicastv2 = 2 + ttyKey = "tty" + commandKey = "command" + containerKey = "container" ) var ( wc io.WriteCloser @@ -134,8 +138,15 @@ func (h *Hijacker) setUpRecording(ctx context.Context, conn net.Conn) (net.Conn, errChan <-chan error ) h.log.Infof("kubectl exec session will be recorded, recorders: %v, fail open policy: %t", h.addrs, h.failOpen) - // TODO (irbekrm): send client a message that session will be recorded. - wc, _, errChan, err = h.connectToRecorder(ctx, h.addrs, h.ts.Dial) + qp := h.req.URL.Query() + container := strings.Join(qp[containerKey], "") + var recorderAddr net.Addr + trace := &httptrace.ClientTrace{ + GotConn: func(info httptrace.GotConnInfo) { + recorderAddr = info.Conn.RemoteAddr() + }, + } + wc, _, errChan, err = h.connectToRecorder(httptrace.WithClientTrace(ctx, trace), h.addrs, h.ts.Dial) if err != nil { msg := fmt.Sprintf("error connecting to session recorders: %v", err) if h.failOpen { @@ -148,23 +159,24 @@ func (h *Hijacker) setUpRecording(ctx context.Context, conn net.Conn) (net.Conn, return nil, multierr.New(errors.New(msg), err) } return nil, errors.New(msg) + } else { + h.log.Infof("exec session to container %q in Pod %q namespace %q will be recorded, the recording will be sent to a tsrecorder instance at %q", container, h.pod, h.ns, recorderAddr) } - // TODO (irbekrm): log which recorder - h.log.Info("successfully connected to a session recorder") cl := tstime.DefaultClock{} - rec := tsrecorder.New(wc, cl, cl.Now(), h.failOpen) - qp := h.req.URL.Query() + rec := tsrecorder.New(wc, cl, cl.Now(), h.failOpen, h.log) + tty := strings.Join(qp[ttyKey], "") + hasTerm := (tty == "true") // session has terminal attached ch := sessionrecording.CastHeader{ Version: asciicastv2, Timestamp: cl.Now().Unix(), - Command: strings.Join(qp["command"], " "), + Command: strings.Join(qp[commandKey], " "), SrcNode: strings.TrimSuffix(h.who.Node.Name, "."), SrcNodeID: h.who.Node.StableID, Kubernetes: &sessionrecording.Kubernetes{ 
PodName: h.pod, Namespace: h.ns, - Container: strings.Join(qp["container"], " "), + Container: container, }, } if !h.who.Node.IsTagged() { @@ -177,9 +189,9 @@ func (h *Hijacker) setUpRecording(ctx context.Context, conn net.Conn) (net.Conn, var lc net.Conn switch h.proto { case SPDYProtocol: - lc = spdy.New(conn, rec, ch, h.log) + lc = spdy.New(conn, rec, ch, hasTerm, h.log) case WSProtocol: - lc = ws.New(conn, rec, ch, h.log) + lc = ws.New(conn, rec, ch, hasTerm, h.log) default: return nil, fmt.Errorf("unknown protocol: %s", h.proto) } diff --git a/k8s-operator/sessionrecording/hijacker_test.go b/k8s-operator/sessionrecording/hijacker_test.go index 5c19d3a1d870e..440d9c94294c9 100644 --- a/k8s-operator/sessionrecording/hijacker_test.go +++ b/k8s-operator/sessionrecording/hijacker_test.go @@ -78,7 +78,10 @@ func Test_Hijacker(t *testing.T) { tc := &fakes.TestConn{} ch := make(chan error) h := &Hijacker{ - connectToRecorder: func(context.Context, []netip.AddrPort, func(context.Context, string, string) (net.Conn, error)) (wc io.WriteCloser, rec []*tailcfg.SSHRecordingAttempt, _ <-chan error, err error) { + connectToRecorder: func(context.Context, + []netip.AddrPort, + func(context.Context, string, string) (net.Conn, error), + ) (wc io.WriteCloser, rec []*tailcfg.SSHRecordingAttempt, _ <-chan error, err error) { if tt.failRecorderConnect { err = errors.New("test") } diff --git a/k8s-operator/sessionrecording/spdy/conn.go b/k8s-operator/sessionrecording/spdy/conn.go index 19a01641e4155..455c2225ad921 100644 --- a/k8s-operator/sessionrecording/spdy/conn.go +++ b/k8s-operator/sessionrecording/spdy/conn.go @@ -28,14 +28,16 @@ import ( // The hijacked connection is used to transmit SPDY streams between Kubernetes client ('kubectl') and the destination container. // Data read from the underlying network connection is data sent via one of the SPDY streams from the client to the container. 
// Data written to the underlying connection is data sent from the container to the client. -// We parse the data and send everything for the STDOUT/STDERR streams to the configured tsrecorder as an asciinema recording with the provided header. +// We parse the data and send everything for the stdout/stderr streams to the configured tsrecorder as an asciinema recording with the provided header. // https://github.com/kubernetes/enhancements/tree/master/keps/sig-api-machinery/4006-transition-spdy-to-websockets#background-remotecommand-subprotocol -func New(nc net.Conn, rec *tsrecorder.Client, ch sessionrecording.CastHeader, log *zap.SugaredLogger) net.Conn { +func New(nc net.Conn, rec *tsrecorder.Client, ch sessionrecording.CastHeader, hasTerm bool, log *zap.SugaredLogger) net.Conn { return &conn{ - Conn: nc, - rec: rec, - ch: ch, - log: log, + Conn: nc, + rec: rec, + ch: ch, + log: log, + hasTerm: hasTerm, + initialTermSizeSet: make(chan struct{}), } } @@ -47,7 +49,6 @@ type conn struct { net.Conn // rec knows how to send data written to it to a tsrecorder instance. rec *tsrecorder.Client - ch sessionrecording.CastHeader stdoutStreamID atomic.Uint32 stderrStreamID atomic.Uint32 @@ -56,8 +57,37 @@ type conn struct { wmu sync.Mutex // sequences writes closed bool - rmu sync.Mutex // sequences reads + rmu sync.Mutex // sequences reads + + // The following fields are related to sending asciinema CastHeader. + // CastHeader must be sent before any payload. If the session has a + // terminal attached, the CastHeader must have '.Width' and '.Height' + // fields set for the tsrecorder UI to be able to play the recording. + // For 'kubectl exec' sessions, terminal width and height are sent as a + // resize message on resize stream from the client when the session + // starts as well as at any time the client detects a terminal change. + // We can intercept the resize message on Read calls. 
As there is no + // guarantee that the resize message from client will be intercepted + // before server writes stdout messages that we must record, we need to + // ensure that parsing stdout/stderr messages written to the connection + // waits till a resize message has been received and a CastHeader with + // correct terminal dimensions can be written. + + // ch is the asciinema CastHeader for the current session. + // https://docs.asciinema.org/manual/asciicast/v2/#header + ch sessionrecording.CastHeader + // writeCastHeaderOnce is used to ensure CastHeader gets sent to tsrecorder once. writeCastHeaderOnce sync.Once + hasTerm bool // whether the session had TTY attached + // initialTermSizeSet channel gets sent a value once, when the Read has + // received a resize message and set the initial terminal size. It must + // be set to a buffered channel to prevent Reads being blocked on the + // first stdout/stderr write reading from the channel. + initialTermSizeSet chan struct{} + // sendInitialTermSizeSetOnce is used to ensure that a value is sent to + // initialTermSizeSet channel only once, when the initial resize message + // is received. + sendinitialTermSizeSetOnce sync.Once zlibReqReader zlibReader // writeBuf is used to store data written to the connection that has not @@ -97,13 +127,28 @@ func (c *conn) Read(b []byte) (int, error) { if !sf.Ctrl { // data frame switch sf.StreamID { case c.resizeStreamID.Load(): - var err error + var msg spdyResizeMsg if err = json.Unmarshal(sf.Payload, &msg); err != nil { return 0, fmt.Errorf("error umarshalling resize msg: %w", err) } c.ch.Width = msg.Width c.ch.Height = msg.Height + + // If this is initial resize message, the width and + // height will be sent in the CastHeader. If this is a + // subsequent resize message, we need to send asciinema + // resize message. 
+ var isInitialResize bool + c.sendinitialTermSizeSetOnce.Do(func() { + isInitialResize = true + close(c.initialTermSizeSet) // unblock sending of CastHeader + }) + if !isInitialResize { + if err := c.rec.WriteResize(c.ch.Height, c.ch.Width); err != nil { + return 0, fmt.Errorf("error writing resize message: %w", err) + } + } } return n, nil } @@ -147,21 +192,21 @@ func (c *conn) Write(b []byte) (int, error) { case c.stdoutStreamID.Load(), c.stderrStreamID.Load(): var err error c.writeCastHeaderOnce.Do(func() { - var j []byte - j, err = json.Marshal(c.ch) - if err != nil { - return - } - j = append(j, '\n') - err = c.rec.WriteCastLine(j) - if err != nil { - c.log.Errorf("received error from recorder: %v", err) + // If this is a session with a terminal attached, + // we must wait for the terminal width and + // height to be parsed from a resize message + // before sending CastHeader, else tsrecorder + // will not be able to play this recording. + if c.hasTerm { + c.log.Debugf("write: waiting for the initial terminal size to be set before proceeding with sending the first payload") + <-c.initialTermSizeSet } + err = c.rec.WriteCastHeader(c.ch) }) if err != nil { return 0, fmt.Errorf("error writing CastHeader: %w", err) } - if err := c.rec.Write(sf.Payload); err != nil { + if err := c.rec.WriteOutput(sf.Payload); err != nil { return 0, fmt.Errorf("error sending payload to session recorder: %w", err) } } diff --git a/k8s-operator/sessionrecording/spdy/conn_test.go b/k8s-operator/sessionrecording/spdy/conn_test.go index 629536b2e00b1..3485d61c4f454 100644 --- a/k8s-operator/sessionrecording/spdy/conn_test.go +++ b/k8s-operator/sessionrecording/spdy/conn_test.go @@ -29,13 +29,15 @@ func Test_Writes(t *testing.T) { } cl := tstest.NewClock(tstest.ClockOpts{}) tests := []struct { - name string - inputs [][]byte - wantForwarded []byte - wantRecorded []byte - firstWrite bool - width int - height int + name string + inputs [][]byte + wantForwarded []byte + wantRecorded []byte 
+ firstWrite bool + width int + height int + sendInitialResize bool + hasTerm bool }{ { name: "single_write_control_frame_with_payload", @@ -76,7 +78,18 @@ func Test_Writes(t *testing.T) { wantRecorded: fakes.CastLine(t, []byte{0x1, 0x2, 0x3, 0x4, 0x5}, cl), }, { - name: "single_first_write_stdout_data_frame_with_payload", + name: "single_first_write_stdout_data_frame_with_payload_sess_has_terminal", + inputs: [][]byte{{0x0, 0x0, 0x0, 0x1, 0x0, 0x0, 0x0, 0x5, 0x1, 0x2, 0x3, 0x4, 0x5}}, + wantForwarded: []byte{0x0, 0x0, 0x0, 0x1, 0x0, 0x0, 0x0, 0x5, 0x1, 0x2, 0x3, 0x4, 0x5}, + wantRecorded: append(fakes.AsciinemaResizeMsg(t, 10, 20), fakes.CastLine(t, []byte{0x1, 0x2, 0x3, 0x4, 0x5}, cl)...), + width: 10, + height: 20, + hasTerm: true, + firstWrite: true, + sendInitialResize: true, + }, + { + name: "single_first_write_stdout_data_frame_with_payload_sess_does_not_have_terminal", inputs: [][]byte{{0x0, 0x0, 0x0, 0x1, 0x0, 0x0, 0x0, 0x5, 0x1, 0x2, 0x3, 0x4, 0x5}}, wantForwarded: []byte{0x0, 0x0, 0x0, 0x1, 0x0, 0x0, 0x0, 0x5, 0x1, 0x2, 0x3, 0x4, 0x5}, wantRecorded: append(fakes.AsciinemaResizeMsg(t, 10, 20), fakes.CastLine(t, []byte{0x1, 0x2, 0x3, 0x4, 0x5}, cl)...), @@ -89,7 +102,7 @@ func Test_Writes(t *testing.T) { t.Run(tt.name, func(t *testing.T) { tc := &fakes.TestConn{} sr := &fakes.TestSessionRecorder{} - rec := tsrecorder.New(sr, cl, cl.Now(), true) + rec := tsrecorder.New(sr, cl, cl.Now(), true, zl.Sugar()) c := &conn{ Conn: tc, @@ -99,15 +112,21 @@ func Test_Writes(t *testing.T) { Width: tt.width, Height: tt.height, }, + initialTermSizeSet: make(chan struct{}), + hasTerm: tt.hasTerm, } if !tt.firstWrite { // this test case does not intend to test that cast header gets written once c.writeCastHeaderOnce.Do(func() {}) } + if tt.sendInitialResize { + close(c.initialTermSizeSet) + } c.stdoutStreamID.Store(stdoutStreamID) c.stderrStreamID.Store(stderrStreamID) for i, input := range tt.inputs { + c.hasTerm = tt.hasTerm if _, err := c.Write(input); err != nil { 
t.Errorf("[%d] spdyRemoteConnRecorder.Write() unexpected error %v", i, err) } @@ -195,11 +214,12 @@ func Test_Reads(t *testing.T) { t.Run(tt.name, func(t *testing.T) { tc := &fakes.TestConn{} sr := &fakes.TestSessionRecorder{} - rec := tsrecorder.New(sr, cl, cl.Now(), true) + rec := tsrecorder.New(sr, cl, cl.Now(), true, zl.Sugar()) c := &conn{ - Conn: tc, - log: zl.Sugar(), - rec: rec, + Conn: tc, + log: zl.Sugar(), + rec: rec, + initialTermSizeSet: make(chan struct{}), } c.resizeStreamID.Store(tt.resizeStreamIDBeforeRead) diff --git a/k8s-operator/sessionrecording/tsrecorder/tsrecorder.go b/k8s-operator/sessionrecording/tsrecorder/tsrecorder.go index 30142e4bdd1a5..af5fcb8da641a 100644 --- a/k8s-operator/sessionrecording/tsrecorder/tsrecorder.go +++ b/k8s-operator/sessionrecording/tsrecorder/tsrecorder.go @@ -14,10 +14,12 @@ import ( "time" "github.com/pkg/errors" + "go.uber.org/zap" + "tailscale.com/sessionrecording" "tailscale.com/tstime" ) -func New(conn io.WriteCloser, clock tstime.Clock, start time.Time, failOpen bool) *Client { +func New(conn io.WriteCloser, clock tstime.Clock, start time.Time, failOpen bool, logger *zap.SugaredLogger) *Client { return &Client{ start: start, clock: clock, @@ -35,38 +37,66 @@ type Client struct { // failOpen specifies whether the session should be allowed to // continue if writing to the recording fails. failOpen bool + // failedOpen is set to true if the recording of this session failed and + // we should not attempt to send any more data. + failedOpen bool - // backOff is set to true if we've failed open and should stop - // attempting to write to tsrecorder. - backOff bool + logger *zap.SugaredLogger mu sync.Mutex // guards writes to conn conn io.WriteCloser // connection to a tsrecorder instance } -// Write appends timestamp to the provided bytes and sends them to the -// configured tsrecorder. -func (rec *Client) Write(p []byte) (err error) { +// WriteOutput sends terminal stdout and stderr to the tsrecorder. 
+// https://docs.asciinema.org/manual/asciicast/v2/#o-output-data-written-to-a-terminal
+func (rec *Client) WriteOutput(p []byte) (err error) {
+	const outputEventCode = "o"
 	if len(p) == 0 {
 		return nil
 	}
-	if rec.backOff {
+	return rec.write([]any{
+		rec.clock.Now().Sub(rec.start).Seconds(),
+		outputEventCode,
+		string(p)})
+}
+
+// WriteResize writes an asciinema resize message. This can be called if
+// terminal size has changed.
+// https://docs.asciinema.org/manual/asciicast/v2/#r-resize
+func (rec *Client) WriteResize(height, width int) (err error) {
+	const resizeEventCode = "r"
+	p := fmt.Sprintf("%dx%d", height, width)
+	return rec.write([]any{
+		rec.clock.Now().Sub(rec.start).Seconds(),
+		resizeEventCode,
+		string(p)})
+}
+
+// WriteCastHeader writes asciinema CastHeader. This must be called once,
+// before any payload is sent to the tsrecorder.
+// https://docs.asciinema.org/manual/asciicast/v2/#header
+func (rec *Client) WriteCastHeader(ch sessionrecording.CastHeader) error {
+	return rec.write(ch)
+}
+
+// write writes the data to session recorder. If recording fails and policy is
+// 'fail open', sets the state to failed and does not attempt to write any more
+// data during this session.
+func (rec *Client) write(data any) error {
+	if rec.failedOpen {
 		return nil
 	}
-	j, err := json.Marshal([]any{
-		rec.clock.Now().Sub(rec.start).Seconds(),
-		"o",
-		string(p),
-	})
+	j, err := json.Marshal(data)
 	if err != nil {
-		return fmt.Errorf("error marhalling payload: %w", err)
+		return fmt.Errorf("error marshalling data as json: %v", err)
 	}
 	j = append(j, '\n')
-	if err := rec.WriteCastLine(j); err != nil {
+	if err := rec.writeCastLine(j); err != nil {
 		if !rec.failOpen {
 			return fmt.Errorf("error writing payload to recorder: %w", err)
 		}
-		rec.backOff = true
+		rec.logger.Infof("error writing to tsrecorder: %v. 
Failure policy is to fail open, so rest of session contents will not be recorded.", err) + rec.failedOpen = true } return nil } @@ -82,9 +112,9 @@ func (rec *Client) Close() error { return err } -// writeCastLine sends bytes to the tsrecorder. The bytes should be in +// writeToRecorder sends bytes to the tsrecorder. The bytes should be in // asciinema format. -func (c *Client) WriteCastLine(j []byte) error { +func (c *Client) writeCastLine(j []byte) error { c.mu.Lock() defer c.mu.Unlock() if c.conn == nil { diff --git a/k8s-operator/sessionrecording/ws/conn.go b/k8s-operator/sessionrecording/ws/conn.go index 82fd094d15364..86029f67b1f13 100644 --- a/k8s-operator/sessionrecording/ws/conn.go +++ b/k8s-operator/sessionrecording/ws/conn.go @@ -28,14 +28,16 @@ import ( // The hijacked connection is used to transmit *.channel.k8s.io streams between Kubernetes client ('kubectl') and the destination proxy controlled by Kubernetes. // Data read from the underlying network connection is data sent via one of the streams from the client to the container. // Data written to the underlying connection is data sent from the container to the client. -// We parse the data and send everything for the STDOUT/STDERR streams to the configured tsrecorder as an asciinema recording with the provided header. +// We parse the data and send everything for the stdout/stderr streams to the configured tsrecorder as an asciinema recording with the provided header. 
// https://github.com/kubernetes/enhancements/tree/master/keps/sig-api-machinery/4006-transition-spdy-to-websockets#proposal-new-remotecommand-sub-protocol-version---v5channelk8sio -func New(c net.Conn, rec *tsrecorder.Client, ch sessionrecording.CastHeader, log *zap.SugaredLogger) net.Conn { +func New(c net.Conn, rec *tsrecorder.Client, ch sessionrecording.CastHeader, hasTerm bool, log *zap.SugaredLogger) net.Conn { return &conn{ - Conn: c, - rec: rec, - ch: ch, - log: log, + Conn: c, + rec: rec, + ch: ch, + hasTerm: hasTerm, + log: log, + initialTermSizeSet: make(chan struct{}, 1), } } @@ -49,8 +51,37 @@ type conn struct { net.Conn // rec knows how to send data to a tsrecorder instance. rec *tsrecorder.Client - // ch is the asiinema CastHeader for a session. - ch sessionrecording.CastHeader + + // The following fields are related to sending asciinema CastHeader. + // CastHeader must be sent before any payload. If the session has a + // terminal attached, the CastHeader must have '.Width' and '.Height' + // fields set for the tsrecorder UI to be able to play the recording. + // For 'kubectl exec' sessions, terminal width and height are sent as a + // resize message on resize stream from the client when the session + // starts as well as at any time the client detects a terminal change. + // We can intercept the resize message on Read calls. As there is no + // guarantee that the resize message from client will be intercepted + // before server writes stdout messages that we must record, we need to + // ensure that parsing stdout/stderr messages written to the connection + // waits till a resize message has been received and a CastHeader with + // correct terminal dimensions can be written. + + // ch is asciinema CastHeader for the current session. + // https://docs.asciinema.org/manual/asciicast/v2/#header + ch sessionrecording.CastHeader + // writeCastHeaderOnce is used to ensure CastHeader gets sent to tsrecorder once. 
+ writeCastHeaderOnce sync.Once + hasTerm bool // whether the session has TTY attached + // initialTermSizeSet channel gets sent a value once, when the Read has + // received a resize message and set the initial terminal size. It must + // be set to a buffered channel to prevent Reads being blocked on the + // first stdout/stderr write reading from the channel. + initialTermSizeSet chan struct{} + // sendInitialTermSizeSetOnce is used to ensure that a value is sent to + // initialTermSizeSet channel only once, when the initial resize message + // is received. + sendInitialTermSizeSetOnce sync.Once + log *zap.SugaredLogger rmu sync.Mutex // sequences reads @@ -63,9 +94,8 @@ type conn struct { // the original byte array. readBuf bytes.Buffer - wmu sync.Mutex // sequences writes - writeCastHeaderOnce sync.Once - closed bool // connection is closed + wmu sync.Mutex // sequences writes + closed bool // connection is closed // writeBuf contains bytes for a currently parsed binary data message // being written to the underlying conn. If the message is masked, it is // unmasked in place, so having this buffer allows us to avoid modifying @@ -140,17 +170,32 @@ func (c *conn) Read(b []byte) (int, error) { } c.readBuf.Next(len(readMsg.raw)) - if readMsg.isFinalized { + if readMsg.isFinalized && !c.readMsgIsIncomplete() { // Stream IDs for websocket streams are static. // https://github.com/kubernetes/client-go/blob/v0.30.0-rc.1/tools/remotecommand/websocket.go#L218 if readMsg.streamID.Load() == remotecommand.StreamResize { - var err error var msg tsrecorder.ResizeMsg if err = json.Unmarshal(readMsg.payload, &msg); err != nil { return 0, fmt.Errorf("error umarshalling resize message: %w", err) } + c.ch.Width = msg.Width c.ch.Height = msg.Height + + // If this is initial resize message, the width and + // height will be sent in the CastHeader. If this is a + // subsequent resize message, we need to send asciinema + // resize message. 
+ var isInitialResize bool + c.sendInitialTermSizeSetOnce.Do(func() { + isInitialResize = true + close(c.initialTermSizeSet) // unblock sending of CastHeader + }) + if !isInitialResize { + if err := c.rec.WriteResize(c.ch.Height, c.ch.Width); err != nil { + return 0, fmt.Errorf("error writing resize message: %w", err) + } + } } } c.currentReadMsg = readMsg @@ -209,22 +254,21 @@ func (c *conn) Write(b []byte) (int, error) { if writeMsg.streamID.Load() == remotecommand.StreamStdOut || writeMsg.streamID.Load() == remotecommand.StreamStdErr { var err error c.writeCastHeaderOnce.Do(func() { - var j []byte - j, err = json.Marshal(c.ch) - if err != nil { - c.log.Errorf("error marhsalling conn: %v", err) - return - } - j = append(j, '\n') - err = c.rec.WriteCastLine(j) - if err != nil { - c.log.Errorf("received error from recorder: %v", err) + // If this is a session with a terminal attached, + // we must wait for the terminal width and + // height to be parsed from a resize message + // before sending CastHeader, else tsrecorder + // will not be able to play this recording. 
+ if c.hasTerm { + c.log.Debug("waiting for terminal size to be set before starting to send recorded data") + <-c.initialTermSizeSet } + err = c.rec.WriteCastHeader(c.ch) }) if err != nil { return 0, fmt.Errorf("error writing CastHeader: %w", err) } - if err := c.rec.Write(writeMsg.payload); err != nil { + if err := c.rec.WriteOutput(writeMsg.payload); err != nil { return 0, fmt.Errorf("error writing message to recorder: %v", err) } } diff --git a/k8s-operator/sessionrecording/ws/conn_test.go b/k8s-operator/sessionrecording/ws/conn_test.go index 2fcbeb7cabdc1..11174480ba605 100644 --- a/k8s-operator/sessionrecording/ws/conn_test.go +++ b/k8s-operator/sessionrecording/ws/conn_test.go @@ -65,6 +65,7 @@ func Test_conn_Read(t *testing.T) { log: zl.Sugar(), } for i, input := range tt.inputs { + c.initialTermSizeSet = make(chan struct{}) if err := tc.WriteReadBufBytes(input); err != nil { t.Fatalf("writing bytes to test conn: %v", err) } @@ -93,13 +94,15 @@ func Test_conn_Write(t *testing.T) { } cl := tstest.NewClock(tstest.ClockOpts{}) tests := []struct { - name string - inputs [][]byte - wantForwarded []byte - wantRecorded []byte - firstWrite bool - width int - height int + name string + inputs [][]byte + wantForwarded []byte + wantRecorded []byte + firstWrite bool + width int + height int + hasTerm bool + sendInitialResize bool }{ { name: "single_write_control_frame", @@ -144,12 +147,23 @@ func Test_conn_Write(t *testing.T) { wantForwarded: []byte{0x2, 0x3, 0x1, 0x7, 0x8, 0x80, 0x6, 0x1, 0x1, 0x2, 0x3, 0x4, 0x5}, wantRecorded: fakes.CastLine(t, []byte{0x7, 0x8, 0x1, 0x2, 0x3, 0x4, 0x5}, cl), }, + { + name: "three_writes_stdout_data_message_with_split_fragment_cast_header_with_terminal", + inputs: [][]byte{{0x2, 0x3, 0x1, 0x7, 0x8}, {0x80, 0x6, 0x1, 0x1, 0x2, 0x3}, {0x4, 0x5}}, + wantForwarded: []byte{0x2, 0x3, 0x1, 0x7, 0x8, 0x80, 0x6, 0x1, 0x1, 0x2, 0x3, 0x4, 0x5}, + wantRecorded: append(fakes.AsciinemaResizeMsg(t, 10, 20), fakes.CastLine(t, []byte{0x7, 0x8, 0x1, 
0x2, 0x3, 0x4, 0x5}, cl)...), + height: 20, + width: 10, + hasTerm: true, + firstWrite: true, + sendInitialResize: true, + }, } for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { tc := &fakes.TestConn{} sr := &fakes.TestSessionRecorder{} - rec := tsrecorder.New(sr, cl, cl.Now(), true) + rec := tsrecorder.New(sr, cl, cl.Now(), true, zl.Sugar()) c := &conn{ Conn: tc, log: zl.Sugar(), @@ -157,12 +171,17 @@ func Test_conn_Write(t *testing.T) { Width: tt.width, Height: tt.height, }, - rec: rec, + rec: rec, + initialTermSizeSet: make(chan struct{}), + hasTerm: tt.hasTerm, } if !tt.firstWrite { // This test case does not intend to test that cast header gets written once. c.writeCastHeaderOnce.Do(func() {}) } + if tt.sendInitialResize { + close(c.initialTermSizeSet) + } for i, input := range tt.inputs { _, err := c.Write(input) if err != nil { @@ -221,7 +240,7 @@ func Test_conn_WriteRand(t *testing.T) { } cl := tstest.NewClock(tstest.ClockOpts{}) sr := &fakes.TestSessionRecorder{} - rec := tsrecorder.New(sr, cl, cl.Now(), true) + rec := tsrecorder.New(sr, cl, cl.Now(), true, zl.Sugar()) for i := range 100 { tc := &fakes.TestConn{} c := &conn{ diff --git a/kube/api.go b/kube/kubeapi/api.go similarity index 96% rename from kube/api.go rename to kube/kubeapi/api.go index b49b76c340fe0..0e42437a69a2a 100644 --- a/kube/api.go +++ b/kube/kubeapi/api.go @@ -1,7 +1,11 @@ // Copyright (c) Tailscale Inc & AUTHORS // SPDX-License-Identifier: BSD-3-Clause -package kube +// Package kubeapi contains Kubernetes API types for internal consumption. +// These types are split into a separate package for consumption of +// non-Kubernetes shared libraries and binaries. Be mindful of not increasing +// dependency size for those consumers when adding anything new here. 
+package kubeapi import "time" diff --git a/kube/client.go b/kube/kubeclient/client.go similarity index 89% rename from kube/client.go rename to kube/kubeclient/client.go index 62daa366e4080..35cb4f713e5a6 100644 --- a/kube/client.go +++ b/kube/kubeclient/client.go @@ -1,10 +1,13 @@ // Copyright (c) Tailscale Inc & AUTHORS // SPDX-License-Identifier: BSD-3-Clause -// Package kube provides a client to interact with Kubernetes. +// Package kubeclient provides a client to interact with Kubernetes. // This package is Tailscale-internal and not meant for external consumption. // Further, the API should not be considered stable. -package kube +// Client is split into a separate package for consumption of +// non-Kubernetes shared libraries and binaries. Be mindful of not increasing +// dependency size for those consumers when adding anything new here. +package kubeclient import ( "bytes" @@ -23,6 +26,7 @@ import ( "sync" "time" + "tailscale.com/kube/kubeapi" "tailscale.com/util/multierr" ) @@ -50,10 +54,10 @@ func readFile(n string) ([]byte, error) { // Client handles connections to Kubernetes. // It expects to be run inside a cluster. 
type Client interface { - GetSecret(context.Context, string) (*Secret, error) - UpdateSecret(context.Context, *Secret) error - CreateSecret(context.Context, *Secret) error - StrategicMergePatchSecret(context.Context, string, *Secret, string) error + GetSecret(context.Context, string) (*kubeapi.Secret, error) + UpdateSecret(context.Context, *kubeapi.Secret) error + CreateSecret(context.Context, *kubeapi.Secret) error + StrategicMergePatchSecret(context.Context, string, *kubeapi.Secret, string) error JSONPatchSecret(context.Context, string, []JSONPatch) error CheckSecretPermissions(context.Context, string) (bool, bool, error) SetDialer(dialer func(context.Context, string, string) (net.Conn, error)) @@ -144,7 +148,7 @@ func getError(resp *http.Response) error { // https://github.com/kubernetes/community/blob/master/contributors/devel/sig-architecture/api-conventions.md#http-status-codes return nil } - st := &Status{} + st := &kubeapi.Status{} if err := json.NewDecoder(resp.Body).Decode(st); err != nil { return err } @@ -178,7 +182,7 @@ func (c *client) doRequest(ctx context.Context, method, url string, in, out any, } defer resp.Body.Close() if err := getError(resp); err != nil { - if st, ok := err.(*Status); ok && st.Code == 401 { + if st, ok := err.(*kubeapi.Status); ok && st.Code == 401 { c.expireToken() } return err @@ -220,8 +224,8 @@ func (c *client) newRequest(ctx context.Context, method, url string, in any) (*h } // GetSecret fetches the secret from the Kubernetes API. 
-func (c *client) GetSecret(ctx context.Context, name string) (*Secret, error) { - s := &Secret{Data: make(map[string][]byte)} +func (c *client) GetSecret(ctx context.Context, name string) (*kubeapi.Secret, error) { + s := &kubeapi.Secret{Data: make(map[string][]byte)} if err := c.doRequest(ctx, "GET", c.secretURL(name), nil, s); err != nil { return nil, err } @@ -229,13 +233,13 @@ func (c *client) GetSecret(ctx context.Context, name string) (*Secret, error) { } // CreateSecret creates a secret in the Kubernetes API. -func (c *client) CreateSecret(ctx context.Context, s *Secret) error { +func (c *client) CreateSecret(ctx context.Context, s *kubeapi.Secret) error { s.Namespace = c.ns return c.doRequest(ctx, "POST", c.secretURL(""), s, nil) } // UpdateSecret updates a secret in the Kubernetes API. -func (c *client) UpdateSecret(ctx context.Context, s *Secret) error { +func (c *client) UpdateSecret(ctx context.Context, s *kubeapi.Secret) error { return c.doRequest(ctx, "PUT", c.secretURL(s.Name), s, nil) } @@ -263,7 +267,7 @@ func (c *client) JSONPatchSecret(ctx context.Context, name string, patch []JSONP // StrategicMergePatchSecret updates a secret in the Kubernetes API using a // strategic merge patch. // If a fieldManager is provided, it will be used to track the patch. 
-func (c *client) StrategicMergePatchSecret(ctx context.Context, name string, s *Secret, fieldManager string) error { +func (c *client) StrategicMergePatchSecret(ctx context.Context, name string, s *kubeapi.Secret, fieldManager string) error { surl := c.secretURL(name) if fieldManager != "" { uv := url.Values{ @@ -340,7 +344,7 @@ func (c *client) checkPermission(ctx context.Context, verb, secretName string) ( } func IsNotFoundErr(err error) bool { - if st, ok := err.(*Status); ok && st.Code == 404 { + if st, ok := err.(*kubeapi.Status); ok && st.Code == 404 { return true } return false diff --git a/kube/fake_client.go b/kube/kubeclient/fake_client.go similarity index 68% rename from kube/fake_client.go rename to kube/kubeclient/fake_client.go index ad5e8201d603a..3cef3d27ee0df 100644 --- a/kube/fake_client.go +++ b/kube/kubeclient/fake_client.go @@ -1,34 +1,36 @@ // Copyright (c) Tailscale Inc & AUTHORS // SPDX-License-Identifier: BSD-3-Clause -package kube +package kubeclient import ( "context" "net" + + "tailscale.com/kube/kubeapi" ) var _ Client = &FakeClient{} type FakeClient struct { - GetSecretImpl func(context.Context, string) (*Secret, error) + GetSecretImpl func(context.Context, string) (*kubeapi.Secret, error) CheckSecretPermissionsImpl func(ctx context.Context, name string) (bool, bool, error) } func (fc *FakeClient) CheckSecretPermissions(ctx context.Context, name string) (bool, bool, error) { return fc.CheckSecretPermissionsImpl(ctx, name) } -func (fc *FakeClient) GetSecret(ctx context.Context, name string) (*Secret, error) { +func (fc *FakeClient) GetSecret(ctx context.Context, name string) (*kubeapi.Secret, error) { return fc.GetSecretImpl(ctx, name) } func (fc *FakeClient) SetURL(_ string) {} func (fc *FakeClient) SetDialer(dialer func(ctx context.Context, network, addr string) (net.Conn, error)) { } -func (fc *FakeClient) StrategicMergePatchSecret(context.Context, string, *Secret, string) error { +func (fc *FakeClient) 
StrategicMergePatchSecret(context.Context, string, *kubeapi.Secret, string) error { return nil } func (fc *FakeClient) JSONPatchSecret(context.Context, string, []JSONPatch) error { return nil } -func (fc *FakeClient) UpdateSecret(context.Context, *Secret) error { return nil } -func (fc *FakeClient) CreateSecret(context.Context, *Secret) error { return nil } +func (fc *FakeClient) UpdateSecret(context.Context, *kubeapi.Secret) error { return nil } +func (fc *FakeClient) CreateSecret(context.Context, *kubeapi.Secret) error { return nil } diff --git a/kube/grants.go b/kube/kubetypes/grants.go similarity index 89% rename from kube/grants.go rename to kube/kubetypes/grants.go index f87143054cdfa..4dc278ff14d4c 100644 --- a/kube/grants.go +++ b/kube/kubetypes/grants.go @@ -1,7 +1,12 @@ // Copyright (c) Tailscale Inc & AUTHORS // SPDX-License-Identifier: BSD-3-Clause -package kube +// Package kubetypes contains types and constants related to the Tailscale +// Kubernetes Operator. +// These are split into a separate package for consumption of +// non-Kubernetes shared libraries and binaries. Be mindful of not increasing +// dependency size for those consumers when adding anything new here. +package kubetypes import "net/netip" diff --git a/kube/kubetypes/metrics.go b/kube/kubetypes/metrics.go new file mode 100644 index 0000000000000..e9e30cfc7e829 --- /dev/null +++ b/kube/kubetypes/metrics.go @@ -0,0 +1,24 @@ +// Copyright (c) Tailscale Inc & AUTHORS +// SPDX-License-Identifier: BSD-3-Clause + +package kubetypes + +const ( + // Hostinfo App values for the Tailscale Kubernetes Operator components. 
+ AppOperator = "k8s-operator" + AppAPIServerProxy = "k8s-operator-proxy" + AppIngressProxy = "k8s-operator-ingress-proxy" + AppIngressResource = "k8s-operator-ingress-resource" + AppEgressProxy = "k8s-operator-egress-proxy" + AppConnector = "k8s-operator-connector-resource" + + // Clientmetrics for Tailscale Kubernetes Operator components + MetricIngressProxyCount = "k8s_ingress_proxies" // L3 + MetricIngressResourceCount = "k8s_ingress_resources" // L7 + MetricEgressProxyCount = "k8s_egress_proxies" + MetricConnectorResourceCount = "k8s_connector_resources" + MetricConnectorWithSubnetRouterCount = "k8s_connector_subnetrouter_resources" + MetricConnectorWithExitNodeCount = "k8s_connector_exitnode_resources" + MetricNameserverCount = "k8s_nameserver_resources" + MetricRecorderCount = "k8s_recorder_resources" +) diff --git a/logpolicy/logpolicy.go b/logpolicy/logpolicy.go index f40ede86a7235..9e00a3ad498c1 100644 --- a/logpolicy/logpolicy.go +++ b/logpolicy/logpolicy.go @@ -18,6 +18,7 @@ import ( "log" "net" "net/http" + "net/netip" "net/url" "os" "os/exec" @@ -835,7 +836,19 @@ func awaitGokrazyNetwork() { // Before DHCP finishes, the /etc/resolv.conf file has just "#MANUAL". all, _ := os.ReadFile("/etc/resolv.conf") if bytes.Contains(all, []byte("nameserver ")) { - return + good := true + firstLine, _, ok := strings.Cut(string(all), "\n") + if ok { + ns, ok := strings.CutPrefix(firstLine, "nameserver ") + if ok { + if ip, err := netip.ParseAddr(ns); err == nil && ip.Is6() && !ip.IsLinkLocalUnicast() { + good = haveGlobalUnicastIPv6() + } + } + } + if good { + return + } } select { case <-ctx.Done(): @@ -844,3 +857,27 @@ func awaitGokrazyNetwork() { } } } + +// haveGlobalUnicastIPv6 reports whether the machine has a IPv6 non-private +// (non-ULA) global unicast address. +// +// It's only intended for use in natlab integration tests so only works on +// Linux/macOS now and not environments (such as Android) where net.Interfaces +// doesn't work directly. 
+func haveGlobalUnicastIPv6() bool { + ifs, _ := net.Interfaces() + for _, ni := range ifs { + aa, _ := ni.Addrs() + for _, a := range aa { + ipn, ok := a.(*net.IPNet) + if !ok { + continue + } + ip, _ := netip.AddrFromSlice(ipn.IP) + if ip.Is6() && ip.IsGlobalUnicast() && !ip.IsPrivate() { + return true + } + } + } + return false +} diff --git a/metrics/multilabelmap.go b/metrics/multilabelmap.go index c0f312e7d2bd1..df2ae5073bf5f 100644 --- a/metrics/multilabelmap.go +++ b/metrics/multilabelmap.go @@ -39,7 +39,7 @@ func NewMultiLabelMap[T comparable](name string, promType, helpText string) *Mul Help: helpText, } var zero T - _ = labelString(zero) // panic early if T is invalid + _ = LabelString(zero) // panic early if T is invalid expvar.Publish(name, m) return m } @@ -50,8 +50,10 @@ type labelsAndValue[T comparable] struct { val expvar.Var } -// labelString returns a Prometheus-formatted label string for the given key. -func labelString(k any) string { +// LabelString returns a Prometheus-formatted label string for the given key. +// k must be a struct type with scalar fields, as required by MultiLabelMap, +// if k is not a struct, it will panic. +func LabelString(k any) string { rv := reflect.ValueOf(k) t := rv.Type() if t.Kind() != reflect.Struct { @@ -150,7 +152,7 @@ func (v *MultiLabelMap[T]) Init() *MultiLabelMap[T] { // // v.mu must be held. func (v *MultiLabelMap[T]) addKeyLocked(key T, val expvar.Var) { - ls := labelString(key) + ls := LabelString(key) ent := labelsAndValue[T]{key, ls, val} // Using insertion sort to place key into the already-sorted v.keys. @@ -209,6 +211,26 @@ func (v *MultiLabelMap[T]) Set(key T, val expvar.Var) { v.m.Store(key, val) } +// SetInt sets val to the *[expvar.Int] value stored under the given map key, +// creating it if it doesn't exist yet. +// It does nothing if key exists but is of the wrong type. +func (v *MultiLabelMap[T]) SetInt(key T, val int64) { + // Set to Int; ignore otherwise. 
+ if iv, ok := v.getOrFill(key, newInt).(*expvar.Int); ok { + iv.Set(val) + } +} + +// SetFloat sets val to the *[expvar.Float] value stored under the given map key, +// creating it if it doesn't exist yet. +// It does nothing if key exists but is of the wrong type. +func (v *MultiLabelMap[T]) SetFloat(key T, val float64) { + // Set to Float; ignore otherwise. + if iv, ok := v.getOrFill(key, newFloat).(*expvar.Float); ok { + iv.Set(val) + } +} + // Add adds delta to the *[expvar.Int] value stored under the given map key, // creating it if it doesn't exist yet. // It does nothing if key exists but is of the wrong type. @@ -234,7 +256,7 @@ func (v *MultiLabelMap[T]) AddFloat(key T, delta float64) { // This is not optimized for highly concurrent usage; it's presumed to only be // used rarely, at startup. func (v *MultiLabelMap[T]) Delete(key T) { - ls := labelString(key) + ls := LabelString(key) v.mu.Lock() defer v.mu.Unlock() diff --git a/metrics/multilabelmap_test.go b/metrics/multilabelmap_test.go index 9a1340a3cc0d9..b53e15ec8913e 100644 --- a/metrics/multilabelmap_test.go +++ b/metrics/multilabelmap_test.go @@ -5,6 +5,7 @@ package metrics import ( "bytes" + "expvar" "fmt" "io" "testing" @@ -22,6 +23,12 @@ func TestMultilabelMap(t *testing.T) { m.Add(L2{"b", "b"}, 3) m.Add(L2{"a", "a"}, 1) + m.SetFloat(L2{"sf", "sf"}, 3.5) + m.SetFloat(L2{"sf", "sf"}, 5.5) + m.Set(L2{"sfunc", "sfunc"}, expvar.Func(func() any { return 3 })) + m.SetInt(L2{"si", "si"}, 3) + m.SetInt(L2{"si", "si"}, 5) + cur := func() string { var buf bytes.Buffer m.Do(func(kv KeyValue[L2]) { @@ -33,7 +40,7 @@ func TestMultilabelMap(t *testing.T) { return buf.String() } - if g, w := cur(), "a/a=1,a/b=2,b/b=3,b/c=4"; g != w { + if g, w := cur(), "a/a=1,a/b=2,b/b=3,b/c=4,sf/sf=5.5,sfunc/sfunc=3,si/si=5"; g != w { t.Errorf("got %q; want %q", g, w) } @@ -43,6 +50,9 @@ func TestMultilabelMap(t *testing.T) { metricname{foo="a",bar="b"} 2 metricname{foo="b",bar="b"} 3 metricname{foo="b",bar="c"} 4 
+metricname{foo="sf",bar="sf"} 5.5 +metricname{foo="sfunc",bar="sfunc"} 3 +metricname{foo="si",bar="si"} 5 ` if got := buf.String(); got != want { t.Errorf("promtheus output = %q; want %q", got, want) @@ -50,7 +60,7 @@ metricname{foo="b",bar="c"} 4 m.Delete(L2{"b", "b"}) - if g, w := cur(), "a/a=1,a/b=2,b/c=4"; g != w { + if g, w := cur(), "a/a=1,a/b=2,b/c=4,sf/sf=5.5,sfunc/sfunc=3,si/si=5"; g != w { t.Errorf("got %q; want %q", g, w) } diff --git a/net/art/table_test.go b/net/art/table_test.go index cdc295c0e20ed..a129c8484ddcd 100644 --- a/net/art/table_test.go +++ b/net/art/table_test.go @@ -589,7 +589,7 @@ func TestInsertCompare(t *testing.T) { } if debugInsert { - t.Logf(fast.debugSummary()) + t.Log(fast.debugSummary()) } seenVals4 := map[int]bool{} diff --git a/net/dns/direct_linux.go b/net/dns/direct_linux.go index 319462170387b..bdeefb352498b 100644 --- a/net/dns/direct_linux.go +++ b/net/dns/direct_linux.go @@ -7,21 +7,20 @@ import ( "bytes" "context" - "github.com/illarion/gonotify" + "github.com/illarion/gonotify/v2" "tailscale.com/health" ) func (m *directManager) runFileWatcher() { - in, err := gonotify.NewInotify() + ctx, cancel := context.WithCancel(m.ctx) + defer cancel() + in, err := gonotify.NewInotify(ctx) if err != nil { // Oh well, we tried. This is all best effort for now, to // surface warnings to users. m.logf("dns: inotify new: %v", err) return } - ctx, cancel := context.WithCancel(m.ctx) - defer cancel() - go m.closeInotifyOnDone(ctx, in) const events = gonotify.IN_ATTRIB | gonotify.IN_CLOSE_WRITE | @@ -107,8 +106,3 @@ func (m *directManager) checkForFileTrample() { m.logf("trample: resolv.conf changed from what we expected. did some other program interfere? 
current contents: %q", show) m.health.SetUnhealthy(resolvTrampleWarnable, nil) } - -func (m *directManager) closeInotifyOnDone(ctx context.Context, in *gonotify.Inotify) { - <-ctx.Done() - in.Close() -} diff --git a/net/dns/manager.go b/net/dns/manager.go index dfce5b2acaf82..51a0fa12cba63 100644 --- a/net/dns/manager.go +++ b/net/dns/manager.go @@ -122,6 +122,11 @@ func (m *Manager) Set(cfg Config) error { return m.setLocked(cfg) } +// GetBaseConfig returns the current base OS DNS configuration as provided by the OSConfigurator. +func (m *Manager) GetBaseConfig() (OSConfig, error) { + return m.os.GetBaseConfig() +} + // setLocked sets the DNS configuration. // // m.mu must be held. diff --git a/net/dns/manager_darwin.go b/net/dns/manager_darwin.go index 7e4d403205a32..ccfafaa457f16 100644 --- a/net/dns/manager_darwin.go +++ b/net/dns/manager_darwin.go @@ -10,6 +10,8 @@ import ( "go4.org/mem" "tailscale.com/control/controlknobs" "tailscale.com/health" + "tailscale.com/net/dns/resolvconffile" + "tailscale.com/net/tsaddr" "tailscale.com/types/logger" "tailscale.com/util/mak" ) @@ -83,8 +85,36 @@ func (c *darwinConfigurator) SetDNS(cfg OSConfig) error { return c.removeResolverFiles(func(domain string) bool { return !keep[domain] }) } +// GetBaseConfig returns the current OS DNS configuration, extracting it from /etc/resolv.conf. +// We should really be using the SystemConfiguration framework to get this information, as this +// is not a stable public API, and is provided mostly as a compatibility effort with Unix +// tools. Apple might break this in the future. But honestly, parsing the output of `scutil --dns` +// is *even more* likely to break in the future. 
func (c *darwinConfigurator) GetBaseConfig() (OSConfig, error) { - return OSConfig{}, ErrGetBaseConfigNotSupported + cfg := OSConfig{} + + resolvConf, err := resolvconffile.ParseFile("/etc/resolv.conf") + if err != nil { + c.logf("failed to parse /etc/resolv.conf: %v", err) + return cfg, ErrGetBaseConfigNotSupported + } + + for _, ns := range resolvConf.Nameservers { + if ns == tsaddr.TailscaleServiceIP() || ns == tsaddr.TailscaleServiceIPv6() { + // If we find Quad100 in /etc/resolv.conf, we should ignore it + c.logf("ignoring 100.100.100.100 resolver IP found in /etc/resolv.conf") + continue + } + cfg.Nameservers = append(cfg.Nameservers, ns) + } + cfg.SearchDomains = resolvConf.SearchDomains + + if len(cfg.Nameservers) == 0 { + // Log a warning in case we couldn't find any nameservers in /etc/resolv.conf. + c.logf("no nameservers found in /etc/resolv.conf, DNS resolution might fail") + } + + return cfg, nil } const macResolverFileHeader = "# Added by tailscaled\n" diff --git a/net/dns/manager_windows.go b/net/dns/manager_windows.go index e4d703257e026..250a2557350dd 100644 --- a/net/dns/manager_windows.go +++ b/net/dns/manager_windows.go @@ -469,6 +469,9 @@ func (m *windowsManager) disableDynamicUpdates() error { } defer k.Close() + if err := k.SetDWordValue("RegistrationEnabled", 0); err != nil { + return err + } if err := k.SetDWordValue("DisableDynamicUpdate", 1); err != nil { return err } diff --git a/net/dns/osconfig.go b/net/dns/osconfig.go index 012601b7427a7..842c5ac607853 100644 --- a/net/dns/osconfig.go +++ b/net/dns/osconfig.go @@ -8,6 +8,7 @@ import ( "errors" "fmt" "net/netip" + "slices" "strings" "tailscale.com/types/logger" @@ -103,10 +104,16 @@ func (o *OSConfig) WriteToBufioWriter(w *bufio.Writer) { } func (o OSConfig) IsZero() bool { - return len(o.Nameservers) == 0 && len(o.SearchDomains) == 0 && len(o.MatchDomains) == 0 + return len(o.Hosts) == 0 && + len(o.Nameservers) == 0 && + len(o.SearchDomains) == 0 && + len(o.MatchDomains) == 0 } func 
(a OSConfig) Equal(b OSConfig) bool { + if len(a.Hosts) != len(b.Hosts) { + return false + } if len(a.Nameservers) != len(b.Nameservers) { return false } @@ -117,6 +124,15 @@ func (a OSConfig) Equal(b OSConfig) bool { return false } + for i := range a.Hosts { + ha, hb := a.Hosts[i], b.Hosts[i] + if ha.Addr != hb.Addr { + return false + } + if !slices.Equal(ha.Hosts, hb.Hosts) { + return false + } + } for i := range a.Nameservers { if a.Nameservers[i] != b.Nameservers[i] { return false diff --git a/net/dns/osconfig_test.go b/net/dns/osconfig_test.go index 02b1cad9ec4cf..c19db299f4b54 100644 --- a/net/dns/osconfig_test.go +++ b/net/dns/osconfig_test.go @@ -6,8 +6,10 @@ package dns import ( "fmt" "net/netip" + "reflect" "testing" + "tailscale.com/tstest" "tailscale.com/util/dnsname" ) @@ -41,3 +43,13 @@ func TestOSConfigPrintable(t *testing.T) { t.Errorf("format mismatch:\n got: %s\n want: %s", s, expected) } } + +func TestIsZero(t *testing.T) { + tstest.CheckIsZero[OSConfig](t, map[reflect.Type]any{ + reflect.TypeFor[dnsname.FQDN](): dnsname.FQDN("foo.bar."), + reflect.TypeFor[*HostEntry](): &HostEntry{ + Addr: netip.AddrFrom4([4]byte{100, 1, 2, 3}), + Hosts: []string{"foo", "bar"}, + }, + }) +} diff --git a/net/dns/resolver/forwarder.go b/net/dns/resolver/forwarder.go index c528175214f67..b8eaccbdd22eb 100644 --- a/net/dns/resolver/forwarder.go +++ b/net/dns/resolver/forwarder.go @@ -181,7 +181,7 @@ var dnsForwarderFailing = health.Register(&health.Warnable{ DependsOn: []*health.Warnable{health.NetworkStatusWarnable}, Text: health.StaticMessage("Tailscale can't reach the configured DNS servers. 
Internet connectivity may be affected."), ImpactsConnectivity: true, - TimeToVisible: 5 * time.Second, + TimeToVisible: 15 * time.Second, }) type route struct { diff --git a/net/netcheck/netcheck_test.go b/net/netcheck/netcheck_test.go index 26e52602afaa5..2f18705762829 100644 --- a/net/netcheck/netcheck_test.go +++ b/net/netcheck/netcheck_test.go @@ -7,12 +7,12 @@ import ( "bytes" "context" "fmt" + "maps" "net" "net/http" "net/netip" "reflect" "slices" - "sort" "strconv" "strings" "testing" @@ -593,13 +593,7 @@ func TestMakeProbePlan(t *testing.T) { func (plan probePlan) String() string { var sb strings.Builder - keys := []string{} - for k := range plan { - keys = append(keys, k) - } - sort.Strings(keys) - - for _, key := range keys { + for _, key := range slices.Sorted(maps.Keys(plan)) { fmt.Fprintf(&sb, "[%s]", key) pv := plan[key] for _, p := range pv { diff --git a/net/netmon/interfaces_linux.go b/net/netmon/interfaces_linux.go index ef7dcaaca850b..299f3101ea73b 100644 --- a/net/netmon/interfaces_linux.go +++ b/net/netmon/interfaces_linux.go @@ -15,8 +15,6 @@ import ( "net" "net/netip" "os" - "os/exec" - "runtime" "strings" "sync/atomic" @@ -50,9 +48,6 @@ ens18 0000000A 00000000 0001 0 0 0 0000FFFF func likelyHomeRouterIPLinux() (ret netip.Addr, myIP netip.Addr, ok bool) { if procNetRouteErr.Load() { // If we failed to read /proc/net/route previously, don't keep trying. 
- if runtime.GOOS == "android" { - return likelyHomeRouterIPAndroid() - } return ret, myIP, false } lineNum := 0 @@ -94,9 +89,6 @@ func likelyHomeRouterIPLinux() (ret netip.Addr, myIP netip.Addr, ok bool) { } if err != nil { procNetRouteErr.Store(true) - if runtime.GOOS == "android" { - return likelyHomeRouterIPAndroid() - } log.Printf("interfaces: failed to read /proc/net/route: %v", err) } if ret.IsValid() { @@ -137,41 +129,6 @@ func likelyHomeRouterIPLinux() (ret netip.Addr, myIP netip.Addr, ok bool) { return netip.Addr{}, netip.Addr{}, false } -// Android apps don't have permission to read /proc/net/route, at -// least on Google devices and the Android emulator. -func likelyHomeRouterIPAndroid() (ret netip.Addr, _ netip.Addr, ok bool) { - cmd := exec.Command("/system/bin/ip", "route", "show", "table", "0") - out, err := cmd.StdoutPipe() - if err != nil { - return - } - if err := cmd.Start(); err != nil { - log.Printf("interfaces: running /system/bin/ip: %v", err) - return - } - // Search for line like "default via 10.0.2.2 dev radio0 table 1016 proto static mtu 1500 " - lineread.Reader(out, func(line []byte) error { - const pfx = "default via " - if !mem.HasPrefix(mem.B(line), mem.S(pfx)) { - return nil - } - line = line[len(pfx):] - sp := bytes.IndexByte(line, ' ') - if sp == -1 { - return nil - } - ipb := line[:sp] - if ip, err := netip.ParseAddr(string(ipb)); err == nil && ip.Is4() { - ret = ip - log.Printf("interfaces: found Android default route %v", ip) - } - return nil - }) - cmd.Process.Kill() - cmd.Wait() - return ret, netip.Addr{}, ret.IsValid() -} - func defaultRoute() (d DefaultRouteDetails, err error) { v, err := defaultRouteInterfaceProcNet() if err == nil { diff --git a/net/packet/packet.go b/net/packet/packet.go index dc870414a8269..c9521ad4667c2 100644 --- a/net/packet/packet.go +++ b/net/packet/packet.go @@ -393,6 +393,11 @@ func (q *Parsed) Buffer() []byte { // Payload returns the payload of the IP subprotocol section. 
// This is a read-only view; that is, q retains the ownership of the buffer. func (q *Parsed) Payload() []byte { + // If the packet is truncated, return nothing instead of crashing. + if q.length > len(q.b) || q.dataofs > len(q.b) { + return nil + } + return q.b[q.dataofs:q.length] } diff --git a/net/tstun/tun.go b/net/tstun/tun.go index f2c034fc3c616..66e209d1acb5a 100644 --- a/net/tstun/tun.go +++ b/net/tstun/tun.go @@ -53,9 +53,6 @@ func New(logf logger.Logf, tunName string) (tun.Device, string, error) { dev.Close() return nil, "", err } - if err := setLinkFeatures(dev); err != nil { - logf("setting link features: %v", err) - } if err := setLinkAttrs(dev); err != nil { logf("setting link attributes: %v", err) } diff --git a/net/tstun/tun_features_linux.go b/net/tstun/tun_features_linux.go deleted file mode 100644 index 42408811520d2..0000000000000 --- a/net/tstun/tun_features_linux.go +++ /dev/null @@ -1,19 +0,0 @@ -// Copyright (c) Tailscale Inc & AUTHORS -// SPDX-License-Identifier: BSD-3-Clause - -package tstun - -import ( - "github.com/tailscale/wireguard-go/tun" - "tailscale.com/envknob" -) - -func setLinkFeatures(dev tun.Device) error { - if envknob.Bool("TS_TUN_DISABLE_UDP_GRO") { - linuxDev, ok := dev.(tun.LinuxDevice) - if ok { - linuxDev.DisableUDPGRO() - } - } - return nil -} diff --git a/net/tstun/wrap.go b/net/tstun/wrap.go index 24defba27a782..514ebcaaf1f5e 100644 --- a/net/tstun/wrap.go +++ b/net/tstun/wrap.go @@ -34,8 +34,10 @@ import ( "tailscale.com/types/key" "tailscale.com/types/logger" "tailscale.com/util/clientmetric" + "tailscale.com/util/usermetric" "tailscale.com/wgengine/capture" "tailscale.com/wgengine/filter" + "tailscale.com/wgengine/netstack/gro" "tailscale.com/wgengine/wgcfg" ) @@ -74,6 +76,16 @@ var parsedPacketPool = sync.Pool{New: func() any { return new(packet.Parsed) }} // It must not hold onto the packet struct, as its backing storage will be reused. 
type FilterFunc func(*packet.Parsed, *Wrapper) filter.Response +// GROFilterFunc is a FilterFunc extended with a *gro.GRO, enabling increased +// throughput where GRO is supported by a packet.Parsed interceptor, e.g. +// netstack/gVisor, and we are handling a vector of packets. Callers must pass a +// nil g for the first packet in a given vector, and continue passing the +// returned *gro.GRO for all remaining packets in said vector. If the returned +// *gro.GRO is non-nil after the last packet for a given vector is passed +// through the GROFilterFunc, the caller must also call Flush() on it to deliver +// any previously Enqueue()'d packets. +type GROFilterFunc func(p *packet.Parsed, w *Wrapper, g *gro.GRO) (filter.Response, *gro.GRO) + // Wrapper augments a tun.Device with packet filtering and injection. // // A Wrapper starts in a "corked" mode where Read calls are blocked @@ -161,16 +173,12 @@ type Wrapper struct { // and therefore sees the packets that may be later dropped by it. PreFilterPacketInboundFromWireGuard FilterFunc // PostFilterPacketInboundFromWireGuard is the inbound filter function that runs after the main filter. - PostFilterPacketInboundFromWireGuard FilterFunc - // EndPacketVectorInboundFromWireGuardFlush is a function that runs after all packets in a given vector - // have been handled by all filters. Filters may queue packets for the purposes of GRO, requiring an - // explicit flush. - EndPacketVectorInboundFromWireGuardFlush func() + PostFilterPacketInboundFromWireGuard GROFilterFunc // PreFilterPacketOutboundToWireGuardNetstackIntercept is a filter function that runs before the main filter // for packets from the local system. This filter is populated by netstack to hook // packets that should be handled by netstack. If set, this filter runs before // PreFilterFromTunToEngine. 
- PreFilterPacketOutboundToWireGuardNetstackIntercept FilterFunc + PreFilterPacketOutboundToWireGuardNetstackIntercept GROFilterFunc // PreFilterPacketOutboundToWireGuardEngineIntercept is a filter function that runs before the main filter // for packets from the local system. This filter is populated by wgengine to hook // packets which it handles internally. If both this and PreFilterFromTunToNetstack @@ -804,7 +812,7 @@ var ( magicDNSIPPortv6 = netip.AddrPortFrom(tsaddr.TailscaleServiceIPv6(), 0) ) -func (t *Wrapper) filterPacketOutboundToWireGuard(p *packet.Parsed, pc *peerConfigTable) filter.Response { +func (t *Wrapper) filterPacketOutboundToWireGuard(p *packet.Parsed, pc *peerConfigTable, gro *gro.GRO) (filter.Response, *gro.GRO) { // Fake ICMP echo responses to MagicDNS (100.100.100.100). if p.IsEchoRequest() { switch p.Dst { @@ -813,13 +821,13 @@ func (t *Wrapper) filterPacketOutboundToWireGuard(p *packet.Parsed, pc *peerConf header.ToResponse() outp := packet.Generate(&header, p.Payload()) t.InjectInboundCopy(outp) - return filter.DropSilently // don't pass on to OS; already handled + return filter.DropSilently, gro // don't pass on to OS; already handled case magicDNSIPPortv6: header := p.ICMP6Header() header.ToResponse() outp := packet.Generate(&header, p.Payload()) t.InjectInboundCopy(outp) - return filter.DropSilently // don't pass on to OS; already handled + return filter.DropSilently, gro // don't pass on to OS; already handled } } @@ -831,20 +839,22 @@ func (t *Wrapper) filterPacketOutboundToWireGuard(p *packet.Parsed, pc *peerConf t.isSelfDisco(p) { t.limitedLogf("[unexpected] received self disco out packet over tstun; dropping") metricPacketOutDropSelfDisco.Add(1) - return filter.DropSilently + return filter.DropSilently, gro } if t.PreFilterPacketOutboundToWireGuardNetstackIntercept != nil { - if res := t.PreFilterPacketOutboundToWireGuardNetstackIntercept(p, t); res.IsDrop() { + var res filter.Response + res, gro = 
t.PreFilterPacketOutboundToWireGuardNetstackIntercept(p, t, gro) + if res.IsDrop() { // Handled by netstack.Impl.handleLocalPackets (quad-100 DNS primarily) - return res + return res, gro } } if t.PreFilterPacketOutboundToWireGuardEngineIntercept != nil { if res := t.PreFilterPacketOutboundToWireGuardEngineIntercept(p, t); res.IsDrop() { // Handled by userspaceEngine.handleLocalPackets (primarily handles // quad-100 if netstack is not installed). - return res + return res, gro } } @@ -857,21 +867,23 @@ func (t *Wrapper) filterPacketOutboundToWireGuard(p *packet.Parsed, pc *peerConf filt = t.filter.Load() } if filt == nil { - return filter.Drop + return filter.Drop, gro } if filt.RunOut(p, t.filterFlags) != filter.Accept { metricPacketOutDropFilter.Add(1) - return filter.Drop + metricOutboundDroppedPacketsTotal.Add(dropPacketLabel{ + Reason: DropReasonACL, + }, 1) + return filter.Drop, gro } if t.PostFilterPacketOutboundToWireGuard != nil { if res := t.PostFilterPacketOutboundToWireGuard(p, t); res.IsDrop() { - return res + return res, gro } } - - return filter.Accept + return filter.Accept, gro } // noteActivity records that there was a read or write at the current time. 
@@ -910,6 +922,7 @@ func (t *Wrapper) Read(buffs [][]byte, sizes []int, offset int) (int, error) { defer parsedPacketPool.Put(p) captHook := t.captureHook.Load() pc := t.peerConfig.Load() + var buffsGRO *gro.GRO for _, data := range res.data { p.Decode(data[res.dataOffset:]) @@ -922,7 +935,8 @@ func (t *Wrapper) Read(buffs [][]byte, sizes []int, offset int) (int, error) { captHook(capture.FromLocal, t.now(), p.Buffer(), p.CaptureMeta) } if !t.disableFilter { - response := t.filterPacketOutboundToWireGuard(p, pc) + var response filter.Response + response, buffsGRO = t.filterPacketOutboundToWireGuard(p, pc, buffsGRO) if response != filter.Accept { metricPacketOutDrop.Add(1) continue @@ -942,6 +956,9 @@ func (t *Wrapper) Read(buffs [][]byte, sizes []int, offset int) (int, error) { } buffsPos++ } + if buffsGRO != nil { + buffsGRO.Flush() + } // t.vectorBuffer has a fixed location in memory. // TODO(raggi): add an explicit field and possibly method to the tunVectorReadResult @@ -988,6 +1005,13 @@ func stackGSOToTunGSO(pkt []byte, gso stack.GSO) (tun.GSOOptions, error) { return options, nil } +// invertGSOChecksum inverts the transport layer checksum in pkt if gVisor +// handed us a segment with a partial checksum. A partial checksum is not a +// ones' complement of the sum, and incremental checksum updating is not yet +// partial checksum aware. This may be called twice for a single packet, +// both before and after partial checksum updates where later checksum +// offloading still expects a partial checksum. +// TODO(jwhited): plumb partial checksum awareness into net/packet/checksum. func invertGSOChecksum(pkt []byte, gso stack.GSO) { if gso.NeedsCsum != true { return @@ -1024,13 +1048,6 @@ func (t *Wrapper) injectedRead(res tunInjectedRead, outBuffs [][]byte, sizes []i defer parsedPacketPool.Put(p) p.Decode(pkt) - // We invert the transport layer checksum before and after snat() if gVisor - // handed us a segment with a partial checksum. 
A partial checksum is not a - // ones' complement of the sum, and incremental checksum updating that could - // occur as a result of snat() is not aware of this. Alternatively we could - // plumb partial transport layer checksum awareness down through snat(), - // but the surface area of such a change is much larger, and not yet - // justified by this singular case. invertGSOChecksum(pkt, gso) pc.snat(p) invertGSOChecksum(pkt, gso) @@ -1061,7 +1078,7 @@ func (t *Wrapper) injectedRead(res tunInjectedRead, outBuffs [][]byte, sizes []i return n, err } -func (t *Wrapper) filterPacketInboundFromWireGuard(p *packet.Parsed, captHook capture.Callback, pc *peerConfigTable) filter.Response { +func (t *Wrapper) filterPacketInboundFromWireGuard(p *packet.Parsed, captHook capture.Callback, pc *peerConfigTable, gro *gro.GRO) (filter.Response, *gro.GRO) { if captHook != nil { captHook(capture.FromPeer, t.now(), p.Buffer(), p.CaptureMeta) } @@ -1070,7 +1087,7 @@ func (t *Wrapper) filterPacketInboundFromWireGuard(p *packet.Parsed, captHook ca if pingReq, ok := p.AsTSMPPing(); ok { t.noteActivity() t.injectOutboundPong(p, pingReq) - return filter.DropSilently + return filter.DropSilently, gro } else if data, ok := p.AsTSMPPong(); ok { if f := t.OnTSMPPongReceived; f != nil { f(data) @@ -1082,7 +1099,7 @@ func (t *Wrapper) filterPacketInboundFromWireGuard(p *packet.Parsed, captHook ca if f := t.OnICMPEchoResponseReceived; f != nil && f(p) { // Note: this looks dropped in metrics, even though it was // handled internally. 
- return filter.DropSilently + return filter.DropSilently, gro } } @@ -1094,12 +1111,12 @@ func (t *Wrapper) filterPacketInboundFromWireGuard(p *packet.Parsed, captHook ca t.isSelfDisco(p) { t.limitedLogf("[unexpected] received self disco in packet over tstun; dropping") metricPacketInDropSelfDisco.Add(1) - return filter.DropSilently + return filter.DropSilently, gro } if t.PreFilterPacketInboundFromWireGuard != nil { if res := t.PreFilterPacketInboundFromWireGuard(p, t); res.IsDrop() { - return res + return res, gro } } @@ -1110,7 +1127,7 @@ func (t *Wrapper) filterPacketInboundFromWireGuard(p *packet.Parsed, captHook ca filt = t.filter.Load() } if filt == nil { - return filter.Drop + return filter.Drop, gro } outcome := filt.RunIn(p, t.filterFlags) @@ -1127,6 +1144,9 @@ func (t *Wrapper) filterPacketInboundFromWireGuard(p *packet.Parsed, captHook ca if outcome != filter.Accept { metricPacketInDropFilter.Add(1) + metricInboundDroppedPacketsTotal.Add(dropPacketLabel{ + Reason: DropReasonACL, + }, 1) // Tell them, via TSMP, we're dropping them due to the ACL. // Their host networking stack can translate this into ICMP @@ -1150,20 +1170,24 @@ func (t *Wrapper) filterPacketInboundFromWireGuard(p *packet.Parsed, captHook ca // TODO(bradfitz): also send a TCP RST, after the TSMP message. } - return filter.Drop + return filter.Drop, gro } if t.PostFilterPacketInboundFromWireGuard != nil { - if res := t.PostFilterPacketInboundFromWireGuard(p, t); res.IsDrop() { - return res + var res filter.Response + res, gro = t.PostFilterPacketInboundFromWireGuard(p, t, gro) + if res.IsDrop() { + return res, gro } } - return filter.Accept + return filter.Accept, gro } -// Write accepts incoming packets. The packets begins at buffs[:][offset:], -// like wireguard-go/tun.Device.Write. +// Write accepts incoming packets. The packets begin at buffs[:][offset:], +// like wireguard-go/tun.Device.Write. 
Write is called per-peer via +// wireguard-go/device.Peer.RoutineSequentialReceiver, so it MUST be +// thread-safe. func (t *Wrapper) Write(buffs [][]byte, offset int) (int, error) { metricPacketIn.Add(int64(len(buffs))) i := 0 @@ -1171,11 +1195,17 @@ func (t *Wrapper) Write(buffs [][]byte, offset int) (int, error) { defer parsedPacketPool.Put(p) captHook := t.captureHook.Load() pc := t.peerConfig.Load() + var buffsGRO *gro.GRO for _, buff := range buffs { p.Decode(buff[offset:]) pc.dnat(p) if !t.disableFilter { - if t.filterPacketInboundFromWireGuard(p, captHook, pc) != filter.Accept { + var res filter.Response + // TODO(jwhited): name and document this filter code path + // appropriately. It is not only responsible for filtering, it + // also routes packets towards gVisor/netstack. + res, buffsGRO = t.filterPacketInboundFromWireGuard(p, captHook, pc, buffsGRO) + if res != filter.Accept { metricPacketInDrop.Add(1) } else { buffs[i] = buff @@ -1183,8 +1213,8 @@ func (t *Wrapper) Write(buffs [][]byte, offset int) (int, error) { } } } - if t.EndPacketVectorInboundFromWireGuardFlush != nil { - t.EndPacketVectorInboundFromWireGuardFlush() + if buffsGRO != nil { + buffsGRO.Flush() } if t.disableFilter { i = len(buffs) @@ -1194,6 +1224,11 @@ func (t *Wrapper) Write(buffs [][]byte, offset int) (int, error) { if len(buffs) > 0 { t.noteActivity() _, err := t.tdevWrite(buffs, offset) + if err != nil { + metricInboundDroppedPacketsTotal.Add(dropPacketLabel{ + Reason: DropReasonError, + }, int64(len(buffs))) + } return len(buffs), err } return 0, nil @@ -1225,36 +1260,73 @@ func (t *Wrapper) SetJailedFilter(filt *filter.Filter) { } // InjectInboundPacketBuffer makes the Wrapper device behave as if a packet -// with the given contents was received from the network. -// It takes ownership of one reference count on the packet. The injected +// (pkt) with the given contents was received from the network. +// It takes ownership of one reference count on pkt. 
The injected // packet will not pass through inbound filters. // +// pkt will be copied into buffs before writing to the underlying tun.Device. +// Therefore, callers must allocate and pass a buffs slice that is sized +// appropriately for holding pkt.Size() + PacketStartOffset as a single +// element (buffs[0]) and split across multiple elements if the originating +// stack supports GSO. sizes must be sized with similar consideration, +// len(buffs) should be equal to len(sizes). If any len(buffs[]) was +// mutated by InjectInboundPacketBuffer it will be reset to cap(buffs[]) +// before returning. +// // This path is typically used to deliver synthesized packets to the // host networking stack. -func (t *Wrapper) InjectInboundPacketBuffer(pkt *stack.PacketBuffer) error { - buf := make([]byte, PacketStartOffset+pkt.Size()) +func (t *Wrapper) InjectInboundPacketBuffer(pkt *stack.PacketBuffer, buffs [][]byte, sizes []int) error { + buf := buffs[0][PacketStartOffset:] - n := copy(buf[PacketStartOffset:], pkt.NetworkHeader().Slice()) - n += copy(buf[PacketStartOffset+n:], pkt.TransportHeader().Slice()) - n += copy(buf[PacketStartOffset+n:], pkt.Data().AsRange().ToSlice()) - if n != pkt.Size() { + bufN := copy(buf, pkt.NetworkHeader().Slice()) + bufN += copy(buf[bufN:], pkt.TransportHeader().Slice()) + bufN += copy(buf[bufN:], pkt.Data().AsRange().ToSlice()) + if bufN != pkt.Size() { panic("unexpected packet size after copy") } - pkt.DecRef() + buf = buf[:bufN] + defer pkt.DecRef() pc := t.peerConfig.Load() p := parsedPacketPool.Get().(*packet.Parsed) defer parsedPacketPool.Put(p) - p.Decode(buf[PacketStartOffset:]) + p.Decode(buf) captHook := t.captureHook.Load() if captHook != nil { captHook(capture.SynthesizedToLocal, t.now(), p.Buffer(), p.CaptureMeta) } + invertGSOChecksum(buf, pkt.GSOOptions) pc.dnat(p) - - return t.InjectInboundDirect(buf, PacketStartOffset) + invertGSOChecksum(buf, pkt.GSOOptions) + + gso, err := stackGSOToTunGSO(buf, pkt.GSOOptions) + if err != 
nil { + return err + } + + // TODO(jwhited): support GSO passthrough to t.tdev. If t.tdev supports + // GSO we don't need to split here and coalesce inside wireguard-go, + // we can pass a coalesced segment all the way through. + n, err := tun.GSOSplit(buf, gso, buffs, sizes, PacketStartOffset) + if err != nil { + if errors.Is(err, tun.ErrTooManySegments) { + t.limitedLogf("InjectInboundPacketBuffer: GSO split overflows buffs") + } else { + return err + } + } + for i := 0; i < n; i++ { + buffs[i] = buffs[i][:PacketStartOffset+sizes[i]] + } + defer func() { + for i := 0; i < n; i++ { + buffs[i] = buffs[i][:cap(buffs[i])] + } + }() + _, err = t.tdevWrite(buffs[:n], PacketStartOffset) + return err } // InjectInboundDirect makes the Wrapper device behave as if a packet @@ -1396,6 +1468,33 @@ var ( metricPacketOutDropSelfDisco = clientmetric.NewCounter("tstun_out_to_wg_drop_self_disco") ) +type DropReason string + +const ( + DropReasonACL DropReason = "acl" + DropReasonError DropReason = "error" +) + +type dropPacketLabel struct { + // Reason indicates what we have done with the packet, and has the following values: + // - acl (rejected packets because of ACL) + // - error (rejected packets because of an error) + Reason DropReason +} + +var ( + metricInboundDroppedPacketsTotal = usermetric.NewMultiLabelMap[dropPacketLabel]( + "tailscaled_inbound_dropped_packets_total", + "counter", + "Counts the number of dropped packets received by the node from other peers", + ) + metricOutboundDroppedPacketsTotal = usermetric.NewMultiLabelMap[dropPacketLabel]( + "tailscaled_outbound_dropped_packets_total", + "counter", + "Counts the number of packets dropped while being sent to other peers", + ) +) + func (t *Wrapper) InstallCaptureHook(cb capture.Callback) { t.captureHook.Store(cb) } diff --git a/net/tstun/wrap_linux.go b/net/tstun/wrap_linux.go new file mode 100644 index 0000000000000..136ddfe1efb2d --- /dev/null +++ b/net/tstun/wrap_linux.go @@ -0,0 +1,82 @@ +// Copyright (c) 
Tailscale Inc & AUTHORS +// SPDX-License-Identifier: BSD-3-Clause + +package tstun + +import ( + "errors" + "net/netip" + "runtime" + + "github.com/tailscale/wireguard-go/tun" + "golang.org/x/sys/unix" + "gvisor.dev/gvisor/pkg/tcpip" + "gvisor.dev/gvisor/pkg/tcpip/checksum" + "gvisor.dev/gvisor/pkg/tcpip/header" + "tailscale.com/envknob" + "tailscale.com/net/tsaddr" +) + +// SetLinkFeaturesPostUp configures link features on t based on select TS_TUN_ +// environment variables and OS feature tests. Callers should ensure t is +// up prior to calling, otherwise OS feature tests may be inconclusive. +func (t *Wrapper) SetLinkFeaturesPostUp() { + if t.isTAP || runtime.GOOS == "android" { + return + } + if groDev, ok := t.tdev.(tun.GRODevice); ok { + if envknob.Bool("TS_TUN_DISABLE_UDP_GRO") { + groDev.DisableUDPGRO() + } + if envknob.Bool("TS_TUN_DISABLE_TCP_GRO") { + groDev.DisableTCPGRO() + } + err := probeTCPGRO(groDev) + if errors.Is(err, unix.EINVAL) { + groDev.DisableTCPGRO() + groDev.DisableUDPGRO() + t.logf("disabled TUN TCP & UDP GRO due to GRO probe error: %v", err) + } + } +} + +func probeTCPGRO(dev tun.GRODevice) error { + ipPort := netip.MustParseAddrPort(tsaddr.TailscaleServiceIPString + ":0") + fingerprint := []byte("tailscale-probe-tun-gro") + segmentSize := len(fingerprint) + iphLen := 20 + tcphLen := 20 + totalLen := iphLen + tcphLen + segmentSize + ipAs4 := ipPort.Addr().As4() + bufs := make([][]byte, 2) + for i := range bufs { + bufs[i] = make([]byte, PacketStartOffset+totalLen, PacketStartOffset+(totalLen*2)) + ipv4H := header.IPv4(bufs[i][PacketStartOffset:]) + ipv4H.Encode(&header.IPv4Fields{ + SrcAddr: tcpip.AddrFromSlice(ipAs4[:]), + DstAddr: tcpip.AddrFromSlice(ipAs4[:]), + Protocol: unix.IPPROTO_TCP, + // Use a zero value TTL as best effort means to reduce chance of + // probe packet leaking further than it needs to. 
+ TTL: 0, + TotalLength: uint16(totalLen), + }) + tcpH := header.TCP(bufs[i][PacketStartOffset+iphLen:]) + tcpH.Encode(&header.TCPFields{ + SrcPort: ipPort.Port(), + DstPort: ipPort.Port(), + SeqNum: 1 + uint32(i*segmentSize), + AckNum: 1, + DataOffset: 20, + Flags: header.TCPFlagAck, + WindowSize: 3000, + }) + copy(bufs[i][PacketStartOffset+iphLen+tcphLen:], fingerprint) + ipv4H.SetChecksum(^ipv4H.CalculateChecksum()) + pseudoCsum := header.PseudoHeaderChecksum(unix.IPPROTO_TCP, ipv4H.SourceAddress(), ipv4H.DestinationAddress(), uint16(tcphLen+segmentSize)) + pseudoCsum = checksum.Checksum(bufs[i][PacketStartOffset+iphLen+tcphLen:], pseudoCsum) + tcpH.SetChecksum(^tcpH.CalculateChecksum(pseudoCsum)) + } + _, err := dev.Write(bufs, PacketStartOffset) + return err +} diff --git a/net/tstun/tun_features_notlinux.go b/net/tstun/wrap_noop.go similarity index 51% rename from net/tstun/tun_features_notlinux.go rename to net/tstun/wrap_noop.go index 85285fd054c5a..c743072ca6ba2 100644 --- a/net/tstun/tun_features_notlinux.go +++ b/net/tstun/wrap_noop.go @@ -5,10 +5,4 @@ package tstun -import ( - "github.com/tailscale/wireguard-go/tun" -) - -func setLinkFeatures(dev tun.Device) error { - return nil -} +func (t *Wrapper) SetLinkFeaturesPostUp() {} diff --git a/net/tstun/wrap_test.go b/net/tstun/wrap_test.go index fb03249891bf2..f9319210276a3 100644 --- a/net/tstun/wrap_test.go +++ b/net/tstun/wrap_test.go @@ -315,6 +315,12 @@ func mustHexDecode(s string) []byte { } func TestFilter(t *testing.T) { + // Reset the metrics before test. These are global + // so the different tests might have affected them. 
+ metricInboundDroppedPacketsTotal.SetInt(dropPacketLabel{Reason: DropReasonACL}, 0) + metricInboundDroppedPacketsTotal.SetInt(dropPacketLabel{Reason: DropReasonError}, 0) + metricOutboundDroppedPacketsTotal.SetInt(dropPacketLabel{Reason: DropReasonACL}, 0) + chtun, tun := newChannelTUN(t.Logf, true) defer tun.Close() @@ -429,6 +435,22 @@ func TestFilter(t *testing.T) { } }) } + + inACL := metricInboundDroppedPacketsTotal.Get(dropPacketLabel{Reason: DropReasonACL}) + inError := metricInboundDroppedPacketsTotal.Get(dropPacketLabel{Reason: DropReasonError}) + outACL := metricOutboundDroppedPacketsTotal.Get(dropPacketLabel{Reason: DropReasonACL}) + + assertMetricPackets(t, "inACL", "3", inACL.String()) + assertMetricPackets(t, "inError", "0", inError.String()) + assertMetricPackets(t, "outACL", "1", outACL.String()) + +} + +func assertMetricPackets(t *testing.T, metricName, want, got string) { + t.Helper() + if want != got { + t.Errorf("%s got unexpected value, got %s, want %s", metricName, got, want) + } } func TestAllocs(t *testing.T) { @@ -552,7 +574,7 @@ func TestPeerAPIBypass(t *testing.T) { tt.w.SetFilter(tt.filter) tt.w.disableTSMPRejected = true tt.w.logf = t.Logf - if got := tt.w.filterPacketInboundFromWireGuard(p, nil, nil); got != tt.want { + if got, _ := tt.w.filterPacketInboundFromWireGuard(p, nil, nil, nil); got != tt.want { t.Errorf("got = %v; want %v", got, tt.want) } }) @@ -582,7 +604,7 @@ func TestFilterDiscoLoop(t *testing.T) { p := new(packet.Parsed) p.Decode(pkt) - got := tw.filterPacketInboundFromWireGuard(p, nil, nil) + got, _ := tw.filterPacketInboundFromWireGuard(p, nil, nil, nil) if got != filter.DropSilently { t.Errorf("got %v; want DropSilently", got) } @@ -593,7 +615,7 @@ func TestFilterDiscoLoop(t *testing.T) { memLog.Reset() pp := new(packet.Parsed) pp.Decode(pkt) - got = tw.filterPacketOutboundToWireGuard(pp, nil) + got, _ = tw.filterPacketOutboundToWireGuard(pp, nil, nil) if got != filter.DropSilently { t.Errorf("got %v; want 
DropSilently", got) } @@ -882,7 +904,10 @@ func TestCaptureHook(t *testing.T) { packetBuf := stack.NewPacketBuffer(stack.PacketBufferOptions{ Payload: buffer.MakeWithData([]byte("InjectInboundPacketBuffer")), }) - w.InjectInboundPacketBuffer(packetBuf) + buffs := make([][]byte, 1) + buffs[0] = make([]byte, PacketStartOffset+packetBuf.Size()) + sizes := make([]int, 1) + w.InjectInboundPacketBuffer(packetBuf, buffs, sizes) packetBuf = stack.NewPacketBuffer(stack.PacketBufferOptions{ Payload: buffer.MakeWithData([]byte("InjectOutboundPacketBuffer")), diff --git a/posture/serialnumber_ios.go b/posture/serialnumber_ios.go index b5aa090565ffe..55d0e438b54d5 100644 --- a/posture/serialnumber_ios.go +++ b/posture/serialnumber_ios.go @@ -14,7 +14,7 @@ import ( // MDM solution. It requires configuration via the DeviceSerialNumber system policy. // This is the only way to gather serial numbers on iOS and tvOS. func GetSerialNumbers(_ logger.Logf) ([]string, error) { - s, err := syspolicy.GetString("DeviceSerialNumber", "") + s, err := syspolicy.GetString(syspolicy.DeviceSerialNumber, "") if err != nil { return nil, fmt.Errorf("failed to get serial number from MDM: %v", err) } diff --git a/posture/serialnumber_notmacos.go b/posture/serialnumber_notmacos.go index 69ad904b80901..8b91738b04bfa 100644 --- a/posture/serialnumber_notmacos.go +++ b/posture/serialnumber_notmacos.go @@ -98,8 +98,5 @@ func GetSerialNumbers(logf logger.Logf) ([]string, error) { } } } - - logf("got serial numbers %v", serials) - return serials, nil } diff --git a/proxymap/proxymap.go b/proxymap/proxymap.go index 8a7f1f95e200c..dfe6f2d586000 100644 --- a/proxymap/proxymap.go +++ b/proxymap/proxymap.go @@ -6,9 +6,13 @@ package proxymap import ( + "fmt" "net/netip" + "strings" "sync" "time" + + "tailscale.com/util/mak" ) // Mapper tracks which localhost ip:ports correspond to which remote Tailscale @@ -24,7 +28,26 @@ type Mapper struct { // keyed first by the protocol ("tcp" or "udp"), then by the IP:port. 
// // +checklocks:mu - m map[string]map[netip.AddrPort]netip.Addr + m map[mappingKey]netip.Addr +} + +// String returns a human-readable representation of the current mappings. +func (m *Mapper) String() string { + m.mu.Lock() + defer m.mu.Unlock() + if len(m.m) == 0 { + return "no mappings" + } + var sb strings.Builder + for k, v := range m.m { + fmt.Fprintf(&sb, "%v/%v=>%v\n", k.proto, k.ap, v) + } + return sb.String() +} + +type mappingKey struct { + proto string + ap netip.AddrPort } // RegisterIPPortIdentity registers a given node (identified by its @@ -36,18 +59,15 @@ type Mapper struct { // // The proto is the network protocol that is being proxied; it must be "tcp" or // "udp" (not e.g. "tcp4", "udp6", etc.) -func (m *Mapper) RegisterIPPortIdentity(proto string, ipport netip.AddrPort, tsIP netip.Addr) { +func (m *Mapper) RegisterIPPortIdentity(proto string, ipport netip.AddrPort, tsIP netip.Addr) error { m.mu.Lock() defer m.mu.Unlock() - if m.m == nil { - m.m = make(map[string]map[netip.AddrPort]netip.Addr) + k := mappingKey{proto, ipport} + if v, ok := m.m[k]; ok { + return fmt.Errorf("proxymap: RegisterIPPortIdentity: already registered: %v/%v=>%v", k.proto, k.ap, v) } - p, ok := m.m[proto] - if !ok { - p = make(map[netip.AddrPort]netip.Addr) - m.m[proto] = p - } - p[ipport] = tsIP + mak.Set(&m.m, k, tsIP) + return nil } // UnregisterIPPortIdentity removes a temporary IP:port registration @@ -55,8 +75,8 @@ func (m *Mapper) RegisterIPPortIdentity(proto string, ipport netip.AddrPort, tsI func (m *Mapper) UnregisterIPPortIdentity(proto string, ipport netip.AddrPort) { m.mu.Lock() defer m.mu.Unlock() - p := m.m[proto] - delete(p, ipport) // safe to delete from a nil map + k := mappingKey{proto, ipport} + delete(m.m, k) // safe to delete from a nil map } var whoIsSleeps = [...]time.Duration{ @@ -75,13 +95,11 @@ func (m *Mapper) WhoIsIPPort(proto string, ipport netip.AddrPort) (tsIP netip.Ad // so loop a few times for now waiting for the registration // to 
appear. // TODO(bradfitz,namansood): remove this once #1616 is fixed. + k := mappingKey{proto, ipport} for _, d := range whoIsSleeps { time.Sleep(d) m.mu.Lock() - p, ok := m.m[proto] - if ok { - tsIP, ok = p[ipport] - } + tsIP, ok := m.m[k] m.mu.Unlock() if ok { return tsIP, true diff --git a/safesocket/unixsocket.go b/safesocket/unixsocket.go index ef22263aa7aab..ec8635bbbf0d7 100644 --- a/safesocket/unixsocket.go +++ b/safesocket/unixsocket.go @@ -7,7 +7,6 @@ package safesocket import ( "context" - "errors" "fmt" "log" "net" @@ -18,9 +17,6 @@ import ( ) func connect(ctx context.Context, path string) (net.Conn, error) { - if runtime.GOOS == "js" { - return nil, errors.New("safesocket.Connect not yet implemented on js/wasm") - } var std net.Dialer return std.DialContext(ctx, "unix", path) } diff --git a/sessionrecording/connect.go b/sessionrecording/connect.go index 12c5c8c018b6b..db966ba2cdee2 100644 --- a/sessionrecording/connect.go +++ b/sessionrecording/connect.go @@ -105,7 +105,7 @@ func ConnectToRecorder(ctx context.Context, recs []netip.AddrPort, dial func(con } attempt.FailureMessage = err.Error() errs = append(errs, err) - continue + continue // try the next recorder } return pw, attempts, errChan, nil } diff --git a/shell.nix b/shell.nix index 839da956e1096..4d2e24366ae46 100644 --- a/shell.nix +++ b/shell.nix @@ -16,4 +16,4 @@ ) { src = ./.; }).shellNix -# nix-direnv cache busting line: sha256-M5e5dE1gGW3ly94r3SxCsBmVwbBmhVtaVDW691vxG/8= +# nix-direnv cache busting line: sha256-xO1DuLWi6/lpA9ubA2ZYVJM+CkVNA5IaVGZxX9my0j0= diff --git a/ssh/tailssh/incubator.go b/ssh/tailssh/incubator.go index aafe0c743b26e..37f2a54343ed2 100644 --- a/ssh/tailssh/incubator.go +++ b/ssh/tailssh/incubator.go @@ -115,6 +115,7 @@ func (ss *sshSession) newIncubatorCommand(logf logger.Logf) (cmd *exec.Cmd, err "--gid=" + lu.Gid, "--groups=" + groups, "--local-user=" + lu.Username, + "--home-dir=" + lu.HomeDir, "--remote-user=" + remoteUser, "--remote-ip=" + 
ci.src.Addr().String(), "--has-tty=false", // updated in-place by startWithPTY @@ -128,7 +129,8 @@ func (ss *sshSession) newIncubatorCommand(logf logger.Logf) (cmd *exec.Cmd, err incubatorArgs = append(incubatorArgs, "--is-selinux-enforcing") } - forceV1Behavior := ss.conn.srv.lb.NetMap().HasCap(tailcfg.NodeAttrSSHBehaviorV1) + nm := ss.conn.srv.lb.NetMap() + forceV1Behavior := nm.HasCap(tailcfg.NodeAttrSSHBehaviorV1) && !nm.HasCap(tailcfg.NodeAttrSSHBehaviorV2) if forceV1Behavior { incubatorArgs = append(incubatorArgs, "--force-v1-behavior") } @@ -179,6 +181,7 @@ type incubatorArgs struct { gid int gids []int localUser string + homeDir string remoteUser string remoteIP string ttyName string @@ -201,6 +204,7 @@ func parseIncubatorArgs(args []string) (incubatorArgs, error) { flags.IntVar(&ia.gid, "gid", 0, "the gid of local-user") flags.StringVar(&groups, "groups", "", "comma-separated list of gids of local-user") flags.StringVar(&ia.localUser, "local-user", "", "the user to run as") + flags.StringVar(&ia.homeDir, "home-dir", "/", "the user's home directory") flags.StringVar(&ia.remoteUser, "remote-user", "", "the remote user/tags") flags.StringVar(&ia.remoteIP, "remote-ip", "", "the remote Tailscale IP") flags.StringVar(&ia.ttyName, "tty-name", "", "the tty name (pts/3)") @@ -399,7 +403,7 @@ func tryExecLogin(dlogf logger.Logf, ia incubatorArgs) error { return nil } loginArgs := ia.loginArgs(loginCmdPath) - dlogf("logging in with %s %+v", loginCmdPath, loginArgs) + dlogf("logging in with %+v", loginArgs) // If Exec works, the Go code will not proceed past this: err = unix.Exec(loginCmdPath, loginArgs, os.Environ()) @@ -435,13 +439,18 @@ func trySU(dlogf logger.Logf, ia incubatorArgs) (handled bool, err error) { defer sessionCloser() } - loginArgs := []string{"-l", ia.localUser} + loginArgs := []string{ + su, + "-w", "SSH_AUTH_SOCK", // pass through SSH_AUTH_SOCK environment variable to support ssh agent forwarding + "-l", + ia.localUser, + } if ia.cmd != "" { // 
Note - unlike the login command, su allows using both -l and -c. loginArgs = append(loginArgs, "-c", ia.cmd) } - dlogf("logging in with %s %q", su, loginArgs) + dlogf("logging in with %+v", loginArgs) // If Exec works, the Go code will not proceed past this: err = unix.Exec(su, loginArgs, os.Environ()) @@ -473,9 +482,15 @@ func findSU(dlogf logger.Logf, ia incubatorArgs) string { return "" } - // First try to execute su -l -c true to make sure su supports the - // necessary arguments. - err = exec.Command(su, "-l", ia.localUser, "-c", "true").Run() + // First try to execute su -w SSH_AUTH_SOCK -l -c true + // to make sure su supports the necessary arguments. + err = exec.Command( + su, + "-w", "SSH_AUTH_SOCK", + "-l", + ia.localUser, + "-c", "true", + ).Run() if err != nil { dlogf("su check failed: %s", err) return "" @@ -554,7 +569,7 @@ const ( // dropPrivileges calls doDropPrivileges with uid, gid, and gids from the given // incubatorArgs. func dropPrivileges(dlogf logger.Logf, ia incubatorArgs) error { - return doDropPrivileges(dlogf, ia.uid, ia.gid, ia.gids) + return doDropPrivileges(dlogf, ia.uid, ia.gid, ia.gids, ia.homeDir) } // doDropPrivileges contains all the logic for dropping privileges to a different @@ -567,7 +582,7 @@ func dropPrivileges(dlogf logger.Logf, ia incubatorArgs) error { // be done by running: // // go test -c ./ssh/tailssh/ && sudo ./tailssh.test -test.v -test.run TestDoDropPrivileges -func doDropPrivileges(dlogf logger.Logf, wantUid, wantGid int, supplementaryGroups []int) error { +func doDropPrivileges(dlogf logger.Logf, wantUid, wantGid int, supplementaryGroups []int, homeDir string) error { dlogf("dropping privileges") fatalf := func(format string, args ...any) { dlogf("[unexpected] error dropping privileges: "+format, args...) 
@@ -653,6 +668,13 @@ func doDropPrivileges(dlogf logger.Logf, wantUid, wantGid int, supplementaryGrou // TODO(andrew-d): assert that our supplementary groups are correct } + // Prefer to run in user's homedir if possible. We ignore a failure to Chdir, + // which just leaves us at "/" where we launched in the first place. + dlogf("attempting to chdir to user's home directory %q", homeDir) + if err := os.Chdir(homeDir); err != nil { + dlogf("failed to chdir to user's home directory %q, continuing in current directory", homeDir) + } + return nil } @@ -669,16 +691,7 @@ func (ss *sshSession) launchProcess() error { } cmd := ss.cmd - homeDir := ss.conn.localUser.HomeDir - if _, err := os.Stat(homeDir); err == nil { - cmd.Dir = homeDir - } else if os.IsNotExist(err) { - // If the home directory doesn't exist, we can't chdir to it. - // Instead, we'll chdir to the root directory. - cmd.Dir = "/" - } else { - return err - } + cmd.Dir = "/" cmd.Env = envForUser(ss.conn.localUser) for _, kv := range ss.Environ() { if acceptEnvPair(kv) { diff --git a/ssh/tailssh/privs_test.go b/ssh/tailssh/privs_test.go index 5ebf4e25c760a..32b219a7798ca 100644 --- a/ssh/tailssh/privs_test.go +++ b/ssh/tailssh/privs_test.go @@ -49,7 +49,7 @@ func TestDoDropPrivileges(t *testing.T) { f := os.NewFile(3, "out.json") // We're in our subprocess; actually drop privileges now. 
- doDropPrivileges(t.Logf, input.UID, input.GID, input.AdditionalGroups) + doDropPrivileges(t.Logf, input.UID, input.GID, input.AdditionalGroups, "/") additional, _ := syscall.Getgroups() diff --git a/ssh/tailssh/tailssh.go b/ssh/tailssh/tailssh.go index fd747f5917ed1..7187b5b595ff7 100644 --- a/ssh/tailssh/tailssh.go +++ b/ssh/tailssh/tailssh.go @@ -45,6 +45,7 @@ import ( "tailscale.com/util/clientmetric" "tailscale.com/util/httpm" "tailscale.com/util/mak" + "tailscale.com/util/slicesx" ) var ( @@ -330,7 +331,7 @@ func (c *conn) nextAuthMethodCallback(cm gossh.ConnMetadata, prevErrors []error) switch { case c.anyPasswordIsOkay: nextMethod = append(nextMethod, "password") - case len(prevErrors) > 0 && prevErrors[len(prevErrors)-1] == errPubKeyRequired: + case slicesx.LastEqual(prevErrors, errPubKeyRequired): nextMethod = append(nextMethod, "publickey") } @@ -1731,6 +1732,7 @@ func envValFromList(env []string, wantKey string) (v string) { // envEq reports whether environment variable a == b for the current // operating system. 
func envEq(a, b string) bool { + //lint:ignore SA4032 in case this func moves elsewhere, permit the GOOS check if runtime.GOOS == "windows" { return strings.EqualFold(a, b) } diff --git a/ssh/tailssh/tailssh_integration_test.go b/ssh/tailssh/tailssh_integration_test.go index 62413e3a59a00..485c13fdbd1b2 100644 --- a/ssh/tailssh/tailssh_integration_test.go +++ b/ssh/tailssh/tailssh_integration_test.go @@ -122,13 +122,13 @@ func TestIntegrationSSH(t *testing.T) { { cmd: "pwd", want: []string{homeDir}, - skip: !fallbackToSUAvailable(), + skip: os.Getenv("SKIP_FILE_OPS") == "1" || !fallbackToSUAvailable(), forceV1Behavior: false, }, { cmd: "echo 'hello'", want: []string{"hello"}, - skip: !fallbackToSUAvailable(), + skip: os.Getenv("SKIP_FILE_OPS") == "1" || !fallbackToSUAvailable(), forceV1Behavior: false, }, } @@ -349,7 +349,7 @@ func TestSSHAgentForwarding(t *testing.T) { // Run tailscale SSH server and connect to it username := "testuser" - tailscaleAddr := testServer(t, username, false) // TODO: make this false to use V2 behavior + tailscaleAddr := testServer(t, username, false) tcl, err := ssh.Dial("tcp", tailscaleAddr, &ssh.ClientConfig{ HostKeyCallback: ssh.InsecureIgnoreHostKey(), }) @@ -387,7 +387,7 @@ func TestSSHAgentForwarding(t *testing.T) { o, err := s.CombinedOutput(fmt.Sprintf(`ssh -T -o StrictHostKeyChecking=no -p %s upstreamuser@%s "true"`, upstreamPort, upstreamHost)) if err != nil { - t.Fatalf("unable to call true command: %s\n%s", err, o) + t.Fatalf("unable to call true command: %s\n%s\n-------------------------", err, o) } } diff --git a/ssh/tailssh/testcontainers/Dockerfile b/ssh/tailssh/testcontainers/Dockerfile index ff27981ef931a..c94c961d37c61 100644 --- a/ssh/tailssh/testcontainers/Dockerfile +++ b/ssh/tailssh/testcontainers/Dockerfile @@ -1,62 +1,75 @@ ARG BASE FROM ${BASE} +ARG BASE + RUN echo "Install openssh, needed for scp." 
-RUN apt-get update -y && apt-get install -y openssh-client +RUN if echo "$BASE" | grep "ubuntu:"; then apt-get update -y && apt-get install -y openssh-client; fi +RUN if echo "$BASE" | grep "alpine:"; then apk add openssh; fi -RUN groupadd -g 10000 groupone -RUN groupadd -g 10001 grouptwo -# Note - we do not create the user's home directory, pam_mkhomedir will do that +# Note - on Ubuntu, we do not create the user's home directory, pam_mkhomedir will do that # for us, and we want to test that PAM gets triggered by Tailscale SSH. -RUN useradd -g 10000 -G 10001 -u 10002 testuser +RUN if echo "$BASE" | grep "ubuntu:"; then groupadd -g 10000 groupone && groupadd -g 10001 grouptwo && useradd -g 10000 -G 10001 -u 10002 testuser; fi +# On Alpine, we can't configure pam_mkhomdir, so go ahead and create home directory. +RUN if echo "$BASE" | grep "alpine:"; then addgroup -g 10000 groupone && addgroup -g 10001 grouptwo && adduser -u 10002 -D testuser && addgroup testuser groupone && addgroup testuser grouptwo; fi -RUN echo "Set up pam_mkhomedir." -RUN sed -i -e 's/Default: no/Default: yes/g' /usr/share/pam-configs/mkhomedir || echo "might not be ubuntu" -RUN cat /usr/share/pam-configs/mkhomedir -RUN pam-auth-update --enable mkhomedir +RUN if echo "$BASE" | grep "ubuntu:"; then \ + echo "Set up pam_mkhomedir." && \ + sed -i -e 's/Default: no/Default: yes/g' /usr/share/pam-configs/mkhomedir && \ + cat /usr/share/pam-configs/mkhomedir && \ + pam-auth-update --enable mkhomedir \ + ; fi COPY tailscaled . COPY tailssh.test . RUN chmod 755 tailscaled -# RUN echo "First run tests normally." +RUN echo "First run tests normally." 
RUN eval `ssh-agent -s` && TAILSCALED_PATH=`pwd`tailscaled ./tailssh.test -test.v -test.run TestSSHAgentForwarding -RUN rm -Rf /home/testuser +RUN if echo "$BASE" | grep "ubuntu:"; then rm -Rf /home/testuser; fi RUN TAILSCALED_PATH=`pwd`tailscaled ./tailssh.test -test.v -test.run TestIntegrationSFTP -RUN rm -Rf /home/testuser +RUN if echo "$BASE" | grep "ubuntu:"; then rm -Rf /home/testuser; fi RUN TAILSCALED_PATH=`pwd`tailscaled ./tailssh.test -test.v -test.run TestIntegrationSCP -RUN rm -Rf /home/testuser +RUN if echo "$BASE" | grep "ubuntu:"; then rm -Rf /home/testuser; fi RUN TAILSCALED_PATH=`pwd`tailscaled ./tailssh.test -test.v -test.run TestIntegrationSSH RUN echo "Then run tests as non-root user testuser and make sure tests still pass." +RUN touch /tmp/tailscalessh.log RUN chown testuser:groupone /tmp/tailscalessh.log RUN TAILSCALED_PATH=`pwd`tailscaled eval `su -m testuser -c ssh-agent -s` && su -m testuser -c "./tailssh.test -test.v -test.run TestSSHAgentForwarding" RUN TAILSCALED_PATH=`pwd`tailscaled su -m testuser -c "./tailssh.test -test.v -test.run TestIntegration TestDoDropPrivileges" +RUN echo "Also, deny everyone access to the user's home directory and make sure non file-related tests still pass." +RUN mkdir -p /home/testuser && chown testuser:groupone /home/testuser && chmod 0000 /home/testuser +RUN TAILSCALED_PATH=`pwd`tailscaled SKIP_FILE_OPS=1 su -m testuser -c "./tailssh.test -test.v -test.run TestIntegrationSSH" +RUN chmod 0755 /home/testuser RUN chown root:root /tmp/tailscalessh.log -RUN echo "Then run tests in a system that's pretending to be SELinux in enforcing mode" -RUN mv /usr/bin/login /tmp/login_orig -# Use nonsense for /usr/bin/login so that it fails. -# It's not the same failure mode as in SELinux, but failure is good enough for test. 
-RUN echo "adsfasdfasdf" > /usr/bin/login -RUN chmod 755 /usr/bin/login -# Simulate getenforce command -RUN printf "#!/bin/bash\necho 'Enforcing'" > /usr/bin/getenforce -RUN chmod 755 /usr/bin/getenforce -RUN eval `ssh-agent -s` && TAILSCALED_PATH=`pwd`tailscaled ./tailssh.test -test.v -test.run TestSSHAgentForwarding -RUN TAILSCALED_PATH=`pwd`tailscaled ./tailssh.test -test.v -test.run TestIntegration -RUN mv /tmp/login_orig /usr/bin/login -RUN rm /usr/bin/getenforce +RUN if echo "$BASE" | grep "ubuntu:"; then \ + echo "Then run tests in a system that's pretending to be SELinux in enforcing mode" && \ + # Remove execute permissions for /usr/bin/login so that it fails. + mv /usr/bin/login /tmp/login_orig && \ + # Use nonsense for /usr/bin/login so that it fails. + # It's not the same failure mode as in SELinux, but failure is good enough for test. + echo "adsfasdfasdf" > /usr/bin/login && \ + chmod 755 /usr/bin/login && \ + # Simulate getenforce command + printf "#!/bin/bash\necho 'Enforcing'" > /usr/bin/getenforce && \ + chmod 755 /usr/bin/getenforce && \ + eval `ssh-agent -s` && TAILSCALED_PATH=`pwd`tailscaled ./tailssh.test -test.v -test.run TestSSHAgentForwarding && \ + TAILSCALED_PATH=`pwd`tailscaled ./tailssh.test -test.v -test.run TestIntegration && \ + mv /tmp/login_orig /usr/bin/login && \ + rm /usr/bin/getenforce \ + ; fi RUN echo "Then remove the login command and make sure tests still pass." 
RUN rm `which login` RUN eval `ssh-agent -s` && TAILSCALED_PATH=`pwd`tailscaled ./tailssh.test -test.v -test.run TestSSHAgentForwarding -RUN rm -Rf /home/testuser +RUN if echo "$BASE" | grep "ubuntu:"; then rm -Rf /home/testuser; fi RUN TAILSCALED_PATH=`pwd`tailscaled ./tailssh.test -test.v -test.run TestIntegrationSFTP -RUN rm -Rf /home/testuser +RUN if echo "$BASE" | grep "ubuntu:"; then rm -Rf /home/testuser; fi RUN TAILSCALED_PATH=`pwd`tailscaled ./tailssh.test -test.v -test.run TestIntegrationSCP -RUN rm -Rf /home/testuser +RUN if echo "$BASE" | grep "ubuntu:"; then rm -Rf /home/testuser; fi RUN TAILSCALED_PATH=`pwd`tailscaled ./tailssh.test -test.v -test.run TestIntegrationSSH RUN echo "Then remove the su command and make sure tests still pass." diff --git a/tailcfg/tailcfg.go b/tailcfg/tailcfg.go index 5a06c89ff9648..3cf486b0dee56 100644 --- a/tailcfg/tailcfg.go +++ b/tailcfg/tailcfg.go @@ -147,7 +147,9 @@ type CapabilityVersion int // - 102: 2024-07-12: NodeAttrDisableMagicSockCryptoRouting support // - 103: 2024-07-24: Client supports NodeAttrDisableCaptivePortalDetection // - 104: 2024-08-03: SelfNodeV6MasqAddrForThisPeer now works -const CurrentCapabilityVersion CapabilityVersion = 104 +// - 105: 2024-08-05: Fixed SSH behavior on systems that use busybox (issue #12849) +// - 106: 2024-09-03: fix panic regression from cryptokey routing change (65fe0ba7b5) +const CurrentCapabilityVersion CapabilityVersion = 106 type StableID string @@ -2306,6 +2308,13 @@ const ( // Added 2024-05-29 in Tailscale version 1.68. NodeAttrSSHBehaviorV1 NodeCapability = "ssh-behavior-v1" + // NodeAttrSSHBehaviorV2 forces SSH to use the V2 behavior (use su, run SFTP in child process). + // This overrides NodeAttrSSHBehaviorV1 if set. + // See forceV1Behavior in ssh/tailssh/incubator.go for distinction between + // V1 and V2 behavior. + // Added 2024-08-06 in Tailscale version 1.72. 
+ NodeAttrSSHBehaviorV2 NodeCapability = "ssh-behavior-v2" + // NodeAttrDisableSplitDNSWhenNoCustomResolvers indicates that the node's // DNS manager should not adopt a split DNS configuration even though the // Config of the resolver only contains routes that do not specify custom diff --git a/tka/sig.go b/tka/sig.go index 6c68a588eb56f..c82f9715c33fb 100644 --- a/tka/sig.go +++ b/tka/sig.go @@ -19,6 +19,8 @@ import ( "tailscale.com/types/tkatype" ) +//go:generate go run tailscale.com/cmd/cloner -clonefunc=false -type=NodeKeySignature + // SigKind describes valid NodeKeySignature types. type SigKind uint8 @@ -370,10 +372,15 @@ func ResignNKS(priv key.NLPrivate, nodeKey key.NodePublic, oldNKS tkatype.Marsha return oldNKS, nil } + nested, err := maybeTrimRotationSignatureChain(oldSig, priv) + if err != nil { + return nil, fmt.Errorf("trimming rotation signature chain: %w", err) + } + newSig := NodeKeySignature{ SigKind: SigRotation, Pubkey: nk, - Nested: &oldSig, + Nested: &nested, } if newSig.Signature, err = priv.SignNKS(newSig.SigHash()); err != nil { return nil, fmt.Errorf("signing NKS: %w", err) @@ -382,6 +389,51 @@ func ResignNKS(priv key.NLPrivate, nodeKey key.NodePublic, oldNKS tkatype.Marsha return newSig.Serialize(), nil } +// maybeTrimRotationSignatureChain truncates rotation signature chain to ensure +// it contains no more than 15 node keys. +func maybeTrimRotationSignatureChain(sig NodeKeySignature, priv key.NLPrivate) (NodeKeySignature, error) { + if sig.SigKind != SigRotation { + return sig, nil + } + + // Collect all the previous node keys, ordered from newest to oldest. 
+ prevPubkeys := [][]byte{sig.Pubkey} + nested := sig.Nested + for nested != nil { + if len(nested.Pubkey) > 0 { + prevPubkeys = append(prevPubkeys, nested.Pubkey) + } + if nested.SigKind != SigRotation { + break + } + nested = nested.Nested + } + + // Existing rotation signature with 15 keys is the maximum we can wrap in a + // new signature without hitting the CBOR nesting limit of 16 (see + // MaxNestedLevels in tka.go). + const maxPrevKeys = 15 + if len(prevPubkeys) <= maxPrevKeys { + return sig, nil + } + + // Create a new rotation signature chain, starting with the original + // direct signature. + var err error + result := nested // original direct signature + for i := maxPrevKeys - 2; i >= 0; i-- { + result = &NodeKeySignature{ + SigKind: SigRotation, + Pubkey: prevPubkeys[i], + Nested: result, + } + if result.Signature, err = priv.SignNKS(result.SigHash()); err != nil { + return sig, fmt.Errorf("signing NKS: %w", err) + } + } + return *result, nil +} + // SignByCredential signs a node public key by a private key which has its // signing authority delegated by a SigCredential signature. This is used by // wrapped auth keys. diff --git a/tka/sig_test.go b/tka/sig_test.go index d857eaf5516ca..d64575e7c7b45 100644 --- a/tka/sig_test.go +++ b/tka/sig_test.go @@ -9,7 +9,9 @@ import ( "testing" "github.com/google/go-cmp/cmp" + "github.com/google/go-cmp/cmp/cmpopts" "tailscale.com/types/key" + "tailscale.com/types/tkatype" ) func TestSigDirect(t *testing.T) { @@ -74,6 +76,9 @@ func TestSigNested(t *testing.T) { if err := nestedSig.verifySignature(oldNode.Public(), k); err != nil { t.Fatalf("verifySignature(oldNode) failed: %v", err) } + if l := sigChainLength(nestedSig); l != 1 { + t.Errorf("nestedSig chain length = %v, want 1", l) + } // The signature authorizing the rotation, signed by the // rotation key & embedding the original signature. 
@@ -88,6 +93,9 @@ func TestSigNested(t *testing.T) { if err := sig.verifySignature(node.Public(), k); err != nil { t.Fatalf("verifySignature(node) failed: %v", err) } + if l := sigChainLength(sig); l != 2 { + t.Errorf("sig chain length = %v, want 2", l) + } // Test verification fails if the wrong verification key is provided kBad := Key{Kind: Key25519, Public: []byte{1, 2, 3, 4}, Votes: 2} @@ -497,3 +505,129 @@ func TestDecodeWrappedAuthkey(t *testing.T) { } } + +func TestResignNKS(t *testing.T) { + // Tailnet lock keypair of a signing node. + authPub, authPriv := testingKey25519(t, 1) + authKey := Key{Kind: Key25519, Public: authPub, Votes: 2} + + // Node's own tailnet lock key used to sign rotation signatures. + tlPriv := key.NewNLPrivate() + + // The original (oldest) node key, signed by a signing node. + origNode := key.NewNode() + origPub, _ := origNode.Public().MarshalBinary() + + // The original signature for the old node key, signed by + // the network-lock key. + directSig := NodeKeySignature{ + SigKind: SigDirect, + KeyID: authKey.MustID(), + Pubkey: origPub, + WrappingPubkey: tlPriv.Public().Verifier(), + } + sigHash := directSig.SigHash() + directSig.Signature = ed25519.Sign(authPriv, sigHash[:]) + if err := directSig.verifySignature(origNode.Public(), authKey); err != nil { + t.Fatalf("verifySignature(origNode) failed: %v", err) + } + + // Generate a bunch of node keys to be used by tests. + var nodeKeys []key.NodePublic + for range 20 { + n := key.NewNode() + nodeKeys = append(nodeKeys, n.Public()) + } + + // mkSig creates a signature chain starting with a direct signature + // with rotation signatures matching provided keys (from the nodeKeys slice). 
+ mkSig := func(prevKeyIDs ...int) tkatype.MarshaledSignature { + sig := &directSig + for _, i := range prevKeyIDs { + pk, _ := nodeKeys[i].MarshalBinary() + sig = &NodeKeySignature{ + SigKind: SigRotation, + Pubkey: pk, + Nested: sig, + } + var err error + sig.Signature, err = tlPriv.SignNKS(sig.SigHash()) + if err != nil { + t.Error(err) + } + } + return sig.Serialize() + } + + tests := []struct { + name string + oldSig tkatype.MarshaledSignature + wantPrevNodeKeys []key.NodePublic + }{ + { + name: "first-rotation", + oldSig: directSig.Serialize(), + wantPrevNodeKeys: []key.NodePublic{origNode.Public()}, + }, + { + name: "second-rotation", + oldSig: mkSig(0), + wantPrevNodeKeys: []key.NodePublic{nodeKeys[0], origNode.Public()}, + }, + { + name: "truncate-chain", + oldSig: mkSig(0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14), + wantPrevNodeKeys: []key.NodePublic{ + nodeKeys[14], + nodeKeys[13], + nodeKeys[12], + nodeKeys[11], + nodeKeys[10], + nodeKeys[9], + nodeKeys[8], + nodeKeys[7], + nodeKeys[6], + nodeKeys[5], + nodeKeys[4], + nodeKeys[3], + nodeKeys[2], + nodeKeys[1], + origNode.Public(), + }, + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + newNode := key.NewNode() + got, err := ResignNKS(tlPriv, newNode.Public(), tt.oldSig) + if err != nil { + t.Fatalf("ResignNKS() error = %v", err) + } + var gotSig NodeKeySignature + if err := gotSig.Unserialize(got); err != nil { + t.Fatalf("Unserialize() failed: %v", err) + } + if err := gotSig.verifySignature(newNode.Public(), authKey); err != nil { + t.Errorf("verifySignature(newNode) error: %v", err) + } + + rd, err := gotSig.rotationDetails() + if err != nil { + t.Fatalf("rotationDetails() error = %v", err) + } + if sigChainLength(gotSig) != len(tt.wantPrevNodeKeys)+1 { + t.Errorf("sigChainLength() = %v, want %v", sigChainLength(gotSig), len(tt.wantPrevNodeKeys)+1) + } + if diff := cmp.Diff(tt.wantPrevNodeKeys, rd.PrevNodeKeys, cmpopts.EquateComparable(key.NodePublic{})); diff != "" { 
+ t.Errorf("PrevNodeKeys mismatch (-want +got):\n%s", diff) + } + }) + } +} + +func sigChainLength(s NodeKeySignature) int { + if s.Nested != nil { + return 1 + sigChainLength(*s.Nested) + } + return 1 +} diff --git a/tka/tka_clone.go b/tka/tka_clone.go new file mode 100644 index 0000000000000..323a824fe5a63 --- /dev/null +++ b/tka/tka_clone.go @@ -0,0 +1,32 @@ +// Copyright (c) Tailscale Inc & AUTHORS +// SPDX-License-Identifier: BSD-3-Clause + +// Code generated by tailscale.com/cmd/cloner; DO NOT EDIT. + +package tka + +// Clone makes a deep copy of NodeKeySignature. +// The result aliases no memory with the original. +func (src *NodeKeySignature) Clone() *NodeKeySignature { + if src == nil { + return nil + } + dst := new(NodeKeySignature) + *dst = *src + dst.Pubkey = append(src.Pubkey[:0:0], src.Pubkey...) + dst.KeyID = append(src.KeyID[:0:0], src.KeyID...) + dst.Signature = append(src.Signature[:0:0], src.Signature...) + dst.Nested = src.Nested.Clone() + dst.WrappingPubkey = append(src.WrappingPubkey[:0:0], src.WrappingPubkey...) + return dst +} + +// A compilation failure here means this code must be regenerated, with the command at the top of this file. +var _NodeKeySignatureCloneNeedsRegeneration = NodeKeySignature(struct { + SigKind SigKind + Pubkey []byte + KeyID []byte + Signature []byte + Nested *NodeKeySignature + WrappingPubkey []byte +}{}) diff --git a/tsnet/tsnet.go b/tsnet/tsnet.go index 8be54bb734ab6..ca6c44ea7f1a9 100644 --- a/tsnet/tsnet.go +++ b/tsnet/tsnet.go @@ -2,8 +2,6 @@ // SPDX-License-Identifier: BSD-3-Clause // Package tsnet provides Tailscale as a library. -// -// It is an experimental work in progress. 
package tsnet import ( diff --git a/tsnet/tsnet_test.go b/tsnet/tsnet_test.go index 9589b47967eca..7f6fb00c0d1be 100644 --- a/tsnet/tsnet_test.go +++ b/tsnet/tsnet_test.go @@ -31,8 +31,10 @@ import ( "testing" "time" + "github.com/google/go-cmp/cmp" "golang.org/x/net/proxy" "tailscale.com/cmd/testwrapper/flakytest" + "tailscale.com/health" "tailscale.com/ipn" "tailscale.com/ipn/store/mem" "tailscale.com/net/netns" @@ -815,3 +817,66 @@ func TestUDPConn(t *testing.T) { t.Errorf("got %q, want world", got) } } + +func TestUserMetrics(t *testing.T) { + tstest.ResourceCheck(t) + ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second) + defer cancel() + + // testWarnable is a Warnable that is used within this package for testing purposes only. + var testWarnable = health.Register(&health.Warnable{ + Code: "test-warnable-tsnet", + Title: "Test warnable", + Severity: health.SeverityLow, + Text: func(args health.Args) string { + return args[health.ArgError] + }, + }) + + controlURL, c := startControl(t) + s1, _, s1PubKey := startServer(t, ctx, controlURL, "s1") + + s1.lb.EditPrefs(&ipn.MaskedPrefs{ + Prefs: ipn.Prefs{ + AdvertiseRoutes: []netip.Prefix{ + netip.MustParsePrefix("192.0.2.0/24"), + netip.MustParsePrefix("192.0.3.0/24"), + }, + }, + AdvertiseRoutesSet: true, + }) + c.SetSubnetRoutes(s1PubKey, []netip.Prefix{netip.MustParsePrefix("192.0.2.0/24")}) + + lc1, err := s1.LocalClient() + if err != nil { + t.Fatal(err) + } + + ht := s1.lb.HealthTracker() + ht.SetUnhealthy(testWarnable, health.Args{"Text": "Hello world 1"}) + + metrics1, err := lc1.UserMetrics(ctx) + if err != nil { + t.Fatal(err) + } + + // Note that this test will check for two warnings because the health + // tracker will have two warnings: one from the testWarnable, added in + // this test, and one because we are running the dev/unstable version + // of tailscale. 
+ want := `# TYPE tailscaled_advertised_routes gauge +# HELP tailscaled_advertised_routes Number of advertised network routes (e.g. by a subnet router) +tailscaled_advertised_routes 2 +# TYPE tailscaled_health_messages gauge +# HELP tailscaled_health_messages Number of health messages broken down by type. +tailscaled_health_messages{type="warning"} 2 +# TYPE tailscaled_inbound_dropped_packets_total counter +# HELP tailscaled_inbound_dropped_packets_total Counts the number of dropped packets received by the node from other peers +# TYPE tailscaled_outbound_dropped_packets_total counter +# HELP tailscaled_outbound_dropped_packets_total Counts the number of packets dropped while being sent to other peers +` + + if diff := cmp.Diff(want, string(metrics1)); diff != "" { + t.Fatalf("unexpected metrics (-want +got):\n%s", diff) + } +} diff --git a/tstest/integration/integration_test.go b/tstest/integration/integration_test.go index 30ac510361c06..ecb655fe9f474 100644 --- a/tstest/integration/integration_test.go +++ b/tstest/integration/integration_test.go @@ -23,12 +23,14 @@ import ( "path/filepath" "regexp" "runtime" + "strconv" "strings" "sync" "sync/atomic" "testing" "time" + "github.com/miekg/dns" "go4.org/mem" "tailscale.com/client/tailscale" "tailscale.com/clientupdate" @@ -37,6 +39,8 @@ import ( "tailscale.com/ipn/ipnlocal" "tailscale.com/ipn/ipnstate" "tailscale.com/ipn/store" + "tailscale.com/net/tsaddr" + "tailscale.com/net/tstun" "tailscale.com/safesocket" "tailscale.com/syncs" "tailscale.com/tailcfg" @@ -46,6 +50,7 @@ import ( "tailscale.com/types/logger" "tailscale.com/types/opt" "tailscale.com/types/ptr" + "tailscale.com/util/dnsname" "tailscale.com/util/must" "tailscale.com/util/rands" "tailscale.com/version" @@ -1118,13 +1123,386 @@ func TestAutoUpdateDefaults(t *testing.T) { } } +// TestDNSOverTCPIntervalResolver tests that the quad-100 resolver successfully +// serves TCP queries. It exercises the host's TCP stack, a TUN device, and +// gVisor/netstack. 
+// https://github.com/tailscale/corp/issues/22511 +func TestDNSOverTCPIntervalResolver(t *testing.T) { + tstest.Shard(t) + if os.Getuid() != 0 { + t.Skip("skipping when not root") + } + env := newTestEnv(t) + env.tunMode = true + n1 := newTestNode(t, env) + d1 := n1.StartDaemon() + + n1.AwaitResponding() + n1.MustUp() + + wantIP4 := n1.AwaitIP4() + n1.AwaitRunning() + + status, err := n1.Status() + if err != nil { + t.Fatalf("failed to get node status: %v", err) + } + selfDNSName, err := dnsname.ToFQDN(status.Self.DNSName) + if err != nil { + t.Fatalf("error converting self dns name to fqdn: %v", err) + } + + cases := []struct { + network string + serviceAddr netip.Addr + }{ + { + "tcp4", + tsaddr.TailscaleServiceIP(), + }, + { + "tcp6", + tsaddr.TailscaleServiceIPv6(), + }, + } + for _, c := range cases { + err = tstest.WaitFor(time.Second*5, func() error { + m := new(dns.Msg) + m.SetQuestion(selfDNSName.WithTrailingDot(), dns.TypeA) + conn, err := net.DialTimeout(c.network, net.JoinHostPort(c.serviceAddr.String(), "53"), time.Second*1) + if err != nil { + return err + } + defer conn.Close() + dnsConn := &dns.Conn{ + Conn: conn, + } + dnsClient := &dns.Client{} + ctx, cancel := context.WithTimeout(context.Background(), time.Second) + defer cancel() + resp, _, err := dnsClient.ExchangeWithConnContext(ctx, m, dnsConn) + if err != nil { + return err + } + if len(resp.Answer) != 1 { + return fmt.Errorf("unexpected DNS resp: %s", resp) + } + var gotAddr net.IP + answer, ok := resp.Answer[0].(*dns.A) + if !ok { + return fmt.Errorf("unexpected answer type: %s", resp.Answer[0]) + } + gotAddr = answer.A + if !bytes.Equal(gotAddr, wantIP4.AsSlice()) { + return fmt.Errorf("got (%s) != want (%s)", gotAddr, wantIP4) + } + return nil + }) + if err != nil { + t.Fatal(err) + } + } + + d1.MustCleanShutdown(t) +} + +// TestNetstackTCPLoopback tests netstack loopback of a TCP stream, in both +// directions. 
+func TestNetstackTCPLoopback(t *testing.T) { + tstest.Shard(t) + if os.Getuid() != 0 { + t.Skip("skipping when not root") + } + + env := newTestEnv(t) + env.tunMode = true + loopbackPort := 5201 + env.loopbackPort = &loopbackPort + loopbackPortStr := strconv.Itoa(loopbackPort) + n1 := newTestNode(t, env) + d1 := n1.StartDaemon() + + n1.AwaitResponding() + n1.MustUp() + + n1.AwaitIP4() + n1.AwaitRunning() + + cases := []struct { + lisAddr string + network string + dialAddr string + }{ + { + lisAddr: net.JoinHostPort("127.0.0.1", loopbackPortStr), + network: "tcp4", + dialAddr: net.JoinHostPort(tsaddr.TailscaleServiceIPString, loopbackPortStr), + }, + { + lisAddr: net.JoinHostPort("::1", loopbackPortStr), + network: "tcp6", + dialAddr: net.JoinHostPort(tsaddr.TailscaleServiceIPv6String, loopbackPortStr), + }, + } + + writeBufSize := 128 << 10 // 128KiB, exercise GSO if enabled + writeBufIterations := 100 // allow TCP send window to open up + wantTotal := writeBufSize * writeBufIterations + + for _, c := range cases { + lis, err := net.Listen(c.network, c.lisAddr) + if err != nil { + t.Fatal(err) + } + defer lis.Close() + + writeFn := func(conn net.Conn) error { + for i := 0; i < writeBufIterations; i++ { + toWrite := make([]byte, writeBufSize) + var wrote int + for { + n, err := conn.Write(toWrite) + if err != nil { + return err + } + wrote += n + if wrote == len(toWrite) { + break + } + } + } + return nil + } + + readFn := func(conn net.Conn) error { + var read int + for { + b := make([]byte, writeBufSize) + n, err := conn.Read(b) + if err != nil { + return err + } + read += n + if read == wantTotal { + return nil + } + } + } + + lisStepCh := make(chan error) + go func() { + conn, err := lis.Accept() + if err != nil { + lisStepCh <- err + return + } + lisStepCh <- readFn(conn) + lisStepCh <- writeFn(conn) + }() + + var conn net.Conn + err = tstest.WaitFor(time.Second*5, func() error { + conn, err = net.DialTimeout(c.network, c.dialAddr, time.Second*1) + if err != 
nil { + return err + } + return nil + }) + if err != nil { + t.Fatal(err) + } + defer conn.Close() + + dialerStepCh := make(chan error) + go func() { + dialerStepCh <- writeFn(conn) + dialerStepCh <- readFn(conn) + }() + + var ( + dialerSteps int + lisSteps int + ) + for { + select { + case lisErr := <-lisStepCh: + if lisErr != nil { + t.Fatal(err) + } + lisSteps++ + if dialerSteps == 2 && lisSteps == 2 { + return + } + case dialerErr := <-dialerStepCh: + if dialerErr != nil { + t.Fatal(err) + } + dialerSteps++ + if dialerSteps == 2 && lisSteps == 2 { + return + } + } + } + } + + d1.MustCleanShutdown(t) +} + +// TestNetstackUDPLoopback tests netstack loopback of UDP packets, in both +// directions. +func TestNetstackUDPLoopback(t *testing.T) { + tstest.Shard(t) + if os.Getuid() != 0 { + t.Skip("skipping when not root") + } + + env := newTestEnv(t) + env.tunMode = true + loopbackPort := 5201 + env.loopbackPort = &loopbackPort + n1 := newTestNode(t, env) + d1 := n1.StartDaemon() + + n1.AwaitResponding() + n1.MustUp() + + ip4 := n1.AwaitIP4() + ip6 := n1.AwaitIP6() + n1.AwaitRunning() + + cases := []struct { + pingerLAddr *net.UDPAddr + pongerLAddr *net.UDPAddr + network string + dialAddr *net.UDPAddr + }{ + { + pingerLAddr: &net.UDPAddr{IP: ip4.AsSlice(), Port: loopbackPort + 1}, + pongerLAddr: &net.UDPAddr{IP: net.ParseIP("127.0.0.1"), Port: loopbackPort}, + network: "udp4", + dialAddr: &net.UDPAddr{IP: tsaddr.TailscaleServiceIP().AsSlice(), Port: loopbackPort}, + }, + { + pingerLAddr: &net.UDPAddr{IP: ip6.AsSlice(), Port: loopbackPort + 1}, + pongerLAddr: &net.UDPAddr{IP: net.ParseIP("::1"), Port: loopbackPort}, + network: "udp6", + dialAddr: &net.UDPAddr{IP: tsaddr.TailscaleServiceIPv6().AsSlice(), Port: loopbackPort}, + }, + } + + writeBufSize := int(tstun.DefaultTUNMTU()) - 40 - 8 // mtu - ipv6 header - udp header + wantPongs := 100 + + for _, c := range cases { + pongerConn, err := net.ListenUDP(c.network, c.pongerLAddr) + if err != nil { + t.Fatal(err) + } + 
defer pongerConn.Close() + + var pingerConn *net.UDPConn + err = tstest.WaitFor(time.Second*5, func() error { + pingerConn, err = net.DialUDP(c.network, c.pingerLAddr, c.dialAddr) + return err + }) + if err != nil { + t.Fatal(err) + } + defer pingerConn.Close() + + pingerFn := func(conn *net.UDPConn) error { + b := make([]byte, writeBufSize) + n, err := conn.Write(b) + if err != nil { + return err + } + if n != len(b) { + return fmt.Errorf("bad write size: %d", n) + } + err = conn.SetReadDeadline(time.Now().Add(time.Millisecond * 500)) + if err != nil { + return err + } + n, err = conn.Read(b) + if err != nil { + return err + } + if n != len(b) { + return fmt.Errorf("bad read size: %d", n) + } + return nil + } + + pongerFn := func(conn *net.UDPConn) error { + for { + b := make([]byte, writeBufSize) + n, from, err := conn.ReadFromUDP(b) + if err != nil { + return err + } + if n != len(b) { + return fmt.Errorf("bad read size: %d", n) + } + n, err = conn.WriteToUDP(b, from) + if err != nil { + return err + } + if n != len(b) { + return fmt.Errorf("bad write size: %d", n) + } + } + } + + pongerErrCh := make(chan error, 1) + go func() { + pongerErrCh <- pongerFn(pongerConn) + }() + + err = tstest.WaitFor(time.Second*5, func() error { + err = pingerFn(pingerConn) + if err != nil { + return err + } + return nil + }) + if err != nil { + t.Fatal(err) + } + + var pongsRX int + for { + pingerErrCh := make(chan error) + go func() { + pingerErrCh <- pingerFn(pingerConn) + }() + + select { + case err := <-pongerErrCh: + t.Fatal(err) + case err := <-pingerErrCh: + if err != nil { + t.Fatal(err) + } + } + + pongsRX++ + if pongsRX == wantPongs { + break + } + } + } + + d1.MustCleanShutdown(t) +} + // testEnv contains the test environment (set of servers) used by one // or more nodes. 
type testEnv struct { - t testing.TB - tunMode bool - cli string - daemon string + t testing.TB + tunMode bool + cli string + daemon string + loopbackPort *int LogCatcher *LogCatcher LogCatcherServer *httptest.Server @@ -1425,6 +1803,9 @@ func (n *testNode) StartDaemonAsIPNGOOS(ipnGOOS string) *Daemon { "TS_DISABLE_PORTMAPPER=1", // shouldn't be needed; test is all localhost "TS_DEBUG_LOG_RATE=all", ) + if n.env.loopbackPort != nil { + cmd.Env = append(cmd.Env, "TS_DEBUG_NETSTACK_LOOPBACK_PORT="+strconv.Itoa(*n.env.loopbackPort)) + } if version.IsRace() { cmd.Env = append(cmd.Env, "GORACE=halt_on_error=1") } diff --git a/tstest/integration/nat/nat_test.go b/tstest/integration/nat/nat_test.go index d88a56e74cfbf..a8c20d6bd634f 100644 --- a/tstest/integration/nat/nat_test.go +++ b/tstest/integration/nat/nat_test.go @@ -95,6 +95,10 @@ func findKernelPath(goMod string) (string, error) { type addNodeFunc func(c *vnet.Config) *vnet.Node // returns nil to omit test +func v6cidr(n int) string { + return fmt.Sprintf("2000:%d::1/64", n) +} + func easy(c *vnet.Config) *vnet.Node { n := c.NumNodes() + 1 return c.AddNode(c.AddNetwork( @@ -102,6 +106,31 @@ func easy(c *vnet.Config) *vnet.Node { fmt.Sprintf("192.168.%d.1/24", n), vnet.EasyNAT)) } +func easyAnd6(c *vnet.Config) *vnet.Node { + n := c.NumNodes() + 1 + return c.AddNode(c.AddNetwork( + fmt.Sprintf("2.%d.%d.%d", n, n, n), // public IP + fmt.Sprintf("192.168.%d.1/24", n), + v6cidr(n), + vnet.EasyNAT)) +} + +func v6AndBlackholedIPv4(c *vnet.Config) *vnet.Node { + n := c.NumNodes() + 1 + nw := c.AddNetwork( + fmt.Sprintf("2.%d.%d.%d", n, n, n), // public IP + fmt.Sprintf("192.168.%d.1/24", n), + v6cidr(n), + vnet.EasyNAT) + nw.SetBlackholedIPv4(true) + return c.AddNode(nw) +} + +func just6(c *vnet.Config) *vnet.Node { + n := c.NumNodes() + 1 + return c.AddNode(c.AddNetwork(v6cidr(n))) // public IPv6 prefix +} + // easy + host firewall func easyFW(c *vnet.Config) *vnet.Node { n := c.NumNodes() + 1 @@ -192,21 +221,24 @@ 
func hardPMP(c *vnet.Config) *vnet.Node { fmt.Sprintf("10.7.%d.1/24", n), vnet.HardNAT, vnet.NATPMP)) } -func (nt *natTest) runTest(node1, node2 addNodeFunc) pingRoute { +func (nt *natTest) runTest(addNode ...addNodeFunc) pingRoute { + if len(addNode) < 1 || len(addNode) > 2 { + nt.tb.Fatalf("runTest: invalid number of nodes %v; want 1 or 2", len(addNode)) + } t := nt.tb var c vnet.Config c.SetPCAPFile(*pcapFile) - nodes := []*vnet.Node{ - node1(&c), - node2(&c), - } - if nodes[0] == nil || nodes[1] == nil { - t.Skip("skipping test; not applicable combination") - } - if *logTailscaled { - nodes[0].SetVerboseSyslog(true) - nodes[1].SetVerboseSyslog(true) + nodes := []*vnet.Node{} + for _, fn := range addNode { + node := fn(&c) + if node == nil { + t.Skip("skipping test; not applicable combination") + } + nodes = append(nodes, node) + if *logTailscaled { + node.SetVerboseSyslog(true) + } } var err error @@ -255,6 +287,11 @@ func (nt *natTest) runTest(node1, node2 addNodeFunc) pingRoute { for _, e := range node.Env() { fmt.Fprintf(&envBuf, " tailscaled.env=%s=%s", e.Key, e.Value) } + sysLogAddr := net.JoinHostPort(vnet.FakeSyslogIPv4().String(), "995") + if node.IsV6Only() { + fmt.Fprintf(&envBuf, " tta.nameserver=%s", vnet.FakeDNSIPv6()) + sysLogAddr = net.JoinHostPort(vnet.FakeSyslogIPv6().String(), "995") + } envStr := envBuf.String() cmd := exec.Command("qemu-system-x86_64", @@ -262,7 +299,7 @@ func (nt *natTest) runTest(node1, node2 addNodeFunc) pingRoute { "-m", "384M", "-nodefaults", "-no-user-config", "-nographic", "-kernel", nt.kernel, - "-append", "console=hvc0 root=PARTUUID=60c24cc1-f3f9-427a-8199-76baa2d60001/PARTNROFF=1 ro init=/gokrazy/init panic=10 oops=panic pci=off nousb tsc=unstable clocksource=hpet gokrazy.remote_syslog.target=52.52.0.9:995 tailscale-tta=1"+envStr, + "-append", "console=hvc0 root=PARTUUID=60c24cc1-f3f9-427a-8199-76baa2d60001/PARTNROFF=1 ro init=/gokrazy/init panic=10 oops=panic pci=off nousb tsc=unstable clocksource=hpet 
gokrazy.remote_syslog.target="+sysLogAddr+" tailscale-tta=1"+envStr, "-drive", "id=blk0,file="+disk+",format=qcow2", "-device", "virtio-blk-device,drive=blk0", "-netdev", "stream,id=net0,addr.type=unix,addr.path="+sockAddr, @@ -287,16 +324,18 @@ func (nt *natTest) runTest(node1, node2 addNodeFunc) pingRoute { ctx, cancel := context.WithTimeout(context.Background(), 60*time.Second) defer cancel() - lc1 := nt.vnet.NodeAgentClient(nodes[0]) - lc2 := nt.vnet.NodeAgentClient(nodes[1]) - clients := []*vnet.NodeAgentClient{lc1, lc2} + var clients []*vnet.NodeAgentClient + for _, n := range nodes { + clients = append(clients, nt.vnet.NodeAgentClient(n)) + } + sts := make([]*ipnstate.Status, len(nodes)) var eg errgroup.Group - var sts [2]*ipnstate.Status for i, c := range clients { i, c := i, c eg.Go(func() error { node := nodes[i] + t.Logf("%v calling Status...", node) st, err := c.Status(ctx) if err != nil { return fmt.Errorf("%v status: %w", node, err) @@ -334,7 +373,11 @@ func (nt *natTest) runTest(node1, node2 addNodeFunc) pingRoute { defer nt.vnet.Close() - pingRes, err := ping(ctx, lc1, sts[1].Self.TailscaleIPs[0]) + if len(nodes) < 2 { + return "" + } + + pingRes, err := ping(ctx, clients[0], sts[1].Self.TailscaleIPs[0]) if err != nil { t.Fatalf("ping failure: %v", err) } @@ -445,6 +488,37 @@ func TestEasyEasy(t *testing.T) { nt.want(routeDirect) } +func TestSingleJustIPv6(t *testing.T) { + nt := newNatTest(t) + nt.runTest(just6) +} + +var knownBroken = flag.Bool("known-broken", false, "run known-broken tests") + +// TestSingleDualBrokenIPv4 tests a dual-stack node with broken +// (blackholed) IPv4. 
+// +// See https://github.com/tailscale/tailscale/issues/13346 +func TestSingleDualBrokenIPv4(t *testing.T) { + if !*knownBroken { + t.Skip("skipping known-broken test; set --known-broken to run; see https://github.com/tailscale/tailscale/issues/13346") + } + nt := newNatTest(t) + nt.runTest(v6AndBlackholedIPv4) +} + +func TestJustIPv6(t *testing.T) { + nt := newNatTest(t) + nt.runTest(just6, just6) + nt.want(routeDirect) +} + +func TestEasy4AndJust6(t *testing.T) { + nt := newNatTest(t) + nt.runTest(easyAnd6, just6) + nt.want(routeDirect) +} + func TestSameLAN(t *testing.T) { nt := newNatTest(t) nt.runTest(easy, sameLAN) diff --git a/tstest/integration/vms/distros.go b/tstest/integration/vms/distros.go index ea43e271b5448..ca2bf53ba66a7 100644 --- a/tstest/integration/vms/distros.go +++ b/tstest/integration/vms/distros.go @@ -11,8 +11,6 @@ import ( "github.com/tailscale/hujson" ) -// go:generate go run ./gen - type Distro struct { Name string // amazon-linux URL string // URL to a qcow2 image diff --git a/tstest/natlab/vnet/conf.go b/tstest/natlab/vnet/conf.go index 1703e0c1296a8..cf71a66743e1c 100644 --- a/tstest/natlab/vnet/conf.go +++ b/tstest/natlab/vnet/conf.go @@ -6,7 +6,7 @@ package vnet import ( "cmp" "fmt" - "log" + "iter" "net/netip" "os" "slices" @@ -29,19 +29,31 @@ import ( // values to modify the config before calling NewServer. // Once the NewServer is called, Config is no longer used. type Config struct { - nodes []*Node - networks []*Network - pcapFile string + nodes []*Node + networks []*Network + pcapFile string + blendReality bool } +// SetPCAPFile sets the filename to write a pcap file to, +// or empty to disable pcap file writing. func (c *Config) SetPCAPFile(file string) { c.pcapFile = file } +// NumNodes returns the number of nodes in the configuration. func (c *Config) NumNodes() int { return len(c.nodes) } +// SetBlendReality sets whether to blend the real controlplane.tailscale.com and +// DERP servers into the virtual network. 
This is mostly useful for interactive +// testing when working on natlab. +func (c *Config) SetBlendReality(v bool) { + c.blendReality = v +} + +// FirstNetwork returns the first network in the config, or nil if none. func (c *Config) FirstNetwork() *Network { if len(c.networks) == 0 { return nil @@ -49,6 +61,30 @@ func (c *Config) FirstNetwork() *Network { return c.networks[0] } +func (c *Config) Nodes() iter.Seq2[int, *Node] { + return slices.All(c.nodes) +} + +func nodeMac(n int) MAC { + // 52=TS then 0xcc for cccclient + return MAC{0x52, 0xcc, 0xcc, 0xcc, 0xcc, byte(n)} +} + +func routerMac(n int) MAC { + // 52=TS then 0xee for 'etwork + return MAC{0x52, 0xee, 0xee, 0xee, 0xee, byte(n)} +} + +var lanSLAACBase = netip.MustParseAddr("fe80::50cc:ccff:fecc:cc01") + +// nodeLANIP6 returns a node number's Link Local SLAAC IPv6 address, +// such as fe80::50cc:ccff:fecc:cc03 for node 3. +func nodeLANIP6(n int) netip.Addr { + a := lanSLAACBase.As16() + a[15] = byte(n) + return netip.AddrFrom16(a) +} + // AddNode creates a new node in the world. // // The opts may be of the following types: @@ -58,10 +94,10 @@ func (c *Config) FirstNetwork() *Network { // On an error or unknown opt type, AddNode returns a // node with a carried error that gets returned later. func (c *Config) AddNode(opts ...any) *Node { - num := len(c.nodes) + num := len(c.nodes) + 1 n := &Node{ - num: num + 1, - mac: MAC{0x52, 0xcc, 0xcc, 0xcc, 0xcc, byte(num) + 1}, // 52=TS then 0xcc for ccclient + num: num, + mac: nodeMac(num), } c.nodes = append(c.nodes, n) for _, o := range opts { @@ -112,24 +148,32 @@ type TailscaledEnv struct { // The opts may be of the following types: // - string IP address, for the network's WAN IP (if any) // - string netip.Prefix, for the network's LAN IP (defaults to 192.168.0.0/24) +// if IPv4, or its WAN IPv6 + CIDR (e.g. 
"2000:52::1/64") // - NAT, the type of NAT to use // - NetworkService, a service to add to the network // // On an error or unknown opt type, AddNetwork returns a // network with a carried error that gets returned later. func (c *Config) AddNetwork(opts ...any) *Network { - num := len(c.networks) + num := len(c.networks) + 1 n := &Network{ - mac: MAC{0x52, 0xee, 0xee, 0xee, 0xee, byte(num) + 1}, // 52=TS then 0xee for 'etwork + num: num, + mac: routerMac(num), } c.networks = append(c.networks, n) for _, o := range opts { switch o := o.(type) { case string: if ip, err := netip.ParseAddr(o); err == nil { - n.wanIP = ip + n.wanIP4 = ip } else if ip, err := netip.ParsePrefix(o); err == nil { - n.lanIP = ip + // If the prefix is IPv4, treat it as the router's internal IPv4 address + CIDR. + // If the prefix is IPv6, treat it as the router's WAN IPv6 + CIDR (typically a /64). + if ip.Addr().Is4() { + n.lanIP4 = ip + } else if ip.Addr().Is6() { + n.wanIP6 = ip + } } else { if n.err == nil { n.err = fmt.Errorf("unknown string option %q", o) @@ -196,6 +240,21 @@ func (n *Node) SetVerboseSyslog(v bool) { n.verboseSyslog = v } +// IsV6Only reports whether this node is only connected to IPv6 networks. +func (n *Node) IsV6Only() bool { + for _, net := range n.nets { + if net.CanV4() { + return false + } + } + for _, net := range n.nets { + if net.CanV6() { + return true + } + } + return false +} + // Network returns the first network this node is connected to, // or nil if none. func (n *Node) Network() *Network { @@ -207,12 +266,16 @@ func (n *Node) Network() *Network { // Network is the configuration of a network in the virtual network. 
type Network struct { + num int // 1-based mac MAC // MAC address of the router/gateway natType NAT - wanIP netip.Addr - lanIP netip.Prefix - nodes []*Node + wanIP6 netip.Prefix // global unicast router in host bits; CIDR is /64 delegated to LAN + + wanIP4 netip.Addr // IPv4 WAN IP, if any + lanIP4 netip.Prefix + nodes []*Node + breakWAN4 bool // whether to break WAN IPv4 connectivity svcs set.Set[NetworkService] @@ -220,6 +283,20 @@ type Network struct { err error // carried error } +// SetBlackholedIPv4 sets whether the network should blackhole all IPv4 traffic +// out to the Internet. (DHCP etc continues to work on the LAN.) +func (n *Network) SetBlackholedIPv4(v bool) { + n.breakWAN4 = v +} + +func (n *Network) CanV4() bool { + return n.lanIP4.IsValid() || n.wanIP4.IsValid() +} + +func (n *Network) CanV6() bool { + return n.wanIP6.IsValid() +} + func (n *Network) CanTakeMoreNodes() bool { if n.natType == One2OneNAT { return len(n.nodes) == 0 @@ -270,24 +347,44 @@ func (s *Server) initFromConfig(c *Config) error { if conf.err != nil { return conf.err } - if !conf.lanIP.IsValid() { - conf.lanIP = netip.MustParsePrefix("192.168.0.0/24") + if !conf.lanIP4.IsValid() && !conf.wanIP6.IsValid() { + conf.lanIP4 = netip.MustParsePrefix("192.168.0.0/24") } n := &network{ - s: s, - mac: conf.mac, - portmap: conf.svcs.Contains(NATPMP), // TODO: expand network.portmap - wanIP: conf.wanIP, - lanIP: conf.lanIP, - nodesByIP: map[netip.Addr]*node{}, - logf: logger.WithPrefix(log.Printf, fmt.Sprintf("[net-%v] ", conf.mac)), + num: conf.num, + s: s, + mac: conf.mac, + portmap: conf.svcs.Contains(NATPMP), // TODO: expand network.portmap + wanIP6: conf.wanIP6, + v4: conf.lanIP4.IsValid(), + v6: conf.wanIP6.IsValid(), + wanIP4: conf.wanIP4, + lanIP4: conf.lanIP4, + breakWAN4: conf.breakWAN4, + nodesByIP4: map[netip.Addr]*node{}, + nodesByMAC: map[MAC]*node{}, + logf: logger.WithPrefix(s.logf, fmt.Sprintf("[net-%v] ", conf.mac)), } netOfConf[conf] = n s.networks.Add(n) - if _, ok := 
s.networkByWAN[conf.wanIP]; ok { - return fmt.Errorf("two networks have the same WAN IP %v; Anycast not (yet?) supported", conf.wanIP) + if conf.wanIP4.IsValid() { + if conf.wanIP4.Is6() { + return fmt.Errorf("invalid IPv6 address in wanIP") + } + if _, ok := s.networkByWAN.Lookup(conf.wanIP4); ok { + return fmt.Errorf("two networks have the same WAN IP %v; Anycast not (yet?) supported", conf.wanIP4) + } + s.networkByWAN.Insert(netip.PrefixFrom(conf.wanIP4, 32), n) + } + if conf.wanIP6.IsValid() { + if conf.wanIP6.Addr().Is4() { + return fmt.Errorf("invalid IPv4 address in wanIP6") + } + if _, ok := s.networkByWAN.LookupPrefix(conf.wanIP6); ok { + return fmt.Errorf("two networks have the same WAN IPv6 %v; Anycast not (yet?) supported", conf.wanIP6) + } + s.networkByWAN.Insert(conf.wanIP6, n) } - s.networkByWAN[conf.wanIP] = n n.lanInterfaceID = must.Get(s.pcapWriter.AddInterface(pcapgo.NgInterface{ Name: fmt.Sprintf("network%d-lan", i+1), LinkType: layers.LinkTypeIPv4, @@ -318,13 +415,16 @@ func (s *Server) initFromConfig(c *Config) error { s.nodes = append(s.nodes, n) s.nodeByMAC[n.mac] = n - // Allocate a lanIP for the node. Use the network's CIDR and use final - // octet 101 (for first node), 102, etc. The node number comes from the - // last octent of the MAC address (0-based) - ip4 := n.net.lanIP.Addr().As4() - ip4[3] = 100 + n.mac[5] - n.lanIP = netip.AddrFrom4(ip4) - n.net.nodesByIP[n.lanIP] = n + if n.net.v4 { + // Allocate a lanIP for the node. Use the network's CIDR and use final + // octet 101 (for first node), 102, etc. 
The node number comes from the + // last octet of the MAC address (0-based) + ip4 := n.net.lanIP4.Addr().As4() + ip4[3] = 100 + n.mac[5] + n.lanIP = netip.AddrFrom4(ip4) + n.net.nodesByIP4[n.lanIP] = n + } + n.net.nodesByMAC[n.mac] = n } // Now that nodes are populated, set up NAT: diff --git a/tstest/natlab/vnet/pcap.go b/tstest/natlab/vnet/pcap.go index fa1904667790a..41a443e30b6c5 100644 --- a/tstest/natlab/vnet/pcap.go +++ b/tstest/natlab/vnet/pcap.go @@ -21,6 +21,15 @@ type pcapWriter struct { w *pcapgo.NgWriter } +func do(fs ...func() error) error { + for _, f := range fs { + if err := f(); err != nil { + return err + } + } + return nil +} + func (p *pcapWriter) WritePacket(ci gopacket.CaptureInfo, data []byte) error { if p == nil { return nil @@ -30,7 +39,11 @@ func (p *pcapWriter) WritePacket(ci gopacket.CaptureInfo, data []byte) error { if p.w == nil { return io.ErrClosedPipe } - return p.w.WritePacket(ci, data) + return do( + func() error { return p.w.WritePacket(ci, data) }, + p.w.Flush, + p.f.Sync, + ) } func (p *pcapWriter) AddInterface(i pcapgo.NgInterface) (int, error) { diff --git a/tstest/natlab/vnet/vip.go b/tstest/natlab/vnet/vip.go new file mode 100644 index 0000000000000..c75f17cee5393 --- /dev/null +++ b/tstest/natlab/vnet/vip.go @@ -0,0 +1,99 @@ +// Copyright (c) Tailscale Inc & AUTHORS +// SPDX-License-Identifier: BSD-3-Clause + +package vnet + +import ( + "fmt" + "net/netip" +) + +var vips = map[string]virtualIP{} // DNS name => details + +var ( + fakeDNS = newVIP("dns", "4.11.4.11", "2411::411") + fakeProxyControlplane = newVIP("controlplane.tailscale.com", 1) + fakeTestAgent = newVIP("test-driver.tailscale", 2) + fakeControl = newVIP("control.tailscale", 3) + fakeDERP1 = newVIP("derp1.tailscale", "33.4.0.1") // 3340=DERP; 1=derp 1 + fakeDERP2 = newVIP("derp2.tailscale", "33.4.0.2") // 3340=DERP; 2=derp 2 + fakeLogCatcher = newVIP("log.tailscale.io", 4) + fakeSyslog = newVIP("syslog.tailscale", 9) +) + +type virtualIP struct { + name 
string // for DNS + v4 netip.Addr + v6 netip.Addr +} + +func (v virtualIP) Match(a netip.Addr) bool { + return v.v4 == a.Unmap() || v.v6 == a +} + +// FakeDNSIPv4 returns the fake DNS IPv4 address. +func FakeDNSIPv4() netip.Addr { return fakeDNS.v4 } + +// FakeDNSIPv6 returns the fake DNS IPv6 address. +func FakeDNSIPv6() netip.Addr { return fakeDNS.v6 } + +// FakeSyslogIPv4 returns the fake syslog IPv4 address. +func FakeSyslogIPv4() netip.Addr { return fakeSyslog.v4 } + +// FakeSyslogIPv6 returns the fake syslog IPv6 address. +func FakeSyslogIPv6() netip.Addr { return fakeSyslog.v6 } + +// newVIP returns a new virtual IP. +// +// opts may be an IPv4 or an IPv6 (in string form) or an int (bounded by uint8) to +// use IPv4 of 52.52.0.x. +// +// If the IPv6 is omitted, one is derived from the IPv4. +// +// If an opt is invalid or the DNS name is already used, it panics. +func newVIP(name string, opts ...any) (v virtualIP) { + if _, ok := vips[name]; ok { + panic(fmt.Sprintf("duplicate VIP %q", name)) + } + v.name = name + for _, o := range opts { + switch o := o.(type) { + case string: + if ip, err := netip.ParseAddr(o); err == nil { + if ip.Is4() { + v.v4 = ip + } else if ip.Is6() { + v.v6 = ip + } + } else { + panic(fmt.Sprintf("unsupported string option %q", o)) + } + case int: + if o <= 0 || o > 255 { + panic(fmt.Sprintf("bad octet %d", o)) + } + v.v4 = netip.AddrFrom4([4]byte{52, 52, 0, byte(o)}) + default: + panic(fmt.Sprintf("unknown option type %T", o)) + } + } + if !v.v6.IsValid() && v.v4.IsValid() { + // Map 1.2.3.4 to 2052::0102:0304 + // But make 52.52.0.x map to 2052::x + a := [16]byte{0: 0x20, 1: 0x52} // 2052:: + v4 := v.v4.As4() + if v4[0] == 52 && v4[1] == 52 && v4[2] == 0 { + a[15] = v4[3] + } else { + copy(a[12:], v.v4.AsSlice()) + } + v.v6 = netip.AddrFrom16(a) + } + for _, b := range vips { + if b.Match(v.v4) || b.Match(v.v6) { + panic(fmt.Sprintf("VIP %q collides with %q", name, v.name)) + } + } + vips[name] = v + return v +} diff --git 
a/tstest/natlab/vnet/vnet.go b/tstest/natlab/vnet/vnet.go index 0205559c9aacd..919ae1fa163be 100644 --- a/tstest/natlab/vnet/vnet.go +++ b/tstest/natlab/vnet/vnet.go @@ -9,12 +9,9 @@ package vnet // TODO: -// - [ ] port mapping actually working -// - [ ] conf to let you firewall things // - [ ] tests for NAT tables import ( - "bufio" "bytes" "context" "crypto/tls" @@ -23,7 +20,9 @@ import ( "errors" "fmt" "io" + "iter" "log" + "maps" "math/rand/v2" "net" "net/http" @@ -35,6 +34,7 @@ import ( "sync/atomic" "time" + "github.com/gaissmai/bart" "github.com/google/gopacket" "github.com/google/gopacket/layers" "go4.org/mem" @@ -45,6 +45,7 @@ import ( "gvisor.dev/gvisor/pkg/tcpip/link/channel" "gvisor.dev/gvisor/pkg/tcpip/network/arp" "gvisor.dev/gvisor/pkg/tcpip/network/ipv4" + "gvisor.dev/gvisor/pkg/tcpip/network/ipv6" "gvisor.dev/gvisor/pkg/tcpip/stack" "gvisor.dev/gvisor/pkg/tcpip/transport/icmp" "gvisor.dev/gvisor/pkg/tcpip/transport/tcp" @@ -66,7 +67,12 @@ import ( ) const nicID = 1 -const stunPort = 3478 + +const ( + stunPort = 3478 + pcpPort = 5351 + ssdpPort = 1900 +) func (s *Server) PopulateDERPMapIPs() error { out, err := exec.Command("tailscale", "debug", "derp-map").Output() @@ -94,7 +100,7 @@ func (n *network) InitNAT(natType NAT) error { } t, err := ctor(n) if err != nil { - return fmt.Errorf("error creating NAT type %q for network %v: %w", natType, n.wanIP, err) + return fmt.Errorf("error creating NAT type %q for network %v: %w", natType, n.wanIP4, err) } n.setNATTable(t) n.natStyle.Store(natType) @@ -109,22 +115,23 @@ func (n *network) setNATTable(nt NATTable) { // SoleLANIP implements [IPPool]. func (n *network) SoleLANIP() (netip.Addr, bool) { - if len(n.nodesByIP) != 1 { + if len(n.nodesByIP4) != 1 { return netip.Addr{}, false } - for ip := range n.nodesByIP { + for ip := range n.nodesByIP4 { return ip, true } return netip.Addr{}, false } // WANIP implements [IPPool]. 
-func (n *network) WANIP() netip.Addr { return n.wanIP } +func (n *network) WANIP() netip.Addr { return n.wanIP4 } func (n *network) initStack() error { n.ns = stack.New(stack.Options{ NetworkProtocols: []stack.NetworkProtocolFactory{ ipv4.NewProtocol, + ipv6.NewProtocol, arp.NewProtocol, }, TransportProtocols: []stack.TransportProtocolFactory{ @@ -144,25 +151,48 @@ func (n *network) initStack() error { n.ns.SetPromiscuousMode(nicID, true) n.ns.SetSpoofing(nicID, true) - prefix := tcpip.AddrFrom4Slice(n.lanIP.Addr().AsSlice()).WithPrefix() - prefix.PrefixLen = n.lanIP.Bits() - if tcpProb := n.ns.AddProtocolAddress(nicID, tcpip.ProtocolAddress{ - Protocol: ipv4.ProtocolNumber, - AddressWithPrefix: prefix, - }, stack.AddressProperties{}); tcpProb != nil { - return errors.New(tcpProb.String()) - } + var routes []tcpip.Route - ipv4Subnet, err := tcpip.NewSubnet(tcpip.AddrFromSlice(make([]byte, 4)), tcpip.MaskFromBytes(make([]byte, 4))) - if err != nil { - return fmt.Errorf("could not create IPv4 subnet: %v", err) - } - n.ns.SetRouteTable([]tcpip.Route{ - { + if n.v4 { + prefix := tcpip.AddrFrom4Slice(n.lanIP4.Addr().AsSlice()).WithPrefix() + prefix.PrefixLen = n.lanIP4.Bits() + if tcpProb := n.ns.AddProtocolAddress(nicID, tcpip.ProtocolAddress{ + Protocol: ipv4.ProtocolNumber, + AddressWithPrefix: prefix, + }, stack.AddressProperties{}); tcpProb != nil { + return errors.New(tcpProb.String()) + } + + ipv4Subnet, err := tcpip.NewSubnet(tcpip.AddrFromSlice(make([]byte, 4)), tcpip.MaskFromBytes(make([]byte, 4))) + if err != nil { + return fmt.Errorf("could not create IPv4 subnet: %v", err) + } + routes = append(routes, tcpip.Route{ Destination: ipv4Subnet, NIC: nicID, - }, - }) + }) + } + if n.v6 { + prefix := tcpip.AddrFrom16(n.wanIP6.Addr().As16()).WithPrefix() + prefix.PrefixLen = n.wanIP6.Bits() + if tcpProb := n.ns.AddProtocolAddress(nicID, tcpip.ProtocolAddress{ + Protocol: ipv6.ProtocolNumber, + AddressWithPrefix: prefix, + }, stack.AddressProperties{}); tcpProb != 
nil { + return errors.New(tcpProb.String()) + } + + ipv6Subnet, err := tcpip.NewSubnet(tcpip.AddrFromSlice(make([]byte, 16)), tcpip.MaskFromBytes(make([]byte, 16))) + if err != nil { + return fmt.Errorf("could not create IPv6 subnet: %v", err) + } + routes = append(routes, tcpip.Route{ + Destination: ipv6Subnet, + NIC: nicID, + }) + } + + n.ns.SetRouteTable(routes) const tcpReceiveBufferSize = 0 // default const maxInFlightConnectionAttempts = 8192 @@ -181,57 +211,66 @@ func (n *network) initStack() error { } continue } - - ipRaw := pkt.ToView().AsSlice() - goPkt := gopacket.NewPacket( - ipRaw, - layers.LayerTypeIPv4, gopacket.Lazy) - layerV4 := goPkt.Layer(layers.LayerTypeIPv4).(*layers.IPv4) - - dstIP, _ := netip.AddrFromSlice(layerV4.DstIP) - node, ok := n.nodesByIP[dstIP] - if !ok { - log.Printf("no MAC for dest IP %v", dstIP) - continue - } - eth := &layers.Ethernet{ - SrcMAC: n.mac.HWAddr(), - DstMAC: node.mac.HWAddr(), - EthernetType: layers.EthernetTypeIPv4, - } - buffer := gopacket.NewSerializeBuffer() - options := gopacket.SerializeOptions{FixLengths: true, ComputeChecksums: true} - sls := []gopacket.SerializableLayer{ - eth, - } - for _, layer := range goPkt.Layers() { - sl, ok := layer.(gopacket.SerializableLayer) - if !ok { - log.Fatalf("layer %s is not serializable", layer.LayerType().String()) - } - switch gl := layer.(type) { - case *layers.TCP: - gl.SetNetworkLayerForChecksum(layerV4) - case *layers.UDP: - gl.SetNetworkLayerForChecksum(layerV4) - } - sls = append(sls, sl) - } - - if err := gopacket.SerializeLayers(buffer, options, sls...); err != nil { - log.Printf("Serialize error: %v", err) - continue - } - if writeFunc, ok := n.writeFunc.Load(node.mac); ok { - writeFunc(buffer.Bytes()) - } else { - log.Printf("No writeFunc for %v", node.mac) - } + n.handleIPPacketFromGvisor(pkt.ToView().AsSlice()) } }() return nil } +func (n *network) handleIPPacketFromGvisor(ipRaw []byte) { + if len(ipRaw) == 0 { + panic("empty packet from gvisor") + } + var 
goPkt gopacket.Packet + ipVer := ipRaw[0] >> 4 // 4 or 6 + switch ipVer { + case 4: + goPkt = gopacket.NewPacket( + ipRaw, + layers.LayerTypeIPv4, gopacket.Lazy) + case 6: + goPkt = gopacket.NewPacket( + ipRaw, + layers.LayerTypeIPv6, gopacket.Lazy) + default: + panic(fmt.Sprintf("unexpected IP packet version %v", ipVer)) + } + flow, ok := flow(goPkt) + if !ok { + panic("unexpected gvisor packet") + } + node, ok := n.nodeByIP(flow.dst) + if !ok { + n.logf("no node for netstack dest IP %v", flow.dst) + return + } + eth := &layers.Ethernet{ + SrcMAC: n.mac.HWAddr(), + DstMAC: node.mac.HWAddr(), + } + sls := []gopacket.SerializableLayer{ + eth, + } + for _, layer := range goPkt.Layers() { + sl, ok := layer.(gopacket.SerializableLayer) + if !ok { + log.Fatalf("layer %s is not serializable", layer.LayerType().String()) + } + sls = append(sls, sl) + } + + resPkt, err := mkPacket(sls...) + if err != nil { + n.logf("gvisor: serialize error: %v", err) + return + } + if nw, ok := n.writers.Load(node.mac); ok { + nw.write(resPkt) + } else { + n.logf("gvisor write: no writeFunc for %v", node.mac) + } +} + func netaddrIPFromNetstackIP(s tcpip.Address) netip.Addr { switch s.Len() { case 4: @@ -259,6 +298,8 @@ func (n *network) acceptTCP(r *tcp.ForwarderRequest) { return } + log.Printf("vnet-AcceptTCP: %v", stringifyTEI(reqDetails)) + var wq waiter.Queue ep, err := r.CreateEndpoint(&wq) if err != nil { @@ -276,37 +317,21 @@ func (n *network) acceptTCP(r *tcp.ForwarderRequest) { return } - if destPort == 124 { - node, ok := n.nodesByIP[clientRemoteIP] + if destPort == 8008 && fakeTestAgent.Match(destIP) { + node, ok := n.nodeByIP(clientRemoteIP) if !ok { - log.Printf("no node for TCP 124 connection from %v", clientRemoteIP) + n.logf("unknown client IP %v trying to connect to test driver", clientRemoteIP) r.Complete(true) return } r.Complete(false) tc := gonet.NewTCPConn(&wq, ep) - - go func() { - defer tc.Close() - bs := bufio.NewScanner(tc) - for bs.Scan() { - line := bs.Text() - 
log.Printf("LOG from %v: %s", node, line) - } - }() - return - } - - if destPort == 8008 && destIP == fakeTestAgentIP { - r.Complete(false) - tc := gonet.NewTCPConn(&wq, ep) - node := n.nodesByIP[clientRemoteIP] ac := &agentConn{node, tc} n.s.addIdleAgentConn(ac) return } - if destPort == 80 && destIP == fakeControlIP { + if destPort == 80 && fakeControl.Match(destIP) { r.Complete(false) tc := gonet.NewTCPConn(&wq, ep) hs := &http.Server{Handler: n.s.control} @@ -314,40 +339,39 @@ func (n *network) acceptTCP(r *tcp.ForwarderRequest) { return } - if destPort == 443 && (destIP == fakeDERP1IP || destIP == fakeDERP2IP) { - ds := n.s.derps[0] - if destIP == fakeDERP2IP { - ds = n.s.derps[1] - } + if fakeDERP1.Match(destIP) || fakeDERP2.Match(destIP) { + if destPort == 443 { + ds := n.s.derps[0] + if fakeDERP2.Match(destIP) { + ds = n.s.derps[1] + } - r.Complete(false) - tc := gonet.NewTCPConn(&wq, ep) - tlsConn := tls.Server(tc, ds.tlsConfig) - hs := &http.Server{Handler: ds.handler} - go hs.Serve(netutil.NewOneConnListener(tlsConn, nil)) - return - } - if destPort == 80 && (destIP == fakeDERP1IP || destIP == fakeDERP2IP) { - r.Complete(false) - tc := gonet.NewTCPConn(&wq, ep) - hs := &http.Server{Handler: n.s.derps[0].handler} - go hs.Serve(netutil.NewOneConnListener(tc, nil)) - return + r.Complete(false) + tc := gonet.NewTCPConn(&wq, ep) + tlsConn := tls.Server(tc, ds.tlsConfig) + hs := &http.Server{Handler: ds.handler} + go hs.Serve(netutil.NewOneConnListener(tlsConn, nil)) + return + } + if destPort == 80 { + r.Complete(false) + tc := gonet.NewTCPConn(&wq, ep) + hs := &http.Server{Handler: n.s.derps[0].handler} + go hs.Serve(netutil.NewOneConnListener(tc, nil)) + return + } } - if destPort == 443 && destIP == fakeLogCatcherIP { - + if destPort == 443 && fakeLogCatcher.Match(destIP) { r.Complete(false) tc := gonet.NewTCPConn(&wq, ep) go n.serveLogCatcherConn(clientRemoteIP, tc) return } - log.Printf("vnet-AcceptTCP: %v", stringifyTEI(reqDetails)) - var targetDial 
string if n.s.derpIPs.Contains(destIP) { targetDial = destIP.String() + ":" + strconv.Itoa(int(destPort)) - } else if destIP == fakeProxyControlplaneIP { + } else if fakeProxyControlplane.Match(destIP) { targetDial = "controlplane.tailscale.com:" + strconv.Itoa(int(destPort)) } if targetDial != "" { @@ -400,7 +424,7 @@ func (n *network) serveLogCatcherConn(clientRemoteIP netip.Addr, c net.Conn) { log.Printf("Logs decode error: %v", err) return } - node := n.nodesByIP[clientRemoteIP] + node := n.nodesByIP4[clientRemoteIP] if node != nil { node.logMu.Lock() defer node.logMu.Unlock() @@ -415,17 +439,6 @@ func (n *network) serveLogCatcherConn(clientRemoteIP netip.Addr, c net.Conn) { hs.Serve(netutil.NewOneConnListener(tlsConn, nil)) } -var ( - fakeDNSIP = netip.AddrFrom4([4]byte{4, 11, 4, 11}) - fakeProxyControlplaneIP = netip.AddrFrom4([4]byte{52, 52, 0, 1}) // real controlplane.tailscale.com proxy - fakeTestAgentIP = netip.AddrFrom4([4]byte{52, 52, 0, 2}) - fakeControlIP = netip.AddrFrom4([4]byte{52, 52, 0, 3}) // 3=C for "Control" - fakeDERP1IP = netip.AddrFrom4([4]byte{33, 4, 0, 1}) // 3340=DERP; 1=derp 1 - fakeDERP2IP = netip.AddrFrom4([4]byte{33, 4, 0, 2}) // 3340=DERP; 1=derp 1 - fakeLogCatcherIP = netip.AddrFrom4([4]byte{52, 52, 0, 4}) - fakeSyslogIP = netip.AddrFrom4([4]byte{52, 52, 0, 9}) -) - type EthernetPacket struct { le *layers.Ethernet gp gopacket.Packet @@ -445,6 +458,12 @@ func (m MAC) IsBroadcast() bool { return m == MAC{0xff, 0xff, 0xff, 0xff, 0xff, 0xff} } +// IsIPv6Multicast reports whether m is an IPv6 multicast MAC address, +// typically one containing a solicited-node multicast address. +func (m MAC) IsIPv6Multicast() bool { + return m[0] == 0x33 && m[1] == 0x33 +} + func macOf(hwa net.HardwareAddr) (_ MAC, ok bool) { if len(hwa) != 6 { return MAC{}, false @@ -465,15 +484,39 @@ type portMapping struct { expiry time.Time } +// writerFunc is a function that writes an Ethernet frame to a connected client. 
+// +// ethFrame is the Ethernet frame to write. +// +// interfaceIndexID is the interface ID for the pcap file. +type writerFunc func(dst vmClient, ethFrame []byte, interfaceIndexID int) + +// networkWriter holds the arguments to a writerFunc and the writerFunc. +type networkWriter struct { + writer writerFunc // Function to write packets to the network + c vmClient + interfaceID int // The interface ID of the src node (for writing pcaps) +} + +func (nw networkWriter) write(b []byte) { + nw.writer(nw.c, b, nw.interfaceID) +} + type network struct { s *Server - mac MAC + num int // 1-based + mac MAC // of router portmap bool lanInterfaceID int wanInterfaceID int - wanIP netip.Addr - lanIP netip.Prefix // with host bits set (e.g. 192.168.2.1/24) - nodesByIP map[netip.Addr]*node + v4 bool // network supports IPv4 + v6 bool // network supports IPv6 + wanIP6 netip.Prefix // router's WAN IPv6, if any, as a /64. + wanIP4 netip.Addr // router's WAN IPv4, if any + lanIP4 netip.Prefix // router's LAN IP + CIDR (e.g. 192.168.2.1/24) + breakWAN4 bool // break WAN IPv4 connectivity + nodesByIP4 map[netip.Addr]*node // by LAN IPv4 + nodesByMAC map[MAC]*node logf func(format string, args ...any) ns *stack.Stack @@ -485,24 +528,46 @@ type network struct { portMap map[netip.AddrPort]portMapping // WAN ip:port -> LAN ip:port portMapFlow map[portmapFlowKey]netip.AddrPort // (lanAP, peerWANAP) -> portmapped wanAP - // writeFunc is a map of MAC -> func to write to that MAC. + macMu sync.Mutex + macOfIPv6 map[netip.Addr]MAC // IPv6 source IP -> MAC + + // writers is a map of MAC -> networkWriters to write packets to that MAC. + // It contains entries for connected nodes only. 
- writeFunc syncs.Map[MAC, func([]byte)] // MAC -> func to write to that MAC + writers syncs.Map[MAC, networkWriter] // MAC -> to networkWriter for that MAC } -func (n *network) registerWriter(mac MAC, f func([]byte)) { - if f != nil { - n.writeFunc.Store(mac, f) - } else { - n.writeFunc.Delete(mac) +// registerWriter registers a client address with a MAC address. +func (n *network) registerWriter(mac MAC, c vmClient) { + nw := networkWriter{ + writer: n.s.writeEthernetFrameToVM, + c: c, + } + if node, ok := n.s.nodeByMAC[mac]; ok { + nw.interfaceID = node.interfaceID } + n.writers.Store(mac, nw) +} + +func (n *network) unregisterWriter(mac MAC) { + n.writers.Delete(mac) +} + +// RegisteredWritersForTest returns the number of registered connections (VM +// guests with a known MAC to whom a packet can be sent) there are to the +// server. It exists for testing. +func (s *Server) RegisteredWritersForTest() int { + num := 0 + for n := range s.networks { + num += n.writers.Len() + } + return num } func (n *network) MACOfIP(ip netip.Addr) (_ MAC, ok bool) { - if n.lanIP.Addr() == ip { + if n.lanIP4.Addr() == ip { return n.mac, true } - if n, ok := n.nodesByIP[ip]; ok { + if n, ok := n.nodesByIP4[ip]; ok { return n.mac, true } return MAC{}, false @@ -559,23 +624,41 @@ type Server struct { wg sync.WaitGroup blendReality bool + optLogf func(format string, args ...any) // or nil to use log.Printf + derpIPs set.Set[netip.Addr] nodes []*node nodeByMAC map[MAC]*node networks set.Set[*network] - networkByWAN map[netip.Addr]*network + networkByWAN *bart.Table[*network] control *testcontrol.Server derps []*derpServer pcapWriter *pcapWriter + // writeMu serializes all writes to VM clients. 
+ writeMu sync.Mutex + scratch []byte + mu sync.Mutex agentConnWaiter map[*node]chan<- struct{} // signaled after added to set agentConns set.Set[*agentConn] // not keyed by node; should be small/cheap enough to scan all agentDialer map[*node]DialFunc } +func (s *Server) logf(format string, args ...any) { + if s.optLogf != nil { + s.optLogf(format, args...) + } else { + log.Printf(format, args...) + } +} + +func (s *Server) SetLoggerForTest(logf func(format string, args ...any)) { + s.optLogf = logf +} + type DialFunc func(ctx context.Context, network, address string) (net.Conn, error) var derpMap = &tailcfg.DERPMap{ @@ -589,7 +672,8 @@ var derpMap = &tailcfg.DERPMap{ Name: "1a", RegionID: 1, HostName: "derp1.tailscale", - IPv4: fakeDERP1IP.String(), + IPv4: fakeDERP1.v4.String(), + IPv6: fakeDERP1.v6.String(), InsecureForTests: true, CanPort80: true, }, @@ -604,7 +688,8 @@ var derpMap = &tailcfg.DERPMap{ Name: "2a", RegionID: 2, HostName: "derp2.tailscale", - IPv4: fakeDERP2IP.String(), + IPv4: fakeDERP2.v4.String(), + IPv6: fakeDERP2.v6.String(), InsecureForTests: true, CanPort80: true, }, @@ -624,10 +709,11 @@ func New(c *Config) (*Server, error) { ExplicitBaseURL: "http://control.tailscale", }, - derpIPs: set.Of[netip.Addr](), + blendReality: c.blendReality, + derpIPs: set.Of[netip.Addr](), nodeByMAC: map[MAC]*node{}, - networkByWAN: map[netip.Addr]*network{}, + networkByWAN: &bart.Table[*network]{}, networks: set.Of[*network](), } for range 2 { @@ -653,41 +739,96 @@ func (s *Server) Close() { s.wg.Wait() } -func (s *Server) HWAddr(mac MAC) net.HardwareAddr { - // TODO: cache - return net.HardwareAddr(mac[:]) +// MACs returns the MAC addresses of the configured nodes. +func (s *Server) MACs() iter.Seq[MAC] { + return maps.Keys(s.nodeByMAC) } -// IPv4ForDNS returns the IP address for the given DNS query name (for IPv4 A -// queries only). 
-func (s *Server) IPv4ForDNS(qname string) (netip.Addr, bool) { - switch qname { - case "dns": - return fakeDNSIP, true - case "log.tailscale.io": - return fakeLogCatcherIP, true - case "test-driver.tailscale": - return fakeTestAgentIP, true - case "controlplane.tailscale.com": - return fakeProxyControlplaneIP, true - case "control.tailscale": - return fakeControlIP, true - case "derp1.tailscale": - return fakeDERP1IP, true - case "derp2.tailscale": - return fakeDERP2IP, true +func (s *Server) RegisterSinkForTest(mac MAC, fn func(eth []byte)) { + n, ok := s.nodeByMAC[mac] + if !ok { + log.Fatalf("RegisterSinkForTest: unknown MAC %v", mac) } - return netip.Addr{}, false + n.net.writers.Store(mac, networkWriter{ + writer: func(_ vmClient, eth []byte, _ int) { + fn(eth) + }, + }) +} + +func (s *Server) HWAddr(mac MAC) net.HardwareAddr { + // TODO: cache + return net.HardwareAddr(mac[:]) } type Protocol int const ( ProtocolQEMU = Protocol(iota + 1) - ProtocolUnixDGRAM // for macOS Hypervisor.Framework and VZFileHandleNetworkDeviceAttachment + ProtocolUnixDGRAM // for macOS Virtualization.Framework and VZFileHandleNetworkDeviceAttachment ) -// serveConn serves a single connection from a client. +func (s *Server) writeEthernetFrameToVM(c vmClient, ethPkt []byte, interfaceID int) { + s.writeMu.Lock() + defer s.writeMu.Unlock() + + if ethPkt == nil { + return + } + switch c.proto() { + case ProtocolQEMU: + s.scratch = binary.BigEndian.AppendUint32(s.scratch[:0], uint32(len(ethPkt))) + s.scratch = append(s.scratch, ethPkt...) 
+ if _, err := c.uc.Write(s.scratch); err != nil { + s.logf("Write pkt: %v", err) + } + + case ProtocolUnixDGRAM: + if _, err := c.uc.WriteToUnix(ethPkt, c.raddr); err != nil { + s.logf("Write pkt : %v", err) + return + } + } + + must.Do(s.pcapWriter.WritePacket(gopacket.CaptureInfo{ + Timestamp: time.Now(), + CaptureLength: len(ethPkt), + Length: len(ethPkt), + InterfaceIndex: interfaceID, + }, ethPkt)) +} + +// vmClient is a comparable value representing a connection from a VM, either a +// QEMU-style client (with streams over a Unix socket) or a datagram based +// client (such as macOS Virtualization.framework clients). +type vmClient struct { + uc *net.UnixConn + raddr *net.UnixAddr // nil for QEMU-style clients using streams; else datagram source +} + +func (c vmClient) proto() Protocol { + if c.raddr == nil { + return ProtocolQEMU + } + return ProtocolUnixDGRAM +} + +func parseEthernet(pkt []byte) (dst, src MAC, ethType layers.EthernetType, payload []byte, ok bool) { + // headerLen is the length of an Ethernet header: + // 6 bytes of destination MAC, 6 bytes of source MAC, 2 bytes of EtherType. 
+ const headerLen = 14 + if len(pkt) < headerLen { + return + } + dst = MAC(pkt[0:6]) + src = MAC(pkt[6:12]) + ethType = layers.EthernetType(binary.BigEndian.Uint16(pkt[12:14])) + payload = pkt[headerLen:] + ok = true + return +} + +// Handles a single connection from a QEMU-style client or muxd connections for dgram mode func (s *Server) ServeUnixConn(uc *net.UnixConn, proto Protocol) { if s.shuttingDown.Load() { return @@ -697,58 +838,35 @@ func (s *Server) ServeUnixConn(uc *net.UnixConn, proto Protocol) { context.AfterFunc(s.shutdownCtx, func() { uc.SetDeadline(time.Now()) }) - log.Printf("Got conn %T %p", uc, uc) + s.logf("Got conn %T %p", uc, uc) defer uc.Close() - bw := bufio.NewWriterSize(uc, 2<<10) - var writeMu sync.Mutex - var srcNode *node - writePkt := func(pkt []byte) { - if pkt == nil { - return - } - writeMu.Lock() - defer writeMu.Unlock() - if proto == ProtocolQEMU { - hdr := binary.BigEndian.AppendUint32(bw.AvailableBuffer()[:0], uint32(len(pkt))) - if _, err := bw.Write(hdr); err != nil { - log.Printf("Write hdr: %v", err) - return - } - } - if _, err := bw.Write(pkt); err != nil { - log.Printf("Write pkt: %v", err) - return - } - if err := bw.Flush(); err != nil { - log.Printf("Flush: %v", err) - } - must.Do(s.pcapWriter.WritePacket(gopacket.CaptureInfo{ - Timestamp: time.Now(), - CaptureLength: len(pkt), - Length: len(pkt), - InterfaceIndex: srcNode.interfaceID, - }, pkt)) - } - buf := make([]byte, 16<<10) - var netw *network // non-nil after first packet + didReg := map[MAC]bool{} for { var packetRaw []byte - if proto == ProtocolUnixDGRAM { - n, _, err := uc.ReadFromUnix(buf) + var raddr *net.UnixAddr + + switch proto { + case ProtocolUnixDGRAM: + n, addr, err := uc.ReadFromUnix(buf) + raddr = addr if err != nil { - log.Printf("ReadFromUnix: %v", err) + if s.shutdownCtx.Err() != nil { + // Return without logging. 
+ return + } + s.logf("ReadFromUnix: %#v", err) continue } packetRaw = buf[:n] - } else if proto == ProtocolQEMU { + case ProtocolQEMU: if _, err := io.ReadFull(uc, buf[:4]); err != nil { if s.shutdownCtx.Err() != nil { // Return without logging. return } - log.Printf("ReadFull header: %v", err) + s.logf("ReadFull header: %v", err) return } n := binary.BigEndian.Uint32(buf[:4]) @@ -758,46 +876,60 @@ func (s *Server) ServeUnixConn(uc *net.UnixConn, proto Protocol) { // Return without logging. return } - log.Printf("ReadFull pkt: %v", err) + s.logf("ReadFull pkt: %v", err) return } packetRaw = buf[4 : 4+n] // raw ethernet frame } + c := vmClient{uc, raddr} - packet := gopacket.NewPacket(packetRaw, layers.LayerTypeEthernet, gopacket.Lazy) - le, ok := packet.LinkLayer().(*layers.Ethernet) - if !ok || len(le.SrcMAC) != 6 || len(le.DstMAC) != 6 { + // For the first packet from a MAC, register a writerFunc to write to the VM. + _, srcMAC, _, _, ok := parseEthernet(packetRaw) + if !ok { + continue + } + srcNode, ok := s.nodeByMAC[srcMAC] + if !ok { + s.logf("[conn %p] got frame from unknown MAC %v", c.uc, srcMAC) continue } - ep := EthernetPacket{le, packet} + if !didReg[srcMAC] { + didReg[srcMAC] = true + s.logf("[conn %p] Registering writer for MAC %v, node %v", c.uc, srcMAC, srcNode.lanIP) + srcNode.net.registerWriter(srcMAC, c) + defer srcNode.net.unregisterWriter(srcMAC) + } - srcMAC := ep.SrcMAC() - if srcNode == nil { - srcNode, ok = s.nodeByMAC[srcMAC] - if !ok { - log.Printf("[conn %p] ignoring frame from unknown MAC %v", uc, srcMAC) - continue - } - log.Printf("[conn %p] MAC %v is node %v", uc, srcMAC, srcNode.lanIP) - netw = srcNode.net - netw.registerWriter(srcMAC, writePkt) - defer netw.registerWriter(srcMAC, nil) - } else { - if srcMAC != srcNode.mac { - log.Printf("[conn %p] ignoring frame from MAC %v, expected %v", uc, srcMAC, srcNode.mac) - continue - } + if err := s.handleEthernetFrameFromVM(packetRaw); err != nil { + 
srcNode.net.logf("handleEthernetFrameFromVM: [conn %p], %v", c.uc, err) } - must.Do(s.pcapWriter.WritePacket(gopacket.CaptureInfo{ - Timestamp: time.Now(), - CaptureLength: len(packetRaw), - Length: len(packetRaw), - InterfaceIndex: srcNode.interfaceID, - }, packetRaw)) - netw.HandleEthernetPacket(ep) } } +func (s *Server) handleEthernetFrameFromVM(packetRaw []byte) error { + packet := gopacket.NewPacket(packetRaw, layers.LayerTypeEthernet, gopacket.Lazy) + le, ok := packet.LinkLayer().(*layers.Ethernet) + if !ok || len(le.SrcMAC) != 6 || len(le.DstMAC) != 6 { + return fmt.Errorf("ignoring non-Ethernet packet: % 02x", packetRaw) + } + ep := EthernetPacket{le, packet} + + srcMAC := ep.SrcMAC() + srcNode, ok := s.nodeByMAC[srcMAC] + if !ok { + return fmt.Errorf("got frame from unknown MAC %v", srcMAC) + } + + must.Do(s.pcapWriter.WritePacket(gopacket.CaptureInfo{ + Timestamp: time.Now(), + CaptureLength: len(packetRaw), + Length: len(packetRaw), + InterfaceIndex: srcNode.interfaceID, + }, packetRaw)) + srcNode.net.HandleEthernetPacket(ep) + return nil +} + func (s *Server) routeUDPPacket(up UDPPacket) { // Find which network owns this based on the destination IP // and all the known networks' wan IPs. @@ -815,7 +947,7 @@ func (s *Server) routeUDPPacket(up UDPPacket) { } dstIP := up.Dst.Addr() - netw, ok := s.networkByWAN[dstIP] + netw, ok := s.networkByWAN.Lookup(dstIP) if !ok { if dstIP.IsPrivate() { // Not worth spamming logs. RFC 1918 space doesn't route. @@ -832,39 +964,74 @@ func (s *Server) routeUDPPacket(up UDPPacket) { // // This only delivers to client devices and not the virtual router/gateway // device. -func (n *network) writeEth(res []byte) { - if len(res) < 12 { - return +// +// It reports whether a packet was written to any clients. 
+func (n *network) writeEth(res []byte) bool { + dstMAC, srcMAC, etherType, _, ok := parseEthernet(res) + if !ok { + return false } - dstMAC := MAC(res[0:6]) - srcMAC := MAC(res[6:12]) - if dstMAC.IsBroadcast() { - n.writeFunc.Range(func(mac MAC, writeFunc func([]byte)) bool { - writeFunc(res) + + if dstMAC.IsBroadcast() || (n.v6 && etherType == layers.EthernetTypeIPv6 && dstMAC == macAllNodes) { + num := 0 + n.writers.Range(func(mac MAC, nw networkWriter) bool { + if mac != srcMAC { + num++ + nw.write(res) + } return true }) - return + return num > 0 } if srcMAC == dstMAC { n.logf("dropping write of packet from %v to itself", srcMAC) - return + return false } - if writeFunc, ok := n.writeFunc.Load(dstMAC); ok { - writeFunc(res) - return + if nw, ok := n.writers.Load(dstMAC); ok { + nw.write(res) + return true + } + + const debugMiss = false + if debugMiss { + gp := gopacket.NewPacket(res, layers.LayerTypeEthernet, gopacket.Lazy) + n.logf("no writeFunc for dst %v from src %v; pkt=%v", dstMAC, srcMAC, gp) } + + return false } +var ( + macAllNodes = MAC{0: 0x33, 1: 0x33, 5: 0x01} + macAllRouters = MAC{0: 0x33, 1: 0x33, 5: 0x02} + macBroadcast = MAC{0xff, 0xff, 0xff, 0xff, 0xff, 0xff} +) + +const ( + testingEthertype layers.EthernetType = 0x1234 +) + func (n *network) HandleEthernetPacket(ep EthernetPacket) { packet := ep.gp dstMAC := ep.DstMAC() - isBroadcast := dstMAC.IsBroadcast() - forRouter := dstMAC == n.mac || isBroadcast + isBroadcast := dstMAC.IsBroadcast() || (n.v6 && ep.le.EthernetType == layers.EthernetTypeIPv6 && dstMAC == macAllNodes) + isV6SpecialMAC := dstMAC[0] == 0x33 && dstMAC[1] == 0x33 + + // forRouter is whether the packet is destined for the router itself + // or if it's a special thing (like V6 NDP) that the router should handle. 
+ forRouter := dstMAC == n.mac || isBroadcast || isV6SpecialMAC + + const debug = false + if debug { + n.logf("HandleEthernetPacket: %v => %v; type %v, bcast=%v, forRouter=%v", ep.SrcMAC(), ep.DstMAC(), ep.le.EthernetType, isBroadcast, forRouter) + } switch ep.le.EthernetType { default: n.logf("Dropping non-IP packet: %v", ep.le.EthernetType) return + case 0x1234: + // Permitted for testing. Not a real ethertype. case layers.EthernetTypeARP: res, err := n.createARPResponse(packet) if err != nil { @@ -874,9 +1041,38 @@ func (n *network) HandleEthernetPacket(ep EthernetPacket) { } return case layers.EthernetTypeIPv6: - // One day. Low value for now. IPv4 NAT modes is the main thing - // this project wants to test. - return + if !n.v6 { + n.logf("dropping IPv6 packet on v4-only network") + return + } + if dstMAC == macAllRouters { + if rs, ok := ep.gp.Layer(layers.LayerTypeICMPv6RouterSolicitation).(*layers.ICMPv6RouterSolicitation); ok { + n.handleIPv6RouterSolicitation(ep, rs) + } else { + n.logf("unexpected IPv6 packet to all-routers: %v", ep.gp) + } + return + } + isMcast := dstMAC.IsIPv6Multicast() + if isMcast || dstMAC == n.mac { + if ns, ok := ep.gp.Layer(layers.LayerTypeICMPv6NeighborSolicitation).(*layers.ICMPv6NeighborSolicitation); ok { + n.handleIPv6NeighborSolicitation(ep, ns) + return + } + if ep.gp.Layer(layers.LayerTypeMLDv2MulticastListenerReport) != nil { + // We don't care about these (yet?) and Linux spams a bunch + // a bunch of them out, so explicitly ignore them to prevent + // log spam when verbose logging is enabled. + return + } + if isMcast && !isBroadcast { + return + } + } + + // TODO(bradfitz): handle packets to e.g. [fe80::50cc:ccff:fecc:cc01]:43619 + // and don't fall through to the router below. + case layers.EthernetTypeIPv4: // Below } @@ -884,10 +1080,12 @@ func (n *network) HandleEthernetPacket(ep EthernetPacket) { // Send ethernet broadcasts and unicast ethernet frames to peers // on the same network. 
This is all LAN traffic that isn't meant // for the router/gw itself: - n.writeEth(ep.gp.Data()) + if isBroadcast || !forRouter { + n.writeEth(ep.gp.Data()) + } if forRouter { - n.HandleEthernetIPv4PacketForRouter(ep) + n.HandleEthernetPacketForRouter(ep) } } @@ -907,6 +1105,10 @@ func (n *network) HandleUDPPacket(p UDPPacket) { Length: len(buf), InterfaceIndex: n.wanInterfaceID, }, buf) + if p.Dst.Addr().Is4() && n.breakWAN4 { + // Blackhole the packet. + return + } dst := n.doNATIn(p.Src, p.Dst) if !dst.IsValid() { n.logf("Warning: NAT dropped packet; no mapping for %v=>%v", p.Src, p.Dst) @@ -927,6 +1129,27 @@ func (n *network) HandleUDPPacket(p UDPPacket) { n.WriteUDPPacketNoNAT(p) } +func (n *network) nodeByIP(ip netip.Addr) (node *node, ok bool) { + if ip.Is4() { + node, ok = n.nodesByIP4[ip] + } + if !ok && ip.Is6() { + var mac MAC + n.macMu.Lock() + mac, ok = n.macOfIPv6[ip] + n.macMu.Unlock() + if !ok { + log.Printf("warning: no known MAC for IPv6 %v", ip) + return nil, false + } + node, ok = n.nodesByMAC[mac] + if !ok { + log.Printf("warning: no known node for MAC %v (IP %v)", mac, ip) + } + } + return node, ok +} + // WriteUDPPacketNoNAT writes a UDP packet to the network, without // doing any NAT translation. // @@ -935,16 +1158,15 @@ func (n *network) HandleUDPPacket(p UDPPacket) { // same ethernet segment. 
func (n *network) WriteUDPPacketNoNAT(p UDPPacket) { src, dst := p.Src, p.Dst - node, ok := n.nodesByIP[dst.Addr()] + node, ok := n.nodeByIP(dst.Addr()) if !ok { n.logf("no node for dest IP %v in UDP packet %v=>%v", dst.Addr(), p.Src, p.Dst) return } eth := &layers.Ethernet{ - SrcMAC: n.mac.HWAddr(), // of gateway - DstMAC: node.mac.HWAddr(), - EthernetType: layers.EthernetTypeIPv4, + SrcMAC: n.mac.HWAddr(), // of gateway + DstMAC: node.mac.HWAddr(), } ethRaw, err := n.serializedUDPPacket(src, dst, p.Payload, eth) if err != nil { @@ -954,54 +1176,116 @@ func (n *network) WriteUDPPacketNoNAT(p UDPPacket) { n.writeEth(ethRaw) } +type serializableNetworkLayer interface { + gopacket.SerializableLayer + gopacket.NetworkLayer +} + +func mkIPLayer(proto layers.IPProtocol, src, dst netip.Addr) serializableNetworkLayer { + if src.Is4() { + return &layers.IPv4{ + Protocol: proto, + SrcIP: src.AsSlice(), + DstIP: dst.AsSlice(), + } + } + if src.Is6() { + return &layers.IPv6{ + NextHeader: proto, + SrcIP: src.AsSlice(), + DstIP: dst.AsSlice(), + } + } + panic("invalid src IP") +} + // serializedUDPPacket serializes a UDP packet with the given source and // destination IP:port pairs, and payload. // // If eth is non-nil, it will be used as the Ethernet layer, otherwise the // Ethernet layer will be omitted from the serialization. 
func (n *network) serializedUDPPacket(src, dst netip.AddrPort, payload []byte, eth *layers.Ethernet) ([]byte, error) { - ip := &layers.IPv4{ - Version: 4, - TTL: 64, - Protocol: layers.IPProtocolUDP, - SrcIP: src.Addr().AsSlice(), - DstIP: dst.Addr().AsSlice(), - } + ip := mkIPLayer(layers.IPProtocolUDP, src.Addr(), dst.Addr()) udp := &layers.UDP{ SrcPort: layers.UDPPort(src.Port()), DstPort: layers.UDPPort(dst.Port()), } - udp.SetNetworkLayerForChecksum(ip) - - buffer := gopacket.NewSerializeBuffer() - options := gopacket.SerializeOptions{FixLengths: true, ComputeChecksums: true} - layers := []gopacket.SerializableLayer{eth, ip, udp, gopacket.Payload(payload)} if eth == nil { - layers = layers[1:] - } - if err := gopacket.SerializeLayers(buffer, options, layers...); err != nil { - return nil, fmt.Errorf("serializing UDP: %v", err) + return mkPacket(ip, udp, gopacket.Payload(payload)) + } else { + return mkPacket(eth, ip, udp, gopacket.Payload(payload)) } - return buffer.Bytes(), nil } -// HandleEthernetIPv4PacketForRouter handles an IPv4 packet that is +// HandleEthernetPacketForRouter handles a packet that is // directed to the router/gateway itself. The packet may be to the // broadcast MAC address, or to the router's MAC address. The target // IP may be the router's IP, or an internet (routed) IP. 
-func (n *network) HandleEthernetIPv4PacketForRouter(ep EthernetPacket) { +func (n *network) HandleEthernetPacketForRouter(ep EthernetPacket) { packet := ep.gp - - v4, ok := packet.Layer(layers.LayerTypeIPv4).(*layers.IPv4) + flow, ok := flow(packet) if !ok { + n.logf("dropping non-IP packet: %v", packet) + return + } + dstIP := flow.dst + toForward := dstIP != n.lanIP4.Addr() && dstIP != netip.IPv4Unspecified() && !dstIP.IsLinkLocalUnicast() + + // Pre-NAT mapping, for DNS/etc responses: + if flow.src.Is6() { + n.macMu.Lock() + mak.Set(&n.macOfIPv6, flow.src, ep.SrcMAC()) + n.macMu.Unlock() + } + + if udp, ok := packet.Layer(layers.LayerTypeUDP).(*layers.UDP); ok { + n.handleUDPPacketForRouter(ep, udp, toForward, flow) + return + } + + if toForward && n.s.shouldInterceptTCP(packet) { + if flow.dst.Is4() && n.breakWAN4 { + // Blackhole the packet. + return + } + var base *layers.BaseLayer + proto := header.IPv4ProtocolNumber + if v4, ok := packet.Layer(layers.LayerTypeIPv4).(*layers.IPv4); ok { + base = &v4.BaseLayer + } else if v6, ok := packet.Layer(layers.LayerTypeIPv6).(*layers.IPv6); ok { + base = &v6.BaseLayer + proto = header.IPv6ProtocolNumber + } else { + panic("not v4, not v6") + } + pktCopy := make([]byte, 0, len(base.Contents)+len(base.Payload)) + pktCopy = append(pktCopy, base.Contents...) + pktCopy = append(pktCopy, base.Payload...) + packetBuf := stack.NewPacketBuffer(stack.PacketBufferOptions{ + Payload: buffer.MakeWithData(pktCopy), + }) + n.linkEP.InjectInbound(proto, packetBuf) + packetBuf.DecRef() + return + } + + if flow.src.Is6() && flow.src.IsLinkLocalUnicast() && !flow.dst.IsLinkLocalUnicast() { + // Don't log. 
return } - srcIP, _ := netip.AddrFromSlice(v4.SrcIP) - dstIP, _ := netip.AddrFromSlice(v4.DstIP) - toForward := dstIP != n.lanIP.Addr() && dstIP != netip.IPv4Unspecified() - udp, isUDP := packet.Layer(layers.LayerTypeUDP).(*layers.UDP) + + n.logf("router got unknown packet: %v", packet) +} + +func (n *network) handleUDPPacketForRouter(ep EthernetPacket, udp *layers.UDP, toForward bool, flow ipSrcDst) { + packet := ep.gp + srcIP, dstIP := flow.src, flow.dst if isDHCPRequest(packet) { + if !n.v4 { + n.logf("dropping DHCPv4 packet on v6-only network") + return + } res, err := n.s.createDHCPResponse(packet) if err != nil { n.logf("createDHCPResponse: %v", err) @@ -1017,8 +1301,6 @@ func (n *network) HandleEthernetIPv4PacketForRouter(ep EthernetPacket) { } if isDNSRequest(packet) { - // TODO(bradfitz): restrict this to 4.11.4.11? add DNS - // on gateway instead? res, err := n.s.createDNSResponse(packet) if err != nil { n.logf("createDNSResponse: %v", err) @@ -1028,20 +1310,20 @@ func (n *network) HandleEthernetIPv4PacketForRouter(ep EthernetPacket) { return } - if isUDP && dstIP == fakeSyslogIP { - node, ok := n.nodesByIP[srcIP] + if fakeSyslog.Match(dstIP) { + node, ok := n.nodeByIP(srcIP) if !ok { return } if node.verboseSyslog { // TODO(bradfitz): parse this and capture it, structured, into // node's log buffer. - log.Printf("syslog from %v: %s", node, udp.Payload) + n.logf("syslog from %v: %s", node, udp.Payload) } return } - if !toForward && isNATPMP(packet) { + if dstIP == n.lanIP4.Addr() && isNATPMP(udp) { n.handleNATPMPRequest(UDPPacket{ Src: netip.AddrPortFrom(srcIP, uint16(udp.SrcPort)), Dst: netip.AddrPortFrom(dstIP, uint16(udp.DstPort)), @@ -1050,7 +1332,11 @@ func (n *network) HandleEthernetIPv4PacketForRouter(ep EthernetPacket) { return } - if toForward && isUDP { + if toForward { + if dstIP.Is4() && n.breakWAN4 { + // Blackhole the packet. 
+ return + } src := netip.AddrPortFrom(srcIP, uint16(udp.SrcPort)) dst := netip.AddrPortFrom(dstIP, uint16(udp.DstPort)) buf, err := n.serializedUDPPacket(src, dst, udp.Payload, nil) @@ -1065,7 +1351,12 @@ func (n *network) HandleEthernetIPv4PacketForRouter(ep EthernetPacket) { InterfaceIndex: n.lanInterfaceID, }, buf) + lanSrc := src // the original src, before NAT (for logging only) src = n.doNATOut(src, dst) + if !src.IsValid() { + n.logf("warning: NAT dropped packet; no NAT out mapping for %v=>%v", lanSrc, dst) + return + } buf, err = n.serializedUDPPacket(src, dst, udp.Payload, nil) if err != nil { n.logf("serializing UDP packet: %v", err) @@ -1078,6 +1369,12 @@ func (n *network) HandleEthernetIPv4PacketForRouter(ep EthernetPacket) { InterfaceIndex: n.wanInterfaceID, }, buf) + if src.Addr().Is6() { + n.macMu.Lock() + mak.Set(&n.macOfIPv6, src.Addr(), ep.SrcMAC()) + n.macMu.Unlock() + } + n.s.routeUDPPacket(UDPPacket{ Src: src, Dst: dst, @@ -1086,22 +1383,116 @@ func (n *network) HandleEthernetIPv4PacketForRouter(ep EthernetPacket) { return } - if toForward && n.s.shouldInterceptTCP(packet) { - ipp := packet.Layer(layers.LayerTypeIPv4).(*layers.IPv4) - pktCopy := make([]byte, 0, len(ipp.Contents)+len(ipp.Payload)) - pktCopy = append(pktCopy, ipp.Contents...) - pktCopy = append(pktCopy, ipp.Payload...) - packetBuf := stack.NewPacketBuffer(stack.PacketBufferOptions{ - Payload: buffer.MakeWithData(pktCopy), - }) - n.linkEP.InjectInbound(header.IPv4ProtocolNumber, packetBuf) - packetBuf.DecRef() + if udp.DstPort == pcpPort || udp.DstPort == ssdpPort { + // We handle NAT-PMP, but not these yet. + // TODO(bradfitz): handle? marginal utility so far. + // Don't log about them being unknown. 
return } - //log.Printf("Got packet: %v", packet) + n.logf("router got unknown UDP packet: %v", packet) } +func (n *network) handleIPv6RouterSolicitation(ep EthernetPacket, rs *layers.ICMPv6RouterSolicitation) { + v6 := ep.gp.Layer(layers.LayerTypeIPv6).(*layers.IPv6) + + // Send a router advertisement back. + eth := &layers.Ethernet{ + SrcMAC: n.mac.HWAddr(), + DstMAC: ep.SrcMAC().HWAddr(), + EthernetType: layers.EthernetTypeIPv6, + } + n.logf("sending IPv6 router advertisement to %v from %v", eth.DstMAC, eth.SrcMAC) + ip := &layers.IPv6{ + NextHeader: layers.IPProtocolICMPv6, + HopLimit: 255, // per RFC 4861, 7.1.1 etc (all NDP messages); don't use mkPacket's default of 64 + SrcIP: net.ParseIP("fe80::1"), + DstIP: v6.SrcIP, + } + icmp := &layers.ICMPv6{ + TypeCode: layers.CreateICMPv6TypeCode(layers.ICMPv6TypeRouterAdvertisement, 0), + } + pfx := make([]byte, 0, 30) // it's 32 on the wire, once gopacket adds two byte header + pfx = append(pfx, byte(64)) // CIDR length + pfx = append(pfx, byte(0xc0)) // flags: On-Link, Autonomous + pfx = binary.BigEndian.AppendUint32(pfx, 86400) // valid lifetime + pfx = binary.BigEndian.AppendUint32(pfx, 14400) // preferred lifetime + pfx = binary.BigEndian.AppendUint32(pfx, 0) // reserved + wanIP := n.wanIP6.Addr().As16() + pfx = append(pfx, wanIP[:]...) 
+ + ra := &layers.ICMPv6RouterAdvertisement{ + RouterLifetime: 1800, + Options: []layers.ICMPv6Option{ + { + Type: layers.ICMPv6OptPrefixInfo, + Data: pfx, + }, + }, + } + pkt, err := mkPacket(eth, ip, icmp, ra) + if err != nil { + n.logf("serializing ICMPv6 RA: %v", err) + return + } + n.writeEth(pkt) +} + +func (n *network) handleIPv6NeighborSolicitation(ep EthernetPacket, ns *layers.ICMPv6NeighborSolicitation) { + v6 := ep.gp.Layer(layers.LayerTypeIPv6).(*layers.IPv6) + + targetIP, ok := netip.AddrFromSlice(ns.TargetAddress) + if !ok { + return + } + var srcMAC MAC + if targetIP == netip.MustParseAddr("fe80::1") { + srcMAC = n.mac + } else { + n.logf("Ignoring IPv6 NS request from %v for target %v", ep.SrcMAC(), targetIP) + return + } + n.logf("replying to IPv6 NS %v->%v about target %v (replySrc=%v)", ep.SrcMAC(), ep.DstMAC(), targetIP, srcMAC) + + // Send a neighbor advertisement back. + eth := &layers.Ethernet{ + SrcMAC: srcMAC.HWAddr(), + DstMAC: ep.SrcMAC().HWAddr(), + EthernetType: layers.EthernetTypeIPv6, + } + ip := &layers.IPv6{ + HopLimit: 255, // per RFC 4861, 7.1.1 etc (all NDP messages); don't use mkPacket's default of 64 + NextHeader: layers.IPProtocolICMPv6, + SrcIP: ns.TargetAddress, + DstIP: v6.SrcIP, + } + icmp := &layers.ICMPv6{ + TypeCode: layers.CreateICMPv6TypeCode(layers.ICMPv6TypeNeighborAdvertisement, 0), + } + var flags uint8 = 0x40 // solicited + if srcMAC == n.mac { + flags |= 0x80 // router + } + flags |= 0x20 // override + + na := &layers.ICMPv6NeighborAdvertisement{ + TargetAddress: ns.TargetAddress, + Flags: flags, + } + na.Options = append(na.Options, layers.ICMPv6Option{ + Type: layers.ICMPv6OptTargetAddress, + Data: srcMAC.HWAddr(), + }) + pkt, err := mkPacket(eth, ip, icmp, na) + if err != nil { + n.logf("serializing ICMPv6 NA: %v", err) + } + if !n.writeEth(pkt) { + n.logf("failed to writeEth for IPv6 NA reply for %v", targetIP) + } +} + +// createDHCPResponse creates a DHCPv4 response for the given DHCPv4 request. 
func (s *Server) createDHCPResponse(request gopacket.Packet) ([]byte, error) { ethLayer := request.Layer(layers.LayerTypeEthernet).(*layers.Ethernet) srcMAC, ok := macOf(ethLayer.SrcMAC) @@ -1113,7 +1504,7 @@ func (s *Server) createDHCPResponse(request gopacket.Packet) ([]byte, error) { log.Printf("DHCP request from unknown node %v; ignoring", srcMAC) return nil, nil } - gwIP := node.net.lanIP.Addr() + gwIP := node.net.lanIP4.Addr() ipLayer := request.Layer(layers.LayerTypeIPv4).(*layers.IPv4) udpLayer := request.Layer(layers.LayerTypeUDP).(*layers.UDP) @@ -1168,12 +1559,12 @@ func (s *Server) createDHCPResponse(request gopacket.Packet) ([]byte, error) { }, layers.DHCPOption{ Type: layers.DHCPOptDNS, - Data: fakeDNSIP.AsSlice(), + Data: fakeDNS.v4.AsSlice(), Length: 4, }, layers.DHCPOption{ Type: layers.DHCPOptSubnetMask, - Data: net.CIDRMask(node.net.lanIP.Bits(), 32), + Data: net.CIDRMask(node.net.lanIP4.Bits(), 32), Length: 4, }, ) @@ -1182,37 +1573,21 @@ func (s *Server) createDHCPResponse(request gopacket.Packet) ([]byte, error) { eth := &layers.Ethernet{ SrcMAC: node.net.mac.HWAddr(), DstMAC: ethLayer.SrcMAC, - EthernetType: layers.EthernetTypeIPv4, + EthernetType: layers.EthernetTypeIPv4, // never IPv6 for DHCP } - ip := &layers.IPv4{ - Version: 4, - TTL: 64, Protocol: layers.IPProtocolUDP, SrcIP: ipLayer.DstIP, DstIP: ipLayer.SrcIP, } - udp := &layers.UDP{ SrcPort: udpLayer.DstPort, DstPort: udpLayer.SrcPort, } - udp.SetNetworkLayerForChecksum(ip) - - buffer := gopacket.NewSerializeBuffer() - options := gopacket.SerializeOptions{FixLengths: true, ComputeChecksums: true} - if err := gopacket.SerializeLayers(buffer, options, - eth, - ip, - udp, - response, - ); err != nil { - return nil, err - } - - return buffer.Bytes(), nil + return mkPacket(eth, ip, udp, response) } +// isDHCPRequest reports whether pkt is a DHCPv4 request. 
func isDHCPRequest(pkt gopacket.Packet) bool { v4, ok := pkt.Layer(layers.LayerTypeIPv4).(*layers.IPv4) if !ok || v4.Protocol != layers.IPProtocolUDP { @@ -1237,54 +1612,78 @@ func (s *Server) shouldInterceptTCP(pkt gopacket.Packet) bool { if !ok { return false } - ipv4, ok := pkt.Layer(layers.LayerTypeIPv4).(*layers.IPv4) + if tcp.DstPort == 123 { + // Test port for TCP interception. Not really useful, but cute for + // demos. + return true + } + flow, ok := flow(pkt) if !ok { return false } - if tcp.DstPort == 123 || tcp.DstPort == 124 { - return true + if flow.src.Is6() && flow.src.IsLinkLocalUnicast() { + return false } - dstIP, _ := netip.AddrFromSlice(ipv4.DstIP.To4()) + if tcp.DstPort == 80 || tcp.DstPort == 443 { - switch dstIP { - case fakeControlIP, fakeDERP1IP, fakeDERP2IP, fakeLogCatcherIP: - return true + for _, v := range []virtualIP{fakeControl, fakeDERP1, fakeDERP2, fakeLogCatcher} { + if v.Match(flow.dst) { + return true + } } - if dstIP == fakeProxyControlplaneIP { + if fakeProxyControlplane.Match(flow.dst) { return s.blendReality } - if s.derpIPs.Contains(dstIP) { + if s.derpIPs.Contains(flow.dst) { return true } } - if tcp.DstPort == 8008 && dstIP == fakeTestAgentIP { + if tcp.DstPort == 8008 && fakeTestAgent.Match(flow.dst) { // Connection from cmd/tta. return true } return false } +type ipSrcDst struct { + src netip.Addr + dst netip.Addr +} + +func flow(gp gopacket.Packet) (f ipSrcDst, ok bool) { + if gp == nil { + return f, false + } + n := gp.NetworkLayer() + if n == nil { + return f, false + } + sb, db := n.NetworkFlow().Endpoints() + src, _ := netip.AddrFromSlice(sb.Raw()) + dst, _ := netip.AddrFromSlice(db.Raw()) + return ipSrcDst{src: src, dst: dst}, src.IsValid() && dst.IsValid() +} + // isDNSRequest reports whether pkt is a DNS request to the fake DNS server. 
func isDNSRequest(pkt gopacket.Packet) bool { udp, ok := pkt.Layer(layers.LayerTypeUDP).(*layers.UDP) if !ok || udp.DstPort != 53 { return false } - ip, ok := pkt.Layer(layers.LayerTypeIPv4).(*layers.IPv4) + f, ok := flow(pkt) if !ok { return false } - dstIP, ok := netip.AddrFromSlice(ip.DstIP) - if !ok || dstIP != fakeDNSIP { + if !fakeDNS.Match(f.dst) { + // TODO(bradfitz): maybe support configs where DNS is local in the LAN return false } dns, ok := pkt.Layer(layers.LayerTypeDNS).(*layers.DNS) return ok && dns.QR == false && len(dns.Questions) > 0 } -func isNATPMP(pkt gopacket.Packet) bool { - udp, ok := pkt.Layer(layers.LayerTypeUDP).(*layers.UDP) - return ok && udp.DstPort == 5351 && len(udp.Payload) > 0 && udp.Payload[0] == 0 // version 0, not 2 for PCP +func isNATPMP(udp *layers.UDP) bool { + return udp.DstPort == 5351 && len(udp.Payload) > 0 && udp.Payload[0] == 0 // version 0, not 2 for PCP } func makeSTUNReply(req UDPPacket) (res UDPPacket, ok bool) { @@ -1301,8 +1700,11 @@ func makeSTUNReply(req UDPPacket) (res UDPPacket, ok bool) { } func (s *Server) createDNSResponse(pkt gopacket.Packet) ([]byte, error) { + flow, ok := flow(pkt) + if !ok { + return nil, nil + } ethLayer := pkt.Layer(layers.LayerTypeEthernet).(*layers.Ethernet) - ipLayer := pkt.Layer(layers.LayerTypeIPv4).(*layers.IPv4) udpLayer := pkt.Layer(layers.LayerTypeUDP).(*layers.UDP) dnsLayer := pkt.Layer(layers.LayerTypeDNS).(*layers.DNS) @@ -1334,64 +1736,69 @@ func (s *Server) createDNSResponse(pkt gopacket.Packet) ([]byte, error) { } names = append(names, q.Type.String()+"/"+string(q.Name)) - if q.Class != layers.DNSClassIN || q.Type != layers.DNSTypeA { + if q.Class != layers.DNSClassIN { continue } - if ip, ok := s.IPv4ForDNS(string(q.Name)); ok { - response.ANCount++ - response.Answers = append(response.Answers, layers.DNSResourceRecord{ - Name: q.Name, - Type: q.Type, - Class: q.Class, - IP: ip.AsSlice(), - TTL: 60, - }) + if q.Type == layers.DNSTypeA || q.Type == layers.DNSTypeAAAA { + 
if v, ok := vips[string(q.Name)]; ok { + ip := v.v4 + if q.Type == layers.DNSTypeAAAA { + ip = v.v6 + } + response.ANCount++ + response.Answers = append(response.Answers, layers.DNSResourceRecord{ + Name: q.Name, + Type: q.Type, + Class: q.Class, + IP: ip.AsSlice(), + TTL: 60, + }) + } } } + // Make reply layers, all reversed. eth2 := &layers.Ethernet{ - SrcMAC: ethLayer.DstMAC, - DstMAC: ethLayer.SrcMAC, - EthernetType: layers.EthernetTypeIPv4, - } - ip2 := &layers.IPv4{ - Version: 4, - TTL: 64, - Protocol: layers.IPProtocolUDP, - SrcIP: ipLayer.DstIP, - DstIP: ipLayer.SrcIP, + SrcMAC: ethLayer.DstMAC, + DstMAC: ethLayer.SrcMAC, } + ip2 := mkIPLayer(layers.IPProtocolUDP, flow.dst, flow.src) udp2 := &layers.UDP{ SrcPort: udpLayer.DstPort, DstPort: udpLayer.SrcPort, } - udp2.SetNetworkLayerForChecksum(ip2) - buffer := gopacket.NewSerializeBuffer() - options := gopacket.SerializeOptions{FixLengths: true, ComputeChecksums: true} - if err := gopacket.SerializeLayers(buffer, options, eth2, ip2, udp2, response); err != nil { + resPkt, err := mkPacket(eth2, ip2, udp2, response) + if err != nil { return nil, err } const debugDNS = false if debugDNS { if len(response.Answers) > 0 { - back := gopacket.NewPacket(buffer.Bytes(), layers.LayerTypeEthernet, gopacket.Lazy) - log.Printf("Generated: %v", back) + back := gopacket.NewPacket(resPkt, layers.LayerTypeEthernet, gopacket.Lazy) + log.Printf("createDNSResponse generated answers: %v", back) } else { log.Printf("made empty response for %q", names) } } - return buffer.Bytes(), nil + return resPkt, nil } // doNATOut performs NAT on an outgoing packet from src to dst, where // src is a LAN IP and dst is a WAN IP. // // It returns the source WAN ip:port to use. +// +// If newSrc is invalid, the packet should be dropped. func (n *network) doNATOut(src, dst netip.AddrPort) (newSrc netip.AddrPort) { + if src.Addr().Is6() { + // TODO(bradfitz): IPv6 NAT? For now, normal IPv6 only. 
+ return src + } + n.natMu.Lock() defer n.natMu.Unlock() @@ -1413,7 +1820,14 @@ type portmapFlowKey struct { // doNATIn performs NAT on an incoming packet from WAN src to WAN dst, returning // a new destination LAN ip:port to use. +// +// If newDst is invalid, the packet should be dropped. func (n *network) doNATIn(src, dst netip.AddrPort) (newDst netip.AddrPort) { + if dst.Addr().Is6() { + // TODO(bradfitz): IPv6 NAT? For now, normal IPv6 only. + return dst + } + n.natMu.Lock() defer n.natMu.Unlock() @@ -1454,7 +1868,7 @@ func (n *network) doPortMap(src netip.Addr, dstLANPort, wantExtPort uint16, sec return 0, false } - wanAP := netip.AddrPortFrom(n.wanIP, wantExtPort) + wanAP := netip.AddrPortFrom(n.wanIP4, wantExtPort) dst := netip.AddrPortFrom(src, dstLANPort) if sec == 0 { @@ -1486,7 +1900,7 @@ func (n *network) doPortMap(src netip.Addr, dstLANPort, wantExtPort uint16, sec return wanAP.Port(), true } wantExtPort = rand.N(uint16(32<<10)) + 32<<10 - wanAP = netip.AddrPortFrom(n.wanIP, wantExtPort) + wanAP = netip.AddrPortFrom(n.wanIP4, wantExtPort) } return 0, false } @@ -1521,7 +1935,7 @@ func (n *network) createARPResponse(pkt gopacket.Packet) ([]byte, error) { a2 := &layers.ARP{ AddrType: layers.LinkTypeEthernet, - Protocol: layers.EthernetTypeIPv4, + Protocol: layers.EthernetTypeIPv4, // never IPv6; IPv6 equivalent of ARP is handleIPv6NeighborSolicitation HwAddressSize: 6, ProtAddressSize: 4, Operation: layers.ARPReply, @@ -1554,7 +1968,7 @@ func (n *network) handleNATPMPRequest(req UDPPacket) { 0, 0, // result code success ) res = binary.BigEndian.AppendUint32(res, uint32(time.Now().Unix())) - wan4 := n.wanIP.As4() + wan4 := n.wanIP4.As4() res = append(res, wan4[:]...) 
n.WriteUDPPacketNoNAT(UDPPacket{ Src: req.Dst, @@ -1618,7 +2032,7 @@ func (s *Server) WriteStartingBanner(w io.Writer) { fmt.Fprintf(w, "vnet serving clients:\n") for _, n := range s.nodes { - fmt.Fprintf(w, " %v %15v (%v, %v)\n", n.mac, n.lanIP, n.net.wanIP, n.net.natStyle.Load()) + fmt.Fprintf(w, " %v %15v (%v, %v)\n", n.mac, n.lanIP, n.net.wanIP4, n.net.natStyle.Load()) } } @@ -1644,10 +2058,13 @@ func (s *Server) addIdleAgentConn(ac *agentConn) { } func (s *Server) takeAgentConn(ctx context.Context, n *node) (_ *agentConn, ok bool) { + const debug = false for { ac, ok := s.takeAgentConnOne(n) if ok { - //log.Printf("got agent conn for %v", n.mac) + if debug { + log.Printf("takeAgentConn: got agent conn for %v", n.mac) + } return ac, true } s.mu.Lock() @@ -1655,7 +2072,9 @@ func (s *Server) takeAgentConn(ctx context.Context, n *node) (_ *agentConn, ok b mak.Set(&s.agentConnWaiter, n, ready) s.mu.Unlock() - //log.Printf("waiting for agent conn for %v", n.mac) + if debug { + log.Printf("takeAgentConn: waiting for agent conn for %v", n.mac) + } select { case <-ctx.Done(): return nil, false @@ -1671,11 +2090,16 @@ func (s *Server) takeAgentConn(ctx context.Context, n *node) (_ *agentConn, ok b func (s *Server) takeAgentConnOne(n *node) (_ *agentConn, ok bool) { s.mu.Lock() defer s.mu.Unlock() + miss := 0 for ac := range s.agentConns { if ac.node == n { s.agentConns.Delete(ac) return ac, true } + miss++ + } + if miss > 0 { + log.Printf("takeAgentConnOne: missed %d times for %v", miss, n.mac) } return nil, false } @@ -1736,3 +2160,65 @@ func (c *NodeAgentClient) EnableHostFirewall(ctx context.Context) error { } return nil } + +// mkPacket is a serializes a number of layers into a packet. 
+// +// It's a convenience wrapper around gopacket.SerializeLayers +// that does some things automatically: +// +// * layers.Ethernet.EthernetType is set to IPv4 or IPv6 if not already set +// * layers.IPv4/IPv6 Version is set to 4/6 if not already set +// * layers.IPv4/IPv6 TTL/HopLimit is set to 64 if not already set +// * the TCP/UDP/ICMPv6 checksum is set based on the network layer +// +// The provided layers in ll must be sorted from lowest (e.g. *layers.Ethernet) +// to highest. (Depending on the need, the first layer will be either *layers.Ethernet +// or *layers.IPv4/IPv6). +func mkPacket(ll ...gopacket.SerializableLayer) ([]byte, error) { + var el *layers.Ethernet + var nl gopacket.NetworkLayer + for _, la := range ll { + switch la := la.(type) { + case *layers.IPv4: + nl = la + if el != nil && el.EthernetType == 0 { + el.EthernetType = layers.EthernetTypeIPv4 + } + if la.Version == 0 { + la.Version = 4 + } + if la.TTL == 0 { + la.TTL = 64 + } + case *layers.IPv6: + nl = la + if el != nil && el.EthernetType == 0 { + el.EthernetType = layers.EthernetTypeIPv6 + } + if la.Version == 0 { + la.Version = 6 + } + if la.HopLimit == 0 { + la.HopLimit = 64 + } + case *layers.Ethernet: + el = la + } + } + for _, la := range ll { + switch la := la.(type) { + case *layers.TCP: + la.SetNetworkLayerForChecksum(nl) + case *layers.UDP: + la.SetNetworkLayerForChecksum(nl) + case *layers.ICMPv6: + la.SetNetworkLayerForChecksum(nl) + } + } + buf := gopacket.NewSerializeBuffer() + opts := gopacket.SerializeOptions{FixLengths: true, ComputeChecksums: true} + if err := gopacket.SerializeLayers(buf, opts, ll...); err != nil { + return nil, fmt.Errorf("serializing packet: %v", err) + } + return buf.Bytes(), nil +} diff --git a/tstest/natlab/vnet/vnet_test.go b/tstest/natlab/vnet/vnet_test.go new file mode 100644 index 0000000000000..5ffa2b1049c88 --- /dev/null +++ b/tstest/natlab/vnet/vnet_test.go @@ -0,0 +1,664 @@ +// Copyright (c) Tailscale Inc & AUTHORS +// 
SPDX-License-Identifier: BSD-3-Clause + +package vnet + +import ( + "bytes" + "encoding/binary" + "errors" + "fmt" + "net" + "net/netip" + "path/filepath" + "runtime" + "strings" + "testing" + "time" + + "github.com/google/gopacket" + "github.com/google/gopacket/layers" + "tailscale.com/util/must" +) + +const ( + ethType4 = layers.EthernetTypeIPv4 + ethType6 = layers.EthernetTypeIPv6 +) + +// TestPacketSideEffects tests that upon receiving certain +// packets, other packets and/or log statements are generated. +func TestPacketSideEffects(t *testing.T) { + type netTest struct { + name string + pkt []byte // to send + check func(*sideEffects) error + } + tests := []struct { + netName string // name of the Server returned by setup + setup func() (*Server, error) + tests []netTest // to run against setup's Server + }{ + { + netName: "basic", + setup: newTwoNodesSameNetwork, + tests: []netTest{ + { + name: "drop-rando-ethertype", + pkt: mkEth(nodeMac(2), nodeMac(1), 0x4321, []byte("hello")), + check: all( + logSubstr("Dropping non-IP packet"), + ), + }, + { + name: "dst-mac-between-nodes", + pkt: mkEth(nodeMac(2), nodeMac(1), testingEthertype, []byte("hello")), + check: all( + numPkts(1), + pktSubstr("SrcMAC=52:cc:cc:cc:cc:01 DstMAC=52:cc:cc:cc:cc:02 EthernetType=UnknownEthernetType"), + pktSubstr("Unable to decode EthernetType 4660"), + ), + }, + { + name: "broadcast-mac", + pkt: mkEth(macBroadcast, nodeMac(1), testingEthertype, []byte("hello")), + check: all( + numPkts(1), + pktSubstr("SrcMAC=52:cc:cc:cc:cc:01 DstMAC=ff:ff:ff:ff:ff:ff EthernetType=UnknownEthernetType"), + pktSubstr("Unable to decode EthernetType 4660"), + ), + }, + { + name: "dns-request-v4", + pkt: mkDNSReq(4), + check: all( + numPkts(1), + pktSubstr("Data=[52, 52, 0, 3] IP=52.52.0.3"), + ), + }, + { + name: "dns-request-v6", + pkt: mkDNSReq(6), + check: all( + numPkts(1), + pktSubstr(" IP=2052::3 "), + ), + }, + { + name: "syslog-v4", + pkt: mkSyslogPacket(clientIPv4(1), 
"<6>2024-08-30T10:36:06-07:00 natlabapp tailscaled[1]: 2024/08/30 10:36:06 some-message"), + check: all( + numPkts(0), + logSubstr("some-message"), + ), + }, + { + name: "syslog-v6", + pkt: mkSyslogPacket(nodeWANIP6(1), "<6>2024-08-30T10:36:06-07:00 natlabapp tailscaled[1]: 2024/08/30 10:36:06 some-message"), + check: all( + numPkts(0), + logSubstr("some-message"), + ), + }, + }, + }, + { + netName: "v4", + setup: newTwoNodesSameV4Network, + tests: []netTest{ + { + name: "no-v6-reply-on-v4-only", + pkt: mkIPv6RouterSolicit(nodeMac(1), nodeLANIP6(1)), + check: all( + numPkts(0), + logSubstr("dropping IPv6 packet on v4-only network"), + ), + }, + { + name: "dhcp-discover", + pkt: mkDHCP(nodeMac(1), layers.DHCPMsgTypeDiscover), + check: all( + numPkts(2), // DHCP discover broadcast to node2 also, and the DHCP reply from router + pktSubstr("SrcMAC=52:cc:cc:cc:cc:01 DstMAC=ff:ff:ff:ff:ff:ff"), + pktSubstr("Options=[Option(ServerID:192.168.0.1), Option(MessageType:Offer)]}"), + ), + }, + { + name: "dhcp-request", + pkt: mkDHCP(nodeMac(1), layers.DHCPMsgTypeRequest), + check: all( + numPkts(2), // DHCP discover broadcast to node2 also, and the DHCP reply from router + pktSubstr("SrcMAC=52:cc:cc:cc:cc:01 DstMAC=ff:ff:ff:ff:ff:ff"), + pktSubstr("YourClientIP=192.168.0.101"), + pktSubstr("Options=[Option(ServerID:192.168.0.1), Option(MessageType:Ack), Option(LeaseTime:3600), Option(Router:[192 168 0 1]), Option(DNS:[4 11 4 11]), Option(SubnetMask:255.255.255.0)]}"), + ), + }, + }, + }, + { + netName: "v6", + setup: func() (*Server, error) { + var c Config + nw := c.AddNetwork("2000:52::1/64") + c.AddNode(nw) + c.AddNode(nw) + return New(&c) + }, + tests: []netTest{ + { + name: "router-solicit", + pkt: mkIPv6RouterSolicit(nodeMac(1), nodeLANIP6(1)), + check: all( + logSubstr("sending IPv6 router advertisement to 52:cc:cc:cc:cc:01 from 52:ee:ee:ee:ee:01"), + numPkts(1), + pktSubstr("TypeCode=RouterAdvertisement"), + pktSubstr("HopLimit=255 "), // per RFC 4861, 7.1.1 etc (all 
NDP messages) + pktSubstr("= ICMPv6RouterAdvertisement"), + pktSubstr("SrcMAC=52:ee:ee:ee:ee:01 DstMAC=52:cc:cc:cc:cc:01 EthernetType=IPv6"), + ), + }, + { + name: "all-nodes", + pkt: mkAllNodesPing(nodeMac(1), nodeLANIP6(1)), + check: all( + numPkts(1), + pktSubstr("SrcMAC=52:cc:cc:cc:cc:01 DstMAC=33:33:00:00:00:01"), + pktSubstr("SrcIP=fe80::50cc:ccff:fecc:cc01 DstIP=ff02::1"), + pktSubstr("TypeCode=EchoRequest"), + ), + }, + { + name: "no-dhcp-on-v6-disco", + pkt: mkDHCP(nodeMac(1), layers.DHCPMsgTypeDiscover), + check: all( + numPkts(1), // DHCP discover broadcast to node2 only + logSubstr("dropping DHCPv4 packet on v6-only network"), + pktSubstr("SrcMAC=52:cc:cc:cc:cc:01 DstMAC=ff:ff:ff:ff:ff:ff"), + ), + }, + { + name: "no-dhcp-on-v6-request", + pkt: mkDHCP(nodeMac(1), layers.DHCPMsgTypeRequest), + check: all( + numPkts(1), // DHCP request broadcast to node2 only + pktSubstr("SrcMAC=52:cc:cc:cc:cc:01 DstMAC=ff:ff:ff:ff:ff:ff"), + logSubstr("dropping DHCPv4 packet on v6-only network"), + ), + }, + }, + }, + } + for _, tt := range tests { + t.Run(tt.netName, func(t *testing.T) { + s, err := tt.setup() + if err != nil { + t.Fatal(err) + } + defer s.Close() + + for _, tt := range tt.tests { + t.Run(tt.name, func(t *testing.T) { + se := newSideEffects(s) + + if err := s.handleEthernetFrameFromVM(tt.pkt); err != nil { + t.Fatal(err) + } + if tt.check != nil { + if err := tt.check(se); err != nil { + t.Error(err) + } + } + if t.Failed() { + t.Logf("logs were:\n%s", strings.Join(se.logs, "\n")) + for i, rp := range se.got { + p := gopacket.NewPacket(rp.eth, layers.LayerTypeEthernet, gopacket.Lazy) + got := p.String() + t.Logf("[pkt%d, port %v]:\n%s\n", i, rp.port, got) + } + } + }) + } + }) + } + +} + +// mustPacket is like mkPacket but panics on error. +func mustPacket(layers ...gopacket.SerializableLayer) []byte { + return must.Get(mkPacket(layers...)) +} + +// mkEth encodes an ethernet frame with the given payload. 
+func mkEth(dst, src MAC, ethType layers.EthernetType, payload []byte) []byte { + ret := make([]byte, 0, 14+len(payload)) + ret = append(ret, dst.HWAddr()...) + ret = append(ret, src.HWAddr()...) + ret = binary.BigEndian.AppendUint16(ret, uint16(ethType)) + return append(ret, payload...) +} + +// mkLenPrefixed prepends a uint32 length to the given packet. +func mkLenPrefixed(pkt []byte) []byte { + ret := make([]byte, 4+len(pkt)) + binary.BigEndian.PutUint32(ret, uint32(len(pkt))) + copy(ret[4:], pkt) + return ret +} + +// mkIPv6RouterSolicit makes a IPv6 router solicitation packet +// ethernet frame. +func mkIPv6RouterSolicit(srcMAC MAC, srcIP netip.Addr) []byte { + ip := &layers.IPv6{ + Version: 6, + HopLimit: 255, + NextHeader: layers.IPProtocolICMPv6, + SrcIP: srcIP.AsSlice(), + DstIP: net.ParseIP("ff02::2"), // all routers + } + icmp := &layers.ICMPv6{ + TypeCode: layers.CreateICMPv6TypeCode(layers.ICMPv6TypeRouterSolicitation, 0), + } + + ra := &layers.ICMPv6RouterSolicitation{ + Options: []layers.ICMPv6Option{{ + Type: layers.ICMPv6OptSourceAddress, + Data: srcMAC.HWAddr(), + }}, + } + icmp.SetNetworkLayerForChecksum(ip) + return mkEth(macAllRouters, srcMAC, ethType6, mustPacket(ip, icmp, ra)) +} + +func mkAllNodesPing(srcMAC MAC, srcIP netip.Addr) []byte { + ip := &layers.IPv6{ + Version: 6, + HopLimit: 255, + NextHeader: layers.IPProtocolICMPv6, + SrcIP: srcIP.AsSlice(), + DstIP: net.ParseIP("ff02::1"), // all nodes + } + icmp := &layers.ICMPv6{ + TypeCode: layers.CreateICMPv6TypeCode(layers.ICMPv6TypeEchoRequest, 0), + } + icmp.SetNetworkLayerForChecksum(ip) + return mkEth(macAllNodes, srcMAC, ethType6, mustPacket(ip, icmp)) +} + +// mkDNSReq makes a DNS request to "control.tailscale" using the source IPs as +// defined in this test file. +// +// ipVer must be 4 or 6: +// If 4, it makes an A record request. +// If 6, it makes a AAAA record request. 
+// +// (Yes, this is technically unrelated (you can request A records over IPv6 or +// AAAA records over IPv4), but for test coverage reasons, assume that the ipVer +// of 6 means to also request an AAAA record.) +func mkDNSReq(ipVer int) []byte { + eth := &layers.Ethernet{ + SrcMAC: nodeMac(1).HWAddr(), + DstMAC: routerMac(1).HWAddr(), + EthernetType: layers.EthernetTypeIPv4, + } + if ipVer == 6 { + eth.EthernetType = layers.EthernetTypeIPv6 + } + + var ip serializableNetworkLayer + switch ipVer { + case 4: + ip = &layers.IPv4{ + Version: 4, + Protocol: layers.IPProtocolUDP, + SrcIP: clientIPv4(1).AsSlice(), + TTL: 64, + DstIP: FakeDNSIPv4().AsSlice(), + } + case 6: + ip = &layers.IPv6{ + Version: 6, + HopLimit: 64, + NextHeader: layers.IPProtocolUDP, + SrcIP: net.ParseIP("2000:52::1"), + DstIP: FakeDNSIPv6().AsSlice(), + } + default: + panic("bad ipVer") + } + + udp := &layers.UDP{ + SrcPort: 12345, + DstPort: 53, + } + udp.SetNetworkLayerForChecksum(ip) + dns := &layers.DNS{ + ID: 789, + Questions: []layers.DNSQuestion{{ + Name: []byte("control.tailscale"), + Type: layers.DNSTypeA, + Class: layers.DNSClassIN, + }}, + } + if ipVer == 6 { + dns.Questions[0].Type = layers.DNSTypeAAAA + } + return mustPacket(eth, ip, udp, dns) +} + +func mkDHCP(srcMAC MAC, typ layers.DHCPMsgType) []byte { + eth := &layers.Ethernet{ + SrcMAC: srcMAC.HWAddr(), + DstMAC: macBroadcast.HWAddr(), + EthernetType: layers.EthernetTypeIPv4, + } + ip := &layers.IPv4{ + Version: 4, + Protocol: layers.IPProtocolUDP, + SrcIP: net.ParseIP("0.0.0.0"), + DstIP: net.ParseIP("255.255.255.255"), + } + udp := &layers.UDP{ + SrcPort: 68, + DstPort: 67, + } + dhcp := &layers.DHCPv4{ + Operation: layers.DHCPOpRequest, + HardwareType: layers.LinkTypeEthernet, + HardwareLen: 6, + Xid: 0, + Secs: 0, + Flags: 0, + ClientHWAddr: srcMAC[:], + Options: []layers.DHCPOption{ + {Type: layers.DHCPOptMessageType, Length: 1, Data: []byte{byte(typ)}}, + }, + } + return mustPacket(eth, ip, udp, dhcp) +} + +func 
mkSyslogPacket(srcIP netip.Addr, msg string) []byte { + eth := &layers.Ethernet{ + SrcMAC: nodeMac(1).HWAddr(), + DstMAC: routerMac(1).HWAddr(), + } + ip := mkIPLayer(layers.IPProtocolUDP, srcIP, matchingIP(srcIP, FakeSyslogIPv4(), FakeSyslogIPv6())) + udp := &layers.UDP{ + SrcPort: 123, + DstPort: 456, // unused; only IP matches + } + return mustPacket(eth, ip, udp, gopacket.Payload([]byte(msg))) +} + +// matchingIP returns ip4 if toMatch is an IPv4 address, otherwise ip6. +func matchingIP(toMatch, if4, if6 netip.Addr) netip.Addr { + if toMatch.Is4() { + return if4 + } + return if6 +} + +// receivedPacket is an ethernet frame that was received during a test. +type receivedPacket struct { + port MAC // MAC address of client that received the packet + eth []byte // ethernet frame; dst MAC might be ff:ff:ff:ff:ff:ff, etc +} + +// sideEffects gathers side effects as a result of sending a packet and tests +// whether those effects were as desired. +type sideEffects struct { + logs []string + got []receivedPacket // ethernet packets received +} + +// newSideEffects creates a new sideEffects recorder, registering itself with s. +func newSideEffects(s *Server) *sideEffects { + se := &sideEffects{} + s.SetLoggerForTest(se.logf) + for mac := range s.MACs() { + s.RegisterSinkForTest(mac, func(eth []byte) { + se.got = append(se.got, receivedPacket{ + port: mac, + eth: eth, + }) + }) + } + return se +} + +func (se *sideEffects) logf(format string, args ...any) { + se.logs = append(se.logs, fmt.Sprintf(format, args...)) +} + +// all aggregates several side effects checkers into one. +func all(checks ...func(*sideEffects) error) func(*sideEffects) error { + return func(se *sideEffects) error { + var errs []error + for _, check := range checks { + if err := check(se); err != nil { + errs = append(errs, err) + } + } + return errors.Join(errs...) + } +} + +// logSubstr returns a side effect checker func that checks +// whether a log statement was output containing substring sub. 
+func logSubstr(sub string) func(*sideEffects) error {
+	return func(se *sideEffects) error {
+		for _, log := range se.logs {
+			if strings.Contains(log, sub) {
+				return nil
+			}
+		}
+		return fmt.Errorf("expected log substring %q not found", sub)
+	}
+}
+
+// pktSubstr returns a side effect checker func that checks whether an ethernet
+// packet was received that, once decoded and stringified by gopacket, contains
+// substring sub.
+func pktSubstr(sub string) func(*sideEffects) error {
+	return func(se *sideEffects) error {
+		for _, pkt := range se.got {
+			pkt := gopacket.NewPacket(pkt.eth, layers.LayerTypeEthernet, gopacket.Lazy)
+			got := pkt.String()
+			if strings.Contains(got, sub) {
+				return nil
+			}
+		}
+		return fmt.Errorf("packet summary with substring %q not found", sub)
+	}
+}
+
+// numPkts returns a side effect checker func that checks whether
+// the received number of ethernet packets was the given number.
+func numPkts(want int) func(*sideEffects) error {
+	return func(se *sideEffects) error {
+		if len(se.got) == want {
+			return nil
+		}
+		return fmt.Errorf("got %d packets, want %d", len(se.got), want)
+	}
+}
+
+func clientIPv4(n int) netip.Addr {
+	return netip.AddrFrom4([4]byte{192, 168, 0, byte(100 + n)})
+}
+
+var wanSLAACBase = netip.MustParseAddr("2052::50cc:ccff:fecc:cc01")
+
+// nodeWANIP6 returns a node number's WAN SLAAC IPv6 address,
+// such as 2052::50cc:ccff:fecc:cc03 for node 3.
+func nodeWANIP6(n int) netip.Addr { + a := wanSLAACBase.As16() + a[15] = byte(n) + return netip.AddrFrom16(a) +} + +func newTwoNodesSameNetwork() (*Server, error) { + var c Config + nw := c.AddNetwork("192.168.0.1/24", "2052::1/64") + c.AddNode(nw) + c.AddNode(nw) + for _, c := range c.Nodes() { + c.SetVerboseSyslog(true) + } + return New(&c) +} + +func newTwoNodesSameV4Network() (*Server, error) { + var c Config + nw := c.AddNetwork("192.168.0.1/24") + c.AddNode(nw) + c.AddNode(nw) + for _, c := range c.Nodes() { + c.SetVerboseSyslog(true) + } + return New(&c) +} + +// TestProtocolQEMU tests the protocol that qemu uses to connect to natlab's +// vnet. (uint32-length prefixed ethernet frames over a unix stream socket) +// +// This test makes two clients (as qemu would act) and has one send an ethernet +// packet to the other virtual LAN segment. +func TestProtocolQEMU(t *testing.T) { + if runtime.GOOS == "windows" { + t.Skipf("skipping on %s", runtime.GOOS) + } + s := must.Get(newTwoNodesSameNetwork()) + defer s.Close() + s.SetLoggerForTest(t.Logf) + + td := t.TempDir() + serverSock := filepath.Join(td, "vnet.sock") + + ln, err := net.Listen("unix", serverSock) + if err != nil { + t.Fatal(err) + } + defer ln.Close() + + var clientc [2]*net.UnixConn + for i := range clientc { + c, err := net.Dial("unix", serverSock) + if err != nil { + t.Fatal(err) + } + defer c.Close() + clientc[i] = c.(*net.UnixConn) + } + + for range clientc { + conn, err := ln.Accept() + if err != nil { + t.Fatal(err) + } + go s.ServeUnixConn(conn.(*net.UnixConn), ProtocolQEMU) + } + + sendBetweenClients(t, clientc, s, mkLenPrefixed) +} + +// TestProtocolUnixDgram tests the protocol that macOS Virtualization.framework +// uses to connect to vnet. (unix datagram sockets) +// +// It is similar to TestProtocolQEMU but uses unix datagram sockets instead of +// streams. 
+func TestProtocolUnixDgram(t *testing.T) { + if runtime.GOOS == "windows" { + t.Skipf("skipping on %s", runtime.GOOS) + } + s := must.Get(newTwoNodesSameNetwork()) + defer s.Close() + s.SetLoggerForTest(t.Logf) + + td := t.TempDir() + serverSock := filepath.Join(td, "vnet.sock") + serverAddr := must.Get(net.ResolveUnixAddr("unixgram", serverSock)) + + var clientSock [2]string + for i := range clientSock { + clientSock[i] = filepath.Join(td, fmt.Sprintf("c%d.sock", i)) + } + + uc, err := net.ListenUnixgram("unixgram", serverAddr) + if err != nil { + t.Fatal(err) + } + go s.ServeUnixConn(uc, ProtocolUnixDGRAM) + + var clientc [2]*net.UnixConn + for i := range clientc { + c, err := net.DialUnix("unixgram", + must.Get(net.ResolveUnixAddr("unixgram", clientSock[i])), + serverAddr) + if err != nil { + t.Fatal(err) + } + defer c.Close() + clientc[i] = c + } + + sendBetweenClients(t, clientc, s, nil) +} + +// sendBetweenClients is a test helper that tries to send an ethernet frame from +// one client to another. +// +// It first makes the two clients send a packet to a fictitious node 3, which +// forces their src MACs to be registered with a networkWriter internally so +// they can receive traffic. +// +// Normally a node starts up spamming DHCP + NDP but we don't get that as a side +// effect here, so this does it manually. +// +// It also then waits for them to be registered. +// +// wrap is an optional function that wraps the packet before sending it. 
+func sendBetweenClients(t testing.TB, clientc [2]*net.UnixConn, s *Server, wrap func([]byte) []byte) { + t.Helper() + if wrap == nil { + wrap = func(b []byte) []byte { return b } + } + for i, c := range clientc { + must.Get(c.Write(wrap(mkEth(nodeMac(3), nodeMac(i+1), testingEthertype, []byte("hello"))))) + } + awaitCond(t, 5*time.Second, func() error { + if n := s.RegisteredWritersForTest(); n != 2 { + return fmt.Errorf("got %d registered writers, want 2", n) + } + return nil + }) + + // Now see if node1 can write to node2 and node2 receives it. + pkt := wrap(mkEth(nodeMac(2), nodeMac(1), testingEthertype, []byte("test-msg"))) + t.Logf("writing % 02x", pkt) + must.Get(clientc[0].Write(pkt)) + + buf := make([]byte, len(pkt)) + clientc[1].SetReadDeadline(time.Now().Add(5 * time.Second)) + n, err := clientc[1].Read(buf) + if err != nil { + t.Fatal(err) + } + got := buf[:n] + if !bytes.Equal(got, pkt) { + t.Errorf("bad packet\n got: % 02x\nwant: % 02x", got, pkt) + } +} + +func awaitCond(t testing.TB, timeout time.Duration, cond func() error) { + t.Helper() + t0 := time.Now() + for { + if err := cond(); err == nil { + return + } + if time.Since(t0) > timeout { + t.Fatalf("timed out after %v", timeout) + } + time.Sleep(10 * time.Millisecond) + } +} diff --git a/tstest/reflect.go b/tstest/reflect.go new file mode 100644 index 0000000000000..125391349a941 --- /dev/null +++ b/tstest/reflect.go @@ -0,0 +1,114 @@ +// Copyright (c) Tailscale Inc & AUTHORS +// SPDX-License-Identifier: BSD-3-Clause + +package tstest + +import ( + "net/netip" + "reflect" + "testing" + "time" + + "tailscale.com/types/ptr" +) + +// IsZeroable is the interface for things with an IsZero method. 
+type IsZeroable interface { + IsZero() bool +} + +var ( + netipAddrType = reflect.TypeFor[netip.Addr]() + netipAddrPortType = reflect.TypeFor[netip.AddrPort]() + netipPrefixType = reflect.TypeFor[netip.Prefix]() + timeType = reflect.TypeFor[time.Time]() + timePtrType = reflect.TypeFor[*time.Time]() +) + +// CheckIsZero checks that the IsZero method of a given type functions +// correctly, by instantiating a new value of that type, changing a field, and +// then checking that the IsZero method returns false. +// +// The nonzeroValues map should contain non-zero values for each type that +// exists in the type T or any contained types. Basic types like string, bool, +// and numeric types are handled automatically. +func CheckIsZero[T IsZeroable](t testing.TB, nonzeroValues map[reflect.Type]any) { + t.Helper() + + var zero T + if !zero.IsZero() { + t.Errorf("zero value of %T is not IsZero", zero) + return + } + + var nonEmptyValue func(t reflect.Type) reflect.Value + nonEmptyValue = func(ty reflect.Type) reflect.Value { + if v, ok := nonzeroValues[ty]; ok { + return reflect.ValueOf(v) + } + + switch ty { + // Given that we're a networking company, probably fine to have + // a special case for netip.Addr :) + case netipAddrType: + return reflect.ValueOf(netip.MustParseAddr("1.2.3.4")) + case netipAddrPortType: + return reflect.ValueOf(netip.MustParseAddrPort("1.2.3.4:9999")) + case netipPrefixType: + return reflect.ValueOf(netip.MustParsePrefix("1.2.3.4/24")) + + case timeType: + return reflect.ValueOf(time.Unix(1704067200, 0)) + case timePtrType: + return reflect.ValueOf(ptr.To(time.Unix(1704067200, 0))) + } + + switch ty.Kind() { + case reflect.String: + return reflect.ValueOf("foo").Convert(ty) + case reflect.Bool: + return reflect.ValueOf(true) + case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: + return reflect.ValueOf(int64(-42)).Convert(ty) + case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64: + return 
reflect.ValueOf(uint64(42)).Convert(ty) + case reflect.Float32, reflect.Float64: + return reflect.ValueOf(float64(3.14)).Convert(ty) + case reflect.Complex64, reflect.Complex128: + return reflect.ValueOf(complex(3.14, 2.71)).Convert(ty) + case reflect.Chan: + return reflect.MakeChan(ty, 1) + + // For slices, ensure that the slice is non-empty. + case reflect.Slice: + v := nonEmptyValue(ty.Elem()) + sl := reflect.MakeSlice(ty, 1, 1) + sl.Index(0).Set(v) + return sl + + case reflect.Map: + // Create a map with a single key-value pair, recursively creating each. + k := nonEmptyValue(ty.Key()) + v := nonEmptyValue(ty.Elem()) + + m := reflect.MakeMap(ty) + m.SetMapIndex(k, v) + return m + + default: + panic("unhandled type " + ty.String()) + } + } + + typ := reflect.TypeFor[T]() + for i, n := 0, typ.NumField(); i < n; i++ { + sf := typ.Field(i) + + var nonzero T + rv := reflect.ValueOf(&nonzero).Elem() + rv.Field(i).Set(nonEmptyValue(sf.Type)) + if nonzero.IsZero() { + t.Errorf("IsZero = true with %v set; want false\nvalue: %#v", sf.Name, nonzero) + } + } +} diff --git a/tstest/tailmac/Host.entitlements b/tstest/tailmac/Host.entitlements new file mode 100644 index 0000000000000..d7d0d6e8b6c29 --- /dev/null +++ b/tstest/tailmac/Host.entitlements @@ -0,0 +1,8 @@ + + + + + com.apple.security.virtualization + + + diff --git a/tstest/tailmac/LICENSE/LICENSE.txt b/tstest/tailmac/LICENSE/LICENSE.txt new file mode 100644 index 0000000000000..733d1795a97d1 --- /dev/null +++ b/tstest/tailmac/LICENSE/LICENSE.txt @@ -0,0 +1,8 @@ +Copyright © 2023 Apple Inc. 
+ +Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
+ diff --git a/tstest/tailmac/Makefile b/tstest/tailmac/Makefile new file mode 100644 index 0000000000000..b87e44ed1c49d --- /dev/null +++ b/tstest/tailmac/Makefile @@ -0,0 +1,23 @@ +XCPRETTIFIER := xcpretty +ifeq (, $(shell which $(XCPRETTIFIER))) + XCPRETTIFIER := cat +endif + +.PHONY: tailmac +tailmac: + xcodebuild -scheme tailmac -destination 'platform=macOS,arch=arm64' -derivedDataPath build -configuration Release build | $(XCPRETTIFIER) + cp -r ./build/Build/Products/Release/tailmac ./bin/tailmac + +.PHONY: host +host: + xcodebuild -scheme host -destination 'platform=macOS,arch=arm64' -derivedDataPath build -configuration Release build | $(XCPRETTIFIER) + cp -r ./build/Build/Products/Release/Host.app ./bin/Host.app + +.PHONY: clean +clean: + rm -rf ./bin + rm -rf ./build + mkdir -p ./bin + +.PHONY: all +all: clean tailmac host diff --git a/tstest/tailmac/README.md b/tstest/tailmac/README.md new file mode 100644 index 0000000000000..a8b9f2598dde3 --- /dev/null +++ b/tstest/tailmac/README.md @@ -0,0 +1,161 @@ +# Lightweight macOS VM's for tstest and natlab + +This utility is designed to provide custom virtual machine tooling support for macOS. The intent +is to quickly create and spin up small, preconfigured virtual machines, for executing integration +and unit tests. + +The primary driver is to provide support for VZVirtioNetworkDeviceConfiguration which is not +supported by other popular macOS VM hosts. This also gives us the freedom to fully customize and script +all virtual machine setup and interaction. VZVirtioNetworkDeviceConfiguration lets us +directly inject and sink network traffic for simulating various network conditions, +protocols, and topologies and ensure that the TailScale clients handle all of these situations correctly. + +This may also be used as a drop-in replacement for UTM or Tart on ARM Macs for quickly spinning up +test VMs. It has the added benefit that, unlike UTM which uses AppleScript, it can be run +via SSH. 
+ +This uses Virtualization.framework which only supports arm64. The binaries only build for arm64. + + +## Components + +The application is built in two components: + +The tailmac command line utility is used to set up and configure VM instances. The Host.app does the heavy lifting. + +You will typically initiate all interactions via the tailmac command-line util. + +For a full list of options: +``` +tailmac -h +``` + + +## Building + +``` +% make all +``` + +Will build both the tailmac command line util and Host.app. You will need a developer account. The default bundle identifiers +default to TailScale owned ids, so if you don't have (or aren't using) a TailScale dev account, you will need to change this. +This should build automatically as long as you have a valid developer cert. Signing is automatic. The binaries both +require the virtualization entitlement, so they do need to be signed. + +There are separate recipes in the makefile to rebuild the individual components if needed. + +All binaries are copied to the bin directory. + + +## Locations + +All vm images, restore images, block device files, save states, and other supporting files are persisted at ~/VM.bundle + +Each vm gets its own directory. These can be archived for posterity to preserve a particular image and/or state. +The mere existence of a directory containing all of the required files in ~/VM.bundle is sufficient for tailmac to +be able to see and run it. ~/VM.bundle and it's contents *is* tailmac's state. No other state is maintained elsewhere. + +Each vm has its own custom configuration which can be modified while the vm is idle. It's simple JSON - you may +modify this directly, or using 'tailmac configure'. 
+ + +## Installing + +### Default a parameters + +* The default virtio socket device port is 51009 +* The default server socket for the virtual network device is /tmp/qemu-dgram.sock +* The default memory size is 4Gb +* The default mac address for the socket based networking is 52:cc:cc:cc:cc:01 +* The default mac address for the standard ethernet interface is 52:cc:cc:cc:ce:01 + +### Creating and managing VMs + + You generally perform all interactions via the tailmac command line util. A NAT ethernet device is provided so + you can ssh into your instance. The ethernet IP will be dhcp assigned by the host and can be determined by parsing + the contents of /var/db/dhcpd_leases + +#### Creation + +To create a new VM (this will grab a restore image for what apples deems a 'latest; if needed). Restore images are large +(on the order of 10 Gb) and installation after downloading takes a few minutes. If you wish to use a custom restore image, +specify it with the --image option. If RestoreImage.ipsw exists in ~/VM.bundle, it will be used. macOS versions from +12 to 15 have been tested and appear to work correctly. +``` +tailmac create --id my_vm_id +``` + +With a custom restore image and parameters: +``` +tailmac create --id my_custom_vm_id --image "/images/macos_ventura.ipsw" --mac 52:cc:cc:cc:cc:07 --mem 8000000000 --sock "/temp/custom.sock" --port 52345 +``` + +A typical workflow would be to create single VM, manually set it up the way you wish including the installation of any required client side software +(tailscaled or the client-side test harness for example) then clone that images as required and back up your +images for future use. + +Fetching and persisting pre-configured images is left as an exercise for the reader (for now). A previously used image can simply be copied to the +~/VM.bundle directory under a unique path and tailmac will automatically pick it up. No versioning is supported so old images may stop working in +the future. 
To delete a VM image, you may simply remove its directory under ~/VM.bundle or
+ + To gracefully stop a running VM and save its state (this is a fire and forget thing): + + ``` + tailmac stop --id machine_1 + ``` + +Manually closing a VM's window will save the VM's state (if possible) and is the equivalent of running 'tailmac stop --id vm_id' + + To halt a running vm without saving its state: + ``` + tailmac halt --id machine_1 + ``` diff --git a/tstest/tailmac/Swift/Common/Config.swift b/tstest/tailmac/Swift/Common/Config.swift new file mode 100644 index 0000000000000..01d5069b0049d --- /dev/null +++ b/tstest/tailmac/Swift/Common/Config.swift @@ -0,0 +1,125 @@ +// Copyright (c) Tailscale Inc & AUTHORS +// SPDX-License-Identifier: BSD-3-Clause + +import Foundation + +let kDefaultDiskSizeGb: Int64 = 72 +let kDefaultMemSizeGb: UInt64 = 72 + + +/// Represents a configuration for a virtual machine +class Config: Codable { + var serverSocket = "/tmp/qemu-dgram.sock" + var memorySize = (kDefaultMemSizeGb * 1024 * 1024 * 1024) as UInt64 + var mac = "52:cc:cc:cc:cc:01" + var ethermac = "52:cc:cc:cc:ce:01" + var port: UInt32 = 51009 + + // The virtual machines ID. Also double as the directory name under which + // we will store configuration, block device, etc. 
+ let vmID: String + + required init(from decoder: Decoder) throws { + let container = try decoder.container(keyedBy: CodingKeys.self) + if let ethermac = try container.decodeIfPresent(String.self, forKey: .ethermac) { + self.ethermac = ethermac + } + if let serverSocket = try container.decodeIfPresent(String.self, forKey: .serverSocket) { + self.serverSocket = serverSocket + } + if let memorySize = try container.decodeIfPresent(UInt64.self, forKey: .memorySize) { + self.memorySize = memorySize + } + if let port = try container.decodeIfPresent(UInt32.self, forKey: .port) { + self.port = port + } + if let mac = try container.decodeIfPresent(String.self, forKey: .mac) { + self.mac = mac + } + if let vmID = try container.decodeIfPresent(String.self, forKey: .vmID) { + self.vmID = vmID + } else { + self.vmID = "default" + } + } + + init(_ vmID: String = "default") { + self.vmID = vmID + let configFile = vmDataURL.appendingPathComponent("config.json") + if FileManager.default.fileExists(atPath: configFile.path()) { + print("Using config file at path \(configFile)") + if let jsonData = try? Data(contentsOf: configFile) { + let config = try! JSONDecoder().decode(Config.self, from: jsonData) + self.serverSocket = config.serverSocket + self.memorySize = config.memorySize + self.mac = config.mac + self.port = config.port + self.ethermac = config.ethermac + } + } + } + + func persist() { + let configFile = vmDataURL.appendingPathComponent("config.json") + let data = try! JSONEncoder().encode(self) + try! 
data.write(to: configFile) + } + + lazy var restoreImageURL: URL = { + vmBundleURL.appendingPathComponent("RestoreImage.ipsw") + }() + + // The VM Data URL holds the specific files composing a unique VM guest instance + // By default, VM's are persisted at ~/VM.bundle/ + lazy var vmDataURL = { + let dataURL = vmBundleURL.appendingPathComponent(vmID) + return dataURL + }() + + lazy var auxiliaryStorageURL = { + vmDataURL.appendingPathComponent("AuxiliaryStorage") + }() + + lazy var diskImageURL = { + vmDataURL.appendingPathComponent("Disk.img") + }() + + lazy var diskSize: Int64 = { + kDefaultDiskSizeGb * 1024 * 1024 * 1024 + }() + + lazy var hardwareModelURL = { + vmDataURL.appendingPathComponent("HardwareModel") + }() + + lazy var machineIdentifierURL = { + vmDataURL.appendingPathComponent("MachineIdentifier") + }() + + lazy var saveFileURL = { + vmDataURL.appendingPathComponent("SaveFile.vzvmsave") + }() + +} + +// The VM Bundle URL holds the restore image and a set of VM images +// By default, VM's are persisted at ~/VM.bundle +var vmBundleURL: URL = { + let vmBundlePath = NSHomeDirectory() + "/VM.bundle/" + createDir(vmBundlePath) + let bundleURL = URL(fileURLWithPath: vmBundlePath) + return bundleURL +}() + + +func createDir(_ path: String) { + do { + try FileManager.default.createDirectory(atPath: path, withIntermediateDirectories: true) + } catch { + fatalError("Unable to create dir at \(path) \(error)") + } +} + + + + diff --git a/tstest/tailmac/Swift/Common/Notifications.swift b/tstest/tailmac/Swift/Common/Notifications.swift new file mode 100644 index 0000000000000..de2216e227eb7 --- /dev/null +++ b/tstest/tailmac/Swift/Common/Notifications.swift @@ -0,0 +1,12 @@ +// Copyright (c) Tailscale Inc & AUTHORS +// SPDX-License-Identifier: BSD-3-Clause + +import Foundation + +struct Notifications { + // Stops the virtual machine and saves its state + static var stop = Notification.Name("io.tailscale.macvmhost.stop") + + // Pauses the virtual machine and exits 
without saving its state + static var halt = Notification.Name("io.tailscale.macvmhost.halt") +} diff --git a/tstest/tailmac/Swift/Common/TailMacConfigHelper.swift b/tstest/tailmac/Swift/Common/TailMacConfigHelper.swift new file mode 100644 index 0000000000000..00f999a158c19 --- /dev/null +++ b/tstest/tailmac/Swift/Common/TailMacConfigHelper.swift @@ -0,0 +1,145 @@ +// Copyright (c) Tailscale Inc & AUTHORS +// SPDX-License-Identifier: BSD-3-Clause + +import Foundation +import Virtualization + +struct TailMacConfigHelper { + let config: Config + + func computeCPUCount() -> Int { + let totalAvailableCPUs = ProcessInfo.processInfo.processorCount + + var virtualCPUCount = totalAvailableCPUs <= 1 ? 1 : totalAvailableCPUs - 1 + virtualCPUCount = max(virtualCPUCount, VZVirtualMachineConfiguration.minimumAllowedCPUCount) + virtualCPUCount = min(virtualCPUCount, VZVirtualMachineConfiguration.maximumAllowedCPUCount) + + return virtualCPUCount + } + + func computeMemorySize() -> UInt64 { + // Set the amount of system memory to 4 GB; this is a baseline value + // that you can change depending on your use case. + var memorySize = config.memorySize + memorySize = max(memorySize, VZVirtualMachineConfiguration.minimumAllowedMemorySize) + memorySize = min(memorySize, VZVirtualMachineConfiguration.maximumAllowedMemorySize) + + return memorySize + } + + func createBootLoader() -> VZMacOSBootLoader { + return VZMacOSBootLoader() + } + + func createGraphicsDeviceConfiguration() -> VZMacGraphicsDeviceConfiguration { + let graphicsConfiguration = VZMacGraphicsDeviceConfiguration() + graphicsConfiguration.displays = [ + // The system arbitrarily chooses the resolution of the display to be 1920 x 1200. 
+ VZMacGraphicsDisplayConfiguration(widthInPixels: 1920, heightInPixels: 1200, pixelsPerInch: 80) + ] + + return graphicsConfiguration + } + + func createBlockDeviceConfiguration() -> VZVirtioBlockDeviceConfiguration { + do { + let diskImageAttachment = try VZDiskImageStorageDeviceAttachment(url: config.diskImageURL, readOnly: false) + let disk = VZVirtioBlockDeviceConfiguration(attachment: diskImageAttachment) + return disk + } catch { + fatalError("Failed to create Disk image. \(error)") + } + } + + func createSocketDeviceConfiguration() -> VZVirtioSocketDeviceConfiguration { + return VZVirtioSocketDeviceConfiguration() + } + + func createNetworkDeviceConfiguration() -> VZVirtioNetworkDeviceConfiguration { + let networkDevice = VZVirtioNetworkDeviceConfiguration() + networkDevice.macAddress = VZMACAddress(string: config.ethermac)! + + /* Bridged networking requires special entitlements from Apple + if let interface = VZBridgedNetworkInterface.networkInterfaces.first(where: { $0.identifier == "en0" }) { + let networkAttachment = VZBridgedNetworkDeviceAttachment(interface: interface) + networkDevice.attachment = networkAttachment + } else { + print("Assuming en0 for bridged ethernet. Could not findd adapter") + }*/ + + /// But we can do NAT without Tim Apple's approval + let networkAttachment = VZNATNetworkDeviceAttachment() + networkDevice.attachment = networkAttachment + + return networkDevice + } + + func createSocketNetworkDeviceConfiguration() -> VZVirtioNetworkDeviceConfiguration { + let networkDevice = VZVirtioNetworkDeviceConfiguration() + networkDevice.macAddress = VZMACAddress(string: config.mac)! 
+ + let socket = Darwin.socket(AF_UNIX, SOCK_DGRAM, 0) + + // Outbound network packets + let serverSocket = config.serverSocket + + // Inbound network packets + let clientSockId = config.vmID + let clientSocket = "/tmp/qemu-dgram-\(clientSockId).sock" + + unlink(clientSocket) + var clientAddr = sockaddr_un() + clientAddr.sun_family = sa_family_t(AF_UNIX) + clientSocket.withCString { ptr in + withUnsafeMutablePointer(to: &clientAddr.sun_path.0) { dest in + _ = strcpy(dest, ptr) + } + } + + let bindRes = Darwin.bind(socket, + withUnsafePointer(to: &clientAddr, { $0.withMemoryRebound(to: sockaddr.self, capacity: 1) { $0 } }), + socklen_t(MemoryLayout.size)) + + if bindRes == -1 { + print("Error binding virtual network client socket - \(String(cString: strerror(errno)))") + return networkDevice + } + + var serverAddr = sockaddr_un() + serverAddr.sun_family = sa_family_t(AF_UNIX) + serverSocket.withCString { ptr in + withUnsafeMutablePointer(to: &serverAddr.sun_path.0) { dest in + _ = strcpy(dest, ptr) + } + } + + let connectRes = Darwin.connect(socket, + withUnsafePointer(to: &serverAddr, { $0.withMemoryRebound(to: sockaddr.self, capacity: 1) { $0 } }), + socklen_t(MemoryLayout.size)) + + if connectRes == -1 { + print("Error binding virtual network server socket - \(String(cString: strerror(errno)))") + return networkDevice + } + + print("Virtual if mac address is \(config.mac)") + print("Client bound to \(clientSocket)") + print("Connected to server at \(serverSocket)") + print("Socket fd is \(socket)") + + + let handle = FileHandle(fileDescriptor: socket) + let device = VZFileHandleNetworkDeviceAttachment(fileHandle: handle) + networkDevice.attachment = device + return networkDevice + } + + func createPointingDeviceConfiguration() -> VZPointingDeviceConfiguration { + return VZMacTrackpadConfiguration() + } + + func createKeyboardConfiguration() -> VZKeyboardConfiguration { + return VZMacKeyboardConfiguration() + } +} + diff --git 
a/tstest/tailmac/Swift/Host/AppDelegate.swift b/tstest/tailmac/Swift/Host/AppDelegate.swift new file mode 100644 index 0000000000000..63c0192da236e --- /dev/null +++ b/tstest/tailmac/Swift/Host/AppDelegate.swift @@ -0,0 +1,52 @@ +// Copyright (c) Tailscale Inc & AUTHORS +// SPDX-License-Identifier: BSD-3-Clause + +import Cocoa +import Foundation +import Virtualization + +class AppDelegate: NSObject, NSApplicationDelegate { + @IBOutlet var window: NSWindow! + + @IBOutlet weak var virtualMachineView: VZVirtualMachineView! + + var runner: VMController! + + func applicationDidFinishLaunching(_ aNotification: Notification) { + DispatchQueue.main.async { [self] in + runner = VMController() + runner.createVirtualMachine() + virtualMachineView.virtualMachine = runner.virtualMachine + virtualMachineView.capturesSystemKeys = true + + // Configure the app to automatically respond to changes in the display size. + virtualMachineView.automaticallyReconfiguresDisplay = true + + let fileManager = FileManager.default + if fileManager.fileExists(atPath: config.saveFileURL.path) { + print("Restoring virtual machine state from \(config.saveFileURL)") + runner.restoreVirtualMachine() + } else { + print("Restarting virtual machine") + runner.startVirtualMachine() + } + + } + } + + func applicationShouldTerminateAfterLastWindowClosed(_ sender: NSApplication) -> Bool { + return true + } + + func applicationShouldTerminate(_ sender: NSApplication) -> NSApplication.TerminateReply { + if runner.virtualMachine.state == .running { + runner.pauseAndSaveVirtualMachine(completionHandler: { + sender.reply(toApplicationShouldTerminate: true) + }) + + return .terminateLater + } + + return .terminateNow + } +} diff --git a/tstest/tailmac/Swift/Host/Assets.xcassets/AccentColor.colorset/Contents.json b/tstest/tailmac/Swift/Host/Assets.xcassets/AccentColor.colorset/Contents.json new file mode 100644 index 0000000000000..eb87897008164 --- /dev/null +++ 
b/tstest/tailmac/Swift/Host/Assets.xcassets/AccentColor.colorset/Contents.json @@ -0,0 +1,11 @@ +{ + "colors" : [ + { + "idiom" : "universal" + } + ], + "info" : { + "author" : "xcode", + "version" : 1 + } +} diff --git a/tstest/tailmac/Swift/Host/Assets.xcassets/AppIcon.appiconset/Contents.json b/tstest/tailmac/Swift/Host/Assets.xcassets/AppIcon.appiconset/Contents.json new file mode 100644 index 0000000000000..3f00db43ec3c8 --- /dev/null +++ b/tstest/tailmac/Swift/Host/Assets.xcassets/AppIcon.appiconset/Contents.json @@ -0,0 +1,58 @@ +{ + "images" : [ + { + "idiom" : "mac", + "scale" : "1x", + "size" : "16x16" + }, + { + "idiom" : "mac", + "scale" : "2x", + "size" : "16x16" + }, + { + "idiom" : "mac", + "scale" : "1x", + "size" : "32x32" + }, + { + "idiom" : "mac", + "scale" : "2x", + "size" : "32x32" + }, + { + "idiom" : "mac", + "scale" : "1x", + "size" : "128x128" + }, + { + "idiom" : "mac", + "scale" : "2x", + "size" : "128x128" + }, + { + "idiom" : "mac", + "scale" : "1x", + "size" : "256x256" + }, + { + "idiom" : "mac", + "scale" : "2x", + "size" : "256x256" + }, + { + "idiom" : "mac", + "scale" : "1x", + "size" : "512x512" + }, + { + "idiom" : "mac", + "scale" : "2x", + "size" : "512x512" + } + ], + "info" : { + "author" : "xcode", + "version" : 1 + } +} diff --git a/tstest/tailmac/Swift/Host/Assets.xcassets/Contents.json b/tstest/tailmac/Swift/Host/Assets.xcassets/Contents.json new file mode 100644 index 0000000000000..73c00596a7fca --- /dev/null +++ b/tstest/tailmac/Swift/Host/Assets.xcassets/Contents.json @@ -0,0 +1,6 @@ +{ + "info" : { + "author" : "xcode", + "version" : 1 + } +} diff --git a/tstest/tailmac/Swift/Host/Base.lproj/MainMenu.xib b/tstest/tailmac/Swift/Host/Base.lproj/MainMenu.xib new file mode 100644 index 0000000000000..547e5f05dfb09 --- /dev/null +++ b/tstest/tailmac/Swift/Host/Base.lproj/MainMenu.xib @@ -0,0 +1,696 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + 
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + Default + + + + + + + Left to Right + + + + + + + Right to Left + + + + + + + + + + + Default + + + + + + + Left to Right + + + + + + + Right to Left + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff --git a/tstest/tailmac/Swift/Host/HostCli.swift b/tstest/tailmac/Swift/Host/HostCli.swift new file mode 100644 index 0000000000000..1318a09fa546e --- /dev/null +++ b/tstest/tailmac/Swift/Host/HostCli.swift @@ -0,0 +1,30 @@ +// Copyright (c) Tailscale Inc & AUTHORS +// SPDX-License-Identifier: BSD-3-Clause + +import Cocoa +import Foundation +import Virtualization +import ArgumentParser + +@main +struct HostCli: ParsableCommand { + static var configuration = CommandConfiguration( + abstract: "A utility for running virtual machines", + subcommands: [Run.self], + defaultSubcommand: Run.self) +} + +var config: Config = Config() + +extension 
HostCli { + struct Run: ParsableCommand { + @Option var id: String + + mutating func run() { + print("Running vm with identifier \(id)") + config = Config(id) + _ = NSApplicationMain(CommandLine.argc, CommandLine.unsafeArgv) + } + } +} + diff --git a/tstest/tailmac/Swift/Host/Info.plist b/tstest/tailmac/Swift/Host/Info.plist new file mode 100644 index 0000000000000..0c67376ebacb4 --- /dev/null +++ b/tstest/tailmac/Swift/Host/Info.plist @@ -0,0 +1,5 @@ + + + + + diff --git a/tstest/tailmac/Swift/Host/VMController.swift b/tstest/tailmac/Swift/Host/VMController.swift new file mode 100644 index 0000000000000..8774894c1157a --- /dev/null +++ b/tstest/tailmac/Swift/Host/VMController.swift @@ -0,0 +1,179 @@ +// Copyright (c) Tailscale Inc & AUTHORS +// SPDX-License-Identifier: BSD-3-Clause + +import Cocoa +import Foundation +import Virtualization +import Foundation + +class VMController: NSObject, VZVirtualMachineDelegate { + var virtualMachine: VZVirtualMachine! + + lazy var helper = TailMacConfigHelper(config: config) + + override init() { + super.init() + listenForNotifications() + } + + func listenForNotifications() { + let nc = DistributedNotificationCenter() + nc.addObserver(forName: Notifications.stop, object: nil, queue: nil) { notification in + if let vmID = notification.userInfo?["id"] as? String { + if config.vmID == vmID { + print("We've been asked to stop... Saving state and exiting") + self.pauseAndSaveVirtualMachine { + exit(0) + } + } + } + } + + nc.addObserver(forName: Notifications.halt, object: nil, queue: nil) { notification in + if let vmID = notification.userInfo?["id"] as? String { + if config.vmID == vmID { + print("We've been asked to stop... 
Saving state and exiting") + self.virtualMachine.pause { (result) in + if case let .failure(error) = result { + fatalError("Virtual machine failed to pause with \(error)") + } + exit(0) + } + } + } + } + } + + func createMacPlaform() -> VZMacPlatformConfiguration { + let macPlatform = VZMacPlatformConfiguration() + + let auxiliaryStorage = VZMacAuxiliaryStorage(contentsOf: config.auxiliaryStorageURL) + macPlatform.auxiliaryStorage = auxiliaryStorage + + if !FileManager.default.fileExists(atPath: config.vmDataURL.path()) { + fatalError("Missing Virtual Machine Bundle at \(config.vmDataURL). Run InstallationTool first to create it.") + } + + // Retrieve the hardware model and save this value to disk during installation. + guard let hardwareModelData = try? Data(contentsOf: config.hardwareModelURL) else { + fatalError("Failed to retrieve hardware model data.") + } + + guard let hardwareModel = VZMacHardwareModel(dataRepresentation: hardwareModelData) else { + fatalError("Failed to create hardware model.") + } + + if !hardwareModel.isSupported { + fatalError("The hardware model isn't supported on the current host") + } + macPlatform.hardwareModel = hardwareModel + + // Retrieve the machine identifier and save this value to disk during installation. + guard let machineIdentifierData = try? 
Data(contentsOf: config.machineIdentifierURL) else { + fatalError("Failed to retrieve machine identifier data.") + } + + guard let machineIdentifier = VZMacMachineIdentifier(dataRepresentation: machineIdentifierData) else { + fatalError("Failed to create machine identifier.") + } + macPlatform.machineIdentifier = machineIdentifier + + return macPlatform + } + + func createVirtualMachine() { + let virtualMachineConfiguration = VZVirtualMachineConfiguration() + + virtualMachineConfiguration.platform = createMacPlaform() + virtualMachineConfiguration.bootLoader = helper.createBootLoader() + virtualMachineConfiguration.cpuCount = helper.computeCPUCount() + virtualMachineConfiguration.memorySize = helper.computeMemorySize() + virtualMachineConfiguration.graphicsDevices = [helper.createGraphicsDeviceConfiguration()] + virtualMachineConfiguration.storageDevices = [helper.createBlockDeviceConfiguration()] + virtualMachineConfiguration.networkDevices = [helper.createNetworkDeviceConfiguration(), helper.createSocketNetworkDeviceConfiguration()] + virtualMachineConfiguration.pointingDevices = [helper.createPointingDeviceConfiguration()] + virtualMachineConfiguration.keyboards = [helper.createKeyboardConfiguration()] + virtualMachineConfiguration.socketDevices = [helper.createSocketDeviceConfiguration()] + + try! virtualMachineConfiguration.validate() + try! virtualMachineConfiguration.validateSaveRestoreSupport() + + virtualMachine = VZVirtualMachine(configuration: virtualMachineConfiguration) + virtualMachine.delegate = self + } + + + func startVirtualMachine() { + virtualMachine.start(completionHandler: { (result) in + if case let .failure(error) = result { + fatalError("Virtual machine failed to start with \(error)") + } + self.startSocketDevice() + }) + } + + func startSocketDevice() { + if let device = virtualMachine.socketDevices.first as? 
VZVirtioSocketDevice { + print("Configuring socket device at port \(config.port)") + device.connect(toPort: config.port) { connection in + //TODO: Anything? Or is this enough to bootstrap it on both ends? + } + } else { + print("Virtual machine could not start it's socket device") + } + } + + func resumeVirtualMachine() { + virtualMachine.resume(completionHandler: { (result) in + if case let .failure(error) = result { + fatalError("Virtual machine failed to resume with \(error)") + } + }) + } + + func restoreVirtualMachine() { + virtualMachine.restoreMachineStateFrom(url: config.saveFileURL, completionHandler: { [self] (error) in + // Remove the saved file. Whether success or failure, the state no longer matches the VM's disk. + let fileManager = FileManager.default + try! fileManager.removeItem(at: config.saveFileURL) + + if error == nil { + self.resumeVirtualMachine() + } else { + self.startVirtualMachine() + } + }) + } + + func saveVirtualMachine(completionHandler: @escaping () -> Void) { + virtualMachine.saveMachineStateTo(url: config.saveFileURL, completionHandler: { (error) in + guard error == nil else { + fatalError("Virtual machine failed to save with \(error!)") + } + + completionHandler() + }) + } + + func pauseAndSaveVirtualMachine(completionHandler: @escaping () -> Void) { + virtualMachine.pause { result in + if case let .failure(error) = result { + fatalError("Virtual machine failed to pause with \(error)") + } + + self.saveVirtualMachine(completionHandler: completionHandler) + } + } + + // MARK: - VZVirtualMachineDeleate + + func virtualMachine(_ virtualMachine: VZVirtualMachine, didStopWithError error: Error) { + print("Virtual machine did stop with error: \(error.localizedDescription)") + exit(-1) + } + + func guestDidStop(_ virtualMachine: VZVirtualMachine) { + print("Guest did stop virtual machine.") + exit(0) + } +} diff --git a/tstest/tailmac/Swift/TailMac/RestoreImage.swift b/tstest/tailmac/Swift/TailMac/RestoreImage.swift new file mode 100644 
index 0000000000000..c2b8b3dd6a878 --- /dev/null +++ b/tstest/tailmac/Swift/TailMac/RestoreImage.swift @@ -0,0 +1,58 @@ +// Copyright (c) Tailscale Inc & AUTHORS +// SPDX-License-Identifier: BSD-3-Clause + +import Foundation +import Virtualization + +class RestoreImage: NSObject { + private var downloadObserver: NSKeyValueObservation? + + // MARK: Observe the download progress. + + var restoreImageURL: URL + + init(_ dest: URL) { + restoreImageURL = dest + } + + public func download(completionHandler: @escaping () -> Void) { + print("Attempting to download latest available restore image.") + VZMacOSRestoreImage.fetchLatestSupported { [self](result: Result) in + switch result { + case let .failure(error): + fatalError(error.localizedDescription) + + case let .success(restoreImage): + downloadRestoreImage(restoreImage: restoreImage, completionHandler: completionHandler) + } + } + } + + private func downloadRestoreImage(restoreImage: VZMacOSRestoreImage, completionHandler: @escaping () -> Void) { + let downloadTask = URLSession.shared.downloadTask(with: restoreImage.url) { localURL, response, error in + if let error = error { + fatalError("Download failed. \(error.localizedDescription).") + } + + do { + try FileManager.default.moveItem(at: localURL!, to: self.restoreImageURL) + } catch { + fatalError("Failed to move downloaded restore image to \(self.restoreImageURL) \(error).") + } + + + completionHandler() + } + + var lastPct = 0 + downloadObserver = downloadTask.progress.observe(\.fractionCompleted, options: [.initial, .new]) { (progress, change) in + let pct = Int(change.newValue! 
* 100) + if pct != lastPct { + print("Restore image download progress: \(pct)%") + lastPct = pct + } + } + downloadTask.resume() + } +} + diff --git a/tstest/tailmac/Swift/TailMac/TailMac.swift b/tstest/tailmac/Swift/TailMac/TailMac.swift new file mode 100644 index 0000000000000..56f651696e12c --- /dev/null +++ b/tstest/tailmac/Swift/TailMac/TailMac.swift @@ -0,0 +1,334 @@ +// Copyright (c) Tailscale Inc & AUTHORS +// SPDX-License-Identifier: BSD-3-Clause + +import Foundation +import Virtualization +import ArgumentParser + +var usage = +""" +Installs and configures VMs suitable for use with natlab + +To create a new VM (this will grab a restore image if needed) +tailmac create --id + +To refresh an existing restore image: +tailmac refresh + +To clone a vm (this will clone the mac and port as well) +tailmac clone --identfier --target-id + +To reconfigure a vm: +tailmac configure --id --mac 11:22:33:44:55:66 --port 12345 --mem 8000000000000 -sock "/tmp/mySock.sock" + +To run a vm: +tailmac run --id + +To stop a vm: (this may take a minute - the vm needs to persist it's state) +tailmac stop --id + +To halt a vm without persisting its state +tailmac halt --id + +To delete a vm: +tailmac delete --id + +To list the available VM images: +tailmac ls +""" + +@main +struct Tailmac: ParsableCommand { + static var configuration = CommandConfiguration( + abstract: "A utility for setting up VM images", + usage: usage, + subcommands: [Create.self, Clone.self, Delete.self, Configure.self, Stop.self, Run.self, Ls.self, Halt.self], + defaultSubcommand: Ls.self) +} + +extension Tailmac { + struct Ls: ParsableCommand { + mutating func run() { + do { + let dirs = try FileManager.default.contentsOfDirectory(atPath: vmBundleURL.path()) + var images = [String]() + + // This assumes we don't put anything else interesting in our VM.bundle dir + // You may need to add some other exclusions or checks here if that's the case. 
+ for dir in dirs { + if !dir.contains("ipsw") { + images.append(URL(fileURLWithPath: dir).lastPathComponent) + } + } + print("Available images:\n\(images)") + } catch { + fatalError("Failed to query available images \(error)") + } + } + } +} + +extension Tailmac { + struct Stop: ParsableCommand { + @Option(help: "The vm identifier") var id: String + + mutating func run() { + print("Stopping vm with id \(id). This may take some time!") + let nc = DistributedNotificationCenter() + nc.post(name: Notifications.stop, object: nil, userInfo: ["id": id]) + } + } +} + +extension Tailmac { + struct Halt: ParsableCommand { + @Option(help: "The vm identifier") var id: String + + mutating func run() { + print("Halting vm with id \(id)") + let nc = DistributedNotificationCenter() + nc.post(name: Notifications.halt, object: nil, userInfo: ["id": id]) + } + } +} + +extension Tailmac { + struct Run: ParsableCommand { + @Option(help: "The vm identifier") var id: String + @Flag(help: "Tail the TailMac log output instead of returning immediatly") var tail + + mutating func run() { + let process = Process() + let stdOutPipe = Pipe() + let appPath = "./Host.app/Contents/MacOS/Host" + + process.executableURL = URL( + fileURLWithPath: appPath, + isDirectory: false, + relativeTo: NSRunningApplication.current.bundleURL + ) + + if !FileManager.default.fileExists(atPath: appPath) { + fatalError("Could not find Host.app. 
This must be co-located with the tailmac utility") + } + + process.arguments = ["run", "--id", id] + + do { + process.standardOutput = stdOutPipe + try process.run() + } catch { + fatalError("Unable to launch the vm process") + } + + // This doesn't print until we exit which is not ideal, but at least we + // get the output + if tail != 0 { + let outHandle = stdOutPipe.fileHandleForReading + + let queue = OperationQueue() + NotificationCenter.default.addObserver( + forName: NSNotification.Name.NSFileHandleDataAvailable, + object: outHandle, queue: queue) + { + notification -> Void in + let data = outHandle.availableData + if data.count > 0 { + if let str = String(data: data, encoding: String.Encoding.utf8) { + print(str) + } + } + outHandle.waitForDataInBackgroundAndNotify() + } + outHandle.waitForDataInBackgroundAndNotify() + process.waitUntilExit() + } + } + } +} + +extension Tailmac { + struct Configure: ParsableCommand { + @Option(help: "The vm identifier") var id: String + @Option(help: "The mac address of the socket network interface") var mac: String? + @Option(help: "The port for the virtio socket device") var port: String? + @Option(help: "The named socket for the socket network interface") var sock: String? + @Option(help: "The desired RAM in bytes") var mem: String? + @Option(help: "The ethernet address for a standard NAT adapter") var ethermac: String? + + mutating func run() { + let config = Config(id) + + let vmExists = FileManager.default.fileExists(atPath: config.vmDataURL.path()) + if !vmExists { + print("VM with id \(id) doesn't exist. Cannot configure.") + return + } + + if let mac { + config.mac = mac + } + if let port, let portInt = UInt32(port) { + config.port = portInt + } + if let ethermac { + config.ethermac = ethermac + } + if let mem, let membytes = UInt64(mem) { + config.memorySize = membytes + } + if let sock { + config.serverSocket = sock + } + + config.persist() + + let str = String(data:try! 
JSONEncoder().encode(config), encoding: .utf8)! + print("New Config: \(str)") + } + } +} + +extension Tailmac { + struct Delete: ParsableCommand { + @Option(help: "The vm identifer") var id: String? + + mutating func run() { + guard let id else { + print("Usage: Installer delete --id=") + return + } + + let config = Config(id) + + let vmExists = FileManager.default.fileExists(atPath: config.vmDataURL.path()) + if !vmExists { + print("VM with id \(id) doesn't exist. Cannot delete.") + return + } + + do { + try FileManager.default.removeItem(at: config.vmDataURL) + } catch { + print("Whoops... Deletion failed \(error)") + } + } + } +} + + +extension Tailmac { + struct Clone: ParsableCommand { + @Option(help: "The vm identifier") var id: String + @Option(help: "The vm identifier for the cloned vm") var targetId: String + + mutating func run() { + + let config = Config(id) + let targetConfig = Config(targetId) + + if id == targetId { + fatalError("The ids match. Clone failed.") + } + + let vmExists = FileManager.default.fileExists(atPath: config.vmDataURL.path()) + if !vmExists { + print("VM with id \(id) doesn't exist. Cannot clone.") + return + } + + print("Cloning \(config.vmDataURL) to \(targetConfig.vmDataURL)") + do { + try FileManager.default.copyItem(at: config.vmDataURL, to: targetConfig.vmDataURL) + } catch { + print("Whoops... Cloning failed \(error)") + } + } + } +} + +extension Tailmac { + struct RefreshImage: ParsableCommand { + mutating func run() { + let config = Config() + let exists = FileManager.default.fileExists(atPath: config.restoreImageURL.path()) + if exists { + try? FileManager.default.removeItem(at: config.restoreImageURL) + } + let restoreImage = RestoreImage(config.restoreImageURL) + restoreImage.download { + print("Restore image refreshed") + } + } + } +} + +extension Tailmac { + struct Create: ParsableCommand { + @Option(help: "The vm identifier. 
Each VM instance needs a unique ID.") var id: String + @Option(help: "The mac address of the socket network interface") var mac: String? + @Option(help: "The port for the virtio socket device") var port: String? + @Option(help: "The named socket for the socket network interface") var sock: String? + @Option(help: "The desired RAM in bytes") var mem: String? + @Option(help: "The ethernet address for a standard NAT adapter") var ethermac: String? + @Option(help: "The image name to build from. If omitted we will use RestoreImage.ipsw in ~/VM.bundle and download it if needed") var image: String? + + mutating func run() { + buildVM(id) + } + + func buildVM(_ id: String) { + print("Configuring vm with id \(id)") + + let config = Config(id) + let installer = VMInstaller(config) + + let vmExists = FileManager.default.fileExists(atPath: config.vmDataURL.path()) + if vmExists { + print("VM with id \(id) already exists. No action taken.") + return + } + + createDir(config.vmDataURL.path()) + + if let mac { + config.mac = mac + } + if let port, let portInt = UInt32(port) { + config.port = portInt + } + if let ethermac { + config.ethermac = ethermac + } + if let mem, let membytes = UInt64(mem) { + config.memorySize = membytes + } + if let sock { + config.serverSocket = sock + } + + config.persist() + + let restoreImagePath = image ?? config.restoreImageURL.path() + + let exists = FileManager.default.fileExists(atPath: restoreImagePath) + if exists { + print("Using existing restore image at \(restoreImagePath)") + installer.installMacOS(ipswURL: URL(fileURLWithPath: restoreImagePath)) + } else { + if image != nil { + fatalError("Unable to find custom restore image") + } + + print("Downloading default restore image to \(config.restoreImageURL)") + let restoreImage = RestoreImage(URL(fileURLWithPath: restoreImagePath)) + restoreImage.download { + // Install from the restore image that you downloaded. 
+ installer.installMacOS(ipswURL: URL(fileURLWithPath: restoreImagePath)) + } + } + + dispatchMain() + } + } +} diff --git a/tstest/tailmac/Swift/TailMac/VMInstaller.swift b/tstest/tailmac/Swift/TailMac/VMInstaller.swift new file mode 100644 index 0000000000000..568b6efc4bfe0 --- /dev/null +++ b/tstest/tailmac/Swift/TailMac/VMInstaller.swift @@ -0,0 +1,140 @@ +// Copyright (c) Tailscale Inc & AUTHORS +// SPDX-License-Identifier: BSD-3-Clause + +import Foundation +import Virtualization + +class VMInstaller: NSObject { + private var installationObserver: NSKeyValueObservation? + private var virtualMachine: VZVirtualMachine! + + private var config: Config + private var helper: TailMacConfigHelper + + init(_ config: Config) { + self.config = config + helper = TailMacConfigHelper(config: config) + } + + public func installMacOS(ipswURL: URL) { + print("Attempting to install from IPSW at \(ipswURL).") + VZMacOSRestoreImage.load(from: ipswURL, completionHandler: { [self](result: Result) in + switch result { + case let .failure(error): + fatalError(error.localizedDescription) + + case let .success(restoreImage): + installMacOS(restoreImage: restoreImage) + } + }) + } + + // MARK: - Internal helper functions. + + private func installMacOS(restoreImage: VZMacOSRestoreImage) { + guard let macOSConfiguration = restoreImage.mostFeaturefulSupportedConfiguration else { + fatalError("No supported configuration available.") + } + + if !macOSConfiguration.hardwareModel.isSupported { + fatalError("macOSConfiguration configuration isn't supported on the current host.") + } + + DispatchQueue.main.async { [self] in + setupVirtualMachine(macOSConfiguration: macOSConfiguration) + startInstallation(restoreImageURL: restoreImage.url) + } + } + + // MARK: Create the Mac platform configuration. 
+ + private func createMacPlatformConfiguration(macOSConfiguration: VZMacOSConfigurationRequirements) -> VZMacPlatformConfiguration { + let macPlatformConfiguration = VZMacPlatformConfiguration() + + + let auxiliaryStorage: VZMacAuxiliaryStorage + do { + auxiliaryStorage = try VZMacAuxiliaryStorage(creatingStorageAt: config.auxiliaryStorageURL, + hardwareModel: macOSConfiguration.hardwareModel, + options: []) + } catch { + fatalError("Unable to create aux storage at \(config.auxiliaryStorageURL) \(error)") + } + macPlatformConfiguration.auxiliaryStorage = auxiliaryStorage + macPlatformConfiguration.hardwareModel = macOSConfiguration.hardwareModel + macPlatformConfiguration.machineIdentifier = VZMacMachineIdentifier() + + // Store the hardware model and machine identifier to disk so that you + // can retrieve them for subsequent boots. + try! macPlatformConfiguration.hardwareModel.dataRepresentation.write(to: config.hardwareModelURL) + try! macPlatformConfiguration.machineIdentifier.dataRepresentation.write(to: config.machineIdentifierURL) + + return macPlatformConfiguration + } + + private func setupVirtualMachine(macOSConfiguration: VZMacOSConfigurationRequirements) { + let virtualMachineConfiguration = VZVirtualMachineConfiguration() + + virtualMachineConfiguration.platform = createMacPlatformConfiguration(macOSConfiguration: macOSConfiguration) + virtualMachineConfiguration.cpuCount = helper.computeCPUCount() + if virtualMachineConfiguration.cpuCount < macOSConfiguration.minimumSupportedCPUCount { + fatalError("CPUCount isn't supported by the macOS configuration.") + } + + virtualMachineConfiguration.memorySize = helper.computeMemorySize() + if virtualMachineConfiguration.memorySize < macOSConfiguration.minimumSupportedMemorySize { + fatalError("memorySize isn't supported by the macOS configuration.") + } + + createDiskImage() + + virtualMachineConfiguration.bootLoader = helper.createBootLoader() + virtualMachineConfiguration.graphicsDevices = 
[helper.createGraphicsDeviceConfiguration()] + virtualMachineConfiguration.storageDevices = [helper.createBlockDeviceConfiguration()] + virtualMachineConfiguration.networkDevices = [helper.createNetworkDeviceConfiguration(), helper.createSocketNetworkDeviceConfiguration()] + virtualMachineConfiguration.pointingDevices = [helper.createPointingDeviceConfiguration()] + virtualMachineConfiguration.keyboards = [helper.createKeyboardConfiguration()] + + try! virtualMachineConfiguration.validate() + try! virtualMachineConfiguration.validateSaveRestoreSupport() + + virtualMachine = VZVirtualMachine(configuration: virtualMachineConfiguration) + } + + private func startInstallation(restoreImageURL: URL) { + let installer = VZMacOSInstaller(virtualMachine: virtualMachine, restoringFromImageAt: restoreImageURL) + + print("Starting installation.") + installer.install(completionHandler: { (result: Result) in + if case let .failure(error) = result { + fatalError(error.localizedDescription) + } else { + print("Installation succeeded.") + } + }) + + // Observe installation progress. + installationObserver = installer.progress.observe(\.fractionCompleted, options: [.initial, .new]) { (progress, change) in + print("Installation progress: \(change.newValue! * 100).") + } + } + + // Create an empty disk image for the virtual machine. + private func createDiskImage() { + let diskFd = open(config.diskImageURL.path, O_RDWR | O_CREAT, S_IRUSR | S_IWUSR) + if diskFd == -1 { + fatalError("Cannot create disk image.") + } + + // 72 GB disk space. 
+ var result = ftruncate(diskFd, config.diskSize) + if result != 0 { + fatalError("ftruncate() failed.") + } + + result = close(diskFd) + if result != 0 { + fatalError("Failed to close the disk image.") + } + } +} diff --git a/tstest/tailmac/Swift/TailMac/main b/tstest/tailmac/Swift/TailMac/main new file mode 100755 index 0000000000000..bbb1b051a6dde Binary files /dev/null and b/tstest/tailmac/Swift/TailMac/main differ diff --git a/tstest/tailmac/TailMac.entitlements b/tstest/tailmac/TailMac.entitlements new file mode 100644 index 0000000000000..d7d0d6e8b6c29 --- /dev/null +++ b/tstest/tailmac/TailMac.entitlements @@ -0,0 +1,8 @@ + + + + + com.apple.security.virtualization + + + diff --git a/tstest/tailmac/TailMac.xcodeproj/project.pbxproj b/tstest/tailmac/TailMac.xcodeproj/project.pbxproj new file mode 100644 index 0000000000000..542901554f69b --- /dev/null +++ b/tstest/tailmac/TailMac.xcodeproj/project.pbxproj @@ -0,0 +1,581 @@ +// !$*UTF8*$! +{ + archiveVersion = 1; + classes = { + }; + objectVersion = 55; + objects = { + +/* Begin PBXBuildFile section */ + 8F87D52126C34111000EADA4 /* HostCli.swift in Sources */ = {isa = PBXBuildFile; fileRef = 8F87D52026C34111000EADA4 /* HostCli.swift */; }; + 8F87D52326C34111000EADA4 /* Assets.xcassets in Resources */ = {isa = PBXBuildFile; fileRef = 8F87D52226C34111000EADA4 /* Assets.xcassets */; }; + 8F87D52626C34111000EADA4 /* MainMenu.xib in Resources */ = {isa = PBXBuildFile; fileRef = 8F87D52426C34111000EADA4 /* MainMenu.xib */; }; + 8F87D53426C341AC000EADA4 /* TailMac.swift in Sources */ = {isa = PBXBuildFile; fileRef = 8F87D53326C341AC000EADA4 /* TailMac.swift */; }; + 8F87D54026C34259000EADA4 /* TailMacConfigHelper.swift in Sources */ = {isa = PBXBuildFile; fileRef = 8F87D53D26C34259000EADA4 /* TailMacConfigHelper.swift */; }; + 8F87D54426C34269000EADA4 /* TailMacConfigHelper.swift in Sources */ = {isa = PBXBuildFile; fileRef = 8F87D53D26C34259000EADA4 /* TailMacConfigHelper.swift */; }; + 8F87D54726C3427C000EADA4 /* 
Virtualization.framework in Frameworks */ = {isa = PBXBuildFile; fileRef = 8F87D54626C3427C000EADA4 /* Virtualization.framework */; }; + 8F87D54826C34286000EADA4 /* Virtualization.framework in Frameworks */ = {isa = PBXBuildFile; fileRef = 8F87D54626C3427C000EADA4 /* Virtualization.framework */; }; + C266EA7F2C5D2AD800DC57E3 /* Config.swift in Sources */ = {isa = PBXBuildFile; fileRef = C266EA7E2C5D2AD800DC57E3 /* Config.swift */; }; + C266EA802C5D2AE700DC57E3 /* Config.swift in Sources */ = {isa = PBXBuildFile; fileRef = C266EA7E2C5D2AD800DC57E3 /* Config.swift */; }; + C28759A42C6BB68D0032283D /* VMInstaller.swift in Sources */ = {isa = PBXBuildFile; fileRef = C28759A32C6BB68D0032283D /* VMInstaller.swift */; }; + C28759A72C6BB7F90032283D /* RestoreImage.swift in Sources */ = {isa = PBXBuildFile; fileRef = C28759A62C6BB7F90032283D /* RestoreImage.swift */; }; + C28759AC2C6C00840032283D /* ArgumentParser in Frameworks */ = {isa = PBXBuildFile; productRef = C28759AB2C6C00840032283D /* ArgumentParser */; }; + C28759AE2C6D0FC10032283D /* ArgumentParser in Frameworks */ = {isa = PBXBuildFile; productRef = C28759AD2C6D0FC10032283D /* ArgumentParser */; }; + C28759BC2C6D19D40032283D /* AppDelegate.swift in Sources */ = {isa = PBXBuildFile; fileRef = C28759BB2C6D19D40032283D /* AppDelegate.swift */; }; + C28759BE2C6D1A0F0032283D /* VMController.swift in Sources */ = {isa = PBXBuildFile; fileRef = C28759BD2C6D1A0F0032283D /* VMController.swift */; }; + C28759C02C6D1E980032283D /* Notifications.swift in Sources */ = {isa = PBXBuildFile; fileRef = C28759BF2C6D1E980032283D /* Notifications.swift */; }; + C28759C12C6D1E980032283D /* Notifications.swift in Sources */ = {isa = PBXBuildFile; fileRef = C28759BF2C6D1E980032283D /* Notifications.swift */; }; +/* End PBXBuildFile section */ + +/* Begin PBXCopyFilesBuildPhase section */ + 8F87D52F26C341AC000EADA4 /* CopyFiles */ = { + isa = PBXCopyFilesBuildPhase; + buildActionMask = 2147483647; + dstPath = /usr/share/man/man1/; + 
dstSubfolderSpec = 0; + files = ( + ); + runOnlyForDeploymentPostprocessing = 1; + }; +/* End PBXCopyFilesBuildPhase section */ + +/* Begin PBXFileReference section */ + 8F87D51D26C34111000EADA4 /* Host.app */ = {isa = PBXFileReference; explicitFileType = wrapper.application; includeInIndex = 0; path = Host.app; sourceTree = BUILT_PRODUCTS_DIR; }; + 8F87D52026C34111000EADA4 /* HostCli.swift */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.swift; path = HostCli.swift; sourceTree = ""; }; + 8F87D52226C34111000EADA4 /* Assets.xcassets */ = {isa = PBXFileReference; lastKnownFileType = folder.assetcatalog; path = Assets.xcassets; sourceTree = ""; }; + 8F87D52526C34111000EADA4 /* Base */ = {isa = PBXFileReference; lastKnownFileType = file.xib; name = Base; path = Base.lproj/MainMenu.xib; sourceTree = ""; }; + 8F87D53126C341AC000EADA4 /* TailMac */ = {isa = PBXFileReference; explicitFileType = "compiled.mach-o.executable"; includeInIndex = 0; path = TailMac; sourceTree = BUILT_PRODUCTS_DIR; }; + 8F87D53326C341AC000EADA4 /* TailMac.swift */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.swift; path = TailMac.swift; sourceTree = ""; }; + 8F87D53826C3423F000EADA4 /* TailMac.entitlements */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = text.plist.entitlements; path = TailMac.entitlements; sourceTree = ""; }; + 8F87D53B26C34250000EADA4 /* Host.entitlements */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = text.plist.entitlements; path = Host.entitlements; sourceTree = ""; }; + 8F87D53D26C34259000EADA4 /* TailMacConfigHelper.swift */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.swift; path = TailMacConfigHelper.swift; sourceTree = ""; }; + 8F87D54626C3427C000EADA4 /* Virtualization.framework */ = {isa = PBXFileReference; lastKnownFileType = wrapper.framework; name = Virtualization.framework; path = System/Library/Frameworks/Virtualization.framework; sourceTree = SDKROOT; }; + 
8FB90BE826D422FD00988F51 /* Info.plist */ = {isa = PBXFileReference; lastKnownFileType = text.plist.xml; path = Info.plist; sourceTree = ""; }; + B0E246092DFBF28FAEA2709F /* LICENSE.txt */ = {isa = PBXFileReference; includeInIndex = 1; lastKnownFileType = text; path = LICENSE.txt; sourceTree = ""; }; + C266EA7E2C5D2AD800DC57E3 /* Config.swift */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.swift; path = Config.swift; sourceTree = ""; }; + C28759A32C6BB68D0032283D /* VMInstaller.swift */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.swift; path = VMInstaller.swift; sourceTree = ""; }; + C28759A62C6BB7F90032283D /* RestoreImage.swift */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.swift; path = RestoreImage.swift; sourceTree = ""; }; + C28759A92C6BF8800032283D /* Makefile */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.make; path = Makefile; sourceTree = ""; }; + C28759AF2C6D10060032283D /* README.md */ = {isa = PBXFileReference; lastKnownFileType = net.daringfireball.markdown; path = README.md; sourceTree = ""; }; + C28759BB2C6D19D40032283D /* AppDelegate.swift */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.swift; path = AppDelegate.swift; sourceTree = ""; }; + C28759BD2C6D1A0F0032283D /* VMController.swift */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.swift; path = VMController.swift; sourceTree = ""; }; + C28759BF2C6D1E980032283D /* Notifications.swift */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.swift; path = Notifications.swift; sourceTree = ""; }; +/* End PBXFileReference section */ + +/* Begin PBXFrameworksBuildPhase section */ + 8F87D51A26C34111000EADA4 /* Frameworks */ = { + isa = PBXFrameworksBuildPhase; + buildActionMask = 2147483647; + files = ( + C28759AE2C6D0FC10032283D /* ArgumentParser in Frameworks */, + 8F87D54826C34286000EADA4 /* Virtualization.framework in Frameworks */, + ); + runOnlyForDeploymentPostprocessing = 0; + }; + 8F87D52E26C341AC000EADA4 /* 
Frameworks */ = { + isa = PBXFrameworksBuildPhase; + buildActionMask = 2147483647; + files = ( + C28759AC2C6C00840032283D /* ArgumentParser in Frameworks */, + 8F87D54726C3427C000EADA4 /* Virtualization.framework in Frameworks */, + ); + runOnlyForDeploymentPostprocessing = 0; + }; +/* End PBXFrameworksBuildPhase section */ + +/* Begin PBXGroup section */ + 09E329497FB7E44895839D88 /* LICENSE */ = { + isa = PBXGroup; + children = ( + B0E246092DFBF28FAEA2709F /* LICENSE.txt */, + ); + path = LICENSE; + sourceTree = ""; + }; + 8F87D51426C34111000EADA4 = { + isa = PBXGroup; + children = ( + C28759AF2C6D10060032283D /* README.md */, + C28759A92C6BF8800032283D /* Makefile */, + 8F87D53B26C34250000EADA4 /* Host.entitlements */, + 8F87D53826C3423F000EADA4 /* TailMac.entitlements */, + 8FDABC17270D0F9100D7FC60 /* Swift */, + 8F87D51E26C34111000EADA4 /* Products */, + 8F87D54526C3427C000EADA4 /* Frameworks */, + 09E329497FB7E44895839D88 /* LICENSE */, + ); + sourceTree = ""; + }; + 8F87D51E26C34111000EADA4 /* Products */ = { + isa = PBXGroup; + children = ( + 8F87D51D26C34111000EADA4 /* Host.app */, + 8F87D53126C341AC000EADA4 /* TailMac */, + ); + name = Products; + sourceTree = ""; + }; + 8F87D51F26C34111000EADA4 /* Host */ = { + isa = PBXGroup; + children = ( + 8F87D52026C34111000EADA4 /* HostCli.swift */, + C28759BD2C6D1A0F0032283D /* VMController.swift */, + C28759BB2C6D19D40032283D /* AppDelegate.swift */, + 8F87D52226C34111000EADA4 /* Assets.xcassets */, + 8F87D52426C34111000EADA4 /* MainMenu.xib */, + 8FB90BE826D422FD00988F51 /* Info.plist */, + ); + path = Host; + sourceTree = ""; + }; + 8F87D52C26C3418F000EADA4 /* Common */ = { + isa = PBXGroup; + children = ( + C266EA7E2C5D2AD800DC57E3 /* Config.swift */, + 8F87D53D26C34259000EADA4 /* TailMacConfigHelper.swift */, + C28759BF2C6D1E980032283D /* Notifications.swift */, + ); + path = Common; + sourceTree = ""; + }; + 8F87D53226C341AC000EADA4 /* TailMac */ = { + isa = PBXGroup; + children = ( + 
8F87D53326C341AC000EADA4 /* TailMac.swift */, + C28759A62C6BB7F90032283D /* RestoreImage.swift */, + C28759A32C6BB68D0032283D /* VMInstaller.swift */, + ); + path = TailMac; + sourceTree = ""; + }; + 8F87D54526C3427C000EADA4 /* Frameworks */ = { + isa = PBXGroup; + children = ( + 8F87D54626C3427C000EADA4 /* Virtualization.framework */, + ); + name = Frameworks; + sourceTree = ""; + }; + 8FDABC17270D0F9100D7FC60 /* Swift */ = { + isa = PBXGroup; + children = ( + 8F87D52C26C3418F000EADA4 /* Common */, + 8F87D51F26C34111000EADA4 /* Host */, + 8F87D53226C341AC000EADA4 /* TailMac */, + ); + path = Swift; + sourceTree = ""; + }; +/* End PBXGroup section */ + +/* Begin PBXNativeTarget section */ + 8F87D51C26C34111000EADA4 /* host */ = { + isa = PBXNativeTarget; + buildConfigurationList = 8F87D52926C34111000EADA4 /* Build configuration list for PBXNativeTarget "host" */; + buildPhases = ( + 8F87D51926C34111000EADA4 /* Sources */, + 8F87D51A26C34111000EADA4 /* Frameworks */, + 8F87D51B26C34111000EADA4 /* Resources */, + ); + buildRules = ( + ); + dependencies = ( + ); + name = host; + packageProductDependencies = ( + C28759AD2C6D0FC10032283D /* ArgumentParser */, + ); + productName = macOSVirtualMachineSampleApp; + productReference = 8F87D51D26C34111000EADA4 /* Host.app */; + productType = "com.apple.product-type.application"; + }; + 8F87D53026C341AC000EADA4 /* tailmac */ = { + isa = PBXNativeTarget; + buildConfigurationList = 8F87D53526C341AC000EADA4 /* Build configuration list for PBXNativeTarget "tailmac" */; + buildPhases = ( + 8F87D52D26C341AC000EADA4 /* Sources */, + 8F87D52E26C341AC000EADA4 /* Frameworks */, + 8F87D52F26C341AC000EADA4 /* CopyFiles */, + ); + buildRules = ( + ); + dependencies = ( + ); + name = tailmac; + packageProductDependencies = ( + C28759AB2C6C00840032283D /* ArgumentParser */, + ); + productName = InstallationTool; + productReference = 8F87D53126C341AC000EADA4 /* TailMac */; + productType = "com.apple.product-type.tool"; + }; +/* End 
PBXNativeTarget section */ + +/* Begin PBXProject section */ + 8F87D51526C34111000EADA4 /* Project object */ = { + isa = PBXProject; + attributes = { + BuildIndependentTargetsInParallel = 1; + DefaultBuildSystemTypeForWorkspace = Latest; + LastSwiftUpdateCheck = 1540; + LastUpgradeCheck = 1300; + ORGANIZATIONNAME = Apple; + TargetAttributes = { + 8F87D51C26C34111000EADA4 = { + CreatedOnToolsVersion = 13.0; + }; + 8F87D53026C341AC000EADA4 = { + CreatedOnToolsVersion = 13.0; + }; + }; + }; + buildConfigurationList = 8F87D51826C34111000EADA4 /* Build configuration list for PBXProject "TailMac" */; + compatibilityVersion = "Xcode 13.0"; + developmentRegion = en; + hasScannedForEncodings = 0; + knownRegions = ( + en, + Base, + ); + mainGroup = 8F87D51426C34111000EADA4; + packageReferences = ( + C28759AA2C6BFF0F0032283D /* XCRemoteSwiftPackageReference "swift-argument-parser" */, + ); + productRefGroup = 8F87D51E26C34111000EADA4 /* Products */; + projectDirPath = ""; + projectRoot = ""; + targets = ( + 8F87D51C26C34111000EADA4 /* host */, + 8F87D53026C341AC000EADA4 /* tailmac */, + ); + }; +/* End PBXProject section */ + +/* Begin PBXResourcesBuildPhase section */ + 8F87D51B26C34111000EADA4 /* Resources */ = { + isa = PBXResourcesBuildPhase; + buildActionMask = 2147483647; + files = ( + 8F87D52326C34111000EADA4 /* Assets.xcassets in Resources */, + 8F87D52626C34111000EADA4 /* MainMenu.xib in Resources */, + ); + runOnlyForDeploymentPostprocessing = 0; + }; +/* End PBXResourcesBuildPhase section */ + +/* Begin PBXSourcesBuildPhase section */ + 8F87D51926C34111000EADA4 /* Sources */ = { + isa = PBXSourcesBuildPhase; + buildActionMask = 2147483647; + files = ( + 8F87D52126C34111000EADA4 /* HostCli.swift in Sources */, + C28759C02C6D1E980032283D /* Notifications.swift in Sources */, + C266EA7F2C5D2AD800DC57E3 /* Config.swift in Sources */, + C28759BC2C6D19D40032283D /* AppDelegate.swift in Sources */, + C28759BE2C6D1A0F0032283D /* VMController.swift in Sources */, + 
8F87D54026C34259000EADA4 /* TailMacConfigHelper.swift in Sources */, + ); + runOnlyForDeploymentPostprocessing = 0; + }; + 8F87D52D26C341AC000EADA4 /* Sources */ = { + isa = PBXSourcesBuildPhase; + buildActionMask = 2147483647; + files = ( + 8F87D54426C34269000EADA4 /* TailMacConfigHelper.swift in Sources */, + C28759C12C6D1E980032283D /* Notifications.swift in Sources */, + C28759A72C6BB7F90032283D /* RestoreImage.swift in Sources */, + C266EA802C5D2AE700DC57E3 /* Config.swift in Sources */, + C28759A42C6BB68D0032283D /* VMInstaller.swift in Sources */, + 8F87D53426C341AC000EADA4 /* TailMac.swift in Sources */, + ); + runOnlyForDeploymentPostprocessing = 0; + }; +/* End PBXSourcesBuildPhase section */ + +/* Begin PBXVariantGroup section */ + 8F87D52426C34111000EADA4 /* MainMenu.xib */ = { + isa = PBXVariantGroup; + children = ( + 8F87D52526C34111000EADA4 /* Base */, + ); + name = MainMenu.xib; + sourceTree = ""; + }; +/* End PBXVariantGroup section */ + +/* Begin XCBuildConfiguration section */ + 8F87D52726C34111000EADA4 /* Debug */ = { + isa = XCBuildConfiguration; + buildSettings = { + ALWAYS_SEARCH_USER_PATHS = NO; + CLANG_ANALYZER_NONNULL = YES; + CLANG_ANALYZER_NUMBER_OBJECT_CONVERSION = YES_AGGRESSIVE; + CLANG_CXX_LANGUAGE_STANDARD = "gnu++17"; + CLANG_CXX_LIBRARY = "libc++"; + CLANG_ENABLE_MODULES = YES; + CLANG_ENABLE_OBJC_ARC = YES; + CLANG_ENABLE_OBJC_WEAK = YES; + CLANG_WARN_BLOCK_CAPTURE_AUTORELEASING = YES; + CLANG_WARN_BOOL_CONVERSION = YES; + CLANG_WARN_COMMA = YES; + CLANG_WARN_CONSTANT_CONVERSION = YES; + CLANG_WARN_DEPRECATED_OBJC_IMPLEMENTATIONS = YES; + CLANG_WARN_DIRECT_OBJC_ISA_USAGE = YES_ERROR; + CLANG_WARN_DOCUMENTATION_COMMENTS = YES; + CLANG_WARN_EMPTY_BODY = YES; + CLANG_WARN_ENUM_CONVERSION = YES; + CLANG_WARN_INFINITE_RECURSION = YES; + CLANG_WARN_INT_CONVERSION = YES; + CLANG_WARN_NON_LITERAL_NULL_CONVERSION = YES; + CLANG_WARN_OBJC_IMPLICIT_RETAIN_SELF = YES; + CLANG_WARN_OBJC_LITERAL_CONVERSION = YES; + CLANG_WARN_OBJC_ROOT_CLASS = 
YES_ERROR; + CLANG_WARN_QUOTED_INCLUDE_IN_FRAMEWORK_HEADER = YES; + CLANG_WARN_RANGE_LOOP_ANALYSIS = YES; + CLANG_WARN_STRICT_PROTOTYPES = YES; + CLANG_WARN_SUSPICIOUS_MOVE = YES; + CLANG_WARN_UNGUARDED_AVAILABILITY = YES_AGGRESSIVE; + CLANG_WARN_UNREACHABLE_CODE = YES; + CLANG_WARN__DUPLICATE_METHOD_MATCH = YES; + COPY_PHASE_STRIP = NO; + DEBUG_INFORMATION_FORMAT = dwarf; + ENABLE_STRICT_OBJC_MSGSEND = YES; + ENABLE_TESTABILITY = YES; + GCC_C_LANGUAGE_STANDARD = gnu11; + GCC_DYNAMIC_NO_PIC = NO; + GCC_NO_COMMON_BLOCKS = YES; + GCC_OPTIMIZATION_LEVEL = 0; + GCC_PREPROCESSOR_DEFINITIONS = ( + "DEBUG=1", + "$(inherited)", + ); + GCC_WARN_64_TO_32_BIT_CONVERSION = YES; + GCC_WARN_ABOUT_RETURN_TYPE = YES_ERROR; + GCC_WARN_UNDECLARED_SELECTOR = YES; + GCC_WARN_UNINITIALIZED_AUTOS = YES_AGGRESSIVE; + GCC_WARN_UNUSED_FUNCTION = YES; + GCC_WARN_UNUSED_VARIABLE = YES; + MACOSX_DEPLOYMENT_TARGET = 14.0; + MTL_ENABLE_DEBUG_INFO = INCLUDE_SOURCE; + MTL_FAST_MATH = YES; + ONLY_ACTIVE_ARCH = YES; + SDKROOT = macosx; + SWIFT_ACTIVE_COMPILATION_CONDITIONS = DEBUG; + SWIFT_OPTIMIZATION_LEVEL = "-Onone"; + }; + name = Debug; + }; + 8F87D52826C34111000EADA4 /* Release */ = { + isa = XCBuildConfiguration; + buildSettings = { + ALWAYS_SEARCH_USER_PATHS = NO; + CLANG_ANALYZER_NONNULL = YES; + CLANG_ANALYZER_NUMBER_OBJECT_CONVERSION = YES_AGGRESSIVE; + CLANG_CXX_LANGUAGE_STANDARD = "gnu++17"; + CLANG_CXX_LIBRARY = "libc++"; + CLANG_ENABLE_MODULES = YES; + CLANG_ENABLE_OBJC_ARC = YES; + CLANG_ENABLE_OBJC_WEAK = YES; + CLANG_WARN_BLOCK_CAPTURE_AUTORELEASING = YES; + CLANG_WARN_BOOL_CONVERSION = YES; + CLANG_WARN_COMMA = YES; + CLANG_WARN_CONSTANT_CONVERSION = YES; + CLANG_WARN_DEPRECATED_OBJC_IMPLEMENTATIONS = YES; + CLANG_WARN_DIRECT_OBJC_ISA_USAGE = YES_ERROR; + CLANG_WARN_DOCUMENTATION_COMMENTS = YES; + CLANG_WARN_EMPTY_BODY = YES; + CLANG_WARN_ENUM_CONVERSION = YES; + CLANG_WARN_INFINITE_RECURSION = YES; + CLANG_WARN_INT_CONVERSION = YES; + CLANG_WARN_NON_LITERAL_NULL_CONVERSION = YES; 
+ CLANG_WARN_OBJC_IMPLICIT_RETAIN_SELF = YES; + CLANG_WARN_OBJC_LITERAL_CONVERSION = YES; + CLANG_WARN_OBJC_ROOT_CLASS = YES_ERROR; + CLANG_WARN_QUOTED_INCLUDE_IN_FRAMEWORK_HEADER = YES; + CLANG_WARN_RANGE_LOOP_ANALYSIS = YES; + CLANG_WARN_STRICT_PROTOTYPES = YES; + CLANG_WARN_SUSPICIOUS_MOVE = YES; + CLANG_WARN_UNGUARDED_AVAILABILITY = YES_AGGRESSIVE; + CLANG_WARN_UNREACHABLE_CODE = YES; + CLANG_WARN__DUPLICATE_METHOD_MATCH = YES; + COPY_PHASE_STRIP = NO; + DEBUG_INFORMATION_FORMAT = "dwarf-with-dsym"; + ENABLE_NS_ASSERTIONS = NO; + ENABLE_STRICT_OBJC_MSGSEND = YES; + GCC_C_LANGUAGE_STANDARD = gnu11; + GCC_NO_COMMON_BLOCKS = YES; + GCC_WARN_64_TO_32_BIT_CONVERSION = YES; + GCC_WARN_ABOUT_RETURN_TYPE = YES_ERROR; + GCC_WARN_UNDECLARED_SELECTOR = YES; + GCC_WARN_UNINITIALIZED_AUTOS = YES_AGGRESSIVE; + GCC_WARN_UNUSED_FUNCTION = YES; + GCC_WARN_UNUSED_VARIABLE = YES; + MACOSX_DEPLOYMENT_TARGET = 14.0; + MTL_ENABLE_DEBUG_INFO = NO; + MTL_FAST_MATH = YES; + SDKROOT = macosx; + SWIFT_COMPILATION_MODE = wholemodule; + SWIFT_OPTIMIZATION_LEVEL = "-O"; + }; + name = Release; + }; + 8F87D52A26C34111000EADA4 /* Debug */ = { + isa = XCBuildConfiguration; + buildSettings = { + ARCHS = arm64; + ASSETCATALOG_COMPILER_APPICON_NAME = AppIcon; + ASSETCATALOG_COMPILER_GLOBAL_ACCENT_COLOR_NAME = AccentColor; + CODE_SIGN_ENTITLEMENTS = Host.entitlements; + CODE_SIGN_IDENTITY = "Mac Developer"; + CODE_SIGN_STYLE = Automatic; + COMBINE_HIDPI_IMAGES = YES; + CURRENT_PROJECT_VERSION = 1; + DEVELOPMENT_TEAM = W5364U7YZB; + ENABLE_APP_SANDBOX = NO; + ENABLE_USER_SELECTED_FILES = readwrite; + GENERATE_INFOPLIST_FILE = YES; + INFOPLIST_FILE = Swift/Host/Info.plist; + INFOPLIST_KEY_NSHumanReadableCopyright = ""; + INFOPLIST_KEY_NSMainNibFile = MainMenu; + INFOPLIST_KEY_NSMicrophoneUsageDescription = "Allow for using audio input devices."; + INFOPLIST_KEY_NSPrincipalClass = NSApplication; + LD_RUNPATH_SEARCH_PATHS = ( + "$(inherited)", + "@executable_path/../Frameworks", + ); + 
MACOSX_DEPLOYMENT_TARGET = 14.0; + MARKETING_VERSION = 1.0; + PRODUCT_BUNDLE_IDENTIFIER = com.tailscale.vnetMacHost; + PRODUCT_NAME = Host; + PROVISIONING_PROFILE_SPECIFIER = ""; + SWIFT_EMIT_LOC_STRINGS = YES; + SWIFT_VERSION = 5.0; + }; + name = Debug; + }; + 8F87D52B26C34111000EADA4 /* Release */ = { + isa = XCBuildConfiguration; + buildSettings = { + ARCHS = arm64; + ASSETCATALOG_COMPILER_APPICON_NAME = AppIcon; + ASSETCATALOG_COMPILER_GLOBAL_ACCENT_COLOR_NAME = AccentColor; + CODE_SIGN_ENTITLEMENTS = Host.entitlements; + CODE_SIGN_IDENTITY = "Mac Developer"; + CODE_SIGN_STYLE = Automatic; + COMBINE_HIDPI_IMAGES = YES; + CURRENT_PROJECT_VERSION = 1; + DEVELOPMENT_TEAM = W5364U7YZB; + ENABLE_APP_SANDBOX = NO; + ENABLE_USER_SELECTED_FILES = readwrite; + GENERATE_INFOPLIST_FILE = YES; + INFOPLIST_FILE = Swift/Host/Info.plist; + INFOPLIST_KEY_NSHumanReadableCopyright = ""; + INFOPLIST_KEY_NSMainNibFile = MainMenu; + INFOPLIST_KEY_NSMicrophoneUsageDescription = "Allow for using audio input devices."; + INFOPLIST_KEY_NSPrincipalClass = NSApplication; + LD_RUNPATH_SEARCH_PATHS = ( + "$(inherited)", + "@executable_path/../Frameworks", + ); + MACOSX_DEPLOYMENT_TARGET = 14.0; + MARKETING_VERSION = 1.0; + PRODUCT_BUNDLE_IDENTIFIER = com.tailscale.vnetMacHost; + PRODUCT_NAME = Host; + PROVISIONING_PROFILE_SPECIFIER = ""; + SWIFT_EMIT_LOC_STRINGS = YES; + SWIFT_VERSION = 5.0; + }; + name = Release; + }; + 8F87D53626C341AC000EADA4 /* Debug */ = { + isa = XCBuildConfiguration; + buildSettings = { + ARCHS = arm64; + CODE_SIGN_ENTITLEMENTS = TailMac.entitlements; + CODE_SIGN_IDENTITY = "Mac Developer"; + CODE_SIGN_STYLE = Automatic; + DEVELOPMENT_TEAM = W5364U7YZB; + ENABLE_USER_SELECTED_FILES = readwrite; + MACOSX_DEPLOYMENT_TARGET = 14.0; + PRODUCT_BUNDLE_IDENTIFIER = com.tailscale.vnetMacHostSetupTool; + PRODUCT_NAME = TailMac; + PROVISIONING_PROFILE_SPECIFIER = ""; + SWIFT_VERSION = 5.0; + }; + name = Debug; + }; + 8F87D53726C341AC000EADA4 /* Release */ = { + isa = 
XCBuildConfiguration; + buildSettings = { + ARCHS = arm64; + CODE_SIGN_ENTITLEMENTS = TailMac.entitlements; + CODE_SIGN_IDENTITY = "Mac Developer"; + CODE_SIGN_STYLE = Automatic; + DEVELOPMENT_TEAM = W5364U7YZB; + ENABLE_USER_SELECTED_FILES = readwrite; + MACOSX_DEPLOYMENT_TARGET = 14.0; + PRODUCT_BUNDLE_IDENTIFIER = com.tailscale.vnetMacHostSetupTool; + PRODUCT_NAME = TailMac; + PROVISIONING_PROFILE_SPECIFIER = ""; + SWIFT_VERSION = 5.0; + }; + name = Release; + }; +/* End XCBuildConfiguration section */ + +/* Begin XCConfigurationList section */ + 8F87D51826C34111000EADA4 /* Build configuration list for PBXProject "TailMac" */ = { + isa = XCConfigurationList; + buildConfigurations = ( + 8F87D52726C34111000EADA4 /* Debug */, + 8F87D52826C34111000EADA4 /* Release */, + ); + defaultConfigurationIsVisible = 0; + defaultConfigurationName = Release; + }; + 8F87D52926C34111000EADA4 /* Build configuration list for PBXNativeTarget "host" */ = { + isa = XCConfigurationList; + buildConfigurations = ( + 8F87D52A26C34111000EADA4 /* Debug */, + 8F87D52B26C34111000EADA4 /* Release */, + ); + defaultConfigurationIsVisible = 0; + defaultConfigurationName = Release; + }; + 8F87D53526C341AC000EADA4 /* Build configuration list for PBXNativeTarget "tailmac" */ = { + isa = XCConfigurationList; + buildConfigurations = ( + 8F87D53626C341AC000EADA4 /* Debug */, + 8F87D53726C341AC000EADA4 /* Release */, + ); + defaultConfigurationIsVisible = 0; + defaultConfigurationName = Release; + }; +/* End XCConfigurationList section */ + +/* Begin XCRemoteSwiftPackageReference section */ + C28759AA2C6BFF0F0032283D /* XCRemoteSwiftPackageReference "swift-argument-parser" */ = { + isa = XCRemoteSwiftPackageReference; + repositoryURL = "https://github.com/apple/swift-argument-parser.git"; + requirement = { + kind = upToNextMajorVersion; + minimumVersion = 1.5.0; + }; + }; +/* End XCRemoteSwiftPackageReference section */ + +/* Begin XCSwiftPackageProductDependency section */ + C28759AB2C6C00840032283D 
/* ArgumentParser */ = { + isa = XCSwiftPackageProductDependency; + package = C28759AA2C6BFF0F0032283D /* XCRemoteSwiftPackageReference "swift-argument-parser" */; + productName = ArgumentParser; + }; + C28759AD2C6D0FC10032283D /* ArgumentParser */ = { + isa = XCSwiftPackageProductDependency; + package = C28759AA2C6BFF0F0032283D /* XCRemoteSwiftPackageReference "swift-argument-parser" */; + productName = ArgumentParser; + }; +/* End XCSwiftPackageProductDependency section */ + }; + rootObject = 8F87D51526C34111000EADA4 /* Project object */; +} diff --git a/tstest/tailmac/TailMac.xcodeproj/project.xcworkspace/xcshareddata/IDEWorkspaceChecks.plist b/tstest/tailmac/TailMac.xcodeproj/project.xcworkspace/xcshareddata/IDEWorkspaceChecks.plist new file mode 100644 index 0000000000000..18d981003d68d --- /dev/null +++ b/tstest/tailmac/TailMac.xcodeproj/project.xcworkspace/xcshareddata/IDEWorkspaceChecks.plist @@ -0,0 +1,8 @@ + + + + + IDEDidComputeMac32BitWarning + + + diff --git a/tstest/tailmac/TailMac.xcodeproj/project.xcworkspace/xcshareddata/WorkspaceSettings.xcsettings b/tstest/tailmac/TailMac.xcodeproj/project.xcworkspace/xcshareddata/WorkspaceSettings.xcsettings new file mode 100644 index 0000000000000..3ddf867a10ac3 --- /dev/null +++ b/tstest/tailmac/TailMac.xcodeproj/project.xcworkspace/xcshareddata/WorkspaceSettings.xcsettings @@ -0,0 +1,8 @@ + + + + + BuildSystemType + Latest + + diff --git a/tstest/tailmac/TailMac.xcodeproj/project.xcworkspace/xcshareddata/swiftpm/Package.resolved b/tstest/tailmac/TailMac.xcodeproj/project.xcworkspace/xcshareddata/swiftpm/Package.resolved new file mode 100644 index 0000000000000..d3fbce1982aef --- /dev/null +++ b/tstest/tailmac/TailMac.xcodeproj/project.xcworkspace/xcshareddata/swiftpm/Package.resolved @@ -0,0 +1,15 @@ +{ + "originHash" : "59ba1edda695b389d6c9ac1809891cd779e4024f505b0ce1a9d5202b6762e38a", + "pins" : [ + { + "identity" : "swift-argument-parser", + "kind" : "remoteSourceControl", + "location" : 
"https://github.com/apple/swift-argument-parser.git", + "state" : { + "revision" : "41982a3656a71c768319979febd796c6fd111d5c", + "version" : "1.5.0" + } + } + ], + "version" : 3 +} diff --git a/tstest/tailmac/TailMac.xcodeproj/xcshareddata/xcschemes/host.xcscheme b/tstest/tailmac/TailMac.xcodeproj/xcshareddata/xcschemes/host.xcscheme new file mode 100644 index 0000000000000..060f48e0d6865 --- /dev/null +++ b/tstest/tailmac/TailMac.xcodeproj/xcshareddata/xcschemes/host.xcscheme @@ -0,0 +1,78 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff --git a/tstest/tailmac/TailMac.xcodeproj/xcshareddata/xcschemes/tailmac.xcscheme b/tstest/tailmac/TailMac.xcodeproj/xcshareddata/xcschemes/tailmac.xcscheme new file mode 100644 index 0000000000000..80cdd413eddf0 --- /dev/null +++ b/tstest/tailmac/TailMac.xcodeproj/xcshareddata/xcschemes/tailmac.xcscheme @@ -0,0 +1,78 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff --git a/tstest/tailmac/TailMac.xcodeproj/xcuserdata/jnobels.xcuserdatad/xcschemes/xcschememanagement.plist b/tstest/tailmac/TailMac.xcodeproj/xcuserdata/jnobels.xcuserdatad/xcschemes/xcschememanagement.plist new file mode 100644 index 0000000000000..543f1f77a0b13 --- /dev/null +++ b/tstest/tailmac/TailMac.xcodeproj/xcuserdata/jnobels.xcuserdatad/xcschemes/xcschememanagement.plist @@ -0,0 +1,37 @@ + + + + + SchemeUserState + + VMRunner.xcscheme_^#shared#^_ + + orderHint + 2 + + host.xcscheme_^#shared#^_ + + orderHint + 1 + + tailmac.xcscheme_^#shared#^_ + + orderHint + 0 + + + SuppressBuildableAutocreation + + 8FDABC39270D1DC600D7FC60 + + primary + + + 8FDABC58270D1FFE00D7FC60 + + primary + + + + + diff --git a/tsweb/varz/varz.go b/tsweb/varz/varz.go index bf788674ed369..561b2487710e3 100644 --- a/tsweb/varz/varz.go +++ b/tsweb/varz/varz.go @@ -10,7 +10,6 @@ import ( "fmt" "io" "net/http" - _ "net/http/pprof" "reflect" "runtime" "sort" @@ -274,19 +273,28 @@ type sortedKVs struct { // // This will evolve over time, or perhaps be 
replaced. func Handler(w http.ResponseWriter, r *http.Request) { - w.Header().Set("Content-Type", "text/plain;version=0.0.4;charset=utf-8") + ExpvarDoHandler(expvarDo)(w, r) +} - s := sortedKVsPool.Get().(*sortedKVs) - defer sortedKVsPool.Put(s) - s.kvs = s.kvs[:0] - expvarDo(func(kv expvar.KeyValue) { - s.kvs = append(s.kvs, sortedKV{kv, removeTypePrefixes(kv.Key)}) - }) - sort.Slice(s.kvs, func(i, j int) bool { - return s.kvs[i].sortKey < s.kvs[j].sortKey - }) - for _, e := range s.kvs { - writePromExpVar(w, "", e.KeyValue) +// ExpvarDoHandler handler returns a Handler like above, but takes an optional +// expvar.Do func allow the usage of alternative containers of metrics, other +// than the global expvar.Map. +func ExpvarDoHandler(expvarDoFunc func(f func(expvar.KeyValue))) func(http.ResponseWriter, *http.Request) { + return func(w http.ResponseWriter, r *http.Request) { + w.Header().Set("Content-Type", "text/plain;version=0.0.4;charset=utf-8") + + s := sortedKVsPool.Get().(*sortedKVs) + defer sortedKVsPool.Put(s) + s.kvs = s.kvs[:0] + expvarDoFunc(func(kv expvar.KeyValue) { + s.kvs = append(s.kvs, sortedKV{kv, removeTypePrefixes(kv.Key)}) + }) + sort.Slice(s.kvs, func(i, j int) bool { + return s.kvs[i].sortKey < s.kvs[j].sortKey + }) + for _, e := range s.kvs { + writePromExpVar(w, "", e.KeyValue) + } } } diff --git a/types/opt/bool.go b/types/opt/bool.go index 2a9efe31b8e25..0a3ee67ad2a6e 100644 --- a/types/opt/bool.go +++ b/types/opt/bool.go @@ -105,3 +105,29 @@ func (b *Bool) UnmarshalJSON(j []byte) error { } return nil } + +// BoolFlag is a wrapper for Bool that implements [flag.Value]. +type BoolFlag struct { + *Bool +} + +// Set the value of b, using any value supported by [strconv.ParseBool]. +func (b *BoolFlag) Set(s string) error { + v, err := strconv.ParseBool(s) + if err != nil { + return err + } + b.Bool.Set(v) + return nil +} + +// String returns "true" or "false" if the value is set, or an empty string otherwise. 
+func (b *BoolFlag) String() string { + if b == nil || b.Bool == nil { + return "" + } + if v, ok := b.Bool.Get(); ok { + return strconv.FormatBool(v) + } + return "" +} diff --git a/types/opt/bool_test.go b/types/opt/bool_test.go index 92ba275e1e809..dddbcfc195d04 100644 --- a/types/opt/bool_test.go +++ b/types/opt/bool_test.go @@ -5,7 +5,9 @@ package opt import ( "encoding/json" + "flag" "reflect" + "strings" "testing" ) @@ -127,3 +129,38 @@ func TestUnmarshalAlloc(t *testing.T) { t.Errorf("got %v allocs, want 0", n) } } + +func TestBoolFlag(t *testing.T) { + tests := []struct { + arguments string + wantParseError bool // expect flag.Parse to error + want Bool + }{ + {"", false, Bool("")}, + {"-test", true, Bool("")}, + {`-test=""`, true, Bool("")}, + {"-test invalid", true, Bool("")}, + + {"-test true", false, NewBool(true)}, + {"-test 1", false, NewBool(true)}, + + {"-test false", false, NewBool(false)}, + {"-test 0", false, NewBool(false)}, + } + + for _, tt := range tests { + var got Bool + fs := flag.NewFlagSet(t.Name(), flag.ContinueOnError) + fs.Var(&BoolFlag{&got}, "test", "test flag") + + arguments := strings.Split(tt.arguments, " ") + err := fs.Parse(arguments) + if (err != nil) != tt.wantParseError { + t.Errorf("flag.Parse(%q) returned error %v, want %v", arguments, err, tt.wantParseError) + } + + if got != tt.want { + t.Errorf("flag.Parse(%q) got %q, want %q", arguments, got, tt.want) + } + } +} diff --git a/types/prefs/item.go b/types/prefs/item.go new file mode 100644 index 0000000000000..1032041471a75 --- /dev/null +++ b/types/prefs/item.go @@ -0,0 +1,178 @@ +// Copyright (c) Tailscale Inc & AUTHORS +// SPDX-License-Identifier: BSD-3-Clause + +package prefs + +import ( + "fmt" + + jsonv2 "github.com/go-json-experiment/json" + "github.com/go-json-experiment/json/jsontext" + "tailscale.com/types/opt" + "tailscale.com/types/ptr" + "tailscale.com/types/views" + "tailscale.com/util/must" +) + +// Item is a single preference item that can be configured. 
+// T must either be an immutable type or implement the [views.ViewCloner] interface. +type Item[T any] struct { + preference[T] +} + +// ItemOf returns an [Item] configured with the specified value and [Options]. +func ItemOf[T any](v T, opts ...Options) Item[T] { + return Item[T]{preferenceOf(opt.ValueOf(must.Get(deepClone(v))), opts...)} +} + +// ItemWithOpts returns an unconfigured [Item] with the specified [Options]. +func ItemWithOpts[T any](opts ...Options) Item[T] { + return Item[T]{preferenceOf(opt.Value[T]{}, opts...)} +} + +// SetValue configures the preference with the specified value. +// It fails and returns [ErrManaged] if p is a managed preference, +// and [ErrReadOnly] if p is a read-only preference. +func (i *Item[T]) SetValue(val T) error { + return i.preference.SetValue(must.Get(deepClone(val))) +} + +// SetManagedValue configures the preference with the specified value +// and marks the preference as managed. +func (i *Item[T]) SetManagedValue(val T) { + i.preference.SetManagedValue(must.Get(deepClone(val))) +} + +// Clone returns a copy of i that aliases no memory with i. +// It is a runtime error to call [Item.Clone] if T contains pointers +// but does not implement [views.Cloner]. +func (i Item[T]) Clone() *Item[T] { + res := ptr.To(i) + if v, ok := i.ValueOk(); ok { + res.s.Value.Set(must.Get(deepClone(v))) + } + return res +} + +// Equal reports whether i and i2 are equal. +// If the template type T implements an Equal(T) bool method, it will be used +// instead of the == operator for value comparison. +// If T is not comparable, it reports false. 
+func (i Item[T]) Equal(i2 Item[T]) bool { + if i.s.Metadata != i2.s.Metadata { + return false + } + return i.s.Value.Equal(i2.s.Value) +} + +func deepClone[T any](v T) (T, error) { + if c, ok := any(v).(views.Cloner[T]); ok { + return c.Clone(), nil + } + if !views.ContainsPointers[T]() { + return v, nil + } + var zero T + return zero, fmt.Errorf("%T contains pointers, but does not implement Clone", v) +} + +// ItemView is a read-only view of an [Item][T], where T is a mutable type +// implementing [views.ViewCloner]. +type ItemView[T views.ViewCloner[T, V], V views.StructView[T]] struct { + // ж is the underlying mutable value, named with a hard-to-type + // character that looks pointy like a pointer. + // It is named distinctively to make you think of how dangerous it is to escape + // to callers. You must not let callers be able to mutate it. + ж *Item[T] +} + +// ItemViewOf returns a read-only view of i. +// It is used by [tailscale.com/cmd/viewer]. +func ItemViewOf[T views.ViewCloner[T, V], V views.StructView[T]](i *Item[T]) ItemView[T, V] { + return ItemView[T, V]{i} +} + +// Valid reports whether the underlying [Item] is non-nil. +func (iv ItemView[T, V]) Valid() bool { + return iv.ж != nil +} + +// AsStruct implements [views.StructView] by returning a clone of the preference +// which aliases no memory with the original. +func (iv ItemView[T, V]) AsStruct() *Item[T] { + if iv.ж == nil { + return nil + } + return iv.ж.Clone() +} + +// IsSet reports whether the preference has a value set. +func (iv ItemView[T, V]) IsSet() bool { + return iv.ж.IsSet() +} + +// Value returns a read-only view of the value if the preference has a value set. +// Otherwise, it returns a read-only view of its default value. +func (iv ItemView[T, V]) Value() V { + return iv.ж.Value().View() +} + +// ValueOk returns a read-only view of the value and true if the preference has a value set. +// Otherwise, it returns an invalid view and false. 
+func (iv ItemView[T, V]) ValueOk() (val V, ok bool) { + if val, ok := iv.ж.ValueOk(); ok { + return val.View(), true + } + return val, false +} + +// DefaultValue returns a read-only view of the default value of the preference. +func (iv ItemView[T, V]) DefaultValue() V { + return iv.ж.DefaultValue().View() +} + +// IsManaged reports whether the preference is managed via MDM, Group Policy, or similar means. +func (iv ItemView[T, V]) IsManaged() bool { + return iv.ж.IsManaged() +} + +// IsReadOnly reports whether the preference is read-only and cannot be changed by user. +func (iv ItemView[T, V]) IsReadOnly() bool { + return iv.ж.IsReadOnly() +} + +// Equal reports whether iv and iv2 are equal. +func (iv ItemView[T, V]) Equal(iv2 ItemView[T, V]) bool { + if !iv.Valid() && !iv2.Valid() { + return true + } + if iv.Valid() != iv2.Valid() { + return false + } + return iv.ж.Equal(*iv2.ж) +} + +// MarshalJSONV2 implements [jsonv2.MarshalerV2]. +func (iv ItemView[T, V]) MarshalJSONV2(out *jsontext.Encoder, opts jsonv2.Options) error { + return iv.ж.MarshalJSONV2(out, opts) +} + +// UnmarshalJSONV2 implements [jsonv2.UnmarshalerV2]. +func (iv *ItemView[T, V]) UnmarshalJSONV2(in *jsontext.Decoder, opts jsonv2.Options) error { + var x Item[T] + if err := x.UnmarshalJSONV2(in, opts); err != nil { + return err + } + iv.ж = &x + return nil +} + +// MarshalJSON implements [json.Marshaler]. +func (iv ItemView[T, V]) MarshalJSON() ([]byte, error) { + return jsonv2.Marshal(iv) // uses MarshalJSONV2 +} + +// UnmarshalJSON implements [json.Unmarshaler]. 
+func (iv *ItemView[T, V]) UnmarshalJSON(b []byte) error { + return jsonv2.Unmarshal(b, iv) // uses UnmarshalJSONV2 +} diff --git a/types/prefs/list.go b/types/prefs/list.go new file mode 100644 index 0000000000000..9830e79de86cb --- /dev/null +++ b/types/prefs/list.go @@ -0,0 +1,183 @@ +// Copyright (c) Tailscale Inc & AUTHORS +// SPDX-License-Identifier: BSD-3-Clause + +package prefs + +import ( + "net/netip" + "slices" + "time" + + jsonv2 "github.com/go-json-experiment/json" + "github.com/go-json-experiment/json/jsontext" + "golang.org/x/exp/constraints" + "tailscale.com/types/opt" + "tailscale.com/types/ptr" + "tailscale.com/types/views" +) + +// BasicType is a constraint that allows types whose underlying type is a predeclared +// boolean, numeric, or string type. +type BasicType interface { + ~bool | constraints.Integer | constraints.Float | constraints.Complex | ~string +} + +// ImmutableType is a constraint that allows [BasicType]s and certain well-known immutable types. +type ImmutableType interface { + BasicType | time.Time | netip.Addr | netip.Prefix | netip.AddrPort +} + +// List is a preference type that holds zero or more values of an [ImmutableType] T. +type List[T ImmutableType] struct { + preference[[]T] +} + +// ListOf returns a [List] configured with the specified value and [Options]. +func ListOf[T ImmutableType](v []T, opts ...Options) List[T] { + return List[T]{preferenceOf(opt.ValueOf(cloneSlice(v)), opts...)} +} + +// ListWithOpts returns an unconfigured [List] with the specified [Options]. +func ListWithOpts[T ImmutableType](opts ...Options) List[T] { + return List[T]{preferenceOf(opt.Value[[]T]{}, opts...)} +} + +// SetValue configures the preference with the specified value. +// It fails and returns [ErrManaged] if p is a managed preference, +// and [ErrReadOnly] if p is a read-only preference. 
+func (l *List[T]) SetValue(val []T) error { + return l.preference.SetValue(cloneSlice(val)) +} + +// SetManagedValue configures the preference with the specified value +// and marks the preference as managed. +func (l *List[T]) SetManagedValue(val []T) { + l.preference.SetManagedValue(cloneSlice(val)) +} + +// View returns a read-only view of l. +func (l *List[T]) View() ListView[T] { + return ListView[T]{l} +} + +// Clone returns a copy of l that aliases no memory with l. +func (l List[T]) Clone() *List[T] { + res := ptr.To(l) + if v, ok := l.s.Value.GetOk(); ok { + res.s.Value.Set(append(v[:0:0], v...)) + } + return res +} + +// Equal reports whether l and l2 are equal. +func (l List[T]) Equal(l2 List[T]) bool { + if l.s.Metadata != l2.s.Metadata { + return false + } + v1, ok1 := l.s.Value.GetOk() + v2, ok2 := l2.s.Value.GetOk() + if ok1 != ok2 { + return false + } + return !ok1 || slices.Equal(v1, v2) +} + +func cloneSlice[T ImmutableType](s []T) []T { + c := make([]T, len(s)) + copy(c, s) + return c +} + +// ListView is a read-only view of a [List]. +type ListView[T ImmutableType] struct { + // ж is the underlying mutable value, named with a hard-to-type + // character that looks pointy like a pointer. + // It is named distinctively to make you think of how dangerous it is to escape + // to callers. You must not let callers be able to mutate it. + ж *List[T] +} + +// Valid reports whether the underlying [List] is non-nil. +func (lv ListView[T]) Valid() bool { + return lv.ж != nil +} + +// AsStruct implements [views.StructView] by returning a clone of the [List] +// which aliases no memory with the original. +func (lv ListView[T]) AsStruct() *List[T] { + if lv.ж == nil { + return nil + } + return lv.ж.Clone() +} + +// IsSet reports whether the preference has a value set. +func (lv ListView[T]) IsSet() bool { + return lv.ж.IsSet() +} + +// Value returns a read-only view of the value if the preference has a value set. 
+// Otherwise, it returns a read-only view of its default value. +func (lv ListView[T]) Value() views.Slice[T] { + return views.SliceOf(lv.ж.Value()) +} + +// ValueOk returns a read-only view of the value and true if the preference has a value set. +// Otherwise, it returns an invalid view and false. +func (lv ListView[T]) ValueOk() (val views.Slice[T], ok bool) { + if v, ok := lv.ж.ValueOk(); ok { + return views.SliceOf(v), true + } + return views.Slice[T]{}, false +} + +// DefaultValue returns a read-only view of the default value of the preference. +func (lv ListView[T]) DefaultValue() views.Slice[T] { + return views.SliceOf(lv.ж.DefaultValue()) +} + +// IsManaged reports whether the preference is managed via MDM, Group Policy, or similar means. +func (lv ListView[T]) IsManaged() bool { + return lv.ж.IsManaged() +} + +// IsReadOnly reports whether the preference is read-only and cannot be changed by user. +func (lv ListView[T]) IsReadOnly() bool { + return lv.ж.IsReadOnly() +} + +// Equal reports whether lv and lv2 are equal. +func (lv ListView[T]) Equal(lv2 ListView[T]) bool { + if !lv.Valid() && !lv2.Valid() { + return true + } + if lv.Valid() != lv2.Valid() { + return false + } + return lv.ж.Equal(*lv2.ж) +} + +// MarshalJSONV2 implements [jsonv2.MarshalerV2]. +func (lv ListView[T]) MarshalJSONV2(out *jsontext.Encoder, opts jsonv2.Options) error { + return lv.ж.MarshalJSONV2(out, opts) +} + +// UnmarshalJSONV2 implements [jsonv2.UnmarshalerV2]. +func (lv *ListView[T]) UnmarshalJSONV2(in *jsontext.Decoder, opts jsonv2.Options) error { + var x List[T] + if err := x.UnmarshalJSONV2(in, opts); err != nil { + return err + } + lv.ж = &x + return nil +} + +// MarshalJSON implements [json.Marshaler]. +func (lv ListView[T]) MarshalJSON() ([]byte, error) { + return jsonv2.Marshal(lv) // uses MarshalJSONV2 +} + +// UnmarshalJSON implements [json.Unmarshaler]. 
+func (lv *ListView[T]) UnmarshalJSON(b []byte) error { + return jsonv2.Unmarshal(b, lv) // uses UnmarshalJSONV2 +} diff --git a/types/prefs/map.go b/types/prefs/map.go new file mode 100644 index 0000000000000..2bd32bfbdec75 --- /dev/null +++ b/types/prefs/map.go @@ -0,0 +1,159 @@ +// Copyright (c) Tailscale Inc & AUTHORS +// SPDX-License-Identifier: BSD-3-Clause + +package prefs + +import ( + "maps" + "net/netip" + + jsonv2 "github.com/go-json-experiment/json" + "github.com/go-json-experiment/json/jsontext" + "golang.org/x/exp/constraints" + "tailscale.com/types/opt" + "tailscale.com/types/ptr" + "tailscale.com/types/views" +) + +// MapKeyType is a constraint allowing types that can be used as [Map] and [StructMap] keys. +// To satisfy this requirement, a type must be comparable and must encode as a JSON string. +// See [jsonv2.Marshal] for more details. +type MapKeyType interface { + ~string | constraints.Integer | netip.Addr | netip.Prefix | netip.AddrPort +} + +// Map is a preference type that holds immutable key-value pairs. +type Map[K MapKeyType, V ImmutableType] struct { + preference[map[K]V] +} + +// MapOf returns a map configured with the specified value and [Options]. +func MapOf[K MapKeyType, V ImmutableType](v map[K]V, opts ...Options) Map[K, V] { + return Map[K, V]{preferenceOf(opt.ValueOf(v), opts...)} +} + +// MapWithOpts returns an unconfigured [Map] with the specified [Options]. +func MapWithOpts[K MapKeyType, V ImmutableType](opts ...Options) Map[K, V] { + return Map[K, V]{preferenceOf(opt.Value[map[K]V]{}, opts...)} +} + +// View returns a read-only view of m. +func (m *Map[K, V]) View() MapView[K, V] { + return MapView[K, V]{m} +} + +// Clone returns a copy of m that aliases no memory with m. +func (m Map[K, V]) Clone() *Map[K, V] { + res := ptr.To(m) + if v, ok := m.s.Value.GetOk(); ok { + res.s.Value.Set(maps.Clone(v)) + } + return res +} + +// Equal reports whether m and m2 are equal. 
+func (m Map[K, V]) Equal(m2 Map[K, V]) bool { + if m.s.Metadata != m2.s.Metadata { + return false + } + v1, ok1 := m.s.Value.GetOk() + v2, ok2 := m2.s.Value.GetOk() + if ok1 != ok2 { + return false + } + return !ok1 || maps.Equal(v1, v2) +} + +// MapView is a read-only view of a [Map]. +type MapView[K MapKeyType, V ImmutableType] struct { + // ж is the underlying mutable value, named with a hard-to-type + // character that looks pointy like a pointer. + // It is named distinctively to make you think of how dangerous it is to escape + // to callers. You must not let callers be able to mutate it. + ж *Map[K, V] +} + +// Valid reports whether the underlying [Map] is non-nil. +func (mv MapView[K, V]) Valid() bool { + return mv.ж != nil +} + +// AsStruct implements [views.StructView] by returning a clone of the [Map] +// which aliases no memory with the original. +func (mv MapView[K, V]) AsStruct() *Map[K, V] { + if mv.ж == nil { + return nil + } + return mv.ж.Clone() +} + +// IsSet reports whether the preference has a value set. +func (mv MapView[K, V]) IsSet() bool { + return mv.ж.IsSet() +} + +// Value returns a read-only view of the value if the preference has a value set. +// Otherwise, it returns a read-only view of its default value. +func (mv MapView[K, V]) Value() views.Map[K, V] { + return views.MapOf(mv.ж.Value()) +} + +// ValueOk returns a read-only view of the value and true if the preference has a value set. +// Otherwise, it returns an invalid view and false. +func (mv MapView[K, V]) ValueOk() (val views.Map[K, V], ok bool) { + if v, ok := mv.ж.ValueOk(); ok { + return views.MapOf(v), true + } + return views.Map[K, V]{}, false +} + +// DefaultValue returns a read-only view of the default value of the preference. +func (mv MapView[K, V]) DefaultValue() views.Map[K, V] { + return views.MapOf(mv.ж.DefaultValue()) +} + +// Managed reports whether the preference is managed via MDM, Group Policy, or similar means. 
+func (mv MapView[K, V]) Managed() bool { + return mv.ж.IsManaged() +} + +// ReadOnly reports whether the preference is read-only and cannot be changed by user. +func (mv MapView[K, V]) ReadOnly() bool { + return mv.ж.IsReadOnly() +} + +// Equal reports whether mv and mv2 are equal. +func (mv MapView[K, V]) Equal(mv2 MapView[K, V]) bool { + if !mv.Valid() && !mv2.Valid() { + return true + } + if mv.Valid() != mv2.Valid() { + return false + } + return mv.ж.Equal(*mv2.ж) +} + +// MarshalJSONV2 implements [jsonv2.MarshalerV2]. +func (mv MapView[K, V]) MarshalJSONV2(out *jsontext.Encoder, opts jsonv2.Options) error { + return mv.ж.MarshalJSONV2(out, opts) +} + +// UnmarshalJSONV2 implements [jsonv2.UnmarshalerV2]. +func (mv *MapView[K, V]) UnmarshalJSONV2(in *jsontext.Decoder, opts jsonv2.Options) error { + var x Map[K, V] + if err := x.UnmarshalJSONV2(in, opts); err != nil { + return err + } + mv.ж = &x + return nil +} + +// MarshalJSON implements [json.Marshaler]. +func (mv MapView[K, V]) MarshalJSON() ([]byte, error) { + return jsonv2.Marshal(mv) // uses MarshalJSONV2 +} + +// UnmarshalJSON implements [json.Unmarshaler]. +func (mv *MapView[K, V]) UnmarshalJSON(b []byte) error { + return jsonv2.Unmarshal(b, mv) // uses UnmarshalJSONV2 +} diff --git a/types/prefs/options.go b/types/prefs/options.go new file mode 100644 index 0000000000000..3769b784b731a --- /dev/null +++ b/types/prefs/options.go @@ -0,0 +1,22 @@ +// Copyright (c) Tailscale Inc & AUTHORS +// SPDX-License-Identifier: BSD-3-Clause + +package prefs + +// Options are used to configure additional parameters of a preference. +type Options func(s *metadata) + +var ( + // ReadOnly is an option that marks preference as read-only. + ReadOnly Options = markReadOnly + // Managed is an option that marks preference as managed. 
+ Managed Options = markManaged +) + +func markReadOnly(s *metadata) { + s.ReadOnly = true +} + +func markManaged(s *metadata) { + s.Managed = true +} diff --git a/types/prefs/prefs.go b/types/prefs/prefs.go new file mode 100644 index 0000000000000..3bbd237fe5efe --- /dev/null +++ b/types/prefs/prefs.go @@ -0,0 +1,179 @@ +// Copyright (c) Tailscale Inc & AUTHORS +// SPDX-License-Identifier: BSD-3-Clause + +// Package prefs contains types and functions to work with arbitrary +// preference hierarchies. +// +// Specifically, the package provides [Item], [List], [Map], [StructList] and [StructMap] +// types which represent individual preferences in a user-defined prefs struct. +// A valid prefs struct must contain one or more exported fields of the preference types, +// either directly or within nested structs, but not pointers to these types. +// Additionally to preferences, a prefs struct may contain any number of +// non-preference fields that will be marshalled and unmarshalled but are +// otherwise ignored by the prefs package. +// +// The preference types are compatible with the [tailscale.com/cmd/viewer] and +// [tailscale.com/cmd/cloner] utilities. It is recommended to generate a read-only view +// of the user-defined prefs structure and use it in place of prefs whenever the prefs +// should not be modified. +package prefs + +import ( + "errors" + + jsonv2 "github.com/go-json-experiment/json" + "github.com/go-json-experiment/json/jsontext" + "tailscale.com/types/opt" +) + +var ( + // ErrManaged is the error returned when attempting to modify a managed preference. + ErrManaged = errors.New("cannot modify a managed preference") + // ErrReadOnly is the error returned when attempting to modify a readonly preference. + ErrReadOnly = errors.New("cannot modify a readonly preference") +) + +// metadata holds type-agnostic preference metadata. +type metadata struct { + // Managed indicates whether the preference is managed via MDM, Group Policy, or other means. 
+ Managed bool `json:",omitzero"` + + // ReadOnly indicates whether the preference is read-only due to any other reasons, + // such as user's access rights. + ReadOnly bool `json:",omitzero"` +} + +// serializable is a JSON-serializable preference data. +type serializable[T any] struct { + // Value is an optional preference value that is set when the preference is + // configured by the user or managed by an admin. + Value opt.Value[T] `json:",omitzero"` + // Default is the default preference value to be used + // when the preference has not been configured. + Default T `json:",omitzero"` + // Metadata is any additional type-agnostic preference metadata to be serialized. + Metadata metadata `json:",inline"` +} + +// preference is an embeddable type that provides a common implementation for +// concrete preference types, such as [Item], [List], [Map], [StructList] and [StructMap]. +type preference[T any] struct { + s serializable[T] +} + +// preferenceOf returns a preference with the specified value and/or [Options]. +func preferenceOf[T any](v opt.Value[T], opts ...Options) preference[T] { + var m metadata + for _, o := range opts { + o(&m) + } + return preference[T]{serializable[T]{Value: v, Metadata: m}} +} + +// IsSet reports whether p has a value set. +func (p preference[T]) IsSet() bool { + return p.s.Value.IsSet() +} + +// Value returns the value of p if the preference has a value set. +// Otherwise, it returns its default value. +func (p preference[T]) Value() T { + val, _ := p.ValueOk() + return val +} + +// ValueOk returns the value of p and true if the preference has a value set. +// Otherwise, it returns its default value and false. +func (p preference[T]) ValueOk() (val T, ok bool) { + if val, ok = p.s.Value.GetOk(); ok { + return val, true + } + return p.DefaultValue(), false +} + +// SetValue configures the preference with the specified value. 
+// It fails and returns [ErrManaged] if p is a managed preference, +// and [ErrReadOnly] if p is a read-only preference. +func (p *preference[T]) SetValue(val T) error { + switch { + case p.s.Metadata.Managed: + return ErrManaged + case p.s.Metadata.ReadOnly: + return ErrReadOnly + default: + p.s.Value.Set(val) + return nil + } +} + +// ClearValue resets the preference to an unconfigured state. +// It fails and returns [ErrManaged] if p is a managed preference, +// and [ErrReadOnly] if p is a read-only preference. +func (p *preference[T]) ClearValue() error { + switch { + case p.s.Metadata.Managed: + return ErrManaged + case p.s.Metadata.ReadOnly: + return ErrReadOnly + default: + p.s.Value.Clear() + return nil + } +} + +// DefaultValue returns the default value of p. +func (p preference[T]) DefaultValue() T { + return p.s.Default +} + +// SetDefaultValue sets the default value of p. +func (p *preference[T]) SetDefaultValue(def T) { + p.s.Default = def +} + +// IsManaged reports whether p is managed via MDM, Group Policy, or similar means. +func (p preference[T]) IsManaged() bool { + return p.s.Metadata.Managed +} + +// SetManagedValue configures the preference with the specified value +// and marks the preference as managed. +func (p *preference[T]) SetManagedValue(val T) { + p.s.Value.Set(val) + p.s.Metadata.Managed = true +} + +// ClearManaged clears the managed flag of the preference without altering its value. +func (p *preference[T]) ClearManaged() { + p.s.Metadata.Managed = false +} + +// IsReadOnly reports whether p is read-only and cannot be changed by user. +func (p preference[T]) IsReadOnly() bool { + return p.s.Metadata.ReadOnly || p.s.Metadata.Managed +} + +// SetReadOnly sets the read-only status of p, preventing changes by a user if set to true. +func (p *preference[T]) SetReadOnly(readonly bool) { + p.s.Metadata.ReadOnly = readonly +} + +// MarshalJSONV2 implements [jsonv2.MarshalerV2]. 
+func (p preference[T]) MarshalJSONV2(out *jsontext.Encoder, opts jsonv2.Options) error { + return jsonv2.MarshalEncode(out, &p.s, opts) +} + +// UnmarshalJSONV2 implements [jsonv2.UnmarshalerV2]. +func (p *preference[T]) UnmarshalJSONV2(in *jsontext.Decoder, opts jsonv2.Options) error { + return jsonv2.UnmarshalDecode(in, &p.s, opts) +} + +// MarshalJSON implements [json.Marshaler]. +func (p preference[T]) MarshalJSON() ([]byte, error) { + return jsonv2.Marshal(p) // uses MarshalJSONV2 +} + +// UnmarshalJSON implements [json.Unmarshaler]. +func (p *preference[T]) UnmarshalJSON(b []byte) error { + return jsonv2.Unmarshal(b, p) // uses UnmarshalJSONV2 +} diff --git a/types/prefs/prefs_clone_test.go b/types/prefs/prefs_clone_test.go new file mode 100644 index 0000000000000..2a03fba8b092c --- /dev/null +++ b/types/prefs/prefs_clone_test.go @@ -0,0 +1,130 @@ +// Copyright (c) Tailscale Inc & AUTHORS +// SPDX-License-Identifier: BSD-3-Clause + +// Code generated by tailscale.com/cmd/cloner; DO NOT EDIT. + +package prefs + +import ( + "net/netip" + + "tailscale.com/types/ptr" +) + +// Clone makes a deep copy of TestPrefs. +// The result aliases no memory with the original. +func (src *TestPrefs) Clone() *TestPrefs { + if src == nil { + return nil + } + dst := new(TestPrefs) + *dst = *src + dst.StringSlice = *src.StringSlice.Clone() + dst.IntSlice = *src.IntSlice.Clone() + dst.StringStringMap = *src.StringStringMap.Clone() + dst.IntStringMap = *src.IntStringMap.Clone() + dst.AddrIntMap = *src.AddrIntMap.Clone() + dst.Bundle1 = *src.Bundle1.Clone() + dst.Bundle2 = *src.Bundle2.Clone() + dst.Generic = *src.Generic.Clone() + dst.BundleList = *src.BundleList.Clone() + dst.StringBundleMap = *src.StringBundleMap.Clone() + dst.IntBundleMap = *src.IntBundleMap.Clone() + dst.AddrBundleMap = *src.AddrBundleMap.Clone() + return dst +} + +// A compilation failure here means this code must be regenerated, with the command at the top of this file. 
+var _TestPrefsCloneNeedsRegeneration = TestPrefs(struct { + Int32Item Item[int32] + UInt64Item Item[uint64] + StringItem1 Item[string] + StringItem2 Item[string] + BoolItem1 Item[bool] + BoolItem2 Item[bool] + StringSlice List[string] + IntSlice List[int] + AddrItem Item[netip.Addr] + StringStringMap Map[string, string] + IntStringMap Map[int, string] + AddrIntMap Map[netip.Addr, int] + Bundle1 Item[*TestBundle] + Bundle2 Item[*TestBundle] + Generic Item[*TestGenericStruct[int]] + BundleList StructList[*TestBundle] + StringBundleMap StructMap[string, *TestBundle] + IntBundleMap StructMap[int, *TestBundle] + AddrBundleMap StructMap[netip.Addr, *TestBundle] + Group TestPrefsGroup +}{}) + +// Clone makes a deep copy of TestBundle. +// The result aliases no memory with the original. +func (src *TestBundle) Clone() *TestBundle { + if src == nil { + return nil + } + dst := new(TestBundle) + *dst = *src + if dst.Nested != nil { + dst.Nested = ptr.To(*src.Nested) + } + return dst +} + +// A compilation failure here means this code must be regenerated, with the command at the top of this file. +var _TestBundleCloneNeedsRegeneration = TestBundle(struct { + Name string + Nested *TestValueStruct +}{}) + +// Clone makes a deep copy of TestValueStruct. +// The result aliases no memory with the original. +func (src *TestValueStruct) Clone() *TestValueStruct { + if src == nil { + return nil + } + dst := new(TestValueStruct) + *dst = *src + return dst +} + +// A compilation failure here means this code must be regenerated, with the command at the top of this file. +var _TestValueStructCloneNeedsRegeneration = TestValueStruct(struct { + Value int +}{}) + +// Clone makes a deep copy of TestGenericStruct. +// The result aliases no memory with the original. 
+func (src *TestGenericStruct[T]) Clone() *TestGenericStruct[T] { + if src == nil { + return nil + } + dst := new(TestGenericStruct[T]) + *dst = *src + return dst +} + +// A compilation failure here means this code must be regenerated, with the command at the top of this file. +func _TestGenericStructCloneNeedsRegeneration[T ImmutableType](TestGenericStruct[T]) { + _TestGenericStructCloneNeedsRegeneration(struct { + Value T + }{}) +} + +// Clone makes a deep copy of TestPrefsGroup. +// The result aliases no memory with the original. +func (src *TestPrefsGroup) Clone() *TestPrefsGroup { + if src == nil { + return nil + } + dst := new(TestPrefsGroup) + *dst = *src + return dst +} + +// A compilation failure here means this code must be regenerated, with the command at the top of this file. +var _TestPrefsGroupCloneNeedsRegeneration = TestPrefsGroup(struct { + FloatItem Item[float64] + TestStringItem Item[TestStringType] +}{}) diff --git a/types/prefs/prefs_example/prefs_example_clone.go b/types/prefs/prefs_example/prefs_example_clone.go new file mode 100644 index 0000000000000..5c707b46343e1 --- /dev/null +++ b/types/prefs/prefs_example/prefs_example_clone.go @@ -0,0 +1,99 @@ +// Copyright (c) Tailscale Inc & AUTHORS +// SPDX-License-Identifier: BSD-3-Clause + +// Code generated by tailscale.com/cmd/cloner; DO NOT EDIT. + +package prefs_example + +import ( + "net/netip" + + "tailscale.com/drive" + "tailscale.com/tailcfg" + "tailscale.com/types/opt" + "tailscale.com/types/persist" + "tailscale.com/types/prefs" + "tailscale.com/types/preftype" +) + +// Clone makes a deep copy of Prefs. +// The result aliases no memory with the original. 
+func (src *Prefs) Clone() *Prefs { + if src == nil { + return nil + } + dst := new(Prefs) + *dst = *src + dst.AdvertiseTags = *src.AdvertiseTags.Clone() + dst.AdvertiseRoutes = *src.AdvertiseRoutes.Clone() + dst.DriveShares = *src.DriveShares.Clone() + dst.Persist = src.Persist.Clone() + return dst +} + +// A compilation failure here means this code must be regenerated, with the command at the top of this file. +var _PrefsCloneNeedsRegeneration = Prefs(struct { + ControlURL prefs.Item[string] + RouteAll prefs.Item[bool] + ExitNodeID prefs.Item[tailcfg.StableNodeID] + ExitNodeIP prefs.Item[netip.Addr] + ExitNodePrior tailcfg.StableNodeID + ExitNodeAllowLANAccess prefs.Item[bool] + CorpDNS prefs.Item[bool] + RunSSH prefs.Item[bool] + RunWebClient prefs.Item[bool] + WantRunning prefs.Item[bool] + LoggedOut prefs.Item[bool] + ShieldsUp prefs.Item[bool] + AdvertiseTags prefs.List[string] + Hostname prefs.Item[string] + NotepadURLs prefs.Item[bool] + ForceDaemon prefs.Item[bool] + Egg prefs.Item[bool] + AdvertiseRoutes prefs.List[netip.Prefix] + NoSNAT prefs.Item[bool] + NoStatefulFiltering prefs.Item[opt.Bool] + NetfilterMode prefs.Item[preftype.NetfilterMode] + OperatorUser prefs.Item[string] + ProfileName prefs.Item[string] + AutoUpdate AutoUpdatePrefs + AppConnector AppConnectorPrefs + PostureChecking prefs.Item[bool] + NetfilterKind prefs.Item[string] + DriveShares prefs.StructList[*drive.Share] + AllowSingleHosts prefs.Item[marshalAsTrueInJSON] + Persist *persist.Persist +}{}) + +// Clone makes a deep copy of AutoUpdatePrefs. +// The result aliases no memory with the original. +func (src *AutoUpdatePrefs) Clone() *AutoUpdatePrefs { + if src == nil { + return nil + } + dst := new(AutoUpdatePrefs) + *dst = *src + return dst +} + +// A compilation failure here means this code must be regenerated, with the command at the top of this file. 
+var _AutoUpdatePrefsCloneNeedsRegeneration = AutoUpdatePrefs(struct { + Check prefs.Item[bool] + Apply prefs.Item[opt.Bool] +}{}) + +// Clone makes a deep copy of AppConnectorPrefs. +// The result aliases no memory with the original. +func (src *AppConnectorPrefs) Clone() *AppConnectorPrefs { + if src == nil { + return nil + } + dst := new(AppConnectorPrefs) + *dst = *src + return dst +} + +// A compilation failure here means this code must be regenerated, with the command at the top of this file. +var _AppConnectorPrefsCloneNeedsRegeneration = AppConnectorPrefs(struct { + Advertise prefs.Item[bool] +}{}) diff --git a/types/prefs/prefs_example/prefs_example_view.go b/types/prefs/prefs_example/prefs_example_view.go new file mode 100644 index 0000000000000..0256bd7e6d25b --- /dev/null +++ b/types/prefs/prefs_example/prefs_example_view.go @@ -0,0 +1,239 @@ +// Copyright (c) Tailscale Inc & AUTHORS +// SPDX-License-Identifier: BSD-3-Clause + +// Code generated by tailscale/cmd/viewer; DO NOT EDIT. + +package prefs_example + +import ( + "encoding/json" + "errors" + "net/netip" + + "tailscale.com/drive" + "tailscale.com/tailcfg" + "tailscale.com/types/opt" + "tailscale.com/types/persist" + "tailscale.com/types/prefs" + "tailscale.com/types/preftype" +) + +//go:generate go run tailscale.com/cmd/cloner -clonefunc=false -type=Prefs,AutoUpdatePrefs,AppConnectorPrefs + +// View returns a readonly view of Prefs. +func (p *Prefs) View() PrefsView { + return PrefsView{ж: p} +} + +// PrefsView provides a read-only view over Prefs. +// +// Its methods should only be called if `Valid()` returns true. +type PrefsView struct { + // ж is the underlying mutable value, named with a hard-to-type + // character that looks pointy like a pointer. + // It is named distinctively to make you think of how dangerous it is to escape + // to callers. You must not let callers be able to mutate it. + ж *Prefs +} + +// Valid reports whether underlying value is non-nil. 
+func (v PrefsView) Valid() bool { return v.ж != nil } + +// AsStruct returns a clone of the underlying value which aliases no memory with +// the original. +func (v PrefsView) AsStruct() *Prefs { + if v.ж == nil { + return nil + } + return v.ж.Clone() +} + +func (v PrefsView) MarshalJSON() ([]byte, error) { return json.Marshal(v.ж) } + +func (v *PrefsView) UnmarshalJSON(b []byte) error { + if v.ж != nil { + return errors.New("already initialized") + } + if len(b) == 0 { + return nil + } + var x Prefs + if err := json.Unmarshal(b, &x); err != nil { + return err + } + v.ж = &x + return nil +} + +func (v PrefsView) ControlURL() prefs.Item[string] { return v.ж.ControlURL } +func (v PrefsView) RouteAll() prefs.Item[bool] { return v.ж.RouteAll } +func (v PrefsView) ExitNodeID() prefs.Item[tailcfg.StableNodeID] { return v.ж.ExitNodeID } +func (v PrefsView) ExitNodeIP() prefs.Item[netip.Addr] { return v.ж.ExitNodeIP } +func (v PrefsView) ExitNodePrior() tailcfg.StableNodeID { return v.ж.ExitNodePrior } +func (v PrefsView) ExitNodeAllowLANAccess() prefs.Item[bool] { return v.ж.ExitNodeAllowLANAccess } +func (v PrefsView) CorpDNS() prefs.Item[bool] { return v.ж.CorpDNS } +func (v PrefsView) RunSSH() prefs.Item[bool] { return v.ж.RunSSH } +func (v PrefsView) RunWebClient() prefs.Item[bool] { return v.ж.RunWebClient } +func (v PrefsView) WantRunning() prefs.Item[bool] { return v.ж.WantRunning } +func (v PrefsView) LoggedOut() prefs.Item[bool] { return v.ж.LoggedOut } +func (v PrefsView) ShieldsUp() prefs.Item[bool] { return v.ж.ShieldsUp } +func (v PrefsView) AdvertiseTags() prefs.ListView[string] { return v.ж.AdvertiseTags.View() } +func (v PrefsView) Hostname() prefs.Item[string] { return v.ж.Hostname } +func (v PrefsView) NotepadURLs() prefs.Item[bool] { return v.ж.NotepadURLs } +func (v PrefsView) ForceDaemon() prefs.Item[bool] { return v.ж.ForceDaemon } +func (v PrefsView) Egg() prefs.Item[bool] { return v.ж.Egg } +func (v PrefsView) AdvertiseRoutes() 
prefs.ListView[netip.Prefix] { return v.ж.AdvertiseRoutes.View() } +func (v PrefsView) NoSNAT() prefs.Item[bool] { return v.ж.NoSNAT } +func (v PrefsView) NoStatefulFiltering() prefs.Item[opt.Bool] { return v.ж.NoStatefulFiltering } +func (v PrefsView) NetfilterMode() prefs.Item[preftype.NetfilterMode] { return v.ж.NetfilterMode } +func (v PrefsView) OperatorUser() prefs.Item[string] { return v.ж.OperatorUser } +func (v PrefsView) ProfileName() prefs.Item[string] { return v.ж.ProfileName } +func (v PrefsView) AutoUpdate() AutoUpdatePrefs { return v.ж.AutoUpdate } +func (v PrefsView) AppConnector() AppConnectorPrefs { return v.ж.AppConnector } +func (v PrefsView) PostureChecking() prefs.Item[bool] { return v.ж.PostureChecking } +func (v PrefsView) NetfilterKind() prefs.Item[string] { return v.ж.NetfilterKind } +func (v PrefsView) DriveShares() prefs.StructListView[*drive.Share, drive.ShareView] { + return prefs.StructListViewOf(&v.ж.DriveShares) +} +func (v PrefsView) AllowSingleHosts() prefs.Item[marshalAsTrueInJSON] { return v.ж.AllowSingleHosts } +func (v PrefsView) Persist() persist.PersistView { return v.ж.Persist.View() } + +// A compilation failure here means this code must be regenerated, with the command at the top of this file. 
+var _PrefsViewNeedsRegeneration = Prefs(struct { + ControlURL prefs.Item[string] + RouteAll prefs.Item[bool] + ExitNodeID prefs.Item[tailcfg.StableNodeID] + ExitNodeIP prefs.Item[netip.Addr] + ExitNodePrior tailcfg.StableNodeID + ExitNodeAllowLANAccess prefs.Item[bool] + CorpDNS prefs.Item[bool] + RunSSH prefs.Item[bool] + RunWebClient prefs.Item[bool] + WantRunning prefs.Item[bool] + LoggedOut prefs.Item[bool] + ShieldsUp prefs.Item[bool] + AdvertiseTags prefs.List[string] + Hostname prefs.Item[string] + NotepadURLs prefs.Item[bool] + ForceDaemon prefs.Item[bool] + Egg prefs.Item[bool] + AdvertiseRoutes prefs.List[netip.Prefix] + NoSNAT prefs.Item[bool] + NoStatefulFiltering prefs.Item[opt.Bool] + NetfilterMode prefs.Item[preftype.NetfilterMode] + OperatorUser prefs.Item[string] + ProfileName prefs.Item[string] + AutoUpdate AutoUpdatePrefs + AppConnector AppConnectorPrefs + PostureChecking prefs.Item[bool] + NetfilterKind prefs.Item[string] + DriveShares prefs.StructList[*drive.Share] + AllowSingleHosts prefs.Item[marshalAsTrueInJSON] + Persist *persist.Persist +}{}) + +// View returns a readonly view of AutoUpdatePrefs. +func (p *AutoUpdatePrefs) View() AutoUpdatePrefsView { + return AutoUpdatePrefsView{ж: p} +} + +// AutoUpdatePrefsView provides a read-only view over AutoUpdatePrefs. +// +// Its methods should only be called if `Valid()` returns true. +type AutoUpdatePrefsView struct { + // ж is the underlying mutable value, named with a hard-to-type + // character that looks pointy like a pointer. + // It is named distinctively to make you think of how dangerous it is to escape + // to callers. You must not let callers be able to mutate it. + ж *AutoUpdatePrefs +} + +// Valid reports whether underlying value is non-nil. +func (v AutoUpdatePrefsView) Valid() bool { return v.ж != nil } + +// AsStruct returns a clone of the underlying value which aliases no memory with +// the original. 
+func (v AutoUpdatePrefsView) AsStruct() *AutoUpdatePrefs { + if v.ж == nil { + return nil + } + return v.ж.Clone() +} + +func (v AutoUpdatePrefsView) MarshalJSON() ([]byte, error) { return json.Marshal(v.ж) } + +func (v *AutoUpdatePrefsView) UnmarshalJSON(b []byte) error { + if v.ж != nil { + return errors.New("already initialized") + } + if len(b) == 0 { + return nil + } + var x AutoUpdatePrefs + if err := json.Unmarshal(b, &x); err != nil { + return err + } + v.ж = &x + return nil +} + +func (v AutoUpdatePrefsView) Check() prefs.Item[bool] { return v.ж.Check } +func (v AutoUpdatePrefsView) Apply() prefs.Item[opt.Bool] { return v.ж.Apply } + +// A compilation failure here means this code must be regenerated, with the command at the top of this file. +var _AutoUpdatePrefsViewNeedsRegeneration = AutoUpdatePrefs(struct { + Check prefs.Item[bool] + Apply prefs.Item[opt.Bool] +}{}) + +// View returns a readonly view of AppConnectorPrefs. +func (p *AppConnectorPrefs) View() AppConnectorPrefsView { + return AppConnectorPrefsView{ж: p} +} + +// AppConnectorPrefsView provides a read-only view over AppConnectorPrefs. +// +// Its methods should only be called if `Valid()` returns true. +type AppConnectorPrefsView struct { + // ж is the underlying mutable value, named with a hard-to-type + // character that looks pointy like a pointer. + // It is named distinctively to make you think of how dangerous it is to escape + // to callers. You must not let callers be able to mutate it. + ж *AppConnectorPrefs +} + +// Valid reports whether underlying value is non-nil. +func (v AppConnectorPrefsView) Valid() bool { return v.ж != nil } + +// AsStruct returns a clone of the underlying value which aliases no memory with +// the original. 
+func (v AppConnectorPrefsView) AsStruct() *AppConnectorPrefs { + if v.ж == nil { + return nil + } + return v.ж.Clone() +} + +func (v AppConnectorPrefsView) MarshalJSON() ([]byte, error) { return json.Marshal(v.ж) } + +func (v *AppConnectorPrefsView) UnmarshalJSON(b []byte) error { + if v.ж != nil { + return errors.New("already initialized") + } + if len(b) == 0 { + return nil + } + var x AppConnectorPrefs + if err := json.Unmarshal(b, &x); err != nil { + return err + } + v.ж = &x + return nil +} + +func (v AppConnectorPrefsView) Advertise() prefs.Item[bool] { return v.ж.Advertise } + +// A compilation failure here means this code must be regenerated, with the command at the top of this file. +var _AppConnectorPrefsViewNeedsRegeneration = AppConnectorPrefs(struct { + Advertise prefs.Item[bool] +}{}) diff --git a/types/prefs/prefs_example/prefs_test.go b/types/prefs/prefs_example/prefs_test.go new file mode 100644 index 0000000000000..aefbae9f2873a --- /dev/null +++ b/types/prefs/prefs_example/prefs_test.go @@ -0,0 +1,140 @@ +// Copyright (c) Tailscale Inc & AUTHORS +// SPDX-License-Identifier: BSD-3-Clause + +package prefs_example + +import ( + "fmt" + "net/netip" + + "tailscale.com/ipn" + "tailscale.com/types/prefs" +) + +func ExamplePrefs_AdvertiseRoutes_setValue() { + p := &Prefs{} + + // Initially, preferences are not configured. + fmt.Println("IsSet:", p.AdvertiseRoutes.IsSet()) // prints false + // And the Value method returns the default (or zero) value. + fmt.Println("Initial:", p.AdvertiseRoutes.Value()) // prints [] + + // Preferences can be configured with user-provided values using the + // SetValue method. It may fail if the preference is managed via syspolicy + // or is otherwise read-only. + routes := []netip.Prefix{netip.MustParsePrefix("192.168.1.1/24")} + if err := p.AdvertiseRoutes.SetValue(routes); err != nil { + // This block is never executed in the example because the + // AdvertiseRoutes preference is neither managed nor read-only. 
+ fmt.Println("SetValue:", err)
+ }
+ fmt.Println("IsSet:", p.AdvertiseRoutes.IsSet()) // prints true
+ fmt.Println("Value:", p.AdvertiseRoutes.Value()) // prints 192.168.1.1/24
+
+ // Preference values are copied on use; you cannot modify them after they are set.
+ routes[0] = netip.MustParsePrefix("10.10.10.0/24") // this has no effect
+ fmt.Println("Unchanged:", p.AdvertiseRoutes.Value()) // still prints 192.168.1.1/24
+ // If necessary, the value can be changed by calling the SetValue method again.
+ p.AdvertiseRoutes.SetValue(routes)
+ fmt.Println("Changed:", p.AdvertiseRoutes.Value()) // prints 10.10.10.0/24
+
+ // The following code is fine when defining default or baseline prefs, or
+ // in tests. However, assigning to a preference field directly overwrites
+ // syspolicy-managed values and metadata, so it should generally be avoided
+ // when working with the actual profile or device preferences.
+ // It is the caller's responsibility to use the mutable Prefs struct correctly.
+ defaults := &Prefs{WantRunning: prefs.ItemOf(true)}
+ defaults.CorpDNS = prefs.Item[bool]{}
+ defaults.ExitNodeAllowLANAccess = prefs.ItemOf(true)
+ _, _, _ = defaults.WantRunning, defaults.CorpDNS, defaults.ExitNodeAllowLANAccess
+
+ // In most contexts, preferences should only be read and never mutated.
+ // To make it easier to enforce this guarantee, a view type generated with
+ // [tailscale.com/cmd/viewer] can be used instead of the mutable Prefs struct.
+ // Preferences accessed via a view have the same set of non-mutating
+ // methods as the underlying preferences but do not expose [prefs.Item.SetValue] or
+ // other methods that modify the preference's value or state.
+ v := p.View()
+ // Additionally, non-mutating methods like [prefs.ItemView.Value] and [prefs.ItemView.ValueOk]
+ // return read-only views of the underlying values instead of the actual potentially mutable values.
+ // For example, on the next line Value() returns a views.Slice[netip.Prefix], not a []netip.Prefix. + _ = v.AdvertiseRoutes().Value() + fmt.Println("Via View:", v.AdvertiseRoutes().Value().At(0)) // prints 10.10.10.0/24 + fmt.Println("IsSet:", v.AdvertiseRoutes().IsSet()) // prints true + fmt.Println("IsManaged:", v.AdvertiseRoutes().IsManaged()) // prints false + fmt.Println("IsReadOnly:", v.AdvertiseRoutes().IsReadOnly()) // prints false + + // Output: + // IsSet: false + // Initial: [] + // IsSet: true + // Value: [192.168.1.1/24] + // Unchanged: [192.168.1.1/24] + // Changed: [10.10.10.0/24] + // Via View: 10.10.10.0/24 + // IsSet: true + // IsManaged: false + // IsReadOnly: false +} + +func ExamplePrefs_ControlURL_setDefaultValue() { + p := &Prefs{} + v := p.View() + + // We can set default values for preferences when their default values + // should differ from the zero values of the corresponding Go types. + // + // Note that in this example, we configure preferences via a mutable + // [Prefs] struct but fetch values via a read-only [PrefsView]. + // Typically, we set and get preference values in different parts + // of the codebase. + p.ControlURL.SetDefaultValue(ipn.DefaultControlURL) + // The default value is used if the preference is not configured... + fmt.Println("Default:", v.ControlURL().Value()) + p.ControlURL.SetValue("https://control.example.com") + fmt.Println("User Set:", v.ControlURL().Value()) + // ...including when it has been reset. + p.ControlURL.ClearValue() + fmt.Println("Reset to Default:", v.ControlURL().Value()) + + // Output: + // Default: https://controlplane.tailscale.com + // User Set: https://control.example.com + // Reset to Default: https://controlplane.tailscale.com +} + +func ExamplePrefs_ExitNodeID_setManagedValue() { + p := &Prefs{} + v := p.View() + + // We can mark preferences as being managed via syspolicy (e.g., via GP/MDM) + // by setting its managed value. 
+ // + // Note that in this example, we enforce syspolicy-managed values + // via a mutable [Prefs] struct but fetch values via a read-only [PrefsView]. + // This is typically spread throughout the codebase. + p.ExitNodeID.SetManagedValue("ManagedExitNode") + // Marking a preference as managed prevents it from being changed by the user. + if err := p.ExitNodeID.SetValue("CustomExitNode"); err != nil { + fmt.Println("SetValue:", err) // reports an error + } + fmt.Println("Exit Node:", v.ExitNodeID().Value()) // prints ManagedExitNode + + // Clients can hide or disable preferences that are managed or read-only. + fmt.Println("IsManaged:", v.ExitNodeID().IsManaged()) // prints true + fmt.Println("IsReadOnly:", v.ExitNodeID().IsReadOnly()) // prints true; managed preferences are always read-only. + + // ClearManaged is called when the preference is no longer managed, + // allowing the user to change it. + p.ExitNodeID.ClearManaged() + fmt.Println("IsManaged:", v.ExitNodeID().IsManaged()) // prints false + fmt.Println("IsReadOnly:", v.ExitNodeID().IsReadOnly()) // prints false + + // Output: + // SetValue: cannot modify a managed preference + // Exit Node: ManagedExitNode + // IsManaged: true + // IsReadOnly: true + // IsManaged: false + // IsReadOnly: false +} diff --git a/types/prefs/prefs_example/prefs_types.go b/types/prefs/prefs_example/prefs_types.go new file mode 100644 index 0000000000000..49f0d8c3c4b57 --- /dev/null +++ b/types/prefs/prefs_example/prefs_types.go @@ -0,0 +1,166 @@ +// Copyright (c) Tailscale Inc & AUTHORS +// SPDX-License-Identifier: BSD-3-Clause + +// Package prefs_example contains a [Prefs] type, which is like [tailscale.com/ipn.Prefs], +// but uses the [prefs] package to enhance individual preferences with state and metadata. +// +// It also includes testable examples utilizing the [Prefs] type. 
+// We made it a separate package to avoid circular dependencies +// and due to limitations in [tailscale.com/cmd/viewer] when +// generating code for test packages. +package prefs_example + +import ( + "net/netip" + + jsonv2 "github.com/go-json-experiment/json" + "github.com/go-json-experiment/json/jsontext" + "tailscale.com/drive" + "tailscale.com/tailcfg" + "tailscale.com/types/opt" + "tailscale.com/types/persist" + "tailscale.com/types/prefs" + "tailscale.com/types/preftype" +) + +//go:generate go run tailscale.com/cmd/viewer --type=Prefs,AutoUpdatePrefs,AppConnectorPrefs + +// Prefs is like [tailscale.com/ipn.Prefs], but with individual preferences wrapped in +// [prefs.Item], [prefs.List], and [prefs.StructList] to include preference +// state and metadata. Related preferences can be grouped together in a nested +// struct (e.g., [AutoUpdatePrefs] or [AppConnectorPrefs]), whereas each +// individual preference that can be configured by a user or managed via +// syspolicy is wrapped. +// +// Non-preference fields, such as ExitNodePrior and Persist, can be included as-is. +// +// Just like [tailscale.com/ipn.Prefs], [Prefs] is a mutable struct. It should +// only be used in well-defined contexts where mutability is expected and desired, +// such as when the LocalBackend receives a request from the GUI/CLI to change a +// preference, when a preference is managed via syspolicy and needs to be +// configured with an admin-provided value, or when the internal state (e.g., +// [persist.Persist]) has changed and needs to be preserved. +// In other contexts, a [PrefsView] should be used to provide a read-only view +// of the preferences. +// +// It is recommended to use [jsonv2] for [Prefs] marshaling and unmarshalling to +// improve performance and enable the omission of unconfigured preferences with +// the `omitzero` JSON tag option. This option is not supported by the +// [encoding/json] package as of 2024-08-21; see golang/go#45669. 
+// It is recommended that a prefs type implements both +// [jsonv2.MarshalerV2]/[jsonv2.UnmarshalerV2] and [json.Marshaler]/[json.Unmarshaler] +// to ensure consistent and more performant marshaling, regardless of the JSON package +// used at the call sites; the standard marshalers can be implemented via [jsonv2]. +// See [Prefs.MarshalJSONV2], [Prefs.UnmarshalJSONV2], [Prefs.MarshalJSON], +// and [Prefs.UnmarshalJSON] for an example implementation. +type Prefs struct { + ControlURL prefs.Item[string] `json:",omitzero"` + RouteAll prefs.Item[bool] `json:",omitzero"` + ExitNodeID prefs.Item[tailcfg.StableNodeID] `json:",omitzero"` + ExitNodeIP prefs.Item[netip.Addr] `json:",omitzero"` + + // ExitNodePrior is an internal state rather than a preference. + // It can be kept in the Prefs structure but should not be wrapped + // and is ignored by the [prefs] package. + ExitNodePrior tailcfg.StableNodeID + + ExitNodeAllowLANAccess prefs.Item[bool] `json:",omitzero"` + CorpDNS prefs.Item[bool] `json:",omitzero"` + RunSSH prefs.Item[bool] `json:",omitzero"` + RunWebClient prefs.Item[bool] `json:",omitzero"` + WantRunning prefs.Item[bool] `json:",omitzero"` + LoggedOut prefs.Item[bool] `json:",omitzero"` + ShieldsUp prefs.Item[bool] `json:",omitzero"` + // AdvertiseTags is a preference whose value is a slice of strings. + // The value is atomic, and individual items in the slice should + // not be modified after the preference is set. + // Since the item type (string) is immutable, we can use [prefs.List]. + AdvertiseTags prefs.List[string] `json:",omitzero"` + Hostname prefs.Item[string] `json:",omitzero"` + NotepadURLs prefs.Item[bool] `json:",omitzero"` + ForceDaemon prefs.Item[bool] `json:",omitzero"` + Egg prefs.Item[bool] `json:",omitzero"` + // AdvertiseRoutes is a preference whose value is a slice of netip.Prefix. + // The value is atomic, and individual items in the slice should + // not be modified after the preference is set. 
+ // Since the item type (netip.Prefix) is immutable, we can use [prefs.List]. + AdvertiseRoutes prefs.List[netip.Prefix] `json:",omitzero"` + NoSNAT prefs.Item[bool] `json:",omitzero"` + NoStatefulFiltering prefs.Item[opt.Bool] `json:",omitzero"` + NetfilterMode prefs.Item[preftype.NetfilterMode] `json:",omitzero"` + OperatorUser prefs.Item[string] `json:",omitzero"` + ProfileName prefs.Item[string] `json:",omitzero"` + + // AutoUpdate contains auto-update preferences. + // Each preference in the group can be configured and managed individually. + AutoUpdate AutoUpdatePrefs `json:",omitzero"` + + // AppConnector contains app connector-related preferences. + // Each preference in the group can be configured and managed individually. + AppConnector AppConnectorPrefs `json:",omitzero"` + + PostureChecking prefs.Item[bool] `json:",omitzero"` + NetfilterKind prefs.Item[string] `json:",omitzero"` + // DriveShares is a preference whose value is a slice of *[drive.Share]. + // The value is atomic, and individual items in the slice should + // not be modified after the preference is set. + // Since the item type (*drive.Share) is mutable and implements [views.ViewCloner], + // we need to use [prefs.StructList] instead of [prefs.List]. + DriveShares prefs.StructList[*drive.Share] `json:",omitzero"` + AllowSingleHosts prefs.Item[marshalAsTrueInJSON] `json:",omitzero"` + + // Persist is an internal state rather than a preference. + // It can be kept in the Prefs structure but should not be wrapped + // and is ignored by the [prefs] package. + Persist *persist.Persist `json:"Config"` +} + +// AutoUpdatePrefs is like [ipn.AutoUpdatePrefs], but it wraps individual preferences with [prefs.Item]. +// It groups related preferences together while allowing each to be configured individually. 
+type AutoUpdatePrefs struct { + Check prefs.Item[bool] `json:",omitzero"` + Apply prefs.Item[opt.Bool] `json:",omitzero"` +} + +// AppConnectorPrefs is like [ipn.AppConnectorPrefs], but it wraps individual preferences with [prefs.Item]. +// It groups related preferences together while allowing each to be configured individually. +type AppConnectorPrefs struct { + Advertise prefs.Item[bool] `json:",omitzero"` +} + +// MarshalJSONV2 implements [jsonv2.MarshalerV2]. +// It is implemented as a performance improvement and to enable omission of +// unconfigured preferences from the JSON output. See the [Prefs] doc for details. +func (p Prefs) MarshalJSONV2(out *jsontext.Encoder, opts jsonv2.Options) error { + // The prefs type shadows the Prefs's method set, + // causing [jsonv2] to use the default marshaler and avoiding + // infinite recursion. + type prefs Prefs + return jsonv2.MarshalEncode(out, (*prefs)(&p), opts) +} + +// UnmarshalJSONV2 implements [jsonv2.UnmarshalerV2]. +func (p *Prefs) UnmarshalJSONV2(in *jsontext.Decoder, opts jsonv2.Options) error { + // The prefs type shadows the Prefs's method set, + // causing [jsonv2] to use the default unmarshaler and avoiding + // infinite recursion. + type prefs Prefs + return jsonv2.UnmarshalDecode(in, (*prefs)(p), opts) +} + +// MarshalJSON implements [json.Marshaler]. +func (p Prefs) MarshalJSON() ([]byte, error) { + return jsonv2.Marshal(p) // uses MarshalJSONV2 +} + +// UnmarshalJSON implements [json.Unmarshaler]. 
+func (p *Prefs) UnmarshalJSON(b []byte) error { + return jsonv2.Unmarshal(b, p) // uses UnmarshalJSONV2 +} + +type marshalAsTrueInJSON struct{} + +var trueJSON = []byte("true") + +func (marshalAsTrueInJSON) MarshalJSON() ([]byte, error) { return trueJSON, nil } +func (*marshalAsTrueInJSON) UnmarshalJSON([]byte) error { return nil } diff --git a/types/prefs/prefs_test.go b/types/prefs/prefs_test.go new file mode 100644 index 0000000000000..ea4729366bc23 --- /dev/null +++ b/types/prefs/prefs_test.go @@ -0,0 +1,670 @@ +// Copyright (c) Tailscale Inc & AUTHORS +// SPDX-License-Identifier: BSD-3-Clause + +package prefs + +import ( + "bytes" + "encoding/json" + "errors" + "net/netip" + "reflect" + "testing" + + jsonv2 "github.com/go-json-experiment/json" + "github.com/go-json-experiment/json/jsontext" + "github.com/google/go-cmp/cmp" + "tailscale.com/types/views" +) + +//go:generate go run tailscale.com/cmd/viewer --tags=test --type=TestPrefs,TestBundle,TestValueStruct,TestGenericStruct,TestPrefsGroup + +type TestPrefs struct { + Int32Item Item[int32] `json:",omitzero"` + UInt64Item Item[uint64] `json:",omitzero"` + StringItem1 Item[string] `json:",omitzero"` + StringItem2 Item[string] `json:",omitzero"` + BoolItem1 Item[bool] `json:",omitzero"` + BoolItem2 Item[bool] `json:",omitzero"` + StringSlice List[string] `json:",omitzero"` + IntSlice List[int] `json:",omitzero"` + + AddrItem Item[netip.Addr] `json:",omitzero"` + + StringStringMap Map[string, string] `json:",omitzero"` + IntStringMap Map[int, string] `json:",omitzero"` + AddrIntMap Map[netip.Addr, int] `json:",omitzero"` + + // Bundles are complex preferences that usually consist of + // multiple parameters that must be configured atomically. 
+ Bundle1 Item[*TestBundle] `json:",omitzero"` + Bundle2 Item[*TestBundle] `json:",omitzero"` + Generic Item[*TestGenericStruct[int]] `json:",omitzero"` + + BundleList StructList[*TestBundle] `json:",omitzero"` + + StringBundleMap StructMap[string, *TestBundle] `json:",omitzero"` + IntBundleMap StructMap[int, *TestBundle] `json:",omitzero"` + AddrBundleMap StructMap[netip.Addr, *TestBundle] `json:",omitzero"` + + // Group is a nested struct that contains one or more preferences. + // Each preference in a group can be configured individually. + // Preference groups should be included directly rather than by pointers. + Group TestPrefsGroup `json:",omitzero"` +} + +// MarshalJSONV2 implements [jsonv2.MarshalerV2]. +func (p TestPrefs) MarshalJSONV2(out *jsontext.Encoder, opts jsonv2.Options) error { + // The testPrefs type shadows the TestPrefs's method set, + // causing jsonv2 to use the default marshaler and avoiding + // infinite recursion. + type testPrefs TestPrefs + return jsonv2.MarshalEncode(out, (*testPrefs)(&p), opts) +} + +// UnmarshalJSONV2 implements [jsonv2.UnmarshalerV2]. +func (p *TestPrefs) UnmarshalJSONV2(in *jsontext.Decoder, opts jsonv2.Options) error { + // The testPrefs type shadows the TestPrefs's method set, + // causing jsonv2 to use the default unmarshaler and avoiding + // infinite recursion. + type testPrefs TestPrefs + return jsonv2.UnmarshalDecode(in, (*testPrefs)(p), opts) +} + +// MarshalJSON implements [json.Marshaler]. +func (p TestPrefs) MarshalJSON() ([]byte, error) { + return jsonv2.Marshal(p) // uses MarshalJSONV2 +} + +// UnmarshalJSON implements [json.Unmarshaler]. +func (p *TestPrefs) UnmarshalJSON(b []byte) error { + return jsonv2.Unmarshal(b, p) // uses UnmarshalJSONV2 +} + +// TestBundle is an example structure type that, +// despite containing multiple values, represents +// a single configurable preference item. 
+type TestBundle struct { + Name string `json:",omitzero"` + Nested *TestValueStruct `json:",omitzero"` +} + +func (b *TestBundle) Equal(b2 *TestBundle) bool { + if b == b2 { + return true + } + if b == nil || b2 == nil { + return false + } + return b.Name == b2.Name && b.Nested.Equal(b2.Nested) +} + +// TestPrefsGroup contains logically grouped preference items. +// Each preference item in a group can be configured individually. +type TestPrefsGroup struct { + FloatItem Item[float64] `json:",omitzero"` + + TestStringItem Item[TestStringType] `json:",omitzero"` +} + +type TestValueStruct struct { + Value int +} + +func (s *TestValueStruct) Equal(s2 *TestValueStruct) bool { + if s == s2 { + return true + } + if s == nil || s2 == nil { + return false + } + return *s == *s2 +} + +type TestGenericStruct[T ImmutableType] struct { + Value T +} + +func (s *TestGenericStruct[T]) Equal(s2 *TestGenericStruct[T]) bool { + if s == s2 { + return true + } + if s == nil || s2 == nil { + return false + } + return *s == *s2 +} + +type TestStringType string + +func TestMarshalUnmarshal(t *testing.T) { + tests := []struct { + name string + prefs *TestPrefs + indent bool + want string + }{ + { + name: "string", + prefs: &TestPrefs{StringItem1: ItemOf("Value1")}, + want: `{"StringItem1": {"Value": "Value1"}}`, + }, + { + name: "empty-string", + prefs: &TestPrefs{StringItem1: ItemOf("")}, + want: `{"StringItem1": {"Value": ""}}`, + }, + { + name: "managed-string", + prefs: &TestPrefs{StringItem1: ItemOf("Value1", Managed)}, + want: `{"StringItem1": {"Value": "Value1", "Managed": true}}`, + }, + { + name: "readonly-item", + prefs: &TestPrefs{StringItem1: ItemWithOpts[string](ReadOnly)}, + want: `{"StringItem1": {"ReadOnly": true}}`, + }, + { + name: "readonly-item-with-value", + prefs: &TestPrefs{StringItem1: ItemOf("RO", ReadOnly)}, + want: `{"StringItem1": {"Value": "RO", "ReadOnly": true}}`, + }, + { + name: "int32", + prefs: &TestPrefs{Int32Item: ItemOf[int32](101)}, + want: 
`{"Int32Item": {"Value": 101}}`, + }, + { + name: "uint64", + prefs: &TestPrefs{UInt64Item: ItemOf[uint64](42)}, + want: `{"UInt64Item": {"Value": 42}}`, + }, + { + name: "bool-true", + prefs: &TestPrefs{BoolItem1: ItemOf(true)}, + want: `{"BoolItem1": {"Value": true}}`, + }, + { + name: "bool-false", + prefs: &TestPrefs{BoolItem1: ItemOf(false)}, + want: `{"BoolItem1": {"Value": false}}`, + }, + { + name: "empty-slice", + prefs: &TestPrefs{StringSlice: ListOf([]string{})}, + want: `{"StringSlice": {"Value": []}}`, + }, + { + name: "string-slice", + prefs: &TestPrefs{StringSlice: ListOf([]string{"1", "2", "3"})}, + want: `{"StringSlice": {"Value": ["1", "2", "3"]}}`, + }, + { + name: "int-slice", + prefs: &TestPrefs{IntSlice: ListOf([]int{4, 8, 15, 16, 23})}, + want: `{"IntSlice": {"Value": [4, 8, 15, 16, 23]}}`, + }, + { + name: "managed-int-slice", + prefs: &TestPrefs{IntSlice: ListOf([]int{4, 8, 15, 16, 23}, Managed)}, + want: `{"IntSlice": {"Value": [4, 8, 15, 16, 23], "Managed": true}}`, + }, + { + name: "netip-addr", + prefs: &TestPrefs{AddrItem: ItemOf(netip.MustParseAddr("127.0.0.1"))}, + want: `{"AddrItem": {"Value": "127.0.0.1"}}`, + }, + { + name: "string-string-map", + prefs: &TestPrefs{StringStringMap: MapOf(map[string]string{"K1": "V1"})}, + want: `{"StringStringMap": {"Value": {"K1": "V1"}}}`, + }, + { + name: "int-string-map", + prefs: &TestPrefs{IntStringMap: MapOf(map[int]string{42: "V1"})}, + want: `{"IntStringMap": {"Value": {"42": "V1"}}}`, + }, + { + name: "addr-int-map", + prefs: &TestPrefs{AddrIntMap: MapOf(map[netip.Addr]int{netip.MustParseAddr("127.0.0.1"): 42})}, + want: `{"AddrIntMap": {"Value": {"127.0.0.1": 42}}}`, + }, + { + name: "bundle-list", + prefs: &TestPrefs{BundleList: StructListOf([]*TestBundle{{Name: "Bundle1"}, {Name: "Bundle2"}})}, + want: `{"BundleList": {"Value": [{"Name": "Bundle1"},{"Name": "Bundle2"}]}}`, + }, + { + name: "string-bundle-map", + prefs: &TestPrefs{StringBundleMap: StructMapOf(map[string]*TestBundle{ + 
"K1": {Name: "Bundle1"}, + "K2": {Name: "Bundle2"}, + })}, + want: `{"StringBundleMap": {"Value": {"K1": {"Name": "Bundle1"}, "K2": {"Name": "Bundle2"}}}}`, + }, + { + name: "int-bundle-map", + prefs: &TestPrefs{IntBundleMap: StructMapOf(map[int]*TestBundle{42: {Name: "Bundle1"}})}, + want: `{"IntBundleMap": {"Value": {"42": {"Name": "Bundle1"}}}}`, + }, + { + name: "addr-bundle-map", + prefs: &TestPrefs{AddrBundleMap: StructMapOf(map[netip.Addr]*TestBundle{netip.MustParseAddr("127.0.0.1"): {Name: "Bundle1"}})}, + want: `{"AddrBundleMap": {"Value": {"127.0.0.1": {"Name": "Bundle1"}}}}`, + }, + { + name: "bundle", + prefs: &TestPrefs{Bundle1: ItemOf(&TestBundle{Name: "Bundle1"})}, + want: `{"Bundle1": {"Value": {"Name": "Bundle1"}}}`, + }, + { + name: "managed-bundle", + prefs: &TestPrefs{Bundle2: ItemOf(&TestBundle{Name: "Bundle2", Nested: &TestValueStruct{Value: 17}}, Managed)}, + want: `{"Bundle2": {"Value": {"Name": "Bundle2", "Nested": {"Value": 17}}, "Managed": true}}`, + }, + { + name: "subgroup", + prefs: &TestPrefs{Group: TestPrefsGroup{FloatItem: ItemOf(1.618), TestStringItem: ItemOf(TestStringType("Value"))}}, + want: `{"Group": {"FloatItem": {"Value": 1.618}, "TestStringItem": {"Value": "Value"}}}`, + }, + { + name: "various", + prefs: &TestPrefs{ + Int32Item: ItemOf[int32](101), + UInt64Item: ItemOf[uint64](42), + StringItem1: ItemOf("Value1"), + StringItem2: ItemWithOpts[string](ReadOnly), + BoolItem1: ItemOf(true), + BoolItem2: ItemOf(false, Managed), + StringSlice: ListOf([]string{"1", "2", "3"}), + IntSlice: ListOf([]int{4, 8, 15, 16, 23}, Managed), + AddrItem: ItemOf(netip.MustParseAddr("127.0.0.1")), + StringStringMap: MapOf(map[string]string{"K1": "V1"}), + IntStringMap: MapOf(map[int]string{42: "V1"}), + AddrIntMap: MapOf(map[netip.Addr]int{netip.MustParseAddr("127.0.0.1"): 42}), + BundleList: StructListOf([]*TestBundle{{Name: "Bundle1"}}), + StringBundleMap: StructMapOf(map[string]*TestBundle{"K1": {Name: "Bundle1"}}), + IntBundleMap: 
StructMapOf(map[int]*TestBundle{42: {Name: "Bundle1"}}), + AddrBundleMap: StructMapOf(map[netip.Addr]*TestBundle{netip.MustParseAddr("127.0.0.1"): {Name: "Bundle1"}}), + Bundle1: ItemOf(&TestBundle{Name: "Bundle1"}), + Bundle2: ItemOf(&TestBundle{Name: "Bundle2", Nested: &TestValueStruct{Value: 17}}, Managed), + Group: TestPrefsGroup{ + FloatItem: ItemOf(1.618), + TestStringItem: ItemOf(TestStringType("Value")), + }, + }, + want: `{ + "Int32Item": {"Value": 101}, + "UInt64Item": {"Value": 42}, + "StringItem1": {"Value": "Value1"}, + "StringItem2": {"ReadOnly": true}, + "BoolItem1": {"Value": true}, + "BoolItem2": {"Value": false, "Managed": true}, + "StringSlice": {"Value": ["1", "2", "3"]}, + "IntSlice": {"Value": [4, 8, 15, 16, 23], "Managed": true}, + "AddrItem": {"Value": "127.0.0.1"}, + "StringStringMap": {"Value": {"K1": "V1"}}, + "IntStringMap": {"Value": {"42": "V1"}}, + "AddrIntMap": {"Value": {"127.0.0.1": 42}}, + "BundleList": {"Value": [{"Name": "Bundle1"}]}, + "StringBundleMap": {"Value": {"K1": {"Name": "Bundle1"}}}, + "IntBundleMap": {"Value": {"42": {"Name": "Bundle1"}}}, + "AddrBundleMap": {"Value": {"127.0.0.1": {"Name": "Bundle1"}}}, + "Bundle1": {"Value": {"Name": "Bundle1"}}, + "Bundle2": {"Value": {"Name": "Bundle2", "Nested": {"Value": 17}}, "Managed": true}, + "Group": { + "FloatItem": {"Value": 1.618}, + "TestStringItem": {"Value": "Value"} + } + }`, + }, + } + + arshalers := []struct { + name string + marshal func(in any) (out []byte, err error) + unmarshal func(in []byte, out any) (err error) + }{ + { + name: "json", + marshal: json.Marshal, + unmarshal: json.Unmarshal, + }, + { + name: "jsonv2", + marshal: func(in any) (out []byte, err error) { return jsonv2.Marshal(in) }, + unmarshal: func(in []byte, out any) (err error) { return jsonv2.Unmarshal(in, out) }, + }, + } + + for _, a := range arshalers { + t.Run(a.name, func(t *testing.T) { + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + t.Run("marshal-directly", func(t 
*testing.T) { + gotJSON, err := a.marshal(tt.prefs) + if err != nil { + t.Fatalf("marshalling failed: %v", err) + } + + checkJSON(t, gotJSON, jsontext.Value(tt.want)) + + var gotPrefs TestPrefs + if err = a.unmarshal(gotJSON, &gotPrefs); err != nil { + t.Fatalf("unmarshalling failed: %v", err) + } + + if diff := cmp.Diff(tt.prefs, &gotPrefs); diff != "" { + t.Errorf("mismatch (-want +got):\n%s", diff) + } + }) + + t.Run("marshal-via-view", func(t *testing.T) { + gotJSON, err := a.marshal(tt.prefs.View()) + if err != nil { + t.Fatalf("marshalling failed: %v", err) + } + + checkJSON(t, gotJSON, jsontext.Value(tt.want)) + + var gotPrefs TestPrefsView + if err = a.unmarshal(gotJSON, &gotPrefs); err != nil { + t.Fatalf("unmarshalling failed: %v", err) + } + + if diff := cmp.Diff(tt.prefs, gotPrefs.AsStruct()); diff != "" { + t.Errorf("mismatch (-want +got):\n%s", diff) + } + }) + }) + } + }) + } +} + +func TestPreferenceStates(t *testing.T) { + const ( + zeroValue = 0 + defValue = 5 + userValue = 42 + mdmValue = 1001 + ) + i := ItemWithOpts[int]() + checkIsSet(t, &i, false) + checkIsManaged(t, &i, false) + checkIsReadOnly(t, &i, false) + checkValueOk(t, &i, zeroValue, false) + + i.SetDefaultValue(defValue) + checkValue(t, &i, defValue) + checkValueOk(t, &i, defValue, false) + + checkSetValue(t, &i, userValue) + checkValue(t, &i, userValue) + checkValueOk(t, &i, userValue, true) + + i2 := ItemOf(userValue) + checkIsSet(t, &i2, true) + checkValue(t, &i2, userValue) + checkValueOk(t, &i2, userValue, true) + checkEqual(t, i2, i, true) + + i2.SetManagedValue(mdmValue) + // Setting a managed value should set the value, mark the preference + // as managed and read-only, and prevent it from being modified with SetValue. 
+ checkIsSet(t, &i2, true) + checkIsManaged(t, &i2, true) + checkIsReadOnly(t, &i2, true) + checkValue(t, &i2, mdmValue) + checkValueOk(t, &i2, mdmValue, true) + checkCanNotSetValue(t, &i2, userValue, ErrManaged) + checkValue(t, &i2, mdmValue) // the value must not be changed + checkCanNotClearValue(t, &i2, ErrManaged) + + i2.ClearManaged() + // Clearing the managed flag should change the IsManaged and IsReadOnly flags... + checkIsManaged(t, &i2, false) + checkIsReadOnly(t, &i2, false) + // ...but not the value. + checkValue(t, &i2, mdmValue) + + // We should be able to change the value after clearing the managed flag. + checkSetValue(t, &i2, userValue) + checkIsSet(t, &i2, true) + checkValue(t, &i2, userValue) + checkValueOk(t, &i2, userValue, true) + checkEqual(t, i2, i, true) + + i2.SetReadOnly(true) + checkIsReadOnly(t, &i2, true) + checkIsManaged(t, &i2, false) + checkCanNotSetValue(t, &i2, userValue, ErrReadOnly) + checkCanNotClearValue(t, &i2, ErrReadOnly) + + i2.SetReadOnly(false) + i2.SetDefaultValue(defValue) + checkClearValue(t, &i2) + checkIsSet(t, &i2, false) + checkValue(t, &i2, defValue) + checkValueOk(t, &i2, defValue, false) +} + +func TestItemView(t *testing.T) { + i := ItemOf(&TestBundle{Name: "B1"}) + + iv := ItemViewOf(&i) + checkIsSet(t, iv, true) + checkIsManaged(t, iv, false) + checkIsReadOnly(t, iv, false) + checkValue(t, iv, TestBundleView{i.Value()}) + checkValueOk(t, iv, TestBundleView{i.Value()}, true) + + i2 := *iv.AsStruct() + checkEqual(t, i, i2, true) + i2.SetValue(&TestBundle{Name: "B2"}) + + iv2 := ItemViewOf(&i2) + checkEqual(t, iv, iv2, false) +} + +func TestListView(t *testing.T) { + l := ListOf([]int{4, 8, 15, 16, 23, 42}, ReadOnly) + + lv := l.View() + checkIsSet(t, lv, true) + checkIsManaged(t, lv, false) + checkIsReadOnly(t, lv, true) + checkValue(t, lv, views.SliceOf(l.Value())) + checkValueOk(t, lv, views.SliceOf(l.Value()), true) + + l2 := *lv.AsStruct() + checkEqual(t, l, l2, true) +} + +func TestStructListView(t 
*testing.T) { + l := StructListOf([]*TestBundle{{Name: "E1"}, {Name: "E2"}}, ReadOnly) + + lv := StructListViewOf(&l) + checkIsSet(t, lv, true) + checkIsManaged(t, lv, false) + checkIsReadOnly(t, lv, true) + checkValue(t, lv, views.SliceOfViews(l.Value())) + checkValueOk(t, lv, views.SliceOfViews(l.Value()), true) + + l2 := *lv.AsStruct() + checkEqual(t, l, l2, true) +} + +func TestStructMapView(t *testing.T) { + m := StructMapOf(map[string]*TestBundle{ + "K1": {Name: "E1"}, + "K2": {Name: "E2"}, + }, ReadOnly) + + mv := StructMapViewOf(&m) + checkIsSet(t, mv, true) + checkIsManaged(t, mv, false) + checkIsReadOnly(t, mv, true) + checkValue(t, *mv.AsStruct(), m.Value()) + checkValueOk(t, *mv.AsStruct(), m.Value(), true) + + m2 := *mv.AsStruct() + checkEqual(t, m, m2, true) +} + +// check that the preference types implement the test [pref] interface. +var ( + _ pref[int] = (*Item[int])(nil) + _ pref[*TestBundle] = (*Item[*TestBundle])(nil) + _ pref[[]int] = (*List[int])(nil) + _ pref[[]*TestBundle] = (*StructList[*TestBundle])(nil) + _ pref[map[string]*TestBundle] = (*StructMap[string, *TestBundle])(nil) +) + +// pref is an interface used by [checkSetValue], [checkClearValue], and similar test +// functions that mutate preferences. It is implemented by all preference types, such +// as [Item], [List], [StructList], and [StructMap], and provides both read and write +// access to the preference's value and state. +type pref[T any] interface { + prefView[T] + SetValue(v T) error + ClearValue() error + SetDefaultValue(v T) + SetManagedValue(v T) + ClearManaged() + SetReadOnly(readonly bool) +} + +// check that the preference view types implement the test [prefView] interface. 
+var ( + _ prefView[int] = (*Item[int])(nil) + _ prefView[TestBundleView] = (*ItemView[*TestBundle, TestBundleView])(nil) + _ prefView[views.Slice[int]] = (*ListView[int])(nil) + _ prefView[views.SliceView[*TestBundle, TestBundleView]] = (*StructListView[*TestBundle, TestBundleView])(nil) + _ prefView[views.MapFn[string, *TestBundle, TestBundleView]] = (*StructMapView[string, *TestBundle, TestBundleView])(nil) +) + +// prefView is an interface used by [checkIsSet], [checkIsManaged], and similar non-mutating +// test functions. It is implemented by all preference types, such as [Item], [List], [StructList], +// and [StructMap], as well as their corresponding views, such as [ItemView], [ListView], [StructListView], +// and [StructMapView], and provides read-only access to the preference's value and state. +type prefView[T any] interface { + IsSet() bool + Value() T + ValueOk() (T, bool) + DefaultValue() T + IsManaged() bool + IsReadOnly() bool +} + +func checkIsSet[T any](tb testing.TB, p prefView[T], wantSet bool) { + tb.Helper() + if gotSet := p.IsSet(); gotSet != wantSet { + tb.Errorf("IsSet: got %v; want %v", gotSet, wantSet) + } +} + +func checkIsManaged[T any](tb testing.TB, p prefView[T], wantManaged bool) { + tb.Helper() + if gotManaged := p.IsManaged(); gotManaged != wantManaged { + tb.Errorf("IsManaged: got %v; want %v", gotManaged, wantManaged) + } +} + +func checkIsReadOnly[T any](tb testing.TB, p prefView[T], wantReadOnly bool) { + tb.Helper() + if gotReadOnly := p.IsReadOnly(); gotReadOnly != wantReadOnly { + tb.Errorf("IsReadOnly: got %v; want %v", gotReadOnly, wantReadOnly) + } +} + +func checkValue[T any](tb testing.TB, p prefView[T], wantValue T) { + tb.Helper() + if gotValue := p.Value(); !testComparerFor[T]()(gotValue, wantValue) { + tb.Errorf("Value: got %v; want %v", gotValue, wantValue) + } +} + +func checkValueOk[T any](tb testing.TB, p prefView[T], wantValue T, wantOk bool) { + tb.Helper() + gotValue, gotOk := p.ValueOk() + + if gotOk != 
wantOk || !testComparerFor[T]()(gotValue, wantValue) { + tb.Errorf("ValueOk: got (%v, %v); want (%v, %v)", gotValue, gotOk, wantValue, wantOk) + } +} + +func checkEqual[T equatable[T]](tb testing.TB, a, b T, wantEqual bool) { + tb.Helper() + if gotEqual := a.Equal(b); gotEqual != wantEqual { + tb.Errorf("Equal: got %v; want %v", gotEqual, wantEqual) + } +} + +func checkSetValue[T any](tb testing.TB, p pref[T], v T) { + tb.Helper() + if err := p.SetValue(v); err != nil { + tb.Fatalf("SetValue: gotErr %v, wantErr: nil", err) + } +} + +func checkCanNotSetValue[T any](tb testing.TB, p pref[T], v T, wantErr error) { + tb.Helper() + if err := p.SetValue(v); err == nil || !errors.Is(err, wantErr) { + tb.Fatalf("SetValue: gotErr %v, wantErr: %v", err, wantErr) + } +} + +func checkClearValue[T any](tb testing.TB, p pref[T]) { + tb.Helper() + if err := p.ClearValue(); err != nil { + tb.Fatalf("ClearValue: gotErr %v, wantErr: nil", err) + } +} + +func checkCanNotClearValue[T any](tb testing.TB, p pref[T], wantErr error) { + tb.Helper() + err := p.ClearValue() + if err == nil || !errors.Is(err, wantErr) { + tb.Fatalf("ClearValue: gotErr %v, wantErr: %v", err, wantErr) + } +} + +// testComparerFor is like [comparerFor], but uses [reflect.DeepEqual] +// unless T is [equatable]. +func testComparerFor[T any]() func(a, b T) bool { + return func(a, b T) bool { + switch a := any(a).(type) { + case equatable[T]: + return a.Equal(b) + default: + return reflect.DeepEqual(a, b) + } + } +} + +func checkJSON(tb testing.TB, got, want jsontext.Value) { + tb.Helper() + got = got.Clone() + want = want.Clone() + // Compare canonical forms. 
+ if err := got.Canonicalize(); err != nil { + tb.Error(err) + } + if err := want.Canonicalize(); err != nil { + tb.Error(err) + } + if bytes.Equal(got, want) { + return + } + + gotMap := make(map[string]any) + if err := jsonv2.Unmarshal(got, &gotMap); err != nil { + tb.Fatal(err) + } + wantMap := make(map[string]any) + if err := jsonv2.Unmarshal(want, &wantMap); err != nil { + tb.Fatal(err) + } + tb.Errorf("mismatch (-want +got):\n%s", cmp.Diff(wantMap, gotMap)) +} diff --git a/types/prefs/prefs_view_test.go b/types/prefs/prefs_view_test.go new file mode 100644 index 0000000000000..d76eebb43e9ef --- /dev/null +++ b/types/prefs/prefs_view_test.go @@ -0,0 +1,342 @@ +// Copyright (c) Tailscale Inc & AUTHORS +// SPDX-License-Identifier: BSD-3-Clause + +// Code generated by tailscale/cmd/viewer; DO NOT EDIT. + +package prefs + +import ( + "encoding/json" + "errors" + "net/netip" +) + +//go:generate go run tailscale.com/cmd/cloner -clonefunc=false -type=TestPrefs,TestBundle,TestValueStruct,TestGenericStruct,TestPrefsGroup -tags=test + +// View returns a readonly view of TestPrefs. +func (p *TestPrefs) View() TestPrefsView { + return TestPrefsView{ж: p} +} + +// TestPrefsView provides a read-only view over TestPrefs. +// +// Its methods should only be called if `Valid()` returns true. +type TestPrefsView struct { + // ж is the underlying mutable value, named with a hard-to-type + // character that looks pointy like a pointer. + // It is named distinctively to make you think of how dangerous it is to escape + // to callers. You must not let callers be able to mutate it. + ж *TestPrefs +} + +// Valid reports whether underlying value is non-nil. +func (v TestPrefsView) Valid() bool { return v.ж != nil } + +// AsStruct returns a clone of the underlying value which aliases no memory with +// the original. 
+func (v TestPrefsView) AsStruct() *TestPrefs { + if v.ж == nil { + return nil + } + return v.ж.Clone() +} + +func (v TestPrefsView) MarshalJSON() ([]byte, error) { return json.Marshal(v.ж) } + +func (v *TestPrefsView) UnmarshalJSON(b []byte) error { + if v.ж != nil { + return errors.New("already initialized") + } + if len(b) == 0 { + return nil + } + var x TestPrefs + if err := json.Unmarshal(b, &x); err != nil { + return err + } + v.ж = &x + return nil +} + +func (v TestPrefsView) Int32Item() Item[int32] { return v.ж.Int32Item } +func (v TestPrefsView) UInt64Item() Item[uint64] { return v.ж.UInt64Item } +func (v TestPrefsView) StringItem1() Item[string] { return v.ж.StringItem1 } +func (v TestPrefsView) StringItem2() Item[string] { return v.ж.StringItem2 } +func (v TestPrefsView) BoolItem1() Item[bool] { return v.ж.BoolItem1 } +func (v TestPrefsView) BoolItem2() Item[bool] { return v.ж.BoolItem2 } +func (v TestPrefsView) StringSlice() ListView[string] { return v.ж.StringSlice.View() } +func (v TestPrefsView) IntSlice() ListView[int] { return v.ж.IntSlice.View() } +func (v TestPrefsView) AddrItem() Item[netip.Addr] { return v.ж.AddrItem } +func (v TestPrefsView) StringStringMap() MapView[string, string] { return v.ж.StringStringMap.View() } +func (v TestPrefsView) IntStringMap() MapView[int, string] { return v.ж.IntStringMap.View() } +func (v TestPrefsView) AddrIntMap() MapView[netip.Addr, int] { return v.ж.AddrIntMap.View() } +func (v TestPrefsView) Bundle1() ItemView[*TestBundle, TestBundleView] { + return ItemViewOf(&v.ж.Bundle1) +} +func (v TestPrefsView) Bundle2() ItemView[*TestBundle, TestBundleView] { + return ItemViewOf(&v.ж.Bundle2) +} +func (v TestPrefsView) Generic() ItemView[*TestGenericStruct[int], TestGenericStructView[int]] { + return ItemViewOf(&v.ж.Generic) +} +func (v TestPrefsView) BundleList() StructListView[*TestBundle, TestBundleView] { + return StructListViewOf(&v.ж.BundleList) +} +func (v TestPrefsView) StringBundleMap() 
StructMapView[string, *TestBundle, TestBundleView] { + return StructMapViewOf(&v.ж.StringBundleMap) +} +func (v TestPrefsView) IntBundleMap() StructMapView[int, *TestBundle, TestBundleView] { + return StructMapViewOf(&v.ж.IntBundleMap) +} +func (v TestPrefsView) AddrBundleMap() StructMapView[netip.Addr, *TestBundle, TestBundleView] { + return StructMapViewOf(&v.ж.AddrBundleMap) +} +func (v TestPrefsView) Group() TestPrefsGroup { return v.ж.Group } + +// A compilation failure here means this code must be regenerated, with the command at the top of this file. +var _TestPrefsViewNeedsRegeneration = TestPrefs(struct { + Int32Item Item[int32] + UInt64Item Item[uint64] + StringItem1 Item[string] + StringItem2 Item[string] + BoolItem1 Item[bool] + BoolItem2 Item[bool] + StringSlice List[string] + IntSlice List[int] + AddrItem Item[netip.Addr] + StringStringMap Map[string, string] + IntStringMap Map[int, string] + AddrIntMap Map[netip.Addr, int] + Bundle1 Item[*TestBundle] + Bundle2 Item[*TestBundle] + Generic Item[*TestGenericStruct[int]] + BundleList StructList[*TestBundle] + StringBundleMap StructMap[string, *TestBundle] + IntBundleMap StructMap[int, *TestBundle] + AddrBundleMap StructMap[netip.Addr, *TestBundle] + Group TestPrefsGroup +}{}) + +// View returns a readonly view of TestBundle. +func (p *TestBundle) View() TestBundleView { + return TestBundleView{ж: p} +} + +// TestBundleView provides a read-only view over TestBundle. +// +// Its methods should only be called if `Valid()` returns true. +type TestBundleView struct { + // ж is the underlying mutable value, named with a hard-to-type + // character that looks pointy like a pointer. + // It is named distinctively to make you think of how dangerous it is to escape + // to callers. You must not let callers be able to mutate it. + ж *TestBundle +} + +// Valid reports whether underlying value is non-nil. 
+func (v TestBundleView) Valid() bool { return v.ж != nil } + +// AsStruct returns a clone of the underlying value which aliases no memory with +// the original. +func (v TestBundleView) AsStruct() *TestBundle { + if v.ж == nil { + return nil + } + return v.ж.Clone() +} + +func (v TestBundleView) MarshalJSON() ([]byte, error) { return json.Marshal(v.ж) } + +func (v *TestBundleView) UnmarshalJSON(b []byte) error { + if v.ж != nil { + return errors.New("already initialized") + } + if len(b) == 0 { + return nil + } + var x TestBundle + if err := json.Unmarshal(b, &x); err != nil { + return err + } + v.ж = &x + return nil +} + +func (v TestBundleView) Name() string { return v.ж.Name } +func (v TestBundleView) Nested() *TestValueStruct { + if v.ж.Nested == nil { + return nil + } + x := *v.ж.Nested + return &x +} + +func (v TestBundleView) Equal(v2 TestBundleView) bool { return v.ж.Equal(v2.ж) } + +// A compilation failure here means this code must be regenerated, with the command at the top of this file. +var _TestBundleViewNeedsRegeneration = TestBundle(struct { + Name string + Nested *TestValueStruct +}{}) + +// View returns a readonly view of TestValueStruct. +func (p *TestValueStruct) View() TestValueStructView { + return TestValueStructView{ж: p} +} + +// TestValueStructView provides a read-only view over TestValueStruct. +// +// Its methods should only be called if `Valid()` returns true. +type TestValueStructView struct { + // ж is the underlying mutable value, named with a hard-to-type + // character that looks pointy like a pointer. + // It is named distinctively to make you think of how dangerous it is to escape + // to callers. You must not let callers be able to mutate it. + ж *TestValueStruct +} + +// Valid reports whether underlying value is non-nil. +func (v TestValueStructView) Valid() bool { return v.ж != nil } + +// AsStruct returns a clone of the underlying value which aliases no memory with +// the original. 
+func (v TestValueStructView) AsStruct() *TestValueStruct { + if v.ж == nil { + return nil + } + return v.ж.Clone() +} + +func (v TestValueStructView) MarshalJSON() ([]byte, error) { return json.Marshal(v.ж) } + +func (v *TestValueStructView) UnmarshalJSON(b []byte) error { + if v.ж != nil { + return errors.New("already initialized") + } + if len(b) == 0 { + return nil + } + var x TestValueStruct + if err := json.Unmarshal(b, &x); err != nil { + return err + } + v.ж = &x + return nil +} + +func (v TestValueStructView) Value() int { return v.ж.Value } +func (v TestValueStructView) Equal(v2 TestValueStructView) bool { return v.ж.Equal(v2.ж) } + +// A compilation failure here means this code must be regenerated, with the command at the top of this file. +var _TestValueStructViewNeedsRegeneration = TestValueStruct(struct { + Value int +}{}) + +// View returns a readonly view of TestGenericStruct. +func (p *TestGenericStruct[T]) View() TestGenericStructView[T] { + return TestGenericStructView[T]{ж: p} +} + +// TestGenericStructView[T] provides a read-only view over TestGenericStruct[T]. +// +// Its methods should only be called if `Valid()` returns true. +type TestGenericStructView[T ImmutableType] struct { + // ж is the underlying mutable value, named with a hard-to-type + // character that looks pointy like a pointer. + // It is named distinctively to make you think of how dangerous it is to escape + // to callers. You must not let callers be able to mutate it. + ж *TestGenericStruct[T] +} + +// Valid reports whether underlying value is non-nil. +func (v TestGenericStructView[T]) Valid() bool { return v.ж != nil } + +// AsStruct returns a clone of the underlying value which aliases no memory with +// the original. 
+func (v TestGenericStructView[T]) AsStruct() *TestGenericStruct[T] { + if v.ж == nil { + return nil + } + return v.ж.Clone() +} + +func (v TestGenericStructView[T]) MarshalJSON() ([]byte, error) { return json.Marshal(v.ж) } + +func (v *TestGenericStructView[T]) UnmarshalJSON(b []byte) error { + if v.ж != nil { + return errors.New("already initialized") + } + if len(b) == 0 { + return nil + } + var x TestGenericStruct[T] + if err := json.Unmarshal(b, &x); err != nil { + return err + } + v.ж = &x + return nil +} + +func (v TestGenericStructView[T]) Value() T { return v.ж.Value } +func (v TestGenericStructView[T]) Equal(v2 TestGenericStructView[T]) bool { return v.ж.Equal(v2.ж) } + +// A compilation failure here means this code must be regenerated, with the command at the top of this file. +func _TestGenericStructViewNeedsRegeneration[T ImmutableType](TestGenericStruct[T]) { + _TestGenericStructViewNeedsRegeneration(struct { + Value T + }{}) +} + +// View returns a readonly view of TestPrefsGroup. +func (p *TestPrefsGroup) View() TestPrefsGroupView { + return TestPrefsGroupView{ж: p} +} + +// TestPrefsGroupView provides a read-only view over TestPrefsGroup. +// +// Its methods should only be called if `Valid()` returns true. +type TestPrefsGroupView struct { + // ж is the underlying mutable value, named with a hard-to-type + // character that looks pointy like a pointer. + // It is named distinctively to make you think of how dangerous it is to escape + // to callers. You must not let callers be able to mutate it. + ж *TestPrefsGroup +} + +// Valid reports whether underlying value is non-nil. +func (v TestPrefsGroupView) Valid() bool { return v.ж != nil } + +// AsStruct returns a clone of the underlying value which aliases no memory with +// the original. 
+func (v TestPrefsGroupView) AsStruct() *TestPrefsGroup { + if v.ж == nil { + return nil + } + return v.ж.Clone() +} + +func (v TestPrefsGroupView) MarshalJSON() ([]byte, error) { return json.Marshal(v.ж) } + +func (v *TestPrefsGroupView) UnmarshalJSON(b []byte) error { + if v.ж != nil { + return errors.New("already initialized") + } + if len(b) == 0 { + return nil + } + var x TestPrefsGroup + if err := json.Unmarshal(b, &x); err != nil { + return err + } + v.ж = &x + return nil +} + +func (v TestPrefsGroupView) FloatItem() Item[float64] { return v.ж.FloatItem } +func (v TestPrefsGroupView) TestStringItem() Item[TestStringType] { return v.ж.TestStringItem } + +// A compilation failure here means this code must be regenerated, with the command at the top of this file. +var _TestPrefsGroupViewNeedsRegeneration = TestPrefsGroup(struct { + FloatItem Item[float64] + TestStringItem Item[TestStringType] +}{}) diff --git a/types/prefs/struct_list.go b/types/prefs/struct_list.go new file mode 100644 index 0000000000000..872cb232655e3 --- /dev/null +++ b/types/prefs/struct_list.go @@ -0,0 +1,195 @@ +// Copyright (c) Tailscale Inc & AUTHORS +// SPDX-License-Identifier: BSD-3-Clause + +package prefs + +import ( + "fmt" + "reflect" + "slices" + + jsonv2 "github.com/go-json-experiment/json" + "github.com/go-json-experiment/json/jsontext" + "tailscale.com/types/opt" + "tailscale.com/types/ptr" + "tailscale.com/types/views" +) + +// StructList is a preference type that holds zero or more potentially mutable struct values. +type StructList[T views.Cloner[T]] struct { + preference[[]T] +} + +// StructListOf returns a [StructList] configured with the specified value and [Options]. +func StructListOf[T views.Cloner[T]](v []T, opts ...Options) StructList[T] { + return StructList[T]{preferenceOf(opt.ValueOf(deepCloneSlice(v)), opts...)} +} + +// StructListWithOpts returns an unconfigured [StructList] with the specified [Options]. 
+func StructListWithOpts[T views.Cloner[T]](opts ...Options) StructList[T] { + return StructList[T]{preferenceOf(opt.Value[[]T]{}, opts...)} +} + +// SetValue configures the preference with the specified value. +// It fails and returns [ErrManaged] if p is a managed preference, +// and [ErrReadOnly] if p is a read-only preference. +func (l *StructList[T]) SetValue(val []T) error { + return l.preference.SetValue(deepCloneSlice(val)) +} + +// SetManagedValue configures the preference with the specified value +// and marks the preference as managed. +func (l *StructList[T]) SetManagedValue(val []T) { + l.preference.SetManagedValue(deepCloneSlice(val)) +} + +// Clone returns a copy of l that aliases no memory with l. +func (l StructList[T]) Clone() *StructList[T] { + res := ptr.To(l) + if v, ok := l.s.Value.GetOk(); ok { + res.s.Value.Set(deepCloneSlice(v)) + } + return res +} + +// Equal reports whether l and l2 are equal. +// If the template type T implements an Equal(T) bool method, it will be used +// instead of the == operator for value comparison. +// It panics if T is not comparable. 
+func (l StructList[T]) Equal(l2 StructList[T]) bool {
+	if l.s.Metadata != l2.s.Metadata {
+		return false
+	}
+	v1, ok1 := l.s.Value.GetOk()
+	v2, ok2 := l2.s.Value.GetOk()
+	if ok1 != ok2 {
+		// One has a value set and the other does not.
+		return false
+	}
+	// Either both are unset, or both are set and must compare
+	// equal element-wise (comparerFor prefers T's Equal method over ==).
+	return !ok1 || slices.EqualFunc(v1, v2, comparerFor[T]())
+}
+
+func deepCloneSlice[T views.Cloner[T]](s []T) []T {
+	c := make([]T, len(s))
+	for i := range s {
+		c[i] = s[i].Clone()
+	}
+	return c
+}
+
+type equatable[T any] interface {
+	Equal(other T) bool
+}
+
+func comparerFor[T any]() func(a, b T) bool {
+	switch t := reflect.TypeFor[T](); {
+	case t.Implements(reflect.TypeFor[equatable[T]]()):
+		return func(a, b T) bool { return any(a).(equatable[T]).Equal(b) }
+	case t.Comparable():
+		return func(a, b T) bool { return any(a) == any(b) }
+	default:
+		panic(fmt.Errorf("%v is not comparable", t))
+	}
+}
+
+// StructListView is a read-only view of a [StructList].
+type StructListView[T views.ViewCloner[T, V], V views.StructView[T]] struct {
+	// ж is the underlying mutable value, named with a hard-to-type
+	// character that looks pointy like a pointer.
+	// It is named distinctively to make you think of how dangerous it is to escape
+	// to callers. You must not let callers be able to mutate it.
+	ж *StructList[T]
+}
+
+// StructListViewOf returns a read-only view of l.
+// It is used by [tailscale.com/cmd/viewer].
+func StructListViewOf[T views.ViewCloner[T, V], V views.StructView[T]](l *StructList[T]) StructListView[T, V] {
+	return StructListView[T, V]{l}
+}
+
+// Valid reports whether the underlying [StructList] is non-nil.
+func (lv StructListView[T, V]) Valid() bool {
+	return lv.ж != nil
+}
+
+// AsStruct implements [views.StructView] by returning a clone of the preference
+// which aliases no memory with the original.
+func (lv StructListView[T, V]) AsStruct() *StructList[T] {
+	if lv.ж == nil {
+		return nil
+	}
+	return lv.ж.Clone()
+}
+
+// IsSet reports whether the preference has a value set.
+func (lv StructListView[T, V]) IsSet() bool {
+	return lv.ж.IsSet()
+}
+
+// Value returns a read-only view of the value if the preference has a value set.
+// Otherwise, it returns a read-only view of its default value.
+func (lv StructListView[T, V]) Value() views.SliceView[T, V] {
+	return views.SliceOfViews(lv.ж.Value())
+}
+
+// ValueOk returns a read-only view of the value and true if the preference has a value set.
+// Otherwise, it returns an invalid view and false.
+func (lv StructListView[T, V]) ValueOk() (val views.SliceView[T, V], ok bool) {
+	if v, ok := lv.ж.ValueOk(); ok {
+		return views.SliceOfViews(v), true
+	}
+	return views.SliceView[T, V]{}, false
+}
+
+// DefaultValue returns a read-only view of the default value of the preference.
+func (lv StructListView[T, V]) DefaultValue() views.SliceView[T, V] {
+	return views.SliceOfViews(lv.ж.DefaultValue())
+}
+
+// IsManaged reports whether the preference is managed via MDM, Group Policy, or similar means.
+func (lv StructListView[T, V]) IsManaged() bool {
+	return lv.ж.IsManaged()
+}
+
+// IsReadOnly reports whether the preference is read-only and cannot be changed by user.
+func (lv StructListView[T, V]) IsReadOnly() bool {
+	return lv.ж.IsReadOnly()
+}
+
+// Equal reports whether lv and lv2 are equal.
+func (lv StructListView[T, V]) Equal(lv2 StructListView[T, V]) bool {
+	if !lv.Valid() && !lv2.Valid() {
+		return true
+	}
+	if lv.Valid() != lv2.Valid() {
+		return false
+	}
+	return lv.ж.Equal(*lv2.ж)
+}
+
+// MarshalJSONV2 implements [jsonv2.MarshalerV2].
+func (lv StructListView[T, V]) MarshalJSONV2(out *jsontext.Encoder, opts jsonv2.Options) error {
+	return lv.ж.MarshalJSONV2(out, opts)
+}
+
+// UnmarshalJSONV2 implements [jsonv2.UnmarshalerV2].
+func (lv *StructListView[T, V]) UnmarshalJSONV2(in *jsontext.Decoder, opts jsonv2.Options) error { + var x StructList[T] + if err := x.UnmarshalJSONV2(in, opts); err != nil { + return err + } + lv.ж = &x + return nil +} + +// MarshalJSON implements [json.Marshaler]. +func (lv StructListView[T, V]) MarshalJSON() ([]byte, error) { + return jsonv2.Marshal(lv) // uses MarshalJSONV2 +} + +// UnmarshalJSON implements [json.Unmarshaler]. +func (lv *StructListView[T, V]) UnmarshalJSON(b []byte) error { + return jsonv2.Unmarshal(b, lv) // uses UnmarshalJSONV2 +} diff --git a/types/prefs/struct_map.go b/types/prefs/struct_map.go new file mode 100644 index 0000000000000..2003eebe323fa --- /dev/null +++ b/types/prefs/struct_map.go @@ -0,0 +1,175 @@ +// Copyright (c) Tailscale Inc & AUTHORS +// SPDX-License-Identifier: BSD-3-Clause + +package prefs + +import ( + "maps" + + jsonv2 "github.com/go-json-experiment/json" + "github.com/go-json-experiment/json/jsontext" + "tailscale.com/types/opt" + "tailscale.com/types/ptr" + "tailscale.com/types/views" +) + +// StructMap is a preference type that holds potentially mutable key-value pairs. +type StructMap[K MapKeyType, V views.Cloner[V]] struct { + preference[map[K]V] +} + +// StructMapOf returns a [StructMap] configured with the specified value and [Options]. +func StructMapOf[K MapKeyType, V views.Cloner[V]](v map[K]V, opts ...Options) StructMap[K, V] { + return StructMap[K, V]{preferenceOf(opt.ValueOf(deepCloneMap(v)), opts...)} +} + +// StructMapWithOpts returns an unconfigured [StructMap] with the specified [Options]. +func StructMapWithOpts[K MapKeyType, V views.Cloner[V]](opts ...Options) StructMap[K, V] { + return StructMap[K, V]{preferenceOf(opt.Value[map[K]V]{}, opts...)} +} + +// SetValue configures the preference with the specified value. +// It fails and returns [ErrManaged] if p is a managed preference, +// and [ErrReadOnly] if p is a read-only preference. 
+func (l *StructMap[K, V]) SetValue(val map[K]V) error {
+	return l.preference.SetValue(deepCloneMap(val))
+}
+
+// SetManagedValue configures the preference with the specified value
+// and marks the preference as managed.
+func (l *StructMap[K, V]) SetManagedValue(val map[K]V) {
+	l.preference.SetManagedValue(deepCloneMap(val))
+}
+
+// Clone returns a copy of m that aliases no memory with m.
+func (m StructMap[K, V]) Clone() *StructMap[K, V] {
+	res := ptr.To(m)
+	if v, ok := m.s.Value.GetOk(); ok {
+		res.s.Value.Set(deepCloneMap(v))
+	}
+	return res
+}
+
+// Equal reports whether m and m2 are equal.
+// If the template type V implements an Equal(V) bool method, it will be used
+// instead of the == operator for value comparison.
+// It panics if V is not comparable.
+func (m StructMap[K, V]) Equal(m2 StructMap[K, V]) bool {
+	if m.s.Metadata != m2.s.Metadata {
+		return false
+	}
+	v1, ok1 := m.s.Value.GetOk()
+	v2, ok2 := m2.s.Value.GetOk()
+	if ok1 != ok2 {
+		return false
+	}
+	return !ok1 || maps.EqualFunc(v1, v2, comparerFor[V]())
+}
+
+func deepCloneMap[K comparable, V views.Cloner[V]](m map[K]V) map[K]V {
+	c := make(map[K]V, len(m))
+	for i := range m {
+		c[i] = m[i].Clone()
+	}
+	return c
+}
+
+// StructMapView is a read-only view of a [StructMap].
+type StructMapView[K MapKeyType, T views.ViewCloner[T, V], V views.StructView[T]] struct {
+	// ж is the underlying mutable value, named with a hard-to-type
+	// character that looks pointy like a pointer.
+	// It is named distinctively to make you think of how dangerous it is to escape
+	// to callers. You must not let callers be able to mutate it.
+	ж *StructMap[K, T]
+}
+
+// StructMapViewOf returns a readonly view of m.
+// It is used by [tailscale.com/cmd/viewer].
+func StructMapViewOf[K MapKeyType, T views.ViewCloner[T, V], V views.StructView[T]](m *StructMap[K, T]) StructMapView[K, T, V] {
+	return StructMapView[K, T, V]{m}
+}
+
+// Valid reports whether the underlying [StructMap] is non-nil.
+func (mv StructMapView[K, T, V]) Valid() bool {
+	return mv.ж != nil
+}
+
+// AsStruct implements [views.StructView] by returning a clone of the preference
+// which aliases no memory with the original.
+func (mv StructMapView[K, T, V]) AsStruct() *StructMap[K, T] {
+	if mv.ж == nil {
+		return nil
+	}
+	return mv.ж.Clone()
+}
+
+// IsSet reports whether the preference has a value set.
+func (mv StructMapView[K, T, V]) IsSet() bool {
+	return mv.ж.IsSet()
+}
+
+// Value returns a read-only view of the value if the preference has a value set.
+// Otherwise, it returns a read-only view of its default value.
+func (mv StructMapView[K, T, V]) Value() views.MapFn[K, T, V] {
+	return views.MapFnOf(mv.ж.Value(), func(t T) V { return t.View() })
+}
+
+// ValueOk returns a read-only view of the value and true if the preference has a value set.
+// Otherwise, it returns an invalid view and false.
+func (mv StructMapView[K, T, V]) ValueOk() (val views.MapFn[K, T, V], ok bool) {
+	if v, ok := mv.ж.ValueOk(); ok {
+		return views.MapFnOf(v, func(t T) V { return t.View() }), true
+	}
+	return views.MapFn[K, T, V]{}, false
+}
+
+// DefaultValue returns a read-only view of the default value of the preference.
+func (mv StructMapView[K, T, V]) DefaultValue() views.MapFn[K, T, V] {
+	return views.MapFnOf(mv.ж.DefaultValue(), func(t T) V { return t.View() })
+}
+
+// IsManaged reports whether the preference is managed via MDM, Group Policy, or similar means.
+func (mv StructMapView[K, T, V]) IsManaged() bool {
+	return mv.ж.IsManaged()
+}
+
+// IsReadOnly reports whether the preference is read-only and cannot be changed by user.
+func (mv StructMapView[K, T, V]) IsReadOnly() bool {
+	return mv.ж.IsReadOnly()
+}
+
+// Equal reports whether mv and mv2 are equal.
+func (mv StructMapView[K, T, V]) Equal(mv2 StructMapView[K, T, V]) bool { + if !mv.Valid() && !mv2.Valid() { + return true + } + if mv.Valid() != mv2.Valid() { + return false + } + return mv.ж.Equal(*mv2.ж) +} + +// MarshalJSONV2 implements [jsonv2.MarshalerV2]. +func (mv StructMapView[K, T, V]) MarshalJSONV2(out *jsontext.Encoder, opts jsonv2.Options) error { + return mv.ж.MarshalJSONV2(out, opts) +} + +// UnmarshalJSONV2 implements [jsonv2.UnmarshalerV2]. +func (mv *StructMapView[K, T, V]) UnmarshalJSONV2(in *jsontext.Decoder, opts jsonv2.Options) error { + var x StructMap[K, T] + if err := x.UnmarshalJSONV2(in, opts); err != nil { + return err + } + mv.ж = &x + return nil +} + +// MarshalJSON implements [json.Marshaler]. +func (mv StructMapView[K, T, V]) MarshalJSON() ([]byte, error) { + return jsonv2.Marshal(mv) // uses MarshalJSONV2 +} + +// UnmarshalJSON implements [json.Unmarshaler]. +func (mv *StructMapView[K, T, V]) UnmarshalJSON(b []byte) error { + return jsonv2.Unmarshal(b, mv) // uses UnmarshalJSONV2 +} diff --git a/types/views/views.go b/types/views/views.go index 4edd72688f7b4..b99a20a488bbd 100644 --- a/types/views/views.go +++ b/types/views/views.go @@ -10,6 +10,7 @@ import ( "encoding/json" "errors" "fmt" + "iter" "maps" "reflect" "slices" @@ -208,6 +209,17 @@ type Slice[T any] struct { ж []T } +// All returns an iterator over v. +func (v Slice[T]) All() iter.Seq2[int, T] { + return func(yield func(int, T) bool) { + for i, v := range v.ж { + if !yield(i, v) { + return + } + } + } +} + // MapKey returns a unique key for a slice, based on its address and length. 
func (v Slice[T]) MapKey() SliceMapKey[T] { return mapKey(v.ж) } diff --git a/types/views/views_test.go b/types/views/views_test.go index 1a4f1f2d4405f..24118d0997078 100644 --- a/types/views/views_test.go +++ b/types/views/views_test.go @@ -6,8 +6,10 @@ package views import ( "bytes" "encoding/json" + "fmt" "net/netip" "reflect" + "slices" "strings" "testing" "unsafe" @@ -412,3 +414,15 @@ func TestContainsPointers(t *testing.T) { }) } } + +func TestSliceRange(t *testing.T) { + sv := SliceOf([]string{"foo", "bar"}) + var got []string + for i, v := range sv.All() { + got = append(got, fmt.Sprintf("%d-%s", i, v)) + } + want := []string{"0-foo", "1-bar"} + if !slices.Equal(got, want) { + t.Errorf("got %q; want %q", got, want) + } +} diff --git a/util/codegen/codegen.go b/util/codegen/codegen.go index 3ef4b9cc1230a..d998d925d9143 100644 --- a/util/codegen/codegen.go +++ b/util/codegen/codegen.go @@ -24,7 +24,7 @@ import ( var flagCopyright = flag.Bool("copyright", true, "add Tailscale copyright to generated file headers") // LoadTypes returns all named types in pkgName, keyed by their type name. -func LoadTypes(buildTags string, pkgName string) (*packages.Package, map[string]*types.Named, error) { +func LoadTypes(buildTags string, pkgName string) (*packages.Package, map[string]types.Type, error) { cfg := &packages.Config{ Mode: packages.NeedTypes | packages.NeedTypesInfo | packages.NeedSyntax | packages.NeedName, Tests: buildTags == "test", @@ -181,8 +181,8 @@ func writeFormatted(code []byte, path string) error { } // namedTypes returns all named types in pkg, keyed by their type name. 
-func namedTypes(pkg *packages.Package) map[string]*types.Named { - nt := make(map[string]*types.Named) +func namedTypes(pkg *packages.Package) map[string]types.Type { + nt := make(map[string]types.Type) for _, file := range pkg.Syntax { for _, d := range file.Decls { decl, ok := d.(*ast.GenDecl) @@ -198,11 +198,10 @@ func namedTypes(pkg *packages.Package) map[string]*types.Named { if !ok { continue } - typ, ok := typeNameObj.Type().(*types.Named) - if !ok { - continue + switch typ := typeNameObj.Type(); typ.(type) { + case *types.Alias, *types.Named: + nt[spec.Name.Name] = typ } - nt[spec.Name.Name] = typ } } } @@ -356,14 +355,25 @@ func FormatTypeParams(params *types.TypeParamList, it *ImportTracker) (constrain // LookupMethod returns the method with the specified name in t, or nil if the method does not exist. func LookupMethod(t types.Type, name string) *types.Func { - if t, ok := t.(*types.Named); ok { - for i := 0; i < t.NumMethods(); i++ { - if method := t.Method(i); method.Name() == name { - return method + switch t := t.(type) { + case *types.Alias: + return LookupMethod(t.Rhs(), name) + case *types.TypeParam: + return LookupMethod(t.Constraint(), name) + case *types.Pointer: + return LookupMethod(t.Elem(), name) + case *types.Named: + switch u := t.Underlying().(type) { + case *types.Interface: + return LookupMethod(u, name) + default: + for i := 0; i < t.NumMethods(); i++ { + if method := t.Method(i); method.Name() == name { + return method + } } } - } - if t, ok := t.Underlying().(*types.Interface); ok { + case *types.Interface: for i := 0; i < t.NumMethods(); i++ { if method := t.Method(i); method.Name() == name { return method @@ -372,3 +382,12 @@ func LookupMethod(t types.Type, name string) *types.Func { } return nil } + +// NamedTypeOf is like t.(*types.Named), but also works with type aliases. 
+func NamedTypeOf(t types.Type) (named *types.Named, ok bool) { + if a, ok := t.(*types.Alias); ok { + return NamedTypeOf(types.Unalias(a)) + } + named, ok = t.(*types.Named) + return +} diff --git a/util/codegen/codegen_test.go b/util/codegen/codegen_test.go index 9c61da51d0ae2..28ddaed2bac36 100644 --- a/util/codegen/codegen_test.go +++ b/util/codegen/codegen_test.go @@ -4,10 +4,11 @@ package codegen import ( + "cmp" "go/types" - "log" "net/netip" "strings" + "sync" "testing" "unsafe" @@ -162,14 +163,9 @@ func TestGenericContainsPointers(t *testing.T) { }, } - _, namedTypes, err := LoadTypes("test", ".") - if err != nil { - log.Fatal(err) - } - for _, tt := range tests { t.Run(tt.typ, func(t *testing.T) { - typ := namedTypes[tt.typ] + typ := lookupTestType(t, tt.typ) if isPointer := ContainsPointers(typ); isPointer != tt.wantPointer { t.Fatalf("ContainsPointers: got %v, want: %v", isPointer, tt.wantPointer) } @@ -252,3 +248,199 @@ func TestAssertStructUnchanged(t *testing.T) { }) } } + +type NamedType struct{} + +func (NamedType) Method() {} + +type NamedTypeAlias = NamedType + +type NamedInterface interface { + Method() +} + +type NamedInterfaceAlias = NamedInterface + +type GenericType[T NamedInterface] struct { + TypeParamField T + TypeParamPtrField *T +} + +type GenericTypeWithAliasConstraint[T NamedInterfaceAlias] struct { + TypeParamField T + TypeParamPtrField *T +} + +func TestLookupMethod(t *testing.T) { + tests := []struct { + name string + typ types.Type + methodName string + wantHasMethod bool + wantReceiver types.Type + }{ + { + name: "NamedType/HasMethod", + typ: lookupTestType(t, "NamedType"), + methodName: "Method", + wantHasMethod: true, + }, + { + name: "NamedType/NoMethod", + typ: lookupTestType(t, "NamedType"), + methodName: "NoMethod", + wantHasMethod: false, + }, + { + name: "NamedTypeAlias/HasMethod", + typ: lookupTestType(t, "NamedTypeAlias"), + methodName: "Method", + wantHasMethod: true, + wantReceiver: lookupTestType(t, "NamedType"), + 
}, + { + name: "NamedTypeAlias/NoMethod", + typ: lookupTestType(t, "NamedTypeAlias"), + methodName: "NoMethod", + wantHasMethod: false, + }, + { + name: "PtrToNamedType/HasMethod", + typ: types.NewPointer(lookupTestType(t, "NamedType")), + methodName: "Method", + wantHasMethod: true, + wantReceiver: lookupTestType(t, "NamedType"), + }, + { + name: "PtrToNamedType/NoMethod", + typ: types.NewPointer(lookupTestType(t, "NamedType")), + methodName: "NoMethod", + wantHasMethod: false, + }, + { + name: "PtrToNamedTypeAlias/HasMethod", + typ: types.NewPointer(lookupTestType(t, "NamedTypeAlias")), + methodName: "Method", + wantHasMethod: true, + wantReceiver: lookupTestType(t, "NamedType"), + }, + { + name: "PtrToNamedTypeAlias/NoMethod", + typ: types.NewPointer(lookupTestType(t, "NamedTypeAlias")), + methodName: "NoMethod", + wantHasMethod: false, + }, + { + name: "NamedInterface/HasMethod", + typ: lookupTestType(t, "NamedInterface"), + methodName: "Method", + wantHasMethod: true, + }, + { + name: "NamedInterface/NoMethod", + typ: lookupTestType(t, "NamedInterface"), + methodName: "NoMethod", + wantHasMethod: false, + }, + { + name: "Interface/HasMethod", + typ: types.NewInterfaceType([]*types.Func{types.NewFunc(0, nil, "Method", types.NewSignatureType(nil, nil, nil, nil, nil, false))}, nil), + methodName: "Method", + wantHasMethod: true, + }, + { + name: "Interface/NoMethod", + typ: types.NewInterfaceType(nil, nil), + methodName: "NoMethod", + wantHasMethod: false, + }, + { + name: "TypeParam/HasMethod", + typ: lookupTestType(t, "GenericType").Underlying().(*types.Struct).Field(0).Type(), + methodName: "Method", + wantHasMethod: true, + wantReceiver: lookupTestType(t, "NamedInterface"), + }, + { + name: "TypeParam/NoMethod", + typ: lookupTestType(t, "GenericType").Underlying().(*types.Struct).Field(0).Type(), + methodName: "NoMethod", + wantHasMethod: false, + }, + { + name: "TypeParamPtr/HasMethod", + typ: lookupTestType(t, 
"GenericType").Underlying().(*types.Struct).Field(1).Type(), + methodName: "Method", + wantHasMethod: true, + wantReceiver: lookupTestType(t, "NamedInterface"), + }, + { + name: "TypeParamPtr/NoMethod", + typ: lookupTestType(t, "GenericType").Underlying().(*types.Struct).Field(1).Type(), + methodName: "NoMethod", + wantHasMethod: false, + }, + { + name: "TypeParamWithAlias/HasMethod", + typ: lookupTestType(t, "GenericTypeWithAliasConstraint").Underlying().(*types.Struct).Field(0).Type(), + methodName: "Method", + wantHasMethod: true, + wantReceiver: lookupTestType(t, "NamedInterface"), + }, + { + name: "TypeParamWithAlias/NoMethod", + typ: lookupTestType(t, "GenericTypeWithAliasConstraint").Underlying().(*types.Struct).Field(0).Type(), + methodName: "NoMethod", + wantHasMethod: false, + }, + { + name: "TypeParamWithAliasPtr/HasMethod", + typ: lookupTestType(t, "GenericTypeWithAliasConstraint").Underlying().(*types.Struct).Field(1).Type(), + methodName: "Method", + wantHasMethod: true, + wantReceiver: lookupTestType(t, "NamedInterface"), + }, + { + name: "TypeParamWithAliasPtr/NoMethod", + typ: lookupTestType(t, "GenericTypeWithAliasConstraint").Underlying().(*types.Struct).Field(1).Type(), + methodName: "NoMethod", + wantHasMethod: false, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + gotMethod := LookupMethod(tt.typ, tt.methodName) + if gotHasMethod := gotMethod != nil; gotHasMethod != tt.wantHasMethod { + t.Fatalf("HasMethod: got %v; want %v", gotMethod, tt.wantHasMethod) + } + if gotMethod == nil { + return + } + if gotMethod.Name() != tt.methodName { + t.Errorf("Name: got %v; want %v", gotMethod.Name(), tt.methodName) + } + if gotRecv, wantRecv := gotMethod.Signature().Recv().Type(), cmp.Or(tt.wantReceiver, tt.typ); !types.Identical(gotRecv, wantRecv) { + t.Errorf("Recv: got %v; want %v", gotRecv, wantRecv) + } + }) + } +} + +var namedTestTypes = sync.OnceValues(func() (map[string]types.Type, error) { + _, namedTypes, err := 
LoadTypes("test", ".") + return namedTypes, err +}) + +func lookupTestType(t *testing.T, name string) types.Type { + t.Helper() + types, err := namedTestTypes() + if err != nil { + t.Fatal(err) + } + typ, ok := types[name] + if !ok { + t.Fatalf("type %q is not declared in the current package", name) + } + return typ +} diff --git a/util/linuxfw/detector.go b/util/linuxfw/detector.go index ea9e6741fa3f7..f3ee4aa0b84f0 100644 --- a/util/linuxfw/detector.go +++ b/util/linuxfw/detector.go @@ -6,6 +6,9 @@ package linuxfw import ( + "errors" + "os/exec" + "tailscale.com/envknob" "tailscale.com/hostinfo" "tailscale.com/types/logger" @@ -30,11 +33,22 @@ func detectFirewallMode(logf logger.Logf, prefHint string) FirewallMode { } else if prefHint != "" { logf("TS_DEBUG_FIREWALL_MODE set, overriding firewall mode from %s to %s", prefHint, mode) } + + var det linuxFWDetector + if mode == "" { + // We have no preference, so check if `iptables` is even available. + _, err := det.iptDetect() + if err != nil && errors.Is(err, exec.ErrNotFound) { + logf("iptables not found: %v; falling back to nftables", err) + mode = "nftables" + } + } + // We now use iptables as default and have "auto" and "nftables" as // options for people to test further. switch mode { case "auto": - return pickFirewallModeFromInstalledRules(logf, linuxFWDetector{}) + return pickFirewallModeFromInstalledRules(logf, det) case "nftables": hostinfo.SetFirewallMode("nft-forced") return FirewallModeNfTables diff --git a/util/linuxfw/helpers.go b/util/linuxfw/helpers.go index 5d76adac6deee..a4b9fdf402558 100644 --- a/util/linuxfw/helpers.go +++ b/util/linuxfw/helpers.go @@ -10,11 +10,13 @@ import ( "fmt" "strings" "unicode" + + "tailscale.com/util/slicesx" ) func formatMaybePrintable(b []byte) string { - // Remove a single trailing null, if any - if len(b) > 0 && b[len(b)-1] == 0 { + // Remove a single trailing null, if any. 
+ if slicesx.LastEqual(b, 0) { b = b[:len(b)-1] } diff --git a/util/linuxfw/iptables.go b/util/linuxfw/iptables.go index 7231c83fe8283..234fa526ce17c 100644 --- a/util/linuxfw/iptables.go +++ b/util/linuxfw/iptables.go @@ -29,6 +29,9 @@ func DebugIptables(logf logger.Logf) error { // // It only returns an error when there is no iptables binary, or when iptables -S // fails. In all other cases, it returns the number of non-default rules. +// +// If the iptables binary is not found, it returns an underlying exec.ErrNotFound +// error. func detectIptables() (int, error) { // run "iptables -S" to get the list of rules using iptables // exec.Command returns an error if the binary is not found diff --git a/util/slicesx/slicesx.go b/util/slicesx/slicesx.go index 8abf2bd645856..e0b820eb71e91 100644 --- a/util/slicesx/slicesx.go +++ b/util/slicesx/slicesx.go @@ -136,3 +136,15 @@ func CutSuffix[E comparable](s, suffix []E) (after []E, found bool) { } return s[:len(s)-len(suffix)], true } + +// FirstEqual reports whether len(s) > 0 and +// its first element == v. +func FirstEqual[T comparable](s []T, v T) bool { + return len(s) > 0 && s[0] == v +} + +// LastEqual reports whether len(s) > 0 and +// its last element == v. 
+func LastEqual[T comparable](s []T, v T) bool { + return len(s) > 0 && s[len(s)-1] == v +} diff --git a/util/slicesx/slicesx_test.go b/util/slicesx/slicesx_test.go index be136d288f3e0..597b22b8335fe 100644 --- a/util/slicesx/slicesx_test.go +++ b/util/slicesx/slicesx_test.go @@ -197,3 +197,28 @@ func TestCutSuffix(t *testing.T) { }) } } + +func TestFirstLastEqual(t *testing.T) { + tests := []struct { + name string + in string + v byte + f func([]byte, byte) bool + want bool + }{ + {"first-empty", "", 'f', FirstEqual[byte], false}, + {"first-true", "foo", 'f', FirstEqual[byte], true}, + {"first-false", "foo", 'b', FirstEqual[byte], false}, + {"last-empty", "", 'f', LastEqual[byte], false}, + {"last-true", "bar", 'r', LastEqual[byte], true}, + {"last-false", "bar", 'o', LastEqual[byte], false}, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + if got := tt.f([]byte(tt.in), tt.v); got != tt.want { + t.Errorf("got %v; want %v", got, tt.want) + } + }) + } + +} diff --git a/util/syspolicy/internal/loggerx/logger.go b/util/syspolicy/internal/loggerx/logger.go new file mode 100644 index 0000000000000..b28610826382b --- /dev/null +++ b/util/syspolicy/internal/loggerx/logger.go @@ -0,0 +1,46 @@ +// Copyright (c) Tailscale Inc & AUTHORS +// SPDX-License-Identifier: BSD-3-Clause + +// Package loggerx provides logging functions to the rest of the syspolicy packages. +package loggerx + +import ( + "log" + + "tailscale.com/types/lazy" + "tailscale.com/types/logger" + "tailscale.com/util/syspolicy/internal" +) + +const ( + errorPrefix = "syspolicy: " + verbosePrefix = "syspolicy: [v2] " +) + +var ( + lazyErrorf lazy.SyncValue[logger.Logf] + lazyVerbosef lazy.SyncValue[logger.Logf] +) + +// Errorf formats and writes an error message to the log. +func Errorf(format string, args ...any) { + errorf := lazyErrorf.Get(func() logger.Logf { + return logger.WithPrefix(log.Printf, errorPrefix) + }) + errorf(format, args...) 
+} + +// Verbosef formats and writes an optional, verbose message to the log. +func Verbosef(format string, args ...any) { + verbosef := lazyVerbosef.Get(func() logger.Logf { + return logger.WithPrefix(log.Printf, verbosePrefix) + }) + verbosef(format, args...) +} + +// SetForTest sets the specified errorf and verbosef functions for the duration +// of tb and its subtests. +func SetForTest(tb internal.TB, errorf, verbosef logger.Logf) { + lazyErrorf.SetForTest(tb, errorf, nil) + lazyVerbosef.SetForTest(tb, verbosef, nil) +} diff --git a/util/syspolicy/internal/metrics/metrics.go b/util/syspolicy/internal/metrics/metrics.go new file mode 100644 index 0000000000000..2ea02278afc92 --- /dev/null +++ b/util/syspolicy/internal/metrics/metrics.go @@ -0,0 +1,320 @@ +// Copyright (c) Tailscale Inc & AUTHORS +// SPDX-License-Identifier: BSD-3-Clause + +// Package metrics provides logging and reporting for policy settings and scopes. +package metrics + +import ( + "strings" + "sync" + + xmaps "golang.org/x/exp/maps" + + "tailscale.com/syncs" + "tailscale.com/types/lazy" + "tailscale.com/util/clientmetric" + "tailscale.com/util/mak" + "tailscale.com/util/slicesx" + "tailscale.com/util/syspolicy/internal" + "tailscale.com/util/syspolicy/internal/loggerx" + "tailscale.com/util/syspolicy/setting" + "tailscale.com/util/testenv" +) + +var lazyReportMetrics lazy.SyncValue[bool] // used as a test hook + +// ShouldReport reports whether metrics should be reported on the current environment. +func ShouldReport() bool { + return lazyReportMetrics.Get(func() bool { + // macOS, iOS and tvOS create their own metrics, + // and we don't have syspolicy on any other platforms. + return setting.PlatformList{"android", "windows"}.HasCurrent() + }) +} + +// Reset metrics for the specified policy origin. 
+func Reset(origin *setting.Origin) { + scopeMetrics(origin).Reset() +} + +// ReportConfigured updates metrics and logs that the specified setting is +// configured with the given value in the origin. +func ReportConfigured(origin *setting.Origin, setting *setting.Definition, value any) { + settingMetricsFor(setting).ReportValue(origin, value) +} + +// ReportError updates metrics and logs that the specified setting has an error +// in the origin. +func ReportError(origin *setting.Origin, setting *setting.Definition, err error) { + settingMetricsFor(setting).ReportError(origin, err) +} + +// ReportNotConfigured updates metrics and logs that the specified setting is +// not configured in the origin. +func ReportNotConfigured(origin *setting.Origin, setting *setting.Definition) { + settingMetricsFor(setting).Reset(origin) +} + +// metric is an interface implemented by [clientmetric.Metric] and [funcMetric]. +type metric interface { + Add(v int64) + Set(v int64) +} + +// policyScopeMetrics are metrics that apply to an entire policy scope rather +// than a specific policy setting. +type policyScopeMetrics struct { + hasAny metric + numErrored metric +} + +func newScopeMetrics(scope setting.Scope) *policyScopeMetrics { + prefix := metricScopeName(scope) + // {os}_syspolicy_{scope_unless_device}_any + // Example: windows_syspolicy_any or windows_syspolicy_user_any. + hasAny := newMetric([]string{prefix, "any"}, clientmetric.TypeGauge) + // {os}_syspolicy_{scope_unless_device}_errors + // Example: windows_syspolicy_errors or windows_syspolicy_user_errors. + // + // TODO(nickkhyl): maybe make the `{os}_syspolicy_errors` metric a gauge rather than a counter? + // It was a counter prior to https://github.com/tailscale/tailscale/issues/12687, so I kept it as such. + // But I think a gauge makes more sense: syspolicy errors indicate a mismatch between the expected + // policy value type or format and the actual value read from the underlying store (like the Windows Registry). 
+ // We'll encounter the same error every time we re-read the policy setting from the backing store + // until the policy value is corrected by the user, or until we fix the bug in the code or ADMX. + // There's probably no reason to count and accumulate them over time. + // + // Brief discussion: https://github.com/tailscale/tailscale/pull/13113#discussion_r1723475136 + numErrored := newMetric([]string{prefix, "errors"}, clientmetric.TypeCounter) + return &policyScopeMetrics{hasAny, numErrored} +} + +// ReportHasSettings is called when there's any configured policy setting in the scope. +func (m *policyScopeMetrics) ReportHasSettings() { + if m != nil { + m.hasAny.Set(1) + } +} + +// ReportError is called when there's any errored policy setting in the scope. +func (m *policyScopeMetrics) ReportError() { + if m != nil { + m.numErrored.Add(1) + } +} + +// Reset is called to reset the policy scope metrics, such as when the policy scope +// is about to be reloaded. +func (m *policyScopeMetrics) Reset() { + if m != nil { + m.hasAny.Set(0) + // numErrored is a counter and cannot be (re-)set. + } +} + +// settingMetrics are metrics for a single policy setting in one or more scopes. +type settingMetrics struct { + definition *setting.Definition + isSet []metric // by scope + hasErrors []metric // by scope +} + +// ReportValue is called when the policy setting is found to be configured in the specified source. +func (m *settingMetrics) ReportValue(origin *setting.Origin, v any) { + if m == nil { + return + } + if scope := origin.Scope().Kind(); scope >= 0 && int(scope) < len(m.isSet) { + m.isSet[scope].Set(1) + m.hasErrors[scope].Set(0) + } + scopeMetrics(origin).ReportHasSettings() + loggerx.Verbosef("%v(%q) = %v", origin, m.definition.Key(), v) +} + +// ReportError is called when there's an error with the policy setting in the specified source. 
+func (m *settingMetrics) ReportError(origin *setting.Origin, err error) {
+	if m == nil {
+		return
+	}
+	// Guard both bounds, matching [settingMetrics.ReportValue] and
+	// [settingMetrics.Reset]: a negative scope kind must not index the slices.
+	if scope := origin.Scope().Kind(); scope >= 0 && int(scope) < len(m.hasErrors) {
+		m.isSet[scope].Set(0)
+		m.hasErrors[scope].Set(1)
+	}
+	scopeMetrics(origin).ReportError()
+	loggerx.Errorf("%v(%q): %v", origin, m.definition.Key(), err)
+}
+
+// Reset is called to reset the policy setting's metrics, such as when
+// the policy setting does not exist or the source containing the policy
+// is about to be reloaded.
+func (m *settingMetrics) Reset(origin *setting.Origin) {
+	if m == nil {
+		return
+	}
+	if scope := origin.Scope().Kind(); scope >= 0 && int(scope) < len(m.isSet) {
+		m.isSet[scope].Set(0)
+		m.hasErrors[scope].Set(0)
+	}
+}
+
+// metricFn is a function that adds or sets a metric value.
+type metricFn func(name string, typ clientmetric.Type, v int64)
+
+// funcMetric implements [metric] by calling the specified add and set functions.
+// Used for testing, and with nil functions on platforms that do not support
+// syspolicy, and on platforms that report policy metrics from the GUI.
+type funcMetric struct { + name string + typ clientmetric.Type + add, set metricFn +} + +func (m funcMetric) Add(v int64) { + if m.add != nil { + m.add(m.name, m.typ, v) + } +} + +func (m funcMetric) Set(v int64) { + if m.set != nil { + m.set(m.name, m.typ, v) + } +} + +var ( + lazyDeviceMetrics lazy.SyncValue[*policyScopeMetrics] + lazyProfileMetrics lazy.SyncValue[*policyScopeMetrics] + lazyUserMetrics lazy.SyncValue[*policyScopeMetrics] +) + +func scopeMetrics(origin *setting.Origin) *policyScopeMetrics { + switch origin.Scope().Kind() { + case setting.DeviceSetting: + return lazyDeviceMetrics.Get(func() *policyScopeMetrics { + return newScopeMetrics(setting.DeviceSetting) + }) + case setting.ProfileSetting: + return lazyProfileMetrics.Get(func() *policyScopeMetrics { + return newScopeMetrics(setting.ProfileSetting) + }) + case setting.UserSetting: + return lazyUserMetrics.Get(func() *policyScopeMetrics { + return newScopeMetrics(setting.UserSetting) + }) + default: + panic("unreachable") + } +} + +var ( + settingMetricsMu sync.RWMutex + settingMetricsMap map[setting.Key]*settingMetrics +) + +func settingMetricsFor(setting *setting.Definition) *settingMetrics { + settingMetricsMu.RLock() + metrics, ok := settingMetricsMap[setting.Key()] + settingMetricsMu.RUnlock() + if ok { + return metrics + } + return settingMetricsForSlow(setting) +} + +func settingMetricsForSlow(d *setting.Definition) *settingMetrics { + settingMetricsMu.Lock() + defer settingMetricsMu.Unlock() + if metrics, ok := settingMetricsMap[d.Key()]; ok { + return metrics + } + + // The loop below initializes metrics for each scope where a policy setting defined in 'd' + // can be configured. The [setting.Definition.Scope] returns the narrowest scope at which the policy + // setting may be configured, and more specific scopes always have higher numeric values. + // In other words, [setting.UserSetting] > [setting.ProfileScope] > [setting.DeviceScope]. 
+ // It's impossible for a policy setting to be configured in a scope with a higher numeric value than + // the [setting.Definition.Scope] returns. Therefore, a policy setting can be configured in at + // most d.Scope()+1 different scopes, and having d.Scope()+1 metrics for the corresponding scopes + // is always sufficient for [settingMetrics]; it won't access elements past the end of the slice + // or need to reallocate with a longer slice if one of those arrives. + isSet := make([]metric, d.Scope()+1) + hasErrors := make([]metric, d.Scope()+1) + for i := range isSet { + scope := setting.Scope(i) + // {os}_syspolicy_{key}_{scope_unless_device} + // Example: windows_syspolicy_AdminConsole or windows_syspolicy_AdminConsole_user. + isSet[i] = newSettingMetric(d.Key(), scope, "", clientmetric.TypeGauge) + // {os}_syspolicy_{key}_{scope_unless_device}_error + // Example: windows_syspolicy_AdminConsole_error or windows_syspolicy_TestSetting01_user_error. + hasErrors[i] = newSettingMetric(d.Key(), scope, "error", clientmetric.TypeGauge) + } + metrics := &settingMetrics{d, isSet, hasErrors} + mak.Set(&settingMetricsMap, d.Key(), metrics) + return metrics +} + +// hooks for testing +var addMetricTestHook, setMetricTestHook syncs.AtomicValue[metricFn] + +// SetHooksForTest sets the specified addMetric and setMetric functions +// as the metric functions for the duration of tb and all its subtests. 
+func SetHooksForTest(tb internal.TB, addMetric, setMetric metricFn) { + oldAddMetric := addMetricTestHook.Swap(addMetric) + oldSetMetric := setMetricTestHook.Swap(setMetric) + tb.Cleanup(func() { + addMetricTestHook.Store(oldAddMetric) + setMetricTestHook.Store(oldSetMetric) + }) + + settingMetricsMu.Lock() + oldSettingMetricsMap := xmaps.Clone(settingMetricsMap) + clear(settingMetricsMap) + settingMetricsMu.Unlock() + tb.Cleanup(func() { + settingMetricsMu.Lock() + settingMetricsMap = oldSettingMetricsMap + settingMetricsMu.Unlock() + }) + + // (re-)set the scope metrics to use the test hooks for the duration of tb. + lazyDeviceMetrics.SetForTest(tb, newScopeMetrics(setting.DeviceSetting), nil) + lazyProfileMetrics.SetForTest(tb, newScopeMetrics(setting.ProfileSetting), nil) + lazyUserMetrics.SetForTest(tb, newScopeMetrics(setting.UserSetting), nil) +} + +func newSettingMetric(key setting.Key, scope setting.Scope, suffix string, typ clientmetric.Type) metric { + name := strings.ReplaceAll(string(key), setting.KeyPathSeparator, "_") + return newMetric([]string{name, metricScopeName(scope), suffix}, typ) +} + +func newMetric(nameParts []string, typ clientmetric.Type) metric { + name := strings.Join(slicesx.Filter([]string{internal.OS(), "syspolicy"}, nameParts, isNonEmpty), "_") + switch { + case !ShouldReport(): + return &funcMetric{name: name, typ: typ} + case testenv.InTest(): + return &funcMetric{name, typ, addMetricTestHook.Load(), setMetricTestHook.Load()} + case typ == clientmetric.TypeCounter: + return clientmetric.NewCounter(name) + case typ == clientmetric.TypeGauge: + return clientmetric.NewGauge(name) + default: + panic("unreachable") + } +} + +func isNonEmpty(s string) bool { return s != "" } + +func metricScopeName(scope setting.Scope) string { + switch scope { + case setting.DeviceSetting: + return "" + case setting.ProfileSetting: + return "profile" + case setting.UserSetting: + return "user" + default: + panic("unreachable") + } +} diff --git 
a/util/syspolicy/internal/metrics/metrics_test.go b/util/syspolicy/internal/metrics/metrics_test.go new file mode 100644 index 0000000000000..07be4773c9fcb --- /dev/null +++ b/util/syspolicy/internal/metrics/metrics_test.go @@ -0,0 +1,423 @@ +// Copyright (c) Tailscale Inc & AUTHORS +// SPDX-License-Identifier: BSD-3-Clause + +package metrics + +import ( + "errors" + "testing" + + "tailscale.com/types/lazy" + "tailscale.com/util/clientmetric" + "tailscale.com/util/syspolicy/internal" + "tailscale.com/util/syspolicy/setting" +) + +func TestSettingMetricNames(t *testing.T) { + tests := []struct { + name string + key setting.Key + scope setting.Scope + suffix string + typ clientmetric.Type + osOverride string + wantMetricName string + }{ + { + name: "windows-device-no-suffix", + key: "AdminConsole", + scope: setting.DeviceSetting, + suffix: "", + typ: clientmetric.TypeCounter, + osOverride: "windows", + wantMetricName: "windows_syspolicy_AdminConsole", + }, + { + name: "windows-user-no-suffix", + key: "AdminConsole", + scope: setting.UserSetting, + suffix: "", + typ: clientmetric.TypeCounter, + osOverride: "windows", + wantMetricName: "windows_syspolicy_AdminConsole_user", + }, + { + name: "windows-profile-no-suffix", + key: "AdminConsole", + scope: setting.ProfileSetting, + suffix: "", + typ: clientmetric.TypeCounter, + osOverride: "windows", + wantMetricName: "windows_syspolicy_AdminConsole_profile", + }, + { + name: "windows-profile-err", + key: "AdminConsole", + scope: setting.ProfileSetting, + suffix: "error", + typ: clientmetric.TypeCounter, + osOverride: "windows", + wantMetricName: "windows_syspolicy_AdminConsole_profile_error", + }, + { + name: "android-device-no-suffix", + key: "AdminConsole", + scope: setting.DeviceSetting, + suffix: "", + typ: clientmetric.TypeCounter, + osOverride: "android", + wantMetricName: "android_syspolicy_AdminConsole", + }, + { + name: "key-path", + key: "category/subcategory/setting", + scope: setting.DeviceSetting, + suffix: "", 
+ typ: clientmetric.TypeCounter, + osOverride: "fakeos", + wantMetricName: "fakeos_syspolicy_category_subcategory_setting", + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + internal.OSForTesting.SetForTest(t, tt.osOverride, nil) + metric, ok := newSettingMetric(tt.key, tt.scope, tt.suffix, tt.typ).(*funcMetric) + if !ok { + t.Fatal("metric is not a funcMetric") + } + if metric.name != tt.wantMetricName { + t.Errorf("got %q, want %q", metric.name, tt.wantMetricName) + } + }) + } +} + +func TestScopeMetrics(t *testing.T) { + tests := []struct { + name string + scope setting.Scope + osOverride string + wantHasAnyName string + wantNumErroredName string + wantHasAnyType clientmetric.Type + wantNumErroredType clientmetric.Type + }{ + { + name: "windows-device", + scope: setting.DeviceSetting, + osOverride: "windows", + wantHasAnyName: "windows_syspolicy_any", + wantHasAnyType: clientmetric.TypeGauge, + wantNumErroredName: "windows_syspolicy_errors", + wantNumErroredType: clientmetric.TypeCounter, + }, + { + name: "windows-profile", + scope: setting.ProfileSetting, + osOverride: "windows", + wantHasAnyName: "windows_syspolicy_profile_any", + wantHasAnyType: clientmetric.TypeGauge, + wantNumErroredName: "windows_syspolicy_profile_errors", + wantNumErroredType: clientmetric.TypeCounter, + }, + { + name: "windows-user", + scope: setting.UserSetting, + osOverride: "windows", + wantHasAnyName: "windows_syspolicy_user_any", + wantHasAnyType: clientmetric.TypeGauge, + wantNumErroredName: "windows_syspolicy_user_errors", + wantNumErroredType: clientmetric.TypeCounter, + }, + { + name: "android-device", + scope: setting.DeviceSetting, + osOverride: "android", + wantHasAnyName: "android_syspolicy_any", + wantHasAnyType: clientmetric.TypeGauge, + wantNumErroredName: "android_syspolicy_errors", + wantNumErroredType: clientmetric.TypeCounter, + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + internal.OSForTesting.SetForTest(t, 
tt.osOverride, nil)
+			metrics := newScopeMetrics(tt.scope)
+			hasAny, ok := metrics.hasAny.(*funcMetric)
+			if !ok {
+				t.Fatal("hasAny is not a funcMetric")
+			}
+			numErrored, ok := metrics.numErrored.(*funcMetric)
+			if !ok {
+				t.Fatal("numErrored is not a funcMetric")
+			}
+			if hasAny.name != tt.wantHasAnyName {
+				t.Errorf("hasAny.Name: got %q, want %q", hasAny.name, tt.wantHasAnyName)
+			}
+			if hasAny.typ != tt.wantHasAnyType {
+				t.Errorf("hasAny.Type: got %q, want %q", hasAny.typ, tt.wantHasAnyType)
+			}
+			if numErrored.name != tt.wantNumErroredName {
+				t.Errorf("numErrored.Name: got %q, want %q", numErrored.name, tt.wantNumErroredName)
+			}
+			if numErrored.typ != tt.wantNumErroredType {
+				t.Errorf("numErrored.Type: got %q, want %q", numErrored.typ, tt.wantNumErroredType)
+			}
+		})
+	}
+}
+
+type testSettingDetails struct {
+	definition *setting.Definition
+	origin     *setting.Origin
+	value      any
+	err        error
+}
+
+func TestReportMetrics(t *testing.T) {
+	tests := []struct {
+		name             string
+		osOverride       string
+		useMetrics       bool
+		settings         []testSettingDetails
+		wantMetrics      []TestState
+		wantResetMetrics []TestState
+	}{
+		{
+			name:        "none",
+			osOverride:  "windows",
+			settings:    []testSettingDetails{},
+			wantMetrics: []TestState{},
+		},
+		{
+			name:       "single-value",
+			osOverride: "windows",
+			settings: []testSettingDetails{
+				{
+					definition: setting.NewDefinition("TestSetting01", setting.DeviceSetting, setting.IntegerValue),
+					origin:     setting.NewOrigin(setting.DeviceScope),
+					value:      42,
+				},
+			},
+			wantMetrics: []TestState{
+				{"windows_syspolicy_any", 1},
+				{"windows_syspolicy_TestSetting01", 1},
+			},
+			wantResetMetrics: []TestState{
+				{"windows_syspolicy_any", 0},
+				{"windows_syspolicy_TestSetting01", 0},
+			},
+		},
+		{
+			name:       "single-error",
+			osOverride: "windows",
+			settings: []testSettingDetails{
+				{
+					definition: setting.NewDefinition("TestSetting02", setting.DeviceSetting, setting.IntegerValue),
+					origin:     setting.NewOrigin(setting.DeviceScope),
+					err:        errors.New("bang!"),
+				},
+			
}, + wantMetrics: []TestState{ + {"windows_syspolicy_errors", 1}, + {"windows_syspolicy_TestSetting02_error", 1}, + }, + wantResetMetrics: []TestState{ + {"windows_syspolicy_errors", 1}, + {"windows_syspolicy_TestSetting02_error", 0}, + }, + }, + { + name: "value-and-error", + osOverride: "windows", + settings: []testSettingDetails{ + { + definition: setting.NewDefinition("TestSetting01", setting.DeviceSetting, setting.IntegerValue), + origin: setting.NewOrigin(setting.DeviceScope), + value: 42, + }, + { + definition: setting.NewDefinition("TestSetting02", setting.DeviceSetting, setting.IntegerValue), + origin: setting.NewOrigin(setting.DeviceScope), + err: errors.New("bang!"), + }, + }, + + wantMetrics: []TestState{ + {"windows_syspolicy_any", 1}, + {"windows_syspolicy_errors", 1}, + {"windows_syspolicy_TestSetting01", 1}, + {"windows_syspolicy_TestSetting02_error", 1}, + }, + wantResetMetrics: []TestState{ + {"windows_syspolicy_any", 0}, + {"windows_syspolicy_errors", 1}, + {"windows_syspolicy_TestSetting01", 0}, + {"windows_syspolicy_TestSetting02_error", 0}, + }, + }, + { + name: "two-values", + osOverride: "windows", + settings: []testSettingDetails{ + { + definition: setting.NewDefinition("TestSetting01", setting.DeviceSetting, setting.IntegerValue), + origin: setting.NewOrigin(setting.DeviceScope), + value: 42, + }, + { + definition: setting.NewDefinition("TestSetting02", setting.DeviceSetting, setting.IntegerValue), + origin: setting.NewOrigin(setting.DeviceScope), + value: 17, + }, + }, + wantMetrics: []TestState{ + {"windows_syspolicy_any", 1}, + {"windows_syspolicy_TestSetting01", 1}, + {"windows_syspolicy_TestSetting02", 1}, + }, + wantResetMetrics: []TestState{ + {"windows_syspolicy_any", 0}, + {"windows_syspolicy_TestSetting01", 0}, + {"windows_syspolicy_TestSetting02", 0}, + }, + }, + { + name: "two-errors", + osOverride: "windows", + settings: []testSettingDetails{ + { + definition: setting.NewDefinition("TestSetting01", setting.DeviceSetting, 
setting.IntegerValue), + origin: setting.NewOrigin(setting.DeviceScope), + err: errors.New("bang!"), + }, + { + definition: setting.NewDefinition("TestSetting02", setting.DeviceSetting, setting.IntegerValue), + origin: setting.NewOrigin(setting.DeviceScope), + err: errors.New("bang!"), + }, + }, + wantMetrics: []TestState{ + {"windows_syspolicy_errors", 2}, + {"windows_syspolicy_TestSetting01_error", 1}, + {"windows_syspolicy_TestSetting02_error", 1}, + }, + wantResetMetrics: []TestState{ + {"windows_syspolicy_errors", 2}, + {"windows_syspolicy_TestSetting01_error", 0}, + {"windows_syspolicy_TestSetting02_error", 0}, + }, + }, + { + name: "multi-scope", + osOverride: "windows", + settings: []testSettingDetails{ + { + definition: setting.NewDefinition("TestSetting01", setting.ProfileSetting, setting.IntegerValue), + origin: setting.NewOrigin(setting.DeviceScope), + value: 42, + }, + { + definition: setting.NewDefinition("TestSetting02", setting.ProfileSetting, setting.IntegerValue), + origin: setting.NewOrigin(setting.CurrentProfileScope), + err: errors.New("bang!"), + }, + { + definition: setting.NewDefinition("TestSetting03", setting.UserSetting, setting.IntegerValue), + origin: setting.NewOrigin(setting.CurrentUserScope), + value: 17, + }, + }, + wantMetrics: []TestState{ + {"windows_syspolicy_any", 1}, + {"windows_syspolicy_profile_errors", 1}, + {"windows_syspolicy_user_any", 1}, + {"windows_syspolicy_TestSetting01", 1}, + {"windows_syspolicy_TestSetting02_profile_error", 1}, + {"windows_syspolicy_TestSetting03_user", 1}, + }, + wantResetMetrics: []TestState{ + {"windows_syspolicy_any", 0}, + {"windows_syspolicy_profile_errors", 1}, + {"windows_syspolicy_user_any", 0}, + {"windows_syspolicy_TestSetting01", 0}, + {"windows_syspolicy_TestSetting02_profile_error", 0}, + {"windows_syspolicy_TestSetting03_user", 0}, + }, + }, + { + name: "report-metrics-on-android", + osOverride: "android", + settings: []testSettingDetails{ + { + definition: 
setting.NewDefinition("TestSetting01", setting.DeviceSetting, setting.IntegerValue), + origin: setting.NewOrigin(setting.DeviceScope), + value: 42, + }, + }, + wantMetrics: []TestState{ + {"android_syspolicy_any", 1}, + {"android_syspolicy_TestSetting01", 1}, + }, + wantResetMetrics: []TestState{ + {"android_syspolicy_any", 0}, + {"android_syspolicy_TestSetting01", 0}, + }, + }, + { + name: "do-not-report-metrics-on-macos", + osOverride: "macos", + settings: []testSettingDetails{ + { + definition: setting.NewDefinition("TestSetting01", setting.DeviceSetting, setting.IntegerValue), + origin: setting.NewOrigin(setting.DeviceScope), + value: 42, + }, + }, + + wantMetrics: []TestState{}, // none reported + }, + { + name: "do-not-report-metrics-on-ios", + osOverride: "ios", + settings: []testSettingDetails{ + { + definition: setting.NewDefinition("TestSetting01", setting.DeviceSetting, setting.IntegerValue), + origin: setting.NewOrigin(setting.DeviceScope), + value: 42, + }, + }, + + wantMetrics: []TestState{}, // none reported + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + // Reset the lazy value so it'll be re-evaluated with the osOverride. + lazyReportMetrics = lazy.SyncValue[bool]{} + t.Cleanup(func() { + // Also reset it during the cleanup. + lazyReportMetrics = lazy.SyncValue[bool]{} + }) + internal.OSForTesting.SetForTest(t, tt.osOverride, nil) + + h := NewTestHandler(t) + SetHooksForTest(t, h.AddMetric, h.SetMetric) + + for _, s := range tt.settings { + if s.err != nil { + ReportError(s.origin, s.definition, s.err) + } else { + ReportConfigured(s.origin, s.definition, s.value) + } + } + h.MustEqual(tt.wantMetrics...) + + for _, s := range tt.settings { + Reset(s.origin) + ReportNotConfigured(s.origin, s.definition) + } + h.MustEqual(tt.wantResetMetrics...) 
+ }) + } +} diff --git a/util/syspolicy/internal/metrics/test_handler.go b/util/syspolicy/internal/metrics/test_handler.go new file mode 100644 index 0000000000000..f9e4846092be3 --- /dev/null +++ b/util/syspolicy/internal/metrics/test_handler.go @@ -0,0 +1,88 @@ +// Copyright (c) Tailscale Inc & AUTHORS +// SPDX-License-Identifier: BSD-3-Clause + +package metrics + +import ( + "strings" + + "tailscale.com/util/clientmetric" + "tailscale.com/util/set" + "tailscale.com/util/syspolicy/internal" +) + +// TestState represents a metric name and its expected value. +type TestState struct { + Name string // `$os` in the name will be replaced by the actual operating system name. + Value int64 +} + +// TestHandler facilitates testing of the code that uses metrics. +type TestHandler struct { + t internal.TB + + m map[string]int64 +} + +// NewTestHandler returns a new TestHandler. +func NewTestHandler(t internal.TB) *TestHandler { + return &TestHandler{t, make(map[string]int64)} +} + +// AddMetric increments the metric with the specified name and type by delta d. +func (h *TestHandler) AddMetric(name string, typ clientmetric.Type, d int64) { + h.t.Helper() + if typ == clientmetric.TypeCounter && d < 0 { + h.t.Fatalf("an attempt was made to decrement a counter metric %q", name) + } + if v, ok := h.m[name]; ok || d != 0 { + h.m[name] = v + d + } +} + +// SetMetric sets the metric with the specified name and type to the value v. +func (h *TestHandler) SetMetric(name string, typ clientmetric.Type, v int64) { + h.t.Helper() + if typ == clientmetric.TypeCounter { + h.t.Fatalf("an attempt was made to set a counter metric %q", name) + } + if _, ok := h.m[name]; ok || v != 0 { + h.m[name] = v + } +} + +// MustEqual fails the test if the actual metric state differs from the specified state. +func (h *TestHandler) MustEqual(metrics ...TestState) { + h.t.Helper() + h.MustContain(metrics...) + h.mustNoExtra(metrics...) 
+}
+
+// MustContain fails the test if the specified metrics are not set or have
+// different values than specified. It permits other metrics to be set in
+// addition to the ones being tested.
+func (h *TestHandler) MustContain(metrics ...TestState) {
+	h.t.Helper()
+	for _, m := range metrics {
+		name := strings.ReplaceAll(m.Name, "$os", internal.OS())
+		v, ok := h.m[name]
+		if !ok {
+			h.t.Errorf("%q: got (none), want %v", name, m.Value)
+		} else if v != m.Value {
+			// Use Errorf (not Fatalf) so all mismatches are reported and
+			// MustEqual's extra-metrics check still runs afterwards.
+			h.t.Errorf("%q: got %v, want %v", name, v, m.Value)
+		}
+	}
+}
+
+func (h *TestHandler) mustNoExtra(metrics ...TestState) {
+	h.t.Helper()
+	s := make(set.Set[string])
+	for i := range metrics {
+		s.Add(strings.ReplaceAll(metrics[i].Name, "$os", internal.OS()))
+	}
+	for n, v := range h.m {
+		if !s.Contains(n) {
+			h.t.Errorf("%q: got %v, want (none)", n, v)
+		}
+	}
+}
diff --git a/util/syspolicy/policy_keys.go b/util/syspolicy/policy_keys.go
index a88025205fa26..ec0556a942cc6 100644
--- a/util/syspolicy/policy_keys.go
+++ b/util/syspolicy/policy_keys.go
@@ -94,6 +94,18 @@ const (
 	// organization. A button in the client UI provides easy access to this URL.
 	ManagedByURL Key = "ManagedByURL"
 
+	// AuthKey is an auth key that will be used to login whenever the backend starts. This can be used to
+	// automatically authenticate managed devices, without requiring user interaction.
+	AuthKey Key = "AuthKey"
+
+	// MachineCertificateSubject is the exact name of a Subject that needs
+	// to be present in an identity's certificate chain to sign a RegisterRequest,
+	// formatted as per pkix.Name.String(). The Subject may be that of the identity
+	// itself, an intermediate CA or the root CA.
+	//
+	// Example: "CN=Tailscale Inc Test Root CA,OU=Tailscale Inc Test Certificate Authority,O=Tailscale Inc,ST=ON,C=CA"
+	MachineCertificateSubject Key = "MachineCertificateSubject"
+
 	// Keys with a string array value.
// AllowedSuggestedExitNodes's string array value is a list of exit node IDs that restricts which exit nodes are considered when generating suggestions for exit nodes. AllowedSuggestedExitNodes Key = "AllowedSuggestedExitNodes" diff --git a/util/syspolicy/setting/errors.go b/util/syspolicy/setting/errors.go index d7e14df83b8fe..38dc6a88c7f1d 100644 --- a/util/syspolicy/setting/errors.go +++ b/util/syspolicy/setting/errors.go @@ -42,9 +42,9 @@ func NewErrorText(text string) *ErrorText { return ptr.To(ErrorText(text)) } -// NewErrorTextFromError returns an [ErrorText] with the text of the specified error, +// MaybeErrorText returns an [ErrorText] with the text of the specified error, // or nil if err is nil, [ErrNotConfigured], or [ErrNoSuchKey]. -func NewErrorTextFromError(err error) *ErrorText { +func MaybeErrorText(err error) *ErrorText { if err == nil || errors.Is(err, ErrNotConfigured) || errors.Is(err, ErrNoSuchKey) { return nil } diff --git a/util/syspolicy/source/policy_reader.go b/util/syspolicy/source/policy_reader.go new file mode 100644 index 0000000000000..a1bd3147ea85e --- /dev/null +++ b/util/syspolicy/source/policy_reader.go @@ -0,0 +1,394 @@ +// Copyright (c) Tailscale Inc & AUTHORS +// SPDX-License-Identifier: BSD-3-Clause + +package source + +import ( + "errors" + "fmt" + "io" + "slices" + "sort" + "sync" + "time" + + "tailscale.com/util/mak" + "tailscale.com/util/set" + "tailscale.com/util/syspolicy/internal/loggerx" + "tailscale.com/util/syspolicy/internal/metrics" + "tailscale.com/util/syspolicy/setting" +) + +// Reader reads all configured policy settings from a given [Store]. +// It registers a change callback with the [Store] and maintains the current version +// of the [setting.Snapshot] by lazily re-reading policy settings from the [Store] +// whenever a new settings snapshot is requested with [Reader.GetSettings]. +// It is safe for concurrent use. 
+type Reader struct { + store Store + origin *setting.Origin + settings []*setting.Definition + unregisterChangeNotifier func() + doneCh chan struct{} // closed when [Reader] is closed. + + mu sync.Mutex + closing bool + upToDate bool + lastPolicy *setting.Snapshot + sessions set.HandleSet[*ReadingSession] +} + +// newReader returns a new [Reader] that reads policy settings from a given [Store]. +// The returned reader takes ownership of the store. If the store implements [io.Closer], +// the returned reader will close the store when it is closed. +func newReader(store Store, origin *setting.Origin) (*Reader, error) { + settings, err := setting.Definitions() + if err != nil { + return nil, err + } + + if expirable, ok := store.(Expirable); ok { + select { + case <-expirable.Done(): + return nil, ErrStoreClosed + default: + } + } + + reader := &Reader{store: store, origin: origin, settings: settings, doneCh: make(chan struct{})} + if changeable, ok := store.(Changeable); ok { + // We should subscribe to policy change notifications first before reading + // the policy settings from the store. This way we won't miss any notifications. + if reader.unregisterChangeNotifier, err = changeable.RegisterChangeCallback(reader.onPolicyChange); err != nil { + // Errors registering policy change callbacks are non-fatal. + // TODO(nickkhyl): implement a background policy refresh every X minutes? 
+			loggerx.Errorf("failed to register %v policy change callback: %v", origin, err)
+		}
+	}
+
+	if _, err := reader.reload(true); err != nil {
+		if reader.unregisterChangeNotifier != nil {
+			reader.unregisterChangeNotifier()
+		}
+		return nil, err
+	}
+
+	if expirable, ok := store.(Expirable); ok {
+		if waitCh := expirable.Done(); waitCh != nil {
+			go func() {
+				select {
+				case <-waitCh:
+					reader.Close()
+				case <-reader.doneCh:
+				}
+			}()
+		}
+	}
+
+	return reader, nil
+}
+
+// GetSettings returns the current [*setting.Snapshot],
+// re-reading it from the underlying [Store] only if the policy
+// has changed since it was read last. It never fails and returns
+// the previous version of the policy settings if a read attempt fails.
+func (r *Reader) GetSettings() *setting.Snapshot {
+	r.mu.Lock()
+	upToDate, lastPolicy := r.upToDate, r.lastPolicy
+	r.mu.Unlock()
+	if upToDate {
+		return lastPolicy
+	}
+
+	policy, err := r.reload(false)
+	if err != nil {
+		// If the policy fails to reload completely, log an error and return the last cached version.
+		// However, errors related to individual policy items are always
+		// propagated to callers when they fetch those settings.
+		loggerx.Errorf("failed to reload %v policy: %v", r.origin, err)
+	}
+	return policy
+}
+
+// ReadSettings reads policy settings from the underlying [Store] even if no
+// changes were detected. It returns the new [*setting.Snapshot], nil on
+// success or an undefined snapshot (possibly `nil`) along with a non-`nil`
+// error in case of failure.
+func (r *Reader) ReadSettings() (*setting.Snapshot, error) {
+	return r.reload(true)
+}
+
+// reload is like [Reader.ReadSettings], but allows specifying whether to re-read
+// an unchanged policy, and returns the last [*setting.Snapshot] if the read fails.
+func (r *Reader) reload(force bool) (*setting.Snapshot, error) { + r.mu.Lock() + defer r.mu.Unlock() + if r.upToDate && !force { + return r.lastPolicy, nil + } + + if lockable, ok := r.store.(Lockable); ok { + if err := lockable.Lock(); err != nil { + return r.lastPolicy, err + } + defer lockable.Unlock() + } + + r.upToDate = true + + metrics.Reset(r.origin) + + var m map[setting.Key]setting.RawItem + if lastPolicyCount := r.lastPolicy.Len(); lastPolicyCount > 0 { + m = make(map[setting.Key]setting.RawItem, lastPolicyCount) + } + for _, s := range r.settings { + if !r.origin.Scope().IsConfigurableSetting(s) { + // Skip settings that cannot be configured in the current scope. + continue + } + + val, err := readPolicySettingValue(r.store, s) + if err != nil && (errors.Is(err, setting.ErrNoSuchKey) || errors.Is(err, setting.ErrNotConfigured)) { + metrics.ReportNotConfigured(r.origin, s) + continue + } + + if err == nil { + metrics.ReportConfigured(r.origin, s, val) + } else { + metrics.ReportError(r.origin, s, err) + } + + // If there's an error reading a single policy, such as a value type mismatch, + // we'll wrap the error to preserve its text and return it + // whenever someone attempts to fetch the value. + // Otherwise, the errorText will be nil. + errorText := setting.MaybeErrorText(err) + item := setting.RawItemWith(val, errorText, r.origin) + mak.Set(&m, s.Key(), item) + } + + newPolicy := setting.NewSnapshot(m, setting.SummaryWith(r.origin)) + if r.lastPolicy == nil || !newPolicy.EqualItems(r.lastPolicy) { + r.lastPolicy = newPolicy + } + return r.lastPolicy, nil +} + +// ReadingSession is like [Reader], but with a channel that's written +// to when there's a policy change, and closed when the session is terminated. 
+type ReadingSession struct { + reader *Reader + policyChangedCh chan struct{} // 1-buffered channel + handle set.Handle // in the reader.sessions + closeInternal func() +} + +// OpenSession opens and returns a new session to r, allowing the caller +// to get notified whenever a policy change is reported by the [source.Store], +// or an [ErrStoreClosed] if the reader has already been closed. +func (r *Reader) OpenSession() (*ReadingSession, error) { + session := &ReadingSession{ + reader: r, + policyChangedCh: make(chan struct{}, 1), + } + session.closeInternal = sync.OnceFunc(func() { close(session.policyChangedCh) }) + r.mu.Lock() + defer r.mu.Unlock() + if r.closing { + return nil, ErrStoreClosed + } + session.handle = r.sessions.Add(session) + return session, nil +} + +// GetSettings is like [Reader.GetSettings]. +func (s *ReadingSession) GetSettings() *setting.Snapshot { + return s.reader.GetSettings() +} + +// ReadSettings is like [Reader.ReadSettings]. +func (s *ReadingSession) ReadSettings() (*setting.Snapshot, error) { + return s.reader.ReadSettings() +} + +// PolicyChanged returns a channel that's written to when +// there's a policy change, closed when the session is terminated. +func (s *ReadingSession) PolicyChanged() <-chan struct{} { + return s.policyChangedCh +} + +// Close unregisters this session with the [Reader]. +func (s *ReadingSession) Close() { + s.reader.mu.Lock() + delete(s.reader.sessions, s.handle) + s.closeInternal() + s.reader.mu.Unlock() +} + +// onPolicyChange handles a policy change notification from the [Store], +// invalidating the current [setting.Snapshot] in r, +// and notifying the active [ReadingSession]s. +func (r *Reader) onPolicyChange() { + r.mu.Lock() + defer r.mu.Unlock() + r.upToDate = false + for _, s := range r.sessions { + select { + case s.policyChangedCh <- struct{}{}: + // Notified. + default: + // 1-buffered channel is full, meaning that another policy change + // notification is already en route. 
+ } + } +} + +// Close closes the store reader and the underlying store. +func (r *Reader) Close() error { + r.mu.Lock() + if r.closing { + r.mu.Unlock() + return nil + } + r.closing = true + r.mu.Unlock() + + if r.unregisterChangeNotifier != nil { + r.unregisterChangeNotifier() + r.unregisterChangeNotifier = nil + } + + if closer, ok := r.store.(io.Closer); ok { + if err := closer.Close(); err != nil { + return err + } + } + r.store = nil + + close(r.doneCh) + + r.mu.Lock() + defer r.mu.Unlock() + for _, c := range r.sessions { + c.closeInternal() + } + r.sessions = nil + return nil +} + +// Done returns a channel that is closed when the reader is closed. +func (r *Reader) Done() <-chan struct{} { + return r.doneCh +} + +// ReadableSource is a [Source] open for reading. +type ReadableSource struct { + *Source + *ReadingSession +} + +// Close closes the underlying [ReadingSession]. +func (s ReadableSource) Close() { + s.ReadingSession.Close() +} + +// ReadableSources is a slice of [ReadableSource]. +type ReadableSources []ReadableSource + +// Contains reports whether s contains the specified source. +func (s ReadableSources) Contains(source *Source) bool { + return s.IndexOf(source) != -1 +} + +// IndexOf returns position of the specified source in s, or -1 +// if the source does not exist. +func (s ReadableSources) IndexOf(source *Source) int { + return slices.IndexFunc(s, func(rs ReadableSource) bool { + return rs.Source == source + }) +} + +// InsertionIndexOf returns the position at which source can be inserted +// to maintain the sorted order of the readableSources. +// The return value is unspecified if s is not sorted on entry to InsertionIndexOf. +func (s ReadableSources) InsertionIndexOf(source *Source) int { + // Insert new sources after any existing sources with the same precedence, + // and just before the first source with higher precedence. + // Just like stable sort, but for insertion. 
+ // It's okay to use linear search as insertions are rare + // and we never have more than just a few policy sources. + higherPrecedence := func(rs ReadableSource) bool { return rs.Compare(source) > 0 } + if i := slices.IndexFunc(s, higherPrecedence); i != -1 { + return i + } + return len(s) +} + +// StableSort sorts [ReadableSource] in s by precedence, so that policy +// settings from sources with higher precedence (e.g., [DeviceScope]) +// will be read and merged last, overriding any policy settings with +// the same keys configured in sources with lower precedence +// (e.g., [CurrentUserScope]). +func (s *ReadableSources) StableSort() { + sort.SliceStable(*s, func(i, j int) bool { + return (*s)[i].Source.Compare((*s)[j].Source) < 0 + }) +} + +// DeleteAt closes and deletes the i-th source from s. +func (s *ReadableSources) DeleteAt(i int) { + (*s)[i].Close() + *s = slices.Delete(*s, i, i+1) +} + +// Close closes and deletes all sources in s. +func (s *ReadableSources) Close() { + for _, s := range *s { + s.Close() + } + *s = nil +} + +func readPolicySettingValue(store Store, s *setting.Definition) (value any, err error) { + switch key := s.Key(); s.Type() { + case setting.BooleanValue: + return store.ReadBoolean(key) + case setting.IntegerValue: + return store.ReadUInt64(key) + case setting.StringValue: + return store.ReadString(key) + case setting.StringListValue: + return store.ReadStringArray(key) + case setting.PreferenceOptionValue: + s, err := store.ReadString(key) + if err == nil { + var value setting.PreferenceOption + if err = value.UnmarshalText([]byte(s)); err == nil { + return value, nil + } + } + return setting.ShowChoiceByPolicy, err + case setting.VisibilityValue: + s, err := store.ReadString(key) + if err == nil { + var value setting.Visibility + if err = value.UnmarshalText([]byte(s)); err == nil { + return value, nil + } + } + return setting.VisibleByPolicy, err + case setting.DurationValue: + s, err := store.ReadString(key) + if err == nil { 
+ var value time.Duration + if value, err = time.ParseDuration(s); err == nil { + return value, nil + } + } + return nil, err + default: + return nil, fmt.Errorf("%w: unsupported setting type: %v", setting.ErrTypeMismatch, s.Type()) + } +} diff --git a/util/syspolicy/source/policy_reader_test.go b/util/syspolicy/source/policy_reader_test.go new file mode 100644 index 0000000000000..57676e67da614 --- /dev/null +++ b/util/syspolicy/source/policy_reader_test.go @@ -0,0 +1,291 @@ +// Copyright (c) Tailscale Inc & AUTHORS +// SPDX-License-Identifier: BSD-3-Clause + +package source + +import ( + "cmp" + "testing" + "time" + + "tailscale.com/util/must" + "tailscale.com/util/syspolicy/setting" +) + +func TestReaderLifecycle(t *testing.T) { + tests := []struct { + name string + origin *setting.Origin + definitions []*setting.Definition + wantReads []TestExpectedReads + initStrings []TestSetting[string] + initUInt64s []TestSetting[uint64] + initWant *setting.Snapshot + addStrings []TestSetting[string] + addStringLists []TestSetting[[]string] + newWant *setting.Snapshot + }{ + { + name: "read-all-settings-once", + origin: setting.NewNamedOrigin("Test", setting.DeviceScope), + definitions: []*setting.Definition{ + setting.NewDefinition("StringValue", setting.DeviceSetting, setting.StringValue), + setting.NewDefinition("IntegerValue", setting.DeviceSetting, setting.IntegerValue), + setting.NewDefinition("BooleanValue", setting.DeviceSetting, setting.BooleanValue), + setting.NewDefinition("StringListValue", setting.DeviceSetting, setting.StringListValue), + setting.NewDefinition("DurationValue", setting.DeviceSetting, setting.DurationValue), + setting.NewDefinition("PreferenceOptionValue", setting.DeviceSetting, setting.PreferenceOptionValue), + setting.NewDefinition("VisibilityValue", setting.DeviceSetting, setting.VisibilityValue), + }, + wantReads: []TestExpectedReads{ + {Key: "StringValue", Type: setting.StringValue, NumTimes: 1}, + {Key: "IntegerValue", Type: 
setting.IntegerValue, NumTimes: 1}, + {Key: "BooleanValue", Type: setting.BooleanValue, NumTimes: 1}, + {Key: "StringListValue", Type: setting.StringListValue, NumTimes: 1}, + {Key: "DurationValue", Type: setting.StringValue, NumTimes: 1}, // duration is string from the [Store]'s perspective + {Key: "PreferenceOptionValue", Type: setting.StringValue, NumTimes: 1}, // and so are [setting.PreferenceOption]s + {Key: "VisibilityValue", Type: setting.StringValue, NumTimes: 1}, // and [setting.Visibility] + }, + initWant: setting.NewSnapshot(nil, setting.NewNamedOrigin("Test", setting.DeviceScope)), + }, + { + name: "re-read-all-settings-when-the-policy-changes", + origin: setting.NewNamedOrigin("Test", setting.DeviceScope), + definitions: []*setting.Definition{ + setting.NewDefinition("StringValue", setting.DeviceSetting, setting.StringValue), + setting.NewDefinition("IntegerValue", setting.DeviceSetting, setting.IntegerValue), + setting.NewDefinition("BooleanValue", setting.DeviceSetting, setting.BooleanValue), + setting.NewDefinition("StringListValue", setting.DeviceSetting, setting.StringListValue), + setting.NewDefinition("DurationValue", setting.DeviceSetting, setting.DurationValue), + setting.NewDefinition("PreferenceOptionValue", setting.DeviceSetting, setting.PreferenceOptionValue), + setting.NewDefinition("VisibilityValue", setting.DeviceSetting, setting.VisibilityValue), + }, + wantReads: []TestExpectedReads{ + {Key: "StringValue", Type: setting.StringValue, NumTimes: 1}, + {Key: "IntegerValue", Type: setting.IntegerValue, NumTimes: 1}, + {Key: "BooleanValue", Type: setting.BooleanValue, NumTimes: 1}, + {Key: "StringListValue", Type: setting.StringListValue, NumTimes: 1}, + {Key: "DurationValue", Type: setting.StringValue, NumTimes: 1}, // duration is string from the [Store]'s perspective + {Key: "PreferenceOptionValue", Type: setting.StringValue, NumTimes: 1}, // and so are [setting.PreferenceOption]s + {Key: "VisibilityValue", Type: setting.StringValue, 
NumTimes: 1}, // and [setting.Visibility] + }, + initWant: setting.NewSnapshot(nil, setting.NewNamedOrigin("Test", setting.DeviceScope)), + addStrings: []TestSetting[string]{TestSettingOf("StringValue", "S1")}, + addStringLists: []TestSetting[[]string]{TestSettingOf("StringListValue", []string{"S1", "S2", "S3"})}, + newWant: setting.NewSnapshot(map[setting.Key]setting.RawItem{ + "StringValue": setting.RawItemWith("S1", nil, setting.NewNamedOrigin("Test", setting.DeviceScope)), + "StringListValue": setting.RawItemWith([]string{"S1", "S2", "S3"}, nil, setting.NewNamedOrigin("Test", setting.DeviceScope)), + }, setting.NewNamedOrigin("Test", setting.DeviceScope)), + }, + { + name: "read-settings-if-in-scope/device", + origin: setting.NewNamedOrigin("Test", setting.DeviceScope), + definitions: []*setting.Definition{ + setting.NewDefinition("DeviceSetting", setting.DeviceSetting, setting.StringValue), + setting.NewDefinition("ProfileSetting", setting.ProfileSetting, setting.IntegerValue), + setting.NewDefinition("UserSetting", setting.UserSetting, setting.BooleanValue), + }, + wantReads: []TestExpectedReads{ + {Key: "DeviceSetting", Type: setting.StringValue, NumTimes: 1}, + {Key: "ProfileSetting", Type: setting.IntegerValue, NumTimes: 1}, + {Key: "UserSetting", Type: setting.BooleanValue, NumTimes: 1}, + }, + }, + { + name: "read-settings-if-in-scope/profile", + origin: setting.NewNamedOrigin("Test", setting.CurrentProfileScope), + definitions: []*setting.Definition{ + setting.NewDefinition("DeviceSetting", setting.DeviceSetting, setting.StringValue), + setting.NewDefinition("ProfileSetting", setting.ProfileSetting, setting.IntegerValue), + setting.NewDefinition("UserSetting", setting.UserSetting, setting.BooleanValue), + }, + wantReads: []TestExpectedReads{ + // Device settings cannot be configured at the profile scope and should not be read. 
+				{Key: "ProfileSetting", Type: setting.IntegerValue, NumTimes: 1},
+				{Key: "UserSetting", Type: setting.BooleanValue, NumTimes: 1},
+			},
+		},
+		{
+			name:   "read-settings-if-in-scope/user",
+			origin: setting.NewNamedOrigin("Test", setting.CurrentUserScope),
+			definitions: []*setting.Definition{
+				setting.NewDefinition("DeviceSetting", setting.DeviceSetting, setting.StringValue),
+				setting.NewDefinition("ProfileSetting", setting.ProfileSetting, setting.IntegerValue),
+				setting.NewDefinition("UserSetting", setting.UserSetting, setting.BooleanValue),
+			},
+			wantReads: []TestExpectedReads{
+				// Device and profile settings cannot be configured at the user scope and should not be read.
+				{Key: "UserSetting", Type: setting.BooleanValue, NumTimes: 1},
+			},
+		},
+		{
+			name:   "read-stringy-settings",
+			origin: setting.NewNamedOrigin("Test", setting.DeviceScope),
+			definitions: []*setting.Definition{
+				setting.NewDefinition("DurationValue", setting.DeviceSetting, setting.DurationValue),
+				setting.NewDefinition("PreferenceOptionValue", setting.DeviceSetting, setting.PreferenceOptionValue),
+				setting.NewDefinition("VisibilityValue", setting.DeviceSetting, setting.VisibilityValue),
+			},
+			wantReads: []TestExpectedReads{
+				{Key: "DurationValue", Type: setting.StringValue, NumTimes: 1},         // duration is string from the [Store]'s perspective
+				{Key: "PreferenceOptionValue", Type: setting.StringValue, NumTimes: 1}, // and so are [setting.PreferenceOption]s
+				{Key: "VisibilityValue", Type: setting.StringValue, NumTimes: 1},       // and [setting.Visibility]
+			},
+			initStrings: []TestSetting[string]{
+				TestSettingOf("DurationValue", "2h30m"),
+				TestSettingOf("PreferenceOptionValue", "always"),
+				TestSettingOf("VisibilityValue", "show"),
+			},
+			initWant: setting.NewSnapshot(map[setting.Key]setting.RawItem{
+				"DurationValue":         setting.RawItemWith(must.Get(time.ParseDuration("2h30m")), nil, setting.NewNamedOrigin("Test", setting.DeviceScope)),
+				"PreferenceOptionValue": 
setting.RawItemWith(setting.AlwaysByPolicy, nil, setting.NewNamedOrigin("Test", setting.DeviceScope)), + "VisibilityValue": setting.RawItemWith(setting.VisibleByPolicy, nil, setting.NewNamedOrigin("Test", setting.DeviceScope)), + }, setting.NewNamedOrigin("Test", setting.DeviceScope)), + }, + { + name: "read-erroneous-stringy-settings", + origin: setting.NewNamedOrigin("Test", setting.CurrentUserScope), + definitions: []*setting.Definition{ + setting.NewDefinition("DurationValue1", setting.UserSetting, setting.DurationValue), + setting.NewDefinition("DurationValue2", setting.UserSetting, setting.DurationValue), + setting.NewDefinition("PreferenceOptionValue", setting.UserSetting, setting.PreferenceOptionValue), + setting.NewDefinition("VisibilityValue", setting.UserSetting, setting.VisibilityValue), + }, + wantReads: []TestExpectedReads{ + {Key: "DurationValue1", Type: setting.StringValue, NumTimes: 1}, // duration is string from the [Store]'s perspective + {Key: "DurationValue2", Type: setting.StringValue, NumTimes: 1}, // duration is string from the [Store]'s perspective + {Key: "PreferenceOptionValue", Type: setting.StringValue, NumTimes: 1}, // and so are [setting.PreferenceOption]s + {Key: "VisibilityValue", Type: setting.StringValue, NumTimes: 1}, // and [setting.Visibility] + }, + initStrings: []TestSetting[string]{ + TestSettingOf("DurationValue1", "soon"), + TestSettingWithError[string]("DurationValue2", setting.NewErrorText("bang!")), + TestSettingOf("PreferenceOptionValue", "sometimes"), + }, + initUInt64s: []TestSetting[uint64]{ + TestSettingOf[uint64]("VisibilityValue", 42), // type mismatch + }, + initWant: setting.NewSnapshot(map[setting.Key]setting.RawItem{ + "DurationValue1": setting.RawItemWith(nil, setting.NewErrorText("time: invalid duration \"soon\""), setting.NewNamedOrigin("Test", setting.CurrentUserScope)), + "DurationValue2": setting.RawItemWith(nil, setting.NewErrorText("bang!"), setting.NewNamedOrigin("Test", setting.CurrentUserScope)), + 
"PreferenceOptionValue": setting.RawItemWith(setting.ShowChoiceByPolicy, nil, setting.NewNamedOrigin("Test", setting.CurrentUserScope)), + "VisibilityValue": setting.RawItemWith(setting.VisibleByPolicy, setting.NewErrorText("type mismatch in ReadString: got uint64"), setting.NewNamedOrigin("Test", setting.CurrentUserScope)), + }, setting.NewNamedOrigin("Test", setting.CurrentUserScope)), + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + setting.SetDefinitionsForTest(t, tt.definitions...) + store := NewTestStore(t) + store.SetStrings(tt.initStrings...) + store.SetUInt64s(tt.initUInt64s...) + + reader, err := newReader(store, tt.origin) + if err != nil { + t.Fatalf("newReader failed: %v", err) + } + + if got := reader.GetSettings(); tt.initWant != nil && !got.Equal(tt.initWant) { + t.Errorf("Settings do not match: got %v, want %v", got, tt.initWant) + } + if tt.wantReads != nil { + store.ReadsMustEqual(tt.wantReads...) + } + + // Should not result in new reads as there were no changes. + N := 100 + for range N { + reader.GetSettings() + } + if tt.wantReads != nil { + store.ReadsMustEqual(tt.wantReads...) + } + store.ResetCounters() + + got, err := reader.ReadSettings() + if err != nil { + t.Fatalf("ReadSettings failed: %v", err) + } + + if tt.initWant != nil && !got.Equal(tt.initWant) { + t.Errorf("Settings do not match: got %v, want %v", got, tt.initWant) + } + + if tt.wantReads != nil { + store.ReadsMustEqual(tt.wantReads...) + } + store.ResetCounters() + + if len(tt.addStrings) != 0 || len(tt.addStringLists) != 0 { + store.SetStrings(tt.addStrings...) + store.SetStringLists(tt.addStringLists...) + + // As the settings have changed, GetSettings needs to re-read them. + if got, want := reader.GetSettings(), cmp.Or(tt.newWant, tt.initWant); !got.Equal(want) { + t.Errorf("New Settings do not match: got %v, want %v", got, want) + } + if tt.wantReads != nil { + store.ReadsMustEqual(tt.wantReads...) 
+ } + } + + select { + case <-reader.Done(): + t.Fatalf("the reader is closed") + default: + } + + store.Close() + + <-reader.Done() + }) + } +} + +func TestReadingSession(t *testing.T) { + setting.SetDefinitionsForTest(t, setting.NewDefinition("StringValue", setting.DeviceSetting, setting.StringValue)) + store := NewTestStore(t) + + origin := setting.NewOrigin(setting.DeviceScope) + reader, err := newReader(store, origin) + if err != nil { + t.Fatalf("newReader failed: %v", err) + } + session, err := reader.OpenSession() + if err != nil { + t.Fatalf("failed to open a reading session: %v", err) + } + t.Cleanup(session.Close) + + if got, want := session.GetSettings(), setting.NewSnapshot(nil, origin); !got.Equal(want) { + t.Errorf("Settings do not match: got %v, want %v", got, want) + } + + select { + case _, ok := <-session.PolicyChanged(): + if ok { + t.Fatalf("the policy changed notification was sent prematurely") + } else { + t.Fatalf("the session was closed prematurely") + } + default: + } + + store.SetStrings(TestSettingOf("StringValue", "S1")) + _, ok := <-session.PolicyChanged() + if !ok { + t.Fatalf("the session was closed prematurely") + } + + want := setting.NewSnapshot(map[setting.Key]setting.RawItem{ + "StringValue": setting.RawItemWith("S1", nil, origin), + }, origin) + if got := session.GetSettings(); !got.Equal(want) { + t.Errorf("Settings do not match: got %v, want %v", got, want) + } + + store.Close() + if _, ok = <-session.PolicyChanged(); ok { + t.Fatalf("the session must be closed") + } +} diff --git a/util/syspolicy/source/policy_source.go b/util/syspolicy/source/policy_source.go new file mode 100644 index 0000000000000..7f2821b596e62 --- /dev/null +++ b/util/syspolicy/source/policy_source.go @@ -0,0 +1,146 @@ +// Copyright (c) Tailscale Inc & AUTHORS +// SPDX-License-Identifier: BSD-3-Clause + +// Package source defines interfaces for policy stores, +// facilitates the creation of policy sources, and provides +// functionality for reading 
policy settings from these sources. +package source + +import ( + "cmp" + "errors" + "fmt" + "io" + + "tailscale.com/types/lazy" + "tailscale.com/util/syspolicy/setting" +) + +// ErrStoreClosed is an error returned when attempting to use a [Store] after it +// has been closed. +var ErrStoreClosed = errors.New("the policy store has been closed") + +// Store provides methods to read system policy settings from OS-specific storage. +// Implementations must be concurrency-safe, and may also implement +// [Lockable], [Changeable], [Expirable] and [io.Closer]. +// +// If a [Store] implementation also implements [io.Closer], +// it will be called by the package to release the resources +// when the store is no longer needed. +type Store interface { + // ReadString returns the value of a [setting.StringValue] with the specified key, + // an [setting.ErrNotConfigured] if the policy setting is not configured, or + // an error on failure. + ReadString(key setting.Key) (string, error) + // ReadUInt64 returns the value of a [setting.IntegerValue] with the specified key, + // an [setting.ErrNotConfigured] if the policy setting is not configured, or + // an error on failure. + ReadUInt64(key setting.Key) (uint64, error) + // ReadBoolean returns the value of a [setting.BooleanValue] with the specified key, + // an [setting.ErrNotConfigured] if the policy setting is not configured, or + // an error on failure. + ReadBoolean(key setting.Key) (bool, error) + // ReadStringArray returns the value of a [setting.StringListValue] with the specified key, + // an [setting.ErrNotConfigured] if the policy setting is not configured, or + // an error on failure. + ReadStringArray(key setting.Key) ([]string, error) +} + +// Lockable is an optional interface that [Store] implementations may support. 
+// Locking a [Store] is not mandatory as [Store] must be concurrency-safe, +// but is recommended to avoid issues where consecutive read calls for related +// policies might return inconsistent results if a policy change occurs between +// the calls. Implementations may use locking to pre-read policies or for +// similar performance optimizations. +type Lockable interface { + // Lock acquires a read lock on the policy store, + // ensuring the store's state remains unchanged while locked. + // Multiple readers can hold the lock simultaneously. + // It returns an error if the store cannot be locked. + Lock() error + // Unlock unlocks the policy store. + // It is a run-time error if the store is not locked on entry to Unlock. + Unlock() +} + +// Changeable is an optional interface that [Store] implementations may support +// if the policy settings they contain can be externally changed after being initially read. +type Changeable interface { + // RegisterChangeCallback adds a function that will be called + // whenever there's a policy change in the [Store]. + // The returned function can be used to unregister the callback. + RegisterChangeCallback(callback func()) (unregister func(), err error) +} + +// Expirable is an optional interface that [Store] implementations may support +// if they can be externally closed or otherwise become invalid while in use. +type Expirable interface { + // Done returns a channel that is closed when the policy [Store] should no longer be used. + // It should return nil if the store never expires. + Done() <-chan struct{} +} + +// Source represents a named source of policy settings for a given [setting.PolicyScope]. +type Source struct { + name string + scope setting.PolicyScope + store Store + origin *setting.Origin + + lazyReader lazy.SyncValue[*Reader] +} + +// NewSource returns a new [Source] with the specified name, scope, and store. 
+func NewSource(name string, scope setting.PolicyScope, store Store) *Source { + return &Source{name: name, scope: scope, store: store, origin: setting.NewNamedOrigin(name, scope)} +} + +// Name reports the name of the policy source. +func (s *Source) Name() string { + return s.name +} + +// Scope reports the management scope of the policy source. +func (s *Source) Scope() setting.PolicyScope { + return s.scope +} + +// Reader returns a [Reader] that reads from this source's [Store]. +func (s *Source) Reader() (*Reader, error) { + return s.lazyReader.GetErr(func() (*Reader, error) { + return newReader(s.store, s.origin) + }) +} + +// Description returns a formatted string with the scope and name of this policy source. +// It can be used for logging or display purposes. +func (s *Source) Description() string { + if s.name != "" { + return fmt.Sprintf("%s (%v)", s.name, s.Scope()) + } + return s.Scope().String() +} + +// Compare returns an integer comparing s and s2 +// by their precedence, following the "last-wins" model. +// The result will be: +// +// -1 if policy settings from s should be processed before policy settings from s2; +// +1 if policy settings from s should be processed after policy settings from s2, overriding s2; +// 0 if the relative processing order of policy settings in s and s2 is unspecified. +func (s *Source) Compare(s2 *Source) int { + return cmp.Compare(s2.Scope().Kind(), s.Scope().Kind()) +} + +// Close closes the [Source] and the underlying [Store]. +func (s *Source) Close() error { + // The [Reader], if any, owns the [Store]. + if reader, _ := s.lazyReader.GetErr(func() (*Reader, error) { return nil, ErrStoreClosed }); reader != nil { + return reader.Close() + } + // Otherwise, it is our responsibility to close it. 
+ if closer, ok := s.store.(io.Closer); ok { + return closer.Close() + } + return nil +} diff --git a/util/syspolicy/source/policy_store_windows.go b/util/syspolicy/source/policy_store_windows.go new file mode 100644 index 0000000000000..f526b4ce1c666 --- /dev/null +++ b/util/syspolicy/source/policy_store_windows.go @@ -0,0 +1,450 @@ +// Copyright (c) Tailscale Inc & AUTHORS +// SPDX-License-Identifier: BSD-3-Clause + +package source + +import ( + "errors" + "fmt" + "strings" + "sync" + + "golang.org/x/sys/windows" + "golang.org/x/sys/windows/registry" + "tailscale.com/util/set" + "tailscale.com/util/syspolicy/setting" + "tailscale.com/util/winutil/gp" +) + +const ( + softwareKeyName = `Software` + tsPoliciesSubkey = `Policies\Tailscale` + tsIPNSubkey = `Tailscale IPN` // the legacy key we need to fallback to +) + +var ( + _ Store = (*PlatformPolicyStore)(nil) + _ Lockable = (*PlatformPolicyStore)(nil) + _ Changeable = (*PlatformPolicyStore)(nil) + _ Expirable = (*PlatformPolicyStore)(nil) +) + +// PlatformPolicyStore implements [Store] by providing read access to +// Registry-based Tailscale policies, such as those configured via Group Policy or MDM. +// For better performance and consistency, it is recommended to lock it when +// reading multiple policy settings sequentially. +// It also allows subscribing to policy change notifications. +type PlatformPolicyStore struct { + scope gp.Scope // [gp.MachinePolicy] or [gp.UserPolicy] + + // The softwareKey can be HKLM\Software, HKCU\Software, or + // HKU\{SID}\Software. Anything below the Software subkey, including + // Software\Policies, may not yet exist or could be deleted throughout the + // [PlatformPolicyStore]'s lifespan, invalidating the handle. We also prefer + // to always use a real registry key (rather than a predefined HKLM or HKCU) + // to simplify bookkeeping (predefined keys should never be closed). 
+ // Finally, this will allow us to watch for any registry changes directly + // should we need this in the future in addition to gp.ChangeWatcher. + softwareKey registry.Key + watcher *gp.ChangeWatcher + + done chan struct{} // done is closed when Close call completes + + // The policyLock can be locked by the caller when reading multiple policy settings + // to prevent the Group Policy Client service from modifying policies while + // they are being read. + // + // When both policyLock and mu need to be taken, mu must be taken before policyLock. + policyLock *gp.PolicyLock + + mu sync.Mutex + tsKeys []registry.Key // or nil if the [PlatformPolicyStore] hasn't been locked. + cbs set.HandleSet[func()] // policy change callbacks + lockCnt int + locked sync.WaitGroup + closing bool + closed bool +} + +type registryValueGetter[T any] func(key registry.Key, name string) (T, error) + +// NewMachinePlatformPolicyStore returns a new [PlatformPolicyStore] for the machine. +func NewMachinePlatformPolicyStore() (*PlatformPolicyStore, error) { + softwareKey, err := registry.OpenKey(registry.LOCAL_MACHINE, softwareKeyName, windows.KEY_READ) + if err != nil { + return nil, fmt.Errorf("failed to open the %s key: %w", softwareKeyName, err) + } + return newPlatformPolicyStore(gp.MachinePolicy, softwareKey, gp.NewMachinePolicyLock()), nil +} + +// NewUserPlatformPolicyStore returns a new [PlatformPolicyStore] for the user specified by its token. +// User's profile must be loaded, and the token handle must have [windows.TOKEN_QUERY] +// and [windows.TOKEN_DUPLICATE] access. The caller retains ownership of the token. 
+func NewUserPlatformPolicyStore(token windows.Token) (*PlatformPolicyStore, error) { + var err error + var softwareKey registry.Key + if token != 0 { + var user *windows.Tokenuser + if user, err = token.GetTokenUser(); err != nil { + return nil, fmt.Errorf("failed to get token user: %w", err) + } + userSid := user.User.Sid + softwareKey, err = registry.OpenKey(registry.USERS, userSid.String()+`\`+softwareKeyName, windows.KEY_READ) + } else { + softwareKey, err = registry.OpenKey(registry.CURRENT_USER, softwareKeyName, windows.KEY_READ) + } + if err != nil { + return nil, fmt.Errorf("failed to open the %s key: %w", softwareKeyName, err) + } + policyLock, err := gp.NewUserPolicyLock(token) + if err != nil { + return nil, fmt.Errorf("failed to create a user policy lock: %w", err) + } + return newPlatformPolicyStore(gp.UserPolicy, softwareKey, policyLock), nil +} + +func newPlatformPolicyStore(scope gp.Scope, softwareKey registry.Key, policyLock *gp.PolicyLock) *PlatformPolicyStore { + return &PlatformPolicyStore{ + scope: scope, + softwareKey: softwareKey, + done: make(chan struct{}), + policyLock: policyLock, + } +} + +// Lock locks the policy store, preventing the system from modifying the policies +// while they are being read. It is a read lock that may be acquired by multiple goroutines. +// Each Lock call must be balanced by exactly one Unlock call. +func (ps *PlatformPolicyStore) Lock() (err error) { + ps.mu.Lock() + defer ps.mu.Unlock() + + if ps.closing { + return ErrStoreClosed + } + + ps.lockCnt += 1 + if ps.lockCnt != 1 { + return nil + } + defer func() { + if err != nil { + ps.lockCnt -= 1 + } + }() + + // Ensure ps remains open while the lock is held. + ps.locked.Add(1) + defer func() { + if err != nil { + ps.locked.Done() + } + }() + + // Acquire the GP lock to prevent the system from modifying policy settings + // while they are being read. 
+ if err := ps.policyLock.Lock(); err != nil { + if errors.Is(err, gp.ErrInvalidLockState) { + // The policy store is being closed and we've lost the race. + return ErrStoreClosed + } + return err + } + defer func() { + if err != nil { + ps.policyLock.Unlock() + } + }() + + // Keep the Tailscale's registry keys open for the duration of the lock. + keyNames := tailscaleKeyNamesFor(ps.scope) + ps.tsKeys = make([]registry.Key, 0, len(keyNames)) + for _, keyName := range keyNames { + var tsKey registry.Key + tsKey, err = registry.OpenKey(ps.softwareKey, keyName, windows.KEY_READ) + if err != nil { + if err == registry.ErrNotExist { + continue + } + return err + } + ps.tsKeys = append(ps.tsKeys, tsKey) + } + + return nil +} + +// Unlock decrements the lock counter and unlocks the policy store once the counter reaches 0. +// It panics if ps is not locked on entry to Unlock. +func (ps *PlatformPolicyStore) Unlock() { + ps.mu.Lock() + defer ps.mu.Unlock() + + ps.lockCnt -= 1 + if ps.lockCnt < 0 { + panic("negative lockCnt") + } else if ps.lockCnt != 0 { + return + } + + for _, key := range ps.tsKeys { + key.Close() + } + ps.tsKeys = nil + ps.policyLock.Unlock() + ps.locked.Done() +} + +// RegisterChangeCallback adds a function that will be called whenever there's a policy change. +// It returns a function that can be used to unregister the specified callback or an error. +// The error is [ErrStoreClosed] if ps has already been closed. 
+func (ps *PlatformPolicyStore) RegisterChangeCallback(cb func()) (unregister func(), err error) { + ps.mu.Lock() + defer ps.mu.Unlock() + if ps.closing { + return nil, ErrStoreClosed + } + + handle := ps.cbs.Add(cb) + if len(ps.cbs) == 1 { + if ps.watcher, err = gp.NewChangeWatcher(ps.scope, ps.onChange); err != nil { + return nil, err + } + } + + return func() { + ps.mu.Lock() + defer ps.mu.Unlock() + delete(ps.cbs, handle) + if len(ps.cbs) == 0 { + if ps.watcher != nil { + ps.watcher.Close() + ps.watcher = nil + } + } + }, nil +} + +func (ps *PlatformPolicyStore) onChange() { + ps.mu.Lock() + defer ps.mu.Unlock() + if ps.closing { + return + } + for _, callback := range ps.cbs { + go callback() + } +} + +// ReadString retrieves a string policy with the specified key. +// It returns [setting.ErrNotConfigured] if the policy setting does not exist. +func (ps *PlatformPolicyStore) ReadString(key setting.Key) (val string, err error) { + return getPolicyValue(ps, key, + func(key registry.Key, valueName string) (string, error) { + val, _, err := key.GetStringValue(valueName) + return val, err + }) +} + +// ReadUInt64 retrieves an integer policy with the specified key. +// It returns [setting.ErrNotConfigured] if the policy setting does not exist. +func (ps *PlatformPolicyStore) ReadUInt64(key setting.Key) (uint64, error) { + return getPolicyValue(ps, key, + func(key registry.Key, valueName string) (uint64, error) { + val, _, err := key.GetIntegerValue(valueName) + return val, err + }) +} + +// ReadBoolean retrieves a boolean policy with the specified key. +// It returns [setting.ErrNotConfigured] if the policy setting does not exist. 
+func (ps *PlatformPolicyStore) ReadBoolean(key setting.Key) (bool, error) {
+	return getPolicyValue(ps, key,
+		func(key registry.Key, valueName string) (bool, error) {
+			val, _, err := key.GetIntegerValue(valueName)
+			if err != nil {
+				return false, err
+			}
+			return val != 0, nil
+		})
+}
+
+// ReadStringArray retrieves a multi-string policy with the specified key.
+// It returns [setting.ErrNotConfigured] if the policy setting does not exist.
+func (ps *PlatformPolicyStore) ReadStringArray(key setting.Key) ([]string, error) {
+	return getPolicyValue(ps, key,
+		func(key registry.Key, valueName string) ([]string, error) {
+			val, _, err := key.GetStringsValue(valueName)
+			if err != registry.ErrNotExist {
+				return val, err // the err may be nil or non-nil
+			}
+
+			// The idiomatic way to store multiple string values in Group Policy
+			// and MDM for Windows is to have multiple REG_SZ (or REG_EXPAND_SZ)
+			// values under a subkey rather than in a single REG_MULTI_SZ value.
+			//
+			// See the Group Policy: Registry Extension Encoding specification,
+			// and specifically the ListElement and ListBox types.
+			// https://web.archive.org/web/20240721033657/https://winprotocoldoc.blob.core.windows.net/productionwindowsarchives/MS-GPREG/%5BMS-GPREG%5D.pdf
+			valKey, err := registry.OpenKey(key, valueName, windows.KEY_READ)
+			if err != nil {
+				return nil, err
+			}
+			valNames, err := valKey.ReadValueNames(0)
+			if err != nil {
+				return nil, err
+			}
+			val = make([]string, 0, len(valNames))
+			for _, name := range valNames {
+				switch item, _, err := valKey.GetStringValue(name); {
+				case err == registry.ErrNotExist:
+					continue
+				case err != nil:
+					return nil, err
+				default:
+					val = append(val, item)
+				}
+			}
+			return val, nil
+		})
+}
+
+// splitSettingKey extracts the registry key name and value name from a [setting.Key].
+// The [setting.Key] format allows grouping settings into nested categories using one
+// or more [setting.KeyPathSeparator]s in the path.
How individual policy settings are +// stored is an implementation detail of each [Store]. In the [PlatformPolicyStore] +// for Windows, we map nested policy categories onto the Registry key hierarchy. +// The last component after a [setting.KeyPathSeparator] is treated as the value name, +// while everything preceding it is considered a subpath (relative to the {HKLM,HKCU}\Software\Policies\Tailscale key). +// If there are no [setting.KeyPathSeparator]s in the key, the policy setting value +// is meant to be stored directly under {HKLM,HKCU}\Software\Policies\Tailscale. +func splitSettingKey(key setting.Key) (path, valueName string) { + if idx := strings.LastIndex(string(key), setting.KeyPathSeparator); idx != -1 { + path = strings.ReplaceAll(string(key[:idx]), setting.KeyPathSeparator, `\`) + valueName = string(key[idx+len(setting.KeyPathSeparator):]) + return path, valueName + } + return "", string(key) +} + +func getPolicyValue[T any](ps *PlatformPolicyStore, key setting.Key, getter registryValueGetter[T]) (T, error) { + var zero T + + ps.mu.Lock() + defer ps.mu.Unlock() + if ps.closed { + return zero, ErrStoreClosed + } + + path, valueName := splitSettingKey(key) + getValue := func(key registry.Key) (T, error) { + var err error + if path != "" { + key, err = registry.OpenKey(key, path, windows.KEY_READ) + if err != nil { + return zero, err + } + defer key.Close() + } + return getter(key, valueName) + } + + if ps.tsKeys != nil { + // A non-nil tsKeys indicates that ps has been locked. + // The slice may be empty if Tailscale policy keys do not exist. + for _, tsKey := range ps.tsKeys { + val, err := getValue(tsKey) + if err == nil || err != registry.ErrNotExist { + return val, err + } + } + return zero, setting.ErrNotConfigured + } + + // The ps has not been locked, so we don't have any pre-opened keys. 
+ for _, tsKeyName := range tailscaleKeyNamesFor(ps.scope) { + var tsKey registry.Key + tsKey, err := registry.OpenKey(ps.softwareKey, tsKeyName, windows.KEY_READ) + if err != nil { + if err == registry.ErrNotExist { + continue + } + return zero, err + } + val, err := getValue(tsKey) + tsKey.Close() + if err == nil || err != registry.ErrNotExist { + return val, err + } + } + + return zero, setting.ErrNotConfigured +} + +// Close closes the policy store and releases any associated resources. +// It cancels pending locks and prevents any new lock attempts, +// but waits for existing locks to be released. +func (ps *PlatformPolicyStore) Close() error { + // Request to close the Group Policy read lock. + // Existing held locks will remain valid, but any new or pending locks + // will fail. In certain scenarios, the corresponding write lock may be held + // by the Group Policy service for extended periods (minutes rather than + // seconds or milliseconds). In such cases, we prefer not to wait that long + // if the ps is being closed anyway. + if ps.policyLock != nil { + ps.policyLock.Close() + } + + // Mark ps as closing to fast-fail any new lock attempts. + // Callers that have already locked it can finish their reading. + ps.mu.Lock() + if ps.closing { + ps.mu.Unlock() + return nil + } + ps.closing = true + if ps.watcher != nil { + ps.watcher.Close() + ps.watcher = nil + } + ps.mu.Unlock() + + // Signal to the external code that ps should no longer be used. + close(ps.done) + + // Wait for any outstanding locks to be released. + ps.locked.Wait() + + // Deny any further read attempts and release remaining resources. + ps.mu.Lock() + defer ps.mu.Unlock() + ps.cbs = nil + ps.policyLock = nil + ps.closed = true + if ps.softwareKey != 0 { + ps.softwareKey.Close() + ps.softwareKey = 0 + } + return nil +} + +// Done returns a channel that is closed when the Close method is called. 
+func (ps *PlatformPolicyStore) Done() <-chan struct{} { + return ps.done +} + +func tailscaleKeyNamesFor(scope gp.Scope) []string { + switch scope { + case gp.MachinePolicy: + // If a computer-side policy value does not exist under Software\Policies\Tailscale, + // we need to fallback and use the legacy Software\Tailscale IPN key. + return []string{tsPoliciesSubkey, tsIPNSubkey} + case gp.UserPolicy: + // However, we've never used the legacy key with user-side policies, + // and we should never do so. Unlike HKLM\Software\Tailscale IPN, + // its HKCU counterpart is user-writable. + return []string{tsPoliciesSubkey} + default: + panic("unreachable") + } +} diff --git a/util/syspolicy/source/policy_store_windows_test.go b/util/syspolicy/source/policy_store_windows_test.go new file mode 100644 index 0000000000000..33f85dc0b2b7e --- /dev/null +++ b/util/syspolicy/source/policy_store_windows_test.go @@ -0,0 +1,398 @@ +// Copyright (c) Tailscale Inc & AUTHORS +// SPDX-License-Identifier: BSD-3-Clause + +package source + +import ( + "errors" + "fmt" + "reflect" + "strconv" + "strings" + "sync" + "sync/atomic" + "testing" + "time" + + "golang.org/x/sys/windows" + "golang.org/x/sys/windows/registry" + "tailscale.com/tstest" + "tailscale.com/util/cibuild" + "tailscale.com/util/mak" + "tailscale.com/util/syspolicy/setting" + "tailscale.com/util/winutil" + "tailscale.com/util/winutil/gp" +) + +// subkeyStrings is a test type indicating that a string slice should be written +// to the registry as multiple REG_SZ values under the setting's key, +// rather than as a single REG_MULTI_SZ value under the group key. +// This is the same format as ADMX use for string lists. 
+type subkeyStrings []string + +type testPolicyValue struct { + name setting.Key + value any +} + +func TestLockUnlockPolicyStore(t *testing.T) { + // Make sure we don't leak goroutines + tstest.ResourceCheck(t) + + store, err := NewMachinePlatformPolicyStore() + if err != nil { + t.Fatalf("NewMachinePolicyStore failed: %v", err) + } + + t.Run("One-Goroutine", func(t *testing.T) { + if err := store.Lock(); err != nil { + t.Errorf("store.Lock(): got %v; want nil", err) + return + } + if v, err := store.ReadString("NonExistingPolicySetting"); err == nil || !errors.Is(err, setting.ErrNotConfigured) { + t.Errorf(`ReadString: got %v, %v; want "", %v`, v, err, setting.ErrNotConfigured) + } + store.Unlock() + }) + + // Lock the store N times from different goroutines. + const N = 100 + var unlocked atomic.Int32 + t.Run("N-Goroutines", func(t *testing.T) { + var wg sync.WaitGroup + wg.Add(N) + for range N { + go func() { + if err := store.Lock(); err != nil { + t.Errorf("store.Lock(): got %v; want nil", err) + return + } + if v, err := store.ReadString("NonExistingPolicySetting"); err == nil || !errors.Is(err, setting.ErrNotConfigured) { + t.Errorf(`ReadString: got %v, %v; want "", %v`, v, err, setting.ErrNotConfigured) + } + wg.Done() + time.Sleep(10 * time.Millisecond) + unlocked.Add(1) + store.Unlock() + }() + } + + // Wait until the store is locked N times. + wg.Wait() + }) + + // Close the store. The call should wait for all held locks to be released. + if err := store.Close(); err != nil { + t.Fatalf("(*PolicyStore).Close failed: %v", err) + } + if locked := unlocked.Load(); locked != N { + t.Errorf("locked.Load(): got %v; want %v", locked, N) + } + + // Any further attempts to lock it should fail. 
+ if err = store.Lock(); err == nil || !errors.Is(err, ErrStoreClosed) { + t.Errorf("store.Lock(): got %v; want %v", err, ErrStoreClosed) + } +} + +func TestReadPolicyStore(t *testing.T) { + if !winutil.IsCurrentProcessElevated() { + t.Skipf("test requires running as elevated user") + } + tests := []struct { + name setting.Key + newValue any + legacyValue any + want any + }{ + {name: "LegacyPolicy", legacyValue: "LegacyValue", want: "LegacyValue"}, + {name: "StringPolicy", legacyValue: "LegacyValue", newValue: "Value", want: "Value"}, + {name: "StringPolicy_Empty", legacyValue: "LegacyValue", newValue: "", want: ""}, + {name: "BoolPolicy_True", newValue: true, want: true}, + {name: "BoolPolicy_False", newValue: false, want: false}, + {name: "UIntPolicy_1", newValue: uint32(10), want: uint64(10)}, // uint32 values should be returned as uint64 + {name: "UIntPolicy_2", newValue: uint64(1 << 37), want: uint64(1 << 37)}, + {name: "StringListPolicy", newValue: []string{"Value1", "Value2"}, want: []string{"Value1", "Value2"}}, + {name: "StringListPolicy_Empty", newValue: []string{}, want: []string{}}, + {name: "StringListPolicy_SubKey", newValue: subkeyStrings{"Value1", "Value2"}, want: []string{"Value1", "Value2"}}, + {name: "StringListPolicy_SubKey_Empty", newValue: subkeyStrings{}, want: []string{}}, + } + + runTests := func(t *testing.T, userStore bool, token windows.Token) { + var hive registry.Key + if userStore { + hive = registry.CURRENT_USER + } else { + hive = registry.LOCAL_MACHINE + } + + // Write policy values to the registry. 
+ newValues := make([]testPolicyValue, 0, len(tests)) + for _, tt := range tests { + if tt.newValue != nil { + newValues = append(newValues, testPolicyValue{name: tt.name, value: tt.newValue}) + } + } + policiesKeyName := softwareKeyName + `\` + tsPoliciesSubkey + cleanup, err := createTestPolicyValues(hive, policiesKeyName, newValues) + if err != nil { + t.Fatalf("createTestPolicyValues failed: %v", err) + } + t.Cleanup(cleanup) + + // Write legacy policy values to the registry. + legacyValues := make([]testPolicyValue, 0, len(tests)) + for _, tt := range tests { + if tt.legacyValue != nil { + legacyValues = append(legacyValues, testPolicyValue{name: tt.name, value: tt.legacyValue}) + } + } + legacyKeyName := softwareKeyName + `\` + tsIPNSubkey + cleanup, err = createTestPolicyValues(hive, legacyKeyName, legacyValues) + if err != nil { + t.Fatalf("createTestPolicyValues failed: %v", err) + } + t.Cleanup(cleanup) + + var store *PlatformPolicyStore + if userStore { + store, err = NewUserPlatformPolicyStore(token) + } else { + store, err = NewMachinePlatformPolicyStore() + } + if err != nil { + t.Fatalf("NewXPolicyStore failed: %v", err) + } + t.Cleanup(func() { + if err := store.Close(); err != nil { + t.Errorf("(*PolicyStore).Close failed: %v", err) + } + }) + + // testReadValues checks that [PolicyStore] returns the same values we wrote directly to the registry. 
+ testReadValues := func(t *testing.T, withLocks bool) { + for _, tt := range tests { + t.Run(string(tt.name), func(t *testing.T) { + if userStore && tt.newValue == nil { + t.Skip("there is no legacy policies for users") + } + + t.Parallel() + + if withLocks { + if err := store.Lock(); err != nil { + t.Errorf("failed to acquire the lock: %v", err) + } + defer store.Unlock() + } + + var got any + var err error + switch tt.want.(type) { + case string: + got, err = store.ReadString(tt.name) + case uint64: + got, err = store.ReadUInt64(tt.name) + case bool: + got, err = store.ReadBoolean(tt.name) + case []string: + got, err = store.ReadStringArray(tt.name) + } + if err != nil { + t.Fatal(err) + } + if !reflect.DeepEqual(got, tt.want) { + t.Errorf("got %v; want %v", got, tt.want) + } + }) + } + } + t.Run("NoLock", func(t *testing.T) { + testReadValues(t, false) + }) + + t.Run("WithLock", func(t *testing.T) { + testReadValues(t, true) + }) + } + + t.Run("MachineStore", func(t *testing.T) { + runTests(t, false, 0) + }) + + t.Run("CurrentUserStore", func(t *testing.T) { + runTests(t, true, 0) + }) + + t.Run("UserStoreWithToken", func(t *testing.T) { + var token windows.Token + if err := windows.OpenProcessToken(windows.CurrentProcess(), windows.TOKEN_QUERY, &token); err != nil { + t.Fatalf("OpenProcessToken: %v", err) + } + defer token.Close() + runTests(t, true, token) + }) +} + +func TestPolicyStoreChangeNotifications(t *testing.T) { + if cibuild.On() { + t.Skipf("test requires running on a real Windows environment") + } + store, err := NewMachinePlatformPolicyStore() + if err != nil { + t.Fatalf("NewMachinePolicyStore failed: %v", err) + } + t.Cleanup(func() { + if err := store.Close(); err != nil { + t.Errorf("(*PolicyStore).Close failed: %v", err) + } + }) + + done := make(chan struct{}) + unregister, err := store.RegisterChangeCallback(func() { close(done) }) + if err != nil { + t.Fatalf("RegisterChangeCallback failed: %v", err) + } + t.Cleanup(unregister) + + // 
RefreshMachinePolicy is a non-blocking call. + if err := gp.RefreshMachinePolicy(true); err != nil { + t.Fatalf("RefreshMachinePolicy failed: %v", err) + } + + // We should receive a policy change notification when + // the Group Policy service completes policy processing. + // Otherwise, the test will eventually time out. + <-done +} + +func TestSplitSettingKey(t *testing.T) { + tests := []struct { + name string + key setting.Key + wantPath string + wantValue string + }{ + { + name: "empty", + key: "", + wantPath: ``, + wantValue: "", + }, + { + name: "explicit-empty-path", + key: "/ValueName", + wantPath: ``, + wantValue: "ValueName", + }, + { + name: "empty-value", + key: "Root/Sub/", + wantPath: `Root\Sub`, + wantValue: "", + }, + { + name: "with-path", + key: "Root/Sub/ValueName", + wantPath: `Root\Sub`, + wantValue: "ValueName", + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + gotPath, gotValue := splitSettingKey(tt.key) + if gotPath != tt.wantPath { + t.Errorf("Path: got %q, want %q", gotPath, tt.wantPath) + } + if gotValue != tt.wantValue { + t.Errorf("Value: got %q, want %q", gotValue, tt.wantPath) + } + }) + } +} + +func createTestPolicyValues(hive registry.Key, keyName string, values []testPolicyValue) (cleanup func(), err error) { + key, existing, err := registry.CreateKey(hive, keyName, registry.ALL_ACCESS) + if err != nil { + return nil, err + } + var valuesToDelete map[string][]string + doCleanup := func() { + for path, values := range valuesToDelete { + if len(values) == 0 { + registry.DeleteKey(key, path) + continue + } + key, err := registry.OpenKey(key, path, windows.KEY_ALL_ACCESS) + if err != nil { + continue + } + defer key.Close() + for _, value := range values { + key.DeleteValue(value) + } + } + + key.Close() + if !existing { + registry.DeleteKey(hive, keyName) + } + } + defer func() { + if err != nil { + doCleanup() + } + }() + + for _, v := range values { + key, existing := key, existing + path, valueName := 
splitSettingKey(v.name) + if path != "" { + if key, existing, err = registry.CreateKey(key, valueName, windows.KEY_ALL_ACCESS); err != nil { + return nil, err + } + defer key.Close() + } + if values, ok := valuesToDelete[path]; len(values) > 0 || (!ok && existing) { + values = append(values, valueName) + mak.Set(&valuesToDelete, path, values) + } else if !ok { + mak.Set(&valuesToDelete, path, nil) + } + + switch value := v.value.(type) { + case string: + err = key.SetStringValue(valueName, value) + case uint32: + err = key.SetDWordValue(valueName, value) + case uint64: + err = key.SetQWordValue(valueName, value) + case bool: + if value { + err = key.SetDWordValue(valueName, 1) + } else { + err = key.SetDWordValue(valueName, 0) + } + case []string: + err = key.SetStringsValue(valueName, value) + case subkeyStrings: + key, _, err := registry.CreateKey(key, valueName, windows.KEY_ALL_ACCESS) + if err != nil { + return nil, err + } + defer key.Close() + mak.Set(&valuesToDelete, strings.Trim(path+`\`+valueName, `\`), nil) + for i, value := range value { + if err := key.SetStringValue(strconv.Itoa(i), value); err != nil { + return nil, err + } + } + default: + err = fmt.Errorf("unsupported value: %v (%T), name: %q", value, value, v.name) + } + if err != nil { + return nil, err + } + } + return doCleanup, nil +} diff --git a/util/syspolicy/source/test_store.go b/util/syspolicy/source/test_store.go new file mode 100644 index 0000000000000..bb8e164fb414a --- /dev/null +++ b/util/syspolicy/source/test_store.go @@ -0,0 +1,451 @@ +// Copyright (c) Tailscale Inc & AUTHORS +// SPDX-License-Identifier: BSD-3-Clause + +package source + +import ( + "fmt" + "sync" + "sync/atomic" + + xmaps "golang.org/x/exp/maps" + "tailscale.com/util/mak" + "tailscale.com/util/set" + "tailscale.com/util/syspolicy/internal" + "tailscale.com/util/syspolicy/setting" +) + +var ( + _ Store = (*TestStore)(nil) + _ Lockable = (*TestStore)(nil) + _ Changeable = (*TestStore)(nil) + _ Expirable = 
(*TestStore)(nil) +) + +// TestValueType is a constraint that allows types supported by [TestStore]. +type TestValueType interface { + bool | uint64 | string | []string +} + +// TestSetting is a policy setting in a [TestStore]. +type TestSetting[T TestValueType] struct { + // Key is the setting's unique identifier. + Key setting.Key + // Error is the error to be returned by the [TestStore] when reading + // a policy setting with the specified key. + Error error + // Value is the value to be returned by the [TestStore] when reading + // a policy setting with the specified key. + // It is only used if the Error is nil. + Value T +} + +// TestSettingOf returns a [TestSetting] representing a policy setting +// configured with the specified key and value. +func TestSettingOf[T TestValueType](key setting.Key, value T) TestSetting[T] { + return TestSetting[T]{Key: key, Value: value} +} + +// TestSettingWithError returns a [TestSetting] representing a policy setting +// with the specified key and error. +func TestSettingWithError[T TestValueType](key setting.Key, err error) TestSetting[T] { + return TestSetting[T]{Key: key, Error: err} +} + +// testReadOperation describes a single policy setting read operation. +type testReadOperation struct { + // Key is the setting's unique identifier. + Key setting.Key + // Type is a value type of a read operation. + // [setting.BooleanValue], [setting.IntegerValue], [setting.StringValue] or [setting.StringListValue] + Type setting.Type +} + +// TestExpectedReads is the number of read operations with the specified details. +type TestExpectedReads struct { + // Key is the setting's unique identifier. + Key setting.Key + // Type is a value type of a read operation. + // [setting.BooleanValue], [setting.IntegerValue], [setting.StringValue] or [setting.StringListValue] + Type setting.Type + // NumTimes is how many times a setting with the specified key and type should have been read. 
+ NumTimes int +} + +func (r TestExpectedReads) operation() testReadOperation { + return testReadOperation{r.Key, r.Type} +} + +// TestStore is a [Store] that can be used in tests. +type TestStore struct { + tb internal.TB + + done chan struct{} + + storeLock sync.RWMutex // its RLock is exposed via [Store.Lock]/[Store.Unlock]. + storeLockCount atomic.Int32 + + mu sync.RWMutex + suspendCount int // change callback are suspended if > 0 + mr, mw map[setting.Key]any // maps for reading and writing; they're the same unless the store is suspended. + cbs set.HandleSet[func()] + + readsMu sync.Mutex + reads map[testReadOperation]int // how many times a policy setting was read +} + +// NewTestStore returns a new [TestStore]. +// The tb will be used to report coding errors detected by the [TestStore]. +func NewTestStore(tb internal.TB) *TestStore { + m := make(map[setting.Key]any) + return &TestStore{ + tb: tb, + done: make(chan struct{}), + mr: m, + mw: m, + } +} + +// NewTestStoreOf is a shorthand for [NewTestStore] followed by [TestStore.SetBooleans], +// [TestStore.SetUInt64s], [TestStore.SetStrings] or [TestStore.SetStringLists]. +func NewTestStoreOf[T TestValueType](tb internal.TB, settings ...TestSetting[T]) *TestStore { + m := make(map[setting.Key]any) + store := &TestStore{ + tb: tb, + done: make(chan struct{}), + mr: m, + mw: m, + } + switch settings := any(settings).(type) { + case []TestSetting[bool]: + store.SetBooleans(settings...) + case []TestSetting[uint64]: + store.SetUInt64s(settings...) + case []TestSetting[string]: + store.SetStrings(settings...) + case []TestSetting[[]string]: + store.SetStringLists(settings...) + } + return store +} + +// Lock implements [Lockable]. +func (s *TestStore) Lock() error { + s.storeLock.RLock() + s.storeLockCount.Add(1) + return nil +} + +// Unlock implements [Lockable]. 
+func (s *TestStore) Unlock() { + if s.storeLockCount.Add(-1) < 0 { + s.tb.Fatal("negative storeLockCount") + } + s.storeLock.RUnlock() +} + +// RegisterChangeCallback implements [Changeable]. +func (s *TestStore) RegisterChangeCallback(callback func()) (unregister func(), err error) { + s.mu.Lock() + defer s.mu.Unlock() + handle := s.cbs.Add(callback) + return func() { + s.mu.Lock() + defer s.mu.Unlock() + delete(s.cbs, handle) + }, nil +} + +// ReadString implements [Store]. +func (s *TestStore) ReadString(key setting.Key) (string, error) { + defer s.recordRead(key, setting.StringValue) + s.mu.RLock() + defer s.mu.RUnlock() + v, ok := s.mr[key] + if !ok { + return "", setting.ErrNotConfigured + } + if err, ok := v.(error); ok { + return "", err + } + str, ok := v.(string) + if !ok { + return "", fmt.Errorf("%w in ReadString: got %T", setting.ErrTypeMismatch, v) + } + return str, nil +} + +// ReadUInt64 implements [Store]. +func (s *TestStore) ReadUInt64(key setting.Key) (uint64, error) { + defer s.recordRead(key, setting.IntegerValue) + s.mu.RLock() + defer s.mu.RUnlock() + v, ok := s.mr[key] + if !ok { + return 0, setting.ErrNotConfigured + } + if err, ok := v.(error); ok { + return 0, err + } + u64, ok := v.(uint64) + if !ok { + return 0, fmt.Errorf("%w in ReadUInt64: got %T", setting.ErrTypeMismatch, v) + } + return u64, nil +} + +// ReadBoolean implements [Store]. +func (s *TestStore) ReadBoolean(key setting.Key) (bool, error) { + defer s.recordRead(key, setting.BooleanValue) + s.mu.RLock() + defer s.mu.RUnlock() + v, ok := s.mr[key] + if !ok { + return false, setting.ErrNotConfigured + } + if err, ok := v.(error); ok { + return false, err + } + b, ok := v.(bool) + if !ok { + return false, fmt.Errorf("%w in ReadBoolean: got %T", setting.ErrTypeMismatch, v) + } + return b, nil +} + +// ReadStringArray implements [Store]. 
+func (s *TestStore) ReadStringArray(key setting.Key) ([]string, error) { + defer s.recordRead(key, setting.StringListValue) + s.mu.RLock() + defer s.mu.RUnlock() + v, ok := s.mr[key] + if !ok { + return nil, setting.ErrNotConfigured + } + if err, ok := v.(error); ok { + return nil, err + } + slice, ok := v.([]string) + if !ok { + return nil, fmt.Errorf("%w in ReadStringArray: got %T", setting.ErrTypeMismatch, v) + } + return slice, nil +} + +func (s *TestStore) recordRead(key setting.Key, typ setting.Type) { + s.readsMu.Lock() + op := testReadOperation{key, typ} + num := s.reads[op] + num++ + mak.Set(&s.reads, op, num) + s.readsMu.Unlock() +} + +func (s *TestStore) ResetCounters() { + s.readsMu.Lock() + clear(s.reads) + s.readsMu.Unlock() +} + +// ReadsMustEqual fails the test if the actual reads differs from the specified reads. +func (s *TestStore) ReadsMustEqual(reads ...TestExpectedReads) { + s.tb.Helper() + s.readsMu.Lock() + defer s.readsMu.Unlock() + s.readsMustContainLocked(reads...) + s.readMustNoExtraLocked(reads...) +} + +// ReadsMustContain fails the test if the specified reads have not been made, +// or have been made a different number of times. It permits other values to be +// read in addition to the ones being tested. +func (s *TestStore) ReadsMustContain(reads ...TestExpectedReads) { + s.tb.Helper() + s.readsMu.Lock() + defer s.readsMu.Unlock() + s.readsMustContainLocked(reads...) 
+} + +func (s *TestStore) readsMustContainLocked(reads ...TestExpectedReads) { + s.tb.Helper() + for _, r := range reads { + if numTimes := s.reads[r.operation()]; numTimes != r.NumTimes { + s.tb.Errorf("%q (%v) reads: got %v, want %v", r.Key, r.Type, numTimes, r.NumTimes) + } + } +} + +func (s *TestStore) readMustNoExtraLocked(reads ...TestExpectedReads) { + s.tb.Helper() + rs := make(set.Set[testReadOperation]) + for i := range reads { + rs.Add(reads[i].operation()) + } + for ro, num := range s.reads { + if !rs.Contains(ro) { + s.tb.Errorf("%q (%v) reads: got %v, want 0", ro.Key, ro.Type, num) + } + } +} + +// Suspend suspends the store, batching changes and notifications +// until [TestStore.Resume] is called the same number of times as Suspend. +func (s *TestStore) Suspend() { + s.mu.Lock() + defer s.mu.Unlock() + if s.suspendCount++; s.suspendCount == 1 { + s.mw = xmaps.Clone(s.mr) + } +} + +// Resume resumes the store, applying the changes and invoking +// the change callbacks. +func (s *TestStore) Resume() { + s.storeLock.Lock() + s.mu.Lock() + switch s.suspendCount--; { + case s.suspendCount == 0: + s.mr = s.mw + s.mu.Unlock() + s.storeLock.Unlock() + s.notifyPolicyChanged() + case s.suspendCount < 0: + s.tb.Fatal("negative suspendCount") + default: + s.mu.Unlock() + s.storeLock.Unlock() + } +} + +// SetBooleans sets the specified boolean settings in s. +func (s *TestStore) SetBooleans(settings ...TestSetting[bool]) { + s.storeLock.Lock() + for _, setting := range settings { + if setting.Key == "" { + s.tb.Fatal("empty keys disallowed") + } + s.mu.Lock() + if setting.Error != nil { + mak.Set(&s.mw, setting.Key, any(setting.Error)) + } else { + mak.Set(&s.mw, setting.Key, any(setting.Value)) + } + s.mu.Unlock() + } + s.storeLock.Unlock() + s.notifyPolicyChanged() +} + +// SetUInt64s sets the specified integer settings in s. 
+func (s *TestStore) SetUInt64s(settings ...TestSetting[uint64]) { + s.storeLock.Lock() + for _, setting := range settings { + if setting.Key == "" { + s.tb.Fatal("empty keys disallowed") + } + s.mu.Lock() + if setting.Error != nil { + mak.Set(&s.mw, setting.Key, any(setting.Error)) + } else { + mak.Set(&s.mw, setting.Key, any(setting.Value)) + } + s.mu.Unlock() + } + s.storeLock.Unlock() + s.notifyPolicyChanged() +} + +// SetStrings sets the specified string settings in s. +func (s *TestStore) SetStrings(settings ...TestSetting[string]) { + s.storeLock.Lock() + for _, setting := range settings { + if setting.Key == "" { + s.tb.Fatal("empty keys disallowed") + } + s.mu.Lock() + if setting.Error != nil { + mak.Set(&s.mw, setting.Key, any(setting.Error)) + } else { + mak.Set(&s.mw, setting.Key, any(setting.Value)) + } + s.mu.Unlock() + } + s.storeLock.Unlock() + s.notifyPolicyChanged() +} + +// SetStrings sets the specified string list settings in s. +func (s *TestStore) SetStringLists(settings ...TestSetting[[]string]) { + s.storeLock.Lock() + for _, setting := range settings { + if setting.Key == "" { + s.tb.Fatal("empty keys disallowed") + } + s.mu.Lock() + if setting.Error != nil { + mak.Set(&s.mw, setting.Key, any(setting.Error)) + } else { + mak.Set(&s.mw, setting.Key, any(setting.Value)) + } + s.mu.Unlock() + } + s.storeLock.Unlock() + s.notifyPolicyChanged() +} + +// Delete deletes the specified settings from s. +func (s *TestStore) Delete(keys ...setting.Key) { + s.storeLock.Lock() + for _, key := range keys { + s.mu.Lock() + delete(s.mw, key) + s.mu.Unlock() + } + s.storeLock.Unlock() + s.notifyPolicyChanged() +} + +// Clear deletes all settings from s. 
+func (s *TestStore) Clear() { + s.storeLock.Lock() + s.mu.Lock() + clear(s.mw) + s.mu.Unlock() + s.storeLock.Unlock() + s.notifyPolicyChanged() +} + +func (s *TestStore) notifyPolicyChanged() { + s.mu.RLock() + if s.suspendCount != 0 { + s.mu.RUnlock() + return + } + cbs := xmaps.Values(s.cbs) + s.mu.RUnlock() + + var wg sync.WaitGroup + wg.Add(len(cbs)) + for _, cb := range cbs { + go func() { + defer wg.Done() + cb() + }() + } + wg.Wait() +} + +// Close closes s, notifying its users that it has expired. +func (s *TestStore) Close() { + s.mu.Lock() + defer s.mu.Unlock() + if s.done != nil { + close(s.done) + s.done = nil + } +} + +// Done implements [Expirable]. +func (s *TestStore) Done() <-chan struct{} { + return s.done +} diff --git a/util/usermetric/usermetric.go b/util/usermetric/usermetric.go new file mode 100644 index 0000000000000..cb3f66ea98d89 --- /dev/null +++ b/util/usermetric/usermetric.go @@ -0,0 +1,84 @@ +// Copyright (c) Tailscale Inc & AUTHORS +// SPDX-License-Identifier: BSD-3-Clause + +// Package usermetric provides a container and handler +// for user-facing metrics. +package usermetric + +import ( + "expvar" + "fmt" + "io" + "net/http" + + "tailscale.com/metrics" + "tailscale.com/tsweb/varz" +) + +var vars expvar.Map + +// NewMultiLabelMap creates and register a new +// MultiLabelMap[T] variable with the given name and returns it. +// The variable is registered with the userfacing metrics package. +// +// Note that usermetric are not protected against duplicate +// metrics name. It is the caller's responsibility to ensure that +// the name is unique. +func NewMultiLabelMap[T comparable](name string, promType, helpText string) *metrics.MultiLabelMap[T] { + m := &metrics.MultiLabelMap[T]{ + Type: promType, + Help: helpText, + } + var zero T + _ = metrics.LabelString(zero) // panic early if T is invalid + vars.Set(name, m) + return m +} + +// Gauge is a gauge metric with no labels. 
+type Gauge struct { + m *expvar.Float + help string +} + +// NewGauge creates and register a new gauge metric with the given name and help text. +func NewGauge(name, help string) *Gauge { + g := &Gauge{&expvar.Float{}, help} + vars.Set(name, g) + return g +} + +// Set sets the gauge to the given value. +func (g *Gauge) Set(v float64) { + g.m.Set(v) +} + +// String returns the string of the underlying expvar.Float. +// This satisfies the expvar.Var interface. +func (g *Gauge) String() string { + return g.m.String() +} + +// WritePrometheus writes the gauge metric in Prometheus format to the given writer. +// This satisfies the varz.PrometheusWriter interface. +func (g *Gauge) WritePrometheus(w io.Writer, name string) { + io.WriteString(w, "# TYPE ") + io.WriteString(w, name) + io.WriteString(w, " gauge\n") + if g.help != "" { + io.WriteString(w, "# HELP ") + io.WriteString(w, name) + io.WriteString(w, " ") + io.WriteString(w, g.help) + io.WriteString(w, "\n") + } + + io.WriteString(w, name) + fmt.Fprintf(w, " %v\n", g.m.Value()) +} + +// Handler returns a varz.Handler that serves the userfacing expvar contained +// in this package. 
+func Handler(w http.ResponseWriter, r *http.Request) { + varz.ExpvarDoHandler(vars.Do)(w, r) +} diff --git a/util/usermetric/usermetric_test.go b/util/usermetric/usermetric_test.go new file mode 100644 index 0000000000000..aa0e82ea61969 --- /dev/null +++ b/util/usermetric/usermetric_test.go @@ -0,0 +1,25 @@ +// Copyright (c) Tailscale Inc & AUTHORS +// SPDX-License-Identifier: BSD-3-Clause + +package usermetric + +import ( + "bytes" + "testing" +) + +func TestGauge(t *testing.T) { + g := NewGauge("test_gauge", "This is a test gauge") + g.Set(15) + + var buf bytes.Buffer + g.WritePrometheus(&buf, "test_gauge") + const want = `# TYPE test_gauge gauge +# HELP test_gauge This is a test gauge +test_gauge 15 +` + if got := buf.String(); got != want { + t.Errorf("got %q; want %q", got, want) + } + +} diff --git a/util/winutil/startupinfo_windows.go b/util/winutil/startupinfo_windows.go index f2234fdbe2ea7..e04e9ea9b3d3a 100644 --- a/util/winutil/startupinfo_windows.go +++ b/util/winutil/startupinfo_windows.go @@ -35,9 +35,11 @@ const ( // Mitigation flags from the Win32 SDK const ( - PROCESS_CREATION_MITIGATION_POLICY_IMAGE_LOAD_NO_REMOTE_ALWAYS_ON = (1 << 52) - PROCESS_CREATION_MITIGATION_POLICY_IMAGE_LOAD_NO_LOW_LABEL_ALWAYS_ON = (1 << 56) - PROCESS_CREATION_MITIGATION_POLICY_IMAGE_LOAD_PREFER_SYSTEM32_ALWAYS_ON = (1 << 60) + PROCESS_CREATION_MITIGATION_POLICY_EXTENSION_POINT_DISABLE_ALWAYS_ON = (1 << 32) + PROCESS_CREATION_MITIGATION_POLICY_BLOCK_NON_MICROSOFT_BINARIES_ALWAYS_ON = (1 << 44) + PROCESS_CREATION_MITIGATION_POLICY_IMAGE_LOAD_NO_REMOTE_ALWAYS_ON = (1 << 52) + PROCESS_CREATION_MITIGATION_POLICY_IMAGE_LOAD_NO_LOW_LABEL_ALWAYS_ON = (1 << 56) + PROCESS_CREATION_MITIGATION_POLICY_IMAGE_LOAD_PREFER_SYSTEM32_ALWAYS_ON = (1 << 60) ) // StartupInfoBuilder constructs a Windows STARTUPINFOEX and optional diff --git a/util/winutil/winutil_windows.go b/util/winutil/winutil_windows.go index 9d81795aa4393..5dde9a347d7f7 100644 --- a/util/winutil/winutil_windows.go +++ 
b/util/winutil/winutil_windows.go @@ -52,7 +52,7 @@ func GetDesktopPID() (uint32, error) { } func getPolicyString(name string) (string, error) { - s, err := getRegStringInternal(regPolicyBase, name) + s, err := getRegStringInternal(registry.LOCAL_MACHINE, regPolicyBase, name) if err != nil { // Fall back to the legacy path return getRegString(name) @@ -65,7 +65,7 @@ func getPolicyStringArray(name string) ([]string, error) { } func getRegString(name string) (string, error) { - s, err := getRegStringInternal(regBase, name) + s, err := getRegStringInternal(registry.LOCAL_MACHINE, regBase, name) if err != nil { return "", err } @@ -89,8 +89,8 @@ func getRegInteger(name string) (uint64, error) { return i, err } -func getRegStringInternal(subKey, name string) (string, error) { - key, err := registry.OpenKey(registry.LOCAL_MACHINE, subKey, registry.READ) +func getRegStringInternal(key registry.Key, subKey, name string) (string, error) { + key, err := registry.OpenKey(key, subKey, registry.READ) if err != nil { if err != ErrNoValue { log.Printf("registry.OpenKey(%v): %v", subKey, err) @@ -109,6 +109,24 @@ func getRegStringInternal(subKey, name string) (string, error) { return val, nil } +// GetRegUserString looks up a registry path in the current user key, or returns +// an empty string and error. +func GetRegUserString(name string) (string, error) { + return getRegStringInternal(registry.CURRENT_USER, regBase, name) +} + +// SetRegUserString sets a SZ value identified by name in the current user key +// to the string specified by value. +func SetRegUserString(name, value string) error { + key, _, err := registry.CreateKey(registry.CURRENT_USER, regBase, registry.SET_VALUE) + if err != nil { + log.Printf("registry.CreateKey(%v): %v", regBase, err) + } + defer key.Close() + + return key.SetStringValue(name, value) +} + // GetRegStrings looks up a registry value in the local machine path, or returns // the given default if it can't. 
func GetRegStrings(name string, defval []string) []string { diff --git a/wgengine/magicsock/derp.go b/wgengine/magicsock/derp.go index 1735a71d83c42..69c5cbc90a5f4 100644 --- a/wgengine/magicsock/derp.go +++ b/wgengine/magicsock/derp.go @@ -7,11 +7,12 @@ import ( "bufio" "context" "fmt" + "maps" "net" "net/netip" "reflect" "runtime" - "sort" + "slices" "sync" "time" "unsafe" @@ -907,12 +908,7 @@ func (c *Conn) foreachActiveDerpSortedLocked(fn func(regionID int, ad activeDerp } return } - ids := make([]int, 0, len(c.activeDerp)) - for id := range c.activeDerp { - ids = append(ids, id) - } - sort.Ints(ids) - for _, id := range ids { + for _, id := range slices.Sorted(maps.Keys(c.activeDerp)) { fn(id, c.activeDerp[id]) } } diff --git a/wgengine/magicsock/endpoint.go b/wgengine/magicsock/endpoint.go index b4ea54a02647e..53ecb84de833b 100644 --- a/wgengine/magicsock/endpoint.go +++ b/wgengine/magicsock/endpoint.go @@ -20,7 +20,6 @@ import ( "sync/atomic" "time" - "golang.org/x/crypto/poly1305" xmaps "golang.org/x/exp/maps" "golang.org/x/net/ipv4" "golang.org/x/net/ipv6" @@ -1067,9 +1066,14 @@ func (de *endpoint) removeSentDiscoPingLocked(txid stun.TxID, sp sentPing, resul delete(de.sentPing, txid) } +// poly1305AuthenticatorSize is the size, in bytes, of a poly1305 authenticator. +// It's the same as golang.org/x/crypto/poly1305.TagSize, but that +// page is deprecated and we only need this one constant, so we copy it. +const poly1305AuthenticatorSize = 16 + // discoPingSize is the size of a complete disco ping packet, without any padding. const discoPingSize = len(disco.Magic) + key.DiscoPublicRawLen + disco.NonceLen + - poly1305.TagSize + disco.MessageHeaderLen + disco.PingLen + poly1305AuthenticatorSize + disco.MessageHeaderLen + disco.PingLen // sendDiscoPing sends a ping with the provided txid to ep using de's discoKey. 
size // is the desired disco message size, including all disco headers but excluding IP/UDP diff --git a/wgengine/magicsock/magicsock.go b/wgengine/magicsock/magicsock.go index 7b121d415233e..de6b13fc1abec 100644 --- a/wgengine/magicsock/magicsock.go +++ b/wgengine/magicsock/magicsock.go @@ -1089,7 +1089,13 @@ func (c *Conn) Send(buffs [][]byte, ep conn.Endpoint) error { metricSendDataNetworkDown.Add(n) return errNetworkDown } - return ep.(*endpoint).send(buffs) + if ep, ok := ep.(*endpoint); ok { + return ep.send(buffs) + } + // If it's not of type *endpoint, it's probably *lazyEndpoint, which means + // we don't actually know who the peer is and we're waiting for wireguard-go + // to switch the endpoint. See go/corp/20732. + return nil } var errConnClosed = errors.New("Conn closed") diff --git a/wgengine/magicsock/magicsock_linux.go b/wgengine/magicsock/magicsock_linux.go index a647c90d2b176..f658c016b884d 100644 --- a/wgengine/magicsock/magicsock_linux.go +++ b/wgengine/magicsock/magicsock_linux.go @@ -5,28 +5,37 @@ package magicsock import ( "bytes" + "context" "encoding/binary" "errors" "fmt" "io" "net" "net/netip" + "strings" "syscall" "time" - "unsafe" + "github.com/mdlayher/socket" "golang.org/x/net/bpf" + "golang.org/x/net/ipv4" + "golang.org/x/net/ipv6" + "golang.org/x/sys/cpu" "golang.org/x/sys/unix" + "tailscale.com/disco" "tailscale.com/envknob" "tailscale.com/net/netns" + "tailscale.com/types/ipproto" "tailscale.com/types/key" "tailscale.com/types/logger" "tailscale.com/types/nettype" ) const ( - udpHeaderSize = 8 - ipv6FragmentHeaderSize = 8 + udpHeaderSize = 8 + + // discoMinHeaderSize is the minimum size of the disco header in bytes. + discoMinHeaderSize = len(disco.Magic) + 32 /* key length */ + disco.NonceLen ) // Enable/disable using raw sockets to receive disco traffic. @@ -38,8 +47,17 @@ var debugRawDiscoReads = envknob.RegisterBool("TS_DEBUG_RAW_DISCO") // These are our BPF filters that we use for testing packets. 
var ( magicsockFilterV4 = []bpf.Instruction{ - // For raw UDPv4 sockets, BPF receives the entire IP packet to - // inspect. + // For raw sockets (with ETH_P_IP set), the BPF program + // receives the entire IPv4 packet, but not the Ethernet + // header. + + // Double-check that this is a UDP packet; we shouldn't be + // seeing anything else given how we create our AF_PACKET + // socket, but an extra check here is cheap, and matches the + // check that we do in the IPv6 path. + bpf.LoadAbsolute{Off: 9, Size: 1}, + bpf.JumpIf{Cond: bpf.JumpEqual, Val: uint32(ipproto.UDP), SkipTrue: 1, SkipFalse: 0}, + bpf.RetConstant{Val: 0x0}, // Disco packets are so small they should never get // fragmented, and we don't want to handle reassembly. @@ -53,6 +71,25 @@ var ( // Load IP header length into X register. bpf.LoadMemShift{Off: 0}, + // Verify that we have a packet that's big enough to (possibly) + // contain a disco packet. + // + // The length of an IPv4 disco packet is composed of: + // - 8 bytes for the UDP header + // - N bytes for the disco packet header + // + // bpf will implicitly return 0 ("skip") if attempting an + // out-of-bounds load, so we can check the length of the packet + // loading a byte from that offset here. We subtract 1 byte + // from the offset to ensure that we accept a packet that's + // exactly the minimum size. + // + // We use LoadIndirect; since we loaded the start of the packet's + // payload into the X register, above, we don't need to add + // ipv4.HeaderLen to the offset (and this properly handles IPv4 + // extensions). 
+ bpf.LoadIndirect{Off: uint32(udpHeaderSize + discoMinHeaderSize - 1), Size: 1}, + // Get the first 4 bytes of the UDP packet, compare with our magic number bpf.LoadIndirect{Off: udpHeaderSize, Size: 4}, bpf.JumpIf{Cond: bpf.JumpEqual, Val: discoMagic1, SkipTrue: 0, SkipFalse: 3}, @@ -82,25 +119,24 @@ var ( // and thus we'd rather be conservative here and possibly not receive // disco packets rather than slow down the system. magicsockFilterV6 = []bpf.Instruction{ - // For raw UDPv6 sockets, BPF receives _only_ the UDP header onwards, not an entire IP packet. - // - // https://stackoverflow.com/questions/24514333/using-bpf-with-sock-dgram-on-linux-machine - // https://blog.cloudflare.com/epbf_sockets_hop_distance/ - // - // This is especially confusing because this *isn't* true for - // IPv4; see the following code from the 'ping' utility that - // corroborates this: - // - // https://github.com/iputils/iputils/blob/1ab5fa/ping/ping.c#L1667-L1676 - // https://github.com/iputils/iputils/blob/1ab5fa/ping/ping6_common.c#L933-L941 + // Do a bounds check to ensure we have enough space for a disco + // packet; see the comment in the IPv4 BPF program for more + // details. + bpf.LoadAbsolute{Off: uint32(ipv6.HeaderLen + udpHeaderSize + discoMinHeaderSize - 1), Size: 1}, + + // Verify that the 'next header' value of the IPv6 packet is + // UDP, which is what we're expecting; if it's anything else + // (including extension headers), we skip the packet. + bpf.LoadAbsolute{Off: 6, Size: 1}, + bpf.JumpIf{Cond: bpf.JumpEqual, Val: uint32(ipproto.UDP), SkipTrue: 0, SkipFalse: 5}, // Compare with our magic number. Start by loading and // comparing the first 4 bytes of the UDP payload. 
- bpf.LoadAbsolute{Off: udpHeaderSize, Size: 4}, + bpf.LoadAbsolute{Off: ipv6.HeaderLen + udpHeaderSize, Size: 4}, bpf.JumpIf{Cond: bpf.JumpEqual, Val: discoMagic1, SkipTrue: 0, SkipFalse: 3}, // Compare the next 2 bytes - bpf.LoadAbsolute{Off: udpHeaderSize + 4, Size: 2}, + bpf.LoadAbsolute{Off: ipv6.HeaderLen + udpHeaderSize + 4, Size: 2}, bpf.JumpIf{Cond: bpf.JumpEqual, Val: discoMagic2, SkipTrue: 0, SkipFalse: 1}, // Accept the whole packet @@ -140,21 +176,24 @@ func (c *Conn) listenRawDisco(family string) (io.Closer, error) { } var ( - network string + udpnet string addr string - testAddr string + proto int + testAddr netip.AddrPort prog []bpf.Instruction ) switch family { case "ip4": - network = "ip4:17" + udpnet = "udp4" addr = "0.0.0.0" - testAddr = "127.0.0.1:1" + proto = ethernetProtoIPv4() + testAddr = netip.AddrPortFrom(netip.AddrFrom4([4]byte{127, 0, 0, 1}), 1) prog = magicsockFilterV4 case "ip6": - network = "ip6:17" + udpnet = "udp6" addr = "::" - testAddr = "[::1]:1" + proto = ethernetProtoIPv6() + testAddr = netip.AddrPortFrom(netip.IPv6Loopback(), 1) prog = magicsockFilterV6 default: return nil, fmt.Errorf("unsupported address family %q", family) @@ -165,72 +204,214 @@ func (c *Conn) listenRawDisco(family string) (io.Closer, error) { return nil, fmt.Errorf("assembling filter: %w", err) } - pc, err := net.ListenPacket(network, addr) + sock, err := socket.Socket( + unix.AF_PACKET, + unix.SOCK_DGRAM, + proto, + "afpacket", + nil, // no config + ) if err != nil { - return nil, fmt.Errorf("creating packet conn: %w", err) + return nil, fmt.Errorf("creating AF_PACKET socket: %w", err) } - if err := setBPF(pc, asm); err != nil { - pc.Close() + if err := sock.SetBPF(asm); err != nil { + sock.Close() return nil, fmt.Errorf("installing BPF filter: %w", err) } // If all the above succeeds, we should be ready to receive. Just // out of paranoia, check that we do receive a well-formed disco // packet. 
- tc, err := net.ListenPacket("udp", net.JoinHostPort(addr, "0")) + tc, err := net.ListenPacket(udpnet, net.JoinHostPort(addr, "0")) if err != nil { - pc.Close() + sock.Close() return nil, fmt.Errorf("creating disco test socket: %w", err) } defer tc.Close() - if _, err := tc.(*net.UDPConn).WriteToUDPAddrPort(testDiscoPacket, netip.MustParseAddrPort(testAddr)); err != nil { - pc.Close() + if _, err := tc.(*net.UDPConn).WriteToUDPAddrPort(testDiscoPacket, testAddr); err != nil { + sock.Close() return nil, fmt.Errorf("writing disco test packet: %w", err) } - pc.SetReadDeadline(time.Now().Add(100 * time.Millisecond)) - var buf [1500]byte + + const selfTestTimeout = 100 * time.Millisecond + if err := sock.SetReadDeadline(time.Now().Add(selfTestTimeout)); err != nil { + sock.Close() + return nil, fmt.Errorf("setting socket timeout: %w", err) + } + + var ( + ctx = context.Background() + buf [1500]byte + ) for { - n, _, err := pc.ReadFrom(buf[:]) + n, _, err := sock.Recvfrom(ctx, buf[:], 0) if err != nil { - pc.Close() + sock.Close() return nil, fmt.Errorf("reading during raw disco self-test: %w", err) } - if n < udpHeaderSize { + + _ /* src */, _ /* dst */, payload := parseUDPPacket(buf[:n], family == "ip6") + if payload == nil { continue } - if !bytes.Equal(buf[udpHeaderSize:n], testDiscoPacket) { + if !bytes.Equal(payload, testDiscoPacket) { + c.discoLogf("listenRawDisco: self-test: received mismatched UDP packet of %d bytes", len(payload)) continue } + c.logf("[v1] listenRawDisco: self-test passed for %s", family) break } - pc.SetReadDeadline(time.Time{}) + sock.SetReadDeadline(time.Time{}) - go c.receiveDisco(pc, family == "ip6") - return pc, nil + go c.receiveDisco(sock, family == "ip6") + return sock, nil } -func (c *Conn) receiveDisco(pc net.PacketConn, isIPV6 bool) { +// parseUDPPacket is a basic parser for UDP packets that returns the source and +// destination addresses, and the payload. The returned payload is a sub-slice +// of the input buffer. 
+// +// It expects to be called with a buffer that contains the entire UDP packet, +// including the IP header, and one that has been filtered with the BPF +// programs above. +// +// If an error occurs, it will return the zero values for all return values. +func parseUDPPacket(buf []byte, isIPv6 bool) (src, dst netip.AddrPort, payload []byte) { + // First, parse the IPv4 or IPv6 header to get to the UDP header. Since + // we assume this was filtered with BPF, we know that there will be no + // IPv6 extension headers. + var ( + srcIP, dstIP netip.Addr + udp []byte + ) + if isIPv6 { + // Basic length check to ensure that we don't panic + if len(buf) < ipv6.HeaderLen+udpHeaderSize { + return + } + + // Extract the source and destination addresses from the IPv6 + // header. + srcIP, _ = netip.AddrFromSlice(buf[8:24]) + dstIP, _ = netip.AddrFromSlice(buf[24:40]) + + // We know that the UDP packet starts immediately after the IPv6 + // packet. + udp = buf[ipv6.HeaderLen:] + } else { + // This is an IPv4 packet; read the length field from the header. + if len(buf) < ipv4.HeaderLen { + return + } + udpOffset := int((buf[0] & 0x0F) << 2) + if udpOffset+udpHeaderSize > len(buf) { + return + } + + // Parse the source and destination IPs. + srcIP, _ = netip.AddrFromSlice(buf[12:16]) + dstIP, _ = netip.AddrFromSlice(buf[16:20]) + udp = buf[udpOffset:] + } + + // Parse the ports + srcPort := binary.BigEndian.Uint16(udp[0:2]) + dstPort := binary.BigEndian.Uint16(udp[2:4]) + + // The payload starts after the UDP header. + payload = udp[8:] + return netip.AddrPortFrom(srcIP, srcPort), netip.AddrPortFrom(dstIP, dstPort), payload +} + +// ethernetProtoIPv4 returns the constant unix.ETH_P_IP, in network byte order. +// packet(7) sockets require that the 'protocol' argument be in network byte +// order; see: +// +// https://man7.org/linux/man-pages/man7/packet.7.html +// +// Instead of using htons at runtime, we can just hardcode the value here... 
+// but we also have a test that verifies that this is correct. +func ethernetProtoIPv4() int { + if cpu.IsBigEndian { + return 0x0800 + } else { + return 0x0008 + } +} + +// ethernetProtoIPv6 returns the constant unix.ETH_P_IPV6, and is otherwise the +// same as ethernetProtoIPv4. +func ethernetProtoIPv6() int { + if cpu.IsBigEndian { + return 0x86dd + } else { + return 0xdd86 + } +} + +func (c *Conn) discoLogf(format string, args ...any) { + // Enable debug logging if we're debugging raw disco reads or if the + // magicsock component logs are on. + if debugRawDiscoReads() { + c.logf(format, args...) + } else { + c.dlogf(format, args...) + } +} + +func (c *Conn) receiveDisco(pc *socket.Conn, isIPV6 bool) { + // Given that we're parsing raw packets, be extra careful and recover + // from any panics in this function. + // + // If we didn't have a recover() here and panic'd, we'd take down the + // entire process since this function is the top of a goroutine, and Go + // will kill the process if a goroutine panics and it unwinds past the + // top-level function. 
+ defer func() { + if err := recover(); err != nil { + c.logf("[unexpected] recovered from panic in receiveDisco(isIPv6=%v): %v", isIPV6, err) + } + }() + + ctx := context.Background() + + // Set up our loggers + var family string + if isIPV6 { + family = "ip6" + } else { + family = "ip4" + } + var ( + prefix string = "disco raw " + family + ": " + logf logger.Logf = logger.WithPrefix(c.logf, prefix) + dlogf logger.Logf = logger.WithPrefix(c.discoLogf, prefix) + ) + var buf [1500]byte for { - n, src, err := pc.ReadFrom(buf[:]) + n, src, err := pc.Recvfrom(ctx, buf[:], 0) if debugRawDiscoReads() { - c.logf("raw disco read from %v = (%v, %v)", src, n, err) + logf("read from %s = (%v, %v)", printSockaddr(src), n, err) } - if errors.Is(err, net.ErrClosed) { + if err != nil && (errors.Is(err, net.ErrClosed) || err.Error() == "use of closed file") { + // EOF; no need to print an error return } else if err != nil { - c.logf("disco raw reader failed: %v", err) + logf("reader failed: %v", err) return } - if n < udpHeaderSize { - // Too small to be a valid UDP datagram, drop. + + srcAddr, dstAddr, payload := parseUDPPacket(buf[:n], family == "ip6") + if payload == nil { + // callee logged continue } - dstPort := binary.BigEndian.Uint16(buf[2:4]) + dstPort := dstAddr.Port() if dstPort == 0 { - c.logf("[unexpected] disco raw: received packet for port 0") + logf("[unexpected] received packet for port 0") } var acceptPort uint16 @@ -242,59 +423,58 @@ func (c *Conn) receiveDisco(pc net.PacketConn, isIPV6 bool) { if acceptPort == 0 { // This should only typically happen if the receiving address family // was recently disabled. 
- c.dlogf("[v1] disco raw: dropping packet for port %d as acceptPort=0", dstPort) + dlogf("[v1] dropping packet for port %d as acceptPort=0", dstPort) continue } + // If the packet isn't destined for our local port, then we + // should drop it since it might be for another Tailscale + // process on the same machine, or NATed to a different machine + // if this is a router, etc. + // + // We get the local port to compare against inside the receive + // loop; we can't cache this beforehand because it can change + // if/when we rebind. if dstPort != acceptPort { - c.dlogf("[v1] disco raw: dropping packet for port %d", dstPort) - continue - } - - srcIP, ok := netip.AddrFromSlice(src.(*net.IPAddr).IP) - if !ok { - c.logf("[unexpected] PacketConn.ReadFrom returned not-an-IP %v in from", src) + dlogf("[v1] dropping packet for port %d that isn't our local port", dstPort) continue } - srcPort := binary.BigEndian.Uint16(buf[:2]) - if srcIP.Is4() { - metricRecvDiscoPacketIPv4.Add(1) - } else { + if isIPV6 { metricRecvDiscoPacketIPv6.Add(1) + } else { + metricRecvDiscoPacketIPv4.Add(1) } - c.handleDiscoMessage(buf[udpHeaderSize:n], netip.AddrPortFrom(srcIP, srcPort), key.NodePublic{}, discoRXPathRawSocket) + c.handleDiscoMessage(payload, srcAddr, key.NodePublic{}, discoRXPathRawSocket) } } -// setBPF installs filter as the BPF filter on conn. -// Ideally we would just use SetBPF as implemented in x/net/ipv4, -// but x/net/ipv6 doesn't implement it. And once you've written -// this code once, it turns out to be address family agnostic, so -// we might as well use it on both and get to use a net.PacketConn -// directly for both families instead of being stuck with -// different types. 
-func setBPF(conn net.PacketConn, filter []bpf.RawInstruction) error { - sc, err := conn.(*net.IPConn).SyscallConn() - if err != nil { - return err - } - prog := &unix.SockFprog{ - Len: uint16(len(filter)), - Filter: (*unix.SockFilter)(unsafe.Pointer(&filter[0])), - } - var setErr error - err = sc.Control(func(fd uintptr) { - setErr = unix.SetsockoptSockFprog(int(fd), unix.SOL_SOCKET, unix.SO_ATTACH_FILTER, prog) - }) - if err != nil { - return err - } - if setErr != nil { - return err +// printSockaddr is a helper function to pretty-print various sockaddr types. +func printSockaddr(sa unix.Sockaddr) string { + switch sa := sa.(type) { + case *unix.SockaddrInet4: + addr := netip.AddrFrom4(sa.Addr) + return netip.AddrPortFrom(addr, uint16(sa.Port)).String() + case *unix.SockaddrInet6: + addr := netip.AddrFrom16(sa.Addr) + return netip.AddrPortFrom(addr, uint16(sa.Port)).String() + case *unix.SockaddrLinklayer: + hwaddr := sa.Addr[:sa.Halen] + + var buf strings.Builder + fmt.Fprintf(&buf, "link(ty=0x%04x,if=%d):[", sa.Protocol, sa.Ifindex) + for i, b := range hwaddr { + if i > 0 { + buf.WriteByte(':') + } + fmt.Fprintf(&buf, "%02x", b) + } + buf.WriteByte(']') + return buf.String() + default: + return fmt.Sprintf("unknown(%T)", sa) } - return nil } // trySetSocketBuffer attempts to set SO_SNDBUFFORCE and SO_RECVBUFFORCE which diff --git a/wgengine/magicsock/magicsock_linux_test.go b/wgengine/magicsock/magicsock_linux_test.go new file mode 100644 index 0000000000000..6b86b04f2c8d4 --- /dev/null +++ b/wgengine/magicsock/magicsock_linux_test.go @@ -0,0 +1,148 @@ +// Copyright (c) Tailscale Inc & AUTHORS +// SPDX-License-Identifier: BSD-3-Clause + +package magicsock + +import ( + "bytes" + "encoding/binary" + "net/netip" + "testing" + + "golang.org/x/sys/cpu" + "golang.org/x/sys/unix" + "tailscale.com/disco" +) + +func TestParseUDPPacket(t *testing.T) { + src4 := netip.MustParseAddrPort("127.0.0.1:12345") + dst4 := netip.MustParseAddrPort("127.0.0.2:54321") + + src6 := 
netip.MustParseAddrPort("[::1]:12345") + dst6 := netip.MustParseAddrPort("[::2]:54321") + + udp4Packet := []byte{ + // IPv4 header + 0x45, 0x00, 0x00, 0x26, 0x00, 0x00, 0x00, 0x00, + 0x40, 0x11, 0x00, 0x00, + 0x7f, 0x00, 0x00, 0x01, // source ip + 0x7f, 0x00, 0x00, 0x02, // dest ip + + // UDP header + 0x30, 0x39, // src port + 0xd4, 0x31, // dest port + 0x00, 0x12, // length; 8 bytes header + 10 bytes payload = 18 bytes + 0x00, 0x00, // checksum; unused + + // Payload: disco magic plus 4 bytes + 0x54, 0x53, 0xf0, 0x9f, 0x92, 0xac, 0x00, 0x01, 0x02, 0x03, + } + udp6Packet := []byte{ + // IPv6 header + 0x60, 0x00, 0x00, 0x00, + 0x00, 0x12, // payload length + 0x11, // next header: UDP + 0x00, // hop limit; unused + + // Source IP + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, + // Dest IP + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, + + // UDP header + 0x30, 0x39, // src port + 0xd4, 0x31, // dest port + 0x00, 0x12, // length; 8 bytes header + 10 bytes payload = 18 bytes + 0x00, 0x00, // checksum; unused + + // Payload: disco magic plus 4 bytes + 0x54, 0x53, 0xf0, 0x9f, 0x92, 0xac, 0x00, 0x01, 0x02, 0x03, + } + + // Verify that parsing the UDP packet works correctly. 
+ t.Run("IPv4", func(t *testing.T) { + src, dst, payload := parseUDPPacket(udp4Packet, false) + if src != src4 { + t.Errorf("src = %v; want %v", src, src4) + } + if dst != dst4 { + t.Errorf("dst = %v; want %v", dst, dst4) + } + if !bytes.HasPrefix(payload, []byte(disco.Magic)) { + t.Errorf("payload = %x; must start with %x", payload, disco.Magic) + } + }) + t.Run("IPv6", func(t *testing.T) { + src, dst, payload := parseUDPPacket(udp6Packet, true) + if src != src6 { + t.Errorf("src = %v; want %v", src, src6) + } + if dst != dst6 { + t.Errorf("dst = %v; want %v", dst, dst6) + } + if !bytes.HasPrefix(payload, []byte(disco.Magic)) { + t.Errorf("payload = %x; must start with %x", payload, disco.Magic) + } + }) + t.Run("Truncated", func(t *testing.T) { + truncateBy := func(b []byte, n int) []byte { + if n >= len(b) { + return nil + } + return b[:len(b)-n] + } + + src, dst, payload := parseUDPPacket(truncateBy(udp4Packet, 11), false) + if payload != nil { + t.Errorf("payload = %x; want nil", payload) + } + if src.IsValid() || dst.IsValid() { + t.Errorf("src = %v, dst = %v; want invalid", src, dst) + } + + src, dst, payload = parseUDPPacket(truncateBy(udp6Packet, 11), true) + if payload != nil { + t.Errorf("payload = %x; want nil", payload) + } + if src.IsValid() || dst.IsValid() { + t.Errorf("src = %v, dst = %v; want invalid", src, dst) + } + }) +} + +func TestEthernetProto(t *testing.T) { + htons := func(x uint16) int { + // Network byte order is big-endian; write the value as + // big-endian to a byte slice and read it back in the native + // endian-ness. This is a no-op on a big-endian platform and a + // byte swap on a little-endian platform. 
+ var b [2]byte + binary.BigEndian.PutUint16(b[:], x) + return int(binary.NativeEndian.Uint16(b[:])) + } + + if v4 := ethernetProtoIPv4(); v4 != htons(unix.ETH_P_IP) { + t.Errorf("ethernetProtoIPv4 = 0x%04x; want 0x%04x", v4, htons(unix.ETH_P_IP)) + } + if v6 := ethernetProtoIPv6(); v6 != htons(unix.ETH_P_IPV6) { + t.Errorf("ethernetProtoIPv6 = 0x%04x; want 0x%04x", v6, htons(unix.ETH_P_IPV6)) + } + + // As a way to verify that the htons function is working correctly, + // assert that the ETH_P_IP value returned from our function matches + // the value defined in the unix package based on whether the host is + // big-endian (network byte order) or little-endian. + if cpu.IsBigEndian { + if v4 := ethernetProtoIPv4(); v4 != unix.ETH_P_IP { + t.Errorf("ethernetProtoIPv4 = 0x%04x; want 0x%04x", v4, unix.ETH_P_IP) + } + } else { + if v4 := ethernetProtoIPv4(); v4 == unix.ETH_P_IP { + t.Errorf("ethernetProtoIPv4 = 0x%04x; want 0x%04x", v4, htons(unix.ETH_P_IP)) + } else { + t.Logf("ethernetProtoIPv4 = 0x%04x, correctly different from 0x%04x", v4, unix.ETH_P_IP) + } + } +} diff --git a/wgengine/netstack/gro/gro.go b/wgengine/netstack/gro/gro.go new file mode 100644 index 0000000000000..b268534eb46c8 --- /dev/null +++ b/wgengine/netstack/gro/gro.go @@ -0,0 +1,104 @@ +// Copyright (c) Tailscale Inc & AUTHORS +// SPDX-License-Identifier: BSD-3-Clause + +// Package gro implements GRO for the receive (write) path into gVisor. +package gro + +import ( + "bytes" + "github.com/tailscale/wireguard-go/tun" + "gvisor.dev/gvisor/pkg/buffer" + "gvisor.dev/gvisor/pkg/tcpip" + "gvisor.dev/gvisor/pkg/tcpip/header" + "gvisor.dev/gvisor/pkg/tcpip/header/parse" + "gvisor.dev/gvisor/pkg/tcpip/stack" + "tailscale.com/net/packet" + "tailscale.com/types/ipproto" +) + +// RXChecksumOffload validates IPv4, TCP, and UDP header checksums in p, +// returning an equivalent *stack.PacketBuffer if they are valid, otherwise nil. 
+// The set of headers validated covers where gVisor would perform validation if +// !stack.PacketBuffer.RXChecksumValidated, i.e. it satisfies +// stack.CapabilityRXChecksumOffload. Other protocols with checksum fields, +// e.g. ICMP{v6}, are still validated by gVisor regardless of rx checksum +// offloading capabilities. +func RXChecksumOffload(p *packet.Parsed) *stack.PacketBuffer { + var ( + pn tcpip.NetworkProtocolNumber + csumStart int + ) + buf := p.Buffer() + + switch p.IPVersion { + case 4: + if len(buf) < header.IPv4MinimumSize { + return nil + } + csumStart = int((buf[0] & 0x0F) * 4) + if csumStart < header.IPv4MinimumSize || csumStart > header.IPv4MaximumHeaderSize || len(buf) < csumStart { + return nil + } + if ^tun.Checksum(buf[:csumStart], 0) != 0 { + return nil + } + pn = header.IPv4ProtocolNumber + case 6: + if len(buf) < header.IPv6FixedHeaderSize { + return nil + } + csumStart = header.IPv6FixedHeaderSize + pn = header.IPv6ProtocolNumber + if p.IPProto != ipproto.ICMPv6 && p.IPProto != ipproto.TCP && p.IPProto != ipproto.UDP { + // buf could have extension headers before a UDP or TCP header, but + // packet.Parsed.IPProto will be set to the ext header type, so we + // have to look deeper. We are still responsible for validating the + // L4 checksum in this case. So, make use of gVisor's existing + // extension header parsing via parse.IPv6() in order to unpack the + // L4 csumStart index. This is not particularly efficient as we have + // to allocate a short-lived stack.PacketBuffer that cannot be + // re-used. parse.IPv6() "consumes" the IPv6 headers, so we can't + // inject this stack.PacketBuffer into the stack at a later point. + packetBuf := stack.NewPacketBuffer(stack.PacketBufferOptions{ + Payload: buffer.MakeWithData(bytes.Clone(buf)), + }) + defer packetBuf.DecRef() + // The rightmost bool returns false only if packetBuf is too short, + // which we've already accounted for above. 
+ transportProto, _, _, _, _ := parse.IPv6(packetBuf) + if transportProto == header.TCPProtocolNumber || transportProto == header.UDPProtocolNumber { + csumLen := packetBuf.Data().Size() + if len(buf) < csumLen { + return nil + } + csumStart = len(buf) - csumLen + p.IPProto = ipproto.Proto(transportProto) + } + } + } + + if p.IPProto == ipproto.TCP || p.IPProto == ipproto.UDP { + lenForPseudo := len(buf) - csumStart + csum := tun.PseudoHeaderChecksum( + uint8(p.IPProto), + p.Src.Addr().AsSlice(), + p.Dst.Addr().AsSlice(), + uint16(lenForPseudo)) + csum = tun.Checksum(buf[csumStart:], csum) + if ^csum != 0 { + return nil + } + } + + packetBuf := stack.NewPacketBuffer(stack.PacketBufferOptions{ + Payload: buffer.MakeWithData(bytes.Clone(buf)), + }) + packetBuf.NetworkProtocolNumber = pn + // Setting this is not technically required. gVisor overrides where + // stack.CapabilityRXChecksumOffload is advertised from Capabilities(). + // https://github.com/google/gvisor/blob/64c016c92987cc04dfd4c7b091ddd21bdad875f8/pkg/tcpip/stack/nic.go#L763 + // This is also why we offload for all packets since we cannot signal this + // per-packet. + packetBuf.RXChecksumValidated = true + return packetBuf +} diff --git a/wgengine/netstack/gro/gro_default.go b/wgengine/netstack/gro/gro_default.go new file mode 100644 index 0000000000000..f92ee15ecac15 --- /dev/null +++ b/wgengine/netstack/gro/gro_default.go @@ -0,0 +1,76 @@ +// Copyright (c) Tailscale Inc & AUTHORS +// SPDX-License-Identifier: BSD-3-Clause + +//go:build !ios + +package gro + +import ( + "sync" + + "gvisor.dev/gvisor/pkg/tcpip/stack" + nsgro "gvisor.dev/gvisor/pkg/tcpip/stack/gro" + "tailscale.com/net/packet" +) + +var ( + groPool sync.Pool +) + +func init() { + groPool.New = func() any { + g := &GRO{} + g.gro.Init(true) + return g + } +} + +// GRO coalesces incoming packets to increase throughput. It is NOT thread-safe. 
+type GRO struct { + gro nsgro.GRO + maybeEnqueued bool +} + +// NewGRO returns a new instance of *GRO from a sync.Pool. It can be returned to +// the pool with GRO.Flush(). +func NewGRO() *GRO { + return groPool.Get().(*GRO) +} + +// SetDispatcher sets the underlying stack.NetworkDispatcher where packets are +// delivered. +func (g *GRO) SetDispatcher(d stack.NetworkDispatcher) { + g.gro.Dispatcher = d +} + +// Enqueue enqueues the provided packet for GRO. It may immediately deliver +// it to the underlying stack.NetworkDispatcher depending on its contents. To +// explicitly flush previously enqueued packets see Flush(). +func (g *GRO) Enqueue(p *packet.Parsed) { + if g.gro.Dispatcher == nil { + return + } + pkt := RXChecksumOffload(p) + if pkt == nil { + return + } + // TODO(jwhited): g.gro.Enqueue() duplicates a lot of p.Decode(). + // We may want to push stack.PacketBuffer further up as a + // replacement for packet.Parsed, or inversely push packet.Parsed + // down into refactored GRO logic. + g.gro.Enqueue(pkt) + g.maybeEnqueued = true + pkt.DecRef() +} + +// Flush flushes previously enqueued packets to the underlying +// stack.NetworkDispatcher, and returns GRO to a pool for later re-use. Callers +// MUST NOT use GRO once it has been Flush()'d. 
+func (g *GRO) Flush() { + if g.gro.Dispatcher != nil && g.maybeEnqueued { + g.gro.Flush() + } + g.gro.Dispatcher = nil + g.maybeEnqueued = false + groPool.Put(g) +} diff --git a/wgengine/netstack/gro/gro_ios.go b/wgengine/netstack/gro/gro_ios.go new file mode 100644 index 0000000000000..627b42d7e5cfd --- /dev/null +++ b/wgengine/netstack/gro/gro_ios.go @@ -0,0 +1,23 @@ +// Copyright (c) Tailscale Inc & AUTHORS +// SPDX-License-Identifier: BSD-3-Clause + +//go:build ios + +package gro + +import ( + "gvisor.dev/gvisor/pkg/tcpip/stack" + "tailscale.com/net/packet" +) + +type GRO struct{} + +func NewGRO() *GRO { + panic("unsupported on iOS") +} + +func (g *GRO) SetDispatcher(_ stack.NetworkDispatcher) {} + +func (g *GRO) Enqueue(_ *packet.Parsed) {} + +func (g *GRO) Flush() {} diff --git a/wgengine/netstack/link_endpoint_test.go b/wgengine/netstack/gro/gro_test.go similarity index 97% rename from wgengine/netstack/link_endpoint_test.go rename to wgengine/netstack/gro/gro_test.go index 97bc9e70af5fd..1eb200a05134c 100644 --- a/wgengine/netstack/link_endpoint_test.go +++ b/wgengine/netstack/gro/gro_test.go @@ -1,7 +1,7 @@ // Copyright (c) Tailscale Inc & AUTHORS // SPDX-License-Identifier: BSD-3-Clause -package netstack +package gro import ( "bytes" @@ -13,7 +13,7 @@ import ( "tailscale.com/net/packet" ) -func Test_rxChecksumOffload(t *testing.T) { +func Test_RXChecksumOffload(t *testing.T) { payloadLen := 100 tcpFields := &header.TCPFields{ @@ -97,7 +97,7 @@ func Test_rxChecksumOffload(t *testing.T) { t.Run(tt.name, func(t *testing.T) { p := &packet.Parsed{} p.Decode(tt.input) - got := rxChecksumOffload(p) + got := RXChecksumOffload(p) if tt.wantPB != (got != nil) { t.Fatalf("wantPB = %v != (got != nil): %v", tt.wantPB, got != nil) } diff --git a/wgengine/netstack/gro_default.go b/wgengine/netstack/gro_default.go deleted file mode 100644 index ef4ff4b98df88..0000000000000 --- a/wgengine/netstack/gro_default.go +++ /dev/null @@ -1,16 +0,0 @@ -// Copyright (c) Tailscale 
Inc & AUTHORS -// SPDX-License-Identifier: BSD-3-Clause - -//go:build !ios - -package netstack - -import ( - nsgro "gvisor.dev/gvisor/pkg/tcpip/stack/gro" -) - -// gro wraps a gVisor GRO implementation. It exists solely to prevent iOS from -// importing said package (see _ios.go). -type gro struct { - nsgro.GRO -} diff --git a/wgengine/netstack/gro_ios.go b/wgengine/netstack/gro_ios.go deleted file mode 100644 index fb252f7db7658..0000000000000 --- a/wgengine/netstack/gro_ios.go +++ /dev/null @@ -1,30 +0,0 @@ -// Copyright (c) Tailscale Inc & AUTHORS -// SPDX-License-Identifier: BSD-3-Clause - -//go:build ios - -package netstack - -import ( - "gvisor.dev/gvisor/pkg/tcpip/stack" -) - -// gro on iOS delivers packets to its Dispatcher, immediately. This type exists -// to prevent importation of the gVisor GRO implementation as said package -// increases binary size. This is a penalty we do not wish to pay since we -// currently do not leverage GRO on iOS. -type gro struct { - Dispatcher stack.NetworkDispatcher -} - -func (g *gro) Init(v bool) { - if v { - panic("GRO is not supported on this platform") - } -} - -func (g *gro) Flush() {} - -func (g *gro) Enqueue(pkt *stack.PacketBuffer) { - g.Dispatcher.DeliverNetworkPacket(pkt.NetworkProtocolNumber, pkt) -} diff --git a/wgengine/netstack/link_endpoint.go b/wgengine/netstack/link_endpoint.go index 238e14cf0bff0..485d829a3b8e5 100644 --- a/wgengine/netstack/link_endpoint.go +++ b/wgengine/netstack/link_endpoint.go @@ -4,18 +4,15 @@ package netstack import ( - "bytes" "context" "sync" - "github.com/tailscale/wireguard-go/tun" - "gvisor.dev/gvisor/pkg/buffer" "gvisor.dev/gvisor/pkg/tcpip" "gvisor.dev/gvisor/pkg/tcpip/header" - "gvisor.dev/gvisor/pkg/tcpip/header/parse" "gvisor.dev/gvisor/pkg/tcpip/stack" "tailscale.com/net/packet" "tailscale.com/types/ipproto" + "tailscale.com/wgengine/netstack/gro" ) type queue struct { @@ -83,54 +80,72 @@ func (q *queue) Num() int { var _ stack.LinkEndpoint = (*linkEndpoint)(nil) var _ 
stack.GSOEndpoint = (*linkEndpoint)(nil) +type supportedGRO int + +const ( + groNotSupported supportedGRO = iota + tcpGROSupported +) + // linkEndpoint implements stack.LinkEndpoint and stack.GSOEndpoint. Outbound // packets written by gVisor towards Tailscale are stored in a channel. -// Inbound is fed to gVisor via injectInbound or enqueueGRO. This is loosely +// Inbound is fed to gVisor via injectInbound or gro. This is loosely // modeled after gvisor.dev/pkg/tcpip/link/channel.Endpoint. type linkEndpoint struct { SupportedGSOKind stack.SupportedGSO - initGRO initGRO + supportedGRO supportedGRO mu sync.RWMutex // mu guards the following fields dispatcher stack.NetworkDispatcher linkAddr tcpip.LinkAddress mtu uint32 - gro gro // mu only guards access to gro.Dispatcher q *queue // outbound } -// TODO(jwhited): move to linkEndpointOpts struct or similar. -type initGRO bool - -const ( - disableGRO initGRO = false - enableGRO initGRO = true -) - -func newLinkEndpoint(size int, mtu uint32, linkAddr tcpip.LinkAddress, gro initGRO) *linkEndpoint { +func newLinkEndpoint(size int, mtu uint32, linkAddr tcpip.LinkAddress, supportedGRO supportedGRO) *linkEndpoint { le := &linkEndpoint{ + supportedGRO: supportedGRO, q: &queue{ c: make(chan *stack.PacketBuffer, size), }, mtu: mtu, linkAddr: linkAddr, } - le.initGRO = gro - le.gro.Init(bool(gro)) return le } +// gro attempts to enqueue p on g if l supports a GRO kind matching the +// transport protocol carried in p. gro may allocate g if it is nil. gro can +// either return the existing g, a newly allocated one, or nil. Callers are +// responsible for calling Flush() on the returned value if it is non-nil once +// they have finished iterating through all GRO candidates for a given vector. +// If gro allocates a *gro.GRO it will have l's stack.NetworkDispatcher set via +// SetDispatcher(). 
+func (l *linkEndpoint) gro(p *packet.Parsed, g *gro.GRO) *gro.GRO { + if l.supportedGRO == groNotSupported || p.IPProto != ipproto.TCP { + // IPv6 may have extension headers preceding a TCP header, but we trade + // for a fast path and assume p cannot be coalesced in such a case. + l.injectInbound(p) + return g + } + if g == nil { + l.mu.RLock() + d := l.dispatcher + l.mu.RUnlock() + g = gro.NewGRO() + g.SetDispatcher(d) + } + g.Enqueue(p) + return g +} + // Close closes l. Further packet injections will return an error, and all // pending packets are discarded. Close may be called concurrently with // WritePackets. func (l *linkEndpoint) Close() { l.mu.Lock() - if l.gro.Dispatcher != nil { - l.gro.Flush() - } l.dispatcher = nil - l.gro.Dispatcher = nil l.mu.Unlock() l.q.Close() l.Drain() @@ -162,93 +177,6 @@ func (l *linkEndpoint) NumQueued() int { return l.q.Num() } -// rxChecksumOffload validates IPv4, TCP, and UDP header checksums in p, -// returning an equivalent *stack.PacketBuffer if they are valid, otherwise nil. -// The set of headers validated covers where gVisor would perform validation if -// !stack.PacketBuffer.RXChecksumValidated, i.e. it satisfies -// stack.CapabilityRXChecksumOffload. Other protocols with checksum fields, -// e.g. ICMP{v6}, are still validated by gVisor regardless of rx checksum -// offloading capabilities. 
-func rxChecksumOffload(p *packet.Parsed) *stack.PacketBuffer { - var ( - pn tcpip.NetworkProtocolNumber - csumStart int - ) - buf := p.Buffer() - - switch p.IPVersion { - case 4: - if len(buf) < header.IPv4MinimumSize { - return nil - } - csumStart = int((buf[0] & 0x0F) * 4) - if csumStart < header.IPv4MinimumSize || csumStart > header.IPv4MaximumHeaderSize || len(buf) < csumStart { - return nil - } - if ^tun.Checksum(buf[:csumStart], 0) != 0 { - return nil - } - pn = header.IPv4ProtocolNumber - case 6: - if len(buf) < header.IPv6FixedHeaderSize { - return nil - } - csumStart = header.IPv6FixedHeaderSize - pn = header.IPv6ProtocolNumber - if p.IPProto != ipproto.ICMPv6 && p.IPProto != ipproto.TCP && p.IPProto != ipproto.UDP { - // buf could have extension headers before a UDP or TCP header, but - // packet.Parsed.IPProto will be set to the ext header type, so we - // have to look deeper. We are still responsible for validating the - // L4 checksum in this case. So, make use of gVisor's existing - // extension header parsing via parse.IPv6() in order to unpack the - // L4 csumStart index. This is not particularly efficient as we have - // to allocate a short-lived stack.PacketBuffer that cannot be - // re-used. parse.IPv6() "consumes" the IPv6 headers, so we can't - // inject this stack.PacketBuffer into the stack at a later point. - packetBuf := stack.NewPacketBuffer(stack.PacketBufferOptions{ - Payload: buffer.MakeWithData(bytes.Clone(buf)), - }) - defer packetBuf.DecRef() - // The rightmost bool returns false only if packetBuf is too short, - // which we've already accounted for above. 
- transportProto, _, _, _, _ := parse.IPv6(packetBuf) - if transportProto == header.TCPProtocolNumber || transportProto == header.UDPProtocolNumber { - csumLen := packetBuf.Data().Size() - if len(buf) < csumLen { - return nil - } - csumStart = len(buf) - csumLen - p.IPProto = ipproto.Proto(transportProto) - } - } - } - - if p.IPProto == ipproto.TCP || p.IPProto == ipproto.UDP { - lenForPseudo := len(buf) - csumStart - csum := tun.PseudoHeaderChecksum( - uint8(p.IPProto), - p.Src.Addr().AsSlice(), - p.Dst.Addr().AsSlice(), - uint16(lenForPseudo)) - csum = tun.Checksum(buf[csumStart:], csum) - if ^csum != 0 { - return nil - } - } - - packetBuf := stack.NewPacketBuffer(stack.PacketBufferOptions{ - Payload: buffer.MakeWithData(bytes.Clone(buf)), - }) - packetBuf.NetworkProtocolNumber = pn - // Setting this is not technically required. gVisor overrides where - // stack.CapabilityRXChecksumOffload is advertised from Capabilities(). - // https://github.com/google/gvisor/blob/64c016c92987cc04dfd4c7b091ddd21bdad875f8/pkg/tcpip/stack/nic.go#L763 - // This is also why we offload for all packets since we cannot signal this - // per-packet. - packetBuf.RXChecksumValidated = true - return packetBuf -} - func (l *linkEndpoint) injectInbound(p *packet.Parsed) { l.mu.RLock() d := l.dispatcher @@ -256,7 +184,7 @@ func (l *linkEndpoint) injectInbound(p *packet.Parsed) { if d == nil { return } - pkt := rxChecksumOffload(p) + pkt := gro.RXChecksumOffload(p) if pkt == nil { return } @@ -264,52 +192,12 @@ func (l *linkEndpoint) injectInbound(p *packet.Parsed) { pkt.DecRef() } -// enqueueGRO enqueues the provided packet for GRO. It may immediately deliver -// it to the underlying stack.NetworkDispatcher depending on its contents and if -// GRO was initialized via newLinkEndpoint. To explicitly flush previously -// enqueued packets see flushGRO. enqueueGRO is not thread-safe and must not -// be called concurrently with flushGRO. 
-func (l *linkEndpoint) enqueueGRO(p *packet.Parsed) { - l.mu.RLock() - defer l.mu.RUnlock() - if l.gro.Dispatcher == nil { - return - } - pkt := rxChecksumOffload(p) - if pkt == nil { - return - } - // TODO(jwhited): gro.Enqueue() duplicates a lot of p.Decode(). - // We may want to push stack.PacketBuffer further up as a - // replacement for packet.Parsed, or inversely push packet.Parsed - // down into refactored GRO logic. - l.gro.Enqueue(pkt) - pkt.DecRef() -} - -// flushGRO flushes previously enqueueGRO'd packets to the underlying -// stack.NetworkDispatcher. flushGRO is not thread-safe, and must not be -// called concurrently with enqueueGRO. -func (l *linkEndpoint) flushGRO() { - if !l.initGRO { - // If GRO was not initialized fast path return to avoid scanning GRO - // buckets (see l.gro.Flush()) that will always be empty. - return - } - l.mu.RLock() - defer l.mu.RUnlock() - if l.gro.Dispatcher != nil { - l.gro.Flush() - } -} - // Attach saves the stack network-layer dispatcher for use later when packets // are injected. func (l *linkEndpoint) Attach(dispatcher stack.NetworkDispatcher) { l.mu.Lock() defer l.mu.Unlock() l.dispatcher = dispatcher - l.gro.Dispatcher = dispatcher } // IsAttached implements stack.LinkEndpoint.IsAttached. 
diff --git a/wgengine/netstack/netstack.go b/wgengine/netstack/netstack.go index 2afbae0be888b..d029b6c194575 100644 --- a/wgengine/netstack/netstack.go +++ b/wgengine/netstack/netstack.go @@ -10,7 +10,6 @@ import ( "expvar" "fmt" "io" - "log" "math" "net" "net/netip" @@ -20,6 +19,7 @@ import ( "sync/atomic" "time" + "github.com/tailscale/wireguard-go/conn" "gvisor.dev/gvisor/pkg/refs" "gvisor.dev/gvisor/pkg/tcpip" "gvisor.dev/gvisor/pkg/tcpip/adapters/gonet" @@ -54,6 +54,7 @@ import ( "tailscale.com/wgengine" "tailscale.com/wgengine/filter" "tailscale.com/wgengine/magicsock" + "tailscale.com/wgengine/netstack/gro" ) const debugPackets = false @@ -186,6 +187,11 @@ type Impl struct { dns *dns.Manager driveForLocal drive.FileSystemForLocal // or nil + // loopbackPort, if non-nil, will enable Impl to loop back (dnat to + // :loopbackPort) TCP & UDP flows originally + // destined to serviceIP{v6}:loopbackPort. + loopbackPort *int + peerapiPort4Atomic atomic.Uint32 // uint16 port number for IPv4 peerapi peerapiPort6Atomic atomic.Uint32 // uint16 port number for IPv6 peerapi @@ -324,16 +330,15 @@ func Create(logf logger.Logf, tundev *tstun.Wrapper, e wgengine.Engine, mc *magi if err != nil { return nil, err } - var linkEP *linkEndpoint + supportedGSOKind := stack.GSONotSupported + supportedGROKind := groNotSupported if runtime.GOOS == "linux" { - // TODO(jwhited): add Windows GSO support https://github.com/tailscale/corp/issues/21874 - // TODO(jwhited): exercise enableGRO in relation to https://github.com/tailscale/corp/issues/22353 - linkEP = newLinkEndpoint(512, uint32(tstun.DefaultTUNMTU()), "", disableGRO) - // TODO(jwhited): re-enable GSO https://github.com/tailscale/corp/issues/22511 - linkEP.SupportedGSOKind = stack.GSONotSupported - } else { - linkEP = newLinkEndpoint(512, uint32(tstun.DefaultTUNMTU()), "", disableGRO) + // TODO(jwhited): add Windows support https://github.com/tailscale/corp/issues/21874 + supportedGROKind = tcpGROSupported + supportedGSOKind = 
stack.HostGSOSupported } + linkEP := newLinkEndpoint(512, uint32(tstun.DefaultTUNMTU()), "", supportedGROKind) + linkEP.SupportedGSOKind = supportedGSOKind if tcpipProblem := ipstack.CreateNIC(nicID, linkEP); tcpipProblem != nil { return nil, fmt.Errorf("could not create netstack NIC: %v", tcpipProblem) } @@ -378,10 +383,13 @@ func Create(logf logger.Logf, tundev *tstun.Wrapper, e wgengine.Engine, mc *magi dns: dns, driveForLocal: driveForLocal, } + loopbackPort, ok := envknob.LookupInt("TS_DEBUG_NETSTACK_LOOPBACK_PORT") + if ok && loopbackPort >= 0 && loopbackPort <= math.MaxUint16 { + ns.loopbackPort = &loopbackPort + } ns.ctx, ns.ctxCancel = context.WithCancel(context.Background()) ns.atomicIsLocalIPFunc.Store(ipset.FalseContainsIPFunc()) ns.tundev.PostFilterPacketInboundFromWireGuard = ns.injectInbound - ns.tundev.EndPacketVectorInboundFromWireGuardFlush = linkEP.flushGRO ns.tundev.PreFilterPacketOutboundToWireGuardNetstackIntercept = ns.handleLocalPackets stacksForMetrics.Store(ns, struct{}{}) return ns, nil @@ -707,12 +715,19 @@ func (ns *Impl) UpdateNetstackIPs(nm *netmap.NetworkMap) { } } +func (ns *Impl) isLoopbackPort(port uint16) bool { + if ns.loopbackPort != nil && int(port) == *ns.loopbackPort { + return true + } + return false +} + // handleLocalPackets is hooked into the tun datapath for packets leaving // the host and arriving at tailscaled. This method returns filter.DropSilently // to intercept a packet for handling, for instance traffic to quad-100. -func (ns *Impl) handleLocalPackets(p *packet.Parsed, t *tstun.Wrapper) filter.Response { +func (ns *Impl) handleLocalPackets(p *packet.Parsed, t *tstun.Wrapper, gro *gro.GRO) (filter.Response, *gro.GRO) { if ns.ctx.Err() != nil { - return filter.DropSilently + return filter.DropSilently, gro } // Determine if we care about this local packet. @@ -725,12 +740,12 @@ func (ns *Impl) handleLocalPackets(p *packet.Parsed, t *tstun.Wrapper) filter.Re // 80, and 8080. 
switch p.IPProto { case ipproto.TCP: - if port := p.Dst.Port(); port != 53 && port != 80 && port != 8080 { - return filter.Accept + if port := p.Dst.Port(); port != 53 && port != 80 && port != 8080 && !ns.isLoopbackPort(port) { + return filter.Accept, gro } case ipproto.UDP: - if port := p.Dst.Port(); port != 53 { - return filter.Accept + if port := p.Dst.Port(); port != 53 && !ns.isLoopbackPort(port) { + return filter.Accept, gro } } case viaRange.Contains(dst): @@ -744,7 +759,7 @@ func (ns *Impl) handleLocalPackets(p *packet.Parsed, t *tstun.Wrapper) filter.Re if !shouldHandle { // Unhandled means that we let the regular processing // occur without doing anything ourselves. - return filter.Accept + return filter.Accept, gro } if debugNetstack() { @@ -770,7 +785,7 @@ func (ns *Impl) handleLocalPackets(p *packet.Parsed, t *tstun.Wrapper) filter.Re } go ns.userPing(pingIP, pong, userPingDirectionInbound) - return filter.DropSilently + return filter.DropSilently, gro } // Fall through to writing inbound so netstack handles the @@ -779,14 +794,14 @@ func (ns *Impl) handleLocalPackets(p *packet.Parsed, t *tstun.Wrapper) filter.Re default: // Not traffic to the service IP or a 4via6 IP, so we don't // care about the packet; resume processing. - return filter.Accept + return filter.Accept, gro } if debugPackets { ns.logf("[v2] service packet in (from %v): % x", p.Src, p.Buffer()) } - ns.linkEP.injectInbound(p) - return filter.DropSilently + gro = ns.linkEP.gro(p, gro) + return filter.DropSilently, gro } func (ns *Impl) DialContextTCP(ctx context.Context, ipp netip.AddrPort) (*gonet.TCPConn, error) { @@ -821,9 +836,32 @@ func (ns *Impl) DialContextUDP(ctx context.Context, ipp netip.AddrPort) (*gonet. return gonet.DialUDP(ns.ipstack, nil, remoteAddress, ipType) } +// getInjectInboundBuffsSizes returns packet memory and a sizes slice for usage +// when calling tstun.Wrapper.InjectInboundPacketBuffer(). 
These are sized with +// consideration for MTU and GSO support on ns.linkEP. They should be recycled +// across subsequent inbound packet injection calls. +func (ns *Impl) getInjectInboundBuffsSizes() (buffs [][]byte, sizes []int) { + batchSize := 1 + gsoEnabled := ns.linkEP.SupportedGSO() == stack.HostGSOSupported + if gsoEnabled { + batchSize = conn.IdealBatchSize + } + buffs = make([][]byte, batchSize) + sizes = make([]int, batchSize) + for i := 0; i < batchSize; i++ { + if i == 0 && gsoEnabled { + buffs[i] = make([]byte, tstun.PacketStartOffset+ns.linkEP.GSOMaxSize()) + } else { + buffs[i] = make([]byte, tstun.PacketStartOffset+tstun.DefaultTUNMTU()) + } + } + return buffs, sizes +} + // The inject goroutine reads in packets that netstack generated, and delivers // them to the correct path. func (ns *Impl) inject() { + inboundBuffs, inboundBuffsSizes := ns.getInjectInboundBuffsSizes() for { pkt := ns.linkEP.ReadContext(ns.ctx) if pkt == nil { @@ -849,13 +887,13 @@ func (ns *Impl) inject() { // pkt has a non-zero refcount, so injection methods takes // ownership of one count and will decrement on completion. if sendToHost { - if err := ns.tundev.InjectInboundPacketBuffer(pkt); err != nil { - log.Printf("netstack inject inbound: %v", err) + if err := ns.tundev.InjectInboundPacketBuffer(pkt, inboundBuffs, inboundBuffsSizes); err != nil { + ns.logf("netstack inject inbound: %v", err) return } } else { if err := ns.tundev.InjectOutboundPacketBuffer(pkt); err != nil { - log.Printf("netstack inject outbound: %v", err) + ns.logf("netstack inject outbound: %v", err) return } } @@ -1040,14 +1078,14 @@ func (ns *Impl) userPing(dstIP netip.Addr, pingResPkt []byte, direction userPing // continue normally (typically being delivered to the host networking stack), // whereas returning filter.DropSilently is done when netstack intercepts the // packet and no further processing towards to host should be done. 
-func (ns *Impl) injectInbound(p *packet.Parsed, t *tstun.Wrapper) filter.Response { +func (ns *Impl) injectInbound(p *packet.Parsed, t *tstun.Wrapper, gro *gro.GRO) (filter.Response, *gro.GRO) { if ns.ctx.Err() != nil { - return filter.DropSilently + return filter.DropSilently, gro } if !ns.shouldProcessInbound(p, t) { // Let the host network stack (if any) deal with it. - return filter.Accept + return filter.Accept, gro } destIP := p.Dst.Addr() @@ -1067,13 +1105,13 @@ func (ns *Impl) injectInbound(p *packet.Parsed, t *tstun.Wrapper) filter.Respons pong = packet.Generate(&h, p.Payload()) } go ns.userPing(pingIP, pong, userPingDirectionOutbound) - return filter.DropSilently + return filter.DropSilently, gro } if debugPackets { ns.logf("[v2] packet in (from %v): % x", p.Src, p.Buffer()) } - ns.linkEP.enqueueGRO(p) + gro = ns.linkEP.gro(p, gro) // We've now delivered this to netstack, so we're done. // Instead of returning a filter.Accept here (which would also @@ -1081,7 +1119,7 @@ func (ns *Impl) injectInbound(p *packet.Parsed, t *tstun.Wrapper) filter.Respons // filter.Drop (which would log about rejected traffic), // instead return filter.DropSilently which just quietly stops // processing it in the tstun TUN wrapper. 
- return filter.DropSilently + return filter.DropSilently, gro } // shouldHandlePing returns whether or not netstack should handle an incoming @@ -1147,6 +1185,11 @@ func netaddrIPFromNetstackIP(s tcpip.Address) netip.Addr { return netip.Addr{} } +var ( + ipv4Loopback = netip.MustParseAddr("127.0.0.1") + ipv6Loopback = netip.MustParseAddr("::1") +) + func (ns *Impl) acceptTCP(r *tcp.ForwarderRequest) { reqDetails := r.ID() if debugNetstack() { @@ -1283,8 +1326,15 @@ func (ns *Impl) acceptTCP(r *tcp.ForwarderRequest) { return } } - if isTailscaleIP { - dialIP = netaddr.IPv4(127, 0, 0, 1) + switch { + case hittingServiceIP && ns.isLoopbackPort(reqDetails.LocalPort): + if dialIP == serviceIPv6 { + dialIP = ipv6Loopback + } else { + dialIP = ipv4Loopback + } + case isTailscaleIP: + dialIP = ipv4Loopback } dialAddr := netip.AddrPortFrom(dialIP, uint16(reqDetails.LocalPort)) @@ -1328,12 +1378,23 @@ func (ns *Impl) forwardTCP(getClient func(...tcpip.SettableSocketOption) *gonet. var stdDialer net.Dialer dialFunc = stdDialer.DialContext } - server, err := dialFunc(ctx, "tcp", dialAddrStr) + + // TODO: this is racy, dialing before we register our local address. See + // https://github.com/tailscale/tailscale/issues/1616. 
+ backend, err := dialFunc(ctx, "tcp", dialAddrStr) if err != nil { - ns.logf("netstack: could not connect to local server at %s: %v", dialAddr.String(), err) + ns.logf("netstack: could not connect to local backend server at %s: %v", dialAddr.String(), err) return } - defer server.Close() + defer backend.Close() + + backendLocalAddr := backend.LocalAddr().(*net.TCPAddr) + backendLocalIPPort := netaddr.Unmap(backendLocalAddr.AddrPort()) + if err := ns.pm.RegisterIPPortIdentity("tcp", backendLocalIPPort, clientRemoteIP); err != nil { + ns.logf("netstack: could not register TCP mapping %s: %v", backendLocalIPPort, err) + return + } + defer ns.pm.UnregisterIPPortIdentity("tcp", backendLocalIPPort) // If we get here, either the getClient call below will succeed and // return something we can Close, or it will fail and will properly @@ -1348,17 +1409,13 @@ func (ns *Impl) forwardTCP(getClient func(...tcpip.SettableSocketOption) *gonet. } defer client.Close() - backendLocalAddr := server.LocalAddr().(*net.TCPAddr) - backendLocalIPPort := netaddr.Unmap(backendLocalAddr.AddrPort()) - ns.pm.RegisterIPPortIdentity("tcp", backendLocalIPPort, clientRemoteIP) - defer ns.pm.UnregisterIPPortIdentity("tcp", backendLocalIPPort) connClosed := make(chan error, 2) go func() { - _, err := io.Copy(server, client) + _, err := io.Copy(backend, client) connClosed <- err }() go func() { - _, err := io.Copy(client, server) + _, err := io.Copy(client, backend) connClosed <- err }() err = <-connClosed @@ -1435,16 +1492,23 @@ func (ns *Impl) acceptUDP(r *udp.ForwarderRequest) { return } - // Handle magicDNS traffic (via UDP) here. + // Handle magicDNS and loopback traffic (via UDP) here. 
if dst := dstAddr.Addr(); dst == serviceIP || dst == serviceIPv6 { - if dstAddr.Port() != 53 { + switch { + case dstAddr.Port() == 53: + c := gonet.NewUDPConn(&wq, ep) + go ns.handleMagicDNSUDP(srcAddr, c) + return + case ns.isLoopbackPort(dstAddr.Port()): + if dst == serviceIPv6 { + dstAddr = netip.AddrPortFrom(ipv6Loopback, dstAddr.Port()) + } else { + dstAddr = netip.AddrPortFrom(ipv4Loopback, dstAddr.Port()) + } + default: ep.Close() - return // Only MagicDNS traffic runs on the service IPs for now. + return // Only MagicDNS and loopback traffic runs on the service IPs for now. } - - c := gonet.NewUDPConn(&wq, ep) - go ns.handleMagicDNSUDP(srcAddr, c) - return } if get := ns.GetUDPHandlerForFlow; get != nil { @@ -1523,9 +1587,17 @@ func (ns *Impl) forwardUDP(client *gonet.UDPConn, clientAddr, dstAddr netip.Addr var backendListenAddr *net.UDPAddr var backendRemoteAddr *net.UDPAddr isLocal := ns.isLocalIP(dstAddr.Addr()) + isLoopback := dstAddr.Addr() == ipv4Loopback || dstAddr.Addr() == ipv6Loopback if isLocal { backendRemoteAddr = &net.UDPAddr{IP: net.ParseIP("127.0.0.1"), Port: int(port)} backendListenAddr = &net.UDPAddr{IP: net.ParseIP("127.0.0.1"), Port: int(srcPort)} + } else if isLoopback { + ip := net.IP(ipv4Loopback.AsSlice()) + if dstAddr.Addr() == ipv6Loopback { + ip = ipv6Loopback.AsSlice() + } + backendRemoteAddr = &net.UDPAddr{IP: ip, Port: int(port)} + backendListenAddr = &net.UDPAddr{IP: ip, Port: int(srcPort)} } else { if dstIP := dstAddr.Addr(); viaRange.Contains(dstIP) { dstAddr = netip.AddrPortFrom(tsaddr.UnmapVia(dstIP), dstAddr.Port()) @@ -1555,7 +1627,10 @@ func (ns *Impl) forwardUDP(client *gonet.UDPConn, clientAddr, dstAddr netip.Addr ns.logf("could not get backend local IP:port from %v:%v", backendLocalAddr.IP, backendLocalAddr.Port) } if isLocal { - ns.pm.RegisterIPPortIdentity("udp", backendLocalIPPort, clientAddr.Addr()) + if err := ns.pm.RegisterIPPortIdentity("udp", backendLocalIPPort, clientAddr.Addr()); err != nil { + 
ns.logf("netstack: could not register UDP mapping %s: %v", backendLocalIPPort, err) + return + } } ctx, cancel := context.WithCancel(context.Background()) diff --git a/wgengine/netstack/netstack_test.go b/wgengine/netstack/netstack_test.go index 43287d8763831..6be61cd58fa35 100644 --- a/wgengine/netstack/netstack_test.go +++ b/wgengine/netstack/netstack_test.go @@ -79,7 +79,7 @@ func TestInjectInboundLeak(t *testing.T) { const N = 10_000 ms0 := getMemStats() for range N { - outcome := ns.injectInbound(pkt, tunWrap) + outcome, _ := ns.injectInbound(pkt, tunWrap, nil) if outcome != filter.DropSilently { t.Fatalf("got outcome %v; want DropSilently", outcome) } @@ -569,7 +569,7 @@ func TestTCPForwardLimits(t *testing.T) { // When injecting this packet, we want the outcome to be "drop // silently", which indicates that netstack is processing the // packet and not delivering it to the host system. - if resp := impl.injectInbound(&parsed, impl.tundev); resp != filter.DropSilently { + if resp, _ := impl.injectInbound(&parsed, impl.tundev, nil); resp != filter.DropSilently { t.Errorf("got filter outcome %v, want filter.DropSilently", resp) } @@ -587,7 +587,7 @@ func TestTCPForwardLimits(t *testing.T) { // Inject another packet, which will be deduplicated and thus not // increment our counter. parsed.Decode(pkt) - if resp := impl.injectInbound(&parsed, impl.tundev); resp != filter.DropSilently { + if resp, _ := impl.injectInbound(&parsed, impl.tundev, nil); resp != filter.DropSilently { t.Errorf("got filter outcome %v, want filter.DropSilently", resp) } @@ -655,7 +655,7 @@ func TestTCPForwardLimits_PerClient(t *testing.T) { // When injecting this packet, we want the outcome to be "drop // silently", which indicates that netstack is processing the // packet and not delivering it to the host system. 
- if resp := impl.injectInbound(&parsed, impl.tundev); resp != filter.DropSilently { + if resp, _ := impl.injectInbound(&parsed, impl.tundev, nil); resp != filter.DropSilently { t.Fatalf("got filter outcome %v, want filter.DropSilently", resp) } } @@ -750,7 +750,7 @@ func TestHandleLocalPackets(t *testing.T) { Dst: netip.MustParseAddrPort("100.100.100.100:53"), TCPFlags: packet.TCPSyn, } - resp := impl.handleLocalPackets(pkt, impl.tundev) + resp, _ := impl.handleLocalPackets(pkt, impl.tundev, nil) if resp != filter.DropSilently { t.Errorf("got filter outcome %v, want filter.DropSilently", resp) } @@ -767,7 +767,7 @@ func TestHandleLocalPackets(t *testing.T) { Dst: netip.MustParseAddrPort("[fd7a:115c:a1e0:b1a:0:7:a01:109]:5678"), TCPFlags: packet.TCPSyn, } - resp := impl.handleLocalPackets(pkt, impl.tundev) + resp, _ := impl.handleLocalPackets(pkt, impl.tundev, nil) // DropSilently is the outcome we expected, since we actually // handled this packet by injecting it into netstack, which @@ -789,7 +789,7 @@ func TestHandleLocalPackets(t *testing.T) { Dst: netip.MustParseAddrPort("[fd7a:115c:a1e0:b1a:0:63:a01:109]:5678"), TCPFlags: packet.TCPSyn, } - resp := impl.handleLocalPackets(pkt, impl.tundev) + resp, _ := impl.handleLocalPackets(pkt, impl.tundev, nil) // Accept means that handleLocalPackets does not handle this // packet, we "accept" it to continue further processing, diff --git a/wgengine/router/router_linux_test.go b/wgengine/router/router_linux_test.go index a62855e8ac225..f55361225a302 100644 --- a/wgengine/router/router_linux_test.go +++ b/wgengine/router/router_linux_test.go @@ -19,8 +19,8 @@ import ( "testing" "github.com/google/go-cmp/cmp" + "github.com/tailscale/netlink" "github.com/tailscale/wireguard-go/tun" - "github.com/vishvananda/netlink" "go4.org/netipx" "tailscale.com/health" "tailscale.com/net/netmon" diff --git a/wgengine/userspace.go b/wgengine/userspace.go index 1a3c7637fee00..f6b4586cbeadb 100644 --- a/wgengine/userspace.go +++ 
b/wgengine/userspace.go @@ -54,6 +54,7 @@ import ( "tailscale.com/wgengine/filter" "tailscale.com/wgengine/magicsock" "tailscale.com/wgengine/netlog" + "tailscale.com/wgengine/netstack/gro" "tailscale.com/wgengine/router" "tailscale.com/wgengine/wgcfg" "tailscale.com/wgengine/wgint" @@ -491,6 +492,7 @@ func NewUserspaceEngine(logf logger.Logf, conf Config) (_ Engine, reterr error) if err := e.router.Up(); err != nil { return nil, fmt.Errorf("router.Up: %w", err) } + tsTUNDev.SetLinkFeaturesPostUp() // It's a little pointless to apply no-op settings here (they // should already be empty?), but it at least exercises the @@ -519,7 +521,7 @@ func NewUserspaceEngine(logf logger.Logf, conf Config) (_ Engine, reterr error) } // echoRespondToAll is an inbound post-filter responding to all echo requests. -func echoRespondToAll(p *packet.Parsed, t *tstun.Wrapper) filter.Response { +func echoRespondToAll(p *packet.Parsed, t *tstun.Wrapper, gro *gro.GRO) (filter.Response, *gro.GRO) { if p.IsEchoRequest() { header := p.ICMP4Header() header.ToResponse() @@ -531,9 +533,9 @@ func echoRespondToAll(p *packet.Parsed, t *tstun.Wrapper) filter.Response { // it away. If this ever gets run in non-fake mode, you'll // get double responses to pings, which is an indicator you // shouldn't be doing that I guess.) - return filter.Accept + return filter.Accept, gro } - return filter.Accept + return filter.Accept, gro } // handleLocalPackets inspects packets coming from the local network