mirror of https://github.com/tailscale/tailscale.git
tstest/integration: expand the tests for tailscale up
Expand the integration tests to cover a wider range of scenarios, including:

* Before and after a successful initial login
* Auth URLs and auth keys
* With and without the `--force-reauth` flag
* With and without seamless key renewal

These tests expose a race condition when using `--force-reauth` on an already-logged-in device. The command completes too quickly, preventing the auth URL from being displayed. This issue is identified and will be fixed in a separate commit.

Updates #17108

Signed-off-by: Alex Chan <alexc@tailscale.com>
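For context, the failure mode described above comes down to how `tailscale up` decides it is finished: it watches IPN notifications and returns once the backend reports the "Running" state. The sketch below is a hypothetical distillation of that wait loop, not the actual CLI code; it only assumes the real `ipn.Notify` shape (`State`, `BrowseToURL`) from `tailscale.com/ipn`.

```go
package raceexample

import (
	"errors"

	"tailscale.com/ipn"
)

// waitForUp is a simplified, illustrative stand-in for the wait loop that
// "tailscale up" runs over IPN notifications. On a device that is already
// logged in and Running, a forced reauth can satisfy the Running check before
// a BrowseToURL notification carrying the new auth URL arrives, so the URL is
// never printed. That is the race the commit message refers to.
func waitForUp(notify <-chan ipn.Notify, printAuthURL func(string)) error {
	for n := range notify {
		if n.BrowseToURL != nil {
			printAuthURL(*n.BrowseToURL) // may never run if we return first
		}
		if n.State != nil && *n.State == ipn.Running {
			return nil // already Running: returns immediately after --force-reauth
		}
	}
	return errors.New("notification stream ended before reaching Running")
}
```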
@@ -23,6 +23,7 @@ import (
"regexp"
"runtime"
"strconv"
"strings"
"sync/atomic"
"testing"
"time"
@@ -267,52 +268,168 @@ func TestStateSavedOnStart(t *testing.T) {
}

func TestOneNodeUpAuth(t *testing.T) {
tstest.Shard(t)
tstest.Parallel(t)
env := NewTestEnv(t, ConfigureControl(func(control *testcontrol.Server) {
control.RequireAuth = true
}))
for _, tt := range []struct {
name string
args []string
//
// What auth key should we use for control?
authKey string
//
// Is tailscaled already logged in before we run this `up` command?
alreadyLoggedIn bool
//
// Do we need to log in again with a new /auth/ URL?
needsNewAuthURL bool
}{
{
name: "up",
args: []string{"up"},
needsNewAuthURL: true,
},
{
name: "up-with-force-reauth",
args: []string{"up", "--force-reauth"},
needsNewAuthURL: true,
},
{
name: "up-with-auth-key",
args: []string{"up", "--auth-key=opensesame"},
authKey: "opensesame",
needsNewAuthURL: false,
},
{
name: "up-with-force-reauth-and-auth-key",
args: []string{"up", "--force-reauth", "--auth-key=opensesame"},
authKey: "opensesame",
needsNewAuthURL: false,
},
{
name: "up-after-login",
args: []string{"up"},
alreadyLoggedIn: true,
needsNewAuthURL: false,
},
// TODO(alexc): This test is failing because of a bug in `tailscale up` where
// it waits for ipn to enter the "Running" state. If we're already logged in
// and running, this completes immediately, before we've had a chance to show
// the user the auth URL.
// {
// name: "up-with-force-reauth-after-login",
// args: []string{"up", "--force-reauth"},
// alreadyLoggedIn: true,
// needsNewAuthURL: true,
// },
{
name: "up-with-auth-key-after-login",
args: []string{"up", "--auth-key=opensesame"},
authKey: "opensesame",
alreadyLoggedIn: true,
needsNewAuthURL: false,
},
{
name: "up-with-force-reauth-and-auth-key-after-login",
args: []string{"up", "--force-reauth", "--auth-key=opensesame"},
authKey: "opensesame",
alreadyLoggedIn: true,
needsNewAuthURL: false,
},
} {
tstest.Shard(t)

n1 := NewTestNode(t, env)
d1 := n1.StartDaemon()
for _, useSeamlessKeyRenewal := range []bool{true, false} {
tt := tt // subtests are run in parallel, rebind tt
t.Run(fmt.Sprintf("%s-seamless-%t", tt.name, useSeamlessKeyRenewal), func(t *testing.T) {
tstest.Parallel(t)

n1.AwaitListening()
env := NewTestEnv(t, ConfigureControl(
func(control *testcontrol.Server) {
if tt.authKey != "" {
control.RequireAuthKey = tt.authKey
} else {
control.RequireAuth = true
}

st := n1.MustStatus()
t.Logf("Status: %s", st.BackendState)
control.AllNodesSameUser = true

t.Logf("Running up --login-server=%s ...", env.ControlURL())
if useSeamlessKeyRenewal {
control.DefaultNodeCapabilities = &tailcfg.NodeCapMap{
tailcfg.NodeAttrSeamlessKeyRenewal: []tailcfg.RawMessage{},
}
}
},
))

cmd := n1.Tailscale("up", "--login-server="+env.ControlURL())
var authCountAtomic atomic.Int32
cmd.Stdout = &authURLParserWriter{fn: func(urlStr string) error {
t.Logf("saw auth URL %q", urlStr)
if env.Control.CompleteAuth(urlStr) {
if authCountAtomic.Add(1) > 1 {
err := errors.New("completed multple auth URLs")
t.Error(err)
return err
}
t.Logf("completed auth path %s", urlStr)
return nil
n1 := NewTestNode(t, env)
d1 := n1.StartDaemon()
defer d1.MustCleanShutdown(t)

cmdArgs := append(tt.args, "--login-server="+env.ControlURL())

// This handler looks for /auth/ URLs in the stdout from "tailscale up",
// and if it sees them, completes the auth process.
//
// It counts how many auth URLs it's seen.
var authCountAtomic atomic.Int32
authURLHandler := &authURLParserWriter{fn: func(urlStr string) error {
t.Logf("saw auth URL %q", urlStr)
if env.Control.CompleteAuth(urlStr) {
if authCountAtomic.Add(1) > 1 {
err := errors.New("completed multiple auth URLs")
t.Error(err)
return err
}
t.Logf("completed login to %s", urlStr)
return nil
} else {
err := fmt.Errorf("Failed to complete initial login to %q", urlStr)
t.Fatal(err)
return err
}
}}

// If we should be logged in at the start of the test case, go ahead
// and run the login command.
//
// Otherwise, just wait for tailscaled to be listening.
if tt.alreadyLoggedIn {
t.Logf("Running initial login: %s", strings.Join(cmdArgs, " "))
cmd := n1.Tailscale(cmdArgs...)
cmd.Stdout = authURLHandler
cmd.Stderr = cmd.Stdout
if err := cmd.Run(); err != nil {
t.Fatalf("up: %v", err)
}
authCountAtomic.Store(0)
n1.AwaitRunning()
} else {
n1.AwaitListening()
}

st := n1.MustStatus()
t.Logf("Status: %s", st.BackendState)

t.Logf("Running command: %s", strings.Join(cmdArgs, " "))
cmd := n1.Tailscale(cmdArgs...)
cmd.Stdout = authURLHandler
cmd.Stderr = cmd.Stdout

if err := cmd.Run(); err != nil {
t.Fatalf("up: %v", err)
}
t.Logf("Got IP: %v", n1.AwaitIP4())

n1.AwaitRunning()

var expectedAuthUrls int32
if tt.needsNewAuthURL {
expectedAuthUrls = 1
}
if n := authCountAtomic.Load(); n != expectedAuthUrls {
t.Errorf("Auth URLs completed = %d; want %d", n, expectedAuthUrls)
}
})
}
err := fmt.Errorf("Failed to complete auth path to %q", urlStr)
t.Error(err)
return err
}}
cmd.Stderr = cmd.Stdout
if err := cmd.Run(); err != nil {
t.Fatalf("up: %v", err)
}
t.Logf("Got IP: %v", n1.AwaitIP4())

n1.AwaitRunning()

if n := authCountAtomic.Load(); n != 1 {
t.Errorf("Auth URLs completed = %d; want 1", n)
}

d1.MustCleanShutdown(t)
}
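Aside: `authURLParserWriter`, used as `cmd.Stdout` throughout this test, is defined elsewhere in this test file and is not part of the diff. The snippet below is a simplified, self-contained illustration of the pattern (an `io.Writer` that regexp-scans command output for control-server auth URLs and hands each new one to a callback); the type name `urlScanWriter` and the URL pattern are assumptions, not the real implementation.

```go
package authscan

import (
	"bytes"
	"regexp"
	"sync"
)

// authURLRe is an assumed pattern: any http(s) URL whose path contains /auth/.
var authURLRe = regexp.MustCompile(`https?://\S+/auth/\S+`)

// urlScanWriter is a minimal stand-in for the test's authURLParserWriter:
// it buffers everything written to it (e.g. as cmd.Stdout) and invokes fn
// once for every auth URL that appears in the output.
type urlScanWriter struct {
	mu   sync.Mutex
	buf  bytes.Buffer
	seen map[string]bool
	fn   func(urlStr string) error
}

func (w *urlScanWriter) Write(p []byte) (int, error) {
	w.mu.Lock()
	defer w.mu.Unlock()
	w.buf.Write(p)
	if w.seen == nil {
		w.seen = make(map[string]bool)
	}
	for _, u := range authURLRe.FindAllString(w.buf.String(), -1) {
		if !w.seen[u] {
			w.seen[u] = true
			if err := w.fn(u); err != nil {
				return len(p), err
			}
		}
	}
	return len(p), nil
}
```

In the test, the callback passed as `fn` calls `env.Control.CompleteAuth(urlStr)` and counts how many URLs it completed.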

func TestConfigFileAuthKey(t *testing.T) {

@@ -66,6 +66,9 @@ type Server struct {
// belong to the same user.
AllNodesSameUser bool

// DefaultNodeCapabilities overrides the capability map sent to each client.
DefaultNodeCapabilities *tailcfg.NodeCapMap

// ExplicitBaseURL or HTTPTestServer must be set.
ExplicitBaseURL string // e.g. "http://127.0.0.1:1234" with no trailing URL
HTTPTestServer *httptest.Server // if non-nil, used to get BaseURL
@@ -726,6 +729,25 @@ func (s *Server) serveRegister(w http.ResponseWriter, r *http.Request, mkey key.
// some follow-ups? For now all are successes.
}

// The in-memory list of nodes, users, and logins is keyed by
// the node key. If the node key changes, update all the data stores
// to use the new node key.
s.mu.Lock()
if _, oldNodeKeyOk := s.nodes[req.OldNodeKey]; oldNodeKeyOk {
if _, newNodeKeyOk := s.nodes[req.NodeKey]; !newNodeKeyOk {
s.nodes[req.OldNodeKey].Key = req.NodeKey
s.nodes[req.NodeKey] = s.nodes[req.OldNodeKey]

s.users[req.NodeKey] = s.users[req.OldNodeKey]
s.logins[req.NodeKey] = s.logins[req.OldNodeKey]

delete(s.nodes, req.OldNodeKey)
delete(s.users, req.OldNodeKey)
delete(s.logins, req.OldNodeKey)
}
}
s.mu.Unlock()

nk := req.NodeKey

user, login := s.getUser(nk)
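The re-keying block in the hunk above performs the same move-under-a-new-key operation on three maps (`s.nodes`, `s.users`, `s.logins`). As a generic distillation of that bookkeeping (an illustration, not code from this commit, and assuming all three maps share the node-key type):

```go
package rekey

// moveKey moves the entry stored under oldKey to newKey, but only if oldKey
// exists and newKey is not already taken, mirroring the guard conditions the
// test control server applies before re-keying its node, user, and login maps.
func moveKey[K comparable, V any](m map[K]V, oldKey, newKey K) {
	v, ok := m[oldKey]
	if !ok {
		return
	}
	if _, taken := m[newKey]; taken {
		return
	}
	m[newKey] = v
	delete(m, oldKey)
}
```

With such a helper, the paired assignments and deletes above would collapse to one call per map, with the node's own `Key` field still updated separately.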
@@ -745,6 +767,19 @@ func (s *Server) serveRegister(w http.ResponseWriter, r *http.Request, mkey key.
v4Prefix,
v6Prefix,
}

var capMap tailcfg.NodeCapMap
if s.DefaultNodeCapabilities != nil {
capMap = *s.DefaultNodeCapabilities
} else {
capMap = tailcfg.NodeCapMap{
tailcfg.CapabilityHTTPS: []tailcfg.RawMessage{},
tailcfg.NodeAttrFunnel: []tailcfg.RawMessage{},
tailcfg.CapabilityFileSharing: []tailcfg.RawMessage{},
tailcfg.CapabilityFunnelPorts + "?ports=8080,443": []tailcfg.RawMessage{},
}
}

node := &tailcfg.Node{
ID: tailcfg.NodeID(nodeID),
StableID: tailcfg.StableNodeID(fmt.Sprintf("TESTCTRL%08x", int(nodeID))),
@@ -757,12 +792,8 @@ func (s *Server) serveRegister(w http.ResponseWriter, r *http.Request, mkey key.
Hostinfo: req.Hostinfo.View(),
Name: req.Hostinfo.Hostname,
Cap: req.Version,
Capabilities: []tailcfg.NodeCapability{
tailcfg.CapabilityHTTPS,
tailcfg.NodeAttrFunnel,
tailcfg.CapabilityFileSharing,
tailcfg.CapabilityFunnelPorts + "?ports=8080,443",
},
CapMap: capMap,
Capabilities: slices.Collect(maps.Keys(capMap)),
}
s.nodes[nk] = node
}
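To tie the two halves of the diff together: the new `DefaultNodeCapabilities` field is what the seamless-key-renewal subtests flip per run, and `serveRegister` falls back to the default HTTPS/Funnel/FileSharing set when it is nil. A minimal sketch of that usage, written in the style of the integration test above (illustrative only, not part of this commit):

```go
package integration

import (
	"testing"

	"tailscale.com/tailcfg"
	"tailscale.com/tstest/integration/testcontrol"
)

// TestSeamlessCapSketch shows how a test can override the capability map that
// the fake control server hands to every node: here each node is granted
// NodeAttrSeamlessKeyRenewal, while leaving DefaultNodeCapabilities nil keeps
// the default capability set built in serveRegister.
func TestSeamlessCapSketch(t *testing.T) {
	env := NewTestEnv(t, ConfigureControl(func(control *testcontrol.Server) {
		control.RequireAuth = true
		control.DefaultNodeCapabilities = &tailcfg.NodeCapMap{
			tailcfg.NodeAttrSeamlessKeyRenewal: []tailcfg.RawMessage{},
		}
	}))

	n1 := NewTestNode(t, env)
	d1 := n1.StartDaemon()
	defer d1.MustCleanShutdown(t)

	n1.AwaitListening()
}
```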