feat(cli): allow SSH command to connect to running container by johnstcn · Pull Request #16726 · coder/coder
feat(cli): allow SSH command to connect to running container #16726

Merged · 4 commits · Feb 28, 2025
Changes from 1 commit
feat(cli): add capability for SSH command to connect to a running container
johnstcn committed Feb 27, 2025
commit 3dc994af41dcfa3a1c9b2c0727a101cf08cdb0d4
2 changes: 2 additions & 0 deletions agent/agent.go
@@ -307,6 +307,8 @@ func (a *agent) init() {

return a.reportConnection(id, connectionType, ip)
},

ExperimentalContainersEnabled: a.experimentalDevcontainersEnabled,
})
if err != nil {
panic(err)
60 changes: 48 additions & 12 deletions agent/agentssh/agentssh.go
@@ -29,6 +29,7 @@ import (

"cdr.dev/slog"

"github.com/coder/coder/v2/agent/agentcontainers"
"github.com/coder/coder/v2/agent/agentexec"
"github.com/coder/coder/v2/agent/agentrsa"
"github.com/coder/coder/v2/agent/usershell"
@@ -104,6 +105,9 @@ type Config struct {
BlockFileTransfer bool
// ReportConnection.
ReportConnection reportConnectionFunc
// Experimental: allow connecting to running containers if
// CODER_AGENT_DEVCONTAINERS_ENABLE=true.
ExperimentalContainersEnabled bool
}

type Server struct {
@@ -324,6 +328,22 @@ func (s *sessionCloseTracker) Close() error {
return s.Session.Close()
}

func extractContainerInfo(env []string) (container, containerUser string, filteredEnv []string) {
for _, kv := range env {
if strings.HasPrefix(kv, "CODER_CONTAINER=") {
container = strings.TrimPrefix(kv, "CODER_CONTAINER=")
}

if strings.HasPrefix(kv, "CODER_CONTAINER_USER=") {
containerUser = strings.TrimPrefix(kv, "CODER_CONTAINER_USER=")
}
}

return container, containerUser, slices.DeleteFunc(env, func(kv string) bool {
return strings.HasPrefix(kv, "CODER_CONTAINER=") || strings.HasPrefix(kv, "CODER_CONTAINER_USER=")
})
johnstcn (Member, Author):
review: inspired by the magic session stuff, this seems to be the only real way to smuggle info between the client and the server.

}
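To make the env-var handoff concrete, here is a minimal sketch (not part of the diff above) of what extractContainerInfo would return for a typical environment slice; the container name and user are invented sample values.

	// Illustrative sketch only; sample values, not part of this PR.
	env := []string{
		"TERM=xterm-256color",
		"CODER_CONTAINER=my-devcontainer",
		"CODER_CONTAINER_USER=coder",
	}
	container, containerUser, filtered := extractContainerInfo(env)
	// container == "my-devcontainer"
	// containerUser == "coder"
	// filtered == []string{"TERM=xterm-256color"}
	// Both CODER_CONTAINER* entries are stripped so they are not forwarded to
	// the command that eventually runs inside the container.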

func (s *Server) sessionHandler(session ssh.Session) {
ctx := session.Context()
id := uuid.New()
@@ -353,6 +373,7 @@ func (s *Server) sessionHandler(session ssh.Session) {
defer s.trackSession(session, false)

reportSession := true

switch magicType {
case MagicSessionTypeVSCode:
s.connCountVSCode.Add(1)
@@ -395,9 +416,19 @@ func (s *Server) sessionHandler(session ssh.Session) {
return
}

container, containerUser, env := extractContainerInfo(env)
s.logger.Debug(ctx, "container info",
slog.F("container", container),
slog.F("container_user", containerUser),
)

switch ss := session.Subsystem(); ss {
case "":
case "sftp":
if s.config.ExperimentalContainersEnabled && container != "" {
closeCause("sftp not yet supported with containers")
return
}
err := s.sftpHandler(logger, session)
if err != nil {
closeCause(err.Error())
@@ -422,7 +453,7 @@ func (s *Server) sessionHandler(session ssh.Session) {
env = append(env, fmt.Sprintf("DISPLAY=localhost:%d.%d", display, x11.ScreenNumber))
}

err := s.sessionStart(logger, session, env, magicType)
err := s.sessionStart(logger, session, env, magicType, container, containerUser)
var exitError *exec.ExitError
if xerrors.As(err, &exitError) {
code := exitError.ExitCode()
@@ -495,30 +526,35 @@ func (s *Server) fileTransferBlocked(session ssh.Session) bool {
return false
}

func (s *Server) sessionStart(logger slog.Logger, session ssh.Session, env []string, magicType MagicSessionType) (retErr error) {
func (s *Server) sessionStart(logger slog.Logger, session ssh.Session, env []string, magicType MagicSessionType, container, containerUser string) (retErr error) {
johnstcn (Member, Author):
review: think it might be time to consider an opts struct?

Reply from a maintainer (Member):
It could be done, but no need to do that yet IMO. If this grows more, then probably.

I'd actually want us to refactor the whole session handling and manage our own *session in session.go or some such. But that's a story for another time.
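As a purely hypothetical illustration of that suggestion (the struct and field names below are invented and are not part of this PR), an options struct might look like:

	// Hypothetical sketch only; not part of this change.
	type sessionStartOptions struct {
		Env           []string
		MagicType     MagicSessionType
		Container     string
		ContainerUser string
	}

	// sessionStart would then read these fields instead of growing its
	// positional parameter list with every new per-session setting:
	//
	//	func (s *Server) sessionStart(logger slog.Logger, session ssh.Session, opts sessionStartOptions) (retErr error)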

ctx := session.Context()

magicTypeLabel := magicTypeMetricLabel(magicType)
sshPty, windowSize, isPty := session.Pty()
ptyLabel := "no"
if isPty {
ptyLabel = "yes"
}

cmd, err := s.CreateCommand(ctx, session.RawCommand(), env, nil)
if err != nil {
ptyLabel := "no"
if isPty {
ptyLabel = "yes"
// plumb in envinfoer here to modify command for container exec?
var ei usershell.EnvInfoer
var err error
if s.config.ExperimentalContainersEnabled && container != "" {
ei, err = agentcontainers.EnvInfo(ctx, s.Execer, container, containerUser)
if err != nil {
s.metrics.sessionErrors.WithLabelValues(magicTypeLabel, ptyLabel, "container_env_info").Add(1)
return err
}
}
cmd, err := s.CreateCommand(ctx, session.RawCommand(), env, ei)
if err != nil {
s.metrics.sessionErrors.WithLabelValues(magicTypeLabel, ptyLabel, "create_command").Add(1)
return err
}

if ssh.AgentRequested(session) {
l, err := ssh.NewAgentListener()
if err != nil {
ptyLabel := "no"
if isPty {
ptyLabel = "yes"
}

s.metrics.sessionErrors.WithLabelValues(magicTypeLabel, ptyLabel, "listener").Add(1)
return xerrors.Errorf("new agent listener: %w", err)
}
45 changes: 45 additions & 0 deletions cli/ssh.go
@@ -76,6 +76,9 @@ func (r *RootCmd) ssh() *serpent.Command {
appearanceConfig codersdk.AppearanceConfig
networkInfoDir string
networkInfoInterval time.Duration

container string
containerUser string
)
client := new(codersdk.Client)
cmd := &serpent.Command{
@@ -282,6 +285,23 @@ func (r *RootCmd) ssh() *serpent.Command {
}
conn.AwaitReachable(ctx)

if container != "" {
cts, err := client.WorkspaceAgentListContainers(ctx, workspaceAgent.ID, nil)
if err != nil {
return xerrors.Errorf("list containers: %w", err)
}
var found bool
for _, c := range cts.Containers {
if c.FriendlyName == container || c.ID == container {
found = true
break
}
}
if !found {
return xerrors.Errorf("container not found: %q", container)
}
}

stopPolling := tryPollWorkspaceAutostop(ctx, client, workspace)
defer stopPolling()

@@ -454,6 +474,17 @@ func (r *RootCmd) ssh() *serpent.Command {
}
}

if container != "" {
for k, v := range map[string]string{
"CODER_CONTAINER": container,
"CODER_CONTAINER_USER": containerUser,
} {
if err := sshSession.Setenv(k, v); err != nil {
return xerrors.Errorf("setenv: %w", err)
}
}
}

err = sshSession.RequestPty("xterm-256color", 128, 128, gossh.TerminalModes{})
if err != nil {
return xerrors.Errorf("request pty: %w", err)
@@ -594,6 +625,20 @@ func (r *RootCmd) ssh() *serpent.Command {
Default: "5s",
Value: serpent.DurationOf(&networkInfoInterval),
},
{
Flag: "container",
FlagShorthand: "c",
Description: "Specifies a container inside the workspace to connect to.",
Value: serpent.StringOf(&container),
Hidden: true, // Hidden until this feature is at least in beta.
},
{
Flag: "container-user",
FlagShorthand: "u",
Description: "When connecting to a container, specifies the user to connect as.",
Value: serpent.StringOf(&containerUser),
Hidden: true, // Hidden until this feature is at least in beta.
},
sshDisableAutostartOption(serpent.BoolOf(&disableAutostart)),
}
return cmd
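With these hidden flags in place, connecting to a running container would look something like coder ssh my-workspace --container my-container --container-user root (or -c/-u for short); the workspace, container, and user names here are placeholders, and the agent must be running with CODER_AGENT_DEVCONTAINERS_ENABLE=true for the experimental container path to be active.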
72 changes: 72 additions & 0 deletions cli/ssh_test.go
@@ -24,6 +24,8 @@ import (
"time"

"github.com/google/uuid"
"github.com/ory/dockertest/v3"
"github.com/ory/dockertest/v3/docker"
"github.com/spf13/afero"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
@@ -1924,6 +1926,76 @@ Expire-Date: 0
<-cmdDone
}

func TestSSH_Container(t *testing.T) {
t.Parallel()
if runtime.GOOS != "linux" {
t.Skip("Skipping test on non-Linux platform")
}

t.Run("OK", func(t *testing.T) {
t.Parallel()

client, workspace, agentToken := setupWorkspaceForAgent(t)
ctx := testutil.Context(t, testutil.WaitLong)
pool, err := dockertest.NewPool("")
require.NoError(t, err, "Could not connect to docker")
ct, err := pool.RunWithOptions(&dockertest.RunOptions{
Repository: "busybox",
Tag: "latest",
Cmd:        []string{"sleep", "infinity"},
}, func(config *docker.HostConfig) {
config.AutoRemove = true
config.RestartPolicy = docker.RestartPolicy{Name: "no"}
})
require.NoError(t, err, "Could not start container")
// Wait for container to start
require.Eventually(t, func() bool {
ct, ok := pool.ContainerByName(ct.Container.Name)
return ok && ct.Container.State.Running
}, testutil.WaitShort, testutil.IntervalSlow, "Container did not start in time")
t.Cleanup(func() {
err := pool.Purge(ct)
require.NoError(t, err, "Could not stop container")
})

_ = agenttest.New(t, client.URL, agentToken, func(o *agent.Options) {
o.ExperimentalContainersEnabled = true
})
_ = coderdtest.NewWorkspaceAgentWaiter(t, client, workspace.ID).Wait()

inv, root := clitest.New(t, "ssh", workspace.Name, "-c", ct.Container.ID)
clitest.SetupConfig(t, client, root)
ptty := ptytest.New(t).Attach(inv)

cmdDone := tGo(t, func() {
err := inv.WithContext(ctx).Run()
assert.NoError(t, err)
})

ptty.ExpectMatch(" #")
ptty.WriteLine("hostname")
ptty.ExpectMatch(ct.Container.Config.Hostname)
ptty.WriteLine("exit")
<-cmdDone
})

t.Run("NotFound", func(t *testing.T) {
t.Parallel()

ctx := testutil.Context(t, testutil.WaitShort)
client, workspace, agentToken := setupWorkspaceForAgent(t)
_ = agenttest.New(t, client.URL, agentToken, func(o *agent.Options) {
o.ExperimentalContainersEnabled = true
})
_ = coderdtest.NewWorkspaceAgentWaiter(t, client, workspace.ID).Wait()

inv, root := clitest.New(t, "ssh", workspace.Name, "-c", uuid.NewString())
clitest.SetupConfig(t, client, root)
err := inv.WithContext(ctx).Run()
require.ErrorContains(t, err, "container not found:")
})
}

// tGoContext runs fn in a goroutine passing a context that will be
// canceled on test completion and wait until fn has finished executing.
// Done and cancel are returned for optionally waiting until completion