diff --git a/AGENTS.md b/AGENTS.md index fa95d2273..54e8a8355 100644 --- a/AGENTS.md +++ b/AGENTS.md @@ -138,6 +138,8 @@ CLI (roborev) -> HTTP API -> Daemon -> Worker Pool -> Agent adapters ## Testing +Use `testify` (`github.com/stretchr/testify`) for all test assertions. Use `require.*` for fatal preconditions and `assert.*` for non-fatal checks. Do not use raw `if`/`t.Errorf`/`t.Fatalf` patterns. + - After any Go code changes, run `go fmt ./...` and `go vet ./...` before committing. - Fast test pass: `go test ./...` - Integration tests: `go test -tags=integration ./...` diff --git a/CLAUDE.md b/CLAUDE.md index 64cf4cd17..7cbb0f946 100644 --- a/CLAUDE.md +++ b/CLAUDE.md @@ -341,6 +341,9 @@ roborev tui # Terminal UI - `test` agent is always available (no PATH lookup) - Worker pool test hooks enable deterministic synchronization - Table-driven tests are preferred +- Use `testify` (`assert`/`require`) for all test assertions -- do not use raw `if`/`t.Errorf`/`t.Fatalf` patterns +- `require.*` for fatal preconditions (test cannot continue if this fails), `assert.*` for non-fatal checks +- In tests with more than three assertions, prefer `assert := assert.New(t)` shorthand ## Conventions diff --git a/cmd/roborev/tui/action_test.go b/cmd/roborev/tui/action_test.go index 8c3a6d4b3..388fc69e7 100644 --- a/cmd/roborev/tui/action_test.go +++ b/cmd/roborev/tui/action_test.go @@ -512,7 +512,7 @@ func TestTUICancelJobSuccess(t *testing.T) { } _, m := mockServerModel(t, expectJSONPost(t, "/api/job/cancel", cancelRequest{JobID: 42}, map[string]any{"success": true})) oldFinishedAt := time.Now().Add(-1 * time.Hour) - cmd := m.cancelJob(42, storage.JobStatusRunning, &oldFinishedAt) + cmd := m.cancelJob(42, storage.JobStatusRunning, &oldFinishedAt, false) msg := cmd() result := assertMsgType[cancelResultMsg](t, msg) @@ -530,7 +530,7 @@ func TestTUICancelJobNotFound(t *testing.T) { w.WriteHeader(http.StatusNotFound) json.NewEncoder(w).Encode(map[string]string{"error": "not found"}) 
}) - cmd := m.cancelJob(99, storage.JobStatusQueued, nil) + cmd := m.cancelJob(99, storage.JobStatusQueued, nil, false) msg := cmd() result := assertMsgType[cancelResultMsg](t, msg) diff --git a/cmd/roborev/tui/actions.go b/cmd/roborev/tui/actions.go index 92ae5309c..21c81c2c6 100644 --- a/cmd/roborev/tui/actions.go +++ b/cmd/roborev/tui/actions.go @@ -144,22 +144,60 @@ func (m model) markParentClosed(parentJobID int64) tea.Cmd { } } -// cancelJob sends a cancel request to the server -func (m model) cancelJob(jobID int64, oldStatus storage.JobStatus, oldFinishedAt *time.Time) tea.Cmd { +// cancelJob sends a cancel request to the server. +// restoreSelection tells the result handler to reselect the job +// if the request fails and the optimistic update is rolled back. +func (m model) cancelJob( + jobID int64, oldStatus storage.JobStatus, + oldFinishedAt *time.Time, restoreSelection bool, +) tea.Cmd { return func() tea.Msg { - err := m.postJSON("/api/job/cancel", map[string]any{"job_id": jobID}, nil) - return cancelResultMsg{jobID: jobID, oldState: oldStatus, oldFinishedAt: oldFinishedAt, err: err} + err := m.postJSON( + "/api/job/cancel", + map[string]any{"job_id": jobID}, nil, + ) + return cancelResultMsg{ + jobID: jobID, + oldState: oldStatus, + oldFinishedAt: oldFinishedAt, + restoreSelection: restoreSelection, + err: err, + } } } -// rerunJob sends a rerun request to the server for failed/canceled jobs -func (m model) rerunJob(jobID int64, oldStatus storage.JobStatus, oldStartedAt, oldFinishedAt *time.Time, oldError string) tea.Cmd { +// rerunJob sends a rerun request to the server for failed/canceled jobs. 
+func (m model) rerunJob(snap rerunSnapshot) tea.Cmd { return func() tea.Msg { - err := m.postJSON("/api/job/rerun", map[string]any{"job_id": jobID}, nil) - return rerunResultMsg{jobID: jobID, oldState: oldStatus, oldStartedAt: oldStartedAt, oldFinishedAt: oldFinishedAt, oldError: oldError, err: err} + err := m.postJSON( + "/api/job/rerun", + map[string]any{"job_id": snap.jobID}, nil, + ) + return rerunResultMsg{ + jobID: snap.jobID, + oldState: snap.oldStatus, + oldStartedAt: snap.oldStartedAt, + oldFinishedAt: snap.oldFinishedAt, + oldError: snap.oldError, + oldClosed: snap.oldClosed, + oldVerdict: snap.oldVerdict, + err: err, + } } } +// rerunSnapshot captures job state before an optimistic rerun +// update so it can be rolled back if the server request fails. +type rerunSnapshot struct { + jobID int64 + oldStatus storage.JobStatus + oldStartedAt *time.Time + oldFinishedAt *time.Time + oldError string + oldClosed *bool + oldVerdict *string +} + func (m model) submitComment(jobID int64, text string) tea.Cmd { return func() tea.Msg { commenter := os.Getenv("USER") diff --git a/cmd/roborev/tui/control.go b/cmd/roborev/tui/control.go new file mode 100644 index 000000000..0d7c506c3 --- /dev/null +++ b/cmd/roborev/tui/control.go @@ -0,0 +1,251 @@ +package tui + +import ( + "bufio" + "context" + "encoding/json" + "errors" + "fmt" + "log" + "net" + "os" + "path/filepath" + "syscall" + "time" + + tea "github.com/charmbracelet/bubbletea" +) + +const controlResponseTimeout = 3 * time.Second + +// removeStaleSocket checks whether socketPath is a leftover Unix +// socket from a previous crash and removes it. Returns an error if +// the path exists but is not a socket (protecting regular files from +// accidental deletion via a mistyped --control-socket). 
+func removeStaleSocket(socketPath string) error { + fi, err := os.Lstat(socketPath) + if os.IsNotExist(err) { + return nil + } + if err != nil { + return fmt.Errorf("stat socket path: %w", err) + } + if fi.Mode().Type()&os.ModeSocket == 0 { + return fmt.Errorf( + "%s already exists and is not a socket", socketPath, + ) + } + // It's a socket — try to connect to see if it's still live. + conn, dialErr := net.DialTimeout("unix", socketPath, 500*time.Millisecond) + if dialErr == nil { + conn.Close() + return fmt.Errorf( + "%s is already in use by another listener", socketPath, + ) + } + // Only treat ECONNREFUSED as proof that nothing is listening. + // Other dial errors (wrong socket type, permission denied, etc.) + // are ambiguous and could indicate a live non-stream socket. + if !isConnRefused(dialErr) { + return fmt.Errorf( + "%s: cannot determine socket state: %w", + socketPath, dialErr, + ) + } + // ECONNREFUSED — nothing is listening. Safe to remove. + if err := os.Remove(socketPath); err != nil { + return fmt.Errorf("remove stale socket: %w", err) + } + return nil +} + +// isConnRefused returns true when the error chain contains +// ECONNREFUSED, which means a socket exists but nothing is listening. +func isConnRefused(err error) bool { + return errors.Is(err, syscall.ECONNREFUSED) +} + +// startControlListener creates a Unix domain socket and starts +// accepting connections. Each connection receives one JSON command, +// dispatches it through the tea.Program, and returns a JSON response. +// Returns a cleanup function that closes the listener and removes +// the socket file. +// ensureSocketDir creates the socket parent directory with +// owner-only permissions. MkdirAll is a no-op on existing +// directories, so we always chmod explicitly to tighten +// pre-existing dirs (e.g. ~/.roborev created with 0755). +// This must only be called for managed directories (the +// default data dir), never for arbitrary user-supplied paths. 
+func ensureSocketDir(dir string) error { + if err := os.MkdirAll(dir, 0700); err != nil { + return fmt.Errorf("create socket directory: %w", err) + } + if err := os.Chmod(dir, 0700); err != nil { + return fmt.Errorf("chmod socket directory: %w", err) + } + return nil +} + +func startControlListener( + socketPath string, p *tea.Program, +) (func(), error) { + // Ensure the parent directory exists for custom socket paths. + // Permission tightening is handled separately by ensureSocketDir + // for the default managed directory only. + if err := os.MkdirAll(filepath.Dir(socketPath), 0755); err != nil { + return nil, fmt.Errorf("create socket directory: %w", err) + } + + // Only remove an existing path if it is a stale Unix socket. + // Refusing to remove regular files prevents data loss from + // a mistyped --control-socket path. + if err := removeStaleSocket(socketPath); err != nil { + return nil, err + } + + ln, err := net.Listen("unix", socketPath) + if err != nil { + return nil, fmt.Errorf("listen on %s: %w", socketPath, err) + } + + // Restrict socket permissions to owner-only. + if err := os.Chmod(socketPath, 0600); err != nil { + ln.Close() + return nil, fmt.Errorf("chmod socket: %w", err) + } + + ctx, cancel := context.WithCancel(context.Background()) + + go acceptLoop(ctx, ln, p) + + // Disable automatic unlinking so ln.Close() does not remove + // whatever is at socketPath — a successor process may have + // already bound a new socket there. + ln.(*net.UnixListener).SetUnlinkOnClose(false) + + cleanup := func() { + cancel() + os.Remove(socketPath) + ln.Close() + } + return cleanup, nil +} + +func acceptLoop(ctx context.Context, ln net.Listener, p *tea.Program) { + for { + conn, err := ln.Accept() + if err != nil { + // Check if listener was closed (normal shutdown) + select { + case <-ctx.Done(): + return + default: + } + log.Printf("control: accept error: %v", err) + // Back off on transient errors (e.g. EMFILE) to + // avoid a tight CPU-pegging loop. 
+ time.Sleep(100 * time.Millisecond) + continue + } + go handleControlConn(ctx, conn, p) + } +} + +func handleControlConn( + ctx context.Context, conn net.Conn, p *tea.Program, +) { + defer conn.Close() + + // Set read deadline to prevent hung connections + if err := conn.SetReadDeadline(time.Now().Add(5 * time.Second)); err != nil { + return + } + + scanner := bufio.NewScanner(conn) + scanner.Buffer(make([]byte, 64*1024), 64*1024) + if !scanner.Scan() { + writeError(conn, "empty request") + return + } + + var req controlRequest + if err := json.Unmarshal(scanner.Bytes(), &req); err != nil { + writeError(conn, "invalid JSON: "+err.Error()) + return + } + + resp := dispatchCommand(ctx, req, p) + writeResponse(conn, resp) +} + +func dispatchCommand( + ctx context.Context, req controlRequest, p *tea.Program, +) controlResponse { + isQuery, isMutation := isControlCommand(req.Command) + + switch { + case isQuery: + return queryViaProgram(ctx, p, req) + case isMutation: + return mutateViaProgram(ctx, p, req) + default: + return controlResponse{ + Error: fmt.Sprintf("unknown command: %s", req.Command), + } + } +} + +// queryViaProgram sends a controlQueryMsg through the program and +// waits for the Update handler to write the response. The control +// listener is only started after the event loop is running (via the +// ready channel in Run), so p.Send will not block here. +func queryViaProgram( + ctx context.Context, p *tea.Program, req controlRequest, +) controlResponse { + respCh := make(chan controlResponse, 1) + p.Send(controlQueryMsg{req: req, respCh: respCh}) + + select { + case resp := <-respCh: + return resp + case <-ctx.Done(): + return controlResponse{Error: "TUI is shutting down"} + case <-time.After(controlResponseTimeout): + return controlResponse{Error: "response timeout"} + } +} + +// mutateViaProgram sends a controlMutationMsg through the program +// and waits for the Update handler to write the response. 
+func mutateViaProgram( + ctx context.Context, p *tea.Program, req controlRequest, +) controlResponse { + respCh := make(chan controlResponse, 1) + p.Send(controlMutationMsg{req: req, respCh: respCh}) + + select { + case resp := <-respCh: + return resp + case <-ctx.Done(): + return controlResponse{Error: "TUI is shutting down"} + case <-time.After(controlResponseTimeout): + return controlResponse{Error: "response timeout"} + } +} + +func writeResponse(conn net.Conn, resp controlResponse) { + data, err := json.Marshal(resp) + if err != nil { + writeError(conn, "marshal error: "+err.Error()) + return + } + data = append(data, '\n') + _, _ = conn.Write(data) +} + +func writeError(conn net.Conn, msg string) { + resp := controlResponse{Error: msg} + data, _ := json.Marshal(resp) + data = append(data, '\n') + _, _ = conn.Write(data) +} diff --git a/cmd/roborev/tui/control_handlers.go b/cmd/roborev/tui/control_handlers.go new file mode 100644 index 000000000..41e3c75d0 --- /dev/null +++ b/cmd/roborev/tui/control_handlers.go @@ -0,0 +1,623 @@ +package tui + +import ( + "encoding/json" + "fmt" + "slices" + "time" + + tea "github.com/charmbracelet/bubbletea" + "github.com/roborev-dev/roborev/internal/storage" +) + +// handleControlQuery dispatches a control query to the appropriate +// response builder and writes the result to the response channel. 
+func (m model) handleControlQuery( + msg controlQueryMsg, +) (tea.Model, tea.Cmd) { + var resp controlResponse + switch msg.req.Command { + case "get-state": + resp = m.buildStateResponse() + case "get-filter": + resp = m.buildFilterResponse() + case "get-jobs": + resp = m.buildJobsResponse() + case "get-selected": + resp = m.buildSelectedResponse() + default: + resp = controlResponse{ + Error: fmt.Sprintf("unknown query: %s", msg.req.Command), + } + } + msg.respCh <- resp + return m, nil +} + +// handleControlMutation dispatches a control mutation to the +// appropriate handler, writes the response, and returns the +// updated model with any side-effect tea.Cmd. +func (m model) handleControlMutation( + msg controlMutationMsg, +) (tea.Model, tea.Cmd) { + var resp controlResponse + var cmd tea.Cmd + + switch msg.req.Command { + case "set-filter": + m, resp, cmd = m.handleCtrlSetFilter(msg.req.Params) + case "clear-filter": + m, resp, cmd = m.handleCtrlClearFilter(msg.req.Params) + case "set-hide-closed": + m, resp, cmd = m.handleCtrlSetHideClosed(msg.req.Params) + case "select-job": + m, resp, cmd = m.handleCtrlSelectJob(msg.req.Params) + case "set-view": + m, resp, cmd = m.handleCtrlSetView(msg.req.Params) + case "close-review": + m, resp, cmd = m.handleCtrlCloseReview(msg.req.Params) + case "cancel-job": + m, resp, cmd = m.handleCtrlCancelJob(msg.req.Params) + case "rerun-job": + m, resp, cmd = m.handleCtrlRerunJob(msg.req.Params) + case "quit": + m, resp, cmd = m.handleCtrlQuit() + default: + resp = controlResponse{ + Error: fmt.Sprintf("unknown mutation: %s", msg.req.Command), + } + } + + msg.respCh <- resp + return m, cmd +} + +// --- Query response builders --- + +func (m model) buildStateResponse() controlResponse { + return controlResponse{ + OK: true, + Data: stateSnapshot{ + View: m.currentView.String(), + RepoFilter: copyStrings(m.activeRepoFilter), + BranchFilter: m.activeBranchFilter, + LockedRepo: m.lockedRepoFilter, + LockedBranch: 
m.lockedBranchFilter, + HideClosed: m.hideClosed, + SelectedJobID: m.selectedJobID, + JobCount: len(m.jobs), + VisibleJobCount: len(m.getVisibleJobs()), + Stats: m.jobStats, + }, + } +} + +func (m model) buildFilterResponse() controlResponse { + type filterData struct { + RepoFilter []string `json:"repo_filter"` + BranchFilter string `json:"branch_filter"` + LockedRepo bool `json:"locked_repo"` + LockedBranch bool `json:"locked_branch"` + FilterStack []string `json:"filter_stack"` + } + return controlResponse{ + OK: true, + Data: filterData{ + RepoFilter: copyStrings(m.activeRepoFilter), + BranchFilter: m.activeBranchFilter, + LockedRepo: m.lockedRepoFilter, + LockedBranch: m.lockedBranchFilter, + FilterStack: copyStrings(m.filterStack), + }, + } +} + +func (m model) buildJobsResponse() controlResponse { + visible := m.getVisibleJobs() + snapshots := make([]jobSnapshot, len(visible)) + for i, job := range visible { + snapshots[i] = makeJobSnapshot(job) + } + return controlResponse{OK: true, Data: snapshots} +} + +func (m model) buildSelectedResponse() controlResponse { + job, ok := m.selectedJob() + if !ok { + return controlResponse{ + OK: true, + Data: selectedSnapshot{}, + } + } + snap := makeJobSnapshot(*job) + hasReview := job.Status == storage.JobStatusDone && + job.Closed != nil + return controlResponse{ + OK: true, + Data: selectedSnapshot{ + Job: &snap, + HasReview: hasReview, + }, + } +} + +func makeJobSnapshot(job storage.ReviewJob) jobSnapshot { + snap := jobSnapshot{ + ID: job.ID, + Agent: job.Agent, + Status: job.Status, + Repo: job.RepoPath, + Branch: job.Branch, + GitRef: job.GitRef, + Subject: job.CommitSubject, + } + // Deep-copy pointer fields so the snapshot is safe to + // marshal in a connection goroutine without racing with + // mutations in the Bubble Tea update loop. 
+ if job.Closed != nil { + c := *job.Closed + snap.Closed = &c + } + if job.Verdict != nil { + v := *job.Verdict + snap.Verdict = &v + } + return snap +} + +// copyStrings returns a shallow copy of a string slice so the +// snapshot doesn't alias the model's backing array. +func copyStrings(s []string) []string { + if s == nil { + return nil + } + cp := make([]string, len(s)) + copy(cp, s) + return cp +} + +// resolveRepoFilter resolves a repo name to root paths. The input +// can be a display name (e.g. "msgvault") or a literal root path +// (e.g. "/home/user/projects/msgvault"). Display names are resolved +// via repoNames (populated from /api/repos at init). +func (m model) resolveRepoFilter(name string) []string { + // Check display name lookup (authoritative, from /api/repos). + if paths, ok := m.repoNames[name]; ok { + return paths + } + + // Check if it matches a root path in the repo list. + for _, paths := range m.repoNames { + if slices.Contains(paths, name) { + return []string{name} + } + } + + // Unknown name — accept as-is so callers that pass a path + // before the repo list is loaded still work. + return []string{name} +} + +// --- Mutation handlers --- +// Each returns (updated model, response, cmd) so value-receiver +// mutations propagate through Bubble Tea's Update chain. + +func (m model) handleCtrlSetFilter( + raw json.RawMessage, +) (model, controlResponse, tea.Cmd) { + var params struct { + Repo *string `json:"repo"` + Branch *string `json:"branch"` + } + if err := json.Unmarshal(raw, ¶ms); err != nil { + return m, controlResponse{ + Error: "invalid params: " + err.Error(), + }, nil + } + + // Validate all lock constraints before mutating any state + // so a partial-success / partial-error cannot occur. 
+ if params.Repo != nil && m.lockedRepoFilter { + return m, controlResponse{ + Error: "repo filter is locked via --repo flag", + }, nil + } + if params.Branch != nil && m.lockedBranchFilter { + return m, controlResponse{ + Error: "branch filter is locked via --branch flag", + }, nil + } + + if params.Repo != nil { + if *params.Repo == "" { + m.activeRepoFilter = nil + m.removeFilterFromStack(filterTypeRepo) + } else { + m.activeRepoFilter = m.resolveRepoFilter(*params.Repo) + m.pushFilter(filterTypeRepo) + } + } + + if params.Branch != nil { + m.activeBranchFilter = *params.Branch + if *params.Branch == "" { + m.removeFilterFromStack(filterTypeBranch) + } else { + m.pushFilter(filterTypeBranch) + } + } + + m.hasMore = false + m.selectedIdx = -1 + m.selectedJobID = 0 + m.fetchSeq++ + m.queueColGen++ + m.loadingJobs = true + return m, controlResponse{OK: true}, m.fetchJobs() +} + +func (m model) handleCtrlClearFilter( + raw json.RawMessage, +) (model, controlResponse, tea.Cmd) { + var params struct { + Repo bool `json:"repo"` + Branch bool `json:"branch"` + } + if err := json.Unmarshal(raw, ¶ms); err != nil { + return m, controlResponse{ + Error: "invalid params: " + err.Error(), + }, nil + } + + // Validate all lock constraints before mutating any state. 
+ if params.Repo && m.lockedRepoFilter { + return m, controlResponse{ + Error: "repo filter is locked via --repo flag", + }, nil + } + if params.Branch && m.lockedBranchFilter { + return m, controlResponse{ + Error: "branch filter is locked via --branch flag", + }, nil + } + + if params.Repo { + m.activeRepoFilter = nil + m.removeFilterFromStack(filterTypeRepo) + } + + if params.Branch { + m.activeBranchFilter = "" + m.removeFilterFromStack(filterTypeBranch) + } + + m.hasMore = false + m.selectedIdx = -1 + m.selectedJobID = 0 + m.fetchSeq++ + m.queueColGen++ + m.loadingJobs = true + return m, controlResponse{OK: true}, m.fetchJobs() +} + +func (m model) handleCtrlSetHideClosed( + raw json.RawMessage, +) (model, controlResponse, tea.Cmd) { + var params struct { + HideClosed bool `json:"hide_closed"` + } + if err := json.Unmarshal(raw, ¶ms); err != nil { + return m, controlResponse{ + Error: "invalid params: " + err.Error(), + }, nil + } + + m.hideClosed = params.HideClosed + m.queueColGen++ + if len(m.jobs) > 0 { + if m.selectedIdx < 0 || + m.selectedIdx >= len(m.jobs) || + !m.isJobVisible(m.jobs[m.selectedIdx]) { + m.selectedIdx = m.findFirstVisibleJob() + m.updateSelectedJobID() + } + } + m.fetchSeq++ + m.loadingJobs = true + return m, controlResponse{OK: true}, m.fetchJobs() +} + +func (m model) handleCtrlSelectJob( + raw json.RawMessage, +) (model, controlResponse, tea.Cmd) { + var params struct { + JobID int64 `json:"job_id"` + } + if err := json.Unmarshal(raw, ¶ms); err != nil { + return m, controlResponse{ + Error: "invalid params: " + err.Error(), + }, nil + } + if params.JobID == 0 { + return m, controlResponse{Error: "job_id is required"}, nil + } + + for i := range m.jobs { + if m.jobs[i].ID == params.JobID { + if !m.isJobVisible(m.jobs[i]) { + return m, controlResponse{ + Error: fmt.Sprintf( + "job %d is hidden by current filters", + params.JobID, + ), + }, nil + } + m.selectedIdx = i + m.selectedJobID = params.JobID + return m, controlResponse{OK: 
true}, nil + } + } + return m, controlResponse{ + Error: fmt.Sprintf( + "job %d not found in current view", params.JobID, + ), + }, nil +} + +func (m model) handleCtrlSetView( + raw json.RawMessage, +) (model, controlResponse, tea.Cmd) { + var params struct { + View string `json:"view"` + } + if err := json.Unmarshal(raw, ¶ms); err != nil { + return m, controlResponse{ + Error: "invalid params: " + err.Error(), + }, nil + } + + v := parseViewKind(params.View) + if v < 0 { + return m, controlResponse{ + Error: fmt.Sprintf( + "invalid view %q (valid: queue, tasks)", params.View, + ), + }, nil + } + + if v == viewTasks && !m.tasksWorkflowEnabled() { + return m, controlResponse{ + Error: "tasks workflow is disabled", + }, nil + } + + m.currentView = v + var cmd tea.Cmd + if v == viewTasks { + cmd = m.fetchFixJobs() + } + return m, controlResponse{OK: true}, cmd +} + +func (m model) handleCtrlCloseReview( + raw json.RawMessage, +) (model, controlResponse, tea.Cmd) { + var params struct { + JobID int64 `json:"job_id"` + Closed *bool `json:"closed"` + } + if err := json.Unmarshal(raw, ¶ms); err != nil { + return m, controlResponse{ + Error: "invalid params: " + err.Error(), + }, nil + } + if params.JobID == 0 { + return m, controlResponse{ + Error: "job_id is required", + }, nil + } + + // Find the job + var job *storage.ReviewJob + for i := range m.jobs { + if m.jobs[i].ID == params.JobID { + job = &m.jobs[i] + break + } + } + if job == nil { + return m, controlResponse{ + Error: fmt.Sprintf("job %d not found", params.JobID), + }, nil + } + if job.Status != storage.JobStatusDone || job.Closed == nil { + return m, controlResponse{ + Error: fmt.Sprintf( + "job %d has no review to close", params.JobID, + ), + }, nil + } + + oldState := *job.Closed + newState := !oldState + if params.Closed != nil { + newState = *params.Closed + } + if newState == oldState { + return m, controlResponse{OK: true}, nil + } + + m.closedSeq++ + seq := m.closedSeq + *job.Closed = newState + 
m.pendingClosed[params.JobID] = pendingState{ + newState: newState, seq: seq, + } + m.applyStatsDelta(newState) + + restoreSelection := false + if m.hideClosed && newState && + m.selectedJobID == params.JobID { + idx := m.findPrevVisibleJob(m.selectedIdx) + if idx < 0 { + idx = m.findNextVisibleJob(m.selectedIdx) + } + if idx < 0 { + idx = m.findFirstVisibleJob() + } + if idx >= 0 { + m.selectedIdx = idx + m.updateSelectedJobID() + restoreSelection = true + } else { + m.selectedIdx = -1 + m.selectedJobID = 0 + restoreSelection = true + } + } + + return m, controlResponse{OK: true}, + m.closeReviewInBackground( + params.JobID, newState, oldState, seq, restoreSelection, + ) +} + +func (m model) handleCtrlCancelJob( + raw json.RawMessage, +) (model, controlResponse, tea.Cmd) { + var params struct { + JobID int64 `json:"job_id"` + } + if err := json.Unmarshal(raw, ¶ms); err != nil { + return m, controlResponse{ + Error: "invalid params: " + err.Error(), + }, nil + } + if params.JobID == 0 { + return m, controlResponse{ + Error: "job_id is required", + }, nil + } + + var job *storage.ReviewJob + for i := range m.jobs { + if m.jobs[i].ID == params.JobID { + job = &m.jobs[i] + break + } + } + if job == nil { + return m, controlResponse{ + Error: fmt.Sprintf("job %d not found", params.JobID), + }, nil + } + + if job.Status != storage.JobStatusRunning && + job.Status != storage.JobStatusQueued { + return m, controlResponse{ + Error: fmt.Sprintf( + "job %d is %s, can only cancel running/queued jobs", + params.JobID, job.Status, + ), + }, nil + } + + oldStatus := job.Status + oldFinishedAt := job.FinishedAt + job.Status = storage.JobStatusCanceled + now := time.Now() + job.FinishedAt = &now + + restoreSelection := false + if m.hideClosed && m.selectedJobID == params.JobID { + idx := m.findPrevVisibleJob(m.selectedIdx) + if idx < 0 { + idx = m.findNextVisibleJob(m.selectedIdx) + } + if idx < 0 { + idx = m.findFirstVisibleJob() + } + if idx >= 0 { + m.selectedIdx = idx + 
m.updateSelectedJobID() + } else { + m.selectedIdx = -1 + m.selectedJobID = 0 + } + restoreSelection = true + } + + return m, controlResponse{OK: true}, + m.cancelJob( + params.JobID, oldStatus, oldFinishedAt, + restoreSelection, + ) +} + +func (m model) handleCtrlRerunJob( + raw json.RawMessage, +) (model, controlResponse, tea.Cmd) { + var params struct { + JobID int64 `json:"job_id"` + } + if err := json.Unmarshal(raw, ¶ms); err != nil { + return m, controlResponse{ + Error: "invalid params: " + err.Error(), + }, nil + } + if params.JobID == 0 { + return m, controlResponse{ + Error: "job_id is required", + }, nil + } + + var job *storage.ReviewJob + for i := range m.jobs { + if m.jobs[i].ID == params.JobID { + job = &m.jobs[i] + break + } + } + if job == nil { + return m, controlResponse{ + Error: fmt.Sprintf("job %d not found", params.JobID), + }, nil + } + + if job.Status != storage.JobStatusDone && + job.Status != storage.JobStatusFailed && + job.Status != storage.JobStatusCanceled { + return m, controlResponse{ + Error: fmt.Sprintf( + "job %d is %s, can only rerun done/failed/canceled jobs", + params.JobID, job.Status, + ), + }, nil + } + + snap := rerunSnapshot{ + jobID: job.ID, + oldStatus: job.Status, + oldStartedAt: job.StartedAt, + oldFinishedAt: job.FinishedAt, + oldError: job.Error, + oldClosed: job.Closed, + oldVerdict: job.Verdict, + } + job.Status = storage.JobStatusQueued + job.StartedAt = nil + job.FinishedAt = nil + job.Error = "" + // Clear review-derived fields so the rerun job is visible + // under hideClosed and doesn't expose stale verdict data. + // The daemon deletes the review row on rerun; this keeps + // the local optimistic state consistent until the next fetch. 
+ job.Closed = nil + job.Verdict = nil + + return m, controlResponse{OK: true}, m.rerunJob(snap) +} + +func (m model) handleCtrlQuit() (model, controlResponse, tea.Cmd) { + return m, controlResponse{OK: true}, tea.Quit +} diff --git a/cmd/roborev/tui/control_runtime.go b/cmd/roborev/tui/control_runtime.go new file mode 100644 index 000000000..65ea1b3de --- /dev/null +++ b/cmd/roborev/tui/control_runtime.go @@ -0,0 +1,165 @@ +package tui + +import ( + "encoding/json" + "fmt" + "os" + "path/filepath" + "strings" + + "github.com/roborev-dev/roborev/internal/config" +) + +// TUIRuntimeInfo stores metadata about a running TUI instance for +// discoverability by external tools. Filter state is intentionally +// omitted — it changes at runtime and should be queried via the +// control socket's get-filter command. +type TUIRuntimeInfo struct { + PID int `json:"pid"` + SocketPath string `json:"socket_path"` + ServerAddr string `json:"server_addr"` +} + +// tuiRuntimePath returns the metadata file path for a given PID. +func tuiRuntimePath(pid int) string { + return filepath.Join( + config.DataDir(), fmt.Sprintf("tui.%d.json", pid), + ) +} + +// WriteTUIRuntime writes the TUI runtime metadata file atomically.
+func WriteTUIRuntime(info TUIRuntimeInfo) error { + path := tuiRuntimePath(info.PID) + dir := filepath.Dir(path) + if err := os.MkdirAll(dir, 0755); err != nil { + return err + } + + data, err := json.MarshalIndent(info, "", " ") + if err != nil { + return err + } + + tmpFile, err := os.CreateTemp(dir, "tui.*.json.tmp") + if err != nil { + return err + } + tmpPath := tmpFile.Name() + + success := false + defer func() { + if !success { + os.Remove(tmpPath) + } + }() + + if _, err := tmpFile.Write(data); err != nil { + tmpFile.Close() + return err + } + if err := tmpFile.Close(); err != nil { + return err + } + + if err := os.Rename(tmpPath, path); err != nil { + return err + } + if err := os.Chmod(path, 0600); err != nil { + return err + } + + success = true + return nil +} + +// RemoveTUIRuntime removes the runtime metadata file for the +// current process. The socket file is removed separately by the +// control listener's cleanup function. +func RemoveTUIRuntime() { + os.Remove(tuiRuntimePath(os.Getpid())) +} + +// ListAllTUIRuntimes returns metadata for all discovered TUI +// runtime files. +func ListAllTUIRuntimes() ([]*TUIRuntimeInfo, error) { + dataDir := config.DataDir() + entries, err := os.ReadDir(dataDir) + if err != nil { + if os.IsNotExist(err) { + return nil, nil + } + return nil, err + } + + var runtimes []*TUIRuntimeInfo + for _, entry := range entries { + name := entry.Name() + if !strings.HasPrefix(name, "tui.") || + !strings.HasSuffix(name, ".json") { + continue + } + path := filepath.Join(dataDir, name) + data, err := os.ReadFile(path) + if err != nil { + continue + } + var info TUIRuntimeInfo + if err := json.Unmarshal(data, &info); err != nil { + os.Remove(path) + continue + } + if info.PID <= 0 || info.SocketPath == "" { + os.Remove(path) + continue + } + runtimes = append(runtimes, &info) + } + return runtimes, nil +} + +// CleanupStaleTUIRuntimes removes socket and metadata files for +// TUI processes that are no longer running. 
+func CleanupStaleTUIRuntimes() int { + runtimes, err := ListAllTUIRuntimes() + if err != nil { + return 0 + } + + cleaned := 0 + for _, info := range runtimes { + if isProcessAlive(info.PID) { + continue + } + // Only remove the socket if it's actually a stale + // socket file. This prevents deleting a live socket + // at a shared custom path or a non-socket file. + // Errors are ignored: the metadata is still cleaned + // since the owning PID is dead. + _ = removeStaleSocket(info.SocketPath) + os.Remove(tuiRuntimePath(info.PID)) + cleaned++ + } + return cleaned +} + +// buildTUIRuntimeInfo constructs the runtime metadata for the +// current process. This is the single source of truth for what +// gets written to the runtime metadata file. +func buildTUIRuntimeInfo( + socketPath, serverAddr string, +) TUIRuntimeInfo { + return TUIRuntimeInfo{ + PID: os.Getpid(), + SocketPath: socketPath, + ServerAddr: serverAddr, + } +} + +// defaultControlSocketPath returns the default socket path for the +// current process: {DataDir}/tui.{PID}.sock +func defaultControlSocketPath() string { + return filepath.Join( + config.DataDir(), + fmt.Sprintf("tui.%d.sock", os.Getpid()), + ) +} diff --git a/cmd/roborev/tui/control_test.go b/cmd/roborev/tui/control_test.go new file mode 100644 index 000000000..cd03aad90 --- /dev/null +++ b/cmd/roborev/tui/control_test.go @@ -0,0 +1,1320 @@ +package tui + +import ( + "encoding/json" + "fmt" + "net" + "net/http" + "net/http/httptest" + "os" + "path/filepath" + "strings" + "testing" + "time" + + tea "github.com/charmbracelet/bubbletea" + "github.com/roborev-dev/roborev/internal/storage" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" +) + +// --- Unit tests for control types --- + +func TestViewKindString(t *testing.T) { + tests := []struct { + v viewKind + want string + }{ + {viewQueue, "queue"}, + {viewReview, "review"}, + {viewTasks, "tasks"}, + {viewLog, "log"}, + {viewFilter, "filter"}, + {viewPatch, 
"patch"}, + {viewKind(999), "unknown"}, + } + for _, tt := range tests { + assert.Equal(t, tt.want, tt.v.String(), + "viewKind(%d).String()", tt.v) + } +} + +func TestParseViewKind(t *testing.T) { + tests := []struct { + s string + want viewKind + }{ + {"queue", viewQueue}, + {"tasks", viewTasks}, + {"invalid", viewKind(-1)}, + {"review", viewKind(-1)}, // only queue and tasks are settable + } + for _, tt := range tests { + assert.Equal(t, tt.want, parseViewKind(tt.s), + "parseViewKind(%q)", tt.s) + } +} + +func TestIsControlCommand(t *testing.T) { + tests := []struct { + cmd string + wantQuery bool + wantMutate bool + }{ + {"get-state", true, false}, + {"get-filter", true, false}, + {"set-filter", false, true}, + {"cancel-job", false, true}, + {"quit", false, true}, + {"bogus", false, false}, + } + for _, tt := range tests { + isQ, isM := isControlCommand(tt.cmd) + assert.Equal(t, tt.wantQuery, isQ, + "isControlCommand(%q) query", tt.cmd) + assert.Equal(t, tt.wantMutate, isM, + "isControlCommand(%q) mutate", tt.cmd) + } +} + +// --- Unit tests for query handlers --- + +func TestBuildStateResponse(t *testing.T) { + m := newModel(testServerAddr, withExternalIODisabled()) + m.jobs = []storage.ReviewJob{ + makeJob(1, withRepoPath("/a")), + makeJob(2, withRepoPath("/b")), + } + m.selectedJobID = 1 + m.hideClosed = true + + resp := m.buildStateResponse() + require.True(t, resp.OK, "expected OK, got error: %s", resp.Error) + + data, ok := resp.Data.(stateSnapshot) + require.True(t, ok, "expected stateSnapshot, got %T", resp.Data) + assert.Equal(t, "queue", data.View) + assert.Equal(t, 2, data.JobCount) + assert.True(t, data.HideClosed) +} + +func TestBuildFilterResponse(t *testing.T) { + m := newModel(testServerAddr, withExternalIODisabled()) + m.activeRepoFilter = []string{"/repo"} + m.activeBranchFilter = "main" + m.lockedRepoFilter = true + m.filterStack = []string{"repo", "branch"} + + resp := m.buildFilterResponse() + require.True(t, resp.OK, "expected OK, got 
error: %s", resp.Error) +} + +func TestBuildJobsResponse(t *testing.T) { + m := newModel(testServerAddr, withExternalIODisabled()) + m.jobs = []storage.ReviewJob{ + makeJob(1, withAgent("claude-code"), withRepoPath("/r")), + makeJob(2, withAgent("codex"), withRepoPath("/r")), + } + + resp := m.buildJobsResponse() + require.True(t, resp.OK, "expected OK, got error: %s", resp.Error) + + jobs, ok := resp.Data.([]jobSnapshot) + require.True(t, ok, "expected []jobSnapshot, got %T", resp.Data) + require.Len(t, jobs, 2) + assert.Equal(t, "claude-code", jobs[0].Agent) +} + +func TestBuildSelectedResponse_NoSelection(t *testing.T) { + m := newModel(testServerAddr, withExternalIODisabled()) + m.selectedIdx = -1 + + resp := m.buildSelectedResponse() + require.True(t, resp.OK, "expected OK, got error: %s", resp.Error) + data := resp.Data.(selectedSnapshot) + assert.Nil(t, data.Job) +} + +func TestBuildSelectedResponse_WithSelection(t *testing.T) { + m := newModel(testServerAddr, withExternalIODisabled()) + m.jobs = []storage.ReviewJob{ + makeJob(42, withAgent("codex"), withClosed(boolPtr(false))), + } + m.selectedIdx = 0 + m.selectedJobID = 42 + + resp := m.buildSelectedResponse() + data := resp.Data.(selectedSnapshot) + require.NotNil(t, data.Job) + assert.EqualValues(t, 42, data.Job.ID) + assert.True(t, data.HasReview, + "expected has_review=true for done job with closed field") +} + +// --- Unit tests for mutation handlers --- + +func TestHandleCtrlSetFilter(t *testing.T) { + m := newModel(testServerAddr, withExternalIODisabled()) + repo := "/test/repo" + branch := "feature" + + params, _ := json.Marshal(map[string]string{ + "repo": repo, + "branch": branch, + }) + updated, resp, _ := m.handleCtrlSetFilter(params) + require.True(t, resp.OK, "expected OK, got error: %s", resp.Error) + assert.Equal(t, []string{repo}, updated.activeRepoFilter) + assert.Equal(t, branch, updated.activeBranchFilter) + assert.Equal(t, -1, updated.selectedIdx) +} + +func 
TestHandleCtrlSetFilter_DisplayName(t *testing.T) { + m := newModel(testServerAddr, withExternalIODisabled()) + m.repoNames = map[string][]string{ + "msgvault": {"/home/user/projects/msgvault"}, + "roborev": {"/home/user/projects/roborev"}, + } + + params, _ := json.Marshal(map[string]string{ + "repo": "msgvault", + }) + updated, resp, _ := m.handleCtrlSetFilter(params) + require.True(t, resp.OK, "expected OK, got error: %s", resp.Error) + assert.Equal(t, []string{"/home/user/projects/msgvault"}, + updated.activeRepoFilter, + "display name should resolve to root path") +} + +func TestHandleCtrlSetFilter_DisplayNameMultiplePaths(t *testing.T) { + m := newModel(testServerAddr, withExternalIODisabled()) + m.repoNames = map[string][]string{ + "backend": { + "/home/user/work/backend", + "/home/user/oss/backend", + }, + "roborev": {"/home/user/projects/roborev"}, + } + + params, _ := json.Marshal(map[string]string{ + "repo": "backend", + }) + updated, resp, _ := m.handleCtrlSetFilter(params) + require.True(t, resp.OK, "expected OK, got error: %s", resp.Error) + assert.Len(t, updated.activeRepoFilter, 2, + "display name matching multiple repos should return all paths") + assert.ElementsMatch(t, + []string{"/home/user/work/backend", "/home/user/oss/backend"}, + updated.activeRepoFilter) +} + +func TestHandleCtrlSetFilter_DisplayNameNotInJobs(t *testing.T) { + m := newModel(testServerAddr, withExternalIODisabled()) + // Simulate: jobs are loaded for "roborev" but "msgvault" has no + // visible jobs. repoNames (from /api/repos) knows about both. 
+ m.jobs = []storage.ReviewJob{ + makeJob(1, withRepoPath("/home/user/projects/roborev")), + } + m.repoNames = map[string][]string{ + "msgvault": {"/home/user/projects/msgvault"}, + "roborev": {"/home/user/projects/roborev"}, + } + + params, _ := json.Marshal(map[string]string{ + "repo": "msgvault", + }) + updated, resp, _ := m.handleCtrlSetFilter(params) + require.True(t, resp.OK, "expected OK, got error: %s", resp.Error) + assert.Equal(t, []string{"/home/user/projects/msgvault"}, + updated.activeRepoFilter, + "should resolve repos not in current job page") +} + +func TestRepoNamesNotClobberedByBranchFilteredModal(t *testing.T) { + m := newModel(testServerAddr, withExternalIODisabled()) + // Simulate init fetch: repoNames knows both repos. + m.repoNames = map[string][]string{ + "msgvault": {"/home/user/projects/msgvault"}, + "roborev": {"/home/user/projects/roborev"}, + } + + // Simulate opening filter modal with branch filter active: + // the branch-filtered /api/repos only returns "roborev". + branchFilteredRepos := reposMsg{ + repos: []repoFilterItem{ + {name: "roborev", rootPaths: []string{"/home/user/projects/roborev"}, count: 5}, + }, + branchFiltered: true, + } + result, _ := m.handleReposMsg(branchFilteredRepos) + updated := result.(model) + + // repoNames should still contain both repos (not overwritten + // by the branch-scoped modal data). + assert.Len(t, updated.repoNames, 2, + "repoNames should not be clobbered by branch-filtered modal") + assert.Contains(t, updated.repoNames, "msgvault", + "msgvault should survive branch-filtered modal refresh") + + // Verify set-filter still resolves the repo excluded from + // the branch-filtered list. 
+ params, _ := json.Marshal(map[string]string{"repo": "msgvault"}) + updated2, resp, _ := updated.handleCtrlSetFilter(params) + require.True(t, resp.OK, "expected OK, got error: %s", resp.Error) + assert.Equal(t, []string{"/home/user/projects/msgvault"}, + updated2.activeRepoFilter) +} + +func TestFetchRepos_BranchNoneIsUnfiltered(t *testing.T) { + ts := httptest.NewServer(http.HandlerFunc( + func(w http.ResponseWriter, r *http.Request) { + // Verify branch key is absent (not just empty). + _, hasBranch := r.URL.Query()["branch"] + assert.False(t, hasBranch, + "branchNone should not send branch param") + w.Header().Set("Content-Type", "application/json") + json.NewEncoder(w).Encode(map[string]any{ + "repos": []map[string]any{ + {"name": "myrepo", "root_path": "/r", "count": 1}, + }, + }) + }, + )) + defer ts.Close() + + m := newModel(ts.URL, withExternalIODisabled()) + m.activeBranchFilter = branchNone + + cmd := m.fetchRepos() + msg := cmd() + rMsg, ok := msg.(reposMsg) + require.True(t, ok, "expected reposMsg, got %T", msg) + assert.False(t, rMsg.branchFiltered, + "branchNone should produce branchFiltered=false") +} + +func TestRepoNamesRefreshedByUnfilteredModal(t *testing.T) { + m := newModel(testServerAddr, withExternalIODisabled()) + m.repoNames = map[string][]string{ + "roborev": {"/home/user/projects/roborev"}, + } + + // Simulate opening filter modal with no branch filter: + // the unfiltered response includes a newly registered repo. 
+ unfilteredRepos := reposMsg{ + repos: []repoFilterItem{ + {name: "roborev", rootPaths: []string{"/home/user/projects/roborev"}, count: 5}, + {name: "newrepo", rootPaths: []string{"/home/user/projects/newrepo"}, count: 1}, + }, + } + result, _ := m.handleReposMsg(unfilteredRepos) + updated := result.(model) + + assert.Len(t, updated.repoNames, 2, + "unfiltered modal should refresh repoNames") + assert.Contains(t, updated.repoNames, "newrepo", + "newly registered repo should appear in repoNames") +} + +func TestSetFilterFallbackWhenRepoNamesEmpty(t *testing.T) { + m := newModel(testServerAddr, withExternalIODisabled()) + // Simulate startup fetch failure: repoNames is nil. + m.repoNames = nil + + params, _ := json.Marshal(map[string]string{"repo": "msgvault"}) + updated, resp, _ := m.handleCtrlSetFilter(params) + require.True(t, resp.OK, "expected OK, got error: %s", resp.Error) + // Falls through to literal — not ideal, but doesn't crash. + assert.Equal(t, []string{"msgvault"}, updated.activeRepoFilter, + "should accept name as-is when repoNames is empty") +} + +func TestHandleCtrlSetFilter_LockedRepo(t *testing.T) { + m := newModel(testServerAddr, withExternalIODisabled()) + m.lockedRepoFilter = true + + params, _ := json.Marshal(map[string]string{"repo": "/test/repo"}) + _, resp, _ := m.handleCtrlSetFilter(params) + require.False(t, resp.OK, "expected error for locked repo filter") + assert.NotEmpty(t, resp.Error) +} + +func TestHandleCtrlSetFilter_LockedBranch(t *testing.T) { + m := newModel(testServerAddr, withExternalIODisabled()) + m.lockedBranchFilter = true + + params, _ := json.Marshal(map[string]string{"branch": "main"}) + _, resp, _ := m.handleCtrlSetFilter(params) + require.False(t, resp.OK, "expected error for locked branch filter") +} + +func TestHandleCtrlSetFilter_LockedBranchNoRepoMutation(t *testing.T) { + m := newModel(testServerAddr, withExternalIODisabled()) + m.lockedBranchFilter = true + m.activeRepoFilter = []string{"/original"} + + // 
Request both repo + branch; branch is locked so the entire + // request should fail without mutating repo. + params, _ := json.Marshal(map[string]string{ + "repo": "/new", "branch": "main", + }) + updated, resp, _ := m.handleCtrlSetFilter(params) + require.False(t, resp.OK, "expected error for locked branch") + assert.Equal(t, []string{"/original"}, updated.activeRepoFilter, + "repo filter should not be mutated on error") +} + +func TestHandleCtrlSetFilter_ClearRepo(t *testing.T) { + m := newModel(testServerAddr, withExternalIODisabled()) + m.activeRepoFilter = []string{"/old"} + m.filterStack = []string{"repo"} + + empty := "" + params, _ := json.Marshal(map[string]*string{"repo": &empty}) + updated, resp, _ := m.handleCtrlSetFilter(params) + require.True(t, resp.OK, "expected OK, got error: %s", resp.Error) + assert.Nil(t, updated.activeRepoFilter) +} + +func TestHandleCtrlClearFilter(t *testing.T) { + m := newModel(testServerAddr, withExternalIODisabled()) + m.activeRepoFilter = []string{"/repo"} + m.activeBranchFilter = "main" + m.filterStack = []string{"repo", "branch"} + + params, _ := json.Marshal(map[string]bool{ + "repo": true, "branch": true, + }) + updated, resp, _ := m.handleCtrlClearFilter(params) + require.True(t, resp.OK, "expected OK, got error: %s", resp.Error) + assert.Nil(t, updated.activeRepoFilter) + assert.Empty(t, updated.activeBranchFilter) +} + +func TestHandleCtrlClearFilter_LockedBranchNoRepoMutation(t *testing.T) { + m := newModel(testServerAddr, withExternalIODisabled()) + m.lockedBranchFilter = true + m.activeRepoFilter = []string{"/repo"} + m.activeBranchFilter = "main" + m.filterStack = []string{"repo", "branch"} + + params, _ := json.Marshal(map[string]bool{ + "repo": true, "branch": true, + }) + updated, resp, cmd := m.handleCtrlClearFilter(params) + require.False(t, resp.OK, "expected error for locked branch") + assert.Equal(t, []string{"/repo"}, updated.activeRepoFilter, + "repo filter should not be mutated on error") + 
assert.Equal(t, "main", updated.activeBranchFilter, + "branch filter should not be mutated on error") + assert.Equal(t, []string{"repo", "branch"}, updated.filterStack, + "filter stack should not be mutated on error") + assert.Nil(t, cmd, "no cmd should be returned on error") +} + +func TestHandleCtrlSetHideClosed(t *testing.T) { + m := newModel(testServerAddr, withExternalIODisabled()) + m.hideClosed = false + + params, _ := json.Marshal(map[string]bool{"hide_closed": true}) + updated, resp, _ := m.handleCtrlSetHideClosed(params) + require.True(t, resp.OK, "expected OK, got error: %s", resp.Error) + assert.True(t, updated.hideClosed) +} + +func TestHandleCtrlSelectJob(t *testing.T) { + m := newModel(testServerAddr, withExternalIODisabled()) + m.jobs = []storage.ReviewJob{ + makeJob(10), makeJob(20), makeJob(30), + } + + params, _ := json.Marshal(map[string]int64{"job_id": 20}) + updated, resp, _ := m.handleCtrlSelectJob(params) + require.True(t, resp.OK, "expected OK, got error: %s", resp.Error) + assert.Equal(t, 1, updated.selectedIdx) + assert.EqualValues(t, 20, updated.selectedJobID) +} + +func TestHandleCtrlSelectJob_NotFound(t *testing.T) { + m := newModel(testServerAddr, withExternalIODisabled()) + m.jobs = []storage.ReviewJob{makeJob(1)} + + params, _ := json.Marshal(map[string]int64{"job_id": 999}) + _, resp, _ := m.handleCtrlSelectJob(params) + require.False(t, resp.OK, "expected error for missing job") +} + +func TestHandleCtrlSelectJob_HiddenByFilter(t *testing.T) { + m := newModel(testServerAddr, withExternalIODisabled()) + m.jobs = []storage.ReviewJob{ + makeJob(10, withRepoPath("/visible")), + makeJob(20, withRepoPath("/hidden")), + } + m.activeRepoFilter = []string{"/visible"} + + params, _ := json.Marshal(map[string]int64{"job_id": 20}) + _, resp, _ := m.handleCtrlSelectJob(params) + require.False(t, resp.OK, "expected error for hidden job") +} + +func TestHandleCtrlSelectJob_HiddenByClosed(t *testing.T) { + m := newModel(testServerAddr, 
withExternalIODisabled()) + m.hideClosed = true + m.jobs = []storage.ReviewJob{ + makeJob(10, withClosed(boolPtr(false))), + makeJob(20, withClosed(boolPtr(true))), + } + + params, _ := json.Marshal(map[string]int64{"job_id": 20}) + _, resp, _ := m.handleCtrlSelectJob(params) + require.False(t, resp.OK, "expected error for closed-hidden job") +} + +func TestHandleCtrlSetView(t *testing.T) { + m := newModel(testServerAddr, withExternalIODisabled()) + m.currentView = viewQueue + + params, _ := json.Marshal(map[string]string{"view": "queue"}) + updated, resp, _ := m.handleCtrlSetView(params) + require.True(t, resp.OK, "expected OK, got error: %s", resp.Error) + assert.Equal(t, viewQueue, updated.currentView) +} + +func TestHandleCtrlSetView_Invalid(t *testing.T) { + m := newModel(testServerAddr, withExternalIODisabled()) + + params, _ := json.Marshal(map[string]string{"view": "review"}) + _, resp, _ := m.handleCtrlSetView(params) + require.False(t, resp.OK, "expected error for unsettable view") +} + +func TestHandleCtrlSetView_TasksDisabled(t *testing.T) { + m := newModel(testServerAddr, withExternalIODisabled()) + m.tasksEnabled = false + + params, _ := json.Marshal(map[string]string{"view": "tasks"}) + _, resp, _ := m.handleCtrlSetView(params) + require.False(t, resp.OK, + "expected error when tasks workflow disabled") +} + +func TestHandleCtrlCloseReview(t *testing.T) { + m := newModel(testServerAddr, withExternalIODisabled()) + closed := false + m.jobs = []storage.ReviewJob{ + makeJob(5, withClosed(&closed)), + } + + closedTrue := true + params, _ := json.Marshal(map[string]any{ + "job_id": int64(5), + "closed": closedTrue, + }) + _, resp, cmd := m.handleCtrlCloseReview(params) + require.True(t, resp.OK, "expected OK, got error: %s", resp.Error) + assert.NotNil(t, cmd, "expected non-nil cmd for close operation") +} + +func TestHandleCtrlCloseReview_NonSelectedNoReflow(t *testing.T) { + m := newModel(testServerAddr, withExternalIODisabled()) + m.hideClosed = true + 
closed := false + m.jobs = []storage.ReviewJob{ + makeJob(1, withClosed(boolPtr(false))), + makeJob(2, withClosed(&closed)), + makeJob(3, withClosed(boolPtr(false))), + } + m.selectedIdx = 0 + m.selectedJobID = 1 + + // Close job 2, which is not the selected job + params, _ := json.Marshal(map[string]any{ + "job_id": int64(2), + "closed": true, + }) + updated, resp, _ := m.handleCtrlCloseReview(params) + require.True(t, resp.OK, "expected OK, got error: %s", resp.Error) + assert.EqualValues(t, 1, updated.selectedJobID, + "selection should remain on job 1") + assert.Equal(t, 0, updated.selectedIdx, + "selectedIdx should remain 0") +} + +func TestHandleCtrlCloseReview_NoReview(t *testing.T) { + m := newModel(testServerAddr, withExternalIODisabled()) + m.jobs = []storage.ReviewJob{ + makeJob(5, withStatus(storage.JobStatusRunning)), + } + + params, _ := json.Marshal(map[string]any{"job_id": int64(5)}) + _, resp, _ := m.handleCtrlCloseReview(params) + require.False(t, resp.OK, "expected error for job without review") +} + +func TestHandleCtrlCloseReview_ClearsSelectionWhenNoneVisible(t *testing.T) { + m := newModel(testServerAddr, withExternalIODisabled()) + m.hideClosed = true + // Only one visible job — closing it leaves no visible jobs. 
+ m.jobs = []storage.ReviewJob{ + makeJob(1, withClosed(boolPtr(false))), + } + m.selectedIdx = 0 + m.selectedJobID = 1 + + params, _ := json.Marshal(map[string]any{ + "job_id": int64(1), + "closed": true, + }) + updated, resp, _ := m.handleCtrlCloseReview(params) + require.True(t, resp.OK, "expected OK, got error: %s", resp.Error) + assert.Equal(t, -1, updated.selectedIdx, + "selectedIdx should be cleared when no visible jobs remain") + assert.EqualValues(t, 0, updated.selectedJobID, + "selectedJobID should be cleared when no visible jobs remain") +} + +func TestHandleCtrlCloseReview_RollbackRestoresSelection(t *testing.T) { + m := newModel(testServerAddr, withExternalIODisabled()) + m.hideClosed = true + m.jobs = []storage.ReviewJob{ + makeJob(1, withClosed(boolPtr(false))), + } + m.selectedIdx = 0 + m.selectedJobID = 1 + + params, _ := json.Marshal(map[string]any{ + "job_id": int64(1), + "closed": true, + }) + updated, resp, cmd := m.handleCtrlCloseReview(params) + require.True(t, resp.OK, "expected OK, got error: %s", resp.Error) + // Selection was cleared (no visible jobs remain). + require.Equal(t, -1, updated.selectedIdx) + + // Simulate server failure — the result handler should + // restore job state and reselect the original job. 
+ msg := cmd() + result, _ := updated.handleClosedResultMsg(msg.(closedResultMsg)) + restored := result.(model) + assert.EqualValues(t, 1, restored.selectedJobID, + "selection should be restored to original job on rollback") + assert.Equal(t, 0, restored.selectedIdx, + "selectedIdx should point to the restored job") +} + +func TestHandleCtrlCancelJob(t *testing.T) { + m := newModel(testServerAddr, withExternalIODisabled()) + m.jobs = []storage.ReviewJob{ + makeJob(7, withStatus(storage.JobStatusRunning)), + } + + params, _ := json.Marshal(map[string]int64{"job_id": 7}) + updated, resp, cmd := m.handleCtrlCancelJob(params) + require.True(t, resp.OK, "expected OK, got error: %s", resp.Error) + assert.NotNil(t, cmd, "expected non-nil cmd for cancel") + assert.Equal(t, storage.JobStatusCanceled, updated.jobs[0].Status) +} + +func TestHandleCtrlCancelJob_NonSelectedNoReflow(t *testing.T) { + m := newModel(testServerAddr, withExternalIODisabled()) + m.hideClosed = true + m.jobs = []storage.ReviewJob{ + makeJob(1, withClosed(boolPtr(false))), + makeJob(2, withStatus(storage.JobStatusRunning)), + makeJob(3, withClosed(boolPtr(false))), + } + m.selectedIdx = 0 + m.selectedJobID = 1 + + // Cancel job 2, which is not the selected job + params, _ := json.Marshal(map[string]int64{"job_id": 2}) + updated, resp, _ := m.handleCtrlCancelJob(params) + require.True(t, resp.OK, "expected OK, got error: %s", resp.Error) + assert.EqualValues(t, 1, updated.selectedJobID, + "selection should remain on job 1") + assert.Equal(t, 0, updated.selectedIdx, + "selectedIdx should remain 0") +} + +func TestHandleCtrlCancelJob_ClearsSelectionWhenNoneVisible(t *testing.T) { + m := newModel(testServerAddr, withExternalIODisabled()) + m.hideClosed = true + // Only one visible job — canceling it hides it under hideClosed. 
+ m.jobs = []storage.ReviewJob{ + makeJob(1, withStatus(storage.JobStatusRunning)), + } + m.selectedIdx = 0 + m.selectedJobID = 1 + + params, _ := json.Marshal(map[string]int64{"job_id": 1}) + updated, resp, _ := m.handleCtrlCancelJob(params) + require.True(t, resp.OK, "expected OK, got error: %s", resp.Error) + assert.Equal(t, -1, updated.selectedIdx, + "selectedIdx should be cleared when no visible jobs remain") + assert.EqualValues(t, 0, updated.selectedJobID, + "selectedJobID should be cleared when no visible jobs remain") +} + +func TestHandleCtrlCancelJob_RollbackRestoresSelection(t *testing.T) { + m := newModel(testServerAddr, withExternalIODisabled()) + m.hideClosed = true + m.jobs = []storage.ReviewJob{ + makeJob(1, withStatus(storage.JobStatusRunning)), + } + m.selectedIdx = 0 + m.selectedJobID = 1 + + params, _ := json.Marshal(map[string]int64{"job_id": 1}) + updated, resp, cmd := m.handleCtrlCancelJob(params) + require.True(t, resp.OK, "expected OK, got error: %s", resp.Error) + require.Equal(t, -1, updated.selectedIdx) + + // Simulate server failure — the result handler should + // restore status and reselect the original job. 
+ msg := cmd() + result, _ := updated.handleCancelResultMsg(msg.(cancelResultMsg)) + restored := result.(model) + assert.Equal(t, storage.JobStatusRunning, restored.jobs[0].Status, + "status should be rolled back") + assert.EqualValues(t, 1, restored.selectedJobID, + "selection should be restored on rollback") + assert.Equal(t, 0, restored.selectedIdx, + "selectedIdx should point to the restored job") +} + +func TestHandleCtrlCancelJob_WrongStatus(t *testing.T) { + m := newModel(testServerAddr, withExternalIODisabled()) + m.jobs = []storage.ReviewJob{ + makeJob(7, withStatus(storage.JobStatusDone)), + } + + params, _ := json.Marshal(map[string]int64{"job_id": 7}) + _, resp, _ := m.handleCtrlCancelJob(params) + require.False(t, resp.OK, "expected error for non-cancellable job") +} + +func TestHandleCancelKey_ClearsSelectionWhenNoneVisible(t *testing.T) { + m := newModel(testServerAddr, withExternalIODisabled()) + m.currentView = viewQueue + m.hideClosed = true + m.jobs = []storage.ReviewJob{ + makeJob(1, withStatus(storage.JobStatusRunning)), + } + m.selectedIdx = 0 + m.selectedJobID = 1 + + result, cmd := m.handleCancelKey() + updated := result.(model) + assert.Equal(t, storage.JobStatusCanceled, updated.jobs[0].Status) + assert.Equal(t, -1, updated.selectedIdx, + "selectedIdx should be cleared when no visible jobs remain") + assert.EqualValues(t, 0, updated.selectedJobID, + "selectedJobID should be cleared") + assert.NotNil(t, cmd, "expected non-nil cmd for cancel") +} + +func TestHandleCancelKey_RollbackRestoresSelection(t *testing.T) { + m := newModel(testServerAddr, withExternalIODisabled()) + m.currentView = viewQueue + m.hideClosed = true + m.jobs = []storage.ReviewJob{ + makeJob(1, withStatus(storage.JobStatusRunning)), + } + m.selectedIdx = 0 + m.selectedJobID = 1 + + result, cmd := m.handleCancelKey() + updated := result.(model) + require.Equal(t, -1, updated.selectedIdx) + + // Simulate server failure. 
+ msg := cmd() + rollback, _ := updated.handleCancelResultMsg(msg.(cancelResultMsg)) + restored := rollback.(model) + assert.Equal(t, storage.JobStatusRunning, restored.jobs[0].Status, + "status should be rolled back") + assert.EqualValues(t, 1, restored.selectedJobID, + "selection should be restored on rollback") + assert.Equal(t, 0, restored.selectedIdx, + "selectedIdx should point to the restored job") +} + +func TestHandleCtrlRerunJob(t *testing.T) { + m := newModel(testServerAddr, withExternalIODisabled()) + m.jobs = []storage.ReviewJob{ + makeJob(8, withStatus(storage.JobStatusFailed)), + } + + params, _ := json.Marshal(map[string]int64{"job_id": 8}) + updated, resp, cmd := m.handleCtrlRerunJob(params) + require.True(t, resp.OK, "expected OK, got error: %s", resp.Error) + assert.NotNil(t, cmd, "expected non-nil cmd for rerun") + assert.Equal(t, storage.JobStatusQueued, updated.jobs[0].Status) +} + +func TestHandleCtrlRerunJob_ClearsClosedAndVerdict(t *testing.T) { + verdict := "FAIL" + m := newModel(testServerAddr, withExternalIODisabled()) + m.hideClosed = true + m.jobs = []storage.ReviewJob{ + makeJob(8, + withStatus(storage.JobStatusDone), + withClosed(boolPtr(true)), + func(j *storage.ReviewJob) { j.Verdict = &verdict }, + ), + } + m.selectedIdx = 0 + m.selectedJobID = 8 + + params, _ := json.Marshal(map[string]int64{"job_id": 8}) + updated, resp, _ := m.handleCtrlRerunJob(params) + require.True(t, resp.OK, "expected OK, got error: %s", resp.Error) + assert.Equal(t, storage.JobStatusQueued, updated.jobs[0].Status) + assert.Nil(t, updated.jobs[0].Closed, + "Closed should be cleared on rerun") + assert.Nil(t, updated.jobs[0].Verdict, + "Verdict should be cleared on rerun") + // Job should now be visible under hideClosed + assert.True(t, updated.isJobVisible(updated.jobs[0]), + "rerun job should be visible with hideClosed") +} + +func TestHandleCtrlRerunJob_WrongStatus(t *testing.T) { + m := newModel(testServerAddr, withExternalIODisabled()) + m.jobs = 
[]storage.ReviewJob{ + makeJob(8, withStatus(storage.JobStatusRunning)), + } + + params, _ := json.Marshal(map[string]int64{"job_id": 8}) + _, resp, _ := m.handleCtrlRerunJob(params) + require.False(t, resp.OK, "expected error for non-rerunnable job") +} + +func TestHandleRerunKey_ClearsClosedAndVerdict(t *testing.T) { + verdict := "FAIL" + m := newModel(testServerAddr, withExternalIODisabled()) + m.currentView = viewQueue + m.hideClosed = true + m.jobs = []storage.ReviewJob{ + makeJob(10, + withStatus(storage.JobStatusDone), + withClosed(boolPtr(true)), + func(j *storage.ReviewJob) { j.Verdict = &verdict }, + ), + } + m.selectedIdx = 0 + m.selectedJobID = 10 + + result, _ := m.handleRerunKey() + updated := result.(model) + assert.Equal(t, storage.JobStatusQueued, updated.jobs[0].Status) + assert.Nil(t, updated.jobs[0].Closed, + "Closed should be cleared on rerun") + assert.Nil(t, updated.jobs[0].Verdict, + "Verdict should be cleared on rerun") + assert.True(t, updated.isJobVisible(updated.jobs[0]), + "rerun job should be visible with hideClosed") +} + +func TestRerunResultMsg_RestoresClosedOnFailure(t *testing.T) { + verdict := "FAIL" + closed := true + m := newModel(testServerAddr, withExternalIODisabled()) + m.jobs = []storage.ReviewJob{ + makeJob(10, withStatus(storage.JobStatusQueued)), + } + // Simulate the optimistic update already applied — job is + // queued with nil Closed/Verdict. The error path should + // restore the original values. 
+ msg := rerunResultMsg{ + jobID: 10, + oldState: storage.JobStatusDone, + oldClosed: &closed, + oldVerdict: &verdict, + err: fmt.Errorf("server error"), + } + result, _ := m.handleRerunResultMsg(msg) + updated := result.(model) + assert.Equal(t, storage.JobStatusDone, updated.jobs[0].Status) + require.NotNil(t, updated.jobs[0].Closed) + assert.True(t, *updated.jobs[0].Closed, + "Closed should be restored on failure") + require.NotNil(t, updated.jobs[0].Verdict) + assert.Equal(t, "FAIL", *updated.jobs[0].Verdict, + "Verdict should be restored on failure") +} + +func TestHandleCtrlQuit(t *testing.T) { + m := newModel(testServerAddr, withExternalIODisabled()) + + updated, resp, cmd := m.handleCtrlQuit() + assert.True(t, resp.OK, "expected OK response") + assert.NotNil(t, cmd, "expected non-nil cmd (tea.Quit)") + _ = updated +} + +func TestNoQuit_QKeyInQueueView(t *testing.T) { + m := newModel(testServerAddr, withExternalIODisabled(), withNoQuit()) + m.currentView = viewQueue + + result, cmd := m.Update(tea.KeyMsg{Type: tea.KeyRunes, Runes: []rune{'q'}}) + updated := result.(model) + assert.Equal(t, viewQueue, updated.currentView, + "q should be suppressed in queue view with noQuit") + assert.Nil(t, cmd, "no cmd expected when q is suppressed") +} + +func TestNoQuit_CtrlCStillQuits(t *testing.T) { + m := newModel(testServerAddr, withExternalIODisabled(), withNoQuit()) + m.currentView = viewQueue + + _, cmd := m.Update(tea.KeyMsg{Type: tea.KeyCtrlC}) + assert.NotNil(t, cmd, + "ctrl+c should still produce tea.Quit with noQuit") +} + +func TestNoQuit_QStillClosesModal(t *testing.T) { + m := newModel(testServerAddr, withExternalIODisabled(), withNoQuit()) + m.currentView = viewReview + m.currentReview = &storage.Review{Agent: "test", Output: "test"} + + result, _ := m.Update(tea.KeyMsg{Type: tea.KeyRunes, Runes: []rune{'q'}}) + updated := result.(model) + assert.Equal(t, viewQueue, updated.currentView, + "q in review view should close modal even with noQuit") +} + +func 
TestNoQuit_QueueHelpOmitsQuit(t *testing.T) { + normal := newModel(testServerAddr, withExternalIODisabled()) + noQuit := newModel(testServerAddr, withExternalIODisabled(), withNoQuit()) + + normalRows := normal.queueHelpRows() + noQuitRows := noQuit.queueHelpRows() + + hasQuit := func(rows [][]helpItem) bool { + for _, row := range rows { + for _, item := range row { + if item.key == "q" { + return true + } + } + } + return false + } + + assert.True(t, hasQuit(normalRows), + "normal mode should show q in queue help") + assert.False(t, hasQuit(noQuitRows), + "noQuit mode should omit q from queue help") +} + +func TestNoQuit_HelpViewOmitsQuit(t *testing.T) { + normal := helpLines(true, false) + noQuit := helpLines(true, true) + + contains := func(lines []string, substr string) bool { + for _, line := range lines { + if strings.Contains(line, substr) { + return true + } + } + return false + } + + assert.True(t, contains(normal, "Quit"), + "normal help should mention Quit") + assert.False(t, contains(noQuit, "Quit"), + "noQuit help should omit Quit") +} + +func TestNoQuit_TasksEmptyHelpOmitsQuit(t *testing.T) { + normal := newModel(testServerAddr, withExternalIODisabled()) + normal.currentView = viewTasks + normal.tasksEnabled = true + normal.fixJobs = nil // empty tasks view + + noQuitM := newModel(testServerAddr, withExternalIODisabled(), withNoQuit()) + noQuitM.currentView = viewTasks + noQuitM.tasksEnabled = true + noQuitM.fixJobs = nil + + normalOut := stripTestANSI(normal.renderTasksView()) + noQuitOut := stripTestANSI(noQuitM.renderTasksView()) + + assert.Contains(t, normalOut, "quit", + "normal tasks empty view should show quit") + assert.NotContains(t, noQuitOut, "quit", + "noQuit tasks empty view should omit quit") +} + +// --- Control message routing through Update() --- + +func TestUpdateRoutesControlQuery(t *testing.T) { + m := newModel(testServerAddr, withExternalIODisabled()) + m.jobs = []storage.ReviewJob{makeJob(1)} + + respCh := make(chan 
controlResponse, 1) + msg := controlQueryMsg{ + req: controlRequest{Command: "get-state"}, + respCh: respCh, + } + + updated, _ := m.Update(msg) + _, ok := updated.(model) + require.True(t, ok, "Update returned %T, want model", updated) + + select { + case resp := <-respCh: + assert.True(t, resp.OK, "expected OK, got error: %s", resp.Error) + default: + require.Fail(t, "no response received on channel") + } +} + +func TestUpdateRoutesControlMutation(t *testing.T) { + m := newModel(testServerAddr, withExternalIODisabled()) + m.jobs = []storage.ReviewJob{makeJob(1), makeJob(2)} + + params, _ := json.Marshal(map[string]int64{"job_id": 2}) + respCh := make(chan controlResponse, 1) + msg := controlMutationMsg{ + req: controlRequest{ + Command: "select-job", Params: params, + }, + respCh: respCh, + } + + updated, _ := m.Update(msg) + um := updated.(model) + assert.EqualValues(t, 2, um.selectedJobID) + + select { + case resp := <-respCh: + assert.True(t, resp.OK, "expected OK, got error: %s", resp.Error) + default: + require.Fail(t, "no response received on channel") + } +} + +// --- Integration test with real Unix socket --- + +func TestControlSocketRoundtrip(t *testing.T) { + tmpDir := t.TempDir() + socketPath := filepath.Join(tmpDir, "test.sock") + + ts := controlTestServer(t) + + m := newModel(ts.URL, withExternalIODisabled()) + + // Provide a pipe for stdin so the program doesn't block on TTY. + r, w, _ := os.Pipe() + p := tea.NewProgram(m, + tea.WithoutRenderer(), + tea.WithInput(r), + ) + runDone := make(chan struct{}) + go func() { _, _ = p.Run(); close(runDone) }() + t.Cleanup(func() { + // Close the write end first so bubbletea's readLoop + // sees EOF and exits before Kill's shutdown closes the + // cancel reader, avoiding a data race on the fd. + w.Close() + p.Kill() + <-runDone + // Bubbletea does not close custom readers passed via + // WithInput, so close the read end after Run exits. 
+ r.Close() + }) + + // Give the program time to complete Init() and reach its + // steady-state event loop. + time.Sleep(500 * time.Millisecond) + + cleanup, err := startControlListener(socketPath, p) + require.NoError(t, err, "startControlListener") + t.Cleanup(cleanup) + + // Test get-state + resp := sendControlCommand(t, socketPath, + `{"command":"get-state"}`) + require.True(t, resp.OK, "get-state failed: %s", resp.Error) + + // Test get-jobs + resp = sendControlCommand(t, socketPath, + `{"command":"get-jobs"}`) + require.True(t, resp.OK, "get-jobs failed: %s", resp.Error) + + // Test set-hide-closed mutation + resp = sendControlCommand(t, socketPath, + `{"command":"set-hide-closed","params":{"hide_closed":true}}`) + require.True(t, resp.OK, + "set-hide-closed failed: %s", resp.Error) + + // Verify state via get-state + resp = sendControlCommand(t, socketPath, + `{"command":"get-state"}`) + require.True(t, resp.OK, + "get-state after mutation: %s", resp.Error) + + // Test unknown command + resp = sendControlCommand(t, socketPath, + `{"command":"nope"}`) + assert.False(t, resp.OK, "expected error for unknown command") +} + +// controlTestServer returns an httptest.Server that handles all +// TUI Init() API requests with valid empty responses. 
+func controlTestServer(t *testing.T) *httptest.Server { + t.Helper() + ts := httptest.NewServer(http.HandlerFunc( + func(w http.ResponseWriter, r *http.Request) { + w.Header().Set("Content-Type", "application/json") + switch r.URL.Path { + case "/api/jobs": + json.NewEncoder(w).Encode(map[string]any{ + "jobs": []any{}, + "has_more": false, + "stats": storage.JobStats{}, + }) + case "/api/status": + json.NewEncoder(w).Encode( + storage.DaemonStatus{Version: "test"}, + ) + default: + json.NewEncoder(w).Encode(map[string]any{ + "ok": true, + }) + } + }, + )) + t.Cleanup(ts.Close) + return ts +} + +func TestControlSocketInvalidJSON(t *testing.T) { + tmpDir := t.TempDir() + socketPath := filepath.Join(tmpDir, "test.sock") + + // Invalid JSON is rejected at the socket handler level before + // Program.Send(), so no real HTTP server needed. + cleanup, err := startControlListener( + socketPath, newTestProgram(t), + ) + require.NoError(t, err, "startControlListener") + t.Cleanup(cleanup) + + resp := sendControlCommand(t, socketPath, `{not json}`) + assert.False(t, resp.OK, "expected error for invalid JSON") +} + +// newTestProgram creates a tea.Program backed by a mock HTTP server +// so that Init() commands complete quickly. 
+func newTestProgram(t *testing.T) *tea.Program { + t.Helper() + ts := httptest.NewServer(http.HandlerFunc( + func(w http.ResponseWriter, r *http.Request) { + json.NewEncoder(w).Encode(map[string]any{}) + }, + )) + t.Cleanup(ts.Close) + m := newModel(ts.URL, withExternalIODisabled()) + p := tea.NewProgram(m, tea.WithoutRenderer()) + go func() { _, _ = p.Run() }() + t.Cleanup(func() { p.Kill() }) + time.Sleep(100 * time.Millisecond) + return p +} + +// --- Stale socket safety tests --- + +func TestRemoveStaleSocket_NonexistentPath(t *testing.T) { + path := filepath.Join(t.TempDir(), "nosuch.sock") + assert.NoError(t, removeStaleSocket(path)) +} + +func TestRemoveStaleSocket_RegularFileRefused(t *testing.T) { + path := filepath.Join(t.TempDir(), "file.txt") + require.NoError(t, os.WriteFile(path, []byte("data"), 0600)) + + err := removeStaleSocket(path) + require.Error(t, err, "expected error for regular file") + assert.FileExists(t, path, "regular file should not be deleted") +} + +func TestRemoveStaleSocket_StaleSocketRemoved(t *testing.T) { + // Use a short path to stay within the Unix socket length limit. + path := shortSocketPath(t, "stale") + ln, err := net.Listen("unix", path) + require.NoError(t, err) + ln.Close() + + require.NoError(t, removeStaleSocket(path), + "expected stale socket to be removed") + assert.NoFileExists(t, path, "stale socket should be removed") +} + +func TestRemoveStaleSocket_LiveSocketRefused(t *testing.T) { + path := shortSocketPath(t, "live") + ln, err := net.Listen("unix", path) + require.NoError(t, err) + defer ln.Close() + + err = removeStaleSocket(path) + require.Error(t, err, "expected error for live socket") + assert.FileExists(t, path, "live socket should not be deleted") +} + +func TestEnsureSocketDir_CreatesParentDir(t *testing.T) { + base := shortSocketPath(t, "dir") + // Remove the file shortSocketPath created, use it as a subdir. 
+ os.Remove(base) + socketDir := filepath.Join(base, "sub") + t.Cleanup(func() { os.RemoveAll(base) }) + + require.NoError(t, ensureSocketDir(socketDir), + "expected ensureSocketDir to create dirs") + + _, err := os.Stat(socketDir) + require.NoError(t, err, "stat created dir") + // Permission assertions are Unix-only; see + // TestEnsureSocketDirTightensExistingDir in + // control_unix_test.go. +} + +func TestStartControlListener_CreatesCustomParentDir(t *testing.T) { + base := shortSocketPath(t, "cust") + os.Remove(base) + socketPath := filepath.Join(base, "sub", "t.sock") + t.Cleanup(func() { os.RemoveAll(base) }) + + cleanup, err := startControlListener( + socketPath, newTestProgram(t), + ) + require.NoError(t, err, + "custom socket path with missing parent should work") + cleanup() +} + +// shortSocketPath returns a temporary socket path short enough for +// the Unix socket 104-byte name limit on macOS. +func shortSocketPath(t *testing.T, prefix string) string { + t.Helper() + f, err := os.CreateTemp("", prefix) + require.NoError(t, err) + path := f.Name() + f.Close() + os.Remove(path) + t.Cleanup(func() { os.Remove(path) }) + return path +} + +// --- Runtime metadata tests --- + +func TestTUIRuntimeWriteAndRead(t *testing.T) { + assert := assert.New(t) + require := require.New(t) + + tmpDir := setupTuiTestEnv(t) + + info := TUIRuntimeInfo{ + PID: 12345, + SocketPath: filepath.Join(tmpDir, "tui.12345.sock"), + ServerAddr: "http://127.0.0.1:7373", + } + + require.NoError(WriteTUIRuntime(info)) + + runtimes, err := ListAllTUIRuntimes() + require.NoError(err) + require.Len(runtimes, 1) + assert.Equal(12345, runtimes[0].PID) + assert.Equal(info.SocketPath, runtimes[0].SocketPath) +} + +func TestCleanupStaleTUIRuntimes(t *testing.T) { + assert := assert.New(t) + require := require.New(t) + + setupTuiTestEnv(t) + + // Create a real stale socket (listener closed, PID dead). 
+ sockPath := shortSocketPath(t, "stale") + ln, err := net.Listen("unix", sockPath) + require.NoError(err) + ln.Close() // leave stale socket file behind + + info := TUIRuntimeInfo{ + PID: 999999999, + SocketPath: sockPath, + ServerAddr: "http://127.0.0.1:7373", + } + require.NoError(WriteTUIRuntime(info)) + + cleaned := CleanupStaleTUIRuntimes() + assert.Equal(1, cleaned) + + runtimes, _ := ListAllTUIRuntimes() + assert.Empty(runtimes) + assert.NoFileExists(sockPath, + "stale socket file should be removed") +} + +func TestCleanupStaleTUIRuntimes_NonSocketPreserved(t *testing.T) { + assert := assert.New(t) + require := require.New(t) + + tmpDir := setupTuiTestEnv(t) + + // Write metadata pointing to a regular file (not a socket). + // Cleanup should remove the metadata but not the file. + filePath := filepath.Join(tmpDir, "tui.999999999.sock") + require.NoError(os.WriteFile(filePath, []byte{}, 0600)) + + info := TUIRuntimeInfo{ + PID: 999999999, + SocketPath: filePath, + ServerAddr: "http://127.0.0.1:7373", + } + require.NoError(WriteTUIRuntime(info)) + + cleaned := CleanupStaleTUIRuntimes() + assert.Equal(1, cleaned) + + // Metadata should be gone. + runtimes, _ := ListAllTUIRuntimes() + assert.Empty(runtimes) + // The regular file must NOT have been deleted. 
+ assert.FileExists(filePath, + "regular file should not be removed") +} + +// --- Helper --- + +func sendControlCommand( + t *testing.T, socketPath, command string, +) controlResponse { + t.Helper() + + conn, err := net.DialTimeout( + "unix", socketPath, 2*time.Second, + ) + require.NoError(t, err, "dial socket") + defer conn.Close() + + require.NoError(t, + conn.SetDeadline(time.Now().Add(5*time.Second)), + "set deadline") + + _, err = fmt.Fprintf(conn, "%s\n", command) + require.NoError(t, err, "write command") + + buf := make([]byte, 64*1024) + n, err := conn.Read(buf) + require.NoError(t, err, "read response") + + var resp controlResponse + require.NoError(t, + json.Unmarshal(buf[:n], &resp), + "unmarshal response %q", string(buf[:n])) + return resp +} diff --git a/cmd/roborev/tui/control_types.go b/cmd/roborev/tui/control_types.go new file mode 100644 index 000000000..988f9e4e1 --- /dev/null +++ b/cmd/roborev/tui/control_types.go @@ -0,0 +1,152 @@ +package tui + +import ( + "encoding/json" + + tea "github.com/charmbracelet/bubbletea" + "github.com/roborev-dev/roborev/internal/storage" +) + +// controlRequest is the JSON envelope for incoming control commands. +type controlRequest struct { + Command string `json:"command"` + Params json.RawMessage `json:"params,omitempty"` +} + +// controlResponse is the JSON envelope for outgoing control responses. +type controlResponse struct { + OK bool `json:"ok"` + Error string `json:"error,omitempty"` + Data any `json:"data,omitempty"` +} + +// controlQueryMsg is sent through tea.Program.Send to query TUI state. +// The Update handler writes the response to respCh. +type controlQueryMsg struct { + req controlRequest + respCh chan<- controlResponse +} + +// controlMutationMsg is sent through tea.Program.Send to mutate TUI state. +// The Update handler writes the response to respCh and returns side-effect cmds. 
+type controlMutationMsg struct { + req controlRequest + respCh chan<- controlResponse +} + +// controlSocketReadyMsg is sent to the model after the control +// listener starts successfully, so the model can update runtime +// metadata on reconnect without advertising a dead socket. +type controlSocketReadyMsg struct { + socketPath string +} + +// stateSnapshot is the data payload for the get-state query. +type stateSnapshot struct { + View string `json:"view"` + RepoFilter []string `json:"repo_filter"` + BranchFilter string `json:"branch_filter"` + LockedRepo bool `json:"locked_repo"` + LockedBranch bool `json:"locked_branch"` + HideClosed bool `json:"hide_closed"` + SelectedJobID int64 `json:"selected_job_id"` + JobCount int `json:"job_count"` + VisibleJobCount int `json:"visible_job_count"` + Stats storage.JobStats `json:"stats"` +} + +// jobSnapshot is a lightweight job representation for the get-jobs query. +type jobSnapshot struct { + ID int64 `json:"id"` + Agent string `json:"agent"` + Status storage.JobStatus `json:"status"` + Repo string `json:"repo"` + Branch string `json:"branch"` + GitRef string `json:"git_ref"` + Subject string `json:"subject"` + Closed *bool `json:"closed,omitempty"` + Verdict *string `json:"verdict,omitempty"` +} + +// selectedSnapshot is the data payload for the get-selected query. +type selectedSnapshot struct { + Job *jobSnapshot `json:"job,omitempty"` + HasReview bool `json:"has_review"` +} + +// String returns the display name of a viewKind. 
+func (v viewKind) String() string { + switch v { + case viewQueue: + return "queue" + case viewReview: + return "review" + case viewKindPrompt: + return "prompt" + case viewFilter: + return "filter" + case viewKindComment: + return "comment" + case viewCommitMsg: + return "commit-msg" + case viewHelp: + return "help" + case viewLog: + return "log" + case viewTasks: + return "tasks" + case viewKindWorktreeConfirm: + return "worktree-confirm" + case viewPatch: + return "patch" + case viewColumnOptions: + return "column-options" + default: + return "unknown" + } +} + +// parseViewKind converts a string to a viewKind. +// Returns -1 if the string is not a recognized view. +func parseViewKind(s string) viewKind { + switch s { + case "queue": + return viewQueue + case "tasks": + return viewTasks + default: + return -1 + } +} + +// controlQueryCommands are commands that only read state. +var controlQueryCommands = map[string]bool{ + "get-state": true, + "get-filter": true, + "get-jobs": true, + "get-selected": true, +} + +// controlMutationCommands are commands that modify state. +var controlMutationCommands = map[string]bool{ + "set-filter": true, + "clear-filter": true, + "set-hide-closed": true, + "select-job": true, + "set-view": true, + "close-review": true, + "cancel-job": true, + "rerun-job": true, + "quit": true, +} + +// isControlCommand returns true and the message type for known commands. +func isControlCommand(cmd string) (isQuery bool, isMutation bool) { + return controlQueryCommands[cmd], controlMutationCommands[cmd] +} + +// Ensure control messages satisfy tea.Msg at compile time. 
+var ( + _ tea.Msg = controlQueryMsg{} + _ tea.Msg = controlMutationMsg{} +) diff --git a/cmd/roborev/tui/control_unix_test.go b/cmd/roborev/tui/control_unix_test.go new file mode 100644 index 000000000..f711a5595 --- /dev/null +++ b/cmd/roborev/tui/control_unix_test.go @@ -0,0 +1,113 @@ +//go:build !windows + +package tui + +import ( + "encoding/json" + "net" + "net/http" + "net/http/httptest" + "os" + "path/filepath" + "syscall" + "testing" + "time" + + tea "github.com/charmbracelet/bubbletea" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" +) + +func TestControlSocketPermissions(t *testing.T) { + tmpDir := t.TempDir() + socketPath := filepath.Join(tmpDir, "test.sock") + + cleanup, err := startControlListener( + socketPath, newTestProgramUnix(t), + ) + require.NoError(t, err, "startControlListener") + t.Cleanup(cleanup) + + info, err := os.Stat(socketPath) + require.NoError(t, err, "stat socket") + assert.NotZero(t, info.Mode().Type()&os.ModeSocket, + "expected socket type, got %s", info.Mode().Type()) + perm := info.Mode().Perm() + assert.Zero(t, perm&0077, + "socket permissions %o allow group/other access", perm) +} + +func TestEnsureSocketDirTightensExistingDir(t *testing.T) { + socketDir := t.TempDir() + + // Simulate a pre-existing data directory created with 0755. + require.NoError(t, os.Chmod(socketDir, 0755)) + + require.NoError(t, ensureSocketDir(socketDir)) + + di, err := os.Stat(socketDir) + require.NoError(t, err, "stat socket dir") + assert.Equal(t, os.FileMode(0700), di.Mode().Perm(), + "socket directory should be tightened to 0700") +} + +func TestRemoveStaleSocket_IncompatibleSocketRefused(t *testing.T) { + path := shortSocketPath(t, "dgram") + // Create a DGRAM socket -- dial with STREAM will fail with a + // non-ECONNREFUSED error, which should NOT be treated as stale. 
+ fd, err := syscall.Socket( + syscall.AF_UNIX, syscall.SOCK_DGRAM, 0, + ) + require.NoError(t, err, "create dgram socket fd") + defer syscall.Close(fd) + require.NoError(t, + syscall.Bind(fd, &syscall.SockaddrUnix{Name: path}), + "bind dgram socket") + t.Cleanup(func() { os.Remove(path) }) + + err = removeStaleSocket(path) + require.Error(t, err, + "expected error for incompatible socket") + assert.FileExists(t, path, + "incompatible socket should not be deleted") +} + +func TestCleanupDoesNotUnlinkSuccessorSocket(t *testing.T) { + socketPath := shortSocketPath(t, "succ") + + // Listener A binds. + lnA, err := net.Listen("unix", socketPath) + require.NoError(t, err, "bind listener A") + lnA.(*net.UnixListener).SetUnlinkOnClose(false) + + // Simulate the race window: A's cleanup removes its own + // socket, then B binds to the same path before A closes. + require.NoError(t, os.Remove(socketPath)) + lnB, err := net.Listen("unix", socketPath) + require.NoError(t, err, "bind listener B") + t.Cleanup(func() { lnB.Close() }) + + // A's Close must not unlink B's socket. Without + // SetUnlinkOnClose(false), this would delete B's file. + lnA.Close() + + assert.FileExists(t, socketPath, + "successor socket should survive predecessor close") +} + +// newTestProgramUnix creates a tea.Program for Unix-only tests. 
+func newTestProgramUnix(t *testing.T) *tea.Program { + t.Helper() + ts := httptest.NewServer(http.HandlerFunc( + func(w http.ResponseWriter, r *http.Request) { + json.NewEncoder(w).Encode(map[string]any{}) + }, + )) + t.Cleanup(ts.Close) + m := newModel(ts.URL, withExternalIODisabled()) + p := tea.NewProgram(m, tea.WithoutRenderer()) + go func() { _, _ = p.Run() }() + t.Cleanup(func() { p.Kill() }) + time.Sleep(100 * time.Millisecond) + return p +} diff --git a/cmd/roborev/tui/fetch.go b/cmd/roborev/tui/fetch.go index 3ae089406..4de84bb9b 100644 --- a/cmd/roborev/tui/fetch.go +++ b/cmd/roborev/tui/fetch.go @@ -193,6 +193,44 @@ func (m model) tryReconnect() tea.Cmd { } } +// fetchRepoNames fetches the unfiltered repo list and builds a +// display-name-to-root-paths mapping for control socket resolution. +func (m model) fetchRepoNames() tea.Cmd { + client := m.client + serverAddr := m.serverAddr + + return func() tea.Msg { + resp, err := client.Get(serverAddr + "/api/repos") + if err != nil { + return repoNamesMsg{} // non-fatal; map stays nil + } + defer resp.Body.Close() + if resp.StatusCode != http.StatusOK { + return repoNamesMsg{} + } + + var result struct { + Repos []struct { + Name string `json:"name"` + RootPath string `json:"root_path"` + } `json:"repos"` + } + if err := json.NewDecoder(resp.Body).Decode(&result); err != nil { + return repoNamesMsg{} + } + + names := make(map[string][]string) + for _, r := range result.Repos { + displayName := config.GetDisplayName(r.RootPath) + if displayName == "" { + displayName = r.Name + } + names[displayName] = append(names[displayName], r.RootPath) + } + return repoNamesMsg{names: names} + } +} + func (m model) fetchRepos() tea.Cmd { // Capture values for use in goroutine client := m.client @@ -255,7 +293,9 @@ func (m model) fetchRepos() tea.Cmd { for i, name := range displayNameOrder { repos[i] = *displayNameMap[name] } - return reposMsg{repos: repos} + filtered := activeBranchFilter != "" && + activeBranchFilter != 
branchNone + return reposMsg{repos: repos, branchFiltered: filtered} } } diff --git a/cmd/roborev/tui/handlers.go b/cmd/roborev/tui/handlers.go index 37f00ced7..9811c9765 100644 --- a/cmd/roborev/tui/handlers.go +++ b/cmd/roborev/tui/handlers.go @@ -92,7 +92,12 @@ func (m model) handleGlobalKey(msg tea.KeyMsg) (tea.Model, tea.Cmd) { return m.handleEnterKey() } switch msg.String() { - case "ctrl+c", "ctrl+d", "q": + case "ctrl+c", "ctrl+d": + return m.handleQuitKey() + case "q": + if m.noQuit && m.currentView == viewQueue { + return m, nil + } return m.handleQuitKey() case "home", "g": return m.handleHomeKey() diff --git a/cmd/roborev/tui/handlers_modal.go b/cmd/roborev/tui/handlers_modal.go index 1f140dcea..eed823fe4 100644 --- a/cmd/roborev/tui/handlers_modal.go +++ b/cmd/roborev/tui/handlers_modal.go @@ -269,6 +269,7 @@ func (m model) handleLogKey(msg tea.KeyMsg) (tea.Model, tea.Cmd) { m.logStreaming = false return m, m.cancelJob( job.ID, oldStatus, oldFinishedAt, + false, ) } } @@ -378,7 +379,12 @@ func (m model) handleTasksKey(msg tea.KeyMsg) (tea.Model, tea.Cmd) { } switch msg.String() { - case "ctrl+c", "ctrl+d", "q": + case "ctrl+c", "ctrl+d": + return m, tea.Quit + case "q": + if m.noQuit { + return m, nil + } return m, tea.Quit case "esc", "T": m.currentView = viewQueue @@ -434,7 +440,9 @@ func (m model) handleTasksKey(msg tea.KeyMsg) (tea.Model, tea.Cmd) { job.Status = storage.JobStatusCanceled now := time.Now() job.FinishedAt = &now - return m, m.cancelJob(job.ID, oldStatus, oldFinishedAt) + return m, m.cancelJob( + job.ID, oldStatus, oldFinishedAt, false, + ) } } return m, nil diff --git a/cmd/roborev/tui/handlers_msg.go b/cmd/roborev/tui/handlers_msg.go index 268a53d2b..05e31617a 100644 --- a/cmd/roborev/tui/handlers_msg.go +++ b/cmd/roborev/tui/handlers_msg.go @@ -4,6 +4,7 @@ import ( "errors" "fmt" "io" + "log" "slices" "time" @@ -232,11 +233,35 @@ func (m model) handleStatusMsg(msg statusMsg) (tea.Model, tea.Cmd) { return m, nil } +// 
handleRepoNamesMsg stores the display-name-to-root-paths mapping +// fetched from /api/repos at init, used by control socket set-filter. +func (m model) handleRepoNamesMsg( + msg repoNamesMsg, +) (tea.Model, tea.Cmd) { + if msg.names != nil { + m.repoNames = msg.names + } + return m, nil +} + // handleReposMsg processes repo list results for the filter modal. func (m model) handleReposMsg( msg reposMsg, ) (tea.Model, tea.Cmd) { m.consecutiveErrors = 0 + + // Refresh repoNames when the modal fetch was unfiltered (no + // branch constraint), so newly registered repos are picked up. + // Skip branch-filtered responses — they are a subset and would + // clobber the authoritative mapping. + if !msg.branchFiltered { + names := make(map[string][]string, len(msg.repos)) + for _, r := range msg.repos { + names[r.name] = r.rootPaths + } + m.repoNames = names + } + // Build filterTree from repos (all collapsed, no children) m.filterTree = make([]treeFilterNode, len(msg.repos)) for i, r := range msg.repos { @@ -610,6 +635,9 @@ func (m model) handleCancelResultMsg( if msg.err != nil { m.setJobStatus(msg.jobID, msg.oldState) m.setJobFinishedAt(msg.jobID, msg.oldFinishedAt) + if msg.restoreSelection { + m.selectJobByID(msg.jobID) + } m.err = msg.err } return m, nil @@ -624,6 +652,10 @@ func (m model) handleRerunResultMsg( m.setJobStartedAt(msg.jobID, msg.oldStartedAt) m.setJobFinishedAt(msg.jobID, msg.oldFinishedAt) m.setJobError(msg.jobID, msg.oldError) + m.mutateJob(msg.jobID, func(job *storage.ReviewJob) { + job.Closed = msg.oldClosed + job.Verdict = msg.oldVerdict + }) m.err = msg.err } return m, nil @@ -681,9 +713,24 @@ func (m model) handleReconnectMsg(msg reconnectMsg) (tea.Model, tea.Cmd) { if msg.version != "" { m.daemonVersion = msg.version } + // Update runtime metadata so external tools see the + // new daemon address after reconnect. 
+ if m.controlSocket != "" { + rtInfo := buildTUIRuntimeInfo( + m.controlSocket, m.serverAddr, + ) + if err := WriteTUIRuntime(rtInfo); err != nil { + log.Printf( + "warning: failed to update runtime info: %v", + err, + ) + } + } m.clearFetchFailed() m.loadingJobs = true - cmds := []tea.Cmd{m.fetchJobs(), m.fetchStatus()} + cmds := []tea.Cmd{ + m.fetchJobs(), m.fetchStatus(), m.fetchRepoNames(), + } if cmd := m.fetchUnloadedBranches(); cmd != nil { cmds = append(cmds, cmd) } diff --git a/cmd/roborev/tui/handlers_review.go b/cmd/roborev/tui/handlers_review.go index 5d84d8615..9ba284d68 100644 --- a/cmd/roborev/tui/handlers_review.go +++ b/cmd/roborev/tui/handlers_review.go @@ -104,6 +104,7 @@ func (m model) handleCancelKey() (tea.Model, tea.Cmd) { now := time.Now() job.FinishedAt = &now // Canceled jobs are hidden when hideClosed is active + restoreSelection := false if m.hideClosed { idx := m.findPrevVisibleJob(m.selectedIdx) if idx < 0 { @@ -115,9 +116,15 @@ func (m model) handleCancelKey() (tea.Model, tea.Cmd) { if idx >= 0 { m.selectedIdx = idx m.updateSelectedJobID() + } else { + m.selectedIdx = -1 + m.selectedJobID = 0 } + restoreSelection = true } - return m, m.cancelJob(job.ID, oldStatus, oldFinishedAt) + return m, m.cancelJob( + job.ID, oldStatus, oldFinishedAt, restoreSelection, + ) } return m, nil } @@ -128,15 +135,22 @@ func (m model) handleRerunKey() (tea.Model, tea.Cmd) { return m, nil } if job.Status == storage.JobStatusDone || job.Status == storage.JobStatusFailed || job.Status == storage.JobStatusCanceled { - oldStatus := job.Status - oldStartedAt := job.StartedAt - oldFinishedAt := job.FinishedAt - oldError := job.Error + snap := rerunSnapshot{ + jobID: job.ID, + oldStatus: job.Status, + oldStartedAt: job.StartedAt, + oldFinishedAt: job.FinishedAt, + oldError: job.Error, + oldClosed: job.Closed, + oldVerdict: job.Verdict, + } job.Status = storage.JobStatusQueued job.StartedAt = nil job.FinishedAt = nil job.Error = "" - return m, m.rerunJob(job.ID, 
oldStatus, oldStartedAt, oldFinishedAt, oldError) + job.Closed = nil + job.Verdict = nil + return m, m.rerunJob(snap) } return m, nil } diff --git a/cmd/roborev/tui/helpers_test.go b/cmd/roborev/tui/helpers_test.go index 273962d28..9de62a861 100644 --- a/cmd/roborev/tui/helpers_test.go +++ b/cmd/roborev/tui/helpers_test.go @@ -546,13 +546,13 @@ func TestQueueHelpRowsTasksWorkflowToggle(t *testing.T) { } func TestHelpLinesShowDisabledTasksShortcuts(t *testing.T) { - disabled := strings.Join(helpLines(false), "\n") + disabled := strings.Join(helpLines(false, false), "\n") assert.Contains(t, stripTestANSI(disabled), "Trigger fix for selected review (disabled)") assert.Contains(t, stripTestANSI(disabled), "Trigger fix (opens inline panel) (disabled)") assert.Contains(t, stripTestANSI(disabled), "Open Tasks view (disabled)") assert.Contains(t, stripTestANSI(disabled), "advanced.tasks_enabled = false") - enabled := strings.Join(helpLines(true), "\n") + enabled := strings.Join(helpLines(true, false), "\n") assert.NotContains(t, stripTestANSI(enabled), "(disabled)") } diff --git a/cmd/roborev/tui/processcheck_unix.go b/cmd/roborev/tui/processcheck_unix.go new file mode 100644 index 000000000..f50fcfd3f --- /dev/null +++ b/cmd/roborev/tui/processcheck_unix.go @@ -0,0 +1,29 @@ +//go:build !windows + +package tui + +import ( + "errors" + "os" + "syscall" +) + +// isProcessAlive checks whether a process with the given PID exists. +// Returns true when the process is running, even if owned by another +// user (EPERM). Only returns false when the process is confirmed +// gone (ESRCH or similar). +func isProcessAlive(pid int) bool { + if pid <= 0 { + return false + } + proc, err := os.FindProcess(pid) + if err != nil { + return false + } + err = proc.Signal(syscall.Signal(0)) + if err == nil { + return true + } + // EPERM means the process exists but belongs to another user. 
+ return errors.Is(err, syscall.EPERM) +} diff --git a/cmd/roborev/tui/processcheck_windows.go b/cmd/roborev/tui/processcheck_windows.go new file mode 100644 index 000000000..529e51bb2 --- /dev/null +++ b/cmd/roborev/tui/processcheck_windows.go @@ -0,0 +1,46 @@ +//go:build windows + +package tui + +import ( + "bytes" + "os" + "os/exec" + "path/filepath" + "strconv" +) + +// isProcessAlive checks whether a process with the given PID exists +// using tasklist, which works reliably on Windows (unlike signal 0). +func isProcessAlive(pid int) bool { + if pid <= 0 { + return false + } + pidStr := strconv.Itoa(pid) + tasklistExe := filepath.Join( + systemRoot(), "System32", "tasklist.exe", + ) + cmd := exec.Command( + tasklistExe, + "/FI", "PID eq "+pidStr, + "/FO", "CSV", + "/NH", + ) + out, err := cmd.Output() + if err != nil { + // Cannot determine — be conservative, assume alive. + return true + } + quoted := []byte("\"" + pidStr + "\"") + return len(out) > 0 && bytes.Contains(out, quoted) +} + +func systemRoot() string { + if sr := os.Getenv("SystemRoot"); sr != "" { + return sr + } + if wd := os.Getenv("WINDIR"); wd != "" { + return wd + } + return `C:\Windows` +} diff --git a/cmd/roborev/tui/render_log.go b/cmd/roborev/tui/render_log.go index abd21b70f..c9179dd79 100644 --- a/cmd/roborev/tui/render_log.go +++ b/cmd/roborev/tui/render_log.go @@ -113,7 +113,7 @@ func disabledHelpLine(key, desc string) string { return statusStyle.Render(formatHelpLine(key, desc+" (disabled)")) } -func helpLines(tasksEnabled bool) []string { +func helpLines(tasksEnabled, noQuit bool) []string { shortcuts := []struct { group string keys []struct{ key, desc string } @@ -200,10 +200,17 @@ func helpLines(tasksEnabled bool) []string { }, { group: "General", - keys: []struct{ key, desc string }{ - {"?", "Toggle this help"}, - {"q", "Quit (from queue view)"}, - }, + keys: func() []struct{ key, desc string } { + keys := []struct{ key, desc string }{ + {"?", "Toggle this help"}, + } + if 
!noQuit { + keys = append(keys, struct{ key, desc string }{ + "q", "Quit (from queue view)", + }) + } + return keys + }(), }, } @@ -245,7 +252,7 @@ func helpLines(tasksEnabled bool) []string { func (m model) helpMaxScroll() int { reservedLines := 3 // title + blank + help hint visibleLines := max(m.height-reservedLines, 5) - maxScroll := len(helpLines(m.tasksWorkflowEnabled())) - visibleLines + maxScroll := len(helpLines(m.tasksWorkflowEnabled(), m.noQuit)) - visibleLines if maxScroll < 0 { return 0 } @@ -257,7 +264,7 @@ func (m model) renderHelpView() string { b.WriteString(titleStyle.Render("Keyboard Shortcuts")) b.WriteString("\x1b[K\n\x1b[K\n") - allLines := helpLines(m.tasksWorkflowEnabled()) + allLines := helpLines(m.tasksWorkflowEnabled(), m.noQuit) // Calculate visible area: title(1) + blank(1) + help(1) reservedLines := 3 diff --git a/cmd/roborev/tui/render_queue.go b/cmd/roborev/tui/render_queue.go index 9c2cdfda2..238edfb38 100644 --- a/cmd/roborev/tui/render_queue.go +++ b/cmd/roborev/tui/render_queue.go @@ -48,7 +48,10 @@ func (m model) queueHelpRows() [][]helpItem { if m.tasksWorkflowEnabled() { row2 = append(row2, helpItem{"T", "tasks"}) } - row2 = append(row2, helpItem{"?", "help"}, helpItem{"q", "quit"}) + row2 = append(row2, helpItem{"?", "help"}) + if !m.noQuit { + row2 = append(row2, helpItem{"q", "quit"}) + } return [][]helpItem{row1, row2} } func (m model) queueHelpLines() int { diff --git a/cmd/roborev/tui/render_tasks.go b/cmd/roborev/tui/render_tasks.go index 9478fe108..69edd057d 100644 --- a/cmd/roborev/tui/render_tasks.go +++ b/cmd/roborev/tui/render_tasks.go @@ -156,9 +156,11 @@ func (m model) renderTasksView() string { if len(m.fixJobs) == 0 { b.WriteString("\n No fix tasks. 
Press F on a review to trigger a background fix.\n") b.WriteString("\n") - b.WriteString(renderHelpTable([][]helpItem{ - {{"T", "back to queue"}, {"F", "fix review"}, {"q", "quit"}}, - }, m.width)) + emptyHelp := []helpItem{{"T", "back to queue"}, {"F", "fix review"}} + if !m.noQuit { + emptyHelp = append(emptyHelp, helpItem{"q", "quit"}) + } + b.WriteString(renderHelpTable([][]helpItem{emptyHelp}, m.width)) b.WriteString("\x1b[K\x1b[J") return b.String() } diff --git a/cmd/roborev/tui/tui.go b/cmd/roborev/tui/tui.go index a66d78527..fec41db1c 100644 --- a/cmd/roborev/tui/tui.go +++ b/cmd/roborev/tui/tui.go @@ -7,6 +7,7 @@ import ( "net/http" neturl "net/url" "os" + "path/filepath" "regexp" "strings" "time" @@ -310,6 +311,11 @@ type model struct { // Display name cache (keyed by repo path) displayNames map[string]string + // Repo name lookup (display name → root paths), populated from + // /api/repos at init. Used by control socket set-filter to resolve + // display names to the root paths that the daemon API expects. 
+ repoNames map[string][]string + // Branch name cache (keyed by job ID) - caches derived branches to avoid repeated git calls branchNames map[int64]string @@ -368,8 +374,11 @@ type model struct { distractionFree bool // hide status line, headers, footer, scroll indicator clipboard ClipboardWriter - tasksEnabled bool // Enables advanced tasks workflow in the TUI - mouseEnabled bool // Enables mouse capture and mouse-driven interactions in the TUI + tasksEnabled bool // Enables advanced tasks workflow in the TUI + mouseEnabled bool // Enables mouse capture and mouse-driven interactions in the TUI + noQuit bool // Suppress keyboard quit (for managed TUI instances) + controlSocket string // Socket path for runtime metadata updates (empty if disabled) + ready chan struct{} // Closed on first Update; signals event loop is running // Review view navigation reviewFromView viewKind // View to return to when exiting review (queue or tasks) @@ -543,6 +552,8 @@ func newModel(serverAddr string, opts ...option) model { mdCache: newMarkdownCache(tabWidth), tasksEnabled: tasksEnabled, mouseEnabled: mouseEnabled, + noQuit: opt.noQuit, + ready: make(chan struct{}), colBordersOn: columnBorders, hiddenColumns: hiddenCols, columnOrder: colOrder, @@ -558,6 +569,7 @@ func (m model) Init() tea.Cmd { m.tick(), m.fetchJobs(), m.fetchStatus(), + m.fetchRepoNames(), m.checkForUpdate(), ) } @@ -675,6 +687,16 @@ func mouseCaptureCmd(v viewKind, mouseEnabled bool) tea.Cmd { } func (m model) Update(msg tea.Msg) (tea.Model, tea.Cmd) { + // Signal that the event loop is running (once). The control + // listener waits on this before accepting connections. 
+ if m.ready != nil { + select { + case <-m.ready: + default: + close(m.ready) + } + } + prevView := m.currentView var result tea.Model @@ -714,6 +736,8 @@ func (m model) Update(msg tea.Msg) (tea.Model, tea.Cmd) { result, cmd = m.handleCancelResultMsg(msg) case rerunResultMsg: result, cmd = m.handleRerunResultMsg(msg) + case repoNamesMsg: + result, cmd = m.handleRepoNamesMsg(msg) case reposMsg: result, cmd = m.handleReposMsg(msg) case repoBranchesMsg: @@ -751,6 +775,13 @@ func (m model) Update(msg tea.Msg) (tea.Model, tea.Cmd) { 5*time.Second, m.currentView, ) result = m + case controlSocketReadyMsg: + m.controlSocket = msg.socketPath + result = m + case controlQueryMsg: + result, cmd = m.handleControlQuery(msg) + case controlMutationMsg: + result, cmd = m.handleControlMutation(msg) default: // Unknown message types cannot change the view, so no mouse // toggle is needed. If a new message type is added that can @@ -820,9 +851,11 @@ func (m model) View() string { // Config holds resolved parameters for running the TUI. type Config struct { - ServerAddr string - RepoFilter string - BranchFilter string + ServerAddr string + RepoFilter string + BranchFilter string + ControlSocket string // Unix socket path for external control (default: auto) + NoQuit bool // Suppress keyboard quit (for managed TUI instances) } func programOptionsForModel(m model) []tea.ProgramOption { @@ -837,6 +870,9 @@ func programOptionsForModel(m model) []tea.ProgramOption { // Run starts the interactive TUI. 
func Run(cfg Config) error { + // Clean up sockets from dead TUI processes before starting + CleanupStaleTUIRuntimes() + var opts []option if cfg.RepoFilter != "" { opts = append(opts, withRepoFilter(cfg.RepoFilter)) @@ -844,12 +880,94 @@ func Run(cfg Config) error { if cfg.BranchFilter != "" { opts = append(opts, withBranchFilter(cfg.BranchFilter)) } + if cfg.NoQuit { + opts = append(opts, withNoQuit()) + } + // Resolve socket path before creating the model so the + // model knows its socket path for runtime metadata updates. + socketPath := cfg.ControlSocket + if socketPath == "" { + socketPath = defaultControlSocketPath() + // Only tighten directory permissions for the default + // managed directory. Custom --control-socket paths may + // point anywhere (e.g. /tmp, repo root) and mutating + // their parent would be destructive. + if err := ensureSocketDir( + filepath.Dir(socketPath), + ); err != nil { + log.Printf("warning: control socket disabled: %v", err) + socketPath = "" + } + } + m := newModel(cfg.ServerAddr, opts...) p := tea.NewProgram( m, programOptionsForModel(m)..., ) + + // Start the control listener after the event loop is running + // so that p.Send (used by control handlers) never blocks. The + // model closes m.ready on its first Update call. Skip if + // socketPath is empty (ensureSocketDir failed). + // cleanupDone is closed after socket/runtime cleanup finishes + // so Run() can wait for it before returning, preventing stale + // files if the process exits immediately after p.Run(). + // programDone is closed after p.Run() returns so the goroutine + // can unblock if the program exits before m.ready fires. + cleanupDone := make(chan struct{}) + programDone := make(chan struct{}) + if socketPath != "" { + go func() { + defer close(cleanupDone) + + // Wait for either the event loop to start or the + // program to exit (e.g. terminal init failure). 
+ select { + case <-m.ready: + case <-programDone: + return + } + + cleanup, err := startControlListener(socketPath, p) + if err != nil { + log.Printf( + "warning: control socket disabled: %v", err, + ) + return + } + + // Notify the model that the control socket is + // active so it can update runtime metadata on + // reconnect. This is deferred until after the + // listener succeeds to avoid advertising a + // socket that failed to bind. + p.Send(controlSocketReadyMsg{ + socketPath: socketPath, + }) + + rtInfo := buildTUIRuntimeInfo( + socketPath, cfg.ServerAddr, + ) + if err := WriteTUIRuntime(rtInfo); err != nil { + log.Printf( + "warning: failed to write TUI runtime info: %v", + err, + ) + } + + // Block until the program exits, then clean up. + p.Wait() + cleanup() + RemoveTUIRuntime() + }() + } else { + close(cleanupDone) + } + _, err := p.Run() + close(programDone) + <-cleanupDone return err } diff --git a/cmd/roborev/tui/types.go b/cmd/roborev/tui/types.go index e99328939..5a233f45b 100644 --- a/cmd/roborev/tui/types.go +++ b/cmd/roborev/tui/types.go @@ -146,10 +146,11 @@ type closedResultMsg struct { err error } type cancelResultMsg struct { - jobID int64 - oldState storage.JobStatus - oldFinishedAt *time.Time - err error + jobID int64 + oldState storage.JobStatus + oldFinishedAt *time.Time + restoreSelection bool + err error } type rerunResultMsg struct { jobID int64 @@ -157,6 +158,8 @@ type rerunResultMsg struct { oldStartedAt *time.Time oldFinishedAt *time.Time oldError string + oldClosed *bool + oldVerdict *string err error } type errMsg error @@ -174,7 +177,15 @@ type updateCheckMsg struct { isDevBuild bool // True if running a dev build } type reposMsg struct { - repos []repoFilterItem + repos []repoFilterItem + branchFiltered bool // true if fetched with a branch constraint +} + +// repoNamesMsg delivers the display-name-to-root-paths mapping from +// /api/repos, used by the control socket to resolve display names in +// set-filter commands. 
Fetched once at init. +type repoNamesMsg struct { + names map[string][]string // display name → root paths } type branchesMsg struct { backfillCount int // Number of branches successfully backfilled to the database @@ -290,6 +301,7 @@ func withAutoFilterRepo(repo string) option { type options struct { repoFilter string // --repo flag: lock filter to this repo path branchFilter string // --branch flag: lock filter to this branch + noQuit bool // --no-quit flag: suppress keyboard quit disableExternalIO bool // tests: disable daemon/config/git calls autoFilterRepo bool // tests: simulate auto_filter_repo config autoFilterBranch bool // tests: simulate auto_filter_branch config @@ -297,6 +309,11 @@ type options struct { cwdBranch string // tests: simulate detected branch } +// withNoQuit disables keyboard-initiated quit (q key). +func withNoQuit() option { + return func(o *options) { o.noQuit = true } +} + // errNoLog is a sentinel error returned when the job log API // returns 404 (job has no log output yet or was never started). var errNoLog = errors.New("no log available") diff --git a/cmd/roborev/tui_cmd.go b/cmd/roborev/tui_cmd.go index b29cbc021..96b003a3b 100644 --- a/cmd/roborev/tui_cmd.go +++ b/cmd/roborev/tui_cmd.go @@ -12,6 +12,8 @@ func tuiCmd() *cobra.Command { var addr string var repoFilter string var branchFilter string + var controlSocket string + var noQuit bool cmd := &cobra.Command{ Use: "tui", @@ -63,9 +65,11 @@ to the current branch. Use = syntax for explicit values: } return tui.Run(tui.Config{ - ServerAddr: addr, - RepoFilter: repoFilter, - BranchFilter: branchFilter, + ServerAddr: addr, + RepoFilter: repoFilter, + BranchFilter: branchFilter, + ControlSocket: controlSocket, + NoQuit: noQuit, }) }, } @@ -84,6 +88,14 @@ to the current branch. 
Use = syntax for explicit values: "lock filter to a branch (default: current branch)", ) cmd.Flag("branch").NoOptDefVal = "HEAD" + cmd.Flags().StringVar( + &controlSocket, "control-socket", "", + "Unix socket path for external control (default: auto)", + ) + cmd.Flags().BoolVar( + &noQuit, "no-quit", false, + "suppress keyboard quit (for managed TUI instances)", + ) return cmd }