Compare commits

...

5 Commits

Author SHA1 Message Date
Affaan Mustafa
148fc726cb feat(ecc2): implement live output streaming per agent (#774)
- PTY output capture via tokio::process with stdout/stderr piping
- Ring buffer (1000 lines) per session
- Output pane wired to show selected session with auto-scroll
- Broadcast channel for output events
2026-03-24 03:54:15 -07:00
Affaan Mustafa
67306c22cd test: align antigravity manifest expectations 2026-03-24 03:50:46 -07:00
Affaan Mustafa
b2407ab3f5 fix(ecc2): sync catalog counts for scaffold CI 2026-03-24 03:43:48 -07:00
Affaan Mustafa
00dce30d3b feat: scaffold ECC 2.0 Rust TUI — agentic IDE control plane
Initial scaffold for ECC 2.0, a terminal-native agentic IDE built with
Ratatui. Compiles to a 3.4MB single binary.

Core modules:
- Session manager with SQLite-backed state store
- TUI dashboard with split-pane layout (sessions, output, metrics)
- Worktree orchestration (auto-create per agent session)
- Observability with tool call risk scoring
- Inter-agent communication via SQLite mailbox
- Background daemon with heartbeat monitoring
- CLI with start/stop/sessions/status/daemon subcommands

Tech stack: Rust + Ratatui + Crossterm + Tokio + rusqlite + git2 + clap
2026-03-24 03:43:05 -07:00
Affaan Mustafa
df4f2df297 feat: add 6 gap-closing skills — browser QA, design system, product lens, canary watch, benchmark, safety guard
Closes competitive gaps with gstack:
- browser-qa: automated visual testing via browser MCP
- design-system: generate, audit, and detect AI slop in UI
- product-lens: product diagnostic, founder review, feature prioritization
- canary-watch: post-deploy monitoring with alert thresholds
- benchmark: performance baseline and regression detection
- safety-guard: prevent destructive operations in autonomous sessions
2026-03-23 04:31:17 -07:00
28 changed files with 4280 additions and 12 deletions

3
.gitignore vendored
View File

@@ -83,6 +83,9 @@ temp/
*.bak
*.backup
# Rust build artifacts
ecc2/target/
# Bootstrap pipeline outputs
# Generated lock files in tool subdirectories
.opencode/package-lock.json

View File

@@ -1,6 +1,6 @@
# Everything Claude Code (ECC) — Agent Instructions
This is a **production-ready AI coding plugin** providing 28 specialized agents, 119 skills, 60 commands, and automated hook workflows for software development.
This is a **production-ready AI coding plugin** providing 28 specialized agents, 125 skills, 60 commands, and automated hook workflows for software development.
**Version:** 1.9.0

View File

@@ -212,7 +212,7 @@ For manual install instructions see the README in the `rules/` folder.
/plugin list everything-claude-code@everything-claude-code
```
**That's it!** You now have access to 28 agents, 119 skills, and 60 commands.
**That's it!** You now have access to 28 agents, 125 skills, and 60 commands.
---
@@ -1085,7 +1085,7 @@ The configuration is automatically detected from `.opencode/opencode.json`.
|---------|-------------|----------|--------|
| Agents | ✅ 28 agents | ✅ 12 agents | **Claude Code leads** |
| Commands | ✅ 60 commands | ✅ 31 commands | **Claude Code leads** |
| Skills | ✅ 119 skills | ✅ 37 skills | **Claude Code leads** |
| Skills | ✅ 125 skills | ✅ 37 skills | **Claude Code leads** |
| Hooks | ✅ 8 event types | ✅ 11 events | **OpenCode has more!** |
| Rules | ✅ 29 rules | ✅ 13 instructions | **Claude Code leads** |
| MCP Servers | ✅ 14 servers | ✅ Full | **Full parity** |

2016
ecc2/Cargo.lock generated Normal file

File diff suppressed because it is too large Load Diff

52
ecc2/Cargo.toml Normal file
View File

@@ -0,0 +1,52 @@
[package]
name = "ecc-tui"
version = "0.1.0"
edition = "2021"
description = "ECC 2.0 — Agentic IDE control plane with TUI dashboard"
license = "MIT"
authors = ["Affaan Mustafa <me@affaanmustafa.com>"]
repository = "https://github.com/affaan-m/everything-claude-code"
[dependencies]
# TUI
ratatui = "0.29"
crossterm = "0.28"
# Async runtime
tokio = { version = "1", features = ["full"] }
# State store
rusqlite = { version = "0.32", features = ["bundled"] }
# Git integration
git2 = "0.19"
# Serialization
serde = { version = "1", features = ["derive"] }
serde_json = "1"
toml = "0.8"
# CLI
clap = { version = "4", features = ["derive"] }
# Logging & tracing
tracing = "0.1"
tracing-subscriber = { version = "0.3", features = ["env-filter"] }
# Error handling
anyhow = "1"
thiserror = "2"
# Time
chrono = { version = "0.4", features = ["serde"] }
# UUID for session IDs
uuid = { version = "1", features = ["v4"] }
# Directory paths
dirs = "6"
[profile.release]
lto = true
codegen-units = 1
strip = true

33
ecc2/src/comms/mod.rs Normal file
View File

@@ -0,0 +1,33 @@
use anyhow::Result;
use serde::{Deserialize, Serialize};
use crate::session::store::StateStore;
/// Message types for inter-agent communication.
///
/// Values are serialized to JSON by [`send`] and stored in the SQLite
/// mailbox (`messages` table); the variant also determines the
/// `msg_type` column written alongside the payload.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub enum MessageType {
    /// Task handoff from one agent to another
    TaskHandoff { task: String, context: String },
    /// Agent requesting information from another
    Query { question: String },
    /// Response to a query
    Response { answer: String },
    /// Notification of completion
    Completed { summary: String, files_changed: Vec<String> },
    /// Conflict detected (e.g., two agents editing the same file)
    Conflict { file: String, description: String },
}
/// Send a structured message between sessions.
///
/// The message is JSON-encoded and persisted through the state store's
/// mailbox together with a short type label derived from the variant.
pub fn send(db: &StateStore, from: &str, to: &str, msg: &MessageType) -> Result<()> {
    // Map the variant to the label stored in the `msg_type` column.
    let label = match msg {
        MessageType::TaskHandoff { .. } => "task_handoff",
        MessageType::Query { .. } => "query",
        MessageType::Response { .. } => "response",
        MessageType::Completed { .. } => "completed",
        MessageType::Conflict { .. } => "conflict",
    };
    let payload = serde_json::to_string(msg)?;
    db.send_message(from, to, &payload, label)?;
    Ok(())
}

54
ecc2/src/config/mod.rs Normal file
View File

@@ -0,0 +1,54 @@
use anyhow::Result;
use serde::{Deserialize, Serialize};
use std::path::PathBuf;
/// Runtime configuration for ECC 2.0, loaded from `~/.claude/ecc2.toml`
/// (see [`Config::load`]) or built from [`Config::default`].
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct Config {
    /// Path of the SQLite state database.
    pub db_path: PathBuf,
    /// Directory under which per-session git worktrees are created.
    pub worktree_root: PathBuf,
    /// Maximum number of concurrently running agent sessions.
    pub max_parallel_sessions: usize,
    /// Maximum number of concurrently existing worktrees.
    pub max_parallel_worktrees: usize,
    /// Seconds of inactivity after which the daemon marks a session failed.
    pub session_timeout_secs: u64,
    /// Interval in seconds between daemon health sweeps.
    pub heartbeat_interval_secs: u64,
    /// Agent type used when none is specified (e.g. "claude").
    pub default_agent: String,
    /// Color theme for the TUI.
    pub theme: Theme,
}
/// TUI color theme selection.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub enum Theme {
    Dark,
    Light,
}
impl Default for Config {
    /// Sensible defaults: state lives under `~/.claude`, worktrees under
    /// `/tmp/ecc-worktrees`, and the `claude` agent is used unless overridden.
    fn default() -> Self {
        // Fall back to the current directory if the home directory is unknown.
        let home = dirs::home_dir().unwrap_or_else(|| PathBuf::from("."));
        let db_path = home.join(".claude").join("ecc2.db");
        Self {
            db_path,
            worktree_root: PathBuf::from("/tmp/ecc-worktrees"),
            max_parallel_sessions: 8,
            max_parallel_worktrees: 6,
            session_timeout_secs: 3600,
            heartbeat_interval_secs: 30,
            default_agent: String::from("claude"),
            theme: Theme::Dark,
        }
    }
}
impl Config {
    /// Load configuration from `~/.claude/ecc2.toml`.
    ///
    /// # Errors
    /// Returns an error if the file exists but cannot be read or parsed.
    /// A missing file is not an error: defaults are returned instead.
    pub fn load() -> Result<Self> {
        let config_path = dirs::home_dir()
            .unwrap_or_else(|| PathBuf::from("."))
            .join(".claude")
            .join("ecc2.toml");
        // Read directly instead of `exists()` + read: avoids the
        // check-then-use race and treats only NotFound as "use defaults".
        match std::fs::read_to_string(&config_path) {
            Ok(content) => Ok(toml::from_str(&content)?),
            Err(e) if e.kind() == std::io::ErrorKind::NotFound => Ok(Config::default()),
            Err(e) => Err(e.into()),
        }
    }
}

94
ecc2/src/main.rs Normal file
View File

@@ -0,0 +1,94 @@
mod config;
mod session;
mod tui;
mod worktree;
mod observability;
mod comms;
use anyhow::Result;
use clap::Parser;
use tracing_subscriber::EnvFilter;
// Top-level CLI parser. NOTE: clap derives help text from doc comments,
// so only regular comments are used here to avoid changing CLI output.
#[derive(Parser, Debug)]
#[command(name = "ecc", version, about = "ECC 2.0 — Agentic IDE control plane")]
struct Cli {
    // Optional subcommand; `None` falls through to the TUI dashboard.
    #[command(subcommand)]
    command: Option<Commands>,
}
// Subcommands of the `ecc` binary. The `///` comments below are clap help
// strings (runtime-visible), so they are left untouched.
#[derive(clap::Subcommand, Debug)]
enum Commands {
    /// Launch the TUI dashboard
    Dashboard,
    /// Start a new agent session
    Start {
        /// Task description for the agent
        #[arg(short, long)]
        task: String,
        /// Agent type (claude, codex, custom)
        #[arg(short, long, default_value = "claude")]
        agent: String,
        /// Create a dedicated worktree for this session
        #[arg(short, long)]
        worktree: bool,
    },
    /// List active sessions
    Sessions,
    /// Show session details
    Status {
        /// Session ID or alias
        session_id: Option<String>,
    },
    /// Stop a running session
    Stop {
        /// Session ID or alias
        session_id: String,
    },
    /// Run as background daemon
    Daemon,
}
// Entry point: initialise tracing, load config, open the state store, then
// dispatch the parsed subcommand (defaulting to the TUI dashboard).
#[tokio::main]
async fn main() -> Result<()> {
    // Log verbosity is controlled by the RUST_LOG environment variable.
    tracing_subscriber::fmt()
        .with_env_filter(EnvFilter::from_default_env())
        .init();
    let cli = Cli::parse();
    let cfg = config::Config::load()?;
    let db = session::store::StateStore::open(&cfg.db_path)?;
    match cli.command {
        // No subcommand behaves like `ecc dashboard`.
        Some(Commands::Dashboard) | None => {
            tui::app::run(db, cfg).await?;
        }
        Some(Commands::Start { task, agent, worktree: use_worktree }) => {
            let session_id = session::manager::create_session(
                &db, &cfg, &task, &agent, use_worktree,
            ).await?;
            println!("Session started: {session_id}");
        }
        Some(Commands::Sessions) => {
            let sessions = session::manager::list_sessions(&db)?;
            for s in sessions {
                println!("{} [{}] {}", s.id, s.state, s.task);
            }
        }
        Some(Commands::Status { session_id }) => {
            // "latest" is interpreted downstream by the session manager.
            let id = session_id.unwrap_or_else(|| "latest".to_string());
            let status = session::manager::get_status(&db, &id)?;
            println!("{status}");
        }
        Some(Commands::Stop { session_id }) => {
            session::manager::stop_session(&db, &session_id).await?;
            println!("Session stopped: {session_id}");
        }
        Some(Commands::Daemon) => {
            println!("Starting ECC daemon...");
            session::daemon::run(db, cfg).await?;
        }
    }
    Ok(())
}

View File

@@ -0,0 +1,54 @@
use anyhow::Result;
use serde::{Deserialize, Serialize};
use crate::session::store::StateStore;
/// A single tool invocation observed during an agent session, with a
/// pre-computed risk score (see [`ToolCallEvent::compute_risk`]).
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct ToolCallEvent {
    /// Session that performed the call.
    pub session_id: String,
    /// Name of the tool (e.g. "Bash", "Write", "Edit").
    pub tool_name: String,
    /// Truncated/derived summary of the tool input.
    pub input_summary: String,
    /// Truncated/derived summary of the tool output.
    pub output_summary: String,
    /// Wall-clock duration of the call in milliseconds.
    pub duration_ms: u64,
    /// Risk score in [0.0, 1.0]; higher means more dangerous.
    pub risk_score: f64,
}
impl ToolCallEvent {
    /// Compute risk score based on tool type and input patterns.
    ///
    /// Scoring is additive: each tool carries a base risk, and Bash inputs
    /// containing dangerous substrings add fixed weights. The result is
    /// clamped to at most 1.0.
    pub fn compute_risk(tool_name: &str, input: &str) -> f64 {
        // Base risk by tool: destructive tools start higher.
        let mut total: f64 = match tool_name {
            "Bash" => 0.3,
            "Write" => 0.2,
            "Edit" => 0.1,
            _ => 0.05,
        };
        // Dangerous substring groups only apply to shell commands.
        if tool_name == "Bash" {
            let pattern_weights: [(&[&str], f64); 3] = [
                (&["rm -rf", "--force"], 0.4),
                (&["git push", "git reset"], 0.3),
                (&["sudo", "chmod 777"], 0.5),
            ];
            for (patterns, weight) in pattern_weights {
                if patterns.iter().any(|p| input.contains(p)) {
                    total += weight;
                }
            }
        }
        total.min(1.0)
    }
}
/// Persist a tool-call event by routing it through the SQLite mailbox.
///
/// The event is JSON-encoded and addressed to the synthetic "observability"
/// recipient with msg_type "tool_call", reusing the messages table rather
/// than a dedicated log path.
pub fn log_tool_call(db: &StateStore, event: &ToolCallEvent) -> Result<()> {
    db.send_message(
        &event.session_id,
        "observability",
        &serde_json::to_string(event)?,
        "tool_call",
    )?;
    Ok(())
}

View File

@@ -0,0 +1,46 @@
use anyhow::Result;
use std::time::Duration;
use tokio::time;
use super::store::StateStore;
use super::SessionState;
use crate::config::Config;
/// Background daemon that monitors sessions, handles heartbeats,
/// and cleans up stale resources.
///
/// Loops forever: every `heartbeat_interval_secs` it sweeps all sessions
/// and fails those idle longer than `session_timeout_secs`. Sweep errors
/// are logged, not fatal, so one bad pass cannot kill the daemon.
pub async fn run(db: StateStore, cfg: Config) -> Result<()> {
    tracing::info!("ECC daemon started");
    let heartbeat_interval = Duration::from_secs(cfg.heartbeat_interval_secs);
    let timeout = Duration::from_secs(cfg.session_timeout_secs);
    loop {
        if let Err(e) = check_sessions(&db, timeout) {
            tracing::error!("Session check failed: {e}");
        }
        time::sleep(heartbeat_interval).await;
    }
}
/// Sweep all running sessions and mark any that have been silent longer
/// than `timeout` as failed (based on their `updated_at` heartbeat).
fn check_sessions(db: &StateStore, timeout: Duration) -> Result<()> {
    for session in db
        .list_sessions()?
        .into_iter()
        .filter(|s| s.state == SessionState::Running)
    {
        // Negative durations (clock skew) collapse to zero.
        let idle_for = chrono::Utc::now()
            .signed_duration_since(session.updated_at)
            .to_std()
            .unwrap_or(Duration::ZERO);
        if idle_for > timeout {
            tracing::warn!("Session {} timed out after {:?}", session.id, idle_for);
            db.update_state(&session.id, &SessionState::Failed)?;
        }
    }
    Ok(())
}

View File

@@ -0,0 +1,76 @@
use anyhow::Result;
use std::fmt;
use super::{Session, SessionMetrics, SessionState};
use super::store::StateStore;
use crate::config::Config;
use crate::worktree;
/// Create and persist a new agent session, optionally with a dedicated
/// git worktree. Returns the short (8-character) session id.
pub async fn create_session(
    db: &StateStore,
    cfg: &Config,
    task: &str,
    agent_type: &str,
    use_worktree: bool,
) -> Result<String> {
    // Short ids keep CLI output readable; 8 hex chars of a v4 UUID.
    let session_id = uuid::Uuid::new_v4().to_string()[..8].to_string();
    let timestamp = chrono::Utc::now();
    let worktree_info = match use_worktree {
        true => Some(worktree::create_for_session(&session_id, cfg)?),
        false => None,
    };
    db.insert_session(&Session {
        id: session_id.clone(),
        task: task.to_string(),
        agent_type: agent_type.to_string(),
        state: SessionState::Pending,
        worktree: worktree_info,
        created_at: timestamp,
        updated_at: timestamp,
        metrics: SessionMetrics::default(),
    })?;
    Ok(session_id)
}
/// List all sessions known to the store (most recently updated first,
/// per the store's query ordering).
pub fn list_sessions(db: &StateStore) -> Result<Vec<Session>> {
    db.list_sessions()
}
/// Look up a session by id (or id prefix, per the store's matching rules)
/// and wrap it for display.
///
/// # Errors
/// Fails with "Session not found" if no session matches `id`.
pub fn get_status(db: &StateStore, id: &str) -> Result<SessionStatus> {
    match db.get_session(id)? {
        Some(session) => Ok(SessionStatus(session)),
        None => Err(anyhow::anyhow!("Session not found: {id}")),
    }
}
/// Mark a session as stopped in the store.
///
/// NOTE(review): this only flips the persisted state; it does not appear
/// to signal or kill a running process here — presumably the daemon or
/// runtime reacts to the state change. Confirm against callers.
pub async fn stop_session(db: &StateStore, id: &str) -> Result<()> {
    db.update_state(id, &SessionState::Stopped)?;
    Ok(())
}
/// Newtype wrapper giving [`Session`] a human-readable, multi-line
/// `Display` rendering for the `ecc status` command.
pub struct SessionStatus(Session);
impl fmt::Display for SessionStatus {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        let s = &self.0;
        writeln!(f, "Session: {}", s.id)?;
        writeln!(f, "Task: {}", s.task)?;
        writeln!(f, "Agent: {}", s.agent_type)?;
        writeln!(f, "State: {}", s.state)?;
        // Worktree details only when one was created for this session.
        if let Some(ref wt) = s.worktree {
            writeln!(f, "Branch: {}", wt.branch)?;
            writeln!(f, "Worktree: {}", wt.path.display())?;
        }
        writeln!(f, "Tokens: {}", s.metrics.tokens_used)?;
        writeln!(f, "Tools: {}", s.metrics.tool_calls)?;
        writeln!(f, "Files: {}", s.metrics.files_changed)?;
        writeln!(f, "Cost: ${:.4}", s.metrics.cost_usd)?;
        writeln!(f, "Created: {}", s.created_at)?;
        // Final line intentionally has no trailing newline.
        write!(f, "Updated: {}", s.updated_at)
    }
}

61
ecc2/src/session/mod.rs Normal file
View File

@@ -0,0 +1,61 @@
pub mod daemon;
pub mod manager;
pub mod output;
pub mod runtime;
pub mod store;
use chrono::{DateTime, Utc};
use serde::{Deserialize, Serialize};
use std::fmt;
use std::path::PathBuf;
/// A single agent session and its persisted state.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct Session {
    /// Short unique id (8 chars of a v4 UUID).
    pub id: String,
    /// Task description given at creation time.
    pub task: String,
    /// Agent backend (e.g. "claude", "codex", "custom").
    pub agent_type: String,
    /// Current lifecycle state.
    pub state: SessionState,
    /// Dedicated git worktree, if one was requested.
    pub worktree: Option<WorktreeInfo>,
    /// Creation timestamp (UTC).
    pub created_at: DateTime<Utc>,
    /// Last activity timestamp (UTC); doubles as the daemon heartbeat.
    pub updated_at: DateTime<Utc>,
    /// Usage counters accumulated over the session.
    pub metrics: SessionMetrics,
}
/// Lifecycle state of a session; stored in SQLite as the lowercase token
/// produced by its `Display` impl.
#[derive(Debug, Clone, Serialize, Deserialize, PartialEq)]
pub enum SessionState {
    Pending,
    Running,
    Idle,
    Completed,
    Failed,
    Stopped,
}
impl fmt::Display for SessionState {
    /// Render the state as the lowercase token persisted in the database.
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        let token = match self {
            SessionState::Pending => "pending",
            SessionState::Running => "running",
            SessionState::Idle => "idle",
            SessionState::Completed => "completed",
            SessionState::Failed => "failed",
            SessionState::Stopped => "stopped",
        };
        f.write_str(token)
    }
}
/// Metadata about the git worktree attached to a session.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct WorktreeInfo {
    /// Filesystem location of the worktree checkout.
    pub path: PathBuf,
    /// Branch checked out in the worktree.
    pub branch: String,
    /// Branch the worktree branch was forked from.
    pub base_branch: String,
}
/// Usage counters for a session; all fields default to zero.
#[derive(Debug, Clone, Default, Serialize, Deserialize)]
pub struct SessionMetrics {
    /// Total LLM tokens consumed.
    pub tokens_used: u64,
    /// Number of tool invocations.
    pub tool_calls: u64,
    /// Number of files touched by the agent.
    pub files_changed: u32,
    /// Wall-clock runtime in seconds.
    pub duration_secs: u64,
    /// Estimated spend in US dollars.
    pub cost_usd: f64,
}

149
ecc2/src/session/output.rs Normal file
View File

@@ -0,0 +1,149 @@
use std::collections::{HashMap, VecDeque};
use std::sync::{Arc, Mutex, MutexGuard};
use serde::{Deserialize, Serialize};
use tokio::sync::broadcast;
/// Maximum number of output lines retained per session, both in the
/// in-memory ring buffer and in the `session_output` table.
pub const OUTPUT_BUFFER_LIMIT: usize = 1000;
/// Which child-process stream a line of output came from.
#[derive(Debug, Clone, Copy, PartialEq, Eq, Serialize, Deserialize)]
pub enum OutputStream {
    Stdout,
    Stderr,
}
impl OutputStream {
    /// Canonical token used for persistence and display.
    pub fn as_str(self) -> &'static str {
        if self == Self::Stderr {
            "stderr"
        } else {
            "stdout"
        }
    }
    /// Parse a persisted token; anything other than "stderr" is treated
    /// as stdout so unknown values degrade gracefully.
    pub fn from_db_value(value: &str) -> Self {
        if value == "stderr" {
            Self::Stderr
        } else {
            Self::Stdout
        }
    }
}
/// One captured line of session output, tagged with its source stream.
#[derive(Debug, Clone, PartialEq, Eq, Serialize, Deserialize)]
pub struct OutputLine {
    pub stream: OutputStream,
    pub text: String,
}
/// Broadcast payload emitted whenever a line is pushed for a session.
#[derive(Debug, Clone, PartialEq, Eq)]
pub struct OutputEvent {
    pub session_id: String,
    pub line: OutputLine,
}
/// Shared, clonable store of per-session output ring buffers plus a
/// broadcast channel for live output events. Clones share the same
/// underlying buffers (Arc) and sender.
#[derive(Clone)]
pub struct SessionOutputStore {
    // Maximum lines retained per session (>= 1).
    capacity: usize,
    // session id -> ring buffer of its most recent lines.
    buffers: Arc<Mutex<HashMap<String, VecDeque<OutputLine>>>>,
    // Fan-out channel for live OutputEvents; receivers may lag and miss events.
    tx: broadcast::Sender<OutputEvent>,
}
impl Default for SessionOutputStore {
    /// A store with the standard per-session limit of [`OUTPUT_BUFFER_LIMIT`] lines.
    fn default() -> Self {
        Self::new(OUTPUT_BUFFER_LIMIT)
    }
}
impl SessionOutputStore {
    /// Create a store whose per-session ring buffers hold at most
    /// `capacity` lines (clamped to at least 1).
    pub fn new(capacity: usize) -> Self {
        let cap = capacity.max(1);
        // The broadcast channel needs some slack even for tiny capacities.
        let (sender, _initial_rx) = broadcast::channel(cap.max(16));
        Self {
            capacity: cap,
            buffers: Arc::new(Mutex::new(HashMap::new())),
            tx: sender,
        }
    }
    /// Subscribe to live output events from every session.
    pub fn subscribe(&self) -> broadcast::Receiver<OutputEvent> {
        self.tx.subscribe()
    }
    /// Append a line to a session's ring buffer and broadcast it.
    pub fn push_line(&self, session_id: &str, stream: OutputStream, text: impl Into<String>) {
        let entry = OutputLine {
            stream,
            text: text.into(),
        };
        {
            // Scope the lock so it is released before broadcasting.
            let mut map = self.lock_buffers();
            let ring = map.entry(session_id.to_string()).or_default();
            ring.push_back(entry.clone());
            let overflow = ring.len().saturating_sub(self.capacity);
            if overflow > 0 {
                ring.drain(..overflow);
            }
        }
        // Send fails only when there are no subscribers; that is fine.
        let _ = self.tx.send(OutputEvent {
            session_id: session_id.to_string(),
            line: entry,
        });
    }
    /// Replace a session's buffer wholesale (e.g. when rehydrating from the
    /// database), keeping only the most recent `capacity` lines.
    pub fn replace_lines(&self, session_id: &str, lines: Vec<OutputLine>) {
        let mut ring: VecDeque<OutputLine> = lines.into_iter().collect();
        let overflow = ring.len().saturating_sub(self.capacity);
        if overflow > 0 {
            ring.drain(..overflow);
        }
        self.lock_buffers().insert(session_id.to_string(), ring);
    }
    /// Snapshot of a session's buffered lines, oldest first; empty when the
    /// session has produced no output.
    pub fn lines(&self, session_id: &str) -> Vec<OutputLine> {
        self.lock_buffers()
            .get(session_id)
            .map_or_else(Vec::new, |ring| ring.iter().cloned().collect())
    }
    /// Lock the buffer map, recovering from a poisoned mutex: the data is
    /// plain lines, so a panic mid-write cannot leave it inconsistent.
    fn lock_buffers(&self) -> MutexGuard<'_, HashMap<String, VecDeque<OutputLine>>> {
        match self.buffers.lock() {
            Ok(guard) => guard,
            Err(poisoned) => poisoned.into_inner(),
        }
    }
}
#[cfg(test)]
mod tests {
    use super::{OutputStream, SessionOutputStore};
    // Oldest lines must be evicted once the buffer exceeds its capacity.
    #[test]
    fn ring_buffer_keeps_most_recent_lines() {
        let store = SessionOutputStore::new(3);
        store.push_line("session-1", OutputStream::Stdout, "line-1");
        store.push_line("session-1", OutputStream::Stdout, "line-2");
        store.push_line("session-1", OutputStream::Stdout, "line-3");
        store.push_line("session-1", OutputStream::Stdout, "line-4");
        let lines = store.lines("session-1");
        let texts: Vec<_> = lines.iter().map(|line| line.text.as_str()).collect();
        assert_eq!(texts, vec!["line-2", "line-3", "line-4"]);
    }
    // Each push must fan out an OutputEvent to existing subscribers.
    #[tokio::test]
    async fn pushing_output_broadcasts_events() {
        let store = SessionOutputStore::new(8);
        let mut rx = store.subscribe();
        store.push_line("session-1", OutputStream::Stderr, "problem");
        let event = rx.recv().await.expect("broadcast event");
        assert_eq!(event.session_id, "session-1");
        assert_eq!(event.line.stream, OutputStream::Stderr);
        assert_eq!(event.line.text, "problem");
    }
}

176
ecc2/src/session/runtime.rs Normal file
View File

@@ -0,0 +1,176 @@
use std::path::{Path, PathBuf};
use std::process::ExitStatus;
use std::process::Stdio;
use anyhow::{Context, Result};
use tokio::io::{AsyncBufReadExt, AsyncRead, BufReader};
use tokio::process::Command;
use super::output::{OutputStream, SessionOutputStore};
use super::store::StateStore;
use super::SessionState;
/// Spawn `command`, stream its stdout/stderr into `output_store` and the
/// database, and track the session's lifecycle state.
///
/// The session is set to Running once the child spawns, then to Completed
/// or Failed from the exit status; any error on the way forces Failed
/// (best-effort). Returns the child's exit status.
pub async fn capture_command_output(
    db_path: PathBuf,
    session_id: String,
    mut command: Command,
    output_store: SessionOutputStore,
) -> Result<ExitStatus> {
    // Inner async block funnels every `?` failure into one place so the
    // cleanup below can mark the session Failed on any error.
    let result = async {
        let mut child = command
            .stdout(Stdio::piped())
            .stderr(Stdio::piped())
            .spawn()
            .with_context(|| format!("Failed to start process for session {}", session_id))?;
        update_session_state(&db_path, &session_id, SessionState::Running)?;
        let stdout = child.stdout.take().context("Child stdout was not piped")?;
        let stderr = child.stderr.take().context("Child stderr was not piped")?;
        // Drain both pipes concurrently so neither can fill and stall the child.
        let stdout_task = tokio::spawn(capture_stream(
            db_path.clone(),
            session_id.clone(),
            stdout,
            OutputStream::Stdout,
            output_store.clone(),
        ));
        let stderr_task = tokio::spawn(capture_stream(
            db_path.clone(),
            session_id.clone(),
            stderr,
            OutputStream::Stderr,
            output_store,
        ));
        let status = child.wait().await?;
        // Double `?`: join error (panic/cancel) then the capture Result.
        stdout_task.await??;
        stderr_task.await??;
        let final_state = if status.success() {
            SessionState::Completed
        } else {
            SessionState::Failed
        };
        update_session_state(&db_path, &session_id, final_state)?;
        Ok(status)
    }
    .await;
    if result.is_err() {
        // Best effort: ignore a secondary failure while recording Failed.
        let _ = update_session_state(&db_path, &session_id, SessionState::Failed);
    }
    result
}
/// Read `reader` line-by-line, pushing each line into the in-memory store
/// (which also broadcasts it) and appending it to the database.
async fn capture_stream<R>(
    db_path: PathBuf,
    session_id: String,
    reader: R,
    stream: OutputStream,
    output_store: SessionOutputStore,
) -> Result<()>
where
    R: AsyncRead + Unpin,
{
    let mut lines = BufReader::new(reader).lines();
    while let Some(line) = lines.next_line().await? {
        output_store.push_line(&session_id, stream, line.clone());
        append_output_line(&db_path, &session_id, stream, &line)?;
    }
    Ok(())
}
/// Persist one output line for a session.
///
/// NOTE(review): this opens a fresh SQLite connection per line, presumably
/// so each spawned capture task stays independent of any shared handle;
/// for chatty processes a pooled/shared connection would cut overhead —
/// worth confirming against the store's threading model.
fn append_output_line(
    db_path: &Path,
    session_id: &str,
    stream: OutputStream,
    line: &str,
) -> Result<()> {
    let db = StateStore::open(db_path)?;
    db.append_output_line(session_id, stream, line)?;
    Ok(())
}
/// Update a session's lifecycle state via a short-lived SQLite connection
/// (opened per call; see note on `append_output_line`).
fn update_session_state(db_path: &Path, session_id: &str, state: SessionState) -> Result<()> {
    let db = StateStore::open(db_path)?;
    db.update_state(session_id, &state)?;
    Ok(())
}
#[cfg(test)]
mod tests {
    use std::collections::HashSet;
    use std::env;
    use anyhow::Result;
    use chrono::Utc;
    use tokio::process::Command;
    use uuid::Uuid;
    use super::capture_command_output;
    use crate::session::output::{SessionOutputStore, OUTPUT_BUFFER_LIMIT};
    use crate::session::store::StateStore;
    use crate::session::{Session, SessionMetrics, SessionState};
    // End-to-end: spawn a shell writing to both streams, then verify the
    // final session state, persisted lines, and broadcast events.
    #[tokio::test]
    async fn capture_command_output_persists_lines_and_events() -> Result<()> {
        // Unique temp DB per run so parallel test invocations don't collide.
        let db_path = env::temp_dir().join(format!("ecc2-runtime-{}.db", Uuid::new_v4()));
        let db = StateStore::open(&db_path)?;
        let session_id = "session-1".to_string();
        let now = Utc::now();
        db.insert_session(&Session {
            id: session_id.clone(),
            task: "stream output".to_string(),
            agent_type: "test".to_string(),
            state: SessionState::Pending,
            worktree: None,
            created_at: now,
            updated_at: now,
            metrics: SessionMetrics::default(),
        })?;
        let output_store = SessionOutputStore::default();
        let mut rx = output_store.subscribe();
        // "alpha" goes to stdout, "beta" to stderr.
        let mut command = Command::new("/bin/sh");
        command
            .arg("-c")
            .arg("printf 'alpha\\n'; printf 'beta\\n' >&2");
        let status =
            capture_command_output(db_path.clone(), session_id.clone(), command, output_store)
                .await?;
        assert!(status.success());
        let db = StateStore::open(&db_path)?;
        let session = db
            .get_session(&session_id)?
            .expect("session should still exist");
        assert_eq!(session.state, SessionState::Completed);
        // Stream interleaving is nondeterministic, so compare as a set.
        let lines = db.get_output_lines(&session_id, OUTPUT_BUFFER_LIMIT)?;
        let texts: HashSet<_> = lines.iter().map(|line| line.text.as_str()).collect();
        assert_eq!(lines.len(), 2);
        assert!(texts.contains("alpha"));
        assert!(texts.contains("beta"));
        let mut events = Vec::new();
        while let Ok(event) = rx.try_recv() {
            events.push(event.line.text);
        }
        assert_eq!(events.len(), 2);
        assert!(events.iter().any(|line| line == "alpha"));
        assert!(events.iter().any(|line| line == "beta"));
        let _ = std::fs::remove_file(db_path);
        Ok(())
    }
}

309
ecc2/src/session/store.rs Normal file
View File

@@ -0,0 +1,309 @@
use anyhow::Result;
use rusqlite::Connection;
use std::path::Path;
use std::time::Duration;
use super::output::{OutputLine, OutputStream, OUTPUT_BUFFER_LIMIT};
use super::{Session, SessionMetrics, SessionState};
/// SQLite-backed persistence for sessions, tool logs, inter-agent
/// messages, and captured output. One connection per instance.
pub struct StateStore {
    conn: Connection,
}
impl StateStore {
    /// Open (or create) the database at `path` and ensure the schema exists.
    pub fn open(path: &Path) -> Result<Self> {
        let conn = Connection::open(path)?;
        // Several processes (CLI, TUI, daemon, capture tasks) share this
        // file; wait up to 5s on a locked database instead of failing.
        conn.busy_timeout(Duration::from_secs(5))?;
        let store = Self { conn };
        store.init_schema()?;
        Ok(store)
    }
    /// Create all tables and indexes if absent (idempotent).
    fn init_schema(&self) -> Result<()> {
        self.conn.execute_batch(
            "
            CREATE TABLE IF NOT EXISTS sessions (
                id TEXT PRIMARY KEY,
                task TEXT NOT NULL,
                agent_type TEXT NOT NULL,
                state TEXT NOT NULL DEFAULT 'pending',
                worktree_path TEXT,
                worktree_branch TEXT,
                worktree_base TEXT,
                tokens_used INTEGER DEFAULT 0,
                tool_calls INTEGER DEFAULT 0,
                files_changed INTEGER DEFAULT 0,
                duration_secs INTEGER DEFAULT 0,
                cost_usd REAL DEFAULT 0.0,
                created_at TEXT NOT NULL,
                updated_at TEXT NOT NULL
            );
            CREATE TABLE IF NOT EXISTS tool_log (
                id INTEGER PRIMARY KEY AUTOINCREMENT,
                session_id TEXT NOT NULL REFERENCES sessions(id),
                tool_name TEXT NOT NULL,
                input_summary TEXT,
                output_summary TEXT,
                duration_ms INTEGER,
                risk_score REAL DEFAULT 0.0,
                timestamp TEXT NOT NULL
            );
            CREATE TABLE IF NOT EXISTS messages (
                id INTEGER PRIMARY KEY AUTOINCREMENT,
                from_session TEXT NOT NULL,
                to_session TEXT NOT NULL,
                content TEXT NOT NULL,
                msg_type TEXT NOT NULL DEFAULT 'info',
                read INTEGER DEFAULT 0,
                timestamp TEXT NOT NULL
            );
            CREATE TABLE IF NOT EXISTS session_output (
                id INTEGER PRIMARY KEY AUTOINCREMENT,
                session_id TEXT NOT NULL REFERENCES sessions(id),
                stream TEXT NOT NULL,
                line TEXT NOT NULL,
                timestamp TEXT NOT NULL
            );
            CREATE INDEX IF NOT EXISTS idx_sessions_state ON sessions(state);
            CREATE INDEX IF NOT EXISTS idx_tool_log_session ON tool_log(session_id);
            CREATE INDEX IF NOT EXISTS idx_messages_to ON messages(to_session, read);
            CREATE INDEX IF NOT EXISTS idx_session_output_session
                ON session_output(session_id, id);
            ",
        )?;
        Ok(())
    }
    /// Insert a new session row. Metrics columns rely on their schema
    /// defaults (all zero) and are written later by `update_metrics`.
    pub fn insert_session(&self, session: &Session) -> Result<()> {
        self.conn.execute(
            "INSERT INTO sessions (id, task, agent_type, state, worktree_path, worktree_branch, worktree_base, created_at, updated_at)
             VALUES (?1, ?2, ?3, ?4, ?5, ?6, ?7, ?8, ?9)",
            rusqlite::params![
                session.id,
                session.task,
                session.agent_type,
                session.state.to_string(),
                session.worktree.as_ref().map(|w| w.path.to_string_lossy().to_string()),
                session.worktree.as_ref().map(|w| w.branch.clone()),
                session.worktree.as_ref().map(|w| w.base_branch.clone()),
                session.created_at.to_rfc3339(),
                session.updated_at.to_rfc3339(),
            ],
        )?;
        Ok(())
    }
    /// Set a session's state and refresh its `updated_at` heartbeat.
    pub fn update_state(&self, session_id: &str, state: &SessionState) -> Result<()> {
        self.conn.execute(
            "UPDATE sessions SET state = ?1, updated_at = ?2 WHERE id = ?3",
            rusqlite::params![
                state.to_string(),
                chrono::Utc::now().to_rfc3339(),
                session_id,
            ],
        )?;
        Ok(())
    }
    /// Overwrite a session's usage counters and refresh `updated_at`.
    pub fn update_metrics(&self, session_id: &str, metrics: &SessionMetrics) -> Result<()> {
        self.conn.execute(
            "UPDATE sessions SET tokens_used = ?1, tool_calls = ?2, files_changed = ?3, duration_secs = ?4, cost_usd = ?5, updated_at = ?6 WHERE id = ?7",
            rusqlite::params![
                metrics.tokens_used,
                metrics.tool_calls,
                metrics.files_changed,
                metrics.duration_secs,
                metrics.cost_usd,
                chrono::Utc::now().to_rfc3339(),
                session_id,
            ],
        )?;
        Ok(())
    }
    /// Load every session, most recently updated first.
    pub fn list_sessions(&self) -> Result<Vec<Session>> {
        let mut stmt = self.conn.prepare(
            "SELECT id, task, agent_type, state, worktree_path, worktree_branch, worktree_base,
                    tokens_used, tool_calls, files_changed, duration_secs, cost_usd,
                    created_at, updated_at
             FROM sessions ORDER BY updated_at DESC",
        )?;
        let sessions = stmt
            .query_map([], |row| {
                // Unknown state tokens degrade to Pending rather than erroring.
                let state_str: String = row.get(3)?;
                let state = match state_str.as_str() {
                    "running" => SessionState::Running,
                    "idle" => SessionState::Idle,
                    "completed" => SessionState::Completed,
                    "failed" => SessionState::Failed,
                    "stopped" => SessionState::Stopped,
                    _ => SessionState::Pending,
                };
                // A worktree exists iff worktree_path is non-NULL.
                let worktree_path: Option<String> = row.get(4)?;
                let worktree = worktree_path.map(|p| super::WorktreeInfo {
                    path: std::path::PathBuf::from(p),
                    branch: row.get::<_, String>(5).unwrap_or_default(),
                    base_branch: row.get::<_, String>(6).unwrap_or_default(),
                });
                let created_str: String = row.get(12)?;
                let updated_str: String = row.get(13)?;
                Ok(Session {
                    id: row.get(0)?,
                    task: row.get(1)?,
                    agent_type: row.get(2)?,
                    state,
                    worktree,
                    // Unparseable timestamps fall back to the Unix epoch.
                    created_at: chrono::DateTime::parse_from_rfc3339(&created_str)
                        .unwrap_or_default()
                        .with_timezone(&chrono::Utc),
                    updated_at: chrono::DateTime::parse_from_rfc3339(&updated_str)
                        .unwrap_or_default()
                        .with_timezone(&chrono::Utc),
                    metrics: SessionMetrics {
                        tokens_used: row.get(7)?,
                        tool_calls: row.get(8)?,
                        files_changed: row.get(9)?,
                        duration_secs: row.get(10)?,
                        cost_usd: row.get(11)?,
                    },
                })
            })?
            .collect::<Result<Vec<_>, _>>()?;
        Ok(sessions)
    }
    /// Find a session by exact id or id prefix. Scans the full session
    /// list, so it is O(n) in the number of sessions — fine at this scale.
    pub fn get_session(&self, id: &str) -> Result<Option<Session>> {
        let sessions = self.list_sessions()?;
        Ok(sessions
            .into_iter()
            .find(|s| s.id == id || s.id.starts_with(id)))
    }
    /// Append a message to the inter-agent mailbox (unread by default).
    pub fn send_message(&self, from: &str, to: &str, content: &str, msg_type: &str) -> Result<()> {
        self.conn.execute(
            "INSERT INTO messages (from_session, to_session, content, msg_type, timestamp)
             VALUES (?1, ?2, ?3, ?4, ?5)",
            rusqlite::params![from, to, content, msg_type, chrono::Utc::now().to_rfc3339()],
        )?;
        Ok(())
    }
    /// Append one output line, prune the session's output down to the most
    /// recent OUTPUT_BUFFER_LIMIT rows, and refresh the session heartbeat.
    pub fn append_output_line(
        &self,
        session_id: &str,
        stream: OutputStream,
        line: &str,
    ) -> Result<()> {
        let now = chrono::Utc::now().to_rfc3339();
        self.conn.execute(
            "INSERT INTO session_output (session_id, stream, line, timestamp)
             VALUES (?1, ?2, ?3, ?4)",
            rusqlite::params![session_id, stream.as_str(), line, now],
        )?;
        // Keep only the newest OUTPUT_BUFFER_LIMIT rows for this session;
        // rows whose id is outside that window are deleted.
        self.conn.execute(
            "DELETE FROM session_output
             WHERE session_id = ?1
               AND id NOT IN (
                   SELECT id
                   FROM session_output
                   WHERE session_id = ?1
                   ORDER BY id DESC
                   LIMIT ?2
               )",
            rusqlite::params![session_id, OUTPUT_BUFFER_LIMIT as i64],
        )?;
        self.conn.execute(
            "UPDATE sessions SET updated_at = ?1 WHERE id = ?2",
            rusqlite::params![chrono::Utc::now().to_rfc3339(), session_id],
        )?;
        Ok(())
    }
    /// Fetch up to `limit` of the newest output lines for a session,
    /// returned in chronological (oldest-first) order via the inner
    /// DESC-limited subquery re-sorted ASC.
    pub fn get_output_lines(&self, session_id: &str, limit: usize) -> Result<Vec<OutputLine>> {
        let mut stmt = self.conn.prepare(
            "SELECT stream, line
             FROM (
                 SELECT id, stream, line
                 FROM session_output
                 WHERE session_id = ?1
                 ORDER BY id DESC
                 LIMIT ?2
             )
             ORDER BY id ASC",
        )?;
        let lines = stmt
            .query_map(rusqlite::params![session_id, limit as i64], |row| {
                let stream: String = row.get(0)?;
                let text: String = row.get(1)?;
                Ok(OutputLine {
                    stream: OutputStream::from_db_value(&stream),
                    text,
                })
            })?
            .collect::<Result<Vec<_>, _>>()?;
        Ok(lines)
    }
}
#[cfg(test)]
mod tests {
    use std::env;
    use anyhow::Result;
    use chrono::Utc;
    use uuid::Uuid;
    use super::StateStore;
    use crate::session::output::{OutputStream, OUTPUT_BUFFER_LIMIT};
    use crate::session::{Session, SessionMetrics, SessionState};
    // Writing more than OUTPUT_BUFFER_LIMIT lines must keep only the
    // newest window, in order.
    #[test]
    fn append_output_line_keeps_latest_buffer_window() -> Result<()> {
        // Unique temp DB so parallel runs don't collide.
        let db_path = env::temp_dir().join(format!("ecc2-store-{}.db", Uuid::new_v4()));
        let db = StateStore::open(&db_path)?;
        let now = Utc::now();
        db.insert_session(&Session {
            id: "session-1".to_string(),
            task: "buffer output".to_string(),
            agent_type: "claude".to_string(),
            state: SessionState::Running,
            worktree: None,
            created_at: now,
            updated_at: now,
            metrics: SessionMetrics::default(),
        })?;
        // Overfill by 5 lines to force pruning of the oldest 5.
        for index in 0..(OUTPUT_BUFFER_LIMIT + 5) {
            db.append_output_line("session-1", OutputStream::Stdout, &format!("line-{index}"))?;
        }
        let lines = db.get_output_lines("session-1", OUTPUT_BUFFER_LIMIT)?;
        let texts: Vec<_> = lines.iter().map(|line| line.text.as_str()).collect();
        assert_eq!(lines.len(), OUTPUT_BUFFER_LIMIT);
        assert_eq!(texts.first().copied(), Some("line-5"));
        let expected_last_line = format!("line-{}", OUTPUT_BUFFER_LIMIT + 4);
        assert_eq!(texts.last().copied(), Some(expected_last_line.as_str()));
        let _ = std::fs::remove_file(db_path);
        Ok(())
    }
}

52
ecc2/src/tui/app.rs Normal file
View File

@@ -0,0 +1,52 @@
use anyhow::Result;
use crossterm::{
event::{self, Event, KeyCode, KeyModifiers},
execute,
terminal::{disable_raw_mode, enable_raw_mode, EnterAlternateScreen, LeaveAlternateScreen},
};
use ratatui::prelude::*;
use std::io;
use std::time::Duration;
use super::dashboard::Dashboard;
use crate::config::Config;
use crate::session::store::StateStore;
pub async fn run(db: StateStore, cfg: Config) -> Result<()> {
enable_raw_mode()?;
let mut stdout = io::stdout();
execute!(stdout, EnterAlternateScreen)?;
let backend = CrosstermBackend::new(stdout);
let mut terminal = Terminal::new(backend)?;
let mut dashboard = Dashboard::new(db, cfg);
loop {
terminal.draw(|frame| dashboard.render(frame))?;
if event::poll(Duration::from_millis(250))? {
if let Event::Key(key) = event::read()? {
match (key.modifiers, key.code) {
(KeyModifiers::CONTROL, KeyCode::Char('c')) => break,
(_, KeyCode::Char('q')) => break,
(_, KeyCode::Tab) => dashboard.next_pane(),
(KeyModifiers::SHIFT, KeyCode::BackTab) => dashboard.prev_pane(),
(_, KeyCode::Char('j')) | (_, KeyCode::Down) => dashboard.scroll_down(),
(_, KeyCode::Char('k')) | (_, KeyCode::Up) => dashboard.scroll_up(),
(_, KeyCode::Char('n')) => dashboard.new_session(),
(_, KeyCode::Char('s')) => dashboard.stop_selected(),
(_, KeyCode::Char('r')) => dashboard.refresh(),
(_, KeyCode::Char('?')) => dashboard.toggle_help(),
_ => {}
}
}
}
dashboard.tick().await;
}
disable_raw_mode()?;
execute!(terminal.backend_mut(), LeaveAlternateScreen)?;
Ok(())
}

512
ecc2/src/tui/dashboard.rs Normal file
View File

@@ -0,0 +1,512 @@
use std::collections::HashMap;
use ratatui::{
prelude::*,
widgets::{Block, Borders, List, ListItem, Paragraph, Tabs},
};
use tokio::sync::broadcast;
use crate::config::Config;
use crate::session::output::{OutputEvent, OutputLine, SessionOutputStore, OUTPUT_BUFFER_LIMIT};
use crate::session::store::StateStore;
use crate::session::{Session, SessionState};
/// State for the split-pane TUI dashboard: session list, live output view,
/// and the pane-focus / scroll bookkeeping that drives rendering.
pub struct Dashboard {
    // SQLite-backed session store; re-read on every refresh/tick.
    db: StateStore,
    // In-memory per-session output buffers shared with output producers.
    output_store: SessionOutputStore,
    // Receiver for live output events; drained (and discarded) in `tick`.
    output_rx: broadcast::Receiver<OutputEvent>,
    // Most recently loaded session list, in display order.
    sessions: Vec<Session>,
    // Render cache: session id -> output lines loaded from the store.
    session_output_cache: HashMap<String, Vec<OutputLine>>,
    // Which pane currently has keyboard focus.
    selected_pane: Pane,
    // Index into `sessions` of the highlighted row.
    selected_session: usize,
    // Whether the help overlay replaces the main content area.
    show_help: bool,
    // True while the output view is pinned to the newest line (tail-follow).
    output_follow: bool,
    // Manual scroll offset into the output, in lines from the top.
    output_scroll_offset: usize,
    // Height (rows) of the output viewport seen at the last render.
    last_output_height: usize,
}
/// The three focusable panes of the dashboard, cycled with Tab / Shift-Tab.
#[derive(Debug, Clone, Copy, PartialEq)]
enum Pane {
    // Left column: list of agent sessions.
    Sessions,
    // Upper-right: live output of the selected session.
    Output,
    // Lower-right: metrics of the selected session.
    Metrics,
}
impl Dashboard {
    /// Build a dashboard backed by a fresh, default output store.
    pub fn new(db: StateStore, cfg: Config) -> Self {
        Self::with_output_store(db, cfg, SessionOutputStore::default())
    }

    /// Build a dashboard around an externally supplied output store, so
    /// callers and tests can share the store with output producers.
    pub fn with_output_store(
        db: StateStore,
        _cfg: Config,
        output_store: SessionOutputStore,
    ) -> Self {
        // Initial session list; store errors degrade to an empty list.
        let sessions = db.list_sessions().unwrap_or_default();
        let output_rx = output_store.subscribe();
        let mut dashboard = Self {
            db,
            output_store,
            output_rx,
            sessions,
            session_output_cache: HashMap::new(),
            selected_pane: Pane::Sessions,
            selected_session: 0,
            show_help: false,
            // Start pinned to the output tail.
            output_follow: true,
            output_scroll_offset: 0,
            last_output_height: 0,
        };
        // Prime the output cache for the initially selected session.
        dashboard.sync_selected_output();
        dashboard
    }

    /// Render one frame: header, main content (or the help overlay), status bar.
    pub fn render(&mut self, frame: &mut Frame) {
        let chunks = Layout::default()
            .direction(Direction::Vertical)
            .constraints([
                Constraint::Length(3), // Header
                Constraint::Min(10),   // Main content
                Constraint::Length(3), // Status bar
            ])
            .split(frame.area());
        self.render_header(frame, chunks[0]);
        if self.show_help {
            // Help overlay takes over the whole main content area.
            self.render_help(frame, chunks[1]);
        } else {
            let main_chunks = Layout::default()
                .direction(Direction::Horizontal)
                .constraints([
                    Constraint::Percentage(35), // Session list
                    Constraint::Percentage(65), // Output/details
                ])
                .split(chunks[1]);
            self.render_sessions(frame, main_chunks[0]);
            let right_chunks = Layout::default()
                .direction(Direction::Vertical)
                .constraints([
                    Constraint::Percentage(70), // Output
                    Constraint::Percentage(30), // Metrics
                ])
                .split(main_chunks[1]);
            self.render_output(frame, right_chunks[0]);
            self.render_metrics(frame, right_chunks[1]);
        }
        self.render_status_bar(frame, chunks[2]);
    }

    /// Header bar: running/total counts plus tabs highlighting the focused pane.
    fn render_header(&self, frame: &mut Frame, area: Rect) {
        let running = self
            .sessions
            .iter()
            .filter(|s| s.state == SessionState::Running)
            .count();
        let total = self.sessions.len();
        let title = format!(" ECC 2.0 | {running} running / {total} total ");
        let tabs = Tabs::new(vec!["Sessions", "Output", "Metrics"])
            .block(Block::default().borders(Borders::ALL).title(title))
            .select(match self.selected_pane {
                Pane::Sessions => 0,
                Pane::Output => 1,
                Pane::Metrics => 2,
            })
            .highlight_style(
                Style::default()
                    .fg(Color::Cyan)
                    .add_modifier(Modifier::BOLD),
            );
        frame.render_widget(tabs, area);
    }

    /// Session list pane; the selected row is drawn bold cyan.
    fn render_sessions(&self, frame: &mut Frame, area: Rect) {
        let items: Vec<ListItem> = self
            .sessions
            .iter()
            .enumerate()
            .map(|(i, s)| {
                // NOTE(review): the per-state icons are empty strings here —
                // possibly Unicode glyphs stripped in transit; confirm in repo.
                let state_icon = match s.state {
                    SessionState::Running => "",
                    SessionState::Idle => "",
                    SessionState::Completed => "",
                    SessionState::Failed => "",
                    SessionState::Stopped => "",
                    SessionState::Pending => "",
                };
                let style = if i == self.selected_session {
                    Style::default()
                        .fg(Color::Cyan)
                        .add_modifier(Modifier::BOLD)
                } else {
                    Style::default()
                };
                // Row: icon, short id prefix (up to 8 chars), agent type, task.
                let text = format!(
                    "{state_icon} {} [{}] {}",
                    &s.id[..8.min(s.id.len())],
                    s.agent_type,
                    s.task
                );
                ListItem::new(text).style(style)
            })
            .collect();
        let border_style = if self.selected_pane == Pane::Sessions {
            Style::default().fg(Color::Cyan)
        } else {
            Style::default()
        };
        let list = List::new(items).block(
            Block::default()
                .borders(Borders::ALL)
                .title(" Sessions ")
                .border_style(border_style),
        );
        frame.render_widget(list, area);
    }

    /// Output pane for the selected session, honoring follow/scroll state.
    fn render_output(&mut self, frame: &mut Frame, area: Rect) {
        // Two rows of the area are the border; the rest is the viewport.
        self.sync_output_scroll(area.height.saturating_sub(2) as usize);
        let content = if self.sessions.get(self.selected_session).is_some() {
            let lines = self.selected_output_lines();
            if lines.is_empty() {
                "Waiting for session output...".to_string()
            } else {
                lines
                    .iter()
                    .map(|line| line.text.as_str())
                    .collect::<Vec<_>>()
                    .join("\n")
            }
        } else {
            "No sessions. Press 'n' to start one.".to_string()
        };
        let border_style = if self.selected_pane == Pane::Output {
            Style::default().fg(Color::Cyan)
        } else {
            Style::default()
        };
        let paragraph = Paragraph::new(content)
            .block(
                Block::default()
                    .borders(Borders::ALL)
                    .title(" Output ")
                    .border_style(border_style),
            )
            // Vertical scroll only; offset maintained by sync_output_scroll.
            .scroll((self.output_scroll_offset as u16, 0));
        frame.render_widget(paragraph, area);
    }

    /// Metrics pane: one summary line for the selected session.
    fn render_metrics(&self, frame: &mut Frame, area: Rect) {
        let content = if let Some(session) = self.sessions.get(self.selected_session) {
            let m = &session.metrics;
            format!(
                "Tokens: {} | Tools: {} | Files: {} | Cost: ${:.4} | Duration: {}s",
                m.tokens_used, m.tool_calls, m.files_changed, m.cost_usd, m.duration_secs
            )
        } else {
            "No metrics available".to_string()
        };
        let border_style = if self.selected_pane == Pane::Metrics {
            Style::default().fg(Color::Cyan)
        } else {
            Style::default()
        };
        let paragraph = Paragraph::new(content).block(
            Block::default()
                .borders(Borders::ALL)
                .title(" Metrics ")
                .border_style(border_style),
        );
        frame.render_widget(paragraph, area);
    }

    /// Bottom bar with the key bindings cheat line.
    fn render_status_bar(&self, frame: &mut Frame, area: Rect) {
        let text = " [n]ew session  [s]top  [Tab] switch pane  [j/k] scroll  [?] help  [q]uit ";
        let paragraph = Paragraph::new(text)
            .style(Style::default().fg(Color::DarkGray))
            .block(Block::default().borders(Borders::ALL));
        frame.render_widget(paragraph, area);
    }

    /// Full-pane help overlay listing every key binding.
    fn render_help(&self, frame: &mut Frame, area: Rect) {
        let help = vec![
            "Keyboard Shortcuts:",
            "",
            "  n       New session",
            "  s       Stop selected session",
            "  Tab     Next pane",
            "  S-Tab   Previous pane",
            "  j/↓     Scroll down",
            "  k/↑     Scroll up",
            "  r       Refresh",
            "  ?       Toggle help",
            "  q/C-c   Quit",
        ];
        let paragraph = Paragraph::new(help.join("\n")).block(
            Block::default()
                .borders(Borders::ALL)
                .title(" Help ")
                .border_style(Style::default().fg(Color::Yellow)),
        );
        frame.render_widget(paragraph, area);
    }

    /// Cycle pane focus forward: Sessions -> Output -> Metrics -> Sessions.
    pub fn next_pane(&mut self) {
        self.selected_pane = match self.selected_pane {
            Pane::Sessions => Pane::Output,
            Pane::Output => Pane::Metrics,
            Pane::Metrics => Pane::Sessions,
        };
    }

    /// Cycle pane focus backward (inverse of `next_pane`).
    pub fn prev_pane(&mut self) {
        self.selected_pane = match self.selected_pane {
            Pane::Sessions => Pane::Metrics,
            Pane::Output => Pane::Sessions,
            Pane::Metrics => Pane::Output,
        };
    }

    /// `j`/Down: in the sessions pane move the selection; in the output pane
    /// step toward the tail, re-enabling follow mode once the tail is reached.
    pub fn scroll_down(&mut self) {
        if self.selected_pane == Pane::Sessions && !self.sessions.is_empty() {
            self.selected_session = (self.selected_session + 1).min(self.sessions.len() - 1);
            // Changing sessions resets the output view to tail-follow.
            self.reset_output_view();
            self.sync_selected_output();
        } else if self.selected_pane == Pane::Output {
            let max_scroll = self.max_output_scroll();
            if self.output_follow {
                // Already pinned to the tail; nothing further down.
                return;
            }
            if self.output_scroll_offset >= max_scroll.saturating_sub(1) {
                // One step from the bottom: snap to the tail and resume following.
                self.output_follow = true;
                self.output_scroll_offset = max_scroll;
            } else {
                self.output_scroll_offset = self.output_scroll_offset.saturating_add(1);
            }
        }
    }

    /// `k`/Up: move session selection up, or scroll output backward
    /// (leaving tail-follow mode on the first upward step).
    pub fn scroll_up(&mut self) {
        if self.selected_pane == Pane::Sessions {
            self.selected_session = self.selected_session.saturating_sub(1);
            self.reset_output_view();
            self.sync_selected_output();
        } else if self.selected_pane == Pane::Output {
            if self.output_follow {
                // Detach from the tail at the current bottom position first.
                self.output_follow = false;
                self.output_scroll_offset = self.max_output_scroll();
            }
            self.output_scroll_offset = self.output_scroll_offset.saturating_sub(1);
        } else {
            // NOTE(review): with the Metrics pane focused this still scrolls
            // the output offset, while `scroll_down` has no matching branch —
            // confirm whether this asymmetry is intentional.
            self.output_scroll_offset = self.output_scroll_offset.saturating_sub(1);
        }
    }

    /// Placeholder for the new-session dialog (not yet implemented).
    pub fn new_session(&mut self) {
        // TODO: Open a dialog to create a new session
        tracing::info!("New session dialog requested");
    }

    /// Mark the selected session Stopped in the store, then reload the list.
    pub fn stop_selected(&mut self) {
        if let Some(session) = self.sessions.get(self.selected_session) {
            // Best-effort: a store error is ignored; refresh shows reality.
            let _ = self.db.update_state(&session.id, &SessionState::Stopped);
            self.refresh();
        }
    }

    /// Reload sessions from the store and resync selection + output cache.
    pub fn refresh(&mut self) {
        self.sessions = self.db.list_sessions().unwrap_or_default();
        self.clamp_selected_session();
        self.sync_selected_output();
    }

    /// Toggle the help overlay.
    pub fn toggle_help(&mut self) {
        self.show_help = !self.show_help;
    }

    /// Periodic update: drain pending output events, then reload the session
    /// list and the selected session's output from the store.
    pub async fn tick(&mut self) {
        // Drain the broadcast channel. Events themselves are discarded because
        // the authoritative output is re-read from the store below; Lagged
        // receivers just keep draining until the channel is empty or closed.
        loop {
            match self.output_rx.try_recv() {
                Ok(_event) => {}
                Err(broadcast::error::TryRecvError::Empty) => break,
                Err(broadcast::error::TryRecvError::Lagged(_)) => continue,
                Err(broadcast::error::TryRecvError::Closed) => break,
            }
        }
        self.sessions = self.db.list_sessions().unwrap_or_default();
        self.clamp_selected_session();
        self.sync_selected_output();
    }

    /// Keep `selected_session` a valid index after the session list changes.
    fn clamp_selected_session(&mut self) {
        if self.sessions.is_empty() {
            self.selected_session = 0;
        } else {
            self.selected_session = self.selected_session.min(self.sessions.len() - 1);
        }
    }

    /// Load the selected session's recent output from the store into both the
    /// shared output store and the local render cache. Non-fatal on error.
    fn sync_selected_output(&mut self) {
        let Some(session_id) = self.selected_session_id().map(ToOwned::to_owned) else {
            return;
        };
        match self.db.get_output_lines(&session_id, OUTPUT_BUFFER_LIMIT) {
            Ok(lines) => {
                self.output_store.replace_lines(&session_id, lines.clone());
                self.session_output_cache.insert(session_id, lines);
            }
            Err(error) => {
                // Keep whatever is cached; surface the problem via logs only.
                tracing::warn!("Failed to load session output: {error}");
            }
        }
    }

    /// Id of the currently selected session, if any.
    fn selected_session_id(&self) -> Option<&str> {
        self.sessions
            .get(self.selected_session)
            .map(|session| session.id.as_str())
    }

    /// Cached output lines for the selected session (empty slice if none).
    fn selected_output_lines(&self) -> &[OutputLine] {
        self.selected_session_id()
            .and_then(|session_id| self.session_output_cache.get(session_id))
            .map(Vec::as_slice)
            .unwrap_or(&[])
    }

    /// Recompute the scroll offset for the given viewport height: pin to the
    /// tail when following, otherwise clamp the manual offset into range.
    fn sync_output_scroll(&mut self, viewport_height: usize) {
        self.last_output_height = viewport_height.max(1);
        let max_scroll = self.max_output_scroll();
        if self.output_follow {
            self.output_scroll_offset = max_scroll;
        } else {
            self.output_scroll_offset = self.output_scroll_offset.min(max_scroll);
        }
    }

    /// Largest valid scroll offset: lines that overflow the last viewport.
    fn max_output_scroll(&self) -> usize {
        self.selected_output_lines()
            .len()
            .saturating_sub(self.last_output_height.max(1))
    }

    /// Reset to tail-follow mode (used when the selected session changes).
    fn reset_output_view(&mut self) {
        self.output_follow = true;
        self.output_scroll_offset = 0;
    }

    /// Test helper: the selected session's output joined with newlines.
    #[cfg(test)]
    fn selected_output_text(&self) -> String {
        self.selected_output_lines()
            .iter()
            .map(|line| line.text.clone())
            .collect::<Vec<_>>()
            .join("\n")
    }
}
#[cfg(test)]
mod tests {
    use std::env;

    use anyhow::Result;
    use chrono::Utc;
    use uuid::Uuid;

    use super::{Dashboard, Pane};
    use crate::config::Config;
    use crate::session::output::OutputStream;
    use crate::session::store::StateStore;
    use crate::session::{Session, SessionMetrics, SessionState};

    /// With 12 output lines and a 4-row viewport, follow mode pins the scroll
    /// offset to the tail (12 - 4 = 8) and the newest line is visible.
    #[test]
    fn refresh_loads_selected_session_output_and_follows_tail() -> Result<()> {
        // Unique temp DB per run so concurrent tests do not collide.
        let db_path = env::temp_dir().join(format!("ecc2-dashboard-{}.db", Uuid::new_v4()));
        let db = StateStore::open(&db_path)?;
        let now = Utc::now();
        db.insert_session(&Session {
            id: "session-1".to_string(),
            task: "tail output".to_string(),
            agent_type: "claude".to_string(),
            state: SessionState::Running,
            worktree: None,
            created_at: now,
            updated_at: now,
            metrics: SessionMetrics::default(),
        })?;
        for index in 0..12 {
            db.append_output_line("session-1", OutputStream::Stdout, &format!("line {index}"))?;
        }
        let mut dashboard = Dashboard::new(db, Config::default());
        dashboard.selected_pane = Pane::Output;
        dashboard.refresh();
        dashboard.sync_output_scroll(4);
        assert_eq!(dashboard.output_scroll_offset, 8);
        assert!(dashboard.selected_output_text().contains("line 11"));
        // Best-effort cleanup of the temp DB file.
        let _ = std::fs::remove_file(db_path);
        Ok(())
    }

    /// One upward step from follow mode detaches from the tail and moves the
    /// offset back by one: max scroll is 6 - 3 = 3, so the offset becomes 2.
    #[test]
    fn scrolling_up_disables_follow_mode() -> Result<()> {
        let db_path = env::temp_dir().join(format!("ecc2-dashboard-{}.db", Uuid::new_v4()));
        let db = StateStore::open(&db_path)?;
        let now = Utc::now();
        db.insert_session(&Session {
            id: "session-1".to_string(),
            task: "inspect output".to_string(),
            agent_type: "claude".to_string(),
            state: SessionState::Running,
            worktree: None,
            created_at: now,
            updated_at: now,
            metrics: SessionMetrics::default(),
        })?;
        for index in 0..6 {
            db.append_output_line("session-1", OutputStream::Stdout, &format!("line {index}"))?;
        }
        let mut dashboard = Dashboard::new(db, Config::default());
        dashboard.selected_pane = Pane::Output;
        dashboard.refresh();
        dashboard.sync_output_scroll(3);
        dashboard.scroll_up();
        assert!(!dashboard.output_follow);
        assert_eq!(dashboard.output_scroll_offset, 2);
        // Best-effort cleanup of the temp DB file.
        let _ = std::fs::remove_file(db_path);
        Ok(())
    }
}

3
ecc2/src/tui/mod.rs Normal file
View File

@@ -0,0 +1,3 @@
/// Terminal application entry point: terminal setup and the event loop.
pub mod app;
/// Split-pane dashboard state and rendering (crate-internal).
mod dashboard;
/// Custom TUI widgets (currently TODO placeholders only).
mod widgets;

6
ecc2/src/tui/widgets.rs Normal file
View File

@@ -0,0 +1,6 @@
// Custom TUI widgets for ECC 2.0
// TODO: Implement custom widgets:
// - TokenMeter: visual token usage bar with budget threshold
// - DiffViewer: side-by-side syntax-highlighted diff display
// - ProgressTimeline: session timeline with tool call markers
// - AgentTree: hierarchical view of parent/child agent sessions

80
ecc2/src/worktree/mod.rs Normal file
View File

@@ -0,0 +1,80 @@
use anyhow::{Context, Result};
use std::path::{Path, PathBuf};
use std::process::Command;

use crate::config::Config;
use crate::session::WorktreeInfo;
/// Create a new git worktree (on a fresh `ecc/<session-id>` branch) for an
/// agent session and return its location and branch metadata.
pub fn create_for_session(session_id: &str, cfg: &Config) -> Result<WorktreeInfo> {
    let branch_name = format!("ecc/{session_id}");
    let worktree_path = cfg.worktree_root.join(session_id);

    // Record which branch we are forking from before creating the worktree.
    let base_branch = get_current_branch()?;

    std::fs::create_dir_all(&cfg.worktree_root)
        .context("Failed to create worktree root directory")?;

    let result = Command::new("git")
        .args(["worktree", "add", "-b", &branch_name])
        .arg(&worktree_path)
        .arg("HEAD")
        .output()
        .context("Failed to run git worktree add")?;

    if !result.status.success() {
        anyhow::bail!(
            "git worktree add failed: {}",
            String::from_utf8_lossy(&result.stderr)
        );
    }

    tracing::info!(
        "Created worktree at {} on branch {}",
        worktree_path.display(),
        branch_name
    );

    Ok(WorktreeInfo {
        path: worktree_path,
        branch: branch_name,
        base_branch,
    })
}
/// Remove a worktree and its branch.
pub fn remove(path: &PathBuf) -> Result<()> {
let output = Command::new("git")
.args(["worktree", "remove", "--force"])
.arg(path)
.output()
.context("Failed to remove worktree")?;
if !output.status.success() {
let stderr = String::from_utf8_lossy(&output.stderr);
tracing::warn!("Worktree removal warning: {stderr}");
}
Ok(())
}
/// List all active worktrees.
pub fn list() -> Result<Vec<String>> {
let output = Command::new("git")
.args(["worktree", "list", "--porcelain"])
.output()
.context("Failed to list worktrees")?;
let stdout = String::from_utf8_lossy(&output.stdout);
let worktrees: Vec<String> = stdout
.lines()
.filter(|l| l.starts_with("worktree "))
.map(|l| l.trim_start_matches("worktree ").to_string())
.collect();
Ok(worktrees)
}
fn get_current_branch() -> Result<String> {
let output = Command::new("git")
.args(["rev-parse", "--abbrev-ref", "HEAD"])
.output()
.context("Failed to get current branch")?;
Ok(String::from_utf8_lossy(&output.stdout).trim().to_string())
}

87
skills/benchmark/SKILL.md Normal file
View File

@@ -0,0 +1,87 @@
# Benchmark — Performance Baseline & Regression Detection
## When to Use
- Before and after a PR to measure performance impact
- Setting up performance baselines for a project
- When users report "it feels slow"
- Before a launch — ensure you meet performance targets
- Comparing your stack against alternatives
## How It Works
### Mode 1: Page Performance
Measures real browser metrics via browser MCP:
```
1. Navigate to each target URL
2. Measure Core Web Vitals:
- LCP (Largest Contentful Paint) — target < 2.5s
- CLS (Cumulative Layout Shift) — target < 0.1
- INP (Interaction to Next Paint) — target < 200ms
- FCP (First Contentful Paint) — target < 1.8s
- TTFB (Time to First Byte) — target < 800ms
3. Measure resource sizes:
- Total page weight (target < 1MB)
- JS bundle size (target < 200KB gzipped)
- CSS size
- Image weight
- Third-party script weight
4. Count network requests
5. Check for render-blocking resources
```
### Mode 2: API Performance
Benchmarks API endpoints:
```
1. Hit each endpoint 100 times
2. Measure: p50, p95, p99 latency
3. Track: response size, status codes
4. Test under load: 10 concurrent requests
5. Compare against SLA targets
```
### Mode 3: Build Performance
Measures development feedback loop:
```
1. Cold build time
2. Hot reload time (HMR)
3. Test suite duration
4. TypeScript check time
5. Lint time
6. Docker build time
```
### Mode 4: Before/After Comparison
Run before and after a change to measure impact:
```
/benchmark baseline # saves current metrics
# ... make changes ...
/benchmark compare # compares against baseline
```
Output:
```
| Metric | Before | After | Delta | Verdict |
|--------|--------|-------|-------|---------|
| LCP | 1.2s | 1.4s | +200ms | ⚠ WARN |
| Bundle | 180KB | 175KB | -5KB | ✓ BETTER |
| Build | 12s | 14s | +2s | ⚠ WARN |
```
## Output
Stores baselines in `.ecc/benchmarks/` as JSON. Git-tracked so the team shares baselines.
## Integration
- CI: run `/benchmark compare` on every PR
- Pair with `/canary-watch` for post-deploy monitoring
- Pair with `/browser-qa` for full pre-ship checklist

View File

@@ -0,0 +1,81 @@
# Browser QA — Automated Visual Testing & Interaction
## When to Use
- After deploying a feature to staging/preview
- When you need to verify UI behavior across pages
- Before shipping — confirm layouts, forms, interactions actually work
- When reviewing PRs that touch frontend code
- Accessibility audits and responsive testing
## How It Works
Uses the browser automation MCP (claude-in-chrome, Playwright, or Puppeteer) to interact with live pages like a real user.
### Phase 1: Smoke Test
```
1. Navigate to target URL
2. Check for console errors (filter noise: analytics, third-party)
3. Verify no 4xx/5xx in network requests
4. Screenshot above-the-fold on desktop + mobile viewport
5. Check Core Web Vitals: LCP < 2.5s, CLS < 0.1, INP < 200ms
```
### Phase 2: Interaction Test
```
1. Click every nav link — verify no dead links
2. Submit forms with valid data — verify success state
3. Submit forms with invalid data — verify error state
4. Test auth flow: login → protected page → logout
5. Test critical user journeys (checkout, onboarding, search)
```
### Phase 3: Visual Regression
```
1. Screenshot key pages at 3 breakpoints (375px, 768px, 1440px)
2. Compare against baseline screenshots (if stored)
3. Flag layout shifts > 5px, missing elements, overflow
4. Check dark mode if applicable
```
### Phase 4: Accessibility
```
1. Run axe-core or equivalent on each page
2. Flag WCAG AA violations (contrast, labels, focus order)
3. Verify keyboard navigation works end-to-end
4. Check screen reader landmarks
```
## Output Format
```markdown
## QA Report — [URL] — [timestamp]
### Smoke Test
- Console errors: 0 critical, 2 warnings (analytics noise)
- Network: all 200/304, no failures
- Core Web Vitals: LCP 1.2s ✓, CLS 0.02 ✓, INP 89ms ✓
### Interactions
- [✓] Nav links: 12/12 working
- [✗] Contact form: missing error state for invalid email
- [✓] Auth flow: login/logout working
### Visual
- [✗] Hero section overflows on 375px viewport
- [✓] Dark mode: all pages consistent
### Accessibility
- 2 AA violations: missing alt text on hero image, low contrast on footer links
### Verdict: SHIP WITH FIXES (2 issues, 0 blockers)
```
## Integration
Works with any browser MCP:
- `mcp__claude-in-chrome__*` tools (preferred — uses your actual Chrome)
- Playwright via `mcp__browserbase__*`
- Direct Puppeteer scripts
Pair with `/canary-watch` for post-deploy monitoring.

View File

@@ -0,0 +1,93 @@
# Canary Watch — Post-Deploy Monitoring
## When to Use
- After deploying to production or staging
- After merging a risky PR
- When you want to verify a fix actually fixed it
- Continuous monitoring during a launch window
- After dependency upgrades
## How It Works
Monitors a deployed URL for regressions. Runs in a loop until stopped or until the watch window expires.
### What It Watches
```
1. HTTP Status — is the page returning 200?
2. Console Errors — new errors that weren't there before?
3. Network Failures — failed API calls, 5xx responses?
4. Performance — LCP/CLS/INP regression vs baseline?
5. Content — did key elements disappear? (h1, nav, footer, CTA)
6. API Health — are critical endpoints responding within SLA?
```
### Watch Modes
**Quick check** (default): single pass, report results
```
/canary-watch https://myapp.com
```
**Sustained watch**: check every N minutes for M hours
```
/canary-watch https://myapp.com --interval 5m --duration 2h
```
**Diff mode**: compare staging vs production
```
/canary-watch --compare https://staging.myapp.com https://myapp.com
```
### Alert Thresholds
```yaml
critical: # immediate alert
- HTTP status != 200
- Console error count > 5 (new errors only)
- LCP > 4s
- API endpoint returns 5xx
warning: # flag in report
- LCP increased > 500ms from baseline
- CLS > 0.1
- New console warnings
- Response time > 2x baseline
info: # log only
- Minor performance variance
- New network requests (third-party scripts added?)
```
### Notifications
When a critical threshold is crossed:
- Desktop notification (macOS/Linux)
- Optional: Slack/Discord webhook
- Log to `~/.claude/canary-watch.log`
## Output
```markdown
## Canary Report — myapp.com — 2026-03-23 03:15 PST
### Status: HEALTHY ✓
| Check | Result | Baseline | Delta |
|-------|--------|----------|-------|
| HTTP | 200 ✓ | 200 | — |
| Console errors | 0 ✓ | 0 | — |
| LCP | 1.8s ✓ | 1.6s | +200ms |
| CLS | 0.01 ✓ | 0.01 | — |
| API /health | 145ms ✓ | 120ms | +25ms |
### No regressions detected. Deploy is clean.
```
## Integration
Pair with:
- `/browser-qa` for pre-deploy verification
- Hooks: add as a PostToolUse hook on `git push` to auto-check after deploys
- CI: run in GitHub Actions after deploy step

View File

@@ -0,0 +1,76 @@
# Design System — Generate & Audit Visual Systems
## When to Use
- Starting a new project that needs a design system
- Auditing an existing codebase for visual consistency
- Before a redesign — understand what you have
- When the UI looks "off" but you can't pinpoint why
- Reviewing PRs that touch styling
## How It Works
### Mode 1: Generate Design System
Analyzes your codebase and generates a cohesive design system:
```
1. Scan CSS/Tailwind/styled-components for existing patterns
2. Extract: colors, typography, spacing, border-radius, shadows, breakpoints
3. Research 3 competitor sites for inspiration (via browser MCP)
4. Propose a design token set (JSON + CSS custom properties)
5. Generate DESIGN.md with rationale for each decision
6. Create an interactive HTML preview page (self-contained, no deps)
```
Output: `DESIGN.md` + `design-tokens.json` + `design-preview.html`
### Mode 2: Visual Audit
Scores your UI across 10 dimensions (0-10 each):
```
1. Color consistency — are you using your palette or random hex values?
2. Typography hierarchy — clear h1 > h2 > h3 > body > caption?
3. Spacing rhythm — consistent scale (4px/8px/16px) or arbitrary?
4. Component consistency — do similar elements look similar?
5. Responsive behavior — fluid or broken at breakpoints?
6. Dark mode — complete or half-done?
7. Animation — purposeful or gratuitous?
8. Accessibility — contrast ratios, focus states, touch targets
9. Information density — cluttered or clean?
10. Polish — hover states, transitions, loading states, empty states
```
Each dimension gets a score, specific examples, and a fix with exact file:line.
### Mode 3: AI Slop Detection
Identifies generic AI-generated design patterns:
```
- Gratuitous gradients on everything
- Purple-to-blue defaults
- "Glass morphism" cards with no purpose
- Rounded corners on things that shouldn't be rounded
- Excessive animations on scroll
- Generic hero with centered text over stock gradient
- Sans-serif font stack with no personality
```
## Examples
**Generate for a SaaS app:**
```
/design-system generate --style minimal --palette earth-tones
```
**Audit existing UI:**
```
/design-system audit --url http://localhost:3000 --pages / /pricing /docs
```
**Check for AI slop:**
```
/design-system slop-check
```

View File

@@ -0,0 +1,79 @@
# Product Lens — Think Before You Build
## When to Use
- Before starting any feature — validate the "why"
- Weekly product review — are we building the right thing?
- When stuck choosing between features
- Before a launch — sanity check the user journey
- When converting a vague idea into a spec
## How It Works
### Mode 1: Product Diagnostic
Like YC office hours but automated. Asks the hard questions:
```
1. Who is this for? (specific person, not "developers")
2. What's the pain? (quantify: how often, how bad, what do they do today?)
3. Why now? (what changed that makes this possible/necessary?)
4. What's the 10-star version? (if money/time were unlimited)
5. What's the MVP? (smallest thing that proves the thesis)
6. What's the anti-goal? (what are you explicitly NOT building?)
7. How do you know it's working? (metric, not vibes)
```
Output: a `PRODUCT-BRIEF.md` with answers, risks, and a go/no-go recommendation.
### Mode 2: Founder Review
Reviews your current project through a founder lens:
```
1. Read README, CLAUDE.md, package.json, recent commits
2. Infer: what is this trying to be?
3. Score: product-market fit signals (0-10)
- Usage growth trajectory
- Retention indicators (repeat contributors, return users)
- Revenue signals (pricing page, billing code, Stripe integration)
- Competitive moat (what's hard to copy?)
4. Identify: the one thing that would 10x this
5. Flag: things you're building that don't matter
```
### Mode 3: User Journey Audit
Maps the actual user experience:
```
1. Clone/install the product as a new user
2. Document every friction point (confusing steps, errors, missing docs)
3. Time each step
4. Compare to competitor onboarding
5. Score: time-to-value (how long until the user gets their first win?)
6. Recommend: top 3 fixes for onboarding
```
### Mode 4: Feature Prioritization
When you have 10 ideas and need to pick 2:
```
1. List all candidate features
2. Score each on: impact (1-5) × confidence (1-5) ÷ effort (1-5)
3. Rank by ICE score
4. Apply constraints: runway, team size, dependencies
5. Output: prioritized roadmap with rationale
```
## Output
All modes output actionable docs, not essays. Every recommendation has a specific next step.
## Integration
Pair with:
- `/browser-qa` to verify the user journey audit findings
- `/design-system audit` for visual polish assessment
- `/canary-watch` for post-launch monitoring

View File

@@ -0,0 +1,69 @@
# Safety Guard — Prevent Destructive Operations
## When to Use
- When working on production systems
- When agents are running autonomously (full-auto mode)
- When you want to restrict edits to a specific directory
- During sensitive operations (migrations, deploys, data changes)
## How It Works
Three modes of protection:
### Mode 1: Careful Mode
Intercepts destructive commands before execution and warns:
```
Watched patterns:
- rm -rf (especially /, ~, or project root)
- git push --force
- git reset --hard
- git checkout . (discard all changes)
- DROP TABLE / DROP DATABASE
- docker system prune
- kubectl delete
- chmod 777
- sudo rm
- npm publish (accidental publishes)
- Any command with --no-verify
```
When detected: shows what the command does, asks for confirmation, suggests safer alternative.
### Mode 2: Freeze Mode
Locks file edits to a specific directory tree:
```
/safety-guard freeze src/components/
```
Any Write/Edit outside `src/components/` is blocked with an explanation. Useful when you want an agent to focus on one area without touching unrelated code.
### Mode 3: Guard Mode (Careful + Freeze combined)
Both protections active. Maximum safety for autonomous agents.
```
/safety-guard guard --dir src/api/ --allow-read-all
```
Agents can read anything but only write to `src/api/`. Destructive commands are blocked everywhere.
### Unlock
```
/safety-guard off
```
## Implementation
Uses PreToolUse hooks to intercept Bash, Write, Edit, and MultiEdit tool calls. Checks the command/path against the active rules before allowing execution.
## Integration
- Enable by default for `codex -a never` sessions
- Pair with observability risk scoring in ECC 2.0
- Logs all blocked actions to `~/.claude/safety-guard.log`

View File

@@ -112,14 +112,17 @@ function runTests() {
);
})) passed++; else failed++;
if (test('resolves antigravity profiles by skipping incompatible dependency trees', () => {
if (test('resolves antigravity profiles while skipping only unsupported modules', () => {
const projectRoot = '/workspace/app';
const plan = resolveInstallPlan({ profileId: 'core', target: 'antigravity', projectRoot });
assert.deepStrictEqual(plan.selectedModuleIds, ['rules-core', 'agents-core', 'commands-core']);
assert.deepStrictEqual(
plan.selectedModuleIds,
['rules-core', 'agents-core', 'commands-core', 'platform-configs', 'workflow-quality']
);
assert.ok(plan.skippedModuleIds.includes('hooks-runtime'));
assert.ok(plan.skippedModuleIds.includes('platform-configs'));
assert.ok(plan.skippedModuleIds.includes('workflow-quality'));
assert.ok(!plan.skippedModuleIds.includes('platform-configs'));
assert.ok(!plan.skippedModuleIds.includes('workflow-quality'));
assert.strictEqual(plan.targetAdapterId, 'antigravity-project');
assert.strictEqual(plan.targetRoot, path.join(projectRoot, '.agent'));
})) passed++; else failed++;

View File

@@ -258,7 +258,7 @@ function runTests() {
}
})) passed++; else failed++;
if (test('installs antigravity manifest profiles while skipping incompatible modules', () => {
if (test('installs antigravity manifest profiles while skipping only unsupported modules', () => {
const homeDir = createTempDir('install-apply-home-');
const projectDir = createTempDir('install-apply-project-');
@@ -269,14 +269,18 @@ function runTests() {
assert.ok(fs.existsSync(path.join(projectDir, '.agent', 'rules', 'common-coding-style.md')));
assert.ok(fs.existsSync(path.join(projectDir, '.agent', 'skills', 'architect.md')));
assert.ok(fs.existsSync(path.join(projectDir, '.agent', 'workflows', 'plan.md')));
assert.ok(!fs.existsSync(path.join(projectDir, '.agent', 'skills', 'tdd-workflow', 'SKILL.md')));
assert.ok(fs.existsSync(path.join(projectDir, '.agent', 'skills', 'tdd-workflow', 'SKILL.md')));
const state = readJson(path.join(projectDir, '.agent', 'ecc-install-state.json'));
assert.strictEqual(state.request.profile, 'core');
assert.strictEqual(state.request.legacyMode, false);
assert.deepStrictEqual(state.resolution.selectedModules, ['rules-core', 'agents-core', 'commands-core']);
assert.ok(state.resolution.skippedModules.includes('workflow-quality'));
assert.ok(state.resolution.skippedModules.includes('platform-configs'));
assert.deepStrictEqual(
state.resolution.selectedModules,
['rules-core', 'agents-core', 'commands-core', 'platform-configs', 'workflow-quality']
);
assert.ok(state.resolution.skippedModules.includes('hooks-runtime'));
assert.ok(!state.resolution.skippedModules.includes('workflow-quality'));
assert.ok(!state.resolution.skippedModules.includes('platform-configs'));
} finally {
cleanup(homeDir);
cleanup(projectDir);