feat: add ecc2 decision log audit trail

This commit is contained in:
Affaan Mustafa
2026-04-09 21:57:28 -07:00
parent 913c00c74d
commit b48a52f9a0
4 changed files with 616 additions and 7 deletions

View File

@@ -260,6 +260,37 @@ enum Commands {
#[arg(long)]
json: bool,
},
/// Log a significant agent decision for auditability
LogDecision {
/// Session ID or alias. Omit to log against the latest session.
session_id: Option<String>,
/// The chosen decision or direction
#[arg(long)]
decision: String,
/// Why the agent made this choice
#[arg(long)]
reasoning: String,
/// Alternative considered and rejected; repeat for multiple entries
#[arg(long = "alternative")]
alternatives: Vec<String>,
/// Emit machine-readable JSON instead of the human summary
#[arg(long)]
json: bool,
},
/// Show recent decision-log entries
Decisions {
/// Session ID or alias. Omit to read the latest session.
session_id: Option<String>,
/// Show decision log entries across all sessions
#[arg(long)]
all: bool,
/// Emit machine-readable JSON instead of the human summary
#[arg(long)]
json: bool,
/// Maximum decision-log entries to return
#[arg(long, default_value_t = 20)]
limit: usize,
},
/// Export sessions, tool spans, and metrics in OTLP-compatible JSON
ExportOtel {
/// Session ID or alias. Omit to export all sessions.
@@ -872,6 +903,45 @@ async fn main() -> Result<()> {
println!("{}", format_prune_worktrees_human(&outcome));
}
}
Some(Commands::LogDecision {
session_id,
decision,
reasoning,
alternatives,
json,
}) => {
let resolved_id = resolve_session_id(&db, session_id.as_deref().unwrap_or("latest"))?;
let entry = db.insert_decision(&resolved_id, &decision, &alternatives, &reasoning)?;
if json {
println!("{}", serde_json::to_string_pretty(&entry)?);
} else {
println!("{}", format_logged_decision_human(&entry));
}
}
Some(Commands::Decisions {
session_id,
all,
json,
limit,
}) => {
if all && session_id.is_some() {
return Err(anyhow::anyhow!(
"decisions does not accept a session ID when --all is set"
));
}
let entries = if all {
db.list_decisions(limit)?
} else {
let resolved_id =
resolve_session_id(&db, session_id.as_deref().unwrap_or("latest"))?;
db.list_decisions_for_session(&resolved_id, limit)?
};
if json {
println!("{}", serde_json::to_string_pretty(&entries)?);
} else {
println!("{}", format_decisions_human(&entries, all));
}
}
Some(Commands::ExportOtel { session_id, output }) => {
sync_runtime_session_metrics(&db, &cfg)?;
let resolved_session_id = session_id
@@ -1641,6 +1711,63 @@ fn format_prune_worktrees_human(outcome: &session::manager::WorktreePruneOutcome
lines.join("\n")
}
/// Render the human-readable confirmation printed after a decision is logged:
/// the short session id, the decision, its reasoning, any rejected
/// alternatives, and the recording time.
fn format_logged_decision_human(entry: &session::DecisionLogEntry) -> String {
    let mut out = Vec::new();
    out.push(format!(
        "Logged decision for {}",
        short_session(&entry.session_id)
    ));
    out.push(format!("Decision: {}", entry.decision));
    out.push(format!("Why: {}", entry.reasoning));
    // Make it explicit when no alternatives were captured, rather than
    // silently omitting the section.
    if entry.alternatives.is_empty() {
        out.push("Alternatives: none recorded".to_string());
    } else {
        out.push("Alternatives:".to_string());
        out.extend(entry.alternatives.iter().map(|alt| format!("- {alt}")));
    }
    out.push(format!(
        "Recorded at: {}",
        entry.timestamp.format("%Y-%m-%d %H:%M:%S UTC")
    ));
    out.join("\n")
}
/// Render a multi-line human summary of decision-log entries.
///
/// When `include_session` is true (the `--all` view) each entry line carries a
/// short session-id prefix so entries from different sessions stay
/// distinguishable; the empty-state copy also differs between the two views.
fn format_decisions_human(entries: &[session::DecisionLogEntry], include_session: bool) -> String {
    if entries.is_empty() {
        return if include_session {
            "No decision-log entries across all sessions yet.".to_string()
        } else {
            "No decision-log entries for this session yet.".to_string()
        };
    }
    let mut out = Vec::new();
    out.push(format!("Decision log: {} entries", entries.len()));
    for entry in entries {
        let session_prefix = if include_session {
            format!("{} | ", short_session(&entry.session_id))
        } else {
            String::new()
        };
        out.push(format!(
            "- [{}] {session_prefix}{}",
            entry.timestamp.format("%H:%M:%S"),
            entry.decision
        ));
        out.push(format!(" why {}", entry.reasoning));
        if entry.alternatives.is_empty() {
            out.push(" alternatives none recorded".to_string());
        } else {
            out.extend(
                entry
                    .alternatives
                    .iter()
                    .map(|alt| format!(" alternative {alt}")),
            );
        }
    }
    out.join("\n")
}
fn format_merge_queue_human(report: &session::manager::MergeQueueReport) -> String {
let mut lines = Vec::new();
lines.push(format!(
@@ -3259,6 +3386,87 @@ mod tests {
}
}
#[test]
fn cli_parses_log_decision_command() {
    // Exercise every flag at once: positional session, required --decision and
    // --reasoning, repeated --alternative, and --json.
    let parsed = Cli::try_parse_from([
        "ecc",
        "log-decision",
        "latest",
        "--decision",
        "Use sqlite",
        "--reasoning",
        "It is already embedded",
        "--alternative",
        "json files",
        "--alternative",
        "memory only",
        "--json",
    ])
    .expect("log-decision should parse");
    let Some(Commands::LogDecision {
        session_id,
        decision,
        reasoning,
        alternatives,
        json,
    }) = parsed.command
    else {
        panic!("expected log-decision subcommand");
    };
    assert_eq!(session_id.as_deref(), Some("latest"));
    assert_eq!(decision, "Use sqlite");
    assert_eq!(reasoning, "It is already embedded");
    // Repeated --alternative flags accumulate in order.
    assert_eq!(alternatives, vec!["json files", "memory only"]);
    assert!(json);
}
#[test]
fn cli_parses_decisions_command() {
    // --all with no positional session id, plus --limit and --json.
    let parsed = Cli::try_parse_from(["ecc", "decisions", "--all", "--limit", "5", "--json"])
        .expect("decisions should parse");
    let Some(Commands::Decisions {
        session_id,
        all,
        json,
        limit,
    }) = parsed.command
    else {
        panic!("expected decisions subcommand");
    };
    assert!(session_id.is_none());
    assert!(all);
    assert!(json);
    assert_eq!(limit, 5);
}
#[test]
fn format_decisions_human_renders_details() {
    let entry = session::DecisionLogEntry {
        id: 1,
        session_id: "sess-12345678".to_string(),
        decision: "Use sqlite for the shared context graph".to_string(),
        alternatives: vec!["json files".to_string(), "memory only".to_string()],
        reasoning: "SQLite keeps the audit trail queryable.".to_string(),
        timestamp: chrono::DateTime::parse_from_rfc3339("2026-04-09T01:02:03Z")
            .unwrap()
            .with_timezone(&chrono::Utc),
    };
    let rendered = format_decisions_human(&[entry], true);
    // include_session = true, so the short session id must appear alongside
    // the header, reasoning, and every alternative.
    for needle in [
        "Decision log: 1 entries",
        "sess-123",
        "Use sqlite for the shared context graph",
        "why SQLite keeps the audit trail queryable.",
        "alternative json files",
        "alternative memory only",
    ] {
        assert!(rendered.contains(needle), "missing {needle:?}");
    }
}
#[test]
fn cli_parses_coordination_status_json_flag() {
let cli = Cli::try_parse_from(["ecc", "coordination-status", "--json"])

View File

@@ -142,6 +142,16 @@ pub struct FileActivityEntry {
pub timestamp: DateTime<Utc>,
}
/// One audit-trail record of a significant agent decision: what was chosen,
/// why, and which alternatives were considered and rejected.
#[derive(Debug, Clone, Serialize, Deserialize, PartialEq, Eq)]
pub struct DecisionLogEntry {
    /// Database rowid assigned when the entry was inserted.
    pub id: i64,
    /// Session this decision was logged against.
    pub session_id: String,
    /// The chosen decision or direction.
    pub decision: String,
    /// Alternatives that were considered and rejected (may be empty).
    pub alternatives: Vec<String>,
    /// Why the agent made this choice.
    pub reasoning: String,
    /// When the decision was recorded (UTC).
    pub timestamp: DateTime<Utc>,
}
#[derive(Debug, Clone, Serialize, Deserialize, PartialEq, Eq)]
#[serde(rename_all = "snake_case")]
pub enum FileActivityAction {

View File

@@ -13,8 +13,9 @@ use crate::observability::{ToolCallEvent, ToolLogEntry, ToolLogPage};
use super::output::{OutputLine, OutputStream, OUTPUT_BUFFER_LIMIT};
use super::{
default_project_label, default_task_group_label, normalize_group_label, FileActivityAction,
FileActivityEntry, Session, SessionMessage, SessionMetrics, SessionState, WorktreeInfo,
default_project_label, default_task_group_label, normalize_group_label, DecisionLogEntry,
FileActivityAction, FileActivityEntry, Session, SessionMessage, SessionMetrics, SessionState,
WorktreeInfo,
};
pub struct StateStore {
@@ -193,6 +194,15 @@ impl StateStore {
timestamp TEXT NOT NULL
);
CREATE TABLE IF NOT EXISTS decision_log (
id INTEGER PRIMARY KEY AUTOINCREMENT,
session_id TEXT NOT NULL REFERENCES sessions(id) ON DELETE CASCADE,
decision TEXT NOT NULL,
alternatives_json TEXT NOT NULL DEFAULT '[]',
reasoning TEXT NOT NULL,
timestamp TEXT NOT NULL
);
CREATE TABLE IF NOT EXISTS pending_worktree_queue (
session_id TEXT PRIMARY KEY REFERENCES sessions(id) ON DELETE CASCADE,
repo_root TEXT NOT NULL,
@@ -225,12 +235,11 @@ impl StateStore {
CREATE INDEX IF NOT EXISTS idx_sessions_state ON sessions(state);
CREATE INDEX IF NOT EXISTS idx_tool_log_session ON tool_log(session_id);
CREATE UNIQUE INDEX IF NOT EXISTS idx_tool_log_hook_event
ON tool_log(hook_event_id)
WHERE hook_event_id IS NOT NULL;
CREATE INDEX IF NOT EXISTS idx_messages_to ON messages(to_session, read);
CREATE INDEX IF NOT EXISTS idx_session_output_session
ON session_output(session_id, id);
CREATE INDEX IF NOT EXISTS idx_decision_log_session
ON decision_log(session_id, timestamp, id);
CREATE INDEX IF NOT EXISTS idx_pending_worktree_queue_requested_at
ON pending_worktree_queue(requested_at, session_id);
@@ -1423,6 +1432,84 @@ impl StateStore {
.map_err(Into::into)
}
/// Append a decision to the audit trail for `session_id` and return the
/// stored entry, including its freshly assigned rowid and recording time.
pub fn insert_decision(
    &self,
    session_id: &str,
    decision: &str,
    alternatives: &[String],
    reasoning: &str,
) -> Result<DecisionLogEntry> {
    // Alternatives are persisted as a JSON array in a single TEXT column.
    let encoded_alternatives = serde_json::to_string(alternatives)
        .context("Failed to serialize decision alternatives")?;
    let recorded_at = chrono::Utc::now();
    let stored_timestamp = recorded_at.to_rfc3339();
    self.conn.execute(
        "INSERT INTO decision_log (session_id, decision, alternatives_json, reasoning, timestamp)
         VALUES (?1, ?2, ?3, ?4, ?5)",
        rusqlite::params![
            session_id,
            decision,
            encoded_alternatives,
            reasoning,
            stored_timestamp,
        ],
    )?;
    // Echo the row back from the inputs instead of re-reading it; only the
    // rowid comes from SQLite.
    Ok(DecisionLogEntry {
        id: self.conn.last_insert_rowid(),
        session_id: session_id.to_string(),
        decision: decision.to_string(),
        alternatives: alternatives.to_vec(),
        reasoning: reasoning.to_string(),
        timestamp: recorded_at,
    })
}
pub fn list_decisions_for_session(
&self,
session_id: &str,
limit: usize,
) -> Result<Vec<DecisionLogEntry>> {
let mut stmt = self.conn.prepare(
"SELECT id, session_id, decision, alternatives_json, reasoning, timestamp
FROM (
SELECT id, session_id, decision, alternatives_json, reasoning, timestamp
FROM decision_log
WHERE session_id = ?1
ORDER BY timestamp DESC, id DESC
LIMIT ?2
)
ORDER BY timestamp ASC, id ASC",
)?;
let entries = stmt
.query_map(rusqlite::params![session_id, limit as i64], |row| {
map_decision_log_entry(row)
})?
.collect::<Result<Vec<_>, _>>()?;
Ok(entries)
}
/// Return up to `limit` of the most recent decisions across every session,
/// ordered oldest-to-newest for display.
pub fn list_decisions(&self, limit: usize) -> Result<Vec<DecisionLogEntry>> {
    // Same newest-subset-then-reorder shape as the per-session query, minus
    // the session filter.
    let mut statement = self.conn.prepare(
        "SELECT id, session_id, decision, alternatives_json, reasoning, timestamp
         FROM (
             SELECT id, session_id, decision, alternatives_json, reasoning, timestamp
             FROM decision_log
             ORDER BY timestamp DESC, id DESC
             LIMIT ?1
         )
         ORDER BY timestamp ASC, id ASC",
    )?;
    let mapped = statement.query_map(rusqlite::params![limit as i64], map_decision_log_entry)?;
    mapped
        .collect::<rusqlite::Result<Vec<DecisionLogEntry>>>()
        .map_err(Into::into)
}
pub fn daemon_activity(&self) -> Result<DaemonActivity> {
self.conn
.query_row(
@@ -2037,6 +2124,34 @@ fn session_state_supports_overlap(state: &SessionState) -> bool {
)
}
/// Convert one `decision_log` row into a `DecisionLogEntry`, decoding the
/// JSON-encoded alternatives column and the RFC 3339 timestamp. Decoding
/// failures surface as `FromSqlConversionFailure` with the column index.
fn map_decision_log_entry(row: &rusqlite::Row<'_>) -> rusqlite::Result<DecisionLogEntry> {
    // Tolerate a NULL alternatives column by treating it as an empty list.
    let raw_alternatives: Option<String> = row.get(3)?;
    let alternatives: Vec<String> = serde_json::from_str(raw_alternatives.as_deref().unwrap_or("[]"))
        .map_err(|error| {
            rusqlite::Error::FromSqlConversionFailure(
                3,
                rusqlite::types::Type::Text,
                Box::new(error),
            )
        })?;
    let raw_timestamp: String = row.get(5)?;
    let timestamp = chrono::DateTime::parse_from_rfc3339(&raw_timestamp)
        .map(|parsed| parsed.with_timezone(&chrono::Utc))
        .map_err(|error| {
            rusqlite::Error::FromSqlConversionFailure(
                5,
                rusqlite::types::Type::Text,
                Box::new(error),
            )
        })?;
    Ok(DecisionLogEntry {
        id: row.get(0)?,
        session_id: row.get(1)?,
        decision: row.get(2)?,
        alternatives,
        reasoning: row.get(4)?,
        timestamp,
    })
}
fn file_overlap_is_relevant(current: &FileActivityEntry, other: &FileActivityEntry) -> bool {
current.path == other.path
&& !(matches!(current.action, FileActivityAction::Read)
@@ -2467,6 +2582,151 @@ mod tests {
Ok(())
}
/// Opening a database created before `tool_log.hook_event_id` existed must
/// add that column before creating the unique index over it, or the index
/// DDL would fail on the legacy schema.
#[test]
fn open_migrates_legacy_tool_log_before_creating_hook_event_index() -> Result<()> {
    let tempdir = TestDir::new("store-legacy-hook-event")?;
    let db_path = tempdir.path().join("state.db");
    // Hand-build the legacy layout: minimal sessions table and a tool_log
    // table with no hook_event_id column.
    let conn = Connection::open(&db_path)?;
    conn.execute_batch(
        "
        CREATE TABLE sessions (
        id TEXT PRIMARY KEY,
        task TEXT NOT NULL,
        agent_type TEXT NOT NULL,
        state TEXT NOT NULL DEFAULT 'pending',
        created_at TEXT NOT NULL,
        updated_at TEXT NOT NULL
        );
        CREATE TABLE tool_log (
        id INTEGER PRIMARY KEY AUTOINCREMENT,
        session_id TEXT NOT NULL,
        tool_name TEXT NOT NULL,
        input_summary TEXT,
        output_summary TEXT,
        duration_ms INTEGER,
        risk_score REAL DEFAULT 0.0,
        timestamp TEXT NOT NULL
        );
        ",
    )?;
    // Release the raw connection so StateStore::open gets exclusive access.
    drop(conn);
    // Opening through StateStore should run the migration on this layout.
    let db = StateStore::open(&db_path)?;
    assert!(db.has_column("tool_log", "hook_event_id")?);
    // Verify the unique index was actually created after the column add.
    let conn = Connection::open(&db_path)?;
    let index_count: i64 = conn.query_row(
        "SELECT COUNT(*)
        FROM sqlite_master
        WHERE type = 'index' AND name = 'idx_tool_log_hook_event'",
        [],
        |row| row.get(0),
    )?;
    assert_eq!(index_count, 1);
    Ok(())
}
#[test]
fn insert_and_list_decisions_for_session() -> Result<()> {
    let tempdir = TestDir::new("store-decisions")?;
    let store = StateStore::open(&tempdir.path().join("state.db"))?;
    let created = Utc::now();
    // A session row must exist first: decision_log references sessions(id).
    store.insert_session(&Session {
        id: "session-1".to_string(),
        task: "architect".to_string(),
        project: "workspace".to_string(),
        task_group: "general".to_string(),
        agent_type: "claude".to_string(),
        working_dir: PathBuf::from("/tmp"),
        state: SessionState::Running,
        pid: None,
        worktree: None,
        created_at: created,
        updated_at: created,
        last_heartbeat_at: created,
        metrics: SessionMetrics::default(),
    })?;
    store.insert_decision(
        "session-1",
        "Use sqlite for the shared context graph",
        &["json files".to_string(), "memory only".to_string()],
        "SQLite keeps the audit trail queryable from both CLI and TUI.",
    )?;
    store.insert_decision(
        "session-1",
        "Keep decision logging append-only",
        &["mutable edits".to_string()],
        "Append-only history preserves operator trust and timeline integrity.",
    )?;
    // Listing returns both entries in insertion (chronological) order.
    let decisions = store.list_decisions_for_session("session-1", 10)?;
    assert_eq!(decisions.len(), 2);
    let first = &decisions[0];
    assert_eq!(first.session_id, "session-1");
    assert_eq!(first.decision, "Use sqlite for the shared context graph");
    assert_eq!(
        first.alternatives,
        vec!["json files".to_string(), "memory only".to_string()]
    );
    let second = &decisions[1];
    assert_eq!(second.decision, "Keep decision logging append-only");
    assert_eq!(
        second.reasoning,
        "Append-only history preserves operator trust and timeline integrity."
    );
    Ok(())
}
#[test]
fn list_recent_decisions_across_sessions_returns_latest_subset_in_order() -> Result<()> {
    let tempdir = TestDir::new("store-decisions-all")?;
    let store = StateStore::open(&tempdir.path().join("state.db"))?;
    let created = Utc::now();
    // Three sessions, each about to receive exactly one decision.
    for id in ["session-a", "session-b", "session-c"] {
        store.insert_session(&Session {
            id: id.to_string(),
            task: "decision log".to_string(),
            project: "workspace".to_string(),
            task_group: "general".to_string(),
            agent_type: "claude".to_string(),
            working_dir: PathBuf::from("/tmp"),
            state: SessionState::Running,
            pid: None,
            worktree: None,
            created_at: created,
            updated_at: created,
            last_heartbeat_at: created,
            metrics: SessionMetrics::default(),
        })?;
    }
    // Sleep between inserts so the stored timestamps strictly increase.
    store.insert_decision("session-a", "Oldest", &[], "first")?;
    std::thread::sleep(std::time::Duration::from_millis(2));
    store.insert_decision("session-b", "Middle", &[], "second")?;
    std::thread::sleep(std::time::Duration::from_millis(2));
    store.insert_decision("session-c", "Newest", &[], "third")?;
    // limit=2 keeps only the two newest entries, reported oldest-first.
    let recent = store.list_decisions(2)?;
    assert_eq!(recent.len(), 2);
    assert_eq!(recent[0].decision, "Middle");
    assert_eq!(recent[0].session_id, "session-b");
    assert_eq!(recent[1].decision, "Newest");
    assert_eq!(recent[1].session_id, "session-c");
    Ok(())
}
#[test]
fn refresh_session_durations_updates_running_and_terminal_sessions() -> Result<()> {
let tempdir = TestDir::new("store-duration-metrics")?;

View File

@@ -22,7 +22,9 @@ use crate::session::output::{
OutputEvent, OutputLine, OutputStream, SessionOutputStore, OUTPUT_BUFFER_LIMIT,
};
use crate::session::store::{DaemonActivity, FileActivityOverlap, StateStore};
use crate::session::{FileActivityEntry, Session, SessionGrouping, SessionMessage, SessionState};
use crate::session::{
DecisionLogEntry, FileActivityEntry, Session, SessionGrouping, SessionMessage, SessionState,
};
use crate::worktree;
#[cfg(test)]
@@ -215,6 +217,7 @@ enum TimelineEventFilter {
Messages,
ToolCalls,
FileChanges,
Decisions,
}
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
@@ -257,6 +260,7 @@ enum TimelineEventType {
Message,
ToolCall,
FileChange,
Decision,
}
#[derive(Debug, Clone)]
@@ -1025,6 +1029,11 @@ impl Dashboard {
TimelineEventFilter::FileChanges,
OutputTimeFilter::AllTime,
) => "No file-change events across all sessions yet.",
(
SearchScope::AllSessions,
TimelineEventFilter::Decisions,
OutputTimeFilter::AllTime,
) => "No decision-log events across all sessions yet.",
(SearchScope::AllSessions, TimelineEventFilter::All, _) => {
"No timeline events across all sessions in the selected time range."
}
@@ -1040,6 +1049,9 @@ impl Dashboard {
(SearchScope::AllSessions, TimelineEventFilter::FileChanges, _) => {
"No file-change events across all sessions in the selected time range."
}
(SearchScope::AllSessions, TimelineEventFilter::Decisions, _) => {
"No decision-log events across all sessions in the selected time range."
}
(SearchScope::SelectedSession, TimelineEventFilter::All, OutputTimeFilter::AllTime) => {
"No timeline events for this session yet."
}
@@ -1063,6 +1075,11 @@ impl Dashboard {
TimelineEventFilter::FileChanges,
OutputTimeFilter::AllTime,
) => "No file-change events for this session yet.",
(
SearchScope::SelectedSession,
TimelineEventFilter::Decisions,
OutputTimeFilter::AllTime,
) => "No decision-log events for this session yet.",
(SearchScope::SelectedSession, TimelineEventFilter::All, _) => {
"No timeline events in the selected time range."
}
@@ -1078,6 +1095,9 @@ impl Dashboard {
(SearchScope::SelectedSession, TimelineEventFilter::FileChanges, _) => {
"No file-change events in the selected time range."
}
(SearchScope::SelectedSession, TimelineEventFilter::Decisions, _) => {
"No decision-log events in the selected time range."
}
}
}
@@ -4926,6 +4946,18 @@ impl Dashboard {
}
}));
let decisions = self
.db
.list_decisions_for_session(&session.id, 32)
.unwrap_or_default();
events.extend(decisions.into_iter().map(|entry| TimelineEvent {
occurred_at: entry.timestamp,
session_id: session.id.clone(),
event_type: TimelineEventType::Decision,
summary: decision_log_summary(&entry),
detail_lines: decision_log_detail_lines(&entry),
}));
let tool_logs = self
.db
.query_tool_logs(&session.id, 1, 128)
@@ -5613,6 +5645,23 @@ impl Dashboard {
}
}
}
let recent_decisions = self
.db
.list_decisions_for_session(&session.id, 5)
.unwrap_or_default();
if !recent_decisions.is_empty() {
lines.push("Recent decisions".to_string());
for entry in recent_decisions {
lines.push(format!(
"- {} {}",
self.short_timestamp(&entry.timestamp.to_rfc3339()),
decision_log_summary(&entry)
));
for detail in decision_log_detail_lines(&entry).into_iter().take(3) {
lines.push(format!(" {}", detail));
}
}
}
let file_overlaps = self
.db
.list_file_overlaps(&session.id, 3)
@@ -6361,7 +6410,8 @@ impl TimelineEventFilter {
Self::Lifecycle => Self::Messages,
Self::Messages => Self::ToolCalls,
Self::ToolCalls => Self::FileChanges,
Self::FileChanges => Self::All,
Self::FileChanges => Self::Decisions,
Self::Decisions => Self::All,
}
}
@@ -6372,6 +6422,7 @@ impl TimelineEventFilter {
Self::Messages => event_type == TimelineEventType::Message,
Self::ToolCalls => event_type == TimelineEventType::ToolCall,
Self::FileChanges => event_type == TimelineEventType::FileChange,
Self::Decisions => event_type == TimelineEventType::Decision,
}
}
@@ -6382,6 +6433,7 @@ impl TimelineEventFilter {
Self::Messages => "messages",
Self::ToolCalls => "tool calls",
Self::FileChanges => "file changes",
Self::Decisions => "decisions",
}
}
@@ -6392,6 +6444,7 @@ impl TimelineEventFilter {
Self::Messages => " messages",
Self::ToolCalls => " tool calls",
Self::FileChanges => " file changes",
Self::Decisions => " decisions",
}
}
}
@@ -6403,6 +6456,7 @@ impl TimelineEventType {
Self::Message => "message",
Self::ToolCall => "tool",
Self::FileChange => "file-change",
Self::Decision => "decision",
}
}
}
@@ -7332,6 +7386,28 @@ fn file_overlap_summary(entry: &FileActivityOverlap, timestamp: &str) -> String
)
}
/// One-line timeline headline for a decision-log entry, truncated to fit
/// the dashboard column.
fn decision_log_summary(entry: &DecisionLogEntry) -> String {
    let headline = truncate_for_dashboard(&entry.decision, 72);
    format!("decided {headline}")
}
/// Detail lines rendered under a decision-log timeline event: the reasoning
/// first, then up to three rejected alternatives (or an explicit note that
/// none were recorded).
fn decision_log_detail_lines(entry: &DecisionLogEntry) -> Vec<String> {
    let mut details = vec![format!(
        "why {}",
        truncate_for_dashboard(&entry.reasoning, 72)
    )];
    match entry.alternatives.as_slice() {
        [] => details.push("alternatives none recorded".to_string()),
        alternatives => details.extend(
            alternatives
                .iter()
                .take(3)
                .map(|alt| format!("alternative {}", truncate_for_dashboard(alt, 72))),
        ),
    }
    details
}
fn tool_log_detail_lines(entry: &ToolLogEntry) -> Vec<String> {
let mut lines = Vec::new();
if !entry.trigger_summary.trim().is_empty() {
@@ -8994,6 +9070,61 @@ diff --git a/src/lib.rs b/src/lib.rs\n\
Ok(())
}
#[test]
fn timeline_and_metrics_render_decision_log_entries() -> Result<()> {
    let now = Utc::now();
    let mut session = sample_session(
        "focus-12345678",
        "planner",
        SessionState::Running,
        Some("ecc/focus"),
        256,
        7,
    );
    session.created_at = now - chrono::Duration::hours(1);
    session.updated_at = now - chrono::Duration::minutes(2);
    let mut dashboard = test_dashboard(vec![session.clone()], 0);
    dashboard.db.insert_session(&session)?;
    dashboard.db.insert_decision(
        &session.id,
        "Use sqlite for the shared context graph",
        &["json files".to_string(), "memory only".to_string()],
        "SQLite keeps the audit trail queryable from CLI and TUI.",
    )?;
    // Timeline mode should surface the decision with its detail lines.
    dashboard.toggle_timeline_mode();
    let rendered = dashboard.rendered_output_text(180, 30);
    for expected in [
        "decision",
        "decided Use sqlite for the shared context graph",
        "why SQLite keeps the audit trail queryable",
        "alternative json files",
        "alternative memory only",
    ] {
        assert!(rendered.contains(expected), "timeline missing {expected:?}");
    }
    // The metrics pane should show the same decision under "Recent decisions".
    let metrics_text = dashboard.selected_session_metrics_text();
    assert!(metrics_text.contains("Recent decisions"));
    assert!(metrics_text.contains("decided Use sqlite for the shared context graph"));
    assert!(metrics_text.contains("alternative json files"));
    // Cycling the event filter five times should land on Decisions.
    for _ in 0..5 {
        dashboard.cycle_timeline_event_filter();
    }
    assert_eq!(
        dashboard.timeline_event_filter,
        TimelineEventFilter::Decisions
    );
    assert_eq!(
        dashboard.operator_note.as_deref(),
        Some("timeline filter set to decisions")
    );
    assert_eq!(dashboard.output_title(), " Timeline decisions ");
    Ok(())
}
#[test]
fn timeline_time_filter_hides_old_events() {
let now = Utc::now();