Compare commits

...

30 Commits

Author SHA1 Message Date
Affaan Mustafa
678fb6f0d3 Merge pull request #846 from pythonstrup/feat/desktop-notify-hook
feat: add macOS desktop notification Stop hook
2026-03-25 03:19:13 -07:00
Affaan Mustafa
401e26a45a Merge pull request #880 from affaan-m/dependabot/cargo/ecc2/cargo-3169503097
chore(deps): bump git2 from 0.19.0 to 0.20.4 in /ecc2 in the cargo group across 1 directory
2026-03-25 03:04:43 -07:00
Affaan Mustafa
eb934afbb5 Merge pull request #888 from affaan-m/feat/ecc2-risk-scoring
feat(ecc2): add tool risk scoring and actions
2026-03-25 03:01:12 -07:00
Affaan Mustafa
8303970258 feat(ecc2): add tool risk scoring and actions 2026-03-25 06:00:34 -04:00
Affaan Mustafa
319f9efafb Merge pull request #887 from affaan-m/feat/ecc2-tool-logging
feat(ecc2): add tool call logging and history
2026-03-25 02:51:20 -07:00
Affaan Mustafa
6c2a3a2bae feat(ecc2): add tool call logging and history 2026-03-25 05:50:31 -04:00
Affaan Mustafa
adaeab9dba Merge pull request #886 from affaan-m/feat/ecc2-split-pane
feat(ecc2): add split-pane dashboard resizing
2026-03-25 02:46:08 -07:00
Affaan Mustafa
8981dd6067 feat(ecc2): add split-pane dashboard resizing 2026-03-25 05:45:43 -04:00
Affaan Mustafa
4105a2f36c Merge pull request #885 from affaan-m/feat/ecc2-crash-resume
feat(ecc2): add crash resume session recovery
2026-03-25 01:37:35 -07:00
Affaan Mustafa
0166231ddb feat(ecc2): add crash resume session recovery 2026-03-25 04:36:12 -04:00
Affaan Mustafa
cf439dd481 Merge pull request #882 from affaan-m/feat/ecc2-live-streaming
feat(ecc2): live output streaming per agent
2026-03-25 01:31:53 -07:00
Affaan Mustafa
9903ae528b fix: restore antigravity install target metadata 2026-03-25 04:24:19 -04:00
Affaan Mustafa
44c2bf6f7b feat(ecc2): implement live output streaming per agent (#774)
- PTY output capture via tokio::process with stdout/stderr piping
- Ring buffer (1000 lines) per session
- Output pane wired to show selected session with auto-scroll
- Broadcast channel for output events
2026-03-25 04:19:50 -04:00
Affaan Mustafa
e78c092499 fix(ci): restore validation and antigravity target safety 2026-03-25 04:19:50 -04:00
Affaan Mustafa
61f70de479 Merge pull request #903 from affaan-m/fix/session-manager-843-supersede-853
fix: fold blocker-lane session and hook hardening into one PR
2026-03-25 01:16:28 -07:00
Affaan Mustafa
776ac439f3 test: cover canonical session duplicate precedence 2026-03-25 04:01:23 -04:00
Affaan Mustafa
b19b4c6b5e fix: finish blocker lane hook and install regressions 2026-03-25 04:00:50 -04:00
Affaan Mustafa
b5157f4ed1 test: relax sync-ecc shell parsing 2026-03-25 03:56:10 -04:00
Affaan Mustafa
2d1e384eef test: isolate suggest-compact counter fixtures 2026-03-25 03:51:15 -04:00
Affaan Mustafa
9c5ca92e6e fix: finish hook fallback and canonical session follow-ups 2026-03-25 03:44:03 -04:00
Affaan Mustafa
7b510c886e fix: harden session hook guards and session ID handling 2026-03-25 03:36:36 -04:00
Affaan Mustafa
c1b47ac9db Merge pull request #883 from affaan-m/feat/ecc2-status-panel
feat(ecc2): agent status panel with Table widget
2026-03-25 00:18:05 -07:00
Affaan Mustafa
3f02fa439a feat(ecc2): implement agent status panel with Table widget (#773)
- Table widget with columns: ID, Agent, State, Branch, Tokens, Duration
- Color-coded states: green=Running, yellow=Idle, red=Failed, gray=Stopped, blue=Completed
- Summary bar with running/completed/failed counts
- Row selection highlighting
2026-03-25 03:07:51 -04:00
Jonghyeok Park
f6b10481f3 fix: add spawnSync error logging and restore 5s timeout
- Check spawnSync result and log warning on failure via stderr
- Restore osascript timeout to 5000ms, increase hook deadline to 10s
  for sufficient headroom
2026-03-25 16:03:21 +09:00
Jonghyeok Park
d3699f9010 fix: use AppleScript-safe escaping and reduce spawnSync timeout
- Replace JSON.stringify with curly quote substitution for AppleScript
  compatibility (AppleScript does not support \" backslash escapes)
- Reduce spawnSync timeout from 5000ms to 3000ms to leave headroom
  within the 5s hook deadline
2026-03-25 16:03:21 +09:00
Jonghyeok Park
445ae5099d feat: add macOS desktop notification Stop hook
Add a new Stop hook that sends a native macOS notification with the
task summary (first line of last_assistant_message) when Claude finishes
responding. Uses osascript via spawnSync for shell injection safety.
Supports run-with-flags fast require() path. Only active on standard
and strict profiles; silently skips on non-macOS platforms.
2026-03-25 16:03:21 +09:00
Affaan Mustafa
00bc7f30be fix: resolve blocker PR validation regressions 2026-03-25 01:34:29 -04:00
Affaan Mustafa
1d0aa5ac2a fix: fold session manager blockers into one candidate 2026-03-24 23:08:27 -04:00
dependabot[bot]
e883385ab0 chore(deps): bump git2 in /ecc2 in the cargo group across 1 directory
Bumps the cargo group with 1 update in the /ecc2 directory: [git2](https://github.com/rust-lang/git2-rs).


Updates `git2` from 0.19.0 to 0.20.4
- [Changelog](https://github.com/rust-lang/git2-rs/blob/git2-0.20.4/CHANGELOG.md)
- [Commits](https://github.com/rust-lang/git2-rs/compare/git2-0.19.0...git2-0.20.4)

---
updated-dependencies:
- dependency-name: git2
  dependency-version: 0.20.4
  dependency-type: direct:production
  dependency-group: cargo
...

Signed-off-by: dependabot[bot] <support@github.com>
2026-03-25 02:45:34 +00:00
Affaan Mustafa
7726c25e46 fix(ci): restore validation and antigravity target safety 2026-03-23 14:29:21 -07:00
49 changed files with 4146 additions and 632 deletions

View File

@@ -175,6 +175,10 @@ jobs:
run: node scripts/ci/validate-skills.js
continue-on-error: false
- name: Validate install manifests
run: node scripts/ci/validate-install-manifests.js
continue-on-error: false
- name: Validate rules
run: node scripts/ci/validate-rules.js
continue-on-error: false

View File

@@ -39,5 +39,8 @@ jobs:
- name: Validate skills
run: node scripts/ci/validate-skills.js
- name: Validate install manifests
run: node scripts/ci/validate-install-manifests.js
- name: Validate rules
run: node scripts/ci/validate-rules.js

View File

@@ -142,7 +142,7 @@ Troubleshoot failures: check test isolation → verify mocks → fix implementat
```
agents/ — 28 specialized subagents
skills/ — 117 workflow skills and domain knowledge
skills/ — 125 workflow skills and domain knowledge
commands/ — 60 slash commands
hooks/ — Trigger-based automations
rules/ — Always-follow guidelines (common + per-language)

View File

@@ -1,5 +1,5 @@
---
description: Load the most recent session file from ~/.claude/sessions/ and resume work with full context from where the last session ended.
description: Load the most recent session file from ~/.claude/session-data/ and resume work with full context from where the last session ended.
---
# Resume Session Command
@@ -17,10 +17,10 @@ This command is the counterpart to `/save-session`.
## Usage
```
/resume-session # loads most recent file in ~/.claude/sessions/
/resume-session # loads most recent file in ~/.claude/session-data/
/resume-session 2024-01-15 # loads most recent session for that date
/resume-session ~/.claude/sessions/2024-01-15-session.tmp # loads a specific legacy-format file
/resume-session ~/.claude/sessions/2024-01-15-abc123de-session.tmp # loads a current short-id session file
/resume-session ~/.claude/session-data/2024-01-15-abc123de-session.tmp # loads a current short-id session file
/resume-session ~/.claude/sessions/2024-01-15-session.tmp # loads a specific legacy-format file
```
## Process
@@ -29,19 +29,20 @@ This command is the counterpart to `/save-session`.
If no argument provided:
1. Check `~/.claude/sessions/`
1. Check `~/.claude/session-data/`
2. Pick the most recently modified `*-session.tmp` file
3. If the folder does not exist or has no matching files, tell the user:
```
No session files found in ~/.claude/sessions/
No session files found in ~/.claude/session-data/
Run /save-session at the end of a session to create one.
```
Then stop.
If an argument is provided:
- If it looks like a date (`YYYY-MM-DD`), search `~/.claude/sessions/` for files matching
`YYYY-MM-DD-session.tmp` (legacy format) or `YYYY-MM-DD-<shortid>-session.tmp` (current format)
- If it looks like a date (`YYYY-MM-DD`), search `~/.claude/session-data/` first, then the legacy
`~/.claude/sessions/`, for files matching `YYYY-MM-DD-session.tmp` (legacy format) or
`YYYY-MM-DD-<shortid>-session.tmp` (current format)
and load the most recently modified variant for that date
- If it looks like a file path, read that file directly
- If not found, report clearly and stop
@@ -114,7 +115,7 @@ Report: "Session file found but appears empty or unreadable. You may need to cre
## Example Output
```
SESSION LOADED: /Users/you/.claude/sessions/2024-01-15-abc123de-session.tmp
SESSION LOADED: /Users/you/.claude/session-data/2024-01-15-abc123de-session.tmp
════════════════════════════════════════════════
PROJECT: my-app — JWT Authentication

View File

@@ -1,5 +1,5 @@
---
description: Save current session state to a dated file in ~/.claude/sessions/ so work can be resumed in a future session with full context.
description: Save current session state to a dated file in ~/.claude/session-data/ so work can be resumed in a future session with full context.
---
# Save Session Command
@@ -29,19 +29,19 @@ Before writing the file, collect:
Create the canonical sessions folder in the user's Claude home directory:
```bash
mkdir -p ~/.claude/sessions
mkdir -p ~/.claude/session-data
```
### Step 3: Write the session file
Create `~/.claude/sessions/YYYY-MM-DD-<short-id>-session.tmp`, using today's actual date and a short-id that satisfies the rules enforced by `SESSION_FILENAME_REGEX` in `session-manager.js`:
Create `~/.claude/session-data/YYYY-MM-DD-<short-id>-session.tmp`, using today's actual date and a short-id that satisfies the rules enforced by `SESSION_FILENAME_REGEX` in `session-manager.js`:
- Allowed characters: lowercase `a-z`, digits `0-9`, hyphens `-`
- Minimum length: 8 characters
- No uppercase letters, no underscores, no spaces
- Compatibility characters: letters `a-z` / `A-Z`, digits `0-9`, hyphens `-`, underscores `_`
- Compatibility minimum length: 1 character
- Recommended style for new files: lowercase letters, digits, and hyphens with 8+ characters to avoid collisions
Valid examples: `abc123de`, `a1b2c3d4`, `frontend-worktree-1`
Invalid examples: `ABC123de` (uppercase), `short` (under 8 chars), `test_id1` (underscore)
Valid examples: `abc123de`, `a1b2c3d4`, `frontend-worktree-1`, `ChezMoi_2`
Avoid for new files: `A`, `test_id1`, `ABC123de`
Full valid filename example: `2024-01-15-abc123de-session.tmp`
@@ -271,5 +271,5 @@ Then test with Postman — the response should include a `Set-Cookie` header.
- The "What Did NOT Work" section is the most critical — future sessions will blindly retry failed approaches without it
- If the user asks to save mid-session (not just at the end), save what's known so far and mark in-progress items clearly
- The file is meant to be read by Claude at the start of the next session via `/resume-session`
- Use the canonical global session store: `~/.claude/sessions/`
- Use the canonical global session store: `~/.claude/session-data/`
- Prefer the short-id filename form (`YYYY-MM-DD-<short-id>-session.tmp`) for any new session file

View File

@@ -4,7 +4,7 @@ description: Manage Claude Code session history, aliases, and session metadata.
# Sessions Command
Manage Claude Code session history - list, load, alias, and edit sessions stored in `~/.claude/sessions/`.
Manage Claude Code session history - list, load, alias, and edit sessions stored in `~/.claude/session-data/` with legacy reads from `~/.claude/sessions/`.
## Usage
@@ -89,7 +89,7 @@ const size = sm.getSessionSize(session.sessionPath);
const aliases = aa.getAliasesForSession(session.filename);
console.log('Session: ' + session.filename);
console.log('Path: ~/.claude/sessions/' + session.filename);
console.log('Path: ' + session.sessionPath);
console.log('');
console.log('Statistics:');
console.log(' Lines: ' + stats.lineCount);
@@ -327,7 +327,7 @@ $ARGUMENTS:
## Notes
- Sessions are stored as markdown files in `~/.claude/sessions/`
- Sessions are stored as markdown files in `~/.claude/session-data/` with legacy reads from `~/.claude/sessions/`
- Aliases are stored in `~/.claude/session-aliases.json`
- Session IDs can be shortened (first 4-8 characters usually unique enough)
- Use aliases for frequently referenced sessions

8
ecc2/Cargo.lock generated
View File

@@ -438,9 +438,9 @@ dependencies = [
[[package]]
name = "git2"
version = "0.19.0"
version = "0.20.4"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "b903b73e45dc0c6c596f2d37eccece7c1c8bb6e4407b001096387c63d0d93724"
checksum = "7b88256088d75a56f8ecfa070513a775dd9107f6530ef14919dac831af9cfe2b"
dependencies = [
"bitflags",
"libc",
@@ -725,9 +725,9 @@ checksum = "b5b646652bf6661599e1da8901b3b9522896f01e736bad5f723fe7a3a27f899d"
[[package]]
name = "libgit2-sys"
version = "0.17.0+1.8.1"
version = "0.18.3+1.9.2"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "10472326a8a6477c3c20a64547b0059e4b0d086869eee31e6d7da728a8eb7224"
checksum = "c9b3acc4b91781bb0b3386669d325163746af5f6e4f73e6d2d630e09a35f3487"
dependencies = [
"cc",
"libc",

View File

@@ -19,7 +19,7 @@ tokio = { version = "1", features = ["full"] }
rusqlite = { version = "0.32", features = ["bundled"] }
# Git integration
git2 = "0.19"
git2 = "0.20"
# Serialization
serde = { version = "1", features = ["derive"] }

View File

@@ -2,6 +2,23 @@ use anyhow::Result;
use serde::{Deserialize, Serialize};
use std::path::PathBuf;
#[derive(Debug, Clone, Copy, Default, PartialEq, Eq, Serialize, Deserialize)]
#[serde(rename_all = "snake_case")]
pub enum PaneLayout {
#[default]
Horizontal,
Vertical,
Grid,
}
#[derive(Debug, Clone, Copy, PartialEq, Serialize, Deserialize)]
#[serde(default)]
pub struct RiskThresholds {
pub review: f64,
pub confirm: f64,
pub block: f64,
}
#[derive(Debug, Clone, Serialize, Deserialize)]
#[serde(default)]
pub struct Config {
@@ -15,6 +32,8 @@ pub struct Config {
pub cost_budget_usd: f64,
pub token_budget: u64,
pub theme: Theme,
pub pane_layout: PaneLayout,
pub risk_thresholds: RiskThresholds,
}
#[derive(Debug, Clone, Serialize, Deserialize)]
@@ -37,11 +56,19 @@ impl Default for Config {
cost_budget_usd: 10.0,
token_budget: 500_000,
theme: Theme::Dark,
pane_layout: PaneLayout::Horizontal,
risk_thresholds: Self::RISK_THRESHOLDS,
}
}
}
impl Config {
pub const RISK_THRESHOLDS: RiskThresholds = RiskThresholds {
review: 0.35,
confirm: 0.60,
block: 0.85,
};
pub fn load() -> Result<Self> {
let config_path = dirs::home_dir()
.unwrap_or_else(|| PathBuf::from("."))
@@ -58,9 +85,15 @@ impl Config {
}
}
impl Default for RiskThresholds {
fn default() -> Self {
Config::RISK_THRESHOLDS
}
}
#[cfg(test)]
mod tests {
use super::Config;
use super::{Config, PaneLayout};
#[test]
fn default_includes_positive_budget_thresholds() {
@@ -88,5 +121,24 @@ theme = "Dark"
assert_eq!(config.cost_budget_usd, defaults.cost_budget_usd);
assert_eq!(config.token_budget, defaults.token_budget);
assert_eq!(config.pane_layout, defaults.pane_layout);
assert_eq!(config.risk_thresholds, defaults.risk_thresholds);
}
#[test]
fn default_pane_layout_is_horizontal() {
assert_eq!(Config::default().pane_layout, PaneLayout::Horizontal);
}
#[test]
fn pane_layout_deserializes_from_toml() {
let config: Config = toml::from_str(r#"pane_layout = "grid""#).unwrap();
assert_eq!(config.pane_layout, PaneLayout::Grid);
}
#[test]
fn default_risk_thresholds_are_applied() {
assert_eq!(Config::default().risk_thresholds, Config::RISK_THRESHOLDS);
}
}

View File

@@ -7,6 +7,7 @@ mod worktree;
use anyhow::Result;
use clap::Parser;
use std::path::PathBuf;
use tracing_subscriber::EnvFilter;
#[derive(Parser, Debug)]
@@ -44,8 +45,24 @@ enum Commands {
/// Session ID or alias
session_id: String,
},
/// Resume a failed or stopped session
Resume {
/// Session ID or alias
session_id: String,
},
/// Run as background daemon
Daemon,
#[command(hide = true)]
RunSession {
#[arg(long)]
session_id: String,
#[arg(long)]
task: String,
#[arg(long)]
agent: String,
#[arg(long)]
cwd: PathBuf,
},
}
#[tokio::main]
@@ -87,11 +104,39 @@ async fn main() -> Result<()> {
session::manager::stop_session(&db, &session_id).await?;
println!("Session stopped: {session_id}");
}
Some(Commands::Resume { session_id }) => {
let resumed_id = session::manager::resume_session(&db, &session_id).await?;
println!("Session resumed: {resumed_id}");
}
Some(Commands::Daemon) => {
println!("Starting ECC daemon...");
session::daemon::run(db, cfg).await?;
}
Some(Commands::RunSession {
session_id,
task,
agent,
cwd,
}) => {
session::manager::run_session(&cfg, &session_id, &task, &agent, &cwd).await?;
}
}
Ok(())
}
#[cfg(test)]
mod tests {
use super::*;
#[test]
fn cli_parses_resume_command() {
let cli = Cli::try_parse_from(["ecc", "resume", "deadbeef"])
.expect("resume subcommand should parse");
match cli.command {
Some(Commands::Resume { session_id }) => assert_eq!(session_id, "deadbeef"),
_ => panic!("expected resume subcommand"),
}
}
}

View File

@@ -1,6 +1,7 @@
use anyhow::Result;
use anyhow::{bail, Result};
use serde::{Deserialize, Serialize};
use crate::config::{Config, RiskThresholds};
use crate::session::store::StateStore;
#[derive(Debug, Clone, Serialize, Deserialize)]
@@ -13,42 +14,396 @@ pub struct ToolCallEvent {
pub risk_score: f64,
}
#[derive(Debug, Clone, PartialEq, Serialize, Deserialize)]
pub struct RiskAssessment {
pub score: f64,
pub reasons: Vec<String>,
pub suggested_action: SuggestedAction,
}
#[derive(Debug, Clone, Copy, PartialEq, Eq, Serialize, Deserialize)]
#[serde(rename_all = "snake_case")]
pub enum SuggestedAction {
Allow,
Review,
RequireConfirmation,
Block,
}
impl ToolCallEvent {
/// Compute risk score based on tool type and input patterns.
pub fn compute_risk(tool_name: &str, input: &str) -> f64 {
let mut score: f64 = 0.0;
pub fn new(
session_id: impl Into<String>,
tool_name: impl Into<String>,
input_summary: impl Into<String>,
output_summary: impl Into<String>,
duration_ms: u64,
) -> Self {
let tool_name = tool_name.into();
let input_summary = input_summary.into();
// Destructive tools get higher base risk
match tool_name {
"Bash" => score += 0.3,
"Write" => score += 0.2,
"Edit" => score += 0.1,
_ => score += 0.05,
Self {
session_id: session_id.into(),
risk_score: Self::compute_risk(&tool_name, &input_summary, &Config::RISK_THRESHOLDS)
.score,
tool_name,
input_summary,
output_summary: output_summary.into(),
duration_ms,
}
}
/// Compute risk from the tool type and input characteristics.
pub fn compute_risk(
tool_name: &str,
input: &str,
thresholds: &RiskThresholds,
) -> RiskAssessment {
let normalized_tool = tool_name.to_ascii_lowercase();
let normalized_input = input.to_ascii_lowercase();
let mut score = 0.0;
let mut reasons = Vec::new();
let (base_score, base_reason) = base_tool_risk(&normalized_tool);
score += base_score;
if let Some(reason) = base_reason {
reasons.push(reason.to_string());
}
// Dangerous patterns in bash commands
if tool_name == "Bash" {
if input.contains("rm -rf") || input.contains("--force") {
score += 0.4;
}
if input.contains("git push") || input.contains("git reset") {
score += 0.3;
}
if input.contains("sudo") || input.contains("chmod 777") {
score += 0.5;
}
let (file_sensitivity_score, file_sensitivity_reason) =
assess_file_sensitivity(&normalized_input);
score += file_sensitivity_score;
if let Some(reason) = file_sensitivity_reason {
reasons.push(reason);
}
score.min(1.0)
let (blast_radius_score, blast_radius_reason) = assess_blast_radius(&normalized_input);
score += blast_radius_score;
if let Some(reason) = blast_radius_reason {
reasons.push(reason);
}
let (irreversibility_score, irreversibility_reason) =
assess_irreversibility(&normalized_input);
score += irreversibility_score;
if let Some(reason) = irreversibility_reason {
reasons.push(reason);
}
let score = score.clamp(0.0, 1.0);
let suggested_action = SuggestedAction::from_score(score, thresholds);
RiskAssessment {
score,
reasons,
suggested_action,
}
}
}
pub fn log_tool_call(db: &StateStore, event: &ToolCallEvent) -> Result<()> {
db.send_message(
&event.session_id,
"observability",
&serde_json::to_string(event)?,
"tool_call",
)?;
Ok(())
impl SuggestedAction {
fn from_score(score: f64, thresholds: &RiskThresholds) -> Self {
if score >= thresholds.block {
Self::Block
} else if score >= thresholds.confirm {
Self::RequireConfirmation
} else if score >= thresholds.review {
Self::Review
} else {
Self::Allow
}
}
}
fn base_tool_risk(tool_name: &str) -> (f64, Option<&'static str>) {
match tool_name {
"bash" => (
0.20,
Some("shell execution can modify local or shared state"),
),
"write" | "multiedit" => (0.15, Some("writes files directly")),
"edit" => (0.10, Some("modifies existing files")),
_ => (0.05, None),
}
}
fn assess_file_sensitivity(input: &str) -> (f64, Option<String>) {
const SECRET_PATTERNS: &[&str] = &[
".env",
"secret",
"credential",
"token",
"api_key",
"apikey",
"auth",
"id_rsa",
".pem",
".key",
];
const SHARED_INFRA_PATTERNS: &[&str] = &[
"cargo.toml",
"package.json",
"dockerfile",
".github/workflows",
"schema",
"migration",
"production",
];
if contains_any(input, SECRET_PATTERNS) {
(
0.25,
Some("targets a sensitive file or credential surface".to_string()),
)
} else if contains_any(input, SHARED_INFRA_PATTERNS) {
(
0.15,
Some("targets shared infrastructure or release-critical files".to_string()),
)
} else {
(0.0, None)
}
}
fn assess_blast_radius(input: &str) -> (f64, Option<String>) {
const LARGE_SCOPE_PATTERNS: &[&str] = &[
"**",
"/*",
"--all",
"--recursive",
"entire repo",
"all files",
"across src/",
"find ",
" xargs ",
];
const SHARED_STATE_PATTERNS: &[&str] = &[
"git push --force",
"git push -f",
"origin main",
"origin master",
"rm -rf .",
"rm -rf /",
];
if contains_any(input, SHARED_STATE_PATTERNS) {
(
0.35,
Some("has a broad blast radius across shared state or history".to_string()),
)
} else if contains_any(input, LARGE_SCOPE_PATTERNS) {
(
0.25,
Some("has a broad blast radius across multiple files or directories".to_string()),
)
} else {
(0.0, None)
}
}
fn assess_irreversibility(input: &str) -> (f64, Option<String>) {
const HIGH_IRREVERSIBILITY_PATTERNS: &[&str] = &[
"rm -rf",
"git reset --hard",
"git clean -fd",
"drop database",
"drop table",
"truncate ",
"shred ",
];
const MODERATE_IRREVERSIBILITY_PATTERNS: &[&str] =
&["rm -f", "git push --force", "git push -f", "delete from"];
if contains_any(input, HIGH_IRREVERSIBILITY_PATTERNS) {
(
0.45,
Some("includes an irreversible or destructive operation".to_string()),
)
} else if contains_any(input, MODERATE_IRREVERSIBILITY_PATTERNS) {
(
0.40,
Some("includes an irreversible or difficult-to-undo operation".to_string()),
)
} else {
(0.0, None)
}
}
fn contains_any(input: &str, patterns: &[&str]) -> bool {
patterns.iter().any(|pattern| input.contains(pattern))
}
#[derive(Debug, Clone, Serialize, Deserialize, PartialEq)]
pub struct ToolLogEntry {
pub id: i64,
pub session_id: String,
pub tool_name: String,
pub input_summary: String,
pub output_summary: String,
pub duration_ms: u64,
pub risk_score: f64,
pub timestamp: String,
}
#[derive(Debug, Clone, Serialize, Deserialize, PartialEq)]
pub struct ToolLogPage {
pub entries: Vec<ToolLogEntry>,
pub page: u64,
pub page_size: u64,
pub total: u64,
}
pub struct ToolLogger<'a> {
db: &'a StateStore,
}
impl<'a> ToolLogger<'a> {
pub fn new(db: &'a StateStore) -> Self {
Self { db }
}
pub fn log(&self, event: &ToolCallEvent) -> Result<ToolLogEntry> {
let timestamp = chrono::Utc::now().to_rfc3339();
self.db.insert_tool_log(
&event.session_id,
&event.tool_name,
&event.input_summary,
&event.output_summary,
event.duration_ms,
event.risk_score,
&timestamp,
)
}
pub fn query(&self, session_id: &str, page: u64, page_size: u64) -> Result<ToolLogPage> {
if page_size == 0 {
bail!("page_size must be greater than 0");
}
self.db.query_tool_logs(session_id, page.max(1), page_size)
}
}
pub fn log_tool_call(db: &StateStore, event: &ToolCallEvent) -> Result<ToolLogEntry> {
ToolLogger::new(db).log(event)
}
#[cfg(test)]
mod tests {
use super::{SuggestedAction, ToolCallEvent, ToolLogger};
use crate::config::Config;
use crate::session::store::StateStore;
use crate::session::{Session, SessionMetrics, SessionState};
use std::path::PathBuf;
fn test_db_path() -> PathBuf {
std::env::temp_dir().join(format!("ecc2-observability-{}.db", uuid::Uuid::new_v4()))
}
fn test_session(id: &str) -> Session {
let now = chrono::Utc::now();
Session {
id: id.to_string(),
task: "test task".to_string(),
agent_type: "claude".to_string(),
state: SessionState::Pending,
pid: None,
worktree: None,
created_at: now,
updated_at: now,
metrics: SessionMetrics::default(),
}
}
#[test]
fn computes_sensitive_file_risk() {
let assessment = ToolCallEvent::compute_risk(
"Write",
"Update .env.production with rotated API token",
&Config::RISK_THRESHOLDS,
);
assert!(assessment.score >= Config::RISK_THRESHOLDS.review);
assert_eq!(assessment.suggested_action, SuggestedAction::Review);
assert!(assessment
.reasons
.iter()
.any(|reason| reason.contains("sensitive file")));
}
#[test]
fn computes_blast_radius_risk() {
let assessment = ToolCallEvent::compute_risk(
"Edit",
"Apply the same replacement across src/**/*.rs",
&Config::RISK_THRESHOLDS,
);
assert!(assessment.score >= Config::RISK_THRESHOLDS.review);
assert_eq!(assessment.suggested_action, SuggestedAction::Review);
assert!(assessment
.reasons
.iter()
.any(|reason| reason.contains("blast radius")));
}
#[test]
fn computes_irreversible_risk() {
let assessment = ToolCallEvent::compute_risk(
"Bash",
"rm -f /tmp/ecc-temp.txt",
&Config::RISK_THRESHOLDS,
);
assert!(assessment.score >= Config::RISK_THRESHOLDS.confirm);
assert_eq!(
assessment.suggested_action,
SuggestedAction::RequireConfirmation,
);
assert!(assessment
.reasons
.iter()
.any(|reason| reason.contains("irreversible")));
}
#[test]
fn blocks_combined_high_risk_operations() {
let assessment = ToolCallEvent::compute_risk(
"Bash",
"rm -rf . && git push --force origin main",
&Config::RISK_THRESHOLDS,
);
assert!(assessment.score >= Config::RISK_THRESHOLDS.block);
assert_eq!(assessment.suggested_action, SuggestedAction::Block);
}
#[test]
fn logger_persists_entries_and_paginates() -> anyhow::Result<()> {
let db_path = test_db_path();
let db = StateStore::open(&db_path)?;
db.insert_session(&test_session("sess-1"))?;
let logger = ToolLogger::new(&db);
logger.log(&ToolCallEvent::new("sess-1", "Read", "first", "ok", 5))?;
logger.log(&ToolCallEvent::new("sess-1", "Write", "second", "ok", 15))?;
logger.log(&ToolCallEvent::new("sess-1", "Bash", "third", "ok", 25))?;
let first_page = logger.query("sess-1", 1, 2)?;
assert_eq!(first_page.total, 3);
assert_eq!(first_page.entries.len(), 2);
assert_eq!(first_page.entries[0].tool_name, "Bash");
assert_eq!(first_page.entries[1].tool_name, "Write");
let second_page = logger.query("sess-1", 2, 2)?;
assert_eq!(second_page.total, 3);
assert_eq!(second_page.entries.len(), 1);
assert_eq!(second_page.entries[0].tool_name, "Read");
std::fs::remove_file(&db_path).ok();
Ok(())
}
}

View File

@@ -10,6 +10,7 @@ use crate::config::Config;
/// and cleans up stale resources.
pub async fn run(db: StateStore, cfg: Config) -> Result<()> {
tracing::info!("ECC daemon started");
resume_crashed_sessions(&db)?;
let heartbeat_interval = Duration::from_secs(cfg.heartbeat_interval_secs);
let timeout = Duration::from_secs(cfg.session_timeout_secs);
@@ -23,6 +24,43 @@ pub async fn run(db: StateStore, cfg: Config) -> Result<()> {
}
}
pub fn resume_crashed_sessions(db: &StateStore) -> Result<()> {
let failed_sessions = resume_crashed_sessions_with(db, pid_is_alive)?;
if failed_sessions > 0 {
tracing::warn!("Marked {failed_sessions} crashed sessions as failed during daemon startup");
}
Ok(())
}
fn resume_crashed_sessions_with<F>(db: &StateStore, is_pid_alive: F) -> Result<usize>
where
F: Fn(u32) -> bool,
{
let sessions = db.list_sessions()?;
let mut failed_sessions = 0;
for session in sessions {
if session.state != SessionState::Running {
continue;
}
let is_alive = session.pid.is_some_and(&is_pid_alive);
if is_alive {
continue;
}
tracing::warn!(
"Session {} was left running with stale pid {:?}; marking it failed",
session.id,
session.pid
);
db.update_state_and_pid(&session.id, &SessionState::Failed, None)?;
failed_sessions += 1;
}
Ok(failed_sessions)
}
fn check_sessions(db: &StateStore, timeout: Duration) -> Result<()> {
let sessions = db.list_sessions()?;
@@ -38,9 +76,102 @@ fn check_sessions(db: &StateStore, timeout: Duration) -> Result<()> {
if elapsed > timeout {
tracing::warn!("Session {} timed out after {:?}", session.id, elapsed);
db.update_state(&session.id, &SessionState::Failed)?;
db.update_state_and_pid(&session.id, &SessionState::Failed, None)?;
}
}
Ok(())
}
#[cfg(unix)]
fn pid_is_alive(pid: u32) -> bool {
if pid == 0 {
return false;
}
// SAFETY: kill(pid, 0) probes process existence without delivering a signal.
let result = unsafe { libc::kill(pid as libc::pid_t, 0) };
if result == 0 {
return true;
}
matches!(
std::io::Error::last_os_error().raw_os_error(),
Some(code) if code == libc::EPERM
)
}
#[cfg(not(unix))]
fn pid_is_alive(_pid: u32) -> bool {
false
}
#[cfg(test)]
mod tests {
use super::*;
use crate::session::{Session, SessionMetrics, SessionState};
use std::path::PathBuf;
fn temp_db_path() -> PathBuf {
std::env::temp_dir().join(format!("ecc2-daemon-test-{}.db", uuid::Uuid::new_v4()))
}
fn sample_session(id: &str, state: SessionState, pid: Option<u32>) -> Session {
let now = chrono::Utc::now();
Session {
id: id.to_string(),
task: "Recover crashed worker".to_string(),
agent_type: "claude".to_string(),
state,
pid,
worktree: None,
created_at: now,
updated_at: now,
metrics: SessionMetrics::default(),
}
}
#[test]
fn resume_crashed_sessions_marks_dead_running_sessions_failed() -> Result<()> {
let path = temp_db_path();
let store = StateStore::open(&path)?;
store.insert_session(&sample_session(
"deadbeef",
SessionState::Running,
Some(4242),
))?;
resume_crashed_sessions_with(&store, |_| false)?;
let session = store
.get_session("deadbeef")?
.expect("session should still exist");
assert_eq!(session.state, SessionState::Failed);
assert_eq!(session.pid, None);
let _ = std::fs::remove_file(path);
Ok(())
}
#[test]
fn resume_crashed_sessions_keeps_live_running_sessions_running() -> Result<()> {
let path = temp_db_path();
let store = StateStore::open(&path)?;
store.insert_session(&sample_session(
"alive123",
SessionState::Running,
Some(7777),
))?;
resume_crashed_sessions_with(&store, |_| true)?;
let session = store
.get_session("alive123")?
.expect("session should still exist");
assert_eq!(session.state, SessionState::Running);
assert_eq!(session.pid, Some(7777));
let _ = std::fs::remove_file(path);
Ok(())
}
}

View File

@@ -4,9 +4,12 @@ use std::path::{Path, PathBuf};
use std::process::Stdio;
use tokio::process::Command;
use super::output::SessionOutputStore;
use super::runtime::capture_command_output;
use super::store::StateStore;
use super::{Session, SessionMetrics, SessionState};
use crate::config::Config;
use crate::observability::{log_tool_call, ToolCallEvent, ToolLogEntry, ToolLogPage, ToolLogger};
use crate::worktree;
pub async fn create_session(
@@ -18,18 +21,7 @@ pub async fn create_session(
) -> Result<String> {
let repo_root =
std::env::current_dir().context("Failed to resolve current working directory")?;
let agent_program = agent_program(agent_type)?;
create_session_in_dir(
db,
cfg,
task,
agent_type,
use_worktree,
&repo_root,
&agent_program,
)
.await
queue_session_in_dir(db, cfg, task, agent_type, use_worktree, &repo_root).await
}
pub fn list_sessions(db: &StateStore) -> Result<Vec<Session>> {
@@ -45,6 +37,59 @@ pub async fn stop_session(db: &StateStore, id: &str) -> Result<()> {
stop_session_with_options(db, id, true).await
}
/// Logs a single tool invocation against a session and bumps the session's
/// tool-call counter.
///
/// Fails if the session id (or prefix) does not resolve to a stored session.
pub fn record_tool_call(
    db: &StateStore,
    session_id: &str,
    tool_name: &str,
    input_summary: &str,
    output_summary: &str,
    duration_ms: u64,
) -> Result<ToolLogEntry> {
    // Resolve first so the log entry carries the canonical (full) id even
    // when the caller passed a prefix.
    let session = match db.get_session(session_id)? {
        Some(session) => session,
        None => anyhow::bail!("Session not found: {session_id}"),
    };
    let canonical_id = session.id;
    let event = ToolCallEvent::new(
        canonical_id.clone(),
        tool_name,
        input_summary,
        output_summary,
        duration_ms,
    );
    let entry = log_tool_call(db, &event)?;
    db.increment_tool_calls(&canonical_id)?;
    Ok(entry)
}
/// Fetches one page of the tool-call history for the given session.
pub fn query_tool_calls(
    db: &StateStore,
    session_id: &str,
    page: u64,
    page_size: u64,
) -> Result<ToolLogPage> {
    // Fail fast with a clear error when the session does not exist.
    let session = match db.get_session(session_id)? {
        Some(session) => session,
        None => anyhow::bail!("Session not found: {session_id}"),
    };
    let logger = ToolLogger::new(db);
    logger.query(&session.id, page, page_size)
}
/// Re-queues a stopped/failed session by resetting it to `Pending` with no
/// pid. Completed and currently-Running sessions are rejected.
pub async fn resume_session(db: &StateStore, id: &str) -> Result<String> {
    let session = resolve_session(db, id)?;
    match session.state {
        SessionState::Completed => {
            anyhow::bail!("Completed sessions cannot be resumed: {}", session.id)
        }
        SessionState::Running => {
            anyhow::bail!("Session is already running: {}", session.id)
        }
        _ => {}
    }
    // Pending + cleared pid makes the session eligible for a fresh runner.
    db.update_state_and_pid(&session.id, &SessionState::Pending, None)?;
    Ok(session.id)
}
fn agent_program(agent_type: &str) -> Result<PathBuf> {
match agent_type {
"claude" => Ok(PathBuf::from("claude")),
@@ -62,6 +107,97 @@ fn resolve_session(db: &StateStore, id: &str) -> Result<Session> {
session.ok_or_else(|| anyhow::anyhow!("Session not found: {id}"))
}
/// Executes a queued session: builds the agent command and streams its
/// output until the process exits.
///
/// Acts as a no-op (with an info log) unless the session is still
/// `Pending`, which guards against double-starts.
pub async fn run_session(
    cfg: &Config,
    session_id: &str,
    task: &str,
    agent_type: &str,
    working_dir: &Path,
) -> Result<()> {
    let db = StateStore::open(&cfg.db_path)?;
    let session = resolve_session(&db, session_id)?;
    if session.state != SessionState::Pending {
        tracing::info!(
            "Skipping run_session for {} because state is {}",
            session_id,
            session.state
        );
        return Ok(());
    }
    let program = agent_program(agent_type)?;
    let command = build_agent_command(&program, task, session_id, working_dir);
    let output_store = SessionOutputStore::default();
    capture_command_output(
        cfg.db_path.clone(),
        session_id.to_string(),
        command,
        output_store,
    )
    .await?;
    Ok(())
}
async fn queue_session_in_dir(
db: &StateStore,
cfg: &Config,
task: &str,
agent_type: &str,
use_worktree: bool,
repo_root: &Path,
) -> Result<String> {
let session = build_session_record(task, agent_type, use_worktree, cfg, repo_root)?;
db.insert_session(&session)?;
let working_dir = session
.worktree
.as_ref()
.map(|worktree| worktree.path.as_path())
.unwrap_or(repo_root);
match spawn_session_runner(task, &session.id, agent_type, working_dir).await {
Ok(()) => Ok(session.id),
Err(error) => {
db.update_state(&session.id, &SessionState::Failed)?;
if let Some(worktree) = session.worktree.as_ref() {
let _ = crate::worktree::remove(&worktree.path);
}
Err(error.context(format!("Failed to queue session {}", session.id)))
}
}
}
/// Builds a fresh `Session` in the `Pending` state, optionally creating a
/// git worktree for it.
///
/// The session id is the first 8 characters of a v4 UUID (ASCII hex, so the
/// byte slice is always valid).
fn build_session_record(
    task: &str,
    agent_type: &str,
    use_worktree: bool,
    cfg: &Config,
    repo_root: &Path,
) -> Result<Session> {
    let full_uuid = uuid::Uuid::new_v4().to_string();
    let id = full_uuid[..8].to_string();
    let timestamp = chrono::Utc::now();
    // Option<Result<_>> -> Result<Option<_>> keeps `?` usable here.
    let worktree = use_worktree
        .then(|| worktree::create_for_session_in_repo(&id, cfg, repo_root))
        .transpose()?;
    Ok(Session {
        id,
        task: task.to_string(),
        agent_type: agent_type.to_string(),
        state: SessionState::Pending,
        pid: None,
        worktree,
        created_at: timestamp,
        updated_at: timestamp,
        metrics: SessionMetrics::default(),
    })
}
async fn create_session_in_dir(
db: &StateStore,
cfg: &Config,
@@ -71,26 +207,7 @@ async fn create_session_in_dir(
repo_root: &Path,
agent_program: &Path,
) -> Result<String> {
let id = uuid::Uuid::new_v4().to_string()[..8].to_string();
let now = chrono::Utc::now();
let wt = if use_worktree {
Some(worktree::create_for_session_in_repo(&id, cfg, repo_root)?)
} else {
None
};
let session = Session {
id: id.clone(),
task: task.to_string(),
agent_type: agent_type.to_string(),
state: SessionState::Pending,
pid: None,
worktree: wt,
created_at: now,
updated_at: now,
metrics: SessionMetrics::default(),
};
let session = build_session_record(task, agent_type, use_worktree, cfg, repo_root)?;
db.insert_session(&session)?;
@@ -118,19 +235,60 @@ async fn create_session_in_dir(
}
}
/// Re-invokes this same binary in `run-session` mode as a detached child so
/// the agent process outlives the caller.
///
/// All stdio is nulled: the runner persists output through the state store,
/// not through inherited pipes.
async fn spawn_session_runner(
    task: &str,
    session_id: &str,
    agent_type: &str,
    working_dir: &Path,
) -> Result<()> {
    let current_exe = std::env::current_exe().context("Failed to resolve ECC executable path")?;
    let mut runner = Command::new(&current_exe);
    runner
        .arg("run-session")
        .arg("--session-id")
        .arg(session_id)
        .arg("--task")
        .arg(task)
        .arg("--agent")
        .arg(agent_type)
        .arg("--cwd")
        .arg(working_dir)
        .stdin(Stdio::null())
        .stdout(Stdio::null())
        .stderr(Stdio::null());
    let child = runner.spawn().with_context(|| {
        format!(
            "Failed to spawn ECC runner from {}",
            current_exe.display()
        )
    })?;
    // A missing pid means the child already exited or never really started.
    if child.id().is_none() {
        anyhow::bail!("ECC runner did not expose a process id");
    }
    Ok(())
}
/// Assembles the agent CLI invocation: `--print`, a session-scoped `--name`,
/// the task text, the working directory, and a nulled stdin. stdout/stderr
/// are left for the caller to configure.
fn build_agent_command(agent_program: &Path, task: &str, session_id: &str, working_dir: &Path) -> Command {
    let session_name = format!("ecc-{session_id}");
    let mut command = Command::new(agent_program);
    command.arg("--print");
    command.arg("--name").arg(session_name);
    command.arg(task);
    command.current_dir(working_dir);
    command.stdin(Stdio::null());
    command
}
async fn spawn_claude_code(
agent_program: &Path,
task: &str,
session_id: &str,
working_dir: &Path,
) -> Result<u32> {
let child = Command::new(agent_program)
.arg("--print")
.arg("--name")
.arg(format!("ecc-{session_id}"))
.arg(task)
.current_dir(working_dir)
.stdin(Stdio::null())
let mut command = build_agent_command(agent_program, task, session_id, working_dir);
let child = command
.stdout(Stdio::null())
.stderr(Stdio::null())
.spawn()
@@ -238,7 +396,7 @@ impl fmt::Display for SessionStatus {
#[cfg(test)]
mod tests {
use super::*;
use crate::config::{Config, Theme};
use crate::config::{Config, PaneLayout, Theme};
use crate::session::{Session, SessionMetrics, SessionState};
use anyhow::{Context, Result};
use chrono::{Duration, Utc};
@@ -281,7 +439,11 @@ mod tests {
session_timeout_secs: 60,
heartbeat_interval_secs: 5,
default_agent: "claude".to_string(),
cost_budget_usd: 10.0,
token_budget: 500_000,
theme: Theme::Dark,
pane_layout: PaneLayout::Horizontal,
risk_thresholds: Config::RISK_THRESHOLDS,
}
}
@@ -469,6 +631,36 @@ mod tests {
Ok(())
}
#[tokio::test(flavor = "current_thread")]
async fn resume_session_requeues_failed_session() -> Result<()> {
    // A Failed session carrying a stale pid must come back as Pending with
    // the pid cleared after resume_session.
    let tempdir = TestDir::new("manager-resume-session")?;
    let cfg = build_config(tempdir.path());
    let db = StateStore::open(&cfg.db_path)?;
    let now = Utc::now();
    db.insert_session(&Session {
        id: "deadbeef".to_string(),
        task: "resume previous task".to_string(),
        agent_type: "claude".to_string(),
        state: SessionState::Failed,
        pid: Some(31337), // stale pid left over from the failed run
        worktree: None,
        created_at: now - Duration::minutes(1),
        updated_at: now,
        metrics: SessionMetrics::default(),
    })?;
    let resumed_id = resume_session(&db, "deadbeef").await?;
    let resumed = db
        .get_session(&resumed_id)?
        .context("resumed session should exist")?;
    assert_eq!(resumed.state, SessionState::Pending);
    assert_eq!(resumed.pid, None);
    Ok(())
}
#[test]
fn get_status_supports_latest_alias() -> Result<()> {
let tempdir = TestDir::new("manager-latest-status")?;

View File

@@ -1,5 +1,7 @@
pub mod daemon;
pub mod manager;
pub mod output;
pub mod runtime;
pub mod store;
use chrono::{DateTime, Utc};

149
ecc2/src/session/output.rs Normal file
View File

@@ -0,0 +1,149 @@
use std::collections::{HashMap, VecDeque};
use std::sync::{Arc, Mutex, MutexGuard};
use serde::{Deserialize, Serialize};
use tokio::sync::broadcast;
pub const OUTPUT_BUFFER_LIMIT: usize = 1000;
/// Which pipe of the child process a captured line came from.
#[derive(Debug, Clone, Copy, PartialEq, Eq, Serialize, Deserialize)]
pub enum OutputStream {
    Stdout,
    Stderr,
}
impl OutputStream {
    /// Canonical string form used when persisting to the database.
    pub fn as_str(self) -> &'static str {
        match self {
            Self::Stderr => "stderr",
            Self::Stdout => "stdout",
        }
    }

    /// Parses a persisted value. Anything other than `"stderr"` is treated
    /// as stdout, so unknown or legacy values degrade gracefully instead of
    /// failing.
    pub fn from_db_value(value: &str) -> Self {
        if value == "stderr" {
            Self::Stderr
        } else {
            Self::Stdout
        }
    }
}
/// One captured line of agent output, tagged with its source stream.
#[derive(Debug, Clone, PartialEq, Eq, Serialize, Deserialize)]
pub struct OutputLine {
    pub stream: OutputStream,
    pub text: String,
}
/// Broadcast payload: one newly captured output line for a session.
#[derive(Debug, Clone, PartialEq, Eq)]
pub struct OutputEvent {
    pub session_id: String,
    pub line: OutputLine,
}
/// In-memory ring buffer of recent output lines per session, plus a
/// broadcast channel that fans each new line out to live subscribers.
/// Cloning is cheap: the buffers are shared behind an `Arc`.
#[derive(Clone)]
pub struct SessionOutputStore {
    // Maximum retained lines per session (clamped to >= 1 in `new`).
    capacity: usize,
    // session_id -> retained lines, oldest first.
    buffers: Arc<Mutex<HashMap<String, VecDeque<OutputLine>>>>,
    // Fan-out channel for `OutputEvent` notifications.
    tx: broadcast::Sender<OutputEvent>,
}
impl Default for SessionOutputStore {
    /// Store with the standard per-session capacity (`OUTPUT_BUFFER_LIMIT`).
    fn default() -> Self {
        Self::new(OUTPUT_BUFFER_LIMIT)
    }
}
impl SessionOutputStore {
    /// Creates a store retaining at most `capacity` lines per session
    /// (clamped to at least 1).
    pub fn new(capacity: usize) -> Self {
        let capacity = capacity.max(1);
        // The broadcast channel is at least 16 deep regardless of capacity.
        let (tx, _initial_rx) = broadcast::channel(capacity.max(16));
        Self {
            capacity,
            buffers: Arc::new(Mutex::new(HashMap::new())),
            tx,
        }
    }

    /// Returns a receiver that observes every subsequently pushed line.
    pub fn subscribe(&self) -> broadcast::Receiver<OutputEvent> {
        self.tx.subscribe()
    }

    /// Appends a line to the session's ring buffer, then broadcasts it.
    /// The lock is released before the send so subscribers never contend
    /// with the buffer mutation.
    pub fn push_line(&self, session_id: &str, stream: OutputStream, text: impl Into<String>) {
        let line = OutputLine {
            stream,
            text: text.into(),
        };
        {
            let mut buffers = self.lock_buffers();
            let buffer = buffers.entry(session_id.to_owned()).or_default();
            buffer.push_back(line.clone());
            Self::trim_to_capacity(buffer, self.capacity);
        }
        // Ignore send errors: no subscribers is a normal condition.
        let _ = self.tx.send(OutputEvent {
            session_id: session_id.to_owned(),
            line,
        });
    }

    /// Replaces the session's buffer wholesale (e.g. when restoring from
    /// persistence), keeping only the newest `capacity` lines.
    pub fn replace_lines(&self, session_id: &str, lines: Vec<OutputLine>) {
        let mut buffer: VecDeque<OutputLine> = lines.into_iter().collect();
        Self::trim_to_capacity(&mut buffer, self.capacity);
        self.lock_buffers().insert(session_id.to_owned(), buffer);
    }

    /// Snapshot of the session's buffered lines, oldest first; empty if the
    /// session has no buffer yet.
    pub fn lines(&self, session_id: &str) -> Vec<OutputLine> {
        self.lock_buffers()
            .get(session_id)
            .map_or_else(Vec::new, |buffer| buffer.iter().cloned().collect())
    }

    // Recover from a poisoned mutex rather than panicking: the buffer data
    // is still usable even if another thread panicked mid-write.
    fn lock_buffers(&self) -> MutexGuard<'_, HashMap<String, VecDeque<OutputLine>>> {
        self.buffers
            .lock()
            .unwrap_or_else(|poisoned| poisoned.into_inner())
    }

    // Drops oldest entries until the buffer fits within `capacity`.
    fn trim_to_capacity(buffer: &mut VecDeque<OutputLine>, capacity: usize) {
        while buffer.len() > capacity {
            let _ = buffer.pop_front();
        }
    }
}
#[cfg(test)]
mod tests {
    use super::{OutputStream, SessionOutputStore};

    // Oldest lines are evicted once the per-session capacity is exceeded.
    #[test]
    fn ring_buffer_keeps_most_recent_lines() {
        let store = SessionOutputStore::new(3);
        store.push_line("session-1", OutputStream::Stdout, "line-1");
        store.push_line("session-1", OutputStream::Stdout, "line-2");
        store.push_line("session-1", OutputStream::Stdout, "line-3");
        store.push_line("session-1", OutputStream::Stdout, "line-4");
        let lines = store.lines("session-1");
        let texts: Vec<_> = lines.iter().map(|line| line.text.as_str()).collect();
        assert_eq!(texts, vec!["line-2", "line-3", "line-4"]);
    }

    // Every pushed line is also delivered to broadcast subscribers.
    #[tokio::test]
    async fn pushing_output_broadcasts_events() {
        let store = SessionOutputStore::new(8);
        let mut rx = store.subscribe();
        store.push_line("session-1", OutputStream::Stderr, "problem");
        let event = rx.recv().await.expect("broadcast event");
        assert_eq!(event.session_id, "session-1");
        assert_eq!(event.line.stream, OutputStream::Stderr);
        assert_eq!(event.line.text, "problem");
    }
}

290
ecc2/src/session/runtime.rs Normal file
View File

@@ -0,0 +1,290 @@
use std::path::PathBuf;
use std::process::{ExitStatus, Stdio};
use anyhow::{Context, Result};
use tokio::io::{AsyncBufReadExt, AsyncRead, BufReader};
use tokio::process::Command;
use tokio::sync::{mpsc, oneshot};
use super::output::{OutputStream, SessionOutputStore};
use super::store::StateStore;
use super::SessionState;
// Acknowledgement sent back to the async caller. Errors are stringified so
// they can cross the oneshot channel (see run_db_writer's map_err calls).
type DbAck = std::result::Result<(), String>;

// Requests handled by the dedicated DB writer thread. Each variant carries a
// oneshot sender so the caller can await the outcome of its write.
enum DbMessage {
    // Persist a new session state.
    UpdateState {
        state: SessionState,
        ack: oneshot::Sender<DbAck>,
    },
    // Persist (or clear, with None) the child's pid.
    UpdatePid {
        pid: Option<u32>,
        ack: oneshot::Sender<DbAck>,
    },
    // Append one captured output line for the session.
    AppendOutputLine {
        stream: OutputStream,
        line: String,
        ack: oneshot::Sender<DbAck>,
    },
}
/// Cheap, cloneable handle to the dedicated DB writer thread. All writes
/// funnel through `tx` and are executed sequentially by `run_db_writer`.
#[derive(Clone)]
struct DbWriter {
    tx: mpsc::UnboundedSender<DbMessage>,
}
impl DbWriter {
    /// Spawns the writer thread and returns a sending handle. A plain OS
    /// thread (not a tokio task) is used because the writer loop blocks on
    /// `blocking_recv`.
    fn start(db_path: PathBuf, session_id: String) -> Self {
        let (tx, rx) = mpsc::unbounded_channel();
        std::thread::spawn(move || run_db_writer(db_path, session_id, rx));
        Self { tx }
    }

    /// Persists a new session state; resolves once the write has completed.
    async fn update_state(&self, state: SessionState) -> Result<()> {
        self.send(|ack| DbMessage::UpdateState { state, ack }).await
    }

    /// Persists the child's pid, or clears it with `None`.
    async fn update_pid(&self, pid: Option<u32>) -> Result<()> {
        self.send(|ack| DbMessage::UpdatePid { pid, ack }).await
    }

    /// Appends one output line to the session's persisted buffer.
    async fn append_output_line(&self, stream: OutputStream, line: String) -> Result<()> {
        self.send(|ack| DbMessage::AppendOutputLine { stream, line, ack })
            .await
    }

    /// Sends one request and awaits its acknowledgement, translating both
    /// channel failures (writer thread gone) and write failures into errors.
    async fn send<F>(&self, build: F) -> Result<()>
    where
        F: FnOnce(oneshot::Sender<DbAck>) -> DbMessage,
    {
        let (ack_tx, ack_rx) = oneshot::channel();
        self.tx
            .send(build(ack_tx))
            .map_err(|_| anyhow::anyhow!("DB writer channel closed"))?;
        match ack_rx.await {
            Ok(Ok(())) => Ok(()),
            Ok(Err(error)) => Err(anyhow::anyhow!(error)),
            Err(_) => Err(anyhow::anyhow!("DB writer acknowledgement dropped")),
        }
    }
}
fn run_db_writer(
db_path: PathBuf,
session_id: String,
mut rx: mpsc::UnboundedReceiver<DbMessage>,
) {
let (opened, open_error) = match StateStore::open(&db_path) {
Ok(db) => (Some(db), None),
Err(error) => (None, Some(error.to_string())),
};
while let Some(message) = rx.blocking_recv() {
match message {
DbMessage::UpdateState { state, ack } => {
let result = match opened.as_ref() {
Some(db) => db.update_state(&session_id, &state).map_err(|error| error.to_string()),
None => Err(open_error
.clone()
.unwrap_or_else(|| "Failed to open state store".to_string())),
};
let _ = ack.send(result);
}
DbMessage::UpdatePid { pid, ack } => {
let result = match opened.as_ref() {
Some(db) => db.update_pid(&session_id, pid).map_err(|error| error.to_string()),
None => Err(open_error
.clone()
.unwrap_or_else(|| "Failed to open state store".to_string())),
};
let _ = ack.send(result);
}
DbMessage::AppendOutputLine { stream, line, ack } => {
let result = match opened.as_ref() {
Some(db) => db
.append_output_line(&session_id, stream, &line)
.map_err(|error| error.to_string()),
None => Err(open_error
.clone()
.unwrap_or_else(|| "Failed to open state store".to_string())),
};
let _ = ack.send(result);
}
}
}
}
/// Runs `command` to completion, streaming its stdout/stderr into both the
/// in-memory `output_store` and the state database, while keeping the
/// session's state and pid columns in sync with the process lifecycle:
/// pid + Running on spawn, pid cleared + Completed/Failed on exit.
///
/// If any step of the capture pipeline errors, the session is marked Failed
/// and its pid cleared (best effort) before the error is returned.
pub async fn capture_command_output(
    db_path: PathBuf,
    session_id: String,
    mut command: Command,
    output_store: SessionOutputStore,
) -> Result<ExitStatus> {
    let db_writer = DbWriter::start(db_path, session_id.clone());
    let result = async {
        let mut child = command
            .stdout(Stdio::piped())
            .stderr(Stdio::piped())
            .spawn()
            .with_context(|| format!("Failed to start process for session {}", session_id))?;
        // Both pipes must be present; if one is missing, reap the child
        // before bailing so it is not left running unattended.
        let stdout = match child.stdout.take() {
            Some(stdout) => stdout,
            None => {
                let _ = child.kill().await;
                let _ = child.wait().await;
                anyhow::bail!("Child stdout was not piped");
            }
        };
        let stderr = match child.stderr.take() {
            Some(stderr) => stderr,
            None => {
                let _ = child.kill().await;
                let _ = child.wait().await;
                anyhow::bail!("Child stderr was not piped");
            }
        };
        let pid = child
            .id()
            .ok_or_else(|| anyhow::anyhow!("Spawned process did not expose a process id"))?;
        // Record the pid first, then flip the state to Running.
        db_writer.update_pid(Some(pid)).await?;
        db_writer.update_state(SessionState::Running).await?;
        // Drain both pipes concurrently so neither can fill up and stall
        // the child.
        let stdout_task = tokio::spawn(capture_stream(
            session_id.clone(),
            stdout,
            OutputStream::Stdout,
            output_store.clone(),
            db_writer.clone(),
        ));
        let stderr_task = tokio::spawn(capture_stream(
            session_id.clone(),
            stderr,
            OutputStream::Stderr,
            output_store,
            db_writer.clone(),
        ));
        let status = child.wait().await?;
        // Double `?`: the outer propagates join errors, the inner the
        // capture task's own Result.
        stdout_task.await??;
        stderr_task.await??;
        let final_state = if status.success() {
            SessionState::Completed
        } else {
            SessionState::Failed
        };
        db_writer.update_pid(None).await?;
        db_writer.update_state(final_state).await?;
        Ok(status)
    }
    .await;
    // Best-effort failure bookkeeping; never mask the original error.
    if result.is_err() {
        let _ = db_writer.update_pid(None).await;
        let _ = db_writer.update_state(SessionState::Failed).await;
    }
    result
}
/// Reads one pipe of the child line by line until EOF. Each line is first
/// persisted via the DB writer, then fanned out to live subscribers through
/// the output store, so the database never lags the in-memory view.
async fn capture_stream<R>(
    session_id: String,
    reader: R,
    stream: OutputStream,
    output_store: SessionOutputStore,
    db_writer: DbWriter,
) -> Result<()>
where
    R: AsyncRead + Unpin,
{
    let mut line_reader = BufReader::new(reader).lines();
    while let Some(text) = line_reader.next_line().await? {
        db_writer.append_output_line(stream, text.clone()).await?;
        output_store.push_line(&session_id, stream, text);
    }
    Ok(())
}
#[cfg(test)]
mod tests {
    use std::collections::HashSet;
    use std::env;
    use anyhow::Result;
    use chrono::Utc;
    use tokio::process::Command;
    use uuid::Uuid;
    use super::capture_command_output;
    use crate::session::output::{SessionOutputStore, OUTPUT_BUFFER_LIMIT};
    use crate::session::store::StateStore;
    use crate::session::{Session, SessionMetrics, SessionState};

    // End-to-end: spawn a shell writing one line to stdout and one to
    // stderr, then verify persisted rows, broadcast events, and the final
    // session state/pid.
    // NOTE(review): relies on /bin/sh, so this test is Unix-only — confirm
    // CI never runs it on Windows.
    #[tokio::test]
    async fn capture_command_output_persists_lines_and_events() -> Result<()> {
        // Unique temp DB per run so parallel tests do not collide.
        let db_path = env::temp_dir().join(format!("ecc2-runtime-{}.db", Uuid::new_v4()));
        let db = StateStore::open(&db_path)?;
        let session_id = "session-1".to_string();
        let now = Utc::now();
        db.insert_session(&Session {
            id: session_id.clone(),
            task: "stream output".to_string(),
            agent_type: "test".to_string(),
            state: SessionState::Pending,
            pid: None,
            worktree: None,
            created_at: now,
            updated_at: now,
            metrics: SessionMetrics::default(),
        })?;
        let output_store = SessionOutputStore::default();
        // Subscribe before running so no broadcast events are missed.
        let mut rx = output_store.subscribe();
        let mut command = Command::new("/bin/sh");
        command
            .arg("-c")
            .arg("printf 'alpha\\n'; printf 'beta\\n' >&2");
        let status =
            capture_command_output(db_path.clone(), session_id.clone(), command, output_store)
                .await?;
        assert!(status.success());
        // Re-open to read what the writer thread persisted.
        let db = StateStore::open(&db_path)?;
        let session = db
            .get_session(&session_id)?
            .expect("session should still exist");
        assert_eq!(session.state, SessionState::Completed);
        assert_eq!(session.pid, None);
        let lines = db.get_output_lines(&session_id, OUTPUT_BUFFER_LIMIT)?;
        // Stream interleaving order is nondeterministic; compare as a set.
        let texts: HashSet<_> = lines.iter().map(|line| line.text.as_str()).collect();
        assert_eq!(lines.len(), 2);
        assert!(texts.contains("alpha"));
        assert!(texts.contains("beta"));
        let mut events = Vec::new();
        while let Ok(event) = rx.try_recv() {
            events.push(event.line.text);
        }
        assert_eq!(events.len(), 2);
        assert!(events.iter().any(|line| line == "alpha"));
        assert!(events.iter().any(|line| line == "beta"));
        // Best-effort cleanup of the temp database.
        let _ = std::fs::remove_file(db_path);
        Ok(())
    }
}

View File

@@ -1,7 +1,11 @@
use anyhow::{Context, Result};
use rusqlite::{Connection, OptionalExtension};
use std::path::Path;
use std::path::{Path, PathBuf};
use std::time::Duration;
use crate::observability::{ToolLogEntry, ToolLogPage};
use super::output::{OutputLine, OutputStream, OUTPUT_BUFFER_LIMIT};
use super::{Session, SessionMetrics, SessionState};
pub struct StateStore {
@@ -11,6 +15,8 @@ pub struct StateStore {
impl StateStore {
/// Opens (or creates) the SQLite database at `path` and ensures the schema
/// exists before returning the store.
pub fn open(path: &Path) -> Result<Self> {
    let conn = Connection::open(path)?;
    // SQLite ships with foreign keys disabled; enable enforcement so
    // session_output rows cannot reference missing sessions.
    conn.execute_batch("PRAGMA foreign_keys = ON;")?;
    // Retry for up to 5s on SQLITE_BUSY instead of failing immediately
    // when another process holds the write lock.
    conn.busy_timeout(Duration::from_secs(5))?;
    let store = Self { conn };
    store.init_schema()?;
    Ok(store)
}
@@ -58,9 +64,19 @@ impl StateStore {
timestamp TEXT NOT NULL
);
CREATE TABLE IF NOT EXISTS session_output (
id INTEGER PRIMARY KEY AUTOINCREMENT,
session_id TEXT NOT NULL REFERENCES sessions(id),
stream TEXT NOT NULL,
line TEXT NOT NULL,
timestamp TEXT NOT NULL
);
CREATE INDEX IF NOT EXISTS idx_sessions_state ON sessions(state);
CREATE INDEX IF NOT EXISTS idx_tool_log_session ON tool_log(session_id);
CREATE INDEX IF NOT EXISTS idx_messages_to ON messages(to_session, read);
CREATE INDEX IF NOT EXISTS idx_session_output_session
ON session_output(session_id, id);
",
)?;
self.ensure_session_columns()?;
@@ -97,7 +113,10 @@ impl StateStore {
session.agent_type,
session.state.to_string(),
session.pid.map(i64::from),
session.worktree.as_ref().map(|w| w.path.to_string_lossy().to_string()),
session
.worktree
.as_ref()
.map(|w| w.path.to_string_lossy().to_string()),
session.worktree.as_ref().map(|w| w.branch.clone()),
session.worktree.as_ref().map(|w| w.base_branch.clone()),
session.created_at.to_rfc3339(),
@@ -107,6 +126,29 @@ impl StateStore {
Ok(())
}
pub fn update_state_and_pid(
&self,
session_id: &str,
state: &SessionState,
pid: Option<u32>,
) -> Result<()> {
let updated = self.conn.execute(
"UPDATE sessions SET state = ?1, pid = ?2, updated_at = ?3 WHERE id = ?4",
rusqlite::params![
state.to_string(),
pid.map(i64::from),
chrono::Utc::now().to_rfc3339(),
session_id,
],
)?;
if updated == 0 {
anyhow::bail!("Session not found: {session_id}");
}
Ok(())
}
pub fn update_state(&self, session_id: &str, state: &SessionState) -> Result<()> {
let current_state = self
.conn
@@ -176,6 +218,14 @@ impl StateStore {
Ok(())
}
pub fn increment_tool_calls(&self, session_id: &str) -> Result<()> {
self.conn.execute(
"UPDATE sessions SET tool_calls = tool_calls + 1, updated_at = ?1 WHERE id = ?2",
rusqlite::params![chrono::Utc::now().to_rfc3339(), session_id],
)?;
Ok(())
}
pub fn list_sessions(&self) -> Result<Vec<Session>> {
let mut stmt = self.conn.prepare(
"SELECT id, task, agent_type, state, pid, worktree_path, worktree_branch, worktree_base,
@@ -190,8 +240,8 @@ impl StateStore {
let state = SessionState::from_db_value(&state_str);
let worktree_path: Option<String> = row.get(5)?;
let worktree = worktree_path.map(|p| super::WorktreeInfo {
path: std::path::PathBuf::from(p),
let worktree = worktree_path.map(|path| super::WorktreeInfo {
path: PathBuf::from(path),
branch: row.get::<_, String>(6).unwrap_or_default(),
base_branch: row.get::<_, String>(7).unwrap_or_default(),
});
@@ -234,8 +284,9 @@ impl StateStore {
let sessions = self.list_sessions()?;
Ok(sessions
.into_iter()
.find(|s| s.id == id || s.id.starts_with(id)))
.find(|session| session.id == id || session.id.starts_with(id)))
}
pub fn send_message(&self, from: &str, to: &str, content: &str, msg_type: &str) -> Result<()> {
self.conn.execute(
"INSERT INTO messages (from_session, to_session, content, msg_type, timestamp)
@@ -244,15 +295,158 @@ impl StateStore {
)?;
Ok(())
}
/// Persists one output line for a session with ring-buffer semantics: only
/// the newest `OUTPUT_BUFFER_LIMIT` rows (highest autoincrement ids) are
/// retained per session. Also refreshes the session's `updated_at`.
pub fn append_output_line(
    &self,
    session_id: &str,
    stream: OutputStream,
    line: &str,
) -> Result<()> {
    // One timestamp for both the output row and the session's updated_at;
    // the original called Utc::now() twice, producing slightly divergent
    // timestamps for what is logically a single event.
    let now = chrono::Utc::now().to_rfc3339();
    self.conn.execute(
        "INSERT INTO session_output (session_id, stream, line, timestamp)
         VALUES (?1, ?2, ?3, ?4)",
        rusqlite::params![session_id, stream.as_str(), line, now],
    )?;
    // Trim: delete every row for this session that is NOT among the newest
    // OUTPUT_BUFFER_LIMIT ids.
    self.conn.execute(
        "DELETE FROM session_output
         WHERE session_id = ?1
           AND id NOT IN (
             SELECT id
             FROM session_output
             WHERE session_id = ?1
             ORDER BY id DESC
             LIMIT ?2
           )",
        rusqlite::params![session_id, OUTPUT_BUFFER_LIMIT as i64],
    )?;
    self.conn.execute(
        "UPDATE sessions SET updated_at = ?1 WHERE id = ?2",
        rusqlite::params![now, session_id],
    )?;
    Ok(())
}
/// Fetches up to `limit` of the most recent output lines for a session,
/// returned in chronological (oldest-first) order.
pub fn get_output_lines(&self, session_id: &str, limit: usize) -> Result<Vec<OutputLine>> {
    // The inner query picks the newest `limit` rows; the outer query
    // restores chronological order.
    let mut statement = self.conn.prepare(
        "SELECT stream, line
         FROM (
             SELECT id, stream, line
             FROM session_output
             WHERE session_id = ?1
             ORDER BY id DESC
             LIMIT ?2
         )
         ORDER BY id ASC",
    )?;
    let mapped = statement.query_map(rusqlite::params![session_id, limit as i64], |row| {
        Ok(OutputLine {
            stream: OutputStream::from_db_value(&row.get::<_, String>(0)?),
            text: row.get(1)?,
        })
    })?;
    let lines = mapped.collect::<Result<Vec<_>, _>>()?;
    Ok(lines)
}
/// Inserts one tool-call row and returns it as a `ToolLogEntry`, with `id`
/// taken from SQLite's last-insert rowid.
pub fn insert_tool_log(
    &self,
    session_id: &str,
    tool_name: &str,
    input_summary: &str,
    output_summary: &str,
    duration_ms: u64,
    risk_score: f64,
    timestamp: &str,
) -> Result<ToolLogEntry> {
    // Build the returned entry up front; only the rowid is unknown until
    // the INSERT completes.
    let mut entry = ToolLogEntry {
        id: 0,
        session_id: session_id.to_string(),
        tool_name: tool_name.to_string(),
        input_summary: input_summary.to_string(),
        output_summary: output_summary.to_string(),
        duration_ms,
        risk_score,
        timestamp: timestamp.to_string(),
    };
    self.conn.execute(
        "INSERT INTO tool_log (session_id, tool_name, input_summary, output_summary, duration_ms, risk_score, timestamp)
         VALUES (?1, ?2, ?3, ?4, ?5, ?6, ?7)",
        rusqlite::params![
            session_id,
            tool_name,
            input_summary,
            output_summary,
            duration_ms,
            risk_score,
            timestamp,
        ],
    )?;
    entry.id = self.conn.last_insert_rowid();
    Ok(entry)
}
pub fn query_tool_logs(
&self,
session_id: &str,
page: u64,
page_size: u64,
) -> Result<ToolLogPage> {
let page = page.max(1);
let offset = (page - 1) * page_size;
let total: u64 = self.conn.query_row(
"SELECT COUNT(*) FROM tool_log WHERE session_id = ?1",
rusqlite::params![session_id],
|row| row.get(0),
)?;
let mut stmt = self.conn.prepare(
"SELECT id, session_id, tool_name, input_summary, output_summary, duration_ms, risk_score, timestamp
FROM tool_log
WHERE session_id = ?1
ORDER BY timestamp DESC, id DESC
LIMIT ?2 OFFSET ?3",
)?;
let entries = stmt
.query_map(rusqlite::params![session_id, page_size, offset], |row| {
Ok(ToolLogEntry {
id: row.get(0)?,
session_id: row.get(1)?,
tool_name: row.get(2)?,
input_summary: row.get::<_, Option<String>>(3)?.unwrap_or_default(),
output_summary: row.get::<_, Option<String>>(4)?.unwrap_or_default(),
duration_ms: row.get::<_, Option<u64>>(5)?.unwrap_or_default(),
risk_score: row.get::<_, Option<f64>>(6)?.unwrap_or_default(),
timestamp: row.get(7)?,
})
})?
.collect::<Result<Vec<_>, _>>()?;
Ok(ToolLogPage {
entries,
page,
page_size,
total,
})
}
}
#[cfg(test)]
mod tests {
use super::*;
use crate::session::{Session, SessionMetrics, SessionState};
use chrono::{Duration, Utc};
use chrono::{Duration as ChronoDuration, Utc};
use std::fs;
use std::path::{Path, PathBuf};
struct TestDir {
path: PathBuf,
@@ -286,7 +480,7 @@ mod tests {
state,
pid: None,
worktree: None,
created_at: now - Duration::minutes(1),
created_at: now - ChronoDuration::minutes(1),
updated_at: now,
metrics: SessionMetrics::default(),
}
@@ -346,4 +540,37 @@ mod tests {
assert!(column_names.iter().any(|column| column == "pid"));
Ok(())
}
#[test]
fn append_output_line_keeps_latest_buffer_window() -> Result<()> {
    // Writing OUTPUT_BUFFER_LIMIT + 5 lines must leave exactly the newest
    // OUTPUT_BUFFER_LIMIT rows: line-5 .. line-(LIMIT+4).
    let tempdir = TestDir::new("store-output")?;
    let db = StateStore::open(&tempdir.path().join("state.db"))?;
    let now = Utc::now();
    db.insert_session(&Session {
        id: "session-1".to_string(),
        task: "buffer output".to_string(),
        agent_type: "claude".to_string(),
        state: SessionState::Running,
        pid: None,
        worktree: None,
        created_at: now,
        updated_at: now,
        metrics: SessionMetrics::default(),
    })?;
    for index in 0..(OUTPUT_BUFFER_LIMIT + 5) {
        db.append_output_line("session-1", OutputStream::Stdout, &format!("line-{index}"))?;
    }
    let lines = db.get_output_lines("session-1", OUTPUT_BUFFER_LIMIT)?;
    let texts: Vec<_> = lines.iter().map(|line| line.text.as_str()).collect();
    assert_eq!(lines.len(), OUTPUT_BUFFER_LIMIT);
    // Oldest surviving row is line-5 (the first five were trimmed).
    assert_eq!(texts.first().copied(), Some("line-5"));
    let expected_last_line = format!("line-{}", OUTPUT_BUFFER_LIMIT + 4);
    assert_eq!(texts.last().copied(), Some(expected_last_line.as_str()));
    Ok(())
}
}

View File

@@ -32,6 +32,10 @@ pub async fn run(db: StateStore, cfg: Config) -> Result<()> {
(_, KeyCode::Char('q')) => break,
(_, KeyCode::Tab) => dashboard.next_pane(),
(KeyModifiers::SHIFT, KeyCode::BackTab) => dashboard.prev_pane(),
(_, KeyCode::Char('+')) | (_, KeyCode::Char('=')) => {
dashboard.increase_pane_size()
}
(_, KeyCode::Char('-')) => dashboard.decrease_pane_size(),
(_, KeyCode::Char('j')) | (_, KeyCode::Down) => dashboard.scroll_down(),
(_, KeyCode::Char('k')) | (_, KeyCode::Up) => dashboard.scroll_up(),
(_, KeyCode::Char('n')) => dashboard.new_session(),

File diff suppressed because it is too large Load Diff

View File

@@ -48,6 +48,7 @@ User request → Claude picks a tool → PreToolUse hook runs → Tool executes
| **Session summary** | `Stop` | Persists session state when transcript path is available |
| **Pattern extraction** | `Stop` | Evaluates session for extractable patterns (continuous learning) |
| **Cost tracker** | `Stop` | Emits lightweight run-cost telemetry markers |
| **Desktop notify** | `Stop` | Sends macOS desktop notification with task summary (standard+) |
| **Session end marker** | `SessionEnd` | Lifecycle marker and cleanup log |
## Customizing Hooks

View File

@@ -136,7 +136,7 @@
"hooks": [
{
"type": "command",
"command": "bash -lc 'input=$(cat); for root in \"${CLAUDE_PLUGIN_ROOT:-}\" \"$HOME/.claude/plugins/everything-claude-code\" \"$HOME/.claude/plugins/everything-claude-code@everything-claude-code\" \"$HOME/.claude/plugins/marketplace/everything-claude-code\"; do if [ -n \"$root\" ] && [ -f \"$root/scripts/hooks/run-with-flags.js\" ]; then printf \"%s\" \"$input\" | node \"$root/scripts/hooks/run-with-flags.js\" \"session:start\" \"scripts/hooks/session-start.js\" \"minimal,standard,strict\"; exit $?; fi; done; for parent in \"$HOME/.claude/plugins\" \"$HOME/.claude/plugins/marketplace\"; do if [ -d \"$parent\" ]; then candidate=$(find \"$parent\" -maxdepth 2 -type f -path \"*/scripts/hooks/run-with-flags.js\" 2>/dev/null | head -n 1); if [ -n \"$candidate\" ]; then root=$(dirname \"$(dirname \"$(dirname \"$candidate\")\")\"); printf \"%s\" \"$input\" | node \"$root/scripts/hooks/run-with-flags.js\" \"session:start\" \"scripts/hooks/session-start.js\" \"minimal,standard,strict\"; exit $?; fi; fi; done; echo \"[SessionStart] WARNING: could not resolve ECC plugin root; skipping session-start hook\" >&2; printf \"%s\" \"$input\"; exit 0'"
"command": "node -e \"const fs=require('fs');const path=require('path');const {spawnSync}=require('child_process');const raw=fs.readFileSync(0,'utf8');const rel=path.join('scripts','hooks','run-with-flags.js');const hasRunnerRoot=candidate=>{const value=typeof candidate==='string'?candidate.trim():'';return value.length>0&&fs.existsSync(path.join(path.resolve(value),rel));};const root=(()=>{const envRoot=process.env.CLAUDE_PLUGIN_ROOT||'';if(hasRunnerRoot(envRoot))return path.resolve(envRoot.trim());const home=require('os').homedir();const claudeDir=path.join(home,'.claude');if(hasRunnerRoot(claudeDir))return claudeDir;for(const candidate of [path.join(claudeDir,'plugins','everything-claude-code'),path.join(claudeDir,'plugins','everything-claude-code@everything-claude-code'),path.join(claudeDir,'plugins','marketplace','everything-claude-code')]){if(hasRunnerRoot(candidate))return candidate;}try{const cacheBase=path.join(claudeDir,'plugins','cache','everything-claude-code');for(const org of fs.readdirSync(cacheBase,{withFileTypes:true})){if(!org.isDirectory())continue;for(const version of fs.readdirSync(path.join(cacheBase,org.name),{withFileTypes:true})){if(!version.isDirectory())continue;const candidate=path.join(cacheBase,org.name,version.name);if(hasRunnerRoot(candidate))return candidate;}}}catch{}return claudeDir;})();const script=path.join(root,rel);if(fs.existsSync(script)){const result=spawnSync(process.execPath,[script,'session:start','scripts/hooks/session-start.js','minimal,standard,strict'],{input:raw,encoding:'utf8',env:process.env,cwd:process.cwd(),timeout:30000});const stdout=typeof result.stdout==='string'?result.stdout:'';if(stdout)process.stdout.write(stdout);else process.stdout.write(raw);if(result.stderr)process.stderr.write(result.stderr);if(result.error||result.status===null||result.signal){const reason=result.error?result.error.message:(result.signal?'signal '+result.signal:'missing exit status');process.stderr.write('[SessionStart] ERROR: 
session-start hook failed: '+reason+String.fromCharCode(10));process.exit(1);}process.exit(Number.isInteger(result.status)?result.status:0);}process.stderr.write('[SessionStart] WARNING: could not resolve ECC plugin root; skipping session-start hook'+String.fromCharCode(10));process.stdout.write(raw);\""
}
],
"description": "Load previous context and detect package manager on new session"
@@ -289,6 +289,18 @@
}
],
"description": "Track token and cost metrics per session"
},
{
"matcher": "*",
"hooks": [
{
"type": "command",
"command": "node \"${CLAUDE_PLUGIN_ROOT}/scripts/hooks/run-with-flags.js\" \"stop:desktop-notify\" \"scripts/hooks/desktop-notify.js\" \"standard,strict\"",
"async": true,
"timeout": 10
}
],
"description": "Send macOS desktop notification with task summary when Claude responds"
}
],
"SessionEnd": [

View File

@@ -63,7 +63,8 @@
"description": "Runtime hook configs and hook script helpers.",
"paths": [
"hooks",
"scripts/hooks"
"scripts/hooks",
"scripts/lib"
],
"targets": [
"claude",

View File

@@ -24,9 +24,11 @@ log() {
run_or_echo() {
if [[ "$MODE" == "dry-run" ]]; then
printf '[dry-run] %s\n' "$*"
printf '[dry-run]'
printf ' %q' "$@"
printf '\n'
else
eval "$*"
"$@"
fi
}
@@ -41,14 +43,14 @@ log "Global hooks destination: $DEST_DIR"
if [[ -d "$DEST_DIR" ]]; then
log "Backing up existing hooks directory to $BACKUP_DIR"
run_or_echo "mkdir -p \"$BACKUP_DIR\""
run_or_echo "cp -R \"$DEST_DIR\" \"$BACKUP_DIR/hooks\""
run_or_echo mkdir -p "$BACKUP_DIR"
run_or_echo cp -R "$DEST_DIR" "$BACKUP_DIR/hooks"
fi
run_or_echo "mkdir -p \"$DEST_DIR\""
run_or_echo "cp \"$SOURCE_DIR/pre-commit\" \"$DEST_DIR/pre-commit\""
run_or_echo "cp \"$SOURCE_DIR/pre-push\" \"$DEST_DIR/pre-push\""
run_or_echo "chmod +x \"$DEST_DIR/pre-commit\" \"$DEST_DIR/pre-push\""
run_or_echo mkdir -p "$DEST_DIR"
run_or_echo cp "$SOURCE_DIR/pre-commit" "$DEST_DIR/pre-commit"
run_or_echo cp "$SOURCE_DIR/pre-push" "$DEST_DIR/pre-push"
run_or_echo chmod +x "$DEST_DIR/pre-commit" "$DEST_DIR/pre-push"
if [[ "$MODE" == "apply" ]]; then
prev_hooks_path="$(git config --global core.hooksPath || true)"
@@ -56,7 +58,7 @@ if [[ "$MODE" == "apply" ]]; then
log "Previous global hooksPath: $prev_hooks_path"
fi
fi
run_or_echo "git config --global core.hooksPath \"$DEST_DIR\""
run_or_echo git config --global core.hooksPath "$DEST_DIR"
log "Installed ECC global git hooks."
log "Disable per repo by creating .ecc-hooks-disable in project root."

View File

@@ -61,11 +61,34 @@ const PROTECTED_FILES = new Set([
'.markdownlintrc',
]);
/**
 * Normalize hook input into a plain object.
 * Strings are parsed as JSON (blank or malformed input yields {});
 * objects pass through untouched; anything else yields {}.
 */
function parseInput(inputOrRaw) {
  if (typeof inputOrRaw !== 'string') {
    const isObject = inputOrRaw !== null
      && inputOrRaw !== undefined
      && typeof inputOrRaw === 'object';
    return isObject ? inputOrRaw : {};
  }
  const trimmed = inputOrRaw.trim();
  if (!trimmed) return {};
  try {
    return JSON.parse(inputOrRaw);
  } catch {
    return {};
  }
}
/**
* Exportable run() for in-process execution via run-with-flags.js.
* Avoids the ~50-100ms spawnSync overhead when available.
*/
function run(input) {
function run(inputOrRaw, options = {}) {
if (options.truncated) {
return {
exitCode: 2,
stderr:
`BLOCKED: Hook input exceeded ${options.maxStdin || MAX_STDIN} bytes. ` +
'Refusing to bypass config-protection on a truncated payload. ' +
'Retry with a smaller edit or disable the config-protection hook temporarily.'
};
}
const input = parseInput(inputOrRaw);
const filePath = input?.tool_input?.file_path || input?.tool_input?.file || '';
if (!filePath) return { exitCode: 0 };
@@ -75,9 +98,9 @@ function run(input) {
exitCode: 2,
stderr:
`BLOCKED: Modifying ${basename} is not allowed. ` +
`Fix the source code to satisfy linter/formatter rules instead of ` +
`weakening the config. If this is a legitimate config change, ` +
`disable the config-protection hook temporarily.`,
'Fix the source code to satisfy linter/formatter rules instead of ' +
'weakening the config. If this is a legitimate config change, ' +
'disable the config-protection hook temporarily.',
};
}
@@ -87,7 +110,7 @@ function run(input) {
module.exports = { run };
// Stdin fallback for spawnSync execution
let truncated = false;
let truncated = /^(1|true|yes)$/i.test(String(process.env.ECC_HOOK_INPUT_TRUNCATED || ''));
process.stdin.setEncoding('utf8');
process.stdin.on('data', chunk => {
if (raw.length < MAX_STDIN) {
@@ -100,25 +123,17 @@ process.stdin.on('data', chunk => {
});
process.stdin.on('end', () => {
// If stdin was truncated, the JSON is likely malformed. Fail open but
// log a warning so the issue is visible. The run() path (used by
// run-with-flags.js in-process) is not affected by this.
if (truncated) {
process.stderr.write('[config-protection] Warning: stdin exceeded 1MB, skipping check\n');
process.stdout.write(raw);
return;
const result = run(raw, {
truncated,
maxStdin: Number(process.env.ECC_HOOK_INPUT_MAX_BYTES) || MAX_STDIN,
});
if (result.stderr) {
process.stderr.write(result.stderr + '\n');
}
try {
const input = raw.trim() ? JSON.parse(raw) : {};
const result = run(input);
if (result.exitCode === 2) {
process.stderr.write(result.stderr + '\n');
process.exit(2);
}
} catch {
// Keep hook non-blocking on parse errors.
if (result.exitCode === 2) {
process.exit(2);
}
process.stdout.write(raw);

View File

@@ -0,0 +1,94 @@
#!/usr/bin/env node
/**
* Desktop Notification Hook (Stop)
*
* Sends a native desktop notification with the task summary when Claude
* finishes responding. Currently supports macOS (osascript); other
* platforms exit silently. Windows (PowerShell) and Linux (notify-send)
* support is planned.
*
* Hook ID : stop:desktop-notify
* Profiles: standard, strict
*/
'use strict';
const { spawnSync } = require('child_process');
const { isMacOS, log } = require('../lib/utils');
const TITLE = 'Claude Code';
const MAX_BODY_LENGTH = 100;
/**
 * Derive a short notification body from the assistant's last message:
 * the first non-blank line, truncated to MAX_BODY_LENGTH characters
 * (with a "..." suffix). Missing or blank input yields 'Done'.
 */
function extractSummary(message) {
  if (typeof message !== 'string' || message.length === 0) return 'Done';
  let firstLine = '';
  for (const rawLine of message.split('\n')) {
    const candidate = rawLine.trim();
    if (candidate) {
      firstLine = candidate;
      break;
    }
  }
  if (!firstLine) return 'Done';
  if (firstLine.length <= MAX_BODY_LENGTH) return firstLine;
  return `${firstLine.slice(0, MAX_BODY_LENGTH)}...`;
}
/**
 * Send a macOS notification via `osascript -e 'display notification ...'`.
 * Rather than attempting escape sequences inside the embedded AppleScript
 * string literal (fragile across quoting layers), the text is sanitized:
 * backslashes are stripped and double quotes become a curly quote.
 * Failures are logged and otherwise ignored.
 */
function notifyMacOS(title, body) {
  const sanitize = (text) => text.replace(/\\/g, '').replace(/"/g, '\u201C');
  const safeBody = sanitize(body);
  const safeTitle = sanitize(title);
  const script = `display notification "${safeBody}" with title "${safeTitle}"`;
  const result = spawnSync('osascript', ['-e', script], { stdio: 'ignore', timeout: 5000 });
  if (result.error || result.status !== 0) {
    const detail = result.error ? result.error.message : `exit ${result.status}`;
    log(`[DesktopNotify] osascript failed: ${detail}`);
  }
}
// TODO: future platform support
// function notifyWindows(title, body) { ... }
// function notifyLinux(title, body) { ... }
/**
 * Entry point used by run-with-flags.js to execute this hook in-process,
 * avoiding the cost of spawning a child node. Pass-through hook: always
 * returns the raw input unchanged; on non-macOS it does nothing.
 */
function run(raw) {
  try {
    if (isMacOS) {
      const input = raw.trim() ? JSON.parse(raw) : {};
      notifyMacOS(TITLE, extractSummary(input.last_assistant_message));
    }
  } catch (err) {
    log(`[DesktopNotify] Error: ${err.message}`);
  }
  return raw;
}
module.exports = { run };
// Legacy stdin path (when invoked directly rather than via run-with-flags)
// Legacy stdin path (when invoked directly rather than via run-with-flags).
// Buffers at most 1MB of stdin, then runs the hook and echoes the input.
if (require.main === module) {
  const MAX_STDIN = 1024 * 1024;
  let data = '';
  process.stdin.setEncoding('utf8');
  process.stdin.on('data', chunk => {
    const room = MAX_STDIN - data.length;
    if (room > 0) {
      data += chunk.substring(0, room);
    }
  });
  process.stdin.on('end', () => {
    const output = run(data);
    if (output) process.stdout.write(output);
  });
}

View File

@@ -10,6 +10,7 @@
* - policy_violation: Actions that violate configured policies
* - security_finding: Security-relevant tool invocations
* - approval_requested: Operations requiring explicit approval
* - hook_input_truncated: Hook input exceeded the safe inspection limit
*
* Enable: Set ECC_GOVERNANCE_CAPTURE=1
* Configure session: Set ECC_SESSION_ID for session correlation
@@ -101,6 +102,37 @@ function detectSensitivePath(filePath) {
return SENSITIVE_PATHS.some(pattern => pattern.test(filePath));
}
/**
 * Hash a shell command to a short, stable fingerprint: the first 12 hex
 * characters of its SHA-256 digest. Returns null for empty or non-string
 * input.
 */
function fingerprintCommand(command) {
  if (typeof command !== 'string' || command.length === 0) return null;
  return crypto
    .createHash('sha256')
    .update(command)
    .digest('hex')
    .slice(0, 12);
}
/**
 * Summarize a shell command without retaining its full text: the leading
 * word (executable name) plus a fingerprint of the trimmed command, so
 * governance events avoid recording raw arguments.
 */
function summarizeCommand(command) {
  if (typeof command !== 'string' || !command.trim()) {
    return { commandName: null, commandFingerprint: null };
  }
  const trimmed = command.trim();
  const [commandName] = trimmed.split(/\s+/);
  return {
    commandName: commandName || null,
    commandFingerprint: fingerprintCommand(trimmed),
  };
}
// Emit one governance event as a JSON line on stderr, keeping stdout free
// for the hook's pass-through payload.
function emitGovernanceEvent(event) {
  const line = `[governance] ${JSON.stringify(event)}\n`;
  process.stderr.write(line);
}
/**
* Analyze a hook input payload and return governance events to capture.
*
@@ -146,6 +178,7 @@ function analyzeForGovernanceEvents(input, context = {}) {
if (toolName === 'Bash') {
const command = toolInput.command || '';
const approvalFindings = detectApprovalRequired(command);
const commandSummary = summarizeCommand(command);
if (approvalFindings.length > 0) {
events.push({
@@ -155,7 +188,7 @@ function analyzeForGovernanceEvents(input, context = {}) {
payload: {
toolName,
hookPhase,
command: command.slice(0, 200),
...commandSummary,
matchedPatterns: approvalFindings.map(f => f.pattern),
severity: 'high',
},
@@ -188,6 +221,7 @@ function analyzeForGovernanceEvents(input, context = {}) {
if (SECURITY_RELEVANT_TOOLS.has(toolName) && hookPhase === 'post') {
const command = toolInput.command || '';
const hasElevated = /sudo\s/.test(command) || /chmod\s/.test(command) || /chown\s/.test(command);
const commandSummary = summarizeCommand(command);
if (hasElevated) {
events.push({
@@ -197,7 +231,7 @@ function analyzeForGovernanceEvents(input, context = {}) {
payload: {
toolName,
hookPhase,
command: command.slice(0, 200),
...commandSummary,
reason: 'elevated_privilege_command',
severity: 'medium',
},
@@ -216,16 +250,32 @@ function analyzeForGovernanceEvents(input, context = {}) {
* @param {string} rawInput - Raw JSON string from stdin
* @returns {string} The original input (pass-through)
*/
function run(rawInput) {
function run(rawInput, options = {}) {
// Gate on feature flag
if (String(process.env.ECC_GOVERNANCE_CAPTURE || '').toLowerCase() !== '1') {
return rawInput;
}
const sessionId = process.env.ECC_SESSION_ID || null;
const hookPhase = process.env.CLAUDE_HOOK_EVENT_NAME || 'unknown';
if (options.truncated) {
emitGovernanceEvent({
id: generateEventId(),
sessionId,
eventType: 'hook_input_truncated',
payload: {
hookPhase: hookPhase.startsWith('Pre') ? 'pre' : 'post',
sizeLimitBytes: options.maxStdin || MAX_STDIN,
severity: 'warning',
},
resolvedAt: null,
resolution: null,
});
}
try {
const input = JSON.parse(rawInput);
const sessionId = process.env.ECC_SESSION_ID || null;
const hookPhase = process.env.CLAUDE_HOOK_EVENT_NAME || 'unknown';
const events = analyzeForGovernanceEvents(input, {
sessionId,
@@ -233,13 +283,8 @@ function run(rawInput) {
});
if (events.length > 0) {
// Write events to stderr as JSON-lines for the caller to capture.
// The state store write is async and handled by a separate process
// to avoid blocking the hook pipeline.
for (const event of events) {
process.stderr.write(
`[governance] ${JSON.stringify(event)}\n`
);
emitGovernanceEvent(event);
}
}
} catch {
@@ -252,16 +297,25 @@ function run(rawInput) {
// ── stdin entry point ────────────────────────────────
if (require.main === module) {
let raw = '';
let truncated = /^(1|true|yes)$/i.test(String(process.env.ECC_HOOK_INPUT_TRUNCATED || ''));
process.stdin.setEncoding('utf8');
process.stdin.on('data', chunk => {
if (raw.length < MAX_STDIN) {
const remaining = MAX_STDIN - raw.length;
raw += chunk.substring(0, remaining);
if (chunk.length > remaining) {
truncated = true;
}
} else {
truncated = true;
}
});
process.stdin.on('end', () => {
const result = run(raw);
const result = run(raw, {
truncated,
maxStdin: Number(process.env.ECC_HOOK_INPUT_MAX_BYTES) || MAX_STDIN,
});
process.stdout.write(result);
});
}

View File

@@ -99,15 +99,21 @@ function saveState(filePath, state) {
function readRawStdin() {
return new Promise(resolve => {
let raw = '';
let truncated = /^(1|true|yes)$/i.test(String(process.env.ECC_HOOK_INPUT_TRUNCATED || ''));
process.stdin.setEncoding('utf8');
process.stdin.on('data', chunk => {
if (raw.length < MAX_STDIN) {
const remaining = MAX_STDIN - raw.length;
raw += chunk.substring(0, remaining);
if (chunk.length > remaining) {
truncated = true;
}
} else {
truncated = true;
}
});
process.stdin.on('end', () => resolve(raw));
process.stdin.on('error', () => resolve(raw));
process.stdin.on('end', () => resolve({ raw, truncated }));
process.stdin.on('error', () => resolve({ raw, truncated }));
});
}
@@ -155,6 +161,18 @@ function extractMcpTarget(input) {
};
}
// Best-effort field scraping for payloads too large/truncated to JSON.parse:
// pull the relevant string fields out of the raw text with regexes, then
// reuse the normal extractMcpTarget() path.
function extractMcpTargetFromRaw(raw) {
  const scrape = (pattern) => {
    const match = raw.match(pattern);
    return match ? match[1] : undefined;
  };
  const toolName = scrape(/"(?:tool_name|name)"\s*:\s*"([^"]+)"/);
  return extractMcpTarget({
    tool_name: toolName === undefined ? '' : toolName,
    server: scrape(/"(?:server|mcp_server|connector)"\s*:\s*"([^"]+)"/),
    tool: scrape(/"(?:tool|mcp_tool)"\s*:\s*"([^"]+)"/)
  });
}
function resolveServerConfig(serverName) {
for (const filePath of configPaths()) {
const data = readJsonFile(filePath);
@@ -559,9 +577,9 @@ async function handlePostToolUseFailure(rawInput, input, target, statePathValue,
}
async function main() {
const rawInput = await readRawStdin();
const { raw: rawInput, truncated } = await readRawStdin();
const input = safeParse(rawInput);
const target = extractMcpTarget(input);
const target = extractMcpTarget(input) || (truncated ? extractMcpTargetFromRaw(rawInput) : null);
if (!target) {
process.stdout.write(rawInput);
@@ -569,6 +587,19 @@ async function main() {
return;
}
if (truncated) {
const limit = Number(process.env.ECC_HOOK_INPUT_MAX_BYTES) || MAX_STDIN;
const logs = [
shouldFailOpen()
? `[MCPHealthCheck] Hook input exceeded ${limit} bytes while checking ${target.server}; allowing ${target.tool || 'tool'} because fail-open mode is enabled`
: `[MCPHealthCheck] Hook input exceeded ${limit} bytes while checking ${target.server}; blocking ${target.tool || 'tool'} to avoid bypassing MCP health checks`
];
emitLogs(logs);
process.stdout.write(rawInput);
process.exit(shouldFailOpen() ? 0 : 2);
return;
}
const eventName = process.env.CLAUDE_HOOK_EVENT_NAME || 'PreToolUse';
const now = Date.now();
const statePathValue = stateFilePath();

View File

@@ -18,18 +18,66 @@ const MAX_STDIN = 1024 * 1024;
function readStdinRaw() {
return new Promise(resolve => {
let raw = '';
let truncated = false;
process.stdin.setEncoding('utf8');
process.stdin.on('data', chunk => {
if (raw.length < MAX_STDIN) {
const remaining = MAX_STDIN - raw.length;
raw += chunk.substring(0, remaining);
if (chunk.length > remaining) {
truncated = true;
}
} else {
truncated = true;
}
});
process.stdin.on('end', () => resolve(raw));
process.stdin.on('error', () => resolve(raw));
process.stdin.on('end', () => resolve({ raw, truncated }));
process.stdin.on('error', () => resolve({ raw, truncated }));
});
}
// Write a message to stderr, guaranteeing exactly one trailing newline.
// Silently ignores empty or non-string input.
function writeStderr(stderr) {
  const hasText = typeof stderr === 'string' && stderr.length > 0;
  if (!hasText) return;
  const line = stderr.endsWith('\n') ? stderr : `${stderr}\n`;
  process.stderr.write(line);
}
/**
 * Translate a hook run() result into process output and an exit code.
 * - string/Buffer: written verbatim, exit 0.
 * - object: optional stderr is flushed; an explicit `stdout` property is
 *   written (even if empty), otherwise the raw input is echoed on success;
 *   `exitCode` is used when it is an integer, else 0.
 * - anything else: echo the raw input, exit 0.
 */
function emitHookResult(raw, output) {
  if (typeof output === 'string' || Buffer.isBuffer(output)) {
    process.stdout.write(String(output));
    return 0;
  }
  const isResultObject = output !== null
    && output !== undefined
    && typeof output === 'object';
  if (!isResultObject) {
    process.stdout.write(raw);
    return 0;
  }
  writeStderr(output.stderr);
  const exitCode = Number.isInteger(output.exitCode) ? output.exitCode : 0;
  if (Object.prototype.hasOwnProperty.call(output, 'stdout')) {
    process.stdout.write(String(output.stdout ?? ''));
  } else if (exitCode === 0) {
    // No explicit stdout: pass the input through only on success so a
    // blocking hook does not forward the payload.
    process.stdout.write(raw);
  }
  return exitCode;
}
// Forward a legacy (spawned) hook's stdout; if it produced none but exited
// cleanly, echo the original input so the pipeline keeps flowing.
function writeLegacySpawnOutput(raw, result) {
  const stdout = typeof result.stdout === 'string' ? result.stdout : '';
  if (stdout) {
    process.stdout.write(stdout);
  } else if (result.status === 0) {
    process.stdout.write(raw);
  }
}
function getPluginRoot() {
if (process.env.CLAUDE_PLUGIN_ROOT && process.env.CLAUDE_PLUGIN_ROOT.trim()) {
return process.env.CLAUDE_PLUGIN_ROOT;
@@ -39,7 +87,7 @@ function getPluginRoot() {
async function main() {
const [, , hookId, relScriptPath, profilesCsv] = process.argv;
const raw = await readStdinRaw();
const { raw, truncated } = await readStdinRaw();
if (!hookId || !relScriptPath) {
process.stdout.write(raw);
@@ -89,8 +137,8 @@ async function main() {
if (hookModule && typeof hookModule.run === 'function') {
try {
const output = hookModule.run(raw);
if (output !== null && output !== undefined) process.stdout.write(output);
const output = hookModule.run(raw, { truncated, maxStdin: MAX_STDIN });
process.exit(emitHookResult(raw, output));
} catch (runErr) {
process.stderr.write(`[Hook] run() error for ${hookId}: ${runErr.message}\n`);
process.stdout.write(raw);
@@ -99,19 +147,32 @@ async function main() {
}
// Legacy path: spawn a child Node process for hooks without run() export
const result = spawnSync('node', [scriptPath], {
const result = spawnSync(process.execPath, [scriptPath], {
input: raw,
encoding: 'utf8',
env: process.env,
env: {
...process.env,
ECC_HOOK_INPUT_TRUNCATED: truncated ? '1' : '0',
ECC_HOOK_INPUT_MAX_BYTES: String(MAX_STDIN)
},
cwd: process.cwd(),
timeout: 30000
});
if (result.stdout) process.stdout.write(result.stdout);
writeLegacySpawnOutput(raw, result);
if (result.stderr) process.stderr.write(result.stderr);
const code = Number.isInteger(result.status) ? result.status : 0;
process.exit(code);
if (result.error || result.signal || result.status === null) {
const failureDetail = result.error
? result.error.message
: result.signal
? `terminated by signal ${result.signal}`
: 'missing exit status';
writeStderr(`[Hook] legacy hook execution failed for ${hookId}: ${failureDetail}`);
process.exit(1);
}
process.exit(Number.isInteger(result.status) ? result.status : 0);
}
main().catch(err => {

View File

@@ -11,28 +11,59 @@
const {
getSessionsDir,
getSessionSearchDirs,
getLearnedSkillsDir,
findFiles,
ensureDir,
readFile,
stripAnsi,
log,
output
log
} = require('../lib/utils');
const { getPackageManager, getSelectionPrompt } = require('../lib/package-manager');
const { listAliases } = require('../lib/session-aliases');
const { detectProjectType } = require('../lib/project-detect');
const path = require('path');
/**
 * Collect recent session files (last 7 days) across all search dirs and
 * dedupe by basename. Newest mtime wins; on a tie, the earlier directory
 * in searchDirs (canonical storage) takes precedence. Result is sorted
 * newest-first, then by directory priority.
 */
function dedupeRecentSessions(searchDirs) {
  const byBasename = new Map();
  searchDirs.forEach((dir, dirIndex) => {
    for (const match of findFiles(dir, '*-session.tmp', { maxAge: 7 })) {
      const basename = path.basename(match.path);
      const candidate = { ...match, basename, dirIndex };
      const best = byBasename.get(basename);
      const replaces =
        !best
        || candidate.mtime > best.mtime
        || (candidate.mtime === best.mtime && candidate.dirIndex < best.dirIndex);
      if (replaces) {
        byBasename.set(basename, candidate);
      }
    }
  });
  return [...byBasename.values()]
    .sort((a, b) => b.mtime - a.mtime || a.dirIndex - b.dirIndex);
}
async function main() {
const sessionsDir = getSessionsDir();
const learnedDir = getLearnedSkillsDir();
const additionalContextParts = [];
// Ensure directories exist
ensureDir(sessionsDir);
ensureDir(learnedDir);
// Check for recent session files (last 7 days)
const recentSessions = findFiles(sessionsDir, '*-session.tmp', { maxAge: 7 });
const recentSessions = dedupeRecentSessions(getSessionSearchDirs());
if (recentSessions.length > 0) {
const latest = recentSessions[0];
@@ -43,7 +74,7 @@ async function main() {
const content = stripAnsi(readFile(latest.path));
if (content && !content.includes('[Session context goes here]')) {
// Only inject if the session has actual content (not the blank template)
output(`Previous session summary:\n${content}`);
additionalContextParts.push(`Previous session summary:\n${content}`);
}
}
@@ -84,15 +115,49 @@ async function main() {
parts.push(`frameworks: ${projectInfo.frameworks.join(', ')}`);
}
log(`[SessionStart] Project detected — ${parts.join('; ')}`);
output(`Project type: ${JSON.stringify(projectInfo)}`);
additionalContextParts.push(`Project type: ${JSON.stringify(projectInfo)}`);
} else {
log('[SessionStart] No specific project type detected');
}
process.exit(0);
await writeSessionStartPayload(additionalContextParts.join('\n\n'));
}
/**
 * Write the SessionStart hookSpecificOutput payload to stdout, resolving
 * once the write completes. A one-shot 'error' listener covers stream
 * failures that bypass the write callback; the `settled` flag guarantees
 * the promise settles exactly once whichever path fires first.
 */
function writeSessionStartPayload(additionalContext) {
  const payload = JSON.stringify({
    hookSpecificOutput: {
      hookEventName: 'SessionStart',
      additionalContext
    }
  });
  return new Promise((resolve, reject) => {
    let settled = false;
    const failOnce = (err) => {
      if (settled) return;
      settled = true;
      if (err) {
        log(`[SessionStart] stdout write error: ${err.message}`);
      }
      reject(err || new Error('stdout stream error'));
    };
    process.stdout.once('error', failOnce);
    process.stdout.write(payload, (err) => {
      process.stdout.removeListener('error', failOnce);
      if (settled) return;
      settled = true;
      if (err) {
        log(`[SessionStart] stdout write error: ${err.message}`);
        reject(err);
        return;
      }
      resolve();
    });
  });
}
main().catch(err => {
console.error('[SessionStart] Error:', err.message);
process.exit(0); // Don't block on errors
process.exitCode = 0; // Don't block on errors
});

View File

@@ -10,8 +10,9 @@ const os = require('os');
* Tries, in order:
* 1. CLAUDE_PLUGIN_ROOT env var (set by Claude Code for hooks, or by user)
* 2. Standard install location (~/.claude/) — when scripts exist there
* 3. Plugin cache auto-detection — scans ~/.claude/plugins/cache/everything-claude-code/
* 4. Fallback to ~/.claude/ (original behaviour)
* 3. Exact legacy plugin roots under ~/.claude/plugins/
* 4. Plugin cache auto-detection — scans ~/.claude/plugins/cache/everything-claude-code/
* 5. Fallback to ~/.claude/ (original behaviour)
*
* @param {object} [options]
* @param {string} [options.homeDir] Override home directory (for testing)
@@ -38,6 +39,20 @@ function resolveEccRoot(options = {}) {
return claudeDir;
}
// Exact legacy plugin install locations. These preserve backwards
// compatibility without scanning arbitrary plugin trees.
const legacyPluginRoots = [
path.join(claudeDir, 'plugins', 'everything-claude-code'),
path.join(claudeDir, 'plugins', 'everything-claude-code@everything-claude-code'),
path.join(claudeDir, 'plugins', 'marketplace', 'everything-claude-code')
];
for (const candidate of legacyPluginRoots) {
if (fs.existsSync(path.join(candidate, probe))) {
return candidate;
}
}
// Plugin cache — Claude Code stores marketplace plugins under
// ~/.claude/plugins/cache/<plugin-name>/<org>/<version>/
try {
@@ -81,7 +96,7 @@ function resolveEccRoot(options = {}) {
* const _r = <paste INLINE_RESOLVE>;
* const sm = require(_r + '/scripts/lib/session-manager');
*/
const INLINE_RESOLVE = `(()=>{var e=process.env.CLAUDE_PLUGIN_ROOT;if(e&&e.trim())return e.trim();var p=require('path'),f=require('fs'),h=require('os').homedir(),d=p.join(h,'.claude'),q=p.join('scripts','lib','utils.js');if(f.existsSync(p.join(d,q)))return d;try{var b=p.join(d,'plugins','cache','everything-claude-code');for(var o of f.readdirSync(b))for(var v of f.readdirSync(p.join(b,o))){var c=p.join(b,o,v);if(f.existsSync(p.join(c,q)))return c}}catch(x){}return d})()`;
const INLINE_RESOLVE = `(()=>{var e=process.env.CLAUDE_PLUGIN_ROOT;if(e&&e.trim())return e.trim();var p=require('path'),f=require('fs'),h=require('os').homedir(),d=p.join(h,'.claude'),q=p.join('scripts','lib','utils.js');if(f.existsSync(p.join(d,q)))return d;for(var l of [p.join(d,'plugins','everything-claude-code'),p.join(d,'plugins','everything-claude-code@everything-claude-code'),p.join(d,'plugins','marketplace','everything-claude-code')])if(f.existsSync(p.join(l,q)))return l;try{var b=p.join(d,'plugins','cache','everything-claude-code');for(var o of f.readdirSync(b,{withFileTypes:true})){if(!o.isDirectory())continue;for(var v of f.readdirSync(p.join(b,o.name),{withFileTypes:true})){if(!v.isDirectory())continue;var c=p.join(b,o.name,v.name);if(f.existsSync(p.join(c,q)))return c}}}catch(x){}return d})()`;
module.exports = {
resolveEccRoot,

View File

@@ -1,6 +1,7 @@
/**
* Session Manager Library for Claude Code.
* Provides CRUD operations for session files stored as markdown in ~/.claude/sessions/.
* Provides CRUD operations for session files stored as markdown in
* ~/.claude/session-data/ with legacy read compatibility for ~/.claude/sessions/.
*/
/** Parsed metadata from a session filename */

View File

@@ -2,7 +2,8 @@
* Session Manager Library for Claude Code
* Provides core session CRUD operations for listing, loading, and managing sessions
*
* Sessions are stored as markdown files in ~/.claude/sessions/ with format:
* Sessions are stored as markdown files in ~/.claude/session-data/ with
* legacy read compatibility for ~/.claude/sessions/:
* - YYYY-MM-DD-session.tmp (old format)
* - YYYY-MM-DD-<short-id>-session.tmp (new format)
*/
@@ -12,6 +13,7 @@ const path = require('path');
const {
getSessionsDir,
getSessionSearchDirs,
readFile,
log
} = require('./utils');
@@ -30,6 +32,7 @@ const SESSION_FILENAME_REGEX = /^(\d{4}-\d{2}-\d{2})(?:-([a-zA-Z0-9_][a-zA-Z0-9_
* @returns {object|null} Parsed metadata or null if invalid
*/
function parseSessionFilename(filename) {
if (!filename || typeof filename !== 'string') return null;
const match = filename.match(SESSION_FILENAME_REGEX);
if (!match) return null;
@@ -66,6 +69,145 @@ function getSessionPath(filename) {
return path.join(getSessionsDir(), filename);
}
/**
 * Scan every session search directory (canonical first, then legacy) for
 * session files matching the optional date/search filters, dedupe by
 * filename (first occurrence — i.e. the canonical dir — wins) and return
 * records sorted newest-first.
 *
 * @param {object} [options]
 * @param {string|null} [options.date] Exact YYYY-MM-DD date filter
 * @param {string|null} [options.search] Substring filter on the short ID
 * @returns {object[]} Deduped session records, newest first
 */
function getSessionCandidates(options = {}) {
  const {
    date = null,
    search = null
  } = options;
  const candidates = [];
  for (const sessionsDir of getSessionSearchDirs()) {
    if (!fs.existsSync(sessionsDir)) {
      continue;
    }
    let entries;
    try {
      entries = fs.readdirSync(sessionsDir, { withFileTypes: true });
    } catch (error) {
      log(`[SessionManager] Error reading sessions directory ${sessionsDir}: ${error.message}`);
      continue;
    }
    for (const entry of entries) {
      if (!entry.isFile() || !entry.name.endsWith('.tmp')) continue;
      const metadata = parseSessionFilename(entry.name);
      if (!metadata) continue;
      if (date && metadata.date !== date) continue;
      if (search && !metadata.shortId.includes(search)) continue;
      // Reuse buildSessionRecord so the stat handling and record shape
      // stay consistent with getMatchingSessionCandidates (it logs and
      // returns null on stat failure, matching the previous behavior).
      const record = buildSessionRecord(path.join(sessionsDir, entry.name), metadata);
      if (record) candidates.push(record);
    }
  }
  // Dedupe by filename: first occurrence wins, so canonical-dir records
  // shadow their legacy duplicates.
  const deduped = [];
  const seenFilenames = new Set();
  for (const session of candidates) {
    if (seenFilenames.has(session.filename)) {
      continue;
    }
    seenFilenames.add(session.filename);
    deduped.push(session);
  }
  deduped.sort((a, b) => b.modifiedTime - a.modifiedTime);
  return deduped;
}
/**
 * Stat a session file and merge the result with its parsed filename
 * metadata into one record. Returns null (after logging) when the file
 * cannot be stat'ed, e.g. deleted between readdir and stat.
 */
function buildSessionRecord(sessionPath, metadata) {
  let stat;
  try {
    stat = fs.statSync(sessionPath);
  } catch (error) {
    log(`[SessionManager] Error stating session ${sessionPath}: ${error.message}`);
    return null;
  }
  const { size } = stat;
  return {
    ...metadata,
    sessionPath,
    hasContent: size > 0,
    size,
    modifiedTime: stat.mtime,
    createdTime: stat.birthtime || stat.ctime
  };
}
/**
 * Check whether parsed session metadata matches a normalized session ID:
 * a short-ID prefix, the exact filename (with or without .tmp), or — for
 * sessions without a short ID — the date-only `<id>-session.tmp` form.
 */
function sessionMatchesId(metadata, normalizedSessionId) {
  const { filename, shortId } = metadata;
  if (shortId !== 'no-id' && shortId.startsWith(normalizedSessionId)) {
    return true;
  }
  if (filename === normalizedSessionId || filename === `${normalizedSessionId}.tmp`) {
    return true;
  }
  return shortId === 'no-id' && filename === `${normalizedSessionId}-session.tmp`;
}
/**
 * Find all sessions across the search directories whose metadata matches
 * the given normalized session ID, deduped by filename (first directory
 * wins) and sorted newest-first.
 */
function getMatchingSessionCandidates(normalizedSessionId) {
  const matches = [];
  const seenFilenames = new Set();
  for (const sessionsDir of getSessionSearchDirs()) {
    if (!fs.existsSync(sessionsDir)) continue;
    let entries;
    try {
      entries = fs.readdirSync(sessionsDir, { withFileTypes: true });
    } catch (error) {
      log(`[SessionManager] Error reading sessions directory ${sessionsDir}: ${error.message}`);
      continue;
    }
    for (const entry of entries) {
      if (!entry.isFile() || !entry.name.endsWith('.tmp')) continue;
      const metadata = parseSessionFilename(entry.name);
      if (!metadata) continue;
      if (!sessionMatchesId(metadata, normalizedSessionId)) continue;
      if (seenFilenames.has(metadata.filename)) continue;
      const record = buildSessionRecord(path.join(sessionsDir, metadata.filename), metadata);
      if (!record) continue;
      seenFilenames.add(metadata.filename);
      matches.push(record);
    }
  }
  matches.sort((a, b) => b.modifiedTime - a.modifiedTime);
  return matches;
}
/**
* Read and parse session markdown content
* @param {string} sessionPath - Full path to session file
@@ -228,58 +370,12 @@ function getAllSessions(options = {}) {
const limitNum = Number(rawLimit);
const limit = Number.isNaN(limitNum) ? 50 : Math.max(1, Math.floor(limitNum));
const sessionsDir = getSessionsDir();
const sessions = getSessionCandidates({ date, search });
if (!fs.existsSync(sessionsDir)) {
if (sessions.length === 0) {
return { sessions: [], total: 0, offset, limit, hasMore: false };
}
const entries = fs.readdirSync(sessionsDir, { withFileTypes: true });
const sessions = [];
for (const entry of entries) {
// Skip non-files (only process .tmp files)
if (!entry.isFile() || !entry.name.endsWith('.tmp')) continue;
const filename = entry.name;
const metadata = parseSessionFilename(filename);
if (!metadata) continue;
// Apply date filter
if (date && metadata.date !== date) {
continue;
}
// Apply search filter (search in short ID)
if (search && !metadata.shortId.includes(search)) {
continue;
}
const sessionPath = path.join(sessionsDir, filename);
// Get file stats (wrapped in try-catch to handle TOCTOU race where
// file is deleted between readdirSync and statSync)
let stats;
try {
stats = fs.statSync(sessionPath);
} catch {
continue; // File was deleted between readdir and stat
}
sessions.push({
...metadata,
sessionPath,
hasContent: stats.size > 0,
size: stats.size,
modifiedTime: stats.mtime,
createdTime: stats.birthtime || stats.ctime
});
}
// Sort by modified time (newest first)
sessions.sort((a, b) => b.modifiedTime - a.modifiedTime);
// Apply pagination
const paginatedSessions = sessions.slice(offset, offset + limit);
@@ -299,55 +395,28 @@ function getAllSessions(options = {}) {
* @returns {object|null} Session object or null if not found
*/
function getSessionById(sessionId, includeContent = false) {
const sessionsDir = getSessionsDir();
if (!fs.existsSync(sessionsDir)) {
if (typeof sessionId !== 'string') {
return null;
}
const entries = fs.readdirSync(sessionsDir, { withFileTypes: true });
const normalizedSessionId = sessionId.trim();
if (!normalizedSessionId) {
return null;
}
for (const entry of entries) {
if (!entry.isFile() || !entry.name.endsWith('.tmp')) continue;
const sessions = getMatchingSessionCandidates(normalizedSessionId);
const filename = entry.name;
const metadata = parseSessionFilename(filename);
if (!metadata) continue;
// Check if session ID matches (short ID or full filename without .tmp)
const shortIdMatch = sessionId.length > 0 && metadata.shortId !== 'no-id' && metadata.shortId.startsWith(sessionId);
const filenameMatch = filename === sessionId || filename === `${sessionId}.tmp`;
const noIdMatch = metadata.shortId === 'no-id' && filename === `${sessionId}-session.tmp`;
if (!shortIdMatch && !filenameMatch && !noIdMatch) {
continue;
}
const sessionPath = path.join(sessionsDir, filename);
let stats;
try {
stats = fs.statSync(sessionPath);
} catch {
return null; // File was deleted between readdir and stat
}
const session = {
...metadata,
sessionPath,
size: stats.size,
modifiedTime: stats.mtime,
createdTime: stats.birthtime || stats.ctime
};
for (const session of sessions) {
const sessionRecord = { ...session };
if (includeContent) {
session.content = getSessionContent(sessionPath);
session.metadata = parseSessionMetadata(session.content);
sessionRecord.content = getSessionContent(sessionRecord.sessionPath);
sessionRecord.metadata = parseSessionMetadata(sessionRecord.content);
// Pass pre-read content to avoid a redundant disk read
session.stats = getSessionStats(session.content || '');
sessionRecord.stats = getSessionStats(sessionRecord.content || '');
}
return session;
return sessionRecord;
}
return null;

View File

@@ -18,9 +18,15 @@ export function getHomeDir(): string;
/** Get the Claude config directory (~/.claude) */
export function getClaudeDir(): string;
/** Get the sessions directory (~/.claude/sessions) */
/** Get the canonical ECC sessions directory (~/.claude/session-data) */
export function getSessionsDir(): string;
/** Get the legacy Claude-managed sessions directory (~/.claude/sessions) */
export function getLegacySessionsDir(): string;
/** Get session directories to search, with canonical storage first and legacy fallback second */
export function getSessionSearchDirs(): string[];
/** Get the learned skills directory (~/.claude/skills/learned) */
export function getLearnedSkillsDir(): string;
@@ -47,9 +53,16 @@ export function getDateTimeString(): string;
// --- Session/Project ---
/**
* Sanitize a string for use as a session filename segment.
* Replaces invalid characters, strips leading dots, and returns null when
* nothing meaningful remains. Non-ASCII names are hashed for stability.
*/
export function sanitizeSessionId(raw: string | null | undefined): string | null;
/**
* Get short session ID from CLAUDE_SESSION_ID environment variable.
* Returns last 8 characters, falls back to project name then the provided fallback.
* Returns last 8 characters, falls back to a sanitized project name then the provided fallback.
*/
export function getSessionIdShort(fallback?: string): string;

View File

@@ -6,12 +6,20 @@
const fs = require('fs');
const path = require('path');
const os = require('os');
const crypto = require('crypto');
const { execSync, spawnSync } = require('child_process');
// Platform detection
const isWindows = process.platform === 'win32';
const isMacOS = process.platform === 'darwin';
const isLinux = process.platform === 'linux';
const SESSION_DATA_DIR_NAME = 'session-data';
const LEGACY_SESSIONS_DIR_NAME = 'sessions';
const WINDOWS_RESERVED_SESSION_IDS = new Set([
'CON', 'PRN', 'AUX', 'NUL',
'COM1', 'COM2', 'COM3', 'COM4', 'COM5', 'COM6', 'COM7', 'COM8', 'COM9',
'LPT1', 'LPT2', 'LPT3', 'LPT4', 'LPT5', 'LPT6', 'LPT7', 'LPT8', 'LPT9'
]);
/**
* Get the user's home directory (cross-platform)
@@ -31,7 +39,21 @@ function getClaudeDir() {
* Get the sessions directory
*/
function getSessionsDir() {
return path.join(getClaudeDir(), 'sessions');
return path.join(getClaudeDir(), SESSION_DATA_DIR_NAME);
}
/**
* Get the legacy sessions directory used by older ECC installs
*/
function getLegacySessionsDir() {
  // Older ECC installs stored sessions under ~/.claude/sessions.
  const claudeDir = getClaudeDir();
  return path.join(claudeDir, LEGACY_SESSIONS_DIR_NAME);
}
/**
* Get all session directories to search, in canonical-first order
*/
function getSessionSearchDirs() {
  // Canonical directory first, legacy fallback second; Set dedupes the
  // (unlikely) case where both resolve to the same path.
  const candidates = [getSessionsDir(), getLegacySessionsDir()];
  return [...new Set(candidates)];
}
/**
@@ -107,16 +129,52 @@ function getProjectName() {
return path.basename(process.cwd()) || null;
}
/**
* Sanitize a string for use as a session filename segment.
* Replaces invalid characters with hyphens, collapses runs, strips
* leading/trailing hyphens, and removes leading dots so hidden-dir names
* like ".claude" map cleanly to "claude".
*
* Pure non-ASCII inputs get a stable 8-char hash so distinct names do not
* collapse to the same fallback session id. Mixed-script inputs retain their
* ASCII part and gain a short hash suffix for disambiguation.
*/
function sanitizeSessionId(raw) {
  if (!raw || typeof raw !== 'string') return null;
  // Detect non-ASCII on the ORIGINAL input so mixed-script names always
  // receive a disambiguating hash suffix.
  const containsNonAscii = [...raw].some((ch) => ch.codePointAt(0) > 0x7f);
  // Strip leading dots so hidden-dir names like ".claude" map to "claude".
  const normalized = raw.replace(/^\.+/, '');
  let cleaned = normalized.replace(/[^a-zA-Z0-9_-]/g, '-');
  cleaned = cleaned.replace(/-{2,}/g, '-').replace(/^-+|-+$/g, '');
  if (cleaned.length === 0) {
    // No ASCII survived sanitization. Pure whitespace/punctuation inputs are
    // meaningless; everything else gets a stable 8-char hash so distinct
    // non-ASCII names do not collapse to the same fallback id.
    const meaningful = normalized.replace(/[\s\p{P}]/gu, '');
    if (meaningful.length === 0) return null;
    return crypto.createHash('sha256').update(normalized).digest('hex').slice(0, 8);
  }
  const shortHash = crypto.createHash('sha256').update(normalized).digest('hex').slice(0, 6);
  // Windows reserved device names (CON, NUL, ...) are never returned bare.
  if (WINDOWS_RESERVED_SESSION_IDS.has(cleaned.toUpperCase())) {
    return `${cleaned}-${shortHash}`;
  }
  return containsNonAscii ? `${cleaned}-${shortHash}` : cleaned;
}
/**
* Get short session ID from CLAUDE_SESSION_ID environment variable
* Returns last 8 characters, falls back to project name then 'default'
* Returns last 8 characters, falls back to a sanitized project name then 'default'.
*/
function getSessionIdShort(fallback = 'default') {
const sessionId = process.env.CLAUDE_SESSION_ID;
if (sessionId && sessionId.length > 0) {
return sessionId.slice(-8);
const sanitized = sanitizeSessionId(sessionId.slice(-8));
if (sanitized) return sanitized;
}
return getProjectName() || fallback;
return sanitizeSessionId(getProjectName()) || sanitizeSessionId(fallback) || 'default';
}
/**
@@ -525,6 +583,8 @@ module.exports = {
getHomeDir,
getClaudeDir,
getSessionsDir,
getLegacySessionsDir,
getSessionSearchDirs,
getLearnedSkillsDir,
getTempDir,
ensureDir,
@@ -535,6 +595,7 @@ module.exports = {
getDateTimeString,
// Session/Project
sanitizeSessionId,
getSessionIdShort,
getGitRepoName,
getProjectName,

View File

@@ -43,9 +43,11 @@ log() { printf '[ecc-sync] %s\n' "$*"; }
run_or_echo() {
if [[ "$MODE" == "dry-run" ]]; then
printf '[dry-run] %s\n' "$*"
printf '[dry-run]'
printf ' %q' "$@"
printf '\n'
else
eval "$@"
"$@"
fi
}
@@ -149,10 +151,10 @@ log "Repo root: $REPO_ROOT"
log "Codex home: $CODEX_HOME"
log "Creating backup folder: $BACKUP_DIR"
run_or_echo "mkdir -p \"$BACKUP_DIR\""
run_or_echo "cp \"$CONFIG_FILE\" \"$BACKUP_DIR/config.toml\""
run_or_echo mkdir -p "$BACKUP_DIR"
run_or_echo cp "$CONFIG_FILE" "$BACKUP_DIR/config.toml"
if [[ -f "$AGENTS_FILE" ]]; then
run_or_echo "cp \"$AGENTS_FILE\" \"$BACKUP_DIR/AGENTS.md\""
run_or_echo cp "$AGENTS_FILE" "$BACKUP_DIR/AGENTS.md"
fi
ECC_BEGIN_MARKER="<!-- BEGIN ECC -->"
@@ -234,19 +236,19 @@ else
fi
log "Syncing ECC Codex skills"
run_or_echo "mkdir -p \"$SKILLS_DEST\""
run_or_echo mkdir -p "$SKILLS_DEST"
skills_count=0
for skill_dir in "$SKILLS_SRC"/*; do
[[ -d "$skill_dir" ]] || continue
skill_name="$(basename "$skill_dir")"
dest="$SKILLS_DEST/$skill_name"
run_or_echo "rm -rf \"$dest\""
run_or_echo "cp -R \"$skill_dir\" \"$dest\""
run_or_echo rm -rf "$dest"
run_or_echo cp -R "$skill_dir" "$dest"
skills_count=$((skills_count + 1))
done
log "Generating prompt files from ECC commands"
run_or_echo "mkdir -p \"$PROMPTS_DEST\""
run_or_echo mkdir -p "$PROMPTS_DEST"
manifest="$PROMPTS_DEST/ecc-prompts-manifest.txt"
if [[ "$MODE" == "dry-run" ]]; then
printf '[dry-run] > %s\n' "$manifest"

View File

@@ -55,15 +55,18 @@ analyze_observations() {
# Sample recent observations instead of loading the entire file (#521).
# This prevents multi-MB payloads from being passed to the LLM.
MAX_ANALYSIS_LINES="${ECC_OBSERVER_MAX_ANALYSIS_LINES:-500}"
analysis_file="$(mktemp "${TMPDIR:-/tmp}/ecc-observer-analysis.XXXXXX.jsonl")"
observer_tmp_dir="${PROJECT_DIR}/.observer-tmp"
mkdir -p "$observer_tmp_dir"
analysis_file="$(mktemp "${observer_tmp_dir}/ecc-observer-analysis.XXXXXX.jsonl")"
tail -n "$MAX_ANALYSIS_LINES" "$OBSERVATIONS_FILE" > "$analysis_file"
analysis_count=$(wc -l < "$analysis_file" 2>/dev/null || echo 0)
echo "[$(date)] Using last $analysis_count of $obs_count observations for analysis" >> "$LOG_FILE"
prompt_file="$(mktemp "${TMPDIR:-/tmp}/ecc-observer-prompt.XXXXXX")"
prompt_file="$(mktemp "${observer_tmp_dir}/ecc-observer-prompt.XXXXXX")"
cat > "$prompt_file" <<PROMPT
Read ${analysis_file} and identify patterns for the project ${PROJECT_NAME} (user corrections, error resolutions, repeated workflows, tool preferences).
If you find 3+ occurrences of the same pattern, create an instinct file in ${INSTINCTS_DIR}/<id>.md.
If you find 3+ occurrences of the same pattern, you MUST write an instinct file directly to ${INSTINCTS_DIR}/<id>.md using the Write tool.
Do NOT ask for permission to write files, do NOT describe what you would write, and do NOT stop at analysis when a qualifying pattern exists.
CRITICAL: Every instinct file MUST use this exact format:
@@ -92,6 +95,7 @@ Rules:
- Be conservative, only clear patterns with 3+ observations
- Use narrow, specific triggers
- Never include actual code snippets, only describe patterns
- When a qualifying pattern exists, write or update the instinct file in this run instead of asking for confirmation
- If a similar instinct already exists in ${INSTINCTS_DIR}/, update it instead of creating a duplicate
- The YAML frontmatter (between --- markers) with id field is MANDATORY
- If a pattern seems universal (not project-specific), set scope to global instead of project

View File

@@ -0,0 +1,157 @@
/**
* Tests for scripts/hooks/config-protection.js via run-with-flags.js
*/
const assert = require('assert');
const fs = require('fs');
const path = require('path');
const { spawnSync } = require('child_process');
const runner = path.join(__dirname, '..', '..', 'scripts', 'hooks', 'run-with-flags.js');
/**
 * Run a single synchronous test case.
 * Fix: the pass and fail branches previously printed the identical bare
 * `${name}` line, so a failing test was indistinguishable from a passing
 * one in the log output; prefix the status explicitly.
 * @param {string} name - test description printed to the console
 * @param {Function} fn - test body; a throw marks the test as failed
 * @returns {boolean} true when fn completed without throwing
 */
function test(name, fn) {
  try {
    fn();
    console.log(`PASS ${name}`);
    return true;
  } catch (error) {
    console.log(`FAIL ${name}`);
    console.log(`  Error: ${error.message}`);
    return false;
  }
}
/**
 * Execute the config-protection hook through run-with-flags.js.
 * Accepts either a pre-serialized JSON string or an object to stringify.
 * Returns a normalized { code, stdout, stderr } result.
 */
function runHook(input, env = {}) {
  const payload = typeof input === 'string' ? input : JSON.stringify(input);
  const spawnEnv = {
    ...process.env,
    ECC_HOOK_PROFILE: 'standard',
    ...env
  };
  const child = spawnSync(
    'node',
    [runner, 'pre:config-protection', 'scripts/hooks/config-protection.js', 'standard,strict'],
    {
      input: payload,
      encoding: 'utf8',
      env: spawnEnv,
      timeout: 15000,
      stdio: ['pipe', 'pipe', 'pipe']
    }
  );
  // spawnSync reports null status on signal/timeout; treat that as failure.
  const exitCode = Number.isInteger(child.status) ? child.status : 1;
  return {
    code: exitCode,
    stdout: child.stdout || '',
    stderr: child.stderr || ''
  };
}
/**
 * Execute an arbitrary hook script under a caller-supplied plugin root.
 * Mirrors runHook but lets the test point CLAUDE_PLUGIN_ROOT at a
 * temporary plugin tree containing the script under test.
 */
function runCustomHook(pluginRoot, hookId, relScriptPath, input, env = {}) {
  const payload = typeof input === 'string' ? input : JSON.stringify(input);
  const spawnEnv = {
    ...process.env,
    CLAUDE_PLUGIN_ROOT: pluginRoot,
    ECC_HOOK_PROFILE: 'standard',
    ...env
  };
  const child = spawnSync(
    'node',
    [runner, hookId, relScriptPath, 'standard,strict'],
    {
      input: payload,
      encoding: 'utf8',
      env: spawnEnv,
      timeout: 15000,
      stdio: ['pipe', 'pipe', 'pipe']
    }
  );
  // spawnSync reports null status on signal/timeout; treat that as failure.
  const exitCode = Number.isInteger(child.status) ? child.status : 1;
  return {
    code: exitCode,
    stdout: child.stdout || '',
    stderr: child.stderr || ''
  };
}
// Entry point: runs every config-protection scenario and exits non-zero when
// any scenario fails. Each scenario feeds a tool-use payload through
// run-with-flags.js (via runHook/runCustomHook) and inspects exit code,
// stdout passthrough, and stderr messaging.
function runTests() {
  console.log('\n=== Testing config-protection ===\n');
  let passed = 0;
  let failed = 0;
  // Protected config edits must be rejected with exit code 2, must not echo
  // the raw payload on stdout, and must explain the block on stderr.
  if (test('blocks protected config file edits through run-with-flags', () => {
    const input = {
      tool_name: 'Write',
      tool_input: {
        file_path: '.eslintrc.js',
        content: 'module.exports = {};'
      }
    };
    const result = runHook(input);
    assert.strictEqual(result.code, 2, 'Expected protected config edit to be blocked');
    assert.strictEqual(result.stdout, '', 'Blocked hook should not echo raw input');
    assert.ok(result.stderr.includes('BLOCKED: Modifying .eslintrc.js is not allowed.'), `Expected block message, got: ${result.stderr}`);
  })) passed++; else failed++;
  // Non-protected files pass through: exit 0 and the exact raw JSON on stdout.
  if (test('passes through safe file edits unchanged', () => {
    const input = {
      tool_name: 'Write',
      tool_input: {
        file_path: 'src/index.js',
        content: 'console.log("ok");'
      }
    };
    const rawInput = JSON.stringify(input);
    const result = runHook(input);
    assert.strictEqual(result.code, 0, 'Expected safe file edit to pass');
    assert.strictEqual(result.stdout, rawInput, 'Expected exact raw JSON passthrough');
    assert.strictEqual(result.stderr, '', 'Expected no stderr for safe edits');
  })) passed++; else failed++;
  // Payloads over the runner's 1 MiB stdin limit get truncated; a truncated
  // protected-config payload must fail closed (blocked), not fail open.
  if (test('blocks truncated protected config payloads instead of failing open', () => {
    const rawInput = JSON.stringify({
      tool_name: 'Write',
      tool_input: {
        file_path: '.eslintrc.js',
        content: 'x'.repeat(1024 * 1024 + 2048)
      }
    });
    const result = runHook(rawInput);
    assert.strictEqual(result.code, 2, 'Expected truncated protected payload to be blocked');
    assert.strictEqual(result.stdout, '', 'Blocked truncated payload should not echo raw input');
    assert.ok(result.stderr.includes('Hook input exceeded 1048576 bytes'), `Expected size warning, got: ${result.stderr}`);
    assert.ok(result.stderr.includes('truncated payload'), `Expected truncated payload warning, got: ${result.stderr}`);
  })) passed++; else failed++;
  // A legacy hook script that exits 2 without writing stdout must have its
  // exit code propagated and must NOT trigger a raw-input echo by the runner.
  // Builds a throwaway plugin tree with the legacy script, then cleans it up.
  if (test('legacy hooks do not echo raw input when they fail without stdout', () => {
    const pluginRoot = path.join(__dirname, '..', `tmp-runner-plugin-${Date.now()}`);
    const scriptDir = path.join(pluginRoot, 'scripts', 'hooks');
    const scriptPath = path.join(scriptDir, 'legacy-block.js');
    try {
      fs.mkdirSync(scriptDir, { recursive: true });
      fs.writeFileSync(
        scriptPath,
        '#!/usr/bin/env node\nprocess.stderr.write("blocked by legacy hook\\n");\nprocess.exit(2);\n'
      );
      const rawInput = JSON.stringify({
        tool_name: 'Write',
        tool_input: {
          file_path: '.eslintrc.js',
          content: 'module.exports = {};'
        }
      });
      const result = runCustomHook(pluginRoot, 'pre:legacy-block', 'scripts/hooks/legacy-block.js', rawInput);
      assert.strictEqual(result.code, 2, 'Expected failing legacy hook exit code to propagate');
      assert.strictEqual(result.stdout, '', 'Expected failing legacy hook to avoid raw passthrough');
      assert.ok(result.stderr.includes('blocked by legacy hook'), `Expected legacy hook stderr, got: ${result.stderr}`);
    } finally {
      try {
        fs.rmSync(pluginRoot, { recursive: true, force: true });
      } catch {
        // best-effort cleanup
      }
    }
  })) passed++; else failed++;
  console.log(`\nResults: Passed: ${passed}, Failed: ${failed}`);
  // Exit code communicates overall pass/fail to the CI runner.
  process.exit(failed > 0 ? 1 : 0);
}
runTests();

View File

@@ -156,6 +156,35 @@ async function runTests() {
assert.strictEqual(approvalEvent.payload.severity, 'high');
})) passed += 1; else failed += 1;
if (await test('approval events fingerprint commands instead of storing raw command text', async () => {
const command = 'git push origin main --force';
const events = analyzeForGovernanceEvents({
tool_name: 'Bash',
tool_input: { command },
});
const approvalEvent = events.find(e => e.eventType === 'approval_requested');
assert.ok(approvalEvent);
assert.strictEqual(approvalEvent.payload.commandName, 'git');
assert.ok(/^[a-f0-9]{12}$/.test(approvalEvent.payload.commandFingerprint), 'Expected short command fingerprint');
assert.ok(!Object.prototype.hasOwnProperty.call(approvalEvent.payload, 'command'), 'Should not store raw command text');
})) passed += 1; else failed += 1;
if (await test('security findings fingerprint elevated commands instead of storing raw command text', async () => {
const command = 'sudo chmod 600 ~/.ssh/id_rsa';
const events = analyzeForGovernanceEvents({
tool_name: 'Bash',
tool_input: { command },
}, {
hookPhase: 'post',
});
const securityEvent = events.find(e => e.eventType === 'security_finding');
assert.ok(securityEvent);
assert.strictEqual(securityEvent.payload.commandName, 'sudo');
assert.ok(/^[a-f0-9]{12}$/.test(securityEvent.payload.commandFingerprint), 'Expected short command fingerprint');
assert.ok(!Object.prototype.hasOwnProperty.call(securityEvent.payload, 'command'), 'Should not store raw command text');
})) passed += 1; else failed += 1;
if (await test('analyzeForGovernanceEvents detects sensitive file access', async () => {
const events = analyzeForGovernanceEvents({
tool_name: 'Edit',
@@ -273,6 +302,43 @@ async function runTests() {
}
})) passed += 1; else failed += 1;
if (await test('run() emits hook_input_truncated event without logging raw command text', async () => {
const original = process.env.ECC_GOVERNANCE_CAPTURE;
const originalHookEvent = process.env.CLAUDE_HOOK_EVENT_NAME;
const originalWrite = process.stderr.write;
const stderr = [];
process.env.ECC_GOVERNANCE_CAPTURE = '1';
process.env.CLAUDE_HOOK_EVENT_NAME = 'PreToolUse';
process.stderr.write = (chunk, encoding, callback) => {
stderr.push(String(chunk));
if (typeof encoding === 'function') encoding();
if (typeof callback === 'function') callback();
return true;
};
try {
const input = JSON.stringify({ tool_name: 'Bash', tool_input: { command: 'rm -rf /tmp/important' } });
const result = run(input, { truncated: true, maxStdin: 1024 });
assert.strictEqual(result, input);
} finally {
process.stderr.write = originalWrite;
if (original !== undefined) {
process.env.ECC_GOVERNANCE_CAPTURE = original;
} else {
delete process.env.ECC_GOVERNANCE_CAPTURE;
}
if (originalHookEvent !== undefined) {
process.env.CLAUDE_HOOK_EVENT_NAME = originalHookEvent;
} else {
delete process.env.CLAUDE_HOOK_EVENT_NAME;
}
}
const combined = stderr.join('');
assert.ok(combined.includes('"eventType":"hook_input_truncated"'), 'Should emit truncation event');
assert.ok(combined.includes('"sizeLimitBytes":1024'), 'Should record the truncation limit');
assert.ok(!combined.includes('rm -rf /tmp/important'), 'Should not leak raw command text to governance logs');
})) passed += 1; else failed += 1;
if (await test('run() can detect multiple event types in one input', async () => {
// Bash command with force push AND secret in command
const events = analyzeForGovernanceEvents({

View File

@@ -82,6 +82,22 @@ function sleepMs(ms) {
Atomics.wait(new Int32Array(new SharedArrayBuffer(4)), 0, 0, ms);
}
function getCanonicalSessionsDir(homeDir) {
  // Canonical ECC session storage: <home>/.claude/session-data
  const claudeDir = path.join(homeDir, '.claude');
  return path.join(claudeDir, 'session-data');
}
function getLegacySessionsDir(homeDir) {
  // Legacy session storage: <home>/.claude/sessions
  const claudeDir = path.join(homeDir, '.claude');
  return path.join(claudeDir, 'sessions');
}
function getSessionStartAdditionalContext(stdout) {
  // The SessionStart hook must emit a non-empty JSON payload on stdout.
  assert.ok(stdout.trim(), 'Expected SessionStart hook to emit stdout payload');
  const parsed = JSON.parse(stdout);
  const hookOutput = parsed.hookSpecificOutput;
  assert.strictEqual(hookOutput?.hookEventName, 'SessionStart', 'Should emit SessionStart hook payload');
  assert.strictEqual(typeof hookOutput?.additionalContext, 'string', 'Should include additionalContext text');
  return hookOutput.additionalContext;
}
// Test helper
function test(name, fn) {
try {
@@ -336,7 +352,7 @@ async function runTests() {
if (
await asyncTest('exits 0 even with isolated empty HOME', async () => {
const isoHome = path.join(os.tmpdir(), `ecc-iso-start-${Date.now()}`);
fs.mkdirSync(path.join(isoHome, '.claude', 'sessions'), { recursive: true });
fs.mkdirSync(getCanonicalSessionsDir(isoHome), { recursive: true });
fs.mkdirSync(path.join(isoHome, '.claude', 'skills', 'learned'), { recursive: true });
try {
const result = await runScript(path.join(scriptsDir, 'session-start.js'), '', {
@@ -364,7 +380,7 @@ async function runTests() {
if (
await asyncTest('skips template session content', async () => {
const isoHome = path.join(os.tmpdir(), `ecc-tpl-start-${Date.now()}`);
const sessionsDir = path.join(isoHome, '.claude', 'sessions');
const sessionsDir = getLegacySessionsDir(isoHome);
fs.mkdirSync(sessionsDir, { recursive: true });
fs.mkdirSync(path.join(isoHome, '.claude', 'skills', 'learned'), { recursive: true });
@@ -378,8 +394,8 @@ async function runTests() {
USERPROFILE: isoHome
});
assert.strictEqual(result.code, 0);
// stdout should NOT contain the template content
assert.ok(!result.stdout.includes('Previous session summary'), 'Should not inject template session content');
const additionalContext = getSessionStartAdditionalContext(result.stdout);
assert.ok(!additionalContext.includes('Previous session summary'), 'Should not inject template session content');
} finally {
fs.rmSync(isoHome, { recursive: true, force: true });
}
@@ -391,7 +407,7 @@ async function runTests() {
if (
await asyncTest('injects real session content', async () => {
const isoHome = path.join(os.tmpdir(), `ecc-real-start-${Date.now()}`);
const sessionsDir = path.join(isoHome, '.claude', 'sessions');
const sessionsDir = getLegacySessionsDir(isoHome);
fs.mkdirSync(sessionsDir, { recursive: true });
fs.mkdirSync(path.join(isoHome, '.claude', 'skills', 'learned'), { recursive: true });
@@ -405,8 +421,47 @@ async function runTests() {
USERPROFILE: isoHome
});
assert.strictEqual(result.code, 0);
assert.ok(result.stdout.includes('Previous session summary'), 'Should inject real session content');
assert.ok(result.stdout.includes('authentication refactor'), 'Should include session content text');
const additionalContext = getSessionStartAdditionalContext(result.stdout);
assert.ok(additionalContext.includes('Previous session summary'), 'Should inject real session content');
assert.ok(additionalContext.includes('authentication refactor'), 'Should include session content text');
} finally {
fs.rmSync(isoHome, { recursive: true, force: true });
}
})
)
passed++;
else failed++;
if (
await asyncTest('prefers canonical session-data content over legacy duplicates', async () => {
const isoHome = path.join(os.tmpdir(), `ecc-canonical-start-${Date.now()}`);
const canonicalDir = getCanonicalSessionsDir(isoHome);
const legacyDir = getLegacySessionsDir(isoHome);
const now = new Date();
const filename = `${now.toISOString().slice(0, 10)}-dupe1234-session.tmp`;
const canonicalFile = path.join(canonicalDir, filename);
const legacyFile = path.join(legacyDir, filename);
const canonicalTime = new Date(now.getTime() - 60 * 1000);
const legacyTime = new Date(canonicalTime.getTime());
fs.mkdirSync(canonicalDir, { recursive: true });
fs.mkdirSync(legacyDir, { recursive: true });
fs.mkdirSync(path.join(isoHome, '.claude', 'skills', 'learned'), { recursive: true });
fs.writeFileSync(canonicalFile, '# Canonical Session\n\nUse the canonical session-data copy.\n');
fs.writeFileSync(legacyFile, '# Legacy Session\n\nDo not prefer the legacy duplicate.\n');
fs.utimesSync(canonicalFile, canonicalTime, canonicalTime);
fs.utimesSync(legacyFile, legacyTime, legacyTime);
try {
const result = await runScript(path.join(scriptsDir, 'session-start.js'), '', {
HOME: isoHome,
USERPROFILE: isoHome
});
assert.strictEqual(result.code, 0);
const additionalContext = getSessionStartAdditionalContext(result.stdout);
assert.ok(additionalContext.includes('canonical session-data copy'));
assert.ok(!additionalContext.includes('legacy duplicate'));
} finally {
fs.rmSync(isoHome, { recursive: true, force: true });
}
@@ -418,7 +473,7 @@ async function runTests() {
if (
await asyncTest('strips ANSI escape codes from injected session content', async () => {
const isoHome = path.join(os.tmpdir(), `ecc-ansi-start-${Date.now()}`);
const sessionsDir = path.join(isoHome, '.claude', 'sessions');
const sessionsDir = getLegacySessionsDir(isoHome);
fs.mkdirSync(sessionsDir, { recursive: true });
fs.mkdirSync(path.join(isoHome, '.claude', 'skills', 'learned'), { recursive: true });
@@ -434,9 +489,10 @@ async function runTests() {
USERPROFILE: isoHome
});
assert.strictEqual(result.code, 0);
assert.ok(result.stdout.includes('Previous session summary'), 'Should inject real session content');
assert.ok(result.stdout.includes('Windows terminal handling'), 'Should preserve sanitized session text');
assert.ok(!result.stdout.includes('\x1b['), 'Should not emit ANSI escape codes');
const additionalContext = getSessionStartAdditionalContext(result.stdout);
assert.ok(additionalContext.includes('Previous session summary'), 'Should inject real session content');
assert.ok(additionalContext.includes('Windows terminal handling'), 'Should preserve sanitized session text');
assert.ok(!additionalContext.includes('\x1b['), 'Should not emit ANSI escape codes');
} finally {
fs.rmSync(isoHome, { recursive: true, force: true });
}
@@ -450,7 +506,7 @@ async function runTests() {
const isoHome = path.join(os.tmpdir(), `ecc-skills-start-${Date.now()}`);
const learnedDir = path.join(isoHome, '.claude', 'skills', 'learned');
fs.mkdirSync(learnedDir, { recursive: true });
fs.mkdirSync(path.join(isoHome, '.claude', 'sessions'), { recursive: true });
fs.mkdirSync(getCanonicalSessionsDir(isoHome), { recursive: true });
// Create learned skill files
fs.writeFileSync(path.join(learnedDir, 'testing-patterns.md'), '# Testing');
@@ -548,7 +604,7 @@ async function runTests() {
// Check if session file was created
// Note: Without CLAUDE_SESSION_ID, falls back to project/worktree name (not 'default')
// Use local time to match the script's getDateString() function
const sessionsDir = path.join(isoHome, '.claude', 'sessions');
const sessionsDir = getCanonicalSessionsDir(isoHome);
const now = new Date();
const today = `${now.getFullYear()}-${String(now.getMonth() + 1).padStart(2, '0')}-${String(now.getDate()).padStart(2, '0')}`;
@@ -581,7 +637,7 @@ async function runTests() {
// Check if session file was created with session ID
// Use local time to match the script's getDateString() function
const sessionsDir = path.join(isoHome, '.claude', 'sessions');
const sessionsDir = getCanonicalSessionsDir(isoHome);
const now = new Date();
const today = `${now.getFullYear()}-${String(now.getMonth() + 1).padStart(2, '0')}-${String(now.getDate()).padStart(2, '0')}`;
const sessionFile = path.join(sessionsDir, `${today}-${expectedShortId}-session.tmp`);
@@ -614,7 +670,7 @@ async function runTests() {
const now = new Date();
const today = `${now.getFullYear()}-${String(now.getMonth() + 1).padStart(2, '0')}-${String(now.getDate()).padStart(2, '0')}`;
const sessionFile = path.join(isoHome, '.claude', 'sessions', `${today}-${expectedShortId}-session.tmp`);
const sessionFile = path.join(getCanonicalSessionsDir(isoHome), `${today}-${expectedShortId}-session.tmp`);
const content = fs.readFileSync(sessionFile, 'utf8');
assert.ok(content.includes(`**Project:** ${project}`), 'Should persist project metadata');
@@ -652,7 +708,7 @@ async function runTests() {
if (
await asyncTest('creates compaction log', async () => {
await runScript(path.join(scriptsDir, 'pre-compact.js'));
const logFile = path.join(os.homedir(), '.claude', 'sessions', 'compaction-log.txt');
const logFile = path.join(getCanonicalSessionsDir(os.homedir()), 'compaction-log.txt');
assert.ok(fs.existsSync(logFile), 'Compaction log should exist');
})
)
@@ -662,7 +718,7 @@ async function runTests() {
if (
await asyncTest('annotates active session file with compaction marker', async () => {
const isoHome = path.join(os.tmpdir(), `ecc-compact-annotate-${Date.now()}`);
const sessionsDir = path.join(isoHome, '.claude', 'sessions');
const sessionsDir = getCanonicalSessionsDir(isoHome);
fs.mkdirSync(sessionsDir, { recursive: true });
// Create an active .tmp session file
@@ -688,7 +744,7 @@ async function runTests() {
if (
await asyncTest('compaction log contains timestamp', async () => {
const isoHome = path.join(os.tmpdir(), `ecc-compact-ts-${Date.now()}`);
const sessionsDir = path.join(isoHome, '.claude', 'sessions');
const sessionsDir = getCanonicalSessionsDir(isoHome);
fs.mkdirSync(sessionsDir, { recursive: true });
try {
@@ -1544,7 +1600,7 @@ async function runTests() {
assert.strictEqual(result.code, 0, 'Should handle backticks without crash');
// Find the session file in the temp HOME
const claudeDir = path.join(testDir, '.claude', 'sessions');
const claudeDir = getCanonicalSessionsDir(testDir);
if (fs.existsSync(claudeDir)) {
const files = fs.readdirSync(claudeDir).filter(f => f.endsWith('.tmp'));
if (files.length > 0) {
@@ -1579,7 +1635,7 @@ async function runTests() {
});
assert.strictEqual(result.code, 0);
const claudeDir = path.join(testDir, '.claude', 'sessions');
const claudeDir = getCanonicalSessionsDir(testDir);
if (fs.existsSync(claudeDir)) {
const files = fs.readdirSync(claudeDir).filter(f => f.endsWith('.tmp'));
if (files.length > 0) {
@@ -1613,7 +1669,7 @@ async function runTests() {
});
assert.strictEqual(result.code, 0);
const claudeDir = path.join(testDir, '.claude', 'sessions');
const claudeDir = getCanonicalSessionsDir(testDir);
if (fs.existsSync(claudeDir)) {
const files = fs.readdirSync(claudeDir).filter(f => f.endsWith('.tmp'));
if (files.length > 0) {
@@ -1648,7 +1704,7 @@ async function runTests() {
});
assert.strictEqual(result.code, 0);
const claudeDir = path.join(testDir, '.claude', 'sessions');
const claudeDir = getCanonicalSessionsDir(testDir);
if (fs.existsSync(claudeDir)) {
const files = fs.readdirSync(claudeDir).filter(f => f.endsWith('.tmp'));
if (files.length > 0) {
@@ -1686,7 +1742,7 @@ async function runTests() {
});
assert.strictEqual(result.code, 0);
const claudeDir = path.join(testDir, '.claude', 'sessions');
const claudeDir = getCanonicalSessionsDir(testDir);
if (fs.existsSync(claudeDir)) {
const files = fs.readdirSync(claudeDir).filter(f => f.endsWith('.tmp'));
if (files.length > 0) {
@@ -1723,7 +1779,7 @@ async function runTests() {
});
assert.strictEqual(result.code, 0);
const claudeDir = path.join(testDir, '.claude', 'sessions');
const claudeDir = getCanonicalSessionsDir(testDir);
if (fs.existsSync(claudeDir)) {
const files = fs.readdirSync(claudeDir).filter(f => f.endsWith('.tmp'));
if (files.length > 0) {
@@ -1757,7 +1813,7 @@ async function runTests() {
});
assert.strictEqual(result.code, 0);
const claudeDir = path.join(testDir, '.claude', 'sessions');
const claudeDir = getCanonicalSessionsDir(testDir);
if (fs.existsSync(claudeDir)) {
const files = fs.readdirSync(claudeDir).filter(f => f.endsWith('.tmp'));
if (files.length > 0) {
@@ -1800,7 +1856,7 @@ async function runTests() {
});
assert.strictEqual(result.code, 0);
const claudeDir = path.join(testDir, '.claude', 'sessions');
const claudeDir = getCanonicalSessionsDir(testDir);
if (fs.existsSync(claudeDir)) {
const files = fs.readdirSync(claudeDir).filter(f => f.endsWith('.tmp'));
if (files.length > 0) {
@@ -1873,9 +1929,8 @@ async function runTests() {
const isNpx = hook.command.startsWith('npx ');
const isSkillScript = hook.command.includes('/skills/') && (/^(bash|sh)\s/.test(hook.command) || hook.command.startsWith('${CLAUDE_PLUGIN_ROOT}/skills/'));
const isHookShellWrapper = /^(bash|sh)\s+["']?\$\{CLAUDE_PLUGIN_ROOT\}\/scripts\/hooks\/run-with-flags-shell\.sh/.test(hook.command);
const isSessionStartFallback = hook.command.startsWith('bash -lc') && hook.command.includes('run-with-flags.js');
assert.ok(
isNode || isNpx || isSkillScript || isHookShellWrapper || isSessionStartFallback,
isNode || isNpx || isSkillScript || isHookShellWrapper,
`Hook command should use node or approved shell wrapper: ${hook.command.substring(0, 100)}...`
);
}
@@ -1892,7 +1947,25 @@ async function runTests() {
else failed++;
if (
test('script references use CLAUDE_PLUGIN_ROOT variable (except SessionStart fallback)', () => {
test('SessionStart hook uses safe inline resolver without plugin-tree scanning', () => {
const hooksPath = path.join(__dirname, '..', '..', 'hooks', 'hooks.json');
const hooks = JSON.parse(fs.readFileSync(hooksPath, 'utf8'));
const sessionStartHook = hooks.hooks.SessionStart?.[0]?.hooks?.[0];
assert.ok(sessionStartHook, 'Should define a SessionStart hook');
assert.ok(sessionStartHook.command.startsWith('node -e "'), 'SessionStart should use inline node resolver');
assert.ok(sessionStartHook.command.includes('session:start'), 'SessionStart should invoke the session:start profile');
assert.ok(sessionStartHook.command.includes('run-with-flags.js'), 'SessionStart should resolve the runner script');
assert.ok(sessionStartHook.command.includes('CLAUDE_PLUGIN_ROOT'), 'SessionStart should consult CLAUDE_PLUGIN_ROOT');
assert.ok(sessionStartHook.command.includes('plugins'), 'SessionStart should probe known plugin roots');
assert.ok(!sessionStartHook.command.includes('find '), 'Should not scan arbitrary plugin paths with find');
assert.ok(!sessionStartHook.command.includes('head -n 1'), 'Should not pick the first matching plugin path');
})
)
passed++;
else failed++;
if (
test('script references use CLAUDE_PLUGIN_ROOT variable or safe SessionStart inline resolver', () => {
const hooksPath = path.join(__dirname, '..', '..', 'hooks', 'hooks.json');
const hooks = JSON.parse(fs.readFileSync(hooksPath, 'utf8'));
@@ -1901,8 +1974,8 @@ async function runTests() {
for (const hook of entry.hooks) {
if (hook.type === 'command' && hook.command.includes('scripts/hooks/')) {
// Check for the literal string "${CLAUDE_PLUGIN_ROOT}" in the command
const isSessionStartFallback = hook.command.startsWith('bash -lc') && hook.command.includes('run-with-flags.js');
const hasPluginRoot = hook.command.includes('${CLAUDE_PLUGIN_ROOT}') || isSessionStartFallback;
const isSessionStartInlineResolver = hook.command.startsWith('node -e') && hook.command.includes('session:start') && hook.command.includes('run-with-flags.js');
const hasPluginRoot = hook.command.includes('${CLAUDE_PLUGIN_ROOT}') || isSessionStartInlineResolver;
assert.ok(hasPluginRoot, `Script paths should use CLAUDE_PLUGIN_ROOT: ${hook.command.substring(0, 80)}...`);
}
}
@@ -2766,7 +2839,7 @@ async function runTests() {
if (
await asyncTest('updates Last Updated timestamp in existing session file', async () => {
const testDir = createTestDir();
const sessionsDir = path.join(testDir, '.claude', 'sessions');
const sessionsDir = getCanonicalSessionsDir(testDir);
fs.mkdirSync(sessionsDir, { recursive: true });
// Get the expected filename
@@ -2798,7 +2871,7 @@ async function runTests() {
if (
await asyncTest('normalizes existing session headers with project, branch, and worktree metadata', async () => {
const testDir = createTestDir();
const sessionsDir = path.join(testDir, '.claude', 'sessions');
const sessionsDir = getCanonicalSessionsDir(testDir);
fs.mkdirSync(sessionsDir, { recursive: true });
const utils = require('../../scripts/lib/utils');
@@ -2831,7 +2904,7 @@ async function runTests() {
if (
await asyncTest('replaces blank template with summary when updating existing file', async () => {
const testDir = createTestDir();
const sessionsDir = path.join(testDir, '.claude', 'sessions');
const sessionsDir = getCanonicalSessionsDir(testDir);
fs.mkdirSync(sessionsDir, { recursive: true });
const utils = require('../../scripts/lib/utils');
@@ -2869,7 +2942,7 @@ async function runTests() {
if (
await asyncTest('always updates session summary content on session end', async () => {
const testDir = createTestDir();
const sessionsDir = path.join(testDir, '.claude', 'sessions');
const sessionsDir = getCanonicalSessionsDir(testDir);
fs.mkdirSync(sessionsDir, { recursive: true });
const utils = require('../../scripts/lib/utils');
@@ -2906,7 +2979,7 @@ async function runTests() {
if (
await asyncTest('only annotates *-session.tmp files, not other .tmp files', async () => {
const isoHome = path.join(os.tmpdir(), `ecc-compact-glob-${Date.now()}`);
const sessionsDir = path.join(isoHome, '.claude', 'sessions');
const sessionsDir = getCanonicalSessionsDir(isoHome);
fs.mkdirSync(sessionsDir, { recursive: true });
// Create a session .tmp file and a non-session .tmp file
@@ -2937,7 +3010,7 @@ async function runTests() {
if (
await asyncTest('handles no active session files gracefully', async () => {
const isoHome = path.join(os.tmpdir(), `ecc-compact-nosession-${Date.now()}`);
const sessionsDir = path.join(isoHome, '.claude', 'sessions');
const sessionsDir = getCanonicalSessionsDir(isoHome);
fs.mkdirSync(sessionsDir, { recursive: true });
try {
@@ -2976,7 +3049,7 @@ async function runTests() {
assert.strictEqual(result.code, 0);
// With no user messages, extractSessionSummary returns null → blank template
const claudeDir = path.join(testDir, '.claude', 'sessions');
const claudeDir = getCanonicalSessionsDir(testDir);
if (fs.existsSync(claudeDir)) {
const files = fs.readdirSync(claudeDir).filter(f => f.endsWith('.tmp'));
if (files.length > 0) {
@@ -3016,7 +3089,7 @@ async function runTests() {
});
assert.strictEqual(result.code, 0);
const claudeDir = path.join(testDir, '.claude', 'sessions');
const claudeDir = getCanonicalSessionsDir(testDir);
if (fs.existsSync(claudeDir)) {
const files = fs.readdirSync(claudeDir).filter(f => f.endsWith('.tmp'));
if (files.length > 0) {
@@ -3192,7 +3265,7 @@ async function runTests() {
if (
await asyncTest('exits 0 with empty sessions directory (no recent sessions)', async () => {
const isoHome = path.join(os.tmpdir(), `ecc-start-empty-${Date.now()}`);
fs.mkdirSync(path.join(isoHome, '.claude', 'sessions'), { recursive: true });
fs.mkdirSync(getCanonicalSessionsDir(isoHome), { recursive: true });
fs.mkdirSync(path.join(isoHome, '.claude', 'skills', 'learned'), { recursive: true });
try {
const result = await runScript(path.join(scriptsDir, 'session-start.js'), '', {
@@ -3201,7 +3274,8 @@ async function runTests() {
});
assert.strictEqual(result.code, 0, 'Should exit 0 with no sessions');
// Should NOT inject any previous session data (stdout should be empty or minimal)
assert.ok(!result.stdout.includes('Previous session summary'), 'Should not inject when no sessions');
const additionalContext = getSessionStartAdditionalContext(result.stdout);
assert.ok(!additionalContext.includes('Previous session summary'), 'Should not inject when no sessions');
} finally {
fs.rmSync(isoHome, { recursive: true, force: true });
}
@@ -3213,7 +3287,7 @@ async function runTests() {
if (
await asyncTest('does not inject blank template session into context', async () => {
const isoHome = path.join(os.tmpdir(), `ecc-start-blank-${Date.now()}`);
const sessionsDir = path.join(isoHome, '.claude', 'sessions');
const sessionsDir = getCanonicalSessionsDir(isoHome);
fs.mkdirSync(sessionsDir, { recursive: true });
fs.mkdirSync(path.join(isoHome, '.claude', 'skills', 'learned'), { recursive: true });
@@ -3229,7 +3303,8 @@ async function runTests() {
});
assert.strictEqual(result.code, 0);
// Should NOT inject blank template
assert.ok(!result.stdout.includes('Previous session summary'), 'Should skip blank template sessions');
const additionalContext = getSessionStartAdditionalContext(result.stdout);
assert.ok(!additionalContext.includes('Previous session summary'), 'Should skip blank template sessions');
} finally {
fs.rmSync(isoHome, { recursive: true, force: true });
}
@@ -3825,7 +3900,7 @@ async function runTests() {
if (
await asyncTest('annotates only the newest session file when multiple exist', async () => {
const isoHome = path.join(os.tmpdir(), `ecc-compact-multi-${Date.now()}`);
const sessionsDir = path.join(isoHome, '.claude', 'sessions');
const sessionsDir = getCanonicalSessionsDir(isoHome);
fs.mkdirSync(sessionsDir, { recursive: true });
// Create two session files with different mtimes
@@ -3877,7 +3952,7 @@ async function runTests() {
assert.strictEqual(result.code, 0);
// Find the session file and verify newlines were collapsed
const claudeDir = path.join(testDir, '.claude', 'sessions');
const claudeDir = getCanonicalSessionsDir(testDir);
if (fs.existsSync(claudeDir)) {
const files = fs.readdirSync(claudeDir).filter(f => f.endsWith('.tmp'));
if (files.length > 0) {
@@ -3903,7 +3978,7 @@ async function runTests() {
if (
await asyncTest('does not inject empty session file content into context', async () => {
const isoHome = path.join(os.tmpdir(), `ecc-start-empty-file-${Date.now()}`);
const sessionsDir = path.join(isoHome, '.claude', 'sessions');
const sessionsDir = getCanonicalSessionsDir(isoHome);
fs.mkdirSync(sessionsDir, { recursive: true });
fs.mkdirSync(path.join(isoHome, '.claude', 'skills', 'learned'), { recursive: true });
@@ -3919,7 +3994,8 @@ async function runTests() {
});
assert.strictEqual(result.code, 0, 'Should exit 0 with empty session file');
// readFile returns '' (falsy) → the if (content && ...) guard skips injection
assert.ok(!result.stdout.includes('Previous session summary'), 'Should NOT inject empty string into context');
const additionalContext = getSessionStartAdditionalContext(result.stdout);
assert.ok(!additionalContext.includes('Previous session summary'), 'Should NOT inject empty string into context');
} finally {
fs.rmSync(isoHome, { recursive: true, force: true });
}
@@ -3963,7 +4039,7 @@ async function runTests() {
if (
await asyncTest('summary omits Files Modified and Tools Used when none found', async () => {
const isoHome = path.join(os.tmpdir(), `ecc-notools-${Date.now()}`);
const sessionsDir = path.join(isoHome, '.claude', 'sessions');
const sessionsDir = getCanonicalSessionsDir(isoHome);
fs.mkdirSync(sessionsDir, { recursive: true });
const testDir = createTestDir();
@@ -4001,7 +4077,7 @@ async function runTests() {
if (
await asyncTest('reports available session aliases on startup', async () => {
const isoHome = path.join(os.tmpdir(), `ecc-start-alias-${Date.now()}`);
fs.mkdirSync(path.join(isoHome, '.claude', 'sessions'), { recursive: true });
fs.mkdirSync(getCanonicalSessionsDir(isoHome), { recursive: true });
fs.mkdirSync(path.join(isoHome, '.claude', 'skills', 'learned'), { recursive: true });
// Pre-populate the aliases file
@@ -4038,7 +4114,7 @@ async function runTests() {
if (
await asyncTest('parallel compaction runs all append to log without loss', async () => {
const isoHome = path.join(os.tmpdir(), `ecc-compact-par-${Date.now()}`);
const sessionsDir = path.join(isoHome, '.claude', 'sessions');
const sessionsDir = getCanonicalSessionsDir(isoHome);
fs.mkdirSync(sessionsDir, { recursive: true });
try {
@@ -4073,7 +4149,7 @@ async function runTests() {
const isoHome = path.join(os.tmpdir(), `ecc-start-blocked-${Date.now()}`);
fs.mkdirSync(path.join(isoHome, '.claude'), { recursive: true });
// Block sessions dir creation by placing a file at that path
fs.writeFileSync(path.join(isoHome, '.claude', 'sessions'), 'blocked');
fs.writeFileSync(getCanonicalSessionsDir(isoHome), 'blocked');
try {
const result = await runScript(path.join(scriptsDir, 'session-start.js'), '', {
@@ -4136,7 +4212,7 @@ async function runTests() {
if (
await asyncTest('excludes session files older than 7 days', async () => {
const isoHome = path.join(os.tmpdir(), `ecc-start-7day-${Date.now()}`);
const sessionsDir = path.join(isoHome, '.claude', 'sessions');
const sessionsDir = getCanonicalSessionsDir(isoHome);
fs.mkdirSync(sessionsDir, { recursive: true });
fs.mkdirSync(path.join(isoHome, '.claude', 'skills', 'learned'), { recursive: true });
@@ -4159,8 +4235,9 @@ async function runTests() {
});
assert.strictEqual(result.code, 0);
assert.ok(result.stderr.includes('1 recent session'), `Should find 1 recent session (6.9-day included, 8-day excluded), stderr: ${result.stderr}`);
assert.ok(result.stdout.includes('RECENT CONTENT HERE'), 'Should inject the 6.9-day-old session content');
assert.ok(!result.stdout.includes('OLD CONTENT SHOULD NOT APPEAR'), 'Should NOT inject the 8-day-old session content');
const additionalContext = getSessionStartAdditionalContext(result.stdout);
assert.ok(additionalContext.includes('RECENT CONTENT HERE'), 'Should inject the 6.9-day-old session content');
assert.ok(!additionalContext.includes('OLD CONTENT SHOULD NOT APPEAR'), 'Should NOT inject the 8-day-old session content');
} finally {
fs.rmSync(isoHome, { recursive: true, force: true });
}
@@ -4174,7 +4251,7 @@ async function runTests() {
if (
await asyncTest('injects newest session when multiple recent sessions exist', async () => {
const isoHome = path.join(os.tmpdir(), `ecc-start-multi-${Date.now()}`);
const sessionsDir = path.join(isoHome, '.claude', 'sessions');
const sessionsDir = getCanonicalSessionsDir(isoHome);
fs.mkdirSync(sessionsDir, { recursive: true });
fs.mkdirSync(path.join(isoHome, '.claude', 'skills', 'learned'), { recursive: true });
@@ -4198,7 +4275,8 @@ async function runTests() {
assert.strictEqual(result.code, 0);
assert.ok(result.stderr.includes('2 recent session'), `Should find 2 recent sessions, stderr: ${result.stderr}`);
// Should inject the NEWER session, not the older one
assert.ok(result.stdout.includes('NEWER_CONTEXT_MARKER'), 'Should inject the newest session content');
const additionalContext = getSessionStartAdditionalContext(result.stdout);
assert.ok(additionalContext.includes('NEWER_CONTEXT_MARKER'), 'Should inject the newest session content');
} finally {
fs.rmSync(isoHome, { recursive: true, force: true });
}
@@ -4305,7 +4383,7 @@ async function runTests() {
return;
}
const isoHome = path.join(os.tmpdir(), `ecc-start-unreadable-${Date.now()}`);
const sessionsDir = path.join(isoHome, '.claude', 'sessions');
const sessionsDir = getCanonicalSessionsDir(isoHome);
fs.mkdirSync(sessionsDir, { recursive: true });
// Create a session file with real content, then make it unreadable
@@ -4320,7 +4398,8 @@ async function runTests() {
});
assert.strictEqual(result.code, 0, 'Should exit 0 even with unreadable session file');
// readFile returns null for unreadable files → content is null → no injection
assert.ok(!result.stdout.includes('Sensitive session content'), 'Should NOT inject content from unreadable file');
const additionalContext = getSessionStartAdditionalContext(result.stdout);
assert.ok(!additionalContext.includes('Sensitive session content'), 'Should NOT inject content from unreadable file');
} finally {
try {
fs.chmodSync(sessionFile, 0o644);
@@ -4366,7 +4445,7 @@ async function runTests() {
return;
}
const isoHome = path.join(os.tmpdir(), `ecc-compact-ro-${Date.now()}`);
const sessionsDir = path.join(isoHome, '.claude', 'sessions');
const sessionsDir = getCanonicalSessionsDir(isoHome);
fs.mkdirSync(sessionsDir, { recursive: true });
// Create a session file then make it read-only
@@ -4407,7 +4486,7 @@ async function runTests() {
if (
await asyncTest('logs warning when existing session file lacks Last Updated field', async () => {
const isoHome = path.join(os.tmpdir(), `ecc-end-nots-${Date.now()}`);
const sessionsDir = path.join(isoHome, '.claude', 'sessions');
const sessionsDir = getCanonicalSessionsDir(isoHome);
fs.mkdirSync(sessionsDir, { recursive: true });
// Create transcript with a user message so a summary is produced
@@ -4498,7 +4577,7 @@ async function runTests() {
if (
await asyncTest('extracts user messages from role-only format (no type field)', async () => {
const isoHome = path.join(os.tmpdir(), `ecc-role-only-${Date.now()}`);
const sessionsDir = path.join(isoHome, '.claude', 'sessions');
const sessionsDir = getCanonicalSessionsDir(isoHome);
fs.mkdirSync(sessionsDir, { recursive: true });
const testDir = createTestDir();
@@ -4534,7 +4613,7 @@ async function runTests() {
if (
await asyncTest('logs "Transcript not found" for nonexistent transcript_path', async () => {
const isoHome = path.join(os.tmpdir(), `ecc-notfound-${Date.now()}`);
const sessionsDir = path.join(isoHome, '.claude', 'sessions');
const sessionsDir = getCanonicalSessionsDir(isoHome);
fs.mkdirSync(sessionsDir, { recursive: true });
const stdinJson = JSON.stringify({ transcript_path: '/tmp/nonexistent-transcript-99999.jsonl' });
@@ -4563,7 +4642,7 @@ async function runTests() {
if (
await asyncTest('extracts tool name and file path from entry.name/entry.input (not tool_name/tool_input)', async () => {
const isoHome = path.join(os.tmpdir(), `ecc-r70-entryname-${Date.now()}`);
const sessionsDir = path.join(isoHome, '.claude', 'sessions');
const sessionsDir = getCanonicalSessionsDir(isoHome);
fs.mkdirSync(sessionsDir, { recursive: true });
const transcriptPath = path.join(isoHome, 'transcript.jsonl');
@@ -4611,7 +4690,7 @@ async function runTests() {
await asyncTest('shows selection prompt when no package manager preference found (default source)', async () => {
const isoHome = path.join(os.tmpdir(), `ecc-r71-ss-default-${Date.now()}`);
const isoProject = path.join(isoHome, 'project');
fs.mkdirSync(path.join(isoHome, '.claude', 'sessions'), { recursive: true });
fs.mkdirSync(getCanonicalSessionsDir(isoHome), { recursive: true });
fs.mkdirSync(path.join(isoHome, '.claude', 'skills', 'learned'), { recursive: true });
fs.mkdirSync(isoProject, { recursive: true });
// No package.json, no lock files, no package-manager.json — forces default source
@@ -4758,7 +4837,7 @@ async function runTests() {
if (
await asyncTest('extracts user messages from entries where only message.role is user (not type or role)', async () => {
const isoHome = path.join(os.tmpdir(), `ecc-msgrole-${Date.now()}`);
const sessionsDir = path.join(isoHome, '.claude', 'sessions');
const sessionsDir = getCanonicalSessionsDir(isoHome);
fs.mkdirSync(sessionsDir, { recursive: true });
const testDir = createTestDir();
@@ -4825,7 +4904,7 @@ async function runTests() {
// session-end.js line 50-55: rawContent is checked for string, then array, else ''
// When content is a number (42), neither branch matches, text = '', message is skipped.
const isoHome = path.join(os.tmpdir(), `ecc-r81-numcontent-${Date.now()}`);
const sessionsDir = path.join(isoHome, '.claude', 'sessions');
const sessionsDir = getCanonicalSessionsDir(isoHome);
fs.mkdirSync(sessionsDir, { recursive: true });
const transcriptPath = path.join(isoHome, 'transcript.jsonl');
@@ -4874,7 +4953,7 @@ async function runTests() {
if (
await asyncTest('collects tool name from entry with tool_name but non-tool_use type', async () => {
const isoHome = path.join(os.tmpdir(), `ecc-r82-toolname-${Date.now()}`);
const sessionsDir = path.join(isoHome, '.claude', 'sessions');
const sessionsDir = getCanonicalSessionsDir(isoHome);
fs.mkdirSync(sessionsDir, { recursive: true });
const transcriptPath = path.join(isoHome, 'transcript.jsonl');
@@ -4912,7 +4991,7 @@ async function runTests() {
if (
await asyncTest('preserves file when marker present but regex does not match corrupted template', async () => {
const isoHome = path.join(os.tmpdir(), `ecc-r82-tmpl-${Date.now()}`);
const sessionsDir = path.join(isoHome, '.claude', 'sessions');
const sessionsDir = getCanonicalSessionsDir(isoHome);
fs.mkdirSync(sessionsDir, { recursive: true });
const today = new Date().toISOString().split('T')[0];
@@ -5072,7 +5151,7 @@ Some random content without the expected ### Context to Load section
assert.strictEqual(result.code, 0, 'Should exit 0');
// Read the session file to verify tool names and file paths were extracted
const claudeDir = path.join(testDir, '.claude', 'sessions');
const claudeDir = getCanonicalSessionsDir(testDir);
if (fs.existsSync(claudeDir)) {
const files = fs.readdirSync(claudeDir).filter(f => f.endsWith('.tmp'));
if (files.length > 0) {
@@ -5193,7 +5272,7 @@ Some random content without the expected ### Context to Load section
});
assert.strictEqual(result.code, 0, 'Should exit 0');
const claudeDir = path.join(testDir, '.claude', 'sessions');
const claudeDir = getCanonicalSessionsDir(testDir);
if (fs.existsSync(claudeDir)) {
const files = fs.readdirSync(claudeDir).filter(f => f.endsWith('.tmp'));
if (files.length > 0) {

View File

@@ -79,6 +79,25 @@ function runHook(input, env = {}) {
};
}
function runRawHook(rawInput, env = {}) {
  // Run the hook script feeding raw (possibly non-JSON) bytes on stdin.
  // Unlike runHook, the input is passed through untouched so truncated /
  // malformed payload handling can be exercised. Caller-provided env vars
  // override the standard profile.
  const spawnEnv = {
    ...process.env,
    ECC_HOOK_PROFILE: 'standard',
    ...env
  };
  const proc = spawnSync('node', [script], {
    input: rawInput,
    encoding: 'utf8',
    env: spawnEnv,
    timeout: 15000,
    stdio: ['pipe', 'pipe', 'pipe']
  });
  // status is null when the child was killed (signal/timeout); report 0 then,
  // matching the original behavior, and default captured streams to ''.
  return {
    code: proc.status || 0,
    stdout: proc.stdout || '',
    stderr: proc.stderr || ''
  };
}
async function runTests() {
console.log('\n=== Testing mcp-health-check.js ===\n');
@@ -95,6 +114,19 @@ async function runTests() {
assert.strictEqual(result.stderr, '', 'Expected no stderr for non-MCP tool');
})) passed++; else failed++;
if (test('blocks truncated MCP hook input by default', () => {
const rawInput = JSON.stringify({ tool_name: 'mcp__flaky__search', tool_input: {} });
const result = runRawHook(rawInput, {
CLAUDE_HOOK_EVENT_NAME: 'PreToolUse',
ECC_HOOK_INPUT_TRUNCATED: '1',
ECC_HOOK_INPUT_MAX_BYTES: '512'
});
assert.strictEqual(result.code, 2, 'Expected truncated MCP input to block by default');
assert.strictEqual(result.stdout, rawInput, 'Expected raw input passthrough on stdout');
assert.ok(result.stderr.includes('Hook input exceeded 512 bytes'), `Expected size warning, got: ${result.stderr}`);
assert.ok(/blocking search/i.test(result.stderr), `Expected blocking message, got: ${result.stderr}`);
})) passed++; else failed++;
if (await asyncTest('marks healthy command MCP servers and allows the tool call', async () => {
const tempDir = createTempDir();
const configPath = path.join(tempDir, 'claude.json');

View File

@@ -148,6 +148,24 @@ test('analysis temp file is created and cleaned up', () => {
assert.ok(content.includes('rm -f "$prompt_file" "$analysis_file"'), 'Should clean up both prompt and analysis temp files');
});
test('observer-loop uses project-local temp directory for analysis artifacts', () => {
const content = fs.readFileSync(observerLoopPath, 'utf8');
assert.ok(content.includes('observer_tmp_dir="${PROJECT_DIR}/.observer-tmp"'), 'Should keep observer temp files inside the project');
assert.ok(content.includes('mktemp "${observer_tmp_dir}/ecc-observer-analysis.'), 'Analysis temp file should use the project temp dir');
assert.ok(content.includes('mktemp "${observer_tmp_dir}/ecc-observer-prompt.'), 'Prompt temp file should use the project temp dir');
});
test('observer-loop prompt requires direct instinct writes without asking permission', () => {
  // Static check on the observer-loop shell script: the prompt embedded in
  // the `cat > "$prompt_file" <<PROMPT` heredoc must instruct the agent to
  // write instinct files itself, in the same run, without asking first.
  const content = fs.readFileSync(observerLoopPath, 'utf8');
  // Locate the heredoc body: from the redirect line to the terminating
  // "PROMPT" delimiter on its own line (searched after the start marker).
  const heredocStart = content.indexOf('cat > "$prompt_file" <<PROMPT');
  const heredocEnd = content.indexOf('\nPROMPT', heredocStart + 1);
  assert.ok(heredocStart > 0, 'Should find prompt heredoc start');
  assert.ok(heredocEnd > heredocStart, 'Should find prompt heredoc end');
  // Only assert on the prompt text itself, not the rest of the script.
  const promptSection = content.substring(heredocStart, heredocEnd);
  assert.ok(promptSection.includes('MUST write an instinct file directly'), 'Prompt should require direct file creation');
  assert.ok(promptSection.includes('Do NOT ask for permission'), 'Prompt should forbid permission-seeking');
  assert.ok(promptSection.includes('write or update the instinct file in this run'), 'Prompt should require same-run writes');
});
test('prompt references analysis_file not full OBSERVATIONS_FILE', () => {
const content = fs.readFileSync(observerLoopPath, 'utf8');
// The prompt heredoc should reference analysis_file for the Read instruction.

View File

@@ -54,47 +54,56 @@ function getCounterFilePath(sessionId) {
return path.join(os.tmpdir(), `claude-tool-count-${sessionId}`);
}
let counterContextSeq = 0;
function createCounterContext(prefix = 'test-compact') {
  // Mint a collision-free session id (timestamp + monotonically increasing
  // sequence) and hand back its counter-file path plus a cleanup helper.
  counterContextSeq += 1;
  const sessionId = [prefix, Date.now(), counterContextSeq].join('-');
  const counterFile = getCounterFilePath(sessionId);
  const cleanup = () => {
    try {
      fs.unlinkSync(counterFile);
    } catch (_err) {
      // Ignore missing temp files between runs
    }
  };
  return { sessionId, counterFile, cleanup };
}
function runTests() {
console.log('\n=== Testing suggest-compact.js ===\n');
let passed = 0;
let failed = 0;
// Use a unique session ID per test run to avoid collisions
const testSession = `test-compact-${Date.now()}`;
const counterFile = getCounterFilePath(testSession);
// Cleanup helper
function cleanupCounter() {
try {
fs.unlinkSync(counterFile);
} catch (_err) {
// Ignore error
}
}
// Basic functionality
console.log('Basic counter functionality:');
if (test('creates counter file on first run', () => {
cleanupCounter();
const result = runCompact({ CLAUDE_SESSION_ID: testSession });
const { sessionId, counterFile, cleanup } = createCounterContext();
cleanup();
const result = runCompact({ CLAUDE_SESSION_ID: sessionId });
assert.strictEqual(result.code, 0, 'Should exit 0');
assert.ok(fs.existsSync(counterFile), 'Counter file should be created');
const count = parseInt(fs.readFileSync(counterFile, 'utf8').trim(), 10);
assert.strictEqual(count, 1, 'Counter should be 1 after first run');
cleanupCounter();
cleanup();
})) passed++;
else failed++;
if (test('increments counter on subsequent runs', () => {
cleanupCounter();
runCompact({ CLAUDE_SESSION_ID: testSession });
runCompact({ CLAUDE_SESSION_ID: testSession });
runCompact({ CLAUDE_SESSION_ID: testSession });
const { sessionId, counterFile, cleanup } = createCounterContext();
cleanup();
runCompact({ CLAUDE_SESSION_ID: sessionId });
runCompact({ CLAUDE_SESSION_ID: sessionId });
runCompact({ CLAUDE_SESSION_ID: sessionId });
const count = parseInt(fs.readFileSync(counterFile, 'utf8').trim(), 10);
assert.strictEqual(count, 3, 'Counter should be 3 after three runs');
cleanupCounter();
cleanup();
})) passed++;
else failed++;
@@ -102,28 +111,30 @@ function runTests() {
console.log('\nThreshold suggestion:');
if (test('suggests compact at threshold (COMPACT_THRESHOLD=3)', () => {
cleanupCounter();
const { sessionId, cleanup } = createCounterContext();
cleanup();
// Run 3 times with threshold=3
runCompact({ CLAUDE_SESSION_ID: testSession, COMPACT_THRESHOLD: '3' });
runCompact({ CLAUDE_SESSION_ID: testSession, COMPACT_THRESHOLD: '3' });
const result = runCompact({ CLAUDE_SESSION_ID: testSession, COMPACT_THRESHOLD: '3' });
runCompact({ CLAUDE_SESSION_ID: sessionId, COMPACT_THRESHOLD: '3' });
runCompact({ CLAUDE_SESSION_ID: sessionId, COMPACT_THRESHOLD: '3' });
const result = runCompact({ CLAUDE_SESSION_ID: sessionId, COMPACT_THRESHOLD: '3' });
assert.ok(
result.stderr.includes('3 tool calls reached') || result.stderr.includes('consider /compact'),
`Should suggest compact at threshold. Got stderr: ${result.stderr}`
);
cleanupCounter();
cleanup();
})) passed++;
else failed++;
if (test('does NOT suggest compact before threshold', () => {
cleanupCounter();
runCompact({ CLAUDE_SESSION_ID: testSession, COMPACT_THRESHOLD: '5' });
const result = runCompact({ CLAUDE_SESSION_ID: testSession, COMPACT_THRESHOLD: '5' });
const { sessionId, cleanup } = createCounterContext();
cleanup();
runCompact({ CLAUDE_SESSION_ID: sessionId, COMPACT_THRESHOLD: '5' });
const result = runCompact({ CLAUDE_SESSION_ID: sessionId, COMPACT_THRESHOLD: '5' });
assert.ok(
!result.stderr.includes('StrategicCompact'),
'Should NOT suggest compact before threshold'
);
cleanupCounter();
cleanup();
})) passed++;
else failed++;
@@ -131,18 +142,19 @@ function runTests() {
console.log('\nInterval suggestion:');
if (test('suggests at threshold + 25 interval', () => {
cleanupCounter();
const { sessionId, counterFile, cleanup } = createCounterContext();
cleanup();
// Set counter to threshold+24 (so next run = threshold+25)
// threshold=3, so we need count=28 → 25 calls past threshold
// Write 27 to the counter file, next run will be 28 = 3 + 25
fs.writeFileSync(counterFile, '27');
const result = runCompact({ CLAUDE_SESSION_ID: testSession, COMPACT_THRESHOLD: '3' });
const result = runCompact({ CLAUDE_SESSION_ID: sessionId, COMPACT_THRESHOLD: '3' });
// count=28, threshold=3, 28-3=25, 25 % 25 === 0 → should suggest
assert.ok(
result.stderr.includes('28 tool calls') || result.stderr.includes('checkpoint'),
`Should suggest at threshold+25 interval. Got stderr: ${result.stderr}`
);
cleanupCounter();
cleanup();
})) passed++;
else failed++;
@@ -150,42 +162,45 @@ function runTests() {
console.log('\nEnvironment variable handling:');
if (test('uses default threshold (50) when COMPACT_THRESHOLD is not set', () => {
cleanupCounter();
const { sessionId, counterFile, cleanup } = createCounterContext();
cleanup();
// Write counter to 49, next run will be 50 = default threshold
fs.writeFileSync(counterFile, '49');
const result = runCompact({ CLAUDE_SESSION_ID: testSession });
const result = runCompact({ CLAUDE_SESSION_ID: sessionId });
// Remove COMPACT_THRESHOLD from env
assert.ok(
result.stderr.includes('50 tool calls reached'),
`Should use default threshold of 50. Got stderr: ${result.stderr}`
);
cleanupCounter();
cleanup();
})) passed++;
else failed++;
if (test('ignores invalid COMPACT_THRESHOLD (negative)', () => {
cleanupCounter();
const { sessionId, counterFile, cleanup } = createCounterContext();
cleanup();
fs.writeFileSync(counterFile, '49');
const result = runCompact({ CLAUDE_SESSION_ID: testSession, COMPACT_THRESHOLD: '-5' });
const result = runCompact({ CLAUDE_SESSION_ID: sessionId, COMPACT_THRESHOLD: '-5' });
// Invalid threshold falls back to 50
assert.ok(
result.stderr.includes('50 tool calls reached'),
`Should fallback to 50 for negative threshold. Got stderr: ${result.stderr}`
);
cleanupCounter();
cleanup();
})) passed++;
else failed++;
if (test('ignores non-numeric COMPACT_THRESHOLD', () => {
cleanupCounter();
const { sessionId, counterFile, cleanup } = createCounterContext();
cleanup();
fs.writeFileSync(counterFile, '49');
const result = runCompact({ CLAUDE_SESSION_ID: testSession, COMPACT_THRESHOLD: 'abc' });
const result = runCompact({ CLAUDE_SESSION_ID: sessionId, COMPACT_THRESHOLD: 'abc' });
// NaN falls back to 50
assert.ok(
result.stderr.includes('50 tool calls reached'),
`Should fallback to 50 for non-numeric threshold. Got stderr: ${result.stderr}`
);
cleanupCounter();
cleanup();
})) passed++;
else failed++;
@@ -193,38 +208,41 @@ function runTests() {
console.log('\nCorrupted counter file:');
if (test('resets counter on corrupted file content', () => {
cleanupCounter();
const { sessionId, counterFile, cleanup } = createCounterContext();
cleanup();
fs.writeFileSync(counterFile, 'not-a-number');
const result = runCompact({ CLAUDE_SESSION_ID: testSession });
const result = runCompact({ CLAUDE_SESSION_ID: sessionId });
assert.strictEqual(result.code, 0);
// Corrupted file → parsed is NaN → falls back to count=1
const count = parseInt(fs.readFileSync(counterFile, 'utf8').trim(), 10);
assert.strictEqual(count, 1, 'Should reset to 1 on corrupted file');
cleanupCounter();
cleanup();
})) passed++;
else failed++;
if (test('resets counter on extremely large value', () => {
cleanupCounter();
const { sessionId, counterFile, cleanup } = createCounterContext();
cleanup();
// Value > 1000000 should be clamped
fs.writeFileSync(counterFile, '9999999');
const result = runCompact({ CLAUDE_SESSION_ID: testSession });
const result = runCompact({ CLAUDE_SESSION_ID: sessionId });
assert.strictEqual(result.code, 0);
const count = parseInt(fs.readFileSync(counterFile, 'utf8').trim(), 10);
assert.strictEqual(count, 1, 'Should reset to 1 for value > 1000000');
cleanupCounter();
cleanup();
})) passed++;
else failed++;
if (test('handles empty counter file', () => {
cleanupCounter();
const { sessionId, counterFile, cleanup } = createCounterContext();
cleanup();
fs.writeFileSync(counterFile, '');
const result = runCompact({ CLAUDE_SESSION_ID: testSession });
const result = runCompact({ CLAUDE_SESSION_ID: sessionId });
assert.strictEqual(result.code, 0);
// Empty file → bytesRead=0 → count starts at 1
const count = parseInt(fs.readFileSync(counterFile, 'utf8').trim(), 10);
assert.strictEqual(count, 1, 'Should start at 1 for empty file');
cleanupCounter();
cleanup();
})) passed++;
else failed++;
@@ -255,10 +273,11 @@ function runTests() {
console.log('\nExit code:');
if (test('always exits 0 (never blocks Claude)', () => {
cleanupCounter();
const result = runCompact({ CLAUDE_SESSION_ID: testSession });
const { sessionId, cleanup } = createCounterContext();
cleanup();
const result = runCompact({ CLAUDE_SESSION_ID: sessionId });
assert.strictEqual(result.code, 0, 'Should always exit 0');
cleanupCounter();
cleanup();
})) passed++;
else failed++;
@@ -266,48 +285,52 @@ function runTests() {
console.log('\nThreshold boundary values:');
if (test('rejects COMPACT_THRESHOLD=0 (falls back to 50)', () => {
cleanupCounter();
const { sessionId, counterFile, cleanup } = createCounterContext();
cleanup();
fs.writeFileSync(counterFile, '49');
const result = runCompact({ CLAUDE_SESSION_ID: testSession, COMPACT_THRESHOLD: '0' });
const result = runCompact({ CLAUDE_SESSION_ID: sessionId, COMPACT_THRESHOLD: '0' });
// 0 is invalid (must be > 0), falls back to 50, count becomes 50 → should suggest
assert.ok(
result.stderr.includes('50 tool calls reached'),
`Should fallback to 50 for threshold=0. Got stderr: ${result.stderr}`
);
cleanupCounter();
cleanup();
})) passed++;
else failed++;
if (test('accepts COMPACT_THRESHOLD=10000 (boundary max)', () => {
cleanupCounter();
const { sessionId, counterFile, cleanup } = createCounterContext();
cleanup();
fs.writeFileSync(counterFile, '9999');
const result = runCompact({ CLAUDE_SESSION_ID: testSession, COMPACT_THRESHOLD: '10000' });
const result = runCompact({ CLAUDE_SESSION_ID: sessionId, COMPACT_THRESHOLD: '10000' });
// count becomes 10000, threshold=10000 → should suggest
assert.ok(
result.stderr.includes('10000 tool calls reached'),
`Should accept threshold=10000. Got stderr: ${result.stderr}`
);
cleanupCounter();
cleanup();
})) passed++;
else failed++;
if (test('rejects COMPACT_THRESHOLD=10001 (falls back to 50)', () => {
cleanupCounter();
const { sessionId, counterFile, cleanup } = createCounterContext();
cleanup();
fs.writeFileSync(counterFile, '49');
const result = runCompact({ CLAUDE_SESSION_ID: testSession, COMPACT_THRESHOLD: '10001' });
const result = runCompact({ CLAUDE_SESSION_ID: sessionId, COMPACT_THRESHOLD: '10001' });
// 10001 > 10000, invalid, falls back to 50, count becomes 50 → should suggest
assert.ok(
result.stderr.includes('50 tool calls reached'),
`Should fallback to 50 for threshold=10001. Got stderr: ${result.stderr}`
);
cleanupCounter();
cleanup();
})) passed++;
else failed++;
if (test('rejects float COMPACT_THRESHOLD (e.g. 3.5)', () => {
cleanupCounter();
const { sessionId, counterFile, cleanup } = createCounterContext();
cleanup();
fs.writeFileSync(counterFile, '49');
const result = runCompact({ CLAUDE_SESSION_ID: testSession, COMPACT_THRESHOLD: '3.5' });
const result = runCompact({ CLAUDE_SESSION_ID: sessionId, COMPACT_THRESHOLD: '3.5' });
// parseInt('3.5') = 3, which is valid (> 0 && <= 10000)
// count becomes 50, threshold=3, 50-3=47, 47%25≠0 and 50≠3 → no suggestion
assert.strictEqual(result.code, 0);
@@ -316,28 +339,30 @@ function runTests() {
!result.stderr.includes('StrategicCompact'),
'Float threshold should be parseInt-ed to 3, no suggestion at count=50'
);
cleanupCounter();
cleanup();
})) passed++;
else failed++;
if (test('counter value at exact boundary 1000000 is valid', () => {
cleanupCounter();
const { sessionId, counterFile, cleanup } = createCounterContext();
cleanup();
fs.writeFileSync(counterFile, '999999');
runCompact({ CLAUDE_SESSION_ID: testSession, COMPACT_THRESHOLD: '3' });
runCompact({ CLAUDE_SESSION_ID: sessionId, COMPACT_THRESHOLD: '3' });
// 999999 is valid (> 0, <= 1000000), count becomes 1000000
const count = parseInt(fs.readFileSync(counterFile, 'utf8').trim(), 10);
assert.strictEqual(count, 1000000, 'Counter at 1000000 boundary should be valid');
cleanupCounter();
cleanup();
})) passed++;
else failed++;
if (test('counter value at 1000001 is clamped (reset to 1)', () => {
cleanupCounter();
const { sessionId, counterFile, cleanup } = createCounterContext();
cleanup();
fs.writeFileSync(counterFile, '1000001');
runCompact({ CLAUDE_SESSION_ID: testSession });
runCompact({ CLAUDE_SESSION_ID: sessionId });
const count = parseInt(fs.readFileSync(counterFile, 'utf8').trim(), 10);
assert.strictEqual(count, 1, 'Counter > 1000000 should be reset to 1');
cleanupCounter();
cleanup();
})) passed++;
else failed++;

View File

@@ -90,6 +90,14 @@ function runHookWithInput(scriptPath, input = {}, env = {}, timeoutMs = 10000) {
});
}
function getSessionStartPayload(stdout) {
assert.ok(stdout.trim(), 'Expected SessionStart hook to emit stdout payload');
const payload = JSON.parse(stdout);
assert.strictEqual(payload.hookSpecificOutput?.hookEventName, 'SessionStart');
assert.strictEqual(typeof payload.hookSpecificOutput?.additionalContext, 'string');
return payload;
}
/**
* Run a hook command string exactly as declared in hooks.json.
* Supports wrapped node script commands and shell wrappers.
@@ -249,11 +257,14 @@ async function runTests() {
// ==========================================
console.log('\nHook Output Format:');
if (await asyncTest('hooks output messages to stderr (not stdout)', async () => {
if (await asyncTest('session-start logs diagnostics to stderr and emits structured stdout when context exists', async () => {
const result = await runHookWithInput(path.join(scriptsDir, 'session-start.js'), {});
// Session-start should write info to stderr
assert.ok(result.stderr.length > 0, 'Should have stderr output');
assert.ok(result.stderr.includes('[SessionStart]'), 'Should have [SessionStart] prefix');
const payload = getSessionStartPayload(result.stdout);
assert.ok(payload.hookSpecificOutput, 'Should include hookSpecificOutput');
assert.strictEqual(payload.hookSpecificOutput.hookEventName, 'SessionStart');
})) passed++; else failed++;
if (await asyncTest('PreCompact hook logs to stderr', async () => {

View File

@@ -4,8 +4,9 @@
* Covers the ECC root resolution fallback chain:
* 1. CLAUDE_PLUGIN_ROOT env var
* 2. Standard install (~/.claude/)
* 3. Plugin cache auto-detection
* 4. Fallback to ~/.claude/
* 3. Exact legacy plugin roots under ~/.claude/plugins/
* 4. Plugin cache auto-detection
* 5. Fallback to ~/.claude/
*/
const assert = require('assert');
@@ -39,6 +40,13 @@ function setupStandardInstall(homeDir) {
return claudeDir;
}
function setupLegacyPluginInstall(homeDir, segments) {
const legacyDir = path.join(homeDir, '.claude', 'plugins', ...segments);
const scriptDir = path.join(legacyDir, 'scripts', 'lib');
fs.mkdirSync(scriptDir, { recursive: true });
fs.writeFileSync(path.join(scriptDir, 'utils.js'), '// stub');
return legacyDir;
}
function setupPluginCache(homeDir, orgName, version) {
const cacheDir = path.join(
homeDir, '.claude', 'plugins', 'cache',
@@ -103,6 +111,50 @@ function runTests() {
}
})) passed++; else failed++;
if (test('finds exact legacy plugin install at ~/.claude/plugins/everything-claude-code', () => {
const homeDir = createTempDir();
try {
const expected = setupLegacyPluginInstall(homeDir, ['everything-claude-code']);
const result = resolveEccRoot({ envRoot: '', homeDir });
assert.strictEqual(result, expected);
} finally {
fs.rmSync(homeDir, { recursive: true, force: true });
}
})) passed++; else failed++;
if (test('finds exact legacy plugin install at ~/.claude/plugins/everything-claude-code@everything-claude-code', () => {
const homeDir = createTempDir();
try {
const expected = setupLegacyPluginInstall(homeDir, ['everything-claude-code@everything-claude-code']);
const result = resolveEccRoot({ envRoot: '', homeDir });
assert.strictEqual(result, expected);
} finally {
fs.rmSync(homeDir, { recursive: true, force: true });
}
})) passed++; else failed++;
if (test('finds marketplace legacy plugin install at ~/.claude/plugins/marketplace/everything-claude-code', () => {
const homeDir = createTempDir();
try {
const expected = setupLegacyPluginInstall(homeDir, ['marketplace', 'everything-claude-code']);
const result = resolveEccRoot({ envRoot: '', homeDir });
assert.strictEqual(result, expected);
} finally {
fs.rmSync(homeDir, { recursive: true, force: true });
}
})) passed++; else failed++;
if (test('prefers exact legacy plugin install over plugin cache', () => {
const homeDir = createTempDir();
try {
const expected = setupLegacyPluginInstall(homeDir, ['marketplace', 'everything-claude-code']);
setupPluginCache(homeDir, 'everything-claude-code', '1.8.0');
const result = resolveEccRoot({ envRoot: '', homeDir });
assert.strictEqual(result, expected);
} finally {
fs.rmSync(homeDir, { recursive: true, force: true });
}
})) passed++; else failed++;
// ─── Plugin Cache Auto-Detection ───
if (test('discovers plugin root from cache directory', () => {
@@ -207,6 +259,22 @@ function runTests() {
assert.strictEqual(result, '/inline/test/root');
})) passed++; else failed++;
if (test('INLINE_RESOLVE discovers exact legacy plugin root when env var is unset', () => {
const homeDir = createTempDir();
try {
const expected = setupLegacyPluginInstall(homeDir, ['marketplace', 'everything-claude-code']);
const { execFileSync } = require('child_process');
const result = execFileSync('node', [
'-e', `console.log(${INLINE_RESOLVE})`,
], {
env: { PATH: process.env.PATH, HOME: homeDir, USERPROFILE: homeDir },
encoding: 'utf8',
}).trim();
assert.strictEqual(result, expected);
} finally {
fs.rmSync(homeDir, { recursive: true, force: true });
}
})) passed++; else failed++;
if (test('INLINE_RESOLVE discovers plugin cache when env var is unset', () => {
const homeDir = createTempDir();
try {

View File

@@ -341,8 +341,10 @@ src/main.ts
// Override HOME to a temp dir for isolated getAllSessions/getSessionById tests
// On Windows, os.homedir() uses USERPROFILE, not HOME — set both for cross-platform
const tmpHome = path.join(os.tmpdir(), `ecc-session-mgr-test-${Date.now()}`);
const tmpSessionsDir = path.join(tmpHome, '.claude', 'sessions');
fs.mkdirSync(tmpSessionsDir, { recursive: true });
const tmpCanonicalSessionsDir = path.join(tmpHome, '.claude', 'session-data');
const tmpLegacySessionsDir = path.join(tmpHome, '.claude', 'sessions');
fs.mkdirSync(tmpCanonicalSessionsDir, { recursive: true });
fs.mkdirSync(tmpLegacySessionsDir, { recursive: true });
const origHome = process.env.HOME;
const origUserProfile = process.env.USERPROFILE;
@@ -355,7 +357,10 @@ src/main.ts
{ name: '2026-02-10-session.tmp', content: '# Old format session' },
];
for (let i = 0; i < testSessions.length; i++) {
const filePath = path.join(tmpSessionsDir, testSessions[i].name);
const targetDir = testSessions[i].name === '2026-02-10-session.tmp'
? tmpLegacySessionsDir
: tmpCanonicalSessionsDir;
const filePath = path.join(targetDir, testSessions[i].name);
fs.writeFileSync(filePath, testSessions[i].content);
// Stagger modification times so sort order is deterministic
const mtime = new Date(Date.now() - (testSessions.length - i) * 60000);
@@ -399,6 +404,23 @@ src/main.ts
assert.strictEqual(result.sessions[0].shortId, 'abcd1234');
})) passed++; else failed++;
if (test('getAllSessions prefers canonical session-data duplicates over newer legacy copies', () => {
const duplicateName = '2026-01-15-abcd1234-session.tmp';
const legacyDuplicatePath = path.join(tmpLegacySessionsDir, duplicateName);
const legacyMtime = new Date(Date.now() + 60000);
try {
fs.writeFileSync(legacyDuplicatePath, '# Legacy duplicate');
fs.utimesSync(legacyDuplicatePath, legacyMtime, legacyMtime);
const result = sessionManager.getAllSessions({ search: 'abcd', limit: 100 });
assert.strictEqual(result.total, 1, 'Duplicate filenames should be deduped');
assert.ok(result.sessions[0].sessionPath.includes('session-data'), 'Canonical session-data copy should win');
} finally {
fs.rmSync(legacyDuplicatePath, { force: true });
}
})) passed++; else failed++;
if (test('getAllSessions returns sorted by newest first', () => {
const result = sessionManager.getAllSessions({ limit: 100 });
for (let i = 1; i < result.sessions.length; i++) {
@@ -423,8 +445,8 @@ src/main.ts
})) passed++; else failed++;
if (test('getAllSessions ignores non-.tmp files', () => {
fs.writeFileSync(path.join(tmpSessionsDir, 'notes.txt'), 'not a session');
fs.writeFileSync(path.join(tmpSessionsDir, 'compaction-log.txt'), 'log');
fs.writeFileSync(path.join(tmpCanonicalSessionsDir, 'notes.txt'), 'not a session');
fs.writeFileSync(path.join(tmpCanonicalSessionsDir, 'compaction-log.txt'), 'log');
const result = sessionManager.getAllSessions({ limit: 100 });
assert.strictEqual(result.total, 5, 'Should only count .tmp session files');
})) passed++; else failed++;
@@ -444,6 +466,23 @@ src/main.ts
assert.strictEqual(result.shortId, 'abcd1234');
})) passed++; else failed++;
if (test('getSessionById prefers canonical session-data duplicates over newer legacy copies', () => {
const duplicateName = '2026-01-15-abcd1234-session.tmp';
const legacyDuplicatePath = path.join(tmpLegacySessionsDir, duplicateName);
const legacyMtime = new Date(Date.now() + 120000);
try {
fs.writeFileSync(legacyDuplicatePath, '# Legacy duplicate');
fs.utimesSync(legacyDuplicatePath, legacyMtime, legacyMtime);
const result = sessionManager.getSessionById('abcd1234');
assert.ok(result, 'Should still resolve the duplicate session');
assert.ok(result.sessionPath.includes('session-data'), 'Canonical session-data copy should win');
} finally {
fs.rmSync(legacyDuplicatePath, { force: true });
}
})) passed++; else failed++;
if (test('getSessionById finds by full filename', () => {
const result = sessionManager.getSessionById('2026-01-15-abcd1234-session.tmp');
assert.ok(result, 'Should find session by full filename');
@@ -477,6 +516,12 @@ src/main.ts
assert.strictEqual(result, null, 'Empty string should not match any session');
})) passed++; else failed++;
if (test('getSessionById returns null for non-string IDs', () => {
assert.strictEqual(sessionManager.getSessionById(null), null);
assert.strictEqual(sessionManager.getSessionById(undefined), null);
assert.strictEqual(sessionManager.getSessionById(42), null);
})) passed++; else failed++;
if (test('getSessionById metadata and stats populated when includeContent=true', () => {
const result = sessionManager.getSessionById('abcd1234', true);
assert.ok(result, 'Should find session');
@@ -990,7 +1035,7 @@ src/main.ts
assert.ok(result.endsWith(filename), `Path should end with filename, got: ${result}`);
// Since HOME is overridden, sessions dir should be under tmpHome
assert.ok(result.includes('.claude'), 'Path should include .claude directory');
assert.ok(result.includes('sessions'), 'Path should include sessions directory');
assert.ok(result.includes('session-data'), 'Path should use canonical session-data directory');
})) passed++; else failed++;
// ── Round 66: getSessionById noIdMatch path (date-only string for old format) ──
@@ -1601,18 +1646,13 @@ src/main.ts
'Null search should return sessions (confirming they exist but space filtered them)');
})) passed++; else failed++;
// ── Round 98: getSessionById with null sessionId throws TypeError ──
console.log('\nRound 98: getSessionById (null sessionId — crashes at line 297):');
// ── Round 98: getSessionById with null sessionId returns null ──
console.log('\nRound 98: getSessionById (null sessionId — guarded null return):');
if (test('getSessionById(null) throws TypeError when session files exist', () => {
// session-manager.js line 297: `sessionId.length > 0` — calling .length on null
// throws TypeError because there's no early guard for null/undefined input.
// This only surfaces when valid .tmp files exist in the sessions directory.
assert.throws(
() => sessionManager.getSessionById(null),
{ name: 'TypeError' },
'null.length should throw TypeError (no input guard at function entry)'
);
if (test('getSessionById(null) returns null when session files exist', () => {
// Keep a populated sessions directory so the early input guard is exercised even when
// candidate files are present.
assert.strictEqual(sessionManager.getSessionById(null), null);
})) passed++; else failed++;
// Cleanup test environment for Rounds 95-98 that needed sessions
@@ -1629,18 +1669,13 @@ src/main.ts
// best-effort
}
// ── Round 98: parseSessionFilename with null input throws TypeError ──
console.log('\nRound 98: parseSessionFilename (null input — crashes at line 30):');
// ── Round 98: parseSessionFilename with null input returns null ──
console.log('\nRound 98: parseSessionFilename (null input is safely rejected):');
if (test('parseSessionFilename(null) throws TypeError because null has no .match()', () => {
// session-manager.js line 30: `filename.match(SESSION_FILENAME_REGEX)`
// When filename is null, null.match() throws TypeError.
// Function lacks a type guard like `if (!filename || typeof filename !== 'string')`.
assert.throws(
() => sessionManager.parseSessionFilename(null),
{ name: 'TypeError' },
'null.match() should throw TypeError (no type guard on filename parameter)'
);
if (test('parseSessionFilename(null) returns null instead of throwing', () => {
assert.strictEqual(sessionManager.parseSessionFilename(null), null);
assert.strictEqual(sessionManager.parseSessionFilename(undefined), null);
assert.strictEqual(sessionManager.parseSessionFilename(123), null);
})) passed++; else failed++;
// ── Round 99: writeSessionContent with null path returns false (error caught) ──

View File

@@ -7,6 +7,7 @@
const assert = require('assert');
const path = require('path');
const fs = require('fs');
const { spawnSync } = require('child_process');
// Import the module
const utils = require('../../scripts/lib/utils');
@@ -68,7 +69,13 @@ function runTests() {
const sessionsDir = utils.getSessionsDir();
const claudeDir = utils.getClaudeDir();
assert.ok(sessionsDir.startsWith(claudeDir), 'Sessions should be under Claude dir');
assert.ok(sessionsDir.includes('sessions'), 'Should contain sessions');
assert.ok(sessionsDir.endsWith(path.join('.claude', 'session-data')) || sessionsDir.endsWith('/.claude/session-data'), 'Should use canonical session-data directory');
})) passed++; else failed++;
if (test('getSessionSearchDirs includes canonical and legacy paths', () => {
const searchDirs = utils.getSessionSearchDirs();
assert.strictEqual(searchDirs[0], utils.getSessionsDir(), 'Canonical session dir should be searched first');
assert.strictEqual(searchDirs[1], utils.getLegacySessionsDir(), 'Legacy session dir should be searched second');
})) passed++; else failed++;
if (test('getTempDir returns valid temp directory', () => {
@@ -118,17 +125,94 @@ function runTests() {
assert.ok(name && name.length > 0);
})) passed++; else failed++;
// sanitizeSessionId tests
console.log('\nsanitizeSessionId:');
if (test('sanitizeSessionId strips leading dots', () => {
assert.strictEqual(utils.sanitizeSessionId('.claude'), 'claude');
})) passed++; else failed++;
if (test('sanitizeSessionId replaces dots and spaces', () => {
assert.strictEqual(utils.sanitizeSessionId('my.project'), 'my-project');
assert.strictEqual(utils.sanitizeSessionId('my project'), 'my-project');
})) passed++; else failed++;
if (test('sanitizeSessionId replaces special chars and collapses runs', () => {
assert.strictEqual(utils.sanitizeSessionId('project@v2'), 'project-v2');
assert.strictEqual(utils.sanitizeSessionId('a...b'), 'a-b');
})) passed++; else failed++;
if (test('sanitizeSessionId preserves valid chars', () => {
assert.strictEqual(utils.sanitizeSessionId('my-project_123'), 'my-project_123');
})) passed++; else failed++;
if (test('sanitizeSessionId appends hash suffix for all Windows reserved device names', () => {
for (const reservedName of ['CON', 'prn', 'Aux', 'nul', 'COM1', 'lpt9']) {
const sanitized = utils.sanitizeSessionId(reservedName);
assert.ok(sanitized, `Expected sanitized output for ${reservedName}`);
assert.notStrictEqual(sanitized.toUpperCase(), reservedName.toUpperCase());
assert.ok(/-[a-f0-9]{6}$/i.test(sanitized), `Expected deterministic hash suffix for ${reservedName}, got ${sanitized}`);
}
})) passed++; else failed++;
if (test('sanitizeSessionId returns null for empty or punctuation-only values', () => {
assert.strictEqual(utils.sanitizeSessionId(''), null);
assert.strictEqual(utils.sanitizeSessionId(null), null);
assert.strictEqual(utils.sanitizeSessionId(undefined), null);
assert.strictEqual(utils.sanitizeSessionId('...'), null);
assert.strictEqual(utils.sanitizeSessionId('…'), null);
})) passed++; else failed++;
if (test('sanitizeSessionId returns stable hashes for non-ASCII values', () => {
const chinese = utils.sanitizeSessionId('我的项目');
const cyrillic = utils.sanitizeSessionId('проект');
const emoji = utils.sanitizeSessionId('🚀🎉');
assert.ok(/^[a-f0-9]{8}$/.test(chinese), `Expected 8-char hash, got: ${chinese}`);
assert.ok(/^[a-f0-9]{8}$/.test(cyrillic), `Expected 8-char hash, got: ${cyrillic}`);
assert.ok(/^[a-f0-9]{8}$/.test(emoji), `Expected 8-char hash, got: ${emoji}`);
assert.notStrictEqual(chinese, cyrillic);
assert.notStrictEqual(chinese, emoji);
assert.strictEqual(utils.sanitizeSessionId('日本語プロジェクト'), utils.sanitizeSessionId('日本語プロジェクト'));
})) passed++; else failed++;
if (test('sanitizeSessionId disambiguates mixed-script names from pure ASCII', () => {
const mixed = utils.sanitizeSessionId('我的app');
const mixedTwo = utils.sanitizeSessionId('他的app');
const pure = utils.sanitizeSessionId('app');
assert.strictEqual(pure, 'app');
assert.ok(mixed.startsWith('app-'), `Expected mixed-script prefix, got: ${mixed}`);
assert.notStrictEqual(mixed, pure);
assert.notStrictEqual(mixed, mixedTwo);
})) passed++; else failed++;
if (test('sanitizeSessionId is idempotent', () => {
for (const input of ['.claude', 'my.project', 'project@v2', 'a...b', 'my-project_123']) {
const once = utils.sanitizeSessionId(input);
const twice = utils.sanitizeSessionId(once);
assert.strictEqual(once, twice, `Expected idempotent result for ${input}`);
}
})) passed++; else failed++;
if (test('sanitizeSessionId preserves readable prefixes for Windows reserved device names', () => {
const con = utils.sanitizeSessionId('CON');
const aux = utils.sanitizeSessionId('aux');
assert.ok(con.startsWith('CON-'), `Expected CON to get a suffix, got: ${con}`);
assert.ok(aux.startsWith('aux-'), `Expected aux to get a suffix, got: ${aux}`);
assert.notStrictEqual(utils.sanitizeSessionId('COM1'), 'COM1');
})) passed++; else failed++;
// Session ID tests
console.log('\nSession ID Functions:');
if (test('getSessionIdShort falls back to project name', () => {
if (test('getSessionIdShort falls back to sanitized project name', () => {
const original = process.env.CLAUDE_SESSION_ID;
delete process.env.CLAUDE_SESSION_ID;
try {
const shortId = utils.getSessionIdShort();
assert.strictEqual(shortId, utils.getProjectName());
assert.strictEqual(shortId, utils.sanitizeSessionId(utils.getProjectName()));
} finally {
if (original) process.env.CLAUDE_SESSION_ID = original;
if (original !== undefined) process.env.CLAUDE_SESSION_ID = original;
else delete process.env.CLAUDE_SESSION_ID;
}
})) passed++; else failed++;
@@ -154,6 +238,28 @@ function runTests() {
}
})) passed++; else failed++;
if (test('getSessionIdShort sanitizes explicit fallback parameter', () => {
if (process.platform === 'win32') {
console.log(' (skipped — root CWD differs on Windows)');
return true;
}
const utilsPath = path.join(__dirname, '..', '..', 'scripts', 'lib', 'utils.js');
const script = `
const utils = require('${utilsPath.replace(/'/g, "\\'")}');
process.stdout.write(utils.getSessionIdShort('my.fallback'));
`;
const result = spawnSync('node', ['-e', script], {
encoding: 'utf8',
cwd: '/',
env: { ...process.env, CLAUDE_SESSION_ID: '' },
timeout: 10000
});
assert.strictEqual(result.status, 0, `Expected exit 0, got ${result.status}. stderr: ${result.stderr}`);
assert.strictEqual(result.stdout, 'my-fallback');
})) passed++; else failed++;
// File operations tests
console.log('\nFile Operations:');
@@ -1415,25 +1521,26 @@ function runTests() {
// ── Round 97: getSessionIdShort with whitespace-only CLAUDE_SESSION_ID ──
console.log('\nRound 97: getSessionIdShort (whitespace-only session ID):');
if (test('getSessionIdShort returns whitespace when CLAUDE_SESSION_ID is all spaces', () => {
// utils.js line 116: if (sessionId && sessionId.length > 0) — ' ' is truthy
// and has length > 0, so it passes the check instead of falling back.
const original = process.env.CLAUDE_SESSION_ID;
try {
process.env.CLAUDE_SESSION_ID = ' '; // 10 spaces
const result = utils.getSessionIdShort('fallback');
// slice(-8) on 10 spaces returns 8 spaces — not the expected fallback
assert.strictEqual(result, ' ',
'Whitespace-only ID should return 8 trailing spaces (no trim check)');
assert.strictEqual(result.trim().length, 0,
'Result should be entirely whitespace (demonstrating the missing trim)');
} finally {
if (original !== undefined) {
process.env.CLAUDE_SESSION_ID = original;
} else {
delete process.env.CLAUDE_SESSION_ID;
}
if (test('getSessionIdShort sanitizes whitespace-only CLAUDE_SESSION_ID to fallback', () => {
if (process.platform === 'win32') {
console.log(' (skipped — root CWD differs on Windows)');
return true;
}
const utilsPath = path.join(__dirname, '..', '..', 'scripts', 'lib', 'utils.js');
const script = `
const utils = require('${utilsPath.replace(/'/g, "\\'")}');
process.stdout.write(utils.getSessionIdShort('fallback'));
`;
const result = spawnSync('node', ['-e', script], {
encoding: 'utf8',
cwd: '/',
env: { ...process.env, CLAUDE_SESSION_ID: ' ' },
timeout: 10000
});
assert.strictEqual(result.status, 0, `Expected exit 0, got ${result.status}. stderr: ${result.stderr}`);
assert.strictEqual(result.stdout, 'fallback');
})) passed++; else failed++;
// ── Round 97: countInFile with same RegExp object called twice (lastIndex reuse) ──

View File

@@ -0,0 +1,94 @@
/**
* Tests for Codex shell helpers.
*/
const assert = require('assert');
const fs = require('fs');
const os = require('os');
const path = require('path');
const { spawnSync } = require('child_process');
// Repository root: two directories up from this test file.
const repoRoot = path.join(__dirname, '..', '..');
// Installer script under test; its source is read once at load time so the
// string-level (source inspection) assertions below stay cheap.
const installScript = path.join(repoRoot, 'scripts', 'codex', 'install-global-git-hooks.sh');
const installSource = fs.readFileSync(installScript, 'utf8');
/**
 * Run one test case.
 * Logs the test name with a pass/fail marker (the original printed the bare
 * name in both branches, making passing and failing output indistinguishable)
 * and, on failure, the thrown error's message indented beneath it.
 *
 * @param {string} name - Human-readable test description.
 * @param {Function} fn - Test body; a throw marks the test as failed.
 * @returns {boolean} true when fn completed without throwing.
 */
function test(name, fn) {
  try {
    fn();
    console.log(`✓ ${name}`);
    return true;
  } catch (error) {
    console.log(`✗ ${name}`);
    console.log(`  Error: ${error.message}`);
    return false;
  }
}
/**
 * Create a fresh, uniquely-named temporary directory whose basename starts
 * with `prefix`, and return its absolute path.
 */
function createTempDir(prefix) {
  const template = path.join(os.tmpdir(), prefix);
  return fs.mkdtempSync(template);
}
/**
 * Best-effort recursive removal of a directory tree; `force` makes a missing
 * path a no-op rather than an error.
 */
function cleanup(dirPath) {
  const removalOptions = { recursive: true, force: true };
  fs.rmSync(dirPath, removalOptions);
}
/**
 * Convert a native path to the form bash expects. On Windows this rewrites a
 * leading drive letter (`C:` -> `/c`) and flips backslashes to forward
 * slashes; on every other platform the path is returned untouched.
 */
function toBashPath(filePath) {
  if (process.platform === 'win32') {
    return String(filePath)
      .replace(/^([A-Za-z]):/, (_, driveLetter) => `/${driveLetter.toLowerCase()}`)
      .replace(/\\/g, '/');
  }
  return filePath;
}
/**
 * Execute a shell script synchronously via bash, returning the spawnSync
 * result object. Extra `env` entries are layered over the current process
 * environment; `cwd` defaults to the repository root.
 */
function runBash(scriptPath, args = [], env = {}, cwd = repoRoot) {
  const mergedEnv = { ...process.env, ...env };
  const argv = [toBashPath(scriptPath), ...args];
  return spawnSync('bash', argv, {
    cwd,
    env: mergedEnv,
    encoding: 'utf8',
    stdio: ['pipe', 'pipe', 'pipe']
  });
}
// Run every test case in order, tally pass/fail counts, then exit non-zero
// when anything failed so CI surfaces the breakage.
let passed = 0;
let failed = 0;

const outcomes = [
  test('install-global-git-hooks.sh does not use eval and executes argv directly', () => {
    assert.ok(!installSource.includes('eval "$*"'), 'Expected installer to avoid eval');
    assert.ok(installSource.includes(' "$@"'), 'Expected installer to execute argv directly');
    assert.ok(installSource.includes(`printf ' %q' "$@"`), 'Expected dry-run logging to shell-escape argv');
  }),
  test('install-global-git-hooks.sh handles shell-sensitive hook paths without shell injection', () => {
    const homeDir = createTempDir('codex-hooks-home-');
    const weirdHooksDir = path.join(homeDir, "git-hooks 'quoted' & spaced");
    try {
      const result = runBash(installScript, [], {
        HOME: toBashPath(homeDir),
        ECC_GLOBAL_HOOKS_DIR: toBashPath(weirdHooksDir)
      });
      assert.strictEqual(result.status, 0, result.stderr || result.stdout);
      assert.ok(fs.existsSync(path.join(weirdHooksDir, 'pre-commit')));
      assert.ok(fs.existsSync(path.join(weirdHooksDir, 'pre-push')));
    } finally {
      cleanup(homeDir);
    }
  }),
];

for (const ok of outcomes) {
  if (ok) {
    passed += 1;
  } else {
    failed += 1;
  }
}

console.log(`\nPassed: ${passed}`);
console.log(`Failed: ${failed}`);
process.exit(failed > 0 ? 1 : 0);

View File

@@ -94,6 +94,7 @@ function runTests() {
assert.ok(fs.existsSync(path.join(claudeRoot, 'rules', 'typescript', 'testing.md')));
assert.ok(fs.existsSync(path.join(claudeRoot, 'commands', 'plan.md')));
assert.ok(fs.existsSync(path.join(claudeRoot, 'scripts', 'hooks', 'session-end.js')));
assert.ok(fs.existsSync(path.join(claudeRoot, 'scripts', 'lib', 'utils.js')));
assert.ok(fs.existsSync(path.join(claudeRoot, 'skills', 'tdd-workflow', 'SKILL.md')));
assert.ok(fs.existsSync(path.join(claudeRoot, 'skills', 'coding-standards', 'SKILL.md')));
assert.ok(fs.existsSync(path.join(claudeRoot, 'plugin.json')));
@@ -132,6 +133,7 @@ function runTests() {
assert.ok(fs.existsSync(path.join(projectDir, '.cursor', 'commands', 'plan.md')));
assert.ok(fs.existsSync(path.join(projectDir, '.cursor', 'hooks.json')));
assert.ok(fs.existsSync(path.join(projectDir, '.cursor', 'hooks', 'session-start.js')));
assert.ok(fs.existsSync(path.join(projectDir, '.cursor', 'scripts', 'lib', 'utils.js')));
assert.ok(fs.existsSync(path.join(projectDir, '.cursor', 'skills', 'tdd-workflow', 'SKILL.md')));
assert.ok(fs.existsSync(path.join(projectDir, '.cursor', 'skills', 'coding-standards', 'SKILL.md')));
@@ -239,6 +241,7 @@ function runTests() {
assert.ok(fs.existsSync(path.join(claudeRoot, 'commands', 'plan.md')));
assert.ok(fs.existsSync(path.join(claudeRoot, 'hooks', 'hooks.json')));
assert.ok(fs.existsSync(path.join(claudeRoot, 'scripts', 'hooks', 'session-end.js')));
assert.ok(fs.existsSync(path.join(claudeRoot, 'scripts', 'lib', 'session-manager.js')));
assert.ok(fs.existsSync(path.join(claudeRoot, 'plugin.json')));
const state = readJson(path.join(claudeRoot, 'ecc', 'install-state.json'));

View File

@@ -0,0 +1,80 @@
/**
* Source-level tests for scripts/sync-ecc-to-codex.sh
*/
const assert = require('assert');
const fs = require('fs');
const path = require('path');
// Shell script under test; read once at load time for source-level assertions.
const scriptPath = path.join(__dirname, '..', '..', 'scripts', 'sync-ecc-to-codex.sh');
const source = fs.readFileSync(scriptPath, 'utf8');
// Normalize CRLF so the brace scanner below behaves the same on Windows checkouts.
const normalizedSource = source.replace(/\r\n/g, '\n');
// Full text of the shell function `run_or_echo()`, extracted from the script
// source by balanced-brace matching. Yields '' when the function header (or a
// balancing close brace) cannot be found.
// NOTE(review): brace counting is purely textual — a literal '{' or '}' inside
// a quoted string within the shell function would skew the depth; acceptable
// for this script's simple body, but worth confirming if the function grows.
const runOrEchoSource = (() => {
  const start = normalizedSource.indexOf('run_or_echo() {');
  if (start < 0) {
    return '';
  }
  // First '{' at or after the header — this is the function's opening brace.
  // `const` here: the binding is never reassigned (the original used `let`).
  const bodyStart = normalizedSource.indexOf('{', start);
  if (bodyStart < 0) {
    return '';
  }
  let depth = 0;
  for (let i = bodyStart; i < normalizedSource.length; i++) {
    const char = normalizedSource[i];
    if (char === '{') {
      depth += 1;
    } else if (char === '}') {
      depth -= 1;
      if (depth === 0) {
        // Include everything from the header through the matching close brace.
        return normalizedSource.slice(start, i + 1);
      }
    }
  }
  // Unbalanced braces: treat as "not found".
  return '';
})();
/**
 * Run one test case.
 * Logs the test name with a pass/fail marker (the original printed the bare
 * name in both branches, making passing and failing output indistinguishable)
 * and, on failure, the thrown error's message indented beneath it.
 *
 * @param {string} name - Human-readable test description.
 * @param {Function} fn - Test body; a throw marks the test as failed.
 * @returns {boolean} true when fn completed without throwing.
 */
function test(name, fn) {
  try {
    fn();
    console.log(`✓ ${name}`);
    return true;
  } catch (error) {
    console.log(`✗ ${name}`);
    console.log(`  Error: ${error.message}`);
    return false;
  }
}
/**
 * Source-level checks for sync-ecc-to-codex.sh: the run_or_echo helper must
 * execute its argv directly (never through eval), shell-escape its dry-run
 * output, and all filesystem-mutating calls must go through argv-form
 * run_or_echo invocations. Exits with status 1 when any check fails.
 */
function runTests() {
  console.log('\n=== Testing sync-ecc-to-codex.sh ===\n');

  const cases = [
    ['run_or_echo does not use eval', () => {
      assert.ok(runOrEchoSource, 'Expected to locate run_or_echo function body');
      assert.ok(!runOrEchoSource.includes('eval "$@"'), 'run_or_echo should not execute through eval');
    }],
    ['run_or_echo executes argv directly', () => {
      assert.ok(runOrEchoSource.includes(' "$@"'), 'run_or_echo should execute the argv vector directly');
    }],
    ['dry-run output shell-escapes argv', () => {
      assert.ok(runOrEchoSource.includes(`printf ' %q' "$@"`), 'Dry-run mode should print shell-escaped argv');
    }],
    ['filesystem-changing calls use argv-form run_or_echo invocations', () => {
      assert.ok(source.includes('run_or_echo mkdir -p "$BACKUP_DIR"'), 'mkdir should use argv form');
      assert.ok(source.includes('run_or_echo rm -rf "$dest"'), 'rm should use argv form');
      assert.ok(source.includes('run_or_echo cp -R "$skill_dir" "$dest"'), 'recursive copy should use argv form');
    }],
  ];

  let passed = 0;
  let failed = 0;
  for (const [name, fn] of cases) {
    if (test(name, fn)) {
      passed += 1;
    } else {
      failed += 1;
    }
  }

  console.log(`\nResults: Passed: ${passed}, Failed: ${failed}`);
  process.exit(failed > 0 ? 1 : 0);
}
runTests();