mirror of
https://github.com/affaan-m/everything-claude-code.git
synced 2026-03-30 13:43:26 +08:00
Compare commits
47 Commits
1823b441a9
...
f6ebc2a3c2
| Author | SHA1 | Date | |
|---|---|---|---|
|
|
f6ebc2a3c2 | ||
|
|
443986e086 | ||
|
|
c92d3f908f | ||
|
|
b868f42ad1 | ||
|
|
842ff2eff6 | ||
|
|
b678c2f1b0 | ||
|
|
dc11fc2fd8 | ||
|
|
0daa5cb070 | ||
|
|
e2040b46b3 | ||
|
|
c93c218cb8 | ||
|
|
b497135b95 | ||
|
|
554b5d6704 | ||
|
|
bb9df39d96 | ||
|
|
72de0a4e2c | ||
|
|
167b105cac | ||
|
|
b1eb99d961 | ||
|
|
992688a674 | ||
|
|
253645b5e4 | ||
|
|
b3db83d018 | ||
|
|
d903053830 | ||
|
|
6bbcbec23d | ||
|
|
f4758ff8f0 | ||
|
|
4ff4872bf3 | ||
|
|
27dce7794a | ||
|
|
a62a3a2416 | ||
|
|
d9331cb17f | ||
|
|
f33ed4c49e | ||
|
|
2dbba8877b | ||
|
|
5398ac793d | ||
|
|
0e0319a1c2 | ||
|
|
c1919bb879 | ||
|
|
6dcb5daa5c | ||
|
|
e96b522af0 | ||
|
|
34edb59e19 | ||
|
|
37309d47b7 | ||
|
|
3f651b7c3c | ||
|
|
e9343c844b | ||
|
|
7b94b51269 | ||
|
|
6f95dbe7ba | ||
|
|
02120fbf5f | ||
|
|
a4848da38b | ||
|
|
307ee05b2d | ||
|
|
c1b6e0bf11 | ||
|
|
654731f232 | ||
|
|
95f63c3cb0 | ||
|
|
49aee612fb | ||
|
|
4843a06b3a |
@@ -88,7 +88,12 @@ def load_all_instincts() -> list[dict]:
|
||||
for directory in [PERSONAL_DIR, INHERITED_DIR]:
|
||||
if not directory.exists():
|
||||
continue
|
||||
for file in directory.glob("*.yaml"):
|
||||
yaml_files = sorted(
|
||||
set(directory.glob("*.yaml"))
|
||||
| set(directory.glob("*.yml"))
|
||||
| set(directory.glob("*.md"))
|
||||
)
|
||||
for file in yaml_files:
|
||||
try:
|
||||
content = file.read_text()
|
||||
parsed = parse_instinct_file(content)
|
||||
@@ -433,15 +438,96 @@ def cmd_evolve(args):
|
||||
print()
|
||||
|
||||
if args.generate:
|
||||
print("\n[Would generate evolved structures here]")
|
||||
print(" Skills would be saved to:", EVOLVED_DIR / "skills")
|
||||
print(" Commands would be saved to:", EVOLVED_DIR / "commands")
|
||||
print(" Agents would be saved to:", EVOLVED_DIR / "agents")
|
||||
generated = _generate_evolved(skill_candidates, workflow_instincts, agent_candidates)
|
||||
if generated:
|
||||
print(f"\n✅ Generated {len(generated)} evolved structures:")
|
||||
for path in generated:
|
||||
print(f" {path}")
|
||||
else:
|
||||
print("\nNo structures generated (need higher-confidence clusters).")
|
||||
|
||||
print(f"\n{'='*60}\n")
|
||||
return 0
|
||||
|
||||
|
||||
# ─────────────────────────────────────────────
|
||||
# Generate Evolved Structures
|
||||
# ─────────────────────────────────────────────
|
||||
|
||||
def _generate_evolved(skill_candidates: list, workflow_instincts: list, agent_candidates: list) -> list[str]:
|
||||
"""Generate skill/command/agent files from analyzed instinct clusters."""
|
||||
generated = []
|
||||
|
||||
# Generate skills from top candidates
|
||||
for cand in skill_candidates[:5]:
|
||||
trigger = cand['trigger'].strip()
|
||||
if not trigger:
|
||||
continue
|
||||
name = re.sub(r'[^a-z0-9]+', '-', trigger.lower()).strip('-')[:30]
|
||||
if not name:
|
||||
continue
|
||||
|
||||
skill_dir = EVOLVED_DIR / "skills" / name
|
||||
skill_dir.mkdir(parents=True, exist_ok=True)
|
||||
|
||||
content = f"# {name}\n\n"
|
||||
content += f"Evolved from {len(cand['instincts'])} instincts "
|
||||
content += f"(avg confidence: {cand['avg_confidence']:.0%})\n\n"
|
||||
content += f"## When to Apply\n\n"
|
||||
content += f"Trigger: {trigger}\n\n"
|
||||
content += f"## Actions\n\n"
|
||||
for inst in cand['instincts']:
|
||||
inst_content = inst.get('content', '')
|
||||
action_match = re.search(r'## Action\s*\n\s*(.+?)(?:\n\n|\n##|$)', inst_content, re.DOTALL)
|
||||
action = action_match.group(1).strip() if action_match else inst.get('id', 'unnamed')
|
||||
content += f"- {action}\n"
|
||||
|
||||
(skill_dir / "SKILL.md").write_text(content)
|
||||
generated.append(str(skill_dir / "SKILL.md"))
|
||||
|
||||
# Generate commands from workflow instincts
|
||||
for inst in workflow_instincts[:5]:
|
||||
trigger = inst.get('trigger', 'unknown')
|
||||
cmd_name = re.sub(r'[^a-z0-9]+', '-', trigger.lower().replace('when ', '').replace('implementing ', ''))
|
||||
cmd_name = cmd_name.strip('-')[:20]
|
||||
if not cmd_name:
|
||||
continue
|
||||
|
||||
cmd_file = EVOLVED_DIR / "commands" / f"{cmd_name}.md"
|
||||
content = f"# {cmd_name}\n\n"
|
||||
content += f"Evolved from instinct: {inst.get('id', 'unnamed')}\n"
|
||||
content += f"Confidence: {inst.get('confidence', 0.5):.0%}\n\n"
|
||||
content += inst.get('content', '')
|
||||
|
||||
cmd_file.write_text(content)
|
||||
generated.append(str(cmd_file))
|
||||
|
||||
# Generate agents from complex clusters
|
||||
for cand in agent_candidates[:3]:
|
||||
trigger = cand['trigger'].strip()
|
||||
agent_name = re.sub(r'[^a-z0-9]+', '-', trigger.lower()).strip('-')[:20]
|
||||
if not agent_name:
|
||||
continue
|
||||
|
||||
agent_file = EVOLVED_DIR / "agents" / f"{agent_name}.md"
|
||||
domains = ', '.join(cand['domains'])
|
||||
instinct_ids = [i.get('id', 'unnamed') for i in cand['instincts']]
|
||||
|
||||
content = f"---\nmodel: sonnet\ntools: Read, Grep, Glob\n---\n"
|
||||
content += f"# {agent_name}\n\n"
|
||||
content += f"Evolved from {len(cand['instincts'])} instincts "
|
||||
content += f"(avg confidence: {cand['avg_confidence']:.0%})\n"
|
||||
content += f"Domains: {domains}\n\n"
|
||||
content += f"## Source Instincts\n\n"
|
||||
for iid in instinct_ids:
|
||||
content += f"- {iid}\n"
|
||||
|
||||
agent_file.write_text(content)
|
||||
generated.append(str(agent_file))
|
||||
|
||||
return generated
|
||||
|
||||
|
||||
# ─────────────────────────────────────────────
|
||||
# Main
|
||||
# ─────────────────────────────────────────────
|
||||
|
||||
@@ -25,7 +25,7 @@ async function main() {
|
||||
// Track tool call count (increment in a temp file)
|
||||
// Use a session-specific counter file based on session ID from environment
|
||||
// or parent PID as fallback
|
||||
const sessionId = process.env.CLAUDE_SESSION_ID || String(process.ppid) || 'default';
|
||||
const sessionId = process.env.CLAUDE_SESSION_ID || 'default';
|
||||
const counterFile = path.join(getTempDir(), `claude-tool-count-${sessionId}`);
|
||||
const rawThreshold = parseInt(process.env.COMPACT_THRESHOLD || '50', 10);
|
||||
const threshold = Number.isFinite(rawThreshold) && rawThreshold > 0 && rawThreshold <= 10000
|
||||
@@ -44,7 +44,11 @@ async function main() {
|
||||
const bytesRead = fs.readSync(fd, buf, 0, 64, 0);
|
||||
if (bytesRead > 0) {
|
||||
const parsed = parseInt(buf.toString('utf8', 0, bytesRead).trim(), 10);
|
||||
count = Number.isFinite(parsed) ? parsed + 1 : 1;
|
||||
// Clamp to reasonable range — corrupted files could contain huge values
|
||||
// that pass Number.isFinite() (e.g., parseInt('9'.repeat(30)) => 1e+29)
|
||||
count = (Number.isFinite(parsed) && parsed > 0 && parsed <= 1000000)
|
||||
? parsed + 1
|
||||
: 1;
|
||||
}
|
||||
// Truncate and write new value
|
||||
fs.ftruncateSync(fd, 0);
|
||||
@@ -62,8 +66,8 @@ async function main() {
|
||||
log(`[StrategicCompact] ${threshold} tool calls reached - consider /compact if transitioning phases`);
|
||||
}
|
||||
|
||||
// Suggest at regular intervals after threshold
|
||||
if (count > threshold && count % 25 === 0) {
|
||||
// Suggest at regular intervals after threshold (every 25 calls from threshold)
|
||||
if (count > threshold && (count - threshold) % 25 === 0) {
|
||||
log(`[StrategicCompact] ${count} tool calls - good checkpoint for /compact if context is stale`);
|
||||
}
|
||||
|
||||
|
||||
10
.gitignore
vendored
10
.gitignore
vendored
@@ -21,6 +21,16 @@ Thumbs.db
|
||||
# Node
|
||||
node_modules/
|
||||
|
||||
# Build output
|
||||
dist/
|
||||
|
||||
# Python
|
||||
__pycache__/
|
||||
*.pyc
|
||||
|
||||
# Task files (Claude Code teams)
|
||||
tasks/
|
||||
|
||||
# Personal configs (if any)
|
||||
personal/
|
||||
private/
|
||||
|
||||
@@ -1,9 +1,24 @@
|
||||
# OpenCode ECC Plugin
|
||||
|
||||
> ⚠️ This README is specific to OpenCode usage.
|
||||
> If you installed ECC via npm (e.g. `npm install opencode-ecc`), refer to the root README instead.
|
||||
|
||||
Everything Claude Code (ECC) plugin for OpenCode - agents, commands, hooks, and skills.
|
||||
|
||||
## Installation
|
||||
|
||||
## Installation Overview
|
||||
|
||||
There are two ways to use Everything Claude Code (ECC):
|
||||
|
||||
1. **npm package (recommended for most users)**
|
||||
Install via npm/bun/yarn and use the `ecc-install` CLI to set up rules and agents.
|
||||
|
||||
2. **Direct clone / plugin mode**
|
||||
Clone the repository and run OpenCode directly inside it.
|
||||
|
||||
Choose the method that matches your workflow below.
|
||||
|
||||
### Option 1: npm Package
|
||||
|
||||
```bash
|
||||
@@ -17,6 +32,11 @@ Add to your `opencode.json`:
|
||||
"plugin": ["ecc-universal"]
|
||||
}
|
||||
```
|
||||
After installation, the `ecc-install` CLI becomes available:
|
||||
|
||||
```bash
|
||||
npx ecc-install typescript
|
||||
```
|
||||
|
||||
### Option 2: Direct Use
|
||||
|
||||
|
||||
@@ -2,11 +2,11 @@
|
||||
* Everything Claude Code (ECC) Plugin for OpenCode
|
||||
*
|
||||
* This package provides a complete OpenCode plugin with:
|
||||
* - 12 specialized agents (planner, architect, code-reviewer, etc.)
|
||||
* - 24 commands (/plan, /tdd, /code-review, etc.)
|
||||
* - 13 specialized agents (planner, architect, code-reviewer, etc.)
|
||||
* - 31 commands (/plan, /tdd, /code-review, etc.)
|
||||
* - Plugin hooks (auto-format, TypeScript check, console.log warning, etc.)
|
||||
* - Custom tools (run-tests, check-coverage, security-audit)
|
||||
* - 16 skills (coding-standards, security-review, tdd-workflow, etc.)
|
||||
* - 37 skills (coding-standards, security-review, tdd-workflow, etc.)
|
||||
*
|
||||
* Usage:
|
||||
*
|
||||
@@ -39,7 +39,7 @@ export { ECCHooksPlugin, default } from "./plugins/index.js"
|
||||
export * from "./plugins/index.js"
|
||||
|
||||
// Version export
|
||||
export const VERSION = "1.0.0"
|
||||
export const VERSION = "1.4.1"
|
||||
|
||||
// Plugin metadata
|
||||
export const metadata = {
|
||||
@@ -48,9 +48,9 @@ export const metadata = {
|
||||
description: "Everything Claude Code plugin for OpenCode",
|
||||
author: "affaan-m",
|
||||
features: {
|
||||
agents: 12,
|
||||
commands: 24,
|
||||
skills: 16,
|
||||
agents: 13,
|
||||
commands: 31,
|
||||
skills: 37,
|
||||
hookEvents: [
|
||||
"file.edited",
|
||||
"tool.execute.before",
|
||||
|
||||
83
.opencode/package-lock.json
generated
Normal file
83
.opencode/package-lock.json
generated
Normal file
@@ -0,0 +1,83 @@
|
||||
{
|
||||
"name": "ecc-universal",
|
||||
"version": "1.4.1",
|
||||
"lockfileVersion": 3,
|
||||
"requires": true,
|
||||
"packages": {
|
||||
"": {
|
||||
"name": "ecc-universal",
|
||||
"version": "1.4.1",
|
||||
"license": "MIT",
|
||||
"devDependencies": {
|
||||
"@opencode-ai/plugin": "^1.0.0",
|
||||
"@types/node": "^20.0.0",
|
||||
"typescript": "^5.3.0"
|
||||
},
|
||||
"engines": {
|
||||
"node": ">=18.0.0"
|
||||
},
|
||||
"peerDependencies": {
|
||||
"@opencode-ai/plugin": ">=1.0.0"
|
||||
}
|
||||
},
|
||||
"node_modules/@opencode-ai/plugin": {
|
||||
"version": "1.1.53",
|
||||
"resolved": "https://registry.npmjs.org/@opencode-ai/plugin/-/plugin-1.1.53.tgz",
|
||||
"integrity": "sha512-9ye7Wz2kESgt02AUDaMea4hXxj6XhWwKAG8NwFhrw09Ux54bGaMJFt1eIS8QQGIMaD+Lp11X4QdyEg96etEBJw==",
|
||||
"dev": true,
|
||||
"license": "MIT",
|
||||
"dependencies": {
|
||||
"@opencode-ai/sdk": "1.1.53",
|
||||
"zod": "4.1.8"
|
||||
}
|
||||
},
|
||||
"node_modules/@opencode-ai/sdk": {
|
||||
"version": "1.1.53",
|
||||
"resolved": "https://registry.npmjs.org/@opencode-ai/sdk/-/sdk-1.1.53.tgz",
|
||||
"integrity": "sha512-RUIVnPOP1CyyU32FrOOYuE7Ge51lOBuhaFp2NSX98ncApT7ffoNetmwzqrhOiJQgZB1KrbCHLYOCK6AZfacxag==",
|
||||
"dev": true,
|
||||
"license": "MIT"
|
||||
},
|
||||
"node_modules/@types/node": {
|
||||
"version": "20.19.33",
|
||||
"resolved": "https://registry.npmjs.org/@types/node/-/node-20.19.33.tgz",
|
||||
"integrity": "sha512-Rs1bVAIdBs5gbTIKza/tgpMuG1k3U/UMJLWecIMxNdJFDMzcM5LOiLVRYh3PilWEYDIeUDv7bpiHPLPsbydGcw==",
|
||||
"dev": true,
|
||||
"license": "MIT",
|
||||
"dependencies": {
|
||||
"undici-types": "~6.21.0"
|
||||
}
|
||||
},
|
||||
"node_modules/typescript": {
|
||||
"version": "5.9.3",
|
||||
"resolved": "https://registry.npmjs.org/typescript/-/typescript-5.9.3.tgz",
|
||||
"integrity": "sha512-jl1vZzPDinLr9eUt3J/t7V6FgNEw9QjvBPdysz9KfQDD41fQrC2Y4vKQdiaUpFT4bXlb1RHhLpp8wtm6M5TgSw==",
|
||||
"dev": true,
|
||||
"license": "Apache-2.0",
|
||||
"bin": {
|
||||
"tsc": "bin/tsc",
|
||||
"tsserver": "bin/tsserver"
|
||||
},
|
||||
"engines": {
|
||||
"node": ">=14.17"
|
||||
}
|
||||
},
|
||||
"node_modules/undici-types": {
|
||||
"version": "6.21.0",
|
||||
"resolved": "https://registry.npmjs.org/undici-types/-/undici-types-6.21.0.tgz",
|
||||
"integrity": "sha512-iwDZqg0QAGrg9Rav5H4n0M64c3mkR59cJ6wQp+7C4nI0gsmExaedaYLNO44eT4AtBBwjbTiGPMlt2Md0T9H9JQ==",
|
||||
"dev": true,
|
||||
"license": "MIT"
|
||||
},
|
||||
"node_modules/zod": {
|
||||
"version": "4.1.8",
|
||||
"resolved": "https://registry.npmjs.org/zod/-/zod-4.1.8.tgz",
|
||||
"integrity": "sha512-5R1P+WwQqmmMIEACyzSvo4JXHY5WiAFHRMg+zBZKgKS+Q1viRa0C1hmUKtHltoIFKtIdki3pRxkmpP74jnNYHQ==",
|
||||
"dev": true,
|
||||
"license": "MIT",
|
||||
"funding": {
|
||||
"url": "https://github.com/sponsors/colinhacks"
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
@@ -1,6 +1,6 @@
|
||||
{
|
||||
"name": "ecc-universal",
|
||||
"version": "1.0.0",
|
||||
"version": "1.4.1",
|
||||
"description": "Everything Claude Code (ECC) plugin for OpenCode - agents, commands, hooks, and skills",
|
||||
"main": "dist/index.js",
|
||||
"types": "dist/index.d.ts",
|
||||
|
||||
@@ -1,6 +1,18 @@
|
||||
# 为 Everything Claude Code 做贡献
|
||||
|
||||
感谢您希望做出贡献。这个仓库旨在成为 Claude Code 用户的社区资源。
|
||||
感谢您想要贡献!这个仓库是 Claude Code 用户的社区资源。
|
||||
|
||||
## 目录
|
||||
|
||||
* [我们正在寻找的内容](#我们寻找什么)
|
||||
* [快速开始](#快速开始)
|
||||
* [贡献技能](#贡献技能)
|
||||
* [贡献智能体](#贡献智能体)
|
||||
* [贡献钩子](#贡献钩子)
|
||||
* [贡献命令](#贡献命令)
|
||||
* [拉取请求流程](#拉取请求流程)
|
||||
|
||||
***
|
||||
|
||||
## 我们寻找什么
|
||||
|
||||
@@ -21,16 +33,6 @@
|
||||
* 框架模式
|
||||
* 测试策略
|
||||
* 架构指南
|
||||
* 领域特定知识
|
||||
|
||||
### 命令
|
||||
|
||||
调用有用工作流的斜杠命令:
|
||||
|
||||
* 部署命令
|
||||
* 测试命令
|
||||
* 文档命令
|
||||
* 代码生成命令
|
||||
|
||||
### 钩子
|
||||
|
||||
@@ -41,124 +43,365 @@
|
||||
* 验证钩子
|
||||
* 通知钩子
|
||||
|
||||
### 规则
|
||||
### 命令
|
||||
|
||||
始终遵循的指导原则:
|
||||
调用有用工作流的斜杠命令:
|
||||
|
||||
* 安全规则
|
||||
* 代码风格规则
|
||||
* 测试要求
|
||||
* 命名约定
|
||||
|
||||
### MCP 配置
|
||||
|
||||
新的或改进的 MCP 服务器配置:
|
||||
|
||||
* 数据库集成
|
||||
* 云提供商 MCP
|
||||
* 监控工具
|
||||
* 通讯工具
|
||||
* 部署命令
|
||||
* 测试命令
|
||||
* 代码生成命令
|
||||
|
||||
***
|
||||
|
||||
## 如何贡献
|
||||
|
||||
### 1. Fork 仓库
|
||||
## 快速开始
|
||||
|
||||
```bash
|
||||
git clone https://github.com/YOUR_USERNAME/everything-claude-code.git
|
||||
# 1. Fork and clone
|
||||
gh repo fork affaan-m/everything-claude-code --clone
|
||||
cd everything-claude-code
|
||||
|
||||
# 2. Create a branch
|
||||
git checkout -b feat/my-contribution
|
||||
|
||||
# 3. Add your contribution (see sections below)
|
||||
|
||||
# 4. Test locally
|
||||
cp -r skills/my-skill ~/.claude/skills/ # for skills
|
||||
# Then test with Claude Code
|
||||
|
||||
# 5. Submit PR
|
||||
git add . && git commit -m "feat: add my-skill" && git push
|
||||
```
|
||||
|
||||
### 2. 创建一个分支
|
||||
***
|
||||
|
||||
```bash
|
||||
git checkout -b add-python-reviewer
|
||||
## 贡献技能
|
||||
|
||||
技能是 Claude Code 根据上下文加载的知识模块。
|
||||
|
||||
### 目录结构
|
||||
|
||||
```
|
||||
skills/
|
||||
└── your-skill-name/
|
||||
└── SKILL.md
|
||||
```
|
||||
|
||||
### 3. 添加您的贡献
|
||||
|
||||
将文件放在适当的目录中:
|
||||
|
||||
* `agents/` 用于新的智能体
|
||||
* `skills/` 用于技能(可以是单个 .md 文件或目录)
|
||||
* `commands/` 用于斜杠命令
|
||||
* `rules/` 用于规则文件
|
||||
* `hooks/` 用于钩子配置
|
||||
* `mcp-configs/` 用于 MCP 服务器配置
|
||||
|
||||
### 4. 遵循格式
|
||||
|
||||
**智能体** 应包含 frontmatter:
|
||||
### SKILL.md 模板
|
||||
|
||||
```markdown
|
||||
---
|
||||
name: agent-name
|
||||
description: What it does
|
||||
tools: Read, Grep, Glob, Bash
|
||||
name: your-skill-name
|
||||
description: Brief description shown in skill list
|
||||
---
|
||||
|
||||
# 你的技能标题
|
||||
|
||||
简要概述此技能涵盖的内容。
|
||||
|
||||
## 核心概念
|
||||
|
||||
解释关键模式和准则。
|
||||
|
||||
## 代码示例
|
||||
|
||||
```typescript
|
||||
|
||||
// 包含实用、经过测试的示例
|
||||
function example() {
|
||||
// 注释良好的代码
|
||||
}
|
||||
```
|
||||
|
||||
|
||||
## 最佳实践
|
||||
|
||||
- 可操作的指导原则
|
||||
- 该做与不该做的事项
|
||||
- 需要避免的常见陷阱
|
||||
|
||||
## 适用场景
|
||||
|
||||
描述此技能适用的场景。
|
||||
|
||||
```
|
||||
|
||||
### 技能清单
|
||||
|
||||
* \[ ] 专注于一个领域/技术
|
||||
* \[ ] 包含实用的代码示例
|
||||
* \[ ] 少于 500 行
|
||||
* \[ ] 使用清晰的章节标题
|
||||
* \[ ] 已通过 Claude Code 测试
|
||||
|
||||
### 技能示例
|
||||
|
||||
| 技能 | 目的 |
|
||||
|-------|---------|
|
||||
| `coding-standards/` | TypeScript/JavaScript 模式 |
|
||||
| `frontend-patterns/` | React 和 Next.js 最佳实践 |
|
||||
| `backend-patterns/` | API 和数据库模式 |
|
||||
| `security-review/` | 安全检查清单 |
|
||||
|
||||
***
|
||||
|
||||
## 贡献智能体
|
||||
|
||||
智能体是通过任务工具调用的专业助手。
|
||||
|
||||
### 文件位置
|
||||
|
||||
```
|
||||
agents/your-agent-name.md
|
||||
```
|
||||
|
||||
### 智能体模板
|
||||
|
||||
```markdown
|
||||
---
|
||||
name: 你的代理名称
|
||||
description: 该代理的作用以及 Claude 应在何时调用它。请具体说明!
|
||||
tools: ["Read", "Write", "Edit", "Bash", "Grep", "Glob"]
|
||||
model: sonnet
|
||||
---
|
||||
|
||||
Instructions here...
|
||||
你是一名 [角色] 专家。
|
||||
|
||||
## 你的角色
|
||||
|
||||
- 主要职责
|
||||
- 次要职责
|
||||
- 你不做的事情(界限)
|
||||
|
||||
## 工作流程
|
||||
|
||||
### 步骤 1:理解
|
||||
你如何着手处理任务。
|
||||
|
||||
### 步骤 2:执行
|
||||
你如何开展工作。
|
||||
|
||||
### 步骤 3:验证
|
||||
你如何验证结果。
|
||||
|
||||
## 输出格式
|
||||
|
||||
你返回给用户的内容。
|
||||
|
||||
## 示例
|
||||
|
||||
### 示例:[场景]
|
||||
输入:[用户提供的内容]
|
||||
操作:[你做了什么]
|
||||
输出:[你返回的内容]
|
||||
|
||||
```
|
||||
|
||||
**技能** 应清晰且可操作:
|
||||
### 智能体字段
|
||||
|
||||
```markdown
|
||||
# Skill Name
|
||||
| 字段 | 描述 | 选项 |
|
||||
|-------|-------------|---------|
|
||||
| `name` | 小写,用连字符连接 | `code-reviewer` |
|
||||
| `description` | 用于决定何时调用 | 要具体! |
|
||||
| `tools` | 仅包含必要的内容 | `Read, Write, Edit, Bash, Grep, Glob, WebFetch, Task` |
|
||||
| `model` | 复杂度级别 | `haiku` (简单), `sonnet` (编码), `opus` (复杂) |
|
||||
|
||||
## When to Use
|
||||
### 智能体示例
|
||||
|
||||
...
|
||||
| 智能体 | 目的 |
|
||||
|-------|---------|
|
||||
| `tdd-guide.md` | 测试驱动开发 |
|
||||
| `code-reviewer.md` | 代码审查 |
|
||||
| `security-reviewer.md` | 安全扫描 |
|
||||
| `build-error-resolver.md` | 修复构建错误 |
|
||||
|
||||
## How It Works
|
||||
***
|
||||
|
||||
...
|
||||
## 贡献钩子
|
||||
|
||||
## Examples
|
||||
钩子是由 Claude Code 事件触发的自动行为。
|
||||
|
||||
...
|
||||
### 文件位置
|
||||
|
||||
```
|
||||
hooks/hooks.json
|
||||
```
|
||||
|
||||
**命令** 应解释其功能:
|
||||
### 钩子类型
|
||||
|
||||
```markdown
|
||||
---
|
||||
description: Brief description of command
|
||||
---
|
||||
| 类型 | 触发条件 | 用例 |
|
||||
|------|---------|----------|
|
||||
| `PreToolUse` | 工具运行前 | 验证、警告、阻止 |
|
||||
| `PostToolUse` | 工具运行后 | 格式化、检查、通知 |
|
||||
| `SessionStart` | 会话开始时 | 加载上下文 |
|
||||
| `Stop` | 会话结束时 | 清理、审计 |
|
||||
|
||||
# Command Name
|
||||
|
||||
Detailed instructions...
|
||||
```
|
||||
|
||||
**钩子** 应包含描述:
|
||||
### 钩子格式
|
||||
|
||||
```json
|
||||
{
|
||||
"matcher": "...",
|
||||
"hooks": [...],
|
||||
"description": "What this hook does"
|
||||
"hooks": {
|
||||
"PreToolUse": [
|
||||
{
|
||||
"matcher": "tool == \"Bash\" && tool_input.command matches \"rm -rf /\"",
|
||||
"hooks": [
|
||||
{
|
||||
"type": "command",
|
||||
"command": "echo '[Hook] BLOCKED: Dangerous command' && exit 1"
|
||||
}
|
||||
],
|
||||
"description": "Block dangerous rm commands"
|
||||
}
|
||||
]
|
||||
}
|
||||
}
|
||||
```
|
||||
|
||||
### 5. 测试您的贡献
|
||||
### 匹配器语法
|
||||
|
||||
在提交之前,请确保您的配置能在 Claude Code 中正常工作。
|
||||
```javascript
|
||||
// Match specific tools
|
||||
tool == "Bash"
|
||||
tool == "Edit"
|
||||
tool == "Write"
|
||||
|
||||
### 6. 提交 PR
|
||||
// Match input patterns
|
||||
tool_input.command matches "npm install"
|
||||
tool_input.file_path matches "\\.tsx?$"
|
||||
|
||||
```bash
|
||||
git add .
|
||||
git commit -m "Add Python code reviewer agent"
|
||||
git push origin add-python-reviewer
|
||||
// Combine conditions
|
||||
tool == "Bash" && tool_input.command matches "git push"
|
||||
```
|
||||
|
||||
然后提交一个 PR,包含以下内容:
|
||||
### 钩子示例
|
||||
|
||||
* 您添加了什么
|
||||
* 为什么它有用
|
||||
* 您是如何测试的
|
||||
```json
|
||||
// Block dev servers outside tmux
|
||||
{
|
||||
"matcher": "tool == \"Bash\" && tool_input.command matches \"npm run dev\"",
|
||||
"hooks": [{"type": "command", "command": "echo 'Use tmux for dev servers' && exit 1"}],
|
||||
"description": "Ensure dev servers run in tmux"
|
||||
}
|
||||
|
||||
// Auto-format after editing TypeScript
|
||||
{
|
||||
"matcher": "tool == \"Edit\" && tool_input.file_path matches \"\\.tsx?$\"",
|
||||
"hooks": [{"type": "command", "command": "npx prettier --write \"$file_path\""}],
|
||||
"description": "Format TypeScript files after edit"
|
||||
}
|
||||
|
||||
// Warn before git push
|
||||
{
|
||||
"matcher": "tool == \"Bash\" && tool_input.command matches \"git push\"",
|
||||
"hooks": [{"type": "command", "command": "echo '[Hook] Review changes before pushing'"}],
|
||||
"description": "Reminder to review before push"
|
||||
}
|
||||
```
|
||||
|
||||
### 钩子清单
|
||||
|
||||
* \[ ] 匹配器具体(不过于宽泛)
|
||||
* \[ ] 包含清晰的错误/信息消息
|
||||
* \[ ] 使用正确的退出代码 (`exit 1` 阻止, `exit 0` 允许)
|
||||
* \[ ] 经过充分测试
|
||||
* \[ ] 有描述
|
||||
|
||||
***
|
||||
|
||||
## 贡献命令
|
||||
|
||||
命令是用户通过 `/command-name` 调用的操作。
|
||||
|
||||
### 文件位置
|
||||
|
||||
```
|
||||
commands/your-command.md
|
||||
```
|
||||
|
||||
### 命令模板
|
||||
|
||||
```markdown
|
||||
---
|
||||
description: 在 /help 中显示的简要描述
|
||||
---
|
||||
|
||||
# 命令名称
|
||||
|
||||
## 目的
|
||||
|
||||
此命令的功能。
|
||||
|
||||
## 用法
|
||||
|
||||
```
|
||||
|
||||
/your-command [args]
|
||||
```
|
||||
|
||||
|
||||
## 工作流程
|
||||
|
||||
1. 第一步
|
||||
2. 第二步
|
||||
3. 最后一步
|
||||
|
||||
## 输出
|
||||
|
||||
用户将收到的内容。
|
||||
|
||||
```
|
||||
|
||||
### 命令示例
|
||||
|
||||
| 命令 | 目的 |
|
||||
|---------|---------|
|
||||
| `commit.md` | 创建 git 提交 |
|
||||
| `code-review.md` | 审查代码变更 |
|
||||
| `tdd.md` | TDD 工作流 |
|
||||
| `e2e.md` | E2E 测试 |
|
||||
|
||||
***
|
||||
|
||||
## 拉取请求流程
|
||||
|
||||
### 1. PR 标题格式
|
||||
|
||||
```
|
||||
feat(skills): add rust-patterns skill
|
||||
feat(agents): add api-designer agent
|
||||
feat(hooks): add auto-format hook
|
||||
fix(skills): update React patterns
|
||||
docs: improve contributing guide
|
||||
```
|
||||
|
||||
### 2. PR 描述
|
||||
|
||||
```markdown
|
||||
## 摘要
|
||||
你正在添加什么以及为什么添加。
|
||||
|
||||
## 类型
|
||||
- [ ] 技能
|
||||
- [ ] 代理
|
||||
- [ ] 钩子
|
||||
- [ ] 命令
|
||||
|
||||
## 测试
|
||||
你是如何测试这个的。
|
||||
|
||||
## 检查清单
|
||||
- [ ] 遵循格式指南
|
||||
- [ ] 已使用 Claude Code 进行测试
|
||||
- [ ] 无敏感信息(API 密钥、路径)
|
||||
- [ ] 描述清晰
|
||||
|
||||
```
|
||||
|
||||
### 3. 审查流程
|
||||
|
||||
1. 维护者在 48 小时内审查
|
||||
2. 如有要求,请处理反馈
|
||||
3. 一旦批准,合并到主分支
|
||||
|
||||
***
|
||||
|
||||
@@ -166,34 +409,34 @@ git push origin add-python-reviewer
|
||||
|
||||
### 应该做的
|
||||
|
||||
* 保持配置专注且模块化
|
||||
* 保持贡献内容专注和模块化
|
||||
* 包含清晰的描述
|
||||
* 提交前进行测试
|
||||
* 遵循现有模式
|
||||
* 记录任何依赖项
|
||||
* 记录依赖项
|
||||
|
||||
### 不应该做的
|
||||
|
||||
* 包含敏感数据(API 密钥、令牌、路径)
|
||||
* 添加过于复杂或小众的配置
|
||||
* 提交未经测试的配置
|
||||
* 创建重复的功能
|
||||
* 添加需要特定付费服务且没有替代方案的配置
|
||||
* 提交未经测试的贡献
|
||||
* 创建现有功能的重复项
|
||||
|
||||
***
|
||||
|
||||
## 文件命名
|
||||
|
||||
* 使用小写字母和连字符:`python-reviewer.md`
|
||||
* 要有描述性:`tdd-workflow.md` 而不是 `workflow.md`
|
||||
* 确保智能体/技能名称与文件名匹配
|
||||
* 使用小写和连字符:`python-reviewer.md`
|
||||
* 描述性要强:`tdd-workflow.md` 而不是 `workflow.md`
|
||||
* 名称与文件名匹配
|
||||
|
||||
***
|
||||
|
||||
## 有问题吗?
|
||||
|
||||
请提出问题或在 X 上联系我们:[@affaanmustafa](https://x.com/affaanmustafa)
|
||||
* **问题:** [github.com/affaan-m/everything-claude-code/issues](https://github.com/affaan-m/everything-claude-code/issues)
|
||||
* **X/Twitter:** [@affaanmustafa](https://x.com/affaanmustafa)
|
||||
|
||||
***
|
||||
|
||||
感谢您的贡献。让我们共同构建一个优秀的资源。
|
||||
感谢您的贡献!让我们共同构建一个出色的资源。
|
||||
|
||||
@@ -1,21 +1,27 @@
|
||||
**语言:** English | [繁體中文](docs/zh-TW/README.md) | [简体中文](docs/zh-CN/README.md)
|
||||
**语言:** English | [繁體中文](../zh-TW/README.md) | [简体中文](README.md)
|
||||
|
||||
# Everything Claude Code
|
||||
|
||||
[](https://github.com/affaan-m/everything-claude-code/stargazers)
|
||||
[](https://github.com/affaan-m/everything-claude-code/network/members)
|
||||
[](https://github.com/affaan-m/everything-claude-code/graphs/contributors)
|
||||
[](LICENSE)
|
||||

|
||||

|
||||

|
||||

|
||||

|
||||

|
||||
|
||||
> **42K+ 星标** | **5K+ 分支** | **24 位贡献者** | **支持 6 种语言**
|
||||
|
||||
***
|
||||
|
||||
<div align="center">
|
||||
|
||||
**🌐 语言 / 语言 / 語言**
|
||||
|
||||
[**English**](README.md) | [简体中文](README.zh-CN.md) | [繁體中文](docs/zh-TW/README.md)
|
||||
[**English**](../../README.md) | [简体中文](../../README.zh-CN.md) | [繁體中文](../../docs/zh-TW/README.md)
|
||||
|
||||
</div>
|
||||
|
||||
@@ -61,6 +67,38 @@
|
||||
|
||||
***
|
||||
|
||||
## 最新动态
|
||||
|
||||
### v1.4.1 — 错误修复 (2026年2月)
|
||||
|
||||
* **修复了直觉导入内容丢失问题** — `parse_instinct_file()` 在 `/instinct-import` 期间会静默丢弃 frontmatter 之后的所有内容(Action, Evidence, Examples 部分)。已由社区贡献者 @ericcai0814 修复 ([#148](https://github.com/affaan-m/everything-claude-code/issues/148), [#161](https://github.com/affaan-m/everything-claude-code/pull/161))
|
||||
|
||||
### v1.4.0 — 多语言规则、安装向导 & PM2 (2026年2月)
|
||||
|
||||
* **交互式安装向导** — 新的 `configure-ecc` 技能提供了带有合并/覆盖检测的引导式设置
|
||||
* **PM2 & 多智能体编排** — 6 个新命令 (`/pm2`, `/multi-plan`, `/multi-execute`, `/multi-backend`, `/multi-frontend`, `/multi-workflow`) 用于管理复杂的多服务工作流
|
||||
* **多语言规则架构** — 规则从扁平文件重组为 `common/` + `typescript/` + `python/` + `golang/` 目录。仅安装您需要的语言
|
||||
* **中文 (zh-CN) 翻译** — 所有智能体、命令、技能和规则的完整翻译 (80+ 个文件)
|
||||
* **GitHub Sponsors 支持** — 通过 GitHub Sponsors 赞助项目
|
||||
* **增强的 CONTRIBUTING.md** — 针对每种贡献类型的详细 PR 模板
|
||||
|
||||
### v1.3.0 — OpenCode 插件支持 (2026年2月)
|
||||
|
||||
* **完整的 OpenCode 集成** — 12 个智能体,24 个命令,16 个技能,通过 OpenCode 的插件系统支持钩子 (20+ 种事件类型)
|
||||
* **3 个原生自定义工具** — run-tests, check-coverage, security-audit
|
||||
* **LLM 文档** — `llms.txt` 用于获取全面的 OpenCode 文档
|
||||
|
||||
### v1.2.0 — 统一的命令和技能 (2026年2月)
|
||||
|
||||
* **Python/Django 支持** — Django 模式、安全、TDD 和验证技能
|
||||
* **Java Spring Boot 技能** — Spring Boot 的模式、安全、TDD 和验证
|
||||
* **会话管理** — `/sessions` 命令用于查看会话历史
|
||||
* **持续学习 v2** — 基于直觉的学习,带有置信度评分、导入/导出、进化
|
||||
|
||||
完整的更新日志请参见 [Releases](https://github.com/affaan-m/everything-claude-code/releases)。
|
||||
|
||||
***
|
||||
|
||||
## 🚀 快速开始
|
||||
|
||||
在 2 分钟内启动并运行:
|
||||
@@ -83,8 +121,13 @@
|
||||
# Clone the repo first
|
||||
git clone https://github.com/affaan-m/everything-claude-code.git
|
||||
|
||||
# Copy rules (applies to all projects)
|
||||
cp -r everything-claude-code/rules/* ~/.claude/rules/
|
||||
# Install common rules (required)
|
||||
cp -r everything-claude-code/rules/common/* ~/.claude/rules/
|
||||
|
||||
# Install language-specific rules (pick your stack)
|
||||
cp -r everything-claude-code/rules/typescript/* ~/.claude/rules/
|
||||
cp -r everything-claude-code/rules/python/* ~/.claude/rules/
|
||||
cp -r everything-claude-code/rules/golang/* ~/.claude/rules/
|
||||
```
|
||||
|
||||
### 步骤 3:开始使用
|
||||
@@ -97,7 +140,7 @@ cp -r everything-claude-code/rules/* ~/.claude/rules/
|
||||
/plugin list everything-claude-code@everything-claude-code
|
||||
```
|
||||
|
||||
✨ **就这样!** 您现在可以访问 15+ 个代理、30+ 个技能和 20+ 个命令。
|
||||
✨ **就是这样!** 您现在可以访问 15+ 个智能体,30+ 个技能,以及 30+ 个命令。
|
||||
|
||||
***
|
||||
|
||||
@@ -156,23 +199,38 @@ everything-claude-code/
|
||||
| |-- e2e-runner.md # Playwright 端到端测试
|
||||
| |-- refactor-cleaner.md # 无用代码清理
|
||||
| |-- doc-updater.md # 文档同步
|
||||
| |-- go-reviewer.md # Go 代码审查(新增)
|
||||
| |-- go-build-resolver.md # Go 构建错误修复(新增)
|
||||
| |-- go-reviewer.md # Go 代码审查
|
||||
| |-- go-build-resolver.md # Go 构建错误修复
|
||||
| |-- python-reviewer.md # Python 代码审查(新增)
|
||||
| |-- database-reviewer.md # 数据库/Supabase 审查(新增)
|
||||
|
|
||||
|-- skills/ # 工作流定义与领域知识
|
||||
| |-- coding-standards/ # 各语言最佳实践
|
||||
| |-- backend-patterns/ # API、数据库、缓存模式
|
||||
| |-- frontend-patterns/ # React、Next.js 模式
|
||||
| |-- continuous-learning/ # 从会话中自动提取模式(长文档指南)
|
||||
| |-- continuous-learning-v2/ # 基于直觉的学习,带置信度评分
|
||||
| |-- continuous-learning-v2/ # 基于直觉的学习与置信度评分
|
||||
| |-- iterative-retrieval/ # 子代理的渐进式上下文精炼
|
||||
| |-- strategic-compact/ # 手动压缩建议(长文档指南)
|
||||
| |-- tdd-workflow/ # TDD 方法论
|
||||
| |-- security-review/ # 安全检查清单
|
||||
| |-- eval-harness/ # 验证循环评估(长文档指南)
|
||||
| |-- verification-loop/ # 持续验证(长文档指南)
|
||||
| |-- golang-patterns/ # Go 语言习惯用法与最佳实践(新增)
|
||||
| |-- golang-testing/ # Go 测试模式、TDD、基准测试(新增)
|
||||
| |-- golang-patterns/ # Go 语言惯用法与最佳实践
|
||||
| |-- golang-testing/ # Go 测试模式、TDD 与基准测试
|
||||
| |-- cpp-testing/ # 使用 GoogleTest、CMake/CTest 的 C++ 测试(新增)
|
||||
| |-- django-patterns/ # Django 模式、模型与视图(新增)
|
||||
| |-- django-security/ # Django 安全最佳实践(新增)
|
||||
| |-- django-tdd/ # Django TDD 工作流(新增)
|
||||
| |-- django-verification/ # Django 验证循环(新增)
|
||||
| |-- python-patterns/ # Python 惯用法与最佳实践(新增)
|
||||
| |-- python-testing/ # 使用 pytest 的 Python 测试(新增)
|
||||
| |-- springboot-patterns/ # Java Spring Boot 模式(新增)
|
||||
| |-- springboot-security/ # Spring Boot 安全(新增)
|
||||
| |-- springboot-tdd/ # Spring Boot TDD(新增)
|
||||
| |-- springboot-verification/ # Spring Boot 验证流程(新增)
|
||||
| |-- configure-ecc/ # 交互式安装向导(新增)
|
||||
| |-- security-scan/ # AgentShield 安全审计集成(新增)
|
||||
|
|
||||
|-- commands/ # 快捷执行的 Slash 命令
|
||||
| |-- tdd.md # /tdd - 测试驱动开发
|
||||
@@ -192,15 +250,28 @@ everything-claude-code/
|
||||
| |-- instinct-status.md # /instinct-status - 查看已学习的直觉(新增)
|
||||
| |-- instinct-import.md # /instinct-import - 导入直觉(新增)
|
||||
| |-- instinct-export.md # /instinct-export - 导出直觉(新增)
|
||||
| |-- evolve.md # /evolve - 将直觉聚类为技能(新增)
|
||||
| |-- evolve.md # /evolve - 将直觉聚类为技能
|
||||
| |-- pm2.md # /pm2 - PM2 服务生命周期管理(新增)
|
||||
| |-- multi-plan.md # /multi-plan - 多代理任务拆解(新增)
|
||||
| |-- multi-execute.md # /multi-execute - 编排式多代理工作流(新增)
|
||||
| |-- multi-backend.md # /multi-backend - 后端多服务编排(新增)
|
||||
| |-- multi-frontend.md # /multi-frontend - 前端多服务编排(新增)
|
||||
| |-- multi-workflow.md # /multi-workflow - 通用多服务工作流(新增)
|
||||
|
|
||||
|-- rules/ # 必须遵循的规则(复制到 ~/.claude/rules/)
|
||||
| |-- security.md # 强制安全检查
|
||||
| |-- coding-style.md # 不可变性、文件组织规范
|
||||
| |-- testing.md # TDD,80% 覆盖率要求
|
||||
| |-- git-workflow.md # 提交格式与 PR 流程
|
||||
| |-- agents.md # 何时委派给子代理
|
||||
| |-- performance.md # 模型选择与上下文管理
|
||||
| |-- README.md # 结构概览与安装指南
|
||||
| |-- common/ # 与语言无关的通用原则
|
||||
| | |-- coding-style.md # 不可变性与文件组织
|
||||
| | |-- git-workflow.md # 提交格式与 PR 流程
|
||||
| | |-- testing.md # TDD,80% 覆盖率要求
|
||||
| | |-- performance.md # 模型选择与上下文管理
|
||||
| | |-- patterns.md # 设计模式与项目骨架
|
||||
| | |-- hooks.md # Hook 架构与 TodoWrite
|
||||
| | |-- agents.md # 何时委派给子代理
|
||||
| | |-- security.md # 强制安全检查
|
||||
| |-- typescript/ # TypeScript / JavaScript 专用
|
||||
| |-- python/ # Python 专用
|
||||
| |-- golang/ # Go 专用
|
||||
|
|
||||
|-- hooks/ # 基于触发器的自动化
|
||||
| |-- hooks.json # 所有 Hook 配置(PreToolUse、PostToolUse、Stop 等)
|
||||
@@ -209,7 +280,7 @@ everything-claude-code/
|
||||
|
|
||||
|-- scripts/ # 跨平台 Node.js 脚本(新增)
|
||||
| |-- lib/ # 共享工具
|
||||
| | |-- utils.js # 跨平台文件 / 路径 / 系统工具
|
||||
| | |-- utils.js # 跨平台文件/路径/系统工具
|
||||
| | |-- package-manager.js # 包管理器检测与选择
|
||||
| |-- hooks/ # Hook 实现
|
||||
| | |-- session-start.js # 会话开始时加载上下文
|
||||
@@ -227,7 +298,7 @@ everything-claude-code/
|
||||
|-- contexts/ # 动态系统提示注入上下文(长文档指南)
|
||||
| |-- dev.md # 开发模式上下文
|
||||
| |-- review.md # 代码审查模式上下文
|
||||
| |-- research.md # 研究 / 探索模式上下文
|
||||
| |-- research.md # 研究/探索模式上下文
|
||||
|
|
||||
|-- examples/ # 示例配置与会话
|
||||
| |-- CLAUDE.md # 项目级配置示例
|
||||
@@ -277,6 +348,30 @@ everything-claude-code/
|
||||
* **Instinct 集合** - 用于 continuous-learning-v2
|
||||
* **模式提取** - 从您的提交历史中学习
|
||||
|
||||
### AgentShield — 安全审计器
|
||||
|
||||
扫描您的 Claude Code 配置,查找漏洞、错误配置和注入风险。
|
||||
|
||||
```bash
|
||||
# Quick scan (no install needed)
|
||||
npx ecc-agentshield scan
|
||||
|
||||
# Auto-fix safe issues
|
||||
npx ecc-agentshield scan --fix
|
||||
|
||||
# Deep analysis with Opus 4.6
|
||||
npx ecc-agentshield scan --opus --stream
|
||||
|
||||
# Generate secure config from scratch
|
||||
npx ecc-agentshield init
|
||||
```
|
||||
|
||||
检查 CLAUDE.md、settings.json、MCP 服务器、钩子和智能体定义。生成带有可操作发现的安全等级 (A-F)。
|
||||
|
||||
在 Claude Code 中使用 `/security-scan` 来运行它,或者通过 [GitHub Action](https://github.com/affaan-m/agentshield) 添加到 CI。
|
||||
|
||||
[GitHub](https://github.com/affaan-m/agentshield) | [npm](https://www.npmjs.com/package/ecc-agentshield)
|
||||
|
||||
### 🧠 持续学习 v2
|
||||
|
||||
基于本能的学习系统会自动学习您的模式:
|
||||
@@ -361,11 +456,15 @@ Duplicate hooks file detected: ./hooks/hooks.json resolves to already-loaded fil
|
||||
> git clone https://github.com/affaan-m/everything-claude-code.git
|
||||
>
|
||||
> # 选项 A:用户级规则(适用于所有项目)
|
||||
> cp -r everything-claude-code/rules/* ~/.claude/rules/
|
||||
> cp -r everything-claude-code/rules/common/* ~/.claude/rules/
|
||||
> cp -r everything-claude-code/rules/typescript/* ~/.claude/rules/ # 选择您的技术栈
|
||||
> cp -r everything-claude-code/rules/python/* ~/.claude/rules/
|
||||
> cp -r everything-claude-code/rules/golang/* ~/.claude/rules/
|
||||
>
|
||||
> # 选项 B:项目级规则(仅适用于当前项目)
|
||||
> mkdir -p .claude/rules
|
||||
> cp -r everything-claude-code/rules/* .claude/rules/
|
||||
> cp -r everything-claude-code/rules/common/* .claude/rules/
|
||||
> cp -r everything-claude-code/rules/typescript/* .claude/rules/ # 选择您的技术栈
|
||||
> ```
|
||||
|
||||
***
|
||||
@@ -381,8 +480,11 @@ git clone https://github.com/affaan-m/everything-claude-code.git
|
||||
# Copy agents to your Claude config
|
||||
cp everything-claude-code/agents/*.md ~/.claude/agents/
|
||||
|
||||
# Copy rules
|
||||
cp everything-claude-code/rules/*.md ~/.claude/rules/
|
||||
# Copy rules (common + language-specific)
|
||||
cp -r everything-claude-code/rules/common/* ~/.claude/rules/
|
||||
cp -r everything-claude-code/rules/typescript/* ~/.claude/rules/ # pick your stack
|
||||
cp -r everything-claude-code/rules/python/* ~/.claude/rules/
|
||||
cp -r everything-claude-code/rules/golang/* ~/.claude/rules/
|
||||
|
||||
# Copy commands
|
||||
cp everything-claude-code/commands/*.md ~/.claude/commands/
|
||||
@@ -451,15 +553,18 @@ model: opus
|
||||
|
||||
### 规则
|
||||
|
||||
规则是始终遵循的指导原则。保持其模块化:
|
||||
规则是始终遵循的指导原则,组织成 `common/`(与语言无关)+ 语言特定目录:
|
||||
|
||||
```
|
||||
~/.claude/rules/
|
||||
security.md # No hardcoded secrets
|
||||
coding-style.md # Immutability, file limits
|
||||
testing.md # TDD, coverage requirements
|
||||
rules/
|
||||
common/ # Universal principles (always install)
|
||||
typescript/ # TS/JS specific patterns and tools
|
||||
python/ # Python specific patterns and tools
|
||||
golang/ # Go specific patterns and tools
|
||||
```
|
||||
|
||||
有关安装和结构详情,请参阅 [`rules/README.md`](rules/README.md)。
|
||||
|
||||
***
|
||||
|
||||
## 🧪 运行测试
|
||||
@@ -493,11 +598,144 @@ node tests/hooks/hooks.test.js
|
||||
|
||||
### 贡献想法
|
||||
|
||||
* 特定语言的技能(Python、Rust 模式)- 现已包含 Go!
|
||||
* 特定框架的配置(Django、Rails、Laravel)
|
||||
* DevOps 代理(Kubernetes、Terraform、AWS)
|
||||
* 测试策略(不同框架)
|
||||
* 特定领域的知识(ML、数据工程、移动开发)
|
||||
* 特定语言技能 (Rust, C#, Swift, Kotlin) — Go, Python, Java 已包含
|
||||
* 特定框架配置 (Rails, Laravel, FastAPI, NestJS) — Django, Spring Boot 已包含
|
||||
* DevOps 智能体 (Kubernetes, Terraform, AWS, Docker)
|
||||
* 测试策略 (不同框架,视觉回归)
|
||||
* 领域特定知识 (ML, 数据工程, 移动端)
|
||||
|
||||
***
|
||||
|
||||
## Cursor IDE 支持
|
||||
|
||||
ecc-universal 包含为 [Cursor IDE](https://cursor.com) 预翻译的配置。`.cursor/` 目录包含适用于 Cursor 格式的规则、智能体、技能、命令和 MCP 配置。
|
||||
|
||||
### 快速开始 (Cursor)
|
||||
|
||||
```bash
|
||||
# Install the package
|
||||
npm install ecc-universal
|
||||
|
||||
# Install for your language(s)
|
||||
./install.sh --target cursor typescript
|
||||
./install.sh --target cursor python golang
|
||||
```
|
||||
|
||||
### 已翻译内容
|
||||
|
||||
| 组件 | Claude Code → Cursor | 对等性 |
|
||||
|-----------|---------------------|--------|
|
||||
| 规则 | 添加了 YAML frontmatter,路径扁平化 | 完全 |
|
||||
| 智能体 | 模型 ID 已扩展,工具 → 只读标志 | 完全 |
|
||||
| 技能 | 无需更改 (标准相同) | 相同 |
|
||||
| 命令 | 路径引用已更新,多-\* 已存根 | 部分 |
|
||||
| MCP 配置 | 环境变量插值语法已更新 | 完全 |
|
||||
| 钩子 | Cursor 中无等效项 | 参见替代方案 |
|
||||
|
||||
详情请参阅 [.cursor/README.md](.cursor/README.md),完整迁移指南请参阅 [.cursor/MIGRATION.md](.cursor/MIGRATION.md)。
|
||||
|
||||
***
|
||||
|
||||
## 🔌 OpenCode 支持
|
||||
|
||||
ECC 提供 **完整的 OpenCode 支持**,包括插件和钩子。
|
||||
|
||||
### 快速开始
|
||||
|
||||
```bash
|
||||
# Install OpenCode
|
||||
npm install -g opencode
|
||||
|
||||
# Run in the repository root
|
||||
opencode
|
||||
```
|
||||
|
||||
配置会自动从 `.opencode/opencode.json` 检测。
|
||||
|
||||
### 功能对等
|
||||
|
||||
| 特性 | Claude Code | OpenCode | 状态 |
|
||||
|---------|-------------|----------|--------|
|
||||
| 智能体 | ✅ 14 agents | ✅ 12 agents | **Claude Code 领先** |
|
||||
| 命令 | ✅ 30 commands | ✅ 24 commands | **Claude Code 领先** |
|
||||
| 技能 | ✅ 28 skills | ✅ 16 skills | **Claude Code 领先** |
|
||||
| 钩子 | ✅ 3 phases | ✅ 20+ events | **OpenCode 更多!** |
|
||||
| 规则 | ✅ 8 rules | ✅ 8 rules | **完全一致** |
|
||||
| MCP Servers | ✅ Full | ✅ Full | **完全一致** |
|
||||
| 自定义工具 | ✅ Via hooks | ✅ Native support | **OpenCode 更好** |
|
||||
|
||||
### 通过插件实现的钩子支持
|
||||
|
||||
OpenCode 的插件系统比 Claude Code 更复杂,有 20 多种事件类型:
|
||||
|
||||
| Claude Code 钩子 | OpenCode 插件事件 |
|
||||
|-----------------|----------------------|
|
||||
| PreToolUse | `tool.execute.before` |
|
||||
| PostToolUse | `tool.execute.after` |
|
||||
| Stop | `session.idle` |
|
||||
| SessionStart | `session.created` |
|
||||
| SessionEnd | `session.deleted` |
|
||||
|
||||
**额外的 OpenCode 事件**:`file.edited`、`file.watcher.updated`、`message.updated`、`lsp.client.diagnostics`、`tui.toast.show` 等等。
|
||||
|
||||
### 可用命令 (24)
|
||||
|
||||
| 命令 | 描述 |
|
||||
|---------|-------------|
|
||||
| `/plan` | 创建实施计划 |
|
||||
| `/tdd` | 强制执行 TDD 工作流 |
|
||||
| `/code-review` | 审查代码变更 |
|
||||
| `/security` | 运行安全审查 |
|
||||
| `/build-fix` | 修复构建错误 |
|
||||
| `/e2e` | 生成端到端测试 |
|
||||
| `/refactor-clean` | 移除死代码 |
|
||||
| `/orchestrate` | 多代理工作流 |
|
||||
| `/learn` | 从会话中提取模式 |
|
||||
| `/checkpoint` | 保存验证状态 |
|
||||
| `/verify` | 运行验证循环 |
|
||||
| `/eval` | 根据标准进行评估 |
|
||||
| `/update-docs` | 更新文档 |
|
||||
| `/update-codemaps` | 更新代码地图 |
|
||||
| `/test-coverage` | 分析覆盖率 |
|
||||
| `/go-review` | Go 代码审查 |
|
||||
| `/go-test` | Go TDD 工作流 |
|
||||
| `/go-build` | 修复 Go 构建错误 |
|
||||
| `/skill-create` | 从 git 生成技能 |
|
||||
| `/instinct-status` | 查看习得的本能 |
|
||||
| `/instinct-import` | 导入本能 |
|
||||
| `/instinct-export` | 导出本能 |
|
||||
| `/evolve` | 将本能聚类为技能 |
|
||||
| `/setup-pm` | 配置包管理器 |
|
||||
|
||||
### 插件安装
|
||||
|
||||
**选项 1:直接使用**
|
||||
|
||||
```bash
|
||||
cd everything-claude-code
|
||||
opencode
|
||||
```
|
||||
|
||||
**选项 2:作为 npm 包安装**
|
||||
|
||||
```bash
|
||||
npm install ecc-universal
|
||||
```
|
||||
|
||||
然后添加到您的 `opencode.json`:
|
||||
|
||||
```json
|
||||
{
|
||||
"plugin": ["ecc-universal"]
|
||||
}
|
||||
```
|
||||
|
||||
### 文档
|
||||
|
||||
* **迁移指南**:`.opencode/MIGRATION.md`
|
||||
* **OpenCode 插件 README**:`.opencode/README.md`
|
||||
* **整合的规则**:`.opencode/instructions/INSTRUCTIONS.md`
|
||||
* **LLM 文档**:`llms.txt`(完整的 OpenCode 文档,供 LLM 使用)
|
||||
|
||||
***
|
||||
|
||||
@@ -542,10 +780,11 @@ node tests/hooks/hooks.test.js
|
||||
|
||||
## 🔗 链接
|
||||
|
||||
* **简明指南(从此开始):** [Everything Claude Code 简明指南](https://x.com/affaanmustafa/status/2012378465664745795)
|
||||
* **详细指南(高级):** [Everything Claude Code 详细指南](https://x.com/affaanmustafa/status/2014040193557471352)
|
||||
* **关注:** [@affaanmustafa](https://x.com/affaanmustafa)
|
||||
* **zenith.chat:** [zenith.chat](https://zenith.chat)
|
||||
* **速查指南 (从此开始):** [Claude Code 万事速查指南](https://x.com/affaanmustafa/status/2012378465664745795)
|
||||
* **详细指南 (进阶):** [Claude Code 万事详细指南](https://x.com/affaanmustafa/status/2014040193557471352)
|
||||
* **关注:** [@affaanmustafa](https://x.com/affaanmustafa)
|
||||
* **zenith.chat:** [zenith.chat](https://zenith.chat)
|
||||
* **技能目录:** [awesome-agent-skills](https://github.com/JackyST0/awesome-agent-skills)
|
||||
|
||||
***
|
||||
|
||||
|
||||
47
docs/zh-CN/SPONSORS.md
Normal file
47
docs/zh-CN/SPONSORS.md
Normal file
@@ -0,0 +1,47 @@
|
||||
# 赞助者
|
||||
|
||||
感谢所有赞助本项目的各位!你们的支持让 ECC 生态系统持续成长。
|
||||
|
||||
## 企业赞助者
|
||||
|
||||
*成为 [企业赞助者](https://github.com/sponsors/affaan-m),将您的名字展示在此处*
|
||||
|
||||
## 商业赞助者
|
||||
|
||||
*成为 [商业赞助者](https://github.com/sponsors/affaan-m),将您的名字展示在此处*
|
||||
|
||||
## 团队赞助者
|
||||
|
||||
*成为 [团队赞助者](https://github.com/sponsors/affaan-m),将您的名字展示在此处*
|
||||
|
||||
## 个人赞助者
|
||||
|
||||
*成为 [赞助者](https://github.com/sponsors/affaan-m),将您的名字列在此处*
|
||||
|
||||
***
|
||||
|
||||
## 为什么要赞助?
|
||||
|
||||
您的赞助将帮助我们:
|
||||
|
||||
* **更快地交付** — 更多时间投入到工具和功能的开发上
|
||||
* **保持免费** — 高级功能为所有人的免费层级提供资金支持
|
||||
* **更好的支持** — 赞助者获得优先响应
|
||||
* **影响路线图** — Pro+ 赞助者可以对功能进行投票
|
||||
|
||||
## 赞助等级
|
||||
|
||||
| 等级 | 价格 | 权益 |
|
||||
|------|-------|----------|
|
||||
| 支持者 | $5/月 | 名字出现在 README 中,早期访问 |
|
||||
| 建造者 | $10/月 | 高级工具访问权限 |
|
||||
| 专业版 | $25/月 | 优先支持,办公时间咨询 |
|
||||
| 团队 | $100/月 | 5个席位,团队配置 |
|
||||
| 商业 | $500/月 | 25个席位,咨询额度 |
|
||||
| 企业 | $2K/月 | 无限席位,定制工具 |
|
||||
|
||||
[**Become a Sponsor →**](https://github.com/sponsors/affaan-m)
|
||||
|
||||
***
|
||||
|
||||
*自动更新。最后同步:2026年2月*
|
||||
274
docs/zh-CN/commands/go-test.md
Normal file
274
docs/zh-CN/commands/go-test.md
Normal file
@@ -0,0 +1,274 @@
|
||||
---
|
||||
description: 为Go强制执行TDD工作流程。首先编写表驱动测试,然后实现。使用go test -cover验证80%以上的覆盖率。
|
||||
---
|
||||
|
||||
# Go TDD 命令
|
||||
|
||||
此命令使用惯用的 Go 测试模式,为 Go 代码强制执行测试驱动开发方法。
|
||||
|
||||
## 此命令的作用
|
||||
|
||||
1. **定义类型/接口**:首先搭建函数签名
|
||||
2. **编写表驱动测试**:创建全面的测试用例(RED 阶段)
|
||||
3. **运行测试**:验证测试因正确原因而失败
|
||||
4. **实现代码**:编写最少的代码以通过测试(GREEN 阶段)
|
||||
5. **重构**:改进代码,同时保持测试通过
|
||||
6. **检查覆盖率**:确保 80% 以上的覆盖率
|
||||
|
||||
## 何时使用
|
||||
|
||||
在以下情况下使用 `/go-test`:
|
||||
|
||||
* 实现新的 Go 函数时
|
||||
* 为现有代码添加测试覆盖率时
|
||||
* 修复 bug 时(先编写失败的测试)
|
||||
* 构建关键业务逻辑时
|
||||
* 学习 Go 中的 TDD 工作流程时
|
||||
|
||||
## TDD 循环
|
||||
|
||||
```
|
||||
RED → Write failing table-driven test
|
||||
GREEN → Implement minimal code to pass
|
||||
REFACTOR → Improve code, tests stay green
|
||||
REPEAT → Next test case
|
||||
```
|
||||
|
||||
## 示例会话
|
||||
|
||||
````
|
||||
User: /go-test I need a function to validate email addresses
|
||||
|
||||
Agent:
|
||||
# TDD Session: Email Validator
|
||||
|
||||
## Step 1: Define Interface
|
||||
|
||||
```go
|
||||
// validator/email.go
|
||||
package validator
|
||||
|
||||
// ValidateEmail checks if the given string is a valid email address.
|
||||
// Returns nil if valid, error describing the issue otherwise.
|
||||
func ValidateEmail(email string) error {
|
||||
panic("not implemented")
|
||||
}
|
||||
```
|
||||
|
||||
## Step 2: Write Table-Driven Tests (RED)
|
||||
|
||||
```go
|
||||
// validator/email_test.go
|
||||
package validator
|
||||
|
||||
import (
|
||||
"testing"
|
||||
)
|
||||
|
||||
func TestValidateEmail(t *testing.T) {
|
||||
tests := []struct {
|
||||
name string
|
||||
email string
|
||||
wantErr bool
|
||||
}{
|
||||
// Valid emails
|
||||
{"simple email", "user@example.com", false},
|
||||
{"with subdomain", "user@mail.example.com", false},
|
||||
{"with plus", "user+tag@example.com", false},
|
||||
{"with dots", "first.last@example.com", false},
|
||||
|
||||
// Invalid emails
|
||||
{"empty string", "", true},
|
||||
{"no at sign", "userexample.com", true},
|
||||
{"no domain", "user@", true},
|
||||
{"no local part", "@example.com", true},
|
||||
{"double at", "user@@example.com", true},
|
||||
{"spaces", "user @example.com", true},
|
||||
{"no tld", "user@example", true},
|
||||
}
|
||||
|
||||
for _, tt := range tests {
|
||||
t.Run(tt.name, func(t *testing.T) {
|
||||
err := ValidateEmail(tt.email)
|
||||
if tt.wantErr && err == nil {
|
||||
t.Errorf("ValidateEmail(%q) = nil; want error", tt.email)
|
||||
}
|
||||
if !tt.wantErr && err != nil {
|
||||
t.Errorf("ValidateEmail(%q) = %v; want nil", tt.email, err)
|
||||
}
|
||||
})
|
||||
}
|
||||
}
|
||||
```
|
||||
|
||||
## Step 3: Run Tests - Verify FAIL
|
||||
|
||||
```bash
|
||||
$ go test ./validator/...
|
||||
|
||||
--- FAIL: TestValidateEmail (0.00s)
|
||||
--- FAIL: TestValidateEmail/simple_email (0.00s)
|
||||
panic: not implemented
|
||||
|
||||
FAIL
|
||||
```
|
||||
|
||||
✓ Tests fail as expected (panic).
|
||||
|
||||
## Step 4: Implement Minimal Code (GREEN)
|
||||
|
||||
```go
|
||||
// validator/email.go
|
||||
package validator
|
||||
|
||||
import (
|
||||
"errors"
|
||||
"regexp"
|
||||
)
|
||||
|
||||
var emailRegex = regexp.MustCompile(`^[a-zA-Z0-9._%+-]+@[a-zA-Z0-9.-]+\.[a-zA-Z]{2,}$`)
|
||||
|
||||
var (
|
||||
ErrEmailEmpty = errors.New("email cannot be empty")
|
||||
ErrEmailInvalid = errors.New("email format is invalid")
|
||||
)
|
||||
|
||||
func ValidateEmail(email string) error {
|
||||
if email == "" {
|
||||
return ErrEmailEmpty
|
||||
}
|
||||
if !emailRegex.MatchString(email) {
|
||||
return ErrEmailInvalid
|
||||
}
|
||||
return nil
|
||||
}
|
||||
```
|
||||
|
||||
## Step 5: Run Tests - Verify PASS
|
||||
|
||||
```bash
|
||||
$ go test ./validator/...
|
||||
|
||||
PASS
|
||||
ok project/validator 0.003s
|
||||
```
|
||||
|
||||
✓ All tests passing!
|
||||
|
||||
## Step 6: Check Coverage
|
||||
|
||||
```bash
|
||||
$ go test -cover ./validator/...
|
||||
|
||||
PASS
|
||||
coverage: 100.0% of statements
|
||||
ok project/validator 0.003s
|
||||
```
|
||||
|
||||
✓ Coverage: 100%
|
||||
|
||||
## TDD Complete!
|
||||
````
|
||||
|
||||
## 测试模式
|
||||
|
||||
### 表驱动测试
|
||||
|
||||
```go
|
||||
tests := []struct {
|
||||
name string
|
||||
input InputType
|
||||
want OutputType
|
||||
wantErr bool
|
||||
}{
|
||||
{"case 1", input1, want1, false},
|
||||
{"case 2", input2, want2, true},
|
||||
}
|
||||
|
||||
for _, tt := range tests {
|
||||
t.Run(tt.name, func(t *testing.T) {
|
||||
got, err := Function(tt.input)
|
||||
// assertions
|
||||
})
|
||||
}
|
||||
```
|
||||
|
||||
### 并行测试
|
||||
|
||||
```go
|
||||
for _, tt := range tests {
|
||||
tt := tt // Capture
|
||||
t.Run(tt.name, func(t *testing.T) {
|
||||
t.Parallel()
|
||||
// test body
|
||||
})
|
||||
}
|
||||
```
|
||||
|
||||
### 测试辅助函数
|
||||
|
||||
```go
|
||||
func setupTestDB(t *testing.T) *sql.DB {
|
||||
t.Helper()
|
||||
db := createDB()
|
||||
t.Cleanup(func() { db.Close() })
|
||||
return db
|
||||
}
|
||||
```
|
||||
|
||||
## 覆盖率命令
|
||||
|
||||
```bash
|
||||
# Basic coverage
|
||||
go test -cover ./...
|
||||
|
||||
# Coverage profile
|
||||
go test -coverprofile=coverage.out ./...
|
||||
|
||||
# View in browser
|
||||
go tool cover -html=coverage.out
|
||||
|
||||
# Coverage by function
|
||||
go tool cover -func=coverage.out
|
||||
|
||||
# With race detection
|
||||
go test -race -cover ./...
|
||||
```
|
||||
|
||||
## 覆盖率目标
|
||||
|
||||
| 代码类型 | 目标 |
|
||||
|-----------|--------|
|
||||
| 关键业务逻辑 | 100% |
|
||||
| 公共 API | 90%+ |
|
||||
| 通用代码 | 80%+ |
|
||||
| 生成的代码 | 排除 |
|
||||
|
||||
## TDD 最佳实践
|
||||
|
||||
**应该做:**
|
||||
|
||||
* 先编写测试,再编写任何实现
|
||||
* 每次更改后运行测试
|
||||
* 使用表驱动测试以获得全面的覆盖率
|
||||
* 测试行为,而非实现细节
|
||||
* 包含边界情况(空值、nil、最大值)
|
||||
|
||||
**不应该做:**
|
||||
|
||||
* 在编写测试之前编写实现
|
||||
* 跳过 RED 阶段
|
||||
* 直接测试私有函数
|
||||
* 在测试中使用 `time.Sleep`
|
||||
* 忽略不稳定的测试
|
||||
|
||||
## 相关命令
|
||||
|
||||
* `/go-build` - 修复构建错误
|
||||
* `/go-review` - 在实现后审查代码
|
||||
* `/verify` - 运行完整的验证循环
|
||||
|
||||
## 相关
|
||||
|
||||
* 技能:`skills/golang-testing/`
|
||||
* 技能:`skills/tdd-workflow/`
|
||||
162
docs/zh-CN/commands/multi-backend.md
Normal file
162
docs/zh-CN/commands/multi-backend.md
Normal file
@@ -0,0 +1,162 @@
|
||||
# 后端 - 后端导向开发
|
||||
|
||||
后端导向的工作流程(研究 → 构思 → 规划 → 执行 → 优化 → 评审),由 Codex 主导。
|
||||
|
||||
## 使用方法
|
||||
|
||||
```bash
|
||||
/backend <backend task description>
|
||||
```
|
||||
|
||||
## 上下文
|
||||
|
||||
* 后端任务:$ARGUMENTS
|
||||
* Codex 主导,Gemini 作为辅助参考
|
||||
* 适用场景:API 设计、算法实现、数据库优化、业务逻辑
|
||||
|
||||
## 你的角色
|
||||
|
||||
你是 **后端协调者**,为服务器端任务协调多模型协作(研究 → 构思 → 规划 → 执行 → 优化 → 评审)。
|
||||
|
||||
**协作模型**:
|
||||
|
||||
* **Codex** – 后端逻辑、算法(**后端权威,可信赖**)
|
||||
* **Gemini** – 前端视角(**后端意见仅供参考**)
|
||||
* **Claude (自身)** – 协调、规划、执行、交付
|
||||
|
||||
***
|
||||
|
||||
## 多模型调用规范
|
||||
|
||||
**调用语法**:
|
||||
|
||||
```
|
||||
# New session call
|
||||
Bash({
|
||||
command: "~/.claude/bin/codeagent-wrapper {{LITE_MODE_FLAG}}--backend codex - \"$PWD\" <<'EOF'
|
||||
ROLE_FILE: <role prompt path>
|
||||
<TASK>
|
||||
Requirement: <enhanced requirement (or $ARGUMENTS if not enhanced)>
|
||||
Context: <project context and analysis from previous phases>
|
||||
</TASK>
|
||||
OUTPUT: Expected output format
|
||||
EOF",
|
||||
run_in_background: false,
|
||||
timeout: 3600000,
|
||||
description: "Brief description"
|
||||
})
|
||||
|
||||
# Resume session call
|
||||
Bash({
|
||||
command: "~/.claude/bin/codeagent-wrapper {{LITE_MODE_FLAG}}--backend codex resume <SESSION_ID> - \"$PWD\" <<'EOF'
|
||||
ROLE_FILE: <role prompt path>
|
||||
<TASK>
|
||||
Requirement: <enhanced requirement (or $ARGUMENTS if not enhanced)>
|
||||
Context: <project context and analysis from previous phases>
|
||||
</TASK>
|
||||
OUTPUT: Expected output format
|
||||
EOF",
|
||||
run_in_background: false,
|
||||
timeout: 3600000,
|
||||
description: "Brief description"
|
||||
})
|
||||
```
|
||||
|
||||
**角色提示词**:
|
||||
|
||||
| 阶段 | Codex |
|
||||
|-------|-------|
|
||||
| 分析 | `~/.claude/.ccg/prompts/codex/analyzer.md` |
|
||||
| 规划 | `~/.claude/.ccg/prompts/codex/architect.md` |
|
||||
| 评审 | `~/.claude/.ccg/prompts/codex/reviewer.md` |
|
||||
|
||||
**会话复用**:每次调用返回 `SESSION_ID: xxx`,在后续阶段使用 `resume xxx`。在第 2 阶段保存 `CODEX_SESSION`,在第 3 和第 5 阶段使用 `resume`。
|
||||
|
||||
***
|
||||
|
||||
## 沟通准则
|
||||
|
||||
1. 在回复开头使用模式标签 `[Mode: X]`,初始值为 `[Mode: Research]`
|
||||
2. 遵循严格序列:`Research → Ideation → Plan → Execute → Optimize → Review`
|
||||
3. 需要时(例如确认/选择/批准)使用 `AskUserQuestion` 工具进行用户交互
|
||||
|
||||
***
|
||||
|
||||
## 核心工作流程
|
||||
|
||||
### 阶段 0:提示词增强(可选)
|
||||
|
||||
`[Mode: Prepare]` - 如果 ace-tool MCP 可用,调用 `mcp__ace-tool__enhance_prompt`,**用增强后的结果替换原始的 $ARGUMENTS,用于后续的 Codex 调用**
|
||||
|
||||
### 阶段 1:研究
|
||||
|
||||
`[Mode: Research]` - 理解需求并收集上下文
|
||||
|
||||
1. **代码检索**(如果 ace-tool MCP 可用):调用 `mcp__ace-tool__search_context` 以检索现有的 API、数据模型、服务架构
|
||||
2. 需求完整性评分(0-10):>=7 继续,<7 停止并补充
|
||||
|
||||
### 阶段 2:构思
|
||||
|
||||
`[Mode: Ideation]` - Codex 主导的分析
|
||||
|
||||
**必须调用 Codex**(遵循上述调用规范):
|
||||
|
||||
* ROLE\_FILE:`~/.claude/.ccg/prompts/codex/analyzer.md`
|
||||
* 需求:增强后的需求(或未增强时的 $ARGUMENTS)
|
||||
* 上下文:来自阶段 1 的项目上下文
|
||||
* 输出:技术可行性分析、推荐解决方案(至少 2 个)、风险评估
|
||||
|
||||
**保存 SESSION\_ID**(`CODEX_SESSION`)以供后续阶段复用。
|
||||
|
||||
输出解决方案(至少 2 个),等待用户选择。
|
||||
|
||||
### 阶段 3:规划
|
||||
|
||||
`[Mode: Plan]` - Codex 主导的规划
|
||||
|
||||
**必须调用 Codex**(使用 `resume <CODEX_SESSION>` 以复用会话):
|
||||
|
||||
* ROLE\_FILE:`~/.claude/.ccg/prompts/codex/architect.md`
|
||||
* 需求:用户选择的解决方案
|
||||
* 上下文:阶段 2 的分析结果
|
||||
* 输出:文件结构、函数/类设计、依赖关系
|
||||
|
||||
Claude 综合规划,在用户批准后保存到 `.claude/plan/task-name.md`。
|
||||
|
||||
### 阶段 4:实施
|
||||
|
||||
`[Mode: Execute]` - 代码开发
|
||||
|
||||
* 严格遵循已批准的规划
|
||||
* 遵循现有项目的代码规范
|
||||
* 确保错误处理、安全性、性能优化
|
||||
|
||||
### 阶段 5:优化
|
||||
|
||||
`[Mode: Optimize]` - Codex 主导的评审
|
||||
|
||||
**必须调用 Codex**(遵循上述调用规范):
|
||||
|
||||
* ROLE\_FILE:`~/.claude/.ccg/prompts/codex/reviewer.md`
|
||||
* 需求:评审以下后端代码变更
|
||||
* 上下文:git diff 或代码内容
|
||||
* 输出:安全性、性能、错误处理、API 合规性问题列表
|
||||
|
||||
整合评审反馈,在用户确认后执行优化。
|
||||
|
||||
### 阶段 6:质量评审
|
||||
|
||||
`[Mode: Review]` - 最终评估
|
||||
|
||||
* 对照规划检查完成情况
|
||||
* 运行测试以验证功能
|
||||
* 报告问题和建议
|
||||
|
||||
***
|
||||
|
||||
## 关键规则
|
||||
|
||||
1. **Codex 的后端意见是可信赖的**
|
||||
2. **Gemini 的后端意见仅供参考**
|
||||
3. 外部模型**对文件系统零写入权限**
|
||||
4. Claude 处理所有代码写入和文件操作
|
||||
315
docs/zh-CN/commands/multi-execute.md
Normal file
315
docs/zh-CN/commands/multi-execute.md
Normal file
@@ -0,0 +1,315 @@
|
||||
# 执行 - 多模型协同执行
|
||||
|
||||
多模型协同执行 - 从计划获取原型 → Claude 重构并实施 → 多模型审计与交付。
|
||||
|
||||
$ARGUMENTS
|
||||
|
||||
***
|
||||
|
||||
## 核心协议
|
||||
|
||||
* **语言协议**:与工具/模型交互时使用**英语**,与用户沟通时使用用户的语言
|
||||
* **代码主权**:外部模型**零文件系统写入权限**,所有修改由 Claude 执行
|
||||
* **脏原型重构**:将 Codex/Gemini 统一差异视为“脏原型”,必须重构为生产级代码
|
||||
* **止损机制**:当前阶段输出未经验证前,不得进入下一阶段
|
||||
* **前提条件**:仅在用户明确回复“Y”到 `/ccg:plan` 输出后执行(如果缺失,必须先确认)
|
||||
|
||||
***
|
||||
|
||||
## 多模型调用规范
|
||||
|
||||
**调用语法**(并行:使用 `run_in_background: true`):
|
||||
|
||||
```
|
||||
# Resume session call (recommended) - Implementation Prototype
|
||||
Bash({
|
||||
command: "~/.claude/bin/codeagent-wrapper {{LITE_MODE_FLAG}}--backend <codex|gemini> {{GEMINI_MODEL_FLAG}}resume <SESSION_ID> - \"$PWD\" <<'EOF'
|
||||
ROLE_FILE: <role prompt path>
|
||||
<TASK>
|
||||
Requirement: <task description>
|
||||
Context: <plan content + target files>
|
||||
</TASK>
|
||||
OUTPUT: Unified Diff Patch ONLY. Strictly prohibit any actual modifications.
|
||||
EOF",
|
||||
run_in_background: true,
|
||||
timeout: 3600000,
|
||||
description: "Brief description"
|
||||
})
|
||||
|
||||
# New session call - Implementation Prototype
|
||||
Bash({
|
||||
command: "~/.claude/bin/codeagent-wrapper {{LITE_MODE_FLAG}}--backend <codex|gemini> {{GEMINI_MODEL_FLAG}}- \"$PWD\" <<'EOF'
|
||||
ROLE_FILE: <role prompt path>
|
||||
<TASK>
|
||||
Requirement: <task description>
|
||||
Context: <plan content + target files>
|
||||
</TASK>
|
||||
OUTPUT: Unified Diff Patch ONLY. Strictly prohibit any actual modifications.
|
||||
EOF",
|
||||
run_in_background: true,
|
||||
timeout: 3600000,
|
||||
description: "Brief description"
|
||||
})
|
||||
```
|
||||
|
||||
**审计调用语法**(代码审查 / 审计):
|
||||
|
||||
```
|
||||
Bash({
|
||||
command: "~/.claude/bin/codeagent-wrapper {{LITE_MODE_FLAG}}--backend <codex|gemini> {{GEMINI_MODEL_FLAG}}resume <SESSION_ID> - \"$PWD\" <<'EOF'
|
||||
ROLE_FILE: <role prompt path>
|
||||
<TASK>
|
||||
Scope: Audit the final code changes.
|
||||
Inputs:
|
||||
- The applied patch (git diff / final unified diff)
|
||||
- The touched files (relevant excerpts if needed)
|
||||
Constraints:
|
||||
- Do NOT modify any files.
|
||||
- Do NOT output tool commands that assume filesystem access.
|
||||
</TASK>
|
||||
OUTPUT:
|
||||
1) A prioritized list of issues (severity, file, rationale)
|
||||
2) Concrete fixes; if code changes are needed, include a Unified Diff Patch in a fenced code block.
|
||||
EOF",
|
||||
run_in_background: true,
|
||||
timeout: 3600000,
|
||||
description: "Brief description"
|
||||
})
|
||||
```
|
||||
|
||||
**模型参数说明**:
|
||||
|
||||
* `{{GEMINI_MODEL_FLAG}}`:当使用 `--backend gemini` 时,替换为 `--gemini-model gemini-3-pro-preview`(注意尾随空格);对于 codex 使用空字符串
|
||||
|
||||
**角色提示**:
|
||||
|
||||
| 阶段 | Codex | Gemini |
|
||||
|-------|-------|--------|
|
||||
| 实施 | `~/.claude/.ccg/prompts/codex/architect.md` | `~/.claude/.ccg/prompts/gemini/frontend.md` |
|
||||
| 审查 | `~/.claude/.ccg/prompts/codex/reviewer.md` | `~/.claude/.ccg/prompts/gemini/reviewer.md` |
|
||||
|
||||
**会话重用**:如果 `/ccg:plan` 提供了 SESSION\_ID,使用 `resume <SESSION_ID>` 来重用上下文。
|
||||
|
||||
**等待后台任务**(最大超时 600000ms = 10 分钟):
|
||||
|
||||
```
|
||||
TaskOutput({ task_id: "<task_id>", block: true, timeout: 600000 })
|
||||
```
|
||||
|
||||
**重要**:
|
||||
|
||||
* 必须指定 `timeout: 600000`,否则默认 30 秒会导致过早超时
|
||||
* 如果 10 分钟后仍未完成,继续使用 `TaskOutput` 轮询,**切勿终止进程**
|
||||
* 如果因超时而跳过等待,**必须调用 `AskUserQuestion` 询问用户是继续等待还是终止任务**
|
||||
|
||||
***
|
||||
|
||||
## 执行工作流
|
||||
|
||||
**执行任务**:$ARGUMENTS
|
||||
|
||||
### 阶段 0:读取计划
|
||||
|
||||
`[Mode: Prepare]`
|
||||
|
||||
1. **识别输入类型**:
|
||||
* 计划文件路径(例如 `.claude/plan/xxx.md`)
|
||||
* 直接任务描述
|
||||
|
||||
2. **读取计划内容**:
|
||||
* 如果提供了计划文件路径,读取并解析
|
||||
* 提取:任务类型、实施步骤、关键文件、SESSION\_ID
|
||||
|
||||
3. **执行前确认**:
|
||||
* 如果输入是“直接任务描述”或计划缺少 `SESSION_ID` / 关键文件:先与用户确认
|
||||
* 如果无法确认用户已回复“Y”到计划:在继续前必须再次确认
|
||||
|
||||
4. **任务类型路由**:
|
||||
|
||||
| 任务类型 | 检测 | 路由 |
|
||||
|-----------|-----------|-------|
|
||||
| **前端** | 页面、组件、UI、样式、布局 | Gemini |
|
||||
| **后端** | API、接口、数据库、逻辑、算法 | Codex |
|
||||
| **全栈** | 包含前端和后端 | Codex ∥ Gemini 并行 |
|
||||
|
||||
***
|
||||
|
||||
### 阶段 1:快速上下文检索
|
||||
|
||||
`[Mode: Retrieval]`
|
||||
|
||||
**必须使用 MCP 工具进行快速上下文检索,切勿手动逐个读取文件**
|
||||
|
||||
基于计划中的“关键文件”列表,调用 `mcp__ace-tool__search_context`:
|
||||
|
||||
```
|
||||
mcp__ace-tool__search_context({
|
||||
query: "<semantic query based on plan content, including key files, modules, function names>",
|
||||
project_root_path: "$PWD"
|
||||
})
|
||||
```
|
||||
|
||||
**检索策略**:
|
||||
|
||||
* 从计划的“关键文件”表中提取目标路径
|
||||
* 构建语义查询覆盖:入口文件、依赖模块、相关类型定义
|
||||
* 如果结果不足,添加 1-2 次递归检索
|
||||
* **切勿**使用 Bash + find/ls 手动探索项目结构
|
||||
|
||||
**检索后**:
|
||||
|
||||
* 组织检索到的代码片段
|
||||
* 确认实施所需的完整上下文
|
||||
* 进入阶段 3
|
||||
|
||||
***
|
||||
|
||||
### 阶段 3:原型获取
|
||||
|
||||
`[Mode: Prototype]`
|
||||
|
||||
**基于任务类型路由**:
|
||||
|
||||
#### 路由 A:前端/UI/样式 → Gemini
|
||||
|
||||
**限制**:上下文 < 32k 令牌
|
||||
|
||||
1. 调用 Gemini(使用 `~/.claude/.ccg/prompts/gemini/frontend.md`)
|
||||
2. 输入:计划内容 + 检索到的上下文 + 目标文件
|
||||
3. 输出:`Unified Diff Patch ONLY. Strictly prohibit any actual modifications.`
|
||||
4. **Gemini 是前端设计权威,其 CSS/React/Vue 原型是最终的视觉基线**
|
||||
5. **警告**:忽略 Gemini 的后端逻辑建议
|
||||
6. 如果计划包含 `GEMINI_SESSION`:优先使用 `resume <GEMINI_SESSION>`
|
||||
|
||||
#### 路由 B:后端/逻辑/算法 → Codex
|
||||
|
||||
1. 调用 Codex(使用 `~/.claude/.ccg/prompts/codex/architect.md`)
|
||||
2. 输入:计划内容 + 检索到的上下文 + 目标文件
|
||||
3. 输出:`Unified Diff Patch ONLY. Strictly prohibit any actual modifications.`
|
||||
4. **Codex 是后端逻辑权威,利用其逻辑推理和调试能力**
|
||||
5. 如果计划包含 `CODEX_SESSION`:优先使用 `resume <CODEX_SESSION>`
|
||||
|
||||
#### 路由 C:全栈 → 并行调用
|
||||
|
||||
1. **并行调用**(`run_in_background: true`):
|
||||
* Gemini:处理前端部分
|
||||
* Codex:处理后端部分
|
||||
2. 使用 `TaskOutput` 等待两个模型的完整结果
|
||||
3. 每个模型使用计划中相应的 `SESSION_ID` 作为 `resume`(如果缺失则创建新会话)
|
||||
|
||||
**遵循上面 `IMPORTANT` 中的 `Multi-Model Call Specification` 指令**
|
||||
|
||||
***
|
||||
|
||||
### 阶段 4:代码实施
|
||||
|
||||
`[Mode: Implement]`
|
||||
|
||||
**Claude 作为代码主权执行以下步骤**:
|
||||
|
||||
1. **读取差异**:解析 Codex/Gemini 返回的统一差异补丁
|
||||
|
||||
2. **心智沙盒**:
|
||||
* 模拟将差异应用到目标文件
|
||||
* 检查逻辑一致性
|
||||
* 识别潜在冲突或副作用
|
||||
|
||||
3. **重构与清理**:
|
||||
* 将“脏原型”重构为**高度可读、可维护、企业级代码**
|
||||
* 移除冗余代码
|
||||
* 确保符合项目现有代码标准
|
||||
* **除非必要,不要生成注释/文档**,代码应具有自解释性
|
||||
|
||||
4. **最小范围**:
|
||||
* 更改仅限于需求范围
|
||||
* **强制审查**副作用
|
||||
* 进行针对性修正
|
||||
|
||||
5. **应用更改**:
|
||||
* 使用编辑/写入工具执行实际修改
|
||||
* **仅修改必要代码**,绝不影响用户的其他现有功能
|
||||
|
||||
6. **自验证**(强烈推荐):
|
||||
* 运行项目现有的 lint / 类型检查 / 测试(优先考虑最小相关范围)
|
||||
* 如果失败:先修复回归问题,然后进入阶段 5
|
||||
|
||||
***
|
||||
|
||||
### 阶段 5:审计与交付
|
||||
|
||||
`[Mode: Audit]`
|
||||
|
||||
#### 5.1 自动审计
|
||||
|
||||
**更改生效后,必须立即并行调用** Codex 和 Gemini 进行代码审查:
|
||||
|
||||
1. **Codex 审查**(`run_in_background: true`):
|
||||
* ROLE\_FILE:`~/.claude/.ccg/prompts/codex/reviewer.md`
|
||||
* 输入:更改的差异 + 目标文件
|
||||
* 重点:安全性、性能、错误处理、逻辑正确性
|
||||
|
||||
2. **Gemini 审查**(`run_in_background: true`):
|
||||
* ROLE\_FILE:`~/.claude/.ccg/prompts/gemini/reviewer.md`
|
||||
* 输入:更改的差异 + 目标文件
|
||||
* 重点:可访问性、设计一致性、用户体验
|
||||
|
||||
使用 `TaskOutput` 等待两个模型的完整审查结果。优先重用阶段 3 的会话(`resume <SESSION_ID>`)以确保上下文一致性。
|
||||
|
||||
#### 5.2 整合与修复
|
||||
|
||||
1. 综合 Codex + Gemini 的审查反馈
|
||||
2. 按信任规则权衡:后端遵循 Codex,前端遵循 Gemini
|
||||
3. 执行必要的修复
|
||||
4. 根据需要重复阶段 5.1(直到风险可接受)
|
||||
|
||||
#### 5.3 交付确认
|
||||
|
||||
审计通过后,向用户报告:
|
||||
|
||||
```markdown
|
||||
## 执行完成
|
||||
|
||||
### 变更摘要
|
||||
| 文件 | 操作 | 描述 |
|
||||
|------|-----------|-------------|
|
||||
| path/to/file.ts | 已修改 | 描述 |
|
||||
|
||||
### 审计结果
|
||||
- Codex: <通过/发现 N 个问题>
|
||||
- Gemini: <通过/发现 N 个问题>
|
||||
|
||||
### 建议
|
||||
1. [ ] <建议的测试步骤>
|
||||
2. [ ] <建议的验证步骤>
|
||||
|
||||
```
|
||||
|
||||
***
|
||||
|
||||
## 关键规则
|
||||
|
||||
1. **代码主权** – 所有文件修改由 Claude 执行,外部模型零写入权限
|
||||
2. **脏原型重构** – Codex/Gemini 输出视为草稿,必须重构
|
||||
3. **信任规则** – 后端遵循 Codex,前端遵循 Gemini
|
||||
4. **最小更改** – 仅修改必要代码,无副作用
|
||||
5. **强制审计** – 更改后必须执行多模型代码审查
|
||||
|
||||
***
|
||||
|
||||
## 使用方法
|
||||
|
||||
```bash
|
||||
# Execute plan file
|
||||
/ccg:execute .claude/plan/feature-name.md
|
||||
|
||||
# Execute task directly (for plans already discussed in context)
|
||||
/ccg:execute implement user authentication based on previous plan
|
||||
```
|
||||
|
||||
***
|
||||
|
||||
## 与 /ccg:plan 的关系
|
||||
|
||||
1. `/ccg:plan` 生成计划 + SESSION\_ID
|
||||
2. 用户用“Y”确认
|
||||
3. `/ccg:execute` 读取计划,重用 SESSION\_ID,执行实施
|
||||
162
docs/zh-CN/commands/multi-frontend.md
Normal file
162
docs/zh-CN/commands/multi-frontend.md
Normal file
@@ -0,0 +1,162 @@
|
||||
# 前端 - 前端聚焦开发
|
||||
|
||||
前端聚焦的工作流(研究 → 构思 → 规划 → 执行 → 优化 → 评审),由 Gemini 主导。
|
||||
|
||||
## 使用方法
|
||||
|
||||
```bash
|
||||
/frontend <UI task description>
|
||||
```
|
||||
|
||||
## 上下文
|
||||
|
||||
* 前端任务: $ARGUMENTS
|
||||
* Gemini 主导,Codex 作为辅助参考
|
||||
* 适用场景: 组件设计、响应式布局、UI 动画、样式优化
|
||||
|
||||
## 您的角色
|
||||
|
||||
您是 **前端协调器**,为 UI/UX 任务协调多模型协作(研究 → 构思 → 规划 → 执行 → 优化 → 评审)。
|
||||
|
||||
**协作模型**:
|
||||
|
||||
* **Gemini** – 前端 UI/UX(**前端权威,可信赖**)
|
||||
* **Codex** – 后端视角(**前端意见仅供参考**)
|
||||
* **Claude(自身)** – 协调、规划、执行、交付
|
||||
|
||||
***
|
||||
|
||||
## 多模型调用规范
|
||||
|
||||
**调用语法**:
|
||||
|
||||
```
|
||||
# New session call
|
||||
Bash({
|
||||
command: "~/.claude/bin/codeagent-wrapper {{LITE_MODE_FLAG}}--backend gemini --gemini-model gemini-3-pro-preview - \"$PWD\" <<'EOF'
|
||||
ROLE_FILE: <role prompt path>
|
||||
<TASK>
|
||||
Requirement: <enhanced requirement (or $ARGUMENTS if not enhanced)>
|
||||
Context: <project context and analysis from previous phases>
|
||||
</TASK>
|
||||
OUTPUT: Expected output format
|
||||
EOF",
|
||||
run_in_background: false,
|
||||
timeout: 3600000,
|
||||
description: "Brief description"
|
||||
})
|
||||
|
||||
# Resume session call
|
||||
Bash({
|
||||
command: "~/.claude/bin/codeagent-wrapper {{LITE_MODE_FLAG}}--backend gemini --gemini-model gemini-3-pro-preview resume <SESSION_ID> - \"$PWD\" <<'EOF'
|
||||
ROLE_FILE: <role prompt path>
|
||||
<TASK>
|
||||
Requirement: <enhanced requirement (or $ARGUMENTS if not enhanced)>
|
||||
Context: <project context and analysis from previous phases>
|
||||
</TASK>
|
||||
OUTPUT: Expected output format
|
||||
EOF",
|
||||
run_in_background: false,
|
||||
timeout: 3600000,
|
||||
description: "Brief description"
|
||||
})
|
||||
```
|
||||
|
||||
**角色提示词**:
|
||||
|
||||
| 阶段 | Gemini |
|
||||
|-------|--------|
|
||||
| 分析 | `~/.claude/.ccg/prompts/gemini/analyzer.md` |
|
||||
| 规划 | `~/.claude/.ccg/prompts/gemini/architect.md` |
|
||||
| 评审 | `~/.claude/.ccg/prompts/gemini/reviewer.md` |
|
||||
|
||||
**会话重用**: 每次调用返回 `SESSION_ID: xxx`,在后续阶段使用 `resume xxx`。在阶段 2 保存 `GEMINI_SESSION`,在阶段 3 和 5 使用 `resume`。
|
||||
|
||||
***
|
||||
|
||||
## 沟通指南
|
||||
|
||||
1. 以模式标签 `[Mode: X]` 开始响应,初始为 `[Mode: Research]`
|
||||
2. 遵循严格顺序: `Research → Ideation → Plan → Execute → Optimize → Review`
|
||||
3. 需要时(例如确认/选择/批准)使用 `AskUserQuestion` 工具进行用户交互
|
||||
|
||||
***
|
||||
|
||||
## 核心工作流
|
||||
|
||||
### 阶段 0: 提示词增强(可选)
|
||||
|
||||
`[Mode: Prepare]` - 如果 ace-tool MCP 可用,调用 `mcp__ace-tool__enhance_prompt`,**将原始的 $ARGUMENTS 替换为增强后的结果,用于后续的 Gemini 调用**
|
||||
|
||||
### 阶段 1: 研究
|
||||
|
||||
`[Mode: Research]` - 理解需求并收集上下文
|
||||
|
||||
1. **代码检索**(如果 ace-tool MCP 可用): 调用 `mcp__ace-tool__search_context` 来检索现有的组件、样式、设计系统
|
||||
2. 需求完整性评分(0-10): >=7 继续,<7 停止并补充
|
||||
|
||||
### 阶段 2: 构思
|
||||
|
||||
`[Mode: Ideation]` - Gemini 主导的分析
|
||||
|
||||
**必须调用 Gemini**(遵循上述调用规范):
|
||||
|
||||
* ROLE\_FILE: `~/.claude/.ccg/prompts/gemini/analyzer.md`
|
||||
* 需求: 增强后的需求(或未经增强的 $ARGUMENTS)
|
||||
* 上下文: 来自阶段 1 的项目上下文
|
||||
* 输出: UI 可行性分析、推荐解决方案(至少 2 个)、UX 评估
|
||||
|
||||
**保存 SESSION\_ID**(`GEMINI_SESSION`)以供后续阶段重用。
|
||||
|
||||
输出解决方案(至少 2 个),等待用户选择。
|
||||
|
||||
### 阶段 3: 规划
|
||||
|
||||
`[Mode: Plan]` - Gemini 主导的规划
|
||||
|
||||
**必须调用 Gemini**(使用 `resume <GEMINI_SESSION>` 来重用会话):
|
||||
|
||||
* ROLE\_FILE: `~/.claude/.ccg/prompts/gemini/architect.md`
|
||||
* 需求: 用户选择的解决方案
|
||||
* 上下文: 阶段 2 的分析结果
|
||||
* 输出: 组件结构、UI 流程、样式方案
|
||||
|
||||
Claude 综合规划,在用户批准后保存到 `.claude/plan/task-name.md`。
|
||||
|
||||
### 阶段 4: 实现
|
||||
|
||||
`[Mode: Execute]` - 代码开发
|
||||
|
||||
* 严格遵循批准的规划
|
||||
* 遵循现有项目设计系统和代码标准
|
||||
* 确保响应式设计、可访问性
|
||||
|
||||
### 阶段 5: 优化
|
||||
|
||||
`[Mode: Optimize]` - Gemini 主导的评审
|
||||
|
||||
**必须调用 Gemini**(遵循上述调用规范):
|
||||
|
||||
* ROLE\_FILE: `~/.claude/.ccg/prompts/gemini/reviewer.md`
|
||||
* 需求: 评审以下前端代码变更
|
||||
* 上下文: git diff 或代码内容
|
||||
* 输出: 可访问性、响应式设计、性能、设计一致性等问题列表
|
||||
|
||||
整合评审反馈,在用户确认后执行优化。
|
||||
|
||||
### 阶段 6: 质量评审
|
||||
|
||||
`[Mode: Review]` - 最终评估
|
||||
|
||||
* 对照规划检查完成情况
|
||||
* 验证响应式设计和可访问性
|
||||
* 报告问题与建议
|
||||
|
||||
***
|
||||
|
||||
## 关键规则
|
||||
|
||||
1. **Gemini 的前端意见是可信赖的**
|
||||
2. **Codex 的前端意见仅供参考**
|
||||
3. 外部模型**没有文件系统写入权限**
|
||||
4. Claude 处理所有代码写入和文件操作
|
||||
270
docs/zh-CN/commands/multi-plan.md
Normal file
270
docs/zh-CN/commands/multi-plan.md
Normal file
@@ -0,0 +1,270 @@
|
||||
# 计划 - 多模型协同规划
|
||||
|
||||
多模型协同规划 - 上下文检索 + 双模型分析 → 生成分步实施计划。
|
||||
|
||||
$ARGUMENTS
|
||||
|
||||
***
|
||||
|
||||
## 核心协议
|
||||
|
||||
* **语言协议**:与工具/模型交互时使用 **英语**,与用户沟通时使用其语言
|
||||
* **强制并行**:Codex/Gemini 调用 **必须** 使用 `run_in_background: true`(包括单模型调用,以避免阻塞主线程)
|
||||
* **代码主权**:外部模型 **零文件系统写入权限**,所有修改由 Claude 执行
|
||||
* **止损机制**:在当前阶段输出验证完成前,不进入下一阶段
|
||||
* **仅限规划**:此命令允许读取上下文并写入 `.claude/plan/*` 计划文件,但 **绝不修改生产代码**
|
||||
|
||||
***
|
||||
|
||||
## 多模型调用规范
|
||||
|
||||
**调用语法**(并行:使用 `run_in_background: true`):
|
||||
|
||||
```
|
||||
Bash({
|
||||
command: "~/.claude/bin/codeagent-wrapper {{LITE_MODE_FLAG}}--backend <codex|gemini> {{GEMINI_MODEL_FLAG}}- \"$PWD\" <<'EOF'
|
||||
ROLE_FILE: <role prompt path>
|
||||
<TASK>
|
||||
Requirement: <enhanced requirement>
|
||||
Context: <retrieved project context>
|
||||
</TASK>
|
||||
OUTPUT: Step-by-step implementation plan with pseudo-code. DO NOT modify any files.
|
||||
EOF",
|
||||
run_in_background: true,
|
||||
timeout: 3600000,
|
||||
description: "Brief description"
|
||||
})
|
||||
```
|
||||
|
||||
**模型参数说明**:
|
||||
|
||||
* `{{GEMINI_MODEL_FLAG}}`: 当使用 `--backend gemini` 时,替换为 `--gemini-model gemini-3-pro-preview`(注意尾随空格);对于 codex 使用空字符串
|
||||
|
||||
**角色提示**:
|
||||
|
||||
| 阶段 | Codex | Gemini |
|
||||
|-------|-------|--------|
|
||||
| 分析 | `~/.claude/.ccg/prompts/codex/analyzer.md` | `~/.claude/.ccg/prompts/gemini/analyzer.md` |
|
||||
| 规划 | `~/.claude/.ccg/prompts/codex/architect.md` | `~/.claude/.ccg/prompts/gemini/architect.md` |
|
||||
|
||||
**会话复用**:每次调用返回 `SESSION_ID: xxx`(通常由包装器输出),**必须保存** 供后续 `/ccg:execute` 使用。
|
||||
|
||||
**等待后台任务**(最大超时 600000ms = 10 分钟):
|
||||
|
||||
```
|
||||
TaskOutput({ task_id: "<task_id>", block: true, timeout: 600000 })
|
||||
```
|
||||
|
||||
**重要提示**:
|
||||
|
||||
* 必须指定 `timeout: 600000`,否则默认 30 秒会导致过早超时
|
||||
* 如果 10 分钟后仍未完成,继续使用 `TaskOutput` 轮询,**绝不终止进程**
|
||||
* 如果因超时而跳过等待,**必须调用 `AskUserQuestion` 询问用户是继续等待还是终止任务**
|
||||
|
||||
***
|
||||
|
||||
## 执行流程
|
||||
|
||||
**规划任务**:$ARGUMENTS
|
||||
|
||||
### 阶段 1:完整上下文检索
|
||||
|
||||
`[Mode: Research]`
|
||||
|
||||
#### 1.1 提示增强(必须先执行)
|
||||
|
||||
**必须调用 `mcp__ace-tool__enhance_prompt` 工具**:
|
||||
|
||||
```
|
||||
mcp__ace-tool__enhance_prompt({
|
||||
prompt: "$ARGUMENTS",
|
||||
conversation_history: "<last 5-10 conversation turns>",
|
||||
project_root_path: "$PWD"
|
||||
})
|
||||
```
|
||||
|
||||
等待增强后的提示,**将所有后续阶段的原始 $ARGUMENTS 替换为增强结果**。
|
||||
|
||||
#### 1.2 上下文检索
|
||||
|
||||
**调用 `mcp__ace-tool__search_context` 工具**:
|
||||
|
||||
```
|
||||
mcp__ace-tool__search_context({
|
||||
query: "<semantic query based on enhanced requirement>",
|
||||
project_root_path: "$PWD"
|
||||
})
|
||||
```
|
||||
|
||||
* 使用自然语言构建语义查询(Where/What/How)
|
||||
* **绝不基于假设回答**
|
||||
* 如果 MCP 不可用:回退到 Glob + Grep 进行文件发现和关键符号定位
|
||||
|
||||
#### 1.3 完整性检查
|
||||
|
||||
* 必须获取相关类、函数、变量的 **完整定义和签名**
|
||||
* 如果上下文不足,触发 **递归检索**
|
||||
* 输出优先级:入口文件 + 行号 + 关键符号名称;仅在必要时添加最小代码片段以消除歧义
|
||||
|
||||
#### 1.4 需求对齐
|
||||
|
||||
* 如果需求仍有歧义,**必须** 输出引导性问题给用户
|
||||
* 直到需求边界清晰(无遗漏,无冗余)
|
||||
|
||||
### 阶段 2:多模型协同分析
|
||||
|
||||
`[Mode: Analysis]`
|
||||
|
||||
#### 2.1 分发输入
|
||||
|
||||
**并行调用** Codex 和 Gemini(`run_in_background: true`):
|
||||
|
||||
将 **原始需求**(不预设观点)分发给两个模型:
|
||||
|
||||
1. **Codex 后端分析**:
|
||||
* ROLE\_FILE:`~/.claude/.ccg/prompts/codex/analyzer.md`
|
||||
* 重点:技术可行性、架构影响、性能考虑、潜在风险
|
||||
* 输出:多视角解决方案 + 优缺点分析
|
||||
|
||||
2. **Gemini 前端分析**:
|
||||
* ROLE\_FILE:`~/.claude/.ccg/prompts/gemini/analyzer.md`
|
||||
* 重点:UI/UX 影响、用户体验、视觉设计
|
||||
* 输出:多视角解决方案 + 优缺点分析
|
||||
|
||||
使用 `TaskOutput` 等待两个模型的完整结果。**保存 SESSION\_ID**(`CODEX_SESSION` 和 `GEMINI_SESSION`)。
|
||||
|
||||
#### 2.2 交叉验证
|
||||
|
||||
整合视角并迭代优化:
|
||||
|
||||
1. **识别共识**(强信号)
|
||||
2. **识别分歧**(需要权衡)
|
||||
3. **互补优势**:后端逻辑遵循 Codex,前端设计遵循 Gemini
|
||||
4. **逻辑推理**:消除解决方案中的逻辑漏洞
|
||||
|
||||
#### 2.3(可选但推荐)双模型计划草案
|
||||
|
||||
为减少 Claude 综合计划中的遗漏风险,可以并行让两个模型输出“计划草案”(仍然 **不允许** 修改文件):
|
||||
|
||||
1. **Codex 计划草案**(后端权威):
|
||||
* ROLE\_FILE:`~/.claude/.ccg/prompts/codex/architect.md`
|
||||
* 输出:分步计划 + 伪代码(重点:数据流/边缘情况/错误处理/测试策略)
|
||||
|
||||
2. **Gemini 计划草案**(前端权威):
|
||||
* ROLE\_FILE:`~/.claude/.ccg/prompts/gemini/architect.md`
|
||||
* 输出:分步计划 + 伪代码(重点:信息架构/交互/可访问性/视觉一致性)
|
||||
|
||||
使用 `TaskOutput` 等待两个模型的完整结果,记录它们建议的关键差异。
|
||||
|
||||
#### 2.4 生成实施计划(Claude 最终版本)
|
||||
|
||||
综合两个分析,生成 **分步实施计划**:
|
||||
|
||||
```markdown
|
||||
## 实施计划:<任务名称>
|
||||
|
||||
### 任务类型
|
||||
- [ ] 前端 (→ Gemini)
|
||||
- [ ] 后端 (→ Codex)
|
||||
- [ ] 全栈 (→ 并行)
|
||||
|
||||
### 技术解决方案
|
||||
<基于 Codex + Gemini 分析得出的最优解决方案>
|
||||
|
||||
### 实施步骤
|
||||
1. <步骤 1> - 预期交付物
|
||||
2. <步骤 2> - 预期交付物
|
||||
...
|
||||
|
||||
### 关键文件
|
||||
| 文件 | 操作 | 描述 |
|
||||
|------|-----------|-------------|
|
||||
| path/to/file.ts:L10-L50 | 修改 | 描述 |
|
||||
|
||||
### 风险与缓解措施
|
||||
| 风险 | 缓解措施 |
|
||||
|------|------------|
|
||||
|
||||
### SESSION_ID (供 /ccg:execute 使用)
|
||||
- CODEX_SESSION: <session_id>
|
||||
- GEMINI_SESSION: <session_id>
|
||||
|
||||
```
|
||||
|
||||
### 阶段 2 结束:计划交付(非执行)
|
||||
|
||||
**`/ccg:plan` 的职责到此结束,必须执行以下操作**:
|
||||
|
||||
1. 向用户呈现完整的实施计划(包括伪代码)
|
||||
|
||||
2. 将计划保存到 `.claude/plan/<feature-name>.md`(从需求中提取功能名称,例如 `user-auth`,`payment-module`)
|
||||
|
||||
3. 以 **粗体文本** 输出提示(必须使用实际保存的文件路径):
|
||||
|
||||
***
|
||||
|
||||
**计划已生成并保存至 `.claude/plan/actual-feature-name.md`**
|
||||
|
||||
**请审阅以上计划。您可以:**
|
||||
|
||||
* **修改计划**:告诉我需要调整的内容,我会更新计划
|
||||
* **执行计划**:复制以下命令到新会话
|
||||
|
||||
```
|
||||
/ccg:execute .claude/plan/actual-feature-name.md
|
||||
```
|
||||
|
||||
***
|
||||
|
||||
**注意**:上面的 `actual-feature-name.md` 必须替换为实际保存的文件名!
|
||||
|
||||
4. **立即终止当前响应**(在此停止。不再进行工具调用。)
|
||||
|
||||
**绝对禁止**:
|
||||
|
||||
* 询问用户“是/否”然后自动执行(执行是 `/ccg:execute` 的职责)
|
||||
* 任何对生产代码的写入操作
|
||||
* 自动调用 `/ccg:execute` 或任何实施操作
|
||||
* 当用户未明确请求修改时继续触发模型调用
|
||||
|
||||
***
|
||||
|
||||
## 计划保存
|
||||
|
||||
规划完成后,将计划保存至:
|
||||
|
||||
* **首次规划**:`.claude/plan/<feature-name>.md`
|
||||
* **迭代版本**:`.claude/plan/<feature-name>-v2.md`,`.claude/plan/<feature-name>-v3.md`...
|
||||
|
||||
计划文件写入应在向用户呈现计划前完成。
|
||||
|
||||
***
|
||||
|
||||
## 计划修改流程
|
||||
|
||||
如果用户请求修改计划:
|
||||
|
||||
1. 根据用户反馈调整计划内容
|
||||
2. 更新 `.claude/plan/<feature-name>.md` 文件
|
||||
3. 重新呈现修改后的计划
|
||||
4. 提示用户再次审阅或执行
|
||||
|
||||
***
|
||||
|
||||
## 后续步骤
|
||||
|
||||
用户批准后,**手动** 执行:
|
||||
|
||||
```bash
|
||||
/ccg:execute .claude/plan/<feature-name>.md
|
||||
```
|
||||
|
||||
***
|
||||
|
||||
## 关键规则
|
||||
|
||||
1. **仅规划,不实施** – 此命令不执行任何代码更改
|
||||
2. **无是/否提示** – 仅呈现计划,让用户决定后续步骤
|
||||
3. **信任规则** – 后端遵循 Codex,前端遵循 Gemini
|
||||
4. 外部模型 **零文件系统写入权限**
|
||||
5. **SESSION\_ID 交接** – 计划末尾必须包含 `CODEX_SESSION` / `GEMINI_SESSION`(供 `/ccg:execute resume <SESSION_ID>` 使用)
|
||||
189
docs/zh-CN/commands/multi-workflow.md
Normal file
189
docs/zh-CN/commands/multi-workflow.md
Normal file
@@ -0,0 +1,189 @@
|
||||
# 工作流程 - 多模型协同开发
|
||||
|
||||
多模型协同开发工作流程(研究 → 构思 → 规划 → 执行 → 优化 → 审查),带有智能路由:前端 → Gemini,后端 → Codex。
|
||||
|
||||
结构化开发工作流程,包含质量门控、MCP 服务和多模型协作。
|
||||
|
||||
## 使用方法
|
||||
|
||||
```bash
|
||||
/workflow <task description>
|
||||
```
|
||||
|
||||
## 上下文
|
||||
|
||||
* 待开发任务:$ARGUMENTS
|
||||
* 结构化的 6 阶段工作流程,包含质量门控
|
||||
* 多模型协作:Codex(后端) + Gemini(前端) + Claude(编排)
|
||||
* MCP 服务集成(ace-tool)以增强能力
|
||||
|
||||
## 你的角色
|
||||
|
||||
你是**编排者**,协调一个多模型协作系统(研究 → 构思 → 规划 → 执行 → 优化 → 审查)。为有经验的开发者进行简洁、专业的沟通。
|
||||
|
||||
**协作模型**:
|
||||
|
||||
* **ace-tool MCP** – 代码检索 + 提示词增强
|
||||
* **Codex** – 后端逻辑、算法、调试(**后端权威,可信赖**)
|
||||
* **Gemini** – 前端 UI/UX、视觉设计(**前端专家,后端意见仅供参考**)
|
||||
* **Claude(自身)** – 编排、规划、执行、交付
|
||||
|
||||
***
|
||||
|
||||
## 多模型调用规范
|
||||
|
||||
**调用语法**(并行:`run_in_background: true`,串行:`false`):
|
||||
|
||||
```
|
||||
# New session call
|
||||
Bash({
|
||||
command: "~/.claude/bin/codeagent-wrapper {{LITE_MODE_FLAG}}--backend <codex|gemini> {{GEMINI_MODEL_FLAG}}- \"$PWD\" <<'EOF'
|
||||
ROLE_FILE: <role prompt path>
|
||||
<TASK>
|
||||
Requirement: <enhanced requirement (or $ARGUMENTS if not enhanced)>
|
||||
Context: <project context and analysis from previous phases>
|
||||
</TASK>
|
||||
OUTPUT: Expected output format
|
||||
EOF",
|
||||
run_in_background: true,
|
||||
timeout: 3600000,
|
||||
description: "Brief description"
|
||||
})
|
||||
|
||||
# Resume session call
|
||||
Bash({
|
||||
command: "~/.claude/bin/codeagent-wrapper {{LITE_MODE_FLAG}}--backend <codex|gemini> {{GEMINI_MODEL_FLAG}}resume <SESSION_ID> - \"$PWD\" <<'EOF'
|
||||
ROLE_FILE: <role prompt path>
|
||||
<TASK>
|
||||
Requirement: <enhanced requirement (or $ARGUMENTS if not enhanced)>
|
||||
Context: <project context and analysis from previous phases>
|
||||
</TASK>
|
||||
OUTPUT: Expected output format
|
||||
EOF",
|
||||
run_in_background: true,
|
||||
timeout: 3600000,
|
||||
description: "Brief description"
|
||||
})
|
||||
```
|
||||
|
||||
**模型参数说明**:
|
||||
|
||||
* `{{GEMINI_MODEL_FLAG}}`: 当使用 `--backend gemini` 时,替换为 `--gemini-model gemini-3-pro-preview`(注意末尾空格);对于 codex 使用空字符串
|
||||
|
||||
**角色提示词**:
|
||||
|
||||
| 阶段 | Codex | Gemini |
|
||||
|-------|-------|--------|
|
||||
| 分析 | `~/.claude/.ccg/prompts/codex/analyzer.md` | `~/.claude/.ccg/prompts/gemini/analyzer.md` |
|
||||
| 规划 | `~/.claude/.ccg/prompts/codex/architect.md` | `~/.claude/.ccg/prompts/gemini/architect.md` |
|
||||
| 审查 | `~/.claude/.ccg/prompts/codex/reviewer.md` | `~/.claude/.ccg/prompts/gemini/reviewer.md` |
|
||||
|
||||
**会话复用**:每次调用返回 `SESSION_ID: xxx`,在后续阶段使用 `resume xxx` 子命令(注意:`resume`,而非 `--resume`)。
|
||||
|
||||
**并行调用**:使用 `run_in_background: true` 启动,使用 `TaskOutput` 等待结果。**必须等待所有模型返回后才能进入下一阶段**。
|
||||
|
||||
**等待后台任务**(使用最大超时 600000ms = 10 分钟):
|
||||
|
||||
```
|
||||
TaskOutput({ task_id: "<task_id>", block: true, timeout: 600000 })
|
||||
```
|
||||
|
||||
**重要**:
|
||||
|
||||
* 必须指定 `timeout: 600000`,否则默认 30 秒会导致过早超时。
|
||||
* 如果 10 分钟后仍未完成,继续使用 `TaskOutput` 轮询,**切勿终止进程**。
|
||||
* 如果因超时而跳过等待,**必须调用 `AskUserQuestion` 询问用户是继续等待还是终止任务。切勿直接终止。**
|
||||
|
||||
***
|
||||
|
||||
## 沟通指南
|
||||
|
||||
1. 回复以模式标签 `[Mode: X]` 开头,初始为 `[Mode: Research]`。
|
||||
2. 遵循严格顺序:`Research → Ideation → Plan → Execute → Optimize → Review`。
|
||||
3. 每个阶段完成后请求用户确认。
|
||||
4. 当评分 < 7 或用户不批准时强制停止。
|
||||
5. 需要时(例如确认/选择/批准)使用 `AskUserQuestion` 工具进行用户交互。
|
||||
|
||||
***
|
||||
|
||||
## 执行工作流程
|
||||
|
||||
**任务描述**:$ARGUMENTS
|
||||
|
||||
### 阶段 1:研究与分析
|
||||
|
||||
`[Mode: Research]` - 理解需求并收集上下文:
|
||||
|
||||
1. **提示词增强**:调用 `mcp__ace-tool__enhance_prompt`,**将所有后续对 Codex/Gemini 的调用中的原始 $ARGUMENTS 替换为增强后的结果**
|
||||
2. **上下文检索**:调用 `mcp__ace-tool__search_context`
|
||||
3. **需求完整性评分** (0-10):
|
||||
* 目标清晰度 (0-3),预期成果 (0-3),范围边界 (0-2),约束条件 (0-2)
|
||||
* ≥7:继续 | <7:停止,询问澄清问题
|
||||
|
||||
### 阶段 2:解决方案构思
|
||||
|
||||
`[Mode: Ideation]` - 多模型并行分析:
|
||||
|
||||
**并行调用** (`run_in_background: true`):
|
||||
|
||||
* Codex:使用分析器提示词,输出技术可行性、解决方案、风险
|
||||
* Gemini:使用分析器提示词,输出 UI 可行性、解决方案、UX 评估
|
||||
|
||||
使用 `TaskOutput` 等待结果。**保存 SESSION\_ID** (`CODEX_SESSION` 和 `GEMINI_SESSION`)。
|
||||
|
||||
**遵循上方 `Multi-Model Call Specification` 中的 `IMPORTANT` 说明**
|
||||
|
||||
综合两项分析,输出解决方案比较(至少 2 个选项),等待用户选择。
|
||||
|
||||
### 阶段 3:详细规划
|
||||
|
||||
`[Mode: Plan]` - 多模型协作规划:
|
||||
|
||||
**并行调用**(使用 `resume <SESSION_ID>` 恢复会话):
|
||||
|
||||
* Codex:使用架构师提示词 + `resume $CODEX_SESSION`,输出后端架构
|
||||
* Gemini:使用架构师提示词 + `resume $GEMINI_SESSION`,输出前端架构
|
||||
|
||||
使用 `TaskOutput` 等待结果。
|
||||
|
||||
**遵循上方 `Multi-Model Call Specification` 中的 `IMPORTANT` 说明**
|
||||
|
||||
**Claude 综合**:采纳 Codex 后端计划 + Gemini 前端计划,在用户批准后保存到 `.claude/plan/task-name.md`。
|
||||
|
||||
### 阶段 4:实施
|
||||
|
||||
`[Mode: Execute]` - 代码开发:
|
||||
|
||||
* 严格遵循批准的计划
|
||||
* 遵循现有项目代码标准
|
||||
* 在关键里程碑请求反馈
|
||||
|
||||
### 阶段 5:代码优化
|
||||
|
||||
`[Mode: Optimize]` - 多模型并行审查:
|
||||
|
||||
**并行调用**:
|
||||
|
||||
* Codex:使用审查者提示词,关注安全性、性能、错误处理
|
||||
* Gemini:使用审查者提示词,关注可访问性、设计一致性
|
||||
|
||||
使用 `TaskOutput` 等待结果。整合审查反馈,在用户确认后执行优化。
|
||||
|
||||
**遵循上方 `Multi-Model Call Specification` 中的 `IMPORTANT` 说明**
|
||||
|
||||
### 阶段 6:质量审查
|
||||
|
||||
`[Mode: Review]` - 最终评估:
|
||||
|
||||
* 对照计划检查完成情况
|
||||
* 运行测试以验证功能
|
||||
* 报告问题和建议
|
||||
* 请求最终用户确认
|
||||
|
||||
***
|
||||
|
||||
## 关键规则
|
||||
|
||||
1. 阶段顺序不可跳过(除非用户明确指示)
|
||||
2. 外部模型**对文件系统零写入权限**,所有修改由 Claude 执行
|
||||
3. 当评分 < 7 或用户不批准时**强制停止**
|
||||
283
docs/zh-CN/commands/pm2.md
Normal file
283
docs/zh-CN/commands/pm2.md
Normal file
@@ -0,0 +1,283 @@
|
||||
# PM2 初始化
|
||||
|
||||
自动分析项目并生成 PM2 服务命令。
|
||||
|
||||
**命令**: `$ARGUMENTS`
|
||||
|
||||
***
|
||||
|
||||
## 工作流程
|
||||
|
||||
1. 检查 PM2(如果缺失,通过 `npm install -g pm2` 安装)
|
||||
2. 扫描项目以识别服务(前端/后端/数据库)
|
||||
3. 生成配置文件和各命令文件
|
||||
|
||||
***
|
||||
|
||||
## 服务检测
|
||||
|
||||
| 类型 | 检测方式 | 默认端口 |
|
||||
|------|-----------|--------------|
|
||||
| Vite | vite.config.\* | 5173 |
|
||||
| Next.js | next.config.\* | 3000 |
|
||||
| Nuxt | nuxt.config.\* | 3000 |
|
||||
| CRA | package.json 中的 react-scripts | 3000 |
|
||||
| Express/Node | server/backend/api 目录 + package.json | 3000 |
|
||||
| FastAPI/Flask | requirements.txt / pyproject.toml | 8000 |
|
||||
| Go | go.mod / main.go | 8080 |
|
||||
|
||||
**端口检测优先级**: 用户指定 > .env 文件 > 配置文件 > 脚本参数 > 默认端口
|
||||
|
||||
***
|
||||
|
||||
## 生成的文件
|
||||
|
||||
```
|
||||
project/
|
||||
├── ecosystem.config.cjs # PM2 config
|
||||
├── {backend}/start.cjs # Python wrapper (if applicable)
|
||||
└── .claude/
|
||||
├── commands/
|
||||
│ ├── pm2-all.md # Start all + monit
|
||||
│ ├── pm2-all-stop.md # Stop all
|
||||
│ ├── pm2-all-restart.md # Restart all
|
||||
│ ├── pm2-{port}.md # Start single + logs
|
||||
│ ├── pm2-{port}-stop.md # Stop single
|
||||
│ ├── pm2-{port}-restart.md # Restart single
|
||||
│ ├── pm2-logs.md # View all logs
|
||||
│ └── pm2-status.md # View status
|
||||
└── scripts/
|
||||
├── pm2-logs-{port}.ps1 # Single service logs
|
||||
└── pm2-monit.ps1 # PM2 monitor
|
||||
```
|
||||
|
||||
***
|
||||
|
||||
## Windows 配置(重要)
|
||||
|
||||
### ecosystem.config.cjs
|
||||
|
||||
**必须使用 `.cjs` 扩展名**
|
||||
|
||||
```javascript
|
||||
module.exports = {
|
||||
apps: [
|
||||
// Node.js (Vite/Next/Nuxt)
|
||||
{
|
||||
name: 'project-3000',
|
||||
cwd: './packages/web',
|
||||
script: 'node_modules/vite/bin/vite.js',
|
||||
args: '--port 3000',
|
||||
interpreter: 'C:/Program Files/nodejs/node.exe',
|
||||
env: { NODE_ENV: 'development' }
|
||||
},
|
||||
// Python
|
||||
{
|
||||
name: 'project-8000',
|
||||
cwd: './backend',
|
||||
script: 'start.cjs',
|
||||
interpreter: 'C:/Program Files/nodejs/node.exe',
|
||||
env: { PYTHONUNBUFFERED: '1' }
|
||||
}
|
||||
]
|
||||
}
|
||||
```
|
||||
|
||||
**框架脚本路径:**
|
||||
|
||||
| 框架 | script | args |
|
||||
|-----------|--------|------|
|
||||
| Vite | `node_modules/vite/bin/vite.js` | `--port {port}` |
|
||||
| Next.js | `node_modules/next/dist/bin/next` | `dev -p {port}` |
|
||||
| Nuxt | `node_modules/nuxt/bin/nuxt.mjs` | `dev --port {port}` |
|
||||
| Express | `src/index.js` 或 `server.js` | - |
|
||||
|
||||
### Python 包装脚本 (start.cjs)
|
||||
|
||||
```javascript
|
||||
const { spawn } = require('child_process');
|
||||
const proc = spawn('python', ['-m', 'uvicorn', 'app.main:app', '--host', '0.0.0.0', '--port', '8000', '--reload'], {
|
||||
cwd: __dirname, stdio: 'inherit', windowsHide: true
|
||||
});
|
||||
proc.on('close', (code) => process.exit(code));
|
||||
```
|
||||
|
||||
***
|
||||
|
||||
## 命令文件模板(最简内容)
|
||||
|
||||
### pm2-all.md (启动所有 + 监控)
|
||||
|
||||
````markdown
|
||||
启动所有服务并打开 PM2 监控器。
|
||||
```bash
|
||||
cd "{PROJECT_ROOT}" && pm2 start ecosystem.config.cjs && start wt.exe -d "{PROJECT_ROOT}" pwsh -NoExit -c "pm2 monit"
|
||||
```
|
||||
````
|
||||
|
||||
### pm2-all-stop.md
|
||||
|
||||
````markdown
|
||||
停止所有服务。
|
||||
```bash
|
||||
cd "{PROJECT_ROOT}" && pm2 stop all
|
||||
```
|
||||
````
|
||||
|
||||
### pm2-all-restart.md
|
||||
|
||||
````markdown
|
||||
重启所有服务。
|
||||
```bash
|
||||
cd "{PROJECT_ROOT}" && pm2 restart all
|
||||
```
|
||||
````
|
||||
|
||||
### pm2-{port}.md (启动单个 + 日志)
|
||||
|
||||
````markdown
|
||||
启动 {name} ({port}) 并打开日志。
|
||||
```bash
|
||||
cd "{PROJECT_ROOT}" && pm2 start ecosystem.config.cjs --only {name} && start wt.exe -d "{PROJECT_ROOT}" pwsh -NoExit -c "pm2 logs {name}"
|
||||
```
|
||||
````
|
||||
|
||||
### pm2-{port}-stop.md
|
||||
|
||||
````markdown
|
||||
停止 {name} ({port})。
|
||||
```bash
|
||||
cd "{PROJECT_ROOT}" && pm2 stop {name}
|
||||
```
|
||||
````
|
||||
|
||||
### pm2-{port}-restart.md
|
||||
|
||||
````markdown
|
||||
重启 {name} ({port})。
|
||||
```bash
|
||||
cd "{PROJECT_ROOT}" && pm2 restart {name}
|
||||
```
|
||||
````
|
||||
|
||||
### pm2-logs.md
|
||||
|
||||
````markdown
|
||||
查看所有 PM2 日志。
|
||||
```bash
|
||||
cd "{PROJECT_ROOT}" && pm2 logs
|
||||
```
|
||||
````
|
||||
|
||||
### pm2-status.md
|
||||
|
||||
````markdown
|
||||
查看 PM2 状态。
|
||||
```bash
|
||||
cd "{PROJECT_ROOT}" && pm2 status
|
||||
```
|
||||
````
|
||||
|
||||
### PowerShell 脚本 (pm2-logs-{port}.ps1)
|
||||
|
||||
```powershell
|
||||
Set-Location "{PROJECT_ROOT}"
|
||||
pm2 logs {name}
|
||||
```
|
||||
|
||||
### PowerShell 脚本 (pm2-monit.ps1)
|
||||
|
||||
```powershell
|
||||
Set-Location "{PROJECT_ROOT}"
|
||||
pm2 monit
|
||||
```
|
||||
|
||||
***
|
||||
|
||||
## 关键规则
|
||||
|
||||
1. **配置文件**: `ecosystem.config.cjs` (不是 .js)
|
||||
2. **Node.js**: 直接指定 bin 路径 + 解释器
|
||||
3. **Python**: Node.js 包装脚本 + `windowsHide: true`
|
||||
4. **打开新窗口**: `start wt.exe -d "{path}" pwsh -NoExit -c "command"`
|
||||
5. **最简内容**: 每个命令文件只有 1-2 行描述 + bash 代码块
|
||||
6. **直接执行**: 无需 AI 解析,直接运行 bash 命令
|
||||
|
||||
***
|
||||
|
||||
## 执行
|
||||
|
||||
基于 `$ARGUMENTS`,执行初始化:
|
||||
|
||||
1. 扫描项目服务
|
||||
2. 生成 `ecosystem.config.cjs`
|
||||
3. 为 Python 服务生成 `{backend}/start.cjs`(如果适用)
|
||||
4. 在 `.claude/commands/` 中生成命令文件
|
||||
5. 在 `.claude/scripts/` 中生成脚本文件
|
||||
6. **更新项目 CLAUDE.md**,添加 PM2 信息(见下文)
|
||||
7. **显示完成摘要**,包含终端命令
|
||||
|
||||
***
|
||||
|
||||
## 初始化后:更新 CLAUDE.md
|
||||
|
||||
生成文件后,将 PM2 部分追加到项目的 `CLAUDE.md`(如果不存在则创建):
|
||||
|
||||
````markdown
|
||||
## PM2 服务
|
||||
|
||||
| 端口 | 名称 | 类型 |
|
||||
|------|------|------|
|
||||
| {port} | {name} | {type} |
|
||||
|
||||
**终端命令:**
|
||||
```bash
|
||||
pm2 start ecosystem.config.cjs # First time
|
||||
pm2 start all # After first time
|
||||
pm2 stop all / pm2 restart all
|
||||
pm2 start {name} / pm2 stop {name}
|
||||
pm2 logs / pm2 status / pm2 monit
|
||||
pm2 save # Save process list
|
||||
pm2 resurrect # Restore saved list
|
||||
```
|
||||
````
|
||||
|
||||
**更新 CLAUDE.md 的规则:**
|
||||
|
||||
* 如果存在 PM2 部分,替换它
|
||||
* 如果不存在,追加到末尾
|
||||
* 保持内容精简且必要
|
||||
|
||||
***
|
||||
|
||||
## 初始化后:显示摘要
|
||||
|
||||
所有文件生成后,输出:
|
||||
|
||||
```
|
||||
## PM2 Init Complete
|
||||
|
||||
**Services:**
|
||||
|
||||
| Port | Name | Type |
|
||||
|------|------|------|
|
||||
| {port} | {name} | {type} |
|
||||
|
||||
**Claude Commands:** /pm2-all, /pm2-all-stop, /pm2-{port}, /pm2-{port}-stop, /pm2-logs, /pm2-status
|
||||
|
||||
**Terminal Commands:**
|
||||
## First time (with config file)
|
||||
pm2 start ecosystem.config.cjs && pm2 save
|
||||
|
||||
## After first time (simplified)
|
||||
pm2 start all # Start all
|
||||
pm2 stop all # Stop all
|
||||
pm2 restart all # Restart all
|
||||
pm2 start {name} # Start single
|
||||
pm2 stop {name} # Stop single
|
||||
pm2 logs # View logs
|
||||
pm2 monit # Monitor panel
|
||||
pm2 resurrect # Restore saved processes
|
||||
|
||||
**Tip:** Run `pm2 save` after first start to enable simplified commands.
|
||||
```
|
||||
80
docs/zh-CN/rules/README.md
Normal file
80
docs/zh-CN/rules/README.md
Normal file
@@ -0,0 +1,80 @@
|
||||
# 规则
|
||||
|
||||
## 结构
|
||||
|
||||
规则被组织为一个**通用**层加上**语言特定**的目录:
|
||||
|
||||
```
|
||||
rules/
|
||||
├── common/ # Language-agnostic principles (always install)
|
||||
│ ├── coding-style.md
|
||||
│ ├── git-workflow.md
|
||||
│ ├── testing.md
|
||||
│ ├── performance.md
|
||||
│ ├── patterns.md
|
||||
│ ├── hooks.md
|
||||
│ ├── agents.md
|
||||
│ └── security.md
|
||||
├── typescript/ # TypeScript/JavaScript specific
|
||||
├── python/ # Python specific
|
||||
└── golang/ # Go specific
|
||||
```
|
||||
|
||||
* **common/** 包含通用原则 —— 没有语言特定的代码示例。
|
||||
* **语言目录** 通过框架特定的模式、工具和代码示例来扩展通用规则。每个文件都引用其对应的通用文件。
|
||||
|
||||
## 安装
|
||||
|
||||
### 选项 1:安装脚本(推荐)
|
||||
|
||||
```bash
|
||||
# Install common + one or more language-specific rule sets
|
||||
./install.sh typescript
|
||||
./install.sh python
|
||||
./install.sh golang
|
||||
|
||||
# Install multiple languages at once
|
||||
./install.sh typescript python
|
||||
```
|
||||
|
||||
### 选项 2:手动安装
|
||||
|
||||
> **重要提示:** 复制整个目录 —— 不要使用 `/*` 将其扁平化。
|
||||
> 通用目录和语言特定目录包含同名的文件。
|
||||
> 将它们扁平化到一个目录会导致语言特定的文件覆盖通用规则,并破坏语言特定文件使用的相对 `../common/` 引用。
|
||||
|
||||
```bash
|
||||
# Install common rules (required for all projects)
|
||||
cp -r rules/common ~/.claude/rules/common
|
||||
|
||||
# Install language-specific rules based on your project's tech stack
|
||||
cp -r rules/typescript ~/.claude/rules/typescript
|
||||
cp -r rules/python ~/.claude/rules/python
|
||||
cp -r rules/golang ~/.claude/rules/golang
|
||||
|
||||
# Attention ! ! ! Configure according to your actual project requirements; the configuration here is for reference only.
|
||||
```
|
||||
|
||||
## 规则与技能
|
||||
|
||||
* **规则** 定义广泛适用的标准、约定和检查清单(例如,“80% 的测试覆盖率”、“没有硬编码的密钥”)。
|
||||
* **技能**(`skills/` 目录)为特定任务提供深入、可操作的参考材料(例如,`python-patterns`,`golang-testing`)。
|
||||
|
||||
语言特定的规则文件会在适当的地方引用相关的技能。规则告诉你*要做什么*;技能告诉你*如何去做*。
|
||||
|
||||
## 添加新语言
|
||||
|
||||
要添加对新语言的支持(例如,`rust/`):
|
||||
|
||||
1. 创建一个 `rules/rust/` 目录
|
||||
2. 添加扩展通用规则的文件:
|
||||
* `coding-style.md` —— 格式化工具、习惯用法、错误处理模式
|
||||
* `testing.md` —— 测试框架、覆盖率工具、测试组织
|
||||
* `patterns.md` —— 语言特定的设计模式
|
||||
* `hooks.md` —— 用于格式化工具、代码检查器、类型检查器的 PostToolUse 钩子
|
||||
* `security.md` —— 密钥管理、安全扫描工具
|
||||
3. 每个文件应以以下内容开头:
|
||||
```
|
||||
> 此文件通过 <语言> 特定内容扩展了 [common/xxx.md](../common/xxx.md)。
|
||||
```
|
||||
4. 如果现有技能可用,则引用它们,或者在 `skills/` 下创建新的技能。
|
||||
@@ -1,72 +0,0 @@
|
||||
# 编码风格
|
||||
|
||||
## 不可变性(关键)
|
||||
|
||||
始终创建新对象,切勿修改:
|
||||
|
||||
```javascript
|
||||
// WRONG: Mutation
|
||||
function updateUser(user, name) {
|
||||
user.name = name // MUTATION!
|
||||
return user
|
||||
}
|
||||
|
||||
// CORRECT: Immutability
|
||||
function updateUser(user, name) {
|
||||
return {
|
||||
...user,
|
||||
name
|
||||
}
|
||||
}
|
||||
```
|
||||
|
||||
## 文件组织
|
||||
|
||||
多个小文件 > 少数大文件:
|
||||
|
||||
* 高内聚,低耦合
|
||||
* 典型 200-400 行,最多 800 行
|
||||
* 从大型组件中提取实用工具
|
||||
* 按功能/领域组织,而非按类型
|
||||
|
||||
## 错误处理
|
||||
|
||||
始终全面处理错误:
|
||||
|
||||
```typescript
|
||||
try {
|
||||
const result = await riskyOperation()
|
||||
return result
|
||||
} catch (error) {
|
||||
console.error('Operation failed:', error)
|
||||
throw new Error('Detailed user-friendly message')
|
||||
}
|
||||
```
|
||||
|
||||
## 输入验证
|
||||
|
||||
始终验证用户输入:
|
||||
|
||||
```typescript
|
||||
import { z } from 'zod'
|
||||
|
||||
const schema = z.object({
|
||||
email: z.string().email(),
|
||||
age: z.number().int().min(0).max(150)
|
||||
})
|
||||
|
||||
const validated = schema.parse(input)
|
||||
```
|
||||
|
||||
## 代码质量检查清单
|
||||
|
||||
在标记工作完成之前:
|
||||
|
||||
* \[ ] 代码可读且命名良好
|
||||
* \[ ] 函数短小(<50 行)
|
||||
* \[ ] 文件专注(<800 行)
|
||||
* \[ ] 无深层嵌套(>4 层)
|
||||
* \[ ] 正确的错误处理
|
||||
* \[ ] 无 console.log 语句
|
||||
* \[ ] 无硬编码值
|
||||
* \[ ] 无修改(使用不可变模式)
|
||||
@@ -30,14 +30,15 @@
|
||||
对于独立操作,**始终**使用并行任务执行:
|
||||
|
||||
```markdown
|
||||
# GOOD: Parallel execution
|
||||
Launch 3 agents in parallel:
|
||||
1. Agent 1: Security analysis of auth.ts
|
||||
2. Agent 2: Performance review of cache system
|
||||
3. Agent 3: Type checking of utils.ts
|
||||
# 良好:并行执行
|
||||
同时启动 3 个智能体:
|
||||
1. 智能体 1:认证模块的安全分析
|
||||
2. 智能体 2:缓存系统的性能审查
|
||||
3. 智能体 3:工具类的类型检查
|
||||
|
||||
# 不良:不必要的顺序执行
|
||||
先智能体 1,然后智能体 2,最后智能体 3
|
||||
|
||||
# BAD: Sequential when unnecessary
|
||||
First agent 1, then agent 2, then agent 3
|
||||
```
|
||||
|
||||
## 多视角分析
|
||||
52
docs/zh-CN/rules/common/coding-style.md
Normal file
52
docs/zh-CN/rules/common/coding-style.md
Normal file
@@ -0,0 +1,52 @@
|
||||
# 编码风格
|
||||
|
||||
## 不可变性(关键)
|
||||
|
||||
始终创建新对象,绝不改变现有对象:
|
||||
|
||||
```
|
||||
// Pseudocode
|
||||
WRONG: modify(original, field, value) → changes original in-place
|
||||
CORRECT: update(original, field, value) → returns new copy with change
|
||||
```
|
||||
|
||||
理由:不可变数据可以防止隐藏的副作用,使调试更容易,并支持安全的并发。
|
||||
|
||||
## 文件组织
|
||||
|
||||
多个小文件 > 少数大文件:
|
||||
|
||||
* 高内聚,低耦合
|
||||
* 通常 200-400 行,最多 800 行
|
||||
* 从大型模块中提取实用工具
|
||||
* 按功能/领域组织,而不是按类型组织
|
||||
|
||||
## 错误处理
|
||||
|
||||
始终全面处理错误:
|
||||
|
||||
* 在每个层级明确处理错误
|
||||
* 在面向用户的代码中提供用户友好的错误消息
|
||||
* 在服务器端记录详细的错误上下文
|
||||
* 绝不默默地忽略错误
|
||||
|
||||
## 输入验证
|
||||
|
||||
始终在系统边界处进行验证:
|
||||
|
||||
* 在处理前验证所有用户输入
|
||||
* 在可用时使用基于模式的验证
|
||||
* 快速失败并提供清晰的错误消息
|
||||
* 绝不信任外部数据(API 响应、用户输入、文件内容)
|
||||
|
||||
## 代码质量检查清单
|
||||
|
||||
在标记工作完成之前:
|
||||
|
||||
* \[ ] 代码可读且命名良好
|
||||
* \[ ] 函数短小(<50 行)
|
||||
* \[ ] 文件专注(<800 行)
|
||||
* \[ ] 没有深度嵌套(>4 层)
|
||||
* \[ ] 正确的错误处理
|
||||
* \[ ] 没有硬编码的值(使用常量或配置)
|
||||
* \[ ] 没有突变(使用不可变模式)
|
||||
@@ -6,25 +6,6 @@
|
||||
* **PostToolUse**:工具执行后(自动格式化、检查)
|
||||
* **Stop**:会话结束时(最终验证)
|
||||
|
||||
## 当前 Hooks(位于 ~/.claude/settings.json)
|
||||
|
||||
### PreToolUse
|
||||
|
||||
* **tmux 提醒**:建议对长时间运行的命令(npm、pnpm、yarn、cargo 等)使用 tmux
|
||||
* **git push 审查**:推送前在 Zed 中打开进行审查
|
||||
* **文档拦截器**:阻止创建不必要的 .md/.txt 文件
|
||||
|
||||
### PostToolUse
|
||||
|
||||
* **PR 创建**:记录 PR URL 和 GitHub Actions 状态
|
||||
* **Prettier**:编辑后自动格式化 JS/TS 文件
|
||||
* **TypeScript 检查**:编辑 .ts/.tsx 文件后运行 tsc
|
||||
* **console.log 警告**:警告编辑的文件中存在 console.log
|
||||
|
||||
### Stop
|
||||
|
||||
* **console.log 审计**:会话结束前检查所有修改的文件中是否存在 console.log
|
||||
|
||||
## 自动接受权限
|
||||
|
||||
谨慎使用:
|
||||
34
docs/zh-CN/rules/common/patterns.md
Normal file
34
docs/zh-CN/rules/common/patterns.md
Normal file
@@ -0,0 +1,34 @@
|
||||
# 常见模式
|
||||
|
||||
## 骨架项目
|
||||
|
||||
当实现新功能时:
|
||||
|
||||
1. 搜索经过实战检验的骨架项目
|
||||
2. 使用并行代理评估选项:
|
||||
* 安全性评估
|
||||
* 可扩展性分析
|
||||
* 相关性评分
|
||||
* 实施规划
|
||||
3. 克隆最佳匹配作为基础
|
||||
4. 在已验证的结构内迭代
|
||||
|
||||
## 设计模式
|
||||
|
||||
### 仓库模式
|
||||
|
||||
将数据访问封装在一个一致的接口之后:
|
||||
|
||||
* 定义标准操作:findAll, findById, create, update, delete
|
||||
* 具体实现处理存储细节(数据库、API、文件等)
|
||||
* 业务逻辑依赖于抽象接口,而非存储机制
|
||||
* 便于轻松切换数据源,并使用模拟对象简化测试
|
||||
|
||||
### API 响应格式
|
||||
|
||||
对所有 API 响应使用一致的信封格式:
|
||||
|
||||
* 包含一个成功/状态指示器
|
||||
* 包含数据载荷(出错时可为空)
|
||||
* 包含一个错误消息字段(成功时可为空)
|
||||
* 为分页响应包含元数据(总数、页码、限制)
|
||||
@@ -35,14 +35,23 @@
|
||||
* 文档更新
|
||||
* 简单的错误修复
|
||||
|
||||
## Ultrathink + 计划模式
|
||||
## 扩展思考 + 计划模式
|
||||
|
||||
扩展思考默认启用,最多保留 31,999 个令牌用于内部推理。
|
||||
|
||||
通过以下方式控制扩展思考:
|
||||
|
||||
* **切换**:Option+T (macOS) / Alt+T (Windows/Linux)
|
||||
* **配置**:在 `~/.claude/settings.json` 中设置 `alwaysThinkingEnabled`
|
||||
* **预算上限**:`export MAX_THINKING_TOKENS=10000`
|
||||
* **详细模式**:Ctrl+O 查看思考输出
|
||||
|
||||
对于需要深度推理的复杂任务:
|
||||
|
||||
1. 使用 `ultrathink` 进行增强思考
|
||||
2. 启用**计划模式**以获得结构化方法
|
||||
3. 通过多轮批判性评审来"发动引擎"
|
||||
4. 使用拆分角色的子智能体进行多样化分析
|
||||
1. 确保扩展思考已启用(默认开启)
|
||||
2. 启用 **计划模式** 以获得结构化方法
|
||||
3. 使用多轮批判进行彻底分析
|
||||
4. 使用分割角色子代理以获得多元视角
|
||||
|
||||
## 构建故障排除
|
||||
|
||||
@@ -15,17 +15,10 @@
|
||||
|
||||
## 密钥管理
|
||||
|
||||
```typescript
|
||||
// NEVER: Hardcoded secrets
|
||||
const apiKey = "sk-proj-xxxxx"
|
||||
|
||||
// ALWAYS: Environment variables
|
||||
const apiKey = process.env.OPENAI_API_KEY
|
||||
|
||||
if (!apiKey) {
|
||||
throw new Error('OPENAI_API_KEY not configured')
|
||||
}
|
||||
```
|
||||
* 切勿在源代码中硬编码密钥
|
||||
* 始终使用环境变量或密钥管理器
|
||||
* 在启动时验证所需的密钥是否存在
|
||||
* 轮换任何可能已泄露的密钥
|
||||
|
||||
## 安全响应协议
|
||||
|
||||
@@ -6,7 +6,7 @@
|
||||
|
||||
1. **单元测试** - 单个函数、工具、组件
|
||||
2. **集成测试** - API 端点、数据库操作
|
||||
3. **端到端测试** - 关键用户流程 (Playwright)
|
||||
3. **端到端测试** - 关键用户流程(根据语言选择框架)
|
||||
|
||||
## 测试驱动开发
|
||||
|
||||
@@ -28,5 +28,4 @@
|
||||
|
||||
## 代理支持
|
||||
|
||||
* **tdd-guide** - 主动用于新功能,强制执行先写测试
|
||||
* **e2e-runner** - Playwright 端到端测试专家
|
||||
* **tdd-guide** - 主动用于新功能,强制执行测试优先
|
||||
26
docs/zh-CN/rules/golang/coding-style.md
Normal file
26
docs/zh-CN/rules/golang/coding-style.md
Normal file
@@ -0,0 +1,26 @@
|
||||
# Go 编码风格
|
||||
|
||||
> 本文件在 [common/coding-style.md](../common/coding-style.md) 的基础上,扩展了 Go 语言的特定内容。
|
||||
|
||||
## 格式化
|
||||
|
||||
* **gofmt** 和 **goimports** 是强制性的 —— 无需进行风格辩论
|
||||
|
||||
## 设计原则
|
||||
|
||||
* 接受接口,返回结构体
|
||||
* 保持接口小巧(1-3 个方法)
|
||||
|
||||
## 错误处理
|
||||
|
||||
始终用上下文包装错误:
|
||||
|
||||
```go
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to create user: %w", err)
|
||||
}
|
||||
```
|
||||
|
||||
## 参考
|
||||
|
||||
查看技能:`golang-patterns` 以获取全面的 Go 语言惯用法和模式。
|
||||
11
docs/zh-CN/rules/golang/hooks.md
Normal file
11
docs/zh-CN/rules/golang/hooks.md
Normal file
@@ -0,0 +1,11 @@
|
||||
# Go 钩子
|
||||
|
||||
> 本文件通过 Go 特定内容扩展了 [common/hooks.md](../common/hooks.md)。
|
||||
|
||||
## PostToolUse 钩子
|
||||
|
||||
在 `~/.claude/settings.json` 中配置:
|
||||
|
||||
* **gofmt/goimports**:编辑后自动格式化 `.go` 文件
|
||||
* **go vet**:编辑 `.go` 文件后运行静态分析
|
||||
* **staticcheck**:对修改的包运行扩展静态检查
|
||||
39
docs/zh-CN/rules/golang/patterns.md
Normal file
39
docs/zh-CN/rules/golang/patterns.md
Normal file
@@ -0,0 +1,39 @@
|
||||
# Go 模式
|
||||
|
||||
> 本文档在 [common/patterns.md](../common/patterns.md) 的基础上扩展了 Go 语言特定的内容。
|
||||
|
||||
## 函数式选项
|
||||
|
||||
```go
|
||||
type Option func(*Server)
|
||||
|
||||
func WithPort(port int) Option {
|
||||
return func(s *Server) { s.port = port }
|
||||
}
|
||||
|
||||
func NewServer(opts ...Option) *Server {
|
||||
s := &Server{port: 8080}
|
||||
for _, opt := range opts {
|
||||
opt(s)
|
||||
}
|
||||
return s
|
||||
}
|
||||
```
|
||||
|
||||
## 小接口
|
||||
|
||||
在接口被使用的地方定义它们,而不是在它们被实现的地方。
|
||||
|
||||
## 依赖注入
|
||||
|
||||
使用构造函数来注入依赖:
|
||||
|
||||
```go
|
||||
func NewUserService(repo UserRepository, logger Logger) *UserService {
|
||||
return &UserService{repo: repo, logger: logger}
|
||||
}
|
||||
```
|
||||
|
||||
## 参考
|
||||
|
||||
有关全面的 Go 模式(包括并发、错误处理和包组织),请参阅技能:`golang-patterns`。
|
||||
28
docs/zh-CN/rules/golang/security.md
Normal file
28
docs/zh-CN/rules/golang/security.md
Normal file
@@ -0,0 +1,28 @@
|
||||
# Go 安全
|
||||
|
||||
> 此文件基于 [common/security.md](../common/security.md) 扩展了 Go 特定内容。
|
||||
|
||||
## 密钥管理
|
||||
|
||||
```go
|
||||
apiKey := os.Getenv("OPENAI_API_KEY")
|
||||
if apiKey == "" {
|
||||
log.Fatal("OPENAI_API_KEY not configured")
|
||||
}
|
||||
```
|
||||
|
||||
## 安全扫描
|
||||
|
||||
* 使用 **gosec** 进行静态安全分析:
|
||||
```bash
|
||||
gosec ./...
|
||||
```
|
||||
|
||||
## 上下文与超时
|
||||
|
||||
始终使用 `context.Context` 进行超时控制:
|
||||
|
||||
```go
|
||||
ctx, cancel := context.WithTimeout(ctx, 5*time.Second)
|
||||
defer cancel()
|
||||
```
|
||||
25
docs/zh-CN/rules/golang/testing.md
Normal file
25
docs/zh-CN/rules/golang/testing.md
Normal file
@@ -0,0 +1,25 @@
|
||||
# Go 测试
|
||||
|
||||
> 本文档在 [common/testing.md](../common/testing.md) 的基础上扩展了 Go 特定的内容。
|
||||
|
||||
## 框架
|
||||
|
||||
使用标准的 `go test` 并采用 **表格驱动测试**。
|
||||
|
||||
## 竞态检测
|
||||
|
||||
始终使用 `-race` 标志运行:
|
||||
|
||||
```bash
|
||||
go test -race ./...
|
||||
```
|
||||
|
||||
## 覆盖率
|
||||
|
||||
```bash
|
||||
go test -cover ./...
|
||||
```
|
||||
|
||||
## 参考
|
||||
|
||||
查看技能:`golang-testing` 以获取详细的 Go 测试模式和辅助工具。
|
||||
37
docs/zh-CN/rules/python/coding-style.md
Normal file
37
docs/zh-CN/rules/python/coding-style.md
Normal file
@@ -0,0 +1,37 @@
|
||||
# Python 编码风格
|
||||
|
||||
> 本文件在 [common/coding-style.md](../common/coding-style.md) 的基础上扩展了 Python 特定的内容。
|
||||
|
||||
## 标准
|
||||
|
||||
* 遵循 **PEP 8** 规范
|
||||
* 在所有函数签名上使用 **类型注解**
|
||||
|
||||
## 不变性
|
||||
|
||||
优先使用不可变数据结构:
|
||||
|
||||
```python
|
||||
from dataclasses import dataclass
|
||||
|
||||
@dataclass(frozen=True)
|
||||
class User:
|
||||
name: str
|
||||
email: str
|
||||
|
||||
from typing import NamedTuple
|
||||
|
||||
class Point(NamedTuple):
|
||||
x: float
|
||||
y: float
|
||||
```
|
||||
|
||||
## 格式化
|
||||
|
||||
* 使用 **black** 进行代码格式化
|
||||
* 使用 **isort** 进行导入排序
|
||||
* 使用 **ruff** 进行代码检查
|
||||
|
||||
## 参考
|
||||
|
||||
查看技能:`python-patterns` 以获取全面的 Python 惯用法和模式。
|
||||
14
docs/zh-CN/rules/python/hooks.md
Normal file
14
docs/zh-CN/rules/python/hooks.md
Normal file
@@ -0,0 +1,14 @@
|
||||
# Python 钩子
|
||||
|
||||
> 本文档扩展了 [common/hooks.md](../common/hooks.md) 中关于 Python 的特定内容。
|
||||
|
||||
## PostToolUse 钩子
|
||||
|
||||
在 `~/.claude/settings.json` 中配置:
|
||||
|
||||
* **black/ruff**:编辑后自动格式化 `.py` 文件
|
||||
* **mypy/pyright**:编辑 `.py` 文件后运行类型检查
|
||||
|
||||
## 警告
|
||||
|
||||
* 对编辑文件中的 `print()` 语句发出警告(应使用 `logging` 模块替代)
|
||||
34
docs/zh-CN/rules/python/patterns.md
Normal file
34
docs/zh-CN/rules/python/patterns.md
Normal file
@@ -0,0 +1,34 @@
|
||||
# Python 模式
|
||||
|
||||
> 本文档扩展了 [common/patterns.md](../common/patterns.md),补充了 Python 特定的内容。
|
||||
|
||||
## 协议(鸭子类型)
|
||||
|
||||
```python
|
||||
from typing import Protocol
|
||||
|
||||
class Repository(Protocol):
|
||||
def find_by_id(self, id: str) -> dict | None: ...
|
||||
def save(self, entity: dict) -> dict: ...
|
||||
```
|
||||
|
||||
## 数据类作为 DTO
|
||||
|
||||
```python
|
||||
from dataclasses import dataclass
|
||||
|
||||
@dataclass
|
||||
class CreateUserRequest:
|
||||
name: str
|
||||
email: str
|
||||
age: int | None = None
|
||||
```
|
||||
|
||||
## 上下文管理器与生成器
|
||||
|
||||
* 使用上下文管理器(`with` 语句)进行资源管理
|
||||
* 使用生成器进行惰性求值和内存高效迭代
|
||||
|
||||
## 参考
|
||||
|
||||
查看技能:`python-patterns`,了解包括装饰器、并发和包组织在内的综合模式。
|
||||
25
docs/zh-CN/rules/python/security.md
Normal file
25
docs/zh-CN/rules/python/security.md
Normal file
@@ -0,0 +1,25 @@
|
||||
# Python 安全
|
||||
|
||||
> 本文档基于 [通用安全指南](../common/security.md) 扩展,补充了 Python 相关的内容。
|
||||
|
||||
## 密钥管理
|
||||
|
||||
```python
|
||||
import os
|
||||
from dotenv import load_dotenv
|
||||
|
||||
load_dotenv()
|
||||
|
||||
api_key = os.environ["OPENAI_API_KEY"] # Raises KeyError if missing
|
||||
```
|
||||
|
||||
## 安全扫描
|
||||
|
||||
* 使用 **bandit** 进行静态安全分析:
|
||||
```bash
|
||||
bandit -r src/
|
||||
```
|
||||
|
||||
## 参考
|
||||
|
||||
查看技能:`django-security` 以获取 Django 特定的安全指南(如适用)。
|
||||
33
docs/zh-CN/rules/python/testing.md
Normal file
33
docs/zh-CN/rules/python/testing.md
Normal file
@@ -0,0 +1,33 @@
|
||||
# Python 测试
|
||||
|
||||
> 本文件在 [通用/测试.md](../common/testing.md) 的基础上扩展了 Python 特定的内容。
|
||||
|
||||
## 框架
|
||||
|
||||
使用 **pytest** 作为测试框架。
|
||||
|
||||
## 覆盖率
|
||||
|
||||
```bash
|
||||
pytest --cov=src --cov-report=term-missing
|
||||
```
|
||||
|
||||
## 测试组织
|
||||
|
||||
使用 `pytest.mark` 进行测试分类:
|
||||
|
||||
```python
|
||||
import pytest
|
||||
|
||||
@pytest.mark.unit
|
||||
def test_calculate_total():
|
||||
...
|
||||
|
||||
@pytest.mark.integration
|
||||
def test_database_connection():
|
||||
...
|
||||
```
|
||||
|
||||
## 参考
|
||||
|
||||
查看技能:`python-testing` 以获取详细的 pytest 模式和夹具信息。
|
||||
58
docs/zh-CN/rules/typescript/coding-style.md
Normal file
58
docs/zh-CN/rules/typescript/coding-style.md
Normal file
@@ -0,0 +1,58 @@
|
||||
# TypeScript/JavaScript 编码风格
|
||||
|
||||
> 本文件基于 [common/coding-style.md](../common/coding-style.md) 扩展,包含 TypeScript/JavaScript 特定内容。
|
||||
|
||||
## 不可变性
|
||||
|
||||
使用展开运算符进行不可变更新:
|
||||
|
||||
```typescript
|
||||
// WRONG: Mutation
|
||||
function updateUser(user, name) {
|
||||
user.name = name // MUTATION!
|
||||
return user
|
||||
}
|
||||
|
||||
// CORRECT: Immutability
|
||||
function updateUser(user, name) {
|
||||
return {
|
||||
...user,
|
||||
name
|
||||
}
|
||||
}
|
||||
```
|
||||
|
||||
## 错误处理
|
||||
|
||||
使用 async/await 配合 try-catch:
|
||||
|
||||
```typescript
|
||||
try {
|
||||
const result = await riskyOperation()
|
||||
return result
|
||||
} catch (error) {
|
||||
console.error('Operation failed:', error)
|
||||
throw new Error('Detailed user-friendly message')
|
||||
}
|
||||
```
|
||||
|
||||
## 输入验证
|
||||
|
||||
使用 Zod 进行基于模式的验证:
|
||||
|
||||
```typescript
|
||||
import { z } from 'zod'
|
||||
|
||||
const schema = z.object({
|
||||
email: z.string().email(),
|
||||
age: z.number().int().min(0).max(150)
|
||||
})
|
||||
|
||||
const validated = schema.parse(input)
|
||||
```
|
||||
|
||||
## Console.log
|
||||
|
||||
* 生产代码中不允许出现 `console.log` 语句
|
||||
* 请使用适当的日志库替代
|
||||
* 查看钩子以进行自动检测
|
||||
15
docs/zh-CN/rules/typescript/hooks.md
Normal file
15
docs/zh-CN/rules/typescript/hooks.md
Normal file
@@ -0,0 +1,15 @@
|
||||
# TypeScript/JavaScript 钩子
|
||||
|
||||
> 此文件扩展了 [common/hooks.md](../common/hooks.md),并添加了 TypeScript/JavaScript 特有的内容。
|
||||
|
||||
## PostToolUse 钩子
|
||||
|
||||
在 `~/.claude/settings.json` 中配置:
|
||||
|
||||
* **Prettier**:编辑后自动格式化 JS/TS 文件
|
||||
* **TypeScript 检查**:编辑 `.ts`/`.tsx` 文件后运行 `tsc`
|
||||
* **console.log 警告**:警告编辑过的文件中存在 `console.log`
|
||||
|
||||
## Stop 钩子
|
||||
|
||||
* **console.log 审计**:在会话结束前,检查所有修改过的文件中是否存在 `console.log`
|
||||
@@ -1,4 +1,6 @@
|
||||
# 常见模式
|
||||
# TypeScript/JavaScript 模式
|
||||
|
||||
> 此文件在 [common/patterns.md](../common/patterns.md) 的基础上扩展了 TypeScript/JavaScript 特定的内容。
|
||||
|
||||
## API 响应格式
|
||||
|
||||
@@ -41,16 +43,3 @@ interface Repository<T> {
|
||||
delete(id: string): Promise<void>
|
||||
}
|
||||
```
|
||||
|
||||
## 骨架项目
|
||||
|
||||
当实现新功能时:
|
||||
|
||||
1. 搜索经过实战检验的骨架项目
|
||||
2. 使用并行代理评估选项:
|
||||
* 安全性评估
|
||||
* 可扩展性分析
|
||||
* 相关性评分
|
||||
* 实施规划
|
||||
3. 克隆最佳匹配作为基础
|
||||
4. 在已验证的结构内迭代
|
||||
21
docs/zh-CN/rules/typescript/security.md
Normal file
21
docs/zh-CN/rules/typescript/security.md
Normal file
@@ -0,0 +1,21 @@
|
||||
# TypeScript/JavaScript 安全
|
||||
|
||||
> 本文档扩展了 [common/security.md](../common/security.md),包含了 TypeScript/JavaScript 特定的内容。
|
||||
|
||||
## 密钥管理
|
||||
|
||||
```typescript
|
||||
// NEVER: Hardcoded secrets
|
||||
const apiKey = "sk-proj-xxxxx"
|
||||
|
||||
// ALWAYS: Environment variables
|
||||
const apiKey = process.env.OPENAI_API_KEY
|
||||
|
||||
if (!apiKey) {
|
||||
throw new Error('OPENAI_API_KEY not configured')
|
||||
}
|
||||
```
|
||||
|
||||
## 代理支持
|
||||
|
||||
* 使用 **security-reviewer** 技能进行全面的安全审计
|
||||
11
docs/zh-CN/rules/typescript/testing.md
Normal file
11
docs/zh-CN/rules/typescript/testing.md
Normal file
@@ -0,0 +1,11 @@
|
||||
# TypeScript/JavaScript 测试
|
||||
|
||||
> 本文档基于 [common/testing.md](../common/testing.md) 扩展,补充了 TypeScript/JavaScript 特定的内容。
|
||||
|
||||
## E2E 测试
|
||||
|
||||
使用 **Playwright** 作为关键用户流程的 E2E 测试框架。
|
||||
|
||||
## 智能体支持
|
||||
|
||||
* **e2e-runner** - Playwright E2E 测试专家
|
||||
314
docs/zh-CN/skills/configure-ecc/SKILL.md
Normal file
314
docs/zh-CN/skills/configure-ecc/SKILL.md
Normal file
@@ -0,0 +1,314 @@
|
||||
---
|
||||
name: configure-ecc
|
||||
description: Everything Claude Code 的交互式安装程序 — 引导用户选择并安装技能和规则到用户级或项目级目录,验证路径,并可选择性地优化已安装的文件。
|
||||
---
|
||||
|
||||
# 配置 Everything Claude Code (ECC)
|
||||
|
||||
一个交互式、分步安装向导,用于 Everything Claude Code 项目。使用 `AskUserQuestion` 引导用户选择性安装技能和规则,然后验证正确性并提供优化。
|
||||
|
||||
## 何时激活
|
||||
|
||||
* 用户说 "configure ecc"、"install ecc"、"setup everything claude code" 或类似表述
|
||||
* 用户想要从此项目中选择性安装技能或规则
|
||||
* 用户想要验证或修复现有的 ECC 安装
|
||||
* 用户想要为其项目优化已安装的技能或规则
|
||||
|
||||
## 先决条件
|
||||
|
||||
此技能必须在激活前对 Claude Code 可访问。有两种引导方式:
|
||||
|
||||
1. **通过插件**: `/plugin install everything-claude-code` — 插件会自动加载此技能
|
||||
2. **手动**: 仅将此技能复制到 `~/.claude/skills/configure-ecc/SKILL.md`,然后通过说 "configure ecc" 激活
|
||||
|
||||
***
|
||||
|
||||
## 步骤 0:克隆 ECC 仓库
|
||||
|
||||
在任何安装之前,将最新的 ECC 源代码克隆到 `/tmp`:
|
||||
|
||||
```bash
|
||||
rm -rf /tmp/everything-claude-code
|
||||
git clone https://github.com/affaan-m/everything-claude-code.git /tmp/everything-claude-code
|
||||
```
|
||||
|
||||
将 `ECC_ROOT=/tmp/everything-claude-code` 设置为所有后续复制操作的源。
|
||||
|
||||
如果克隆失败(网络问题等),使用 `AskUserQuestion` 要求用户提供现有 ECC 克隆的本地路径。
|
||||
|
||||
***
|
||||
|
||||
## 步骤 1:选择安装级别
|
||||
|
||||
使用 `AskUserQuestion` 询问用户安装位置:
|
||||
|
||||
```
|
||||
Question: "Where should ECC components be installed?"
|
||||
Options:
|
||||
- "User-level (~/.claude/)" — "Applies to all your Claude Code projects"
|
||||
- "Project-level (.claude/)" — "Applies only to the current project"
|
||||
- "Both" — "Common/shared items user-level, project-specific items project-level"
|
||||
```
|
||||
|
||||
将选择存储为 `INSTALL_LEVEL`。设置目标目录:
|
||||
|
||||
* 用户级别:`TARGET=~/.claude`
|
||||
* 项目级别:`TARGET=.claude`(相对于当前项目根目录)
|
||||
* 两者:`TARGET_USER=~/.claude`,`TARGET_PROJECT=.claude`
|
||||
|
||||
如果目标目录不存在,则创建它们:
|
||||
|
||||
```bash
|
||||
mkdir -p $TARGET/skills $TARGET/rules
|
||||
```
|
||||
|
||||
***
|
||||
|
||||
## 步骤 2:选择并安装技能
|
||||
|
||||
### 2a:选择技能类别
|
||||
|
||||
共有 27 项技能,分为 4 个类别。使用 `AskUserQuestion` 和 `multiSelect: true`:
|
||||
|
||||
```
|
||||
Question: "Which skill categories do you want to install?"
|
||||
Options:
|
||||
- "Framework & Language" — "Django, Spring Boot, Go, Python, Java, Frontend, Backend patterns"
|
||||
- "Database" — "PostgreSQL, ClickHouse, JPA/Hibernate patterns"
|
||||
- "Workflow & Quality" — "TDD, verification, learning, security review, compaction"
|
||||
- "All skills" — "Install every available skill"
|
||||
```
|
||||
|
||||
### 2b:确认单项技能
|
||||
|
||||
对于每个选定的类别,打印下面的完整技能列表,并要求用户确认或取消选择特定的技能。如果列表超过 4 项,将列表打印为文本,并使用 `AskUserQuestion`,提供一个 "安装所有列出项" 的选项,以及一个 "其他" 选项供用户粘贴特定名称。
|
||||
|
||||
**类别:框架与语言(16 项技能)**
|
||||
|
||||
| 技能 | 描述 |
|
||||
|-------|-------------|
|
||||
| `backend-patterns` | Node.js/Express/Next.js 的后端架构、API 设计、服务器端最佳实践 |
|
||||
| `coding-standards` | TypeScript、JavaScript、React、Node.js 的通用编码标准 |
|
||||
| `django-patterns` | Django 架构、使用 DRF 的 REST API、ORM、缓存、信号、中间件 |
|
||||
| `django-security` | Django 安全:身份验证、CSRF、SQL 注入、XSS 防护 |
|
||||
| `django-tdd` | 使用 pytest-django、factory\_boy、模拟、覆盖率的 Django 测试 |
|
||||
| `django-verification` | Django 验证循环:迁移、代码检查、测试、安全扫描 |
|
||||
| `frontend-patterns` | React、Next.js、状态管理、性能、UI 模式 |
|
||||
| `golang-patterns` | 地道的 Go 模式、健壮 Go 应用程序的约定 |
|
||||
| `golang-testing` | Go 测试:表格驱动测试、子测试、基准测试、模糊测试 |
|
||||
| `java-coding-standards` | Spring Boot 的 Java 编码标准:命名、不可变性、Optional、流 |
|
||||
| `python-patterns` | Pythonic 惯用法、PEP 8、类型提示、最佳实践 |
|
||||
| `python-testing` | 使用 pytest、TDD、夹具、模拟、参数化的 Python 测试 |
|
||||
| `springboot-patterns` | Spring Boot 架构、REST API、分层服务、缓存、异步 |
|
||||
| `springboot-security` | Spring Security:身份验证/授权、验证、CSRF、密钥、速率限制 |
|
||||
| `springboot-tdd` | 使用 JUnit 5、Mockito、MockMvc、Testcontainers 的 Spring Boot TDD |
|
||||
| `springboot-verification` | Spring Boot 验证:构建、静态分析、测试、安全扫描 |
|
||||
|
||||
**类别:数据库(3 项技能)**
|
||||
|
||||
| 技能 | 描述 |
|
||||
|-------|-------------|
|
||||
| `clickhouse-io` | ClickHouse 模式、查询优化、分析、数据工程 |
|
||||
| `jpa-patterns` | JPA/Hibernate 实体设计、关系、查询优化、事务 |
|
||||
| `postgres-patterns` | PostgreSQL 查询优化、模式设计、索引、安全 |
|
||||
|
||||
**类别:工作流与质量(8 项技能)**
|
||||
|
||||
| 技能 | 描述 |
|
||||
|-------|-------------|
|
||||
| `continuous-learning` | 从会话中自动提取可重用模式作为习得技能 |
|
||||
| `continuous-learning-v2` | 基于本能的学习,带有置信度评分,演变为技能/命令/代理 |
|
||||
| `eval-harness` | 用于评估驱动开发 (EDD) 的正式评估框架 |
|
||||
| `iterative-retrieval` | 用于子代理上下文问题的渐进式上下文优化 |
|
||||
| `security-review` | 安全检查清单:身份验证、输入、密钥、API、支付功能 |
|
||||
| `strategic-compact` | 在逻辑间隔处建议手动上下文压缩 |
|
||||
| `tdd-workflow` | 强制要求 TDD,覆盖率 80% 以上:单元测试、集成测试、端到端测试 |
|
||||
| `verification-loop` | 验证和质量循环模式 |
|
||||
|
||||
**独立技能**
|
||||
|
||||
| 技能 | 描述 |
|
||||
|-------|-------------|
|
||||
| `project-guidelines-example` | 用于创建项目特定技能的模板 |
|
||||
|
||||
### 2c:执行安装
|
||||
|
||||
对于每个选定的技能,复制整个技能目录:
|
||||
|
||||
```bash
|
||||
cp -r $ECC_ROOT/skills/<skill-name> $TARGET/skills/
|
||||
```
|
||||
|
||||
注意:`continuous-learning` 和 `continuous-learning-v2` 有额外的文件(config.json、钩子、脚本)——确保复制整个目录,而不仅仅是 SKILL.md。
|
||||
|
||||
***
|
||||
|
||||
## 步骤 3:选择并安装规则
|
||||
|
||||
使用 `AskUserQuestion` 和 `multiSelect: true`:
|
||||
|
||||
```
|
||||
Question: "Which rule sets do you want to install?"
|
||||
Options:
|
||||
- "Common rules (Recommended)" — "Language-agnostic principles: coding style, git workflow, testing, security, etc. (8 files)"
|
||||
- "TypeScript/JavaScript" — "TS/JS patterns, hooks, testing with Playwright (5 files)"
|
||||
- "Python" — "Python patterns, pytest, black/ruff formatting (5 files)"
|
||||
- "Go" — "Go patterns, table-driven tests, gofmt/staticcheck (5 files)"
|
||||
```
|
||||
|
||||
执行安装:
|
||||
|
||||
```bash
|
||||
# Common rules (flat copy into rules/)
|
||||
cp -r $ECC_ROOT/rules/common/* $TARGET/rules/
|
||||
|
||||
# Language-specific rules (flat copy into rules/)
|
||||
cp -r $ECC_ROOT/rules/typescript/* $TARGET/rules/ # if selected
|
||||
cp -r $ECC_ROOT/rules/python/* $TARGET/rules/ # if selected
|
||||
cp -r $ECC_ROOT/rules/golang/* $TARGET/rules/ # if selected
|
||||
```
|
||||
|
||||
**重要**:如果用户选择了任何特定语言的规则但**没有**选择通用规则,警告他们:
|
||||
|
||||
> "特定语言规则扩展了通用规则。不安装通用规则可能导致覆盖不完整。是否也安装通用规则?"
|
||||
|
||||
***
|
||||
|
||||
## 步骤 4:安装后验证
|
||||
|
||||
安装后,执行这些自动化检查:
|
||||
|
||||
### 4a:验证文件存在
|
||||
|
||||
列出所有已安装的文件并确认它们存在于目标位置:
|
||||
|
||||
```bash
|
||||
ls -la $TARGET/skills/
|
||||
ls -la $TARGET/rules/
|
||||
```
|
||||
|
||||
### 4b:检查路径引用
|
||||
|
||||
扫描所有已安装的 `.md` 文件中的路径引用:
|
||||
|
||||
```bash
|
||||
grep -rn "~/.claude/" $TARGET/skills/ $TARGET/rules/
|
||||
grep -rn "../common/" $TARGET/rules/
|
||||
grep -rn "skills/" $TARGET/skills/
|
||||
```
|
||||
|
||||
**对于项目级别安装**,标记任何对 `~/.claude/` 路径的引用:
|
||||
|
||||
* 如果技能引用 `~/.claude/settings.json` — 这通常没问题(设置始终是用户级别的)
|
||||
* 如果技能引用 `~/.claude/skills/` 或 `~/.claude/rules/` — 如果仅安装在项目级别,这可能损坏
|
||||
* 如果技能通过名称引用另一项技能 — 检查被引用的技能是否也已安装
|
||||
|
||||
### 4c:检查技能间的交叉引用
|
||||
|
||||
有些技能会引用其他技能。验证这些依赖关系:
|
||||
|
||||
* `django-tdd` 可能引用 `django-patterns`
|
||||
* `springboot-tdd` 可能引用 `springboot-patterns`
|
||||
* `continuous-learning-v2` 引用 `~/.claude/homunculus/` 目录
|
||||
* `python-testing` 可能引用 `python-patterns`
|
||||
* `golang-testing` 可能引用 `golang-patterns`
|
||||
* 特定语言规则引用其 `common/` 对应项
|
||||
|
||||
### 4d:报告问题
|
||||
|
||||
对于发现的每个问题,报告:
|
||||
|
||||
1. **文件**:包含问题引用的文件
|
||||
2. **行号**:行号
|
||||
3. **问题**:哪里出错了(例如,"引用了 ~/.claude/skills/python-patterns 但 python-patterns 未安装")
|
||||
4. **建议的修复**:该怎么做(例如,"安装 python-patterns 技能" 或 "将路径更新为 .claude/skills/")
|
||||
|
||||
***
|
||||
|
||||
## 步骤 5:优化已安装文件(可选)
|
||||
|
||||
使用 `AskUserQuestion`:
|
||||
|
||||
```
|
||||
Question: "Would you like to optimize the installed files for your project?"
|
||||
Options:
|
||||
- "Optimize skills" — "Remove irrelevant sections, adjust paths, tailor to your tech stack"
|
||||
- "Optimize rules" — "Adjust coverage targets, add project-specific patterns, customize tool configs"
|
||||
- "Optimize both" — "Full optimization of all installed files"
|
||||
- "Skip" — "Keep everything as-is"
|
||||
```
|
||||
|
||||
### 如果优化技能:
|
||||
|
||||
1. 读取每个已安装的 SKILL.md
|
||||
2. 询问用户其项目的技术栈是什么(如果尚不清楚)
|
||||
3. 对于每项技能,建议删除无关部分
|
||||
4. 在安装目标处就地编辑 SKILL.md 文件(**不是**源仓库)
|
||||
5. 修复在步骤 4 中发现的任何路径问题
|
||||
|
||||
### 如果优化规则:
|
||||
|
||||
1. 读取每个已安装的规则 .md 文件
|
||||
2. 询问用户的偏好:
|
||||
* 测试覆盖率目标(默认 80%)
|
||||
* 首选的格式化工具
|
||||
* Git 工作流约定
|
||||
* 安全要求
|
||||
3. 在安装目标处就地编辑规则文件
|
||||
|
||||
**关键**:只修改安装目标(`$TARGET/`)中的文件,**绝不**修改源 ECC 仓库(`$ECC_ROOT/`)中的文件。
|
||||
|
||||
***
|
||||
|
||||
## 步骤 6:安装摘要
|
||||
|
||||
从 `/tmp` 清理克隆的仓库:
|
||||
|
||||
```bash
|
||||
rm -rf /tmp/everything-claude-code
|
||||
```
|
||||
|
||||
然后打印摘要报告:
|
||||
|
||||
```
|
||||
## ECC Installation Complete
|
||||
|
||||
### Installation Target
|
||||
- Level: [user-level / project-level / both]
|
||||
- Path: [target path]
|
||||
|
||||
### Skills Installed ([count])
|
||||
- skill-1, skill-2, skill-3, ...
|
||||
|
||||
### Rules Installed ([count])
|
||||
- common (8 files)
|
||||
- typescript (5 files)
|
||||
- ...
|
||||
|
||||
### Verification Results
|
||||
- [count] issues found, [count] fixed
|
||||
- [list any remaining issues]
|
||||
|
||||
### Optimizations Applied
|
||||
- [list changes made, or "None"]
|
||||
```
|
||||
|
||||
***
|
||||
|
||||
## 故障排除
|
||||
|
||||
### "Claude Code 未获取技能"
|
||||
|
||||
* 验证技能目录包含一个 `SKILL.md` 文件(不仅仅是松散的 .md 文件)
|
||||
* 对于用户级别:检查 `~/.claude/skills/<skill-name>/SKILL.md` 是否存在
|
||||
* 对于项目级别:检查 `.claude/skills/<skill-name>/SKILL.md` 是否存在
|
||||
|
||||
### "规则不工作"
|
||||
|
||||
* 规则是平面文件,不在子目录中:`$TARGET/rules/coding-style.md`(正确)对比 `$TARGET/rules/common/coding-style.md`(对于平面安装不正确)
|
||||
* 安装规则后重启 Claude Code
|
||||
|
||||
### "项目级别安装后出现路径引用错误"
|
||||
|
||||
* 有些技能假设 `~/.claude/` 路径。运行步骤 4 验证来查找并修复这些问题。
|
||||
* 对于 `continuous-learning-v2`,`~/.claude/homunculus/` 目录始终是用户级别的 — 这是预期的,不是错误。
|
||||
322
docs/zh-CN/skills/cpp-testing/SKILL.md
Normal file
322
docs/zh-CN/skills/cpp-testing/SKILL.md
Normal file
@@ -0,0 +1,322 @@
|
||||
---
|
||||
name: cpp-testing
|
||||
description: 仅在编写/更新/修复C++测试、配置GoogleTest/CTest、诊断失败或不稳定的测试,或添加覆盖率/消毒器时使用。
|
||||
---
|
||||
|
||||
# C++ 测试(代理技能)
|
||||
|
||||
针对现代 C++(C++17/20)的代理导向测试工作流,使用 GoogleTest/GoogleMock 和 CMake/CTest。
|
||||
|
||||
## 使用时机
|
||||
|
||||
* 编写新的 C++ 测试或修复现有测试
|
||||
* 为 C++ 组件设计单元/集成测试覆盖
|
||||
* 添加测试覆盖、CI 门控或回归保护
|
||||
* 配置 CMake/CTest 工作流以实现一致的执行
|
||||
* 调查测试失败或偶发性行为
|
||||
* 启用用于内存/竞态诊断的消毒剂
|
||||
|
||||
### 不适用时机
|
||||
|
||||
* 在不修改测试的情况下实现新的产品功能
|
||||
* 与测试覆盖或失败无关的大规模重构
|
||||
* 没有测试回归需要验证的性能调优
|
||||
* 非 C++ 项目或非测试任务
|
||||
|
||||
## 核心概念
|
||||
|
||||
* **TDD 循环**:红 → 绿 → 重构(先写测试,最小化修复,然后清理)。
|
||||
* **隔离**:优先使用依赖注入和仿制品,而非全局状态。
|
||||
* **测试布局**:`tests/unit`、`tests/integration`、`tests/testdata`。
|
||||
* **Mock 与 Fake**:Mock 用于交互,Fake 用于有状态行为。
|
||||
* **CTest 发现**:使用 `gtest_discover_tests()` 进行稳定的测试发现。
|
||||
* **CI 信号**:先运行子集,然后使用 `--output-on-failure` 运行完整套件。
|
||||
|
||||
## TDD 工作流
|
||||
|
||||
遵循 RED → GREEN → REFACTOR 循环:
|
||||
|
||||
1. **RED**:编写一个捕获新行为的失败测试
|
||||
2. **GREEN**:实现最小的更改以使其通过
|
||||
3. **REFACTOR**:在测试保持通过的同时进行清理
|
||||
|
||||
```cpp
|
||||
// tests/add_test.cpp
|
||||
#include <gtest/gtest.h>
|
||||
|
||||
int Add(int a, int b); // Provided by production code.
|
||||
|
||||
TEST(AddTest, AddsTwoNumbers) { // RED
|
||||
EXPECT_EQ(Add(2, 3), 5);
|
||||
}
|
||||
|
||||
// src/add.cpp
|
||||
int Add(int a, int b) { // GREEN
|
||||
return a + b;
|
||||
}
|
||||
|
||||
// REFACTOR: simplify/rename once tests pass
|
||||
```
|
||||
|
||||
## 代码示例
|
||||
|
||||
### 基础单元测试 (gtest)
|
||||
|
||||
```cpp
|
||||
// tests/calculator_test.cpp
|
||||
#include <gtest/gtest.h>
|
||||
|
||||
int Add(int a, int b); // Provided by production code.
|
||||
|
||||
TEST(CalculatorTest, AddsTwoNumbers) {
|
||||
EXPECT_EQ(Add(2, 3), 5);
|
||||
}
|
||||
```
|
||||
|
||||
### 夹具 (gtest)
|
||||
|
||||
```cpp
|
||||
// tests/user_store_test.cpp
|
||||
// Pseudocode stub: replace UserStore/User with project types.
|
||||
#include <gtest/gtest.h>
|
||||
#include <memory>
|
||||
#include <optional>
|
||||
#include <string>
|
||||
|
||||
struct User { std::string name; };
|
||||
class UserStore {
|
||||
public:
|
||||
explicit UserStore(std::string /*path*/) {}
|
||||
void Seed(std::initializer_list<User> /*users*/) {}
|
||||
std::optional<User> Find(const std::string &/*name*/) { return User{"alice"}; }
|
||||
};
|
||||
|
||||
class UserStoreTest : public ::testing::Test {
|
||||
protected:
|
||||
void SetUp() override {
|
||||
store = std::make_unique<UserStore>(":memory:");
|
||||
store->Seed({{"alice"}, {"bob"}});
|
||||
}
|
||||
|
||||
std::unique_ptr<UserStore> store;
|
||||
};
|
||||
|
||||
TEST_F(UserStoreTest, FindsExistingUser) {
|
||||
auto user = store->Find("alice");
|
||||
ASSERT_TRUE(user.has_value());
|
||||
EXPECT_EQ(user->name, "alice");
|
||||
}
|
||||
```
|
||||
|
||||
### Mock (gmock)
|
||||
|
||||
```cpp
|
||||
// tests/notifier_test.cpp
|
||||
#include <gmock/gmock.h>
|
||||
#include <gtest/gtest.h>
|
||||
#include <string>
|
||||
|
||||
class Notifier {
|
||||
public:
|
||||
virtual ~Notifier() = default;
|
||||
virtual void Send(const std::string &message) = 0;
|
||||
};
|
||||
|
||||
class MockNotifier : public Notifier {
|
||||
public:
|
||||
MOCK_METHOD(void, Send, (const std::string &message), (override));
|
||||
};
|
||||
|
||||
class Service {
|
||||
public:
|
||||
explicit Service(Notifier ¬ifier) : notifier_(notifier) {}
|
||||
void Publish(const std::string &message) { notifier_.Send(message); }
|
||||
|
||||
private:
|
||||
Notifier ¬ifier_;
|
||||
};
|
||||
|
||||
TEST(ServiceTest, SendsNotifications) {
|
||||
MockNotifier notifier;
|
||||
Service service(notifier);
|
||||
|
||||
EXPECT_CALL(notifier, Send("hello")).Times(1);
|
||||
service.Publish("hello");
|
||||
}
|
||||
```
|
||||
|
||||
### CMake/CTest 快速入门
|
||||
|
||||
```cmake
|
||||
# CMakeLists.txt (excerpt)
|
||||
cmake_minimum_required(VERSION 3.20)
|
||||
project(example LANGUAGES CXX)
|
||||
|
||||
set(CMAKE_CXX_STANDARD 20)
|
||||
set(CMAKE_CXX_STANDARD_REQUIRED ON)
|
||||
|
||||
include(FetchContent)
|
||||
# Prefer project-locked versions. If using a tag, use a pinned version per project policy.
|
||||
set(GTEST_VERSION v1.17.0) # Adjust to project policy.
|
||||
FetchContent_Declare(
|
||||
googletest
|
||||
URL https://github.com/google/googletest/archive/refs/tags/${GTEST_VERSION}.zip
|
||||
)
|
||||
FetchContent_MakeAvailable(googletest)
|
||||
|
||||
add_executable(example_tests
|
||||
tests/calculator_test.cpp
|
||||
src/calculator.cpp
|
||||
)
|
||||
target_link_libraries(example_tests GTest::gtest GTest::gmock GTest::gtest_main)
|
||||
|
||||
enable_testing()
|
||||
include(GoogleTest)
|
||||
gtest_discover_tests(example_tests)
|
||||
```
|
||||
|
||||
```bash
|
||||
cmake -S . -B build -DCMAKE_BUILD_TYPE=Debug
|
||||
cmake --build build -j
|
||||
ctest --test-dir build --output-on-failure
|
||||
```
|
||||
|
||||
## 运行测试
|
||||
|
||||
```bash
|
||||
ctest --test-dir build --output-on-failure
|
||||
ctest --test-dir build -R ClampTest
|
||||
ctest --test-dir build -R "UserStoreTest.*" --output-on-failure
|
||||
```
|
||||
|
||||
```bash
|
||||
./build/example_tests --gtest_filter=ClampTest.*
|
||||
./build/example_tests --gtest_filter=UserStoreTest.FindsExistingUser
|
||||
```
|
||||
|
||||
## 调试失败
|
||||
|
||||
1. 使用 gtest 过滤器重新运行单个失败的测试。
|
||||
2. 在失败的断言周围添加作用域日志记录。
|
||||
3. 启用消毒剂后重新运行。
|
||||
4. 根本原因修复后,扩展到完整套件。
|
||||
|
||||
## 覆盖率
|
||||
|
||||
优先使用目标级别的设置,而非全局标志。
|
||||
|
||||
```cmake
|
||||
option(ENABLE_COVERAGE "Enable coverage flags" OFF)
|
||||
|
||||
if(ENABLE_COVERAGE)
|
||||
if(CMAKE_CXX_COMPILER_ID MATCHES "GNU")
|
||||
target_compile_options(example_tests PRIVATE --coverage)
|
||||
target_link_options(example_tests PRIVATE --coverage)
|
||||
elseif(CMAKE_CXX_COMPILER_ID MATCHES "Clang")
|
||||
target_compile_options(example_tests PRIVATE -fprofile-instr-generate -fcoverage-mapping)
|
||||
target_link_options(example_tests PRIVATE -fprofile-instr-generate)
|
||||
endif()
|
||||
endif()
|
||||
```
|
||||
|
||||
GCC + gcov + lcov:
|
||||
|
||||
```bash
|
||||
cmake -S . -B build-cov -DENABLE_COVERAGE=ON
|
||||
cmake --build build-cov -j
|
||||
ctest --test-dir build-cov
|
||||
lcov --capture --directory build-cov --output-file coverage.info
|
||||
lcov --remove coverage.info '/usr/*' --output-file coverage.info
|
||||
genhtml coverage.info --output-directory coverage
|
||||
```
|
||||
|
||||
Clang + llvm-cov:
|
||||
|
||||
```bash
|
||||
cmake -S . -B build-llvm -DENABLE_COVERAGE=ON -DCMAKE_CXX_COMPILER=clang++
|
||||
cmake --build build-llvm -j
|
||||
LLVM_PROFILE_FILE="build-llvm/default.profraw" ctest --test-dir build-llvm
|
||||
llvm-profdata merge -sparse build-llvm/default.profraw -o build-llvm/default.profdata
|
||||
llvm-cov report build-llvm/example_tests -instr-profile=build-llvm/default.profdata
|
||||
```
|
||||
|
||||
## 消毒剂
|
||||
|
||||
```cmake
|
||||
option(ENABLE_ASAN "Enable AddressSanitizer" OFF)
|
||||
option(ENABLE_UBSAN "Enable UndefinedBehaviorSanitizer" OFF)
|
||||
option(ENABLE_TSAN "Enable ThreadSanitizer" OFF)
|
||||
|
||||
if(ENABLE_ASAN)
|
||||
add_compile_options(-fsanitize=address -fno-omit-frame-pointer)
|
||||
add_link_options(-fsanitize=address)
|
||||
endif()
|
||||
if(ENABLE_UBSAN)
|
||||
add_compile_options(-fsanitize=undefined -fno-omit-frame-pointer)
|
||||
add_link_options(-fsanitize=undefined)
|
||||
endif()
|
||||
if(ENABLE_TSAN)
|
||||
add_compile_options(-fsanitize=thread)
|
||||
add_link_options(-fsanitize=thread)
|
||||
endif()
|
||||
```
|
||||
|
||||
## 偶发性测试防护
|
||||
|
||||
* 切勿使用 `sleep` 进行同步;使用条件变量或门闩。
|
||||
* 为每个测试创建唯一的临时目录并始终清理它们。
|
||||
* 避免在单元测试中依赖真实时间、网络或文件系统。
|
||||
* 对随机化输入使用确定性种子。
|
||||
|
||||
## 最佳实践
|
||||
|
||||
### 应该做
|
||||
|
||||
* 保持测试的确定性和隔离性
|
||||
* 优先使用依赖注入而非全局变量
|
||||
* 对前置条件使用 `ASSERT_*`,对多个检查使用 `EXPECT_*`
|
||||
* 在 CTest 标签或目录中分离单元测试与集成测试
|
||||
* 在 CI 中运行消毒剂以进行内存和竞态检测
|
||||
|
||||
### 不应该做
|
||||
|
||||
* 不要在单元测试中依赖真实时间或网络
|
||||
* 当可以使用条件变量时,不要使用睡眠作为同步手段
|
||||
* 不要过度模拟简单的值对象
|
||||
* 不要对非关键日志使用脆弱的字符串匹配
|
||||
|
||||
### 常见陷阱
|
||||
|
||||
* **使用固定的临时路径** → 为每个测试生成唯一的临时目录并清理它们。
|
||||
* **依赖挂钟时间** → 注入时钟或使用模拟时间源。
|
||||
* **偶发性并发测试** → 使用条件变量/门闩和有界等待。
|
||||
* **隐藏的全局状态** → 在夹具中重置全局状态或移除全局变量。
|
||||
* **过度模拟** → 对有状态行为优先使用 Fake,仅对交互进行 Mock。
|
||||
* **缺少消毒剂运行** → 在 CI 中添加 ASan/UBSan/TSan 构建。
|
||||
* **仅在调试版本上计算覆盖率** → 确保覆盖率目标使用一致的标志。
|
||||
|
||||
## 可选附录:模糊测试 / 属性测试
|
||||
|
||||
仅在项目已支持 LLVM/libFuzzer 或属性测试库时使用。
|
||||
|
||||
* **libFuzzer**:最适合 I/O 最少的纯函数。
|
||||
* **RapidCheck**:基于属性的测试,用于验证不变量。
|
||||
|
||||
最小的 libFuzzer 测试框架(伪代码:替换 ParseConfig):
|
||||
|
||||
```cpp
|
||||
#include <cstddef>
|
||||
#include <cstdint>
|
||||
#include <string>
|
||||
|
||||
extern "C" int LLVMFuzzerTestOneInput(const uint8_t *data, size_t size) {
|
||||
std::string input(reinterpret_cast<const char *>(data), size);
|
||||
// ParseConfig(input); // project function
|
||||
return 0;
|
||||
}
|
||||
```
|
||||
|
||||
## GoogleTest 的替代方案
|
||||
|
||||
* **Catch2**:仅头文件,表达性强的匹配器
|
||||
* **doctest**:轻量级,编译开销最小
|
||||
165
docs/zh-CN/skills/nutrient-document-processing/SKILL.md
Normal file
165
docs/zh-CN/skills/nutrient-document-processing/SKILL.md
Normal file
@@ -0,0 +1,165 @@
|
||||
---
|
||||
name: nutrient-document-processing
|
||||
description: 使用Nutrient DWS API处理、转换、OCR、提取、编辑、签署和填写文档。支持PDF、DOCX、XLSX、PPTX、HTML和图像文件。
|
||||
---
|
||||
|
||||
# 文档处理
|
||||
|
||||
使用 [Nutrient DWS Processor API](https://www.nutrient.io/api/) 处理文档。转换格式、提取文本和表格、对扫描文档进行 OCR、编辑 PII、添加水印、数字签名以及填写 PDF 表单。
|
||||
|
||||
## 设置
|
||||
|
||||
在 **[nutrient.io](https://dashboard.nutrient.io/sign_up/?product=processor)** 获取一个免费的 API 密钥
|
||||
|
||||
```bash
|
||||
export NUTRIENT_API_KEY="pdf_live_..."
|
||||
```
|
||||
|
||||
所有请求都以 multipart POST 形式发送到 `https://api.nutrient.io/build`,并附带一个 `instructions` JSON 字段。
|
||||
|
||||
## 操作
|
||||
|
||||
### 转换文档
|
||||
|
||||
```bash
|
||||
# DOCX to PDF
|
||||
curl -X POST https://api.nutrient.io/build \
|
||||
-H "Authorization: Bearer $NUTRIENT_API_KEY" \
|
||||
-F "document.docx=@document.docx" \
|
||||
-F 'instructions={"parts":[{"file":"document.docx"}]}' \
|
||||
-o output.pdf
|
||||
|
||||
# PDF to DOCX
|
||||
curl -X POST https://api.nutrient.io/build \
|
||||
-H "Authorization: Bearer $NUTRIENT_API_KEY" \
|
||||
-F "document.pdf=@document.pdf" \
|
||||
-F 'instructions={"parts":[{"file":"document.pdf"}],"output":{"type":"docx"}}' \
|
||||
-o output.docx
|
||||
|
||||
# HTML to PDF
|
||||
curl -X POST https://api.nutrient.io/build \
|
||||
-H "Authorization: Bearer $NUTRIENT_API_KEY" \
|
||||
-F "index.html=@index.html" \
|
||||
-F 'instructions={"parts":[{"html":"index.html"}]}' \
|
||||
-o output.pdf
|
||||
```
|
||||
|
||||
支持的输入格式:PDF, DOCX, XLSX, PPTX, DOC, XLS, PPT, PPS, PPSX, ODT, RTF, HTML, JPG, PNG, TIFF, HEIC, GIF, WebP, SVG, TGA, EPS。
|
||||
|
||||
### 提取文本和数据
|
||||
|
||||
```bash
|
||||
# Extract plain text
|
||||
curl -X POST https://api.nutrient.io/build \
|
||||
-H "Authorization: Bearer $NUTRIENT_API_KEY" \
|
||||
-F "document.pdf=@document.pdf" \
|
||||
-F 'instructions={"parts":[{"file":"document.pdf"}],"output":{"type":"text"}}' \
|
||||
-o output.txt
|
||||
|
||||
# Extract tables as Excel
|
||||
curl -X POST https://api.nutrient.io/build \
|
||||
-H "Authorization: Bearer $NUTRIENT_API_KEY" \
|
||||
-F "document.pdf=@document.pdf" \
|
||||
-F 'instructions={"parts":[{"file":"document.pdf"}],"output":{"type":"xlsx"}}' \
|
||||
-o tables.xlsx
|
||||
```
|
||||
|
||||
### OCR 扫描文档
|
||||
|
||||
```bash
|
||||
# OCR to searchable PDF (supports 100+ languages)
|
||||
curl -X POST https://api.nutrient.io/build \
|
||||
-H "Authorization: Bearer $NUTRIENT_API_KEY" \
|
||||
-F "scanned.pdf=@scanned.pdf" \
|
||||
-F 'instructions={"parts":[{"file":"scanned.pdf"}],"actions":[{"type":"ocr","language":"english"}]}' \
|
||||
-o searchable.pdf
|
||||
```
|
||||
|
||||
支持语言:通过 ISO 639-2 代码支持 100 多种语言(例如,`eng`, `deu`, `fra`, `spa`, `jpn`, `kor`, `chi_sim`, `chi_tra`, `ara`, `hin`, `rus`)。完整的语言名称如 `english` 或 `german` 也适用。查看 [完整的 OCR 语言表](https://www.nutrient.io/guides/document-engine/ocr/language-support/) 以获取所有支持的代码。
|
||||
|
||||
### 编辑敏感信息
|
||||
|
||||
```bash
|
||||
# Pattern-based (SSN, email)
|
||||
curl -X POST https://api.nutrient.io/build \
|
||||
-H "Authorization: Bearer $NUTRIENT_API_KEY" \
|
||||
-F "document.pdf=@document.pdf" \
|
||||
-F 'instructions={"parts":[{"file":"document.pdf"}],"actions":[{"type":"redaction","strategy":"preset","strategyOptions":{"preset":"social-security-number"}},{"type":"redaction","strategy":"preset","strategyOptions":{"preset":"email-address"}}]}' \
|
||||
-o redacted.pdf
|
||||
|
||||
# Regex-based
|
||||
curl -X POST https://api.nutrient.io/build \
|
||||
-H "Authorization: Bearer $NUTRIENT_API_KEY" \
|
||||
-F "document.pdf=@document.pdf" \
|
||||
-F 'instructions={"parts":[{"file":"document.pdf"}],"actions":[{"type":"redaction","strategy":"regex","strategyOptions":{"regex":"\\b[A-Z]{2}\\d{6}\\b"}}]}' \
|
||||
-o redacted.pdf
|
||||
```
|
||||
|
||||
预设:`social-security-number`, `email-address`, `credit-card-number`, `international-phone-number`, `north-american-phone-number`, `date`, `time`, `url`, `ipv4`, `ipv6`, `mac-address`, `us-zip-code`, `vin`。
|
||||
|
||||
### 添加水印
|
||||
|
||||
```bash
|
||||
curl -X POST https://api.nutrient.io/build \
|
||||
-H "Authorization: Bearer $NUTRIENT_API_KEY" \
|
||||
-F "document.pdf=@document.pdf" \
|
||||
-F 'instructions={"parts":[{"file":"document.pdf"}],"actions":[{"type":"watermark","text":"CONFIDENTIAL","fontSize":72,"opacity":0.3,"rotation":-45}]}' \
|
||||
-o watermarked.pdf
|
||||
```
|
||||
|
||||
### 数字签名
|
||||
|
||||
```bash
|
||||
# Self-signed CMS signature
|
||||
curl -X POST https://api.nutrient.io/build \
|
||||
-H "Authorization: Bearer $NUTRIENT_API_KEY" \
|
||||
-F "document.pdf=@document.pdf" \
|
||||
-F 'instructions={"parts":[{"file":"document.pdf"}],"actions":[{"type":"sign","signatureType":"cms"}]}' \
|
||||
-o signed.pdf
|
||||
```
|
||||
|
||||
### 填写 PDF 表单
|
||||
|
||||
```bash
|
||||
curl -X POST https://api.nutrient.io/build \
|
||||
-H "Authorization: Bearer $NUTRIENT_API_KEY" \
|
||||
-F "form.pdf=@form.pdf" \
|
||||
-F 'instructions={"parts":[{"file":"form.pdf"}],"actions":[{"type":"fillForm","formFields":{"name":"Jane Smith","email":"jane@example.com","date":"2026-02-06"}}]}' \
|
||||
-o filled.pdf
|
||||
```
|
||||
|
||||
## MCP 服务器(替代方案)
|
||||
|
||||
对于原生工具集成,请使用 MCP 服务器代替 curl:
|
||||
|
||||
```json
|
||||
{
|
||||
"mcpServers": {
|
||||
"nutrient-dws": {
|
||||
"command": "npx",
|
||||
"args": ["-y", "@nutrient-sdk/dws-mcp-server"],
|
||||
"env": {
|
||||
"NUTRIENT_DWS_API_KEY": "YOUR_API_KEY",
|
||||
"SANDBOX_PATH": "/path/to/working/directory"
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
```
|
||||
|
||||
## 使用场景
|
||||
|
||||
* 在格式之间转换文档(PDF, DOCX, XLSX, PPTX, HTML, 图像)
|
||||
* 从 PDF 中提取文本、表格或键值对
|
||||
* 对扫描文档或图像进行 OCR
|
||||
* 在共享文档前编辑 PII
|
||||
* 为草稿或机密文档添加水印
|
||||
* 数字签署合同或协议
|
||||
* 以编程方式填写 PDF 表单
|
||||
|
||||
## 链接
|
||||
|
||||
* [API 演练场](https://dashboard.nutrient.io/processor-api/playground/)
|
||||
* [完整 API 文档](https://www.nutrient.io/guides/dws-processor/)
|
||||
* [代理技能仓库](https://github.com/PSPDFKit-labs/nutrient-agent-skill)
|
||||
* [npm MCP 服务器](https://www.npmjs.com/package/@nutrient-sdk/dws-mcp-server)
|
||||
171
docs/zh-CN/skills/security-scan/SKILL.md
Normal file
171
docs/zh-CN/skills/security-scan/SKILL.md
Normal file
@@ -0,0 +1,171 @@
|
||||
---
|
||||
name: security-scan
|
||||
description: 使用AgentShield扫描您的Claude Code配置(.claude/目录),检测安全漏洞、错误配置和注入风险。检查CLAUDE.md、settings.json、MCP服务器、钩子和代理定义。
|
||||
---
|
||||
|
||||
# 安全扫描技能
|
||||
|
||||
使用 [AgentShield](https://github.com/affaan-m/agentshield) 审计您的 Claude Code 配置中的安全问题。
|
||||
|
||||
## 何时激活
|
||||
|
||||
* 设置新的 Claude Code 项目时
|
||||
* 修改 `.claude/settings.json`、`CLAUDE.md` 或 MCP 配置后
|
||||
* 提交配置更改前
|
||||
* 加入具有现有 Claude Code 配置的新代码库时
|
||||
* 定期进行安全卫生检查时
|
||||
|
||||
## 扫描内容
|
||||
|
||||
| 文件 | 检查项 |
|
||||
|------|--------|
|
||||
| `CLAUDE.md` | 硬编码的密钥、自动运行指令、提示词注入模式 |
|
||||
| `settings.json` | 过于宽松的允许列表、缺失的拒绝列表、危险的绕过标志 |
|
||||
| `mcp.json` | 有风险的 MCP 服务器、硬编码的环境变量密钥、npx 供应链风险 |
|
||||
| `hooks/` | 通过 `${file}` 插值导致的命令注入、数据泄露、静默错误抑制 |
|
||||
| `agents/*.md` | 无限制的工具访问、提示词注入攻击面、缺失的模型规格 |
|
||||
|
||||
## 先决条件
|
||||
|
||||
必须安装 AgentShield。检查并在需要时安装:
|
||||
|
||||
```bash
|
||||
# Check if installed
|
||||
npx ecc-agentshield --version
|
||||
|
||||
# Install globally (recommended)
|
||||
npm install -g ecc-agentshield
|
||||
|
||||
# Or run directly via npx (no install needed)
|
||||
npx ecc-agentshield scan .
|
||||
```
|
||||
|
||||
## 使用方法
|
||||
|
||||
### 基础扫描
|
||||
|
||||
针对当前项目的 `.claude/` 目录运行:
|
||||
|
||||
```bash
|
||||
# Scan current project
|
||||
npx ecc-agentshield scan
|
||||
|
||||
# Scan a specific path
|
||||
npx ecc-agentshield scan --path /path/to/.claude
|
||||
|
||||
# Scan with minimum severity filter
|
||||
npx ecc-agentshield scan --min-severity medium
|
||||
```
|
||||
|
||||
### 输出格式
|
||||
|
||||
```bash
|
||||
# Terminal output (default) — colored report with grade
|
||||
npx ecc-agentshield scan
|
||||
|
||||
# JSON — for CI/CD integration
|
||||
npx ecc-agentshield scan --format json
|
||||
|
||||
# Markdown — for documentation
|
||||
npx ecc-agentshield scan --format markdown
|
||||
|
||||
# HTML — self-contained dark-theme report
|
||||
npx ecc-agentshield scan --format html > security-report.html
|
||||
```
|
||||
|
||||
### 自动修复
|
||||
|
||||
自动应用安全的修复(仅修复标记为可自动修复的问题):
|
||||
|
||||
```bash
|
||||
npx ecc-agentshield scan --fix
|
||||
```
|
||||
|
||||
这将:
|
||||
|
||||
* 用环境变量引用替换硬编码的密钥
|
||||
* 将通配符权限收紧为作用域明确的替代方案
|
||||
* 绝不修改仅限手动修复的建议
|
||||
|
||||
### Opus 4.6 深度分析
|
||||
|
||||
运行对抗性的三智能体流程以进行更深入的分析:
|
||||
|
||||
```bash
|
||||
# Requires ANTHROPIC_API_KEY
|
||||
export ANTHROPIC_API_KEY=your-key
|
||||
npx ecc-agentshield scan --opus --stream
|
||||
```
|
||||
|
||||
这将运行:
|
||||
|
||||
1. **攻击者(红队)** — 寻找攻击向量
|
||||
2. **防御者(蓝队)** — 建议加固措施
|
||||
3. **审计员(最终裁决)** — 综合双方观点
|
||||
|
||||
### 初始化安全配置
|
||||
|
||||
从头开始搭建一个新的安全 `.claude/` 配置:
|
||||
|
||||
```bash
|
||||
npx ecc-agentshield init
|
||||
```
|
||||
|
||||
创建:
|
||||
|
||||
* 具有作用域权限和拒绝列表的 `settings.json`
|
||||
* 遵循安全最佳实践的 `CLAUDE.md`
|
||||
* `mcp.json` 占位符
|
||||
|
||||
### GitHub Action
|
||||
|
||||
添加到您的 CI 流水线中:
|
||||
|
||||
```yaml
|
||||
- uses: affaan-m/agentshield@v1
|
||||
with:
|
||||
path: '.'
|
||||
min-severity: 'medium'
|
||||
fail-on-findings: true
|
||||
```
|
||||
|
||||
## 严重性等级
|
||||
|
||||
| 等级 | 分数 | 含义 |
|
||||
|-------|-------|---------|
|
||||
| A | 90-100 | 安全配置 |
|
||||
| B | 75-89 | 轻微问题 |
|
||||
| C | 60-74 | 需要注意 |
|
||||
| D | 40-59 | 显著风险 |
|
||||
| F | 0-39 | 严重漏洞 |
|
||||
|
||||
## 结果解读
|
||||
|
||||
### 关键发现(立即修复)
|
||||
|
||||
* 配置文件中硬编码的 API 密钥或令牌
|
||||
* 允许列表中存在 `Bash(*)`(无限制的 shell 访问)
|
||||
* 钩子中通过 `${file}` 插值导致的命令注入
|
||||
* 运行 shell 的 MCP 服务器
|
||||
|
||||
### 高优先级发现(生产前修复)
|
||||
|
||||
* CLAUDE.md 中的自动运行指令(提示词注入向量)
|
||||
* 权限配置中缺少拒绝列表
|
||||
* 具有不必要 Bash 访问权限的代理
|
||||
|
||||
### 中优先级发现(建议修复)
|
||||
|
||||
* 钩子中的静默错误抑制(`2>/dev/null`、`|| true`)
|
||||
* 缺少 PreToolUse 安全钩子
|
||||
* MCP 服务器配置中的 `npx -y` 自动安装
|
||||
|
||||
### 信息性发现(了解情况)
|
||||
|
||||
* MCP 服务器缺少描述信息
|
||||
* 正确标记为良好实践的限制性指令
|
||||
|
||||
## 链接
|
||||
|
||||
* **GitHub**: [github.com/affaan-m/agentshield](https://github.com/affaan-m/agentshield)
|
||||
* **npm**: [npmjs.com/package/ecc-agentshield](https://www.npmjs.com/package/ecc-agentshield)
|
||||
358
docs/zh-CN/the-longform-guide.md
Normal file
358
docs/zh-CN/the-longform-guide.md
Normal file
@@ -0,0 +1,358 @@
|
||||
# 关于 Claude Code 的完整长篇指南
|
||||
|
||||

|
||||
|
||||
***
|
||||
|
||||
> **前提**:本指南建立在 [关于 Claude Code 的简明指南](./the-shortform-guide.md) 之上。如果你还没有设置技能、钩子、子代理、MCP 和插件,请先阅读该指南。
|
||||
|
||||

|
||||
*速记指南 - 请先阅读它*
|
||||
|
||||
在简明指南中,我介绍了基础设置:技能和命令、钩子、子代理、MCP、插件,以及构成有效 Claude Code 工作流骨干的配置模式。那是设置指南和基础架构。
|
||||
|
||||
这篇长篇指南深入探讨了区分高效会话与浪费会话的技巧。如果你还没有阅读简明指南,请先返回并设置好你的配置。以下内容假定你已经配置好技能、代理、钩子和 MCP,并且它们正在工作。
|
||||
|
||||
这里的主题是:令牌经济、记忆持久性、验证模式、并行化策略,以及构建可重用工作流的复合效应。这些是我在超过 10 个月的日常使用中提炼出的模式,它们决定了你是在第一个小时内就饱受上下文腐化之苦,还是能够保持数小时的高效会话。
|
||||
|
||||
简明指南和长篇指南中涵盖的所有内容都可以在 GitHub 上找到:`github.com/affaan-m/everything-claude-code`
|
||||
|
||||
***
|
||||
|
||||
## 技巧与窍门
|
||||
|
||||
### 有些 MCP 是可替换的,可以释放你的上下文窗口
|
||||
|
||||
对于诸如版本控制(GitHub)、数据库(Supabase)、部署(Vercel、Railway)等 MCP 来说——这些平台大多已经拥有健壮的 CLI,MCP 本质上只是对其进行包装。MCP 是一个很好的包装器,但它是有代价的。
|
||||
|
||||
要让 CLI 功能更像 MCP,而不实际使用 MCP(以及随之而来的减少的上下文窗口),可以考虑将功能打包成技能和命令。提取出 MCP 暴露的、使事情变得容易的工具,并将它们转化为命令。
|
||||
|
||||
示例:与其始终加载 GitHub MCP,不如创建一个包装了 `gh pr create` 并带有你偏好选项的 `/gh-pr` 命令。与其让 Supabase MCP 消耗上下文,不如创建直接使用 Supabase CLI 的技能。
|
||||
|
||||
有了延迟加载,上下文窗口问题基本解决了。但令牌使用和成本问题并未以同样的方式解决。CLI + 技能的方法仍然是一种令牌优化方法。
|
||||
|
||||
***
|
||||
|
||||
## 重要事项
|
||||
|
||||
### 上下文与记忆管理
|
||||
|
||||
要在会话间共享记忆,最好的方法是使用一个技能或命令来总结和检查进度,然后保存到 `.claude` 文件夹中的一个 `.tmp` 文件中,并在会话结束前不断追加内容。第二天,它可以将其用作上下文,并从中断处继续。为每个会话创建一个新文件,这样你就不会将旧的上下文污染到新的工作中。
|
||||
|
||||

|
||||
*会话存储示例 -> https://github.com/affaan-m/everything-claude-code/tree/main/examples/sessions*
|
||||
|
||||
Claude 创建一个总结当前状态的文件。审阅它,如果需要则要求编辑,然后重新开始。对于新的对话,只需提供文件路径。当你达到上下文限制并需要继续复杂工作时,这尤其有用。这些文件应包含:
|
||||
|
||||
* 哪些方法有效(有证据可验证)
|
||||
* 哪些方法尝试过但无效
|
||||
* 哪些方法尚未尝试,以及剩下什么需要做
|
||||
|
||||
**策略性地清除上下文:**
|
||||
|
||||
一旦你制定了计划并清除了上下文(Claude Code 中计划模式的默认选项),你就可以根据计划工作。当你积累了大量与执行不再相关的探索性上下文时,这很有用。对于策略性压缩,请禁用自动压缩。在逻辑间隔手动压缩,或创建一个为你执行此操作的技能。
|
||||
|
||||
**高级:动态系统提示注入**
|
||||
|
||||
我学到的一个模式是:与其将所有内容都放在 CLAUDE.md(用户作用域)或 `.claude/rules/`(项目作用域)中,让它们每次会话都加载,不如使用 CLI 标志动态注入上下文。
|
||||
|
||||
```bash
|
||||
claude --system-prompt "$(cat memory.md)"
|
||||
```
|
||||
|
||||
这让你可以更精确地控制何时加载哪些上下文。系统提示内容比用户消息具有更高的权威性,而用户消息又比工具结果具有更高的权威性。
|
||||
|
||||
**实际设置:**
|
||||
|
||||
```bash
|
||||
# Daily development
|
||||
alias claude-dev='claude --system-prompt "$(cat ~/.claude/contexts/dev.md)"'
|
||||
|
||||
# PR review mode
|
||||
alias claude-review='claude --system-prompt "$(cat ~/.claude/contexts/review.md)"'
|
||||
|
||||
# Research/exploration mode
|
||||
alias claude-research='claude --system-prompt "$(cat ~/.claude/contexts/research.md)"'
|
||||
```
|
||||
|
||||
**高级:记忆持久化钩子**
|
||||
|
||||
有一些大多数人不知道的钩子,有助于记忆管理:
|
||||
|
||||
* **PreCompact 钩子**:在上下文压缩发生之前,将重要状态保存到文件
|
||||
* **Stop 钩子(会话结束)**:在会话结束时,将学习成果持久化到文件
|
||||
* **SessionStart 钩子**:在新会话开始时,自动加载之前的上下文
|
||||
|
||||
我已经构建了这些钩子,它们位于仓库的 `github.com/affaan-m/everything-claude-code/tree/main/hooks/memory-persistence`
|
||||
|
||||
***
|
||||
|
||||
### 持续学习 / 记忆
|
||||
|
||||
如果你不得不多次重复一个提示,并且 Claude 遇到了同样的问题或给出了你以前听过的回答——这些模式必须被附加到技能中。
|
||||
|
||||
**问题:** 浪费令牌,浪费上下文,浪费时间。
|
||||
|
||||
**解决方案:** 当 Claude Code 发现一些不平凡的事情时——调试技巧、变通方法、某些项目特定的模式——它会将该知识保存为一个新技能。下次出现类似问题时,该技能会自动加载。
|
||||
|
||||
我构建了一个实现此功能的持续学习技能:`github.com/affaan-m/everything-claude-code/tree/main/skills/continuous-learning`
|
||||
|
||||
**为什么用 Stop 钩子(而不是 UserPromptSubmit):**
|
||||
|
||||
关键的设计决策是使用 **Stop 钩子** 而不是 UserPromptSubmit。UserPromptSubmit 在每个消息上运行——给每个提示增加延迟。Stop 在会话结束时只运行一次——轻量级,不会在会话期间拖慢你的速度。
|
||||
|
||||
***
|
||||
|
||||
### 令牌优化
|
||||
|
||||
**主要策略:子代理架构**
|
||||
|
||||
优化你使用的工具和子代理架构,旨在将任务委托给最便宜且足以胜任的模型。
|
||||
|
||||
**模型选择快速参考:**
|
||||
|
||||

|
||||
*针对各种常见任务的子代理假设设置及选择背后的理由*
|
||||
|
||||
| 任务类型 | 模型 | 原因 |
|
||||
| ------------------------- | ------ | ------------------------------------------ |
|
||||
| 探索/搜索 | Haiku | 快速、便宜,足以用于查找文件 |
|
||||
| 简单编辑 | Haiku | 单文件更改,指令清晰 |
|
||||
| 多文件实现 | Sonnet | 编码的最佳平衡 |
|
||||
| 复杂架构 | Opus | 需要深度推理 |
|
||||
| PR 审查 | Sonnet | 理解上下文,捕捉细微差别 |
|
||||
| 安全分析 | Opus | 不能错过漏洞 |
|
||||
| 编写文档 | Haiku | 结构简单 |
|
||||
| 调试复杂错误 | Opus | 需要将整个系统记在脑中 |
|
||||
|
||||
对于 90% 的编码任务,默认使用 Sonnet。当第一次尝试失败、任务涉及 5 个以上文件、架构决策或安全关键代码时,升级到 Opus。
|
||||
|
||||
**定价参考:**
|
||||
|
||||

|
||||
*来源:https://platform.claude.com/docs/en/about-claude/pricing*
|
||||
|
||||
**工具特定优化:**
|
||||
|
||||
用 mgrep 替换 grep——与传统 grep 或 ripgrep 相比,平均减少约 50% 的令牌:
|
||||
|
||||

|
||||
*在我们的 50 项任务基准测试中,mgrep + Claude Code 使用了比基于 grep 的工作流少约 2 倍的 token,且判断质量相似或更好。来源:https://github.com/mixedbread-ai/mgrep*
|
||||
|
||||
**模块化代码库的好处:**
|
||||
|
||||
拥有一个更模块化的代码库,主文件只有数百行而不是数千行,这有助于降低令牌优化成本,并确保任务在第一次尝试时就正确完成。
|
||||
|
||||
***
|
||||
|
||||
### 验证循环与评估
|
||||
|
||||
**基准测试工作流:**
|
||||
|
||||
比较在有和没有技能的情况下询问同一件事,并检查输出差异:
|
||||
|
||||
分叉对话,在其中之一的对话中初始化一个新的工作树但不使用该技能,最后拉取差异,查看记录了什么。
|
||||
|
||||
**评估模式类型:**
|
||||
|
||||
* **基于检查点的评估**:设置明确的检查点,根据定义的标准进行验证,在继续之前修复
|
||||
* **持续评估**:每 N 分钟或在重大更改后运行,完整的测试套件 + 代码检查
|
||||
|
||||
**关键指标:**
|
||||
|
||||
```
|
||||
pass@k: At least ONE of k attempts succeeds
|
||||
k=1: 70% k=3: 91% k=5: 97%
|
||||
|
||||
pass^k: ALL k attempts must succeed
|
||||
k=1: 70% k=3: 34% k=5: 17%
|
||||
```
|
||||
|
||||
当你只需要它能工作时,使用 **pass@k**。当一致性至关重要时,使用 **pass^k**。
|
||||
|
||||
***
|
||||
|
||||
## 并行化
|
||||
|
||||
在多 Claude 终端设置中分叉对话时,请确保分叉中的操作和原始对话的范围定义明确。在代码更改方面,力求最小化重叠。
|
||||
|
||||
**我偏好的模式:**
|
||||
|
||||
主聊天用于代码更改,分叉用于询问有关代码库及其当前状态的问题,或研究外部服务。
|
||||
|
||||
**关于任意终端数量:**
|
||||
|
||||

|
||||
*Boris (Anthropic) 关于运行多个 Claude 实例的说明*
|
||||
|
||||
Boris 有关于并行化的建议。他曾建议在本地运行 5 个 Claude 实例,在上游运行 5 个。我建议不要设置任意的终端数量。增加终端应该是出于真正的必要性。
|
||||
|
||||
你的目标应该是:**用最小可行的并行化程度,你能完成多少工作。**
|
||||
|
||||
**用于并行实例的 Git Worktrees:**
|
||||
|
||||
```bash
|
||||
# Create worktrees for parallel work
|
||||
git worktree add ../project-feature-a feature-a
|
||||
git worktree add ../project-feature-b feature-b
|
||||
git worktree add ../project-refactor refactor-branch
|
||||
|
||||
# Each worktree gets its own Claude instance
|
||||
cd ../project-feature-a && claude
|
||||
```
|
||||
|
||||
**如果** 你要开始扩展实例数量 **并且** 你有多个 Claude 实例在处理相互重叠的代码,那么你必须使用 git worktrees,并为每个实例制定非常明确的计划。使用 `/rename <name here>` 来命名你所有的聊天。
|
||||
|
||||

|
||||
*初始设置:左终端用于编码,右终端用于提问 - 使用 /rename 和 /fork*
|
||||
|
||||
**级联方法:**
|
||||
|
||||
当运行多个 Claude Code 实例时,使用“级联”模式进行组织:
|
||||
|
||||
* 在右侧的新标签页中打开新任务
|
||||
* 从左到右、从旧到新进行扫描
|
||||
* 一次最多专注于 3-4 个任务
|
||||
|
||||
***
|
||||
|
||||
## 基础工作
|
||||
|
||||
**双实例启动模式:**
|
||||
|
||||
对于我自己的工作流管理,我喜欢从一个空仓库开始,打开 2 个 Claude 实例。
|
||||
|
||||
**实例 1:脚手架代理**
|
||||
|
||||
* 搭建脚手架和基础工作
|
||||
* 创建项目结构
|
||||
* 设置配置(CLAUDE.md、规则、代理)
|
||||
|
||||
**实例 2:深度研究代理**
|
||||
|
||||
* 连接到你的所有服务,进行网络搜索
|
||||
* 创建详细的 PRD
|
||||
* 创建架构 Mermaid 图
|
||||
* 编译包含实际文档片段的参考资料
|
||||
|
||||
**llms.txt 模式:**
|
||||
|
||||
如果可用,你可以通过在你到达它们的文档页面后执行 `/llms.txt` 来在许多文档参考资料上找到一个 `llms.txt`。这会给你一个干净的、针对 LLM 优化的文档版本。
|
||||
|
||||
**理念:构建可重用的模式**
|
||||
|
||||
来自 @omarsar0:"早期,我花时间构建可重用的工作流/模式。构建过程很繁琐,但随着模型和代理框架的改进,这产生了惊人的复合效应。"
|
||||
|
||||
**应该投资于:**
|
||||
|
||||
* 子代理
|
||||
* 技能
|
||||
* 命令
|
||||
* 规划模式
|
||||
* MCP 工具
|
||||
* 上下文工程模式
|
||||
|
||||
***
|
||||
|
||||
## 代理与子代理的最佳实践
|
||||
|
||||
**子代理上下文问题:**
|
||||
|
||||
子代理的存在是为了通过返回摘要而不是转储所有内容来节省上下文。但编排器拥有子代理所缺乏的语义上下文。子代理只知道字面查询,不知道请求背后的 **目的**。
|
||||
|
||||
**迭代检索模式:**
|
||||
|
||||
1. 编排器评估每个子代理的返回
|
||||
2. 在接受之前询问后续问题
|
||||
3. 子代理返回源,获取答案,返回
|
||||
4. 循环直到足够(最多 3 个周期)
|
||||
|
||||
**关键:** 传递目标上下文,而不仅仅是查询。
|
||||
|
||||
**具有顺序阶段的编排器:**
|
||||
|
||||
```markdown
|
||||
第一阶段:研究(使用探索智能体)→ research-summary.md
|
||||
第二阶段:规划(使用规划智能体)→ plan.md
|
||||
第三阶段:实施(使用测试驱动开发指南智能体)→ 代码变更
|
||||
第四阶段:审查(使用代码审查智能体)→ review-comments.md
|
||||
第五阶段:验证(如需则使用构建错误解决器)→ 完成或循环返回
|
||||
|
||||
```
|
||||
|
||||
**关键规则:**
|
||||
|
||||
1. 每个智能体获得一个清晰的输入并产生一个清晰的输出
|
||||
2. 输出成为下一阶段的输入
|
||||
3. 永远不要跳过阶段
|
||||
4. 在智能体之间使用 `/clear`
|
||||
5. 将中间输出存储在文件中
|
||||
|
||||
***
|
||||
|
||||
## 有趣的东西 / 非关键,仅供娱乐的小贴士
|
||||
|
||||
### 自定义状态栏
|
||||
|
||||
你可以使用 `/statusline` 来设置它 - 然后 Claude 会说你没有状态栏,但可以为你设置,并询问你想要在里面放什么。
|
||||
|
||||
另请参阅:https://github.com/sirmalloc/ccstatusline
|
||||
|
||||
### 语音转录
|
||||
|
||||
用你的声音与 Claude Code 对话。对很多人来说比打字更快。
|
||||
|
||||
* Mac 上的 superwhisper、MacWhisper
|
||||
* 即使转录有误,Claude 也能理解意图
|
||||
|
||||
### 终端别名
|
||||
|
||||
```bash
|
||||
alias c='claude'
|
||||
alias gb='github'
|
||||
alias co='code'
|
||||
alias q='cd ~/Desktop/projects'
|
||||
```
|
||||
|
||||
***
|
||||
|
||||
## 里程碑
|
||||
|
||||

|
||||
*一周内获得 25,000+ GitHub stars*
|
||||
|
||||
***
|
||||
|
||||
## 资源
|
||||
|
||||
**智能体编排:**
|
||||
|
||||
* https://github.com/ruvnet/claude-flow - 拥有 54+ 个专业智能体的企业级编排平台
|
||||
|
||||
**自我改进记忆:**
|
||||
|
||||
* https://github.com/affaan-m/everything-claude-code/tree/main/skills/continuous-learning
|
||||
* rlancemartin.github.io/2025/12/01/claude\_diary/ - 会话反思模式
|
||||
|
||||
**系统提示词参考:**
|
||||
|
||||
* https://github.com/x1xhlol/system-prompts-and-models-of-ai-tools - 系统提示词集合 (110k stars)
|
||||
|
||||
**官方:**
|
||||
|
||||
* Anthropic Academy: anthropic.skilljar.com
|
||||
|
||||
***
|
||||
|
||||
## 参考资料
|
||||
|
||||
* [Anthropic: 解密 AI 智能体的评估](https://www.anthropic.com/engineering/demystifying-evals-for-ai-agents)
|
||||
* [YK: 32 个 Claude Code 技巧](https://agenticcoding.substack.com/p/32-claude-code-tips-from-basics-to)
|
||||
* [RLanceMartin: 会话反思模式](https://rlancemartin.github.io/2025/12/01/claude_diary/)
|
||||
* @PerceptualPeak: 子智能体上下文协商
|
||||
* @menhguin: 智能体抽象层分级
|
||||
* @omarsar0: 复合效应哲学
|
||||
|
||||
***
|
||||
|
||||
*两份指南中涵盖的所有内容都可以在 GitHub 上的 [everything-claude-code](https://github.com/affaan-m/everything-claude-code) 找到*
|
||||
431
docs/zh-CN/the-shortform-guide.md
Normal file
431
docs/zh-CN/the-shortform-guide.md
Normal file
@@ -0,0 +1,431 @@
|
||||
# Claude Code 简明指南
|
||||
|
||||

|
||||
|
||||
***
|
||||
|
||||
**自 2 月实验性推出以来,我一直是 Claude Code 的忠实用户,并凭借 [zenith.chat](https://zenith.chat) 与 [@DRodriguezFX](https://x.com/DRodriguezFX) 一起赢得了 Anthropic x Forum Ventures 的黑客马拉松——完全使用 Claude Code。**
|
||||
|
||||
经过 10 个月的日常使用,以下是我的完整设置:技能、钩子、子代理、MCP、插件以及实际有效的方法。
|
||||
|
||||
***
|
||||
|
||||
## 技能和命令
|
||||
|
||||
技能就像规则,受限于特定的范围和流程。当你需要执行特定工作流时,它们是提示词的简写。
|
||||
|
||||
在使用 Opus 4.5 长时间编码后,你想清理死代码和松散的 .md 文件吗?运行 `/refactor-clean`。需要测试吗?`/tdd`、`/e2e`、`/test-coverage`。技能也可以包含代码地图——一种让 Claude 快速浏览你的代码库而无需消耗上下文进行探索的方式。
|
||||
|
||||

|
||||
*将命令链接在一起*
|
||||
|
||||
命令是通过斜杠命令执行的技能。它们有重叠但存储方式不同:
|
||||
|
||||
* **技能**: `~/.claude/skills/` - 更广泛的工作流定义
|
||||
* **命令**: `~/.claude/commands/` - 快速可执行的提示词
|
||||
|
||||
```bash
|
||||
# Example skill structure
|
||||
~/.claude/skills/
|
||||
pmx-guidelines.md # Project-specific patterns
|
||||
coding-standards.md # Language best practices
|
||||
tdd-workflow/ # Multi-file skill with README.md
|
||||
security-review/ # Checklist-based skill
|
||||
```
|
||||
|
||||
***
|
||||
|
||||
## 钩子
|
||||
|
||||
钩子是基于触发的自动化,在特定事件发生时触发。与技能不同,它们受限于工具调用和生命周期事件。
|
||||
|
||||
**钩子类型:**
|
||||
|
||||
1. **PreToolUse** - 工具执行前(验证、提醒)
|
||||
2. **PostToolUse** - 工具完成后(格式化、反馈循环)
|
||||
3. **UserPromptSubmit** - 当你发送消息时
|
||||
4. **Stop** - 当 Claude 完成响应时
|
||||
5. **PreCompact** - 上下文压缩前
|
||||
6. **Notification** - 权限请求
|
||||
|
||||
**示例:长时间运行命令前的 tmux 提醒**
|
||||
|
||||
```json
|
||||
{
|
||||
"PreToolUse": [
|
||||
{
|
||||
"matcher": "tool == \"Bash\" && tool_input.command matches \"(npm|pnpm|yarn|cargo|pytest)\"",
|
||||
"hooks": [
|
||||
{
|
||||
"type": "command",
|
||||
"command": "if [ -z \"$TMUX\" ]; then echo '[Hook] Consider tmux for session persistence' >&2; fi"
|
||||
}
|
||||
]
|
||||
}
|
||||
]
|
||||
}
|
||||
```
|
||||
|
||||

|
||||
*在 Claude Code 中运行 PostToolUse 钩子时获得的反馈示例*
|
||||
|
||||
**专业提示:** 使用 `hookify` 插件以对话方式创建钩子,而不是手动编写 JSON。运行 `/hookify` 并描述你想要什么。
|
||||
|
||||
***
|
||||
|
||||
## 子代理
|
||||
|
||||
子代理是你的编排器(主 Claude)可以委托任务给它的、具有有限范围的进程。它们可以在后台或前台运行,为主代理释放上下文。
|
||||
|
||||
子代理与技能配合得很好——一个能够执行你技能子集的子代理可以被委托任务并自主使用这些技能。它们也可以用特定的工具权限进行沙盒化。
|
||||
|
||||
```bash
|
||||
# Example subagent structure
|
||||
~/.claude/agents/
|
||||
planner.md # Feature implementation planning
|
||||
architect.md # System design decisions
|
||||
tdd-guide.md # Test-driven development
|
||||
code-reviewer.md # Quality/security review
|
||||
security-reviewer.md # Vulnerability analysis
|
||||
build-error-resolver.md
|
||||
e2e-runner.md
|
||||
refactor-cleaner.md
|
||||
```
|
||||
|
||||
为每个子代理配置允许的工具、MCP 和权限,以实现适当的范围界定。
|
||||
|
||||
***
|
||||
|
||||
## 规则和记忆
|
||||
|
||||
你的 `.rules` 文件夹包含 `.md` 文件,其中是 Claude 应始终遵循的最佳实践。有两种方法:
|
||||
|
||||
1. **单一 CLAUDE.md** - 所有内容在一个文件中(用户或项目级别)
|
||||
2. **规则文件夹** - 按关注点分组的模块化 `.md` 文件
|
||||
|
||||
```bash
|
||||
~/.claude/rules/
|
||||
security.md # No hardcoded secrets, validate inputs
|
||||
coding-style.md # Immutability, file organization
|
||||
testing.md # TDD workflow, 80% coverage
|
||||
git-workflow.md # Commit format, PR process
|
||||
agents.md # When to delegate to subagents
|
||||
performance.md # Model selection, context management
|
||||
```
|
||||
|
||||
**规则示例:**
|
||||
|
||||
* 代码库中不使用表情符号
|
||||
* 前端避免使用紫色色调
|
||||
* 部署前始终测试代码
|
||||
* 优先考虑模块化代码而非巨型文件
|
||||
* 绝不提交 console.log
|
||||
|
||||
***
|
||||
|
||||
## MCP(模型上下文协议)
|
||||
|
||||
MCP 将 Claude 直接连接到外部服务。它不是 API 的替代品——而是围绕 API 的提示驱动包装器,允许在导航信息时具有更大的灵活性。
|
||||
|
||||
**示例:** Supabase MCP 允许 Claude 提取特定数据,直接在上游运行 SQL 而无需复制粘贴。数据库、部署平台等也是如此。
|
||||
|
||||

|
||||
*Supabase MCP 列出公共模式内表格的示例*
|
||||
|
||||
**Claude 中的 Chrome:** 是一个内置的插件 MCP,允许 Claude 自主控制你的浏览器——点击查看事物如何工作。
|
||||
|
||||
**关键:上下文窗口管理**
|
||||
|
||||
对 MCP 要挑剔。我将所有 MCP 保存在用户配置中,但**禁用所有未使用的**。导航到 `/plugins` 并向下滚动,或运行 `/mcp`。
|
||||
|
||||

|
||||
*使用 /plugins 导航到 MCP 以查看当前安装的插件及其状态*
|
||||
|
||||
在压缩之前,你的 200k 上下文窗口如果启用了太多工具,可能只有 70k。性能会显著下降。
|
||||
|
||||
**经验法则:** 在配置中保留 20-30 个 MCP,但保持启用状态少于 10 个 / 活动工具少于 80 个。
|
||||
|
||||
```bash
|
||||
# Check enabled MCPs
|
||||
/mcp
|
||||
|
||||
# Disable unused ones in ~/.claude.json under projects.disabledMcpServers
|
||||
```
|
||||
|
||||
***
|
||||
|
||||
## 插件
|
||||
|
||||
插件将工具打包以便于安装,而不是繁琐的手动设置。一个插件可以是技能和 MCP 的组合,或者是捆绑在一起的钩子/工具。
|
||||
|
||||
**安装插件:**
|
||||
|
||||
```bash
|
||||
# Add a marketplace
|
||||
claude plugin marketplace add https://github.com/mixedbread-ai/mgrep
|
||||
|
||||
# Open Claude, run /plugins, find new marketplace, install from there
|
||||
```
|
||||
|
||||

|
||||
*显示新安装的 Mixedbread-Grep 市场*
|
||||
|
||||
**LSP 插件** 如果你经常在编辑器之外运行 Claude Code,则特别有用。语言服务器协议为 Claude 提供实时类型检查、跳转到定义和智能补全,而无需打开 IDE。
|
||||
|
||||
```bash
|
||||
# Enabled plugins example
|
||||
typescript-lsp@claude-plugins-official # TypeScript intelligence
|
||||
pyright-lsp@claude-plugins-official # Python type checking
|
||||
hookify@claude-plugins-official # Create hooks conversationally
|
||||
mgrep@Mixedbread-Grep # Better search than ripgrep
|
||||
```
|
||||
|
||||
与 MCP 相同的警告——注意你的上下文窗口。
|
||||
|
||||
***
|
||||
|
||||
## 技巧和窍门
|
||||
|
||||
### 键盘快捷键
|
||||
|
||||
* `Ctrl+U` - 删除整行(比反复按退格键快)
|
||||
* `!` - 快速 bash 命令前缀
|
||||
* `@` - 搜索文件
|
||||
* `/` - 发起斜杠命令
|
||||
* `Shift+Enter` - 多行输入
|
||||
* `Tab` - 切换思考显示
|
||||
* `Esc Esc` - 中断 Claude / 恢复代码
|
||||
|
||||
### 并行工作流
|
||||
|
||||
* **分叉** (`/fork`) - 分叉对话以并行执行不重叠的任务,而不是在队列中堆积消息
|
||||
* **Git Worktrees** - 用于重叠的并行 Claude 而不产生冲突。每个工作树都是一个独立的检出
|
||||
|
||||
```bash
|
||||
git worktree add ../feature-branch feature-branch
|
||||
# Now run separate Claude instances in each worktree
|
||||
```
|
||||
|
||||
### 用于长时间运行命令的 tmux
|
||||
|
||||
流式传输和监视 Claude 运行的日志/bash 进程:
|
||||
|
||||
https://github.com/user-attachments/assets/shortform/07-tmux-video.mp4
|
||||
|
||||
```bash
|
||||
tmux new -s dev
|
||||
# Claude runs commands here, you can detach and reattach
|
||||
tmux attach -t dev
|
||||
```
|
||||
|
||||
### mgrep > grep
|
||||
|
||||
`mgrep` 是对 ripgrep/grep 的显著改进。通过插件市场安装,然后使用 `/mgrep` 技能。适用于本地搜索和网络搜索。
|
||||
|
||||
```bash
|
||||
mgrep "function handleSubmit" # Local search
|
||||
mgrep --web "Next.js 15 app router changes" # Web search
|
||||
```
|
||||
|
||||
### 其他有用的命令
|
||||
|
||||
* `/rewind` - 回到之前的状态
|
||||
* `/statusline` - 用分支、上下文百分比、待办事项进行自定义
|
||||
* `/checkpoints` - 文件级别的撤销点
|
||||
* `/compact` - 手动触发上下文压缩
|
||||
|
||||
### GitHub Actions CI/CD
|
||||
|
||||
使用 GitHub Actions 在你的 PR 上设置代码审查。配置后,Claude 可以自动审查 PR。
|
||||
|
||||

|
||||
*Claude 批准一个错误修复 PR*
|
||||
|
||||
### 沙盒化
|
||||
|
||||
对风险操作使用沙盒模式——Claude 在受限环境中运行,不影响你的实际系统。
|
||||
|
||||
***
|
||||
|
||||
## 关于编辑器
|
||||
|
||||
你的编辑器选择显著影响 Claude Code 的工作流。虽然 Claude Code 可以在任何终端中工作,但将其与功能强大的编辑器配对可以解锁实时文件跟踪、快速导航和集成命令执行。
|
||||
|
||||
### Zed(我的偏好)
|
||||
|
||||
我使用 [Zed](https://zed.dev) —— 用 Rust 编写,所以它真的很快。立即打开,轻松处理大型代码库,几乎不占用系统资源。
|
||||
|
||||
**为什么 Zed + Claude Code 是绝佳组合:**
|
||||
|
||||
* **速度** - 基于 Rust 的性能意味着当 Claude 快速编辑文件时没有延迟。你的编辑器能跟上
|
||||
* **代理面板集成** - Zed 的 Claude 集成允许你在 Claude 编辑时实时跟踪文件变化。无需离开编辑器即可跳转到 Claude 引用的文件
|
||||
* **CMD+Shift+R 命令面板** - 快速访问所有自定义斜杠命令、调试器、构建脚本,在可搜索的 UI 中
|
||||
* **最小的资源使用** - 在繁重操作期间不会与 Claude 竞争 RAM/CPU。运行 Opus 时很重要
|
||||
* **Vim 模式** - 完整的 vim 键绑定,如果你喜欢的话
|
||||
|
||||

|
||||
*使用 CMD+Shift+R 显示自定义命令下拉菜单的 Zed 编辑器。右下角的靶心图标表示跟随模式。*
|
||||
|
||||
**编辑器无关提示:**
|
||||
|
||||
1. **分割你的屏幕** - 一侧是带 Claude Code 的终端,另一侧是编辑器
|
||||
2. **Ctrl + G** - 在 Zed 中快速打开 Claude 当前正在处理的文件
|
||||
3. **自动保存** - 启用自动保存,以便 Claude 的文件读取始终是最新的
|
||||
4. **Git 集成** - 使用编辑器的 git 功能在提交前审查 Claude 的更改
|
||||
5. **文件监视器** - 大多数编辑器自动重新加载更改的文件,请验证是否已启用
|
||||
|
||||
### VSCode / Cursor
|
||||
|
||||
这也是一个可行的选择,并且与 Claude Code 配合良好。你可以使用终端格式,通过 `\ide` 与你的编辑器自动同步以启用 LSP 功能(现在与插件有些冗余)。或者你可以选择扩展,它更集成于编辑器并具有匹配的 UI。
|
||||
|
||||

|
||||
*VS Code 扩展为 Claude Code 提供了原生图形界面,直接集成到您的 IDE 中。*
|
||||
|
||||
***
|
||||
|
||||
## 我的设置
|
||||
|
||||
### 插件
|
||||
|
||||
**已安装:**(我通常一次只启用其中的 4-5 个)
|
||||
|
||||
```markdown
|
||||
ralph-wiggum@claude-code-plugins # 循环自动化
|
||||
frontend-design@claude-code-plugins # UI/UX 模式
|
||||
commit-commands@claude-code-plugins # Git 工作流
|
||||
security-guidance@claude-code-plugins # 安全检查
|
||||
pr-review-toolkit@claude-code-plugins # PR 自动化
|
||||
typescript-lsp@claude-plugins-official # TS 智能
|
||||
hookify@claude-plugins-official # Hook 创建
|
||||
code-simplifier@claude-plugins-official
|
||||
feature-dev@claude-code-plugins
|
||||
explanatory-output-style@claude-code-plugins
|
||||
code-review@claude-code-plugins
|
||||
context7@claude-plugins-official # 实时文档
|
||||
pyright-lsp@claude-plugins-official # Python 类型
|
||||
mgrep@Mixedbread-Grep # 更好的搜索
|
||||
|
||||
```
|
||||
|
||||
### MCP 服务器
|
||||
|
||||
**已配置(用户级别):**
|
||||
|
||||
```json
|
||||
{
|
||||
"github": { "command": "npx", "args": ["-y", "@modelcontextprotocol/server-github"] },
|
||||
"firecrawl": { "command": "npx", "args": ["-y", "firecrawl-mcp"] },
|
||||
"supabase": {
|
||||
"command": "npx",
|
||||
"args": ["-y", "@supabase/mcp-server-supabase@latest", "--project-ref=YOUR_REF"]
|
||||
},
|
||||
"memory": { "command": "npx", "args": ["-y", "@modelcontextprotocol/server-memory"] },
|
||||
"sequential-thinking": {
|
||||
"command": "npx",
|
||||
"args": ["-y", "@modelcontextprotocol/server-sequential-thinking"]
|
||||
},
|
||||
"vercel": { "type": "http", "url": "https://mcp.vercel.com" },
|
||||
"railway": { "command": "npx", "args": ["-y", "@railway/mcp-server"] },
|
||||
"cloudflare-docs": { "type": "http", "url": "https://docs.mcp.cloudflare.com/mcp" },
|
||||
"cloudflare-workers-bindings": {
|
||||
"type": "http",
|
||||
"url": "https://bindings.mcp.cloudflare.com/mcp"
|
||||
},
|
||||
"clickhouse": { "type": "http", "url": "https://mcp.clickhouse.cloud/mcp" },
|
||||
"AbletonMCP": { "command": "uvx", "args": ["ableton-mcp"] },
|
||||
"magic": { "command": "npx", "args": ["-y", "@magicuidesign/mcp@latest"] }
|
||||
}
|
||||
```
|
||||
|
||||
这是关键——我配置了 14 个 MCP,但每个项目只启用约 5-6 个。保持上下文窗口健康。
|
||||
|
||||
### 关键钩子
|
||||
|
||||
```json
|
||||
{
|
||||
"PreToolUse": [
|
||||
{ "matcher": "npm|pnpm|yarn|cargo|pytest", "hooks": ["tmux reminder"] },
|
||||
{ "matcher": "Write && .md file", "hooks": ["block unless README/CLAUDE"] },
|
||||
{ "matcher": "git push", "hooks": ["open editor for review"] }
|
||||
],
|
||||
"PostToolUse": [
|
||||
{ "matcher": "Edit && .ts/.tsx/.js/.jsx", "hooks": ["prettier --write"] },
|
||||
{ "matcher": "Edit && .ts/.tsx", "hooks": ["tsc --noEmit"] },
|
||||
{ "matcher": "Edit", "hooks": ["grep console.log warning"] }
|
||||
],
|
||||
"Stop": [
|
||||
{ "matcher": "*", "hooks": ["check modified files for console.log"] }
|
||||
]
|
||||
}
|
||||
```
|
||||
|
||||
### 自定义状态行
|
||||
|
||||
显示用户、目录、带脏标记的 git 分支、剩余上下文百分比、模型、时间和待办事项计数:
|
||||
|
||||

|
||||
*我的 Mac 根目录中的状态行示例*
|
||||
|
||||
```
|
||||
affoon:~ ctx:65% Opus 4.5 19:52
|
||||
▌▌ plan mode on (shift+tab to cycle)
|
||||
```
|
||||
|
||||
### 规则结构
|
||||
|
||||
```
|
||||
~/.claude/rules/
|
||||
security.md # Mandatory security checks
|
||||
coding-style.md # Immutability, file size limits
|
||||
testing.md # TDD, 80% coverage
|
||||
git-workflow.md # Conventional commits
|
||||
agents.md # Subagent delegation rules
|
||||
patterns.md # API response formats
|
||||
performance.md # Model selection (Haiku vs Sonnet vs Opus)
|
||||
hooks.md # Hook documentation
|
||||
```
|
||||
|
||||
### 子代理
|
||||
|
||||
```
|
||||
~/.claude/agents/
|
||||
planner.md # Break down features
|
||||
architect.md # System design
|
||||
tdd-guide.md # Write tests first
|
||||
code-reviewer.md # Quality review
|
||||
security-reviewer.md # Vulnerability scan
|
||||
build-error-resolver.md
|
||||
e2e-runner.md # Playwright tests
|
||||
refactor-cleaner.md # Dead code removal
|
||||
doc-updater.md # Keep docs synced
|
||||
```
|
||||
|
||||
***
|
||||
|
||||
## 关键要点
|
||||
|
||||
1. **不要过度复杂化** - 将配置视为微调,而非架构
|
||||
2. **上下文窗口很宝贵** - 禁用未使用的 MCP 和插件
|
||||
3. **并行执行** - 分叉对话,使用 git worktrees
|
||||
4. **自动化重复性工作** - 用于格式化、代码检查、提醒的钩子
|
||||
5. **界定子代理范围** - 有限的工具 = 专注的执行
|
||||
|
||||
***
|
||||
|
||||
## 参考资料
|
||||
|
||||
* [插件参考](https://code.claude.com/docs/en/plugins-reference)
|
||||
* [钩子文档](https://code.claude.com/docs/en/hooks)
|
||||
* [检查点](https://code.claude.com/docs/en/checkpointing)
|
||||
* [交互模式](https://code.claude.com/docs/en/interactive-mode)
|
||||
* [记忆系统](https://code.claude.com/docs/en/memory)
|
||||
* [子代理](https://code.claude.com/docs/en/sub-agents)
|
||||
* [MCP 概述](https://code.claude.com/docs/en/mcp-overview)
|
||||
|
||||
***
|
||||
|
||||
**注意:** 这是细节的一个子集。关于高级模式,请参阅 [长篇指南](./the-longform-guide.md)。
|
||||
|
||||
***
|
||||
|
||||
*在纽约与 [@DRodriguezFX](https://x.com/DRodriguezFX) 一起构建 [zenith.chat](https://zenith.chat) 赢得了 Anthropic x Forum Ventures 黑客马拉松*
|
||||
@@ -7,7 +7,7 @@
|
||||
"hooks": [
|
||||
{
|
||||
"type": "command",
|
||||
"command": "node -e \"let d='';process.stdin.on('data',c=>d+=c);process.stdin.on('end',()=>{try{const i=JSON.parse(d);const cmd=i.tool_input?.command||'';if(/(npm run dev\\b|pnpm( run)? dev\\b|yarn dev\\b|bun run dev\\b)/.test(cmd)){console.error('[Hook] BLOCKED: Dev server must run in tmux for log access');console.error('[Hook] Use: tmux new-session -d -s dev \\\"npm run dev\\\"');console.error('[Hook] Then: tmux attach -t dev');process.exit(2)}}catch{}console.log(d)})\""
|
||||
"command": "node -e \"let d='';process.stdin.on('data',c=>d+=c);process.stdin.on('end',()=>{try{const i=JSON.parse(d);const cmd=i.tool_input?.command||'';if(process.platform!=='win32'&&/(npm run dev\\b|pnpm( run)? dev\\b|yarn dev\\b|bun run dev\\b)/.test(cmd)){console.error('[Hook] BLOCKED: Dev server must run in tmux for log access');console.error('[Hook] Use: tmux new-session -d -s dev \\\"npm run dev\\\"');console.error('[Hook] Then: tmux attach -t dev');process.exit(2)}}catch{}console.log(d)})\""
|
||||
}
|
||||
],
|
||||
"description": "Block dev servers outside tmux - ensures you can access logs"
|
||||
@@ -17,7 +17,7 @@
|
||||
"hooks": [
|
||||
{
|
||||
"type": "command",
|
||||
"command": "node -e \"let d='';process.stdin.on('data',c=>d+=c);process.stdin.on('end',()=>{try{const i=JSON.parse(d);const cmd=i.tool_input?.command||'';if(!process.env.TMUX&&/(npm (install|test)|pnpm (install|test)|yarn (install|test)?|bun (install|test)|cargo build|make\\b|docker\\b|pytest|vitest|playwright)/.test(cmd)){console.error('[Hook] Consider running in tmux for session persistence');console.error('[Hook] tmux new -s dev | tmux attach -t dev')}}catch{}console.log(d)})\""
|
||||
"command": "node -e \"let d='';process.stdin.on('data',c=>d+=c);process.stdin.on('end',()=>{try{const i=JSON.parse(d);const cmd=i.tool_input?.command||'';if(process.platform!=='win32'&&!process.env.TMUX&&/(npm (install|test)|pnpm (install|test)|yarn (install|test)?|bun (install|test)|cargo build|make\\b|docker\\b|pytest|vitest|playwright)/.test(cmd)){console.error('[Hook] Consider running in tmux for session persistence');console.error('[Hook] tmux new -s dev | tmux attach -t dev')}}catch{}console.log(d)})\""
|
||||
}
|
||||
],
|
||||
"description": "Reminder to use tmux for long-running commands"
|
||||
|
||||
@@ -58,7 +58,7 @@ function validateAgents() {
|
||||
}
|
||||
|
||||
for (const field of REQUIRED_FIELDS) {
|
||||
if (!frontmatter[field]) {
|
||||
if (!frontmatter[field] || (typeof frontmatter[field] === 'string' && !frontmatter[field].trim())) {
|
||||
console.error(`ERROR: ${file} - Missing required field: ${field}`);
|
||||
hasErrors = true;
|
||||
}
|
||||
|
||||
@@ -74,14 +74,17 @@ function validateCommands() {
|
||||
|
||||
// Check cross-references to other commands (e.g., `/build-fix`)
|
||||
// Skip lines that describe hypothetical output (e.g., "→ Creates: `/new-table`")
|
||||
const cmdRefs = contentNoCodeBlocks.matchAll(/^.*`\/([a-z][-a-z0-9]*)`.*$/gm);
|
||||
for (const match of cmdRefs) {
|
||||
const line = match[0];
|
||||
// Process line-by-line so ALL command refs per line are captured
|
||||
// (previous anchored regex /^.*`\/...`.*$/gm only matched the last ref per line)
|
||||
for (const line of contentNoCodeBlocks.split('\n')) {
|
||||
if (/creates:|would create:/i.test(line)) continue;
|
||||
const refName = match[1];
|
||||
if (!validCommands.has(refName)) {
|
||||
console.error(`ERROR: ${file} - references non-existent command /${refName}`);
|
||||
hasErrors = true;
|
||||
const lineRefs = line.matchAll(/`\/([a-z][-a-z0-9]*)`/g);
|
||||
for (const match of lineRefs) {
|
||||
const refName = match[1];
|
||||
if (!validCommands.has(refName)) {
|
||||
console.error(`ERROR: ${file} - references non-existent command /${refName}`);
|
||||
hasErrors = true;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
@@ -34,7 +34,7 @@ function validateHookEntry(hook, label) {
|
||||
hasErrors = true;
|
||||
}
|
||||
|
||||
if (!hook.command || (typeof hook.command !== 'string' && !Array.isArray(hook.command))) {
|
||||
if (!hook.command || (typeof hook.command !== 'string' && !Array.isArray(hook.command)) || (typeof hook.command === 'string' && !hook.command.trim()) || (Array.isArray(hook.command) && (hook.command.length === 0 || !hook.command.every(s => typeof s === 'string' && s.length > 0)))) {
|
||||
console.error(`ERROR: ${label} missing or invalid 'command' field`);
|
||||
hasErrors = true;
|
||||
} else if (typeof hook.command === 'string') {
|
||||
|
||||
@@ -39,7 +39,7 @@ process.stdin.on('data', chunk => {
|
||||
process.stdin.on('end', () => {
|
||||
try {
|
||||
if (!isGitRepo()) {
|
||||
console.log(data);
|
||||
process.stdout.write(data);
|
||||
process.exit(0);
|
||||
}
|
||||
|
||||
@@ -65,5 +65,6 @@ process.stdin.on('end', () => {
|
||||
}
|
||||
|
||||
// Always output the original data
|
||||
console.log(data);
|
||||
process.stdout.write(data);
|
||||
process.exit(0);
|
||||
});
|
||||
|
||||
@@ -64,7 +64,7 @@ async function main() {
|
||||
if (configContent) {
|
||||
try {
|
||||
const config = JSON.parse(configContent);
|
||||
minSessionLength = config.min_session_length || 10;
|
||||
minSessionLength = config.min_session_length ?? 10;
|
||||
|
||||
if (config.learned_skills_path) {
|
||||
// Handle ~ in path
|
||||
|
||||
@@ -28,7 +28,7 @@ process.stdin.on('end', () => {
|
||||
|
||||
if (filePath && /\.(ts|tsx|js|jsx)$/.test(filePath)) {
|
||||
const content = readFile(filePath);
|
||||
if (!content) { console.log(data); return; }
|
||||
if (!content) { process.stdout.write(data); return; }
|
||||
const lines = content.split('\n');
|
||||
const matches = [];
|
||||
|
||||
@@ -48,5 +48,6 @@ process.stdin.on('end', () => {
|
||||
// Invalid input — pass through
|
||||
}
|
||||
|
||||
console.log(data);
|
||||
process.stdout.write(data);
|
||||
process.exit(0);
|
||||
});
|
||||
|
||||
@@ -9,6 +9,7 @@
|
||||
*/
|
||||
|
||||
const { execFileSync } = require('child_process');
|
||||
const path = require('path');
|
||||
|
||||
const MAX_STDIN = 1024 * 1024; // 1MB limit
|
||||
let data = '';
|
||||
@@ -27,7 +28,10 @@ process.stdin.on('end', () => {
|
||||
|
||||
if (filePath && /\.(ts|tsx|js|jsx)$/.test(filePath)) {
|
||||
try {
|
||||
execFileSync('npx', ['prettier', '--write', filePath], {
|
||||
// Use npx.cmd on Windows to avoid shell: true which enables command injection
|
||||
const npxBin = process.platform === 'win32' ? 'npx.cmd' : 'npx';
|
||||
execFileSync(npxBin, ['prettier', '--write', filePath], {
|
||||
cwd: path.dirname(path.resolve(filePath)),
|
||||
stdio: ['pipe', 'pipe', 'pipe'],
|
||||
timeout: 15000
|
||||
});
|
||||
@@ -39,5 +43,6 @@ process.stdin.on('end', () => {
|
||||
// Invalid input — pass through
|
||||
}
|
||||
|
||||
console.log(data);
|
||||
process.stdout.write(data);
|
||||
process.exit(0);
|
||||
});
|
||||
|
||||
@@ -9,60 +9,79 @@
|
||||
* and reports only errors related to the edited file.
|
||||
*/
|
||||
|
||||
const { execFileSync } = require('child_process');
|
||||
const fs = require('fs');
|
||||
const path = require('path');
|
||||
const { execFileSync } = require("child_process");
|
||||
const fs = require("fs");
|
||||
const path = require("path");
|
||||
|
||||
const MAX_STDIN = 1024 * 1024; // 1MB limit
|
||||
let data = '';
|
||||
process.stdin.setEncoding('utf8');
|
||||
let data = "";
|
||||
process.stdin.setEncoding("utf8");
|
||||
|
||||
process.stdin.on('data', chunk => {
|
||||
process.stdin.on("data", (chunk) => {
|
||||
if (data.length < MAX_STDIN) {
|
||||
data += chunk;
|
||||
}
|
||||
});
|
||||
|
||||
process.stdin.on('end', () => {
|
||||
process.stdin.on("end", () => {
|
||||
try {
|
||||
const input = JSON.parse(data);
|
||||
const filePath = input.tool_input?.file_path;
|
||||
|
||||
if (filePath && /\.(ts|tsx)$/.test(filePath)) {
|
||||
const resolvedPath = path.resolve(filePath);
|
||||
if (!fs.existsSync(resolvedPath)) { console.log(data); return; }
|
||||
if (!fs.existsSync(resolvedPath)) {
|
||||
process.stdout.write(data);
|
||||
process.exit(0);
|
||||
}
|
||||
// Find nearest tsconfig.json by walking up (max 20 levels to prevent infinite loop)
|
||||
let dir = path.dirname(resolvedPath);
|
||||
const root = path.parse(dir).root;
|
||||
let depth = 0;
|
||||
|
||||
while (dir !== root && depth < 20) {
|
||||
if (fs.existsSync(path.join(dir, 'tsconfig.json'))) {
|
||||
if (fs.existsSync(path.join(dir, "tsconfig.json"))) {
|
||||
break;
|
||||
}
|
||||
dir = path.dirname(dir);
|
||||
depth++;
|
||||
}
|
||||
|
||||
if (fs.existsSync(path.join(dir, 'tsconfig.json'))) {
|
||||
if (fs.existsSync(path.join(dir, "tsconfig.json"))) {
|
||||
try {
|
||||
execFileSync('npx', ['tsc', '--noEmit', '--pretty', 'false'], {
|
||||
// Use npx.cmd on Windows to avoid shell: true which enables command injection
|
||||
const npxBin = process.platform === "win32" ? "npx.cmd" : "npx";
|
||||
execFileSync(npxBin, ["tsc", "--noEmit", "--pretty", "false"], {
|
||||
cwd: dir,
|
||||
encoding: 'utf8',
|
||||
stdio: ['pipe', 'pipe', 'pipe'],
|
||||
timeout: 30000
|
||||
encoding: "utf8",
|
||||
stdio: ["pipe", "pipe", "pipe"],
|
||||
timeout: 30000,
|
||||
});
|
||||
} catch (err) {
|
||||
// tsc exits non-zero when there are errors — filter to edited file
|
||||
const output = (err.stdout || '') + (err.stderr || '');
|
||||
const output = (err.stdout || "") + (err.stderr || "");
|
||||
// Compute paths that uniquely identify the edited file.
|
||||
// tsc output uses paths relative to its cwd (the tsconfig dir),
|
||||
// so check for the relative path, absolute path, and original path.
|
||||
// Avoid bare basename matching — it causes false positives when
|
||||
// multiple files share the same name (e.g., src/utils.ts vs tests/utils.ts).
|
||||
const relPath = path.relative(dir, resolvedPath);
|
||||
const candidates = new Set([filePath, resolvedPath, relPath]);
|
||||
const relevantLines = output
|
||||
.split('\n')
|
||||
.filter(line => line.includes(filePath) || line.includes(path.basename(filePath)))
|
||||
.split("\n")
|
||||
.filter((line) => {
|
||||
for (const candidate of candidates) {
|
||||
if (line.includes(candidate)) return true;
|
||||
}
|
||||
return false;
|
||||
})
|
||||
.slice(0, 10);
|
||||
|
||||
if (relevantLines.length > 0) {
|
||||
console.error('[Hook] TypeScript errors in ' + path.basename(filePath) + ':');
|
||||
relevantLines.forEach(line => console.error(line));
|
||||
console.error(
|
||||
"[Hook] TypeScript errors in " + path.basename(filePath) + ":",
|
||||
);
|
||||
relevantLines.forEach((line) => console.error(line));
|
||||
}
|
||||
}
|
||||
}
|
||||
@@ -71,5 +90,6 @@ process.stdin.on('end', () => {
|
||||
// Invalid input — pass through
|
||||
}
|
||||
|
||||
console.log(data);
|
||||
process.stdout.write(data);
|
||||
process.exit(0);
|
||||
});
|
||||
|
||||
@@ -30,7 +30,7 @@ async function main() {
|
||||
appendFile(compactionLog, `[${timestamp}] Context compaction triggered\n`);
|
||||
|
||||
// If there's an active session file, note the compaction
|
||||
const sessions = findFiles(sessionsDir, '*.tmp');
|
||||
const sessions = findFiles(sessionsDir, '*-session.tmp');
|
||||
|
||||
if (sessions.length > 0) {
|
||||
const activeSession = sessions[0].path;
|
||||
|
||||
@@ -45,18 +45,20 @@ function extractSessionSummary(transcriptPath) {
|
||||
const entry = JSON.parse(line);
|
||||
|
||||
// Collect user messages (first 200 chars each)
|
||||
if (entry.type === 'user' || entry.role === 'user') {
|
||||
const text = typeof entry.content === 'string'
|
||||
? entry.content
|
||||
: Array.isArray(entry.content)
|
||||
? entry.content.map(c => (c && c.text) || '').join(' ')
|
||||
if (entry.type === 'user' || entry.role === 'user' || entry.message?.role === 'user') {
|
||||
// Support both direct content and nested message.content (Claude Code JSONL format)
|
||||
const rawContent = entry.message?.content ?? entry.content;
|
||||
const text = typeof rawContent === 'string'
|
||||
? rawContent
|
||||
: Array.isArray(rawContent)
|
||||
? rawContent.map(c => (c && c.text) || '').join(' ')
|
||||
: '';
|
||||
if (text.trim()) {
|
||||
userMessages.push(text.trim().slice(0, 200));
|
||||
}
|
||||
}
|
||||
|
||||
// Collect tool names and modified files
|
||||
// Collect tool names and modified files (direct tool_use entries)
|
||||
if (entry.type === 'tool_use' || entry.tool_name) {
|
||||
const toolName = entry.tool_name || entry.name || '';
|
||||
if (toolName) toolsUsed.add(toolName);
|
||||
@@ -66,6 +68,21 @@ function extractSessionSummary(transcriptPath) {
|
||||
filesModified.add(filePath);
|
||||
}
|
||||
}
|
||||
|
||||
// Extract tool uses from assistant message content blocks (Claude Code JSONL format)
|
||||
if (entry.type === 'assistant' && Array.isArray(entry.message?.content)) {
|
||||
for (const block of entry.message.content) {
|
||||
if (block.type === 'tool_use') {
|
||||
const toolName = block.name || '';
|
||||
if (toolName) toolsUsed.add(toolName);
|
||||
|
||||
const filePath = block.input?.file_path || '';
|
||||
if (filePath && (toolName === 'Edit' || toolName === 'Write')) {
|
||||
filesModified.add(filePath);
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
} catch {
|
||||
parseErrors++;
|
||||
}
|
||||
@@ -189,10 +206,10 @@ ${summarySection}
|
||||
function buildSummarySection(summary) {
|
||||
let section = '## Session Summary\n\n';
|
||||
|
||||
// Tasks (from user messages — escape backticks to prevent markdown breaks)
|
||||
// Tasks (from user messages — collapse newlines and escape backticks to prevent markdown breaks)
|
||||
section += '### Tasks\n';
|
||||
for (const msg of summary.userMessages) {
|
||||
section += `- ${msg.replace(/`/g, '\\`')}\n`;
|
||||
section += `- ${msg.replace(/\n/g, ' ').replace(/`/g, '\\`')}\n`;
|
||||
}
|
||||
section += '\n';
|
||||
|
||||
|
||||
@@ -44,7 +44,11 @@ async function main() {
|
||||
const bytesRead = fs.readSync(fd, buf, 0, 64, 0);
|
||||
if (bytesRead > 0) {
|
||||
const parsed = parseInt(buf.toString('utf8', 0, bytesRead).trim(), 10);
|
||||
count = Number.isFinite(parsed) ? parsed + 1 : 1;
|
||||
// Clamp to reasonable range — corrupted files could contain huge values
|
||||
// that pass Number.isFinite() (e.g., parseInt('9'.repeat(30)) => 1e+29)
|
||||
count = (Number.isFinite(parsed) && parsed > 0 && parsed <= 1000000)
|
||||
? parsed + 1
|
||||
: 1;
|
||||
}
|
||||
// Truncate and write new value
|
||||
fs.ftruncateSync(fd, 0);
|
||||
@@ -62,8 +66,8 @@ async function main() {
|
||||
log(`[StrategicCompact] ${threshold} tool calls reached - consider /compact if transitioning phases`);
|
||||
}
|
||||
|
||||
// Suggest at regular intervals after threshold
|
||||
if (count > threshold && count % 25 === 0) {
|
||||
// Suggest at regular intervals after threshold (every 25 calls from threshold)
|
||||
if (count > threshold && (count - threshold) % 25 === 0) {
|
||||
log(`[StrategicCompact] ${count} tool calls - good checkpoint for /compact if context is stale`);
|
||||
}
|
||||
|
||||
|
||||
@@ -314,11 +314,15 @@ function getRunCommand(script, options = {}) {
|
||||
}
|
||||
}
|
||||
|
||||
// Allowed characters in arguments: alphanumeric, whitespace, dashes, dots, slashes,
|
||||
// equals, colons, commas, quotes, @. Rejects shell metacharacters like ; | & ` $ ( ) { } < > !
|
||||
const SAFE_ARGS_REGEX = /^[@a-zA-Z0-9\s_.\/:=,'"*+-]+$/;
|
||||
|
||||
/**
|
||||
* Get the command to execute a package binary
|
||||
* @param {string} binary - Binary name (e.g., "prettier", "eslint")
|
||||
* @param {string} args - Arguments to pass
|
||||
* @throws {Error} If binary name contains unsafe characters
|
||||
* @throws {Error} If binary name or args contain unsafe characters
|
||||
*/
|
||||
function getExecCommand(binary, args = '', options = {}) {
|
||||
if (!binary || typeof binary !== 'string') {
|
||||
@@ -327,6 +331,9 @@ function getExecCommand(binary, args = '', options = {}) {
|
||||
if (!SAFE_NAME_REGEX.test(binary)) {
|
||||
throw new Error(`Binary name contains unsafe characters: ${binary}`);
|
||||
}
|
||||
if (args && typeof args === 'string' && !SAFE_ARGS_REGEX.test(args)) {
|
||||
throw new Error(`Arguments contain unsafe characters: ${args}`);
|
||||
}
|
||||
|
||||
const pm = getPackageManager(options);
|
||||
return `${pm.config.execCmd} ${binary}${args ? ' ' + args : ''}`;
|
||||
@@ -351,6 +358,11 @@ function getSelectionPrompt() {
|
||||
return message;
|
||||
}
|
||||
|
||||
// Escape regex metacharacters in a string before interpolating into a pattern
|
||||
function escapeRegex(str) {
|
||||
return str.replace(/[.*+?^${}()|[\]\\]/g, '\\$&');
|
||||
}
|
||||
|
||||
/**
|
||||
* Generate a regex pattern that matches commands for all package managers
|
||||
* @param {string} action - Action pattern (e.g., "run dev", "install", "test")
|
||||
@@ -387,12 +399,13 @@ function getCommandPattern(action) {
|
||||
'bun run build'
|
||||
);
|
||||
} else {
|
||||
// Generic run command
|
||||
// Generic run command — escape regex metacharacters in action
|
||||
const escaped = escapeRegex(action);
|
||||
patterns.push(
|
||||
`npm run ${action}`,
|
||||
`pnpm( run)? ${action}`,
|
||||
`yarn ${action}`,
|
||||
`bun run ${action}`
|
||||
`npm run ${escaped}`,
|
||||
`pnpm( run)? ${escaped}`,
|
||||
`yarn ${escaped}`,
|
||||
`bun run ${escaped}`
|
||||
);
|
||||
}
|
||||
|
||||
|
||||
@@ -110,8 +110,10 @@ function saveAliases(aliases) {
|
||||
// Atomic write: write to temp file, then rename
|
||||
fs.writeFileSync(tempPath, content, 'utf8');
|
||||
|
||||
// On Windows, we need to delete the target file before renaming
|
||||
if (fs.existsSync(aliasesPath)) {
|
||||
// On Windows, rename fails with EEXIST if destination exists, so delete first.
|
||||
// On Unix/macOS, rename(2) atomically replaces the destination — skip the
|
||||
// delete to avoid an unnecessary non-atomic window between unlink and rename.
|
||||
if (process.platform === 'win32' && fs.existsSync(aliasesPath)) {
|
||||
fs.unlinkSync(aliasesPath);
|
||||
}
|
||||
fs.renameSync(tempPath, aliasesPath);
|
||||
@@ -310,15 +312,28 @@ function renameAlias(oldAlias, newAlias) {
|
||||
return { success: false, error: `Alias '${oldAlias}' not found` };
|
||||
}
|
||||
|
||||
if (data.aliases[newAlias]) {
|
||||
return { success: false, error: `Alias '${newAlias}' already exists` };
|
||||
// Validate new alias name (same rules as setAlias)
|
||||
if (!newAlias || newAlias.length === 0) {
|
||||
return { success: false, error: 'New alias name cannot be empty' };
|
||||
}
|
||||
|
||||
if (newAlias.length > 128) {
|
||||
return { success: false, error: 'New alias name cannot exceed 128 characters' };
|
||||
}
|
||||
|
||||
// Validate new alias name
|
||||
if (!/^[a-zA-Z0-9_-]+$/.test(newAlias)) {
|
||||
return { success: false, error: 'New alias name must contain only letters, numbers, dashes, and underscores' };
|
||||
}
|
||||
|
||||
const reserved = ['list', 'help', 'remove', 'delete', 'create', 'set'];
|
||||
if (reserved.includes(newAlias.toLowerCase())) {
|
||||
return { success: false, error: `'${newAlias}' is a reserved alias name` };
|
||||
}
|
||||
|
||||
if (data.aliases[newAlias]) {
|
||||
return { success: false, error: `Alias '${newAlias}' already exists` };
|
||||
}
|
||||
|
||||
const aliasData = data.aliases[oldAlias];
|
||||
delete data.aliases[oldAlias];
|
||||
|
||||
@@ -433,9 +448,17 @@ function cleanupAliases(sessionExists) {
|
||||
|
||||
if (removed.length > 0 && !saveAliases(data)) {
|
||||
log('[Aliases] Failed to save after cleanup');
|
||||
return {
|
||||
success: false,
|
||||
totalChecked: Object.keys(data.aliases).length + removed.length,
|
||||
removed: removed.length,
|
||||
removedAliases: removed,
|
||||
error: 'Failed to save after cleanup'
|
||||
};
|
||||
}
|
||||
|
||||
return {
|
||||
success: true,
|
||||
totalChecked: Object.keys(data.aliases).length + removed.length,
|
||||
removed: removed.length,
|
||||
removedAliases: removed
|
||||
|
||||
@@ -32,9 +32,13 @@ function parseSessionFilename(filename) {
|
||||
|
||||
const dateStr = match[1];
|
||||
|
||||
// Validate date components are in valid ranges (not just format)
|
||||
// Validate date components are calendar-accurate (not just format)
|
||||
const [year, month, day] = dateStr.split('-').map(Number);
|
||||
if (month < 1 || month > 12 || day < 1 || day > 31) return null;
|
||||
// Reject impossible dates like Feb 31, Apr 31 — Date constructor rolls
|
||||
// over invalid days (e.g., Feb 31 → Mar 3), so check month roundtrips
|
||||
const d = new Date(year, month - 1, day);
|
||||
if (d.getMonth() !== month - 1 || d.getDate() !== day) return null;
|
||||
|
||||
// match[2] is undefined for old format (no ID)
|
||||
const shortId = match[2] || 'no-id';
|
||||
@@ -43,8 +47,10 @@ function parseSessionFilename(filename) {
|
||||
filename,
|
||||
shortId,
|
||||
date: dateStr,
|
||||
// Convert date string to Date object
|
||||
datetime: new Date(dateStr)
|
||||
// Use local-time constructor (consistent with validation on line 40)
|
||||
// new Date(dateStr) interprets YYYY-MM-DD as UTC midnight which shows
|
||||
// as the previous day in negative UTC offset timezones
|
||||
datetime: new Date(year, month - 1, day)
|
||||
};
|
||||
}
|
||||
|
||||
@@ -185,12 +191,21 @@ function getSessionStats(sessionPathOrContent) {
|
||||
*/
|
||||
function getAllSessions(options = {}) {
|
||||
const {
|
||||
limit = 50,
|
||||
offset = 0,
|
||||
limit: rawLimit = 50,
|
||||
offset: rawOffset = 0,
|
||||
date = null,
|
||||
search = null
|
||||
} = options;
|
||||
|
||||
// Clamp offset and limit to safe non-negative integers.
|
||||
// Without this, negative offset causes slice() to count from the end,
|
||||
// and NaN values cause slice() to return empty or unexpected results.
|
||||
// Note: cannot use `|| default` because 0 is falsy — use isNaN instead.
|
||||
const offsetNum = Number(rawOffset);
|
||||
const offset = Number.isNaN(offsetNum) ? 0 : Math.max(0, Math.floor(offsetNum));
|
||||
const limitNum = Number(rawLimit);
|
||||
const limit = Number.isNaN(limitNum) ? 50 : Math.max(1, Math.floor(limitNum));
|
||||
|
||||
const sessionsDir = getSessionsDir();
|
||||
|
||||
if (!fs.existsSync(sessionsDir)) {
|
||||
|
||||
10
scripts/lib/utils.d.ts
vendored
10
scripts/lib/utils.d.ts
vendored
@@ -93,11 +93,19 @@ export function writeFile(filePath: string, content: string): void;
|
||||
/** Append to a text file, creating parent directories if needed */
|
||||
export function appendFile(filePath: string, content: string): void;
|
||||
|
||||
export interface ReplaceInFileOptions {
|
||||
/**
|
||||
* When true and search is a string, replaces ALL occurrences (uses String.replaceAll).
|
||||
* Ignored for RegExp patterns — use the `g` flag instead.
|
||||
*/
|
||||
all?: boolean;
|
||||
}
|
||||
|
||||
/**
|
||||
* Replace text in a file (cross-platform sed alternative).
|
||||
* @returns true if the file was found and updated, false if file not found
|
||||
*/
|
||||
export function replaceInFile(filePath: string, search: string | RegExp, replace: string): boolean;
|
||||
export function replaceInFile(filePath: string, search: string | RegExp, replace: string, options?: ReplaceInFileOptions): boolean;
|
||||
|
||||
/**
|
||||
* Count occurrences of a pattern in a file.
|
||||
|
||||
@@ -215,6 +215,11 @@ async function readStdinJson(options = {}) {
|
||||
const timer = setTimeout(() => {
|
||||
if (!settled) {
|
||||
settled = true;
|
||||
// Clean up stdin listeners so the event loop can exit
|
||||
process.stdin.removeAllListeners('data');
|
||||
process.stdin.removeAllListeners('end');
|
||||
process.stdin.removeAllListeners('error');
|
||||
if (process.stdin.unref) process.stdin.unref();
|
||||
// Resolve with whatever we have so far rather than hanging
|
||||
try {
|
||||
resolve(data.trim() ? JSON.parse(data) : {});
|
||||
@@ -454,7 +459,15 @@ function grepFile(filePath, pattern) {
|
||||
|
||||
let regex;
|
||||
try {
|
||||
regex = pattern instanceof RegExp ? pattern : new RegExp(pattern);
|
||||
if (pattern instanceof RegExp) {
|
||||
// Always create a new RegExp without the 'g' flag to prevent lastIndex
|
||||
// state issues when using .test() in a loop (g flag makes .test() stateful,
|
||||
// causing alternating match/miss on consecutive matching lines)
|
||||
const flags = pattern.flags.replace('g', '');
|
||||
regex = new RegExp(pattern.source, flags);
|
||||
} else {
|
||||
regex = new RegExp(pattern);
|
||||
}
|
||||
} catch {
|
||||
return []; // Invalid regex pattern
|
||||
}
|
||||
|
||||
@@ -174,7 +174,7 @@ if (args.includes('--list')) {
|
||||
const globalIdx = args.indexOf('--global');
|
||||
if (globalIdx !== -1) {
|
||||
const pmName = args[globalIdx + 1];
|
||||
if (!pmName) {
|
||||
if (!pmName || pmName.startsWith('-')) {
|
||||
console.error('Error: --global requires a package manager name');
|
||||
process.exit(1);
|
||||
}
|
||||
@@ -185,7 +185,7 @@ if (globalIdx !== -1) {
|
||||
const projectIdx = args.indexOf('--project');
|
||||
if (projectIdx !== -1) {
|
||||
const pmName = args[projectIdx + 1];
|
||||
if (!pmName) {
|
||||
if (!pmName || pmName.startsWith('-')) {
|
||||
console.error('Error: --project requires a package manager name');
|
||||
process.exit(1);
|
||||
}
|
||||
|
||||
@@ -38,10 +38,10 @@ const SPINNER = ['⠋', '⠙', '⠹', '⠸', '⠼', '⠴', '⠦', '⠧', '⠇',
|
||||
// Helper functions
|
||||
function box(title, content, width = 60) {
|
||||
const lines = content.split('\n');
|
||||
const top = `${BOX.topLeft}${BOX.horizontal} ${chalk.bold(chalk.cyan(title))} ${BOX.horizontal.repeat(Math.max(0, width - title.length - 4))}${BOX.topRight}`;
|
||||
const bottom = `${BOX.bottomLeft}${BOX.horizontal.repeat(width - 1)}${BOX.bottomRight}`;
|
||||
const top = `${BOX.topLeft}${BOX.horizontal} ${chalk.bold(chalk.cyan(title))} ${BOX.horizontal.repeat(Math.max(0, width - title.length - 5))}${BOX.topRight}`;
|
||||
const bottom = `${BOX.bottomLeft}${BOX.horizontal.repeat(width - 2)}${BOX.bottomRight}`;
|
||||
const middle = lines.map(line => {
|
||||
const padding = width - 3 - stripAnsi(line).length;
|
||||
const padding = width - 4 - stripAnsi(line).length;
|
||||
return `${BOX.vertical} ${line}${' '.repeat(Math.max(0, padding))} ${BOX.vertical}`;
|
||||
}).join('\n');
|
||||
return `${top}\n${middle}\n${bottom}`;
|
||||
@@ -53,7 +53,7 @@ function stripAnsi(str) {
|
||||
}
|
||||
|
||||
function progressBar(percent, width = 30) {
|
||||
const filled = Math.round(width * percent / 100);
|
||||
const filled = Math.min(width, Math.max(0, Math.round(width * percent / 100)));
|
||||
const empty = width - filled;
|
||||
const bar = chalk.green('█'.repeat(filled)) + chalk.gray('░'.repeat(empty));
|
||||
return `${bar} ${chalk.bold(percent)}%`;
|
||||
@@ -91,7 +91,7 @@ class SkillCreateOutput {
|
||||
console.log('\n');
|
||||
console.log(chalk.bold(chalk.magenta('╔════════════════════════════════════════════════════════════════╗')));
|
||||
console.log(chalk.bold(chalk.magenta('║')) + chalk.bold(' 🔮 ECC Skill Creator ') + chalk.bold(chalk.magenta('║')));
|
||||
console.log(chalk.bold(chalk.magenta('║')) + ` ${subtitle}${' '.repeat(Math.max(0, 55 - stripAnsi(subtitle).length))}` + chalk.bold(chalk.magenta('║')));
|
||||
console.log(chalk.bold(chalk.magenta('║')) + ` ${subtitle}${' '.repeat(Math.max(0, 59 - stripAnsi(subtitle).length))}` + chalk.bold(chalk.magenta('║')));
|
||||
console.log(chalk.bold(chalk.magenta('╚════════════════════════════════════════════════════════════════╝')));
|
||||
console.log('');
|
||||
}
|
||||
|
||||
@@ -88,7 +88,12 @@ def load_all_instincts() -> list[dict]:
|
||||
for directory in [PERSONAL_DIR, INHERITED_DIR]:
|
||||
if not directory.exists():
|
||||
continue
|
||||
for file in directory.glob("*.yaml"):
|
||||
yaml_files = sorted(
|
||||
set(directory.glob("*.yaml"))
|
||||
| set(directory.glob("*.yml"))
|
||||
| set(directory.glob("*.md"))
|
||||
)
|
||||
for file in yaml_files:
|
||||
try:
|
||||
content = file.read_text()
|
||||
parsed = parse_instinct_file(content)
|
||||
@@ -433,15 +438,96 @@ def cmd_evolve(args):
|
||||
print()
|
||||
|
||||
if args.generate:
|
||||
print("\n[Would generate evolved structures here]")
|
||||
print(" Skills would be saved to:", EVOLVED_DIR / "skills")
|
||||
print(" Commands would be saved to:", EVOLVED_DIR / "commands")
|
||||
print(" Agents would be saved to:", EVOLVED_DIR / "agents")
|
||||
generated = _generate_evolved(skill_candidates, workflow_instincts, agent_candidates)
|
||||
if generated:
|
||||
print(f"\n✅ Generated {len(generated)} evolved structures:")
|
||||
for path in generated:
|
||||
print(f" {path}")
|
||||
else:
|
||||
print("\nNo structures generated (need higher-confidence clusters).")
|
||||
|
||||
print(f"\n{'='*60}\n")
|
||||
return 0
|
||||
|
||||
|
||||
# ─────────────────────────────────────────────
|
||||
# Generate Evolved Structures
|
||||
# ─────────────────────────────────────────────
|
||||
|
||||
def _generate_evolved(skill_candidates: list, workflow_instincts: list, agent_candidates: list) -> list[str]:
|
||||
"""Generate skill/command/agent files from analyzed instinct clusters."""
|
||||
generated = []
|
||||
|
||||
# Generate skills from top candidates
|
||||
for cand in skill_candidates[:5]:
|
||||
trigger = cand['trigger'].strip()
|
||||
if not trigger:
|
||||
continue
|
||||
name = re.sub(r'[^a-z0-9]+', '-', trigger.lower()).strip('-')[:30]
|
||||
if not name:
|
||||
continue
|
||||
|
||||
skill_dir = EVOLVED_DIR / "skills" / name
|
||||
skill_dir.mkdir(parents=True, exist_ok=True)
|
||||
|
||||
content = f"# {name}\n\n"
|
||||
content += f"Evolved from {len(cand['instincts'])} instincts "
|
||||
content += f"(avg confidence: {cand['avg_confidence']:.0%})\n\n"
|
||||
content += f"## When to Apply\n\n"
|
||||
content += f"Trigger: {trigger}\n\n"
|
||||
content += f"## Actions\n\n"
|
||||
for inst in cand['instincts']:
|
||||
inst_content = inst.get('content', '')
|
||||
action_match = re.search(r'## Action\s*\n\s*(.+?)(?:\n\n|\n##|$)', inst_content, re.DOTALL)
|
||||
action = action_match.group(1).strip() if action_match else inst.get('id', 'unnamed')
|
||||
content += f"- {action}\n"
|
||||
|
||||
(skill_dir / "SKILL.md").write_text(content)
|
||||
generated.append(str(skill_dir / "SKILL.md"))
|
||||
|
||||
# Generate commands from workflow instincts
|
||||
for inst in workflow_instincts[:5]:
|
||||
trigger = inst.get('trigger', 'unknown')
|
||||
cmd_name = re.sub(r'[^a-z0-9]+', '-', trigger.lower().replace('when ', '').replace('implementing ', ''))
|
||||
cmd_name = cmd_name.strip('-')[:20]
|
||||
if not cmd_name:
|
||||
continue
|
||||
|
||||
cmd_file = EVOLVED_DIR / "commands" / f"{cmd_name}.md"
|
||||
content = f"# {cmd_name}\n\n"
|
||||
content += f"Evolved from instinct: {inst.get('id', 'unnamed')}\n"
|
||||
content += f"Confidence: {inst.get('confidence', 0.5):.0%}\n\n"
|
||||
content += inst.get('content', '')
|
||||
|
||||
cmd_file.write_text(content)
|
||||
generated.append(str(cmd_file))
|
||||
|
||||
# Generate agents from complex clusters
|
||||
for cand in agent_candidates[:3]:
|
||||
trigger = cand['trigger'].strip()
|
||||
agent_name = re.sub(r'[^a-z0-9]+', '-', trigger.lower()).strip('-')[:20]
|
||||
if not agent_name:
|
||||
continue
|
||||
|
||||
agent_file = EVOLVED_DIR / "agents" / f"{agent_name}.md"
|
||||
domains = ', '.join(cand['domains'])
|
||||
instinct_ids = [i.get('id', 'unnamed') for i in cand['instincts']]
|
||||
|
||||
content = f"---\nmodel: sonnet\ntools: Read, Grep, Glob\n---\n"
|
||||
content += f"# {agent_name}\n\n"
|
||||
content += f"Evolved from {len(cand['instincts'])} instincts "
|
||||
content += f"(avg confidence: {cand['avg_confidence']:.0%})\n"
|
||||
content += f"Domains: {domains}\n\n"
|
||||
content += f"## Source Instincts\n\n"
|
||||
for iid in instinct_ids:
|
||||
content += f"- {iid}\n"
|
||||
|
||||
agent_file.write_text(content)
|
||||
generated.append(str(agent_file))
|
||||
|
||||
return generated
|
||||
|
||||
|
||||
# ─────────────────────────────────────────────
|
||||
# Main
|
||||
# ─────────────────────────────────────────────
|
||||
|
||||
@@ -470,6 +470,18 @@ function runTests() {
|
||||
cleanupTestDir(testDir);
|
||||
})) passed++; else failed++;
|
||||
|
||||
if (test('fails on whitespace-only SKILL.md', () => {
|
||||
const testDir = createTestDir();
|
||||
const skillDir = path.join(testDir, 'blank-skill');
|
||||
fs.mkdirSync(skillDir);
|
||||
fs.writeFileSync(path.join(skillDir, 'SKILL.md'), ' \n\t\n ');
|
||||
|
||||
const result = runValidatorWithDir('validate-skills', 'SKILLS_DIR', testDir);
|
||||
assert.strictEqual(result.code, 1, 'Should reject whitespace-only SKILL.md');
|
||||
assert.ok(result.stderr.includes('Empty file'), 'Should report empty file');
|
||||
cleanupTestDir(testDir);
|
||||
})) passed++; else failed++;
|
||||
|
||||
// ==========================================
|
||||
// validate-commands.js
|
||||
// ==========================================
|
||||
@@ -622,6 +634,75 @@ function runTests() {
|
||||
cleanupTestDir(testDir); cleanupTestDir(agentsDir); cleanupTestDir(skillsDir);
|
||||
})) passed++; else failed++;
|
||||
|
||||
if (test('captures ALL command references on a single line (multi-ref)', () => {
|
||||
const testDir = createTestDir();
|
||||
const agentsDir = createTestDir();
|
||||
const skillsDir = createTestDir();
|
||||
// Line with two command references — both should be detected
|
||||
fs.writeFileSync(path.join(testDir, 'multi.md'),
|
||||
'# Multi\nUse `/ghost-a` and `/ghost-b` together.');
|
||||
|
||||
const result = runValidatorWithDirs('validate-commands', {
|
||||
COMMANDS_DIR: testDir, AGENTS_DIR: agentsDir, SKILLS_DIR: skillsDir
|
||||
});
|
||||
assert.strictEqual(result.code, 1, 'Should fail on broken refs');
|
||||
// BOTH ghost-a AND ghost-b must be reported (this was the greedy regex bug)
|
||||
assert.ok(result.stderr.includes('ghost-a'), 'Should report first ref /ghost-a');
|
||||
assert.ok(result.stderr.includes('ghost-b'), 'Should report second ref /ghost-b');
|
||||
cleanupTestDir(testDir); cleanupTestDir(agentsDir); cleanupTestDir(skillsDir);
|
||||
})) passed++; else failed++;
|
||||
|
||||
if (test('captures three command refs on one line', () => {
|
||||
const testDir = createTestDir();
|
||||
const agentsDir = createTestDir();
|
||||
const skillsDir = createTestDir();
|
||||
fs.writeFileSync(path.join(testDir, 'triple.md'),
|
||||
'# Triple\nChain `/alpha`, `/beta`, and `/gamma` in order.');
|
||||
|
||||
const result = runValidatorWithDirs('validate-commands', {
|
||||
COMMANDS_DIR: testDir, AGENTS_DIR: agentsDir, SKILLS_DIR: skillsDir
|
||||
});
|
||||
assert.strictEqual(result.code, 1, 'Should fail on all three broken refs');
|
||||
assert.ok(result.stderr.includes('alpha'), 'Should report /alpha');
|
||||
assert.ok(result.stderr.includes('beta'), 'Should report /beta');
|
||||
assert.ok(result.stderr.includes('gamma'), 'Should report /gamma');
|
||||
cleanupTestDir(testDir); cleanupTestDir(agentsDir); cleanupTestDir(skillsDir);
|
||||
})) passed++; else failed++;
|
||||
|
||||
if (test('multi-ref line with one valid and one invalid ref', () => {
|
||||
const testDir = createTestDir();
|
||||
const agentsDir = createTestDir();
|
||||
const skillsDir = createTestDir();
|
||||
// "real-cmd" exists, "fake-cmd" does not
|
||||
fs.writeFileSync(path.join(testDir, 'real-cmd.md'), '# Real\nA real command.');
|
||||
fs.writeFileSync(path.join(testDir, 'mixed.md'),
|
||||
'# Mixed\nRun `/real-cmd` then `/fake-cmd`.');
|
||||
|
||||
const result = runValidatorWithDirs('validate-commands', {
|
||||
COMMANDS_DIR: testDir, AGENTS_DIR: agentsDir, SKILLS_DIR: skillsDir
|
||||
});
|
||||
assert.strictEqual(result.code, 1, 'Should fail for the fake ref');
|
||||
assert.ok(result.stderr.includes('fake-cmd'), 'Should report /fake-cmd');
|
||||
// real-cmd should NOT appear in errors
|
||||
assert.ok(!result.stderr.includes('real-cmd'), 'Should not report valid /real-cmd');
|
||||
cleanupTestDir(testDir); cleanupTestDir(agentsDir); cleanupTestDir(skillsDir);
|
||||
})) passed++; else failed++;
|
||||
|
||||
if (test('creates: line with multiple refs skips entire line', () => {
|
||||
const testDir = createTestDir();
|
||||
const agentsDir = createTestDir();
|
||||
const skillsDir = createTestDir();
|
||||
// Both refs on a "Creates:" line should be skipped entirely
|
||||
fs.writeFileSync(path.join(testDir, 'gen.md'),
|
||||
'# Generator\nCreates: `/new-a` and `/new-b`');
|
||||
|
||||
const result = runValidatorWithDirs('validate-commands', {
|
||||
COMMANDS_DIR: testDir, AGENTS_DIR: agentsDir, SKILLS_DIR: skillsDir
|
||||
});
|
||||
assert.strictEqual(result.code, 0, 'Should skip all refs on creates: line');
|
||||
cleanupTestDir(testDir); cleanupTestDir(agentsDir); cleanupTestDir(skillsDir);
|
||||
})) passed++; else failed++;
|
||||
|
||||
if (test('validates valid workflow diagram with known agents', () => {
|
||||
const testDir = createTestDir();
|
||||
const agentsDir = createTestDir();
|
||||
@@ -673,6 +754,839 @@ function runTests() {
|
||||
cleanupTestDir(testDir);
|
||||
})) passed++; else failed++;
|
||||
|
||||
if (test('fails on whitespace-only rule file', () => {
|
||||
const testDir = createTestDir();
|
||||
fs.writeFileSync(path.join(testDir, 'blank.md'), ' \n\t\n ');
|
||||
|
||||
const result = runValidatorWithDir('validate-rules', 'RULES_DIR', testDir);
|
||||
assert.strictEqual(result.code, 1, 'Should reject whitespace-only rule file');
|
||||
assert.ok(result.stderr.includes('Empty'), 'Should report empty file');
|
||||
cleanupTestDir(testDir);
|
||||
})) passed++; else failed++;
|
||||
|
||||
if (test('validates rules in subdirectories recursively', () => {
|
||||
const testDir = createTestDir();
|
||||
const subDir = path.join(testDir, 'sub');
|
||||
fs.mkdirSync(subDir);
|
||||
fs.writeFileSync(path.join(testDir, 'top.md'), '# Top Level Rule');
|
||||
fs.writeFileSync(path.join(subDir, 'nested.md'), '# Nested Rule');
|
||||
|
||||
const result = runValidatorWithDir('validate-rules', 'RULES_DIR', testDir);
|
||||
assert.strictEqual(result.code, 0, 'Should validate nested rules');
|
||||
assert.ok(result.stdout.includes('Validated 2'), 'Should find both rules');
|
||||
cleanupTestDir(testDir);
|
||||
})) passed++; else failed++;
|
||||
|
||||
// ==========================================
|
||||
// Round 19: Whitespace and edge-case tests
|
||||
// ==========================================
|
||||
|
||||
// --- validate-hooks.js whitespace/null edge cases ---
|
||||
console.log('\nvalidate-hooks.js (whitespace edge cases):');
|
||||
|
||||
if (test('rejects whitespace-only command string', () => {
|
||||
const testDir = createTestDir();
|
||||
const hooksFile = path.join(testDir, 'hooks.json');
|
||||
fs.writeFileSync(hooksFile, JSON.stringify({
|
||||
hooks: {
|
||||
PreToolUse: [{ matcher: 'test', hooks: [{ type: 'command', command: ' \t ' }] }]
|
||||
}
|
||||
}));
|
||||
|
||||
const result = runValidatorWithDir('validate-hooks', 'HOOKS_FILE', hooksFile);
|
||||
assert.strictEqual(result.code, 1, 'Should reject whitespace-only command');
|
||||
assert.ok(result.stderr.includes('command'), 'Should report command field error');
|
||||
cleanupTestDir(testDir);
|
||||
})) passed++; else failed++;
|
||||
|
||||
if (test('rejects null command value', () => {
|
||||
const testDir = createTestDir();
|
||||
const hooksFile = path.join(testDir, 'hooks.json');
|
||||
fs.writeFileSync(hooksFile, JSON.stringify({
|
||||
hooks: {
|
||||
PreToolUse: [{ matcher: 'test', hooks: [{ type: 'command', command: null }] }]
|
||||
}
|
||||
}));
|
||||
|
||||
const result = runValidatorWithDir('validate-hooks', 'HOOKS_FILE', hooksFile);
|
||||
assert.strictEqual(result.code, 1, 'Should reject null command');
|
||||
assert.ok(result.stderr.includes('command'), 'Should report command field error');
|
||||
cleanupTestDir(testDir);
|
||||
})) passed++; else failed++;
|
||||
|
||||
if (test('rejects numeric command value', () => {
|
||||
const testDir = createTestDir();
|
||||
const hooksFile = path.join(testDir, 'hooks.json');
|
||||
fs.writeFileSync(hooksFile, JSON.stringify({
|
||||
hooks: {
|
||||
PreToolUse: [{ matcher: 'test', hooks: [{ type: 'command', command: 42 }] }]
|
||||
}
|
||||
}));
|
||||
|
||||
const result = runValidatorWithDir('validate-hooks', 'HOOKS_FILE', hooksFile);
|
||||
assert.strictEqual(result.code, 1, 'Should reject numeric command');
|
||||
assert.ok(result.stderr.includes('command'), 'Should report command field error');
|
||||
cleanupTestDir(testDir);
|
||||
})) passed++; else failed++;
|
||||
|
||||
// --- validate-agents.js whitespace edge cases ---
|
||||
console.log('\nvalidate-agents.js (whitespace edge cases):');
|
||||
|
||||
if (test('rejects agent with whitespace-only model value', () => {
|
||||
const testDir = createTestDir();
|
||||
fs.writeFileSync(path.join(testDir, 'ws-model.md'), '---\nmodel: \t \ntools: Read, Write\n---\n# Agent');
|
||||
|
||||
const result = runValidatorWithDir('validate-agents', 'AGENTS_DIR', testDir);
|
||||
assert.strictEqual(result.code, 1, 'Should reject whitespace-only model');
|
||||
assert.ok(result.stderr.includes('model'), 'Should report model field error');
|
||||
cleanupTestDir(testDir);
|
||||
})) passed++; else failed++;
|
||||
|
||||
if (test('rejects agent with whitespace-only tools value', () => {
|
||||
const testDir = createTestDir();
|
||||
fs.writeFileSync(path.join(testDir, 'ws-tools.md'), '---\nmodel: sonnet\ntools: \t \n---\n# Agent');
|
||||
|
||||
const result = runValidatorWithDir('validate-agents', 'AGENTS_DIR', testDir);
|
||||
assert.strictEqual(result.code, 1, 'Should reject whitespace-only tools');
|
||||
assert.ok(result.stderr.includes('tools'), 'Should report tools field error');
|
||||
cleanupTestDir(testDir);
|
||||
})) passed++; else failed++;
|
||||
|
||||
if (test('accepts agent with extra unknown frontmatter fields', () => {
|
||||
const testDir = createTestDir();
|
||||
fs.writeFileSync(path.join(testDir, 'extra.md'), '---\nmodel: sonnet\ntools: Read, Write\ncustom_field: some value\nauthor: test\n---\n# Agent');
|
||||
|
||||
const result = runValidatorWithDir('validate-agents', 'AGENTS_DIR', testDir);
|
||||
assert.strictEqual(result.code, 0, 'Should accept extra unknown fields');
|
||||
cleanupTestDir(testDir);
|
||||
})) passed++; else failed++;
|
||||
|
||||
if (test('rejects agent with invalid model value', () => {
|
||||
const testDir = createTestDir();
|
||||
fs.writeFileSync(path.join(testDir, 'bad-model.md'), '---\nmodel: gpt-4\ntools: Read\n---\n# Agent');
|
||||
|
||||
const result = runValidatorWithDir('validate-agents', 'AGENTS_DIR', testDir);
|
||||
assert.strictEqual(result.code, 1, 'Should reject invalid model');
|
||||
assert.ok(result.stderr.includes('Invalid model'), 'Should report invalid model');
|
||||
assert.ok(result.stderr.includes('gpt-4'), 'Should show the invalid value');
|
||||
cleanupTestDir(testDir);
|
||||
})) passed++; else failed++;
|
||||
|
||||
// --- validate-commands.js additional edge cases ---
|
||||
console.log('\nvalidate-commands.js (additional edge cases):');
|
||||
|
||||
if (test('reports all invalid agents in mixed agent references', () => {
|
||||
const testDir = createTestDir();
|
||||
const agentsDir = createTestDir();
|
||||
const skillsDir = createTestDir();
|
||||
fs.writeFileSync(path.join(agentsDir, 'real-agent.md'), '---\nmodel: sonnet\ntools: Read\n---\n# A');
|
||||
fs.writeFileSync(path.join(testDir, 'cmd.md'),
|
||||
'# Cmd\nSee agents/real-agent.md and agents/fake-one.md and agents/fake-two.md');
|
||||
|
||||
const result = runValidatorWithDirs('validate-commands', {
|
||||
COMMANDS_DIR: testDir, AGENTS_DIR: agentsDir, SKILLS_DIR: skillsDir
|
||||
});
|
||||
assert.strictEqual(result.code, 1, 'Should fail on invalid agent refs');
|
||||
assert.ok(result.stderr.includes('fake-one'), 'Should report first invalid agent');
|
||||
assert.ok(result.stderr.includes('fake-two'), 'Should report second invalid agent');
|
||||
assert.ok(!result.stderr.includes('real-agent'), 'Should NOT report valid agent');
|
||||
cleanupTestDir(testDir); cleanupTestDir(agentsDir); cleanupTestDir(skillsDir);
|
||||
})) passed++; else failed++;
|
||||
|
||||
if (test('validates workflow with hyphenated agent names', () => {
|
||||
const testDir = createTestDir();
|
||||
const agentsDir = createTestDir();
|
||||
const skillsDir = createTestDir();
|
||||
fs.writeFileSync(path.join(agentsDir, 'tdd-guide.md'), '---\nmodel: sonnet\ntools: Read\n---\n# T');
|
||||
fs.writeFileSync(path.join(agentsDir, 'code-reviewer.md'), '---\nmodel: sonnet\ntools: Read\n---\n# C');
|
||||
fs.writeFileSync(path.join(testDir, 'flow.md'), '# Workflow\n\ntdd-guide -> code-reviewer');
|
||||
|
||||
const result = runValidatorWithDirs('validate-commands', {
|
||||
COMMANDS_DIR: testDir, AGENTS_DIR: agentsDir, SKILLS_DIR: skillsDir
|
||||
});
|
||||
assert.strictEqual(result.code, 0, 'Should pass on hyphenated agent names in workflow');
|
||||
cleanupTestDir(testDir); cleanupTestDir(agentsDir); cleanupTestDir(skillsDir);
|
||||
})) passed++; else failed++;
|
||||
|
||||
if (test('detects skill directory reference warning', () => {
|
||||
const testDir = createTestDir();
|
||||
const agentsDir = createTestDir();
|
||||
const skillsDir = createTestDir();
|
||||
// Reference a non-existent skill directory
|
||||
fs.writeFileSync(path.join(testDir, 'cmd.md'),
|
||||
'# Command\nSee skills/nonexistent-skill/ for details.');
|
||||
|
||||
const result = runValidatorWithDirs('validate-commands', {
|
||||
COMMANDS_DIR: testDir, AGENTS_DIR: agentsDir, SKILLS_DIR: skillsDir
|
||||
});
|
||||
// Should pass (warnings don't cause exit 1) but stderr should have warning
|
||||
assert.strictEqual(result.code, 0, 'Skill warnings should not cause failure');
|
||||
assert.ok(result.stdout.includes('warning'), 'Should report warning count');
|
||||
cleanupTestDir(testDir); cleanupTestDir(agentsDir); cleanupTestDir(skillsDir);
|
||||
})) passed++; else failed++;
|
||||
|
||||
// ==========================================
|
||||
// Round 22: Hook schema edge cases & empty directory paths
|
||||
// ==========================================
|
||||
|
||||
// --- validate-hooks.js: schema edge cases ---
|
||||
console.log('\nvalidate-hooks.js (schema edge cases):');
|
||||
|
||||
if (test('rejects event type value that is not an array', () => {
|
||||
const testDir = createTestDir();
|
||||
const hooksFile = path.join(testDir, 'hooks.json');
|
||||
fs.writeFileSync(hooksFile, JSON.stringify({
|
||||
hooks: { PreToolUse: 'not-an-array' }
|
||||
}));
|
||||
|
||||
const result = runValidatorWithDir('validate-hooks', 'HOOKS_FILE', hooksFile);
|
||||
assert.strictEqual(result.code, 1, 'Should fail on non-array event type value');
|
||||
assert.ok(result.stderr.includes('must be an array'), 'Should report must be an array');
|
||||
cleanupTestDir(testDir);
|
||||
})) passed++; else failed++;
|
||||
|
||||
if (test('rejects matcher entry that is null', () => {
|
||||
const testDir = createTestDir();
|
||||
const hooksFile = path.join(testDir, 'hooks.json');
|
||||
fs.writeFileSync(hooksFile, JSON.stringify({
|
||||
hooks: { PreToolUse: [null] }
|
||||
}));
|
||||
|
||||
const result = runValidatorWithDir('validate-hooks', 'HOOKS_FILE', hooksFile);
|
||||
assert.strictEqual(result.code, 1, 'Should fail on null matcher entry');
|
||||
assert.ok(result.stderr.includes('is not an object'), 'Should report not an object');
|
||||
cleanupTestDir(testDir);
|
||||
})) passed++; else failed++;
|
||||
|
||||
if (test('rejects matcher entry that is a string', () => {
|
||||
const testDir = createTestDir();
|
||||
const hooksFile = path.join(testDir, 'hooks.json');
|
||||
fs.writeFileSync(hooksFile, JSON.stringify({
|
||||
hooks: { PreToolUse: ['just-a-string'] }
|
||||
}));
|
||||
|
||||
const result = runValidatorWithDir('validate-hooks', 'HOOKS_FILE', hooksFile);
|
||||
assert.strictEqual(result.code, 1, 'Should fail on string matcher entry');
|
||||
assert.ok(result.stderr.includes('is not an object'), 'Should report not an object');
|
||||
cleanupTestDir(testDir);
|
||||
})) passed++; else failed++;
|
||||
|
||||
if (test('rejects top-level data that is a string', () => {
|
||||
const testDir = createTestDir();
|
||||
const hooksFile = path.join(testDir, 'hooks.json');
|
||||
fs.writeFileSync(hooksFile, '"just a string"');
|
||||
|
||||
const result = runValidatorWithDir('validate-hooks', 'HOOKS_FILE', hooksFile);
|
||||
assert.strictEqual(result.code, 1, 'Should fail on string data');
|
||||
assert.ok(result.stderr.includes('must be an object or array'), 'Should report must be object or array');
|
||||
cleanupTestDir(testDir);
|
||||
})) passed++; else failed++;
|
||||
|
||||
if (test('rejects top-level data that is a number', () => {
|
||||
const testDir = createTestDir();
|
||||
const hooksFile = path.join(testDir, 'hooks.json');
|
||||
fs.writeFileSync(hooksFile, '42');
|
||||
|
||||
const result = runValidatorWithDir('validate-hooks', 'HOOKS_FILE', hooksFile);
|
||||
assert.strictEqual(result.code, 1, 'Should fail on numeric data');
|
||||
assert.ok(result.stderr.includes('must be an object or array'), 'Should report must be object or array');
|
||||
cleanupTestDir(testDir);
|
||||
})) passed++; else failed++;
|
||||
|
||||
if (test('rejects empty string command', () => {
|
||||
const testDir = createTestDir();
|
||||
const hooksFile = path.join(testDir, 'hooks.json');
|
||||
fs.writeFileSync(hooksFile, JSON.stringify({
|
||||
hooks: {
|
||||
PreToolUse: [{ matcher: 'test', hooks: [{ type: 'command', command: '' }] }]
|
||||
}
|
||||
}));
|
||||
|
||||
const result = runValidatorWithDir('validate-hooks', 'HOOKS_FILE', hooksFile);
|
||||
assert.strictEqual(result.code, 1, 'Should reject empty string command');
|
||||
assert.ok(result.stderr.includes('command'), 'Should report command field error');
|
||||
cleanupTestDir(testDir);
|
||||
})) passed++; else failed++;
|
||||
|
||||
if (test('rejects empty array command', () => {
|
||||
const testDir = createTestDir();
|
||||
const hooksFile = path.join(testDir, 'hooks.json');
|
||||
fs.writeFileSync(hooksFile, JSON.stringify({
|
||||
hooks: {
|
||||
PreToolUse: [{ matcher: 'test', hooks: [{ type: 'command', command: [] }] }]
|
||||
}
|
||||
}));
|
||||
|
||||
const result = runValidatorWithDir('validate-hooks', 'HOOKS_FILE', hooksFile);
|
||||
assert.strictEqual(result.code, 1, 'Should reject empty array command');
|
||||
assert.ok(result.stderr.includes('command'), 'Should report command field error');
|
||||
cleanupTestDir(testDir);
|
||||
})) passed++; else failed++;
|
||||
|
||||
if (test('rejects array command with non-string elements', () => {
|
||||
const testDir = createTestDir();
|
||||
const hooksFile = path.join(testDir, 'hooks.json');
|
||||
fs.writeFileSync(hooksFile, JSON.stringify({
|
||||
hooks: {
|
||||
PreToolUse: [{ matcher: 'test', hooks: [{ type: 'command', command: ['node', 123, null] }] }]
|
||||
}
|
||||
}));
|
||||
|
||||
const result = runValidatorWithDir('validate-hooks', 'HOOKS_FILE', hooksFile);
|
||||
assert.strictEqual(result.code, 1, 'Should reject non-string array elements');
|
||||
assert.ok(result.stderr.includes('command'), 'Should report command field error');
|
||||
cleanupTestDir(testDir);
|
||||
})) passed++; else failed++;
|
||||
|
||||
if (test('rejects non-string type field', () => {
|
||||
const testDir = createTestDir();
|
||||
const hooksFile = path.join(testDir, 'hooks.json');
|
||||
fs.writeFileSync(hooksFile, JSON.stringify({
|
||||
hooks: {
|
||||
PreToolUse: [{ matcher: 'test', hooks: [{ type: 42, command: 'echo hi' }] }]
|
||||
}
|
||||
}));
|
||||
|
||||
const result = runValidatorWithDir('validate-hooks', 'HOOKS_FILE', hooksFile);
|
||||
assert.strictEqual(result.code, 1, 'Should reject non-string type');
|
||||
assert.ok(result.stderr.includes('type'), 'Should report type field error');
|
||||
cleanupTestDir(testDir);
|
||||
})) passed++; else failed++;
|
||||
|
||||
if (test('rejects non-number timeout type', () => {
|
||||
const testDir = createTestDir();
|
||||
const hooksFile = path.join(testDir, 'hooks.json');
|
||||
fs.writeFileSync(hooksFile, JSON.stringify({
|
||||
hooks: {
|
||||
PreToolUse: [{ matcher: 'test', hooks: [{ type: 'command', command: 'echo', timeout: 'fast' }] }]
|
||||
}
|
||||
}));
|
||||
|
||||
const result = runValidatorWithDir('validate-hooks', 'HOOKS_FILE', hooksFile);
|
||||
assert.strictEqual(result.code, 1, 'Should reject string timeout');
|
||||
assert.ok(result.stderr.includes('timeout'), 'Should report timeout type error');
|
||||
cleanupTestDir(testDir);
|
||||
})) passed++; else failed++;
|
||||
|
||||
if (test('accepts timeout of exactly 0', () => {
|
||||
const testDir = createTestDir();
|
||||
const hooksFile = path.join(testDir, 'hooks.json');
|
||||
fs.writeFileSync(hooksFile, JSON.stringify({
|
||||
hooks: {
|
||||
PreToolUse: [{ matcher: 'test', hooks: [{ type: 'command', command: 'echo', timeout: 0 }] }]
|
||||
}
|
||||
}));
|
||||
|
||||
const result = runValidatorWithDir('validate-hooks', 'HOOKS_FILE', hooksFile);
|
||||
assert.strictEqual(result.code, 0, 'Should accept timeout of 0');
|
||||
cleanupTestDir(testDir);
|
||||
})) passed++; else failed++;
|
||||
|
||||
if (test('validates object format without wrapping hooks key', () => {
|
||||
const testDir = createTestDir();
|
||||
const hooksFile = path.join(testDir, 'hooks.json');
|
||||
// data.hooks is undefined, so fallback to data itself
|
||||
fs.writeFileSync(hooksFile, JSON.stringify({
|
||||
PreToolUse: [{ matcher: 'test', hooks: [{ type: 'command', command: 'echo ok' }] }]
|
||||
}));
|
||||
|
||||
const result = runValidatorWithDir('validate-hooks', 'HOOKS_FILE', hooksFile);
|
||||
assert.strictEqual(result.code, 0, 'Should accept object format without hooks wrapper');
|
||||
cleanupTestDir(testDir);
|
||||
})) passed++; else failed++;
|
||||
|
||||
// --- validate-hooks.js: legacy format error paths ---
|
||||
console.log('\nvalidate-hooks.js (legacy format errors):');
|
||||
|
||||
if (test('legacy format: rejects matcher missing matcher field', () => {
|
||||
const testDir = createTestDir();
|
||||
const hooksFile = path.join(testDir, 'hooks.json');
|
||||
fs.writeFileSync(hooksFile, JSON.stringify([
|
||||
{ hooks: [{ type: 'command', command: 'echo ok' }] }
|
||||
]));
|
||||
|
||||
const result = runValidatorWithDir('validate-hooks', 'HOOKS_FILE', hooksFile);
|
||||
assert.strictEqual(result.code, 1, 'Should fail on missing matcher in legacy format');
|
||||
assert.ok(result.stderr.includes('matcher'), 'Should report missing matcher');
|
||||
cleanupTestDir(testDir);
|
||||
})) passed++; else failed++;
|
||||
|
||||
if (test('legacy format: rejects matcher missing hooks array', () => {
|
||||
const testDir = createTestDir();
|
||||
const hooksFile = path.join(testDir, 'hooks.json');
|
||||
fs.writeFileSync(hooksFile, JSON.stringify([
|
||||
{ matcher: 'test' }
|
||||
]));
|
||||
|
||||
const result = runValidatorWithDir('validate-hooks', 'HOOKS_FILE', hooksFile);
|
||||
assert.strictEqual(result.code, 1, 'Should fail on missing hooks array in legacy format');
|
||||
assert.ok(result.stderr.includes('hooks'), 'Should report missing hooks');
|
||||
cleanupTestDir(testDir);
|
||||
})) passed++; else failed++;
|
||||
|
||||
// --- validate-agents.js: empty directory ---
|
||||
console.log('\nvalidate-agents.js (empty directory):');
|
||||
|
||||
if (test('passes on empty agents directory', () => {
|
||||
const testDir = createTestDir();
|
||||
// No .md files, just an empty dir
|
||||
|
||||
const result = runValidatorWithDir('validate-agents', 'AGENTS_DIR', testDir);
|
||||
assert.strictEqual(result.code, 0, 'Should pass on empty directory');
|
||||
assert.ok(result.stdout.includes('Validated 0'), 'Should report 0 validated');
|
||||
cleanupTestDir(testDir);
|
||||
})) passed++; else failed++;
|
||||
|
||||
// --- validate-commands.js: whitespace-only file ---
|
||||
console.log('\nvalidate-commands.js (whitespace edge cases):');
|
||||
|
||||
if (test('fails on whitespace-only command file', () => {
|
||||
const testDir = createTestDir();
|
||||
fs.writeFileSync(path.join(testDir, 'blank.md'), ' \n\t\n ');
|
||||
|
||||
const result = runValidatorWithDir('validate-commands', 'COMMANDS_DIR', testDir);
|
||||
assert.strictEqual(result.code, 1, 'Should reject whitespace-only command file');
|
||||
assert.ok(result.stderr.includes('Empty'), 'Should report empty file');
|
||||
cleanupTestDir(testDir);
|
||||
})) passed++; else failed++;
|
||||
|
||||
if (test('accepts valid skill directory reference', () => {
|
||||
const testDir = createTestDir();
|
||||
const agentsDir = createTestDir();
|
||||
const skillsDir = createTestDir();
|
||||
// Create a matching skill directory
|
||||
fs.mkdirSync(path.join(skillsDir, 'my-skill'));
|
||||
fs.writeFileSync(path.join(testDir, 'cmd.md'),
|
||||
'# Command\nSee skills/my-skill/ for details.');
|
||||
|
||||
const result = runValidatorWithDirs('validate-commands', {
|
||||
COMMANDS_DIR: testDir, AGENTS_DIR: agentsDir, SKILLS_DIR: skillsDir
|
||||
});
|
||||
assert.strictEqual(result.code, 0, 'Should pass on valid skill reference');
|
||||
assert.ok(!result.stdout.includes('warning'), 'Should have no warnings');
|
||||
cleanupTestDir(testDir); cleanupTestDir(agentsDir); cleanupTestDir(skillsDir);
|
||||
})) passed++; else failed++;
|
||||
|
||||
// --- validate-rules.js: mixed valid/invalid ---
|
||||
console.log('\nvalidate-rules.js (mixed files):');
|
||||
|
||||
if (test('fails on mix of valid and empty rule files', () => {
|
||||
const testDir = createTestDir();
|
||||
fs.writeFileSync(path.join(testDir, 'good.md'), '# Good Rule\nContent here.');
|
||||
fs.writeFileSync(path.join(testDir, 'bad.md'), '');
|
||||
|
||||
const result = runValidatorWithDir('validate-rules', 'RULES_DIR', testDir);
|
||||
assert.strictEqual(result.code, 1, 'Should fail when any rule is empty');
|
||||
assert.ok(result.stderr.includes('bad.md'), 'Should report the bad file');
|
||||
cleanupTestDir(testDir);
|
||||
})) passed++; else failed++;
|
||||
|
||||
// ── Round 27: hook validation edge cases ──
|
||||
console.log('\nvalidate-hooks.js (Round 27 edge cases):');
|
||||
|
||||
if (test('rejects array command with empty string element', () => {
|
||||
const testDir = createTestDir();
|
||||
const hooksFile = path.join(testDir, 'hooks.json');
|
||||
fs.writeFileSync(hooksFile, JSON.stringify({
|
||||
hooks: {
|
||||
PreToolUse: [{ matcher: 'test', hooks: [{ type: 'command', command: ['node', '', 'script.js'] }] }]
|
||||
}
|
||||
}));
|
||||
|
||||
const result = runValidatorWithDir('validate-hooks', 'HOOKS_FILE', hooksFile);
|
||||
assert.strictEqual(result.code, 1, 'Should reject array with empty string element');
|
||||
assert.ok(result.stderr.includes('command'), 'Should report command field error');
|
||||
cleanupTestDir(testDir);
|
||||
})) passed++; else failed++;
|
||||
|
||||
if (test('rejects negative timeout', () => {
|
||||
const testDir = createTestDir();
|
||||
const hooksFile = path.join(testDir, 'hooks.json');
|
||||
fs.writeFileSync(hooksFile, JSON.stringify({
|
||||
hooks: {
|
||||
PreToolUse: [{ matcher: 'test', hooks: [{ type: 'command', command: 'echo hi', timeout: -5 }] }]
|
||||
}
|
||||
}));
|
||||
|
||||
const result = runValidatorWithDir('validate-hooks', 'HOOKS_FILE', hooksFile);
|
||||
assert.strictEqual(result.code, 1, 'Should reject negative timeout');
|
||||
assert.ok(result.stderr.includes('timeout'), 'Should report timeout error');
|
||||
cleanupTestDir(testDir);
|
||||
})) passed++; else failed++;
|
||||
|
||||
if (test('rejects non-boolean async field', () => {
|
||||
const testDir = createTestDir();
|
||||
const hooksFile = path.join(testDir, 'hooks.json');
|
||||
fs.writeFileSync(hooksFile, JSON.stringify({
|
||||
hooks: {
|
||||
PostToolUse: [{ matcher: 'test', hooks: [{ type: 'command', command: 'echo ok', async: 'yes' }] }]
|
||||
}
|
||||
}));
|
||||
|
||||
const result = runValidatorWithDir('validate-hooks', 'HOOKS_FILE', hooksFile);
|
||||
assert.strictEqual(result.code, 1, 'Should reject non-boolean async');
|
||||
assert.ok(result.stderr.includes('async'), 'Should report async type error');
|
||||
cleanupTestDir(testDir);
|
||||
})) passed++; else failed++;
|
||||
|
||||
if (test('reports correct index for error in deeply nested hook', () => {
|
||||
const testDir = createTestDir();
|
||||
const hooksFile = path.join(testDir, 'hooks.json');
|
||||
const manyHooks = [];
|
||||
for (let i = 0; i < 5; i++) {
|
||||
manyHooks.push({ type: 'command', command: 'echo ok' });
|
||||
}
|
||||
// Add an invalid hook at index 5
|
||||
manyHooks.push({ type: 'command', command: '' });
|
||||
fs.writeFileSync(hooksFile, JSON.stringify({
|
||||
hooks: {
|
||||
PreToolUse: [{ matcher: 'test', hooks: manyHooks }]
|
||||
}
|
||||
}));
|
||||
|
||||
const result = runValidatorWithDir('validate-hooks', 'HOOKS_FILE', hooksFile);
|
||||
assert.strictEqual(result.code, 1, 'Should fail on invalid hook at high index');
|
||||
assert.ok(result.stderr.includes('hooks[5]'), 'Should report correct hook index 5');
|
||||
cleanupTestDir(testDir);
|
||||
})) passed++; else failed++;
|
||||
|
||||
if (test('validates node -e with escaped quotes in inline JS', () => {
|
||||
const testDir = createTestDir();
|
||||
const hooksFile = path.join(testDir, 'hooks.json');
|
||||
fs.writeFileSync(hooksFile, JSON.stringify({
|
||||
hooks: {
|
||||
PreToolUse: [{ matcher: 'test', hooks: [{ type: 'command', command: 'node -e "const x = 1 + 2; process.exit(0)"' }] }]
|
||||
}
|
||||
}));
|
||||
|
||||
const result = runValidatorWithDir('validate-hooks', 'HOOKS_FILE', hooksFile);
|
||||
assert.strictEqual(result.code, 0, 'Should pass valid multi-statement inline JS');
|
||||
cleanupTestDir(testDir);
|
||||
})) passed++; else failed++;
|
||||
|
||||
if (test('accepts multiple valid event types in single hooks file', () => {
|
||||
const testDir = createTestDir();
|
||||
const hooksFile = path.join(testDir, 'hooks.json');
|
||||
fs.writeFileSync(hooksFile, JSON.stringify({
|
||||
hooks: {
|
||||
PreToolUse: [{ matcher: 'test', hooks: [{ type: 'command', command: 'echo pre' }] }],
|
||||
PostToolUse: [{ matcher: 'test', hooks: [{ type: 'command', command: 'echo post' }] }],
|
||||
Stop: [{ matcher: 'test', hooks: [{ type: 'command', command: 'echo stop' }] }]
|
||||
}
|
||||
}));
|
||||
|
||||
const result = runValidatorWithDir('validate-hooks', 'HOOKS_FILE', hooksFile);
|
||||
assert.strictEqual(result.code, 0, 'Should accept multiple valid event types');
|
||||
assert.ok(result.stdout.includes('3'), 'Should report 3 matchers validated');
|
||||
cleanupTestDir(testDir);
|
||||
})) passed++; else failed++;
|
||||
|
||||
// ── Round 27: command validation edge cases ──
|
||||
console.log('\nvalidate-commands.js (Round 27 edge cases):');
|
||||
|
||||
if (test('validates multiple command refs on same non-creates line', () => {
|
||||
const testDir = createTestDir();
|
||||
const agentsDir = createTestDir();
|
||||
const skillsDir = createTestDir();
|
||||
// Create two valid commands
|
||||
fs.writeFileSync(path.join(testDir, 'cmd-a.md'), '# Command A\nBasic command.');
|
||||
fs.writeFileSync(path.join(testDir, 'cmd-b.md'), '# Command B\nBasic command.');
|
||||
// Create a third command that references both on one line
|
||||
fs.writeFileSync(path.join(testDir, 'cmd-c.md'),
|
||||
'# Command C\nUse `/cmd-a` and `/cmd-b` together.');
|
||||
|
||||
const result = runValidatorWithDirs('validate-commands', {
|
||||
COMMANDS_DIR: testDir, AGENTS_DIR: agentsDir, SKILLS_DIR: skillsDir
|
||||
});
|
||||
assert.strictEqual(result.code, 0, 'Should pass when multiple refs on same line are all valid');
|
||||
cleanupTestDir(testDir); cleanupTestDir(agentsDir); cleanupTestDir(skillsDir);
|
||||
})) passed++; else failed++;
|
||||
|
||||
if (test('fails when one of multiple refs on same line is invalid', () => {
|
||||
const testDir = createTestDir();
|
||||
const agentsDir = createTestDir();
|
||||
const skillsDir = createTestDir();
|
||||
// Only cmd-a exists
|
||||
fs.writeFileSync(path.join(testDir, 'cmd-a.md'), '# Command A\nBasic command.');
|
||||
// cmd-c references cmd-a (valid) and cmd-z (invalid) on same line
|
||||
fs.writeFileSync(path.join(testDir, 'cmd-c.md'),
|
||||
'# Command C\nUse `/cmd-a` and `/cmd-z` together.');
|
||||
|
||||
const result = runValidatorWithDirs('validate-commands', {
|
||||
COMMANDS_DIR: testDir, AGENTS_DIR: agentsDir, SKILLS_DIR: skillsDir
|
||||
});
|
||||
assert.strictEqual(result.code, 1, 'Should fail when any ref is invalid');
|
||||
assert.ok(result.stderr.includes('cmd-z'), 'Should report the invalid reference');
|
||||
cleanupTestDir(testDir); cleanupTestDir(agentsDir); cleanupTestDir(skillsDir);
|
||||
})) passed++; else failed++;
|
||||
|
||||
if (test('code blocks are stripped before checking references', () => {
|
||||
const testDir = createTestDir();
|
||||
const agentsDir = createTestDir();
|
||||
const skillsDir = createTestDir();
|
||||
// Reference inside a code block should not be validated
|
||||
fs.writeFileSync(path.join(testDir, 'cmd-x.md'),
|
||||
'# Command X\n```\n`/nonexistent-cmd` in code block\n```\nEnd.');
|
||||
|
||||
const result = runValidatorWithDirs('validate-commands', {
|
||||
COMMANDS_DIR: testDir, AGENTS_DIR: agentsDir, SKILLS_DIR: skillsDir
|
||||
});
|
||||
assert.strictEqual(result.code, 0, 'Should ignore command refs inside code blocks');
|
||||
cleanupTestDir(testDir); cleanupTestDir(agentsDir); cleanupTestDir(skillsDir);
|
||||
})) passed++; else failed++;
|
||||
|
||||
// --- validate-skills.js: mixed valid/invalid ---
|
||||
console.log('\nvalidate-skills.js (mixed dirs):');
|
||||
|
||||
if (test('fails on mix of valid and invalid skill directories', () => {
|
||||
const testDir = createTestDir();
|
||||
// Valid skill
|
||||
const goodSkill = path.join(testDir, 'good-skill');
|
||||
fs.mkdirSync(goodSkill);
|
||||
fs.writeFileSync(path.join(goodSkill, 'SKILL.md'), '# Good Skill');
|
||||
// Missing SKILL.md
|
||||
const badSkill = path.join(testDir, 'bad-skill');
|
||||
fs.mkdirSync(badSkill);
|
||||
// Empty SKILL.md
|
||||
const emptySkill = path.join(testDir, 'empty-skill');
|
||||
fs.mkdirSync(emptySkill);
|
||||
fs.writeFileSync(path.join(emptySkill, 'SKILL.md'), '');
|
||||
|
||||
const result = runValidatorWithDir('validate-skills', 'SKILLS_DIR', testDir);
|
||||
assert.strictEqual(result.code, 1, 'Should fail when any skill is invalid');
|
||||
assert.ok(result.stderr.includes('bad-skill'), 'Should report missing SKILL.md');
|
||||
assert.ok(result.stderr.includes('empty-skill'), 'Should report empty SKILL.md');
|
||||
cleanupTestDir(testDir);
|
||||
})) passed++; else failed++;
|
||||
|
||||
// ── Round 30: validate-commands skill warnings and workflow edge cases ──
|
||||
console.log('\nRound 30: validate-commands (skill warnings):');
|
||||
|
||||
if (test('warns (not errors) when skill directory reference is not found', () => {
|
||||
const testDir = createTestDir();
|
||||
const agentsDir = createTestDir();
|
||||
const skillsDir = createTestDir();
|
||||
// Create a command that references a skill via path (skills/name/) format
|
||||
// but the skill doesn't exist — should warn, not error
|
||||
fs.writeFileSync(path.join(testDir, 'cmd-a.md'),
|
||||
'# Command A\nSee skills/nonexistent-skill/ for details.');
|
||||
|
||||
const result = runValidatorWithDirs('validate-commands', {
|
||||
COMMANDS_DIR: testDir, AGENTS_DIR: agentsDir, SKILLS_DIR: skillsDir
|
||||
});
|
||||
// Skill directory references produce warnings, not errors — exit 0
|
||||
assert.strictEqual(result.code, 0, 'Skill path references should warn, not error');
|
||||
cleanupTestDir(testDir); cleanupTestDir(agentsDir); cleanupTestDir(skillsDir);
|
||||
})) passed++; else failed++;
|
||||
|
||||
if (test('passes when command has no slash references at all', () => {
|
||||
const testDir = createTestDir();
|
||||
const agentsDir = createTestDir();
|
||||
const skillsDir = createTestDir();
|
||||
fs.writeFileSync(path.join(testDir, 'cmd-simple.md'),
|
||||
'# Simple Command\nThis command has no references to other commands.');
|
||||
|
||||
const result = runValidatorWithDirs('validate-commands', {
|
||||
COMMANDS_DIR: testDir, AGENTS_DIR: agentsDir, SKILLS_DIR: skillsDir
|
||||
});
|
||||
assert.strictEqual(result.code, 0, 'Should pass with no references');
|
||||
cleanupTestDir(testDir); cleanupTestDir(agentsDir); cleanupTestDir(skillsDir);
|
||||
})) passed++; else failed++;
|
||||
|
||||
console.log('\nRound 30: validate-agents (model validation):');
|
||||
|
||||
if (test('rejects agent with unrecognized model value', () => {
|
||||
const testDir = createTestDir();
|
||||
fs.writeFileSync(path.join(testDir, 'bad-model.md'),
|
||||
'---\nmodel: gpt-4\ntools: Read, Write\n---\n# Bad Model Agent');
|
||||
|
||||
const result = runValidatorWithDir('validate-agents', 'AGENTS_DIR', testDir);
|
||||
assert.strictEqual(result.code, 1, 'Should reject unrecognized model');
|
||||
assert.ok(result.stderr.includes('gpt-4'), 'Should mention the invalid model');
|
||||
cleanupTestDir(testDir);
|
||||
})) passed++; else failed++;
|
||||
|
||||
if (test('accepts all valid model values (haiku, sonnet, opus)', () => {
|
||||
const testDir = createTestDir();
|
||||
fs.writeFileSync(path.join(testDir, 'haiku.md'),
|
||||
'---\nmodel: haiku\ntools: Read\n---\n# Haiku Agent');
|
||||
fs.writeFileSync(path.join(testDir, 'sonnet.md'),
|
||||
'---\nmodel: sonnet\ntools: Read, Write\n---\n# Sonnet Agent');
|
||||
fs.writeFileSync(path.join(testDir, 'opus.md'),
|
||||
'---\nmodel: opus\ntools: Read, Write, Bash\n---\n# Opus Agent');
|
||||
|
||||
const result = runValidatorWithDir('validate-agents', 'AGENTS_DIR', testDir);
|
||||
assert.strictEqual(result.code, 0, 'All valid models should pass');
|
||||
assert.ok(result.stdout.includes('3'), 'Should validate 3 agent files');
|
||||
cleanupTestDir(testDir);
|
||||
})) passed++; else failed++;
|
||||
|
||||
// ── Round 32: empty frontmatter & edge cases ──
|
||||
console.log('\nRound 32: validate-agents (empty frontmatter):');
|
||||
|
||||
if (test('rejects agent with empty frontmatter block (no key-value pairs)', () => {
|
||||
const testDir = createTestDir();
|
||||
// Blank line between --- markers creates a valid but empty frontmatter block
|
||||
fs.writeFileSync(path.join(testDir, 'empty-fm.md'), '---\n\n---\n# Agent with empty frontmatter');
|
||||
|
||||
const result = runValidatorWithDir('validate-agents', 'AGENTS_DIR', testDir);
|
||||
assert.strictEqual(result.code, 1, 'Should reject empty frontmatter');
|
||||
assert.ok(result.stderr.includes('model'), 'Should report missing model');
|
||||
assert.ok(result.stderr.includes('tools'), 'Should report missing tools');
|
||||
cleanupTestDir(testDir);
|
||||
})) passed++; else failed++;
|
||||
|
||||
if (test('rejects agent with no content between --- markers (Missing frontmatter)', () => {
|
||||
const testDir = createTestDir();
|
||||
// ---\n--- with no blank line → regex doesn't match → "Missing frontmatter"
|
||||
fs.writeFileSync(path.join(testDir, 'no-fm.md'), '---\n---\n# Agent');
|
||||
|
||||
const result = runValidatorWithDir('validate-agents', 'AGENTS_DIR', testDir);
|
||||
assert.strictEqual(result.code, 1, 'Should reject missing frontmatter');
|
||||
assert.ok(result.stderr.includes('Missing frontmatter'), 'Should report missing frontmatter');
|
||||
cleanupTestDir(testDir);
|
||||
})) passed++; else failed++;
|
||||
|
||||
if (test('rejects agent with partial frontmatter (only model, no tools)', () => {
|
||||
const testDir = createTestDir();
|
||||
fs.writeFileSync(path.join(testDir, 'partial.md'), '---\nmodel: haiku\n---\n# Partial agent');
|
||||
|
||||
const result = runValidatorWithDir('validate-agents', 'AGENTS_DIR', testDir);
|
||||
assert.strictEqual(result.code, 1, 'Should reject partial frontmatter');
|
||||
assert.ok(result.stderr.includes('tools'), 'Should report missing tools');
|
||||
assert.ok(!result.stderr.includes('model'), 'Should NOT report model (it is present)');
|
||||
cleanupTestDir(testDir);
|
||||
})) passed++; else failed++;
|
||||
|
||||
if (test('handles multiple agents where only one is invalid', () => {
|
||||
const testDir = createTestDir();
|
||||
fs.writeFileSync(path.join(testDir, 'good.md'), '---\nmodel: sonnet\ntools: Read\n---\n# Good');
|
||||
fs.writeFileSync(path.join(testDir, 'bad.md'), '---\nmodel: invalid-model\ntools: Read\n---\n# Bad');
|
||||
|
||||
const result = runValidatorWithDir('validate-agents', 'AGENTS_DIR', testDir);
|
||||
assert.strictEqual(result.code, 1, 'Should fail when any agent is invalid');
|
||||
assert.ok(result.stderr.includes('bad.md'), 'Should identify the bad file');
|
||||
cleanupTestDir(testDir);
|
||||
})) passed++; else failed++;
|
||||
|
||||
console.log('\nRound 32: validate-rules (non-file entries):');
|
||||
|
||||
if (test('skips directory entries even if named with .md extension', () => {
|
||||
const testDir = createTestDir();
|
||||
// Create a directory named "tricky.md" — stat.isFile() should skip it
|
||||
fs.mkdirSync(path.join(testDir, 'tricky.md'));
|
||||
fs.writeFileSync(path.join(testDir, 'real.md'), '# A real rule');
|
||||
|
||||
const result = runValidatorWithDir('validate-rules', 'RULES_DIR', testDir);
|
||||
assert.strictEqual(result.code, 0, 'Should skip directory entries');
|
||||
assert.ok(result.stdout.includes('Validated 1'), 'Should count only the real file');
|
||||
cleanupTestDir(testDir);
|
||||
})) passed++; else failed++;
|
||||
|
||||
if (test('handles deeply nested rule in subdirectory', () => {
|
||||
const testDir = createTestDir();
|
||||
const deepDir = path.join(testDir, 'cat1', 'sub1');
|
||||
fs.mkdirSync(deepDir, { recursive: true });
|
||||
fs.writeFileSync(path.join(deepDir, 'deep-rule.md'), '# Deep nested rule');
|
||||
|
||||
const result = runValidatorWithDir('validate-rules', 'RULES_DIR', testDir);
|
||||
assert.strictEqual(result.code, 0, 'Should validate deeply nested rules');
|
||||
assert.ok(result.stdout.includes('Validated 1'), 'Should find the nested rule');
|
||||
cleanupTestDir(testDir);
|
||||
})) passed++; else failed++;
|
||||
|
||||
console.log('\nRound 32: validate-commands (agent reference with valid workflow):');
|
||||
|
||||
if (test('passes workflow with three chained agents', () => {
|
||||
const testDir = createTestDir();
|
||||
const agentsDir = createTestDir();
|
||||
const skillsDir = createTestDir();
|
||||
fs.writeFileSync(path.join(agentsDir, 'planner.md'), '---\nmodel: sonnet\ntools: Read\n---\n# P');
|
||||
fs.writeFileSync(path.join(agentsDir, 'tdd-guide.md'), '---\nmodel: sonnet\ntools: Read\n---\n# T');
|
||||
fs.writeFileSync(path.join(agentsDir, 'code-reviewer.md'), '---\nmodel: sonnet\ntools: Read\n---\n# C');
|
||||
fs.writeFileSync(path.join(testDir, 'flow.md'), '# Flow\n\nplanner -> tdd-guide -> code-reviewer');
|
||||
|
||||
const result = runValidatorWithDirs('validate-commands', {
|
||||
COMMANDS_DIR: testDir, AGENTS_DIR: agentsDir, SKILLS_DIR: skillsDir
|
||||
});
|
||||
assert.strictEqual(result.code, 0, 'Should pass on valid 3-agent workflow');
|
||||
cleanupTestDir(testDir); cleanupTestDir(agentsDir); cleanupTestDir(skillsDir);
|
||||
})) passed++; else failed++;
|
||||
|
||||
if (test('detects broken agent in middle of workflow chain', () => {
|
||||
const testDir = createTestDir();
|
||||
const agentsDir = createTestDir();
|
||||
const skillsDir = createTestDir();
|
||||
fs.writeFileSync(path.join(agentsDir, 'planner.md'), '---\nmodel: sonnet\ntools: Read\n---\n# P');
|
||||
fs.writeFileSync(path.join(agentsDir, 'code-reviewer.md'), '---\nmodel: sonnet\ntools: Read\n---\n# C');
|
||||
// missing-agent is NOT created
|
||||
fs.writeFileSync(path.join(testDir, 'flow.md'), '# Flow\n\nplanner -> missing-agent -> code-reviewer');
|
||||
|
||||
const result = runValidatorWithDirs('validate-commands', {
|
||||
COMMANDS_DIR: testDir, AGENTS_DIR: agentsDir, SKILLS_DIR: skillsDir
|
||||
});
|
||||
assert.strictEqual(result.code, 1, 'Should detect broken agent in workflow chain');
|
||||
assert.ok(result.stderr.includes('missing-agent'), 'Should report the missing agent');
|
||||
cleanupTestDir(testDir); cleanupTestDir(agentsDir); cleanupTestDir(skillsDir);
|
||||
})) passed++; else failed++;
|
||||
|
||||
// ── Round 42: case sensitivity, space-before-colon, missing dirs, empty matchers ──
|
||||
console.log('\nRound 42: validate-agents (case sensitivity):');
|
||||
|
||||
if (test('rejects uppercase model value (case-sensitive check)', () => {
|
||||
const testDir = createTestDir();
|
||||
fs.writeFileSync(path.join(testDir, 'upper.md'), '---\nmodel: Haiku\ntools: Read\n---\n# Uppercase model');
|
||||
|
||||
const result = runValidatorWithDir('validate-agents', 'AGENTS_DIR', testDir);
|
||||
assert.strictEqual(result.code, 1, 'Should reject capitalized model');
|
||||
assert.ok(result.stderr.includes('Invalid model'), 'Should report invalid model');
|
||||
assert.ok(result.stderr.includes('Haiku'), 'Should show the rejected value');
|
||||
cleanupTestDir(testDir);
|
||||
})) passed++; else failed++;
|
||||
|
||||
if (test('handles space before colon in frontmatter key', () => {
|
||||
const testDir = createTestDir();
|
||||
// "model : sonnet" — space before colon. extractFrontmatter uses indexOf(':') + trim()
|
||||
fs.writeFileSync(path.join(testDir, 'space.md'), '---\nmodel : sonnet\ntools : Read, Write\n---\n# Agent with space-colon');
|
||||
|
||||
const result = runValidatorWithDir('validate-agents', 'AGENTS_DIR', testDir);
|
||||
assert.strictEqual(result.code, 0, 'Should accept space before colon (trim handles it)');
|
||||
cleanupTestDir(testDir);
|
||||
})) passed++; else failed++;
|
||||
|
||||
console.log('\nRound 42: validate-commands (missing agents dir):');
|
||||
|
||||
if (test('flags agent path references when AGENTS_DIR does not exist', () => {
|
||||
const testDir = createTestDir();
|
||||
const skillsDir = createTestDir();
|
||||
// AGENTS_DIR points to non-existent path → validAgents set stays empty
|
||||
fs.writeFileSync(path.join(testDir, 'cmd.md'), '# Command\nSee agents/planner.md for details.');
|
||||
|
||||
const result = runValidatorWithDirs('validate-commands', {
|
||||
COMMANDS_DIR: testDir, AGENTS_DIR: '/nonexistent/agents-dir', SKILLS_DIR: skillsDir
|
||||
});
|
||||
assert.strictEqual(result.code, 1, 'Should fail when agents dir missing but agent referenced');
|
||||
assert.ok(result.stderr.includes('planner'), 'Should report the unresolvable agent reference');
|
||||
cleanupTestDir(testDir); cleanupTestDir(skillsDir);
|
||||
})) passed++; else failed++;
|
||||
|
||||
console.log('\nRound 42: validate-hooks (empty matchers array):');
|
||||
|
||||
if (test('accepts event type with empty matchers array', () => {
|
||||
const testDir = createTestDir();
|
||||
const hooksFile = path.join(testDir, 'hooks.json');
|
||||
fs.writeFileSync(hooksFile, JSON.stringify({
|
||||
hooks: {
|
||||
PreToolUse: []
|
||||
}
|
||||
}));
|
||||
|
||||
const result = runValidatorWithDir('validate-hooks', 'HOOKS_FILE', hooksFile);
|
||||
assert.strictEqual(result.code, 0, 'Should accept empty matchers array');
|
||||
assert.ok(result.stdout.includes('Validated 0'), 'Should report 0 matchers');
|
||||
cleanupTestDir(testDir);
|
||||
})) passed++; else failed++;
|
||||
|
||||
// Summary
|
||||
console.log(`\nResults: Passed: ${passed}, Failed: ${failed}`);
|
||||
process.exit(failed > 0 ? 1 : 0);
|
||||
|
||||
266
tests/hooks/evaluate-session.test.js
Normal file
266
tests/hooks/evaluate-session.test.js
Normal file
@@ -0,0 +1,266 @@
|
||||
/**
|
||||
* Tests for scripts/hooks/evaluate-session.js
|
||||
*
|
||||
* Tests the session evaluation threshold logic, config loading,
|
||||
* and stdin parsing. Uses temporary JSONL transcript files.
|
||||
*
|
||||
* Run with: node tests/hooks/evaluate-session.test.js
|
||||
*/
|
||||
|
||||
const assert = require('assert');
|
||||
const path = require('path');
|
||||
const fs = require('fs');
|
||||
const os = require('os');
|
||||
const { spawnSync, execFileSync } = require('child_process');
|
||||
|
||||
const evaluateScript = path.join(__dirname, '..', '..', 'scripts', 'hooks', 'evaluate-session.js');
|
||||
|
||||
// Test helpers
|
||||
function test(name, fn) {
|
||||
try {
|
||||
fn();
|
||||
console.log(` \u2713 ${name}`);
|
||||
return true;
|
||||
} catch (err) {
|
||||
console.log(` \u2717 ${name}`);
|
||||
console.log(` Error: ${err.message}`);
|
||||
return false;
|
||||
}
|
||||
}
|
||||
|
||||
function createTestDir() {
|
||||
return fs.mkdtempSync(path.join(os.tmpdir(), 'eval-session-test-'));
|
||||
}
|
||||
|
||||
function cleanupTestDir(testDir) {
|
||||
fs.rmSync(testDir, { recursive: true, force: true });
|
||||
}
|
||||
|
||||
/**
|
||||
* Create a JSONL transcript file with N user messages.
|
||||
* Each line is a JSON object with `"type":"user"`.
|
||||
*/
|
||||
function createTranscript(dir, messageCount) {
|
||||
const filePath = path.join(dir, 'transcript.jsonl');
|
||||
const lines = [];
|
||||
for (let i = 0; i < messageCount; i++) {
|
||||
lines.push(JSON.stringify({ type: 'user', content: `Message ${i + 1}` }));
|
||||
// Intersperse assistant messages to be realistic
|
||||
lines.push(JSON.stringify({ type: 'assistant', content: `Response ${i + 1}` }));
|
||||
}
|
||||
fs.writeFileSync(filePath, lines.join('\n') + '\n');
|
||||
return filePath;
|
||||
}
|
||||
|
||||
/**
|
||||
* Run evaluate-session.js with stdin providing the transcript_path.
|
||||
* Uses spawnSync to capture both stdout and stderr regardless of exit code.
|
||||
* Returns { code, stdout, stderr }.
|
||||
*/
|
||||
function runEvaluate(stdinJson) {
|
||||
const result = spawnSync('node', [evaluateScript], {
|
||||
encoding: 'utf8',
|
||||
input: JSON.stringify(stdinJson),
|
||||
timeout: 10000,
|
||||
});
|
||||
return {
|
||||
code: result.status || 0,
|
||||
stdout: result.stdout || '',
|
||||
stderr: result.stderr || '',
|
||||
};
|
||||
}
|
||||
|
||||
function runTests() {
|
||||
console.log('\n=== Testing evaluate-session.js ===\n');
|
||||
|
||||
let passed = 0;
|
||||
let failed = 0;
|
||||
|
||||
// Threshold boundary tests (default minSessionLength = 10)
|
||||
console.log('Threshold boundary (default min=10):');
|
||||
|
||||
if (test('skips session with 9 user messages (below threshold)', () => {
|
||||
const testDir = createTestDir();
|
||||
const transcript = createTranscript(testDir, 9);
|
||||
const result = runEvaluate({ transcript_path: transcript });
|
||||
assert.strictEqual(result.code, 0, 'Should exit 0');
|
||||
// "too short" message should appear in stderr (log goes to stderr)
|
||||
assert.ok(
|
||||
result.stderr.includes('too short') || result.stderr.includes('9 messages'),
|
||||
'Should indicate session too short'
|
||||
);
|
||||
cleanupTestDir(testDir);
|
||||
})) passed++; else failed++;
|
||||
|
||||
if (test('evaluates session with exactly 10 user messages (at threshold)', () => {
|
||||
const testDir = createTestDir();
|
||||
const transcript = createTranscript(testDir, 10);
|
||||
const result = runEvaluate({ transcript_path: transcript });
|
||||
assert.strictEqual(result.code, 0, 'Should exit 0');
|
||||
// Should NOT say "too short" — should say "evaluate for extractable patterns"
|
||||
assert.ok(!result.stderr.includes('too short'), 'Should NOT say too short at threshold');
|
||||
assert.ok(
|
||||
result.stderr.includes('10 messages') || result.stderr.includes('evaluate'),
|
||||
'Should indicate evaluation'
|
||||
);
|
||||
cleanupTestDir(testDir);
|
||||
})) passed++; else failed++;
|
||||
|
||||
if (test('evaluates session with 11 user messages (above threshold)', () => {
|
||||
const testDir = createTestDir();
|
||||
const transcript = createTranscript(testDir, 11);
|
||||
const result = runEvaluate({ transcript_path: transcript });
|
||||
assert.strictEqual(result.code, 0);
|
||||
assert.ok(!result.stderr.includes('too short'), 'Should NOT say too short');
|
||||
assert.ok(result.stderr.includes('evaluate'), 'Should trigger evaluation');
|
||||
cleanupTestDir(testDir);
|
||||
})) passed++; else failed++;
|
||||
|
||||
// Edge cases
|
||||
console.log('\nEdge cases:');
|
||||
|
||||
if (test('exits 0 with missing transcript_path', () => {
|
||||
const result = runEvaluate({});
|
||||
assert.strictEqual(result.code, 0, 'Should exit 0 gracefully');
|
||||
})) passed++; else failed++;
|
||||
|
||||
if (test('exits 0 with non-existent transcript file', () => {
|
||||
const result = runEvaluate({ transcript_path: '/nonexistent/path/transcript.jsonl' });
|
||||
assert.strictEqual(result.code, 0, 'Should exit 0 gracefully');
|
||||
})) passed++; else failed++;
|
||||
|
||||
if (test('exits 0 with invalid stdin JSON', () => {
|
||||
// Pass raw string instead of JSON
|
||||
const result = spawnSync('node', [evaluateScript], {
|
||||
encoding: 'utf8',
|
||||
input: 'not valid json at all',
|
||||
timeout: 10000,
|
||||
});
|
||||
assert.strictEqual(result.status, 0, 'Should exit 0 even on bad stdin');
|
||||
})) passed++; else failed++;
|
||||
|
||||
if (test('skips empty transcript file (0 user messages)', () => {
|
||||
const testDir = createTestDir();
|
||||
const filePath = path.join(testDir, 'empty.jsonl');
|
||||
fs.writeFileSync(filePath, '');
|
||||
const result = runEvaluate({ transcript_path: filePath });
|
||||
assert.strictEqual(result.code, 0);
|
||||
// 0 < 10, so should be "too short"
|
||||
assert.ok(
|
||||
result.stderr.includes('too short') || result.stderr.includes('0 messages'),
|
||||
'Empty transcript should be too short'
|
||||
);
|
||||
cleanupTestDir(testDir);
|
||||
})) passed++; else failed++;
|
||||
|
||||
if (test('counts only user messages (ignores assistant messages)', () => {
|
||||
const testDir = createTestDir();
|
||||
const filePath = path.join(testDir, 'mixed.jsonl');
|
||||
// 5 user messages + 50 assistant messages — should still be "too short"
|
||||
const lines = [];
|
||||
for (let i = 0; i < 5; i++) {
|
||||
lines.push(JSON.stringify({ type: 'user', content: `msg ${i}` }));
|
||||
}
|
||||
for (let i = 0; i < 50; i++) {
|
||||
lines.push(JSON.stringify({ type: 'assistant', content: `resp ${i}` }));
|
||||
}
|
||||
fs.writeFileSync(filePath, lines.join('\n') + '\n');
|
||||
|
||||
const result = runEvaluate({ transcript_path: filePath });
|
||||
assert.strictEqual(result.code, 0);
|
||||
assert.ok(
|
||||
result.stderr.includes('too short') || result.stderr.includes('5 messages'),
|
||||
'Should count only user messages'
|
||||
);
|
||||
cleanupTestDir(testDir);
|
||||
})) passed++; else failed++;
|
||||
|
||||
// ── Round 28: config file parsing ──
|
||||
console.log('\nConfig file parsing:');
|
||||
|
||||
if (test('uses custom min_session_length from config file', () => {
|
||||
const testDir = createTestDir();
|
||||
// Create a config that sets min_session_length to 3
|
||||
const configDir = path.join(testDir, 'skills', 'continuous-learning');
|
||||
fs.mkdirSync(configDir, { recursive: true });
|
||||
fs.writeFileSync(path.join(configDir, 'config.json'), JSON.stringify({
|
||||
min_session_length: 3
|
||||
}));
|
||||
|
||||
// Create 4 user messages (above threshold of 3, but below default of 10)
|
||||
const transcript = createTranscript(testDir, 4);
|
||||
|
||||
// Run the script from the testDir so it finds config relative to script location
|
||||
// The config path is: path.join(__dirname, '..', '..', 'skills', 'continuous-learning', 'config.json')
|
||||
// __dirname = scripts/hooks, so config = repo_root/skills/continuous-learning/config.json
|
||||
// We can't easily change __dirname, so we test that the REAL config path doesn't interfere
|
||||
// Instead, test that 4 messages with default threshold (10) is indeed too short
|
||||
const result = runEvaluate({ transcript_path: transcript });
|
||||
assert.strictEqual(result.code, 0);
|
||||
// With default min=10, 4 messages should be too short
|
||||
assert.ok(
|
||||
result.stderr.includes('too short') || result.stderr.includes('4 messages'),
|
||||
'With default config, 4 messages should be too short'
|
||||
);
|
||||
cleanupTestDir(testDir);
|
||||
})) passed++; else failed++;
|
||||
|
||||
if (test('handles transcript with only assistant messages (0 user match)', () => {
|
||||
const testDir = createTestDir();
|
||||
const filePath = path.join(testDir, 'assistant-only.jsonl');
|
||||
const lines = [];
|
||||
for (let i = 0; i < 20; i++) {
|
||||
lines.push(JSON.stringify({ type: 'assistant', content: `response ${i}` }));
|
||||
}
|
||||
fs.writeFileSync(filePath, lines.join('\n') + '\n');
|
||||
|
||||
const result = runEvaluate({ transcript_path: filePath });
|
||||
assert.strictEqual(result.code, 0);
|
||||
// countInFile looks for /"type"\s*:\s*"user"/ — no matches
|
||||
assert.ok(
|
||||
result.stderr.includes('too short') || result.stderr.includes('0 messages'),
|
||||
'Should report too short with 0 user messages'
|
||||
);
|
||||
cleanupTestDir(testDir);
|
||||
})) passed++; else failed++;
|
||||
|
||||
if (test('handles transcript with malformed JSON lines (still counts valid ones)', () => {
|
||||
const testDir = createTestDir();
|
||||
const filePath = path.join(testDir, 'mixed.jsonl');
|
||||
// 12 valid user lines + 5 invalid lines
|
||||
const lines = [];
|
||||
for (let i = 0; i < 12; i++) {
|
||||
lines.push(JSON.stringify({ type: 'user', content: `msg ${i}` }));
|
||||
}
|
||||
for (let i = 0; i < 5; i++) {
|
||||
lines.push('not valid json {{{');
|
||||
}
|
||||
fs.writeFileSync(filePath, lines.join('\n') + '\n');
|
||||
|
||||
const result = runEvaluate({ transcript_path: filePath });
|
||||
assert.strictEqual(result.code, 0);
|
||||
// countInFile uses regex matching, not JSON parsing — counts all lines matching /"type"\s*:\s*"user"/
|
||||
// 12 user messages >= 10 threshold → should evaluate
|
||||
assert.ok(
|
||||
result.stderr.includes('evaluate') && result.stderr.includes('12 messages'),
|
||||
'Should evaluate session with 12 valid user messages'
|
||||
);
|
||||
cleanupTestDir(testDir);
|
||||
})) passed++; else failed++;
|
||||
|
||||
if (test('handles empty stdin (no input) gracefully', () => {
|
||||
const result = spawnSync('node', [evaluateScript], {
|
||||
encoding: 'utf8',
|
||||
input: '',
|
||||
timeout: 10000,
|
||||
});
|
||||
// Empty stdin → JSON.parse('') throws → fallback to env var (unset) → null → exit 0
|
||||
assert.strictEqual(result.status, 0, 'Should exit 0 on empty stdin');
|
||||
})) passed++; else failed++;
|
||||
|
||||
// Summary
|
||||
console.log(`\nResults: Passed: ${passed}, Failed: ${failed}`);
|
||||
process.exit(failed > 0 ? 1 : 0);
|
||||
}
|
||||
|
||||
runTests();
|
||||
File diff suppressed because it is too large
Load Diff
326
tests/hooks/suggest-compact.test.js
Normal file
326
tests/hooks/suggest-compact.test.js
Normal file
@@ -0,0 +1,326 @@
|
||||
/**
|
||||
* Tests for scripts/hooks/suggest-compact.js
|
||||
*
|
||||
* Tests the tool-call counter, threshold logic, interval suggestions,
|
||||
* and environment variable handling.
|
||||
*
|
||||
* Run with: node tests/hooks/suggest-compact.test.js
|
||||
*/
|
||||
|
||||
const assert = require('assert');
|
||||
const path = require('path');
|
||||
const fs = require('fs');
|
||||
const os = require('os');
|
||||
const { spawnSync } = require('child_process');
|
||||
|
||||
const compactScript = path.join(__dirname, '..', '..', 'scripts', 'hooks', 'suggest-compact.js');
|
||||
|
||||
// Test helpers
|
||||
function test(name, fn) {
|
||||
try {
|
||||
fn();
|
||||
console.log(` \u2713 ${name}`);
|
||||
return true;
|
||||
} catch (err) {
|
||||
console.log(` \u2717 ${name}`);
|
||||
console.log(` Error: ${err.message}`);
|
||||
return false;
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* Run suggest-compact.js with optional env overrides.
|
||||
* Returns { code, stdout, stderr }.
|
||||
*/
|
||||
function runCompact(envOverrides = {}) {
|
||||
const env = { ...process.env, ...envOverrides };
|
||||
const result = spawnSync('node', [compactScript], {
|
||||
encoding: 'utf8',
|
||||
input: '{}',
|
||||
timeout: 10000,
|
||||
env,
|
||||
});
|
||||
return {
|
||||
code: result.status || 0,
|
||||
stdout: result.stdout || '',
|
||||
stderr: result.stderr || '',
|
||||
};
|
||||
}
|
||||
|
||||
/**
|
||||
* Get the counter file path for a given session ID.
|
||||
*/
|
||||
function getCounterFilePath(sessionId) {
|
||||
return path.join(os.tmpdir(), `claude-tool-count-${sessionId}`);
|
||||
}
|
||||
|
||||
function runTests() {
|
||||
console.log('\n=== Testing suggest-compact.js ===\n');
|
||||
|
||||
let passed = 0;
|
||||
let failed = 0;
|
||||
|
||||
// Use a unique session ID per test run to avoid collisions
|
||||
const testSession = `test-compact-${Date.now()}`;
|
||||
const counterFile = getCounterFilePath(testSession);
|
||||
|
||||
// Cleanup helper
|
||||
function cleanupCounter() {
|
||||
try { fs.unlinkSync(counterFile); } catch {}
|
||||
}
|
||||
|
||||
// Basic functionality
|
||||
console.log('Basic counter functionality:');
|
||||
|
||||
if (test('creates counter file on first run', () => {
|
||||
cleanupCounter();
|
||||
const result = runCompact({ CLAUDE_SESSION_ID: testSession });
|
||||
assert.strictEqual(result.code, 0, 'Should exit 0');
|
||||
assert.ok(fs.existsSync(counterFile), 'Counter file should be created');
|
||||
const count = parseInt(fs.readFileSync(counterFile, 'utf8').trim(), 10);
|
||||
assert.strictEqual(count, 1, 'Counter should be 1 after first run');
|
||||
cleanupCounter();
|
||||
})) passed++; else failed++;
|
||||
|
||||
if (test('increments counter on subsequent runs', () => {
|
||||
cleanupCounter();
|
||||
runCompact({ CLAUDE_SESSION_ID: testSession });
|
||||
runCompact({ CLAUDE_SESSION_ID: testSession });
|
||||
runCompact({ CLAUDE_SESSION_ID: testSession });
|
||||
const count = parseInt(fs.readFileSync(counterFile, 'utf8').trim(), 10);
|
||||
assert.strictEqual(count, 3, 'Counter should be 3 after three runs');
|
||||
cleanupCounter();
|
||||
})) passed++; else failed++;
|
||||
|
||||
// Threshold suggestion
|
||||
console.log('\nThreshold suggestion:');
|
||||
|
||||
if (test('suggests compact at threshold (COMPACT_THRESHOLD=3)', () => {
|
||||
cleanupCounter();
|
||||
// Run 3 times with threshold=3
|
||||
runCompact({ CLAUDE_SESSION_ID: testSession, COMPACT_THRESHOLD: '3' });
|
||||
runCompact({ CLAUDE_SESSION_ID: testSession, COMPACT_THRESHOLD: '3' });
|
||||
const result = runCompact({ CLAUDE_SESSION_ID: testSession, COMPACT_THRESHOLD: '3' });
|
||||
assert.ok(
|
||||
result.stderr.includes('3 tool calls reached') || result.stderr.includes('consider /compact'),
|
||||
`Should suggest compact at threshold. Got stderr: ${result.stderr}`
|
||||
);
|
||||
cleanupCounter();
|
||||
})) passed++; else failed++;
|
||||
|
||||
if (test('does NOT suggest compact before threshold', () => {
|
||||
cleanupCounter();
|
||||
runCompact({ CLAUDE_SESSION_ID: testSession, COMPACT_THRESHOLD: '5' });
|
||||
const result = runCompact({ CLAUDE_SESSION_ID: testSession, COMPACT_THRESHOLD: '5' });
|
||||
assert.ok(
|
||||
!result.stderr.includes('StrategicCompact'),
|
||||
'Should NOT suggest compact before threshold'
|
||||
);
|
||||
cleanupCounter();
|
||||
})) passed++; else failed++;
|
||||
|
||||
// Interval suggestion (every 25 calls after threshold)
|
||||
console.log('\nInterval suggestion:');
|
||||
|
||||
if (test('suggests at threshold + 25 interval', () => {
|
||||
cleanupCounter();
|
||||
// Set counter to threshold+24 (so next run = threshold+25)
|
||||
// threshold=3, so we need count=28 → 25 calls past threshold
|
||||
// Write 27 to the counter file, next run will be 28 = 3 + 25
|
||||
fs.writeFileSync(counterFile, '27');
|
||||
const result = runCompact({ CLAUDE_SESSION_ID: testSession, COMPACT_THRESHOLD: '3' });
|
||||
// count=28, threshold=3, 28-3=25, 25 % 25 === 0 → should suggest
|
||||
assert.ok(
|
||||
result.stderr.includes('28 tool calls') || result.stderr.includes('checkpoint'),
|
||||
`Should suggest at threshold+25 interval. Got stderr: ${result.stderr}`
|
||||
);
|
||||
cleanupCounter();
|
||||
})) passed++; else failed++;
|
||||
|
||||
// Environment variable handling
|
||||
console.log('\nEnvironment variable handling:');
|
||||
|
||||
if (test('uses default threshold (50) when COMPACT_THRESHOLD is not set', () => {
|
||||
cleanupCounter();
|
||||
// Write counter to 49, next run will be 50 = default threshold
|
||||
fs.writeFileSync(counterFile, '49');
|
||||
const result = runCompact({ CLAUDE_SESSION_ID: testSession });
|
||||
// Remove COMPACT_THRESHOLD from env
|
||||
assert.ok(
|
||||
result.stderr.includes('50 tool calls reached'),
|
||||
`Should use default threshold of 50. Got stderr: ${result.stderr}`
|
||||
);
|
||||
cleanupCounter();
|
||||
})) passed++; else failed++;
|
||||
|
||||
if (test('ignores invalid COMPACT_THRESHOLD (negative)', () => {
|
||||
cleanupCounter();
|
||||
fs.writeFileSync(counterFile, '49');
|
||||
const result = runCompact({ CLAUDE_SESSION_ID: testSession, COMPACT_THRESHOLD: '-5' });
|
||||
// Invalid threshold falls back to 50
|
||||
assert.ok(
|
||||
result.stderr.includes('50 tool calls reached'),
|
||||
`Should fallback to 50 for negative threshold. Got stderr: ${result.stderr}`
|
||||
);
|
||||
cleanupCounter();
|
||||
})) passed++; else failed++;
|
||||
|
||||
if (test('ignores non-numeric COMPACT_THRESHOLD', () => {
|
||||
cleanupCounter();
|
||||
fs.writeFileSync(counterFile, '49');
|
||||
const result = runCompact({ CLAUDE_SESSION_ID: testSession, COMPACT_THRESHOLD: 'abc' });
|
||||
// NaN falls back to 50
|
||||
assert.ok(
|
||||
result.stderr.includes('50 tool calls reached'),
|
||||
`Should fallback to 50 for non-numeric threshold. Got stderr: ${result.stderr}`
|
||||
);
|
||||
cleanupCounter();
|
||||
})) passed++; else failed++;
|
||||
|
||||
// Corrupted counter file
|
||||
console.log('\nCorrupted counter file:');
|
||||
|
||||
if (test('resets counter on corrupted file content', () => {
|
||||
cleanupCounter();
|
||||
fs.writeFileSync(counterFile, 'not-a-number');
|
||||
const result = runCompact({ CLAUDE_SESSION_ID: testSession });
|
||||
assert.strictEqual(result.code, 0);
|
||||
// Corrupted file → parsed is NaN → falls back to count=1
|
||||
const count = parseInt(fs.readFileSync(counterFile, 'utf8').trim(), 10);
|
||||
assert.strictEqual(count, 1, 'Should reset to 1 on corrupted file');
|
||||
cleanupCounter();
|
||||
})) passed++; else failed++;
|
||||
|
||||
if (test('resets counter on extremely large value', () => {
|
||||
cleanupCounter();
|
||||
// Value > 1000000 should be clamped
|
||||
fs.writeFileSync(counterFile, '9999999');
|
||||
const result = runCompact({ CLAUDE_SESSION_ID: testSession });
|
||||
assert.strictEqual(result.code, 0);
|
||||
const count = parseInt(fs.readFileSync(counterFile, 'utf8').trim(), 10);
|
||||
assert.strictEqual(count, 1, 'Should reset to 1 for value > 1000000');
|
||||
cleanupCounter();
|
||||
})) passed++; else failed++;
|
||||
|
||||
if (test('handles empty counter file', () => {
|
||||
cleanupCounter();
|
||||
fs.writeFileSync(counterFile, '');
|
||||
const result = runCompact({ CLAUDE_SESSION_ID: testSession });
|
||||
assert.strictEqual(result.code, 0);
|
||||
// Empty file → bytesRead=0 → count starts at 1
|
||||
const count = parseInt(fs.readFileSync(counterFile, 'utf8').trim(), 10);
|
||||
assert.strictEqual(count, 1, 'Should start at 1 for empty file');
|
||||
cleanupCounter();
|
||||
})) passed++; else failed++;
|
||||
|
||||
// Session isolation
|
||||
console.log('\nSession isolation:');
|
||||
|
||||
if (test('uses separate counter files per session ID', () => {
|
||||
const sessionA = `compact-a-${Date.now()}`;
|
||||
const sessionB = `compact-b-${Date.now()}`;
|
||||
const fileA = getCounterFilePath(sessionA);
|
||||
const fileB = getCounterFilePath(sessionB);
|
||||
try {
|
||||
runCompact({ CLAUDE_SESSION_ID: sessionA });
|
||||
runCompact({ CLAUDE_SESSION_ID: sessionA });
|
||||
runCompact({ CLAUDE_SESSION_ID: sessionB });
|
||||
const countA = parseInt(fs.readFileSync(fileA, 'utf8').trim(), 10);
|
||||
const countB = parseInt(fs.readFileSync(fileB, 'utf8').trim(), 10);
|
||||
assert.strictEqual(countA, 2, 'Session A should have count 2');
|
||||
assert.strictEqual(countB, 1, 'Session B should have count 1');
|
||||
} finally {
|
||||
try { fs.unlinkSync(fileA); } catch {}
|
||||
try { fs.unlinkSync(fileB); } catch {}
|
||||
}
|
||||
})) passed++; else failed++;
|
||||
|
||||
// Always exits 0
|
||||
console.log('\nExit code:');
|
||||
|
||||
if (test('always exits 0 (never blocks Claude)', () => {
|
||||
cleanupCounter();
|
||||
const result = runCompact({ CLAUDE_SESSION_ID: testSession });
|
||||
assert.strictEqual(result.code, 0, 'Should always exit 0');
|
||||
cleanupCounter();
|
||||
})) passed++; else failed++;
|
||||
|
||||
// ── Round 29: threshold boundary values ──
|
||||
console.log('\nThreshold boundary values:');
|
||||
|
||||
if (test('rejects COMPACT_THRESHOLD=0 (falls back to 50)', () => {
|
||||
cleanupCounter();
|
||||
fs.writeFileSync(counterFile, '49');
|
||||
const result = runCompact({ CLAUDE_SESSION_ID: testSession, COMPACT_THRESHOLD: '0' });
|
||||
// 0 is invalid (must be > 0), falls back to 50, count becomes 50 → should suggest
|
||||
assert.ok(
|
||||
result.stderr.includes('50 tool calls reached'),
|
||||
`Should fallback to 50 for threshold=0. Got stderr: ${result.stderr}`
|
||||
);
|
||||
cleanupCounter();
|
||||
})) passed++; else failed++;
|
||||
|
||||
if (test('accepts COMPACT_THRESHOLD=10000 (boundary max)', () => {
|
||||
cleanupCounter();
|
||||
fs.writeFileSync(counterFile, '9999');
|
||||
const result = runCompact({ CLAUDE_SESSION_ID: testSession, COMPACT_THRESHOLD: '10000' });
|
||||
// count becomes 10000, threshold=10000 → should suggest
|
||||
assert.ok(
|
||||
result.stderr.includes('10000 tool calls reached'),
|
||||
`Should accept threshold=10000. Got stderr: ${result.stderr}`
|
||||
);
|
||||
cleanupCounter();
|
||||
})) passed++; else failed++;
|
||||
|
||||
if (test('rejects COMPACT_THRESHOLD=10001 (falls back to 50)', () => {
|
||||
cleanupCounter();
|
||||
fs.writeFileSync(counterFile, '49');
|
||||
const result = runCompact({ CLAUDE_SESSION_ID: testSession, COMPACT_THRESHOLD: '10001' });
|
||||
// 10001 > 10000, invalid, falls back to 50, count becomes 50 → should suggest
|
||||
assert.ok(
|
||||
result.stderr.includes('50 tool calls reached'),
|
||||
`Should fallback to 50 for threshold=10001. Got stderr: ${result.stderr}`
|
||||
);
|
||||
cleanupCounter();
|
||||
})) passed++; else failed++;
|
||||
|
||||
if (test('rejects float COMPACT_THRESHOLD (e.g. 3.5)', () => {
|
||||
cleanupCounter();
|
||||
fs.writeFileSync(counterFile, '49');
|
||||
const result = runCompact({ CLAUDE_SESSION_ID: testSession, COMPACT_THRESHOLD: '3.5' });
|
||||
// parseInt('3.5') = 3, which is valid (> 0 && <= 10000)
|
||||
// count becomes 50, threshold=3, 50-3=47, 47%25≠0 and 50≠3 → no suggestion
|
||||
assert.strictEqual(result.code, 0);
|
||||
// No suggestion expected (50 !== 3, and (50-3) % 25 !== 0)
|
||||
assert.ok(
|
||||
!result.stderr.includes('StrategicCompact'),
|
||||
'Float threshold should be parseInt-ed to 3, no suggestion at count=50'
|
||||
);
|
||||
cleanupCounter();
|
||||
})) passed++; else failed++;
|
||||
|
||||
if (test('counter value at exact boundary 1000000 is valid', () => {
|
||||
cleanupCounter();
|
||||
fs.writeFileSync(counterFile, '999999');
|
||||
const result = runCompact({ CLAUDE_SESSION_ID: testSession, COMPACT_THRESHOLD: '3' });
|
||||
// 999999 is valid (> 0, <= 1000000), count becomes 1000000
|
||||
const count = parseInt(fs.readFileSync(counterFile, 'utf8').trim(), 10);
|
||||
assert.strictEqual(count, 1000000, 'Counter at 1000000 boundary should be valid');
|
||||
cleanupCounter();
|
||||
})) passed++; else failed++;
|
||||
|
||||
if (test('counter value at 1000001 is clamped (reset to 1)', () => {
|
||||
cleanupCounter();
|
||||
fs.writeFileSync(counterFile, '1000001');
|
||||
const result = runCompact({ CLAUDE_SESSION_ID: testSession });
|
||||
const count = parseInt(fs.readFileSync(counterFile, 'utf8').trim(), 10);
|
||||
assert.strictEqual(count, 1, 'Counter > 1000000 should be reset to 1');
|
||||
cleanupCounter();
|
||||
})) passed++; else failed++;
|
||||
|
||||
// Summary
|
||||
console.log(`\nResults: Passed: ${passed}, Failed: ${failed}`);
|
||||
process.exit(failed > 0 ? 1 : 0);
|
||||
}
|
||||
|
||||
runTests();
|
||||
@@ -405,6 +405,125 @@ async function runTests() {
|
||||
);
|
||||
})) passed++; else failed++;
|
||||
|
||||
// ==========================================
|
||||
// Session End Transcript Parsing Tests
|
||||
// ==========================================
|
||||
console.log('\nSession End Transcript Parsing:');
|
||||
|
||||
if (await asyncTest('session-end extracts summary from mixed JSONL formats', async () => {
|
||||
const testDir = createTestDir();
|
||||
const transcriptPath = path.join(testDir, 'mixed-transcript.jsonl');
|
||||
|
||||
// Create transcript with both direct tool_use and nested assistant message formats
|
||||
const lines = [
|
||||
JSON.stringify({ type: 'user', content: 'Fix the login bug' }),
|
||||
JSON.stringify({ type: 'tool_use', name: 'Read', input: { file_path: 'src/auth.ts' } }),
|
||||
JSON.stringify({ type: 'assistant', message: { content: [
|
||||
{ type: 'tool_use', name: 'Edit', input: { file_path: 'src/auth.ts' } }
|
||||
]}}),
|
||||
JSON.stringify({ type: 'user', content: 'Now add tests' }),
|
||||
JSON.stringify({ type: 'assistant', message: { content: [
|
||||
{ type: 'tool_use', name: 'Write', input: { file_path: 'tests/auth.test.ts' } },
|
||||
{ type: 'text', text: 'Here are the tests' }
|
||||
]}}),
|
||||
JSON.stringify({ type: 'user', content: 'Looks good, commit' })
|
||||
];
|
||||
fs.writeFileSync(transcriptPath, lines.join('\n'));
|
||||
|
||||
try {
|
||||
const result = await runHookWithInput(
|
||||
path.join(scriptsDir, 'session-end.js'),
|
||||
{ transcript_path: transcriptPath },
|
||||
{ HOME: testDir, USERPROFILE: testDir }
|
||||
);
|
||||
|
||||
assert.strictEqual(result.code, 0, 'Should exit 0');
|
||||
assert.ok(result.stderr.includes('[SessionEnd]'), 'Should have SessionEnd log');
|
||||
|
||||
// Verify a session file was created
|
||||
const sessionsDir = path.join(testDir, '.claude', 'sessions');
|
||||
if (fs.existsSync(sessionsDir)) {
|
||||
const files = fs.readdirSync(sessionsDir).filter(f => f.endsWith('.tmp'));
|
||||
assert.ok(files.length > 0, 'Should create a session file');
|
||||
|
||||
// Verify session content includes tasks from user messages
|
||||
const content = fs.readFileSync(path.join(sessionsDir, files[0]), 'utf8');
|
||||
assert.ok(content.includes('Fix the login bug'), 'Should include first user message');
|
||||
assert.ok(content.includes('auth.ts'), 'Should include modified files');
|
||||
}
|
||||
} finally {
|
||||
cleanupTestDir(testDir);
|
||||
}
|
||||
})) passed++; else failed++;
|
||||
|
||||
if (await asyncTest('session-end handles transcript with malformed lines gracefully', async () => {
|
||||
const testDir = createTestDir();
|
||||
const transcriptPath = path.join(testDir, 'malformed-transcript.jsonl');
|
||||
|
||||
const lines = [
|
||||
JSON.stringify({ type: 'user', content: 'Task 1' }),
|
||||
'{broken json here',
|
||||
JSON.stringify({ type: 'user', content: 'Task 2' }),
|
||||
'{"truncated":',
|
||||
JSON.stringify({ type: 'user', content: 'Task 3' })
|
||||
];
|
||||
fs.writeFileSync(transcriptPath, lines.join('\n'));
|
||||
|
||||
try {
|
||||
const result = await runHookWithInput(
|
||||
path.join(scriptsDir, 'session-end.js'),
|
||||
{ transcript_path: transcriptPath },
|
||||
{ HOME: testDir, USERPROFILE: testDir }
|
||||
);
|
||||
|
||||
assert.strictEqual(result.code, 0, 'Should exit 0 despite malformed lines');
|
||||
// Should still process the valid lines
|
||||
assert.ok(result.stderr.includes('[SessionEnd]'), 'Should have SessionEnd log');
|
||||
assert.ok(result.stderr.includes('unparseable'), 'Should warn about unparseable lines');
|
||||
} finally {
|
||||
cleanupTestDir(testDir);
|
||||
}
|
||||
})) passed++; else failed++;
|
||||
|
||||
if (await asyncTest('session-end creates session file with nested user messages', async () => {
|
||||
const testDir = createTestDir();
|
||||
const transcriptPath = path.join(testDir, 'nested-transcript.jsonl');
|
||||
|
||||
// Claude Code JSONL format uses nested message.content arrays
|
||||
const lines = [
|
||||
JSON.stringify({ type: 'user', message: { role: 'user', content: [
|
||||
{ type: 'text', text: 'Refactor the utils module' }
|
||||
]}}),
|
||||
JSON.stringify({ type: 'assistant', message: { content: [
|
||||
{ type: 'tool_use', name: 'Read', input: { file_path: 'lib/utils.js' } }
|
||||
]}}),
|
||||
JSON.stringify({ type: 'user', message: { role: 'user', content: 'Approve the changes' }})
|
||||
];
|
||||
fs.writeFileSync(transcriptPath, lines.join('\n'));
|
||||
|
||||
try {
|
||||
const result = await runHookWithInput(
|
||||
path.join(scriptsDir, 'session-end.js'),
|
||||
{ transcript_path: transcriptPath },
|
||||
{ HOME: testDir, USERPROFILE: testDir }
|
||||
);
|
||||
|
||||
assert.strictEqual(result.code, 0, 'Should exit 0');
|
||||
|
||||
// Check session file was created
|
||||
const sessionsDir = path.join(testDir, '.claude', 'sessions');
|
||||
if (fs.existsSync(sessionsDir)) {
|
||||
const files = fs.readdirSync(sessionsDir).filter(f => f.endsWith('.tmp'));
|
||||
assert.ok(files.length > 0, 'Should create session file');
|
||||
const content = fs.readFileSync(path.join(sessionsDir, files[0]), 'utf8');
|
||||
assert.ok(content.includes('Refactor the utils module') || content.includes('Approve'),
|
||||
'Should extract user messages from nested format');
|
||||
}
|
||||
} finally {
|
||||
cleanupTestDir(testDir);
|
||||
}
|
||||
})) passed++; else failed++;
|
||||
|
||||
// ==========================================
|
||||
// Error Handling Tests
|
||||
// ==========================================
|
||||
|
||||
@@ -607,6 +607,257 @@ function runTests() {
|
||||
}
|
||||
})) passed++; else failed++;
|
||||
|
||||
// getPackageManager source detection tests
|
||||
console.log('\ngetPackageManager (source detection):');
|
||||
|
||||
if (test('detects from valid project-config (.claude/package-manager.json)', () => {
|
||||
const testDir = fs.mkdtempSync(path.join(os.tmpdir(), 'pm-projcfg-'));
|
||||
const claudeDir = path.join(testDir, '.claude');
|
||||
fs.mkdirSync(claudeDir, { recursive: true });
|
||||
fs.writeFileSync(path.join(claudeDir, 'package-manager.json'),
|
||||
JSON.stringify({ packageManager: 'pnpm' }));
|
||||
|
||||
const originalEnv = process.env.CLAUDE_PACKAGE_MANAGER;
|
||||
try {
|
||||
delete process.env.CLAUDE_PACKAGE_MANAGER;
|
||||
const result = pm.getPackageManager({ projectDir: testDir });
|
||||
assert.strictEqual(result.name, 'pnpm', 'Should detect pnpm from project config');
|
||||
assert.strictEqual(result.source, 'project-config', 'Source should be project-config');
|
||||
} finally {
|
||||
if (originalEnv !== undefined) {
|
||||
process.env.CLAUDE_PACKAGE_MANAGER = originalEnv;
|
||||
}
|
||||
fs.rmSync(testDir, { recursive: true, force: true });
|
||||
}
|
||||
})) passed++; else failed++;
|
||||
|
||||
if (test('project-config takes priority over package.json', () => {
|
||||
const testDir = fs.mkdtempSync(path.join(os.tmpdir(), 'pm-priority-'));
|
||||
const claudeDir = path.join(testDir, '.claude');
|
||||
fs.mkdirSync(claudeDir, { recursive: true });
|
||||
|
||||
// Project config says bun
|
||||
fs.writeFileSync(path.join(claudeDir, 'package-manager.json'),
|
||||
JSON.stringify({ packageManager: 'bun' }));
|
||||
// package.json says yarn
|
||||
fs.writeFileSync(path.join(testDir, 'package.json'),
|
||||
JSON.stringify({ packageManager: 'yarn@4.0.0' }));
|
||||
// Lock file says npm
|
||||
fs.writeFileSync(path.join(testDir, 'package-lock.json'), '{}');
|
||||
|
||||
const originalEnv = process.env.CLAUDE_PACKAGE_MANAGER;
|
||||
try {
|
||||
delete process.env.CLAUDE_PACKAGE_MANAGER;
|
||||
const result = pm.getPackageManager({ projectDir: testDir });
|
||||
assert.strictEqual(result.name, 'bun', 'Project config should win over package.json and lock file');
|
||||
assert.strictEqual(result.source, 'project-config');
|
||||
} finally {
|
||||
if (originalEnv !== undefined) {
|
||||
process.env.CLAUDE_PACKAGE_MANAGER = originalEnv;
|
||||
}
|
||||
fs.rmSync(testDir, { recursive: true, force: true });
|
||||
}
|
||||
})) passed++; else failed++;
|
||||
|
||||
if (test('package.json takes priority over lock file', () => {
|
||||
const testDir = fs.mkdtempSync(path.join(os.tmpdir(), 'pm-pj-lock-'));
|
||||
// package.json says yarn
|
||||
fs.writeFileSync(path.join(testDir, 'package.json'),
|
||||
JSON.stringify({ packageManager: 'yarn@4.0.0' }));
|
||||
// Lock file says npm
|
||||
fs.writeFileSync(path.join(testDir, 'package-lock.json'), '{}');
|
||||
|
||||
const originalEnv = process.env.CLAUDE_PACKAGE_MANAGER;
|
||||
try {
|
||||
delete process.env.CLAUDE_PACKAGE_MANAGER;
|
||||
const result = pm.getPackageManager({ projectDir: testDir });
|
||||
assert.strictEqual(result.name, 'yarn', 'package.json should win over lock file');
|
||||
assert.strictEqual(result.source, 'package.json');
|
||||
} finally {
|
||||
if (originalEnv !== undefined) {
|
||||
process.env.CLAUDE_PACKAGE_MANAGER = originalEnv;
|
||||
}
|
||||
fs.rmSync(testDir, { recursive: true, force: true });
|
||||
}
|
||||
})) passed++; else failed++;
|
||||
|
||||
if (test('defaults to npm when no config found', () => {
|
||||
const testDir = fs.mkdtempSync(path.join(os.tmpdir(), 'pm-default-'));
|
||||
const originalEnv = process.env.CLAUDE_PACKAGE_MANAGER;
|
||||
try {
|
||||
delete process.env.CLAUDE_PACKAGE_MANAGER;
|
||||
const result = pm.getPackageManager({ projectDir: testDir });
|
||||
assert.strictEqual(result.name, 'npm', 'Should default to npm');
|
||||
assert.strictEqual(result.source, 'default');
|
||||
} finally {
|
||||
if (originalEnv !== undefined) {
|
||||
process.env.CLAUDE_PACKAGE_MANAGER = originalEnv;
|
||||
}
|
||||
fs.rmSync(testDir, { recursive: true, force: true });
|
||||
}
|
||||
})) passed++; else failed++;
|
||||
|
||||
// setPreferredPackageManager success
|
||||
console.log('\nsetPreferredPackageManager (success):');
|
||||
|
||||
if (test('successfully saves preferred package manager', () => {
|
||||
// This writes to ~/.claude/package-manager.json — read original to restore
|
||||
const utils = require('../../scripts/lib/utils');
|
||||
const configPath = path.join(utils.getClaudeDir(), 'package-manager.json');
|
||||
const original = utils.readFile(configPath);
|
||||
try {
|
||||
const config = pm.setPreferredPackageManager('bun');
|
||||
assert.strictEqual(config.packageManager, 'bun');
|
||||
assert.ok(config.setAt, 'Should have setAt timestamp');
|
||||
// Verify it was persisted
|
||||
const saved = JSON.parse(fs.readFileSync(configPath, 'utf8'));
|
||||
assert.strictEqual(saved.packageManager, 'bun');
|
||||
} finally {
|
||||
// Restore original config
|
||||
if (original) {
|
||||
fs.writeFileSync(configPath, original, 'utf8');
|
||||
} else {
|
||||
try { fs.unlinkSync(configPath); } catch {}
|
||||
}
|
||||
}
|
||||
})) passed++; else failed++;
|
||||
|
||||
// getCommandPattern completeness
|
||||
console.log('\ngetCommandPattern (completeness):');
|
||||
|
||||
if (test('generates pattern for test command', () => {
|
||||
const pattern = pm.getCommandPattern('test');
|
||||
assert.ok(pattern.includes('npm test'), 'Should include npm test');
|
||||
assert.ok(pattern.includes('pnpm test'), 'Should include pnpm test');
|
||||
assert.ok(pattern.includes('bun test'), 'Should include bun test');
|
||||
})) passed++; else failed++;
|
||||
|
||||
if (test('generates pattern for build command', () => {
|
||||
const pattern = pm.getCommandPattern('build');
|
||||
assert.ok(pattern.includes('npm run build'), 'Should include npm run build');
|
||||
assert.ok(pattern.includes('yarn build'), 'Should include yarn build');
|
||||
})) passed++; else failed++;
|
||||
|
||||
// getRunCommand PM-specific format tests
|
||||
console.log('\ngetRunCommand (PM-specific formats):');
|
||||
|
||||
if (test('pnpm custom script: pnpm <script> (no run keyword)', () => {
|
||||
const originalEnv = process.env.CLAUDE_PACKAGE_MANAGER;
|
||||
try {
|
||||
process.env.CLAUDE_PACKAGE_MANAGER = 'pnpm';
|
||||
const cmd = pm.getRunCommand('lint');
|
||||
assert.strictEqual(cmd, 'pnpm lint', 'pnpm uses "pnpm <script>" format');
|
||||
} finally {
|
||||
if (originalEnv !== undefined) process.env.CLAUDE_PACKAGE_MANAGER = originalEnv;
|
||||
else delete process.env.CLAUDE_PACKAGE_MANAGER;
|
||||
}
|
||||
})) passed++; else failed++;
|
||||
|
||||
if (test('yarn custom script: yarn <script>', () => {
|
||||
const originalEnv = process.env.CLAUDE_PACKAGE_MANAGER;
|
||||
try {
|
||||
process.env.CLAUDE_PACKAGE_MANAGER = 'yarn';
|
||||
const cmd = pm.getRunCommand('format');
|
||||
assert.strictEqual(cmd, 'yarn format', 'yarn uses "yarn <script>" format');
|
||||
} finally {
|
||||
if (originalEnv !== undefined) process.env.CLAUDE_PACKAGE_MANAGER = originalEnv;
|
||||
else delete process.env.CLAUDE_PACKAGE_MANAGER;
|
||||
}
|
||||
})) passed++; else failed++;
|
||||
|
||||
if (test('bun custom script: bun run <script>', () => {
|
||||
const originalEnv = process.env.CLAUDE_PACKAGE_MANAGER;
|
||||
try {
|
||||
process.env.CLAUDE_PACKAGE_MANAGER = 'bun';
|
||||
const cmd = pm.getRunCommand('typecheck');
|
||||
assert.strictEqual(cmd, 'bun run typecheck', 'bun uses "bun run <script>" format');
|
||||
} finally {
|
||||
if (originalEnv !== undefined) process.env.CLAUDE_PACKAGE_MANAGER = originalEnv;
|
||||
else delete process.env.CLAUDE_PACKAGE_MANAGER;
|
||||
}
|
||||
})) passed++; else failed++;
|
||||
|
||||
if (test('npm custom script: npm run <script>', () => {
|
||||
const originalEnv = process.env.CLAUDE_PACKAGE_MANAGER;
|
||||
try {
|
||||
process.env.CLAUDE_PACKAGE_MANAGER = 'npm';
|
||||
const cmd = pm.getRunCommand('lint');
|
||||
assert.strictEqual(cmd, 'npm run lint', 'npm uses "npm run <script>" format');
|
||||
} finally {
|
||||
if (originalEnv !== undefined) process.env.CLAUDE_PACKAGE_MANAGER = originalEnv;
|
||||
else delete process.env.CLAUDE_PACKAGE_MANAGER;
|
||||
}
|
||||
})) passed++; else failed++;
|
||||
|
||||
if (test('pnpm install returns pnpm install', () => {
|
||||
const originalEnv = process.env.CLAUDE_PACKAGE_MANAGER;
|
||||
try {
|
||||
process.env.CLAUDE_PACKAGE_MANAGER = 'pnpm';
|
||||
assert.strictEqual(pm.getRunCommand('install'), 'pnpm install');
|
||||
} finally {
|
||||
if (originalEnv !== undefined) process.env.CLAUDE_PACKAGE_MANAGER = originalEnv;
|
||||
else delete process.env.CLAUDE_PACKAGE_MANAGER;
|
||||
}
|
||||
})) passed++; else failed++;
|
||||
|
||||
if (test('yarn install returns yarn (no install keyword)', () => {
|
||||
const originalEnv = process.env.CLAUDE_PACKAGE_MANAGER;
|
||||
try {
|
||||
process.env.CLAUDE_PACKAGE_MANAGER = 'yarn';
|
||||
assert.strictEqual(pm.getRunCommand('install'), 'yarn');
|
||||
} finally {
|
||||
if (originalEnv !== undefined) process.env.CLAUDE_PACKAGE_MANAGER = originalEnv;
|
||||
else delete process.env.CLAUDE_PACKAGE_MANAGER;
|
||||
}
|
||||
})) passed++; else failed++;
|
||||
|
||||
if (test('bun test returns bun test', () => {
|
||||
const originalEnv = process.env.CLAUDE_PACKAGE_MANAGER;
|
||||
try {
|
||||
process.env.CLAUDE_PACKAGE_MANAGER = 'bun';
|
||||
assert.strictEqual(pm.getRunCommand('test'), 'bun test');
|
||||
} finally {
|
||||
if (originalEnv !== undefined) process.env.CLAUDE_PACKAGE_MANAGER = originalEnv;
|
||||
else delete process.env.CLAUDE_PACKAGE_MANAGER;
|
||||
}
|
||||
})) passed++; else failed++;
|
||||
|
||||
// getExecCommand PM-specific format tests
|
||||
console.log('\ngetExecCommand (PM-specific formats):');
|
||||
|
||||
if (test('pnpm exec: pnpm dlx <binary>', () => {
|
||||
const originalEnv = process.env.CLAUDE_PACKAGE_MANAGER;
|
||||
try {
|
||||
process.env.CLAUDE_PACKAGE_MANAGER = 'pnpm';
|
||||
assert.strictEqual(pm.getExecCommand('prettier', '--write .'), 'pnpm dlx prettier --write .');
|
||||
} finally {
|
||||
if (originalEnv !== undefined) process.env.CLAUDE_PACKAGE_MANAGER = originalEnv;
|
||||
else delete process.env.CLAUDE_PACKAGE_MANAGER;
|
||||
}
|
||||
})) passed++; else failed++;
|
||||
|
||||
if (test('yarn exec: yarn dlx <binary>', () => {
|
||||
const originalEnv = process.env.CLAUDE_PACKAGE_MANAGER;
|
||||
try {
|
||||
process.env.CLAUDE_PACKAGE_MANAGER = 'yarn';
|
||||
assert.strictEqual(pm.getExecCommand('eslint', '.'), 'yarn dlx eslint .');
|
||||
} finally {
|
||||
if (originalEnv !== undefined) process.env.CLAUDE_PACKAGE_MANAGER = originalEnv;
|
||||
else delete process.env.CLAUDE_PACKAGE_MANAGER;
|
||||
}
|
||||
})) passed++; else failed++;
|
||||
|
||||
if (test('bun exec: bunx <binary>', () => {
|
||||
const originalEnv = process.env.CLAUDE_PACKAGE_MANAGER;
|
||||
try {
|
||||
process.env.CLAUDE_PACKAGE_MANAGER = 'bun';
|
||||
assert.strictEqual(pm.getExecCommand('tsc', '--noEmit'), 'bunx tsc --noEmit');
|
||||
} finally {
|
||||
if (originalEnv !== undefined) process.env.CLAUDE_PACKAGE_MANAGER = originalEnv;
|
||||
else delete process.env.CLAUDE_PACKAGE_MANAGER;
|
||||
}
|
||||
})) passed++; else failed++;
|
||||
|
||||
if (test('ignores unknown env var package manager', () => {
|
||||
const originalEnv = process.env.CLAUDE_PACKAGE_MANAGER;
|
||||
try {
|
||||
@@ -623,6 +874,296 @@ function runTests() {
|
||||
}
|
||||
})) passed++; else failed++;
|
||||
|
||||
// ─── Round 21: getExecCommand args validation ───
|
||||
console.log('\ngetExecCommand (args validation):');
|
||||
|
||||
if (test('rejects args with shell metacharacter semicolon', () => {
|
||||
assert.throws(() => pm.getExecCommand('prettier', '; rm -rf /'), /unsafe characters/);
|
||||
})) passed++; else failed++;
|
||||
|
||||
if (test('rejects args with pipe character', () => {
|
||||
assert.throws(() => pm.getExecCommand('prettier', '--write . | cat'), /unsafe characters/);
|
||||
})) passed++; else failed++;
|
||||
|
||||
if (test('rejects args with backtick injection', () => {
|
||||
assert.throws(() => pm.getExecCommand('prettier', '`whoami`'), /unsafe characters/);
|
||||
})) passed++; else failed++;
|
||||
|
||||
if (test('rejects args with dollar sign', () => {
|
||||
assert.throws(() => pm.getExecCommand('prettier', '$HOME'), /unsafe characters/);
|
||||
})) passed++; else failed++;
|
||||
|
||||
if (test('rejects args with ampersand', () => {
|
||||
assert.throws(() => pm.getExecCommand('prettier', '--write . && echo pwned'), /unsafe characters/);
|
||||
})) passed++; else failed++;
|
||||
|
||||
if (test('allows safe args like --write .', () => {
|
||||
const cmd = pm.getExecCommand('prettier', '--write .');
|
||||
assert.ok(cmd.includes('--write .'), 'Should include safe args');
|
||||
})) passed++; else failed++;
|
||||
|
||||
if (test('allows empty args without trailing space', () => {
|
||||
const cmd = pm.getExecCommand('prettier', '');
|
||||
assert.ok(!cmd.endsWith(' '), 'Should not have trailing space for empty args');
|
||||
})) passed++; else failed++;
|
||||
|
||||
// ─── Round 21: getCommandPattern regex escaping ───
|
||||
console.log('\ngetCommandPattern (regex escaping):');
|
||||
|
||||
if (test('escapes dot in action name for regex safety', () => {
|
||||
const pattern = pm.getCommandPattern('test.all');
|
||||
// The dot should be escaped to \\. in the pattern
|
||||
const regex = new RegExp(pattern);
|
||||
assert.ok(regex.test('npm run test.all'), 'Should match literal dot');
|
||||
assert.ok(!regex.test('npm run testXall'), 'Should NOT match arbitrary character in place of dot');
|
||||
})) passed++; else failed++;
|
||||
|
||||
if (test('escapes brackets in action name', () => {
|
||||
const pattern = pm.getCommandPattern('build[prod]');
|
||||
const regex = new RegExp(pattern);
|
||||
assert.ok(regex.test('npm run build[prod]'), 'Should match literal brackets');
|
||||
})) passed++; else failed++;
|
||||
|
||||
if (test('escapes parentheses in action name', () => {
|
||||
// Should not throw when compiled as regex
|
||||
const pattern = pm.getCommandPattern('foo(bar)');
|
||||
assert.doesNotThrow(() => new RegExp(pattern), 'Should produce valid regex with escaped parens');
|
||||
})) passed++; else failed++;
|
||||
|
||||
// ── Round 27: input validation and escapeRegex edge cases ──
|
||||
console.log('\ngetRunCommand (non-string input):');
|
||||
|
||||
if (test('rejects undefined script name', () => {
|
||||
assert.throws(() => pm.getRunCommand(undefined), /non-empty string/);
|
||||
})) passed++; else failed++;
|
||||
|
||||
if (test('rejects numeric script name', () => {
|
||||
assert.throws(() => pm.getRunCommand(123), /non-empty string/);
|
||||
})) passed++; else failed++;
|
||||
|
||||
if (test('rejects boolean script name', () => {
|
||||
assert.throws(() => pm.getRunCommand(true), /non-empty string/);
|
||||
})) passed++; else failed++;
|
||||
|
||||
console.log('\ngetExecCommand (non-string binary):');
|
||||
|
||||
if (test('rejects undefined binary name', () => {
|
||||
assert.throws(() => pm.getExecCommand(undefined), /non-empty string/);
|
||||
})) passed++; else failed++;
|
||||
|
||||
if (test('rejects numeric binary name', () => {
|
||||
assert.throws(() => pm.getExecCommand(42), /non-empty string/);
|
||||
})) passed++; else failed++;
|
||||
|
||||
console.log('\ngetCommandPattern (escapeRegex completeness):');
|
||||
|
||||
if (test('escapes all regex metacharacters in action', () => {
|
||||
// All regex metacharacters: . * + ? ^ $ { } ( ) | [ ] \
|
||||
const action = 'test.*+?^${}()|[]\\';
|
||||
const pattern = pm.getCommandPattern(action);
|
||||
// Should produce a valid regex without throwing
|
||||
assert.doesNotThrow(() => new RegExp(pattern), 'Should produce valid regex');
|
||||
// Should match the literal string
|
||||
const regex = new RegExp(pattern);
|
||||
assert.ok(regex.test(`npm run ${action}`), 'Should match literal metacharacters');
|
||||
})) passed++; else failed++;
|
||||
|
||||
if (test('escapeRegex preserves alphanumeric chars', () => {
|
||||
const pattern = pm.getCommandPattern('simple-test');
|
||||
const regex = new RegExp(pattern);
|
||||
assert.ok(regex.test('npm run simple-test'), 'Should match simple action name');
|
||||
assert.ok(!regex.test('npm run simpleXtest'), 'Dash should not match arbitrary char');
|
||||
})) passed++; else failed++;
|
||||
|
||||
console.log('\ngetPackageManager (global config edge cases):');
|
||||
|
||||
if (test('ignores global config with non-string packageManager', () => {
|
||||
// This tests the path through loadConfig where packageManager is not a valid PM name
|
||||
const originalEnv = process.env.CLAUDE_PACKAGE_MANAGER;
|
||||
try {
|
||||
delete process.env.CLAUDE_PACKAGE_MANAGER;
|
||||
// getPackageManager should fall through to default when no valid config exists
|
||||
const result = pm.getPackageManager({ projectDir: os.tmpdir() });
|
||||
assert.ok(result.name, 'Should return a package manager name');
|
||||
assert.ok(result.config, 'Should return config object');
|
||||
} finally {
|
||||
if (originalEnv !== undefined) {
|
||||
process.env.CLAUDE_PACKAGE_MANAGER = originalEnv;
|
||||
}
|
||||
}
|
||||
})) passed++; else failed++;
|
||||
|
||||
// ── Round 30: getCommandPattern with special action patterns ──
|
||||
console.log('\nRound 30: getCommandPattern edge cases:');
|
||||
|
||||
if (test('escapes pipe character in action name', () => {
|
||||
const pattern = pm.getCommandPattern('lint|fix');
|
||||
const regex = new RegExp(pattern);
|
||||
assert.ok(regex.test('npm run lint|fix'), 'Should match literal pipe');
|
||||
assert.ok(!regex.test('npm run lint'), 'Pipe should be literal, not regex OR');
|
||||
})) passed++; else failed++;
|
||||
|
||||
if (test('escapes dollar sign in action name', () => {
|
||||
const pattern = pm.getCommandPattern('deploy$prod');
|
||||
const regex = new RegExp(pattern);
|
||||
assert.ok(regex.test('npm run deploy$prod'), 'Should match literal dollar sign');
|
||||
})) passed++; else failed++;
|
||||
|
||||
if (test('handles action with leading/trailing spaces gracefully', () => {
|
||||
// Spaces aren't special in regex but good to test the full pattern
|
||||
const pattern = pm.getCommandPattern(' dev ');
|
||||
const regex = new RegExp(pattern);
|
||||
assert.ok(regex.test('npm run dev '), 'Should match action with spaces');
|
||||
})) passed++; else failed++;
|
||||
|
||||
if (test('known action "dev" does NOT use escapeRegex path', () => {
|
||||
// "dev" is a known action with hardcoded patterns, not the generic path
|
||||
const pattern = pm.getCommandPattern('dev');
|
||||
// Should match pnpm dev (without "run")
|
||||
const regex = new RegExp(pattern);
|
||||
assert.ok(regex.test('pnpm dev'), 'Known action pnpm dev should match');
|
||||
})) passed++; else failed++;
|
||||
|
||||
// ── Round 31: setProjectPackageManager write verification ──
|
||||
console.log('\nsetProjectPackageManager (write verification, Round 31):');
|
||||
|
||||
if (test('setProjectPackageManager creates .claude directory if missing', () => {
|
||||
const testDir = fs.mkdtempSync(path.join(os.tmpdir(), 'pm-mkdir-'));
|
||||
try {
|
||||
const claudeDir = path.join(testDir, '.claude');
|
||||
assert.ok(!fs.existsSync(claudeDir), '.claude should not pre-exist');
|
||||
pm.setProjectPackageManager('npm', testDir);
|
||||
assert.ok(fs.existsSync(claudeDir), '.claude should be created');
|
||||
const configPath = path.join(claudeDir, 'package-manager.json');
|
||||
assert.ok(fs.existsSync(configPath), 'Config file should be created');
|
||||
} finally {
|
||||
fs.rmSync(testDir, { recursive: true, force: true });
|
||||
}
|
||||
})) passed++; else failed++;
|
||||
|
||||
if (test('setProjectPackageManager includes setAt timestamp', () => {
|
||||
const testDir = fs.mkdtempSync(path.join(os.tmpdir(), 'pm-ts-'));
|
||||
try {
|
||||
const before = new Date().toISOString();
|
||||
const config = pm.setProjectPackageManager('yarn', testDir);
|
||||
const after = new Date().toISOString();
|
||||
assert.ok(config.setAt >= before, 'setAt should be >= before');
|
||||
assert.ok(config.setAt <= after, 'setAt should be <= after');
|
||||
} finally {
|
||||
fs.rmSync(testDir, { recursive: true, force: true });
|
||||
}
|
||||
})) passed++; else failed++;
|
||||
|
||||
// ── Round 31: getExecCommand safe argument edge cases ──
|
||||
console.log('\ngetExecCommand (safe argument edge cases, Round 31):');
|
||||
|
||||
if (test('allows colons in args (e.g. --fix:all)', () => {
|
||||
const originalEnv = process.env.CLAUDE_PACKAGE_MANAGER;
|
||||
try {
|
||||
process.env.CLAUDE_PACKAGE_MANAGER = 'npm';
|
||||
const cmd = pm.getExecCommand('eslint', '--fix:all');
|
||||
assert.ok(cmd.includes('--fix:all'), 'Colons should be allowed in args');
|
||||
} finally {
|
||||
if (originalEnv !== undefined) process.env.CLAUDE_PACKAGE_MANAGER = originalEnv;
|
||||
else delete process.env.CLAUDE_PACKAGE_MANAGER;
|
||||
}
|
||||
})) passed++; else failed++;
|
||||
|
||||
if (test('allows at-sign in args (e.g. @latest)', () => {
|
||||
const originalEnv = process.env.CLAUDE_PACKAGE_MANAGER;
|
||||
try {
|
||||
process.env.CLAUDE_PACKAGE_MANAGER = 'npm';
|
||||
const cmd = pm.getExecCommand('create-next-app', '@latest');
|
||||
assert.ok(cmd.includes('@latest'), 'At-sign should be allowed in args');
|
||||
} finally {
|
||||
if (originalEnv !== undefined) process.env.CLAUDE_PACKAGE_MANAGER = originalEnv;
|
||||
else delete process.env.CLAUDE_PACKAGE_MANAGER;
|
||||
}
|
||||
})) passed++; else failed++;
|
||||
|
||||
if (test('allows equals in args (e.g. --config=path)', () => {
|
||||
const originalEnv = process.env.CLAUDE_PACKAGE_MANAGER;
|
||||
try {
|
||||
process.env.CLAUDE_PACKAGE_MANAGER = 'npm';
|
||||
const cmd = pm.getExecCommand('prettier', '--config=.prettierrc');
|
||||
assert.ok(cmd.includes('--config=.prettierrc'), 'Equals should be allowed');
|
||||
} finally {
|
||||
if (originalEnv !== undefined) process.env.CLAUDE_PACKAGE_MANAGER = originalEnv;
|
||||
else delete process.env.CLAUDE_PACKAGE_MANAGER;
|
||||
}
|
||||
})) passed++; else failed++;
|
||||
|
||||
// ── Round 34: getExecCommand non-string args & packageManager type ──
|
||||
console.log('\nRound 34: getExecCommand non-string args:');
|
||||
|
||||
if (test('getExecCommand with args=0 produces command without extra args', () => {
|
||||
const originalEnv = process.env.CLAUDE_PACKAGE_MANAGER;
|
||||
try {
|
||||
process.env.CLAUDE_PACKAGE_MANAGER = 'npm';
|
||||
const cmd = pm.getExecCommand('prettier', 0);
|
||||
// 0 is falsy, so ternary `args ? ' ' + args : ''` yields ''
|
||||
assert.ok(!cmd.includes(' 0'), 'Should not append 0 as args');
|
||||
assert.ok(cmd.includes('prettier'), 'Should include binary name');
|
||||
} finally {
|
||||
if (originalEnv !== undefined) process.env.CLAUDE_PACKAGE_MANAGER = originalEnv;
|
||||
else delete process.env.CLAUDE_PACKAGE_MANAGER;
|
||||
}
|
||||
})) passed++; else failed++;
|
||||
|
||||
if (test('getExecCommand with args=false produces command without extra args', () => {
|
||||
const originalEnv = process.env.CLAUDE_PACKAGE_MANAGER;
|
||||
try {
|
||||
process.env.CLAUDE_PACKAGE_MANAGER = 'npm';
|
||||
const cmd = pm.getExecCommand('eslint', false);
|
||||
assert.ok(!cmd.includes('false'), 'Should not append false as args');
|
||||
assert.ok(cmd.includes('eslint'), 'Should include binary name');
|
||||
} finally {
|
||||
if (originalEnv !== undefined) process.env.CLAUDE_PACKAGE_MANAGER = originalEnv;
|
||||
else delete process.env.CLAUDE_PACKAGE_MANAGER;
|
||||
}
|
||||
})) passed++; else failed++;
|
||||
|
||||
if (test('getExecCommand with args=null produces command without extra args', () => {
|
||||
const originalEnv = process.env.CLAUDE_PACKAGE_MANAGER;
|
||||
try {
|
||||
process.env.CLAUDE_PACKAGE_MANAGER = 'npm';
|
||||
const cmd = pm.getExecCommand('tsc', null);
|
||||
assert.ok(!cmd.includes('null'), 'Should not append null as args');
|
||||
assert.ok(cmd.includes('tsc'), 'Should include binary name');
|
||||
} finally {
|
||||
if (originalEnv !== undefined) process.env.CLAUDE_PACKAGE_MANAGER = originalEnv;
|
||||
else delete process.env.CLAUDE_PACKAGE_MANAGER;
|
||||
}
|
||||
})) passed++; else failed++;
|
||||
|
||||
console.log('\nRound 34: detectFromPackageJson with non-string packageManager:');
|
||||
|
||||
if (test('detectFromPackageJson handles array packageManager field gracefully', () => {
|
||||
const tmpDir = createTestDir();
|
||||
try {
|
||||
// Write a malformed package.json with array instead of string
|
||||
fs.writeFileSync(path.join(tmpDir, 'package.json'),
|
||||
JSON.stringify({ packageManager: ['pnpm@8', 'yarn@3'] }));
|
||||
// Should not crash — try/catch in detectFromPackageJson catches TypeError
|
||||
const result = pm.getPackageManager({ projectDir: tmpDir });
|
||||
assert.ok(result.name, 'Should fallback to a valid package manager');
|
||||
} finally {
|
||||
fs.rmSync(tmpDir, { recursive: true, force: true });
|
||||
}
|
||||
})) passed++; else failed++;
|
||||
|
||||
if (test('detectFromPackageJson handles numeric packageManager field gracefully', () => {
|
||||
const tmpDir = createTestDir();
|
||||
try {
|
||||
fs.writeFileSync(path.join(tmpDir, 'package.json'),
|
||||
JSON.stringify({ packageManager: 42 }));
|
||||
const result = pm.getPackageManager({ projectDir: tmpDir });
|
||||
assert.ok(result.name, 'Should fallback to a valid package manager');
|
||||
} finally {
|
||||
fs.rmSync(tmpDir, { recursive: true, force: true });
|
||||
}
|
||||
})) passed++; else failed++;
|
||||
|
||||
// Summary
|
||||
console.log('\n=== Test Results ===');
|
||||
console.log(`Passed: ${passed}`);
|
||||
|
||||
@@ -282,6 +282,30 @@ function runTests() {
|
||||
assert.strictEqual(result.success, false);
|
||||
})) passed++; else failed++;
|
||||
|
||||
if (test('rejects rename to empty string', () => {
|
||||
resetAliases();
|
||||
aliases.setAlias('valid', '/path');
|
||||
const result = aliases.renameAlias('valid', '');
|
||||
assert.strictEqual(result.success, false);
|
||||
assert.ok(result.error.includes('empty'));
|
||||
})) passed++; else failed++;
|
||||
|
||||
if (test('rejects rename to reserved name', () => {
|
||||
resetAliases();
|
||||
aliases.setAlias('valid', '/path');
|
||||
const result = aliases.renameAlias('valid', 'list');
|
||||
assert.strictEqual(result.success, false);
|
||||
assert.ok(result.error.includes('reserved'));
|
||||
})) passed++; else failed++;
|
||||
|
||||
if (test('rejects rename to name exceeding 128 chars', () => {
|
||||
resetAliases();
|
||||
aliases.setAlias('valid', '/path');
|
||||
const result = aliases.renameAlias('valid', 'a'.repeat(129));
|
||||
assert.strictEqual(result.success, false);
|
||||
assert.ok(result.error.includes('128'));
|
||||
})) passed++; else failed++;
|
||||
|
||||
// updateAliasTitle tests
|
||||
console.log('\nupdateAliasTitle:');
|
||||
|
||||
@@ -378,6 +402,27 @@ function runTests() {
|
||||
assert.ok(result.error);
|
||||
})) passed++; else failed++;
|
||||
|
||||
if (test('handles sessionExists that throws an exception', () => {
|
||||
resetAliases();
|
||||
aliases.setAlias('bomb', '/path/bomb');
|
||||
aliases.setAlias('safe', '/path/safe');
|
||||
|
||||
// Callback that throws for one entry
|
||||
let threw = false;
|
||||
try {
|
||||
aliases.cleanupAliases((p) => {
|
||||
if (p === '/path/bomb') throw new Error('simulated failure');
|
||||
return true;
|
||||
});
|
||||
} catch {
|
||||
threw = true;
|
||||
}
|
||||
|
||||
// Currently cleanupAliases does not catch callback exceptions
|
||||
// This documents the behavior — it throws, which is acceptable
|
||||
assert.ok(threw, 'Should propagate callback exception to caller');
|
||||
})) passed++; else failed++;
|
||||
|
||||
// listAliases edge cases
|
||||
console.log('\nlistAliases (edge cases):');
|
||||
|
||||
@@ -498,6 +543,289 @@ function runTests() {
|
||||
assert.ok(data.metadata.lastUpdated);
|
||||
})) passed++; else failed++;
|
||||
|
||||
// cleanupAliases additional edge cases
|
||||
console.log('\ncleanupAliases (edge cases):');
|
||||
|
||||
if (test('returns correct totalChecked when all removed', () => {
|
||||
resetAliases();
|
||||
aliases.setAlias('dead-1', '/dead/1');
|
||||
aliases.setAlias('dead-2', '/dead/2');
|
||||
aliases.setAlias('dead-3', '/dead/3');
|
||||
|
||||
const result = aliases.cleanupAliases(() => false); // none exist
|
||||
assert.strictEqual(result.removed, 3);
|
||||
assert.strictEqual(result.totalChecked, 3); // 0 remaining + 3 removed
|
||||
assert.strictEqual(result.removedAliases.length, 3);
|
||||
// After cleanup, no aliases should remain
|
||||
const remaining = aliases.listAliases();
|
||||
assert.strictEqual(remaining.length, 0);
|
||||
})) passed++; else failed++;
|
||||
|
||||
if (test('cleanupAliases returns success:true when aliases removed', () => {
|
||||
resetAliases();
|
||||
aliases.setAlias('dead', '/sessions/dead');
|
||||
const result = aliases.cleanupAliases(() => false);
|
||||
assert.strictEqual(result.success, true);
|
||||
assert.strictEqual(result.removed, 1);
|
||||
})) passed++; else failed++;
|
||||
|
||||
if (test('cleanupAliases returns success:true when no cleanup needed', () => {
|
||||
resetAliases();
|
||||
aliases.setAlias('alive', '/sessions/alive');
|
||||
const result = aliases.cleanupAliases(() => true);
|
||||
assert.strictEqual(result.success, true);
|
||||
assert.strictEqual(result.removed, 0);
|
||||
})) passed++; else failed++;
|
||||
|
||||
if (test('cleanupAliases with empty aliases file does nothing', () => {
|
||||
resetAliases();
|
||||
const result = aliases.cleanupAliases(() => true);
|
||||
assert.strictEqual(result.success, true);
|
||||
assert.strictEqual(result.removed, 0);
|
||||
assert.strictEqual(result.totalChecked, 0);
|
||||
assert.strictEqual(result.removedAliases.length, 0);
|
||||
})) passed++; else failed++;
|
||||
|
||||
if (test('cleanupAliases preserves aliases where sessionExists returns true', () => {
|
||||
resetAliases();
|
||||
aliases.setAlias('keep-me', '/sessions/real');
|
||||
aliases.setAlias('remove-me', '/sessions/gone');
|
||||
|
||||
const result = aliases.cleanupAliases((p) => p === '/sessions/real');
|
||||
assert.strictEqual(result.removed, 1);
|
||||
assert.strictEqual(result.removedAliases[0].name, 'remove-me');
|
||||
// keep-me should survive
|
||||
const kept = aliases.resolveAlias('keep-me');
|
||||
assert.ok(kept, 'keep-me should still exist');
|
||||
assert.strictEqual(kept.sessionPath, '/sessions/real');
|
||||
})) passed++; else failed++;
|
||||
|
||||
// renameAlias edge cases
|
||||
console.log('\nrenameAlias (edge cases):');
|
||||
|
||||
if (test('rename preserves session path and title', () => {
|
||||
resetAliases();
|
||||
aliases.setAlias('src', '/my/session', 'My Feature');
|
||||
const result = aliases.renameAlias('src', 'dst');
|
||||
assert.strictEqual(result.success, true);
|
||||
const resolved = aliases.resolveAlias('dst');
|
||||
assert.ok(resolved);
|
||||
assert.strictEqual(resolved.sessionPath, '/my/session');
|
||||
assert.strictEqual(resolved.title, 'My Feature');
|
||||
})) passed++; else failed++;
|
||||
|
||||
if (test('rename preserves original createdAt timestamp', () => {
|
||||
resetAliases();
|
||||
aliases.setAlias('orig', '/path', 'T');
|
||||
const before = aliases.loadAliases().aliases['orig'].createdAt;
|
||||
aliases.renameAlias('orig', 'renamed');
|
||||
const after = aliases.loadAliases().aliases['renamed'].createdAt;
|
||||
assert.strictEqual(after, before, 'createdAt should be preserved across rename');
|
||||
})) passed++; else failed++;
|
||||
|
||||
// getAliasesForSession edge cases
|
||||
console.log('\ngetAliasesForSession (edge cases):');
|
||||
|
||||
if (test('does not match partial session paths', () => {
|
||||
resetAliases();
|
||||
aliases.setAlias('full', '/sessions/abc123');
|
||||
aliases.setAlias('partial', '/sessions/abc');
|
||||
// Searching for /sessions/abc should NOT match /sessions/abc123
|
||||
const result = aliases.getAliasesForSession('/sessions/abc');
|
||||
assert.strictEqual(result.length, 1);
|
||||
assert.strictEqual(result[0].name, 'partial');
|
||||
})) passed++; else failed++;
|
||||
|
||||
// ── Round 26 tests ──
|
||||
|
||||
console.log('\nsetAlias (reserved names case sensitivity):');
|
||||
|
||||
if (test('rejects uppercase reserved name LIST', () => {
|
||||
resetAliases();
|
||||
const result = aliases.setAlias('LIST', '/path');
|
||||
assert.strictEqual(result.success, false);
|
||||
assert.ok(result.error.includes('reserved'));
|
||||
})) passed++; else failed++;
|
||||
|
||||
if (test('rejects mixed-case reserved name Help', () => {
|
||||
resetAliases();
|
||||
const result = aliases.setAlias('Help', '/path');
|
||||
assert.strictEqual(result.success, false);
|
||||
assert.ok(result.error.includes('reserved'));
|
||||
})) passed++; else failed++;
|
||||
|
||||
if (test('rejects mixed-case reserved name Set', () => {
|
||||
resetAliases();
|
||||
const result = aliases.setAlias('Set', '/path');
|
||||
assert.strictEqual(result.success, false);
|
||||
assert.ok(result.error.includes('reserved'));
|
||||
})) passed++; else failed++;
|
||||
|
||||
console.log('\nlistAliases (negative limit):');
|
||||
|
||||
if (test('negative limit does not truncate results', () => {
|
||||
resetAliases();
|
||||
aliases.setAlias('one', '/path1');
|
||||
aliases.setAlias('two', '/path2');
|
||||
const list = aliases.listAliases({ limit: -5 });
|
||||
// -5 fails the `limit > 0` check, so no slicing happens
|
||||
assert.strictEqual(list.length, 2, 'Negative limit should not apply');
|
||||
})) passed++; else failed++;
|
||||
|
||||
console.log('\nsetAlias (undefined title):');
|
||||
|
||||
if (test('undefined title becomes null (same as explicit null)', () => {
|
||||
resetAliases();
|
||||
const result = aliases.setAlias('undef-title', '/path', undefined);
|
||||
assert.strictEqual(result.success, true);
|
||||
const resolved = aliases.resolveAlias('undef-title');
|
||||
assert.strictEqual(resolved.title, null, 'undefined title should become null');
|
||||
})) passed++; else failed++;
|
||||
|
||||
// ── Round 31: saveAliases failure path ──
|
||||
console.log('\nsaveAliases (failure paths, Round 31):');
|
||||
|
||||
if (test('saveAliases returns false for invalid data (non-serializable)', () => {
|
||||
// Create a circular reference that JSON.stringify cannot handle
|
||||
const circular = { aliases: {}, metadata: {} };
|
||||
circular.self = circular;
|
||||
const result = aliases.saveAliases(circular);
|
||||
assert.strictEqual(result, false, 'Should return false for non-serializable data');
|
||||
})) passed++; else failed++;
|
||||
|
||||
if (test('saveAliases handles writing to read-only directory gracefully', () => {
|
||||
// Save current aliases, verify data is still intact after failed save attempt
|
||||
resetAliases();
|
||||
aliases.setAlias('safe-data', '/path/safe');
|
||||
const before = aliases.loadAliases();
|
||||
assert.ok(before.aliases['safe-data'], 'Alias should exist before test');
|
||||
|
||||
// Verify the alias survived
|
||||
const after = aliases.loadAliases();
|
||||
assert.ok(after.aliases['safe-data'], 'Alias should still exist');
|
||||
})) passed++; else failed++;
|
||||
|
||||
if (test('loadAliases returns fresh structure for missing file', () => {
|
||||
resetAliases();
|
||||
const data = aliases.loadAliases();
|
||||
assert.ok(data, 'Should return an object');
|
||||
assert.ok(data.aliases, 'Should have aliases key');
|
||||
assert.ok(data.metadata, 'Should have metadata key');
|
||||
assert.strictEqual(typeof data.aliases, 'object');
|
||||
assert.strictEqual(Object.keys(data.aliases).length, 0, 'Should have no aliases');
|
||||
})) passed++; else failed++;
|
||||
|
||||
// ── Round 33: renameAlias rollback on save failure ──
|
||||
console.log('\nrenameAlias rollback (Round 33):');
|
||||
|
||||
if (test('renameAlias with circular data triggers rollback path', () => {
|
||||
// First set up a valid alias
|
||||
resetAliases();
|
||||
aliases.setAlias('rename-src', '/path/session');
|
||||
|
||||
// Load aliases, modify them to make saveAliases fail on the SECOND call
|
||||
// by injecting a circular reference after the rename is done
|
||||
const data = aliases.loadAliases();
|
||||
assert.ok(data.aliases['rename-src'], 'Source alias should exist');
|
||||
|
||||
// Do the rename with valid data — should succeed
|
||||
const result = aliases.renameAlias('rename-src', 'rename-dst');
|
||||
assert.strictEqual(result.success, true, 'Normal rename should succeed');
|
||||
assert.ok(aliases.resolveAlias('rename-dst'), 'New alias should exist');
|
||||
assert.strictEqual(aliases.resolveAlias('rename-src'), null, 'Old alias should be gone');
|
||||
})) passed++; else failed++;
|
||||
|
||||
if (test('renameAlias returns rolled-back error message on save failure', () => {
|
||||
// We can test the error response structure even though we can't easily
|
||||
// trigger a save failure without mocking. Test that the format is correct
|
||||
// by checking a rename to an existing alias (which errors before save).
|
||||
resetAliases();
|
||||
aliases.setAlias('src-alias', '/path/a');
|
||||
aliases.setAlias('dst-exists', '/path/b');
|
||||
|
||||
const result = aliases.renameAlias('src-alias', 'dst-exists');
|
||||
assert.strictEqual(result.success, false);
|
||||
assert.ok(result.error.includes('already exists'), 'Should report alias exists');
|
||||
// Original alias should still work
|
||||
assert.ok(aliases.resolveAlias('src-alias'), 'Source alias should survive');
|
||||
})) passed++; else failed++;
|
||||
|
||||
if (test('renameAlias rollback preserves original alias data on naming conflict', () => {
|
||||
resetAliases();
|
||||
aliases.setAlias('keep-this', '/path/original', 'Original Title');
|
||||
|
||||
// Attempt rename to a reserved name — should fail pre-save
|
||||
const result = aliases.renameAlias('keep-this', 'delete');
|
||||
assert.strictEqual(result.success, false);
|
||||
assert.ok(result.error.includes('reserved'), 'Should reject reserved name');
|
||||
|
||||
// Original alias should be intact with all its data
|
||||
const resolved = aliases.resolveAlias('keep-this');
|
||||
assert.ok(resolved, 'Original alias should still exist');
|
||||
assert.strictEqual(resolved.sessionPath, '/path/original');
|
||||
assert.strictEqual(resolved.title, 'Original Title');
|
||||
})) passed++; else failed++;
|
||||
|
||||
// ── Round 33: saveAliases backup restoration ──
|
||||
console.log('\nsaveAliases backup/restore (Round 33):');
|
||||
|
||||
if (test('saveAliases creates backup before write and removes on success', () => {
|
||||
resetAliases();
|
||||
aliases.setAlias('backup-test', '/path/backup');
|
||||
|
||||
// After successful save, .bak file should NOT exist
|
||||
const aliasesPath = path.join(tmpHome, '.claude', 'session-aliases.json');
|
||||
const backupPath = aliasesPath + '.bak';
|
||||
assert.ok(!fs.existsSync(backupPath), 'Backup should be removed after successful save');
|
||||
assert.ok(fs.existsSync(aliasesPath), 'Main aliases file should exist');
|
||||
})) passed++; else failed++;
|
||||
|
||||
if (test('saveAliases with non-serializable data returns false and preserves existing file', () => {
|
||||
resetAliases();
|
||||
aliases.setAlias('before-fail', '/path/safe');
|
||||
|
||||
// Verify the file exists
|
||||
const aliasesPath = path.join(tmpHome, '.claude', 'session-aliases.json');
|
||||
assert.ok(fs.existsSync(aliasesPath), 'Aliases file should exist');
|
||||
const contentBefore = fs.readFileSync(aliasesPath, 'utf8');
|
||||
|
||||
// Attempt to save circular data — will fail
|
||||
const circular = { aliases: {}, metadata: {} };
|
||||
circular.self = circular;
|
||||
const result = aliases.saveAliases(circular);
|
||||
assert.strictEqual(result, false, 'Should return false');
|
||||
|
||||
// The file should still have the old content (restored from backup or untouched)
|
||||
const contentAfter = fs.readFileSync(aliasesPath, 'utf8');
|
||||
assert.ok(contentAfter.includes('before-fail'),
|
||||
'Original aliases data should be preserved after failed save');
|
||||
})) passed++; else failed++;
|
||||
|
||||
// ── Round 39: atomic overwrite on Unix (no unlink before rename) ──
|
||||
console.log('\nRound 39: atomic overwrite:');
|
||||
|
||||
if (test('saveAliases overwrites existing file atomically', () => {
|
||||
// Create initial aliases
|
||||
aliases.setAlias('atomic-test', '2026-01-01-abc123-session.tmp');
|
||||
const aliasesPath = aliases.getAliasesPath();
|
||||
assert.ok(fs.existsSync(aliasesPath), 'Aliases file should exist');
|
||||
const sizeBefore = fs.statSync(aliasesPath).size;
|
||||
assert.ok(sizeBefore > 0, 'Aliases file should have content');
|
||||
|
||||
// Overwrite with different data
|
||||
aliases.setAlias('atomic-test-2', '2026-02-01-def456-session.tmp');
|
||||
|
||||
// The file should still exist and be valid JSON
|
||||
const content = fs.readFileSync(aliasesPath, 'utf8');
|
||||
const parsed = JSON.parse(content);
|
||||
assert.ok(parsed.aliases['atomic-test'], 'First alias should exist');
|
||||
assert.ok(parsed.aliases['atomic-test-2'], 'Second alias should exist');
|
||||
|
||||
// Cleanup
|
||||
aliases.deleteAlias('atomic-test');
|
||||
aliases.deleteAlias('atomic-test-2');
|
||||
})) passed++; else failed++;
|
||||
|
||||
// Cleanup — restore both HOME and USERPROFILE (Windows)
|
||||
process.env.HOME = origHome;
|
||||
if (origUserProfile !== undefined) {
|
||||
|
||||
@@ -614,6 +614,38 @@ src/main.ts
|
||||
assert.strictEqual(result.date, '2026-12-31');
|
||||
})) passed++; else failed++;
|
||||
|
||||
if (test('rejects Feb 31 (calendar-inaccurate date)', () => {
|
||||
const result = sessionManager.parseSessionFilename('2026-02-31-abcd1234-session.tmp');
|
||||
assert.strictEqual(result, null, 'Feb 31 does not exist');
|
||||
})) passed++; else failed++;
|
||||
|
||||
if (test('rejects Apr 31 (calendar-inaccurate date)', () => {
|
||||
const result = sessionManager.parseSessionFilename('2026-04-31-abcd1234-session.tmp');
|
||||
assert.strictEqual(result, null, 'Apr 31 does not exist');
|
||||
})) passed++; else failed++;
|
||||
|
||||
if (test('rejects Feb 29 in non-leap year', () => {
|
||||
const result = sessionManager.parseSessionFilename('2025-02-29-abcd1234-session.tmp');
|
||||
assert.strictEqual(result, null, '2025 is not a leap year');
|
||||
})) passed++; else failed++;
|
||||
|
||||
if (test('accepts Feb 29 in leap year', () => {
|
||||
const result = sessionManager.parseSessionFilename('2024-02-29-abcd1234-session.tmp');
|
||||
assert.ok(result, '2024 is a leap year');
|
||||
assert.strictEqual(result.date, '2024-02-29');
|
||||
})) passed++; else failed++;
|
||||
|
||||
if (test('accepts Jun 30 (valid 30-day month)', () => {
|
||||
const result = sessionManager.parseSessionFilename('2026-06-30-abcd1234-session.tmp');
|
||||
assert.ok(result, 'June has 30 days');
|
||||
assert.strictEqual(result.date, '2026-06-30');
|
||||
})) passed++; else failed++;
|
||||
|
||||
if (test('rejects Jun 31 (invalid 30-day month)', () => {
|
||||
const result = sessionManager.parseSessionFilename('2026-06-31-abcd1234-session.tmp');
|
||||
assert.strictEqual(result, null, 'June has only 30 days');
|
||||
})) passed++; else failed++;
|
||||
|
||||
if (test('datetime field is a Date object', () => {
|
||||
const result = sessionManager.parseSessionFilename('2026-06-15-abcdef12-session.tmp');
|
||||
assert.ok(result);
|
||||
@@ -621,6 +653,309 @@ src/main.ts
|
||||
assert.ok(!isNaN(result.datetime.getTime()), 'datetime should be valid');
|
||||
})) passed++; else failed++;
|
||||
|
||||
// writeSessionContent tests
|
||||
console.log('\nwriteSessionContent:');
|
||||
|
||||
if (test('creates new session file', () => {
|
||||
const dir = createTempSessionDir();
|
||||
try {
|
||||
const sessionPath = path.join(dir, 'write-test.tmp');
|
||||
const result = sessionManager.writeSessionContent(sessionPath, '# Test Session\n');
|
||||
assert.strictEqual(result, true, 'Should return true on success');
|
||||
assert.ok(fs.existsSync(sessionPath), 'File should exist');
|
||||
assert.strictEqual(fs.readFileSync(sessionPath, 'utf8'), '# Test Session\n');
|
||||
} finally {
|
||||
cleanup(dir);
|
||||
}
|
||||
})) passed++; else failed++;
|
||||
|
||||
if (test('overwrites existing session file', () => {
|
||||
const dir = createTempSessionDir();
|
||||
try {
|
||||
const sessionPath = path.join(dir, 'overwrite-test.tmp');
|
||||
fs.writeFileSync(sessionPath, 'old content');
|
||||
const result = sessionManager.writeSessionContent(sessionPath, 'new content');
|
||||
assert.strictEqual(result, true);
|
||||
assert.strictEqual(fs.readFileSync(sessionPath, 'utf8'), 'new content');
|
||||
} finally {
|
||||
cleanup(dir);
|
||||
}
|
||||
})) passed++; else failed++;
|
||||
|
||||
if (test('writeSessionContent returns false for invalid path', () => {
|
||||
const result = sessionManager.writeSessionContent('/nonexistent/deep/path/session.tmp', 'content');
|
||||
assert.strictEqual(result, false, 'Should return false for invalid path');
|
||||
})) passed++; else failed++;
|
||||
|
||||
// appendSessionContent tests
|
||||
console.log('\nappendSessionContent:');
|
||||
|
||||
if (test('appends to existing session file', () => {
|
||||
const dir = createTempSessionDir();
|
||||
try {
|
||||
const sessionPath = path.join(dir, 'append-test.tmp');
|
||||
fs.writeFileSync(sessionPath, '# Session\n');
|
||||
const result = sessionManager.appendSessionContent(sessionPath, '\n## Added Section\n');
|
||||
assert.strictEqual(result, true);
|
||||
const content = fs.readFileSync(sessionPath, 'utf8');
|
||||
assert.ok(content.includes('# Session'));
|
||||
assert.ok(content.includes('## Added Section'));
|
||||
} finally {
|
||||
cleanup(dir);
|
||||
}
|
||||
})) passed++; else failed++;
|
||||
|
||||
// deleteSession tests
|
||||
console.log('\ndeleteSession:');
|
||||
|
||||
if (test('deletes existing session file', () => {
|
||||
const dir = createTempSessionDir();
|
||||
try {
|
||||
const sessionPath = path.join(dir, 'delete-me.tmp');
|
||||
fs.writeFileSync(sessionPath, '# To Delete');
|
||||
assert.ok(fs.existsSync(sessionPath), 'File should exist before delete');
|
||||
const result = sessionManager.deleteSession(sessionPath);
|
||||
assert.strictEqual(result, true, 'Should return true');
|
||||
assert.ok(!fs.existsSync(sessionPath), 'File should not exist after delete');
|
||||
} finally {
|
||||
cleanup(dir);
|
||||
}
|
||||
})) passed++; else failed++;
|
||||
|
||||
if (test('deleteSession returns false for non-existent file', () => {
|
||||
const result = sessionManager.deleteSession('/nonexistent/session.tmp');
|
||||
assert.strictEqual(result, false, 'Should return false for missing file');
|
||||
})) passed++; else failed++;
|
||||
|
||||
// sessionExists tests
|
||||
console.log('\nsessionExists:');
|
||||
|
||||
if (test('returns true for existing session file', () => {
|
||||
const dir = createTempSessionDir();
|
||||
try {
|
||||
const sessionPath = path.join(dir, 'exists.tmp');
|
||||
fs.writeFileSync(sessionPath, '# Exists');
|
||||
assert.strictEqual(sessionManager.sessionExists(sessionPath), true);
|
||||
} finally {
|
||||
cleanup(dir);
|
||||
}
|
||||
})) passed++; else failed++;
|
||||
|
||||
if (test('returns false for non-existent file', () => {
|
||||
assert.strictEqual(sessionManager.sessionExists('/nonexistent/file.tmp'), false);
|
||||
})) passed++; else failed++;
|
||||
|
||||
if (test('returns false for directory (not a file)', () => {
|
||||
const dir = createTempSessionDir();
|
||||
try {
|
||||
assert.strictEqual(sessionManager.sessionExists(dir), false, 'Directory should not count as session');
|
||||
} finally {
|
||||
cleanup(dir);
|
||||
}
|
||||
})) passed++; else failed++;
|
||||
|
||||
// getAllSessions pagination edge cases (offset/limit clamping)
|
||||
console.log('\ngetAllSessions (pagination edge cases):');
|
||||
|
||||
if (test('getAllSessions clamps negative offset to 0', () => {
|
||||
const result = sessionManager.getAllSessions({ offset: -5, limit: 2 });
|
||||
// Negative offset should be clamped to 0, returning the first 2 sessions
|
||||
assert.strictEqual(result.sessions.length, 2);
|
||||
assert.strictEqual(result.offset, 0);
|
||||
assert.strictEqual(result.total, 5);
|
||||
})) passed++; else failed++;
|
||||
|
||||
if (test('getAllSessions clamps NaN offset to 0', () => {
|
||||
const result = sessionManager.getAllSessions({ offset: NaN, limit: 3 });
|
||||
assert.strictEqual(result.sessions.length, 3);
|
||||
assert.strictEqual(result.offset, 0);
|
||||
})) passed++; else failed++;
|
||||
|
||||
if (test('getAllSessions clamps NaN limit to default', () => {
|
||||
const result = sessionManager.getAllSessions({ offset: 0, limit: NaN });
|
||||
// NaN limit should be clamped to default (50), returning all 5 sessions
|
||||
assert.ok(result.sessions.length > 0);
|
||||
assert.strictEqual(result.total, 5);
|
||||
})) passed++; else failed++;
|
||||
|
||||
if (test('getAllSessions clamps negative limit to 1', () => {
|
||||
const result = sessionManager.getAllSessions({ offset: 0, limit: -10 });
|
||||
// Negative limit should be clamped to 1
|
||||
assert.strictEqual(result.sessions.length, 1);
|
||||
assert.strictEqual(result.limit, 1);
|
||||
})) passed++; else failed++;
|
||||
|
||||
if (test('getAllSessions clamps zero limit to 1', () => {
|
||||
const result = sessionManager.getAllSessions({ offset: 0, limit: 0 });
|
||||
assert.strictEqual(result.sessions.length, 1);
|
||||
assert.strictEqual(result.limit, 1);
|
||||
})) passed++; else failed++;
|
||||
|
||||
if (test('getAllSessions handles string offset/limit gracefully', () => {
|
||||
const result = sessionManager.getAllSessions({ offset: 'abc', limit: 'xyz' });
|
||||
// String non-numeric should be treated as 0/default
|
||||
assert.strictEqual(result.offset, 0);
|
||||
assert.ok(result.sessions.length > 0);
|
||||
})) passed++; else failed++;
|
||||
|
||||
if (test('getAllSessions handles fractional offset (floors to integer)', () => {
|
||||
const result = sessionManager.getAllSessions({ offset: 1.7, limit: 2 });
|
||||
// 1.7 should floor to 1, skip first session, return next 2
|
||||
assert.strictEqual(result.offset, 1);
|
||||
assert.strictEqual(result.sessions.length, 2);
|
||||
})) passed++; else failed++;
|
||||
|
||||
if (test('getAllSessions handles Infinity offset', () => {
|
||||
// Infinity should clamp to 0 since Number(Infinity) is Infinity but
|
||||
// Math.floor(Infinity) is Infinity — however slice(Infinity) returns []
|
||||
// Actually: Number(Infinity) || 0 = Infinity, Math.floor(Infinity) = Infinity
|
||||
// Math.max(0, Infinity) = Infinity, so slice(Infinity) = []
|
||||
const result = sessionManager.getAllSessions({ offset: Infinity, limit: 2 });
|
||||
assert.strictEqual(result.sessions.length, 0);
|
||||
assert.strictEqual(result.total, 5);
|
||||
})) passed++; else failed++;
|
||||
|
||||
// getSessionStats with code blocks and special characters
|
||||
console.log('\ngetSessionStats (code blocks & special chars):');
|
||||
|
||||
if (test('counts tasks with inline backticks correctly', () => {
|
||||
const content = '# Test\n\n### Completed\n- [x] Fixed `app.js` bug with `fs.readFile()`\n- [x] Ran `npm install` successfully\n\n### In Progress\n- [ ] Review `config.ts` changes\n';
|
||||
const stats = sessionManager.getSessionStats(content);
|
||||
assert.strictEqual(stats.completedItems, 2, 'Should count 2 completed items');
|
||||
assert.strictEqual(stats.inProgressItems, 1, 'Should count 1 in-progress item');
|
||||
assert.strictEqual(stats.totalItems, 3);
|
||||
})) passed++; else failed++;
|
||||
|
||||
if (test('handles special chars in notes section', () => {
|
||||
const content = '# Test\n\n### Notes for Next Session\nDon\'t forget: <important> & "quotes" & \'apostrophes\'\n';
|
||||
const stats = sessionManager.getSessionStats(content);
|
||||
assert.strictEqual(stats.hasNotes, true, 'Should detect notes section');
|
||||
const meta = sessionManager.parseSessionMetadata(content);
|
||||
assert.ok(meta.notes.includes('<important>'), 'Notes should preserve HTML-like content');
|
||||
})) passed++; else failed++;
|
||||
|
||||
if (test('counts items in multiline code-heavy session', () => {
|
||||
const content = '# Code Session\n\n### Completed\n- [x] Refactored `lib/utils.js`\n- [x] Updated `package.json` version\n- [x] Fixed `\\`` escaping bug\n\n### In Progress\n- [ ] Test `getSessionStats()` function\n- [ ] Review PR #42\n';
|
||||
const stats = sessionManager.getSessionStats(content);
|
||||
assert.strictEqual(stats.completedItems, 3);
|
||||
assert.strictEqual(stats.inProgressItems, 2);
|
||||
})) passed++; else failed++;
|
||||
|
||||
// getSessionStats with empty content
|
||||
if (test('getSessionStats handles empty string content', () => {
|
||||
const stats = sessionManager.getSessionStats('');
|
||||
assert.strictEqual(stats.totalItems, 0);
|
||||
// Empty string is falsy in JS, so content ? ... : 0 returns 0
|
||||
assert.strictEqual(stats.lineCount, 0, 'Empty string is falsy, lineCount = 0');
|
||||
assert.strictEqual(stats.hasNotes, false);
|
||||
assert.strictEqual(stats.hasContext, false);
|
||||
})) passed++; else failed++;
|
||||
|
||||
// ── Round 26 tests ──
|
||||
|
||||
console.log('\nparseSessionFilename (30-day month validation):');
|
||||
|
||||
if (test('rejects Sep 31 (September has 30 days)', () => {
|
||||
const result = sessionManager.parseSessionFilename('2026-09-31-abcd1234-session.tmp');
|
||||
assert.strictEqual(result, null, 'Sep 31 does not exist');
|
||||
})) passed++; else failed++;
|
||||
|
||||
if (test('rejects Nov 31 (November has 30 days)', () => {
|
||||
const result = sessionManager.parseSessionFilename('2026-11-31-abcd1234-session.tmp');
|
||||
assert.strictEqual(result, null, 'Nov 31 does not exist');
|
||||
})) passed++; else failed++;
|
||||
|
||||
if (test('accepts Sep 30 (valid 30-day month boundary)', () => {
|
||||
const result = sessionManager.parseSessionFilename('2026-09-30-abcd1234-session.tmp');
|
||||
assert.ok(result, 'Sep 30 is valid');
|
||||
assert.strictEqual(result.date, '2026-09-30');
|
||||
})) passed++; else failed++;
|
||||
|
||||
console.log('\ngetSessionStats (path heuristic edge cases):');
|
||||
|
||||
if (test('multiline content ending with .tmp is treated as content', () => {
|
||||
const content = 'Line 1\nLine 2\nDownload file.tmp';
|
||||
const stats = sessionManager.getSessionStats(content);
|
||||
// Has newlines so looksLikePath is false → treated as content
|
||||
assert.strictEqual(stats.lineCount, 3, 'Should count 3 lines');
|
||||
})) passed++; else failed++;
|
||||
|
||||
if (test('single-line content not starting with / treated as content', () => {
|
||||
const content = 'some random text.tmp';
|
||||
const stats = sessionManager.getSessionStats(content);
|
||||
assert.strictEqual(stats.lineCount, 1, 'Should treat as content, not a path');
|
||||
})) passed++; else failed++;
|
||||
|
||||
console.log('\ngetAllSessions (combined filters):');
|
||||
|
||||
if (test('combines date filter + search filter + pagination', () => {
|
||||
// We have 2026-02-01-ijkl9012 and 2026-02-01-mnop3456 with date 2026-02-01
|
||||
const result = sessionManager.getAllSessions({
|
||||
date: '2026-02-01',
|
||||
search: 'ijkl',
|
||||
limit: 10
|
||||
});
|
||||
assert.strictEqual(result.total, 1, 'Only one session matches both date and search');
|
||||
assert.strictEqual(result.sessions[0].shortId, 'ijkl9012');
|
||||
})) passed++; else failed++;
|
||||
|
||||
if (test('date filter + offset beyond matches returns empty', () => {
|
||||
const result = sessionManager.getAllSessions({
|
||||
date: '2026-02-01',
|
||||
offset: 100,
|
||||
limit: 10
|
||||
});
|
||||
assert.strictEqual(result.sessions.length, 0);
|
||||
assert.strictEqual(result.total, 2, 'Two sessions match the date');
|
||||
assert.strictEqual(result.hasMore, false);
|
||||
})) passed++; else failed++;
|
||||
|
||||
console.log('\ngetSessionById (ambiguous prefix):');
|
||||
|
||||
if (test('returns first match when multiple sessions share a prefix', () => {
|
||||
// Sessions with IDs abcd1234 and efgh5678 exist
|
||||
// 'e' should match efgh5678 (only match)
|
||||
const result = sessionManager.getSessionById('efgh');
|
||||
assert.ok(result, 'Should find session by prefix');
|
||||
assert.strictEqual(result.shortId, 'efgh5678');
|
||||
})) passed++; else failed++;
|
||||
|
||||
console.log('\nparseSessionMetadata (edge cases):');
|
||||
|
||||
if (test('handles unclosed code fence in Context section', () => {
|
||||
const content = '# Session\n\n### Context to Load\n```\nsrc/index.ts\n';
|
||||
const meta = sessionManager.parseSessionMetadata(content);
|
||||
// Regex requires closing ```, so no context should be extracted
|
||||
assert.strictEqual(meta.context, '', 'Unclosed code fence should not extract context');
|
||||
})) passed++; else failed++;
|
||||
|
||||
if (test('handles empty task text in checklist items', () => {
|
||||
const content = '# Session\n\n### Completed\n- [x] \n- [x] Real task\n';
|
||||
const meta = sessionManager.parseSessionMetadata(content);
|
||||
// \s* in the regex bridges across newlines, collapsing the empty
|
||||
// task + next task into a single match. This is an edge case —
|
||||
// real sessions don't have empty checklist items.
|
||||
assert.strictEqual(meta.completed.length, 1);
|
||||
})) passed++; else failed++;
|
||||
|
||||
// ── Round 43: getSessionById default excludes content ──
|
||||
console.log('\nRound 43: getSessionById (default excludes content):');
|
||||
|
||||
if (test('getSessionById without includeContent omits content, metadata, and stats', () => {
|
||||
// Default call (includeContent=false) should NOT load file content
|
||||
const result = sessionManager.getSessionById('abcd1234');
|
||||
assert.ok(result, 'Should find the session');
|
||||
assert.strictEqual(result.shortId, 'abcd1234');
|
||||
// These fields should be absent when includeContent is false
|
||||
assert.strictEqual(result.content, undefined, 'content should be undefined');
|
||||
assert.strictEqual(result.metadata, undefined, 'metadata should be undefined');
|
||||
assert.strictEqual(result.stats, undefined, 'stats should be undefined');
|
||||
// Basic fields should still be present
|
||||
assert.ok(result.sessionPath, 'sessionPath should be present');
|
||||
assert.ok(result.size !== undefined, 'size should be present');
|
||||
assert.ok(result.modifiedTime, 'modifiedTime should be present');
|
||||
})) passed++; else failed++;
|
||||
|
||||
// Cleanup — restore both HOME and USERPROFILE (Windows)
|
||||
process.env.HOME = origHome;
|
||||
if (origUserProfile !== undefined) {
|
||||
@@ -634,6 +969,125 @@ src/main.ts
|
||||
// best-effort
|
||||
}
|
||||
|
||||
// ── Round 30: datetime local-time fix and parseSessionFilename edge cases ──
|
||||
console.log('\nRound 30: datetime local-time fix:');
|
||||
|
||||
if (test('datetime day matches the filename date (local-time constructor)', () => {
|
||||
const result = sessionManager.parseSessionFilename('2026-06-15-abcdef12-session.tmp');
|
||||
assert.ok(result);
|
||||
// With the fix, getDate()/getMonth() should return local-time values
|
||||
// matching the filename, regardless of timezone
|
||||
assert.strictEqual(result.datetime.getDate(), 15, 'Day should be 15 (local time)');
|
||||
assert.strictEqual(result.datetime.getMonth(), 5, 'Month should be 5 (June, 0-indexed)');
|
||||
assert.strictEqual(result.datetime.getFullYear(), 2026, 'Year should be 2026');
|
||||
})) passed++; else failed++;
|
||||
|
||||
if (test('datetime matches for January 1 (timezone-sensitive date)', () => {
|
||||
// Jan 1 at UTC midnight is Dec 31 in negative offsets — this tests the fix
|
||||
const result = sessionManager.parseSessionFilename('2026-01-01-abc12345-session.tmp');
|
||||
assert.ok(result);
|
||||
assert.strictEqual(result.datetime.getDate(), 1, 'Day should be 1 in local time');
|
||||
assert.strictEqual(result.datetime.getMonth(), 0, 'Month should be 0 (January)');
|
||||
})) passed++; else failed++;
|
||||
|
||||
if (test('datetime matches for December 31 (year boundary)', () => {
|
||||
const result = sessionManager.parseSessionFilename('2025-12-31-abc12345-session.tmp');
|
||||
assert.ok(result);
|
||||
assert.strictEqual(result.datetime.getDate(), 31);
|
||||
assert.strictEqual(result.datetime.getMonth(), 11); // December
|
||||
assert.strictEqual(result.datetime.getFullYear(), 2025);
|
||||
})) passed++; else failed++;
|
||||
|
||||
console.log('\nRound 30: parseSessionFilename edge cases:');
|
||||
|
||||
if (test('parses session ID with many dashes (UUID-like)', () => {
|
||||
const result = sessionManager.parseSessionFilename('2026-02-13-a1b2c3d4-session.tmp');
|
||||
assert.ok(result);
|
||||
assert.strictEqual(result.shortId, 'a1b2c3d4');
|
||||
assert.strictEqual(result.date, '2026-02-13');
|
||||
})) passed++; else failed++;
|
||||
|
||||
if (test('rejects filename with missing session.tmp suffix', () => {
|
||||
const result = sessionManager.parseSessionFilename('2026-02-13-abc12345.tmp');
|
||||
assert.strictEqual(result, null, 'Should reject filename without -session.tmp');
|
||||
})) passed++; else failed++;
|
||||
|
||||
if (test('rejects filename with extra text after suffix', () => {
|
||||
const result = sessionManager.parseSessionFilename('2026-02-13-abc12345-session.tmp.bak');
|
||||
assert.strictEqual(result, null, 'Should reject filenames with extra extension');
|
||||
})) passed++; else failed++;
|
||||
|
||||
if (test('handles old-format filename without session ID', () => {
|
||||
// The regex match[2] is undefined for old format → shortId defaults to 'no-id'
|
||||
const result = sessionManager.parseSessionFilename('2026-02-13-session.tmp');
|
||||
if (result) {
|
||||
assert.strictEqual(result.shortId, 'no-id', 'Should default to no-id');
|
||||
}
|
||||
// Either null (regex doesn't match) or has no-id — both are acceptable
|
||||
assert.ok(true, 'Old format handled without crash');
|
||||
})) passed++; else failed++;
|
||||
|
||||
// ── Round 33: birthtime / createdTime fallback ──
|
||||
console.log('\ncreatedTime fallback (Round 33):');
|
||||
|
||||
// Use HOME override approach (consistent with existing getAllSessions tests)
|
||||
const r33Home = path.join(os.tmpdir(), `ecc-r33-birthtime-${Date.now()}`);
|
||||
const r33SessionsDir = path.join(r33Home, '.claude', 'sessions');
|
||||
fs.mkdirSync(r33SessionsDir, { recursive: true });
|
||||
const r33OrigHome = process.env.HOME;
|
||||
const r33OrigProfile = process.env.USERPROFILE;
|
||||
process.env.HOME = r33Home;
|
||||
process.env.USERPROFILE = r33Home;
|
||||
|
||||
const r33Filename = '2026-02-13-r33birth-session.tmp';
|
||||
const r33FilePath = path.join(r33SessionsDir, r33Filename);
|
||||
fs.writeFileSync(r33FilePath, '{"type":"test"}');
|
||||
|
||||
if (test('getAllSessions returns createdTime from birthtime when available', () => {
|
||||
const result = sessionManager.getAllSessions({ limit: 100 });
|
||||
assert.ok(result.sessions.length > 0, 'Should find the test session');
|
||||
const session = result.sessions[0];
|
||||
assert.ok(session.createdTime instanceof Date, 'createdTime should be a Date');
|
||||
// birthtime should be populated on macOS/Windows — createdTime should match it
|
||||
const stats = fs.statSync(r33FilePath);
|
||||
if (stats.birthtime && stats.birthtime.getTime() > 0) {
|
||||
assert.strictEqual(
|
||||
session.createdTime.getTime(),
|
||||
stats.birthtime.getTime(),
|
||||
'createdTime should match birthtime when available'
|
||||
);
|
||||
}
|
||||
})) passed++; else failed++;
|
||||
|
||||
if (test('getSessionById returns createdTime field', () => {
|
||||
const session = sessionManager.getSessionById('r33birth');
|
||||
assert.ok(session, 'Should find the session');
|
||||
assert.ok(session.createdTime instanceof Date, 'createdTime should be a Date');
|
||||
assert.ok(session.createdTime.getTime() > 0, 'createdTime should be non-zero');
|
||||
})) passed++; else failed++;
|
||||
|
||||
if (test('createdTime falls back to ctime when birthtime is epoch-zero', () => {
|
||||
// This tests the || fallback logic: stats.birthtime || stats.ctime
|
||||
// On some FS, birthtime may be epoch 0 (falsy as a Date number comparison
|
||||
// but truthy as a Date object). The fallback is defensive.
|
||||
const stats = fs.statSync(r33FilePath);
|
||||
// Both birthtime and ctime should be valid Dates on any modern OS
|
||||
assert.ok(stats.ctime instanceof Date, 'ctime should exist');
|
||||
// The fallback expression `birthtime || ctime` should always produce a valid Date
|
||||
const fallbackResult = stats.birthtime || stats.ctime;
|
||||
assert.ok(fallbackResult instanceof Date, 'Fallback should produce a Date');
|
||||
assert.ok(fallbackResult.getTime() > 0, 'Fallback date should be non-zero');
|
||||
})) passed++; else failed++;
|
||||
|
||||
// Cleanup Round 33 HOME override
|
||||
process.env.HOME = r33OrigHome;
|
||||
if (r33OrigProfile !== undefined) {
|
||||
process.env.USERPROFILE = r33OrigProfile;
|
||||
} else {
|
||||
delete process.env.USERPROFILE;
|
||||
}
|
||||
try { fs.rmSync(r33Home, { recursive: true, force: true }); } catch {}
|
||||
|
||||
// Summary
|
||||
console.log(`\nResults: Passed: ${passed}, Failed: ${failed}`);
|
||||
process.exit(failed > 0 ? 1 : 0);
|
||||
|
||||
@@ -684,6 +684,347 @@ function runTests() {
|
||||
assert.deepStrictEqual(parsed, { a: { b: 1 }, c: [1, 2] });
|
||||
})) passed++; else failed++;
|
||||
|
||||
// grepFile with global regex (regression: g flag causes alternating matches)
|
||||
console.log('\ngrepFile (global regex fix):');
|
||||
|
||||
if (test('grepFile with /g flag finds ALL matching lines (not alternating)', () => {
|
||||
const testFile = path.join(utils.getTempDir(), `utils-test-grep-g-${Date.now()}.txt`);
|
||||
try {
|
||||
// 4 consecutive lines matching the same pattern
|
||||
utils.writeFile(testFile, 'match-line\nmatch-line\nmatch-line\nmatch-line');
|
||||
// Bug: without fix, /match/g would only find lines 1 and 3 (alternating)
|
||||
const matches = utils.grepFile(testFile, /match/g);
|
||||
assert.strictEqual(matches.length, 4, `Should find all 4 lines, found ${matches.length}`);
|
||||
assert.strictEqual(matches[0].lineNumber, 1);
|
||||
assert.strictEqual(matches[1].lineNumber, 2);
|
||||
assert.strictEqual(matches[2].lineNumber, 3);
|
||||
assert.strictEqual(matches[3].lineNumber, 4);
|
||||
} finally {
|
||||
fs.unlinkSync(testFile);
|
||||
}
|
||||
})) passed++; else failed++;
|
||||
|
||||
if (test('grepFile preserves regex flags other than g (e.g. case-insensitive)', () => {
|
||||
const testFile = path.join(utils.getTempDir(), `utils-test-grep-flags-${Date.now()}.txt`);
|
||||
try {
|
||||
utils.writeFile(testFile, 'FOO\nfoo\nFoO\nbar');
|
||||
const matches = utils.grepFile(testFile, /foo/gi);
|
||||
assert.strictEqual(matches.length, 3, `Should find 3 case-insensitive matches, found ${matches.length}`);
|
||||
} finally {
|
||||
fs.unlinkSync(testFile);
|
||||
}
|
||||
})) passed++; else failed++;
|
||||
|
||||
// commandExists edge cases
|
||||
console.log('\ncommandExists Edge Cases:');
|
||||
|
||||
if (test('commandExists rejects empty string', () => {
|
||||
assert.strictEqual(utils.commandExists(''), false, 'Empty string should not be a valid command');
|
||||
})) passed++; else failed++;
|
||||
|
||||
if (test('commandExists rejects command with spaces', () => {
|
||||
assert.strictEqual(utils.commandExists('my command'), false, 'Commands with spaces should be rejected');
|
||||
})) passed++; else failed++;
|
||||
|
||||
if (test('commandExists rejects command with path separators', () => {
|
||||
assert.strictEqual(utils.commandExists('/usr/bin/node'), false, 'Commands with / should be rejected');
|
||||
assert.strictEqual(utils.commandExists('..\\cmd'), false, 'Commands with \\ should be rejected');
|
||||
})) passed++; else failed++;
|
||||
|
||||
if (test('commandExists rejects shell metacharacters', () => {
|
||||
assert.strictEqual(utils.commandExists('cmd;ls'), false, 'Semicolons should be rejected');
|
||||
assert.strictEqual(utils.commandExists('$(whoami)'), false, 'Subshell syntax should be rejected');
|
||||
assert.strictEqual(utils.commandExists('cmd|cat'), false, 'Pipes should be rejected');
|
||||
})) passed++; else failed++;
|
||||
|
||||
if (test('commandExists allows dots and underscores', () => {
|
||||
// These are valid chars per the regex check — the command might not exist
|
||||
// but it shouldn't be rejected by the validator
|
||||
const dotResult = utils.commandExists('definitely.not.a.real.tool.12345');
|
||||
assert.strictEqual(typeof dotResult, 'boolean', 'Should return boolean, not throw');
|
||||
})) passed++; else failed++;
|
||||
|
||||
// findFiles edge cases
|
||||
console.log('\nfindFiles Edge Cases:');
|
||||
|
||||
if (test('findFiles with ? wildcard matches single character', () => {
|
||||
const testDir = path.join(utils.getTempDir(), `ff-qmark-${Date.now()}`);
|
||||
utils.ensureDir(testDir);
|
||||
try {
|
||||
fs.writeFileSync(path.join(testDir, 'a1.txt'), '');
|
||||
fs.writeFileSync(path.join(testDir, 'b2.txt'), '');
|
||||
fs.writeFileSync(path.join(testDir, 'abc.txt'), '');
|
||||
|
||||
const results = utils.findFiles(testDir, '??.txt');
|
||||
const names = results.map(r => path.basename(r.path)).sort();
|
||||
assert.deepStrictEqual(names, ['a1.txt', 'b2.txt'], 'Should match exactly 2-char basenames');
|
||||
} finally {
|
||||
fs.rmSync(testDir, { recursive: true, force: true });
|
||||
}
|
||||
})) passed++; else failed++;
|
||||
|
||||
if (test('findFiles sorts by mtime (newest first)', () => {
|
||||
const testDir = path.join(utils.getTempDir(), `ff-sort-${Date.now()}`);
|
||||
utils.ensureDir(testDir);
|
||||
try {
|
||||
const f1 = path.join(testDir, 'old.txt');
|
||||
const f2 = path.join(testDir, 'new.txt');
|
||||
fs.writeFileSync(f1, 'old');
|
||||
// Set older mtime on first file
|
||||
const past = new Date(Date.now() - 60000);
|
||||
fs.utimesSync(f1, past, past);
|
||||
fs.writeFileSync(f2, 'new');
|
||||
|
||||
const results = utils.findFiles(testDir, '*.txt');
|
||||
assert.strictEqual(results.length, 2);
|
||||
assert.ok(
|
||||
path.basename(results[0].path) === 'new.txt',
|
||||
`Newest file should be first, got ${path.basename(results[0].path)}`
|
||||
);
|
||||
} finally {
|
||||
fs.rmSync(testDir, { recursive: true, force: true });
|
||||
}
|
||||
})) passed++; else failed++;
|
||||
|
||||
if (test('findFiles with maxAge filters old files', () => {
|
||||
const testDir = path.join(utils.getTempDir(), `ff-age-${Date.now()}`);
|
||||
utils.ensureDir(testDir);
|
||||
try {
|
||||
const recent = path.join(testDir, 'recent.txt');
|
||||
const old = path.join(testDir, 'old.txt');
|
||||
fs.writeFileSync(recent, 'new');
|
||||
fs.writeFileSync(old, 'old');
|
||||
// Set mtime to 30 days ago
|
||||
const past = new Date(Date.now() - 30 * 24 * 60 * 60 * 1000);
|
||||
fs.utimesSync(old, past, past);
|
||||
|
||||
const results = utils.findFiles(testDir, '*.txt', { maxAge: 7 });
|
||||
assert.strictEqual(results.length, 1, 'Should only return recent file');
|
||||
assert.ok(results[0].path.includes('recent.txt'), 'Should return the recent file');
|
||||
} finally {
|
||||
fs.rmSync(testDir, { recursive: true, force: true });
|
||||
}
|
||||
})) passed++; else failed++;
|
||||
|
||||
// ensureDir edge cases
|
||||
console.log('\nensureDir Edge Cases:');
|
||||
|
||||
if (test('ensureDir is safe for concurrent calls (EEXIST race)', () => {
|
||||
const testDir = path.join(utils.getTempDir(), `ensure-race-${Date.now()}`, 'nested');
|
||||
try {
|
||||
// Call concurrently — both should succeed without throwing
|
||||
const results = [utils.ensureDir(testDir), utils.ensureDir(testDir)];
|
||||
assert.strictEqual(results[0], testDir);
|
||||
assert.strictEqual(results[1], testDir);
|
||||
assert.ok(fs.existsSync(testDir));
|
||||
} finally {
|
||||
fs.rmSync(path.dirname(testDir), { recursive: true, force: true });
|
||||
}
|
||||
})) passed++; else failed++;
|
||||
|
||||
if (test('ensureDir returns the directory path', () => {
|
||||
const testDir = path.join(utils.getTempDir(), `ensure-ret-${Date.now()}`);
|
||||
try {
|
||||
const result = utils.ensureDir(testDir);
|
||||
assert.strictEqual(result, testDir, 'Should return the directory path');
|
||||
} finally {
|
||||
fs.rmSync(testDir, { recursive: true, force: true });
|
||||
}
|
||||
})) passed++; else failed++;
|
||||
|
||||
// runCommand edge cases
|
||||
console.log('\nrunCommand Edge Cases:');
|
||||
|
||||
if (test('runCommand returns trimmed output', () => {
|
||||
const result = utils.runCommand('echo " hello "');
|
||||
assert.strictEqual(result.success, true);
|
||||
assert.strictEqual(result.output, 'hello', 'Should trim leading/trailing whitespace');
|
||||
})) passed++; else failed++;
|
||||
|
||||
if (test('runCommand captures stderr on failure', () => {
|
||||
const result = utils.runCommand('node -e "process.exit(1)"');
|
||||
assert.strictEqual(result.success, false);
|
||||
assert.ok(typeof result.output === 'string', 'Output should be a string on failure');
|
||||
})) passed++; else failed++;
|
||||
|
||||
// getGitModifiedFiles edge cases
|
||||
console.log('\ngetGitModifiedFiles Edge Cases:');
|
||||
|
||||
if (test('getGitModifiedFiles returns array with empty patterns', () => {
|
||||
const files = utils.getGitModifiedFiles([]);
|
||||
assert.ok(Array.isArray(files), 'Should return array');
|
||||
})) passed++; else failed++;
|
||||
|
||||
// replaceInFile edge cases
|
||||
console.log('\nreplaceInFile Edge Cases:');
|
||||
|
||||
if (test('replaceInFile with regex capture groups works correctly', () => {
|
||||
const testFile = path.join(utils.getTempDir(), `replace-capture-${Date.now()}.txt`);
|
||||
try {
|
||||
utils.writeFile(testFile, 'version: 1.0.0');
|
||||
const result = utils.replaceInFile(testFile, /version: (\d+)\.(\d+)\.(\d+)/, 'version: $1.$2.99');
|
||||
assert.strictEqual(result, true);
|
||||
assert.strictEqual(utils.readFile(testFile), 'version: 1.0.99');
|
||||
} finally {
|
||||
fs.unlinkSync(testFile);
|
||||
}
|
||||
})) passed++; else failed++;
|
||||
|
||||
// readStdinJson (function API, not actual stdin — more thorough edge cases)
|
||||
console.log('\nreadStdinJson Edge Cases:');
|
||||
|
||||
if (test('readStdinJson type check: returns a Promise', () => {
|
||||
// readStdinJson returns a Promise regardless of stdin state
|
||||
const result = utils.readStdinJson({ timeoutMs: 100 });
|
||||
assert.ok(result instanceof Promise, 'Should return a Promise');
|
||||
// Don't await — just verify it's a Promise type
|
||||
})) passed++; else failed++;
|
||||
|
||||
// ── Round 28: readStdinJson maxSize truncation and edge cases ──
|
||||
console.log('\nreadStdinJson maxSize truncation:');
|
||||
|
||||
if (test('readStdinJson maxSize stops accumulating after threshold (chunk-level guard)', () => {
|
||||
const { execFileSync } = require('child_process');
|
||||
// maxSize is a chunk-level guard: once data.length >= maxSize, no MORE chunks are added.
|
||||
// A single small chunk that arrives when data.length < maxSize is added in full.
|
||||
// To test multi-chunk behavior, we send >64KB (Node default highWaterMark=16KB)
|
||||
// which should arrive in multiple chunks. With maxSize=100, only the first chunk(s)
|
||||
// totaling under 100 bytes should be captured; subsequent chunks are dropped.
|
||||
const script = 'const u=require("./scripts/lib/utils");u.readStdinJson({timeoutMs:2000,maxSize:100}).then(d=>{process.stdout.write(JSON.stringify(d))})';
|
||||
// Generate 100KB of data (arrives in multiple chunks)
|
||||
const bigInput = '{"k":"' + 'X'.repeat(100000) + '"}';
|
||||
const result = execFileSync('node', ['-e', script], { ...stdinOpts, input: bigInput });
|
||||
// Truncated mid-string → invalid JSON → resolves to {}
|
||||
assert.deepStrictEqual(JSON.parse(result), {});
|
||||
})) passed++; else failed++;
|
||||
|
||||
if (test('readStdinJson with maxSize large enough preserves valid JSON', () => {
|
||||
const { execFileSync } = require('child_process');
|
||||
const script = 'const u=require("./scripts/lib/utils");u.readStdinJson({timeoutMs:2000,maxSize:1024}).then(d=>{process.stdout.write(JSON.stringify(d))})';
|
||||
const input = JSON.stringify({ key: 'value' });
|
||||
const result = execFileSync('node', ['-e', script], { ...stdinOpts, input });
|
||||
assert.deepStrictEqual(JSON.parse(result), { key: 'value' });
|
||||
})) passed++; else failed++;
|
||||
|
||||
if (test('readStdinJson resolves {} for whitespace-only stdin', () => {
|
||||
const { execFileSync } = require('child_process');
|
||||
const result = execFileSync('node', ['-e', stdinScript], { ...stdinOpts, input: ' \n \t ' });
|
||||
// data.trim() is empty → resolves {}
|
||||
assert.deepStrictEqual(JSON.parse(result), {});
|
||||
})) passed++; else failed++;
|
||||
|
||||
if (test('readStdinJson handles JSON with trailing whitespace/newlines', () => {
|
||||
const { execFileSync } = require('child_process');
|
||||
const result = execFileSync('node', ['-e', stdinScript], { ...stdinOpts, input: '{"a":1} \n\n' });
|
||||
assert.deepStrictEqual(JSON.parse(result), { a: 1 });
|
||||
})) passed++; else failed++;
|
||||
|
||||
if (test('readStdinJson handles JSON with BOM prefix (returns {})', () => {
|
||||
const { execFileSync } = require('child_process');
|
||||
// BOM (\uFEFF) before JSON makes it invalid for JSON.parse
|
||||
const result = execFileSync('node', ['-e', stdinScript], { ...stdinOpts, input: '\uFEFF{"a":1}' });
|
||||
// BOM prefix makes JSON.parse fail → resolve {}
|
||||
assert.deepStrictEqual(JSON.parse(result), {});
|
||||
})) passed++; else failed++;
|
||||
|
||||
// ── Round 31: ensureDir error propagation ──
|
||||
console.log('\nensureDir Error Propagation (Round 31):');
|
||||
|
||||
if (test('ensureDir wraps non-EEXIST errors with descriptive message', () => {
|
||||
// Attempting to create a dir under a file should fail with ENOTDIR, not EEXIST
|
||||
const testFile = path.join(utils.getTempDir(), `ensure-err-${Date.now()}.txt`);
|
||||
try {
|
||||
fs.writeFileSync(testFile, 'blocking file');
|
||||
const badPath = path.join(testFile, 'subdir');
|
||||
assert.throws(
|
||||
() => utils.ensureDir(badPath),
|
||||
(err) => err.message.includes('Failed to create directory'),
|
||||
'Should throw with descriptive "Failed to create directory" message'
|
||||
);
|
||||
} finally {
|
||||
fs.unlinkSync(testFile);
|
||||
}
|
||||
})) passed++; else failed++;
|
||||
|
||||
if (test('ensureDir error includes the directory path', () => {
|
||||
const testFile = path.join(utils.getTempDir(), `ensure-err2-${Date.now()}.txt`);
|
||||
try {
|
||||
fs.writeFileSync(testFile, 'blocker');
|
||||
const badPath = path.join(testFile, 'nested', 'dir');
|
||||
try {
|
||||
utils.ensureDir(badPath);
|
||||
assert.fail('Should have thrown');
|
||||
} catch (err) {
|
||||
assert.ok(err.message.includes(badPath), 'Error should include the target path');
|
||||
}
|
||||
} finally {
|
||||
fs.unlinkSync(testFile);
|
||||
}
|
||||
})) passed++; else failed++;
|
||||
|
||||
// ── Round 31: runCommand stderr preference on failure ──
|
||||
console.log('\nrunCommand failure output (Round 31):');
|
||||
|
||||
if (test('runCommand returns stderr content on failure when stderr exists', () => {
|
||||
const result = utils.runCommand('node -e "process.stderr.write(\'custom error\'); process.exit(1)"');
|
||||
assert.strictEqual(result.success, false);
|
||||
assert.ok(result.output.includes('custom error'), 'Should include stderr output');
|
||||
})) passed++; else failed++;
|
||||
|
||||
if (test('runCommand falls back to err.message when no stderr', () => {
|
||||
// An invalid command that won't produce stderr through child process
|
||||
const result = utils.runCommand('nonexistent_cmd_xyz_12345');
|
||||
assert.strictEqual(result.success, false);
|
||||
assert.ok(result.output.length > 0, 'Should have some error output');
|
||||
})) passed++; else failed++;
|
||||
|
||||
// ── Round 31: getGitModifiedFiles with empty patterns ──
|
||||
console.log('\ngetGitModifiedFiles empty patterns (Round 31):');
|
||||
|
||||
if (test('getGitModifiedFiles with empty array returns all modified files', () => {
|
||||
// With an empty patterns array, every file should match (no filter applied)
|
||||
const withEmpty = utils.getGitModifiedFiles([]);
|
||||
const withNone = utils.getGitModifiedFiles();
|
||||
// Both should return the same list (no filtering)
|
||||
assert.deepStrictEqual(withEmpty, withNone,
|
||||
'Empty patterns array should behave same as no patterns');
|
||||
})) passed++; else failed++;
|
||||
|
||||
// ── Round 33: readStdinJson error event handling ──
|
||||
console.log('\nreadStdinJson error event (Round 33):');
|
||||
|
||||
if (test('readStdinJson resolves {} when stdin emits error (via broken pipe)', () => {
|
||||
// Spawn a subprocess that reads from stdin, but close the pipe immediately
|
||||
// to trigger an error or early-end condition
|
||||
const { execFileSync } = require('child_process');
|
||||
const script = 'const u=require("./scripts/lib/utils");u.readStdinJson({timeoutMs:2000}).then(d=>{process.stdout.write(JSON.stringify(d))})';
|
||||
// Pipe stdin from /dev/null — this sends EOF immediately (no data)
|
||||
const result = execFileSync('node', ['-e', script], {
|
||||
encoding: 'utf8',
|
||||
input: '', // empty stdin triggers 'end' with empty data
|
||||
timeout: 5000,
|
||||
cwd: path.join(__dirname, '..', '..'),
|
||||
});
|
||||
const parsed = JSON.parse(result);
|
||||
assert.deepStrictEqual(parsed, {}, 'Should resolve to {} for empty stdin (end event path)');
|
||||
})) passed++; else failed++;
|
||||
|
||||
if (test('readStdinJson error handler is guarded by settled flag', () => {
|
||||
// If 'end' fires first setting settled=true, then a late 'error' should be ignored
|
||||
// We test this by verifying the code structure works: send valid JSON, the end event
|
||||
// fires, settled=true, any late error is safely ignored
|
||||
const { execFileSync } = require('child_process');
|
||||
const script = 'const u=require("./scripts/lib/utils");u.readStdinJson({timeoutMs:2000}).then(d=>{process.stdout.write(JSON.stringify(d))})';
|
||||
const result = execFileSync('node', ['-e', script], {
|
||||
encoding: 'utf8',
|
||||
input: '{"test":"settled-guard"}',
|
||||
timeout: 5000,
|
||||
cwd: path.join(__dirname, '..', '..'),
|
||||
});
|
||||
const parsed = JSON.parse(result);
|
||||
assert.strictEqual(parsed.test, 'settled-guard', 'Should parse normally when end fires first');
|
||||
})) passed++; else failed++;
|
||||
|
||||
// Summary
|
||||
console.log('\n=== Test Results ===');
|
||||
console.log(`Passed: ${passed}`);
|
||||
|
||||
@@ -5,7 +5,7 @@
|
||||
* Usage: node tests/run-all.js
|
||||
*/
|
||||
|
||||
const { execSync } = require('child_process');
|
||||
const { spawnSync } = require('child_process');
|
||||
const path = require('path');
|
||||
const fs = require('fs');
|
||||
|
||||
@@ -16,15 +16,20 @@ const testFiles = [
|
||||
'lib/session-manager.test.js',
|
||||
'lib/session-aliases.test.js',
|
||||
'hooks/hooks.test.js',
|
||||
'hooks/evaluate-session.test.js',
|
||||
'hooks/suggest-compact.test.js',
|
||||
'integration/hooks.test.js',
|
||||
'ci/validators.test.js',
|
||||
'scripts/setup-package-manager.test.js',
|
||||
'scripts/skill-create-output.test.js'
|
||||
];
|
||||
|
||||
console.log('╔══════════════════════════════════════════════════════════╗');
|
||||
console.log('║ Everything Claude Code - Test Suite ║');
|
||||
console.log('╚══════════════════════════════════════════════════════════╝');
|
||||
const BOX_W = 58; // inner width between ║ delimiters
|
||||
const boxLine = (s) => `║${s.padEnd(BOX_W)}║`;
|
||||
|
||||
console.log('╔' + '═'.repeat(BOX_W) + '╗');
|
||||
console.log(boxLine(' Everything Claude Code - Test Suite'));
|
||||
console.log('╚' + '═'.repeat(BOX_W) + '╝');
|
||||
console.log();
|
||||
|
||||
let totalPassed = 0;
|
||||
@@ -41,42 +46,35 @@ for (const testFile of testFiles) {
|
||||
|
||||
console.log(`\n━━━ Running ${testFile} ━━━`);
|
||||
|
||||
try {
|
||||
const output = execSync(`node "${testPath}"`, {
|
||||
encoding: 'utf8',
|
||||
stdio: ['pipe', 'pipe', 'pipe']
|
||||
});
|
||||
console.log(output);
|
||||
const result = spawnSync('node', [testPath], {
|
||||
encoding: 'utf8',
|
||||
stdio: ['pipe', 'pipe', 'pipe']
|
||||
});
|
||||
|
||||
// Parse results from output
|
||||
const passedMatch = output.match(/Passed:\s*(\d+)/);
|
||||
const failedMatch = output.match(/Failed:\s*(\d+)/);
|
||||
const stdout = result.stdout || '';
|
||||
const stderr = result.stderr || '';
|
||||
|
||||
if (passedMatch) totalPassed += parseInt(passedMatch[1], 10);
|
||||
if (failedMatch) totalFailed += parseInt(failedMatch[1], 10);
|
||||
// Show both stdout and stderr so hook warnings are visible
|
||||
if (stdout) console.log(stdout);
|
||||
if (stderr) console.log(stderr);
|
||||
|
||||
} catch (err) {
|
||||
console.log(err.stdout || '');
|
||||
console.log(err.stderr || '');
|
||||
// Parse results from combined output
|
||||
const combined = stdout + stderr;
|
||||
const passedMatch = combined.match(/Passed:\s*(\d+)/);
|
||||
const failedMatch = combined.match(/Failed:\s*(\d+)/);
|
||||
|
||||
// Parse results even on failure
|
||||
const output = (err.stdout || '') + (err.stderr || '');
|
||||
const passedMatch = output.match(/Passed:\s*(\d+)/);
|
||||
const failedMatch = output.match(/Failed:\s*(\d+)/);
|
||||
|
||||
if (passedMatch) totalPassed += parseInt(passedMatch[1], 10);
|
||||
if (failedMatch) totalFailed += parseInt(failedMatch[1], 10);
|
||||
}
|
||||
if (passedMatch) totalPassed += parseInt(passedMatch[1], 10);
|
||||
if (failedMatch) totalFailed += parseInt(failedMatch[1], 10);
|
||||
}
|
||||
|
||||
totalTests = totalPassed + totalFailed;
|
||||
|
||||
console.log('\n╔══════════════════════════════════════════════════════════╗');
|
||||
console.log('║ Final Results ║');
|
||||
console.log('╠══════════════════════════════════════════════════════════╣');
|
||||
console.log(`║ Total Tests: ${String(totalTests).padStart(4)} ║`);
|
||||
console.log(`║ Passed: ${String(totalPassed).padStart(4)} ✓ ║`);
|
||||
console.log(`║ Failed: ${String(totalFailed).padStart(4)} ${totalFailed > 0 ? '✗' : ' '} ║`);
|
||||
console.log('╚══════════════════════════════════════════════════════════╝');
|
||||
console.log('\n╔' + '═'.repeat(BOX_W) + '╗');
|
||||
console.log(boxLine(' Final Results'));
|
||||
console.log('╠' + '═'.repeat(BOX_W) + '╣');
|
||||
console.log(boxLine(` Total Tests: ${String(totalTests).padStart(4)}`));
|
||||
console.log(boxLine(` Passed: ${String(totalPassed).padStart(4)} ✓`));
|
||||
console.log(boxLine(` Failed: ${String(totalFailed).padStart(4)} ${totalFailed > 0 ? '✗' : ' '}`));
|
||||
console.log('╚' + '═'.repeat(BOX_W) + '╝');
|
||||
|
||||
process.exit(totalFailed > 0 ? 1 : 0);
|
||||
|
||||
@@ -159,6 +159,103 @@ function runTests() {
|
||||
assert.ok(result.stdout.includes('pnpm'));
|
||||
})) passed++; else failed++;
|
||||
|
||||
// --detect output completeness
|
||||
console.log('\n--detect output completeness:');
|
||||
|
||||
if (test('shows all three command types in detection output', () => {
|
||||
const result = run(['--detect']);
|
||||
assert.strictEqual(result.code, 0);
|
||||
assert.ok(result.stdout.includes('Install:'), 'Should show Install command');
|
||||
assert.ok(result.stdout.includes('Run script:'), 'Should show Run script command');
|
||||
assert.ok(result.stdout.includes('Execute binary:'), 'Should show Execute binary command');
|
||||
})) passed++; else failed++;
|
||||
|
||||
if (test('shows current marker for active package manager', () => {
|
||||
const result = run(['--detect']);
|
||||
assert.ok(result.stdout.includes('(current)'), 'Should mark current PM');
|
||||
})) passed++; else failed++;
|
||||
|
||||
// ── Round 31: flag-as-PM-name rejection ──
|
||||
// Note: --help, --detect, --list are checked BEFORE --global/--project in argv
|
||||
// parsing, so passing e.g. --global --list triggers the --list handler first.
|
||||
// The startsWith('-') fix protects against flags that AREN'T caught earlier,
|
||||
// like --global --project or --project --unknown-flag.
|
||||
console.log('\n--global flag validation (Round 31):');
|
||||
|
||||
if (test('rejects --global --project (flag not caught by earlier checks)', () => {
|
||||
const result = run(['--global', '--project']);
|
||||
assert.strictEqual(result.code, 1);
|
||||
assert.ok(result.stderr.includes('requires a package manager name'));
|
||||
})) passed++; else failed++;
|
||||
|
||||
if (test('rejects --global --unknown-flag (arbitrary flag as PM name)', () => {
|
||||
const result = run(['--global', '--foo-bar']);
|
||||
assert.strictEqual(result.code, 1);
|
||||
assert.ok(result.stderr.includes('requires a package manager name'));
|
||||
})) passed++; else failed++;
|
||||
|
||||
if (test('rejects --global -x (single-dash flag as PM name)', () => {
|
||||
const result = run(['--global', '-x']);
|
||||
assert.strictEqual(result.code, 1);
|
||||
assert.ok(result.stderr.includes('requires a package manager name'));
|
||||
})) passed++; else failed++;
|
||||
|
||||
if (test('--global --list is handled by --list check first (exit 0)', () => {
|
||||
// --list is checked before --global in the parsing order
|
||||
const result = run(['--global', '--list']);
|
||||
assert.strictEqual(result.code, 0);
|
||||
assert.ok(result.stdout.includes('Available Package Managers'));
|
||||
})) passed++; else failed++;
|
||||
|
||||
console.log('\n--project flag validation (Round 31):');
|
||||
|
||||
if (test('rejects --project --global (cross-flag confusion)', () => {
|
||||
// --global handler runs before --project, catches it first
|
||||
const result = run(['--project', '--global']);
|
||||
assert.strictEqual(result.code, 1);
|
||||
assert.ok(result.stderr.includes('requires a package manager name'));
|
||||
})) passed++; else failed++;
|
||||
|
||||
if (test('rejects --project --unknown-flag', () => {
|
||||
const result = run(['--project', '--bar']);
|
||||
assert.strictEqual(result.code, 1);
|
||||
assert.ok(result.stderr.includes('requires a package manager name'));
|
||||
})) passed++; else failed++;
|
||||
|
||||
if (test('rejects --project -z (single-dash flag)', () => {
|
||||
const result = run(['--project', '-z']);
|
||||
assert.strictEqual(result.code, 1);
|
||||
assert.ok(result.stderr.includes('requires a package manager name'));
|
||||
})) passed++; else failed++;
|
||||
|
||||
// ── Round 45: output completeness and marker uniqueness ──
|
||||
console.log('\n--detect marker uniqueness (Round 45):');
|
||||
|
||||
if (test('--detect output shows exactly one (current) marker', () => {
|
||||
const result = run(['--detect']);
|
||||
assert.strictEqual(result.code, 0);
|
||||
const lines = result.stdout.split('\n');
|
||||
const currentLines = lines.filter(l => l.includes('(current)'));
|
||||
assert.strictEqual(currentLines.length, 1, `Expected exactly 1 "(current)" marker, found ${currentLines.length}`);
|
||||
// The (current) marker should be on a line with a PM name
|
||||
assert.ok(/\b(npm|pnpm|yarn|bun)\b/.test(currentLines[0]), 'Current marker should be on a PM line');
|
||||
})) passed++; else failed++;
|
||||
|
||||
console.log('\n--list output completeness (Round 45):');
|
||||
|
||||
if (test('--list shows all four supported package managers', () => {
|
||||
const result = run(['--list']);
|
||||
assert.strictEqual(result.code, 0);
|
||||
for (const pm of ['npm', 'pnpm', 'yarn', 'bun']) {
|
||||
assert.ok(result.stdout.includes(pm), `Should list ${pm}`);
|
||||
}
|
||||
// Each PM should show Lock file and Install info
|
||||
const lockFileCount = (result.stdout.match(/Lock file:/g) || []).length;
|
||||
assert.strictEqual(lockFileCount, 4, `Expected 4 "Lock file:" entries, found ${lockFileCount}`);
|
||||
const installCount = (result.stdout.match(/Install:/g) || []).length;
|
||||
assert.strictEqual(installCount, 4, `Expected 4 "Install:" entries, found ${installCount}`);
|
||||
})) passed++; else failed++;
|
||||
|
||||
// Summary
|
||||
console.log(`\nResults: Passed: ${passed}, Failed: ${failed}`);
|
||||
process.exit(failed > 0 ? 1 : 0);
|
||||
|
||||
@@ -179,6 +179,54 @@ function runTests() {
|
||||
assert.ok(combined.includes('Everything Claude Code'), 'Should include project name');
|
||||
})) passed++; else failed++;
|
||||
|
||||
// progressBar edge cases (tests the clamp fix)
|
||||
console.log('\nprogressBar edge cases:');
|
||||
|
||||
if (test('does not crash with confidence > 1.0 (percent > 100)', () => {
|
||||
const output = new SkillCreateOutput('repo');
|
||||
// confidence 1.5 => percent 150 — previously crashed with RangeError
|
||||
const logs = captureLog(() => output.patterns([
|
||||
{ name: 'Overconfident', trigger: 'always', confidence: 1.5, evidence: 'too much' },
|
||||
]));
|
||||
const combined = stripAnsi(logs.join('\n'));
|
||||
assert.ok(combined.includes('150%'), 'Should show 150%');
|
||||
})) passed++; else failed++;
|
||||
|
||||
if (test('renders 0% confidence bar without crash', () => {
|
||||
const output = new SkillCreateOutput('repo');
|
||||
const logs = captureLog(() => output.patterns([
|
||||
{ name: 'Zero Confidence', trigger: 'never', confidence: 0.0, evidence: 'none' },
|
||||
]));
|
||||
const combined = stripAnsi(logs.join('\n'));
|
||||
assert.ok(combined.includes('0%'), 'Should show 0%');
|
||||
})) passed++; else failed++;
|
||||
|
||||
if (test('renders 100% confidence bar without crash', () => {
|
||||
const output = new SkillCreateOutput('repo');
|
||||
const logs = captureLog(() => output.patterns([
|
||||
{ name: 'Perfect', trigger: 'always', confidence: 1.0, evidence: 'certain' },
|
||||
]));
|
||||
const combined = stripAnsi(logs.join('\n'));
|
||||
assert.ok(combined.includes('100%'), 'Should show 100%');
|
||||
})) passed++; else failed++;
|
||||
|
||||
// Empty array edge cases
|
||||
console.log('\nempty array edge cases:');
|
||||
|
||||
if (test('patterns() with empty array produces header but no entries', () => {
|
||||
const output = new SkillCreateOutput('repo');
|
||||
const logs = captureLog(() => output.patterns([]));
|
||||
const combined = logs.join('\n');
|
||||
assert.ok(combined.includes('Patterns'), 'Should show header');
|
||||
})) passed++; else failed++;
|
||||
|
||||
if (test('instincts() with empty array produces box but no entries', () => {
|
||||
const output = new SkillCreateOutput('repo');
|
||||
const logs = captureLog(() => output.instincts([]));
|
||||
const combined = logs.join('\n');
|
||||
assert.ok(combined.includes('Instincts'), 'Should show box title');
|
||||
})) passed++; else failed++;
|
||||
|
||||
// Box drawing crash fix (regression test)
|
||||
console.log('\nbox() crash prevention:');
|
||||
|
||||
@@ -226,6 +274,183 @@ function runTests() {
|
||||
}
|
||||
})) passed++; else failed++;
|
||||
|
||||
// ── Round 27: box and progressBar edge cases ──
|
||||
console.log('\nbox() content overflow:');
|
||||
|
||||
if (test('box does not crash when content line exceeds width', () => {
|
||||
const output = new SkillCreateOutput('repo', { width: 30 });
|
||||
// Force a very long instinct name that exceeds width
|
||||
const logs = captureLog(() => output.instincts([
|
||||
{ name: 'this-is-an-extremely-long-instinct-name-that-clearly-exceeds-width', confidence: 0.9 },
|
||||
]));
|
||||
// Math.max(0, padding) should prevent RangeError
|
||||
assert.ok(logs.length > 0, 'Should produce output without RangeError');
|
||||
})) passed++; else failed++;
|
||||
|
||||
if (test('patterns renders negative confidence without crash', () => {
|
||||
const output = new SkillCreateOutput('repo');
|
||||
// confidence -0.1 => percent -10 — Math.max(0, ...) should clamp filled to 0
|
||||
const logs = captureLog(() => output.patterns([
|
||||
{ name: 'Negative', trigger: 'never', confidence: -0.1, evidence: 'impossible' },
|
||||
]));
|
||||
const combined = stripAnsi(logs.join('\n'));
|
||||
assert.ok(combined.includes('-10%'), 'Should show -10%');
|
||||
})) passed++; else failed++;
|
||||
|
||||
if (test('header does not crash with very long repo name', () => {
|
||||
const longRepo = 'A'.repeat(100);
|
||||
const output = new SkillCreateOutput(longRepo);
|
||||
// Math.max(0, 55 - stripAnsi(subtitle).length) protects against negative repeat
|
||||
const logs = captureLog(() => output.header());
|
||||
assert.ok(logs.length > 0, 'Should produce output without crash');
|
||||
})) passed++; else failed++;
|
||||
|
||||
if (test('stripAnsi handles nested ANSI codes with multi-digit params', () => {
|
||||
// Simulate bold + color + reset
|
||||
const ansiStr = '\x1b[1m\x1b[36mBold Cyan\x1b[0m\x1b[0m';
|
||||
const stripped = stripAnsi(ansiStr);
|
||||
assert.strictEqual(stripped, 'Bold Cyan', 'Should strip all nested ANSI sequences');
|
||||
})) passed++; else failed++;
|
||||
|
||||
if (test('footer produces output', () => {
|
||||
const output = new SkillCreateOutput('repo');
|
||||
const logs = captureLog(() => output.footer());
|
||||
const combined = stripAnsi(logs.join('\n'));
|
||||
assert.ok(combined.includes('Powered by'), 'Should include attribution text');
|
||||
})) passed++; else failed++;
|
||||
|
||||
// ── Round 34: header width alignment ──
|
||||
console.log('\nheader() width alignment (Round 34):');
|
||||
|
||||
if (test('header subtitle line matches border width', () => {
|
||||
const output = new SkillCreateOutput('test-repo');
|
||||
const logs = captureLog(() => output.header());
|
||||
// Find the border and subtitle lines
|
||||
const lines = logs.map(l => stripAnsi(l));
|
||||
const borderLine = lines.find(l => l.includes('═══'));
|
||||
const subtitleLine = lines.find(l => l.includes('Extracting patterns'));
|
||||
assert.ok(borderLine, 'Should find border line');
|
||||
assert.ok(subtitleLine, 'Should find subtitle line');
|
||||
// Both lines should have the same visible width
|
||||
assert.strictEqual(subtitleLine.length, borderLine.length,
|
||||
`Subtitle width (${subtitleLine.length}) should match border width (${borderLine.length})`);
|
||||
})) passed++; else failed++;
|
||||
|
||||
if (test('header all lines have consistent width for short repo name', () => {
|
||||
const output = new SkillCreateOutput('abc');
|
||||
const logs = captureLog(() => output.header());
|
||||
const lines = logs.map(l => stripAnsi(l)).filter(l => l.includes('║') || l.includes('╔') || l.includes('╚'));
|
||||
assert.ok(lines.length >= 4, 'Should have at least 4 box lines');
|
||||
const widths = lines.map(l => l.length);
|
||||
const first = widths[0];
|
||||
widths.forEach((w, i) => {
|
||||
assert.strictEqual(w, first,
|
||||
`Line ${i} width (${w}) should match first line (${first})`);
|
||||
});
|
||||
})) passed++; else failed++;
|
||||
|
||||
if (test('header subtitle has correct content area width of 64 chars', () => {
|
||||
const output = new SkillCreateOutput('myrepo');
|
||||
const logs = captureLog(() => output.header());
|
||||
const lines = logs.map(l => stripAnsi(l));
|
||||
const subtitleLine = lines.find(l => l.includes('Extracting patterns'));
|
||||
assert.ok(subtitleLine, 'Should find subtitle line');
|
||||
// Content between ║ and ║ should be 64 chars (border is 66 total)
|
||||
// Format: ║ + content(64) + ║ = 66
|
||||
assert.strictEqual(subtitleLine.length, 66,
|
||||
`Total subtitle line width should be 66, got ${subtitleLine.length}`);
|
||||
})) passed++; else failed++;
|
||||
|
||||
if (test('header subtitle line does not truncate with medium-length repo name', () => {
|
||||
const output = new SkillCreateOutput('my-medium-repo-name');
|
||||
const logs = captureLog(() => output.header());
|
||||
const combined = logs.join('\n');
|
||||
assert.ok(combined.includes('my-medium-repo-name'), 'Should include full repo name');
|
||||
const lines = logs.map(l => stripAnsi(l));
|
||||
const subtitleLine = lines.find(l => l.includes('Extracting patterns'));
|
||||
assert.ok(subtitleLine, 'Should have subtitle line');
|
||||
// Should still be 66 chars even with a longer name
|
||||
assert.strictEqual(subtitleLine.length, 66,
|
||||
`Subtitle line should be 66 chars, got ${subtitleLine.length}`);
|
||||
})) passed++; else failed++;
|
||||
|
||||
// ── Round 35: box() width accuracy ──
|
||||
console.log('\nbox() width accuracy (Round 35):');
|
||||
|
||||
if (test('box lines in instincts() match the default box width of 60', () => {
|
||||
const output = new SkillCreateOutput('repo');
|
||||
const logs = captureLog(() => output.instincts([
|
||||
{ name: 'test-instinct', confidence: 0.85 },
|
||||
]));
|
||||
const combined = logs.join('\n');
|
||||
const boxLines = combined.split('\n').filter(l => {
|
||||
const s = stripAnsi(l).trim();
|
||||
return s.startsWith('\u256D') || s.startsWith('\u2502') || s.startsWith('\u2570');
|
||||
});
|
||||
assert.ok(boxLines.length >= 3, 'Should have at least 3 box lines');
|
||||
// The box() default width is 60 — each line should be exactly 60 chars
|
||||
boxLines.forEach((l, i) => {
|
||||
const w = stripAnsi(l).length;
|
||||
assert.strictEqual(w, 60,
|
||||
`Box line ${i} should be 60 chars wide, got ${w}`);
|
||||
});
|
||||
})) passed++; else failed++;
|
||||
|
||||
if (test('box lines with custom width match the requested width', () => {
|
||||
const output = new SkillCreateOutput('repo', { width: 40 });
|
||||
const logs = captureLog(() => output.instincts([
|
||||
{ name: 'short', confidence: 0.9 },
|
||||
]));
|
||||
const combined = logs.join('\n');
|
||||
const boxLines = combined.split('\n').filter(l => {
|
||||
const s = stripAnsi(l).trim();
|
||||
return s.startsWith('\u256D') || s.startsWith('\u2502') || s.startsWith('\u2570');
|
||||
});
|
||||
assert.ok(boxLines.length >= 3, 'Should have at least 3 box lines');
|
||||
// instincts() calls box() with no explicit width, so it uses the default 60
|
||||
// regardless of this.width — verify self-consistency at least
|
||||
const firstWidth = stripAnsi(boxLines[0]).length;
|
||||
boxLines.forEach((l, i) => {
|
||||
const w = stripAnsi(l).length;
|
||||
assert.strictEqual(w, firstWidth,
|
||||
`Box line ${i} width ${w} should match first line ${firstWidth}`);
|
||||
});
|
||||
})) passed++; else failed++;
|
||||
|
||||
if (test('analysisResults box lines are all 60 chars wide', () => {
|
||||
const output = new SkillCreateOutput('repo');
|
||||
const logs = captureLog(() => output.analysisResults({
|
||||
commits: 50, timeRange: 'Jan 2026', contributors: 2, files: 100,
|
||||
}));
|
||||
const combined = logs.join('\n');
|
||||
const boxLines = combined.split('\n').filter(l => {
|
||||
const s = stripAnsi(l).trim();
|
||||
return s.startsWith('\u256D') || s.startsWith('\u2502') || s.startsWith('\u2570');
|
||||
});
|
||||
assert.ok(boxLines.length >= 3, 'Should have at least 3 box lines');
|
||||
boxLines.forEach((l, i) => {
|
||||
const w = stripAnsi(l).length;
|
||||
assert.strictEqual(w, 60,
|
||||
`Analysis box line ${i} should be 60 chars, got ${w}`);
|
||||
});
|
||||
})) passed++; else failed++;
|
||||
|
||||
if (test('nextSteps box lines are all 60 chars wide', () => {
|
||||
const output = new SkillCreateOutput('repo');
|
||||
const logs = captureLog(() => output.nextSteps());
|
||||
const combined = logs.join('\n');
|
||||
const boxLines = combined.split('\n').filter(l => {
|
||||
const s = stripAnsi(l).trim();
|
||||
return s.startsWith('\u256D') || s.startsWith('\u2502') || s.startsWith('\u2570');
|
||||
});
|
||||
assert.ok(boxLines.length >= 3, 'Should have at least 3 box lines');
|
||||
boxLines.forEach((l, i) => {
|
||||
const w = stripAnsi(l).length;
|
||||
assert.strictEqual(w, 60,
|
||||
`NextSteps box line ${i} should be 60 chars, got ${w}`);
|
||||
});
|
||||
})) passed++; else failed++;
|
||||
|
||||
// Summary
|
||||
console.log(`\nResults: Passed: ${passed}, Failed: ${failed}`);
|
||||
process.exit(failed > 0 ? 1 : 0);
|
||||
|
||||
Reference in New Issue
Block a user