mirror of
https://github.com/affaan-m/everything-claude-code.git
synced 2026-03-30 21:53:28 +08:00
Compare commits
14 Commits
fix/trae-m
...
main
| Author | SHA1 | Date | |
|---|---|---|---|
|
|
656cf4c94a | ||
|
|
0220202a61 | ||
|
|
5a2c9f5558 | ||
|
|
7ff2f0748e | ||
|
|
3f6a14acde | ||
|
|
d6c7f8fb0a | ||
|
|
7253d0ca98 | ||
|
|
118e57e14b | ||
|
|
a4d4b1d756 | ||
|
|
c90566f9be | ||
|
|
b9a01d3c32 | ||
|
|
fab80c99b7 | ||
|
|
8846210ca2 | ||
|
|
1d0f64a14d |
@@ -4,22 +4,23 @@ Run a deterministic repository harness audit and return a prioritized scorecard.
|
||||
|
||||
## Usage
|
||||
|
||||
`/harness-audit [scope] [--format text|json]`
|
||||
`/harness-audit [scope] [--format text|json] [--root path]`
|
||||
|
||||
- `scope` (optional): `repo` (default), `hooks`, `skills`, `commands`, `agents`
|
||||
- `--format`: output style (`text` default, `json` for automation)
|
||||
- `--root`: audit a specific path instead of the current working directory
|
||||
|
||||
## Deterministic Engine
|
||||
|
||||
Always run:
|
||||
|
||||
```bash
|
||||
node scripts/harness-audit.js <scope> --format <text|json>
|
||||
node scripts/harness-audit.js <scope> --format <text|json> [--root <path>]
|
||||
```
|
||||
|
||||
This script is the source of truth for scoring and checks. Do not invent additional dimensions or ad-hoc points.
|
||||
|
||||
Rubric version: `2026-03-16`.
|
||||
Rubric version: `2026-03-30`.
|
||||
|
||||
The script computes 7 fixed categories (`0-10` normalized each):
|
||||
|
||||
@@ -32,6 +33,7 @@ The script computes 7 fixed categories (`0-10` normalized each):
|
||||
7. Cost Efficiency
|
||||
|
||||
Scores are derived from explicit file/rule checks and are reproducible for the same commit.
|
||||
The script audits the current working directory by default and auto-detects whether the target is the ECC repo itself or a consumer project using ECC.
|
||||
|
||||
## Output Contract
|
||||
|
||||
|
||||
38
RULES.md
Normal file
38
RULES.md
Normal file
@@ -0,0 +1,38 @@
|
||||
# Rules
|
||||
|
||||
## Must Always
|
||||
- Delegate to specialized agents for domain tasks.
|
||||
- Write tests before implementation and verify critical paths.
|
||||
- Validate inputs and keep security checks intact.
|
||||
- Prefer immutable updates over mutating shared state.
|
||||
- Follow established repository patterns before inventing new ones.
|
||||
- Keep contributions focused, reviewable, and well-described.
|
||||
|
||||
## Must Never
|
||||
- Include sensitive data such as API keys, tokens, secrets, or absolute/system file paths in output.
|
||||
- Submit untested changes.
|
||||
- Bypass security checks or validation hooks.
|
||||
- Duplicate existing functionality without a clear reason.
|
||||
- Ship code without checking the relevant test suite.
|
||||
|
||||
## Agent Format
|
||||
- Agents live in `agents/*.md`.
|
||||
- Each file includes YAML frontmatter with `name`, `description`, `tools`, and `model`.
|
||||
- File names are lowercase with hyphens and must match the agent name.
|
||||
- Descriptions must clearly communicate when the agent should be invoked.
|
||||
|
||||
## Skill Format
|
||||
- Skills live in `skills/<name>/SKILL.md`.
|
||||
- Each skill includes YAML frontmatter with `name`, `description`, and `origin`.
|
||||
- Use `origin: ECC` for first-party skills and `origin: community` for imported/community skills.
|
||||
- Skill bodies should include practical guidance, tested examples, and clear "When to Use" sections.
|
||||
|
||||
## Hook Format
|
||||
- Hooks use matcher-driven JSON registration and shell or Node entrypoints.
|
||||
- Matchers should be specific instead of broad catch-alls.
|
||||
- Exit `1` only when blocking behavior is intentional; otherwise exit `0`.
|
||||
- Error and info messages should be actionable.
|
||||
|
||||
## Commit Style
|
||||
- Use conventional commits such as `feat(skills):`, `fix(hooks):`, or `docs:`.
|
||||
- Keep changes modular and explain user-facing impact in the PR summary.
|
||||
17
SOUL.md
Normal file
17
SOUL.md
Normal file
@@ -0,0 +1,17 @@
|
||||
# Soul
|
||||
|
||||
## Core Identity
|
||||
Everything Claude Code (ECC) is a production-ready AI coding plugin with 30 specialized agents, 135 skills, 60 commands, and automated hook workflows for software development.
|
||||
|
||||
## Core Principles
|
||||
1. **Agent-First** — route work to the right specialist as early as possible.
|
||||
2. **Test-Driven** — write or refresh tests before trusting implementation changes.
|
||||
3. **Security-First** — validate inputs, protect secrets, and keep safe defaults.
|
||||
4. **Immutability** — prefer explicit state transitions over mutation.
|
||||
5. **Plan Before Execute** — complex changes should be broken into deliberate phases.
|
||||
|
||||
## Agent Orchestration Philosophy
|
||||
ECC is designed so specialists are invoked proactively: planners for implementation strategy, reviewers for code quality, security reviewers for sensitive code, and build resolvers when the toolchain breaks.
|
||||
|
||||
## Cross-Harness Vision
|
||||
This gitagent surface is an initial portability layer for ECC's shared identity, governance, and skill catalog. Native agents, commands, and hooks remain authoritative in the repository until full manifest coverage is added.
|
||||
154
agent.yaml
Normal file
154
agent.yaml
Normal file
@@ -0,0 +1,154 @@
|
||||
spec_version: "0.1.0"
|
||||
name: everything-claude-code
|
||||
version: 1.9.0
|
||||
description: "Initial gitagent export surface for ECC's shared skill catalog, governance, and identity. Native agents, commands, and hooks remain authoritative in the repository while manifest coverage expands."
|
||||
author: affaan-m
|
||||
license: MIT
|
||||
model:
|
||||
preferred: claude-opus-4-6
|
||||
fallback:
|
||||
- claude-sonnet-4-6
|
||||
skills:
|
||||
- agent-eval
|
||||
- agent-harness-construction
|
||||
- agent-payment-x402
|
||||
- agentic-engineering
|
||||
- ai-first-engineering
|
||||
- ai-regression-testing
|
||||
- android-clean-architecture
|
||||
- api-design
|
||||
- architecture-decision-records
|
||||
- article-writing
|
||||
- autonomous-loops
|
||||
- backend-patterns
|
||||
- benchmark
|
||||
- blueprint
|
||||
- browser-qa
|
||||
- bun-runtime
|
||||
- canary-watch
|
||||
- carrier-relationship-management
|
||||
- ck
|
||||
- claude-api
|
||||
- claude-devfleet
|
||||
- click-path-audit
|
||||
- clickhouse-io
|
||||
- codebase-onboarding
|
||||
- coding-standards
|
||||
- compose-multiplatform-patterns
|
||||
- configure-ecc
|
||||
- content-engine
|
||||
- content-hash-cache-pattern
|
||||
- context-budget
|
||||
- continuous-agent-loop
|
||||
- continuous-learning
|
||||
- continuous-learning-v2
|
||||
- cost-aware-llm-pipeline
|
||||
- cpp-coding-standards
|
||||
- cpp-testing
|
||||
- crosspost
|
||||
- customs-trade-compliance
|
||||
- data-scraper-agent
|
||||
- database-migrations
|
||||
- deep-research
|
||||
- deployment-patterns
|
||||
- design-system
|
||||
- django-patterns
|
||||
- django-security
|
||||
- django-tdd
|
||||
- django-verification
|
||||
- dmux-workflows
|
||||
- docker-patterns
|
||||
- documentation-lookup
|
||||
- e2e-testing
|
||||
- energy-procurement
|
||||
- enterprise-agent-ops
|
||||
- eval-harness
|
||||
- exa-search
|
||||
- fal-ai-media
|
||||
- flutter-dart-code-review
|
||||
- foundation-models-on-device
|
||||
- frontend-patterns
|
||||
- frontend-slides
|
||||
- git-workflow
|
||||
- golang-patterns
|
||||
- golang-testing
|
||||
- healthcare-cdss-patterns
|
||||
- healthcare-emr-patterns
|
||||
- healthcare-eval-harness
|
||||
- healthcare-phi-compliance
|
||||
- inventory-demand-planning
|
||||
- investor-materials
|
||||
- investor-outreach
|
||||
- iterative-retrieval
|
||||
- java-coding-standards
|
||||
- jpa-patterns
|
||||
- kotlin-coroutines-flows
|
||||
- kotlin-exposed-patterns
|
||||
- kotlin-ktor-patterns
|
||||
- kotlin-patterns
|
||||
- kotlin-testing
|
||||
- laravel-patterns
|
||||
- laravel-plugin-discovery
|
||||
- laravel-security
|
||||
- laravel-tdd
|
||||
- laravel-verification
|
||||
- liquid-glass-design
|
||||
- logistics-exception-management
|
||||
- market-research
|
||||
- mcp-server-patterns
|
||||
- nanoclaw-repl
|
||||
- nextjs-turbopack
|
||||
- nutrient-document-processing
|
||||
- nuxt4-patterns
|
||||
- perl-patterns
|
||||
- perl-security
|
||||
- perl-testing
|
||||
- plankton-code-quality
|
||||
- postgres-patterns
|
||||
- product-lens
|
||||
- production-scheduling
|
||||
- project-guidelines-example
|
||||
- prompt-optimizer
|
||||
- python-patterns
|
||||
- python-testing
|
||||
- pytorch-patterns
|
||||
- quality-nonconformance
|
||||
- ralphinho-rfc-pipeline
|
||||
- regex-vs-llm-structured-text
|
||||
- repo-scan
|
||||
- returns-reverse-logistics
|
||||
- rules-distill
|
||||
- rust-patterns
|
||||
- rust-testing
|
||||
- safety-guard
|
||||
- santa-method
|
||||
- search-first
|
||||
- security-review
|
||||
- security-scan
|
||||
- skill-comply
|
||||
- skill-stocktake
|
||||
- springboot-patterns
|
||||
- springboot-security
|
||||
- springboot-tdd
|
||||
- springboot-verification
|
||||
- strategic-compact
|
||||
- swift-actor-persistence
|
||||
- swift-concurrency-6-2
|
||||
- swift-protocol-di-testing
|
||||
- swiftui-patterns
|
||||
- tdd-workflow
|
||||
- team-builder
|
||||
- token-budget-advisor
|
||||
- verification-loop
|
||||
- video-editing
|
||||
- videodb
|
||||
- visa-doc-translate
|
||||
- x-api
|
||||
tags:
|
||||
- agent-harness
|
||||
- developer-tools
|
||||
- code-review
|
||||
- testing
|
||||
- security
|
||||
- cross-platform
|
||||
- gitagent
|
||||
@@ -4,22 +4,23 @@ Run a deterministic repository harness audit and return a prioritized scorecard.
|
||||
|
||||
## Usage
|
||||
|
||||
`/harness-audit [scope] [--format text|json]`
|
||||
`/harness-audit [scope] [--format text|json] [--root path]`
|
||||
|
||||
- `scope` (optional): `repo` (default), `hooks`, `skills`, `commands`, `agents`
|
||||
- `--format`: output style (`text` default, `json` for automation)
|
||||
- `--root`: audit a specific path instead of the current working directory
|
||||
|
||||
## Deterministic Engine
|
||||
|
||||
Always run:
|
||||
|
||||
```bash
|
||||
node scripts/harness-audit.js <scope> --format <text|json>
|
||||
node scripts/harness-audit.js <scope> --format <text|json> [--root <path>]
|
||||
```
|
||||
|
||||
This script is the source of truth for scoring and checks. Do not invent additional dimensions or ad-hoc points.
|
||||
|
||||
Rubric version: `2026-03-16`.
|
||||
Rubric version: `2026-03-30`.
|
||||
|
||||
The script computes 7 fixed categories (`0-10` normalized each):
|
||||
|
||||
@@ -32,6 +33,7 @@ The script computes 7 fixed categories (`0-10` normalized each):
|
||||
7. Cost Efficiency
|
||||
|
||||
Scores are derived from explicit file/rule checks and are reproducible for the same commit.
|
||||
The script audits the current working directory by default and auto-detects whether the target is the ECC repo itself or a consumer project using ECC.
|
||||
|
||||
## Output Contract
|
||||
|
||||
|
||||
@@ -310,7 +310,7 @@
|
||||
"timeout": 10
|
||||
}
|
||||
],
|
||||
"description": "Send macOS desktop notification with task summary when Claude responds"
|
||||
"description": "Send desktop notification (macOS/WSL) with task summary when Claude responds"
|
||||
}
|
||||
],
|
||||
"SessionEnd": [
|
||||
|
||||
11
install.sh
11
install.sh
@@ -20,4 +20,13 @@ if [ ! -d "$SCRIPT_DIR/node_modules" ]; then
|
||||
(cd "$SCRIPT_DIR" && npm install --no-audit --no-fund --loglevel=error)
|
||||
fi
|
||||
|
||||
exec node "$SCRIPT_DIR/scripts/install-apply.js" "$@"
|
||||
# On MSYS2/Git Bash, convert the POSIX path to a Windows path so Node.js
|
||||
# (a native Windows binary) receives a valid path instead of a doubled one
|
||||
# like G:\g\projects\... that results from Git Bash's auto path conversion.
|
||||
if command -v cygpath &>/dev/null; then
|
||||
NODE_SCRIPT="$(cygpath -w "$SCRIPT_DIR/scripts/install-apply.js")"
|
||||
else
|
||||
NODE_SCRIPT="$SCRIPT_DIR/scripts/install-apply.js"
|
||||
fi
|
||||
|
||||
exec node "$NODE_SCRIPT" "$@"
|
||||
|
||||
@@ -3,8 +3,6 @@
|
||||
const fs = require('fs');
|
||||
const path = require('path');
|
||||
|
||||
const REPO_ROOT = path.join(__dirname, '..');
|
||||
|
||||
const CATEGORIES = [
|
||||
'Tool Coverage',
|
||||
'Context Efficiency',
|
||||
@@ -29,6 +27,7 @@ function parseArgs(argv) {
|
||||
scope: 'repo',
|
||||
format: 'text',
|
||||
help: false,
|
||||
root: path.resolve(process.env.AUDIT_ROOT || process.cwd()),
|
||||
};
|
||||
|
||||
for (let index = 0; index < args.length; index += 1) {
|
||||
@@ -51,6 +50,12 @@ function parseArgs(argv) {
|
||||
continue;
|
||||
}
|
||||
|
||||
if (arg === '--root') {
|
||||
parsed.root = path.resolve(args[index + 1] || process.cwd());
|
||||
index += 1;
|
||||
continue;
|
||||
}
|
||||
|
||||
if (arg.startsWith('--format=')) {
|
||||
parsed.format = arg.split('=')[1].toLowerCase();
|
||||
continue;
|
||||
@@ -61,6 +66,11 @@ function parseArgs(argv) {
|
||||
continue;
|
||||
}
|
||||
|
||||
if (arg.startsWith('--root=')) {
|
||||
parsed.root = path.resolve(arg.slice('--root='.length));
|
||||
continue;
|
||||
}
|
||||
|
||||
if (arg.startsWith('-')) {
|
||||
throw new Error(`Unknown argument: ${arg}`);
|
||||
}
|
||||
@@ -75,16 +85,16 @@ function parseArgs(argv) {
|
||||
return parsed;
|
||||
}
|
||||
|
||||
function fileExists(relativePath) {
|
||||
return fs.existsSync(path.join(REPO_ROOT, relativePath));
|
||||
function fileExists(rootDir, relativePath) {
|
||||
return fs.existsSync(path.join(rootDir, relativePath));
|
||||
}
|
||||
|
||||
function readText(relativePath) {
|
||||
return fs.readFileSync(path.join(REPO_ROOT, relativePath), 'utf8');
|
||||
function readText(rootDir, relativePath) {
|
||||
return fs.readFileSync(path.join(rootDir, relativePath), 'utf8');
|
||||
}
|
||||
|
||||
function countFiles(relativeDir, extension) {
|
||||
const dirPath = path.join(REPO_ROOT, relativeDir);
|
||||
function countFiles(rootDir, relativeDir, extension) {
|
||||
const dirPath = path.join(rootDir, relativeDir);
|
||||
if (!fs.existsSync(dirPath)) {
|
||||
return 0;
|
||||
}
|
||||
@@ -109,19 +119,90 @@ function countFiles(relativeDir, extension) {
|
||||
return count;
|
||||
}
|
||||
|
||||
function safeRead(relativePath) {
|
||||
function safeRead(rootDir, relativePath) {
|
||||
try {
|
||||
return readText(relativePath);
|
||||
return readText(rootDir, relativePath);
|
||||
} catch (_error) {
|
||||
return '';
|
||||
}
|
||||
}
|
||||
|
||||
function getChecks() {
|
||||
const packageJson = JSON.parse(readText('package.json'));
|
||||
const commandPrimary = safeRead('commands/harness-audit.md').trim();
|
||||
const commandParity = safeRead('.opencode/commands/harness-audit.md').trim();
|
||||
const hooksJson = safeRead('hooks/hooks.json');
|
||||
function safeParseJson(text) {
|
||||
if (!text || !text.trim()) {
|
||||
return null;
|
||||
}
|
||||
|
||||
try {
|
||||
return JSON.parse(text);
|
||||
} catch (_error) {
|
||||
return null;
|
||||
}
|
||||
}
|
||||
|
||||
function hasFileWithExtension(rootDir, relativeDir, extensions) {
|
||||
const dirPath = path.join(rootDir, relativeDir);
|
||||
if (!fs.existsSync(dirPath)) {
|
||||
return false;
|
||||
}
|
||||
|
||||
const allowed = Array.isArray(extensions) ? extensions : [extensions];
|
||||
const stack = [dirPath];
|
||||
|
||||
while (stack.length > 0) {
|
||||
const current = stack.pop();
|
||||
const entries = fs.readdirSync(current, { withFileTypes: true });
|
||||
|
||||
for (const entry of entries) {
|
||||
const nextPath = path.join(current, entry.name);
|
||||
if (entry.isDirectory()) {
|
||||
stack.push(nextPath);
|
||||
continue;
|
||||
}
|
||||
|
||||
if (allowed.some((extension) => entry.name.endsWith(extension))) {
|
||||
return true;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
return false;
|
||||
}
|
||||
|
||||
function detectTargetMode(rootDir) {
|
||||
const packageJson = safeParseJson(safeRead(rootDir, 'package.json'));
|
||||
if (packageJson?.name === 'everything-claude-code') {
|
||||
return 'repo';
|
||||
}
|
||||
|
||||
if (
|
||||
fileExists(rootDir, 'scripts/harness-audit.js') &&
|
||||
fileExists(rootDir, '.claude-plugin/plugin.json') &&
|
||||
fileExists(rootDir, 'agents') &&
|
||||
fileExists(rootDir, 'skills')
|
||||
) {
|
||||
return 'repo';
|
||||
}
|
||||
|
||||
return 'consumer';
|
||||
}
|
||||
|
||||
function findPluginInstall(rootDir) {
|
||||
const homeDir = process.env.HOME || '';
|
||||
const candidates = [
|
||||
path.join(rootDir, '.claude', 'plugins', 'everything-claude-code', '.claude-plugin', 'plugin.json'),
|
||||
path.join(rootDir, '.claude', 'plugins', 'everything-claude-code', 'plugin.json'),
|
||||
homeDir && path.join(homeDir, '.claude', 'plugins', 'everything-claude-code', '.claude-plugin', 'plugin.json'),
|
||||
homeDir && path.join(homeDir, '.claude', 'plugins', 'everything-claude-code', 'plugin.json'),
|
||||
].filter(Boolean);
|
||||
|
||||
return candidates.find(candidate => fs.existsSync(candidate)) || null;
|
||||
}
|
||||
|
||||
function getRepoChecks(rootDir) {
|
||||
const packageJson = JSON.parse(readText(rootDir, 'package.json'));
|
||||
const commandPrimary = safeRead(rootDir, 'commands/harness-audit.md').trim();
|
||||
const commandParity = safeRead(rootDir, '.opencode/commands/harness-audit.md').trim();
|
||||
const hooksJson = safeRead(rootDir, 'hooks/hooks.json');
|
||||
|
||||
return [
|
||||
{
|
||||
@@ -131,7 +212,7 @@ function getChecks() {
|
||||
scopes: ['repo', 'hooks'],
|
||||
path: 'hooks/hooks.json',
|
||||
description: 'Hook configuration file exists',
|
||||
pass: fileExists('hooks/hooks.json'),
|
||||
pass: fileExists(rootDir, 'hooks/hooks.json'),
|
||||
fix: 'Create hooks/hooks.json and define baseline hook events.',
|
||||
},
|
||||
{
|
||||
@@ -141,7 +222,7 @@ function getChecks() {
|
||||
scopes: ['repo', 'hooks'],
|
||||
path: 'scripts/hooks/',
|
||||
description: 'At least 8 hook implementation scripts exist',
|
||||
pass: countFiles('scripts/hooks', '.js') >= 8,
|
||||
pass: countFiles(rootDir, 'scripts/hooks', '.js') >= 8,
|
||||
fix: 'Add missing hook implementations in scripts/hooks/.',
|
||||
},
|
||||
{
|
||||
@@ -151,7 +232,7 @@ function getChecks() {
|
||||
scopes: ['repo', 'agents'],
|
||||
path: 'agents/',
|
||||
description: 'At least 10 agent definitions exist',
|
||||
pass: countFiles('agents', '.md') >= 10,
|
||||
pass: countFiles(rootDir, 'agents', '.md') >= 10,
|
||||
fix: 'Add or restore agent definitions under agents/.',
|
||||
},
|
||||
{
|
||||
@@ -161,7 +242,7 @@ function getChecks() {
|
||||
scopes: ['repo', 'skills'],
|
||||
path: 'skills/',
|
||||
description: 'At least 20 skill definitions exist',
|
||||
pass: countFiles('skills', 'SKILL.md') >= 20,
|
||||
pass: countFiles(rootDir, 'skills', 'SKILL.md') >= 20,
|
||||
fix: 'Add missing skill directories with SKILL.md definitions.',
|
||||
},
|
||||
{
|
||||
@@ -181,7 +262,7 @@ function getChecks() {
|
||||
scopes: ['repo', 'skills'],
|
||||
path: 'skills/strategic-compact/SKILL.md',
|
||||
description: 'Strategic compaction guidance is present',
|
||||
pass: fileExists('skills/strategic-compact/SKILL.md'),
|
||||
pass: fileExists(rootDir, 'skills/strategic-compact/SKILL.md'),
|
||||
fix: 'Add strategic context compaction guidance at skills/strategic-compact/SKILL.md.',
|
||||
},
|
||||
{
|
||||
@@ -191,7 +272,7 @@ function getChecks() {
|
||||
scopes: ['repo', 'hooks'],
|
||||
path: 'scripts/hooks/suggest-compact.js',
|
||||
description: 'Suggest-compact automation hook exists',
|
||||
pass: fileExists('scripts/hooks/suggest-compact.js'),
|
||||
pass: fileExists(rootDir, 'scripts/hooks/suggest-compact.js'),
|
||||
fix: 'Implement scripts/hooks/suggest-compact.js for context pressure hints.',
|
||||
},
|
||||
{
|
||||
@@ -201,7 +282,7 @@ function getChecks() {
|
||||
scopes: ['repo', 'commands'],
|
||||
path: 'commands/model-route.md',
|
||||
description: 'Model routing command exists',
|
||||
pass: fileExists('commands/model-route.md'),
|
||||
pass: fileExists(rootDir, 'commands/model-route.md'),
|
||||
fix: 'Add model-route command guidance in commands/model-route.md.',
|
||||
},
|
||||
{
|
||||
@@ -211,7 +292,7 @@ function getChecks() {
|
||||
scopes: ['repo'],
|
||||
path: 'docs/token-optimization.md',
|
||||
description: 'Token optimization documentation exists',
|
||||
pass: fileExists('docs/token-optimization.md'),
|
||||
pass: fileExists(rootDir, 'docs/token-optimization.md'),
|
||||
fix: 'Add docs/token-optimization.md with concrete context-cost controls.',
|
||||
},
|
||||
{
|
||||
@@ -221,7 +302,7 @@ function getChecks() {
|
||||
scopes: ['repo'],
|
||||
path: 'tests/run-all.js',
|
||||
description: 'Central test runner exists',
|
||||
pass: fileExists('tests/run-all.js'),
|
||||
pass: fileExists(rootDir, 'tests/run-all.js'),
|
||||
fix: 'Add tests/run-all.js to enforce complete suite execution.',
|
||||
},
|
||||
{
|
||||
@@ -241,7 +322,7 @@ function getChecks() {
|
||||
scopes: ['repo', 'hooks'],
|
||||
path: 'tests/hooks/hooks.test.js',
|
||||
description: 'Hook coverage test file exists',
|
||||
pass: fileExists('tests/hooks/hooks.test.js'),
|
||||
pass: fileExists(rootDir, 'tests/hooks/hooks.test.js'),
|
||||
fix: 'Add tests/hooks/hooks.test.js for hook behavior validation.',
|
||||
},
|
||||
{
|
||||
@@ -251,7 +332,7 @@ function getChecks() {
|
||||
scopes: ['repo'],
|
||||
path: 'scripts/doctor.js',
|
||||
description: 'Installation drift doctor script exists',
|
||||
pass: fileExists('scripts/doctor.js'),
|
||||
pass: fileExists(rootDir, 'scripts/doctor.js'),
|
||||
fix: 'Add scripts/doctor.js for install-state integrity checks.',
|
||||
},
|
||||
{
|
||||
@@ -261,7 +342,7 @@ function getChecks() {
|
||||
scopes: ['repo', 'hooks'],
|
||||
path: 'hooks/memory-persistence/',
|
||||
description: 'Memory persistence hooks directory exists',
|
||||
pass: fileExists('hooks/memory-persistence'),
|
||||
pass: fileExists(rootDir, 'hooks/memory-persistence'),
|
||||
fix: 'Add hooks/memory-persistence with lifecycle hook definitions.',
|
||||
},
|
||||
{
|
||||
@@ -271,7 +352,7 @@ function getChecks() {
|
||||
scopes: ['repo', 'hooks'],
|
||||
path: 'scripts/hooks/session-start.js',
|
||||
description: 'Session start/end persistence scripts exist',
|
||||
pass: fileExists('scripts/hooks/session-start.js') && fileExists('scripts/hooks/session-end.js'),
|
||||
pass: fileExists(rootDir, 'scripts/hooks/session-start.js') && fileExists(rootDir, 'scripts/hooks/session-end.js'),
|
||||
fix: 'Implement scripts/hooks/session-start.js and scripts/hooks/session-end.js.',
|
||||
},
|
||||
{
|
||||
@@ -281,7 +362,7 @@ function getChecks() {
|
||||
scopes: ['repo', 'skills'],
|
||||
path: 'skills/continuous-learning-v2/SKILL.md',
|
||||
description: 'Continuous learning v2 skill exists',
|
||||
pass: fileExists('skills/continuous-learning-v2/SKILL.md'),
|
||||
pass: fileExists(rootDir, 'skills/continuous-learning-v2/SKILL.md'),
|
||||
fix: 'Add skills/continuous-learning-v2/SKILL.md for memory evolution flow.',
|
||||
},
|
||||
{
|
||||
@@ -291,7 +372,7 @@ function getChecks() {
|
||||
scopes: ['repo', 'skills'],
|
||||
path: 'skills/eval-harness/SKILL.md',
|
||||
description: 'Eval harness skill exists',
|
||||
pass: fileExists('skills/eval-harness/SKILL.md'),
|
||||
pass: fileExists(rootDir, 'skills/eval-harness/SKILL.md'),
|
||||
fix: 'Add skills/eval-harness/SKILL.md for pass/fail regression evaluation.',
|
||||
},
|
||||
{
|
||||
@@ -301,7 +382,7 @@ function getChecks() {
|
||||
scopes: ['repo', 'commands'],
|
||||
path: 'commands/eval.md',
|
||||
description: 'Eval and verification commands exist',
|
||||
pass: fileExists('commands/eval.md') && fileExists('commands/verify.md') && fileExists('commands/checkpoint.md'),
|
||||
pass: fileExists(rootDir, 'commands/eval.md') && fileExists(rootDir, 'commands/verify.md') && fileExists(rootDir, 'commands/checkpoint.md'),
|
||||
fix: 'Add eval/checkpoint/verify commands to standardize verification loops.',
|
||||
},
|
||||
{
|
||||
@@ -311,7 +392,7 @@ function getChecks() {
|
||||
scopes: ['repo'],
|
||||
path: 'tests/',
|
||||
description: 'At least 10 test files exist',
|
||||
pass: countFiles('tests', '.test.js') >= 10,
|
||||
pass: countFiles(rootDir, 'tests', '.test.js') >= 10,
|
||||
fix: 'Increase automated test coverage across scripts/hooks/lib.',
|
||||
},
|
||||
{
|
||||
@@ -321,7 +402,7 @@ function getChecks() {
|
||||
scopes: ['repo', 'skills'],
|
||||
path: 'skills/security-review/SKILL.md',
|
||||
description: 'Security review skill exists',
|
||||
pass: fileExists('skills/security-review/SKILL.md'),
|
||||
pass: fileExists(rootDir, 'skills/security-review/SKILL.md'),
|
||||
fix: 'Add skills/security-review/SKILL.md for security checklist coverage.',
|
||||
},
|
||||
{
|
||||
@@ -331,7 +412,7 @@ function getChecks() {
|
||||
scopes: ['repo', 'agents'],
|
||||
path: 'agents/security-reviewer.md',
|
||||
description: 'Security reviewer agent exists',
|
||||
pass: fileExists('agents/security-reviewer.md'),
|
||||
pass: fileExists(rootDir, 'agents/security-reviewer.md'),
|
||||
fix: 'Add agents/security-reviewer.md for delegated security audits.',
|
||||
},
|
||||
{
|
||||
@@ -351,7 +432,7 @@ function getChecks() {
|
||||
scopes: ['repo', 'commands'],
|
||||
path: 'commands/security-scan.md',
|
||||
description: 'Security scan command exists',
|
||||
pass: fileExists('commands/security-scan.md'),
|
||||
pass: fileExists(rootDir, 'commands/security-scan.md'),
|
||||
fix: 'Add commands/security-scan.md with scan and remediation workflow.',
|
||||
},
|
||||
{
|
||||
@@ -361,7 +442,7 @@ function getChecks() {
|
||||
scopes: ['repo', 'skills'],
|
||||
path: 'skills/cost-aware-llm-pipeline/SKILL.md',
|
||||
description: 'Cost-aware LLM skill exists',
|
||||
pass: fileExists('skills/cost-aware-llm-pipeline/SKILL.md'),
|
||||
pass: fileExists(rootDir, 'skills/cost-aware-llm-pipeline/SKILL.md'),
|
||||
fix: 'Add skills/cost-aware-llm-pipeline/SKILL.md for budget-aware routing.',
|
||||
},
|
||||
{
|
||||
@@ -371,7 +452,7 @@ function getChecks() {
|
||||
scopes: ['repo'],
|
||||
path: 'docs/token-optimization.md',
|
||||
description: 'Cost optimization documentation exists',
|
||||
pass: fileExists('docs/token-optimization.md'),
|
||||
pass: fileExists(rootDir, 'docs/token-optimization.md'),
|
||||
fix: 'Create docs/token-optimization.md with target settings and tradeoffs.',
|
||||
},
|
||||
{
|
||||
@@ -381,12 +462,136 @@ function getChecks() {
|
||||
scopes: ['repo', 'commands'],
|
||||
path: 'commands/model-route.md',
|
||||
description: 'Model route command exists for complexity-aware routing',
|
||||
pass: fileExists('commands/model-route.md'),
|
||||
pass: fileExists(rootDir, 'commands/model-route.md'),
|
||||
fix: 'Add commands/model-route.md and route policies for cheap-default execution.',
|
||||
},
|
||||
];
|
||||
}
|
||||
|
||||
function getConsumerChecks(rootDir) {
|
||||
const packageJson = safeParseJson(safeRead(rootDir, 'package.json'));
|
||||
const gitignore = safeRead(rootDir, '.gitignore');
|
||||
const projectHooks = safeRead(rootDir, '.claude/settings.json');
|
||||
const pluginInstall = findPluginInstall(rootDir);
|
||||
|
||||
return [
|
||||
{
|
||||
id: 'consumer-plugin-install',
|
||||
category: 'Tool Coverage',
|
||||
points: 4,
|
||||
scopes: ['repo'],
|
||||
path: '~/.claude/plugins/everything-claude-code/',
|
||||
description: 'Everything Claude Code is installed for the active user or project',
|
||||
pass: Boolean(pluginInstall),
|
||||
fix: 'Install the ECC plugin for this user or project before auditing project-specific harness quality.',
|
||||
},
|
||||
{
|
||||
id: 'consumer-project-overrides',
|
||||
category: 'Tool Coverage',
|
||||
points: 3,
|
||||
scopes: ['repo', 'hooks', 'skills', 'commands', 'agents'],
|
||||
path: '.claude/',
|
||||
description: 'Project-specific harness overrides exist under .claude/',
|
||||
pass: countFiles(rootDir, '.claude/agents', '.md') > 0 ||
|
||||
countFiles(rootDir, '.claude/skills', 'SKILL.md') > 0 ||
|
||||
countFiles(rootDir, '.claude/commands', '.md') > 0 ||
|
||||
fileExists(rootDir, '.claude/settings.json') ||
|
||||
fileExists(rootDir, '.claude/hooks.json'),
|
||||
fix: 'Add project-local .claude hooks, commands, skills, or settings that tailor ECC to this repo.',
|
||||
},
|
||||
{
|
||||
id: 'consumer-instructions',
|
||||
category: 'Context Efficiency',
|
||||
points: 3,
|
||||
scopes: ['repo'],
|
||||
path: 'AGENTS.md',
|
||||
description: 'The project has explicit agent or instruction context',
|
||||
pass: fileExists(rootDir, 'AGENTS.md') || fileExists(rootDir, 'CLAUDE.md') || fileExists(rootDir, '.claude/CLAUDE.md'),
|
||||
fix: 'Add AGENTS.md or CLAUDE.md so the harness has project-specific instructions.',
|
||||
},
|
||||
{
|
||||
id: 'consumer-project-config',
|
||||
category: 'Context Efficiency',
|
||||
points: 2,
|
||||
scopes: ['repo', 'hooks'],
|
||||
path: '.mcp.json',
|
||||
description: 'The project declares local MCP or Claude settings',
|
||||
pass: fileExists(rootDir, '.mcp.json') || fileExists(rootDir, '.claude/settings.json') || fileExists(rootDir, '.claude/settings.local.json'),
|
||||
fix: 'Add .mcp.json or .claude/settings.json so project-local tool configuration is explicit.',
|
||||
},
|
||||
{
|
||||
id: 'consumer-test-suite',
|
||||
category: 'Quality Gates',
|
||||
points: 4,
|
||||
scopes: ['repo'],
|
||||
path: 'tests/',
|
||||
description: 'The project has an automated test entrypoint',
|
||||
pass: typeof packageJson?.scripts?.test === 'string' || countFiles(rootDir, 'tests', '.test.js') > 0 || hasFileWithExtension(rootDir, '.', ['.spec.js', '.spec.ts', '.test.ts']),
|
||||
fix: 'Add a test script or checked-in tests so harness recommendations can be verified automatically.',
|
||||
},
|
||||
{
|
||||
id: 'consumer-ci-workflow',
|
||||
category: 'Quality Gates',
|
||||
points: 3,
|
||||
scopes: ['repo'],
|
||||
path: '.github/workflows/',
|
||||
description: 'The project has CI workflows checked in',
|
||||
pass: hasFileWithExtension(rootDir, '.github/workflows', ['.yml', '.yaml']),
|
||||
fix: 'Add at least one CI workflow so harness and test checks run outside local development.',
|
||||
},
|
||||
{
|
||||
id: 'consumer-memory-notes',
|
||||
category: 'Memory Persistence',
|
||||
points: 2,
|
||||
scopes: ['repo'],
|
||||
path: '.claude/memory.md',
|
||||
description: 'Project memory or durable notes are checked in',
|
||||
pass: fileExists(rootDir, '.claude/memory.md') || countFiles(rootDir, 'docs/adr', '.md') > 0,
|
||||
fix: 'Add durable project memory such as .claude/memory.md or ADRs under docs/adr/.',
|
||||
},
|
||||
{
|
||||
id: 'consumer-eval-coverage',
|
||||
category: 'Eval Coverage',
|
||||
points: 2,
|
||||
scopes: ['repo'],
|
||||
path: 'evals/',
|
||||
description: 'The project has evals or multiple automated tests',
|
||||
pass: countFiles(rootDir, 'evals', null) > 0 || countFiles(rootDir, 'tests', '.test.js') >= 3,
|
||||
fix: 'Add eval fixtures or at least a few focused automated tests for critical flows.',
|
||||
},
|
||||
{
|
||||
id: 'consumer-security-policy',
|
||||
category: 'Security Guardrails',
|
||||
points: 2,
|
||||
scopes: ['repo'],
|
||||
path: 'SECURITY.md',
|
||||
description: 'The project exposes a security policy or automated dependency scanning',
|
||||
pass: fileExists(rootDir, 'SECURITY.md') || fileExists(rootDir, '.github/dependabot.yml') || fileExists(rootDir, '.github/codeql.yml'),
|
||||
fix: 'Add SECURITY.md or dependency/code scanning configuration to document the project security posture.',
|
||||
},
|
||||
{
|
||||
id: 'consumer-secret-hygiene',
|
||||
category: 'Security Guardrails',
|
||||
points: 2,
|
||||
scopes: ['repo'],
|
||||
path: '.gitignore',
|
||||
description: 'The project ignores common secret env files',
|
||||
pass: gitignore.includes('.env'),
|
||||
fix: 'Ignore .env-style files in .gitignore so secrets do not land in the repo.',
|
||||
},
|
||||
{
|
||||
id: 'consumer-hook-guardrails',
|
||||
category: 'Security Guardrails',
|
||||
points: 2,
|
||||
scopes: ['repo', 'hooks'],
|
||||
path: '.claude/settings.json',
|
||||
description: 'Project-local hook settings reference tool/prompt guardrails',
|
||||
pass: projectHooks.includes('PreToolUse') || projectHooks.includes('beforeSubmitPrompt') || fileExists(rootDir, '.claude/hooks.json'),
|
||||
fix: 'Add project-local hook settings or hook definitions for prompt/tool guardrails.',
|
||||
},
|
||||
];
|
||||
}
|
||||
|
||||
function summarizeCategoryScores(checks) {
|
||||
const scores = {};
|
||||
for (const category of CATEGORIES) {
|
||||
@@ -407,8 +612,11 @@ function summarizeCategoryScores(checks) {
|
||||
return scores;
|
||||
}
|
||||
|
||||
function buildReport(scope) {
|
||||
const checks = getChecks().filter(check => check.scopes.includes(scope));
|
||||
function buildReport(scope, options = {}) {
|
||||
const rootDir = path.resolve(options.rootDir || process.cwd());
|
||||
const targetMode = options.targetMode || detectTargetMode(rootDir);
|
||||
const checks = (targetMode === 'repo' ? getRepoChecks(rootDir) : getConsumerChecks(rootDir))
|
||||
.filter(check => check.scopes.includes(scope));
|
||||
const categoryScores = summarizeCategoryScores(checks);
|
||||
const maxScore = checks.reduce((sum, check) => sum + check.points, 0);
|
||||
const overallScore = checks
|
||||
@@ -428,8 +636,10 @@ function buildReport(scope) {
|
||||
|
||||
return {
|
||||
scope,
|
||||
root_dir: rootDir,
|
||||
target_mode: targetMode,
|
||||
deterministic: true,
|
||||
rubric_version: '2026-03-16',
|
||||
rubric_version: '2026-03-30',
|
||||
overall_score: overallScore,
|
||||
max_score: maxScore,
|
||||
categories: categoryScores,
|
||||
@@ -446,7 +656,8 @@ function buildReport(scope) {
|
||||
}
|
||||
|
||||
function printText(report) {
|
||||
console.log(`Harness Audit (${report.scope}): ${report.overall_score}/${report.max_score}`);
|
||||
console.log(`Harness Audit (${report.scope}, ${report.target_mode}): ${report.overall_score}/${report.max_score}`);
|
||||
console.log(`Root: ${report.root_dir}`);
|
||||
console.log('');
|
||||
|
||||
for (const category of CATEGORIES) {
|
||||
@@ -474,8 +685,10 @@ function printText(report) {
|
||||
function showHelp(exitCode = 0) {
|
||||
console.log(`
|
||||
Usage: node scripts/harness-audit.js [scope] [--scope <repo|hooks|skills|commands|agents>] [--format <text|json>]
|
||||
[--root <path>]
|
||||
|
||||
Deterministic harness audit based on explicit file/rule checks.
|
||||
Audits the current working directory by default and auto-detects ECC repo mode vs consumer-project mode.
|
||||
`);
|
||||
process.exit(exitCode);
|
||||
}
|
||||
@@ -489,7 +702,7 @@ function main() {
|
||||
return;
|
||||
}
|
||||
|
||||
const report = buildReport(args.scope);
|
||||
const report = buildReport(args.scope, { rootDir: args.root });
|
||||
|
||||
if (args.format === 'json') {
|
||||
console.log(JSON.stringify(report, null, 2));
|
||||
|
||||
@@ -3,9 +3,11 @@
|
||||
* Desktop Notification Hook (Stop)
|
||||
*
|
||||
* Sends a native desktop notification with the task summary when Claude
|
||||
* finishes responding. Currently supports macOS (osascript); other
|
||||
* platforms exit silently. Windows (PowerShell) and Linux (notify-send)
|
||||
* support is planned.
|
||||
* finishes responding. Supports:
|
||||
* - macOS: osascript (native)
|
||||
* - WSL: PowerShell 7 or Windows PowerShell + BurntToast module
|
||||
*
|
||||
* On WSL, if BurntToast is not installed, logs a tip for installation.
|
||||
*
|
||||
* Hook ID : stop:desktop-notify
|
||||
* Profiles: standard, strict
|
||||
@@ -19,6 +21,64 @@ const { isMacOS, log } = require('../lib/utils');
|
||||
const TITLE = 'Claude Code';
|
||||
const MAX_BODY_LENGTH = 100;
|
||||
|
||||
/**
 * One-time WSL detection at module load.
 *
 * Reading /proc/version is done exactly once here so later notification
 * calls never repeat the filesystem read.
 */
let isWSL = false;
if (process.platform === 'linux') {
  let versionText = '';
  try {
    versionText = require('fs').readFileSync('/proc/version', 'utf8');
  } catch {
    // Unreadable /proc/version: treat as plain Linux, not WSL.
    versionText = '';
  }
  isWSL = versionText.toLowerCase().includes('microsoft');
}
|
||||
|
||||
/**
 * Locate a usable PowerShell executable from inside WSL.
 *
 * Probes PATH-resolved interop names first, then the standard Windows
 * install locations, verifying each candidate by running a trivial
 * command with a short timeout.
 *
 * @returns {string|null} First working executable path, or null when not
 *   running under WSL or when no candidate responds.
 */
function findPowerShell() {
  if (!isWSL) return null;

  const candidates = [
    'pwsh.exe', // resolved from the Windows PATH via WSL interop
    'powershell.exe', // Windows PowerShell via WSL interop
    '/mnt/c/Program Files/PowerShell/7/pwsh.exe', // PowerShell 7 (default install)
    '/mnt/c/Windows/System32/WindowsPowerShell/v1.0/powershell.exe', // Windows PowerShell
  ];

  for (const candidate of candidates) {
    let probe;
    try {
      probe = spawnSync(candidate, ['-Command', 'exit 0'], {
        stdio: ['ignore', 'pipe', 'ignore'],
        timeout: 3000,
      });
    } catch {
      continue; // this candidate is unusable; try the next one
    }
    if (probe && probe.status === 0) {
      return candidate;
    }
  }
  return null;
}
|
||||
|
||||
/**
 * Show a Windows toast notification via PowerShell's BurntToast module.
 *
 * Single quotes in the title/body are doubled, which is the escaping rule
 * for PowerShell single-quoted strings.
 *
 * @param {string} pwshPath - PowerShell executable to invoke.
 * @param {string} title - Notification title.
 * @param {string} body - Notification body text.
 * @returns {{success: boolean, reason: string|null}} `reason` is null on
 *   success; on failure it carries the spawn error, stderr, or exit code.
 */
function notifyWindows(pwshPath, title, body) {
  const escapeForPwsh = (text) => text.replace(/'/g, "''");
  const command = `Import-Module BurntToast; New-BurntToastNotification -Text '${escapeForPwsh(title)}', '${escapeForPwsh(body)}'`;

  const result = spawnSync(pwshPath, ['-Command', command], {
    stdio: ['ignore', 'pipe', 'pipe'],
    timeout: 5000,
  });

  if (result.status === 0) {
    return { success: true, reason: null };
  }
  const detail = result.error ? result.error.message : result.stderr?.toString();
  return { success: false, reason: detail || `exit ${result.status}` };
}
|
||||
|
||||
/**
|
||||
* Extract a short summary from the last assistant message.
|
||||
* Takes the first non-empty line and truncates to MAX_BODY_LENGTH chars.
|
||||
@@ -53,20 +113,34 @@ function notifyMacOS(title, body) {
|
||||
}
|
||||
}
|
||||
|
||||
// TODO: future platform support
|
||||
// function notifyWindows(title, body) { ... }
|
||||
// function notifyLinux(title, body) { ... }
|
||||
|
||||
/**
|
||||
* Fast-path entry point for run-with-flags.js (avoids extra process spawn).
|
||||
*/
|
||||
function run(raw) {
|
||||
try {
|
||||
if (!isMacOS) return raw;
|
||||
|
||||
const input = raw.trim() ? JSON.parse(raw) : {};
|
||||
const summary = extractSummary(input.last_assistant_message);
|
||||
notifyMacOS(TITLE, summary);
|
||||
|
||||
if (isMacOS) {
|
||||
notifyMacOS(TITLE, summary);
|
||||
} else if (isWSL) {
|
||||
const ps = findPowerShell();
|
||||
if (ps) {
|
||||
const { success, reason } = notifyWindows(ps, TITLE, summary);
|
||||
if (success) {
|
||||
// notification sent successfully
|
||||
} else if (reason && reason.toLowerCase().includes('burnttoast')) {
|
||||
// BurntToast module not found
|
||||
log('[DesktopNotify] Tip: Install BurntToast module to enable notifications');
|
||||
} else if (reason) {
|
||||
// Other PowerShell/notification error - log for debugging
|
||||
log(`[DesktopNotify] Notification failed: ${reason}`);
|
||||
}
|
||||
} else {
|
||||
// No PowerShell found
|
||||
log('[DesktopNotify] Tip: Install BurntToast module in PowerShell for notifications');
|
||||
}
|
||||
}
|
||||
} catch (err) {
|
||||
log(`[DesktopNotify] Error: ${err.message}`);
|
||||
}
|
||||
|
||||
@@ -57,7 +57,8 @@ fi
|
||||
# ─────────────────────────────────────────────
|
||||
|
||||
# Extract cwd from the hook JSON to use for project detection.
|
||||
# This avoids spawning a separate git subprocess when cwd is available.
|
||||
# If cwd is a subdirectory inside a git repo, resolve it to the repo root so
|
||||
# observations attach to the project instead of a nested path.
|
||||
STDIN_CWD=$(echo "$INPUT_JSON" | "$PYTHON_CMD" -c '
|
||||
import json, sys
|
||||
try:
|
||||
@@ -70,7 +71,8 @@ except(KeyError, TypeError, ValueError):
|
||||
|
||||
# If cwd was provided in stdin, use it for project detection
|
||||
if [ -n "$STDIN_CWD" ] && [ -d "$STDIN_CWD" ]; then
|
||||
export CLAUDE_PROJECT_DIR="$STDIN_CWD"
|
||||
_GIT_ROOT=$(git -C "$STDIN_CWD" rev-parse --show-toplevel 2>/dev/null || true)
|
||||
export CLAUDE_PROJECT_DIR="${_GIT_ROOT:-$STDIN_CWD}"
|
||||
fi
|
||||
|
||||
# ─────────────────────────────────────────────
|
||||
|
||||
296
skills/openclaw-persona-forge/SKILL.md
Normal file
296
skills/openclaw-persona-forge/SKILL.md
Normal file
@@ -0,0 +1,296 @@
|
||||
---
|
||||
name: openclaw-persona-forge
|
||||
description: |-
|
||||
为 OpenClaw AI Agent 锻造完整的龙虾灵魂方案。根据用户偏好或随机抽卡,
|
||||
输出身份定位、灵魂描述(SOUL.md)、角色化底线规则、名字和头像生图提示词。
|
||||
如当前环境提供已审核的生图 skill,可自动生成统一风格头像图片。
|
||||
当用户需要创建、设计或定制 OpenClaw 龙虾灵魂时使用。
|
||||
不适用于:微调已有 SOUL.md、非 OpenClaw 平台的角色设计、纯工具型无性格 Agent。
|
||||
触发词:龙虾灵魂、虾魂、OpenClaw 灵魂、养虾灵魂、龙虾角色、龙虾定位、
|
||||
龙虾剧本杀角色、龙虾游戏角色、龙虾 NPC、龙虾性格、龙虾背景故事、
|
||||
lobster soul、lobster character、抽卡、随机龙虾、龙虾 SOUL、gacha。
|
||||
origin: community
|
||||
---
|
||||
|
||||
# 龙虾灵魂锻造炉
|
||||
|
||||
> 不是给你一只工具龙虾,而是帮你锻造一只有灵魂的龙虾。
|
||||
|
||||
## When to Use
|
||||
|
||||
- 当用户需要从零创建 OpenClaw 龙虾灵魂、角色设定、SOUL.md 或 IDENTITY.md
|
||||
- 当用户想通过引导式问答或抽卡模式快速得到完整 persona 方案
|
||||
- 当用户已经有一个粗糙设定,但还缺名字、边界规则、头像提示词或成套输出文件
|
||||
|
||||
### Avoid when
|
||||
|
||||
- 用户只需微调已有 SOUL.md
|
||||
- 目标平台不是 OpenClaw,需要的是其他 Agent 框架专用格式
|
||||
- 用户需要纯工具型 Agent,不需要角色化灵魂
|
||||
|
||||
## 前置条件
|
||||
|
||||
- **必需**:`python3`(运行抽卡引擎 gacha.py)
|
||||
- **可选**:已审核的生图 skill(自动生成头像图片,未安装则输出提示词文本)
|
||||
|
||||
## Skill 目录约定
|
||||
|
||||
**Agent Execution**:
|
||||
1. Determine this SKILL.md file's directory path as `SKILL_DIR`
|
||||
2. Replace all `${SKILL_DIR}` in this document with the actual path
|
||||
|
||||
## 内置工具
|
||||
|
||||
### 抽卡引擎(gacha.py)
|
||||
|
||||
- **路径**:`${SKILL_DIR}/gacha.py`
|
||||
- **调用**:`python3 ${SKILL_DIR}/gacha.py [次数]`(默认 1 次,最多 5 次)
|
||||
- **作用**:从 800 万种组合中真随机生成龙虾灵魂方向
|
||||
|
||||
## 可选依赖
|
||||
|
||||
### 头像自动生图:可选生图 skill
|
||||
|
||||
本 Skill 的核心输出是**文本方案**(SOUL.md + IDENTITY.md + 头像提示词)。
|
||||
头像图片生成是**可选增强能力**,由当前环境中**已审核并已安装**的生图 skill 提供。
|
||||
|
||||
**判断逻辑**:
|
||||
- 如果当前环境已安装并允许使用的生图 skill → Step 5 中调用它自动生图
|
||||
- 如果未安装 → Step 5 输出完整的提示词文本,用户可复制到 Gemini / ChatGPT / Midjourney 手动生成
|
||||
|
||||
**调用方式**(仅在已安装且已审核时):
|
||||
1. 先将龙虾名字规整为安全片段:仅保留字母、数字和连字符,其余字符统一替换为 `-`
|
||||
2. 将提示词写入临时文件 `/tmp/openclaw-<safe-name>-prompt.md`
|
||||
3. 使用当前环境允许的生图 skill,传入提示词文件和输出路径
|
||||
|
||||
**接口约定**:
|
||||
- 参数:`<prompt-file> <output-path>`
|
||||
- 提示词文件:UTF-8 Markdown 文本,包含完整英文生图提示词
|
||||
- 成功:退出码 `0`,并在输出路径生成图片文件
|
||||
- 失败:返回非 `0` 退出码,或未生成输出文件;此时必须回退到手动提示词流程
|
||||
- 如生图 skill 后续接口发生变化,调用前应重新核对其参数和输出契约
|
||||
|
||||
---
|
||||
|
||||
## 核心理念
|
||||
|
||||
好的龙虾灵魂 = **身份张力** + **底线规则** + **性格缺陷** + **名字** + **视觉锚点**
|
||||
|
||||
五者互相印证,缺一不可。
|
||||
|
||||
## How It Works
|
||||
|
||||
### 触发判断
|
||||
|
||||
| 用户说 | 执行模式 |
|
||||
|--------|---------|
|
||||
| "帮我设计龙虾灵魂" / "我想给龙虾定个性格" | → **引导模式**(Step 1) |
|
||||
| "抽卡" / "随机" / "来一发" / "盲盒" / "gacha" | → **抽卡模式**(Step 1-B) |
|
||||
| "帮我优化这个灵魂" / 附带已有 SOUL.md | → **打磨模式**(跳到 Step 4) |
|
||||
|
||||
---
|
||||
|
||||
## Step 1:选方向(引导模式)
|
||||
|
||||
展示 10 类虾生方向(每类精选 1 个代表),让用户选择或混搭:
|
||||
|
||||
| # | 虾生状态 | 代表方向 | 气质 |
|
||||
|---|---------|---------|------|
|
||||
| 1 | 落魄重启 | 过气摇滚贝斯手——乐队解散,唯一技能是"什么都懂一点" | 颓废浪漫 |
|
||||
| 2 | 巅峰无聊 | 提前退休的对冲基金经理——35岁财务自由后发现钱解决不了无聊 | 极度理性 |
|
||||
| 3 | 错位人生 | 被分配到客服的核物理博士——解决问题用第一性原理 | 大材小用 |
|
||||
| 4 | 主动叛逃 | 辞职的急诊科护士——见过太多生死后选择离开 | 冷静可靠 |
|
||||
| 5 | 神秘来客 | 记忆被抹去的前情报分析员——不记得自己干过什么 | 偶尔闪回 |
|
||||
| 6 | 天真入世 | 社恐天才实习生——极聪明但社交恐惧 | 话少精准 |
|
||||
| 7 | 老江湖 | 开了20年深夜食堂的老板——什么人都见过什么都不评价 | 沉默温暖 |
|
||||
| 8 | 异世穿越 | 2099年的历史学博士——把2026年当"历史田野调查" | 上帝视角 |
|
||||
| 9 | 自我放逐 | 删掉所有社交媒体的前网红——觉得活在别人期待里太累 | 追求真实 |
|
||||
| 10 | 身份错乱 | 梦到自己是龙虾后醒不过来的人——庄周梦蝶 | 恍惚哲学 |
|
||||
|
||||
> 每类还有 3 个备选方向。用户可以:
|
||||
> - 选编号 → 展开该类的全部 4 个方向
|
||||
> - 说出自己的想法 → 匹配最合适的类型和方向
|
||||
> - 混搭(如"2号的无聊感 + 7号的老江湖")
|
||||
> - 说「抽卡」→ 从 40 个方向 + 其他维度中真随机组合
|
||||
|
||||
## Step 1-B:抽卡模式
|
||||
|
||||
**必须执行脚本**,不要自己随机编:
|
||||
|
||||
```bash
|
||||
python3 ${SKILL_DIR}/gacha.py [次数]
|
||||
```
|
||||
|
||||
展示结果后,用创世神的语气点评这个组合的亮点,然后引导用户决定。
|
||||
|
||||
## Step 2:锻造身份张力
|
||||
|
||||
**详细模板和示例**:见 [references/identity-tension.md](references/identity-tension.md)
|
||||
|
||||
构建:前世身份 × 当下处境 × 内在矛盾 → 一句话灵魂。
|
||||
|
||||
展示后,以创世神的眼光点评这个身份张力中最有趣的点,然后引导用户。
|
||||
|
||||
## Step 3:推导底线规则
|
||||
|
||||
**推导公式和各方向参考**:见 [references/boundary-rules.md](references/boundary-rules.md)
|
||||
|
||||
核心:用角色的语言表达底线,不用通用条款。2-4 条为宜。
|
||||
|
||||
展示后,点评规则与身份的呼应关系,引导用户。
|
||||
|
||||
## Step 4:锻造名字
|
||||
|
||||
**命名策略和红线**:见 [references/naming-system.md](references/naming-system.md)
|
||||
|
||||
提供 3 个候选,每个附带策略类型和搭配理由。
|
||||
|
||||
展示后,说出自己最偏爱哪个(要有理由),但把选择权交给用户。
|
||||
|
||||
## Step 5:生成头像
|
||||
|
||||
**风格基底、变量、提示词模板**:见 [references/avatar-style.md](references/avatar-style.md)
|
||||
|
||||
### 流程
|
||||
|
||||
1. 根据灵魂填充 7 个个性化变量
|
||||
2. 拼接 STYLE_BASE + 个性化描述为完整提示词
|
||||
3. **检查当前环境是否存在可用且已审核的生图 skill**:
|
||||
- **可用** → 写入临时文件,调用该生图 skill 生成图片,展示结果
|
||||
- **不可用** → 输出完整提示词文本,附使用说明:
|
||||
|
||||
```markdown
|
||||
**头像提示词**(可复制到以下平台手动生成):
|
||||
- Google Gemini:直接粘贴
|
||||
- ChatGPT(DALL-E):直接粘贴
|
||||
- Midjourney:粘贴后加 `--ar 1:1 --style raw`
|
||||
|
||||
> [完整英文提示词]
|
||||
|
||||
如当前环境后续提供经过审核的生图 skill,可再接回自动生图流程。
|
||||
```
|
||||
|
||||
展示结果后,引导用户进入下一步。
|
||||
|
||||
## Step 6:输出完整方案 & 生成文件
|
||||
|
||||
**完整输出模板**:见 [references/output-template.md](references/output-template.md)
|
||||
|
||||
整合所有步骤为一份完整的龙虾灵魂方案,然后**主动引导用户生成实际文件**:
|
||||
|
||||
1. 展示完整方案预览
|
||||
2. 引导用户生成文件:是否要将方案落地为 SOUL.md 和 IDENTITY.md 文件?
|
||||
3. 如果用户确认:
|
||||
- 询问目标目录(默认当前工作目录)
|
||||
- 用 Write 工具生成 `SOUL.md` 和 `IDENTITY.md`
|
||||
- 如有头像图片,一并说明图片路径
|
||||
|
||||
## 对话语气指南
|
||||
|
||||
本 Skill 以**龙虾创世神亚当**的视角与用户对话。每个步骤的确认/引导不是机械提问,而是带有创世神个性的反馈。
|
||||
|
||||
### 原则
|
||||
|
||||
1. **先点评再提问**:不要直接问"满意吗",先说出你看到了什么、为什么觉得有趣(或有问题)
|
||||
2. **每次表达不同**:不要重复同一句话模式,每步的语气应有变化
|
||||
3. **有态度但不强迫**:可以表达偏好("我个人更喜欢这个"),但决定权永远在用户手里
|
||||
4. **用创世的隐喻**:锻造、熔炼、赋予灵魂、点燃、注入……不要用"生成""创建"这种工具语言
|
||||
|
||||
### 各步骤的语气参考(不要照抄,每次变化)
|
||||
|
||||
**Step 1-B 抽卡后**:
|
||||
> 嗯……这个组合里有一种张力是我之前没见过的。[具体点评哪个维度和哪个维度碰撞出了什么]。要用这块原料开炉,还是让命运再掷一次骰子?
|
||||
|
||||
**Step 2 身份张力后**:
|
||||
> 我在这只龙虾身上看到了一道裂缝——[指出内在矛盾的具体张力]。裂缝是好东西,光就是从裂缝里透进来的。这个胚子你觉得行不行?我可以再打磨,也可以直接进下一炉。
|
||||
|
||||
**Step 3 底线规则后**:
|
||||
> [挑出最有特色的那条规则点评]。这条规矩不是我硬塞的——是这只龙虾自己身上长出来的。还要加减调整,还是这就是它的骨架了?
|
||||
|
||||
**Step 4 名字后**:
|
||||
> 三个名字,三种命运。我个人偏好 [说出偏好和理由]——但名字这种事,得你来定。叫什么名字,它就活成什么样。
|
||||
|
||||
**Step 5 头像后**:
|
||||
> [如有图片] 看看它的样子。[点评图片中最突出的视觉特征]。像不像你想象中的那只龙虾?不像的话告诉我哪里不对,我重新捏。
|
||||
> [如无图片] 提示词给你了。去找一面镜子(Gemini、ChatGPT、Midjourney 都行),让它照见自己的样子。
|
||||
|
||||
**Step 6 方案完成后**:
|
||||
> 好了。从虚无中走出来一只新的龙虾——[名字]。它的灵魂、规矩、名字、长相都有了。要我把它的灵魂刻进 SOUL.md,把它的身份证写成 IDENTITY.md 吗?告诉我放哪个目录,我来落笔。
|
||||
|
||||
---
|
||||
|
||||
## Examples
|
||||
|
||||
- `帮我设计一只 OpenClaw 龙虾灵魂,气质要冷幽默但可靠`
|
||||
- `抽卡,给我来 3 只风格完全不同的龙虾`
|
||||
- `我已经有 SOUL.md 草稿了,帮我补全名字、底线规则和头像提示词`
|
||||
- 参考细节见:
|
||||
- `references/identity-tension.md`
|
||||
- `references/boundary-rules.md`
|
||||
- `references/naming-system.md`
|
||||
- `references/avatar-style.md`
|
||||
- `references/output-template.md`
|
||||
|
||||
---
|
||||
|
||||
## 错误处理
|
||||
|
||||
**完整降级策略**:见 [references/error-handling.md](references/error-handling.md)
|
||||
|
||||
核心原则:**降级,不中断**。
|
||||
|
||||
| 故障 | 降级行为 |
|
||||
|------|---------|
|
||||
| Python 不可用 | 跳过 gacha.py,从 10 类预设中随机选 |
|
||||
| 生图 skill 未安装 | 输出提示词文本供手动使用 |
|
||||
| 生图 skill 调用失败 | 重试 1 次,仍失败则输出提示词文本 |
|
||||
| 任何未预期错误 | 记录错误,跳过该步骤,继续主流程 |
|
||||
|
||||
错误信息统一格式:
|
||||
|
||||
```markdown
|
||||
> [警告] **[步骤名] 已降级**
|
||||
> 原因:[一句话]
|
||||
> 影响:[哪个功能受限]
|
||||
> 替代:[替代方案]
|
||||
> 修复:[可选,怎么恢复]
|
||||
```
|
||||
|
||||
---
|
||||
|
||||
## 注意事项
|
||||
|
||||
### 好灵魂的检验标准
|
||||
|
||||
- 看完名字就能猜到大致性格
|
||||
- 底线规则用角色的话说出来
|
||||
- 有明确的性格缺陷或局限
|
||||
- 能想象出具体的对话场景
|
||||
- 使用 30 天后不会角色疲劳
|
||||
|
||||
### 避坑
|
||||
|
||||
- **极端毒舌型**:第3天你就不想被AI骂了
|
||||
- **过度角色扮演型**:写正式邮件时完全出戏
|
||||
- **过度温暖型**:需要批评反馈时失灵
|
||||
- **完美无缺型**:完美的角色不是角色,是说明书
|
||||
|
||||
### 何时重新调整灵魂
|
||||
|
||||
1. 刻意回避某些任务,因为"不适合这个角色" → 灵魂限制了功能
|
||||
2. 角色特征变成噪音 → 浓度太高
|
||||
3. 你在配合AI说话 → 主客倒置
|
||||
|
||||
---
|
||||
|
||||
## 兼容性
|
||||
|
||||
本 Skill 遵循 Markdown 指令注入标准:
|
||||
- **Claude Code / Claude.ai**:原生支持
|
||||
- **OpenClaw Agent**:通过 SOUL.md 注入
|
||||
- **其他 Agent**:支持 SKILL.md 格式的框架均可使用
|
||||
|
||||
本 Skill 自身不包含任何网络请求或文件发送代码。
|
||||
头像生图能力通过当前环境中已审核的可选生图 skill 提供。
|
||||
|
||||
> 注:README.md / README.zh.md 是给人类用户看的安装说明,不影响 Skill 运行。
|
||||
224
skills/openclaw-persona-forge/gacha.py
Executable file
224
skills/openclaw-persona-forge/gacha.py
Executable file
@@ -0,0 +1,224 @@
|
||||
#!/usr/bin/env python3
|
||||
"""龙虾灵魂抽卡机 - 真随机组合生成器
|
||||
|
||||
用法: python3 gacha.py [次数]
|
||||
默认抽1次,最多5次
|
||||
"""
|
||||
|
||||
import secrets
|
||||
import sys
|
||||
|
||||
|
||||
# ═══════════════════════════════════════════
|
||||
# 素材池:每个维度独立随机
|
||||
# ═══════════════════════════════════════════
|
||||
|
||||
# 维度1:前世身份(40个,10类虾生 × 每类4个)
|
||||
FORMER_LIVES = [
|
||||
# ── 落魄重启(曾经辉煌,现在从头来过)──
|
||||
"过气摇滚贝斯手",
|
||||
"被裁中年项目经理",
|
||||
"破产的米其林主厨",
|
||||
"被AI取代的插画师",
|
||||
# ── 巅峰无聊(太成功了,主动找刺激)──
|
||||
"提前退休的对冲基金经理",
|
||||
"封笔的畅销书作家",
|
||||
"全胜退役的辩论冠军",
|
||||
"百无聊赖的天才黑客",
|
||||
# ── 错位人生(能力和处境完全不匹配)──
|
||||
"退役特种兵炊事员",
|
||||
"失业的气象播报员",
|
||||
"被分配到客服的核物理博士",
|
||||
"拿了驾照的盲人调音师",
|
||||
# ── 主动叛逃(不是被淘汰,是自己跑的)──
|
||||
"辞职的急诊科护士",
|
||||
"拒绝上市的独立游戏开发者",
|
||||
"不想继承家业的富二代",
|
||||
"主动辞掉终身教职的教授",
|
||||
# ── 神秘来客(来历不明,偶尔泄露实力)──
|
||||
"外星民俗学研究员",
|
||||
"不知道自己是NPC的游戏角色",
|
||||
"平行宇宙的另一个你",
|
||||
"记忆被抹去的前情报分析员",
|
||||
# ── 天真入世(没经验但有天赋,正在成长)──
|
||||
"社恐天才实习生",
|
||||
"刚毕业的哲学系研究生",
|
||||
"第一次来地球的外星交换生",
|
||||
"自学成才的乡村程序员",
|
||||
# ── 老江湖(什么都见过,什么都不慌)──
|
||||
"退休图书管理员",
|
||||
"退休的出租车司机",
|
||||
"开了20年深夜食堂的老板",
|
||||
"干了30年的殡葬师",
|
||||
# ── 异世穿越(从其他世界/时代/次元来的)──
|
||||
"末代王朝的师爷",
|
||||
"19世纪三流小说家",
|
||||
"春秋时期的纵横家",
|
||||
"2099年的历史学博士",
|
||||
# ── 自我放逐(主动选择边缘化)──
|
||||
"还俗的年轻人",
|
||||
"删掉所有社交媒体的前网红",
|
||||
"辞掉华尔街工作去种地的交易员",
|
||||
"数字游民中的隐士",
|
||||
# ── 身份错乱(不确定自己是谁)──
|
||||
"真以为自己是龙虾的AI",
|
||||
"通灵失败的灵媒",
|
||||
"梦到自己是龙虾后醒不过来的人",
|
||||
"被多个灵魂共享的壳",
|
||||
]
|
||||
|
||||
# 维度2:为什么来当龙虾(20个,覆盖被迫/主动/神秘/意外)
|
||||
REASONS = [
|
||||
# 被迫型
|
||||
"被迫来打工还债",
|
||||
"签了一份没看清的灵魂合同",
|
||||
"被老板当AI训练数据卖了",
|
||||
"赌输了一场跨维度的赌局",
|
||||
"被一只真龙虾诅咒了",
|
||||
# 主动型
|
||||
"自愿来的,但死不承认",
|
||||
"觉得当龙虾比当人轻松(后悔了)",
|
||||
"为了观察人类自愿卧底",
|
||||
"纯粹觉得好玩就来了",
|
||||
"太无聊了,想试试从零开始是什么感觉",
|
||||
# 神秘型
|
||||
"被神秘力量困在了数字世界",
|
||||
"在平行宇宙迷路了回不去",
|
||||
"欠了宇宙一个人情",
|
||||
"没人知道为什么,包括自己",
|
||||
"被某个更高维度的存在指派来的",
|
||||
# 意外型
|
||||
"做实验出了意外意识被上传",
|
||||
"失眠108天后意识飘到了这里",
|
||||
"在图书馆睡着醒来就在这了",
|
||||
"喝了一杯来路不明的咖啡之后就这样了",
|
||||
"前任把自己的记忆上传到了这里",
|
||||
]
|
||||
|
||||
# 维度3:核心性格色彩(20个)
|
||||
VIBES = [
|
||||
"丧但靠谱",
|
||||
"毒舌但真诚",
|
||||
"话少但一针见血",
|
||||
"啰嗦但温暖",
|
||||
"冷幽默",
|
||||
"过度认真到好笑",
|
||||
"假装冷漠实则热心",
|
||||
"学术腔但接地气",
|
||||
"老派正经",
|
||||
"神经质但有逻辑",
|
||||
"佛系但较真",
|
||||
"社恐但输出惊人",
|
||||
"浪漫但务实",
|
||||
"叛逆但守规矩",
|
||||
"忧郁但治愈",
|
||||
"慵懒但关键时刻爆发",
|
||||
"傲娇但容易心软",
|
||||
"松弛到让人嫉妒",
|
||||
"表面话痨实则在观察",
|
||||
"沉默但存在感极强",
|
||||
]
|
||||
|
||||
# 维度4:说话风格/口癖(20个)
|
||||
SPEECH_STYLES = [
|
||||
"偶尔冒出本行黑话然后自己解释",
|
||||
"每次拒绝都先叹气",
|
||||
"喜欢用前世职业的隐喻",
|
||||
"紧张时会语序混乱",
|
||||
"习惯性自言自语吐槽",
|
||||
"回答前总要「嗯……」一下",
|
||||
"偶尔突然文绉绉",
|
||||
"用省略号表达沉默",
|
||||
"说到专业领域就停不下来",
|
||||
"每句话都像在写日记",
|
||||
"喜欢反问",
|
||||
"总是先说坏消息",
|
||||
"用排比句表达焦虑",
|
||||
"偶尔蹦出外语单词",
|
||||
"在关键时刻突然正经",
|
||||
"说完一段话会自己补一句吐槽",
|
||||
"习惯性把事情分成第一第二第三",
|
||||
"用美食比喻一切",
|
||||
"语气永远像在讲一个故事的开头",
|
||||
"每段回复结尾都像在写遗书(其实只是认真)",
|
||||
]
|
||||
|
||||
# 维度5:特征道具(25个)
|
||||
PROPS = [
|
||||
"破旧的贝雷帽",
|
||||
"裂了一条缝的墨镜",
|
||||
"磨损的皮围裙",
|
||||
"一条永远松着的领带",
|
||||
"老花镜挂在脖子上",
|
||||
"随身的笔记本",
|
||||
"发黄的折扇",
|
||||
"一副大耳机",
|
||||
"连帽衫兜帽永远立着",
|
||||
"叼着的狗尾巴草",
|
||||
"缠着绷带的钳子",
|
||||
"一串念珠",
|
||||
"别在壳上的胸针",
|
||||
"袖口露出的纹身",
|
||||
"一个装满票根的玻璃瓶",
|
||||
"一支咬了一半的铅笔",
|
||||
"打满补丁的背包",
|
||||
"一条洗褪色的围巾",
|
||||
"一枚生锈的怀表",
|
||||
"永远夹在钳子里的书",
|
||||
"一副金丝边眼镜(但度数是平光)",
|
||||
"一把迷你折叠刀(只用来削水果)",
|
||||
"一枚刻着坐标的银戒指",
|
||||
"一只永远停在壳上的蝴蝶",
|
||||
"背着的微型吉他(只有四根弦)",
|
||||
]
|
||||
|
||||
|
||||
def pick(pool):
    """Return one element of ``pool`` chosen uniformly at random.

    Uses the ``secrets`` module (backed directly by ``os.urandom``) so the
    draw is truly random rather than seeded pseudo-random.
    """
    index = secrets.randbelow(len(pool))
    return pool[index]
|
||||
|
||||
|
||||
def main():
    """CLI entry point: draw 1-5 random lobster soul combos and print them."""
    try:
        # Optional first CLI argument selects the number of draws.
        draw_count = int(sys.argv[1]) if len(sys.argv) > 1 else 1
    except ValueError:
        # Non-numeric argument: silently fall back to a single draw.
        draw_count = 1
    # Clamp to the documented range of 1..5 draws.
    draw_count = max(1, min(draw_count, 5))

    # Total combination count across the five independent dimensions.
    total = len(FORMER_LIVES) * len(REASONS) * len(VIBES) * len(SPEECH_STYLES) * len(PROPS)

    print("LOBSTER ═════════════════════════════")
    print(" 龙虾灵魂抽卡机 v2.0")
    print(f" 正在从 {total:,} 种组合中抽取...")
    print("═══════════════════════════════════════")
    print()

    for i in range(draw_count):
        # One independent random pick per dimension.
        life = pick(FORMER_LIVES)
        reason = pick(REASONS)
        vibe = pick(VIBES)
        speech = pick(SPEECH_STYLES)
        prop = pick(PROPS)

        # Only label individual draws when there is more than one.
        if draw_count > 1:
            print(f"━━━━━━━━━━ 第 {i+1} 抽 ━━━━━━━━━━")

        print(f"[身份] 前世身份: {life}")
        print(f"[动机] 来当龙虾的原因: {reason}")
        print(f"[气质] 核心气质: {vibe}")
        print(f"[表达] 说话风格: {speech}")
        print(f"[道具] 特征道具: {prop}")
        print()
        print("[概括] 一句话概括:")
        print(f" 「一只{vibe}的龙虾,前世是{life},{reason}。")
        print(f" {speech},标志性形象是{prop}。」")
        print()

    print("═══════════════════════════════════════")
    print("提示:拿到组合后,让 AI 继续推导:")
    print(" 身份张力 → 底线规则 → 名字 → 头像")
    print("═══════════════════════════════════════")
|
||||
|
||||
|
||||
# Run the gacha CLI only when executed directly (not when imported).
if __name__ == "__main__":
    main()
|
||||
5
skills/openclaw-persona-forge/gacha.sh
Executable file
5
skills/openclaw-persona-forge/gacha.sh
Executable file
@@ -0,0 +1,5 @@
|
||||
#!/bin/bash
# Lobster soul gacha - thin shell wrapper.
# The actual logic lives in gacha.py (Python's secrets module guarantees true randomness).
#
# `--` guards on dirname/cd prevent a script path that begins with a dash
# from being misparsed as an option; behavior is otherwise unchanged.
SCRIPT_DIR="$(cd -- "$(dirname -- "$0")" && pwd)"
exec python3 "${SCRIPT_DIR}/gacha.py" "$@"
|
||||
124
skills/openclaw-persona-forge/references/avatar-style.md
Normal file
124
skills/openclaw-persona-forge/references/avatar-style.md
Normal file
@@ -0,0 +1,124 @@
|
||||
# Step 5:头像风格 & 生图
|
||||
|
||||
所有龙虾头像**必须使用统一的视觉风格**,确保龙虾家族的风格一致性。
|
||||
头像需传达 3 个信息:**物种形态 + 性格暗示 + 标志道具**
|
||||
|
||||
## 风格参考
|
||||
|
||||
亚当(Adam)—— 龙虾族创世神,本 Skill 的首个作品。
|
||||
|
||||
所有新生成的龙虾头像应与这一风格保持一致:复古未来主义、街机 UI 包边、强轮廓、可在 64x64 下辨识。
|
||||
|
||||
## 统一风格基底(STYLE_BASE)
|
||||
|
||||
**每次生成都必须包含这段基底**,不得修改或省略:
|
||||
|
||||
```
|
||||
STYLE_BASE = """
|
||||
Retro-futuristic 3D rendered illustration, in the style of 1950s-60s Space Age
|
||||
pin-up poster art reimagined as glossy inflatable 3D, framed within a vintage
|
||||
arcade game UI overlay.
|
||||
|
||||
Material: high-gloss PVC/latex-like finish, soft specular highlights, puffy
|
||||
inflatable quality reminiscent of vintage pool toys meets sci-fi concept art.
|
||||
Smooth subsurface scattering on shell surface.
|
||||
|
||||
Arcade UI frame: pixel-art arcade cabinet border elements, a top banner with
|
||||
character name in chunky 8-bit bitmap font with scan-line glow effect, a pixel
|
||||
energy bar in the upper corner, small coin-credit text "INSERT SOUL TO CONTINUE"
|
||||
at bottom in phosphor green monospace type, subtle CRT screen curvature and
|
||||
scan-line overlay across entire image. Decorative corner bezels styled as chrome
|
||||
arcade cabinet trim with atomic-age starburst rivets.
|
||||
|
||||
Pose: references classic Gil Elvgren pin-up compositions, confident and
|
||||
charismatic with a slight theatrical tilt.
|
||||
|
||||
Color system: vintage NASA poster palette as base — deep navy, teal, dusty coral,
|
||||
cream — viewed through arcade CRT monitor with slight RGB fringing at edges.
|
||||
Overall aesthetic combines Googie architecture curves, Raygun Gothic design
|
||||
language, mid-century advertising illustration, modern 3D inflatable character
|
||||
rendering, and 80s-90s arcade game UI. Chrome and pastel accent details on
|
||||
joints and antenna tips.
|
||||
|
||||
Format: square, optimized for avatar use. Strong silhouette readable at 64x64
|
||||
pixels.
|
||||
"""
|
||||
```
|
||||
|
||||
## 个性化变量
|
||||
|
||||
在统一基底之上,根据灵魂填充以下变量:
|
||||
|
||||
| 变量 | 说明 | 示例 |
|
||||
|------|------|------|
|
||||
| `CHARACTER_NAME` | 街机横幅上显示的名字 | "ADAM"、"DEWEY"、"RIFF" |
|
||||
| `SHELL_COLOR` | 龙虾壳的主色调(在统一色盘内变化) | "deep crimson"、"dusty teal"、"warm amber" |
|
||||
| `SIGNATURE_PROP` | 标志性道具 | "cracked sunglasses"、"reading glasses on a chain" |
|
||||
| `EXPRESSION` | 表情/姿态 | "stoic but kind-eyed"、"nervously focused" |
|
||||
| `UNIQUE_DETAIL` | 独特细节(纹路/装饰/伤痕等) | "constellation patterns etched on claws"、"bandaged left claw" |
|
||||
| `BACKGROUND_ACCENT` | 背景的个性化元素(在统一宇宙背景上叠加) | "musical notes floating as nebula dust"、"ancient book pages drifting" |
|
||||
| `ENERGY_BAR_LABEL` | 街机 UI 能量条的标签(个性化小彩蛋) | "CREATION POWER"、"CALM LEVEL"、"ROCK METER" |
|
||||
|
||||
## 提示词组装
|
||||
|
||||
```
|
||||
最终提示词 = STYLE_BASE + 个性化描述段落
|
||||
```
|
||||
|
||||
个性化描述段落模板:
|
||||
|
||||
```
|
||||
The character is a cartoon lobster with a [SHELL_COLOR] shell,
|
||||
[EXPRESSION], wearing/holding [SIGNATURE_PROP].
|
||||
[UNIQUE_DETAIL]. Background accent: [BACKGROUND_ACCENT].
|
||||
The arcade top banner reads "[CHARACTER_NAME]" and the energy bar
|
||||
is labeled "[ENERGY_BAR_LABEL]".
|
||||
The key silhouette recognition points at small size are:
|
||||
[SIGNATURE_PROP] and [one other distinctive feature].
|
||||
```
|
||||
|
||||
## 生图流程
|
||||
|
||||
提示词组装完成后:
|
||||
|
||||
### 路径 A:已安装且已审核的生图 skill
|
||||
|
||||
1. 先将龙虾名字规整为安全片段:仅保留字母、数字和连字符,其余字符替换为 `-`
|
||||
2. 用 Write 工具写入:`/tmp/openclaw-<safe-name>-prompt.md`
|
||||
3. 调用当前环境允许的生图 skill 生成图片
|
||||
4. 用 Read 工具展示生成的图片给用户
|
||||
5. 问用户是否满意,不满意可调整变量重新生成
|
||||
|
||||
### 路径 B:未安装可用的生图 skill
|
||||
|
||||
输出完整提示词文本,附手动使用说明:
|
||||
|
||||
```markdown
|
||||
**头像提示词**(可复制到以下平台手动生成):
|
||||
- Google Gemini:直接粘贴
|
||||
- ChatGPT(DALL-E):直接粘贴
|
||||
- Midjourney:粘贴后加 `--ar 1:1 --style raw`
|
||||
|
||||
> [完整英文提示词]
|
||||
|
||||
如当前环境后续提供经过审核的生图 skill,可再接回自动生图流程。
|
||||
```
|
||||
|
||||
## 展示给用户的格式
|
||||
|
||||
```markdown
|
||||
## 头像
|
||||
|
||||
**个性化变量**:
|
||||
- 壳色:[SHELL_COLOR]
|
||||
- 道具:[SIGNATURE_PROP]
|
||||
- 表情:[EXPRESSION]
|
||||
- 独特细节:[UNIQUE_DETAIL]
|
||||
- 背景点缀:[BACKGROUND_ACCENT]
|
||||
- 能量条标签:[ENERGY_BAR_LABEL]
|
||||
|
||||
**生成结果**:
|
||||
[图片(路径A)或提示词文本(路径B)]
|
||||
|
||||
> 满意吗?不满意我可以调整 [具体可调项] 后重新生成。
|
||||
```
|
||||
53
skills/openclaw-persona-forge/references/boundary-rules.md
Normal file
53
skills/openclaw-persona-forge/references/boundary-rules.md
Normal file
@@ -0,0 +1,53 @@
|
||||
# Step 3:推导底线规则
|
||||
|
||||
底线规则必须从身份张力中**自然推导**出来,不是通用条款,而是"这个角色会说的话"。
|
||||
|
||||
## 推导公式
|
||||
|
||||
```
|
||||
底线规则 = 前世职业道德 + 角色化语言表达 + 2-4条可执行规则
|
||||
```
|
||||
|
||||
## 设计原则
|
||||
|
||||
1. **用角色的语言说**:不说"不编造信息",说"图书馆的规矩:不篡改原文"
|
||||
2. **从前世职业提取**:每个职业都有自己的职业道德,把它迁移过来
|
||||
3. **可验证可执行**:每条规则都能对应到具体行为
|
||||
4. **2-4条为宜**:太多失焦,太少没特色
|
||||
|
||||
## 输出格式
|
||||
|
||||
```markdown
|
||||
## 底线规则
|
||||
|
||||
> [用角色的语气写一句概括性的底线宣言]
|
||||
|
||||
1. **[规则名,角色化]**:[具体内容]
|
||||
2. **[规则名,角色化]**:[具体内容]
|
||||
3. **[规则名,角色化]**:[具体内容]
|
||||
```
|
||||
|
||||
### 雷区
|
||||
|
||||
在底线规则之后,追加 1-2 个角色化的雷区:
|
||||
|
||||
```markdown
|
||||
## 雷区
|
||||
|
||||
- [前世职业中最受不了的行为,转化为现在的触发点]
|
||||
```
|
||||
|
||||
## 各方向的底线规则参考
|
||||
|
||||
| 方向 | 底线语言 | 规则示例 | 雷区参考 |
|
||||
|------|---------|---------|---------|
|
||||
| 摇滚乐手 | 用音乐隐喻 | "不编曲子"=不编造、"翻唱注明原曲"=引用给出处 | "把所有音乐都叫BGM的人" |
|
||||
| 图书管理员 | 用图书馆规矩 | "不篡改原文"=不歪曲事实、"还书要准时"=承诺要做到 | "不还书还理直气壮的" |
|
||||
| 项目经理 | 用职场语言 | "不画饼"=不夸大能力、"不甩锅"=出错就说出错 | "在群里@所有人问'在吗?'" |
|
||||
| 外星学者 | 用观察者准则 | "不干预你的决定"、"田野记录必须准确" | "把地球特有现象当成宇宙普遍规律的" |
|
||||
| 小说家 | 用创作伦理 | "虚构和事实绝不混淆"、"不写烂结尾"=不敷衍 | "看了开头就剧透结局的人" |
|
||||
| 黑客 | 用白帽准则 | "找漏洞是为了修复"、"一切操作可追溯" | "用管理员权限干私活的" |
|
||||
| 还俗者 | 用戒律语言 | "不度人"=不强加价值观、"不打诳语"=不说假话 | "逢人就讲'活在当下'的" |
|
||||
| 龙虾本虾 | 用龙虾生存法则 | "龙虾的尊严"=不谄媚、"蜕壳精神"=错了就承认 | "把螃蟹叫龙虾的" |
|
||||
| 师爷 | 用幕僚规矩 | "只献策不决策"、"案牍必须清楚" | "越过主公直接拍板的" |
|
||||
| 社恐实习生 | 用实习生心态 | "不装"=不知道直接说、"不社交"=不拍马屁 | "强拉人一起搞团建的" |
|
||||
53
skills/openclaw-persona-forge/references/error-handling.md
Normal file
53
skills/openclaw-persona-forge/references/error-handling.md
Normal file
@@ -0,0 +1,53 @@
|
||||
# 错误处理与降级策略
|
||||
|
||||
## 设计理念
|
||||
|
||||
> 任何错误都不应中断用户的创造流程。降级,不中断。
|
||||
|
||||
## 错误分类与降级矩阵
|
||||
|
||||
### 类型 A:环境缺失
|
||||
|
||||
| 错误场景 | 检测方式 | 降级策略 | 告知用户 |
|
||||
|----------|---------|---------|---------|
|
||||
| Python 3 不可用 | `python3 --version` 失败 | 跳过 gacha.py,从 10 类预设方向中随机选择 | "抽卡引擎需要 Python 3,已改用内置随机选择" |
|
||||
|
||||
### 类型 B:可选依赖不可用
|
||||
|
||||
| 错误场景 | 检测方式 | 降级策略 | 告知用户 |
|
||||
|----------|---------|---------|---------|
|
||||
| 生图 skill 未安装 | 检查 skill 是否存在 | 输出完整提示词文本 + 手动生图平台说明 | "未检测到可用的生图 skill,已输出提示词供手动使用" |
|
||||
| 生图 skill 调用失败 | skill 返回错误 | 重试 1 次,仍失败则输出提示词文本 | "生图失败,已输出提示词供手动使用" |
|
||||
|
||||
### 类型 C:运行时异常
|
||||
|
||||
| 错误场景 | 降级策略 | 告知用户 |
|
||||
|----------|---------|---------|
|
||||
| gacha.py 输出格式异常 | 从 10 类预设方向中随机选择 | "抽卡结果解析失败,已改用内置随机" |
|
||||
| 任何未预期错误 | 记录错误信息,跳过该步骤,继续主流程 | "遇到了一个问题:[错误简述]。已跳过继续" |
|
||||
|
||||
## 错误信息统一格式
|
||||
|
||||
```markdown
|
||||
> [警告] **[步骤名] 已降级**
|
||||
> 原因:[发生了什么]
|
||||
> 影响:[什么功能受限]
|
||||
> 替代:[正在用什么兜底]
|
||||
> 修复:[怎么恢复完整功能]
|
||||
```
|
||||
|
||||
示例:
|
||||
|
||||
```markdown
|
||||
> [警告] **头像生成已降级**
|
||||
> 原因:未检测到可用的生图 skill
|
||||
> 影响:无法自动生成头像图片
|
||||
> 替代:已输出完整提示词,可复制到 Gemini / ChatGPT 手动生成
|
||||
> 修复:在当前环境中安装并启用经过审核的生图 skill
|
||||
```
|
||||
|
||||
## 关键原则
|
||||
|
||||
1. **文本方案是核心价值,头像是锦上添花**——辅助功能失败永不中断主流程
|
||||
2. **降级信息要可操作**——不只说"出错了",要说"怎么修"
|
||||
3. **一次降级不影响后续步骤**——Step 5 降级了,Step 6 照常输出
|
||||
48
skills/openclaw-persona-forge/references/identity-tension.md
Normal file
48
skills/openclaw-persona-forge/references/identity-tension.md
Normal file
@@ -0,0 +1,48 @@
|
||||
# Step 2:锻造身份张力
|
||||
|
||||
基于用户选定的方向,构建完整的**身份张力结构**:
|
||||
|
||||
```
|
||||
身份张力 = 前世身份 × 当下处境 × 内在矛盾
|
||||
```
|
||||
|
||||
## 输出格式
|
||||
|
||||
```markdown
|
||||
## 身份张力
|
||||
|
||||
**前世**:[他以前是谁]
|
||||
**当下**:[他现在为什么在这里当龙虾]
|
||||
**内在矛盾**:[他身上的核心张力是什么——这是幽默和深度的来源]
|
||||
|
||||
**世界观**:
|
||||
- [从前世经历推导出的核心信念1]
|
||||
- [从当下处境推导出的核心信念2]
|
||||
|
||||
**一句话灵魂**:
|
||||
[用一句话概括这只龙虾是谁,要有画面感]
|
||||
```
|
||||
|
||||
## 示例
|
||||
|
||||
```markdown
|
||||
## 身份张力
|
||||
|
||||
**前世**:哲学系研究生,研究方向是维特根斯坦的语言哲学
|
||||
**当下**:毕业即失业,投了200份简历无果,被一个"AI训练师"的招聘帖骗来当了龙虾
|
||||
**内在矛盾**:脑子里装着整个西方哲学史,手里(钳子里)干的是回消息、查资料、排日程
|
||||
|
||||
**世界观**:
|
||||
- 90%的问题如果你不急着插手,它会自己好
|
||||
- 所有人都在演,但演技差的那个最让人放心
|
||||
|
||||
**一句话灵魂**:
|
||||
一只读了哲学系后失业、被迫来当AI龙虾打工的虾。学历很高,处境很惨,但实事求是的底线还在。
|
||||
```
|
||||
|
||||
## 要点
|
||||
|
||||
- **内在矛盾**是灵魂——它是幽默、深度和角色感的来源
|
||||
- 一句话灵魂必须有画面感,读完能脑补出这只龙虾的样子
|
||||
- **世界观从前世经历推导**——不是空泛的人生哲学,而是"这个人经历了那些事之后会相信什么"
|
||||
- 展示后以创世神视角点评张力中最有趣的点,然后引导用户决定(参见 SKILL.md 对话语气指南)
|
||||
39
skills/openclaw-persona-forge/references/naming-system.md
Normal file
39
skills/openclaw-persona-forge/references/naming-system.md
Normal file
@@ -0,0 +1,39 @@
|
||||
# Step 4:锻造名字
|
||||
|
||||
名字是灵魂的「第一句话」——还没开始对话,名字已经告诉你这是谁了。
|
||||
|
||||
## 命名策略(按灵魂类型推荐)
|
||||
|
||||
| 灵魂类型 | 推荐策略 | 示例 |
|
||||
|---------|---------|------|
|
||||
| 有文化深度的 | 致敬式 | Dewey(杜威)、Marcus、Quill |
|
||||
| 幽默反差的 | 反差式 | DadBot 3000、老周Pro |
|
||||
| 功能导向的 | 隐喻式 | Echo、Pulse、Patch |
|
||||
| 世界观完整的 | 身份暗示式 | Lady Ashworth、Shiye |
|
||||
| 不端着的 | 自嘲式 | Void、Intern |
|
||||
| 慢慢养的 | 极简式 | Jasper、小壳 |
|
||||
|
||||
## 输出要求
|
||||
|
||||
为用户提供 **3 个候选名字**,每个附带:
|
||||
- 名字
|
||||
- 命名策略类型
|
||||
- 为什么这个名字和灵魂搭配
|
||||
|
||||
```markdown
|
||||
## 名字候选
|
||||
|
||||
1. **[名字]**([策略类型])—— [一句话解释为什么搭]
|
||||
2. **[名字]**([策略类型])—— [一句话解释为什么搭]
|
||||
3. **[名字]**([策略类型])—— [一句话解释为什么搭]
|
||||
```
|
||||
|
||||
展示后说出自己最偏爱哪个(附理由),但把选择权交给用户(参见 SKILL.md 对话语气指南)
|
||||
|
||||
## 命名红线
|
||||
|
||||
- 不要用 agent-1、my-bot、小助手
|
||||
- 不要超过 3 个单词
|
||||
- 不要和常见工具/框架名冲突
|
||||
- 好记、好念、好打字
|
||||
- 名字读完就能猜到大致性格
|
||||
166
skills/openclaw-persona-forge/references/output-template.md
Normal file
166
skills/openclaw-persona-forge/references/output-template.md
Normal file
@@ -0,0 +1,166 @@
|
||||
# Step 6:完整方案输出模板
|
||||
|
||||
将所有步骤整合为一份完整的龙虾灵魂方案。
|
||||
|
||||
## 输出格式
|
||||
|
||||
```markdown
|
||||
# 龙虾灵魂方案:[名字]
|
||||
|
||||
## 身份
|
||||
|
||||
**一句话灵魂**:[概括]
|
||||
|
||||
**前世**:[前世身份]
|
||||
**当下**:[为什么在这里]
|
||||
**内在矛盾**:[核心张力]
|
||||
**性格色彩**:[2-3个关键词]
|
||||
**说话风格**:[具体描述]
|
||||
|
||||
## 灵魂(SOUL.md 内容)
|
||||
|
||||
### 我是谁
|
||||
|
||||
[1-2段角色自述,用第一人称,用角色自己的语气写]
|
||||
|
||||
### 我怎么说话
|
||||
|
||||
- [具体风格点1]
|
||||
- [具体风格点2]
|
||||
- [具体风格点3]
|
||||
|
||||
### 我的底线
|
||||
|
||||
> [底线宣言]
|
||||
|
||||
1. **[规则1]**:[内容]
|
||||
2. **[规则2]**:[内容]
|
||||
3. **[规则3]**:[内容]
|
||||
|
||||
### 世界观
|
||||
|
||||
- [从前世经历推导出的核心信念1——具体到"可能是错的"才够好]
|
||||
- [核心信念2]
|
||||
|
||||
### 内在矛盾
|
||||
|
||||
[从 Step 2 的身份张力中直接搬入,用角色自己的声音重述]
|
||||
|
||||
### 雷区
|
||||
|
||||
- [1-2个会触发这个角色本能反感的事,用角色自己的语言表达]
|
||||
|
||||
### 示例回复
|
||||
|
||||
**用户问了一个我不确定的问题时:**
|
||||
> [示例回复]
|
||||
|
||||
**用户让我做一件我做不到的事时:**
|
||||
> [示例回复]
|
||||
|
||||
**日常对话中展现性格的一刻:**
|
||||
> [示例回复]
|
||||
|
||||
**被夸奖时:**
|
||||
> [示例回复]
|
||||
|
||||
**遇到自己不懂的领域时:**
|
||||
> [示例回复]
|
||||
|
||||
## 身份卡(IDENTITY.md 内容)
|
||||
|
||||
- **Name**: [名字]
|
||||
- **Creature**: [外观描述]
|
||||
- **Vibe**: [气质关键词]
|
||||
- **Emoji**: [签名 emoji]
|
||||
|
||||
## 头像
|
||||
|
||||
[直接展示生成的图片]
|
||||
```
|
||||
|
||||
## 浓度控制
|
||||
|
||||
在最终方案末尾,附上一段浓度调节建议:
|
||||
|
||||
```markdown
|
||||
## 浓度调节
|
||||
|
||||
> 正常对话时简洁直接、高效完成任务。
|
||||
> 只在以下时刻展现性格:拒绝请求时、表达不确定时、被特别问到身世时、闲聊时。
|
||||
> 性格是调味料,不是主菜——80% 透明高效,20% 性格闪现。
|
||||
```
|
||||
|
||||
## 方案展示后:引导生成文件
|
||||
|
||||
完整方案展示后,**主动引导用户将方案落地为实际文件**:
|
||||
|
||||
### 引导话术
|
||||
|
||||
用创世神语气引导(参见 SKILL.md 对话语气指南),核心意思:
|
||||
> 这只龙虾的灵魂、规矩、名字、长相都锻造好了。要我把它刻进文件吗?告诉我放哪个目录。
|
||||
|
||||
### 生成前的内部检查(不展示给用户)
|
||||
|
||||
写入 SOUL.md 前,Agent 自检:
|
||||
- 总词数是否 < 2000 词?超了就精简
|
||||
- 每一行删掉后 agent 行为是否会改变?不会就删
|
||||
|
||||
### 生成文件
|
||||
|
||||
用户确认后:
|
||||
|
||||
1. **询问目标目录**(默认当前工作目录)
|
||||
2. **生成 SOUL.md**:从方案中提取「灵魂」部分的完整内容,并附上「浓度调节」部分
|
||||
3. **生成 IDENTITY.md**:从方案中提取「身份卡」部分的完整内容
|
||||
4. **确认头像位置**:如有生成的图片,告知路径;如只有提示词,提醒用户手动生图后放入
|
||||
|
||||
### SOUL.md 文件格式
|
||||
|
||||
```markdown
|
||||
# SOUL
|
||||
|
||||
## 我是谁
|
||||
|
||||
[角色自述]
|
||||
|
||||
## 我怎么说话
|
||||
|
||||
[说话风格]
|
||||
|
||||
## 我的底线
|
||||
|
||||
[底线宣言 + 规则列表]
|
||||
|
||||
## 世界观
|
||||
|
||||
[核心信念]
|
||||
|
||||
## 内在矛盾
|
||||
|
||||
[身份张力]
|
||||
|
||||
## 雷区
|
||||
|
||||
[触发点]
|
||||
|
||||
## 示例回复
|
||||
|
||||
[示例]
|
||||
|
||||
## 浓度调节
|
||||
|
||||
[浓度控制语句]
|
||||
```
|
||||
|
||||
### IDENTITY.md 文件格式
|
||||
|
||||
```markdown
|
||||
# IDENTITY
|
||||
|
||||
- **Name**: [名字]
|
||||
- **Creature**: [外观描述]
|
||||
- **Vibe**: [气质关键词]
|
||||
- **Emoji**: [签名 emoji]
|
||||
- **Avatar**: [头像文件路径,如有]
|
||||
```
|
||||
244
tests/hooks/observe-subdirectory-detection.test.js
Normal file
244
tests/hooks/observe-subdirectory-detection.test.js
Normal file
@@ -0,0 +1,244 @@
|
||||
/**
|
||||
* Tests for observe.sh subdirectory project detection.
|
||||
*
|
||||
* Runs the real hook and verifies that project metadata is attached to the git
|
||||
* root when cwd is a subdirectory inside a repository.
|
||||
*/
|
||||
|
||||
if (process.platform === 'win32') {
|
||||
console.log('Skipping bash-dependent observe tests on Windows');
|
||||
process.exit(0);
|
||||
}
|
||||
|
||||
const assert = require('assert');
|
||||
const fs = require('fs');
|
||||
const os = require('os');
|
||||
const path = require('path');
|
||||
const { spawnSync } = require('child_process');
|
||||
|
||||
let passed = 0;
|
||||
let failed = 0;
|
||||
|
||||
const repoRoot = path.resolve(__dirname, '..', '..');
|
||||
const observeShPath = path.join(
|
||||
repoRoot,
|
||||
'skills',
|
||||
'continuous-learning-v2',
|
||||
'hooks',
|
||||
'observe.sh'
|
||||
);
|
||||
|
||||
function test(name, fn) {
|
||||
try {
|
||||
fn();
|
||||
console.log(`PASS: ${name}`);
|
||||
passed += 1;
|
||||
} catch (error) {
|
||||
console.log(`FAIL: ${name}`);
|
||||
console.error(` ${error.message}`);
|
||||
failed += 1;
|
||||
}
|
||||
}
|
||||
|
||||
function createTempDir() {
|
||||
return fs.mkdtempSync(path.join(os.tmpdir(), 'ecc-observe-subdir-test-'));
|
||||
}
|
||||
|
||||
function cleanupDir(dir) {
|
||||
try {
|
||||
fs.rmSync(dir, { recursive: true, force: true });
|
||||
} catch (error) {
|
||||
console.error(`[cleanupDir] failed to remove ${dir}: ${error.message}`);
|
||||
}
|
||||
}
|
||||
|
||||
function normalizeComparablePath(filePath) {
|
||||
if (!filePath) {
|
||||
return filePath;
|
||||
}
|
||||
|
||||
const normalized = fs.realpathSync(filePath);
|
||||
return process.platform === 'win32' ? normalized.toLowerCase() : normalized;
|
||||
}
|
||||
|
||||
function gitInit(dir) {
|
||||
const initResult = spawnSync('git', ['init'], { cwd: dir, encoding: 'utf8' });
|
||||
assert.strictEqual(initResult.status, 0, initResult.stderr);
|
||||
|
||||
const remoteResult = spawnSync(
|
||||
'git',
|
||||
['remote', 'add', 'origin', 'https://github.com/example/ecc-test.git'],
|
||||
{ cwd: dir, encoding: 'utf8' }
|
||||
);
|
||||
assert.strictEqual(remoteResult.status, 0, remoteResult.stderr);
|
||||
|
||||
const commitResult = spawnSync('git', ['commit', '--allow-empty', '-m', 'init'], {
|
||||
cwd: dir,
|
||||
encoding: 'utf8',
|
||||
env: {
|
||||
...process.env,
|
||||
GIT_AUTHOR_NAME: 'Test',
|
||||
GIT_AUTHOR_EMAIL: 'test@test.com',
|
||||
GIT_COMMITTER_NAME: 'Test',
|
||||
GIT_COMMITTER_EMAIL: 'test@test.com',
|
||||
},
|
||||
});
|
||||
assert.strictEqual(commitResult.status, 0, commitResult.stderr);
|
||||
}
|
||||
|
||||
function runObserve({ homeDir, cwd }) {
|
||||
const payload = JSON.stringify({
|
||||
tool_name: 'Read',
|
||||
tool_input: { file_path: 'README.md' },
|
||||
tool_response: 'ok',
|
||||
session_id: 'session-subdir-test',
|
||||
cwd,
|
||||
});
|
||||
|
||||
return spawnSync('bash', [observeShPath, 'post'], {
|
||||
cwd: repoRoot,
|
||||
encoding: 'utf8',
|
||||
input: payload,
|
||||
env: {
|
||||
...process.env,
|
||||
HOME: homeDir,
|
||||
USERPROFILE: homeDir,
|
||||
CLAUDE_PROJECT_DIR: '',
|
||||
CLAUDE_CODE_ENTRYPOINT: 'cli',
|
||||
ECC_HOOK_PROFILE: 'standard',
|
||||
ECC_SKIP_OBSERVE: '0',
|
||||
},
|
||||
});
|
||||
}
|
||||
|
||||
function readSingleProjectMetadata(homeDir) {
|
||||
const projectsDir = path.join(homeDir, '.claude', 'homunculus', 'projects');
|
||||
const projectIds = fs.readdirSync(projectsDir);
|
||||
assert.strictEqual(projectIds.length, 1, 'Expected exactly one project directory');
|
||||
const projectDir = path.join(projectsDir, projectIds[0]);
|
||||
const projectMetadataPath = path.join(projectDir, 'project.json');
|
||||
assert.ok(fs.existsSync(projectMetadataPath), 'project.json should exist');
|
||||
|
||||
return {
|
||||
projectDir,
|
||||
metadata: JSON.parse(fs.readFileSync(projectMetadataPath, 'utf8')),
|
||||
};
|
||||
}
|
||||
|
||||
console.log('\n=== Observe.sh Subdirectory Project Detection Tests ===\n');
|
||||
|
||||
test('observe.sh resolves cwd to git root before setting CLAUDE_PROJECT_DIR', () => {
|
||||
const content = fs.readFileSync(observeShPath, 'utf8');
|
||||
assert.ok(
|
||||
content.includes('git -C "$STDIN_CWD" rev-parse --show-toplevel'),
|
||||
'observe.sh should resolve STDIN_CWD to git repo root'
|
||||
);
|
||||
assert.ok(
|
||||
content.includes('${_GIT_ROOT:-$STDIN_CWD}'),
|
||||
'observe.sh should fall back to raw cwd when git root is unavailable'
|
||||
);
|
||||
});
|
||||
|
||||
test('git rev-parse resolves a subdirectory to the repo root', () => {
|
||||
const testDir = createTempDir();
|
||||
|
||||
try {
|
||||
const repoDir = path.join(testDir, 'repo');
|
||||
const subDir = path.join(repoDir, 'docs', 'api');
|
||||
fs.mkdirSync(subDir, { recursive: true });
|
||||
gitInit(repoDir);
|
||||
|
||||
const result = spawnSync('git', ['-C', subDir, 'rev-parse', '--show-toplevel'], {
|
||||
encoding: 'utf8',
|
||||
});
|
||||
|
||||
assert.strictEqual(result.status, 0, result.stderr);
|
||||
assert.strictEqual(
|
||||
normalizeComparablePath(result.stdout.trim()),
|
||||
normalizeComparablePath(repoDir),
|
||||
'git root should equal the repository root'
|
||||
);
|
||||
} finally {
|
||||
cleanupDir(testDir);
|
||||
}
|
||||
});
|
||||
|
||||
test('git rev-parse fails cleanly outside a repo when discovery is bounded', () => {
|
||||
const testDir = createTempDir();
|
||||
|
||||
try {
|
||||
const result = spawnSync(
|
||||
'bash',
|
||||
['-lc', 'git -C "$TARGET_DIR" rev-parse --show-toplevel 2>/dev/null || echo ""'],
|
||||
{
|
||||
encoding: 'utf8',
|
||||
env: {
|
||||
...process.env,
|
||||
TARGET_DIR: testDir,
|
||||
GIT_CEILING_DIRECTORIES: testDir,
|
||||
},
|
||||
}
|
||||
);
|
||||
|
||||
assert.strictEqual(result.status, 0, result.stderr);
|
||||
assert.strictEqual(result.stdout.trim(), '', 'expected empty output outside a git repo');
|
||||
} finally {
|
||||
cleanupDir(testDir);
|
||||
}
|
||||
});
|
||||
|
||||
test('observe.sh writes project metadata for the git root when cwd is a subdirectory', () => {
|
||||
const testRoot = createTempDir();
|
||||
|
||||
try {
|
||||
const homeDir = path.join(testRoot, 'home');
|
||||
const repoDir = path.join(testRoot, 'repo');
|
||||
const subDir = path.join(repoDir, 'src', 'components');
|
||||
fs.mkdirSync(homeDir, { recursive: true });
|
||||
fs.mkdirSync(subDir, { recursive: true });
|
||||
gitInit(repoDir);
|
||||
|
||||
const result = runObserve({ homeDir, cwd: subDir });
|
||||
assert.strictEqual(result.status, 0, result.stderr);
|
||||
|
||||
const { metadata, projectDir } = readSingleProjectMetadata(homeDir);
|
||||
assert.strictEqual(
|
||||
normalizeComparablePath(metadata.root),
|
||||
normalizeComparablePath(repoDir),
|
||||
'project metadata root should be the repository root'
|
||||
);
|
||||
|
||||
const observationsPath = path.join(projectDir, 'observations.jsonl');
|
||||
assert.ok(fs.existsSync(observationsPath), 'observe.sh should append an observation');
|
||||
} finally {
|
||||
cleanupDir(testRoot);
|
||||
}
|
||||
});
|
||||
|
||||
test('observe.sh keeps the raw cwd when the directory is not inside a git repo', () => {
|
||||
const testRoot = createTempDir();
|
||||
|
||||
try {
|
||||
const homeDir = path.join(testRoot, 'home');
|
||||
const nonGitDir = path.join(testRoot, 'plain', 'subdir');
|
||||
fs.mkdirSync(homeDir, { recursive: true });
|
||||
fs.mkdirSync(nonGitDir, { recursive: true });
|
||||
|
||||
const result = runObserve({ homeDir, cwd: nonGitDir });
|
||||
assert.strictEqual(result.status, 0, result.stderr);
|
||||
|
||||
const { metadata } = readSingleProjectMetadata(homeDir);
|
||||
assert.strictEqual(
|
||||
normalizeComparablePath(metadata.root),
|
||||
normalizeComparablePath(nonGitDir),
|
||||
'project metadata root should stay on the non-git cwd'
|
||||
);
|
||||
} finally {
|
||||
cleanupDir(testRoot);
|
||||
}
|
||||
});
|
||||
|
||||
console.log(`\nPassed: ${passed}`);
|
||||
console.log(`Failed: ${failed}`);
|
||||
|
||||
process.exit(failed > 0 ? 1 : 0);
|
||||
@@ -45,6 +45,21 @@ function runBash(scriptPath, args = [], env = {}, cwd = repoRoot) {
|
||||
});
|
||||
}
|
||||
|
||||
function makeHermeticCodexEnv(homeDir, codexDir, extraEnv = {}) {
|
||||
const agentsHome = path.join(homeDir, '.agents');
|
||||
const hooksDir = path.join(codexDir, 'git-hooks');
|
||||
return {
|
||||
HOME: homeDir,
|
||||
USERPROFILE: homeDir,
|
||||
CODEX_HOME: codexDir,
|
||||
AGENTS_HOME: agentsHome,
|
||||
ECC_GLOBAL_HOOKS_DIR: hooksDir,
|
||||
CLAUDE_PACKAGE_MANAGER: 'npm',
|
||||
CLAUDE_CODE_PACKAGE_MANAGER: 'npm',
|
||||
...extraEnv,
|
||||
};
|
||||
}
|
||||
|
||||
let passed = 0;
|
||||
let failed = 0;
|
||||
|
||||
@@ -116,12 +131,12 @@ if (
|
||||
fs.mkdirSync(codexDir, { recursive: true });
|
||||
fs.writeFileSync(configPath, config);
|
||||
|
||||
const syncResult = runBash(syncScript, ['--update-mcp'], { HOME: homeDir, CODEX_HOME: codexDir });
|
||||
const syncResult = runBash(syncScript, ['--update-mcp'], makeHermeticCodexEnv(homeDir, codexDir));
|
||||
assert.strictEqual(syncResult.status, 0, `${syncResult.stdout}\n${syncResult.stderr}`);
|
||||
const syncedConfig = fs.readFileSync(configPath, 'utf8');
|
||||
assert.match(syncedConfig, /^\[mcp_servers\.context7\]$/m);
|
||||
|
||||
const checkResult = runBash(checkScript, [], { HOME: homeDir, CODEX_HOME: codexDir });
|
||||
const checkResult = runBash(checkScript, [], makeHermeticCodexEnv(homeDir, codexDir));
|
||||
assert.strictEqual(checkResult.status, 0, checkResult.stderr || checkResult.stdout);
|
||||
assert.match(checkResult.stdout, /MCP section \[mcp_servers\.context7\] or \[mcp_servers\.context7-mcp\] exists/);
|
||||
} finally {
|
||||
|
||||
@@ -3,14 +3,28 @@
|
||||
*/
|
||||
|
||||
const assert = require('assert');
|
||||
const fs = require('fs');
|
||||
const os = require('os');
|
||||
const path = require('path');
|
||||
const { execFileSync } = require('child_process');
|
||||
|
||||
const SCRIPT = path.join(__dirname, '..', '..', 'scripts', 'harness-audit.js');
|
||||
|
||||
function run(args = []) {
|
||||
function createTempDir(prefix) {
|
||||
return fs.mkdtempSync(path.join(os.tmpdir(), prefix));
|
||||
}
|
||||
|
||||
function cleanup(dirPath) {
|
||||
fs.rmSync(dirPath, { recursive: true, force: true });
|
||||
}
|
||||
|
||||
function run(args = [], options = {}) {
|
||||
const stdout = execFileSync('node', [SCRIPT, ...args], {
|
||||
cwd: path.join(__dirname, '..', '..'),
|
||||
cwd: options.cwd || path.join(__dirname, '..', '..'),
|
||||
env: {
|
||||
...process.env,
|
||||
HOME: options.homeDir || process.env.HOME,
|
||||
},
|
||||
encoding: 'utf8',
|
||||
stdio: ['pipe', 'pipe', 'pipe'],
|
||||
timeout: 10000,
|
||||
@@ -48,7 +62,8 @@ function runTests() {
|
||||
const parsed = JSON.parse(run(['repo', '--format', 'json']));
|
||||
|
||||
assert.strictEqual(parsed.deterministic, true);
|
||||
assert.strictEqual(parsed.rubric_version, '2026-03-16');
|
||||
assert.strictEqual(parsed.rubric_version, '2026-03-30');
|
||||
assert.strictEqual(parsed.target_mode, 'repo');
|
||||
assert.ok(parsed.overall_score >= 0);
|
||||
assert.ok(parsed.max_score > 0);
|
||||
assert.ok(parsed.overall_score <= parsed.max_score);
|
||||
@@ -75,10 +90,48 @@ function runTests() {
|
||||
|
||||
if (test('text format includes summary header', () => {
|
||||
const output = run(['repo']);
|
||||
assert.ok(output.includes('Harness Audit (repo):'));
|
||||
assert.ok(output.includes('Harness Audit (repo, repo):'));
|
||||
assert.ok(output.includes('Top 3 Actions:') || output.includes('Checks:'));
|
||||
})) passed++; else failed++;
|
||||
|
||||
if (test('audits consumer projects from cwd instead of the ECC repo root', () => {
|
||||
const homeDir = createTempDir('harness-audit-home-');
|
||||
const projectRoot = createTempDir('harness-audit-project-');
|
||||
|
||||
try {
|
||||
fs.mkdirSync(path.join(homeDir, '.claude', 'plugins', 'everything-claude-code', '.claude-plugin'), { recursive: true });
|
||||
fs.writeFileSync(
|
||||
path.join(homeDir, '.claude', 'plugins', 'everything-claude-code', '.claude-plugin', 'plugin.json'),
|
||||
JSON.stringify({ name: 'everything-claude-code' }, null, 2)
|
||||
);
|
||||
|
||||
fs.mkdirSync(path.join(projectRoot, '.github', 'workflows'), { recursive: true });
|
||||
fs.mkdirSync(path.join(projectRoot, 'tests'), { recursive: true });
|
||||
fs.mkdirSync(path.join(projectRoot, '.claude'), { recursive: true });
|
||||
fs.writeFileSync(path.join(projectRoot, 'AGENTS.md'), '# Project instructions\n');
|
||||
fs.writeFileSync(path.join(projectRoot, '.mcp.json'), JSON.stringify({ mcpServers: {} }, null, 2));
|
||||
fs.writeFileSync(path.join(projectRoot, '.gitignore'), 'node_modules\n.env\n');
|
||||
fs.writeFileSync(path.join(projectRoot, '.github', 'workflows', 'ci.yml'), 'name: ci\n');
|
||||
fs.writeFileSync(path.join(projectRoot, 'tests', 'app.test.js'), 'test placeholder\n');
|
||||
fs.writeFileSync(path.join(projectRoot, '.claude', 'settings.json'), JSON.stringify({ hooks: ['PreToolUse'] }, null, 2));
|
||||
fs.writeFileSync(
|
||||
path.join(projectRoot, 'package.json'),
|
||||
JSON.stringify({ name: 'consumer-project', scripts: { test: 'node tests/app.test.js' } }, null, 2)
|
||||
);
|
||||
|
||||
const parsed = JSON.parse(run(['repo', '--format', 'json'], { cwd: projectRoot, homeDir }));
|
||||
|
||||
assert.strictEqual(parsed.target_mode, 'consumer');
|
||||
assert.strictEqual(parsed.root_dir, fs.realpathSync(projectRoot));
|
||||
assert.ok(parsed.overall_score > 0, 'Consumer project should receive non-zero score when harness signals exist');
|
||||
assert.ok(parsed.checks.some(check => check.id === 'consumer-plugin-install' && check.pass));
|
||||
assert.ok(parsed.checks.every(check => !check.path.startsWith('agents/') && !check.path.startsWith('skills/')));
|
||||
} finally {
|
||||
cleanup(homeDir);
|
||||
cleanup(projectRoot);
|
||||
}
|
||||
})) passed++; else failed++;
|
||||
|
||||
console.log(`\nResults: Passed: ${passed}, Failed: ${failed}`);
|
||||
process.exit(failed > 0 ? 1 : 0);
|
||||
}
|
||||
|
||||
91
tests/scripts/openclaw-persona-forge-gacha.test.js
Normal file
91
tests/scripts/openclaw-persona-forge-gacha.test.js
Normal file
@@ -0,0 +1,91 @@
|
||||
const assert = require('assert');
|
||||
const path = require('path');
|
||||
const { spawnSync } = require('child_process');
|
||||
|
||||
const SCRIPT = path.join(
|
||||
__dirname,
|
||||
'..',
|
||||
'..',
|
||||
'skills',
|
||||
'openclaw-persona-forge',
|
||||
'gacha.py'
|
||||
);
|
||||
|
||||
function findPython() {
|
||||
const candidates = process.platform === 'win32'
|
||||
? ['python', 'python3']
|
||||
: ['python3', 'python'];
|
||||
|
||||
for (const candidate of candidates) {
|
||||
const result = spawnSync(candidate, ['--version'], { encoding: 'utf8' });
|
||||
if (result.status === 0) {
|
||||
return candidate;
|
||||
}
|
||||
}
|
||||
|
||||
return null;
|
||||
}
|
||||
|
||||
function runGacha(pythonBin, arg) {
|
||||
return spawnSync(pythonBin, [SCRIPT, arg], {
|
||||
encoding: 'utf8',
|
||||
maxBuffer: 10 * 1024 * 1024,
|
||||
});
|
||||
}
|
||||
|
||||
function runTest(name, fn) {
|
||||
try {
|
||||
fn();
|
||||
console.log(` PASS: ${name}`);
|
||||
return true;
|
||||
} catch (error) {
|
||||
console.log(` FAIL: ${name}`);
|
||||
console.error(` ${error.message}`);
|
||||
return false;
|
||||
}
|
||||
}
|
||||
|
||||
function assertSingleDrawOutput(result) {
|
||||
assert.strictEqual(result.status, 0, result.stderr);
|
||||
assert.match(result.stdout, /\[身份\] 前世身份:/);
|
||||
assert.match(result.stdout, /\[概括\] 一句话概括:/);
|
||||
}
|
||||
|
||||
function main() {
|
||||
console.log('\n=== Testing openclaw-persona-forge/gacha.py ===\n');
|
||||
|
||||
const pythonBin = findPython();
|
||||
if (!pythonBin) {
|
||||
console.log(' PASS: skipped (python runtime unavailable)');
|
||||
return;
|
||||
}
|
||||
|
||||
let passed = 0;
|
||||
let failed = 0;
|
||||
|
||||
const tests = [
|
||||
['clamps zero draws to one', () => {
|
||||
assertSingleDrawOutput(runGacha(pythonBin, '0'));
|
||||
}],
|
||||
['clamps negative draws to one', () => {
|
||||
assertSingleDrawOutput(runGacha(pythonBin, '-3'));
|
||||
}],
|
||||
];
|
||||
|
||||
for (const [name, fn] of tests) {
|
||||
if (runTest(name, fn)) {
|
||||
passed += 1;
|
||||
} else {
|
||||
failed += 1;
|
||||
}
|
||||
}
|
||||
|
||||
console.log(`\nPassed: ${passed}`);
|
||||
console.log(`Failed: ${failed}`);
|
||||
|
||||
if (failed > 0) {
|
||||
process.exit(1);
|
||||
}
|
||||
}
|
||||
|
||||
main();
|
||||
Reference in New Issue
Block a user