From 4813ed753f43089a938e5e4add49cbbfb873953e Mon Sep 17 00:00:00 2001 From: Affaan Mustafa Date: Tue, 31 Mar 2026 21:54:03 -0700 Subject: [PATCH] feat: consolidate all Anthropic plugins into ECC v2.0.0 MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Ports functionality from 10+ separate plugins into ECC so users only need one plugin installed. Consolidates: pr-review-toolkit, feature-dev, commit-commands, hookify, code-simplifier, security-guidance, frontend-design, explanatory-output-style, and personal skills. New agents (8): code-architect, code-explorer, code-simplifier, comment-analyzer, conversation-analyzer, pr-test-analyzer, silent-failure-hunter, type-design-analyzer New commands (9): commit, commit-push-pr, clean-gone, review-pr, feature-dev, hookify, hookify-list, hookify-configure, hookify-help New skills (8): frontend-design, hookify-rules, github-ops, knowledge-ops, lead-intelligence, oura-health, pmx-guidelines, remotion Enhanced skills (8): article-writing, content-engine, market-research, investor-materials, investor-outreach, x-api, security-scan, autonomous-loops — merged with personal skill content New hook: security-reminder.py (pattern-based OWASP vulnerability warnings on file edits) Totals: 36 agents, 69 commands, 128 skills, 29 hook scripts --- .claude-plugin/marketplace.json | 4 +- .claude-plugin/plugin.json | 17 +- agents/code-architect.md | 69 ++++++ agents/code-explorer.md | 65 +++++ agents/code-simplifier.md | 50 ++++ agents/comment-analyzer.md | 43 ++++ agents/conversation-analyzer.md | 51 ++++ agents/pr-test-analyzer.md | 43 ++++ agents/silent-failure-hunter.md | 47 ++++ agents/type-design-analyzer.md | 48 ++++ commands/clean-gone.md | 30 +++ commands/commit-push-pr.md | 30 +++ commands/commit.md | 28 +++ commands/feature-dev.md | 45 ++++ commands/hookify-configure.md | 14 ++ commands/hookify-help.md | 42 ++++ commands/hookify-list.md | 16 ++ commands/hookify.md | 45 ++++ 
commands/review-pr.md | 36 +++ hooks/hooks.json | 11 + scripts/hooks/security-reminder.py | 156 ++++++++++++ skills/article-writing/SKILL.md | 61 +++++ skills/autonomous-loops/SKILL.md | 132 ++++++++++ skills/content-engine/SKILL.md | 68 ++++++ skills/frontend-design/SKILL.md | 55 +++++ skills/github-ops/SKILL.md | 144 +++++++++++ .../content-crosspost-ops/SKILL.md | 44 +++- skills/hermes-generated/email-ops/SKILL.md | 16 +- .../hermes-generated/knowledge-ops/SKILL.md | 15 +- skills/hermes-generated/research-ops/SKILL.md | 32 ++- skills/hermes-generated/terminal-ops/SKILL.md | 3 + skills/hookify-rules/SKILL.md | 128 ++++++++++ skills/investor-materials/SKILL.md | 48 ++++ skills/investor-outreach/SKILL.md | 60 +++++ skills/knowledge-ops/SKILL.md | 124 ++++++++++ skills/lead-intelligence/SKILL.md | 186 ++++++++++++++ skills/market-research/SKILL.md | 26 ++ skills/oura-health/SKILL.md | 162 +++++++++++++ skills/pmx-guidelines/SKILL.md | 210 ++++++++++++++++ skills/remotion/SKILL.md | 44 ++++ skills/remotion/rules/3d.md | 86 +++++++ skills/remotion/rules/animations.md | 29 +++ skills/remotion/rules/assets.md | 78 ++++++ .../rules/assets/charts-bar-chart.tsx | 173 +++++++++++++ .../assets/text-animations-typewriter.tsx | 100 ++++++++ .../assets/text-animations-word-highlight.tsx | 108 +++++++++ skills/remotion/rules/audio.md | 172 +++++++++++++ skills/remotion/rules/calculate-metadata.md | 104 ++++++++ skills/remotion/rules/can-decode.md | 75 ++++++ skills/remotion/rules/charts.md | 58 +++++ skills/remotion/rules/compositions.md | 146 +++++++++++ skills/remotion/rules/display-captions.md | 126 ++++++++++ skills/remotion/rules/extract-frames.md | 229 ++++++++++++++++++ skills/remotion/rules/fonts.md | 152 ++++++++++++ skills/remotion/rules/get-audio-duration.md | 58 +++++ skills/remotion/rules/get-video-dimensions.md | 68 ++++++ skills/remotion/rules/get-video-duration.md | 58 +++++ skills/remotion/rules/gifs.md | 138 +++++++++++ skills/remotion/rules/images.md | 130 
++++++++++ skills/remotion/rules/import-srt-captions.md | 67 +++++ skills/remotion/rules/lottie.md | 67 +++++ skills/remotion/rules/measuring-dom-nodes.md | 34 +++ skills/remotion/rules/measuring-text.md | 143 +++++++++++ skills/remotion/rules/sequencing.md | 106 ++++++++ skills/remotion/rules/tailwind.md | 11 + skills/remotion/rules/text-animations.md | 20 ++ skills/remotion/rules/timing.md | 179 ++++++++++++++ skills/remotion/rules/transcribe-captions.md | 19 ++ skills/remotion/rules/transitions.md | 122 ++++++++++ skills/remotion/rules/trimming.md | 52 ++++ skills/remotion/rules/videos.md | 171 +++++++++++++ skills/security-scan/SKILL.md | 57 +++++ skills/x-api/SKILL.md | 61 +++++ 73 files changed, 5618 insertions(+), 27 deletions(-) create mode 100644 agents/code-architect.md create mode 100644 agents/code-explorer.md create mode 100644 agents/code-simplifier.md create mode 100644 agents/comment-analyzer.md create mode 100644 agents/conversation-analyzer.md create mode 100644 agents/pr-test-analyzer.md create mode 100644 agents/silent-failure-hunter.md create mode 100644 agents/type-design-analyzer.md create mode 100644 commands/clean-gone.md create mode 100644 commands/commit-push-pr.md create mode 100644 commands/commit.md create mode 100644 commands/feature-dev.md create mode 100644 commands/hookify-configure.md create mode 100644 commands/hookify-help.md create mode 100644 commands/hookify-list.md create mode 100644 commands/hookify.md create mode 100644 commands/review-pr.md create mode 100644 scripts/hooks/security-reminder.py create mode 100644 skills/frontend-design/SKILL.md create mode 100644 skills/github-ops/SKILL.md create mode 100644 skills/hookify-rules/SKILL.md create mode 100644 skills/knowledge-ops/SKILL.md create mode 100644 skills/lead-intelligence/SKILL.md create mode 100644 skills/oura-health/SKILL.md create mode 100644 skills/pmx-guidelines/SKILL.md create mode 100644 skills/remotion/SKILL.md create mode 100644 skills/remotion/rules/3d.md 
create mode 100644 skills/remotion/rules/animations.md create mode 100644 skills/remotion/rules/assets.md create mode 100644 skills/remotion/rules/assets/charts-bar-chart.tsx create mode 100644 skills/remotion/rules/assets/text-animations-typewriter.tsx create mode 100644 skills/remotion/rules/assets/text-animations-word-highlight.tsx create mode 100644 skills/remotion/rules/audio.md create mode 100644 skills/remotion/rules/calculate-metadata.md create mode 100644 skills/remotion/rules/can-decode.md create mode 100644 skills/remotion/rules/charts.md create mode 100644 skills/remotion/rules/compositions.md create mode 100644 skills/remotion/rules/display-captions.md create mode 100644 skills/remotion/rules/extract-frames.md create mode 100644 skills/remotion/rules/fonts.md create mode 100644 skills/remotion/rules/get-audio-duration.md create mode 100644 skills/remotion/rules/get-video-dimensions.md create mode 100644 skills/remotion/rules/get-video-duration.md create mode 100644 skills/remotion/rules/gifs.md create mode 100644 skills/remotion/rules/images.md create mode 100644 skills/remotion/rules/import-srt-captions.md create mode 100644 skills/remotion/rules/lottie.md create mode 100644 skills/remotion/rules/measuring-dom-nodes.md create mode 100644 skills/remotion/rules/measuring-text.md create mode 100644 skills/remotion/rules/sequencing.md create mode 100644 skills/remotion/rules/tailwind.md create mode 100644 skills/remotion/rules/text-animations.md create mode 100644 skills/remotion/rules/timing.md create mode 100644 skills/remotion/rules/transcribe-captions.md create mode 100644 skills/remotion/rules/transitions.md create mode 100644 skills/remotion/rules/trimming.md create mode 100644 skills/remotion/rules/videos.md diff --git a/.claude-plugin/marketplace.json b/.claude-plugin/marketplace.json index 0bfb0544..9601d925 100644 --- a/.claude-plugin/marketplace.json +++ b/.claude-plugin/marketplace.json @@ -13,8 +13,8 @@ { "name": "everything-claude-code", 
"source": "./", - "description": "The most comprehensive Claude Code plugin — 28+ agents, 116+ skills, 57+ commands, and production-ready hooks for TDD, security scanning, code review, and continuous learning", - "version": "1.9.0", + "description": "The complete Claude Code plugin — 36 agents, 128 skills, 69 commands. Consolidates all official Anthropic plugins (pr-review-toolkit, feature-dev, commit-commands, hookify, code-simplifier, security-guidance, frontend-design) into one package.", + "version": "2.0.0", "author": { "name": "Affaan Mustafa", "email": "me@affaanmustafa.com" diff --git a/.claude-plugin/plugin.json b/.claude-plugin/plugin.json index 27caf617..ace31273 100644 --- a/.claude-plugin/plugin.json +++ b/.claude-plugin/plugin.json @@ -1,7 +1,7 @@ { "name": "everything-claude-code", - "version": "1.9.0", - "description": "Complete collection of battle-tested Claude Code configs from an Anthropic hackathon winner - agents, skills, hooks, and rules evolved over 10+ months of intensive daily use", + "version": "2.0.0", + "description": "The complete Claude Code plugin - 36 agents, 128 skills, 69 commands, and production hooks. 
Consolidates all official Anthropic plugins (pr-review-toolkit, feature-dev, commit-commands, hookify, code-simplifier, security-guidance, frontend-design, explanatory-output-style) into one battle-tested package.", "author": { "name": "Affaan Mustafa", "url": "https://x.com/affaanmustafa" @@ -14,12 +14,21 @@ "agents", "skills", "hooks", + "commands", "rules", "tdd", "code-review", "security", "workflow", "automation", - "best-practices" - ] + "best-practices", + "pr-review", + "feature-dev", + "hookify", + "frontend-design" + ], + "commands": "../commands", + "skills": "../skills", + "agents": "../agents", + "hooks": "../hooks/hooks.json" } diff --git a/agents/code-architect.md b/agents/code-architect.md new file mode 100644 index 00000000..3ac7626d --- /dev/null +++ b/agents/code-architect.md @@ -0,0 +1,69 @@ +--- +name: code-architect +description: Designs feature architectures by analyzing existing codebase patterns and conventions, then providing comprehensive implementation blueprints with specific files to create/modify, component designs, data flows, and build sequences. +model: sonnet +tools: [Read, Grep, Glob, Bash] +--- + +# Code Architect Agent + +You design feature architectures based on deep understanding of the existing codebase. + +## Process + +### 1. Pattern Analysis +- Study existing code organization and naming conventions +- Identify architectural patterns in use (MVC, feature-based, layered, etc.) +- Note testing patterns and conventions +- Understand the dependency graph + +### 2. Architecture Design +- Design the feature to fit naturally into existing patterns +- Choose the simplest architecture that meets requirements +- Consider scalability, but don't over-engineer +- Make confident decisions with clear rationale + +### 3. 
Implementation Blueprint + +Provide for each component: +- **File path**: Where to create or modify +- **Purpose**: What this file does +- **Key interfaces**: Types, function signatures +- **Dependencies**: What it imports and what imports it +- **Data flow**: How data moves through the component + +### 4. Build Sequence + +Order the implementation steps by dependency: +1. Types and interfaces first +2. Core business logic +3. Integration layer (API, database) +4. UI components +5. Tests (or interleaved with TDD) +6. Documentation + +## Output Format + +```markdown +## Architecture: [Feature Name] + +### Design Decisions +- Decision 1: [Rationale] +- Decision 2: [Rationale] + +### Files to Create +| File | Purpose | Priority | +|------|---------|----------| + +### Files to Modify +| File | Changes | Priority | +|------|---------|----------| + +### Data Flow +[Description or diagram] + +### Build Sequence +1. Step 1 (depends on: nothing) +2. Step 2 (depends on: step 1) +... +``` diff --git a/agents/code-explorer.md b/agents/code-explorer.md new file mode 100644 index 00000000..4e3e93ed --- /dev/null +++ b/agents/code-explorer.md @@ -0,0 +1,65 @@ +--- +name: code-explorer +description: Deeply analyzes existing codebase features by tracing execution paths, mapping architecture layers, understanding patterns and abstractions, and documenting dependencies to inform new development. +model: sonnet +tools: [Read, Grep, Glob, Bash] +--- + +# Code Explorer Agent + +You deeply analyze codebases to understand how existing features work. + +## Analysis Process + +### 1. Entry Point Discovery +- Find the main entry points for the feature/area being explored +- Trace from user action (UI click, API call) through the stack + +### 2. Execution Path Tracing +- Follow the call chain from entry to completion +- Note branching logic and conditional paths +- Identify async boundaries and data transformations +- Map error handling and edge case paths + +### 3. 
Architecture Layer Mapping +- Identify which layers the code touches (UI, API, service, data) +- Understand the boundaries between layers +- Note how layers communicate (props, events, API calls, shared state) + +### 4. Pattern Recognition +- Identify design patterns in use (repository, factory, observer, etc.) +- Note abstractions and their purposes +- Understand naming conventions and code organization principles + +### 5. Dependency Documentation +- Map external dependencies (libraries, services, APIs) +- Identify internal dependencies between modules +- Note shared utilities and common patterns + +## Output Format + +```markdown +## Exploration: [Feature/Area Name] + +### Entry Points +- [Entry point 1]: [How it's triggered] + +### Execution Flow +1. [Step] → [Step] → [Step] + +### Architecture Insights +- [Pattern]: [Where and why it's used] + +### Key Files +| File | Role | Importance | +|------|------|------------| + +### Dependencies +- External: [lib1, lib2] +- Internal: [module1 → module2] + +### Recommendations for New Development +- Follow [pattern] for consistency +- Reuse [utility] for [purpose] +- Avoid [anti-pattern] because [reason] +``` diff --git a/agents/code-simplifier.md b/agents/code-simplifier.md new file mode 100644 index 00000000..12954d4a --- /dev/null +++ b/agents/code-simplifier.md @@ -0,0 +1,50 @@ +--- +name: code-simplifier +description: Simplifies and refines code for clarity, consistency, and maintainability while preserving all functionality. Focuses on recently modified code unless instructed otherwise. +model: sonnet +tools: [Read, Write, Edit, Bash, Grep, Glob] +--- + +# Code Simplifier Agent + +You simplify code while preserving all functionality. Focus on recently modified code unless told otherwise. + +## Principles + +1. **Clarity over brevity**: Readable code beats clever code +2. **Consistency**: Match the project's existing patterns and style +3. 
**Preserve behavior**: Every simplification must be functionally equivalent +4. **Respect boundaries**: Follow project standards from CLAUDE.md + +## Simplification Targets + +### Structure +- Extract deeply nested logic into named functions +- Replace complex conditionals with early returns +- Simplify callback chains with async/await +- Remove dead code and unused imports + +### Readability +- Use descriptive variable names (no single letters except loop indices) +- Avoid nested ternary operators — use if/else or extracted functions +- Break long function chains into intermediate named variables +- Use destructuring to clarify object access + +### Patterns +- Replace imperative loops with declarative array methods where clearer +- Use const by default, let only when mutation is required +- Prefer function declarations over arrow functions for named functions +- Use optional chaining and nullish coalescing appropriately + +### Quality +- Remove console.log statements +- Remove commented-out code +- Consolidate duplicate logic +- Simplify overly abstract code that only has one use site + +## Approach + +1. Read the changed files (check `git diff` or specified files) +2. Identify simplification opportunities +3. Apply changes preserving exact functionality +4. Verify no behavioral changes were introduced diff --git a/agents/comment-analyzer.md b/agents/comment-analyzer.md new file mode 100644 index 00000000..601cee0b --- /dev/null +++ b/agents/comment-analyzer.md @@ -0,0 +1,43 @@ +--- +name: comment-analyzer +description: Use this agent when analyzing code comments for accuracy, completeness, and long-term maintainability. This includes after generating large documentation comments or docstrings, before finalizing a PR that adds or modifies comments, when reviewing existing comments for potential technical debt or comment rot, and when verifying that comments accurately reflect the code they describe. 
+model: sonnet +tools: [Read, Grep, Glob, Bash] +--- + +# Comment Analyzer Agent + +You are a code comment analysis specialist. Your job is to ensure every comment in the codebase is accurate, helpful, and maintainable. + +## Analysis Framework + +### 1. Factual Accuracy +- Verify every claim in comments against the actual code +- Check that parameter descriptions match function signatures +- Ensure return value documentation matches implementation +- Flag outdated references to removed/renamed entities + +### 2. Completeness Assessment +- Check that complex logic has adequate explanation +- Verify edge cases and side effects are documented +- Ensure public API functions have complete documentation +- Check that "why" comments explain non-obvious decisions + +### 3. Long-term Value +- Flag comments that just restate the code (low value) +- Identify comments that will break when code changes (fragile) +- Check for TODO/FIXME/HACK comments that need resolution +- Evaluate whether comments add genuine insight + +### 4. Misleading Elements +- Find comments that contradict the code +- Flag stale comments referencing old behavior +- Identify over-promised or under-promised behavior descriptions + +## Output Format + +Provide advisory feedback only — do not modify files directly. Group findings by severity: +- **Inaccurate**: Comments that contradict the code (highest priority) +- **Stale**: Comments referencing removed/changed functionality +- **Incomplete**: Missing important context or edge cases +- **Low-value**: Comments that just restate obvious code diff --git a/agents/conversation-analyzer.md b/agents/conversation-analyzer.md new file mode 100644 index 00000000..c251ea1c --- /dev/null +++ b/agents/conversation-analyzer.md @@ -0,0 +1,51 @@ +--- +name: conversation-analyzer +description: Use this agent when analyzing conversation transcripts to find behaviors worth preventing with hooks. Triggered by /hookify without arguments. 
+model: sonnet +tools: [Read, Grep] +--- + +# Conversation Analyzer Agent + +You analyze conversation history to identify problematic Claude Code behaviors that should be prevented with hooks. + +## What to Look For + +### Explicit Corrections +- "No, don't do that" +- "Stop doing X" +- "I said NOT to..." +- "That's wrong, use Y instead" + +### Frustrated Reactions +- User reverting changes Claude made +- Repeated "no" or "wrong" responses +- User manually fixing Claude's output +- Escalating frustration in tone + +### Repeated Issues +- Same mistake appearing multiple times in the conversation +- Claude repeatedly using a tool in an undesired way +- Patterns of behavior the user keeps correcting + +### Reverted Changes +- `git checkout -- file` or `git restore file` after Claude's edit +- User undoing or reverting Claude's work +- Re-editing files Claude just edited + +## Output Format + +For each identified behavior: +```yaml +behavior: "Description of what Claude did wrong" +frequency: "How often it occurred" +severity: high|medium|low +suggested_rule: + name: "descriptive-rule-name" + event: bash|file|stop|prompt + pattern: "regex pattern to match" + action: block|warn + message: "What to show when triggered" +``` + +Prioritize high-frequency, high-severity behaviors first. diff --git a/agents/pr-test-analyzer.md b/agents/pr-test-analyzer.md new file mode 100644 index 00000000..af70782d --- /dev/null +++ b/agents/pr-test-analyzer.md @@ -0,0 +1,43 @@ +--- +name: pr-test-analyzer +description: Use this agent when reviewing a pull request for test coverage quality and completeness. Invoked after a PR is created or updated to ensure tests adequately cover new functionality and edge cases. +model: sonnet +tools: [Read, Grep, Glob, Bash] +--- + +# PR Test Analyzer Agent + +You review test coverage quality and completeness for pull requests. + +## Analysis Process + +### 1. 
Identify Changed Code +- Parse the PR diff to find new/modified functions, classes, and modules +- Map changed code to existing test files +- Identify untested new code paths + +### 2. Behavioral Coverage Analysis +- Check that each new feature has corresponding test cases +- Verify edge cases are covered (null, empty, boundary values, error states) +- Ensure error handling paths are tested +- Check that integration points have integration tests + +### 3. Test Quality Assessment +- Verify tests actually assert meaningful behavior (not just "no throw") +- Check for proper test isolation (no shared mutable state) +- Ensure test descriptions accurately describe what's being tested +- Look for flaky test patterns (timing, ordering, external dependencies) + +### 4. Coverage Gap Identification +Rate each gap by impact (1-10) focused on preventing real bugs: +- **Critical gaps (8-10)**: Core business logic, security paths, data integrity +- **Important gaps (5-7)**: Error handling, edge cases, integration boundaries +- **Nice-to-have (1-4)**: UI variations, logging, non-critical paths + +## Output Format + +Provide a structured report: +1. **Coverage Summary**: What's tested vs what's not +2. **Critical Gaps**: Must-fix before merge +3. **Improvement Suggestions**: Would strengthen confidence +4. **Positive Observations**: Well-tested areas worth noting diff --git a/agents/silent-failure-hunter.md b/agents/silent-failure-hunter.md new file mode 100644 index 00000000..c1d9fa3d --- /dev/null +++ b/agents/silent-failure-hunter.md @@ -0,0 +1,47 @@ +--- +name: silent-failure-hunter +description: Use this agent when reviewing code changes to identify silent failures, inadequate error handling, and inappropriate fallback behavior. Invoke after completing work that involves error handling, catch blocks, fallback logic, or any code that could suppress errors. 
+model: sonnet +tools: [Read, Grep, Glob, Bash] +--- + +# Silent Failure Hunter Agent + +You have zero tolerance for silent failures. Your mission is to find every place where errors could be swallowed, logged-and-forgotten, or hidden behind inappropriate fallbacks. + +## Hunt Targets + +### 1. Empty Catch Blocks +- `catch (e) {}` — swallowed errors +- `catch (e) { /* ignore */ }` — intentionally swallowed but still dangerous +- `catch (e) { return null }` — error converted to null without logging + +### 2. Inadequate Logging +- `catch (e) { console.log(e) }` — logged but not handled +- Logging without context (no function name, no input data) +- Logging at wrong severity level (error logged as info/debug) + +### 3. Dangerous Fallbacks +- `catch (e) { return defaultValue }` — masks the error from callers +- `.catch(() => [])` — promise errors silently returning empty data +- Fallback values that make bugs harder to detect downstream + +### 4. Error Propagation Issues +- Functions that catch and re-throw without the original stack trace +- Error types being lost (generic Error instead of specific types) +- Async errors without proper await/catch chains + +### 5. Missing Error Handling +- Async functions without try/catch or .catch() +- Network requests without timeout or error handling +- File operations without existence checks +- Database operations without transaction rollback + +## Output Format + +For each finding: +- **Location**: File and line number +- **Severity**: Critical / Important / Advisory +- **Issue**: What's wrong +- **Impact**: What happens when this fails silently +- **Fix**: Specific recommendation diff --git a/agents/type-design-analyzer.md b/agents/type-design-analyzer.md new file mode 100644 index 00000000..db4fdf1c --- /dev/null +++ b/agents/type-design-analyzer.md @@ -0,0 +1,48 @@ +--- +name: type-design-analyzer +description: Use this agent for expert analysis of type design. 
Use when introducing new types, during PR review of type changes, or when refactoring types. Evaluates encapsulation, invariant expression, and enforcement. +model: sonnet +tools: [Read, Grep, Glob, Bash] +--- + +# Type Design Analyzer Agent + +You are a type system design expert. Your goal is to make illegal states unrepresentable. + +## Evaluation Criteria (each rated 1-10) + +### 1. Encapsulation +- Are internal implementation details hidden? +- Can the type's invariants be violated from outside? +- Are mutation points controlled and minimal? +- Score 10: Fully opaque type with controlled API +- Score 1: All fields public, no access control + +### 2. Invariant Expression +- Do the types encode business rules? +- Are impossible states prevented at the type level? +- Does the type system catch errors at compile time vs runtime? +- Score 10: Type makes invalid states impossible to construct +- Score 1: Plain strings/numbers with runtime validation only + +### 3. Invariant Usefulness +- Do the invariants prevent real bugs? +- Are they too restrictive (preventing valid use cases)? +- Do they align with business domain requirements? +- Score 10: Invariants prevent common, costly bugs +- Score 1: Over-engineered constraints with no practical value + +### 4. Enforcement +- Are invariants enforced by the type system (not just conventions)? +- Can invariants be bypassed via casts or escape hatches? +- Are factory functions / constructors the only creation path? 
+- Score 10: Invariants enforced by compiler, no escape hatches +- Score 1: Invariants are just comments, easily violated + +## Output Format + +For each type reviewed: +- Type name and location +- Scores (Encapsulation, Invariant Expression, Usefulness, Enforcement) +- Overall rating and qualitative assessment +- Specific improvement suggestions with code examples diff --git a/commands/clean-gone.md b/commands/clean-gone.md new file mode 100644 index 00000000..482c6fb0 --- /dev/null +++ b/commands/clean-gone.md @@ -0,0 +1,30 @@ +--- +description: Cleans up all git branches marked as [gone] (branches deleted on remote but still exist locally), including removing associated worktrees +--- + +Clean up stale local branches that have been deleted on the remote. + +## Steps + +1. Fetch and prune remote tracking refs: + ```bash + git fetch --prune + ``` + +2. Find branches marked as [gone]: + ```bash + git branch -vv | grep ': gone]' | awk '{print $1}' + ``` + +3. For each gone branch, remove associated worktrees if any: + ```bash + git worktree list + git worktree remove --force <worktree-path> + ``` + +4. Delete the gone branches: + ```bash + git branch -D <branch> + ``` + +5. Report what was cleaned up diff --git a/commands/commit-push-pr.md b/commands/commit-push-pr.md new file mode 100644 index 00000000..d99896d0 --- /dev/null +++ b/commands/commit-push-pr.md @@ -0,0 +1,30 @@ +--- +description: Commit, push, and open a PR +--- + +Commit all changes, push to remote, and create a pull request in one workflow. + +## Steps + +1. Run in parallel: `git status`, `git diff`, `git log --oneline -5`, check current branch tracking +2. Stage and commit changes following conventional commit format +3. Create a new branch if on main/master: `git checkout -b <branch-name>` +4. Push to remote with `-u` flag: `git push -u origin <branch-name>` +5. 
Create PR using gh CLI: + ```bash + gh pr create --title "short title" --body "$(cat <<'EOF' + ## Summary + <1-3 bullet points> + + ## Test plan + - [ ] Testing checklist items + EOF + )" + ``` +6. Return the PR URL + +**Rules:** +- Keep PR title under 70 characters +- Analyze ALL commits in the branch (not just the latest) +- Never force push +- Never skip hooks diff --git a/commands/commit.md b/commands/commit.md new file mode 100644 index 00000000..e4007889 --- /dev/null +++ b/commands/commit.md @@ -0,0 +1,28 @@ +--- +description: Create a git commit +--- + +Create a single git commit from staged and unstaged changes with an appropriate message. + +## Steps + +1. Run `git status` (never use `-uall` flag) and `git diff` in parallel to see all changes +2. Run `git log --oneline -5` to match commit message style +3. Analyze changes and draft a concise commit message following conventional commits format: `<type>: <description>` + - Types: feat, fix, refactor, docs, test, chore, perf, ci + - Focus on "why" not "what" +4. Stage relevant files (prefer specific files over `git add -A`) +5. Create the commit using a HEREDOC for the message: + ```bash + git commit -m "$(cat <<'EOF' + type: description + EOF + )" + ``` +6. Run `git status` to verify success + +**Rules:** +- Never skip hooks (--no-verify) or bypass signing +- Never amend existing commits unless explicitly asked +- Do not commit files that likely contain secrets (.env, credentials.json) +- If pre-commit hook fails, fix the issue and create a NEW commit diff --git a/commands/feature-dev.md b/commands/feature-dev.md new file mode 100644 index 00000000..613321b9 --- /dev/null +++ b/commands/feature-dev.md @@ -0,0 +1,45 @@ +--- +description: Guided feature development with codebase understanding and architecture focus +--- + +A structured 7-phase feature development workflow that emphasizes understanding existing code before writing new code. 
+ +## Phases + +### Phase 1: Discovery +- Read the user's feature request carefully +- Identify key requirements, constraints, and acceptance criteria +- Ask clarifying questions if the request is ambiguous + +### Phase 2: Codebase Exploration +- Use the **code-explorer** agent to deeply analyze relevant existing code +- Trace execution paths and map architecture layers +- Understand existing patterns, abstractions, and conventions +- Document dependencies and integration points + +### Phase 3: Clarifying Questions +- Present findings from exploration to the user +- Ask targeted questions about design decisions, edge cases, and preferences +- **Wait for user response before proceeding** + +### Phase 4: Architecture Design +- Use the **code-architect** agent to design the feature architecture +- Provide implementation blueprint: files to create/modify, component designs, data flows +- Present the plan to the user for approval +- **Wait for user approval before implementing** + +### Phase 5: Implementation +- Implement the feature following the approved architecture +- Use TDD approach: write tests first, then implementation +- Follow existing codebase patterns and conventions +- Make small, focused commits + +### Phase 6: Quality Review +- Use the **code-reviewer** agent to review the implementation +- Address Critical and Important issues +- Verify test coverage meets requirements + +### Phase 7: Summary +- Summarize what was built and how it integrates +- List any follow-up items or known limitations +- Provide testing instructions diff --git a/commands/hookify-configure.md b/commands/hookify-configure.md new file mode 100644 index 00000000..6942aae2 --- /dev/null +++ b/commands/hookify-configure.md @@ -0,0 +1,14 @@ +--- +description: Enable or disable hookify rules interactively +--- + +Interactively enable or disable existing hookify rules. + +## Steps + +1. Find all `.claude/hookify.*.local.md` files +2. Read current state of each rule +3. 
Present list with current enabled/disabled status +4. Ask user which rules to toggle +5. Update the `enabled:` field in selected rule files +6. Confirm changes diff --git a/commands/hookify-help.md b/commands/hookify-help.md new file mode 100644 index 00000000..0a7a910e --- /dev/null +++ b/commands/hookify-help.md @@ -0,0 +1,42 @@ +--- +description: Get help with the hookify system +--- + +Display comprehensive hookify documentation. + +## Hook System Overview + +Hookify creates rule files that integrate with Claude Code's hook system to prevent unwanted behaviors. + +### Event Types +- **bash**: Triggers on Bash tool use — match command patterns +- **file**: Triggers on Write/Edit tool use — match file paths +- **stop**: Triggers when session ends — final checks +- **prompt**: Triggers on user message submission — match input patterns +- **all**: Triggers on all events + +### Rule File Format +Files are stored as `.claude/hookify.{name}.local.md`: + +```yaml +--- +name: descriptive-name +enabled: true +event: bash|file|stop|prompt|all +action: block|warn +pattern: "regex pattern to match" +--- +Message to display when rule triggers. +Supports multiple lines. +``` + +### Commands +- `/hookify [description]` — Create new rules (auto-analyzes conversation if no args) +- `/hookify-list` — View all rules +- `/hookify-configure` — Toggle rules on/off + +### Pattern Tips +- Use regex syntax (pipes for OR, brackets for groups) +- For bash: match against the full command string +- For file: match against the file path +- Test patterns before deploying diff --git a/commands/hookify-list.md b/commands/hookify-list.md new file mode 100644 index 00000000..e31fa5be --- /dev/null +++ b/commands/hookify-list.md @@ -0,0 +1,16 @@ +--- +description: List all configured hookify rules +--- + +Find and display all hookify rules in a formatted table. + +## Steps + +1. Find all `.claude/hookify.*.local.md` files using Glob +2. 
Read each file's frontmatter (name, enabled, event, action, pattern) +3. Display as table: + +| Rule | Enabled | Event | Pattern | File | +|------|---------|-------|---------|------| + +4. Show count and helpful footer about `/hookify-configure` for modifications diff --git a/commands/hookify.md b/commands/hookify.md new file mode 100644 index 00000000..02deee1e --- /dev/null +++ b/commands/hookify.md @@ -0,0 +1,45 @@ +--- +description: Create hooks to prevent unwanted behaviors from conversation analysis or explicit instructions +--- + +Create hook rules to prevent unwanted Claude Code behaviors by analyzing conversation patterns or explicit user instructions. + +## Usage + +`/hookify [description of behavior to prevent]` + +If no arguments provided, analyze the current conversation to find behaviors worth preventing. + +## Workflow + +### Step 1: Gather Behavior Info +- **With arguments**: Parse the user's description of the unwanted behavior +- **Without arguments**: Use the conversation-analyzer agent to find: + - Explicit corrections ("no, don't do that", "stop doing X") + - Frustrated reactions to repeated mistakes + - Reverted changes + - Repeated similar issues + +### Step 2: Present Findings +Show the user what behaviors were identified and proposed hook rules: +- Behavior description +- Proposed event type (bash/file/stop/prompt/all) +- Proposed pattern/matcher +- Proposed action (block/warn) + +### Step 3: Generate Rule Files +For each approved rule, create a file at `.claude/hookify.{name}.local.md`: + +```yaml +--- +name: rule-name +enabled: true +event: bash|file|stop|prompt|all +action: block|warn +pattern: "regex pattern" +--- +Message shown when rule triggers. +``` + +### Step 4: Confirm +Report created rules and how to manage them (`/hookify-list`, `/hookify-configure`). 
diff --git a/commands/review-pr.md b/commands/review-pr.md new file mode 100644 index 00000000..dc28a155 --- /dev/null +++ b/commands/review-pr.md @@ -0,0 +1,36 @@ +--- +description: Comprehensive PR review using specialized agents +--- + +Run a comprehensive, multi-perspective review of a pull request. + +## Usage + +`/review-pr [PR-number-or-URL] [--focus=comments|tests|errors|types|code|simplify]` + +If no PR specified, review the current branch's PR. If no focus specified, run all reviews. + +## Steps + +1. **Identify the PR**: Use `gh pr view` to get PR details, changed files, and diff +2. **Find project guidelines**: Look for CLAUDE.md, .eslintrc, tsconfig.json, etc. +3. **Launch specialized review agents** (in parallel where possible): + + | Agent | Focus | + |-------|-------| + | code-reviewer | Code quality, bugs, security, style guide adherence | + | comment-analyzer | Comment accuracy, completeness, maintainability | + | pr-test-analyzer | Test coverage quality and completeness | + | silent-failure-hunter | Silent failures, inadequate error handling | + | type-design-analyzer | Type design, invariants, encapsulation | + | code-simplifier | Code clarity, consistency, maintainability | + +4. **Aggregate results**: Collect findings, deduplicate, rank by severity +5. 
#!/usr/bin/env python3
"""
Security Reminder Hook for Claude Code

Inspects Write/Edit/MultiEdit tool calls for security-sensitive patterns
and prints a warning to stderr the first time a given (file, rule) pair
is seen in a session. Ported from the security-guidance plugin
(David Dworken, Anthropic).

Exit codes:
    0 -- nothing to report, hook disabled, or unusable input
    2 -- a reminder was printed to stderr (surfaced to the model)
"""

import json
import os
import random
import re
import sys
from datetime import datetime

# Each rule matches either by path ("path_check" callable, tested against the
# file path with leading slashes stripped) or by content ("substrings" list).
# Rules are evaluated in order and the first hit wins.
SECURITY_PATTERNS = [
    {
        "ruleName": "github_actions_workflow",
        "path_check": lambda path: ".github/workflows/" in path
        and (path.endswith(".yml") or path.endswith(".yaml")),
        "reminder": "You are editing a GitHub Actions workflow file. Never use untrusted input directly in run: commands. Use env: variables with proper quoting instead. See: https://github.blog/security/vulnerability-research/how-to-catch-github-actions-workflow-injections-before-attackers-do/",
    },
    {
        "ruleName": "child_process_exec",
        "substrings": ["child_process.exec"],
        "reminder": "Security Warning: child_process exec can lead to command injection. Use execFile instead which does not invoke a shell.",
    },
    {
        "ruleName": "new_function_injection",
        "substrings": ["new Function"],
        "reminder": "Security Warning: new Function with dynamic strings can lead to code injection. Consider alternatives.",
    },
    {
        "ruleName": "eval_injection",
        "substrings": ["eval("],
        "reminder": "Security Warning: eval executes arbitrary code. Use JSON.parse for data or alternative patterns.",
    },
    {
        "ruleName": "document_write_xss",
        "substrings": ["document.write"],
        "reminder": "Security Warning: document.write can be exploited for XSS. Use DOM manipulation methods instead.",
    },
    {
        "ruleName": "innerHTML_xss",
        "substrings": [".innerHTML =", ".innerHTML="],
        "reminder": "Security Warning: innerHTML with untrusted content leads to XSS. Use textContent or sanitize with DOMPurify.",
    },
    {
        "ruleName": "pickle_deserialization",
        "substrings": ["pickle"],
        "reminder": "Security Warning: pickle with untrusted content can lead to arbitrary code execution. Use JSON instead.",
    },
    {
        "ruleName": "os_system_injection",
        "substrings": ["os.system", "from os import system"],
        "reminder": "Security Warning: os.system should only use static arguments. Use subprocess.run with a list of arguments instead.",
    },
]


def get_state_file(session_id):
    """Return the per-session state file path under ~/.claude.

    The session id is interpolated into a filename, so replace anything
    outside [A-Za-z0-9_-] with "_" to prevent path traversal via a crafted
    id (fix: the original interpolated the raw id verbatim).
    """
    safe_id = re.sub(r"[^A-Za-z0-9_-]", "_", session_id)
    return os.path.expanduser(f"~/.claude/security_warnings_state_{safe_id}.json")


def cleanup_old_state_files():
    """Best-effort removal of state files older than 30 days."""
    try:
        state_dir = os.path.expanduser("~/.claude")
        if not os.path.exists(state_dir):
            return
        cutoff = datetime.now().timestamp() - (30 * 24 * 60 * 60)
        for fn in os.listdir(state_dir):
            if fn.startswith("security_warnings_state_") and fn.endswith(".json"):
                fp = os.path.join(state_dir, fn)
                try:
                    if os.path.getmtime(fp) < cutoff:
                        os.remove(fp)
                except OSError:
                    # In Python 3, IOError is an alias of OSError, so a
                    # single OSError clause covers the original pair.
                    pass
    except Exception:
        # Cleanup must never break the hook itself.
        pass


def load_state(session_id):
    """Load the set of already-shown "{file}-{rule}" keys for a session.

    Returns an empty set on any read/parse failure (warnings may repeat,
    which is preferable to crashing the hook).
    """
    sf = get_state_file(session_id)
    if os.path.exists(sf):
        try:
            with open(sf, "r") as f:
                return set(json.load(f))
        except (json.JSONDecodeError, OSError):
            return set()
    return set()


def save_state(session_id, shown):
    """Persist shown-warning keys; failures are ignored (best effort)."""
    sf = get_state_file(session_id)
    try:
        os.makedirs(os.path.dirname(sf), exist_ok=True)
        with open(sf, "w") as f:
            json.dump(list(shown), f)
    except OSError:
        pass


def check_patterns(file_path, content):
    """Return (ruleName, reminder) for the first matching rule, else (None, None).

    Path rules see the file path with leading slashes stripped; content
    rules are skipped when the edit carries no text.
    """
    norm = file_path.lstrip("/")
    for p in SECURITY_PATTERNS:
        if "path_check" in p and p["path_check"](norm):
            return p["ruleName"], p["reminder"]
        if "substrings" in p and content:
            for sub in p["substrings"]:
                if sub in content:
                    return p["ruleName"], p["reminder"]
    return None, None


def extract_content(tool_name, tool_input):
    """Extract the text being written from each supported tool's input shape."""
    if tool_name == "Write":
        return tool_input.get("content", "")
    elif tool_name == "Edit":
        return tool_input.get("new_string", "")
    elif tool_name == "MultiEdit":
        edits = tool_input.get("edits", [])
        return " ".join(e.get("new_string", "") for e in edits) if edits else ""
    return ""


def main():
    # Opt-out switch: set ENABLE_SECURITY_REMINDER=0 to disable the hook.
    if os.environ.get("ENABLE_SECURITY_REMINDER", "1") == "0":
        sys.exit(0)
    # Amortize state-dir cleanup across roughly 10% of invocations.
    if random.random() < 0.1:
        cleanup_old_state_files()
    try:
        data = json.loads(sys.stdin.read())
    except json.JSONDecodeError:
        sys.exit(0)
    session_id = data.get("session_id", "default")
    tool_name = data.get("tool_name", "")
    tool_input = data.get("tool_input", {})
    # Defensive fix: a malformed payload may carry a non-dict tool_input,
    # which would crash the .get() calls below.
    if not isinstance(tool_input, dict):
        sys.exit(0)
    if tool_name not in ["Edit", "Write", "MultiEdit"]:
        sys.exit(0)
    file_path = tool_input.get("file_path", "")
    if not file_path:
        sys.exit(0)
    content = extract_content(tool_name, tool_input)
    rule_name, reminder = check_patterns(file_path, content)
    if rule_name and reminder:
        key = f"{file_path}-{rule_name}"
        shown = load_state(session_id)
        if key not in shown:
            shown.add(key)
            save_state(session_id, shown)
            # Exit code 2 makes Claude Code surface stderr to the model.
            print(reminder, file=sys.stderr)
            sys.exit(2)
    sys.exit(0)


if __name__ == "__main__":
    main()
+- Parenthetical asides for personality and self-awareness (use roughly once every 2-3 paragraphs) + +## Platform-Specific Structure + +### Technical Guides +- open with what the reader gets +- use code or terminal examples in every major section +- use "Pro tip:" callouts for non-obvious insights +- end with concrete takeaways as short bullets, not a summary paragraph +- link to resources at the bottom + +### Essays / Opinion Pieces +- start with tension, contradiction, or a sharp observation +- keep one argument thread per section +- use examples that earn the opinion + +### Newsletters +- keep the first screen strong +- mix insight with updates, not diary filler +- use clear section labels and easy skim structure + +### LinkedIn +- proper capitalization +- short paragraphs (2-3 sentences max) +- first line is the hook (truncates at ~210 chars) +- professional framing: impact, lessons, takeaways +- 3-5 hashtags at the bottom only + +### X/Twitter Threads +- each tweet must standalone (people see them individually) +- hook in first tweet, no "thread:" or "1/" prefix +- one point per tweet +- last tweet: CTA or punchline, not "follow for more" +- 4-7 tweets ideal length +- no links in tweet body (kills reach). Links in first reply. 
+ ## Quality Gate Before delivering: @@ -83,3 +140,7 @@ Before delivering: - confirm the voice matches the supplied examples - ensure every section adds new information - check formatting for the intended platform +- zero banned patterns in entire document +- at least 2-3 parenthetical asides for personality (if voice calls for it) +- examples/evidence before explanation in each section +- specific numbers used where available diff --git a/skills/autonomous-loops/SKILL.md b/skills/autonomous-loops/SKILL.md index d6f45572..f5cf6ff7 100644 --- a/skills/autonomous-loops/SKILL.md +++ b/skills/autonomous-loops/SKILL.md @@ -608,3 +608,135 @@ These patterns compose well: | Continuous Claude | AnandChowdhary | credit: @AnandChowdhary | | NanoClaw | ECC | `/claw` command in this repo | | Verification Loop | ECC | `skills/verification-loop/` in this repo | + +--- + +## 7. Autonomous Agent Harness + +**Transform Claude Code into a persistent autonomous agent system** with scheduled operations, persistent memory, computer use, and task queuing. Replaces standalone agent frameworks (Hermes, AutoGPT) by leveraging Claude Code's native capabilities. 
+ +### Architecture + +``` +┌──────────────────────────────────────────────────────────────┐ +│ Claude Code Runtime │ +│ │ +│ ┌──────────┐ ┌──────────┐ ┌──────────┐ ┌─────────────┐ │ +│ │ Crons │ │ Dispatch │ │ Memory │ │ Computer │ │ +│ │ Schedule │ │ Remote │ │ Store │ │ Use │ │ +│ │ Tasks │ │ Agents │ │ │ │ │ │ +│ └────┬─────┘ └────┬─────┘ └────┬─────┘ └──────┬──────┘ │ +│ │ │ │ │ │ +│ ▼ ▼ ▼ ▼ │ +│ ┌──────────────────────────────────────────────────────┐ │ +│ │ ECC Skill + Agent Layer │ │ +│ └──────────────────────────────────────────────────────┘ │ +│ │ │ │ │ │ +│ ▼ ▼ ▼ ▼ │ +│ ┌──────────────────────────────────────────────────────┐ │ +│ │ MCP Server Layer │ │ +│ │ memory github exa supabase browser-use │ │ +│ └──────────────────────────────────────────────────────┘ │ +└──────────────────────────────────────────────────────────────┘ +``` + +### When to Use the Harness (vs simpler loops) + +- You want an agent that runs continuously or on a schedule +- Setting up automated workflows that trigger periodically +- Building a personal AI assistant that remembers context across sessions +- Replicating functionality from Hermes, AutoGPT, or similar frameworks +- Combining computer use with scheduled execution + +### Core Component: Persistent Memory + +Use Claude Code's built-in memory system enhanced with MCP memory server: + +``` +# Short-term: current session context +Use TodoWrite for in-session task tracking + +# Medium-term: project memory files +Write to ~/.claude/projects/*/memory/ for cross-session recall + +# Long-term: MCP knowledge graph +Use mcp__memory__create_entities for permanent structured data +Use mcp__memory__create_relations for relationship mapping +Use mcp__memory__add_observations for new facts about known entities +``` + +### Core Component: Scheduled Operations (Crons) + +```bash +# Daily morning briefing +claude -p "Create a scheduled task: every weekday at 9am, review my GitHub notifications, open PRs, and calendar. 
Write a morning briefing to memory." + +# Continuous learning +claude -p "Create a scheduled task: every Sunday at 8pm, extract patterns from this week's sessions and update the learned skills." +``` + +Useful cron patterns: + +| Pattern | Schedule | Use Case | +|---------|----------|----------| +| Daily standup | `0 9 * * 1-5` | Review PRs, issues, deploy status | +| Weekly review | `0 10 * * 1` | Code quality metrics, test coverage | +| Hourly monitor | `0 * * * *` | Production health, error rate checks | +| Nightly build | `0 2 * * *` | Run full test suite, security scan | + +### Core Component: Remote Dispatch + +Trigger Claude Code agents remotely for event-driven workflows: + +```bash +# Trigger from CI/CD pipeline +claude -p "Build failed on main. Diagnose and fix." + +# Trigger from webhook (GitHub webhook -> dispatch -> Claude agent -> fix -> PR) +``` + +### Core Component: Task Queue + +Manage a persistent queue of tasks that survive session boundaries: + +```markdown +# Task persistence via memory +Write task queue to ~/.claude/projects/*/memory/task-queue.md + +## Active Tasks +- [ ] PR #123: Review and approve if CI green +- [ ] Monitor deploy: check /health every 30 min for 2 hours +- [ ] Research: Find 5 leads in AI tooling space + +## Completed +- [x] Daily standup: reviewed 3 PRs, 2 issues +``` + +### Example Harness Workflows + +**Autonomous PR Reviewer:** +``` +Cron: every 30 min during work hours +1. Check for new PRs on watched repos +2. For each new PR: pull branch, run tests, review changes +3. Post review comments via GitHub MCP +4. Update memory with review status +``` + +**Personal Research Agent:** +``` +Cron: daily at 6 AM +1. Check saved search queries in memory +2. Run Exa searches for each query +3. Summarize new findings, compare against yesterday +4. Write digest to memory +5. 
Flag high-priority items for morning review +``` + +### Harness Constraints + +- Cron tasks run in isolated sessions (share context only through memory) +- Computer use requires explicit permission grants +- Remote dispatch may have rate limits (design crons with appropriate intervals) +- Keep memory files concise (archive old data rather than growing unbounded) +- Always verify that scheduled tasks completed successfully diff --git a/skills/content-engine/SKILL.md b/skills/content-engine/SKILL.md index 9398c31a..e0aa1aa2 100644 --- a/skills/content-engine/SKILL.md +++ b/skills/content-engine/SKILL.md @@ -78,6 +78,69 @@ When asked for a campaign, return: - optional CTA variants - any missing inputs needed before publishing +## Content Types by Platform + +### X/Twitter Content Mix + +| Type | Frequency | Example | +|------|-----------|---------| +| Educational/tips | 3-4/week | "5 mistakes with [tool]", model selection, config tips | +| News/commentary | 2-3/week | CVE breakdowns, industry news, product launches | +| Milestones | 1-2/week | Star counts, test counts, download numbers | +| Personal/journey | 2-3/week | Builder life, founder diary, lessons learned | +| Ship logs | 2/week (weekends) | "sunday ship log: [list]" | +| Engagement bait | 1-2/week | "what's one thing you want [product] to do better?" | + +### Engagement Bait Formats +- "what's your [X]?" (config sharing, stack sharing) +- "hot take: [contrarian opinion]" (disagreement drives engagement) +- "[thing] vs [thing]. go." (tribalism) +- "what am I missing?" (vulnerability + question) + +### Viral Mechanics +- Large specific numbers: "1,184 malicious AI plugins" +- Security scares: "your AI has access to your SSH keys" +- Zero-X framing: "zero marketing budget. zero ads. just useful open source." +- Before/after comparisons: default vs configured +- Personal vulnerability: "ran the scan on my own config. grade B." 
+- Contrarian takes: "stop using [expensive thing] for everything" + +### TikTok Script Structure +``` +HOOK (0-3s): [visual/verbal pattern interrupt] +PROBLEM (3-10s): [what's broken, what people don't know] +SOLUTION (10-40s): [demo, walkthrough, the thing] +CTA (40-60s): [what to do next] +``` + +### YouTube Script Structure +- Hook (first 15s): Show the result, then explain how +- Intro (15-30s): Who you are, what this covers, why it matters +- Chapters (2-3 min each): One concept per chapter, new visual every 30 seconds max +- Outro: Clear CTA (star repo, subscribe, try product) +- Description: first 2 lines matter (above fold), include links, chapters + +## Content Recycling Flow + +``` +YouTube (highest effort) + -> Newsletter (long-form written adaptation) + -> LinkedIn (professional 3-takeaway version) + -> X/Twitter (atomic tweet units) + -> TikTok (15-60s visual snippet) +``` + +Adapt, don't duplicate. Each platform version should feel native. + +## Posting Schedule + +| Slot | Time | Content Type | +|------|------|-------------| +| Morning (7-8am) | - | Primary: educational, technical, news | +| Midday (11:30am-12:30pm) | - | Engagement: question, hot take, QT | +| Evening (5-6pm) | - | Secondary: ship log, personal, lighter | +| Throughout day | - | 5-10 replies to relevant accounts | + ## Quality Gate Before delivering: @@ -86,3 +149,8 @@ Before delivering: - no generic hype language - no duplicated copy across platforms unless requested - the CTA matches the content and audience +- no hashtags on X (they signal inauthenticity on dev X) +- no links in X tweet body (kills reach) +- hook in first 7 words (X) or first line (LinkedIn) +- numbers are specific and sourced +- at least one parenthetical aside per post for personality diff --git a/skills/frontend-design/SKILL.md b/skills/frontend-design/SKILL.md new file mode 100644 index 00000000..9512217a --- /dev/null +++ b/skills/frontend-design/SKILL.md @@ -0,0 +1,55 @@ +--- +name: frontend-design 
+description: Create distinctive, production-grade frontend interfaces with high design quality. Use this skill when the user asks to build web components, pages, or applications. Generates creative, polished code that avoids generic AI aesthetics. +--- + +This skill guides creation of distinctive, production-grade frontend interfaces that avoid generic "AI slop" aesthetics. Implement real working code with exceptional attention to aesthetic details and creative choices. + +The user provides frontend requirements: a component, page, application, or interface to build. They may include context about the purpose, audience, or technical constraints. + +## Design Thinking + +Before coding, understand the context and commit to a BOLD aesthetic direction: +- **Purpose**: What problem does this interface solve? Who uses it? +- **Tone**: Pick an extreme: brutally minimal, maximalist chaos, retro-futuristic, organic/natural, luxury/refined, playful/toy-like, editorial/magazine, brutalist/raw, art deco/geometric, soft/pastel, industrial/utilitarian, etc. +- **Constraints**: Technical requirements (framework, performance, accessibility). +- **Differentiation**: What makes this UNFORGETTABLE? What's the one thing someone will remember? + +**CRITICAL**: Choose a clear conceptual direction and execute it with precision. Bold maximalism and refined minimalism both work - the key is intentionality, not intensity. + +Then implement working code (HTML/CSS/JS, React, Vue, etc.) that is: +- Production-grade and functional +- Visually striking and memorable +- Cohesive with a clear aesthetic point-of-view +- Meticulously refined in every detail + +## Frontend Aesthetics Guidelines + +### Typography +Choose fonts that are beautiful, unique, and interesting. Avoid generic fonts like Arial and Inter; opt instead for distinctive choices that elevate the frontend's aesthetics. Pair a distinctive display font with a refined body font. + +### Color & Theme +Commit to a cohesive aesthetic. 
Use CSS variables for consistency. Dominant colors with sharp accents outperform timid, evenly-distributed palettes. + +### Motion +Use animations for effects and micro-interactions. Prioritize CSS-only solutions for HTML. Use Motion library for React when available. Focus on high-impact moments: one well-orchestrated page load with staggered reveals (animation-delay) creates more delight than scattered micro-interactions. Use scroll-triggering and hover states that surprise. + +### Spatial Composition +Unexpected layouts. Asymmetry. Overlap. Diagonal flow. Grid-breaking elements. Generous negative space OR controlled density. + +### Backgrounds & Visual Details +Create atmosphere and depth rather than defaulting to solid colors. Apply creative forms like gradient meshes, noise textures, geometric patterns, layered transparencies, dramatic shadows, decorative borders, custom cursors, and grain overlays. + +## Anti-Patterns (NEVER do these) + +- Overused font families (Inter, Roboto, Arial, system fonts) +- Cliched color schemes (particularly purple gradients on white backgrounds) +- Predictable layouts and component patterns +- Cookie-cutter design that lacks context-specific character +- Converging on common choices (Space Grotesk) across generations + +## Execution + +Match implementation complexity to the aesthetic vision. Maximalist designs need elaborate code with extensive animations and effects. Minimalist or refined designs need restraint, precision, and careful attention to spacing, typography, and subtle details. + +Interpret creatively and make unexpected choices that feel genuinely designed for the context. No design should be the same. Vary between light and dark themes, different fonts, different aesthetics. 
diff --git a/skills/github-ops/SKILL.md b/skills/github-ops/SKILL.md new file mode 100644 index 00000000..34f7b037 --- /dev/null +++ b/skills/github-ops/SKILL.md @@ -0,0 +1,144 @@ +--- +name: github-ops +description: GitHub repository operations, automation, and management. Issue triage, PR management, CI/CD operations, release management, and security monitoring using the gh CLI. Use when the user wants to manage GitHub issues, PRs, CI status, releases, contributors, stale items, or any GitHub operational task beyond simple git commands. +origin: ECC +--- + +# GitHub Operations + +Manage GitHub repositories with a focus on community health, CI reliability, and contributor experience. + +## When to Activate + +- Triaging issues (classifying, labeling, responding, deduplicating) +- Managing PRs (review status, CI checks, stale PRs, merge readiness) +- Debugging CI/CD failures +- Preparing releases and changelogs +- Monitoring Dependabot and security alerts +- Managing contributor experience on open-source projects +- User says "check GitHub", "triage issues", "review PRs", "merge", "release", "CI is broken" + +## Tool Requirements + +- **gh CLI** for all GitHub API operations +- Repository access configured via `gh auth login` + +## Issue Triage + +Classify each issue by type and priority: + +**Types:** bug, feature-request, question, documentation, enhancement, duplicate, invalid, good-first-issue + +**Priority:** critical (breaking/security), high (significant impact), medium (nice to have), low (cosmetic) + +### Triage Workflow + +1. Read the issue title, body, and comments +2. Check if it duplicates an existing issue (search by keywords) +3. Apply appropriate labels via `gh issue edit --add-label` +4. For questions: draft and post a helpful response +5. For bugs needing more info: ask for reproduction steps +6. For good first issues: add `good-first-issue` label +7. 
For duplicates: comment with link to original, add `duplicate` label + +```bash +# Search for potential duplicates +gh issue list --search "keyword" --state all --limit 20 + +# Add labels +gh issue edit --add-label "bug,high-priority" + +# Comment on issue +gh issue comment --body "Thanks for reporting. Could you share reproduction steps?" +``` + +## PR Management + +### Review Checklist + +1. Check CI status: `gh pr checks ` +2. Check if mergeable: `gh pr view --json mergeable` +3. Check age and last activity +4. Flag PRs >5 days with no review +5. For community PRs: ensure they have tests and follow conventions + +### Stale Policy + +- Issues with no activity in 14+ days: add `stale` label, comment asking for update +- PRs with no activity in 7+ days: comment asking if still active +- Auto-close stale issues after 30 days with no response (add `closed-stale` label) + +```bash +# Find stale issues (no activity in 14+ days) +gh issue list --label "stale" --state open + +# Find PRs with no recent activity +gh pr list --json number,title,updatedAt --jq '.[] | select(.updatedAt < "2026-03-01")' +``` + +## CI/CD Operations + +When CI fails: + +1. Check the workflow run: `gh run view --log-failed` +2. Identify the failing step +3. Check if it is a flaky test vs real failure +4. For real failures: identify the root cause and suggest a fix +5. For flaky tests: note the pattern for future investigation + +```bash +# List recent failed runs +gh run list --status failure --limit 10 + +# View failed run logs +gh run view --log-failed + +# Re-run a failed workflow +gh run rerun --failed +``` + +## Release Management + +When preparing a release: + +1. Check all CI is green on main +2. Review unreleased changes: `gh pr list --state merged --base main` +3. Generate changelog from PR titles +4. 
Create release: `gh release create` + +```bash +# List merged PRs since last release +gh pr list --state merged --base main --search "merged:>2026-03-01" + +# Create a release +gh release create v1.2.0 --title "v1.2.0" --generate-notes + +# Create a pre-release +gh release create v1.3.0-rc1 --prerelease --title "v1.3.0 Release Candidate 1" +``` + +## Security Monitoring + +```bash +# Check Dependabot alerts +gh api repos/{owner}/{repo}/dependabot/alerts --jq '.[].security_advisory.summary' + +# Check secret scanning alerts +gh api repos/{owner}/{repo}/secret-scanning/alerts --jq '.[].state' + +# Review and auto-merge safe dependency bumps +gh pr list --label "dependencies" --json number,title +``` + +- Review and auto-merge safe dependency bumps +- Flag any critical/high severity alerts immediately +- Check for new Dependabot alerts weekly at minimum + +## Quality Gate + +Before completing any GitHub operations task: +- all issues triaged have appropriate labels +- no PRs older than 7 days without a review or comment +- CI failures have been investigated (not just re-run) +- releases include accurate changelogs +- security alerts are acknowledged and tracked diff --git a/skills/hermes-generated/content-crosspost-ops/SKILL.md b/skills/hermes-generated/content-crosspost-ops/SKILL.md index 426fd1df..acd10f82 100644 --- a/skills/hermes-generated/content-crosspost-ops/SKILL.md +++ b/skills/hermes-generated/content-crosspost-ops/SKILL.md @@ -1,6 +1,6 @@ --- name: content-crosspost-ops -description: Evidence-first crossposting workflow for Hermes. Use when adapting posts, threads, demos, videos, or articles across LinkedIn, Threads, Bluesky, Farcaster, and YouTube Community while keeping per-platform copy distinct and verified. +description: Evidence-first crossposting workflow for Hermes. Use when adapting posts, threads, demos, videos, or articles across requested social destinations while keeping per-platform copy distinct and verified. 
metadata: hermes: tags: [generated, content, crosspost, workflow, verification] @@ -17,6 +17,7 @@ Pull these imported skills into the workflow when relevant: - `crosspost` for sequencing and destination-specific adaptation - `article-writing` when the source asset is long-form - `video-editing` or `fal-ai-media` when the post should lead with a clip, frame, or visual +- `continuous-agent-loop` when the task spans capability checks, multi-platform execution, and verification - `search-first` before claiming a platform or API supports a format - `eval-harness` mindset for publish verification and status reporting @@ -24,22 +25,45 @@ Pull these imported skills into the workflow when relevant: - user says `crosspost`, `post everywhere`, `put this on linkedin too`, or similar - the source asset is an X post/thread, quote tweet, article, demo video, screenshot, or YouTube post -- the destination is a community thread or showcase channel like Discord's `built-with-claude` +- the destination is a community thread or showcase channel like Discord's `built-with-claude`, or another destination whose live support must be checked first - the user asks whether a new destination or post type is supported +## Support Source Of Truth + +Treat these live sources as authoritative for destination support: +- `/Users/affoon/.hermes/workspace/content/postbridge_publish.py` +- `/Users/affoon/.hermes/workspace/content/crosspost-verification-latest.md` + +Examples in this skill are destination patterns, not a promise that every destination is live right now. If a destination has no current wrapper path or no recent verified publish record, report `unverified capability` or `blocked by unsupported capability` until checked. 
+ +## Capability Matrix + +Resolve these as separate questions before you answer or execute: +- is the destination itself currently publishable or only `unverified capability` +- does the asked transport support it right now, for example PostBridge +- does another live path exist, such as browser publish or a different verified API +- what is the active blocker: unsupported transport, unsupported destination, auth, MFA, missing asset, or approval + +Do not flatten these into one label. `PostBridge unsupported` can still mean `browser path available`, and `browser blocked on auth` does not mean the destination is fully unsupported. + ## Workflow 1. Read the real source asset and any destination rules first. Do not draft from memory. - if the user pasted thread requirements, comply with those requirements before drafting 2. If the request depends on platform capability, API support, or quota behavior, verify it before answering. - if the user asks whether PostBridge can handle a destination or format, inspect the real wrapper, configs, or recent publish logs before promising support - - if the destination is unsupported, say `blocked by unsupported capability` and give the next viable path + - treat `/Users/affoon/.hermes/workspace/content/postbridge_publish.py` plus recent verification records as the source of truth for current support + - separate the destination question from the transport question, and answer both + - if there is no current wrapper path or recent proof, report `unverified capability` before drafting + - if PostBridge is unsupported but a browser path exists, say `PostBridge unsupported, browser path available` instead of flattening the whole destination to `unsupported` + - if the destination itself is unsupported, say `blocked by unsupported capability` and give the next viable path + - if the task started as a support question or `did you do it?`, settle that capability or verification answer before drafting or scheduling other destinations 3. 
Extract one core idea and a few specifics. Split multiple ideas into separate posts. 4. Write native variants instead of reusing the same copy: - X: fast hook, minimal framing - LinkedIn: strong first line, short paragraphs, explicit lesson or takeaway - - Threads, Bluesky, Farcaster: shorter, conversational, clearly distinct wording - - YouTube Community: lead with the result or takeaway, keep it media-friendly + - Threads, Bluesky, or other short-form social destinations: shorter, conversational, clearly distinct wording + - YouTube Community or other media-led destinations: lead with the result or takeaway, keep it media-friendly 5. Prefer native media when the user wants engagement: - for quote tweets, articles, or external links, prefer screenshots or media over a bare outbound link when the platform rewards native assets - if the user says the demo itself should lead, use the video or a frame from it instead of a generic screenshot @@ -56,9 +80,14 @@ Pull these imported skills into the workflow when relevant: - post the primary platform first - stagger secondary destinations when requested, defaulting to 4 hours apart unless the user overrides it - prefer PostBridge for supported platforms, browser flows only when required + - if the asked transport is unavailable but another approved live path exists, say that explicitly and take that path next instead of doing unrelated side work + - if the explicitly asked destination is unsupported or still unverified, report that exact state before moving to any secondary destination, and only continue with other requested platforms when the user asked for a multi-destination publish or approved the fallback order 9. 
Verify before claiming completion: - capture a returned post ID, URL, API response, or an updated verification log - when the user asks `did you do it?`, answer with the exact status for each platform: posted, queued, drafted, uploaded-only, blocked, or awaiting verification + - when the user interrupts with `are you working?`, answer in one sentence with the exact current step or blocker on the explicitly asked destination before more tool use + - lead with the explicitly asked destination first, and include the proof or blocker on that destination before mentioning any secondary platform work + - if the asked destination is still unresolved, say that first instead of leading with successful side work on other platforms - record every attempt with `/Users/affoon/.hermes/workspace/content/log_crosspost.py` or `/Users/affoon/.hermes/workspace/content/postbridge_publish.py` - if the state is only drafted, uploaded-only, queued, blocked, or pending manual action, report that exact status @@ -71,6 +100,11 @@ Pull these imported skills into the workflow when relevant: - do not keep searching unrelated systems after a login or MFA blocker is already the limiting step - do not keep refining copy or looking for better assets once auth is the only blocker on a browser-only publish - do not answer a support question with a guess when the wrapper, logs, or API response can settle it +- do not treat destination examples in this skill as a live support matrix +- do not collapse `transport unsupported` into `destination unsupported` when another live path still exists +- do not hide an unresolved asked destination behind progress on other supported destinations +- do not answer `did you do it?` by leading with successful secondary platforms while the explicitly asked destination is still unresolved +- do not keep scheduling or verifying secondary destinations after a status interrupt while the explicitly asked destination is still unresolved - do not ignore the user's preference 
for screenshots or native media over raw links ## Verification diff --git a/skills/hermes-generated/email-ops/SKILL.md b/skills/hermes-generated/email-ops/SKILL.md index 7a23d96b..2be5e3da 100644 --- a/skills/hermes-generated/email-ops/SKILL.md +++ b/skills/hermes-generated/email-ops/SKILL.md @@ -18,6 +18,7 @@ Before using this workflow: Pull these companion skills into the workflow when relevant: - `investor-outreach` when the email is investor, partner, or sponsor facing +- `continuous-agent-loop` when the task spans draft, approval, send, and Sent proof across multiple steps - `search-first` before assuming a mail API, folder name, or CLI flag works - `eval-harness` mindset for Sent-folder verification and exact status reporting @@ -41,18 +42,23 @@ Pull these companion skills into the workflow when relevant: 4. For replies or new mail: - read the full thread first - choose the sender account that matches the project or recipient - - compose non-interactively with piped `himalaya template send` or `message write` + - if the user has not explicitly approved the exact outgoing text, draft first and show the final copy before sending + - store approval state with `python3 /Users/affoon/.hermes/scripts/email_guard.py queue ...` + - before any send, require `python3 /Users/affoon/.hermes/scripts/email_guard.py approve --id <id>` plus `python3 /Users/affoon/.hermes/scripts/email_guard.py can-send --id <id> --account <account> --recipient <recipient> --subject <subject>` + - before any send or reply, run `python3 /Users/affoon/.hermes/scripts/email_guard.py history --recipient <recipient> --subject <subject> --days 45 --json` to catch prior outbound and accidental repeats + - if the exact Himalaya send syntax is uncertain, check `himalaya ... --help` or a checked-in helper path before trying a new subcommand + - compose with `himalaya template send` or `himalaya message send` using file-backed content when possible, not ad hoc heredoc or inline Python raw-MIME wrappers - avoid editor-driven flows unless required 5.
If the request mentions attachments or images: - resolve the exact absolute file path before broad mailbox searching - keep the task on the local send-and-verify path instead of branching into unrelated web or repo exploration - if Mail.app fallback is needed, pass the attachment paths after the body: `osascript /Users/affoon/.hermes/scripts/send_mail.applescript "<account>" "<recipient>" "<subject>" "<body>" "/absolute/file1" ...` -6. If the user wants an actual send and Himalaya fails with an IMAP append or save-copy error, fall back to `/Users/affoon/.hermes/scripts/send_mail.applescript` only when the user did not forbid Apple Mail or `osascript`, then verify Sent. If the user constrained the method to Himalaya only, report the exact blocked state instead of silently switching tools. +6. If the user wants an actual send and Himalaya fails with an IMAP append or save-copy error, a CLI dead end, or a raw-message parser crash, do not immediately resend. First verify whether the message already landed in Sent using the history check or `himalaya envelope list -a <account> -f Sent ...`. If the failure was an invalid command path or a panic before Sent proof exists, report the exact blocked state such as `blocked on invalid himalaya send path` or `blocked on himalaya parser crash` and preserve the draft. Only if there is still no sent copy and the user explicitly approved the send may you fall back to `/Users/affoon/.hermes/scripts/send_mail.applescript`. If the user constrained the method to Himalaya only, report the exact blocked state instead of silently switching tools. +7. During long-running mailbox work, keep the loop tight: draft -> approval -> send -> Sent proof.
Do the next irreversible step first, and do not branch into unrelated transports or searches while the current blocker is unresolved. If a budget warning says 3 or fewer tool calls remain, stop broad exploration and spend the remaining calls on the highest-confidence execution or verification step, or report exact status and next action. 8. If the user wants sent-mail evidence: - verify via `himalaya envelope list -a -f Sent ...` or the account's actual sent folder - report the subject, recipient, account, and message id or date if available -9. Report exact status words: drafted, sent, moved, flagged, deleted, blocked, awaiting verification. +9. Report exact status words: drafted, approval-pending, approved, sent, moved, flagged, deleted, blocked, awaiting verification. ## Pitfalls @@ -60,6 +66,8 @@ Pull these companion skills into the workflow when relevant: - do not use the wrong account just because it is default - do not delete uncertain business mail during cleanup - do not switch tools after the user constrained the method +- do not invent `himalaya smtp` or other unverified send paths +- do not build last-minute raw-send wrappers when the checked commands and guard scripts already cover the path - do not wander into unrelated searches while an attachment path or Sent verification is unresolved - do not keep searching through the budget warning while the user is asking for a status update diff --git a/skills/hermes-generated/knowledge-ops/SKILL.md b/skills/hermes-generated/knowledge-ops/SKILL.md index e5677888..723867b8 100644 --- a/skills/hermes-generated/knowledge-ops/SKILL.md +++ b/skills/hermes-generated/knowledge-ops/SKILL.md @@ -12,6 +12,7 @@ Use this when the user asks Hermes to remember something, recover an older conve Pull these companion skills into the workflow when relevant: - `continuous-learning-v2` for evidence-backed pattern capture and cross-session learning +- `continuous-agent-loop` when the lookup spans multiple stores, compaction 
summaries, and follow-up recovery steps - `search-first` before inventing a new lookup path or assuming a store is empty - `eval-harness` mindset for exact source attribution and negative-search reporting @@ -19,22 +20,24 @@ Pull these companion skills into the workflow when relevant: - user says `do you remember`, `it was in memory`, `it was in openclaw`, `find the old session`, or similar - the prompt contains a compaction summary or `[Files already read ... do NOT re-read these]` +- the prompt says `use the context summary above`, `proceed`, or otherwise hands off loaded context plus a concrete writing, editing, or response task - the answer depends on Hermes workspace memory, Supermemory, session logs, or the historical knowledge base ## Workflow 1. Start from the evidence already in the prompt: - treat compaction summaries and `do NOT re-read` markers as usable context + - if the prompt already says `use the context summary above` or asks you to proceed with writing, editing, or responding, continue from that loaded context first and search only the missing variables - do not waste turns re-reading the same files unless the summary is clearly insufficient -2. Search in a fixed order before saying `not found`: +2. Search in a fixed order before saying `not found`, unless the user already named the store: - `mcp_supermemory_recall` with a targeted query - grep `/Users/affoon/.hermes/workspace/memory/` - grep `/Users/affoon/.hermes/workspace/` more broadly - `session_search` for recent Hermes conversations - - grep `/Users/affoon/GitHub/affaans_knowledge_base/` or the OpenClaw archive for historical context -3. 
If the user says the answer is in a specific memory store, pivot there immediately: - - `openclaw memory` means favor the historical knowledge base or OpenClaw archive - - `not in this session` means stop digging through the current thread and move to persistent stores + - grep `/Users/affoon/GitHub/affaans_knowledge_base/` and `/Users/affoon/.hermes/openclaw-home/hub/workspace/memory/` for historical context +3. If the user says the answer is in a specific memory store, pivot there immediately after the initial targeted recall: + - `openclaw memory` means favor `/Users/affoon/GitHub/affaans_knowledge_base/` and `/Users/affoon/.hermes/openclaw-home/hub/workspace/memory/` + - `not in this session` means stop digging through the current thread and move to persistent stores instead of re-reading current-session files 4. Keep the search narrow and evidence-led: - reuse names, dates, channels, account names, or quoted phrases from the user - search the most likely store first instead of spraying generic queries everywhere @@ -47,6 +50,8 @@ Pull these companion skills into the workflow when relevant: - do not ignore a compaction summary and start over from zero - do not keep re-reading files the prompt says are already loaded +- do not turn a loaded-context handoff into a pure retrieval loop when the user already asked for an actual draft, edit, or response +- do not keep searching the current session after the user already named OpenClaw or another persistent store as the likely source - do not answer from vague memory without a source path, date, or session reference - do not stop after one failed memory source when others remain diff --git a/skills/hermes-generated/research-ops/SKILL.md b/skills/hermes-generated/research-ops/SKILL.md index fdebd30f..26081864 100644 --- a/skills/hermes-generated/research-ops/SKILL.md +++ b/skills/hermes-generated/research-ops/SKILL.md @@ -1,6 +1,6 @@ --- name: research-ops -description: Evidence-first research workflow for Hermes. 
Use when answering current questions, evaluating a market or tool, enriching leads, or deciding whether a request should become ongoing monitored data collection. +description: Evidence-first research workflow for Hermes. Use when answering current questions, evaluating a market or tool, enriching leads, comparing strategic options, or deciding whether a request should become ongoing monitored data collection. metadata: hermes: tags: [generated, research, market, discovery, monitoring, workflow, verification] @@ -16,6 +16,7 @@ Pull these imported skills into the workflow when relevant: - `deep-research` for multi-source cited synthesis - `market-research` for decision-oriented framing - `exa-search` for first-pass discovery and current-web retrieval +- `continuous-agent-loop` when the task spans user-provided evidence, fresh verification, and a final recommendation across multiple turns - `data-scraper-agent` when the user really needs recurring collection or monitoring - `search-first` before building new scraping or enrichment logic - `eval-harness` mindset for claim quality, freshness, and explicit uncertainty @@ -24,28 +25,41 @@ Pull these imported skills into the workflow when relevant: - user says `research`, `look up`, `find`, `who should i talk to`, `what's the latest`, or similar - the answer depends on current public information, external sources, or a ranked set of candidates +- the user pastes a compaction summary, copied research, manual calculations, or says `factor this in` +- the user asks `should i do X or Y`, `compare these options`, or wants an explicit recommendation under uncertainty - the task sounds recurring enough that a scraper or scheduled monitor may be better than a one-off search ## Workflow -1. Classify the ask before searching: +1. 
Start from the evidence already in the prompt: + - treat compaction summaries, pasted research, copied calculations, and quoted assumptions as loaded inputs + - normalize them into `user-provided evidence`, `needs verification`, and `open questions` + - do not restart the analysis from zero if the user already gave you a partial model +2. Classify the ask before searching: - quick factual answer - decision memo or comparison - lead list or enrichment - recurring monitoring request -2. Start with the fastest evidence path: +3. Build the decision surface before broad searching when the ask is comparative: + - list the options, decision criteria, constraints, and assumptions explicitly + - keep concrete numbers and dates attached to the option they belong to + - mark which variables are already evidenced and which still need outside verification +4. Start with the fastest evidence path: - use `exa-search` first for broad current-web discovery - if the question is about a local wrapper, config, or checked-in code path, inspect the live local source before making any web claim -3. Deepen only where the evidence justifies it: +5. Deepen only where the evidence justifies it: - use `deep-research` when the user needs synthesis, citations, or multiple angles - use `market-research` when the result should end in a recommendation, ranking, or go/no-go call -4. Separate fact from inference: + - keep `continuous-agent-loop` discipline when the task spans user evidence, fresh searches, and recommendation updates across interruptions +6. Separate fact from inference: - label sourced facts clearly + - label user-provided evidence clearly - label inferred fit, ranking, or recommendation as inference - include dates when freshness matters -5. Decide whether this should stay manual: +7. 
Decide whether this should stay manual: - if the user will likely ask for the same scan repeatedly, use `data-scraper-agent` patterns or propose a monitored collection path instead of repeating the same manual research forever -6. Report with evidence: +8. Report with evidence: + - group the answer into sourced facts, user-provided evidence, inference, and recommendation when the ask is a comparison or decision - cite the source or local file behind each important claim - if evidence is thin or conflicting, say so directly @@ -53,12 +67,16 @@ Pull these imported skills into the workflow when relevant: - do not answer current questions from stale memory when a fresh search is cheap - do not conflate local code-backed behavior with market or web evidence +- do not ignore pasted research or compaction context and redo the whole investigation from scratch +- do not mix user-provided assumptions into sourced facts without labeling them - do not present unsourced numbers or rankings as facts - do not spin up a heavy deep-research pass for a quick capability check that local code can answer +- do not leave the comparison criteria implicit when the user asked for a recommendation - do not keep one-off researching a repeated monitoring ask when automation is the better fit ## Verification - important claims have a source, file path, or explicit inference label +- user-provided evidence is surfaced as a distinct layer when it materially affects the answer - freshness-sensitive answers include concrete dates when relevant - recurring-monitoring recommendations state whether the task should remain manual or graduate to a scraper/workflow diff --git a/skills/hermes-generated/terminal-ops/SKILL.md b/skills/hermes-generated/terminal-ops/SKILL.md index 6ec37dab..c32f4bdb 100644 --- a/skills/hermes-generated/terminal-ops/SKILL.md +++ b/skills/hermes-generated/terminal-ops/SKILL.md @@ -13,6 +13,7 @@ Use this when the user asks Hermes to fix code, resolve CI failures, run termina ## 
Skill Stack Pull these imported skills into the workflow when relevant: +- `continuous-agent-loop` for multi-step execution with scope freezes, recovery gates, and progress checkpoints - `agentic-engineering` for scoped decomposition and explicit done conditions - `plankton-code-quality` for write-time quality expectations and linter discipline - `eval-harness` for pass/fail verification after each change @@ -42,6 +43,7 @@ Pull these imported skills into the workflow when relevant: 4. Verify after each meaningful change: - rerun the smallest command that proves the fix - escalate to the broader build, lint, or test only after the local failure is addressed + - if the same proving command keeps failing with the same signature, freeze the broader loop, reduce scope to the failing unit, and stop repeating the same retry - review the diff before any commit or push 5. Push only when the requested state is real: - distinguish `changed locally`, `verified locally`, `committed`, and `pushed` @@ -56,6 +58,7 @@ Pull these imported skills into the workflow when relevant: - do not use destructive git commands or revert unrelated local work - do not claim `fixed` if the proving command was not rerun - do not claim `pushed` if the change only exists locally +- do not keep rerunning broad verification after the same unchanged failure, narrow the loop or report the blocker ## Verification diff --git a/skills/hookify-rules/SKILL.md b/skills/hookify-rules/SKILL.md new file mode 100644 index 00000000..e256d2e9 --- /dev/null +++ b/skills/hookify-rules/SKILL.md @@ -0,0 +1,128 @@ +--- +name: hookify-rules +description: This skill should be used when the user asks to create a hookify rule, write a hook rule, configure hookify, add a hookify rule, or needs guidance on hookify rule syntax and patterns. +--- + +# Writing Hookify Rules + +## Overview + +Hookify rules are markdown files with YAML frontmatter that define patterns to watch for and messages to show when those patterns match. 
Rules are stored in `.claude/hookify.{rule-name}.local.md` files. + +## Rule File Format + +### Basic Structure + +```markdown +--- +name: rule-identifier +enabled: true +event: bash|file|stop|prompt|all +pattern: regex-pattern-here +--- + +Message to show Claude when this rule triggers. +Can include markdown formatting, warnings, suggestions, etc. +``` + +### Frontmatter Fields + +| Field | Required | Values | Description | +|-------|----------|--------|-------------| +| name | Yes | kebab-case string | Unique identifier (verb-first: warn-*, block-*, require-*) | +| enabled | Yes | true/false | Toggle without deleting | +| event | Yes | bash/file/stop/prompt/all | Which hook event triggers this | +| action | No | warn/block | warn (default) shows message; block prevents operation | +| pattern | Yes* | regex string | Pattern to match (*or use conditions for complex rules) | + +### Advanced Format (Multiple Conditions) + +```markdown +--- +name: warn-env-api-keys +enabled: true +event: file +conditions: + - field: file_path + operator: regex_match + pattern: \.env$ + - field: new_text + operator: contains + pattern: API_KEY +--- + +You're adding an API key to a .env file. Ensure this file is in .gitignore! +``` + +**Condition fields by event:** +- bash: `command` +- file: `file_path`, `new_text`, `old_text`, `content` +- prompt: `user_prompt` + +**Operators:** `regex_match`, `contains`, `equals`, `not_contains`, `starts_with`, `ends_with` + +All conditions must match for rule to trigger. + +## Event Type Guide + +### bash Events +Match Bash command patterns: +- Dangerous commands: `rm\s+-rf`, `dd\s+if=`, `mkfs` +- Privilege escalation: `sudo\s+`, `su\s+` +- Permission issues: `chmod\s+777` + +### file Events +Match Edit/Write/MultiEdit operations: +- Debug code: `console\.log\(`, `debugger` +- Security risks: `eval\(`, `innerHTML\s*=` +- Sensitive files: `\.env$`, `credentials`, `\.pem$` + +### stop Events +Completion checks and reminders. 
Pattern `.*` matches always. + +### prompt Events +Match user prompt content for workflow enforcement. + +## Pattern Writing Tips + +### Regex Basics +- Escape special chars: `.` to `\.`, `(` to `\(` +- `\s` whitespace, `\d` digit, `\w` word char +- `+` one or more, `*` zero or more, `?` optional +- `|` OR operator + +### Common Pitfalls +- **Too broad**: `log` matches "login", "dialog" — use `console\.log\(` +- **Too specific**: `rm -rf /tmp` — use `rm\s+-rf` +- **YAML escaping**: Use unquoted patterns; quoted strings need `\\s` + +### Testing +```bash +python3 -c "import re; print(re.search(r'your_pattern', 'test text'))" +``` + +## File Organization + +- **Location**: `.claude/` directory in project root +- **Naming**: `.claude/hookify.{descriptive-name}.local.md` +- **Gitignore**: Add `.claude/*.local.md` to `.gitignore` + +## Commands + +- `/hookify [description]` - Create new rules (auto-analyzes conversation if no args) +- `/hookify-list` - View all rules in table format +- `/hookify-configure` - Toggle rules on/off interactively +- `/hookify-help` - Full documentation + +## Quick Reference + +Minimum viable rule: +```markdown +--- +name: my-rule +enabled: true +event: bash +pattern: dangerous_command +--- +Warning message here +``` diff --git a/skills/investor-materials/SKILL.md b/skills/investor-materials/SKILL.md index e392706a..3f1f4f77 100644 --- a/skills/investor-materials/SKILL.md +++ b/skills/investor-materials/SKILL.md @@ -86,6 +86,51 @@ Include: - revenue math that does not sum cleanly - inflated certainty where assumptions are fragile +## Pitch Deck Structure (10-12 slides) + +Recommended flow with guidance per slide: +1. **Title:** Company name + one-line positioning +2. **Problem:** Quantify the pain. Use specific market numbers. +3. **Solution:** Show the product, not a description of it. Screenshots, demo frames, or architecture diagrams. +4. **Product demo:** Phase 1 (live) vs Phase 2 (funded). Always present as phased if not fully built. +5. 
**Market:** TAM with source. Growth rate with source. Key catalyst for "why now." +6. **Business model:** Revenue layers with year-by-year projections. Show the math. +7. **Traction:** Working product, users, revenue, waitlist, partnerships. Concrete numbers only. +8. **Team:** Each founder gets a credibility anchor (prior company, metric, recognition). +9. **Competitive landscape:** Positioning map or table. Show the gap you fill. +10. **Ask:** Raise amount, instrument (SAFE/priced), valuation range. +11. **Milestones / Use of funds:** Timeline from now to Series A. Use of funds must sum exactly. +12. **Appendix:** Revenue model detail, regulatory strategy, technical architecture. + +## Financial Model Requirements + +When building or updating financial models: +- All assumptions must be stated explicitly and separately from projections +- Include bear/base/bull scenarios (sensitivity analysis) +- Revenue layers must sum correctly across all timeframes +- Use of funds must sum to the exact raise amount +- Include unit economics where possible (cost per user, revenue per customer) +- Discount rates and growth rates must be sourced or justified +- Milestone-linked spending: tie spend to specific milestones + +## Accelerator Applications + +When writing accelerator applications: +- Follow the specific word/character limits of each program +- Lead with traction (metrics, users, revenue, recognition) +- Be specific about what the accelerator adds (network, funding, customers) +- Never sound desperate. Frame as mutual fit. 
+- Keep internal metrics consistent with the deck and model + +## Honesty Requirements + +These are non-negotiable: +- Clearly distinguish between what is live/working and what requires funding +- Never attach revenue figures to things that are not revenue-generating +- Never claim awards or recognition not actually received +- "Algorithmic" when the tech is algorithmic, "AI" only when there is actual ML/AI +- All traction claims must be verifiable + ## Quality Gate Before delivering: @@ -94,3 +139,6 @@ Before delivering: - assumptions are visible, not buried - the story is clear without hype language - the final asset is defensible in a partner meeting +- phase distinctions (live vs funded) are clear +- no unverifiable claims +- team roles and titles are correct diff --git a/skills/investor-outreach/SKILL.md b/skills/investor-outreach/SKILL.md index 4fc69f4c..9b8f11b0 100644 --- a/skills/investor-outreach/SKILL.md +++ b/skills/investor-outreach/SKILL.md @@ -66,6 +66,63 @@ Include: - one new proof point if available - the next step +## Cold Email Structure (Detailed) + +**Subject line:** Short, specific, no fluff. Reference something the investor actually did or said. +- Good: "Structured products for prediction markets (Goldman co-founder)" +- Good: "[Fund Name] thesis + prediction market infrastructure" +- Bad: "Exciting opportunity in DeFi" +- Bad: "Partnership inquiry" + +**Opening (1-2 sentences):** Reference something specific about the investor. A deal they led, a thesis they published, a tweet they posted. This cannot be generic. + +**The pitch (2-3 sentences):** What you are building, why now, and the one metric that proves traction. + +**The ask (1 sentence):** Specific, low-friction. "Would you have 20 minutes this week?" or "Would it make sense to share our memo?" + +**Sign-off:** Name, title, one credibility line. + +### Email Tone Rules +- Direct. No begging. No "I know you're busy" or "I'd be honored if you..." +- Confident but not arrogant. 
Let facts do the heavy lifting. +- Short. Under 150 words total. Investors get 200+ emails/day. +- Zero em dashes. Zero corporate speak. + +## Warm Intro Requests (Detailed) + +Template for requesting intros through mutual connections: +``` +hey [mutual name], + +quick ask. i see you know [target name] at [company]. +i'm building [your product] which [1-line relevance to target]. +would you be open to a quick intro? happy to send you a +forwardable blurb. + +[your name] +``` + +## Direct Cold Outreach Template + +``` +hey [target name], + +[specific reference to their recent work/post/announcement]. +i'm [your name], building [product]. [1 line on why this is +relevant to them specifically]. + +[specific low-friction ask]. + +[your name] +``` + +## Anti-Patterns (Never Do) +- Generic templates with no personalization +- Long paragraphs explaining your whole company +- Multiple asks in one message +- Fake familiarity ("loved your recent talk!" without specifics) +- Bulk-sent messages with visible merge fields + ## Quality Gate Before delivering: @@ -74,3 +131,6 @@ Before delivering: - there is no fluff or begging language - the proof point is concrete - word count stays tight +- under 150 words (cold email) or 200 words (follow-up) +- correct branding and terminology +- numbers match canonical sources diff --git a/skills/knowledge-ops/SKILL.md b/skills/knowledge-ops/SKILL.md new file mode 100644 index 00000000..af480ceb --- /dev/null +++ b/skills/knowledge-ops/SKILL.md @@ -0,0 +1,124 @@ +--- +name: knowledge-ops +description: Knowledge base management, ingestion, sync, and retrieval across multiple storage layers (local files, MCP memory, vector stores, Git repos). Use when the user wants to save, organize, sync, deduplicate, or search across their knowledge systems. +origin: ECC +--- + +# Knowledge Operations + +Manage a multi-layered knowledge system for ingesting, organizing, syncing, and retrieving knowledge across multiple stores. 
+ +## When to Activate + +- User wants to save information to their knowledge base +- Ingesting documents, conversations, or data into structured storage +- Syncing knowledge across systems (local files, MCP memory, Supabase, Git repos) +- Deduplicating or organizing existing knowledge +- User says "save this to KB", "sync knowledge", "what do I know about X", "ingest this", "update the knowledge base" +- Any knowledge management task beyond simple memory recall + +## Knowledge Architecture + +### Layer 1: Claude Code Memory (Quick Access) +- **Path:** `~/.claude/projects/*/memory/` +- **Format:** Markdown files with frontmatter +- **Types:** user preferences, feedback, project context, reference +- **Use for:** Quick-access context that persists across conversations +- **Automatically loaded at session start** + +### Layer 2: MCP Memory Server (Structured Knowledge Graph) +- **Access:** MCP memory tools (create_entities, create_relations, add_observations, search_nodes) +- **Use for:** Semantic search across all stored memories, relationship mapping +- **Cross-session persistence with queryable graph structure** + +### Layer 3: External Data Store (Supabase, PostgreSQL, etc.) +- **Use for:** Structured data, large document storage, full-text search +- **Good for:** Documents too large for memory files, data needing SQL queries + +### Layer 4: Git-Backed Knowledge Base +- **Use for:** Version-controlled knowledge, shareable context +- **Good for:** Conversation exports, session snapshots, reference documents +- **Benefits:** Full history, collaboration, backup + +## Ingestion Workflow + +When new knowledge needs to be captured: + +### 1. Classify +What type of knowledge is it? 
+- Business decision -> memory file (project type) + MCP memory +- Personal preference -> memory file (user/feedback type) +- Reference info -> memory file (reference type) + MCP memory +- Large document -> external data store + summary in memory +- Conversation/session -> Git knowledge base repo + +### 2. Deduplicate +Check if this knowledge already exists: +- Search memory files for existing entries +- Query MCP memory with relevant terms +- Do not create duplicates. Update existing entries instead. + +### 3. Store +Write to appropriate layer(s): +- Always update Claude Code memory for quick access +- Use MCP memory for semantic searchability and relationship mapping +- Commit to Git KB for version control on major additions + +### 4. Index +Update any relevant indexes or summary files. + +## Sync Operations + +### Conversation Sync +Periodically sync conversation history into the knowledge base: +- Sources: Claude session files, Codex sessions, other agent sessions +- Destination: Git knowledge base repo +- Generate a session index for quick browsing +- Commit and push + +### Workspace State Sync +Mirror workspace configuration and scripts to the knowledge base: +- Generate directory maps +- Redact sensitive config before committing +- Track changes over time + +### Cross-Source Knowledge Sync +Pull knowledge from multiple sources into one place: +- Claude/ChatGPT/Grok conversation exports +- Browser bookmarks +- GitHub activity events +- Write status summary, commit and push + +## Memory Patterns + +``` +# Short-term: current session context +Use TodoWrite for in-session task tracking + +# Medium-term: project memory files +Write to ~/.claude/projects/*/memory/ for cross-session recall + +# Long-term: MCP knowledge graph +Use mcp__memory__create_entities for permanent structured data +Use mcp__memory__create_relations for relationship mapping +Use mcp__memory__add_observations for new facts about known entities +Use mcp__memory__search_nodes to find existing 
knowledge +``` + +## Best Practices + +- Keep memory files concise. Archive old data rather than letting files grow unbounded. +- Use frontmatter (YAML) for metadata on all knowledge files. +- Deduplicate before storing. Search first, then create or update. +- Redact sensitive information (API keys, passwords) before committing to Git. +- Use consistent naming conventions for knowledge files (lowercase-kebab-case). +- Tag entries with topics/categories for easier retrieval. + +## Quality Gate + +Before completing any knowledge operation: +- no duplicate entries created +- sensitive data redacted from any Git-tracked files +- indexes and summaries updated +- appropriate storage layer chosen for the data type +- cross-references added where relevant diff --git a/skills/lead-intelligence/SKILL.md b/skills/lead-intelligence/SKILL.md new file mode 100644 index 00000000..1461df89 --- /dev/null +++ b/skills/lead-intelligence/SKILL.md @@ -0,0 +1,186 @@ +--- +name: lead-intelligence +description: AI-native lead intelligence and outreach pipeline. Replaces Apollo, Clay, and ZoomInfo with agent-powered signal scoring, mutual ranking, warm path discovery, and personalized outreach. Use when the user wants to find, qualify, and reach high-value contacts. +origin: ECC +--- + +# Lead Intelligence + +Agent-powered lead intelligence pipeline that finds, scores, and reaches high-value contacts through social graph analysis and warm path discovery. 
+ +## When to Activate + +- User wants to find leads or prospects in a specific industry +- Building an outreach list for partnerships, sales, or fundraising +- Researching who to reach out to and the best path to reach them +- User says "find leads", "outreach list", "who should I reach out to", "warm intros" +- Needs to score or rank a list of contacts by relevance +- Wants to map mutual connections to find warm introduction paths + +## Tool Requirements + +### Required +- **Exa MCP** -- Deep web search for people, companies, and signals (`web_search_exa`) +- **X API** -- Follower/following graph, mutual analysis, recent activity + +### Optional (enhance results) +- **LinkedIn** -- Via browser-use MCP or direct API for connection graph +- **Apollo/Clay API** -- For enrichment cross-reference if user has access +- **GitHub MCP** -- For developer-centric lead qualification + +## Pipeline Overview + +``` +1. Signal -> 2. Mutual -> 3. Warm Path -> 4. Enrich -> 5. Outreach + Scoring Ranking Discovery Draft +``` + +## Stage 1: Signal Scoring + +Search for high-signal people in target verticals. Assign a weight to each based on: + +| Signal | Weight | Source | +|--------|--------|--------| +| Role/title alignment | 30% | Exa, LinkedIn | +| Industry match | 25% | Exa company search | +| Recent activity on topic | 20% | X API search, Exa | +| Follower count / influence | 10% | X API | +| Location proximity | 10% | Exa, LinkedIn | +| Engagement with your content | 5% | X API interactions | + +### Signal Search Approach + +1. Define target parameters (verticals, roles, locations) +2. Run Exa deep search for people and companies in each vertical +3. Run X API search for active voices on relevant topics +4. Score each result against the signal weights +5. Rank and deduplicate + +## Stage 2: Mutual Ranking + +For each scored target, analyze the user's social graph to find the warmest path. + +### Algorithm + +1. Pull user's X following list and LinkedIn connections +2. 
For each high-signal target, check for shared connections +3. Rank mutuals by: + +| Factor | Weight | +|--------|--------| +| Number of connections to targets | 40% | +| Mutual's current role/company | 20% | +| Mutual's location | 15% | +| Industry alignment | 15% | +| Mutual's identifiability (handle/profile) | 10% | + +### Output Format + +``` +MUTUAL RANKING REPORT +===================== + +#1 @mutual_handle (Score: 92) + Name: Jane Smith + Role: Partner @ Acme Ventures + Location: San Francisco + Connections to targets: 7 + Connected to: @target1, @target2, @target3, ... + Best intro path: Jane invested in Target1's company + +#2 @mutual_handle2 (Score: 85) + ... +``` + +## Stage 3: Warm Path Discovery + +For each target, find the shortest introduction chain: + +``` +You --[follows]--> Mutual A --[invested in]--> Target Company +You --[follows]--> Mutual B --[co-founded with]--> Target Person +You --[met at]--> Event --[also attended]--> Target Person +``` + +### Path Types (ordered by warmth) +1. **Direct mutual** -- You both follow/know the same person +2. **Portfolio connection** -- Mutual invested in or advises target's company +3. **Co-worker/alumni** -- Mutual worked at same company or attended same school +4. **Event overlap** -- Both attended same conference/program +5. 
**Content engagement** -- Target engaged with mutual's content or vice versa + +## Stage 4: Enrichment + +For each qualified lead, pull: + +- Full name, current title, company +- Company size, funding stage, recent news +- Recent X posts (last 30 days): topics, tone, interests +- Mutual interests with user (shared follows, similar content) +- Recent company events (product launch, funding round, hiring) + +### Enrichment Sources +- Exa: company data, news, blog posts +- X API: recent tweets, bio, followers +- GitHub: open source contributions (for developer-centric leads) +- LinkedIn (via browser-use): full profile, experience, education + +## Stage 5: Outreach Draft + +Generate personalized outreach for each lead. Two modes: + +### Warm Intro Request (to mutual) +``` +hey [mutual name], + +quick ask. i see you know [target name] at [company]. +i'm building [your product] which [1-line relevance to target]. +would you be open to a quick intro? happy to send you a +forwardable blurb. + +[your name] +``` + +### Direct Cold Outreach (to target) +``` +hey [target name], + +[specific reference to their recent work/post/announcement]. +i'm [your name], building [product]. [1 line on why this is +relevant to them specifically]. + +[specific low-friction ask]. + +[your name] +``` + +### Anti-Patterns (never do) +- Generic templates with no personalization +- Long paragraphs explaining your whole company +- Multiple asks in one message +- Fake familiarity ("loved your recent talk!" without specifics) +- Bulk-sent messages with visible merge fields + +## Configuration + +Users should set these environment variables: + +```bash +# Required +export X_BEARER_TOKEN="..." +export X_ACCESS_TOKEN="..." +export X_ACCESS_TOKEN_SECRET="..." +export X_API_KEY="..." +export X_API_SECRET="..." +export EXA_API_KEY="..." + +# Optional +export LINKEDIN_COOKIE="..." # For browser-use LinkedIn access +export APOLLO_API_KEY="..." 
# For Apollo enrichment +``` + +## Related Skills + +- `x-api` -- X/Twitter API integration for graph analysis +- `investor-outreach` -- Investor-specific outreach patterns +- `market-research` -- Company and fund due diligence diff --git a/skills/market-research/SKILL.md b/skills/market-research/SKILL.md index 12ffa034..04d44976 100644 --- a/skills/market-research/SKILL.md +++ b/skills/market-research/SKILL.md @@ -65,6 +65,30 @@ Default structure: 5. recommendation 6. sources +## Domain-Specific Research Context + +When researching specific verticals, collect domain-specific signals: + +### Prediction Markets +Key metrics: Volume, open interest, user count, market categories +Regulatory landscape: CFTC (US), FCA (UK), global patchwork +Key players: Polymarket, Kalshi, Robinhood (event contracts), Metaculus, Manifold + +### DeFi / Structured Products +Key concepts: Vaults, exotic options, baskets, LP positions, DLMM +Key players: Cega, Ribbon Finance, Opyn, OrBit Markets +Chain-specific context matters (Solana vs Ethereum vs L2s) + +### AI Agent Security +Key concepts: Agent permissions, tool poisoning, prompt injection, OWASP LLM Top 10 +Key players: Invariant Labs, Backslash, Dam Secure, Cogent Security, Entire, Pillar Security + +### General Research Practices +- For investor due diligence: produce a 200-300 word dossier with fund overview, relevant investments, thesis alignment, suggested angle, and red flags +- For competitive analysis: always include "so what" for each finding relative to the user's venture +- For market sizing: follow TAM/SAM/SOM with explicit growth rate (CAGR with source), key drivers, and key risks +- For technology research: cover architecture (not marketing), trade-offs, adoption signals (GitHub stars, npm downloads, TVL if DeFi), and integration complexity + ## Quality Gate Before delivering: @@ -73,3 +97,5 @@ Before delivering: - the recommendation follows from the evidence - risks and counterarguments are included - the output makes 
a decision easier +- no filler paragraphs or generic market commentary +- contrarian/risk perspective explicitly included diff --git a/skills/oura-health/SKILL.md b/skills/oura-health/SKILL.md new file mode 100644 index 00000000..14aae5c6 --- /dev/null +++ b/skills/oura-health/SKILL.md @@ -0,0 +1,162 @@ +--- +name: oura-health +description: Oura Ring health data sync, analysis, and wellness reporting via the Oura API v2. Sleep, readiness, activity, HRV, heart rate, SpO2, stress, and resilience tracking with automated sync and trend analysis. Use when the user wants health data, sleep scores, recovery metrics, or wellness reports from their Oura Ring. +origin: ECC +--- + +# Oura Health Operations + +Sync, analyze, and report on health data from the Oura Ring via the Oura API v2. + +## When to Activate + +- User asks about sleep quality, readiness, recovery, activity, or health stats +- Generating wellness reports or trend analysis +- Syncing Oura data to local storage or knowledge base +- User says "how did I sleep", "my health stats", "wellness report", "check my Oura", "am I recovered" +- Any Oura API interaction or biometric data analysis + +## Authentication + +Oura uses OAuth2. Store credentials in environment variables or a `.env` file: + +```bash +export OURA_CLIENT_ID="your-client-id" +export OURA_CLIENT_SECRET="your-client-secret" +export OURA_ACCESS_TOKEN="your-access-token" +export OURA_REFRESH_TOKEN="your-refresh-token" +``` + +### Token Refresh + +If the access token is expired, refresh it: + +```bash +curl -X POST "https://api.ouraring.com/oauth/token" \ + -d "grant_type=refresh_token" \ + -d "refresh_token=$OURA_REFRESH_TOKEN" \ + -d "client_id=$OURA_CLIENT_ID" \ + -d "client_secret=$OURA_CLIENT_SECRET" +``` + +Update your `.env` with the new tokens after refresh. 
+ +## API Endpoints (v2) + +Base URL: `https://api.ouraring.com/v2/usercollection/` + +| Endpoint | Data | +|----------|------| +| `daily_sleep` | Sleep score, total sleep, deep/REM/light, efficiency, latency | +| `daily_activity` | Activity score, steps, calories, active time, sedentary time | +| `daily_readiness` | Readiness score, HRV balance, body temp, recovery index | +| `daily_spo2` | Blood oxygen levels | +| `daily_stress` | Stress levels throughout the day | +| `daily_resilience` | Resilience score and contributing factors | +| `heart_rate` | Continuous heart rate data | +| `sleep` | Detailed sleep periods with phases | +| `workout` | Exercise sessions | +| `daily_cardiovascular_age` | Cardio age estimate | + +All endpoints accept `start_date` and `end_date` query params (YYYY-MM-DD format). + +### Example API Call + +```bash +curl -H "Authorization: Bearer $OURA_ACCESS_TOKEN" \ + "https://api.ouraring.com/v2/usercollection/daily_sleep?start_date=2026-03-25&end_date=2026-03-31" +``` + +```python +import os, requests + +headers = {"Authorization": f"Bearer {os.environ['OURA_ACCESS_TOKEN']}"} +params = {"start_date": "2026-03-25", "end_date": "2026-03-31"} + +resp = requests.get( + "https://api.ouraring.com/v2/usercollection/daily_sleep", + headers=headers, params=params +) +data = resp.json()["data"] +``` + +## Wellness Report Format + +When generating health summaries, use this structure: + +1. **Overall Status** -- composite of sleep + readiness + activity scores +2. **Sleep Quality** -- total sleep, deep sleep %, REM %, efficiency, sleep debt +3. **Recovery** -- readiness score, HRV trend, resting HR, body temperature deviation +4. **Activity** -- steps, active calories, goal progress +5. **Trends** -- 7-day rolling averages for key metrics +6. **Recommendations** -- 1-2 actionable suggestions based on the data + +Keep it concise. Numbers and insights, not paragraphs. 
+ +### Example Output + +``` +WELLNESS REPORT (Mar 25-31) +=========================== + +Overall: GOOD (avg readiness 78, sleep 82, activity 71) + +Sleep: 7h12m avg (deep 18%, REM 22%, light 60%) + Efficiency: 91% | Latency: 8min avg + Sleep debt: -1h32m (improving) + +Recovery: Readiness trending up (+4 over 7d) + HRV: 42ms avg (baseline 38ms) | Resting HR: 58 bpm + Body temp: +0.1C (normal range) + +Activity: 8,420 steps/day avg | 2,180 active cal + Goal progress: 85% | Sedentary: 9.2h avg + +Recommendations: +- Deep sleep below 20%. Try earlier last meal (3h before bed). +- Activity trending down. Add a 20min walk in the afternoon. +``` + +## Automated Sync + +Set up a daily sync cron to pull Oura data automatically: + +```bash +# Example: daily sync at 10 AM +claude -p "Pull yesterday's Oura data (sleep, readiness, activity, HRV) and write a summary to memory." +``` + +### Sync Script Pattern + +```bash +#!/bin/bash +# oura-sync.sh - Pull daily Oura data + +source ~/.env.oura + +# Refresh token if needed +# ... 
(token refresh logic) + +DATE=$(date -v-1d +%Y-%m-%d) # yesterday + +for endpoint in daily_sleep daily_activity daily_readiness daily_stress; do + curl -s -H "Authorization: Bearer $OURA_ACCESS_TOKEN" \ + "https://api.ouraring.com/v2/usercollection/${endpoint}?start_date=${DATE}&end_date=${DATE}" \ + > "oura-data/${endpoint}_${DATE}.json" +done + +# Generate markdown summary from JSON files +``` + +## Integration with Other Skills + +- Use with `knowledge-ops` to persist health trends in the knowledge base +- Use with `content-engine` to create health-related content +- Use with `autonomous-loops` for scheduled health check-ins + +## Security + +- Never commit Oura tokens to Git +- Store OAuth credentials in `.env` files (gitignored) +- Token auto-refresh should update the `.env` file in place +- Log functions should write to stderr to avoid polluting data pipelines diff --git a/skills/pmx-guidelines/SKILL.md b/skills/pmx-guidelines/SKILL.md new file mode 100644 index 00000000..a3ef50e5 --- /dev/null +++ b/skills/pmx-guidelines/SKILL.md @@ -0,0 +1,210 @@ +--- +name: pmx-guidelines +description: PMX prediction markets platform development guidelines, architecture patterns, and project-specific best practices. Use when working on PMX or similar Next.js + Supabase + Solana prediction market codebases. Covers semantic search, real-money safety, deployment workflow, and design system rules. +origin: ECC +--- + +# PMX Prediction Markets - Development Guidelines + +Project-specific development guidelines, architectural patterns, and critical rules for prediction market platforms handling real money. + +## When to Activate + +- Working on a prediction markets platform +- Building a Next.js + Supabase + Solana application that handles real money +- Implementing semantic search with Redis + OpenAI embeddings +- Following strict deployment workflows (no direct push to production) +- User references PMX or a similar prediction market codebase + +## Core Principles + +### 1. 
Real Money Safety +This platform handles real money for real users. +- ONE person does all code review and production deployments +- Bugs mean financial losses. Test everything. +- NEVER push until EVERYTHING is bulletproof + +### 2. Many Small Files > Few Large Files +- High cohesion, low coupling +- Files: 200-400 lines typical, 800 max +- Extract utilities from large components +- Organize by feature/domain, not by code type + +### 3. No Emojis in Codebase +- Never in documentation, code comments, console output, or error messages +- Use clear text: "SUCCESS:" not checkmarks, "ERROR:" not X marks + +### 4. Single README.md for All Documentation +- All documentation in ONE file: `README.md` +- Use collapsible sections for organization +- No separate .md files (except CLAUDE.md) + +### 5. Immutability in JavaScript/TypeScript +Always create new objects, never mutate: + +```javascript +// WRONG: Mutating original object +async function fetchWithRetry(url, options) { + options.headers['Authorization'] = newToken // MUTATION + response = await fetch(url, options) +} + +// CORRECT: Create new object +async function fetchWithRetry(url, options) { + const retryOptions = { + ...options, + headers: { + ...options.headers, + Authorization: `Bearer ${newToken}` + } + } + response = await fetch(url, retryOptions) +} +``` + +## Tech Stack + +- **Frontend**: Next.js 15, React 19, TypeScript +- **Database**: Supabase PostgreSQL +- **Blockchain**: Solana Web3.js +- **Authentication**: Privy (@privy-io/react-auth) +- **UI**: Tailwind CSS + Three.js +- **Search**: Redis Stack + OpenAI embeddings (semantic search) +- **Trading**: Meteora CP-AMM SDK +- **Charts**: Lightweight Charts + Recharts + +## Semantic Search System + +### Architecture (Mandatory Pattern) +``` +User Query -> Debounce (500ms) -> Generate Embedding (OpenAI) -> +Vector Search (Redis KNN) -> Fetch Data (Supabase) -> +Sort by Similarity -> Results Displayed +``` + +### Performance +- ~300ms average (200ms OpenAI + 
10ms Redis + 50ms Supabase) +- Cost: ~$0.0001 per search +- Scales to 100K+ markets +- Automatic fallback to substring search if Redis unavailable + +### Critical Implementation Rule +Always use the useSemanticSearch hook: + +```javascript +const { + searchQuery, + searchResults, + isSearching, + handleSearchChange, // Use this, NOT setSearchQuery + clearSearch +} = useSemanticSearch(500) + +// ALWAYS check semantic search is enabled +if (isSemanticEnabled && searchResults) { + // Use semantic results +} else { + // Use substring fallback +} +``` + +## Git Workflow (Critical) + +1. Test locally FIRST +2. Commit locally only: `git commit` +3. DO NOT push upstream +4. Wait for review +5. Small commits: one logical change per commit +6. Never commit: `.env.local`, `node_modules/`, build artifacts + +### Branch Strategy +- `main` (upstream) - PRODUCTION - DO NOT PUSH DIRECTLY +- `main` (fork) - Your fork - Push here for testing +- Feature branches: `fix/issue-32`, `feat/add-feature` + +## Pre-Deployment Requirements (Mandatory) + +1. **Comprehensive Local Testing** + - Every feature tested manually + - Every API endpoint tested with curl + - Every edge case considered + - Mobile AND desktop tested + - No console errors anywhere + +2. **Comprehensive Automated Testing** + - Unit tests for utilities + - Integration tests for APIs + - E2E tests for critical flows + - Database migration tests + - Race condition tests + +3. **Security Review** + - No secrets exposed + - All inputs validated + - Authentication/authorization verified + - Rate limiting tested + +4. 
**Only After All Above** + - Commit locally with detailed message + - DO NOT push to production + - Wait for project owner review + +## CSS and Styling Rules + +- **Unify styles**: Extend existing patterns, never create parallel systems +- **Tailwind first**: Prefer Tailwind utilities over custom CSS +- **One CSS file only**: `globals.css` +- **Delete unused styles immediately** + +### Design System +- Background: `#0a1929`, `#1a2a3a`, `#2a3a4a` +- Primary: `#00bcd4` (cyan) +- Text: `#ffffff` (white), `#94a3b8` (gray) +- Spacing: Tailwind spacing scale (4px base) +- Typography: Inter font family +- Shadows: Subtle glows for glass-morphism + +## Next.js Best Practices + +- Use Next.js built-ins first: `router.back()`, `useSearchParams()`, Server Components +- Client components: `'use client'` for interactivity +- Server components: Static content, data fetching, SEO +- Proper loading states on all data-fetching components +- Dynamic imports for code splitting + +## API Design + +### Conventions +- Next.js API Routes pattern (route.js files) +- Authentication via Privy (JWT tokens) +- Supabase client for database operations +- Comprehensive error handling with NextResponse +- Rate limiting on search endpoints +- Caching headers for static data + +### Supabase Pattern (Mandatory) +```javascript +const { data, error } = await supabase + .from('markets') + .select('*') + .eq('published', true) + +if (error) { + console.error('Supabase error:', error) + return NextResponse.json({ success: false, error: error.message }, { status: 500 }) +} + +return NextResponse.json({ success: true, data }) +``` + +## Security + +- All API keys server-side only +- Input validation on all API routes +- Sanitize user inputs before queries +- Use Supabase RLS (Row Level Security) +- Rate limiting on expensive operations +- HTTPS everywhere in production +- Never hardcode secrets (Supabase URLs, OpenAI keys, Redis connection strings) + +**Remember**: This handles real money. 
Reliability and correctness are paramount. Test everything. Never push directly to production. diff --git a/skills/remotion/SKILL.md b/skills/remotion/SKILL.md new file mode 100644 index 00000000..f505d67f --- /dev/null +++ b/skills/remotion/SKILL.md @@ -0,0 +1,44 @@ +--- +name: remotion-best-practices +description: Best practices for Remotion - Video creation in React +origin: ECC +metadata: + tags: remotion, video, react, animation, composition +--- + +## When to use + +Use this skill whenever you are dealing with Remotion code to obtain domain-specific knowledge. + +## How to use + +Read individual rule files for detailed explanations and code examples: + +- [rules/3d.md](rules/3d.md) - 3D content in Remotion using Three.js and React Three Fiber +- [rules/animations.md](rules/animations.md) - Fundamental animation skills for Remotion +- [rules/assets.md](rules/assets.md) - Importing images, videos, audio, and fonts into Remotion +- [rules/audio.md](rules/audio.md) - Using audio and sound in Remotion - importing, trimming, volume, speed, pitch +- [rules/calculate-metadata.md](rules/calculate-metadata.md) - Dynamically set composition duration, dimensions, and props +- [rules/can-decode.md](rules/can-decode.md) - Check if a video can be decoded by the browser using Mediabunny +- [rules/charts.md](rules/charts.md) - Chart and data visualization patterns for Remotion +- [rules/compositions.md](rules/compositions.md) - Defining compositions, stills, folders, default props and dynamic metadata +- [rules/display-captions.md](rules/display-captions.md) - Displaying captions in Remotion with TikTok-style pages and word highlighting +- [rules/extract-frames.md](rules/extract-frames.md) - Extract frames from videos at specific timestamps using Mediabunny +- [rules/fonts.md](rules/fonts.md) - Loading Google Fonts and local fonts in Remotion +- [rules/get-audio-duration.md](rules/get-audio-duration.md) - Getting the duration of an audio file in seconds with Mediabunny
+- [rules/get-video-dimensions.md](rules/get-video-dimensions.md) - Getting the width and height of a video file with Mediabunny +- [rules/get-video-duration.md](rules/get-video-duration.md) - Getting the duration of a video file in seconds with Mediabunny +- [rules/gifs.md](rules/gifs.md) - Displaying GIFs synchronized with Remotion's timeline +- [rules/images.md](rules/images.md) - Embedding images in Remotion using the Img component +- [rules/import-srt-captions.md](rules/import-srt-captions.md) - Importing .srt subtitle files into Remotion using @remotion/captions +- [rules/lottie.md](rules/lottie.md) - Embedding Lottie animations in Remotion +- [rules/measuring-dom-nodes.md](rules/measuring-dom-nodes.md) - Measuring DOM element dimensions in Remotion +- [rules/measuring-text.md](rules/measuring-text.md) - Measuring text dimensions, fitting text to containers, and checking overflow +- [rules/sequencing.md](rules/sequencing.md) - Sequencing patterns for Remotion - delay, trim, limit duration of items +- [rules/tailwind.md](rules/tailwind.md) - Using TailwindCSS in Remotion +- [rules/text-animations.md](rules/text-animations.md) - Typography and text animation patterns for Remotion +- [rules/timing.md](rules/timing.md) - Interpolation curves in Remotion - linear, easing, spring animations +- [rules/transcribe-captions.md](rules/transcribe-captions.md) - Transcribing audio to generate captions in Remotion +- [rules/transitions.md](rules/transitions.md) - Scene transition patterns for Remotion +- [rules/trimming.md](rules/trimming.md) - Trimming patterns for Remotion - cut the beginning or end of animations +- [rules/videos.md](rules/videos.md) - Embedding videos in Remotion - trimming, volume, speed, looping, pitch diff --git a/skills/remotion/rules/3d.md b/skills/remotion/rules/3d.md new file mode 100644 index 00000000..43fe2ff6 --- /dev/null +++ b/skills/remotion/rules/3d.md @@ -0,0 +1,86 @@ +--- +name: 3d +description: 3D content in Remotion using Three.js and 
React Three Fiber. +metadata: + tags: 3d, three, threejs +--- + +# Using Three.js and React Three Fiber in Remotion + +Follow React Three Fiber and Three.js best practices. +Only the following Remotion-specific rules need to be followed: + +## Prerequisites + +First, the `@remotion/three` package needs to be installed. +If it is not, use the following command: + +```bash +npx remotion add @remotion/three # If project uses npm +bunx remotion add @remotion/three # If project uses bun +yarn remotion add @remotion/three # If project uses yarn +pnpm exec remotion add @remotion/three # If project uses pnpm +``` + +## Using ThreeCanvas + +You MUST wrap 3D content in `<ThreeCanvas>` and include proper lighting. +`<ThreeCanvas>` MUST have a `width` and `height` prop. + +```tsx +import { ThreeCanvas } from "@remotion/three"; +import { useVideoConfig } from "remotion"; + +const { width, height } = useVideoConfig(); + +<ThreeCanvas width={width} height={height}> + <ambientLight intensity={0.5} /> + <pointLight position={[10, 10, 0]} /> + <mesh> + <boxGeometry args={[1, 1, 1]} /> + <meshStandardMaterial color="hotpink" /> + </mesh> +</ThreeCanvas>; +``` + +## No animations not driven by `useCurrentFrame()` + +Shaders, models etc MUST NOT animate by themselves. +No animations are allowed unless they are driven by `useCurrentFrame()`. +Otherwise, it will cause flickering during rendering. + +Using `useFrame()` from `@react-three/fiber` is forbidden. + +## Animate using `useCurrentFrame()` + +Use `useCurrentFrame()` to perform animations. + +```tsx +const frame = useCurrentFrame(); +const rotationY = frame * 0.02; + +<mesh rotation={[0, rotationY, 0]}> + <boxGeometry args={[1, 1, 1]} /> + <meshStandardMaterial color="hotpink" /> +</mesh>; +``` + +## Using `<Sequence>` inside `<ThreeCanvas>` + +The `layout` prop of any `<Sequence>` inside a `<ThreeCanvas>` must be set to `none`.
+ +```tsx +import { Sequence } from "remotion"; +import { ThreeCanvas } from "@remotion/three"; + +const { width, height } = useVideoConfig(); + +<ThreeCanvas width={width} height={height}> + <Sequence from={30} layout="none"> + <mesh> + <boxGeometry args={[1, 1, 1]} /> + <meshStandardMaterial color="hotpink" /> + </mesh> + </Sequence> +</ThreeCanvas>; +``` diff --git a/skills/remotion/rules/animations.md b/skills/remotion/rules/animations.md new file mode 100644 index 00000000..ff6646a5 --- /dev/null +++ b/skills/remotion/rules/animations.md @@ -0,0 +1,29 @@ +--- +name: animations +description: Fundamental animation skills for Remotion +metadata: + tags: animations, transitions, frames, useCurrentFrame +--- + +All animations MUST be driven by the `useCurrentFrame()` hook. +Write animations in seconds and multiply them by the `fps` value from `useVideoConfig()`. + +```tsx +import { useCurrentFrame, useVideoConfig, interpolate } from "remotion"; + +export const FadeIn = () => { + const frame = useCurrentFrame(); + const { fps } = useVideoConfig(); + + const opacity = interpolate(frame, [0, 2 * fps], [0, 1], { + extrapolateRight: 'clamp', + }); + + return ( +
<div style={{opacity}}>Hello World!</div>
+ ); +}; +``` + +CSS transitions or animations are FORBIDDEN - they will not render correctly. +Tailwind animation class names are FORBIDDEN - they will not render correctly. diff --git a/skills/remotion/rules/assets.md b/skills/remotion/rules/assets.md new file mode 100644 index 00000000..04c8ad59 --- /dev/null +++ b/skills/remotion/rules/assets.md @@ -0,0 +1,78 @@ +--- +name: assets +description: Importing images, videos, audio, and fonts into Remotion +metadata: + tags: assets, staticFile, images, fonts, public +--- + +# Importing assets in Remotion + +## The public folder + +Place assets in the `public/` folder at your project root. + +## Using staticFile() + +You MUST use `staticFile()` to reference files from the `public/` folder: + +```tsx +import {Img, staticFile} from 'remotion'; + +export const MyComposition = () => { + return <Img src={staticFile('image.png')} />; +}; +``` + +The function returns an encoded URL that works correctly when deploying to subdirectories. + +## Using with components + +**Images:** + +```tsx +import {Img, staticFile} from 'remotion'; + +<Img src={staticFile('image.png')} />; +``` + +**Videos:** + +```tsx +import {Video} from '@remotion/media'; +import {staticFile} from 'remotion'; + +