Compare commits

...

1 Commits

Author SHA1 Message Date
Affaan Mustafa
19755f6c52 feat: add hermes-generated ops skills 2026-03-25 02:41:08 -07:00
10 changed files with 459 additions and 6 deletions

View File

@@ -13,7 +13,7 @@
{
"name": "everything-claude-code",
"source": "./",
"description": "The most comprehensive Claude Code plugin — 14+ agents, 56+ skills, 33+ commands, and production-ready hooks for TDD, security scanning, code review, and continuous learning",
"description": "The most comprehensive Claude Code plugin — 28+ agents, 116+ skills, 57+ commands, and production-ready hooks for TDD, security scanning, code review, and continuous learning",
"version": "1.9.0",
"author": {
"name": "Affaan Mustafa",

View File

@@ -12,6 +12,7 @@ CONFIG_FILE="$CODEX_HOME/config.toml"
AGENTS_FILE="$CODEX_HOME/AGENTS.md"
PROMPTS_DIR="$CODEX_HOME/prompts"
SKILLS_DIR="$CODEX_HOME/skills"
ROLE_DIR="$CODEX_HOME/agents"
HOOKS_DIR_EXPECT="${ECC_GLOBAL_HOOKS_DIR:-$CODEX_HOME/git-hooks}"
failures=0
@@ -89,12 +90,14 @@ fi
if [[ -f "$CONFIG_FILE" ]]; then
check_config_pattern '^multi_agent\s*=\s*true' "multi_agent is enabled"
check_config_absent '^\s*collab\s*=' "deprecated collab flag is absent"
check_config_pattern '^persistent_instructions\s*=' "persistent_instructions is configured"
check_config_pattern '^profile\s*=\s*"full-access"' "default profile is full-access"
check_config_pattern '^\[profiles\.full-access\]' "profiles.full-access exists"
check_config_pattern '^\[profiles\.strict\]' "profiles.strict exists"
check_config_pattern '^\[profiles\.yolo\]' "profiles.yolo exists"
for section in \
'mcp_servers.github' \
'mcp_servers.exa' \
'mcp_servers.memory' \
'mcp_servers.sequential-thinking' \
'mcp_servers.context7-mcp'
@@ -152,6 +155,26 @@ else
fail "Skills directory missing ($SKILLS_DIR)"
fi
# Verify that every required Codex agent role config is installed under $ROLE_DIR.
if [[ -d "$ROLE_DIR" ]]; then
  missing_roles=0
  for role_file in explorer.toml reviewer.toml docs-researcher.toml; do
    # Negated test reads better than the original empty then-branch (`:`).
    if [[ ! -f "$ROLE_DIR/$role_file" ]]; then
      printf ' - missing agent role config: %s\n' "$role_file"
      missing_roles=$((missing_roles + 1))
    fi
  done
  if (( missing_roles == 0 )); then
    ok "Global Codex agent role configs are present"
  else
    fail "$missing_roles required agent role configs are missing"
  fi
else
  fail "Agent role config directory missing ($ROLE_DIR)"
fi
if [[ -f "$PROMPTS_DIR/ecc-prompts-manifest.txt" ]]; then
ok "Command prompts manifest exists"
else

View File

@@ -28,6 +28,8 @@ CONFIG_FILE="$CODEX_HOME/config.toml"
AGENTS_FILE="$CODEX_HOME/AGENTS.md"
AGENTS_ROOT_SRC="$REPO_ROOT/AGENTS.md"
AGENTS_CODEX_SUPP_SRC="$REPO_ROOT/.codex/AGENTS.md"
ROLE_CONFIG_SRC="$REPO_ROOT/.codex/agents"
ROLE_CONFIG_DEST="$CODEX_HOME/agents"
SKILLS_SRC="$REPO_ROOT/.agents/skills"
SKILLS_DEST="$CODEX_HOME/skills"
PROMPTS_SRC="$REPO_ROOT/commands"
@@ -131,6 +133,7 @@ MCP_MERGE_SCRIPT="$REPO_ROOT/scripts/codex/merge-mcp-config.js"
require_path "$REPO_ROOT/AGENTS.md" "ECC AGENTS.md"
require_path "$AGENTS_CODEX_SUPP_SRC" "ECC Codex AGENTS supplement"
require_path "$ROLE_CONFIG_SRC" "ECC Codex agent config directory"
require_path "$SKILLS_SRC" "ECC skills directory"
require_path "$PROMPTS_SRC" "ECC commands directory"
require_path "$HOOKS_INSTALLER" "ECC global git hooks installer"
@@ -245,6 +248,17 @@ for skill_dir in "$SKILLS_SRC"/*; do
skills_count=$((skills_count + 1))
done
# Copy the ECC Codex agent role configs (*.toml) into the Codex home directory,
# counting how many were synced for the summary printed at the end of the run.
log "Syncing ECC Codex agent role configs"
run_or_echo "mkdir -p \"$ROLE_CONFIG_DEST\""
role_count=0
for role_file in "$ROLE_CONFIG_SRC"/*.toml; do
  # nullglob is not assumed: skip the literal pattern when nothing matches.
  [[ -f "$role_file" ]] || continue
  # Parameter expansion avoids forking a `basename` subshell per file.
  role_name=${role_file##*/}
  dest="$ROLE_CONFIG_DEST/$role_name"
  run_or_echo "cp \"$role_file\" \"$dest\""
  role_count=$((role_count + 1))
done
log "Generating prompt files from ECC commands"
run_or_echo "mkdir -p \"$PROMPTS_DEST\""
manifest="$PROMPTS_DEST/ecc-prompts-manifest.txt"
@@ -470,21 +484,22 @@ fi
log "Installing global git safety hooks"
if [[ "$MODE" == "dry-run" ]]; then
"$HOOKS_INSTALLER" --dry-run
bash "$HOOKS_INSTALLER" --dry-run
else
"$HOOKS_INSTALLER"
bash "$HOOKS_INSTALLER"
fi
log "Running global regression sanity check"
if [[ "$MODE" == "dry-run" ]]; then
printf '[dry-run] %s\n' "$SANITY_CHECKER"
printf '[dry-run] bash %s\n' "$SANITY_CHECKER"
else
"$SANITY_CHECKER"
bash "$SANITY_CHECKER"
fi
log "Sync complete"
log "Backup saved at: $BACKUP_DIR"
log "Skills synced: $skills_count"
log "Agent role configs synced: $role_count"
log "Prompts generated: $((prompt_count + extension_count)) (commands: $prompt_count, extensions: $extension_count)"
if [[ "$MODE" == "apply" ]]; then

View File

@@ -0,0 +1,9 @@
# Hermes Generated Skills
This directory is reserved for skills distilled from Hermes session data, repeated Telegram asks, and self-improvement runs.
Rules:
- keep skills specific and evidence-backed
- prefer reusable operational patterns over one-off tasks
- mirror skills from `~/.hermes/skills/generated/` only after the pattern has proven stable
- do not overwrite unrelated ECC skills

View File

@@ -0,0 +1,80 @@
---
name: content-crosspost-ops
description: Evidence-first crossposting workflow for Hermes. Use when adapting posts, threads, demos, videos, or articles across LinkedIn, Threads, Bluesky, Farcaster, and YouTube Community while keeping per-platform copy distinct and verified.
metadata:
hermes:
tags: [generated, content, crosspost, workflow, verification]
---
# Content Crosspost Ops
Use this when the user wants Hermes to crosspost or repurpose content across multiple platforms, especially from Telegram-driven publishing requests.
## Skill Stack
Pull these imported skills into the workflow when relevant:
- `content-engine` for platform-native rewrites
- `crosspost` for sequencing and destination-specific adaptation
- `article-writing` when the source asset is long-form
- `video-editing` or `fal-ai-media` when the post should lead with a clip, frame, or visual
- `search-first` before claiming a platform or API supports a format
- `eval-harness` mindset for publish verification and status reporting
## When To Use
- user says `crosspost`, `post everywhere`, `put this on linkedin too`, or similar
- the source asset is an X post/thread, quote tweet, article, demo video, screenshot, or YouTube post
- the destination is a community thread or showcase channel like Discord's `built-with-claude`
- the user asks whether a new destination or post type is supported
## Workflow
1. Read the real source asset and any destination rules first. Do not draft from memory.
- if the user pasted thread requirements, comply with those requirements before drafting
2. If the request depends on platform capability, API support, or quota behavior, verify it before answering.
- if the user asks whether PostBridge can handle a destination or format, inspect the real wrapper, configs, or recent publish logs before promising support
- if the destination is unsupported, say `blocked by unsupported capability` and give the next viable path
3. Extract one core idea and a few specifics. Split multiple ideas into separate posts.
4. Write native variants instead of reusing the same copy:
- X: fast hook, minimal framing
- LinkedIn: strong first line, short paragraphs, explicit lesson or takeaway
- Threads, Bluesky, Farcaster: shorter, conversational, clearly distinct wording
- YouTube Community: lead with the result or takeaway, keep it media-friendly
5. Prefer native media when the user wants engagement:
- for quote tweets, articles, or external links, prefer screenshots or media over a bare outbound link when the platform rewards native assets
- if the user says the demo itself should lead, use the video or a frame from it instead of a generic screenshot
- for community showcase threads, prefer the strongest demo clip or screenshot pair the user explicitly pointed to
6. Use link placement intentionally:
- put external links in comments or replies when engagement is the goal and the platform supports it
- otherwise use a platform-native CTA such as `comment for link` only when it matches the user's instruction
7. Resolve account and auth blockers early for browser-only destinations:
- for Discord or other browser-only community shares, verify the active account and whether the destination is reachable before spending more turns on extra asset hunting or copy polish
- verify the active account before typing into a community or social composer
- if login is blocked by MFA or a missing verification code, use the checked-in helper path instead of ad hoc inline scripting and do at most one focused resend plus one fresh helper check
- if that still returns no matching code, stop and report `blocked on missing MFA code`
8. Execute in order:
- post the primary platform first
- stagger secondary destinations when requested, defaulting to 4 hours apart unless the user overrides it
- prefer PostBridge for supported platforms, browser flows only when required
9. Verify before claiming completion:
- capture a returned post ID, URL, API response, or an updated verification log
- when the user asks `did you do it?`, answer with the exact status for each platform: posted, queued, drafted, uploaded-only, blocked, or awaiting verification
- record every attempt with `/Users/affoon/.hermes/workspace/content/log_crosspost.py` or `/Users/affoon/.hermes/workspace/content/postbridge_publish.py`
- if the state is only drafted, uploaded-only, queued, blocked, or pending manual action, report that exact status
## Pitfalls
- do not post identical copy cross-platform
- do not assume platform support without checking
- do not ignore thread rules or platform-specific showcase requirements
- do not call a draft, composer state, or upload step `posted`
- do not keep searching unrelated systems after a login or MFA blocker is already the limiting step
- do not keep refining copy or looking for better assets once auth is the only blocker on a browser-only publish
- do not answer a support question with a guess when the wrapper, logs, or API response can settle it
- do not ignore the user's preference for screenshots or native media over raw links
## Verification
- `/Users/affoon/.hermes/workspace/content/crosspost-verification-latest.md` reflects the latest attempts
- each destination has an ID, URL, or explicit failure reason
- the copy and media logged match what was actually sent

View File

@@ -0,0 +1,70 @@
---
name: email-ops
description: Evidence-first mailbox triage and sent-mail-safe reply workflow for Hermes. Use when organizing folders, drafting or sending through Himalaya, or verifying a message landed in Sent.
origin: Hermes
---
# Email Ops
Use this when the user wants Hermes to clean a mailbox, move messages between folders, draft or send replies, or prove a message landed in Sent.
## Prerequisites
Before using this workflow:
- install and configure the Himalaya CLI for the target mailbox accounts
- confirm the account's Sent folder name if it differs from `Sent`
## Skill Stack
Pull these companion skills into the workflow when relevant:
- `investor-outreach` when the email is investor, partner, or sponsor facing
- `search-first` before assuming a mail API, folder name, or CLI flag works
- `eval-harness` mindset for Sent-folder verification and exact status reporting
## When To Use
- user asks to triage inbox or trash, rescue important mail, or delete only obvious spam
- user asks to draft or send email and wants the message to appear in the mailbox's Sent folder
- user wants proof of which account, folder, or message id was used
## Workflow
1. Read the exact mailbox constraint first. If the user says `himalaya only` or forbids Apple Mail or `osascript`, stay inside Himalaya.
2. Resolve account and folder explicitly:
- check `himalaya account list`
- use `himalaya envelope list -a <account> -f <folder> ...`
- never misuse `-s INBOX` as a folder selector
3. For triage, classify before acting:
- preserve investor, partner, scheduling, and user-sent threads
- move only after the folder and account are confirmed
- permanently delete only obvious spam or messages the user explicitly authorized
4. For replies or new mail:
- read the full thread first
- choose the sender account that matches the project or recipient
- compose non-interactively with piped `himalaya template send` or `message write`
- avoid editor-driven flows unless required
5. If the request mentions attachments or images:
- resolve the exact absolute file path before broad mailbox searching
- keep the task on the local send-and-verify path instead of branching into unrelated web or repo exploration
- if Mail.app fallback is needed, pass the attachment paths after the body: `osascript /Users/affoon/.hermes/scripts/send_mail.applescript "<sender>" "<recipient>" "<subject>" "<body>" "/absolute/file1" ...`
6. If the user wants an actual send and Himalaya fails with an IMAP append or save-copy error, fall back to `/Users/affoon/.hermes/scripts/send_mail.applescript` only when the user did not forbid Apple Mail or `osascript`, then verify Sent. If the user constrained the method to Himalaya only, report the exact blocked state instead of silently switching tools.
7. During long-running mailbox work, send a short progress update before continuing to search. If a budget warning says 3 or fewer tool calls remain, stop broad exploration and spend the remaining calls on the highest-confidence execution or verification step, or report the exact status and the next action.
8. If the user wants sent-mail evidence:
- verify via `himalaya envelope list -a <account> -f Sent ...` or the account's actual sent folder
- report the subject, recipient, account, and message id or date if available
9. Report exact status words: drafted, sent, moved, flagged, deleted, blocked, awaiting verification.
## Pitfalls
- do not claim a message was sent without Sent-folder verification
- do not use the wrong account just because it is default
- do not delete uncertain business mail during cleanup
- do not switch tools after the user constrained the method
- do not wander into unrelated searches while an attachment path or Sent verification is unresolved
- do not keep searching through the budget warning while the user is asking for a status update
## Verification
- the requested messages are present in the expected folder after the move
- sent mail appears in Sent for the correct account
- the final report includes counts or concrete message identifiers, not vague completion language

View File

@@ -0,0 +1,71 @@
---
name: finance-billing-ops
description: Evidence-first Stripe sales, billing incident, and team-pricing workflow for Hermes. Use when pulling sales, investigating duplicate charges or failed payments, checking whether team billing is real in code, or benchmarking pricing.
metadata:
hermes:
tags: [generated, finance, billing, stripe, pricing, workflow, verification]
---
# Finance Billing Ops
Use this when the user asks about Stripe sales, refunds, failed payments, duplicate charges, org or team billing behavior, pricing strategy, or whether the product logic matches the marketing copy.
## Skill Stack
Pull these imported skills into the workflow when relevant:
- `market-research` for competitor pricing, billing models, and sourced market context
- `deep-research` or `exa-search` when the answer depends on current public pricing or enforcement behavior
- `search-first` before inventing a Stripe, billing, or entitlement path
- `eval-harness` mindset for exact status reporting and separating proof from inference
- `agentic-engineering` and `plankton-code-quality` when the answer depends on checked-in ECC billing or entitlement code
## When To Use
- user says `pull in stripe data`, `any new sales`, `why was he charged`, `refund`, `duplicate charge`, `team billing`, `per seat`, or similar
- the question mixes revenue facts with product truth, for example whether team or org billing is actually implemented
- the user wants a pricing comparison against Greptile or similar competitors
## Workflow
1. Start with the freshest revenue evidence available:
- if a live Stripe pull exists, refresh it first
- otherwise read `/Users/affoon/.hermes/workspace/business/stripe-sales.md` and `/Users/affoon/.hermes/workspace/business/financial-status.md`
- always report the snapshot timestamp if the data is not live
2. Normalize the revenue picture before answering:
- separate paid sales, failed attempts, successful retries, `$0` invoices, refunds, disputes, and active subscriptions
- do not treat a transient decline as lost revenue if the same checkout later succeeded
- flag any duplicate subscriptions or repeated checkouts with exact timestamps
3. For a customer billing incident:
- identify the customer email, account login, subscription ids, checkout sessions, payment intents, and timing
- determine whether extra charges are duplicates, retries, or real extra entitlements
- if recommending refunds or consolidation, explain what product value the extra charges did or did not unlock
4. For org, seat, quota, or activation questions:
- inspect the checked-in billing and usage code before making claims
- verify checkout quantity handling, installation vs user usage keys, unit-count handling, seat registry or member sync, and quota stacking
- inspect the live pricing copy too, so you can call out mismatches between marketing and implementation
5. For pricing and competitor questions:
- use `market-research`, `deep-research`, or `exa-search` for current public evidence
- separate sourced facts from inference, and call out stale or incomplete pricing signals
6. Report in layers:
- current sales snapshot
- customer-impact diagnosis
- code-backed product truth
- recommendation or next action
7. If the user wants fixes after diagnosis:
- hand the implementation path to `agentic-engineering` and `plankton-code-quality`
- keep the evidence trail so copy changes, refunds, and code changes stay aligned
## Pitfalls
- do not claim `new sales` without saying whether the data is live or a saved snapshot
- do not mix failed attempts into net revenue if the payment later succeeded
- do not say `per seat` unless the code actually enforces seat behavior
- do not assume extra subscriptions increase quotas without verifying the entitlement path
- do not compare competitor pricing from memory when current public sources are available
## Verification
- the answer includes a snapshot timestamp or an explicit live-pull statement
- the answer separates fact, inference, and recommendation
- code-backed claims cite file paths or code areas
- customer-impact statements name the exact payment or subscription evidence they rely on

View File

@@ -0,0 +1,57 @@
---
name: knowledge-ops
description: Evidence-first memory and context retrieval workflow for Hermes. Use when the user asks what Hermes remembers, points to OpenClaw or Hermes memory, or wants context recovered from a compacted session without re-reading already loaded files.
origin: Hermes
---
# Knowledge Ops
Use this when the user asks Hermes to remember something, recover an older conversation, pull context from a compacted session, or find information that "should be in memory somewhere."
## Skill Stack
Pull these companion skills into the workflow when relevant:
- `continuous-learning-v2` for evidence-backed pattern capture and cross-session learning
- `search-first` before inventing a new lookup path or assuming a store is empty
- `eval-harness` mindset for exact source attribution and negative-search reporting
## When To Use
- user says `do you remember`, `it was in memory`, `it was in openclaw`, `find the old session`, or similar
- the prompt contains a compaction summary or `[Files already read ... do NOT re-read these]`
- the answer depends on Hermes workspace memory, Supermemory, session logs, or the historical knowledge base
## Workflow
1. Start from the evidence already in the prompt:
- treat compaction summaries and `do NOT re-read` markers as usable context
- do not waste turns re-reading the same files unless the summary is clearly insufficient
2. Search in a fixed order before saying `not found`:
- `mcp_supermemory_recall` with a targeted query
- grep `/Users/affoon/.hermes/workspace/memory/`
- grep `/Users/affoon/.hermes/workspace/` more broadly
- `session_search` for recent Hermes conversations
- grep `/Users/affoon/GitHub/affaans_knowledge_base/` or the OpenClaw archive for historical context
3. If the user says the answer is in a specific memory store, pivot there immediately:
- `openclaw memory` means favor the historical knowledge base or OpenClaw archive
- `not in this session` means stop digging through the current thread and move to persistent stores
4. Keep the search narrow and evidence-led:
- reuse names, dates, channels, account names, or quoted phrases from the user
- search the most likely store first instead of spraying generic queries everywhere
5. Report findings with source evidence:
- give the file path, session id, date, or memory store
- distinguish between a direct hit, a likely match, and an inference
6. If nothing turns up, say which sources were checked and what to try next. Do not say `not found` after a single failed search.
## Pitfalls
- do not ignore a compaction summary and start over from zero
- do not keep re-reading files the prompt says are already loaded
- do not answer from vague memory without a source path, date, or session reference
- do not stop after one failed memory source when others remain
## Verification
- the response names the source store or file
- the response separates direct evidence from inference
- failed lookups list the sources checked, not just a bare `not found`

View File

@@ -0,0 +1,64 @@
---
name: research-ops
description: Evidence-first research workflow for Hermes. Use when answering current questions, evaluating a market or tool, enriching leads, or deciding whether a request should become ongoing monitored data collection.
metadata:
hermes:
tags: [generated, research, market, discovery, monitoring, workflow, verification]
---
# Research Ops
Use this when the user asks Hermes to research something current, compare options, enrich people or companies, or turn repeated lookups into an ongoing monitoring workflow.
## Skill Stack
Pull these imported skills into the workflow when relevant:
- `deep-research` for multi-source cited synthesis
- `market-research` for decision-oriented framing
- `exa-search` for first-pass discovery and current-web retrieval
- `data-scraper-agent` when the user really needs recurring collection or monitoring
- `search-first` before building new scraping or enrichment logic
- `eval-harness` mindset for claim quality, freshness, and explicit uncertainty
## When To Use
- user says `research`, `look up`, `find`, `who should i talk to`, `what's the latest`, or similar
- the answer depends on current public information, external sources, or a ranked set of candidates
- the task sounds recurring enough that a scraper or scheduled monitor may be better than a one-off search
## Workflow
1. Classify the ask before searching:
- quick factual answer
- decision memo or comparison
- lead list or enrichment
- recurring monitoring request
2. Start with the fastest evidence path:
- use `exa-search` first for broad current-web discovery
- if the question is about a local wrapper, config, or checked-in code path, inspect the live local source before making any web claim
3. Deepen only where the evidence justifies it:
- use `deep-research` when the user needs synthesis, citations, or multiple angles
- use `market-research` when the result should end in a recommendation, ranking, or go/no-go call
4. Separate fact from inference:
- label sourced facts clearly
- label inferred fit, ranking, or recommendation as inference
- include dates when freshness matters
5. Decide whether this should stay manual:
- if the user will likely ask for the same scan repeatedly, use `data-scraper-agent` patterns or propose a monitored collection path instead of repeating the same manual research forever
6. Report with evidence:
- cite the source or local file behind each important claim
- if evidence is thin or conflicting, say so directly
## Pitfalls
- do not answer current questions from stale memory when a fresh search is cheap
- do not conflate local code-backed behavior with market or web evidence
- do not present unsourced numbers or rankings as facts
- do not spin up a heavy deep-research pass for a quick capability check that local code can answer
- do not keep one-off researching a repeated monitoring ask when automation is the better fit
## Verification
- important claims have a source, file path, or explicit inference label
- freshness-sensitive answers include concrete dates when relevant
- recurring-monitoring recommendations state whether the task should remain manual or graduate to a scraper/workflow

View File

@@ -0,0 +1,64 @@
---
name: terminal-ops
description: Evidence-first terminal and repo execution workflow for Hermes. Use when fixing CI or build failures, running commands in a repo, applying code changes, or proving what was actually executed, verified, and pushed.
metadata:
hermes:
tags: [generated, terminal, coding, ci, repo, workflow, verification]
---
# Terminal Ops
Use this when the user asks Hermes to fix code, resolve CI failures, run terminal commands in a repo, inspect git state, or push verified changes.
## Skill Stack
Pull these imported skills into the workflow when relevant:
- `agentic-engineering` for scoped decomposition and explicit done conditions
- `plankton-code-quality` for write-time quality expectations and linter discipline
- `eval-harness` for pass/fail verification after each change
- `search-first` before inventing a new helper, dependency, or abstraction
- `security-review` when secrets, auth, external inputs, or privileged operations are touched
## When To Use
- user says `fix`, `debug`, `run this`, `check the repo`, `push it`, or similar
- the task references CI failures, lint errors, build errors, tests, scripts, or a local repo path
- the answer depends on what a command, diff, branch, or verification step actually shows
## Workflow
1. Resolve the exact working surface first:
- use the user-provided absolute repo path when given
- if the target is not a git repo, do not reach for git-only steps
- prefer `/Users/affoon/GitHub/...` over any iCloud or Documents mirror
2. Inspect before editing:
- read the failing command, file, test, or CI error first
- check current branch and local state before changing or pushing anything
- if the prompt already includes loaded-file markers or a compaction summary, use that evidence instead of re-reading blindly
3. Keep fixes narrow and evidence-led:
- solve one dominant failure at a time
- prefer repo-local scripts, package scripts, and checked-in helpers over ad hoc one-liners
- if a dependency or helper is needed, use `search-first` before writing custom glue
4. Verify after each meaningful change:
- rerun the smallest command that proves the fix
- escalate to the broader build, lint, or test only after the local failure is addressed
- review the diff before any commit or push
5. Push only when the requested state is real:
- distinguish `changed locally`, `verified locally`, `committed`, and `pushed`
- if push is requested, use a non-interactive git flow and report the branch and result
6. Report exact status words:
- drafted, changed locally, verified locally, committed, pushed, blocked, awaiting verification
## Pitfalls
- do not guess the failure from memory when logs or tests can settle it
- do not work in `/Users/affoon/Documents/...` clones when `/Users/affoon/GitHub/...` exists
- do not use destructive git commands or revert unrelated local work
- do not claim `fixed` if the proving command was not rerun
- do not claim `pushed` if the change only exists locally
## Verification
- the response names the proving command or test and its result
- the response names the repo path and branch when git was involved
- any push claim includes the target branch and exact status