mirror of
https://github.com/affaan-m/everything-claude-code.git
synced 2026-03-31 14:13:27 +08:00
Compare commits
167 Commits
f6ebc2a3c2
...
v1.6.0
| Author | SHA1 | Date | |
|---|---|---|---|
|
|
db27ba1eb2 | ||
|
|
3c833d8922 | ||
|
|
156b89ed30 | ||
|
|
41ce1a52e5 | ||
|
|
6f94c2e28f | ||
|
|
91b7ccf56f | ||
|
|
7daa830da9 | ||
|
|
7e57d1b831 | ||
|
|
ff47dace11 | ||
|
|
c9dc53e862 | ||
|
|
c8f54481b8 | ||
|
|
294fc4aad8 | ||
|
|
81aa8a72c3 | ||
|
|
0e9f613fd1 | ||
|
|
1bd68ff534 | ||
|
|
24047351c2 | ||
|
|
66959c1dca | ||
|
|
5a0f6e9e1e | ||
|
|
cf61ef7539 | ||
|
|
07e23e3e64 | ||
|
|
8fc49ba0e8 | ||
|
|
b90448aef6 | ||
|
|
caab908be8 | ||
|
|
7021d1f6cf | ||
|
|
3ad211b01b | ||
|
|
f61c9b0caf | ||
|
|
b682ac7d79 | ||
|
|
e1fca6e84d | ||
|
|
07530ace5f | ||
|
|
00464b6f60 | ||
|
|
0c78a7c779 | ||
|
|
fca997001e | ||
|
|
1eca3c9130 | ||
|
|
defcdc356e | ||
|
|
b548ce47c9 | ||
|
|
90e6a8c63b | ||
|
|
c68f7efcdc | ||
|
|
aa805d5240 | ||
|
|
c5ca3c698c | ||
|
|
7e928572c7 | ||
|
|
0bf47bbb41 | ||
|
|
2ad888ca82 | ||
|
|
8966282e48 | ||
|
|
3d97985559 | ||
|
|
d54124afad | ||
|
|
0b11849f1e | ||
|
|
2c26d2d67c | ||
|
|
fdda6cbcd9 | ||
|
|
5cb9c1c2a5 | ||
|
|
595127954f | ||
|
|
bb084229aa | ||
|
|
849bb3b425 | ||
|
|
4db215f60d | ||
|
|
bb1486c404 | ||
|
|
9339d4c88c | ||
|
|
2497a9b6e5 | ||
|
|
e449471ed3 | ||
|
|
cad8db21b7 | ||
|
|
9d9258c7e1 | ||
|
|
08ee723e85 | ||
|
|
f11347a708 | ||
|
|
586637f94c | ||
|
|
2b6ff6b55e | ||
|
|
2be6e09501 | ||
|
|
b1d47b22ea | ||
|
|
9dd4f4409b | ||
|
|
c5de2a7bf7 | ||
|
|
af24c617bb | ||
|
|
2ca903d4c5 | ||
|
|
4d98d9f125 | ||
|
|
40e80bcc61 | ||
|
|
eaf710847f | ||
|
|
b169a2e1dd | ||
|
|
8b4aac4e56 | ||
|
|
08f60355d4 | ||
|
|
1f74889dbf | ||
|
|
82d751556c | ||
|
|
3847cc0e0d | ||
|
|
94eaaad238 | ||
|
|
ab5be936e9 | ||
|
|
219bd1ff88 | ||
|
|
4ff6831b2b | ||
|
|
182e9e78b9 | ||
|
|
0250de793a | ||
|
|
88fa1bdbbc | ||
|
|
2753db3a48 | ||
|
|
e50b05384a | ||
|
|
26f3c88902 | ||
|
|
df2d3a6d54 | ||
|
|
25c5d58c44 | ||
|
|
06af1acb8d | ||
|
|
6a0b231d34 | ||
|
|
a563df2a52 | ||
|
|
53e06a8850 | ||
|
|
93633e44f2 | ||
|
|
791da32c6b | ||
|
|
635eb108ab | ||
|
|
1e740724ca | ||
|
|
6737f3245b | ||
|
|
1b273de13f | ||
|
|
882157ac09 | ||
|
|
69799f2f80 | ||
|
|
b27c21732f | ||
|
|
332d0f444b | ||
|
|
45a0b62fcb | ||
|
|
a64a294b29 | ||
|
|
4d016babbb | ||
|
|
d2c1281e97 | ||
|
|
78ad952433 | ||
|
|
274cca025e | ||
|
|
18fcb88168 | ||
|
|
8604583d16 | ||
|
|
233b341557 | ||
|
|
a95fb54ee4 | ||
|
|
910ffa5530 | ||
|
|
b9a38b2680 | ||
|
|
14dfe4d110 | ||
|
|
3e98be3e39 | ||
|
|
3ec59c48bc | ||
|
|
e70d4d2237 | ||
|
|
9b286ab3f8 | ||
|
|
b3e362105d | ||
|
|
8cacf0f6a6 | ||
|
|
cedcf9a701 | ||
|
|
15717d6d04 | ||
|
|
c8b7d41e42 | ||
|
|
9bec3d7625 | ||
|
|
2573cbb7b0 | ||
|
|
9dccdb9068 | ||
|
|
f000d9b02d | ||
|
|
27ae5ea299 | ||
|
|
723e69a621 | ||
|
|
241c35a589 | ||
|
|
0c67e0571e | ||
|
|
02d5986049 | ||
|
|
f623e3b429 | ||
|
|
44b5a4f9f0 | ||
|
|
567664091d | ||
|
|
5031a84d6e | ||
|
|
702c3f54b4 | ||
|
|
162222a46c | ||
|
|
485def8582 | ||
|
|
cba6b44c61 | ||
|
|
1fcdf12b62 | ||
|
|
85a86f6747 | ||
|
|
3ec0aa7b50 | ||
|
|
9afecedb21 | ||
|
|
7db0d316f5 | ||
|
|
99fc51dda7 | ||
|
|
2fea46edc7 | ||
|
|
990c08159c | ||
|
|
43808ccf78 | ||
|
|
3bc0929c6e | ||
|
|
ad40bf3aad | ||
|
|
f1a693f7cf | ||
|
|
4e520c6873 | ||
|
|
86844a305a | ||
|
|
b950fd7427 | ||
|
|
71e86cc93f | ||
|
|
4f7b50fb78 | ||
|
|
277006bd7f | ||
|
|
9db98673d0 | ||
|
|
fab2e05ae7 | ||
|
|
8d65c6d429 | ||
|
|
9b2233b5bc | ||
|
|
5a26daf392 | ||
|
|
438d082e30 |
@@ -8,7 +8,7 @@ Pre-translated configurations for [Cursor IDE](https://cursor.com), part of the
|
||||
|----------|-------|-------------|
|
||||
| Rules | 27 | Coding standards, security, testing, patterns (common + TypeScript/Python/Go) |
|
||||
| Agents | 13 | Specialized AI agents (planner, architect, code-reviewer, tdd-guide, etc.) |
|
||||
| Skills | 37 | Agent skills for backend, frontend, security, TDD, and more |
|
||||
| Skills | 43 | Agent skills for backend, frontend, security, TDD, and more |
|
||||
| Commands | 31 | Slash commands for planning, reviewing, testing, and deployment |
|
||||
| MCP Config | 1 | Pre-configured MCP servers (GitHub, Supabase, Vercel, Railway, etc.) |
|
||||
|
||||
|
||||
@@ -12,7 +12,7 @@ alwaysApply: true
|
||||
- Pair programming and code generation
|
||||
- Worker agents in multi-agent systems
|
||||
|
||||
**Sonnet 4.5** (Best coding model):
|
||||
**Sonnet 4.6** (Best coding model):
|
||||
- Main development work
|
||||
- Orchestrating multi-agent workflows
|
||||
- Complex coding tasks
|
||||
|
||||
722
.cursor/skills/cpp-coding-standards/SKILL.md
Normal file
722
.cursor/skills/cpp-coding-standards/SKILL.md
Normal file
@@ -0,0 +1,722 @@
|
||||
---
|
||||
name: cpp-coding-standards
|
||||
description: C++ coding standards based on the C++ Core Guidelines (isocpp.github.io). Use when writing, reviewing, or refactoring C++ code to enforce modern, safe, and idiomatic practices.
|
||||
---
|
||||
|
||||
# C++ Coding Standards (C++ Core Guidelines)
|
||||
|
||||
Comprehensive coding standards for modern C++ (C++17/20/23) derived from the [C++ Core Guidelines](https://isocpp.github.io/CppCoreGuidelines/CppCoreGuidelines). Enforces type safety, resource safety, immutability, and clarity.
|
||||
|
||||
## When to Use
|
||||
|
||||
- Writing new C++ code (classes, functions, templates)
|
||||
- Reviewing or refactoring existing C++ code
|
||||
- Making architectural decisions in C++ projects
|
||||
- Enforcing consistent style across a C++ codebase
|
||||
- Choosing between language features (e.g., `enum` vs `enum class`, raw pointer vs smart pointer)
|
||||
|
||||
### When NOT to Use
|
||||
|
||||
- Non-C++ projects
|
||||
- Legacy C codebases that cannot adopt modern C++ features
|
||||
- Embedded/bare-metal contexts where specific guidelines conflict with hardware constraints (adapt selectively)
|
||||
|
||||
## Cross-Cutting Principles
|
||||
|
||||
These themes recur across the entire guidelines and form the foundation:
|
||||
|
||||
1. **RAII everywhere** (P.8, R.1, E.6, CP.20): Bind resource lifetime to object lifetime
|
||||
2. **Immutability by default** (P.10, Con.1-5, ES.25): Start with `const`/`constexpr`; mutability is the exception
|
||||
3. **Type safety** (P.4, I.4, ES.46-49, Enum.3): Use the type system to prevent errors at compile time
|
||||
4. **Express intent** (P.3, F.1, NL.1-2, T.10): Names, types, and concepts should communicate purpose
|
||||
5. **Minimize complexity** (F.2-3, ES.5, Per.4-5): Simple code is correct code
|
||||
6. **Value semantics over pointer semantics** (C.10, R.3-5, F.20, CP.31): Prefer returning by value and scoped objects
|
||||
|
||||
## Philosophy & Interfaces (P.*, I.*)
|
||||
|
||||
### Key Rules
|
||||
|
||||
| Rule | Summary |
|
||||
|------|---------|
|
||||
| **P.1** | Express ideas directly in code |
|
||||
| **P.3** | Express intent |
|
||||
| **P.4** | Ideally, a program should be statically type safe |
|
||||
| **P.5** | Prefer compile-time checking to run-time checking |
|
||||
| **P.8** | Don't leak any resources |
|
||||
| **P.10** | Prefer immutable data to mutable data |
|
||||
| **I.1** | Make interfaces explicit |
|
||||
| **I.2** | Avoid non-const global variables |
|
||||
| **I.4** | Make interfaces precisely and strongly typed |
|
||||
| **I.11** | Never transfer ownership by a raw pointer or reference |
|
||||
| **I.23** | Keep the number of function arguments low |
|
||||
|
||||
### DO
|
||||
|
||||
```cpp
|
||||
// P.10 + I.4: Immutable, strongly typed interface
|
||||
struct Temperature {
|
||||
double kelvin;
|
||||
};
|
||||
|
||||
Temperature boil(const Temperature& water);
|
||||
```
|
||||
|
||||
### DON'T
|
||||
|
||||
```cpp
|
||||
// Weak interface: unclear ownership, unclear units
|
||||
double boil(double* temp);
|
||||
|
||||
// Non-const global variable
|
||||
int g_counter = 0; // I.2 violation
|
||||
```
|
||||
|
||||
## Functions (F.*)
|
||||
|
||||
### Key Rules
|
||||
|
||||
| Rule | Summary |
|
||||
|------|---------|
|
||||
| **F.1** | Package meaningful operations as carefully named functions |
|
||||
| **F.2** | A function should perform a single logical operation |
|
||||
| **F.3** | Keep functions short and simple |
|
||||
| **F.4** | If a function might be evaluated at compile time, declare it `constexpr` |
|
||||
| **F.6** | If your function must not throw, declare it `noexcept` |
|
||||
| **F.8** | Prefer pure functions |
|
||||
| **F.16** | For "in" parameters, pass cheaply-copied types by value and others by `const&` |
|
||||
| **F.20** | For "out" values, prefer return values to output parameters |
|
||||
| **F.21** | To return multiple "out" values, prefer returning a struct |
|
||||
| **F.43** | Never return a pointer or reference to a local object |
|
||||
|
||||
### Parameter Passing
|
||||
|
||||
```cpp
|
||||
// F.16: Cheap types by value, others by const&
|
||||
void print(int x); // cheap: by value
|
||||
void analyze(const std::string& data); // expensive: by const&
|
||||
void transform(std::string s); // sink: by value (will move)
|
||||
|
||||
// F.20 + F.21: Return values, not output parameters
|
||||
struct ParseResult {
|
||||
std::string token;
|
||||
int position;
|
||||
};
|
||||
|
||||
ParseResult parse(std::string_view input); // GOOD: return struct
|
||||
|
||||
// BAD: output parameters
|
||||
void parse(std::string_view input,
|
||||
std::string& token, int& pos); // avoid this
|
||||
```
|
||||
|
||||
### Pure Functions and constexpr
|
||||
|
||||
```cpp
|
||||
// F.4 + F.8: Pure, constexpr where possible
|
||||
constexpr int factorial(int n) noexcept {
|
||||
return (n <= 1) ? 1 : n * factorial(n - 1);
|
||||
}
|
||||
|
||||
static_assert(factorial(5) == 120);
|
||||
```
|
||||
|
||||
### Anti-Patterns
|
||||
|
||||
- Returning `T&&` from functions (F.45)
|
||||
- Using `va_arg` / C-style variadics (F.55)
|
||||
- Capturing by reference in lambdas passed to other threads (F.53)
|
||||
- Returning `const T` which inhibits move semantics (F.49)
|
||||
|
||||
## Classes & Class Hierarchies (C.*)
|
||||
|
||||
### Key Rules
|
||||
|
||||
| Rule | Summary |
|
||||
|------|---------|
|
||||
| **C.2** | Use `class` if invariant exists; `struct` if data members vary independently |
|
||||
| **C.9** | Minimize exposure of members |
|
||||
| **C.20** | If you can avoid defining default operations, do (Rule of Zero) |
|
||||
| **C.21** | If you define or `=delete` any copy/move/destructor, handle them all (Rule of Five) |
|
||||
| **C.35** | Base class destructor: public virtual or protected non-virtual |
|
||||
| **C.41** | A constructor should create a fully initialized object |
|
||||
| **C.46** | Declare single-argument constructors `explicit` |
|
||||
| **C.67** | A polymorphic class should suppress public copy/move |
|
||||
| **C.128** | Virtual functions: specify exactly one of `virtual`, `override`, or `final` |
|
||||
|
||||
### Rule of Zero
|
||||
|
||||
```cpp
|
||||
// C.20: Let the compiler generate special members
|
||||
struct Employee {
|
||||
std::string name;
|
||||
std::string department;
|
||||
int id;
|
||||
// No destructor, copy/move constructors, or assignment operators needed
|
||||
};
|
||||
```
|
||||
|
||||
### Rule of Five
|
||||
|
||||
```cpp
|
||||
// C.21: If you must manage a resource, define all five
|
||||
class Buffer {
|
||||
public:
|
||||
explicit Buffer(std::size_t size)
|
||||
: data_(std::make_unique<char[]>(size)), size_(size) {}
|
||||
|
||||
~Buffer() = default;
|
||||
|
||||
Buffer(const Buffer& other)
|
||||
: data_(std::make_unique<char[]>(other.size_)), size_(other.size_) {
|
||||
std::copy_n(other.data_.get(), size_, data_.get());
|
||||
}
|
||||
|
||||
Buffer& operator=(const Buffer& other) {
|
||||
if (this != &other) {
|
||||
auto new_data = std::make_unique<char[]>(other.size_);
|
||||
std::copy_n(other.data_.get(), other.size_, new_data.get());
|
||||
data_ = std::move(new_data);
|
||||
size_ = other.size_;
|
||||
}
|
||||
return *this;
|
||||
}
|
||||
|
||||
Buffer(Buffer&&) noexcept = default;
|
||||
Buffer& operator=(Buffer&&) noexcept = default;
|
||||
|
||||
private:
|
||||
std::unique_ptr<char[]> data_;
|
||||
std::size_t size_;
|
||||
};
|
||||
```
|
||||
|
||||
### Class Hierarchy
|
||||
|
||||
```cpp
|
||||
// C.35 + C.128: Virtual destructor, use override
|
||||
class Shape {
|
||||
public:
|
||||
virtual ~Shape() = default;
|
||||
virtual double area() const = 0; // C.121: pure interface
|
||||
};
|
||||
|
||||
class Circle : public Shape {
|
||||
public:
|
||||
explicit Circle(double r) : radius_(r) {}
|
||||
double area() const override { return 3.14159 * radius_ * radius_; }
|
||||
|
||||
private:
|
||||
double radius_;
|
||||
};
|
||||
```
|
||||
|
||||
### Anti-Patterns
|
||||
|
||||
- Calling virtual functions in constructors/destructors (C.82)
|
||||
- Using `memset`/`memcpy` on non-trivial types (C.90)
|
||||
- Providing different default arguments for virtual function and overrider (C.140)
|
||||
- Making data members `const` or references, which suppresses move/copy (C.12)
|
||||
|
||||
## Resource Management (R.*)
|
||||
|
||||
### Key Rules
|
||||
|
||||
| Rule | Summary |
|
||||
|------|---------|
|
||||
| **R.1** | Manage resources automatically using RAII |
|
||||
| **R.3** | A raw pointer (`T*`) is non-owning |
|
||||
| **R.5** | Prefer scoped objects; don't heap-allocate unnecessarily |
|
||||
| **R.10** | Avoid `malloc()`/`free()` |
|
||||
| **R.11** | Avoid calling `new` and `delete` explicitly |
|
||||
| **R.20** | Use `unique_ptr` or `shared_ptr` to represent ownership |
|
||||
| **R.21** | Prefer `unique_ptr` over `shared_ptr` unless sharing ownership |
|
||||
| **R.22** | Use `make_shared()` to make `shared_ptr`s |
|
||||
|
||||
### Smart Pointer Usage
|
||||
|
||||
```cpp
|
||||
// R.11 + R.20 + R.21: RAII with smart pointers
|
||||
auto widget = std::make_unique<Widget>("config"); // unique ownership
|
||||
auto cache = std::make_shared<Cache>(1024); // shared ownership
|
||||
|
||||
// R.3: Raw pointer = non-owning observer
|
||||
void render(const Widget* w) { // does NOT own w
|
||||
if (w) w->draw();
|
||||
}
|
||||
|
||||
render(widget.get());
|
||||
```
|
||||
|
||||
### RAII Pattern
|
||||
|
||||
```cpp
|
||||
// R.1: Resource acquisition is initialization
|
||||
class FileHandle {
|
||||
public:
|
||||
explicit FileHandle(const std::string& path)
|
||||
: handle_(std::fopen(path.c_str(), "r")) {
|
||||
if (!handle_) throw std::runtime_error("Failed to open: " + path);
|
||||
}
|
||||
|
||||
~FileHandle() {
|
||||
if (handle_) std::fclose(handle_);
|
||||
}
|
||||
|
||||
FileHandle(const FileHandle&) = delete;
|
||||
FileHandle& operator=(const FileHandle&) = delete;
|
||||
FileHandle(FileHandle&& other) noexcept
|
||||
: handle_(std::exchange(other.handle_, nullptr)) {}
|
||||
FileHandle& operator=(FileHandle&& other) noexcept {
|
||||
if (this != &other) {
|
||||
if (handle_) std::fclose(handle_);
|
||||
handle_ = std::exchange(other.handle_, nullptr);
|
||||
}
|
||||
return *this;
|
||||
}
|
||||
|
||||
private:
|
||||
std::FILE* handle_;
|
||||
};
|
||||
```
|
||||
|
||||
### Anti-Patterns
|
||||
|
||||
- Naked `new`/`delete` (R.11)
|
||||
- `malloc()`/`free()` in C++ code (R.10)
|
||||
- Multiple resource allocations in a single expression (R.13 -- exception safety hazard)
|
||||
- `shared_ptr` where `unique_ptr` suffices (R.21)
|
||||
|
||||
## Expressions & Statements (ES.*)
|
||||
|
||||
### Key Rules
|
||||
|
||||
| Rule | Summary |
|
||||
|------|---------|
|
||||
| **ES.5** | Keep scopes small |
|
||||
| **ES.20** | Always initialize an object |
|
||||
| **ES.23** | Prefer `{}` initializer syntax |
|
||||
| **ES.25** | Declare objects `const` or `constexpr` unless modification is intended |
|
||||
| **ES.28** | Use lambdas for complex initialization of `const` variables |
|
||||
| **ES.45** | Avoid magic constants; use symbolic constants |
|
||||
| **ES.46** | Avoid narrowing/lossy arithmetic conversions |
|
||||
| **ES.47** | Use `nullptr` rather than `0` or `NULL` |
|
||||
| **ES.48** | Avoid casts |
|
||||
| **ES.50** | Don't cast away `const` |
|
||||
|
||||
### Initialization
|
||||
|
||||
```cpp
|
||||
// ES.20 + ES.23 + ES.25: Always initialize, prefer {}, default to const
|
||||
const int max_retries{3};
|
||||
const std::string name{"widget"};
|
||||
const std::vector<int> primes{2, 3, 5, 7, 11};
|
||||
|
||||
// ES.28: Lambda for complex const initialization
|
||||
const auto config = [&] {
|
||||
Config c;
|
||||
c.timeout = std::chrono::seconds{30};
|
||||
c.retries = max_retries;
|
||||
c.verbose = debug_mode;
|
||||
return c;
|
||||
}();
|
||||
```
|
||||
|
||||
### Anti-Patterns
|
||||
|
||||
- Uninitialized variables (ES.20)
|
||||
- Using `0` or `NULL` as pointer (ES.47 -- use `nullptr`)
|
||||
- C-style casts (ES.48 -- use `static_cast`, `const_cast`, etc.)
|
||||
- Casting away `const` (ES.50)
|
||||
- Magic numbers without named constants (ES.45)
|
||||
- Mixing signed and unsigned arithmetic (ES.100)
|
||||
- Reusing names in nested scopes (ES.12)
|
||||
|
||||
## Error Handling (E.*)
|
||||
|
||||
### Key Rules
|
||||
|
||||
| Rule | Summary |
|
||||
|------|---------|
|
||||
| **E.1** | Develop an error-handling strategy early in a design |
|
||||
| **E.2** | Throw an exception to signal that a function can't perform its assigned task |
|
||||
| **E.6** | Use RAII to prevent leaks |
|
||||
| **E.12** | Use `noexcept` when throwing is impossible or unacceptable |
|
||||
| **E.14** | Use purpose-designed user-defined types as exceptions |
|
||||
| **E.15** | Throw by value, catch by reference |
|
||||
| **E.16** | Destructors, deallocation, and swap must never fail |
|
||||
| **E.17** | Don't try to catch every exception in every function |
|
||||
|
||||
### Exception Hierarchy
|
||||
|
||||
```cpp
|
||||
// E.14 + E.15: Custom exception types, throw by value, catch by reference
|
||||
class AppError : public std::runtime_error {
|
||||
public:
|
||||
using std::runtime_error::runtime_error;
|
||||
};
|
||||
|
||||
class NetworkError : public AppError {
|
||||
public:
|
||||
NetworkError(const std::string& msg, int code)
|
||||
: AppError(msg), status_code(code) {}
|
||||
int status_code;
|
||||
};
|
||||
|
||||
void fetch_data(const std::string& url) {
|
||||
// E.2: Throw to signal failure
|
||||
throw NetworkError("connection refused", 503);
|
||||
}
|
||||
|
||||
void run() {
|
||||
try {
|
||||
fetch_data("https://api.example.com");
|
||||
} catch (const NetworkError& e) {
|
||||
log_error(e.what(), e.status_code);
|
||||
} catch (const AppError& e) {
|
||||
log_error(e.what());
|
||||
}
|
||||
// E.17: Don't catch everything here -- let unexpected errors propagate
|
||||
}
|
||||
```
|
||||
|
||||
### Anti-Patterns
|
||||
|
||||
- Throwing built-in types like `int` or string literals (E.14)
|
||||
- Catching by value (slicing risk) (E.15)
|
||||
- Empty catch blocks that silently swallow errors
|
||||
- Using exceptions for flow control (E.3)
|
||||
- Error handling based on global state like `errno` (E.28)
|
||||
|
||||
## Constants & Immutability (Con.*)
|
||||
|
||||
### All Rules
|
||||
|
||||
| Rule | Summary |
|
||||
|------|---------|
|
||||
| **Con.1** | By default, make objects immutable |
|
||||
| **Con.2** | By default, make member functions `const` |
|
||||
| **Con.3** | By default, pass pointers and references to `const` |
|
||||
| **Con.4** | Use `const` for values that don't change after construction |
|
||||
| **Con.5** | Use `constexpr` for values computable at compile time |
|
||||
|
||||
```cpp
|
||||
// Con.1 through Con.5: Immutability by default
|
||||
class Sensor {
|
||||
public:
|
||||
explicit Sensor(std::string id) : id_(std::move(id)) {}
|
||||
|
||||
// Con.2: const member functions by default
|
||||
const std::string& id() const { return id_; }
|
||||
double last_reading() const { return reading_; }
|
||||
|
||||
// Only non-const when mutation is required
|
||||
void record(double value) { reading_ = value; }
|
||||
|
||||
private:
|
||||
const std::string id_; // Con.4: never changes after construction
|
||||
double reading_{0.0};
|
||||
};
|
||||
|
||||
// Con.3: Pass by const reference
|
||||
void display(const Sensor& s) {
|
||||
std::cout << s.id() << ": " << s.last_reading() << '\n';
|
||||
}
|
||||
|
||||
// Con.5: Compile-time constants
|
||||
constexpr double pi = 3.14159265358979;
|
||||
constexpr int max_sensors = 256;
|
||||
```
|
||||
|
||||
## Concurrency & Parallelism (CP.*)
|
||||
|
||||
### Key Rules
|
||||
|
||||
| Rule | Summary |
|
||||
|------|---------|
|
||||
| **CP.2** | Avoid data races |
|
||||
| **CP.3** | Minimize explicit sharing of writable data |
|
||||
| **CP.4** | Think in terms of tasks, rather than threads |
|
||||
| **CP.8** | Don't use `volatile` for synchronization |
|
||||
| **CP.20** | Use RAII, never plain `lock()`/`unlock()` |
|
||||
| **CP.21** | Use `std::scoped_lock` to acquire multiple mutexes |
|
||||
| **CP.22** | Never call unknown code while holding a lock |
|
||||
| **CP.42** | Don't wait without a condition |
|
||||
| **CP.44** | Remember to name your `lock_guard`s and `unique_lock`s |
|
||||
| **CP.100** | Don't use lock-free programming unless you absolutely have to |
|
||||
|
||||
### Safe Locking
|
||||
|
||||
```cpp
|
||||
// CP.20 + CP.44: RAII locks, always named
|
||||
class ThreadSafeQueue {
|
||||
public:
|
||||
void push(int value) {
|
||||
std::lock_guard<std::mutex> lock(mutex_); // CP.44: named!
|
||||
queue_.push(value);
|
||||
cv_.notify_one();
|
||||
}
|
||||
|
||||
int pop() {
|
||||
std::unique_lock<std::mutex> lock(mutex_);
|
||||
// CP.42: Always wait with a condition
|
||||
cv_.wait(lock, [this] { return !queue_.empty(); });
|
||||
const int value = queue_.front();
|
||||
queue_.pop();
|
||||
return value;
|
||||
}
|
||||
|
||||
private:
|
||||
std::mutex mutex_; // CP.50: mutex with its data
|
||||
std::condition_variable cv_;
|
||||
std::queue<int> queue_;
|
||||
};
|
||||
```
|
||||
|
||||
### Multiple Mutexes
|
||||
|
||||
```cpp
|
||||
// CP.21: std::scoped_lock for multiple mutexes (deadlock-free)
|
||||
void transfer(Account& from, Account& to, double amount) {
|
||||
std::scoped_lock lock(from.mutex_, to.mutex_);
|
||||
from.balance_ -= amount;
|
||||
to.balance_ += amount;
|
||||
}
|
||||
```
|
||||
|
||||
### Anti-Patterns
|
||||
|
||||
- `volatile` for synchronization (CP.8 -- it's for hardware I/O only)
|
||||
- Detaching threads (CP.26 -- lifetime management becomes nearly impossible)
|
||||
- Unnamed lock guards: `std::lock_guard<std::mutex>{m};` creates a temporary that is destroyed immediately, releasing the lock at once (CP.44); note the parenthesized form `std::lock_guard<std::mutex>(m);` is even worse -- it parses as a declaration of a new variable named `m`
|
||||
- Holding locks while calling callbacks (CP.22 -- deadlock risk)
|
||||
- Lock-free programming without deep expertise (CP.100)
|
||||
|
||||
## Templates & Generic Programming (T.*)
|
||||
|
||||
### Key Rules
|
||||
|
||||
| Rule | Summary |
|
||||
|------|---------|
|
||||
| **T.1** | Use templates to raise the level of abstraction |
|
||||
| **T.2** | Use templates to express algorithms for many argument types |
|
||||
| **T.10** | Specify concepts for all template arguments |
|
||||
| **T.11** | Use standard concepts whenever possible |
|
||||
| **T.13** | Prefer shorthand notation for simple concepts |
|
||||
| **T.43** | Prefer `using` over `typedef` |
|
||||
| **T.120** | Use template metaprogramming only when you really need to |
|
||||
| **T.144** | Don't specialize function templates (overload instead) |
|
||||
|
||||
### Concepts (C++20)
|
||||
|
||||
```cpp
|
||||
#include <concepts>
|
||||
|
||||
// T.10 + T.11: Constrain templates with standard concepts
|
||||
template<std::integral T>
|
||||
T gcd(T a, T b) {
|
||||
while (b != 0) {
|
||||
a = std::exchange(b, a % b);
|
||||
}
|
||||
return a;
|
||||
}
|
||||
|
||||
// T.13: Shorthand concept syntax
|
||||
void sort(std::ranges::random_access_range auto& range) {
|
||||
std::ranges::sort(range);
|
||||
}
|
||||
|
||||
// Custom concept for domain-specific constraints
|
||||
template<typename T>
|
||||
concept Serializable = requires(const T& t) {
|
||||
{ t.serialize() } -> std::convertible_to<std::string>;
|
||||
};
|
||||
|
||||
template<Serializable T>
|
||||
void save(const T& obj, const std::string& path);
|
||||
```
|
||||
|
||||
### Anti-Patterns
|
||||
|
||||
- Unconstrained templates in visible namespaces (T.47)
|
||||
- Specializing function templates instead of overloading (T.144)
|
||||
- Template metaprogramming where `constexpr` suffices (T.120)
|
||||
- `typedef` instead of `using` (T.43)
|
||||
|
||||
## Standard Library (SL.*)
|
||||
|
||||
### Key Rules
|
||||
|
||||
| Rule | Summary |
|
||||
|------|---------|
|
||||
| **SL.1** | Use libraries wherever possible |
|
||||
| **SL.2** | Prefer the standard library to other libraries |
|
||||
| **SL.con.1** | Prefer `std::array` or `std::vector` over C arrays |
|
||||
| **SL.con.2** | Prefer `std::vector` by default |
|
||||
| **SL.str.1** | Use `std::string` to own character sequences |
|
||||
| **SL.str.2** | Use `std::string_view` to refer to character sequences |
|
||||
| **SL.io.50** | Avoid `endl` (use `'\n'` -- `endl` forces a flush) |
|
||||
|
||||
```cpp
|
||||
// SL.con.1 + SL.con.2: Prefer vector/array over C arrays
|
||||
const std::array<int, 4> fixed_data{1, 2, 3, 4};
|
||||
std::vector<std::string> dynamic_data;
|
||||
|
||||
// SL.str.1 + SL.str.2: string owns, string_view observes
|
||||
std::string build_greeting(std::string_view name) {
|
||||
return "Hello, " + std::string(name) + "!";
|
||||
}
|
||||
|
||||
// SL.io.50: Use '\n' not endl
|
||||
std::cout << "result: " << value << '\n';
|
||||
```
|
||||
|
||||
## Enumerations (Enum.*)
|
||||
|
||||
### Key Rules
|
||||
|
||||
| Rule | Summary |
|
||||
|------|---------|
|
||||
| **Enum.1** | Prefer enumerations over macros |
|
||||
| **Enum.3** | Prefer `enum class` over plain `enum` |
|
||||
| **Enum.5** | Don't use ALL_CAPS for enumerators |
|
||||
| **Enum.6** | Avoid unnamed enumerations |
|
||||
|
||||
```cpp
|
||||
// Enum.3 + Enum.5: Scoped enum, no ALL_CAPS
|
||||
enum class Color { red, green, blue };
|
||||
enum class LogLevel { debug, info, warning, error };
|
||||
|
||||
// BAD: plain enum leaks names, ALL_CAPS clashes with macros
|
||||
enum { RED, GREEN, BLUE }; // Enum.3 + Enum.5 + Enum.6 violation
|
||||
#define MAX_SIZE 100 // Enum.1 violation -- use constexpr
|
||||
```
|
||||
|
||||
## Source Files & Naming (SF.*, NL.*)
|
||||
|
||||
### Key Rules
|
||||
|
||||
| Rule | Summary |
|
||||
|------|---------|
|
||||
| **SF.1** | Use `.cpp` for code files and `.h` for interface files |
|
||||
| **SF.7** | Don't write `using namespace` at global scope in a header |
|
||||
| **SF.8** | Use `#include` guards for all `.h` files |
|
||||
| **SF.11** | Header files should be self-contained |
|
||||
| **NL.5** | Avoid encoding type information in names (no Hungarian notation) |
|
||||
| **NL.8** | Use a consistent naming style |
|
||||
| **NL.9** | Use ALL_CAPS for macro names only |
|
||||
| **NL.10** | Prefer `underscore_style` names |
|
||||
|
||||
### Header Guard
|
||||
|
||||
```cpp
|
||||
// SF.8: Include guard (or #pragma once)
|
||||
#ifndef PROJECT_MODULE_WIDGET_H
|
||||
#define PROJECT_MODULE_WIDGET_H
|
||||
|
||||
// SF.11: Self-contained -- include everything this header needs
|
||||
#include <string>
|
||||
#include <vector>
|
||||
|
||||
namespace project::module {
|
||||
|
||||
class Widget {
|
||||
public:
|
||||
explicit Widget(std::string name);
|
||||
const std::string& name() const;
|
||||
|
||||
private:
|
||||
std::string name_;
|
||||
};
|
||||
|
||||
} // namespace project::module
|
||||
|
||||
#endif // PROJECT_MODULE_WIDGET_H
|
||||
```
|
||||
|
||||
### Naming Conventions
|
||||
|
||||
```cpp
|
||||
// NL.8 + NL.10: Consistent underscore_style
|
||||
namespace my_project {
|
||||
|
||||
constexpr int max_buffer_size = 4096; // NL.9: not ALL_CAPS (it's not a macro)
|
||||
|
||||
class tcp_connection { // underscore_style class
|
||||
public:
|
||||
void send_message(std::string_view msg);
|
||||
bool is_connected() const;
|
||||
|
||||
private:
|
||||
std::string host_; // trailing underscore for members
|
||||
int port_;
|
||||
};
|
||||
|
||||
} // namespace my_project
|
||||
```
|
||||
|
||||
### Anti-Patterns
|
||||
|
||||
- `using namespace std;` in a header at global scope (SF.7)
|
||||
- Headers that depend on inclusion order (SF.10, SF.11)
|
||||
- Hungarian notation like `strName`, `iCount` (NL.5)
|
||||
- ALL_CAPS for anything other than macros (NL.9)
|
||||
|
||||
## Performance (Per.*)
|
||||
|
||||
### Key Rules
|
||||
|
||||
| Rule | Summary |
|
||||
|------|---------|
|
||||
| **Per.1** | Don't optimize without reason |
|
||||
| **Per.2** | Don't optimize prematurely |
|
||||
| **Per.6** | Don't make claims about performance without measurements |
|
||||
| **Per.7** | Design to enable optimization |
|
||||
| **Per.10** | Rely on the static type system |
|
||||
| **Per.11** | Move computation from run time to compile time |
|
||||
| **Per.19** | Access memory predictably |
|
||||
|
||||
### Guidelines
|
||||
|
||||
```cpp
|
||||
// Per.11: Compile-time computation where possible
|
||||
constexpr auto lookup_table = [] {
|
||||
std::array<int, 256> table{};
|
||||
for (int i = 0; i < 256; ++i) {
|
||||
table[i] = i * i;
|
||||
}
|
||||
return table;
|
||||
}();
|
||||
|
||||
// Per.19: Prefer contiguous data for cache-friendliness
|
||||
std::vector<Point> points; // GOOD: contiguous
|
||||
std::vector<std::unique_ptr<Point>> indirect_points; // BAD: pointer chasing
|
||||
```
|
||||
|
||||
### Anti-Patterns
|
||||
|
||||
- Optimizing without profiling data (Per.1, Per.6)
|
||||
- Choosing "clever" low-level code over clear abstractions (Per.4, Per.5)
|
||||
- Ignoring data layout and cache behavior (Per.19)
|
||||
|
||||
## Quick Reference Checklist
|
||||
|
||||
Before marking C++ work complete:
|
||||
|
||||
- [ ] No raw `new`/`delete` -- use smart pointers or RAII (R.11)
|
||||
- [ ] Objects initialized at declaration (ES.20)
|
||||
- [ ] Variables are `const`/`constexpr` by default (Con.1, ES.25)
|
||||
- [ ] Member functions are `const` where possible (Con.2)
|
||||
- [ ] `enum class` instead of plain `enum` (Enum.3)
|
||||
- [ ] `nullptr` instead of `0`/`NULL` (ES.47)
|
||||
- [ ] No narrowing conversions (ES.46)
|
||||
- [ ] No C-style casts (ES.48)
|
||||
- [ ] Single-argument constructors are `explicit` (C.46)
|
||||
- [ ] Rule of Zero or Rule of Five applied (C.20, C.21)
|
||||
- [ ] Base class destructors are public virtual or protected non-virtual (C.35)
|
||||
- [ ] Templates are constrained with concepts (T.10)
|
||||
- [ ] No `using namespace` in headers at global scope (SF.7)
|
||||
- [ ] Headers have include guards and are self-contained (SF.8, SF.11)
|
||||
- [ ] Locks use RAII (`scoped_lock`/`lock_guard`) (CP.20)
|
||||
- [ ] Exceptions are custom types, thrown by value, caught by reference (E.14, E.15)
|
||||
- [ ] `'\n'` instead of `std::endl` (SL.io.50)
|
||||
- [ ] No magic numbers (ES.45)
|
||||
18
.github/workflows/copilot-setup-steps.yml
vendored
Normal file
18
.github/workflows/copilot-setup-steps.yml
vendored
Normal file
@@ -0,0 +1,18 @@
|
||||
steps:
|
||||
- name: Setup Go environment
|
||||
uses: actions/setup-go@v6.2.0
|
||||
with:
|
||||
# The Go version to download (if necessary) and use. Supports semver spec and ranges. Be sure to enclose this option in single quotation marks.
|
||||
go-version: # optional
|
||||
# Path to the go.mod, go.work, .go-version, or .tool-versions file.
|
||||
go-version-file: # optional
|
||||
# Set this option to true if you want the action to always check for the latest available version that satisfies the version spec
|
||||
check-latest: # optional
|
||||
# Used to pull Go distributions from go-versions. Since there's a default, this is typically not supplied by the user. When running this action on github.com, the default value is sufficient. When running on GHES, you can pass a personal access token for github.com if you are experiencing rate limiting.
|
||||
token: # optional, default is ${{ github.server_url == 'https://github.com' && github.token || '' }}
|
||||
# Used to specify whether caching is needed. Set to true, if you'd like to enable caching.
|
||||
cache: # optional, default is true
|
||||
# Used to specify the path to a dependency file - go.sum
|
||||
cache-dependency-path: # optional
|
||||
# Target architecture for Go to use. Examples: x86, x64. Will use system architecture by default.
|
||||
architecture: # optional
|
||||
34
.github/workflows/security-scan.yml
vendored
34
.github/workflows/security-scan.yml
vendored
@@ -1,34 +0,0 @@
|
||||
name: AgentShield Security Scan
|
||||
|
||||
on:
|
||||
push:
|
||||
branches: [main]
|
||||
pull_request:
|
||||
branches: [main]
|
||||
|
||||
# Prevent duplicate runs
|
||||
concurrency:
|
||||
group: ${{ github.workflow }}-${{ github.ref }}
|
||||
cancel-in-progress: true
|
||||
|
||||
# Minimal permissions
|
||||
permissions:
|
||||
contents: read
|
||||
|
||||
jobs:
|
||||
agentshield:
|
||||
name: AgentShield Scan
|
||||
runs-on: ubuntu-latest
|
||||
timeout-minutes: 10
|
||||
|
||||
steps:
|
||||
- name: Checkout
|
||||
uses: actions/checkout@v4
|
||||
|
||||
- name: Run AgentShield Security Scan
|
||||
uses: affaan-m/agentshield@v1
|
||||
with:
|
||||
path: '.'
|
||||
min-severity: 'medium'
|
||||
format: 'terminal'
|
||||
fail-on-findings: 'false'
|
||||
60
CLAUDE.md
Normal file
60
CLAUDE.md
Normal file
@@ -0,0 +1,60 @@
|
||||
# CLAUDE.md
|
||||
|
||||
This file provides guidance to Claude Code (claude.ai/code) when working with code in this repository.
|
||||
|
||||
## Project Overview
|
||||
|
||||
This is a **Claude Code plugin** - a collection of production-ready agents, skills, hooks, commands, rules, and MCP configurations. The project provides battle-tested workflows for software development using Claude Code.
|
||||
|
||||
## Running Tests
|
||||
|
||||
```bash
|
||||
# Run all tests
|
||||
node tests/run-all.js
|
||||
|
||||
# Run individual test files
|
||||
node tests/lib/utils.test.js
|
||||
node tests/lib/package-manager.test.js
|
||||
node tests/hooks/hooks.test.js
|
||||
```
|
||||
|
||||
## Architecture
|
||||
|
||||
The project is organized into several core components:
|
||||
|
||||
- **agents/** - Specialized subagents for delegation (planner, code-reviewer, tdd-guide, etc.)
|
||||
- **skills/** - Workflow definitions and domain knowledge (coding standards, patterns, testing)
|
||||
- **commands/** - Slash commands invoked by users (/tdd, /plan, /e2e, etc.)
|
||||
- **hooks/** - Trigger-based automations (session persistence, pre/post-tool hooks)
|
||||
- **rules/** - Always-follow guidelines (security, coding style, testing requirements)
|
||||
- **mcp-configs/** - MCP server configurations for external integrations
|
||||
- **scripts/** - Cross-platform Node.js utilities for hooks and setup
|
||||
- **tests/** - Test suite for scripts and utilities
|
||||
|
||||
## Key Commands
|
||||
|
||||
- `/tdd` - Test-driven development workflow
|
||||
- `/plan` - Implementation planning
|
||||
- `/e2e` - Generate and run E2E tests
|
||||
- `/code-review` - Quality review
|
||||
- `/build-fix` - Fix build errors
|
||||
- `/learn` - Extract patterns from sessions
|
||||
- `/skill-create` - Generate skills from git history
|
||||
|
||||
## Development Notes
|
||||
|
||||
- Package manager detection: npm, pnpm, yarn, bun (configurable via `CLAUDE_PACKAGE_MANAGER` env var or project config)
|
||||
- Cross-platform: Windows, macOS, Linux support via Node.js scripts
|
||||
- Agent format: Markdown with YAML frontmatter (name, description, tools, model)
|
||||
- Skill format: Markdown with clear sections for when to use, how it works, examples
|
||||
- Hook format: JSON with matcher conditions and command/notification hooks
|
||||
|
||||
## Contributing
|
||||
|
||||
Follow the formats in CONTRIBUTING.md:
|
||||
- Agents: Markdown with frontmatter (name, description, tools, model)
|
||||
- Skills: Clear sections (When to Use, How It Works, Examples)
|
||||
- Commands: Markdown with description frontmatter
|
||||
- Hooks: JSON with matcher and hooks array
|
||||
|
||||
File naming: lowercase with hyphens (e.g., `python-reviewer.md`, `tdd-workflow.md`)
|
||||
23
README.md
23
README.md
@@ -13,7 +13,7 @@
|
||||

|
||||

|
||||
|
||||
> **42K+ stars** | **5K+ forks** | **24 contributors** | **6 languages supported** | **Anthropic Hackathon Winner**
|
||||
> **50K+ stars** | **6K+ forks** | **30 contributors** | **6 languages supported** | **Anthropic Hackathon Winner**
|
||||
|
||||
---
|
||||
|
||||
@@ -143,7 +143,7 @@ For manual install instructions see the README in the `rules/` folder.
|
||||
/plugin list everything-claude-code@everything-claude-code
|
||||
```
|
||||
|
||||
✨ **That's it!** You now have access to 13 agents, 37 skills, and 31 commands.
|
||||
✨ **That's it!** You now have access to 13 agents, 44 skills, and 32 commands.
|
||||
|
||||
---
|
||||
|
||||
@@ -222,6 +222,7 @@ everything-claude-code/
|
||||
| |-- verification-loop/ # Continuous verification (Longform Guide)
|
||||
| |-- golang-patterns/ # Go idioms and best practices
|
||||
| |-- golang-testing/ # Go testing patterns, TDD, benchmarks
|
||||
| |-- cpp-coding-standards/ # C++ coding standards from C++ Core Guidelines (NEW)
|
||||
| |-- cpp-testing/ # C++ testing with GoogleTest, CMake/CTest (NEW)
|
||||
| |-- django-patterns/ # Django patterns, models, views (NEW)
|
||||
| |-- django-security/ # Django security best practices (NEW)
|
||||
@@ -245,6 +246,12 @@ everything-claude-code/
|
||||
| |-- deployment-patterns/ # CI/CD, Docker, health checks, rollbacks (NEW)
|
||||
| |-- docker-patterns/ # Docker Compose, networking, volumes, container security (NEW)
|
||||
| |-- e2e-testing/ # Playwright E2E patterns and Page Object Model (NEW)
|
||||
| |-- content-hash-cache-pattern/ # SHA-256 content hash caching for file processing (NEW)
|
||||
| |-- cost-aware-llm-pipeline/ # LLM cost optimization, model routing, budget tracking (NEW)
|
||||
| |-- regex-vs-llm-structured-text/ # Decision framework: regex vs LLM for text parsing (NEW)
|
||||
| |-- swift-actor-persistence/ # Thread-safe Swift data persistence with actors (NEW)
|
||||
| |-- swift-protocol-di-testing/ # Protocol-based DI for testable Swift code (NEW)
|
||||
| |-- search-first/ # Research-before-coding workflow (NEW)
|
||||
|
|
||||
|-- commands/ # Slash commands for quick execution
|
||||
| |-- tdd.md # /tdd - Test-driven development
|
||||
@@ -254,6 +261,7 @@ everything-claude-code/
|
||||
| |-- build-fix.md # /build-fix - Fix build errors
|
||||
| |-- refactor-clean.md # /refactor-clean - Dead code removal
|
||||
| |-- learn.md # /learn - Extract patterns mid-session (Longform Guide)
|
||||
| |-- learn-eval.md # /learn-eval - Extract, evaluate, and save patterns (NEW)
|
||||
| |-- checkpoint.md # /checkpoint - Save verification state (Longform Guide)
|
||||
| |-- verify.md # /verify - Run verification loop (Longform Guide)
|
||||
| |-- setup-pm.md # /setup-pm - Configure package manager
|
||||
@@ -486,6 +494,7 @@ This gives you instant access to all commands, agents, skills, and hooks.
|
||||
> git clone https://github.com/affaan-m/everything-claude-code.git
|
||||
>
|
||||
> # Option A: User-level rules (applies to all projects)
|
||||
> mkdir -p ~/.claude/rules
|
||||
> cp -r everything-claude-code/rules/common/* ~/.claude/rules/
|
||||
> cp -r everything-claude-code/rules/typescript/* ~/.claude/rules/ # pick your stack
|
||||
> cp -r everything-claude-code/rules/python/* ~/.claude/rules/
|
||||
@@ -691,11 +700,12 @@ Each component is fully independent.
|
||||
</details>
|
||||
|
||||
<details>
|
||||
<summary><b>Does this work with Cursor / OpenCode?</b></summary>
|
||||
<summary><b>Does this work with Cursor / OpenCode / Codex?</b></summary>
|
||||
|
||||
Yes. ECC is cross-platform:
|
||||
- **Cursor**: Pre-translated configs in `.cursor/`. See [Cursor IDE Support](#cursor-ide-support).
|
||||
- **OpenCode**: Full plugin support in `.opencode/`. See [OpenCode Support](#-opencode-support).
|
||||
- **Codex**: First-class support with adapter drift guards and SessionStart fallback. See PR [#257](https://github.com/affaan-m/everything-claude-code/pull/257).
|
||||
- **Claude Code**: Native — this is the primary target.
|
||||
</details>
|
||||
|
||||
@@ -800,8 +810,8 @@ The configuration is automatically detected from `.opencode/opencode.json`.
|
||||
| Feature | Claude Code | OpenCode | Status |
|
||||
|---------|-------------|----------|--------|
|
||||
| Agents | ✅ 13 agents | ✅ 12 agents | **Claude Code leads** |
|
||||
| Commands | ✅ 31 commands | ✅ 24 commands | **Claude Code leads** |
|
||||
| Skills | ✅ 37 skills | ✅ 16 skills | **Claude Code leads** |
|
||||
| Commands | ✅ 32 commands | ✅ 24 commands | **Claude Code leads** |
|
||||
| Skills | ✅ 44 skills | ✅ 16 skills | **Claude Code leads** |
|
||||
| Hooks | ✅ 3 phases | ✅ 20+ events | **OpenCode has more!** |
|
||||
| Rules | ✅ 8 rules | ✅ 8 rules | **Full parity** |
|
||||
| MCP Servers | ✅ Full | ✅ Full | **Full parity** |
|
||||
@@ -821,7 +831,7 @@ OpenCode's plugin system is MORE sophisticated than Claude Code with 20+ event t
|
||||
|
||||
**Additional OpenCode events**: `file.edited`, `file.watcher.updated`, `message.updated`, `lsp.client.diagnostics`, `tui.toast.show`, and more.
|
||||
|
||||
### Available Commands (31)
|
||||
### Available Commands (32)
|
||||
|
||||
| Command | Description |
|
||||
|---------|-------------|
|
||||
@@ -855,6 +865,7 @@ OpenCode's plugin system is MORE sophisticated than Claude Code with 20+ event t
|
||||
| `/instinct-import` | Import instincts |
|
||||
| `/instinct-export` | Export instincts |
|
||||
| `/evolve` | Cluster instincts into skills |
|
||||
| `/learn-eval` | Extract and evaluate patterns before saving |
|
||||
| `/setup-pm` | Configure package manager |
|
||||
|
||||
### Plugin Installation
|
||||
|
||||
@@ -95,7 +95,7 @@ cp -r everything-claude-code/rules/* ~/.claude/rules/
|
||||
/plugin list everything-claude-code@everything-claude-code
|
||||
```
|
||||
|
||||
✨ **完成!** 你现在可以使用 13 个代理、37 个技能和 31 个命令。
|
||||
✨ **完成!** 你现在可以使用 13 个代理、43 个技能和 31 个命令。
|
||||
|
||||
---
|
||||
|
||||
|
||||
91
commands/learn-eval.md
Normal file
91
commands/learn-eval.md
Normal file
@@ -0,0 +1,91 @@
|
||||
---
|
||||
description: Extract reusable patterns from the session, self-evaluate quality before saving, and determine the right save location (Global vs Project).
|
||||
---
|
||||
|
||||
# /learn-eval - Extract, Evaluate, then Save
|
||||
|
||||
Extends `/learn` with a quality gate and save-location decision before writing any skill file.
|
||||
|
||||
## What to Extract
|
||||
|
||||
Look for:
|
||||
|
||||
1. **Error Resolution Patterns** — root cause + fix + reusability
|
||||
2. **Debugging Techniques** — non-obvious steps, tool combinations
|
||||
3. **Workarounds** — library quirks, API limitations, version-specific fixes
|
||||
4. **Project-Specific Patterns** — conventions, architecture decisions, integration patterns
|
||||
|
||||
## Process
|
||||
|
||||
1. Review the session for extractable patterns
|
||||
2. Identify the most valuable/reusable insight
|
||||
|
||||
3. **Determine save location:**
|
||||
- Ask: "Would this pattern be useful in a different project?"
|
||||
- **Global** (`~/.claude/skills/learned/`): Generic patterns usable across 2+ projects (bash compatibility, LLM API behavior, debugging techniques, etc.)
|
||||
- **Project** (`.claude/skills/learned/` in current project): Project-specific knowledge (quirks of a particular config file, project-specific architecture decisions, etc.)
|
||||
- When in doubt, choose Global (moving Global → Project is easier than the reverse)
|
||||
|
||||
4. Draft the skill file using this format:
|
||||
|
||||
```markdown
|
||||
---
|
||||
name: pattern-name
|
||||
description: "Under 130 characters"
|
||||
user-invocable: false
|
||||
origin: auto-extracted
|
||||
---
|
||||
|
||||
# [Descriptive Pattern Name]
|
||||
|
||||
**Extracted:** [Date]
|
||||
**Context:** [Brief description of when this applies]
|
||||
|
||||
## Problem
|
||||
[What problem this solves - be specific]
|
||||
|
||||
## Solution
|
||||
[The pattern/technique/workaround - with code examples]
|
||||
|
||||
## When to Use
|
||||
[Trigger conditions]
|
||||
```
|
||||
|
||||
5. **Self-evaluate before saving** using this rubric:
|
||||
|
||||
| Dimension | 1 | 3 | 5 |
|
||||
|-----------|---|---|---|
|
||||
| Specificity | Abstract principles only, no code examples | Representative code example present | Rich examples covering all usage patterns |
|
||||
| Actionability | Unclear what to do | Main steps are understandable | Immediately actionable, edge cases covered |
|
||||
| Scope Fit | Too broad or too narrow | Mostly appropriate, some boundary ambiguity | Name, trigger, and content perfectly aligned |
|
||||
| Non-redundancy | Nearly identical to another skill | Some overlap but unique perspective exists | Completely unique value |
|
||||
| Coverage | Covers only a fraction of the target task | Main cases covered, common variants missing | Main cases, edge cases, and pitfalls covered |
|
||||
|
||||
- Score each dimension 1–5
|
||||
- If any dimension scores 1–2, improve the draft and re-score until all dimensions are ≥ 3
|
||||
- Show the user the scores table and the final draft
|
||||
|
||||
6. Ask user to confirm:
|
||||
- Show: proposed save path + scores table + final draft
|
||||
- Wait for explicit confirmation before writing
|
||||
|
||||
7. Save to the determined location
|
||||
|
||||
## Output Format for Step 5 (scores table)
|
||||
|
||||
| Dimension | Score | Rationale |
|
||||
|-----------|-------|-----------|
|
||||
| Specificity | N/5 | ... |
|
||||
| Actionability | N/5 | ... |
|
||||
| Scope Fit | N/5 | ... |
|
||||
| Non-redundancy | N/5 | ... |
|
||||
| Coverage | N/5 | ... |
|
||||
| **Total** | **N/25** | |
|
||||
|
||||
## Notes
|
||||
|
||||
- Don't extract trivial fixes (typos, simple syntax errors)
|
||||
- Don't extract one-time issues (specific API outages, etc.)
|
||||
- Focus on patterns that will save time in future sessions
|
||||
- Keep skills focused — one pattern per skill
|
||||
- If Coverage score is low, add related variants before saving
|
||||
@@ -140,7 +140,7 @@ cp -r everything-claude-code/rules/golang/* ~/.claude/rules/
|
||||
/plugin list everything-claude-code@everything-claude-code
|
||||
```
|
||||
|
||||
✨ **完了です!** これで13のエージェント、37のスキル、31のコマンドにアクセスできます。
|
||||
✨ **完了です!** これで13のエージェント、43のスキル、31のコマンドにアクセスできます。
|
||||
|
||||
---
|
||||
|
||||
@@ -454,6 +454,7 @@ Duplicate hooks file detected: ./hooks/hooks.json resolves to already-loaded fil
|
||||
> git clone https://github.com/affaan-m/everything-claude-code.git
|
||||
>
|
||||
> # オプション A:ユーザーレベルルール(すべてのプロジェクトに適用)
|
||||
> mkdir -p ~/.claude/rules
|
||||
> cp -r everything-claude-code/rules/common/* ~/.claude/rules/
|
||||
> cp -r everything-claude-code/rules/typescript/* ~/.claude/rules/ # スタックを選択
|
||||
> cp -r everything-claude-code/rules/python/* ~/.claude/rules/
|
||||
|
||||
@@ -456,6 +456,7 @@ Duplicate hooks file detected: ./hooks/hooks.json resolves to already-loaded fil
|
||||
> git clone https://github.com/affaan-m/everything-claude-code.git
|
||||
>
|
||||
> # 选项 A:用户级规则(适用于所有项目)
|
||||
> mkdir -p ~/.claude/rules
|
||||
> cp -r everything-claude-code/rules/common/* ~/.claude/rules/
|
||||
> cp -r everything-claude-code/rules/typescript/* ~/.claude/rules/ # 选择您的技术栈
|
||||
> cp -r everything-claude-code/rules/python/* ~/.claude/rules/
|
||||
|
||||
@@ -13,7 +13,7 @@
|
||||
"C": "Cyan - username, todos",
|
||||
"T": "Gray - model name"
|
||||
},
|
||||
"output_example": "affoon:~/projects/myapp main* ctx:73% sonnet-4.5 14:30 todos:3",
|
||||
"output_example": "affoon:~/projects/myapp main* ctx:73% sonnet-4.6 14:30 todos:3",
|
||||
"usage": "Copy the statusLine object to your ~/.claude/settings.json"
|
||||
}
|
||||
}
|
||||
|
||||
@@ -37,7 +37,7 @@
|
||||
"hooks": [
|
||||
{
|
||||
"type": "command",
|
||||
"command": "node -e \"const fs=require('fs');let d='';process.stdin.on('data',c=>d+=c);process.stdin.on('end',()=>{try{const i=JSON.parse(d);const p=i.tool_input?.file_path||'';if(/\\.(md|txt)$/.test(p)&&!/(README|CLAUDE|AGENTS|CONTRIBUTING)\\.md$/.test(p)){console.error('[Hook] BLOCKED: Unnecessary documentation file creation');console.error('[Hook] File: '+p);console.error('[Hook] Use README.md for documentation instead');process.exit(2)}}catch{}console.log(d)})\""
|
||||
"command": "node -e \"const fs=require('fs');let d='';process.stdin.on('data',c=>d+=c);process.stdin.on('end',()=>{try{const i=JSON.parse(d);const p=i.tool_input?.file_path||'';if(/\\.(md|txt)$/.test(p)&&!/(README|CLAUDE|AGENTS|CONTRIBUTING)\\.md$/.test(p)&&!/\\.claude\\/plans\\//.test(p)){console.error('[Hook] BLOCKED: Unnecessary documentation file creation');console.error('[Hook] File: '+p);console.error('[Hook] Use README.md for documentation instead');process.exit(2)}}catch{}console.log(d)})\""
|
||||
}
|
||||
],
|
||||
"description": "Block creation of random .md files - keeps docs consolidated"
|
||||
|
||||
642
llms.txt
642
llms.txt
@@ -1,642 +0,0 @@
|
||||
# OpenCode Documentation for LLMs
|
||||
|
||||
> OpenCode is an open-source AI coding agent available as a terminal interface, desktop application, or IDE extension. It helps developers write code, add features, and understand codebases through conversational interactions.
|
||||
|
||||
## Installation
|
||||
|
||||
Multiple installation methods are available:
|
||||
|
||||
```bash
|
||||
# curl script (recommended)
|
||||
curl -fsSL https://opencode.ai/install | bash
|
||||
|
||||
# Node.js package managers
|
||||
npm install -g opencode
|
||||
bun install -g opencode
|
||||
pnpm add -g opencode
|
||||
yarn global add opencode
|
||||
|
||||
# Homebrew (macOS/Linux)
|
||||
brew install anomalyco/tap/opencode
|
||||
|
||||
# Arch Linux
|
||||
paru -S opencode
|
||||
|
||||
# Windows
|
||||
choco install opencode # Chocolatey
|
||||
scoop install opencode # Scoop
|
||||
# Or use Docker/WSL (recommended for Windows)
|
||||
```
|
||||
|
||||
## Configuration
|
||||
|
||||
Configuration file: `opencode.json` or `opencode.jsonc` (with comments)
|
||||
|
||||
Schema: `https://opencode.ai/config.json`
|
||||
|
||||
### Core Settings
|
||||
|
||||
```json
|
||||
{
|
||||
"$schema": "https://opencode.ai/config.json",
|
||||
"model": "anthropic/claude-sonnet-4-5",
|
||||
"small_model": "anthropic/claude-haiku-4-5",
|
||||
"default_agent": "build",
|
||||
"instructions": [
|
||||
"CONTRIBUTING.md",
|
||||
"docs/guidelines.md"
|
||||
],
|
||||
"plugin": [
|
||||
"opencode-helicone-session",
|
||||
"./.opencode/plugins"
|
||||
],
|
||||
"agent": { /* agent definitions */ },
|
||||
"command": { /* command definitions */ },
|
||||
"permission": {
|
||||
"edit": "ask",
|
||||
"bash": "ask",
|
||||
"mcp_*": "ask"
|
||||
},
|
||||
"tools": {
|
||||
"write": true,
|
||||
"bash": true
|
||||
}
|
||||
}
|
||||
```
|
||||
|
||||
### Environment Variables
|
||||
|
||||
Use `{env:VAR_NAME}` for environment variables and `{file:path}` for file contents in configuration values.
|
||||
|
||||
## Agents
|
||||
|
||||
OpenCode supports two agent types:
|
||||
|
||||
### Primary Agents
|
||||
Main assistants you interact with directly. Switch between them using Tab or configured keybinds.
|
||||
|
||||
### Subagents
|
||||
Specialized assistants that primary agents invoke automatically or through `@` mentions (e.g., `@general help me search`).
|
||||
|
||||
### Built-in Agents
|
||||
|
||||
| Agent | Type | Description |
|
||||
|-------|------|-------------|
|
||||
| build | primary | Default agent with full tool access for development work |
|
||||
| plan | primary | Restricted agent for analysis; file edits and bash set to "ask" |
|
||||
| general | subagent | Full tool access for multi-step research tasks |
|
||||
| explore | subagent | Read-only agent for rapid codebase exploration |
|
||||
| compaction | system | Hidden agent for context compaction |
|
||||
| title | system | Hidden agent for title generation |
|
||||
| summary | system | Hidden agent for summarization |
|
||||
|
||||
### Agent Configuration
|
||||
|
||||
JSON format in `opencode.json`:
|
||||
|
||||
```json
|
||||
{
|
||||
"agent": {
|
||||
"code-reviewer": {
|
||||
"description": "Reviews code for best practices",
|
||||
"mode": "subagent",
|
||||
"model": "anthropic/claude-opus-4-5",
|
||||
"prompt": "{file:.opencode/prompts/agents/code-reviewer.txt}",
|
||||
"temperature": 0.3,
|
||||
"tools": {
|
||||
"write": false,
|
||||
"edit": false,
|
||||
"read": true,
|
||||
"bash": true
|
||||
},
|
||||
"permission": {
|
||||
"edit": "deny",
|
||||
"bash": {
|
||||
"*": "ask",
|
||||
"git status": "allow"
|
||||
}
|
||||
},
|
||||
"steps": 10
|
||||
}
|
||||
}
|
||||
}
|
||||
```
|
||||
|
||||
Markdown format in `~/.config/opencode/agents/` or `.opencode/agents/`:
|
||||
|
||||
```markdown
|
||||
---
|
||||
description: Expert code review specialist
|
||||
mode: subagent
|
||||
model: anthropic/claude-opus-4-5
|
||||
temperature: 0.3
|
||||
tools:
|
||||
write: false
|
||||
read: true
|
||||
bash: true
|
||||
permission:
|
||||
edit: deny
|
||||
steps: 10
|
||||
---
|
||||
|
||||
You are an expert code reviewer. Review code for quality, security, and maintainability...
|
||||
```
|
||||
|
||||
### Agent Configuration Options
|
||||
|
||||
| Option | Purpose | Values |
|
||||
|--------|---------|--------|
|
||||
| description | Required field explaining agent purpose | string |
|
||||
| mode | Agent type | "primary", "subagent", or "all" |
|
||||
| model | Override default model | "provider/model-id" |
|
||||
| temperature | Control randomness | 0.0-1.0 (lower = focused) |
|
||||
| tools | Enable/disable specific tools | object or wildcards |
|
||||
| permission | Set tool permissions | "ask", "allow", or "deny" |
|
||||
| steps | Limit agentic iterations | number |
|
||||
| prompt | Reference custom prompt file | "{file:./path}" |
|
||||
| top_p | Alternative randomness control | 0.0-1.0 |
|
||||
|
||||
## Commands
|
||||
|
||||
### Built-in Commands
|
||||
|
||||
- `/init` - Initialize project analysis (creates AGENTS.md)
|
||||
- `/undo` - Undo last change
|
||||
- `/redo` - Redo undone change
|
||||
- `/share` - Generate shareable conversation link
|
||||
- `/help` - Show help
|
||||
- `/connect` - Configure API providers
|
||||
|
||||
### Custom Commands
|
||||
|
||||
**Markdown files** in `~/.config/opencode/commands/` (global) or `.opencode/commands/` (project):
|
||||
|
||||
```markdown
|
||||
---
|
||||
description: Create implementation plan
|
||||
agent: planner
|
||||
subtask: true
|
||||
---
|
||||
|
||||
Create a detailed implementation plan for: $ARGUMENTS
|
||||
|
||||
Include:
|
||||
- Requirements analysis
|
||||
- Architecture review
|
||||
- Step breakdown
|
||||
- Testing strategy
|
||||
```
|
||||
|
||||
**JSON configuration** in `opencode.json`:
|
||||
|
||||
```json
|
||||
{
|
||||
"command": {
|
||||
"plan": {
|
||||
"description": "Create implementation plan",
|
||||
"template": "Create a detailed implementation plan for: $ARGUMENTS",
|
||||
"agent": "planner",
|
||||
"subtask": true
|
||||
},
|
||||
"test": {
|
||||
"template": "Run tests with coverage for: $ARGUMENTS\n\nOutput:\n!`npm test`",
|
||||
"description": "Run tests with coverage",
|
||||
"agent": "build"
|
||||
}
|
||||
}
|
||||
}
|
||||
```
|
||||
|
||||
### Template Variables
|
||||
|
||||
| Variable | Description |
|
||||
|----------|-------------|
|
||||
| `$ARGUMENTS` | All command arguments |
|
||||
| `$1`, `$2`, `$3` | Positional arguments |
|
||||
| `!`command`` | Include shell command output |
|
||||
| `@filepath` | Include file contents |
|
||||
|
||||
### Command Options
|
||||
|
||||
| Option | Purpose | Required |
|
||||
|--------|---------|----------|
|
||||
| template | Prompt text sent to LLM | Yes |
|
||||
| description | UI display text | No |
|
||||
| agent | Target agent for execution | No |
|
||||
| model | Override default LLM | No |
|
||||
| subtask | Force subagent invocation | No |
|
||||
|
||||
## Tools
|
||||
|
||||
### Built-in Tools
|
||||
|
||||
| Tool | Purpose | Permission Key |
|
||||
|------|---------|---------------|
|
||||
| bash | Execute shell commands | "bash" |
|
||||
| edit | Modify existing files using exact string replacements | "edit" |
|
||||
| write | Create new files or overwrite existing ones | "edit" |
|
||||
| read | Read file contents from codebase | "read" |
|
||||
| grep | Search file contents using regular expressions | "grep" |
|
||||
| glob | Find files by pattern matching | "glob" |
|
||||
| list | List files and directories | "list" |
|
||||
| lsp | Access code intelligence (experimental) | "lsp" |
|
||||
| patch | Apply patches to files | "edit" |
|
||||
| skill | Load skill files (SKILL.md) | "skill" |
|
||||
| todowrite | Manage todo lists during sessions | "todowrite" |
|
||||
| todoread | Read existing todo lists | "todoread" |
|
||||
| webfetch | Fetch and read web pages | "webfetch" |
|
||||
| question | Ask user questions during execution | "question" |
|
||||
|
||||
### Tool Permissions
|
||||
|
||||
```json
|
||||
{
|
||||
"permission": {
|
||||
"edit": "ask",
|
||||
"bash": "allow",
|
||||
"webfetch": "deny",
|
||||
"mcp_*": "ask"
|
||||
}
|
||||
}
|
||||
```
|
||||
|
||||
Permission levels:
|
||||
- `"allow"` - Tool executes without restriction
|
||||
- `"deny"` - Tool cannot be used
|
||||
- `"ask"` - Requires user approval before execution
|
||||
|
||||
## Custom Tools
|
||||
|
||||
Location: `.opencode/tools/` (project) or `~/.config/opencode/tools/` (global)
|
||||
|
||||
### Tool Definition
|
||||
|
||||
```typescript
|
||||
import { tool } from "@opencode-ai/plugin"
|
||||
|
||||
export default tool({
|
||||
description: "Execute SQL query on the database",
|
||||
args: {
|
||||
query: tool.schema.string().describe("SQL query to execute"),
|
||||
database: tool.schema.string().optional().describe("Target database")
|
||||
},
|
||||
async execute(args, context) {
|
||||
// context.worktree - git repository root
|
||||
// context.directory - current working directory
|
||||
// context.sessionID - current session ID
|
||||
// context.agent - active agent identifier
|
||||
|
||||
const result = await someDbQuery(args.query)
|
||||
return { result }
|
||||
}
|
||||
})
|
||||
```
|
||||
|
||||
### Multiple Tools Per File
|
||||
|
||||
```typescript
|
||||
// math.ts - creates math_add and math_multiply tools
|
||||
export const add = tool({
|
||||
description: "Add two numbers",
|
||||
args: {
|
||||
a: tool.schema.number(),
|
||||
b: tool.schema.number()
|
||||
},
|
||||
async execute({ a, b }) {
|
||||
return { result: a + b }
|
||||
}
|
||||
})
|
||||
|
||||
export const multiply = tool({
|
||||
description: "Multiply two numbers",
|
||||
args: {
|
||||
a: tool.schema.number(),
|
||||
b: tool.schema.number()
|
||||
},
|
||||
async execute({ a, b }) {
|
||||
return { result: a * b }
|
||||
}
|
||||
})
|
||||
```
|
||||
|
||||
### Schema Types (Zod)
|
||||
|
||||
```typescript
|
||||
tool.schema.string()
|
||||
tool.schema.number()
|
||||
tool.schema.boolean()
|
||||
tool.schema.array(tool.schema.string())
|
||||
tool.schema.object({ key: tool.schema.string() })
|
||||
tool.schema.enum(["option1", "option2"])
|
||||
tool.schema.optional()
|
||||
tool.schema.describe("Description for LLM")
|
||||
```
|
||||
|
||||
## Plugins
|
||||
|
||||
Plugins extend OpenCode with custom hooks, tools, and behaviors.
|
||||
|
||||
### Plugin Structure
|
||||
|
||||
```typescript
|
||||
import { tool } from "@opencode-ai/plugin"
|
||||
|
||||
export const MyPlugin = async ({ project, client, $, directory, worktree }) => {
|
||||
// project - Current project information
|
||||
// client - OpenCode SDK client for AI interaction
|
||||
// $ - Bun's shell API for command execution
|
||||
// directory - Current working directory
|
||||
// worktree - Git worktree path
|
||||
|
||||
return {
|
||||
// Hook implementations
|
||||
"file.edited": async (event) => {
|
||||
// Handle file edit event
|
||||
},
|
||||
|
||||
"tool.execute.before": async (input, output) => {
|
||||
// Intercept before tool execution
|
||||
},
|
||||
|
||||
"session.idle": async (event) => {
|
||||
// Handle session idle
|
||||
}
|
||||
}
|
||||
}
|
||||
```
|
||||
|
||||
### Loading Plugins
|
||||
|
||||
1. **Local files**: Place in `.opencode/plugins/` (project) or `~/.config/opencode/plugins/` (global)
|
||||
2. **npm packages**: Specify in `opencode.json`:
|
||||
|
||||
```json
|
||||
{
|
||||
"plugin": [
|
||||
"opencode-helicone-session",
|
||||
"@my-org/custom-plugin",
|
||||
"./.opencode/plugins"
|
||||
]
|
||||
}
|
||||
```
|
||||
|
||||
### Available Hook Events
|
||||
|
||||
**Command Events:**
|
||||
- `command.executed` - After a command is executed
|
||||
|
||||
**File Events:**
|
||||
- `file.edited` - After a file is edited
|
||||
- `file.watcher.updated` - When file watcher detects changes
|
||||
|
||||
**Tool Events:**
|
||||
- `tool.execute.before` - Before tool execution (can modify input)
|
||||
- `tool.execute.after` - After tool execution (can modify output)
|
||||
|
||||
**Session Events:**
|
||||
- `session.created` - When session starts
|
||||
- `session.compacted` - After context compaction
|
||||
- `session.deleted` - When session ends
|
||||
- `session.idle` - When session becomes idle
|
||||
- `session.updated` - When session is updated
|
||||
- `session.status` - Session status changes
|
||||
|
||||
**Message Events:**
|
||||
- `message.updated` - When message is updated
|
||||
- `message.removed` - When message is removed
|
||||
- `message.part.updated` - When message part is updated
|
||||
|
||||
**LSP Events:**
|
||||
- `lsp.client.diagnostics` - LSP diagnostic updates
|
||||
- `lsp.updated` - LSP state updates
|
||||
|
||||
**Shell Events:**
|
||||
- `shell.env` - Modify shell environment variables
|
||||
|
||||
**TUI Events:**
|
||||
- `tui.prompt.append` - Append to TUI prompt
|
||||
- `tui.command.execute` - Execute TUI command
|
||||
- `tui.toast.show` - Show toast notification
|
||||
|
||||
**Other Events:**
|
||||
- `installation.updated` - Installation updates
|
||||
- `permission.asked` - Permission request
|
||||
- `server.connected` - Server connection
|
||||
- `todo.updated` - Todo list updates
|
||||
|
||||
### Hook Event Mapping (Claude Code → OpenCode)
|
||||
|
||||
| Claude Code Hook | OpenCode Plugin Event |
|
||||
|-----------------|----------------------|
|
||||
| PreToolUse | `tool.execute.before` |
|
||||
| PostToolUse | `tool.execute.after` |
|
||||
| Stop | `session.idle` or `session.status` |
|
||||
| SessionStart | `session.created` |
|
||||
| SessionEnd | `session.deleted` |
|
||||
| N/A | `file.edited`, `file.watcher.updated` |
|
||||
| N/A | `message.*`, `permission.*`, `lsp.*` |
|
||||
|
||||
### Plugin Example: Auto-Format
|
||||
|
||||
```typescript
|
||||
export const AutoFormatPlugin = async ({ $, directory }) => {
|
||||
return {
|
||||
"file.edited": async (event) => {
|
||||
if (event.path.match(/\.(ts|tsx|js|jsx)$/)) {
|
||||
await $`prettier --write ${event.path}`
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
```
|
||||
|
||||
### Plugin Example: TypeScript Check
|
||||
|
||||
```typescript
|
||||
export const TypeCheckPlugin = async ({ $, client }) => {
|
||||
return {
|
||||
"tool.execute.after": async (input, output) => {
|
||||
if (input.tool === "edit" && input.args.filePath?.match(/\.tsx?$/)) {
|
||||
const result = await $`npx tsc --noEmit`.catch(e => e)
|
||||
if (result.exitCode !== 0) {
|
||||
client.app.log("warn", "TypeScript errors detected")
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
```
|
||||
|
||||
## Providers
|
||||
|
||||
OpenCode integrates 75+ LLM providers via AI SDK and Models.dev.
|
||||
|
||||
### Supported Providers
|
||||
|
||||
- OpenCode Zen (recommended for beginners)
|
||||
- Anthropic (Claude)
|
||||
- OpenAI (GPT)
|
||||
- Google (Gemini)
|
||||
- Amazon Bedrock
|
||||
- Azure OpenAI
|
||||
- GitHub Copilot
|
||||
- Ollama (local)
|
||||
- And 70+ more
|
||||
|
||||
### Provider Configuration
|
||||
|
||||
```json
|
||||
{
|
||||
"provider": {
|
||||
"anthropic": {
|
||||
"options": {
|
||||
"baseURL": "https://api.anthropic.com/v1"
|
||||
}
|
||||
},
|
||||
"custom-provider": {
|
||||
"npm": "@ai-sdk/openai-compatible",
|
||||
"name": "Display Name",
|
||||
"options": {
|
||||
"baseURL": "https://api.example.com/v1",
|
||||
"apiKey": "{env:CUSTOM_API_KEY}"
|
||||
},
|
||||
"models": {
|
||||
"model-id": { "name": "Model Name" }
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
```
|
||||
|
||||
### Model Naming Convention
|
||||
|
||||
Format: `provider/model-id`
|
||||
|
||||
Examples:
|
||||
- `anthropic/claude-sonnet-4-5`
|
||||
- `anthropic/claude-opus-4-5`
|
||||
- `anthropic/claude-haiku-4-5`
|
||||
- `openai/gpt-4o`
|
||||
- `google/gemini-2.0-flash`
|
||||
|
||||
### API Key Setup
|
||||
|
||||
```bash
|
||||
# Interactive setup
|
||||
opencode
|
||||
/connect
|
||||
|
||||
# Environment variables
|
||||
export ANTHROPIC_API_KEY=sk-...
|
||||
export OPENAI_API_KEY=sk-...
|
||||
```
|
||||
|
||||
Keys stored in: `~/.local/share/opencode/auth.json`
|
||||
|
||||
## MCP (Model Context Protocol)
|
||||
|
||||
Configure MCP servers in `opencode.json`:
|
||||
|
||||
```json
|
||||
{
|
||||
"mcp": {
|
||||
"github": {
|
||||
"command": "npx",
|
||||
"args": ["-y", "@modelcontextprotocol/server-github"],
|
||||
"env": {
|
||||
"GITHUB_PERSONAL_ACCESS_TOKEN": "{env:GITHUB_TOKEN}"
|
||||
}
|
||||
},
|
||||
"filesystem": {
|
||||
"command": "npx",
|
||||
"args": ["-y", "@modelcontextprotocol/server-filesystem", "/path/to/dir"]
|
||||
}
|
||||
}
|
||||
}
|
||||
```
|
||||
|
||||
MCP tool permissions use `mcp_*` wildcard:
|
||||
|
||||
```json
|
||||
{
|
||||
"permission": {
|
||||
"mcp_*": "ask"
|
||||
}
|
||||
}
|
||||
```
|
||||
|
||||
## Ecosystem Plugins
|
||||
|
||||
### Authentication & Provider Plugins
|
||||
- Alternative model access (ChatGPT Plus, Gemini, Antigravity)
|
||||
- Session tracking (Helicone headers)
|
||||
- OAuth integrations
|
||||
|
||||
### Development Tools
|
||||
- Sandbox isolation (Daytona integration)
|
||||
- Type injection for TypeScript/Svelte
|
||||
- DevContainer multi-branch support
|
||||
- Git worktree management
|
||||
|
||||
### Enhancement Plugins
|
||||
- Web search with citations
|
||||
- Markdown table formatting
|
||||
- Dynamic context token pruning
|
||||
- Desktop notifications
|
||||
- Persistent memory (Supermemory)
|
||||
- Background process management
|
||||
|
||||
### Plugin Discovery
|
||||
|
||||
- opencode.cafe - Community plugin registry
|
||||
- awesome-opencode - Curated list
|
||||
- GitHub search for "opencode-plugin"
|
||||
|
||||
## Quick Reference
|
||||
|
||||
### Key Shortcuts
|
||||
|
||||
| Key | Action |
|
||||
|-----|--------|
|
||||
| Tab | Toggle between Plan and Build modes |
|
||||
| @ | Reference files or mention agents |
|
||||
| / | Execute commands |
|
||||
|
||||
### Common Commands
|
||||
|
||||
```bash
|
||||
/init # Initialize project
|
||||
/connect # Configure API providers
|
||||
/share # Share conversation
|
||||
/undo # Undo last change
|
||||
/redo # Redo undone change
|
||||
/help # Show help
|
||||
```
|
||||
|
||||
### File Locations
|
||||
|
||||
| Purpose | Project | Global |
|
||||
|---------|---------|--------|
|
||||
| Configuration | `opencode.json` | `~/.config/opencode/config.json` |
|
||||
| Agents | `.opencode/agents/` | `~/.config/opencode/agents/` |
|
||||
| Commands | `.opencode/commands/` | `~/.config/opencode/commands/` |
|
||||
| Plugins | `.opencode/plugins/` | `~/.config/opencode/plugins/` |
|
||||
| Tools | `.opencode/tools/` | `~/.config/opencode/tools/` |
|
||||
| Auth | - | `~/.local/share/opencode/auth.json` |
|
||||
|
||||
### Troubleshooting
|
||||
|
||||
```bash
|
||||
# Verify credentials
|
||||
opencode auth list
|
||||
|
||||
# Check configuration
|
||||
cat opencode.json | jq .
|
||||
|
||||
# Test provider connection
|
||||
/connect
|
||||
```
|
||||
|
||||
---
|
||||
|
||||
For more information: https://opencode.ai/docs/
|
||||
16
package-lock.json
generated
16
package-lock.json
generated
@@ -1,14 +1,25 @@
|
||||
{
|
||||
"name": "everything-claude-code",
|
||||
"name": "ecc-universal",
|
||||
"version": "1.4.1",
|
||||
"lockfileVersion": 3,
|
||||
"requires": true,
|
||||
"packages": {
|
||||
"": {
|
||||
"name": "ecc-universal",
|
||||
"version": "1.4.1",
|
||||
"hasInstallScript": true,
|
||||
"license": "MIT",
|
||||
"bin": {
|
||||
"ecc-install": "install.sh"
|
||||
},
|
||||
"devDependencies": {
|
||||
"@eslint/js": "^9.39.2",
|
||||
"eslint": "^9.39.2",
|
||||
"globals": "^17.1.0",
|
||||
"markdownlint-cli": "^0.47.0"
|
||||
},
|
||||
"engines": {
|
||||
"node": ">=18"
|
||||
}
|
||||
},
|
||||
"node_modules/@eslint-community/eslint-utils": {
|
||||
@@ -294,6 +305,7 @@
|
||||
"integrity": "sha512-NZyJarBfL7nWwIq+FDL6Zp/yHEhePMNnnJ0y3qfieCrmNvYct8uvtiV41UvlSe6apAfk0fY1FbWx+NwfmpvtTg==",
|
||||
"dev": true,
|
||||
"license": "MIT",
|
||||
"peer": true,
|
||||
"bin": {
|
||||
"acorn": "bin/acorn"
|
||||
},
|
||||
@@ -599,6 +611,7 @@
|
||||
"integrity": "sha512-LEyamqS7W5HB3ujJyvi0HQK/dtVINZvd5mAAp9eT5S/ujByGjiZLCzPcHVzuXbpJDJF/cxwHlfceVUDZ2lnSTw==",
|
||||
"dev": true,
|
||||
"license": "MIT",
|
||||
"peer": true,
|
||||
"dependencies": {
|
||||
"@eslint-community/eslint-utils": "^4.8.0",
|
||||
"@eslint-community/regexpp": "^4.12.1",
|
||||
@@ -1930,6 +1943,7 @@
|
||||
"integrity": "sha512-5gTmgEY/sqK6gFXLIsQNH19lWb4ebPDLA4SdLP7dsWkIXHWlG66oPuVvXSGFPppYZz8ZDZq0dYYrbHfBCVUb1Q==",
|
||||
"dev": true,
|
||||
"license": "MIT",
|
||||
"peer": true,
|
||||
"engines": {
|
||||
"node": ">=12"
|
||||
},
|
||||
|
||||
@@ -7,7 +7,7 @@
|
||||
- Pair programming and code generation
|
||||
- Worker agents in multi-agent systems
|
||||
|
||||
**Sonnet 4.5** (Best coding model):
|
||||
**Sonnet 4.6** (Best coding model):
|
||||
- Main development work
|
||||
- Orchestrating multi-agent workflows
|
||||
- Complex coding tasks
|
||||
|
||||
330
scripts/codemaps/generate.ts
Normal file
330
scripts/codemaps/generate.ts
Normal file
@@ -0,0 +1,330 @@
|
||||
#!/usr/bin/env node
|
||||
/**
|
||||
* scripts/codemaps/generate.ts
|
||||
*
|
||||
* Codemap Generator for everything-claude-code (ECC)
|
||||
*
|
||||
* Scans the current working directory and generates architectural
|
||||
* codemap documentation under docs/CODEMAPS/ as specified by the
|
||||
* doc-updater agent.
|
||||
*
|
||||
* Usage:
|
||||
* npx tsx scripts/codemaps/generate.ts [srcDir]
|
||||
*
|
||||
* Output:
|
||||
* docs/CODEMAPS/INDEX.md
|
||||
* docs/CODEMAPS/frontend.md
|
||||
* docs/CODEMAPS/backend.md
|
||||
* docs/CODEMAPS/database.md
|
||||
* docs/CODEMAPS/integrations.md
|
||||
* docs/CODEMAPS/workers.md
|
||||
*/
|
||||
|
||||
import fs from 'fs';
|
||||
import path from 'path';
|
||||
|
||||
// ---------------------------------------------------------------------------
|
||||
// Config
|
||||
// ---------------------------------------------------------------------------
|
||||
|
||||
const ROOT = process.cwd();
|
||||
const SRC_DIR = process.argv[2] ? path.resolve(process.argv[2]) : ROOT;
|
||||
const OUTPUT_DIR = path.join(ROOT, 'docs', 'CODEMAPS');
|
||||
const TODAY = new Date().toISOString().split('T')[0];
|
||||
|
||||
// Patterns used to classify files into codemap areas
|
||||
const AREA_PATTERNS: Record<string, RegExp[]> = {
|
||||
frontend: [
|
||||
/\/(app|pages|components|hooks|contexts|ui|views|layouts|styles)\//i,
|
||||
/\.(tsx|jsx|css|scss|sass|less|vue|svelte)$/i,
|
||||
],
|
||||
backend: [
|
||||
/\/(api|routes|controllers|middleware|server|services|handlers)\//i,
|
||||
/\.(route|controller|handler|middleware|service)\.(ts|js)$/i,
|
||||
],
|
||||
database: [
|
||||
/\/(models|schemas|migrations|prisma|drizzle|db|database|repositories)\//i,
|
||||
/\.(model|schema|migration|seed)\.(ts|js)$/i,
|
||||
/prisma\/schema\.prisma$/,
|
||||
/schema\.sql$/,
|
||||
],
|
||||
integrations: [
|
||||
/\/(integrations?|third-party|external|plugins?|adapters?|connectors?)\//i,
|
||||
/\.(integration|adapter|connector)\.(ts|js)$/i,
|
||||
],
|
||||
workers: [
|
||||
/\/(workers?|jobs?|queues?|tasks?|cron|background)\//i,
|
||||
/\.(worker|job|queue|task|cron)\.(ts|js)$/i,
|
||||
],
|
||||
};
|
||||
|
||||
// ---------------------------------------------------------------------------
|
||||
// File System Helpers
|
||||
// ---------------------------------------------------------------------------
|
||||
|
||||
/** Recursively collect all files under a directory, skipping common noise dirs. */
|
||||
function walkDir(dir: string, results: string[] = []): string[] {
|
||||
const SKIP = new Set([
|
||||
'node_modules', '.git', '.next', '.nuxt', 'dist', 'build', 'out',
|
||||
'.turbo', 'coverage', '.cache', '__pycache__', '.venv', 'venv',
|
||||
]);
|
||||
|
||||
let entries: fs.Dirent[];
|
||||
try {
|
||||
entries = fs.readdirSync(dir, { withFileTypes: true });
|
||||
} catch {
|
||||
return results;
|
||||
}
|
||||
|
||||
for (const entry of entries) {
|
||||
if (SKIP.has(entry.name)) continue;
|
||||
const fullPath = path.join(dir, entry.name);
|
||||
if (entry.isDirectory()) {
|
||||
walkDir(fullPath, results);
|
||||
} else if (entry.isFile()) {
|
||||
results.push(fullPath);
|
||||
}
|
||||
}
|
||||
return results;
|
||||
}
|
||||
|
||||
/** Return path relative to ROOT, always using forward slashes. */
|
||||
function rel(p: string): string {
|
||||
return path.relative(ROOT, p).replace(/\\/g, '/');
|
||||
}
|
||||
|
||||
// ---------------------------------------------------------------------------
|
||||
// Analysis
|
||||
// ---------------------------------------------------------------------------
|
||||
|
||||
/** Aggregated classification data for one architectural area of the codebase. */
interface AreaInfo {
  /** Human-readable area title, e.g. "Frontend" or "Backend/API". */
  name: string;
  /** ROOT-relative paths of every file matched to this area. */
  files: string[];
  /** Subset of `files` matching index/main naming (capped at 10). */
  entryPoints: string[];
  /** Sorted, de-duplicated directories containing this area's files. */
  directories: string[];
}
|
||||
|
||||
function classifyFiles(allFiles: string[]): Record<string, AreaInfo> {
|
||||
const areas: Record<string, AreaInfo> = {
|
||||
frontend: { name: 'Frontend', files: [], entryPoints: [], directories: [] },
|
||||
backend: { name: 'Backend/API', files: [], entryPoints: [], directories: [] },
|
||||
database: { name: 'Database', files: [], entryPoints: [], directories: [] },
|
||||
integrations: { name: 'Integrations', files: [], entryPoints: [], directories: [] },
|
||||
workers: { name: 'Workers', files: [], entryPoints: [], directories: [] },
|
||||
};
|
||||
|
||||
for (const file of allFiles) {
|
||||
const relPath = rel(file);
|
||||
for (const [area, patterns] of Object.entries(AREA_PATTERNS)) {
|
||||
if (patterns.some((p) => p.test(relPath))) {
|
||||
areas[area].files.push(relPath);
|
||||
break;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// Derive unique directories and entry points per area
|
||||
for (const area of Object.values(areas)) {
|
||||
const dirs = new Set(area.files.map((f) => path.dirname(f)));
|
||||
area.directories = [...dirs].sort();
|
||||
|
||||
area.entryPoints = area.files
|
||||
.filter((f) => /index\.(ts|tsx|js|jsx)$/.test(f) || /main\.(ts|tsx|js|jsx)$/.test(f))
|
||||
.slice(0, 10);
|
||||
}
|
||||
|
||||
return areas;
|
||||
}
|
||||
|
||||
/** Count lines in a file (returns 0 on error). */
|
||||
function lineCount(p: string): number {
|
||||
try {
|
||||
const content = fs.readFileSync(p, 'utf8');
|
||||
return content.split('\n').length;
|
||||
} catch {
|
||||
return 0;
|
||||
}
|
||||
}
|
||||
|
||||
/** Build a simple directory tree ASCII diagram (max 3 levels deep). */
|
||||
function buildTree(dir: string, prefix = '', depth = 0): string {
|
||||
if (depth > 2) return '';
|
||||
const SKIP = new Set(['node_modules', '.git', 'dist', 'build', '.next', 'coverage']);
|
||||
|
||||
let entries: fs.Dirent[];
|
||||
try {
|
||||
entries = fs.readdirSync(dir, { withFileTypes: true });
|
||||
} catch {
|
||||
return '';
|
||||
}
|
||||
|
||||
const dirs = entries.filter((e) => e.isDirectory() && !SKIP.has(e.name));
|
||||
const files = entries.filter((e) => e.isFile());
|
||||
|
||||
let result = '';
|
||||
const items = [...dirs, ...files];
|
||||
items.forEach((entry, i) => {
|
||||
const isLast = i === items.length - 1;
|
||||
const connector = isLast ? '└── ' : '├── ';
|
||||
result += `${prefix}${connector}${entry.name}\n`;
|
||||
if (entry.isDirectory()) {
|
||||
const newPrefix = prefix + (isLast ? ' ' : '│ ');
|
||||
result += buildTree(path.join(dir, entry.name), newPrefix, depth + 1);
|
||||
}
|
||||
});
|
||||
return result;
|
||||
}
|
||||
|
||||
// ---------------------------------------------------------------------------
|
||||
// Markdown Generators
|
||||
// ---------------------------------------------------------------------------
|
||||
|
||||
/**
 * Render the markdown codemap document for a single area.
 *
 * Note: `areaKey` and `allFiles` are accepted for signature symmetry with
 * the call site in main() but are not used by the current implementation.
 *
 * @param areaKey  Area identifier (e.g. "frontend") — currently unused.
 * @param area     Classified file data for this area.
 * @param allFiles All scanned files — currently unused.
 * @returns        Complete markdown document as a single string.
 */
function generateAreaDoc(areaKey: string, area: AreaInfo, allFiles: string[]): string {
  const fileCount = area.files.length;
  // Re-reads every file to sum line counts; acceptable for docs generation.
  const totalLines = area.files.reduce((sum, f) => sum + lineCount(path.join(ROOT, f)), 0);

  // Bullet list of detected entry points, with a placeholder when none exist.
  const entrySection = area.entryPoints.length > 0
    ? area.entryPoints.map((e) => `- \`${e}\``).join('\n')
    : '- *(no index/main entry points detected)*';

  // At most 20 directories; empty-string fallback handled via `||`.
  const dirSection = area.directories.slice(0, 20)
    .map((d) => `- \`${d}/\``)
    .join('\n') || '- *(no dedicated directories detected)*';

  // Markdown table rows for the first 30 files (path + line count).
  const fileSection = area.files.slice(0, 30)
    .map((f) => `| \`${f}\` | ${lineCount(path.join(ROOT, f))} |`)
    .join('\n');

  const moreFiles = area.files.length > 30
    ? `\n*...and ${area.files.length - 30} more files*`
    : '';

  return `# ${area.name} Codemap

**Last Updated:** ${TODAY}
**Total Files:** ${fileCount}
**Total Lines:** ${totalLines}

## Entry Points

${entrySection}

## Architecture

\`\`\`
${area.name} Directory Structure
${dirSection.replace(/- `/g, '').replace(/`\/$/gm, '/')}
\`\`\`

## Key Modules

| File | Lines |
|------|-------|
${fileSection}${moreFiles}

## Data Flow

> Detected from file patterns. Review individual files for detailed data flow.

## External Dependencies

> Run \`npx jsdoc2md src/**/*.ts\` to extract JSDoc and identify external dependencies.

## Related Areas

- [INDEX](./INDEX.md) — Full overview
- [Frontend](./frontend.md)
- [Backend/API](./backend.md)
- [Database](./database.md)
- [Integrations](./integrations.md)
- [Workers](./workers.md)
`;
}
|
||||
|
||||
/**
 * Render docs/CODEMAPS/INDEX.md: an overview table of all areas, an ASCII
 * tree of the scanned repository, and regeneration instructions.
 *
 * @param areas    Area classification produced by classifyFiles().
 * @param allFiles Every file discovered by walkDir() (used for the count).
 * @returns        Complete markdown document as a single string.
 */
function generateIndex(areas: Record<string, AreaInfo>, allFiles: string[]): string {
  const totalFiles = allFiles.length;
  // One table row per area: link, file count, up to 3 key directories
  // (em-dash placeholder when an area has no directories).
  const areaRows = Object.entries(areas)
    .map(([key, area]) => `| [${area.name}](./${key}.md) | ${area.files.length} files | ${area.directories.slice(0, 3).map((d) => `\`${d}\``).join(', ') || '—'} |`)
    .join('\n');

  const topLevelTree = buildTree(SRC_DIR);

  return `# Codebase Overview — CODEMAPS Index

**Last Updated:** ${TODAY}
**Root:** \`${rel(SRC_DIR) || '.'}\`
**Total Files Scanned:** ${totalFiles}

## Areas

| Area | Size | Key Directories |
|------|------|-----------------|
${areaRows}

## Repository Structure

\`\`\`
${rel(SRC_DIR) || path.basename(SRC_DIR)}/
${topLevelTree}\`\`\`

## How to Regenerate

\`\`\`bash
npx tsx scripts/codemaps/generate.ts   # Regenerate codemaps
npx madge --image graph.svg src/       # Dependency graph (requires graphviz)
npx jsdoc2md src/**/*.ts               # Extract JSDoc
\`\`\`

## Related Documentation

- [Frontend](./frontend.md) — UI components, pages, hooks
- [Backend/API](./backend.md) — API routes, controllers, middleware
- [Database](./database.md) — Models, schemas, migrations
- [Integrations](./integrations.md) — External services & adapters
- [Workers](./workers.md) — Background jobs, queues, cron tasks
`;
}
|
||||
|
||||
// ---------------------------------------------------------------------------
|
||||
// Main
|
||||
// ---------------------------------------------------------------------------
|
||||
|
||||
/**
 * Entry point: scan SRC_DIR, classify every file into an architectural
 * area, and write docs/CODEMAPS/INDEX.md plus one markdown codemap per
 * area. All progress is logged to stdout.
 */
function main(): void {
  console.log(`[generate.ts] Scanning: ${SRC_DIR}`);
  console.log(`[generate.ts] Output: ${OUTPUT_DIR}`);

  // Ensure output directory exists (recursive: no-op if already present)
  fs.mkdirSync(OUTPUT_DIR, { recursive: true });

  // Walk the directory tree
  const allFiles = walkDir(SRC_DIR);
  console.log(`[generate.ts] Found ${allFiles.length} files`);

  // Classify files into areas
  const areas = classifyFiles(allFiles);

  // Generate INDEX.md
  const indexContent = generateIndex(areas, allFiles);
  const indexPath = path.join(OUTPUT_DIR, 'INDEX.md');
  fs.writeFileSync(indexPath, indexContent, 'utf8');
  console.log(`[generate.ts] Written: ${rel(indexPath)}`);

  // Generate per-area codemaps (one <area-key>.md per area)
  for (const [key, area] of Object.entries(areas)) {
    const content = generateAreaDoc(key, area, allFiles);
    const outPath = path.join(OUTPUT_DIR, `${key}.md`);
    fs.writeFileSync(outPath, content, 'utf8');
    console.log(`[generate.ts] Written: ${rel(outPath)} (${area.files.length} files)`);
  }

  console.log('\n[generate.ts] Done! Codemaps written to docs/CODEMAPS/');
  console.log('[generate.ts] Files generated:');
  console.log('  docs/CODEMAPS/INDEX.md');
  console.log('  docs/CODEMAPS/frontend.md');
  console.log('  docs/CODEMAPS/backend.md');
  console.log('  docs/CODEMAPS/database.md');
  console.log('  docs/CODEMAPS/integrations.md');
  console.log('  docs/CODEMAPS/workers.md');
}
|
||||
|
||||
main();
|
||||
@@ -32,7 +32,8 @@ process.stdin.setEncoding('utf8');
|
||||
|
||||
process.stdin.on('data', chunk => {
|
||||
if (data.length < MAX_STDIN) {
|
||||
data += chunk;
|
||||
const remaining = MAX_STDIN - data.length;
|
||||
data += chunk.substring(0, remaining);
|
||||
}
|
||||
});
|
||||
|
||||
|
||||
@@ -29,7 +29,8 @@ process.stdin.setEncoding('utf8');
|
||||
|
||||
process.stdin.on('data', chunk => {
|
||||
if (stdinData.length < MAX_STDIN) {
|
||||
stdinData += chunk;
|
||||
const remaining = MAX_STDIN - stdinData.length;
|
||||
stdinData += chunk.substring(0, remaining);
|
||||
}
|
||||
});
|
||||
|
||||
|
||||
@@ -17,7 +17,8 @@ process.stdin.setEncoding('utf8');
|
||||
|
||||
process.stdin.on('data', chunk => {
|
||||
if (data.length < MAX_STDIN) {
|
||||
data += chunk;
|
||||
const remaining = MAX_STDIN - data.length;
|
||||
data += chunk.substring(0, remaining);
|
||||
}
|
||||
});
|
||||
|
||||
@@ -28,7 +29,7 @@ process.stdin.on('end', () => {
|
||||
|
||||
if (filePath && /\.(ts|tsx|js|jsx)$/.test(filePath)) {
|
||||
const content = readFile(filePath);
|
||||
if (!content) { process.stdout.write(data); return; }
|
||||
if (!content) { process.stdout.write(data); process.exit(0); }
|
||||
const lines = content.split('\n');
|
||||
const matches = [];
|
||||
|
||||
|
||||
@@ -17,7 +17,8 @@ process.stdin.setEncoding('utf8');
|
||||
|
||||
process.stdin.on('data', chunk => {
|
||||
if (data.length < MAX_STDIN) {
|
||||
data += chunk;
|
||||
const remaining = MAX_STDIN - data.length;
|
||||
data += chunk.substring(0, remaining);
|
||||
}
|
||||
});
|
||||
|
||||
|
||||
@@ -19,7 +19,8 @@ process.stdin.setEncoding("utf8");
|
||||
|
||||
process.stdin.on("data", (chunk) => {
|
||||
if (data.length < MAX_STDIN) {
|
||||
data += chunk;
|
||||
const remaining = MAX_STDIN - data.length;
|
||||
data += chunk.substring(0, remaining);
|
||||
}
|
||||
});
|
||||
|
||||
|
||||
@@ -109,7 +109,8 @@ process.stdin.setEncoding('utf8');
|
||||
|
||||
process.stdin.on('data', chunk => {
|
||||
if (stdinData.length < MAX_STDIN) {
|
||||
stdinData += chunk;
|
||||
const remaining = MAX_STDIN - stdinData.length;
|
||||
stdinData += chunk.substring(0, remaining);
|
||||
}
|
||||
});
|
||||
|
||||
|
||||
@@ -282,7 +282,7 @@ function setProjectPackageManager(pmName, projectDir = process.cwd()) {
|
||||
|
||||
// Allowed characters in script/binary names: alphanumeric, dash, underscore, dot, slash, @
|
||||
// This prevents shell metacharacter injection while allowing scoped packages (e.g., @scope/pkg)
|
||||
const SAFE_NAME_REGEX = /^[@a-zA-Z0-9_.\/-]+$/;
|
||||
const SAFE_NAME_REGEX = /^[@a-zA-Z0-9_./-]+$/;
|
||||
|
||||
/**
|
||||
* Get the command to run a script
|
||||
@@ -316,7 +316,7 @@ function getRunCommand(script, options = {}) {
|
||||
|
||||
// Allowed characters in arguments: alphanumeric, whitespace, dashes, dots, slashes,
|
||||
// equals, colons, commas, quotes, @. Rejects shell metacharacters like ; | & ` $ ( ) { } < > !
|
||||
const SAFE_ARGS_REGEX = /^[@a-zA-Z0-9\s_.\/:=,'"*+-]+$/;
|
||||
const SAFE_ARGS_REGEX = /^[@a-zA-Z0-9\s_./:=,'"*+-]+$/;
|
||||
|
||||
/**
|
||||
* Get the command to execute a package binary
|
||||
@@ -370,28 +370,31 @@ function escapeRegex(str) {
|
||||
function getCommandPattern(action) {
|
||||
const patterns = [];
|
||||
|
||||
if (action === 'dev') {
|
||||
// Trim spaces from action to handle leading/trailing whitespace gracefully
|
||||
const trimmedAction = action.trim();
|
||||
|
||||
if (trimmedAction === 'dev') {
|
||||
patterns.push(
|
||||
'npm run dev',
|
||||
'pnpm( run)? dev',
|
||||
'yarn dev',
|
||||
'bun run dev'
|
||||
);
|
||||
} else if (action === 'install') {
|
||||
} else if (trimmedAction === 'install') {
|
||||
patterns.push(
|
||||
'npm install',
|
||||
'pnpm install',
|
||||
'yarn( install)?',
|
||||
'bun install'
|
||||
);
|
||||
} else if (action === 'test') {
|
||||
} else if (trimmedAction === 'test') {
|
||||
patterns.push(
|
||||
'npm test',
|
||||
'pnpm test',
|
||||
'yarn test',
|
||||
'bun test'
|
||||
);
|
||||
} else if (action === 'build') {
|
||||
} else if (trimmedAction === 'build') {
|
||||
patterns.push(
|
||||
'npm run build',
|
||||
'pnpm( run)? build',
|
||||
@@ -400,7 +403,7 @@ function getCommandPattern(action) {
|
||||
);
|
||||
} else {
|
||||
// Generic run command — escape regex metacharacters in action
|
||||
const escaped = escapeRegex(action);
|
||||
const escaped = escapeRegex(trimmedAction);
|
||||
patterns.push(
|
||||
`npm run ${escaped}`,
|
||||
`pnpm( run)? ${escaped}`,
|
||||
|
||||
@@ -125,7 +125,7 @@ ${chalk.bold('Files Tracked:')} ${chalk.green(data.files)}
|
||||
console.log(chalk.gray('─'.repeat(50)));
|
||||
|
||||
patterns.forEach((pattern, i) => {
|
||||
const confidence = pattern.confidence || 0.8;
|
||||
const confidence = pattern.confidence ?? 0.8;
|
||||
const confidenceBar = progressBar(Math.round(confidence * 100), 15);
|
||||
console.log(`
|
||||
${chalk.bold(chalk.yellow(`${i + 1}.`))} ${chalk.bold(pattern.name)}
|
||||
|
||||
160
skills/content-hash-cache-pattern/SKILL.md
Normal file
160
skills/content-hash-cache-pattern/SKILL.md
Normal file
@@ -0,0 +1,160 @@
|
||||
---
|
||||
name: content-hash-cache-pattern
|
||||
description: Cache expensive file processing results using SHA-256 content hashes — path-independent, auto-invalidating, with service layer separation.
|
||||
---
|
||||
|
||||
# Content-Hash File Cache Pattern
|
||||
|
||||
Cache expensive file processing results (PDF parsing, text extraction, image analysis) using SHA-256 content hashes as cache keys. Unlike path-based caching, this approach survives file moves/renames and auto-invalidates when content changes.
|
||||
|
||||
## When to Activate
|
||||
|
||||
- Building file processing pipelines (PDF, images, text extraction)
|
||||
- Processing cost is high and same files are processed repeatedly
|
||||
- Need a `--cache/--no-cache` CLI option
|
||||
- Want to add caching to existing pure functions without modifying them
|
||||
|
||||
## Core Pattern
|
||||
|
||||
### 1. Content-Hash Based Cache Key
|
||||
|
||||
Use file content (not path) as the cache key:
|
||||
|
||||
```python
|
||||
import hashlib
|
||||
from pathlib import Path
|
||||
|
||||
_HASH_CHUNK_SIZE = 65536 # 64KB chunks for large files
|
||||
|
||||
def compute_file_hash(path: Path) -> str:
|
||||
"""SHA-256 of file contents (chunked for large files)."""
|
||||
if not path.is_file():
|
||||
raise FileNotFoundError(f"File not found: {path}")
|
||||
sha256 = hashlib.sha256()
|
||||
with open(path, "rb") as f:
|
||||
while True:
|
||||
chunk = f.read(_HASH_CHUNK_SIZE)
|
||||
if not chunk:
|
||||
break
|
||||
sha256.update(chunk)
|
||||
return sha256.hexdigest()
|
||||
```
|
||||
|
||||
**Why content hash?** File rename/move = cache hit. Content change = automatic invalidation. No index file needed.
|
||||
|
||||
### 2. Frozen Dataclass for Cache Entry
|
||||
|
||||
```python
|
||||
from dataclasses import dataclass
|
||||
|
||||
@dataclass(frozen=True, slots=True)
|
||||
class CacheEntry:
|
||||
file_hash: str
|
||||
source_path: str
|
||||
document: ExtractedDocument # The cached result
|
||||
```
|
||||
|
||||
### 3. File-Based Cache Storage
|
||||
|
||||
Each cache entry is stored as `{hash}.json` — O(1) lookup by hash, no index file required.
|
||||
|
||||
```python
|
||||
import json
|
||||
from typing import Any
|
||||
|
||||
def write_cache(cache_dir: Path, entry: CacheEntry) -> None:
|
||||
cache_dir.mkdir(parents=True, exist_ok=True)
|
||||
cache_file = cache_dir / f"{entry.file_hash}.json"
|
||||
data = serialize_entry(entry)
|
||||
cache_file.write_text(json.dumps(data, ensure_ascii=False), encoding="utf-8")
|
||||
|
||||
def read_cache(cache_dir: Path, file_hash: str) -> CacheEntry | None:
|
||||
cache_file = cache_dir / f"{file_hash}.json"
|
||||
if not cache_file.is_file():
|
||||
return None
|
||||
try:
|
||||
raw = cache_file.read_text(encoding="utf-8")
|
||||
data = json.loads(raw)
|
||||
return deserialize_entry(data)
|
||||
except (json.JSONDecodeError, ValueError, KeyError):
|
||||
return None # Treat corruption as cache miss
|
||||
```
|
||||
|
||||
### 4. Service Layer Wrapper (SRP)
|
||||
|
||||
Keep the processing function pure. Add caching as a separate service layer.
|
||||
|
||||
```python
|
||||
def extract_with_cache(
|
||||
file_path: Path,
|
||||
*,
|
||||
cache_enabled: bool = True,
|
||||
cache_dir: Path = Path(".cache"),
|
||||
) -> ExtractedDocument:
|
||||
"""Service layer: cache check -> extraction -> cache write."""
|
||||
if not cache_enabled:
|
||||
return extract_text(file_path) # Pure function, no cache knowledge
|
||||
|
||||
file_hash = compute_file_hash(file_path)
|
||||
|
||||
# Check cache
|
||||
cached = read_cache(cache_dir, file_hash)
|
||||
if cached is not None:
|
||||
logger.info("Cache hit: %s (hash=%s)", file_path.name, file_hash[:12])
|
||||
return cached.document
|
||||
|
||||
# Cache miss -> extract -> store
|
||||
logger.info("Cache miss: %s (hash=%s)", file_path.name, file_hash[:12])
|
||||
doc = extract_text(file_path)
|
||||
entry = CacheEntry(file_hash=file_hash, source_path=str(file_path), document=doc)
|
||||
write_cache(cache_dir, entry)
|
||||
return doc
|
||||
```
|
||||
|
||||
## Key Design Decisions
|
||||
|
||||
| Decision | Rationale |
|
||||
|----------|-----------|
|
||||
| SHA-256 content hash | Path-independent, auto-invalidates on content change |
|
||||
| `{hash}.json` file naming | O(1) lookup, no index file needed |
|
||||
| Service layer wrapper | SRP: extraction stays pure, cache is a separate concern |
|
||||
| Manual JSON serialization | Full control over frozen dataclass serialization |
|
||||
| Corruption returns `None` | Graceful degradation, re-processes on next run |
|
||||
| `cache_dir.mkdir(parents=True)` | Lazy directory creation on first write |
|
||||
|
||||
## Best Practices
|
||||
|
||||
- **Hash content, not paths** — paths change, content identity doesn't
|
||||
- **Chunk large files** when hashing — avoid loading entire files into memory
|
||||
- **Keep processing functions pure** — they should know nothing about caching
|
||||
- **Log cache hit/miss** with truncated hashes for debugging
|
||||
- **Handle corruption gracefully** — treat invalid cache entries as misses, never crash
|
||||
|
||||
## Anti-Patterns to Avoid
|
||||
|
||||
```python
|
||||
# BAD: Path-based caching (breaks on file move/rename)
|
||||
cache = {"/path/to/file.pdf": result}
|
||||
|
||||
# BAD: Adding cache logic inside the processing function (SRP violation)
|
||||
def extract_text(path, *, cache_enabled=False, cache_dir=None):
|
||||
if cache_enabled: # Now this function has two responsibilities
|
||||
...
|
||||
|
||||
# BAD: Using dataclasses.asdict() with nested frozen dataclasses
|
||||
# (can cause issues with complex nested types)
|
||||
data = dataclasses.asdict(entry) # Use manual serialization instead
|
||||
```
|
||||
|
||||
## When to Use
|
||||
|
||||
- File processing pipelines (PDF parsing, OCR, text extraction, image analysis)
|
||||
- CLI tools that benefit from `--cache/--no-cache` options
|
||||
- Batch processing where the same files appear across runs
|
||||
- Adding caching to existing pure functions without modifying them
|
||||
|
||||
## When NOT to Use
|
||||
|
||||
- Data that must always be fresh (real-time feeds)
|
||||
- Cache entries that would be extremely large (consider streaming instead)
|
||||
- Results that depend on parameters beyond file content (e.g., different extraction configs)
|
||||
@@ -103,7 +103,8 @@ PARSED_OK=$(echo "$PARSED" | python3 -c "import json,sys; print(json.load(sys.st
|
||||
if [ "$PARSED_OK" != "True" ]; then
|
||||
# Fallback: log raw input for debugging
|
||||
timestamp=$(date -u +"%Y-%m-%dT%H:%M:%SZ")
|
||||
TIMESTAMP="$timestamp" echo "$INPUT_JSON" | python3 -c "
|
||||
export TIMESTAMP="$timestamp"
|
||||
echo "$INPUT_JSON" | python3 -c "
|
||||
import json, sys, os
|
||||
raw = sys.stdin.read()[:2000]
|
||||
print(json.dumps({'timestamp': os.environ['TIMESTAMP'], 'event': 'parse_error', 'raw': raw}))
|
||||
@@ -124,7 +125,8 @@ fi
|
||||
# Build and write observation
|
||||
timestamp=$(date -u +"%Y-%m-%dT%H:%M:%SZ")
|
||||
|
||||
TIMESTAMP="$timestamp" echo "$PARSED" | python3 -c "
|
||||
export TIMESTAMP="$timestamp"
|
||||
echo "$PARSED" | python3 -c "
|
||||
import json, sys, os
|
||||
|
||||
parsed = json.load(sys.stdin)
|
||||
|
||||
182
skills/cost-aware-llm-pipeline/SKILL.md
Normal file
182
skills/cost-aware-llm-pipeline/SKILL.md
Normal file
@@ -0,0 +1,182 @@
|
||||
---
|
||||
name: cost-aware-llm-pipeline
|
||||
description: Cost optimization patterns for LLM API usage — model routing by task complexity, budget tracking, retry logic, and prompt caching.
|
||||
---
|
||||
|
||||
# Cost-Aware LLM Pipeline
|
||||
|
||||
Patterns for controlling LLM API costs while maintaining quality. Combines model routing, budget tracking, retry logic, and prompt caching into a composable pipeline.
|
||||
|
||||
## When to Activate
|
||||
|
||||
- Building applications that call LLM APIs (Claude, GPT, etc.)
|
||||
- Processing batches of items with varying complexity
|
||||
- Need to stay within a budget for API spend
|
||||
- Optimizing cost without sacrificing quality on complex tasks
|
||||
|
||||
## Core Concepts
|
||||
|
||||
### 1. Model Routing by Task Complexity
|
||||
|
||||
Automatically select cheaper models for simple tasks, reserving expensive models for complex ones.
|
||||
|
||||
```python
|
||||
MODEL_SONNET = "claude-sonnet-4-6"
|
||||
MODEL_HAIKU = "claude-haiku-4-5-20251001"
|
||||
|
||||
_SONNET_TEXT_THRESHOLD = 10_000 # chars
|
||||
_SONNET_ITEM_THRESHOLD = 30 # items
|
||||
|
||||
def select_model(
|
||||
text_length: int,
|
||||
item_count: int,
|
||||
force_model: str | None = None,
|
||||
) -> str:
|
||||
"""Select model based on task complexity."""
|
||||
if force_model is not None:
|
||||
return force_model
|
||||
if text_length >= _SONNET_TEXT_THRESHOLD or item_count >= _SONNET_ITEM_THRESHOLD:
|
||||
return MODEL_SONNET # Complex task
|
||||
return MODEL_HAIKU # Simple task (3-4x cheaper)
|
||||
```
|
||||
|
||||
### 2. Immutable Cost Tracking
|
||||
|
||||
Track cumulative spend with frozen dataclasses. Each API call returns a new tracker — never mutates state.
|
||||
|
||||
```python
|
||||
from dataclasses import dataclass
|
||||
|
||||
@dataclass(frozen=True, slots=True)
|
||||
class CostRecord:
|
||||
model: str
|
||||
input_tokens: int
|
||||
output_tokens: int
|
||||
cost_usd: float
|
||||
|
||||
@dataclass(frozen=True, slots=True)
|
||||
class CostTracker:
|
||||
budget_limit: float = 1.00
|
||||
records: tuple[CostRecord, ...] = ()
|
||||
|
||||
def add(self, record: CostRecord) -> "CostTracker":
|
||||
"""Return new tracker with added record (never mutates self)."""
|
||||
return CostTracker(
|
||||
budget_limit=self.budget_limit,
|
||||
records=(*self.records, record),
|
||||
)
|
||||
|
||||
@property
|
||||
def total_cost(self) -> float:
|
||||
return sum(r.cost_usd for r in self.records)
|
||||
|
||||
@property
|
||||
def over_budget(self) -> bool:
|
||||
return self.total_cost > self.budget_limit
|
||||
```
|
||||
|
||||
### 3. Narrow Retry Logic
|
||||
|
||||
Retry only on transient errors. Fail fast on authentication or bad request errors.
|
||||
|
||||
```python
|
||||
import time

from anthropic import (
|
||||
APIConnectionError,
|
||||
InternalServerError,
|
||||
RateLimitError,
|
||||
)
|
||||
|
||||
_RETRYABLE_ERRORS = (APIConnectionError, RateLimitError, InternalServerError)
|
||||
_MAX_RETRIES = 3
|
||||
|
||||
def call_with_retry(func, *, max_retries: int = _MAX_RETRIES):
|
||||
"""Retry only on transient errors, fail fast on others."""
|
||||
for attempt in range(max_retries):
|
||||
try:
|
||||
return func()
|
||||
except _RETRYABLE_ERRORS:
|
||||
if attempt == max_retries - 1:
|
||||
raise
|
||||
time.sleep(2 ** attempt) # Exponential backoff
|
||||
# AuthenticationError, BadRequestError etc. → raise immediately
|
||||
```
|
||||
|
||||
### 4. Prompt Caching
|
||||
|
||||
Cache long system prompts to avoid resending them on every request.
|
||||
|
||||
```python
|
||||
messages = [
|
||||
{
|
||||
"role": "user",
|
||||
"content": [
|
||||
{
|
||||
"type": "text",
|
||||
"text": system_prompt,
|
||||
"cache_control": {"type": "ephemeral"}, # Cache this
|
||||
},
|
||||
{
|
||||
"type": "text",
|
||||
"text": user_input, # Variable part
|
||||
},
|
||||
],
|
||||
}
|
||||
]
|
||||
```
|
||||
|
||||
## Composition
|
||||
|
||||
Combine all four techniques in a single pipeline function:
|
||||
|
||||
```python
|
||||
def process(text: str, config: Config, tracker: CostTracker) -> tuple[Result, CostTracker]:
|
||||
# 1. Route model
|
||||
model = select_model(len(text), estimated_items, config.force_model)
|
||||
|
||||
# 2. Check budget
|
||||
if tracker.over_budget:
|
||||
raise BudgetExceededError(tracker.total_cost, tracker.budget_limit)
|
||||
|
||||
# 3. Call with retry + caching
|
||||
response = call_with_retry(lambda: client.messages.create(
|
||||
model=model,
|
||||
messages=build_cached_messages(system_prompt, text),
|
||||
))
|
||||
|
||||
# 4. Track cost (immutable)
|
||||
record = CostRecord(model=model, input_tokens=..., output_tokens=..., cost_usd=...)
|
||||
tracker = tracker.add(record)
|
||||
|
||||
return parse_result(response), tracker
|
||||
```
|
||||
|
||||
## Pricing Reference (2025-2026)
|
||||
|
||||
| Model | Input ($/1M tokens) | Output ($/1M tokens) | Relative Cost |
|
||||
|-------|---------------------|----------------------|---------------|
|
||||
| Haiku 4.5 | $0.80 | $4.00 | 1x |
|
||||
| Sonnet 4.6 | $3.00 | $15.00 | ~4x |
|
||||
| Opus 4.5 | $15.00 | $75.00 | ~19x |
|
||||
|
||||
## Best Practices
|
||||
|
||||
- **Start with the cheapest model** and only route to expensive models when complexity thresholds are met
|
||||
- **Set explicit budget limits** before processing batches — fail early rather than overspend
|
||||
- **Log model selection decisions** so you can tune thresholds based on real data
|
||||
- **Use prompt caching** for system prompts over 1024 tokens — saves both cost and latency
|
||||
- **Never retry on authentication or validation errors** — only transient failures (network, rate limit, server error)
|
||||
|
||||
## Anti-Patterns to Avoid
|
||||
|
||||
- Using the most expensive model for all requests regardless of complexity
|
||||
- Retrying on all errors (wastes budget on permanent failures)
|
||||
- Mutating cost tracking state (makes debugging and auditing difficult)
|
||||
- Hardcoding model names throughout the codebase (use constants or config)
|
||||
- Ignoring prompt caching for repetitive system prompts
|
||||
|
||||
## When to Use
|
||||
|
||||
- Any application calling Claude, OpenAI, or similar LLM APIs
|
||||
- Batch processing pipelines where cost adds up quickly
|
||||
- Multi-model architectures that need intelligent routing
|
||||
- Production systems that need budget guardrails
|
||||
722
skills/cpp-coding-standards/SKILL.md
Normal file
722
skills/cpp-coding-standards/SKILL.md
Normal file
@@ -0,0 +1,722 @@
|
||||
---
|
||||
name: cpp-coding-standards
|
||||
description: C++ coding standards based on the C++ Core Guidelines (isocpp.github.io). Use when writing, reviewing, or refactoring C++ code to enforce modern, safe, and idiomatic practices.
|
||||
---
|
||||
|
||||
# C++ Coding Standards (C++ Core Guidelines)
|
||||
|
||||
Comprehensive coding standards for modern C++ (C++17/20/23) derived from the [C++ Core Guidelines](https://isocpp.github.io/CppCoreGuidelines/CppCoreGuidelines). Enforces type safety, resource safety, immutability, and clarity.
|
||||
|
||||
## When to Use
|
||||
|
||||
- Writing new C++ code (classes, functions, templates)
|
||||
- Reviewing or refactoring existing C++ code
|
||||
- Making architectural decisions in C++ projects
|
||||
- Enforcing consistent style across a C++ codebase
|
||||
- Choosing between language features (e.g., `enum` vs `enum class`, raw pointer vs smart pointer)
|
||||
|
||||
### When NOT to Use
|
||||
|
||||
- Non-C++ projects
|
||||
- Legacy C codebases that cannot adopt modern C++ features
|
||||
- Embedded/bare-metal contexts where specific guidelines conflict with hardware constraints (adapt selectively)
|
||||
|
||||
## Cross-Cutting Principles
|
||||
|
||||
These themes recur throughout the guidelines and form the foundation:
|
||||
|
||||
1. **RAII everywhere** (P.8, R.1, E.6, CP.20): Bind resource lifetime to object lifetime
|
||||
2. **Immutability by default** (P.10, Con.1-5, ES.25): Start with `const`/`constexpr`; mutability is the exception
|
||||
3. **Type safety** (P.4, I.4, ES.46-49, Enum.3): Use the type system to prevent errors at compile time
|
||||
4. **Express intent** (P.3, F.1, NL.1-2, T.10): Names, types, and concepts should communicate purpose
|
||||
5. **Minimize complexity** (F.2-3, ES.5, Per.4-5): Simple code is correct code
|
||||
6. **Value semantics over pointer semantics** (C.10, R.3-5, F.20, CP.31): Prefer returning by value and scoped objects
|
||||
|
||||
## Philosophy & Interfaces (P.*, I.*)
|
||||
|
||||
### Key Rules
|
||||
|
||||
| Rule | Summary |
|
||||
|------|---------|
|
||||
| **P.1** | Express ideas directly in code |
|
||||
| **P.3** | Express intent |
|
||||
| **P.4** | Ideally, a program should be statically type safe |
|
||||
| **P.5** | Prefer compile-time checking to run-time checking |
|
||||
| **P.8** | Don't leak any resources |
|
||||
| **P.10** | Prefer immutable data to mutable data |
|
||||
| **I.1** | Make interfaces explicit |
|
||||
| **I.2** | Avoid non-const global variables |
|
||||
| **I.4** | Make interfaces precisely and strongly typed |
|
||||
| **I.11** | Never transfer ownership by a raw pointer or reference |
|
||||
| **I.23** | Keep the number of function arguments low |
|
||||
|
||||
### DO
|
||||
|
||||
```cpp
|
||||
// P.10 + I.4: Immutable, strongly typed interface
|
||||
struct Temperature {
|
||||
double kelvin;
|
||||
};
|
||||
|
||||
Temperature boil(const Temperature& water);
|
||||
```
|
||||
|
||||
### DON'T
|
||||
|
||||
```cpp
|
||||
// Weak interface: unclear ownership, unclear units
|
||||
double boil(double* temp);
|
||||
|
||||
// Non-const global variable
|
||||
int g_counter = 0; // I.2 violation
|
||||
```
|
||||
|
||||
## Functions (F.*)
|
||||
|
||||
### Key Rules
|
||||
|
||||
| Rule | Summary |
|
||||
|------|---------|
|
||||
| **F.1** | Package meaningful operations as carefully named functions |
|
||||
| **F.2** | A function should perform a single logical operation |
|
||||
| **F.3** | Keep functions short and simple |
|
||||
| **F.4** | If a function might be evaluated at compile time, declare it `constexpr` |
|
||||
| **F.6** | If your function must not throw, declare it `noexcept` |
|
||||
| **F.8** | Prefer pure functions |
|
||||
| **F.16** | For "in" parameters, pass cheaply-copied types by value and others by `const&` |
|
||||
| **F.20** | For "out" values, prefer return values to output parameters |
|
||||
| **F.21** | To return multiple "out" values, prefer returning a struct |
|
||||
| **F.43** | Never return a pointer or reference to a local object |
|
||||
|
||||
### Parameter Passing
|
||||
|
||||
```cpp
|
||||
// F.16: Cheap types by value, others by const&
|
||||
void print(int x); // cheap: by value
|
||||
void analyze(const std::string& data); // expensive: by const&
|
||||
void transform(std::string s); // sink: by value (will move)
|
||||
|
||||
// F.20 + F.21: Return values, not output parameters
|
||||
struct ParseResult {
|
||||
std::string token;
|
||||
int position;
|
||||
};
|
||||
|
||||
ParseResult parse(std::string_view input); // GOOD: return struct
|
||||
|
||||
// BAD: output parameters
|
||||
void parse(std::string_view input,
|
||||
std::string& token, int& pos); // avoid this
|
||||
```
|
||||
|
||||
### Pure Functions and constexpr
|
||||
|
||||
```cpp
|
||||
// F.4 + F.8: Pure, constexpr where possible
|
||||
constexpr int factorial(int n) noexcept {
|
||||
return (n <= 1) ? 1 : n * factorial(n - 1);
|
||||
}
|
||||
|
||||
static_assert(factorial(5) == 120);
|
||||
```
|
||||
|
||||
### Anti-Patterns
|
||||
|
||||
- Returning `T&&` from functions (F.45)
|
||||
- Using `va_arg` / C-style variadics (F.55)
|
||||
- Capturing by reference in lambdas passed to other threads (F.53)
|
||||
- Returning `const T` which inhibits move semantics (F.49)
|
||||
|
||||
## Classes & Class Hierarchies (C.*)
|
||||
|
||||
### Key Rules
|
||||
|
||||
| Rule | Summary |
|
||||
|------|---------|
|
||||
| **C.2** | Use `class` if invariant exists; `struct` if data members vary independently |
|
||||
| **C.9** | Minimize exposure of members |
|
||||
| **C.20** | If you can avoid defining default operations, do (Rule of Zero) |
|
||||
| **C.21** | If you define or `=delete` any copy/move/destructor, handle them all (Rule of Five) |
|
||||
| **C.35** | Base class destructor: public virtual or protected non-virtual |
|
||||
| **C.41** | A constructor should create a fully initialized object |
|
||||
| **C.46** | Declare single-argument constructors `explicit` |
|
||||
| **C.67** | A polymorphic class should suppress public copy/move |
|
||||
| **C.128** | Virtual functions: specify exactly one of `virtual`, `override`, or `final` |
|
||||
|
||||
### Rule of Zero
|
||||
|
||||
```cpp
|
||||
// C.20: Let the compiler generate special members
|
||||
struct Employee {
|
||||
std::string name;
|
||||
std::string department;
|
||||
int id;
|
||||
// No destructor, copy/move constructors, or assignment operators needed
|
||||
};
|
||||
```
|
||||
|
||||
### Rule of Five
|
||||
|
||||
```cpp
|
||||
// C.21: If you must manage a resource, define all five
|
||||
class Buffer {
|
||||
public:
|
||||
explicit Buffer(std::size_t size)
|
||||
: data_(std::make_unique<char[]>(size)), size_(size) {}
|
||||
|
||||
~Buffer() = default;
|
||||
|
||||
Buffer(const Buffer& other)
|
||||
: data_(std::make_unique<char[]>(other.size_)), size_(other.size_) {
|
||||
std::copy_n(other.data_.get(), size_, data_.get());
|
||||
}
|
||||
|
||||
Buffer& operator=(const Buffer& other) {
|
||||
if (this != &other) {
|
||||
auto new_data = std::make_unique<char[]>(other.size_);
|
||||
std::copy_n(other.data_.get(), other.size_, new_data.get());
|
||||
data_ = std::move(new_data);
|
||||
size_ = other.size_;
|
||||
}
|
||||
return *this;
|
||||
}
|
||||
|
||||
Buffer(Buffer&&) noexcept = default;
|
||||
Buffer& operator=(Buffer&&) noexcept = default;
|
||||
|
||||
private:
|
||||
std::unique_ptr<char[]> data_;
|
||||
std::size_t size_;
|
||||
};
|
||||
```
|
||||
|
||||
### Class Hierarchy
|
||||
|
||||
```cpp
|
||||
// C.35 + C.128: Virtual destructor, use override
|
||||
class Shape {
|
||||
public:
|
||||
virtual ~Shape() = default;
|
||||
virtual double area() const = 0; // C.121: pure interface
|
||||
};
|
||||
|
||||
class Circle : public Shape {
|
||||
public:
|
||||
explicit Circle(double r) : radius_(r) {}
|
||||
double area() const override { return 3.14159 * radius_ * radius_; }
|
||||
|
||||
private:
|
||||
double radius_;
|
||||
};
|
||||
```
|
||||
|
||||
### Anti-Patterns
|
||||
|
||||
- Calling virtual functions in constructors/destructors (C.82)
|
||||
- Using `memset`/`memcpy` on non-trivial types (C.90)
|
||||
- Providing different default arguments for virtual function and overrider (C.140)
|
||||
- Making data members `const` or references, which suppresses move/copy (C.12)
|
||||
|
||||
## Resource Management (R.*)
|
||||
|
||||
### Key Rules
|
||||
|
||||
| Rule | Summary |
|
||||
|------|---------|
|
||||
| **R.1** | Manage resources automatically using RAII |
|
||||
| **R.3** | A raw pointer (`T*`) is non-owning |
|
||||
| **R.5** | Prefer scoped objects; don't heap-allocate unnecessarily |
|
||||
| **R.10** | Avoid `malloc()`/`free()` |
|
||||
| **R.11** | Avoid calling `new` and `delete` explicitly |
|
||||
| **R.20** | Use `unique_ptr` or `shared_ptr` to represent ownership |
|
||||
| **R.21** | Prefer `unique_ptr` over `shared_ptr` unless sharing ownership |
|
||||
| **R.22** | Use `make_shared()` to make `shared_ptr`s |
|
||||
|
||||
### Smart Pointer Usage
|
||||
|
||||
```cpp
|
||||
// R.11 + R.20 + R.21: RAII with smart pointers
|
||||
auto widget = std::make_unique<Widget>("config"); // unique ownership
|
||||
auto cache = std::make_shared<Cache>(1024); // shared ownership
|
||||
|
||||
// R.3: Raw pointer = non-owning observer
|
||||
void render(const Widget* w) { // does NOT own w
|
||||
if (w) w->draw();
|
||||
}
|
||||
|
||||
render(widget.get());
|
||||
```
|
||||
|
||||
### RAII Pattern
|
||||
|
||||
```cpp
|
||||
// R.1: Resource acquisition is initialization
|
||||
class FileHandle {
|
||||
public:
|
||||
explicit FileHandle(const std::string& path)
|
||||
: handle_(std::fopen(path.c_str(), "r")) {
|
||||
if (!handle_) throw std::runtime_error("Failed to open: " + path);
|
||||
}
|
||||
|
||||
~FileHandle() {
|
||||
if (handle_) std::fclose(handle_);
|
||||
}
|
||||
|
||||
FileHandle(const FileHandle&) = delete;
|
||||
FileHandle& operator=(const FileHandle&) = delete;
|
||||
FileHandle(FileHandle&& other) noexcept
|
||||
: handle_(std::exchange(other.handle_, nullptr)) {}
|
||||
FileHandle& operator=(FileHandle&& other) noexcept {
|
||||
if (this != &other) {
|
||||
if (handle_) std::fclose(handle_);
|
||||
handle_ = std::exchange(other.handle_, nullptr);
|
||||
}
|
||||
return *this;
|
||||
}
|
||||
|
||||
private:
|
||||
std::FILE* handle_;
|
||||
};
|
||||
```
|
||||
|
||||
### Anti-Patterns
|
||||
|
||||
- Naked `new`/`delete` (R.11)
|
||||
- `malloc()`/`free()` in C++ code (R.10)
|
||||
- Multiple resource allocations in a single expression (R.13 -- exception safety hazard)
|
||||
- `shared_ptr` where `unique_ptr` suffices (R.21)
|
||||
|
||||
## Expressions & Statements (ES.*)
|
||||
|
||||
### Key Rules
|
||||
|
||||
| Rule | Summary |
|
||||
|------|---------|
|
||||
| **ES.5** | Keep scopes small |
|
||||
| **ES.20** | Always initialize an object |
|
||||
| **ES.23** | Prefer `{}` initializer syntax |
|
||||
| **ES.25** | Declare objects `const` or `constexpr` unless modification is intended |
|
||||
| **ES.28** | Use lambdas for complex initialization of `const` variables |
|
||||
| **ES.45** | Avoid magic constants; use symbolic constants |
|
||||
| **ES.46** | Avoid narrowing/lossy arithmetic conversions |
|
||||
| **ES.47** | Use `nullptr` rather than `0` or `NULL` |
|
||||
| **ES.48** | Avoid casts |
|
||||
| **ES.50** | Don't cast away `const` |
|
||||
|
||||
### Initialization
|
||||
|
||||
```cpp
|
||||
// ES.20 + ES.23 + ES.25: Always initialize, prefer {}, default to const
|
||||
const int max_retries{3};
|
||||
const std::string name{"widget"};
|
||||
const std::vector<int> primes{2, 3, 5, 7, 11};
|
||||
|
||||
// ES.28: Lambda for complex const initialization
|
||||
const auto config = [&] {
|
||||
Config c;
|
||||
c.timeout = std::chrono::seconds{30};
|
||||
c.retries = max_retries;
|
||||
c.verbose = debug_mode;
|
||||
return c;
|
||||
}();
|
||||
```
|
||||
|
||||
### Anti-Patterns
|
||||
|
||||
- Uninitialized variables (ES.20)
|
||||
- Using `0` or `NULL` as pointer (ES.47 -- use `nullptr`)
|
||||
- C-style casts (ES.48 -- use `static_cast`, `const_cast`, etc.)
|
||||
- Casting away `const` (ES.50)
|
||||
- Magic numbers without named constants (ES.45)
|
||||
- Mixing signed and unsigned arithmetic (ES.100)
|
||||
- Reusing names in nested scopes (ES.12)
|
||||
|
||||
## Error Handling (E.*)
|
||||
|
||||
### Key Rules
|
||||
|
||||
| Rule | Summary |
|
||||
|------|---------|
|
||||
| **E.1** | Develop an error-handling strategy early in a design |
|
||||
| **E.2** | Throw an exception to signal that a function can't perform its assigned task |
|
||||
| **E.6** | Use RAII to prevent leaks |
|
||||
| **E.12** | Use `noexcept` when throwing is impossible or unacceptable |
|
||||
| **E.14** | Use purpose-designed user-defined types as exceptions |
|
||||
| **E.15** | Throw by value, catch by reference |
|
||||
| **E.16** | Destructors, deallocation, and swap must never fail |
|
||||
| **E.17** | Don't try to catch every exception in every function |
|
||||
|
||||
### Exception Hierarchy
|
||||
|
||||
```cpp
|
||||
// E.14 + E.15: Custom exception types, throw by value, catch by reference
|
||||
class AppError : public std::runtime_error {
|
||||
public:
|
||||
using std::runtime_error::runtime_error;
|
||||
};
|
||||
|
||||
class NetworkError : public AppError {
|
||||
public:
|
||||
NetworkError(const std::string& msg, int code)
|
||||
: AppError(msg), status_code(code) {}
|
||||
int status_code;
|
||||
};
|
||||
|
||||
void fetch_data(const std::string& url) {
|
||||
// E.2: Throw to signal failure
|
||||
throw NetworkError("connection refused", 503);
|
||||
}
|
||||
|
||||
void run() {
|
||||
try {
|
||||
fetch_data("https://api.example.com");
|
||||
} catch (const NetworkError& e) {
|
||||
log_error(e.what(), e.status_code);
|
||||
} catch (const AppError& e) {
|
||||
log_error(e.what());
|
||||
}
|
||||
// E.17: Don't catch everything here -- let unexpected errors propagate
|
||||
}
|
||||
```
|
||||
|
||||
### Anti-Patterns
|
||||
|
||||
- Throwing built-in types like `int` or string literals (E.14)
|
||||
- Catching by value (slicing risk) (E.15)
|
||||
- Empty catch blocks that silently swallow errors
|
||||
- Using exceptions for flow control (E.3)
|
||||
- Error handling based on global state like `errno` (E.28)
|
||||
|
||||
## Constants & Immutability (Con.*)
|
||||
|
||||
### All Rules
|
||||
|
||||
| Rule | Summary |
|
||||
|------|---------|
|
||||
| **Con.1** | By default, make objects immutable |
|
||||
| **Con.2** | By default, make member functions `const` |
|
||||
| **Con.3** | By default, pass pointers and references to `const` |
|
||||
| **Con.4** | Use `const` for values that don't change after construction |
|
||||
| **Con.5** | Use `constexpr` for values computable at compile time |
|
||||
|
||||
```cpp
|
||||
// Con.1 through Con.5: Immutability by default
|
||||
class Sensor {
|
||||
public:
|
||||
explicit Sensor(std::string id) : id_(std::move(id)) {}
|
||||
|
||||
// Con.2: const member functions by default
|
||||
const std::string& id() const { return id_; }
|
||||
double last_reading() const { return reading_; }
|
||||
|
||||
// Only non-const when mutation is required
|
||||
void record(double value) { reading_ = value; }
|
||||
|
||||
private:
|
||||
const std::string id_; // Con.4: never changes after construction
|
||||
double reading_{0.0};
|
||||
};
|
||||
|
||||
// Con.3: Pass by const reference
|
||||
void display(const Sensor& s) {
|
||||
std::cout << s.id() << ": " << s.last_reading() << '\n';
|
||||
}
|
||||
|
||||
// Con.5: Compile-time constants
|
||||
constexpr double pi = 3.14159265358979;  // NL.9: not ALL_CAPS (it's not a macro)
|
||||
constexpr int max_sensors = 256;
|
||||
```
|
||||
|
||||
## Concurrency & Parallelism (CP.*)
|
||||
|
||||
### Key Rules
|
||||
|
||||
| Rule | Summary |
|
||||
|------|---------|
|
||||
| **CP.2** | Avoid data races |
|
||||
| **CP.3** | Minimize explicit sharing of writable data |
|
||||
| **CP.4** | Think in terms of tasks, rather than threads |
|
||||
| **CP.8** | Don't use `volatile` for synchronization |
|
||||
| **CP.20** | Use RAII, never plain `lock()`/`unlock()` |
|
||||
| **CP.21** | Use `std::scoped_lock` to acquire multiple mutexes |
|
||||
| **CP.22** | Never call unknown code while holding a lock |
|
||||
| **CP.42** | Don't wait without a condition |
|
||||
| **CP.44** | Remember to name your `lock_guard`s and `unique_lock`s |
|
||||
| **CP.100** | Don't use lock-free programming unless you absolutely have to |
|
||||
|
||||
### Safe Locking
|
||||
|
||||
```cpp
|
||||
// CP.20 + CP.44: RAII locks, always named
|
||||
class ThreadSafeQueue {
|
||||
public:
|
||||
void push(int value) {
|
||||
std::lock_guard<std::mutex> lock(mutex_); // CP.44: named!
|
||||
queue_.push(value);
|
||||
cv_.notify_one();
|
||||
}
|
||||
|
||||
int pop() {
|
||||
std::unique_lock<std::mutex> lock(mutex_);
|
||||
// CP.42: Always wait with a condition
|
||||
cv_.wait(lock, [this] { return !queue_.empty(); });
|
||||
const int value = queue_.front();
|
||||
queue_.pop();
|
||||
return value;
|
||||
}
|
||||
|
||||
private:
|
||||
std::mutex mutex_; // CP.50: mutex with its data
|
||||
std::condition_variable cv_;
|
||||
std::queue<int> queue_;
|
||||
};
|
||||
```
|
||||
|
||||
### Multiple Mutexes
|
||||
|
||||
```cpp
|
||||
// CP.21: std::scoped_lock for multiple mutexes (deadlock-free)
|
||||
void transfer(Account& from, Account& to, double amount) {
|
||||
std::scoped_lock lock(from.mutex_, to.mutex_);
|
||||
from.balance_ -= amount;
|
||||
to.balance_ += amount;
|
||||
}
|
||||
```
|
||||
|
||||
### Anti-Patterns
|
||||
|
||||
- `volatile` for synchronization (CP.8 -- it's for hardware I/O only)
|
||||
- Detaching threads (CP.26 -- lifetime management becomes nearly impossible)
|
||||
- Unnamed lock guards: `std::lock_guard<std::mutex>(m);` destroys immediately (CP.44)
|
||||
- Holding locks while calling callbacks (CP.22 -- deadlock risk)
|
||||
- Lock-free programming without deep expertise (CP.100)
|
||||
|
||||
## Templates & Generic Programming (T.*)
|
||||
|
||||
### Key Rules
|
||||
|
||||
| Rule | Summary |
|
||||
|------|---------|
|
||||
| **T.1** | Use templates to raise the level of abstraction |
|
||||
| **T.2** | Use templates to express algorithms for many argument types |
|
||||
| **T.10** | Specify concepts for all template arguments |
|
||||
| **T.11** | Use standard concepts whenever possible |
|
||||
| **T.13** | Prefer shorthand notation for simple concepts |
|
||||
| **T.43** | Prefer `using` over `typedef` |
|
||||
| **T.120** | Use template metaprogramming only when you really need to |
|
||||
| **T.144** | Don't specialize function templates (overload instead) |
|
||||
|
||||
### Concepts (C++20)
|
||||
|
||||
```cpp
|
||||
#include <concepts>
|
||||
|
||||
// T.10 + T.11: Constrain templates with standard concepts
|
||||
template<std::integral T>
|
||||
T gcd(T a, T b) {
|
||||
while (b != 0) {
|
||||
a = std::exchange(b, a % b);
|
||||
}
|
||||
return a;
|
||||
}
|
||||
|
||||
// T.13: Shorthand concept syntax
|
||||
void sort(std::ranges::random_access_range auto& range) {
|
||||
std::ranges::sort(range);
|
||||
}
|
||||
|
||||
// Custom concept for domain-specific constraints
|
||||
template<typename T>
|
||||
concept Serializable = requires(const T& t) {
|
||||
{ t.serialize() } -> std::convertible_to<std::string>;
|
||||
};
|
||||
|
||||
template<Serializable T>
|
||||
void save(const T& obj, const std::string& path);
|
||||
```
|
||||
|
||||
### Anti-Patterns
|
||||
|
||||
- Unconstrained templates in visible namespaces (T.47)
|
||||
- Specializing function templates instead of overloading (T.144)
|
||||
- Template metaprogramming where `constexpr` suffices (T.120)
|
||||
- `typedef` instead of `using` (T.43)
|
||||
|
||||
## Standard Library (SL.*)
|
||||
|
||||
### Key Rules
|
||||
|
||||
| Rule | Summary |
|
||||
|------|---------|
|
||||
| **SL.1** | Use libraries wherever possible |
|
||||
| **SL.2** | Prefer the standard library to other libraries |
|
||||
| **SL.con.1** | Prefer `std::array` or `std::vector` over C arrays |
|
||||
| **SL.con.2** | Prefer `std::vector` by default |
|
||||
| **SL.str.1** | Use `std::string` to own character sequences |
|
||||
| **SL.str.2** | Use `std::string_view` to refer to character sequences |
|
||||
| **SL.io.50** | Avoid `endl` (use `'\n'` -- `endl` forces a flush) |
|
||||
|
||||
```cpp
|
||||
// SL.con.1 + SL.con.2: Prefer vector/array over C arrays
|
||||
const std::array<int, 4> fixed_data{1, 2, 3, 4};
|
||||
std::vector<std::string> dynamic_data;
|
||||
|
||||
// SL.str.1 + SL.str.2: string owns, string_view observes
|
||||
std::string build_greeting(std::string_view name) {
|
||||
return "Hello, " + std::string(name) + "!";
|
||||
}
|
||||
|
||||
// SL.io.50: Use '\n' not endl
|
||||
std::cout << "result: " << value << '\n';
|
||||
```
|
||||
|
||||
## Enumerations (Enum.*)
|
||||
|
||||
### Key Rules
|
||||
|
||||
| Rule | Summary |
|
||||
|------|---------|
|
||||
| **Enum.1** | Prefer enumerations over macros |
|
||||
| **Enum.3** | Prefer `enum class` over plain `enum` |
|
||||
| **Enum.5** | Don't use ALL_CAPS for enumerators |
|
||||
| **Enum.6** | Avoid unnamed enumerations |
|
||||
|
||||
```cpp
|
||||
// Enum.3 + Enum.5: Scoped enum, no ALL_CAPS
|
||||
enum class Color { red, green, blue };
|
||||
enum class LogLevel { debug, info, warning, error };
|
||||
|
||||
// BAD: plain enum leaks names, ALL_CAPS clashes with macros
|
||||
enum { RED, GREEN, BLUE }; // Enum.3 + Enum.5 + Enum.6 violation
|
||||
#define MAX_SIZE 100 // Enum.1 violation -- use constexpr
|
||||
```
|
||||
|
||||
## Source Files & Naming (SF.*, NL.*)
|
||||
|
||||
### Key Rules
|
||||
|
||||
| Rule | Summary |
|
||||
|------|---------|
|
||||
| **SF.1** | Use `.cpp` for code files and `.h` for interface files |
|
||||
| **SF.7** | Don't write `using namespace` at global scope in a header |
|
||||
| **SF.8** | Use `#include` guards for all `.h` files |
|
||||
| **SF.11** | Header files should be self-contained |
|
||||
| **NL.5** | Avoid encoding type information in names (no Hungarian notation) |
|
||||
| **NL.8** | Use a consistent naming style |
|
||||
| **NL.9** | Use ALL_CAPS for macro names only |
|
||||
| **NL.10** | Prefer `underscore_style` names |
|
||||
|
||||
### Header Guard
|
||||
|
||||
```cpp
|
||||
// SF.8: Include guard (or #pragma once)
|
||||
#ifndef PROJECT_MODULE_WIDGET_H
|
||||
#define PROJECT_MODULE_WIDGET_H
|
||||
|
||||
// SF.11: Self-contained -- include everything this header needs
|
||||
#include <string>
|
||||
#include <vector>
|
||||
|
||||
namespace project::module {
|
||||
|
||||
class Widget {
|
||||
public:
|
||||
explicit Widget(std::string name);
|
||||
const std::string& name() const;
|
||||
|
||||
private:
|
||||
std::string name_;
|
||||
};
|
||||
|
||||
} // namespace project::module
|
||||
|
||||
#endif // PROJECT_MODULE_WIDGET_H
|
||||
```
|
||||
|
||||
### Naming Conventions
|
||||
|
||||
```cpp
|
||||
// NL.8 + NL.10: Consistent underscore_style
|
||||
namespace my_project {
|
||||
|
||||
constexpr int max_buffer_size = 4096; // NL.9: not ALL_CAPS (it's not a macro)
|
||||
|
||||
class tcp_connection { // underscore_style class
|
||||
public:
|
||||
void send_message(std::string_view msg);
|
||||
bool is_connected() const;
|
||||
|
||||
private:
|
||||
std::string host_; // trailing underscore for members
|
||||
int port_;
|
||||
};
|
||||
|
||||
} // namespace my_project
|
||||
```
|
||||
|
||||
### Anti-Patterns
|
||||
|
||||
- `using namespace std;` in a header at global scope (SF.7)
|
||||
- Headers that depend on inclusion order (SF.10, SF.11)
|
||||
- Hungarian notation like `strName`, `iCount` (NL.5)
|
||||
- ALL_CAPS for anything other than macros (NL.9)
|
||||
|
||||
## Performance (Per.*)
|
||||
|
||||
### Key Rules
|
||||
|
||||
| Rule | Summary |
|
||||
|------|---------|
|
||||
| **Per.1** | Don't optimize without reason |
|
||||
| **Per.2** | Don't optimize prematurely |
|
||||
| **Per.6** | Don't make claims about performance without measurements |
|
||||
| **Per.7** | Design to enable optimization |
|
||||
| **Per.10** | Rely on the static type system |
|
||||
| **Per.11** | Move computation from run time to compile time |
|
||||
| **Per.19** | Access memory predictably |
|
||||
|
||||
### Guidelines
|
||||
|
||||
```cpp
|
||||
// Per.11: Compile-time computation where possible
|
||||
constexpr auto lookup_table = [] {
|
||||
std::array<int, 256> table{};
|
||||
for (int i = 0; i < 256; ++i) {
|
||||
table[i] = i * i;
|
||||
}
|
||||
return table;
|
||||
}();
|
||||
|
||||
// Per.19: Prefer contiguous data for cache-friendliness
|
||||
std::vector<Point> points; // GOOD: contiguous
|
||||
std::vector<std::unique_ptr<Point>> indirect_points; // BAD: pointer chasing
|
||||
```
|
||||
|
||||
### Anti-Patterns
|
||||
|
||||
- Optimizing without profiling data (Per.1, Per.6)
|
||||
- Choosing "clever" low-level code over clear abstractions (Per.4, Per.5)
|
||||
- Ignoring data layout and cache behavior (Per.19)
|
||||
|
||||
## Quick Reference Checklist
|
||||
|
||||
Before marking C++ work complete:
|
||||
|
||||
- [ ] No raw `new`/`delete` -- use smart pointers or RAII (R.11)
|
||||
- [ ] Objects initialized at declaration (ES.20)
|
||||
- [ ] Variables are `const`/`constexpr` by default (Con.1, ES.25)
|
||||
- [ ] Member functions are `const` where possible (Con.2)
|
||||
- [ ] `enum class` instead of plain `enum` (Enum.3)
|
||||
- [ ] `nullptr` instead of `0`/`NULL` (ES.47)
|
||||
- [ ] No narrowing conversions (ES.46)
|
||||
- [ ] No C-style casts (ES.48)
|
||||
- [ ] Single-argument constructors are `explicit` (C.46)
|
||||
- [ ] Rule of Zero or Rule of Five applied (C.20, C.21)
|
||||
- [ ] Base class destructors are public virtual or protected non-virtual (C.35)
|
||||
- [ ] Templates are constrained with concepts (T.10)
|
||||
- [ ] No `using namespace` in headers at global scope (SF.7)
|
||||
- [ ] Headers have include guards and are self-contained (SF.8, SF.11)
|
||||
- [ ] Locks use RAII (`scoped_lock`/`lock_guard`) (CP.20)
|
||||
- [ ] Exceptions are custom types, thrown by value, caught by reference (E.14, E.15)
|
||||
- [ ] `'\n'` instead of `std::endl` (SL.io.50)
|
||||
- [ ] No magic numbers (ES.45)
|
||||
219
skills/regex-vs-llm-structured-text/SKILL.md
Normal file
219
skills/regex-vs-llm-structured-text/SKILL.md
Normal file
@@ -0,0 +1,219 @@
|
||||
---
|
||||
name: regex-vs-llm-structured-text
|
||||
description: Decision framework for choosing between regex and LLM when parsing structured text — start with regex, add LLM only for low-confidence edge cases.
|
||||
---
|
||||
|
||||
# Regex vs LLM for Structured Text Parsing
|
||||
|
||||
A practical decision framework for parsing structured text (quizzes, forms, invoices, documents). The key insight: regex handles 95-98% of cases cheaply and deterministically. Reserve expensive LLM calls for the remaining edge cases.
|
||||
|
||||
## When to Activate
|
||||
|
||||
- Parsing structured text with repeating patterns (questions, forms, tables)
|
||||
- Deciding between regex and LLM for text extraction
|
||||
- Building hybrid pipelines that combine both approaches
|
||||
- Optimizing cost/accuracy tradeoffs in text processing
|
||||
|
||||
## Decision Framework
|
||||
|
||||
```
|
||||
Is the text format consistent and repeating?
|
||||
├── Yes (>90% follows a pattern) → Start with Regex
|
||||
│ ├── Regex handles 95%+ → Done, no LLM needed
|
||||
│ └── Regex handles <95% → Add LLM for edge cases only
|
||||
└── No (free-form, highly variable) → Use LLM directly
|
||||
```
|
||||
|
||||
## Architecture Pattern
|
||||
|
||||
```
|
||||
Source Text
|
||||
│
|
||||
▼
|
||||
[Regex Parser] ─── Extracts structure (95-98% accuracy)
|
||||
│
|
||||
▼
|
||||
[Text Cleaner] ─── Removes noise (markers, page numbers, artifacts)
|
||||
│
|
||||
▼
|
||||
[Confidence Scorer] ─── Flags low-confidence extractions
|
||||
│
|
||||
├── High confidence (≥0.95) → Direct output
|
||||
│
|
||||
└── Low confidence (<0.95) → [LLM Validator] → Output
|
||||
```
|
||||
|
||||
## Implementation
|
||||
|
||||
### 1. Regex Parser (Handles the Majority)
|
||||
|
||||
```python
|
||||
import re
|
||||
from dataclasses import dataclass
|
||||
|
||||
@dataclass(frozen=True)
|
||||
class ParsedItem:
|
||||
id: str
|
||||
text: str
|
||||
choices: tuple[str, ...]
|
||||
answer: str
|
||||
confidence: float = 1.0
|
||||
|
||||
def parse_structured_text(content: str) -> list[ParsedItem]:
|
||||
"""Parse structured text using regex patterns."""
|
||||
pattern = re.compile(
|
||||
r"(?P<id>\d+)\.\s*(?P<text>.+?)\n"
|
||||
r"(?P<choices>(?:[A-D]\..+?\n)+)"
|
||||
r"Answer:\s*(?P<answer>[A-D])",
|
||||
re.MULTILINE | re.DOTALL,
|
||||
)
|
||||
items = []
|
||||
for match in pattern.finditer(content):
|
||||
choices = tuple(
|
||||
c.strip() for c in re.findall(r"[A-D]\.\s*(.+)", match.group("choices"))
|
||||
)
|
||||
items.append(ParsedItem(
|
||||
id=match.group("id"),
|
||||
text=match.group("text").strip(),
|
||||
choices=choices,
|
||||
answer=match.group("answer"),
|
||||
))
|
||||
return items
|
||||
```
|
||||
|
||||
### 2. Confidence Scoring
|
||||
|
||||
Flag items that may need LLM review:
|
||||
|
||||
```python
|
||||
@dataclass(frozen=True)
|
||||
class ConfidenceFlag:
|
||||
item_id: str
|
||||
score: float
|
||||
reasons: tuple[str, ...]
|
||||
|
||||
def score_confidence(item: ParsedItem) -> ConfidenceFlag:
|
||||
"""Score extraction confidence and flag issues."""
|
||||
reasons = []
|
||||
score = 1.0
|
||||
|
||||
if len(item.choices) < 3:
|
||||
reasons.append("few_choices")
|
||||
score -= 0.3
|
||||
|
||||
if not item.answer:
|
||||
reasons.append("missing_answer")
|
||||
score -= 0.5
|
||||
|
||||
if len(item.text) < 10:
|
||||
reasons.append("short_text")
|
||||
score -= 0.2
|
||||
|
||||
return ConfidenceFlag(
|
||||
item_id=item.id,
|
||||
score=max(0.0, score),
|
||||
reasons=tuple(reasons),
|
||||
)
|
||||
|
||||
def identify_low_confidence(
|
||||
items: list[ParsedItem],
|
||||
threshold: float = 0.95,
|
||||
) -> list[ConfidenceFlag]:
|
||||
"""Return items below confidence threshold."""
|
||||
flags = [score_confidence(item) for item in items]
|
||||
return [f for f in flags if f.score < threshold]
|
||||
```
|
||||
|
||||
### 3. LLM Validator (Edge Cases Only)
|
||||
|
||||
```python
|
||||
def validate_with_llm(
|
||||
item: ParsedItem,
|
||||
original_text: str,
|
||||
client,
|
||||
) -> ParsedItem:
|
||||
"""Use LLM to fix low-confidence extractions."""
|
||||
response = client.messages.create(
|
||||
model="claude-haiku-4-5-20251001", # Cheapest model for validation
|
||||
max_tokens=500,
|
||||
messages=[{
|
||||
"role": "user",
|
||||
"content": (
|
||||
f"Extract the question, choices, and answer from this text.\n\n"
|
||||
f"Text: {original_text}\n\n"
|
||||
f"Current extraction: {item}\n\n"
|
||||
f"Return corrected JSON if needed, or 'CORRECT' if accurate."
|
||||
),
|
||||
}],
|
||||
)
|
||||
# Parse LLM response and return corrected item...
|
||||
return corrected_item
|
||||
```
|
||||
|
||||
### 4. Hybrid Pipeline
|
||||
|
||||
```python
|
||||
def process_document(
|
||||
content: str,
|
||||
*,
|
||||
llm_client=None,
|
||||
confidence_threshold: float = 0.95,
|
||||
) -> list[ParsedItem]:
|
||||
"""Full pipeline: regex -> confidence check -> LLM for edge cases."""
|
||||
# Step 1: Regex extraction (handles 95-98%)
|
||||
items = parse_structured_text(content)
|
||||
|
||||
# Step 2: Confidence scoring
|
||||
low_confidence = identify_low_confidence(items, confidence_threshold)
|
||||
|
||||
if not low_confidence or llm_client is None:
|
||||
return items
|
||||
|
||||
# Step 3: LLM validation (only for flagged items)
|
||||
low_conf_ids = {f.item_id for f in low_confidence}
|
||||
result = []
|
||||
for item in items:
|
||||
if item.id in low_conf_ids:
|
||||
result.append(validate_with_llm(item, content, llm_client))
|
||||
else:
|
||||
result.append(item)
|
||||
|
||||
return result
|
||||
```
|
||||
|
||||
## Real-World Metrics
|
||||
|
||||
From a production quiz parsing pipeline (410 items):
|
||||
|
||||
| Metric | Value |
|
||||
|--------|-------|
|
||||
| Regex success rate | 98.0% |
|
||||
| Low confidence items | 8 (2.0%) |
|
||||
| LLM calls needed | ~5 |
|
||||
| Cost savings vs all-LLM | ~95% |
|
||||
| Test coverage | 93% |
|
||||
|
||||
## Best Practices
|
||||
|
||||
- **Start with regex** — even imperfect regex gives you a baseline to improve
|
||||
- **Use confidence scoring** to programmatically identify what needs LLM help
|
||||
- **Use the cheapest LLM** for validation (Haiku-class models are sufficient)
|
||||
- **Never mutate** parsed items — return new instances from cleaning/validation steps
|
||||
- **TDD works well** for parsers — write tests for known patterns first, then edge cases
|
||||
- **Log metrics** (regex success rate, LLM call count) to track pipeline health
|
||||
|
||||
## Anti-Patterns to Avoid
|
||||
|
||||
- Sending all text to an LLM when regex handles 95%+ of cases (expensive and slow)
|
||||
- Using regex for free-form, highly variable text (LLM is better here)
|
||||
- Skipping confidence scoring and hoping regex "just works"
|
||||
- Mutating parsed objects during cleaning/validation steps
|
||||
- Not testing edge cases (malformed input, missing fields, encoding issues)
|
||||
|
||||
## When to Use
|
||||
|
||||
- Quiz/exam question parsing
|
||||
- Form data extraction
|
||||
- Invoice/receipt processing
|
||||
- Document structure parsing (headers, sections, tables)
|
||||
- Any structured text with repeating patterns where cost matters
|
||||
159
skills/search-first/SKILL.md
Normal file
159
skills/search-first/SKILL.md
Normal file
@@ -0,0 +1,159 @@
|
||||
---
|
||||
name: search-first
|
||||
description: Research-before-coding workflow. Search for existing tools, libraries, and patterns before writing custom code. Invokes the researcher agent.
|
||||
---
|
||||
|
||||
# /search-first — Research Before You Code
|
||||
|
||||
Systematizes the "search for existing solutions before implementing" workflow.
|
||||
|
||||
## Trigger
|
||||
|
||||
Use this skill when:
|
||||
- Starting a new feature that likely has existing solutions
|
||||
- Adding a dependency or integration
|
||||
- The user asks "add X functionality" and you're about to write code
|
||||
- Before creating a new utility, helper, or abstraction
|
||||
|
||||
## Workflow
|
||||
|
||||
```
|
||||
┌─────────────────────────────────────────────┐
|
||||
│ 1. NEED ANALYSIS │
|
||||
│ Define what functionality is needed │
|
||||
│ Identify language/framework constraints │
|
||||
├─────────────────────────────────────────────┤
|
||||
│ 2. PARALLEL SEARCH (researcher agent) │
|
||||
│ ┌──────────┐ ┌──────────┐ ┌──────────┐ │
|
||||
│ │ npm / │ │ MCP / │ │ GitHub / │ │
|
||||
│ │ PyPI │ │ Skills │ │ Web │ │
|
||||
│ └──────────┘ └──────────┘ └──────────┘ │
|
||||
├─────────────────────────────────────────────┤
|
||||
│ 3. EVALUATE │
|
||||
│ Score candidates (functionality, maint, │
|
||||
│ community, docs, license, deps) │
|
||||
├─────────────────────────────────────────────┤
|
||||
│ 4. DECIDE │
|
||||
│ ┌─────────┐ ┌──────────┐ ┌─────────┐ │
|
||||
│ │ Adopt │ │ Extend │ │ Build │ │
|
||||
│ │ as-is │ │ /Wrap │ │ Custom │ │
|
||||
│ └─────────┘ └──────────┘ └─────────┘ │
|
||||
├─────────────────────────────────────────────┤
|
||||
│ 5. IMPLEMENT │
|
||||
│ Install package / Configure MCP / │
|
||||
│ Write minimal custom code │
|
||||
└─────────────────────────────────────────────┘
|
||||
```
|
||||
|
||||
## Decision Matrix
|
||||
|
||||
| Signal | Action |
|
||||
|--------|--------|
|
||||
| Exact match, well-maintained, MIT/Apache | **Adopt** — install and use directly |
|
||||
| Partial match, good foundation | **Extend** — install + write thin wrapper |
|
||||
| Multiple weak matches | **Compose** — combine 2-3 small packages |
|
||||
| Nothing suitable found | **Build** — write custom, but informed by research |
|
||||
|
||||
## How to Use
|
||||
|
||||
### Quick Mode (inline)
|
||||
|
||||
Before writing a utility or adding functionality, mentally run through:
|
||||
|
||||
1. Is this a common problem? → Search npm/PyPI
|
||||
2. Is there an MCP for this? → Check `~/.claude/settings.json` and search
|
||||
3. Is there a skill for this? → Check `~/.claude/skills/`
|
||||
4. Is there a GitHub template? → Search GitHub
|
||||
|
||||
### Full Mode (agent)
|
||||
|
||||
For non-trivial functionality, launch the researcher agent:
|
||||
|
||||
```
|
||||
Task(subagent_type="general-purpose", prompt="
|
||||
Research existing tools for: [DESCRIPTION]
|
||||
Language/framework: [LANG]
|
||||
Constraints: [ANY]
|
||||
|
||||
Search: npm/PyPI, MCP servers, Claude Code skills, GitHub
|
||||
Return: Structured comparison with recommendation
|
||||
")
|
||||
```
|
||||
|
||||
## Search Shortcuts by Category
|
||||
|
||||
### Development Tooling
|
||||
- Linting → `eslint`, `ruff`, `textlint`, `markdownlint`
|
||||
- Formatting → `prettier`, `black`, `gofmt`
|
||||
- Testing → `jest`, `pytest`, `go test`
|
||||
- Pre-commit → `husky`, `lint-staged`, `pre-commit`
|
||||
|
||||
### AI/LLM Integration
|
||||
- Claude SDK → Context7 for latest docs
|
||||
- Prompt management → Check MCP servers
|
||||
- Document processing → `unstructured`, `pdfplumber`, `mammoth`
|
||||
|
||||
### Data & APIs
|
||||
- HTTP clients → `httpx` (Python), `ky`/`got` (Node)
|
||||
- Validation → `zod` (TS), `pydantic` (Python)
|
||||
- Database → Check for MCP servers first
|
||||
|
||||
### Content & Publishing
|
||||
- Markdown processing → `remark`, `unified`, `markdown-it`
|
||||
- Image optimization → `sharp`, `imagemin`
|
||||
|
||||
## Integration Points
|
||||
|
||||
### With planner agent
|
||||
The planner should invoke researcher before Phase 1 (Architecture Review):
|
||||
- Researcher identifies available tools
|
||||
- Planner incorporates them into the implementation plan
|
||||
- Avoids "reinventing the wheel" in the plan
|
||||
|
||||
### With architect agent
|
||||
The architect should consult researcher for:
|
||||
- Technology stack decisions
|
||||
- Integration pattern discovery
|
||||
- Existing reference architectures
|
||||
|
||||
### With iterative-retrieval skill
|
||||
Combine for progressive discovery:
|
||||
- Cycle 1: Broad search (npm, PyPI, MCP)
|
||||
- Cycle 2: Evaluate top candidates in detail
|
||||
- Cycle 3: Test compatibility with project constraints
|
||||
|
||||
## Examples
|
||||
|
||||
### Example 1: "Add dead link checking"
|
||||
```
|
||||
Need: Check markdown files for broken links
|
||||
Search: npm "markdown dead link checker"
|
||||
Found: textlint-rule-no-dead-link (score: 9/10)
|
||||
Action: ADOPT — npm install textlint-rule-no-dead-link
|
||||
Result: Zero custom code, battle-tested solution
|
||||
```
|
||||
|
||||
### Example 2: "Add HTTP client wrapper"
|
||||
```
|
||||
Need: Resilient HTTP client with retries and timeout handling
|
||||
Search: npm "http client retry", PyPI "httpx retry"
|
||||
Found: got (Node) with retry plugin, httpx (Python) with built-in retry
|
||||
Action: ADOPT — use got/httpx directly with retry config
|
||||
Result: Zero custom code, production-proven libraries
|
||||
```
|
||||
|
||||
### Example 3: "Add config file linter"
|
||||
```
|
||||
Need: Validate project config files against a schema
|
||||
Search: npm "config linter schema", "json schema validator cli"
|
||||
Found: ajv-cli (score: 8/10)
|
||||
Action: ADOPT + EXTEND — install ajv-cli, write project-specific schema
|
||||
Result: 1 package + 1 schema file, no custom validation logic
|
||||
```
|
||||
|
||||
## Anti-Patterns
|
||||
|
||||
- **Jumping to code**: Writing a utility without checking if one exists
|
||||
- **Ignoring MCP**: Not checking if an MCP server already provides the capability
|
||||
- **Over-customizing**: Wrapping a library so heavily it loses its benefits
|
||||
- **Dependency bloat**: Installing a massive package for one small feature
|
||||
175
skills/skill-stocktake/SKILL.md
Normal file
175
skills/skill-stocktake/SKILL.md
Normal file
@@ -0,0 +1,175 @@
|
||||
---
|
||||
name: skill-stocktake
description: "Use when auditing Claude skills and commands for quality. Supports Quick Scan (changed skills only) and Full Stocktake modes with sequential subagent batch evaluation."
|
||||
---
|
||||
|
||||
# skill-stocktake
|
||||
|
||||
Slash command (`/skill-stocktake`) that audits all Claude skills and commands using a quality checklist + AI holistic judgment. Supports two modes: Quick Scan for recently changed skills, and Full Stocktake for a complete review.
|
||||
|
||||
## Scope
|
||||
|
||||
The command targets the following paths (the project-level path is resolved **relative to the directory where it is invoked**; the global path is always absolute):
|
||||
|
||||
| Path | Description |
|
||||
|------|-------------|
|
||||
| `~/.claude/skills/` | Global skills (all projects) |
|
||||
| `{cwd}/.claude/skills/` | Project-level skills (if the directory exists) |
|
||||
|
||||
**At the start of Phase 1, the command explicitly lists which paths were found and scanned.**
|
||||
|
||||
### Targeting a specific project
|
||||
|
||||
To include project-level skills, run from that project's root directory:
|
||||
|
||||
```bash
|
||||
cd ~/path/to/my-project
|
||||
/skill-stocktake
|
||||
```
|
||||
|
||||
If the project has no `.claude/skills/` directory, only global skills and commands are evaluated.
|
||||
|
||||
## Modes
|
||||
|
||||
| Mode | Trigger | Duration |
|
||||
|------|---------|---------|
|
||||
| Quick Scan | `results.json` exists (default) | 5–10 min |
|
||||
| Full Stocktake | `results.json` absent, or `/skill-stocktake full` | 20–30 min |
|
||||
|
||||
**Results cache:** `~/.claude/skills/skill-stocktake/results.json`
|
||||
|
||||
## Quick Scan Flow
|
||||
|
||||
Re-evaluate only skills that have changed since the last run (5–10 min).
|
||||
|
||||
1. Read `~/.claude/skills/skill-stocktake/results.json`
|
||||
2. Run: `bash ~/.claude/skills/skill-stocktake/scripts/quick-diff.sh \
|
||||
~/.claude/skills/skill-stocktake/results.json`
|
||||
(Project dir is auto-detected from `$PWD/.claude/skills`; pass it explicitly only if needed)
|
||||
3. If output is `[]`: report "No changes since last run." and stop
|
||||
4. Re-evaluate only those changed files using the same Phase 2 criteria
|
||||
5. Carry forward unchanged skills from previous results
|
||||
6. Output only the diff
|
||||
7. Run: `bash ~/.claude/skills/skill-stocktake/scripts/save-results.sh \
|
||||
~/.claude/skills/skill-stocktake/results.json <<< "$EVAL_RESULTS"`
|
||||
|
||||
## Full Stocktake Flow
|
||||
|
||||
### Phase 1 — Inventory
|
||||
|
||||
Run: `bash ~/.claude/skills/skill-stocktake/scripts/scan.sh`
|
||||
|
||||
The script enumerates skill files, extracts frontmatter, and collects UTC mtimes.
|
||||
Project dir is auto-detected from `$PWD/.claude/skills`; pass it explicitly only if needed.
|
||||
Present the scan summary and inventory table from the script output:
|
||||
|
||||
```
|
||||
Scanning:
|
||||
✓ ~/.claude/skills/ (17 files)
|
||||
✗ {cwd}/.claude/skills/ (not found — global skills only)
|
||||
```
|
||||
|
||||
| Skill | 7d use | 30d use | Description |
|
||||
|-------|--------|---------|-------------|
|
||||
|
||||
### Phase 2 — Quality Evaluation
|
||||
|
||||
Launch a Task tool subagent (**Explore agent, model: opus**) with the full inventory and checklist.
|
||||
The subagent reads each skill, applies the checklist, and returns per-skill JSON:
|
||||
|
||||
`{ "verdict": "Keep"|"Improve"|"Update"|"Retire"|"Merge into [X]", "reason": "..." }`
|
||||
|
||||
**Chunk guidance:** Process ~20 skills per subagent invocation to keep context manageable. Save intermediate results to `results.json` (`status: "in_progress"`) after each chunk.
|
||||
|
||||
After all skills are evaluated: set `status: "completed"`, proceed to Phase 3.
|
||||
|
||||
**Resume detection:** If `status: "in_progress"` is found on startup, resume from the first unevaluated skill.
|
||||
|
||||
Each skill is evaluated against this checklist:
|
||||
|
||||
```
|
||||
- [ ] Content overlap with other skills checked
|
||||
- [ ] Overlap with MEMORY.md / CLAUDE.md checked
|
||||
- [ ] Freshness of technical references verified (use WebSearch if tool names / CLI flags / APIs are present)
|
||||
- [ ] Usage frequency considered
|
||||
```
|
||||
|
||||
Verdict criteria:
|
||||
|
||||
| Verdict | Meaning |
|
||||
|---------|---------|
|
||||
| Keep | Useful and current |
|
||||
| Improve | Worth keeping, but specific improvements needed |
|
||||
| Update | Referenced technology is outdated (verify with WebSearch) |
|
||||
| Retire | Low quality, stale, or cost-asymmetric |
|
||||
| Merge into [X] | Substantial overlap with another skill; name the merge target |
|
||||
|
||||
Evaluation is **holistic AI judgment** — not a numeric rubric. Guiding dimensions:
|
||||
- **Actionability**: code examples, commands, or steps that let you act immediately
|
||||
- **Scope fit**: name, trigger, and content are aligned; not too broad or narrow
|
||||
- **Uniqueness**: value not replaceable by MEMORY.md / CLAUDE.md / another skill
|
||||
- **Currency**: technical references work in the current environment
|
||||
|
||||
**Reason quality requirements** — the `reason` field must be self-contained and decision-enabling:
|
||||
- Do NOT write "unchanged" alone — always restate the core evidence
|
||||
- For **Retire**: state (1) what specific defect was found, (2) what covers the same need instead
|
||||
- Bad: `"Superseded"`
|
||||
- Good: `"disable-model-invocation: true already set; superseded by continuous-learning-v2 which covers all the same patterns plus confidence scoring. No unique content remains."`
|
||||
- For **Merge**: name the target and describe what content to integrate
|
||||
- Bad: `"Overlaps with X"`
|
||||
- Good: `"42-line thin content; Step 4 of chatlog-to-article already covers the same workflow. Integrate the 'article angle' tip as a note in that skill."`
|
||||
- For **Improve**: describe the specific change needed (what section, what action, target size if relevant)
|
||||
- Bad: `"Too long"`
|
||||
- Good: `"276 lines; Section 'Framework Comparison' (L80–140) duplicates ai-era-architecture-principles; delete it to reach ~150 lines."`
|
||||
- For **Keep** (mtime-only change in Quick Scan): restate the original verdict rationale, do not write "unchanged"
|
||||
- Bad: `"Unchanged"`
|
||||
- Good: `"mtime updated but content unchanged. Unique Python reference explicitly imported by rules/python/; no overlap found."`
|
||||
|
||||
### Phase 3 — Summary Table
|
||||
|
||||
| Skill | 7d use | Verdict | Reason |
|
||||
|-------|--------|---------|--------|
|
||||
|
||||
### Phase 4 — Consolidation
|
||||
|
||||
1. **Retire / Merge**: present detailed justification per file before confirming with user:
|
||||
- What specific problem was found (overlap, staleness, broken references, etc.)
|
||||
- What alternative covers the same functionality (for Retire: which existing skill/rule; for Merge: the target file and what content to integrate)
|
||||
- Impact of removal (any dependent skills, MEMORY.md references, or workflows affected)
|
||||
2. **Improve**: present specific improvement suggestions with rationale:
|
||||
- What to change and why (e.g., "trim 430→200 lines because sections X/Y duplicate python-patterns")
|
||||
- User decides whether to act
|
||||
3. **Update**: present updated content with sources checked
|
||||
4. Check MEMORY.md line count; propose compression if >100 lines
|
||||
|
||||
## Results File Schema
|
||||
|
||||
`~/.claude/skills/skill-stocktake/results.json`:
|
||||
|
||||
**`evaluated_at`**: Must be set to the actual UTC time of evaluation completion.
|
||||
Obtain via Bash: `date -u +%Y-%m-%dT%H:%M:%SZ`. Never use a date-only approximation like `T00:00:00Z`.
|
||||
|
||||
```json
|
||||
{
|
||||
"evaluated_at": "2026-02-21T10:00:00Z",
|
||||
"mode": "full",
|
||||
"batch_progress": {
|
||||
"total": 80,
|
||||
"evaluated": 80,
|
||||
"status": "completed"
|
||||
},
|
||||
"skills": {
|
||||
"skill-name": {
|
||||
"path": "~/.claude/skills/skill-name/SKILL.md",
|
||||
"verdict": "Keep",
|
||||
"reason": "Concrete, actionable, unique value for X workflow",
|
||||
"mtime": "2026-01-15T08:30:00Z"
|
||||
}
|
||||
}
|
||||
}
|
||||
```
|
||||
|
||||
## Notes
|
||||
|
||||
- Evaluation is blind: the same checklist applies to all skills regardless of origin (ECC, self-authored, auto-extracted)
|
||||
- Archive / delete operations always require explicit user confirmation
|
||||
- No verdict branching by skill origin
|
||||
87
skills/skill-stocktake/scripts/quick-diff.sh
Executable file
87
skills/skill-stocktake/scripts/quick-diff.sh
Executable file
@@ -0,0 +1,87 @@
|
||||
#!/usr/bin/env bash
# quick-diff.sh — compare skill file mtimes against results.json evaluated_at
# Usage: quick-diff.sh RESULTS_JSON [CWD_SKILLS_DIR]
# Output: JSON array of changed/new files to stdout (empty [] if no changes)
#
# When CWD_SKILLS_DIR is omitted, defaults to $PWD/.claude/skills so the
# script always picks up project-level skills without relying on the caller.
#
# Environment:
#   SKILL_STOCKTAKE_GLOBAL_DIR   Override ~/.claude/skills (for testing only;
#                                do not set in production — intended for bats tests)
#   SKILL_STOCKTAKE_PROJECT_DIR  Override project dir detection (for testing only)

set -euo pipefail

RESULTS_JSON="${1:-}"
CWD_SKILLS_DIR="${SKILL_STOCKTAKE_PROJECT_DIR:-${2:-$PWD/.claude/skills}}"
GLOBAL_DIR="${SKILL_STOCKTAKE_GLOBAL_DIR:-$HOME/.claude/skills}"

if [[ -z "$RESULTS_JSON" || ! -f "$RESULTS_JSON" ]]; then
  echo "Error: RESULTS_JSON not found: ${RESULTS_JSON:-<empty>}" >&2
  exit 1
fi

# Validate CWD_SKILLS_DIR looks like a .claude/skills path (defense-in-depth).
# Only warn when the path exists — a nonexistent path poses no traversal risk.
if [[ -n "$CWD_SKILLS_DIR" && -d "$CWD_SKILLS_DIR" && "$CWD_SKILLS_DIR" != */.claude/skills* ]]; then
  echo "Warning: CWD_SKILLS_DIR does not look like a .claude/skills path: $CWD_SKILLS_DIR" >&2
fi

evaluated_at=$(jq -r '.evaluated_at' "$RESULTS_JSON")

# Fail fast on a missing or malformed evaluated_at rather than producing
# unpredictable results from ISO 8601 string comparison against "null".
if [[ ! "$evaluated_at" =~ ^[0-9]{4}-[0-9]{2}-[0-9]{2}T[0-9]{2}:[0-9]{2}:[0-9]{2}Z$ ]]; then
  echo "Error: invalid or missing evaluated_at in $RESULTS_JSON: $evaluated_at" >&2
  exit 1
fi

# Pre-extract known paths from results.json once (O(1) lookup per file instead of O(n*m)).
# "(.skills // {})" keeps this safe when .skills is absent or null: the original
# '.skills[].path' made jq exit nonzero in that case, which aborts the whole
# script under `set -e` (with the error message hidden by 2>/dev/null).
known_paths=$(jq -r '(.skills // {})[].path' "$RESULTS_JSON" 2>/dev/null)

tmpdir=$(mktemp -d)
# Use a function to avoid embedding $tmpdir in a quoted string (prevents injection
# if TMPDIR were crafted to contain shell metacharacters).
_cleanup() { rm -rf "$tmpdir"; }
trap _cleanup EXIT

# Shared counter across process_dir calls — intentionally NOT local
i=0

process_dir() {
  local dir="$1"
  while IFS= read -r file; do
    local mtime dp is_new
    mtime=$(date -u -r "$file" +%Y-%m-%dT%H:%M:%SZ)
    dp="${file/#$HOME/~}"

    # Check if this file is known to results.json (exact whole-line match to
    # avoid substring false-positives, e.g. "python-patterns" matching "python-patterns-v2").
    if echo "$known_paths" | grep -qxF "$dp"; then
      is_new="false"
      # Known file: only emit if mtime changed (ISO 8601 string comparison is safe)
      [[ "$mtime" > "$evaluated_at" ]] || continue
    else
      is_new="true"
      # New file: always emit regardless of mtime
    fi

    # Zero-pad the per-file temp names so the final "$tmpdir"/*.json glob
    # expands in emission order — a bare "$i.json" sorts lexicographically
    # ("10.json" before "2.json"), scrambling the output at >= 10 entries.
    jq -n \
      --arg path "$dp" \
      --arg mtime "$mtime" \
      --argjson is_new "$is_new" \
      '{path:$path,mtime:$mtime,is_new:$is_new}' \
      > "$tmpdir/$(printf '%06d' "$i").json"
    i=$((i+1))
  done < <(find "$dir" -name "*.md" -type f 2>/dev/null | sort)
}

[[ -d "$GLOBAL_DIR" ]] && process_dir "$GLOBAL_DIR"
[[ -n "$CWD_SKILLS_DIR" && -d "$CWD_SKILLS_DIR" ]] && process_dir "$CWD_SKILLS_DIR"

if [[ $i -eq 0 ]]; then
  echo "[]"
else
  jq -s '.' "$tmpdir"/*.json
fi
|
||||
56
skills/skill-stocktake/scripts/save-results.sh
Executable file
56
skills/skill-stocktake/scripts/save-results.sh
Executable file
@@ -0,0 +1,56 @@
|
||||
#!/usr/bin/env bash
# save-results.sh — merge evaluated skills into results.json with correct UTC timestamp
# Usage: save-results.sh RESULTS_JSON <<< "$EVAL_JSON"
#
# stdin format:
#   { "skills": {...}, "mode"?: "full"|"quick", "batch_progress"?: {...} }
#
# evaluated_at is always stamped with the current UTC time via `date -u`.
# stdin .skills entries are merged into the existing results.json with new
# entries taking precedence; .mode and .batch_progress are copied over only
# when stdin actually carries them.

set -euo pipefail

RESULTS_JSON="${1:-}"

if [[ -z "$RESULTS_JSON" ]]; then
  echo "Error: RESULTS_JSON argument required" >&2
  echo "Usage: save-results.sh RESULTS_JSON <<< \"\$EVAL_JSON\"" >&2
  exit 1
fi

# Capture the timestamp once so the bootstrap and merge paths agree.
EVALUATED_AT=$(date -u +%Y-%m-%dT%H:%M:%SZ)

# Slurp stdin up front and refuse to touch the results file unless it parses.
payload=$(cat)
if ! jq empty <<< "$payload" 2>/dev/null; then
  echo "Error: stdin is not valid JSON" >&2
  exit 1
fi

# No existing file: bootstrap results.json straight from stdin + timestamp.
if [[ ! -f "$RESULTS_JSON" ]]; then
  jq --arg ea "$EVALUATED_AT" \
    '. + { evaluated_at: $ea }' <<< "$payload" > "$RESULTS_JSON"
  exit 0
fi

# Merge path. mktemp gives a collision-safe scratch file — a predictable
# ".tmp" suffix would let concurrent runs on the same RESULTS_JSON silently
# clobber each other — and the trap guarantees removal on any exit.
scratch=$(mktemp "${RESULTS_JSON}.XXXXXX")
trap 'rm -f "$scratch"' EXIT

# .skills is the union of old and new (new wins on key collisions); old skills
# absent from stdin are preserved. .mode / .batch_progress update conditionally.
jq -s \
  --arg ea "$EVALUATED_AT" \
  '.[0] as $existing | .[1] as $new |
   $existing |
   .evaluated_at = $ea |
   .skills = ($existing.skills + ($new.skills // {})) |
   if ($new | has("mode")) then .mode = $new.mode else . end |
   if ($new | has("batch_progress")) then .batch_progress = $new.batch_progress else . end' \
  "$RESULTS_JSON" <(printf '%s\n' "$payload") > "$scratch"

mv "$scratch" "$RESULTS_JSON"
|
||||
170
skills/skill-stocktake/scripts/scan.sh
Executable file
170
skills/skill-stocktake/scripts/scan.sh
Executable file
@@ -0,0 +1,170 @@
|
||||
#!/usr/bin/env bash
# scan.sh — enumerate skill files, extract frontmatter and UTC mtime
# Usage: scan.sh [CWD_SKILLS_DIR]
# Output: JSON to stdout
#
# When CWD_SKILLS_DIR is omitted, defaults to $PWD/.claude/skills so the
# script always picks up project-level skills without relying on the caller.
#
# Environment:
#   SKILL_STOCKTAKE_GLOBAL_DIR   Override ~/.claude/skills (for testing only;
#                                do not set in production — intended for bats tests)
#   SKILL_STOCKTAKE_PROJECT_DIR  Override project dir detection (for testing only)

set -euo pipefail

GLOBAL_DIR="${SKILL_STOCKTAKE_GLOBAL_DIR:-$HOME/.claude/skills}"
CWD_SKILLS_DIR="${SKILL_STOCKTAKE_PROJECT_DIR:-${1:-$PWD/.claude/skills}}"
# Path to JSONL file containing tool-use observations (optional; used for usage frequency counts).
# Override via SKILL_STOCKTAKE_OBSERVATIONS env var if your setup uses a different path.
OBSERVATIONS="${SKILL_STOCKTAKE_OBSERVATIONS:-$HOME/.claude/observations.jsonl}"

# Validate CWD_SKILLS_DIR looks like a .claude/skills path (defense-in-depth).
# Only warn when the path exists — a nonexistent path poses no traversal risk.
if [[ -n "$CWD_SKILLS_DIR" && -d "$CWD_SKILLS_DIR" && "$CWD_SKILLS_DIR" != */.claude/skills* ]]; then
  echo "Warning: CWD_SKILLS_DIR does not look like a .claude/skills path: $CWD_SKILLS_DIR" >&2
fi

# Extract a frontmatter field (handles both quoted and unquoted single-line values).
# Does NOT support multi-line YAML blocks (| or >) or nested YAML keys.
extract_field() {
  local file="$1" field="$2"
  awk -v f="$field" '
    BEGIN { fm=0 }
    /^---$/ { fm++; next }
    fm==1 {
      n = length(f) + 2
      if (substr($0, 1, n) == f ": ") {
        val = substr($0, n+1)
        gsub(/^"/, "", val)
        gsub(/"$/, "", val)
        print val
        exit
      }
    }
    fm>=2 { exit }
  ' "$file"
}

# Get UTC timestamp N days ago (supports both macOS and GNU date)
date_ago() {
  local n="$1"
  date -u -v-"${n}d" +%Y-%m-%dT%H:%M:%SZ 2>/dev/null ||
    date -u -d "${n} days ago" +%Y-%m-%dT%H:%M:%SZ
}

# Count observations matching a file path since a cutoff timestamp.
# NOTE(review): currently unused — the main path pre-aggregates counts in
# scan_dir_to_json for O(n+m) jq invocations. Retained for ad-hoc use.
count_obs() {
  local file="$1" cutoff="$2"
  if [[ ! -f "$OBSERVATIONS" ]]; then
    echo 0
    return
  fi
  jq -r --arg p "$file" --arg c "$cutoff" \
    'select(.tool=="Read" and .path==$p and .timestamp>=$c) | 1' \
    "$OBSERVATIONS" 2>/dev/null | wc -l | tr -d ' '
}

# Scan a directory and produce a JSON array of skill objects
scan_dir_to_json() {
  local dir="$1"
  local c7 c30
  c7=$(date_ago 7)
  c30=$(date_ago 30)

  local tmpdir
  tmpdir=$(mktemp -d)
  # Use a function to avoid embedding $tmpdir in a quoted string (prevents injection
  # if TMPDIR were crafted to contain shell metacharacters).
  local _scan_tmpdir="$tmpdir"
  _scan_cleanup() { rm -rf "$_scan_tmpdir"; }
  trap _scan_cleanup RETURN

  # Pre-aggregate observation counts in two passes (one per window) instead of
  # calling jq per-file — reduces from O(n*m) to O(n+m) jq invocations.
  local obs_7d_counts obs_30d_counts
  obs_7d_counts=""
  obs_30d_counts=""
  if [[ -f "$OBSERVATIONS" ]]; then
    obs_7d_counts=$(jq -r --arg c "$c7" \
      'select(.tool=="Read" and .timestamp>=$c) | .path' \
      "$OBSERVATIONS" 2>/dev/null | sort | uniq -c)
    obs_30d_counts=$(jq -r --arg c "$c30" \
      'select(.tool=="Read" and .timestamp>=$c) | .path' \
      "$OBSERVATIONS" 2>/dev/null | sort | uniq -c)
  fi

  local i=0
  while IFS= read -r file; do
    local name desc mtime u7 u30 dp
    name=$(extract_field "$file" "name")
    desc=$(extract_field "$file" "description")
    mtime=$(date -u -r "$file" +%Y-%m-%dT%H:%M:%SZ)
    # Use awk exact field match to avoid substring false-positives from grep -F.
    # uniq -c output format: "   N /path/to/file" — path is always field 2.
    u7=$(echo "$obs_7d_counts" | awk -v f="$file" '$2 == f {print $1}' | head -1)
    u7="${u7:-0}"
    u30=$(echo "$obs_30d_counts" | awk -v f="$file" '$2 == f {print $1}' | head -1)
    u30="${u30:-0}"
    dp="${file/#$HOME/~}"

    # Zero-pad the per-file temp names so the final "$tmpdir"/*.json glob
    # expands in emission order — a bare "$i.json" sorts lexicographically
    # ("10.json" before "2.json"), scrambling the skills array at >= 10 files.
    jq -n \
      --arg path "$dp" \
      --arg name "$name" \
      --arg description "$desc" \
      --arg mtime "$mtime" \
      --argjson use_7d "$u7" \
      --argjson use_30d "$u30" \
      '{path:$path,name:$name,description:$description,use_7d:$use_7d,use_30d:$use_30d,mtime:$mtime}' \
      > "$tmpdir/$(printf '%06d' "$i").json"
    i=$((i+1))
  done < <(find "$dir" -name "*.md" -type f 2>/dev/null | sort)

  if [[ $i -eq 0 ]]; then
    echo "[]"
  else
    jq -s '.' "$tmpdir"/*.json
  fi
}

# --- Main ---
# Both scan_dir_to_json calls run in command substitutions (subshells), so the
# RETURN trap set inside the function cannot leak into the parent shell.

global_found="false"
global_count=0
global_skills="[]"

if [[ -d "$GLOBAL_DIR" ]]; then
  global_found="true"
  global_skills=$(scan_dir_to_json "$GLOBAL_DIR")
  global_count=$(echo "$global_skills" | jq 'length')
fi

project_found="false"
project_path=""
project_count=0
project_skills="[]"

if [[ -n "$CWD_SKILLS_DIR" && -d "$CWD_SKILLS_DIR" ]]; then
  project_found="true"
  project_path="$CWD_SKILLS_DIR"
  project_skills=$(scan_dir_to_json "$CWD_SKILLS_DIR")
  project_count=$(echo "$project_skills" | jq 'length')
fi

# Merge global + project skills into one array
all_skills=$(jq -s 'add' <(echo "$global_skills") <(echo "$project_skills"))

jq -n \
  --arg global_found "$global_found" \
  --argjson global_count "$global_count" \
  --arg project_found "$project_found" \
  --arg project_path "$project_path" \
  --argjson project_count "$project_count" \
  --argjson skills "$all_skills" \
  '{
    scan_summary: {
      global: { found: ($global_found == "true"), count: $global_count },
      project: { found: ($project_found == "true"), path: $project_path, count: $project_count }
    },
    skills: $skills
  }'
|
||||
142
skills/swift-actor-persistence/SKILL.md
Normal file
142
skills/swift-actor-persistence/SKILL.md
Normal file
@@ -0,0 +1,142 @@
|
||||
---
|
||||
name: swift-actor-persistence
|
||||
description: Thread-safe data persistence in Swift using actors — in-memory cache with file-backed storage, eliminating data races by design.
|
||||
---
|
||||
|
||||
# Swift Actors for Thread-Safe Persistence
|
||||
|
||||
Patterns for building thread-safe data persistence layers using Swift actors. Combines in-memory caching with file-backed storage, leveraging the actor model to eliminate data races at compile time.
|
||||
|
||||
## When to Activate
|
||||
|
||||
- Building a data persistence layer in Swift 5.5+
|
||||
- Need thread-safe access to shared mutable state
|
||||
- Want to eliminate manual synchronization (locks, DispatchQueues)
|
||||
- Building offline-first apps with local storage
|
||||
|
||||
## Core Pattern
|
||||
|
||||
### Actor-Based Repository
|
||||
|
||||
The actor model guarantees serialized access — no data races, enforced by the compiler.
|
||||
|
||||
```swift
|
||||
public actor LocalRepository<T: Codable & Identifiable> where T.ID == String {
|
||||
private var cache: [String: T] = [:]
|
||||
private let fileURL: URL
|
||||
|
||||
public init(directory: URL = .documentsDirectory, filename: String = "data.json") {
|
||||
self.fileURL = directory.appendingPathComponent(filename)
|
||||
// Synchronous load during init (actor isolation not yet active)
|
||||
self.cache = Self.loadSynchronously(from: fileURL)
|
||||
}
|
||||
|
||||
// MARK: - Public API
|
||||
|
||||
public func save(_ item: T) throws {
|
||||
cache[item.id] = item
|
||||
try persistToFile()
|
||||
}
|
||||
|
||||
public func delete(_ id: String) throws {
|
||||
cache[id] = nil
|
||||
try persistToFile()
|
||||
}
|
||||
|
||||
public func find(by id: String) -> T? {
|
||||
cache[id]
|
||||
}
|
||||
|
||||
public func loadAll() -> [T] {
|
||||
Array(cache.values)
|
||||
}
|
||||
|
||||
// MARK: - Private
|
||||
|
||||
private func persistToFile() throws {
|
||||
let data = try JSONEncoder().encode(Array(cache.values))
|
||||
try data.write(to: fileURL, options: .atomic)
|
||||
}
|
||||
|
||||
private static func loadSynchronously(from url: URL) -> [String: T] {
|
||||
guard let data = try? Data(contentsOf: url),
|
||||
let items = try? JSONDecoder().decode([T].self, from: data) else {
|
||||
return [:]
|
||||
}
|
||||
return Dictionary(uniqueKeysWithValues: items.map { ($0.id, $0) })
|
||||
}
|
||||
}
|
||||
```
|
||||
|
||||
### Usage
|
||||
|
||||
All calls are automatically async due to actor isolation:
|
||||
|
||||
```swift
|
||||
let repository = LocalRepository<Question>()
|
||||
|
||||
// Read — fast O(1) lookup from in-memory cache
|
||||
let question = await repository.find(by: "q-001")
|
||||
let allQuestions = await repository.loadAll()
|
||||
|
||||
// Write — updates cache and persists to file atomically
|
||||
try await repository.save(newQuestion)
|
||||
try await repository.delete("q-001")
|
||||
```
|
||||
|
||||
### Combining with @Observable ViewModel
|
||||
|
||||
```swift
|
||||
@Observable
|
||||
final class QuestionListViewModel {
|
||||
private(set) var questions: [Question] = []
|
||||
private let repository: LocalRepository<Question>
|
||||
|
||||
init(repository: LocalRepository<Question> = LocalRepository()) {
|
||||
self.repository = repository
|
||||
}
|
||||
|
||||
func load() async {
|
||||
questions = await repository.loadAll()
|
||||
}
|
||||
|
||||
func add(_ question: Question) async throws {
|
||||
try await repository.save(question)
|
||||
questions = await repository.loadAll()
|
||||
}
|
||||
}
|
||||
```
|
||||
|
||||
## Key Design Decisions
|
||||
|
||||
| Decision | Rationale |
|
||||
|----------|-----------|
|
||||
| Actor (not class + lock) | Compiler-enforced thread safety, no manual synchronization |
|
||||
| In-memory cache + file persistence | Fast reads from cache, durable writes to disk |
|
||||
| Synchronous init loading | Avoids async initialization complexity |
|
||||
| Dictionary keyed by ID | O(1) lookups by identifier |
|
||||
| Generic over `Codable & Identifiable` | Reusable across any model type |
|
||||
| Atomic file writes (`.atomic`) | Prevents partial writes on crash |
|
||||
|
||||
## Best Practices
|
||||
|
||||
- **Use `Sendable` types** for all data crossing actor boundaries
|
||||
- **Keep the actor's public API minimal** — only expose domain operations, not persistence details
|
||||
- **Use `.atomic` writes** to prevent data corruption if the app crashes mid-write
|
||||
- **Load synchronously in `init`** — async initializers add complexity with minimal benefit for local files
|
||||
- **Combine with `@Observable`** ViewModels for reactive UI updates
|
||||
|
||||
## Anti-Patterns to Avoid
|
||||
|
||||
- Using `DispatchQueue` or `NSLock` instead of actors for new Swift concurrency code
|
||||
- Exposing the internal cache dictionary to external callers
|
||||
- Making the file URL configurable without validation
|
||||
- Forgetting that all actor method calls are `await` — callers must handle async context
|
||||
- Using `nonisolated` to bypass actor isolation (defeats the purpose)
|
||||
|
||||
## When to Use
|
||||
|
||||
- Local data storage in iOS/macOS apps (user data, settings, cached content)
|
||||
- Offline-first architectures that sync to a server later
|
||||
- Any shared mutable state that multiple parts of the app access concurrently
|
||||
- Replacing legacy `DispatchQueue`-based thread safety with modern Swift concurrency
|
||||
189
skills/swift-protocol-di-testing/SKILL.md
Normal file
189
skills/swift-protocol-di-testing/SKILL.md
Normal file
@@ -0,0 +1,189 @@
|
||||
---
|
||||
name: swift-protocol-di-testing
|
||||
description: Protocol-based dependency injection for testable Swift code — mock file system, network, and external APIs using focused protocols and Swift Testing.
|
||||
---
|
||||
|
||||
# Swift Protocol-Based Dependency Injection for Testing
|
||||
|
||||
Patterns for making Swift code testable by abstracting external dependencies (file system, network, iCloud) behind small, focused protocols. Enables deterministic tests without I/O.
|
||||
|
||||
## When to Activate
|
||||
|
||||
- Writing Swift code that accesses file system, network, or external APIs
|
||||
- Need to test error handling paths without triggering real failures
|
||||
- Building modules that work across environments (app, test, SwiftUI preview)
|
||||
- Designing testable architecture with Swift concurrency (actors, Sendable)
|
||||
|
||||
## Core Pattern
|
||||
|
||||
### 1. Define Small, Focused Protocols
|
||||
|
||||
Each protocol handles exactly one external concern.
|
||||
|
||||
```swift
|
||||
// File system access
|
||||
public protocol FileSystemProviding: Sendable {
|
||||
func containerURL(for purpose: Purpose) -> URL?
|
||||
}
|
||||
|
||||
// File read/write operations
|
||||
public protocol FileAccessorProviding: Sendable {
|
||||
func read(from url: URL) throws -> Data
|
||||
func write(_ data: Data, to url: URL) throws
|
||||
func fileExists(at url: URL) -> Bool
|
||||
}
|
||||
|
||||
// Bookmark storage (e.g., for sandboxed apps)
|
||||
public protocol BookmarkStorageProviding: Sendable {
|
||||
func saveBookmark(_ data: Data, for key: String) throws
|
||||
func loadBookmark(for key: String) throws -> Data?
|
||||
}
|
||||
```
|
||||
|
||||
### 2. Create Default (Production) Implementations
|
||||
|
||||
```swift
|
||||
public struct DefaultFileSystemProvider: FileSystemProviding {
|
||||
public init() {}
|
||||
|
||||
public func containerURL(for purpose: Purpose) -> URL? {
|
||||
FileManager.default.url(forUbiquityContainerIdentifier: nil)
|
||||
}
|
||||
}
|
||||
|
||||
public struct DefaultFileAccessor: FileAccessorProviding {
|
||||
public init() {}
|
||||
|
||||
public func read(from url: URL) throws -> Data {
|
||||
try Data(contentsOf: url)
|
||||
}
|
||||
|
||||
public func write(_ data: Data, to url: URL) throws {
|
||||
try data.write(to: url, options: .atomic)
|
||||
}
|
||||
|
||||
public func fileExists(at url: URL) -> Bool {
|
||||
FileManager.default.fileExists(atPath: url.path)
|
||||
}
|
||||
}
|
||||
```
|
||||
|
||||
### 3. Create Mock Implementations for Testing
|
||||
|
||||
```swift
|
||||
public final class MockFileAccessor: FileAccessorProviding, @unchecked Sendable {
|
||||
public var files: [URL: Data] = [:]
|
||||
public var readError: Error?
|
||||
public var writeError: Error?
|
||||
|
||||
public init() {}
|
||||
|
||||
public func read(from url: URL) throws -> Data {
|
||||
if let error = readError { throw error }
|
||||
guard let data = files[url] else {
|
||||
throw CocoaError(.fileReadNoSuchFile)
|
||||
}
|
||||
return data
|
||||
}
|
||||
|
||||
public func write(_ data: Data, to url: URL) throws {
|
||||
if let error = writeError { throw error }
|
||||
files[url] = data
|
||||
}
|
||||
|
||||
public func fileExists(at url: URL) -> Bool {
|
||||
files[url] != nil
|
||||
}
|
||||
}
|
||||
```
|
||||
|
||||
### 4. Inject Dependencies with Default Parameters
|
||||
|
||||
Production code uses defaults; tests inject mocks.
|
||||
|
||||
```swift
|
||||
public actor SyncManager {
|
||||
private let fileSystem: FileSystemProviding
|
||||
private let fileAccessor: FileAccessorProviding
|
||||
|
||||
public init(
|
||||
fileSystem: FileSystemProviding = DefaultFileSystemProvider(),
|
||||
fileAccessor: FileAccessorProviding = DefaultFileAccessor()
|
||||
) {
|
||||
self.fileSystem = fileSystem
|
||||
self.fileAccessor = fileAccessor
|
||||
}
|
||||
|
||||
public func sync() async throws {
|
||||
guard let containerURL = fileSystem.containerURL(for: .sync) else {
|
||||
throw SyncError.containerNotAvailable
|
||||
}
|
||||
let data = try fileAccessor.read(
|
||||
from: containerURL.appendingPathComponent("data.json")
|
||||
)
|
||||
// Process data...
|
||||
}
|
||||
}
|
||||
```
|
||||
|
||||
### 5. Write Tests with Swift Testing
|
||||
|
||||
```swift
|
||||
import Testing
|
||||
|
||||
@Test("Sync manager handles missing container")
|
||||
func testMissingContainer() async {
|
||||
let mockFileSystem = MockFileSystemProvider(containerURL: nil)
|
||||
let manager = SyncManager(fileSystem: mockFileSystem)
|
||||
|
||||
await #expect(throws: SyncError.containerNotAvailable) {
|
||||
try await manager.sync()
|
||||
}
|
||||
}
|
||||
|
||||
@Test("Sync manager reads data correctly")
|
||||
func testReadData() async throws {
|
||||
let mockFileAccessor = MockFileAccessor()
|
||||
mockFileAccessor.files[testURL] = testData
|
||||
|
||||
let manager = SyncManager(fileAccessor: mockFileAccessor)
|
||||
let result = try await manager.loadData()
|
||||
|
||||
#expect(result == expectedData)
|
||||
}
|
||||
|
||||
@Test("Sync manager handles read errors gracefully")
|
||||
func testReadError() async {
|
||||
let mockFileAccessor = MockFileAccessor()
|
||||
mockFileAccessor.readError = CocoaError(.fileReadCorruptFile)
|
||||
|
||||
let manager = SyncManager(fileAccessor: mockFileAccessor)
|
||||
|
||||
await #expect(throws: SyncError.self) {
|
||||
try await manager.sync()
|
||||
}
|
||||
}
|
||||
```
|
||||
|
||||
## Best Practices
|
||||
|
||||
- **Single Responsibility**: Each protocol should handle one concern — don't create "god protocols" with many methods
|
||||
- **Sendable conformance**: Required when protocols are used across actor boundaries
|
||||
- **Default parameters**: Let production code use real implementations by default; only tests need to specify mocks
|
||||
- **Error simulation**: Design mocks with configurable error properties for testing failure paths
|
||||
- **Only mock boundaries**: Mock external dependencies (file system, network, APIs), not internal types
|
||||
|
||||
## Anti-Patterns to Avoid
|
||||
|
||||
- Creating a single large protocol that covers all external access
|
||||
- Mocking internal types that have no external dependencies
|
||||
- Using `#if DEBUG` conditionals instead of proper dependency injection
|
||||
- Forgetting `Sendable` conformance when used with actors
|
||||
- Over-engineering: if a type has no external dependencies, it doesn't need a protocol
|
||||
|
||||
## When to Use
|
||||
|
||||
- Any Swift code that touches file system, network, or external APIs
|
||||
- Testing error handling paths that are hard to trigger in real environments
|
||||
- Building modules that need to work in app, test, and SwiftUI preview contexts
|
||||
- Apps using Swift concurrency (actors, structured concurrency) that need testable architecture
|
||||
@@ -1587,6 +1587,558 @@ function runTests() {
|
||||
cleanupTestDir(testDir);
|
||||
})) passed++; else failed++;
|
||||
|
||||
// ── Round 47: escape sequence and frontmatter edge cases ──
|
||||
console.log('\nRound 47: validate-hooks (inline JS escape sequences):');
|
||||
|
||||
if (test('validates inline JS with mixed escape sequences (newline + escaped quote)', () => {
|
||||
const testDir = createTestDir();
|
||||
const hooksFile = path.join(testDir, 'hooks.json');
|
||||
// Command value after JSON parse: node -e "var a = \"ok\"\nconsole.log(a)"
|
||||
// Regex captures: var a = \"ok\"\nconsole.log(a)
|
||||
// After unescape chain: var a = "ok"\nconsole.log(a) (real newline) — valid JS
|
||||
fs.writeFileSync(hooksFile, JSON.stringify({
|
||||
hooks: {
|
||||
PreToolUse: [{ matcher: 'test', hooks: [{ type: 'command',
|
||||
command: 'node -e "var a = \\"ok\\"\\nconsole.log(a)"' }] }]
|
||||
}
|
||||
}));
|
||||
|
||||
const result = runValidatorWithDir('validate-hooks', 'HOOKS_FILE', hooksFile);
|
||||
assert.strictEqual(result.code, 0, 'Should handle escaped quotes and newline separators');
|
||||
cleanupTestDir(testDir);
|
||||
})) passed++; else failed++;
|
||||
|
||||
if (test('rejects inline JS with syntax error after unescaping', () => {
|
||||
const testDir = createTestDir();
|
||||
const hooksFile = path.join(testDir, 'hooks.json');
|
||||
// After unescape this becomes: var x = { — missing closing brace
|
||||
fs.writeFileSync(hooksFile, JSON.stringify({
|
||||
hooks: {
|
||||
PreToolUse: [{ matcher: 'test', hooks: [{ type: 'command',
|
||||
command: 'node -e "var x = {"' }] }]
|
||||
}
|
||||
}));
|
||||
|
||||
const result = runValidatorWithDir('validate-hooks', 'HOOKS_FILE', hooksFile);
|
||||
assert.strictEqual(result.code, 1, 'Should reject JS syntax error after unescaping');
|
||||
assert.ok(result.stderr.includes('invalid inline JS'), 'Should report inline JS error');
|
||||
cleanupTestDir(testDir);
|
||||
})) passed++; else failed++;
|
||||
|
||||
console.log('\nRound 47: validate-agents (frontmatter lines without colon):');
|
||||
|
||||
if (test('silently ignores frontmatter line without colon', () => {
|
||||
const testDir = createTestDir();
|
||||
// Line "just some text" has no colon — should be skipped, not cause crash
|
||||
fs.writeFileSync(path.join(testDir, 'mixed.md'),
|
||||
'---\nmodel: sonnet\njust some text without colon\ntools: Read\n---\n# Agent');
|
||||
|
||||
const result = runValidatorWithDir('validate-agents', 'AGENTS_DIR', testDir);
|
||||
assert.strictEqual(result.code, 0, 'Should ignore lines without colon in frontmatter');
|
||||
cleanupTestDir(testDir);
|
||||
})) passed++; else failed++;
|
||||
|
||||
// ── Round 52: command inline backtick refs, workflow whitespace, code-only rules ──
|
||||
console.log('\nRound 52: validate-commands (inline backtick refs):');
|
||||
|
||||
if (test('validates command refs inside inline backticks (not stripped by code block removal)', () => {
|
||||
const testDir = createTestDir();
|
||||
const agentsDir = createTestDir();
|
||||
const skillsDir = createTestDir();
|
||||
fs.writeFileSync(path.join(testDir, 'deploy.md'), '# Deploy\nDeploy the app.');
|
||||
// Inline backtick ref `/deploy` should be validated (only fenced blocks stripped)
|
||||
fs.writeFileSync(path.join(testDir, 'workflow.md'),
|
||||
'# Workflow\nFirst run `/deploy` to deploy the app.');
|
||||
|
||||
const result = runValidatorWithDirs('validate-commands', {
|
||||
COMMANDS_DIR: testDir, AGENTS_DIR: agentsDir, SKILLS_DIR: skillsDir
|
||||
});
|
||||
assert.strictEqual(result.code, 0, 'Inline backtick command refs should be validated');
|
||||
cleanupTestDir(testDir); cleanupTestDir(agentsDir); cleanupTestDir(skillsDir);
|
||||
})) passed++; else failed++;
|
||||
|
||||
console.log('\nRound 52: validate-commands (workflow whitespace):');
|
||||
|
||||
if (test('validates workflow arrows with irregular whitespace', () => {
|
||||
const testDir = createTestDir();
|
||||
const agentsDir = createTestDir();
|
||||
const skillsDir = createTestDir();
|
||||
fs.writeFileSync(path.join(agentsDir, 'planner.md'), '# Planner');
|
||||
fs.writeFileSync(path.join(agentsDir, 'reviewer.md'), '# Reviewer');
|
||||
// Three workflow lines: no spaces, double spaces, tab-separated
|
||||
fs.writeFileSync(path.join(testDir, 'flow.md'),
|
||||
'# Workflow\n\nplanner->reviewer\nplanner -> reviewer');
|
||||
|
||||
const result = runValidatorWithDirs('validate-commands', {
|
||||
COMMANDS_DIR: testDir, AGENTS_DIR: agentsDir, SKILLS_DIR: skillsDir
|
||||
});
|
||||
assert.strictEqual(result.code, 0, 'Workflow arrows with irregular whitespace should be valid');
|
||||
cleanupTestDir(testDir); cleanupTestDir(agentsDir); cleanupTestDir(skillsDir);
|
||||
})) passed++; else failed++;
|
||||
|
||||
console.log('\nRound 52: validate-rules (code-only content):');
|
||||
|
||||
if (test('passes rule file containing only a fenced code block', () => {
|
||||
const testDir = createTestDir();
|
||||
fs.writeFileSync(path.join(testDir, 'code-only.md'),
|
||||
'```javascript\nfunction example() {\n return true;\n}\n```');
|
||||
|
||||
const result = runValidatorWithDir('validate-rules', 'RULES_DIR', testDir);
|
||||
assert.strictEqual(result.code, 0, 'Rule with only code block should pass (non-empty)');
|
||||
assert.ok(result.stdout.includes('Validated 1'), 'Should count the code-only file');
|
||||
cleanupTestDir(testDir);
|
||||
})) passed++; else failed++;
|
||||
|
||||
// ── Round 57: readFileSync error path, statSync catch block, adjacent code blocks ──
|
||||
console.log('\nRound 57: validate-skills.js (SKILL.md is a directory — readFileSync error):');
|
||||
|
||||
if (test('fails gracefully when SKILL.md is a directory instead of a file', () => {
|
||||
const testDir = createTestDir();
|
||||
const skillDir = path.join(testDir, 'dir-skill');
|
||||
fs.mkdirSync(skillDir);
|
||||
// Create SKILL.md as a DIRECTORY, not a file — existsSync returns true
|
||||
// but readFileSync throws EISDIR, exercising the catch block (lines 33-37)
|
||||
fs.mkdirSync(path.join(skillDir, 'SKILL.md'));
|
||||
|
||||
const result = runValidatorWithDir('validate-skills', 'SKILLS_DIR', testDir);
|
||||
assert.strictEqual(result.code, 1, 'Should fail when SKILL.md is a directory');
|
||||
assert.ok(result.stderr.includes('dir-skill'), 'Should report the problematic skill');
|
||||
cleanupTestDir(testDir);
|
||||
})) passed++; else failed++;
|
||||
|
||||
console.log('\nRound 57: validate-rules.js (broken symlink — statSync catch block):');
|
||||
|
||||
if (test('reports error for broken symlink .md file in rules directory', () => {
|
||||
const testDir = createTestDir();
|
||||
// Create a valid rule first
|
||||
fs.writeFileSync(path.join(testDir, 'valid.md'), '# Valid Rule');
|
||||
// Create a broken symlink (dangling → target doesn't exist)
|
||||
// statSync follows symlinks and throws ENOENT, exercising catch (lines 35-38)
|
||||
try {
|
||||
fs.symlinkSync('/nonexistent/target.md', path.join(testDir, 'broken.md'));
|
||||
} catch {
|
||||
// Skip on systems that don't support symlinks
|
||||
console.log(' (skipped — symlinks not supported)');
|
||||
cleanupTestDir(testDir);
|
||||
return;
|
||||
}
|
||||
|
||||
const result = runValidatorWithDir('validate-rules', 'RULES_DIR', testDir);
|
||||
assert.strictEqual(result.code, 1, 'Should fail on broken symlink');
|
||||
assert.ok(result.stderr.includes('broken.md'), 'Should report the broken symlink file');
|
||||
cleanupTestDir(testDir);
|
||||
})) passed++; else failed++;
|
||||
|
||||
console.log('\nRound 57: validate-commands.js (adjacent code blocks both stripped):');
|
||||
|
||||
if (test('strips multiple adjacent code blocks before checking references', () => {
|
||||
const testDir = createTestDir();
|
||||
const agentsDir = createTestDir();
|
||||
const skillsDir = createTestDir();
|
||||
// Two adjacent code blocks, each with broken refs — BOTH must be stripped
|
||||
fs.writeFileSync(path.join(testDir, 'multi-blocks.md'),
|
||||
'# Multi Block\n\n' +
|
||||
'```\n`/phantom-a` in first block\n```\n\n' +
|
||||
'Content between blocks\n\n' +
|
||||
'```\n`/phantom-b` in second block\nagents/ghost-agent.md\n```\n\n' +
|
||||
'Final content');
|
||||
|
||||
const result = runValidatorWithDirs('validate-commands', {
|
||||
COMMANDS_DIR: testDir, AGENTS_DIR: agentsDir, SKILLS_DIR: skillsDir
|
||||
});
|
||||
assert.strictEqual(result.code, 0,
|
||||
'Both code blocks should be stripped — no broken refs reported');
|
||||
assert.ok(!result.stderr.includes('phantom-a'), 'First block ref should be stripped');
|
||||
assert.ok(!result.stderr.includes('phantom-b'), 'Second block ref should be stripped');
|
||||
assert.ok(!result.stderr.includes('ghost-agent'), 'Agent ref in second block should be stripped');
|
||||
cleanupTestDir(testDir); cleanupTestDir(agentsDir); cleanupTestDir(skillsDir);
|
||||
})) passed++; else failed++;
|
||||
|
||||
// ── Round 58: readFileSync catch block, colonIdx edge case, command-as-object ──
|
||||
console.log('\nRound 58: validate-agents.js (unreadable agent file — readFileSync catch):');
|
||||
|
||||
if (test('reports error when agent .md file is unreadable (chmod 000)', () => {
|
||||
// Skip on Windows or when running as root (permissions won't work)
|
||||
if (process.platform === 'win32' || (process.getuid && process.getuid() === 0)) {
|
||||
console.log(' (skipped — not supported on this platform)');
|
||||
return;
|
||||
}
|
||||
const testDir = createTestDir();
|
||||
const agentFile = path.join(testDir, 'locked.md');
|
||||
fs.writeFileSync(agentFile, '---\nmodel: sonnet\ntools: Read\n---\n# Agent');
|
||||
fs.chmodSync(agentFile, 0o000);
|
||||
|
||||
try {
|
||||
const result = runValidatorWithDir('validate-agents', 'AGENTS_DIR', testDir);
|
||||
assert.strictEqual(result.code, 1, 'Should exit 1 on read error');
|
||||
assert.ok(result.stderr.includes('locked.md'), 'Should mention the unreadable file');
|
||||
} finally {
|
||||
fs.chmodSync(agentFile, 0o644);
|
||||
cleanupTestDir(testDir);
|
||||
}
|
||||
})) passed++; else failed++;
|
||||
|
||||
console.log('\nRound 58: validate-agents.js (frontmatter line with colon at position 0):');
|
||||
|
||||
if (test('rejects agent when required field key has colon at position 0 (no key name)', () => {
|
||||
const testDir = createTestDir();
|
||||
fs.writeFileSync(path.join(testDir, 'bad-colon.md'),
|
||||
'---\n:sonnet\ntools: Read\n---\n# Agent with leading colon');
|
||||
|
||||
const result = runValidatorWithDir('validate-agents', 'AGENTS_DIR', testDir);
|
||||
assert.strictEqual(result.code, 1, 'Should fail — model field is missing (colon at idx 0 skipped)');
|
||||
assert.ok(result.stderr.includes('model'), 'Should report missing model field');
|
||||
cleanupTestDir(testDir);
|
||||
})) passed++; else failed++;
|
||||
|
||||
console.log('\nRound 58: validate-hooks.js (command is a plain object — not string or array):');
|
||||
|
||||
if (test('rejects hook entry where command is a plain object', () => {
|
||||
const testDir = createTestDir();
|
||||
const hooksFile = path.join(testDir, 'hooks.json');
|
||||
fs.writeFileSync(hooksFile, JSON.stringify({
|
||||
hooks: {
|
||||
PreToolUse: [{ matcher: 'test', hooks: [{ type: 'command', command: { run: 'echo hi' } }] }]
|
||||
}
|
||||
}));
|
||||
|
||||
const result = runValidatorWithDir('validate-hooks', 'HOOKS_FILE', hooksFile);
|
||||
assert.strictEqual(result.code, 1, 'Should reject object command (not string or array)');
|
||||
assert.ok(result.stderr.includes('command'), 'Should report invalid command field');
|
||||
cleanupTestDir(testDir);
|
||||
})) passed++; else failed++;
|
||||
|
||||
// ── Round 63: object-format missing matcher, unreadable command file, empty commands dir ──
|
||||
console.log('\nRound 63: validate-hooks.js (object-format matcher missing matcher field):');
|
||||
|
||||
if (test('rejects object-format matcher entry missing matcher field', () => {
|
||||
const testDir = createTestDir();
|
||||
const hooksFile = path.join(testDir, 'hooks.json');
|
||||
// Object format: matcher entry has hooks array but NO matcher field
|
||||
fs.writeFileSync(hooksFile, JSON.stringify({
|
||||
hooks: {
|
||||
PreToolUse: [{ hooks: [{ type: 'command', command: 'echo ok' }] }]
|
||||
}
|
||||
}));
|
||||
|
||||
const result = runValidatorWithDir('validate-hooks', 'HOOKS_FILE', hooksFile);
|
||||
assert.strictEqual(result.code, 1, 'Should fail on missing matcher field in object format');
|
||||
assert.ok(result.stderr.includes("missing 'matcher' field"), 'Should report missing matcher field');
|
||||
cleanupTestDir(testDir);
|
||||
})) passed++; else failed++;
|
||||
|
||||
console.log('\nRound 63: validate-commands.js (unreadable command file):');
|
||||
|
||||
if (test('reports error when command .md file is unreadable (chmod 000)', () => {
|
||||
if (process.platform === 'win32' || (process.getuid && process.getuid() === 0)) {
|
||||
console.log(' (skipped — not supported on this platform)');
|
||||
return;
|
||||
}
|
||||
const testDir = createTestDir();
|
||||
const cmdFile = path.join(testDir, 'locked.md');
|
||||
fs.writeFileSync(cmdFile, '# Locked Command');
|
||||
fs.chmodSync(cmdFile, 0o000);
|
||||
|
||||
try {
|
||||
const result = runValidatorWithDirs('validate-commands', {
|
||||
COMMANDS_DIR: testDir, AGENTS_DIR: '/nonexistent', SKILLS_DIR: '/nonexistent'
|
||||
});
|
||||
assert.strictEqual(result.code, 1, 'Should exit 1 on read error');
|
||||
assert.ok(result.stderr.includes('locked.md'), 'Should mention the unreadable file');
|
||||
} finally {
|
||||
fs.chmodSync(cmdFile, 0o644);
|
||||
cleanupTestDir(testDir);
|
||||
}
|
||||
})) passed++; else failed++;
|
||||
|
||||
console.log('\nRound 63: validate-commands.js (empty commands directory):');
|
||||
|
||||
if (test('passes on empty commands directory (no .md files)', () => {
|
||||
const testDir = createTestDir();
|
||||
// Only non-.md files — no .md files to validate
|
||||
fs.writeFileSync(path.join(testDir, 'readme.txt'), 'not a command');
|
||||
|
||||
const result = runValidatorWithDirs('validate-commands', {
|
||||
COMMANDS_DIR: testDir, AGENTS_DIR: '/nonexistent', SKILLS_DIR: '/nonexistent'
|
||||
});
|
||||
assert.strictEqual(result.code, 0, 'Should pass on empty commands directory');
|
||||
assert.ok(result.stdout.includes('Validated 0'), 'Should report 0 validated');
|
||||
cleanupTestDir(testDir);
|
||||
})) passed++; else failed++;
|
||||
|
||||
// ── Round 65: empty directories for rules and skills ──
|
||||
console.log('\nRound 65: validate-rules.js (empty directory — no .md files):');
|
||||
|
||||
if (test('passes on rules directory with no .md files (Validated 0)', () => {
|
||||
const testDir = createTestDir();
|
||||
// Only non-.md files — readdirSync filter yields empty array
|
||||
fs.writeFileSync(path.join(testDir, 'notes.txt'), 'not a rule');
|
||||
fs.writeFileSync(path.join(testDir, 'config.json'), '{}');
|
||||
|
||||
const result = runValidatorWithDir('validate-rules', 'RULES_DIR', testDir);
|
||||
assert.strictEqual(result.code, 0, 'Should pass on empty rules directory');
|
||||
assert.ok(result.stdout.includes('Validated 0'), 'Should report 0 validated rule files');
|
||||
cleanupTestDir(testDir);
|
||||
})) passed++; else failed++;
|
||||
|
||||
console.log('\nRound 65: validate-skills.js (empty directory — no subdirectories):');
|
||||
|
||||
if (test('passes on skills directory with only files, no subdirectories (Validated 0)', () => {
|
||||
const testDir = createTestDir();
|
||||
// Only files, no subdirectories — isDirectory filter yields empty array
|
||||
fs.writeFileSync(path.join(testDir, 'README.md'), '# Skills');
|
||||
fs.writeFileSync(path.join(testDir, '.gitkeep'), '');
|
||||
|
||||
const result = runValidatorWithDir('validate-skills', 'SKILLS_DIR', testDir);
|
||||
assert.strictEqual(result.code, 0, 'Should pass on skills directory with no subdirectories');
|
||||
assert.ok(result.stdout.includes('Validated 0'), 'Should report 0 validated skill directories');
|
||||
cleanupTestDir(testDir);
|
||||
})) passed++; else failed++;
|
||||
|
||||
// ── Round 70: validate-commands.js "would create:" line skip ──
|
||||
console.log('\nRound 70: validate-commands.js (would create: skip):');
|
||||
|
||||
if (test('skips command references on "would create:" lines', () => {
|
||||
const testDir = createTestDir();
|
||||
const agentsDir = createTestDir();
|
||||
const skillsDir = createTestDir();
|
||||
// "Would create:" is the alternate form checked by the regex at line 80:
|
||||
// if (/creates:|would create:/i.test(line)) continue;
|
||||
// Only "creates:" was previously tested (Round 20). "Would create:" exercises
|
||||
// the second alternation in the regex.
|
||||
fs.writeFileSync(path.join(testDir, 'gen-cmd.md'),
|
||||
'# Generator Command\n\nWould create: `/phantom-cmd` in your project.\n\nThis is safe.');
|
||||
|
||||
const result = runValidatorWithDirs('validate-commands', {
|
||||
COMMANDS_DIR: testDir, AGENTS_DIR: agentsDir, SKILLS_DIR: skillsDir
|
||||
});
|
||||
assert.strictEqual(result.code, 0, 'Should skip "would create:" lines');
|
||||
assert.ok(!result.stderr.includes('phantom-cmd'), 'Should not flag ref on "would create:" line');
|
||||
cleanupTestDir(testDir); cleanupTestDir(agentsDir); cleanupTestDir(skillsDir);
|
||||
})) passed++; else failed++;
|
||||
|
||||
// ── Round 72: validate-hooks.js async/timeout type validation ──
|
||||
console.log('\nRound 72: validate-hooks.js (async and timeout type validation):');
|
||||
|
||||
if (test('rejects hook with non-boolean async field', () => {
|
||||
const testDir = createTestDir();
|
||||
const hooksFile = path.join(testDir, 'hooks.json');
|
||||
fs.writeFileSync(hooksFile, JSON.stringify({
|
||||
PreToolUse: [{
|
||||
matcher: 'Write',
|
||||
hooks: [{
|
||||
type: 'intercept',
|
||||
command: 'echo test',
|
||||
async: 'yes' // Should be boolean, not string
|
||||
}]
|
||||
}]
|
||||
}));
|
||||
const result = runValidatorWithDir('validate-hooks', 'HOOKS_FILE', hooksFile);
|
||||
assert.strictEqual(result.code, 1, 'Should fail on non-boolean async');
|
||||
assert.ok(result.stderr.includes('async'), 'Should mention async in error');
|
||||
assert.ok(result.stderr.includes('boolean'), 'Should mention boolean type');
|
||||
cleanupTestDir(testDir);
|
||||
})) passed++; else failed++;
|
||||
|
||||
if (test('rejects hook with negative timeout value', () => {
|
||||
const testDir = createTestDir();
|
||||
const hooksFile = path.join(testDir, 'hooks.json');
|
||||
fs.writeFileSync(hooksFile, JSON.stringify({
|
||||
PostToolUse: [{
|
||||
matcher: 'Edit',
|
||||
hooks: [{
|
||||
type: 'intercept',
|
||||
command: 'echo test',
|
||||
timeout: -5 // Must be non-negative
|
||||
}]
|
||||
}]
|
||||
}));
|
||||
const result = runValidatorWithDir('validate-hooks', 'HOOKS_FILE', hooksFile);
|
||||
assert.strictEqual(result.code, 1, 'Should fail on negative timeout');
|
||||
assert.ok(result.stderr.includes('timeout'), 'Should mention timeout in error');
|
||||
assert.ok(result.stderr.includes('non-negative'), 'Should mention non-negative');
|
||||
cleanupTestDir(testDir);
|
||||
})) passed++; else failed++;
|
||||
|
||||
// ── Round 73: validate-commands.js skill directory statSync catch ──
|
||||
console.log('\nRound 73: validate-commands.js (unreadable skill entry — statSync catch):');
|
||||
|
||||
if (test('skips unreadable skill directory entries without error (broken symlink)', () => {
|
||||
const testDir = createTestDir();
|
||||
const agentsDir = createTestDir();
|
||||
const skillsDir = createTestDir();
|
||||
|
||||
// Create one valid skill directory and one broken symlink
|
||||
const validSkill = path.join(skillsDir, 'valid-skill');
|
||||
fs.mkdirSync(validSkill, { recursive: true });
|
||||
// Broken symlink: target does not exist — statSync will throw ENOENT
|
||||
const brokenLink = path.join(skillsDir, 'broken-skill');
|
||||
fs.symlinkSync('/nonexistent/target/path', brokenLink);
|
||||
|
||||
// Command that references the valid skill (should resolve)
|
||||
fs.writeFileSync(path.join(testDir, 'cmd.md'),
|
||||
'# Command\nSee skills/valid-skill/ for details.');
|
||||
|
||||
const result = runValidatorWithDirs('validate-commands', {
|
||||
COMMANDS_DIR: testDir, AGENTS_DIR: agentsDir, SKILLS_DIR: skillsDir
|
||||
});
|
||||
assert.strictEqual(result.code, 0,
|
||||
'Should pass — broken symlink in skills dir should be skipped silently');
|
||||
// The broken-skill should NOT be in validSkills, so referencing it would warn
|
||||
// but the valid-skill reference should resolve fine
|
||||
cleanupTestDir(testDir);
|
||||
cleanupTestDir(agentsDir);
|
||||
fs.rmSync(skillsDir, { recursive: true, force: true });
|
||||
})) passed++; else failed++;
|
||||
|
||||
// ── Round 76: validate-hooks.js invalid JSON in hooks.json ──
|
||||
console.log('\nRound 76: validate-hooks.js (invalid JSON in hooks.json):');
|
||||
|
||||
if (test('reports error for invalid JSON in hooks.json', () => {
|
||||
const testDir = createTestDir();
|
||||
const hooksFile = path.join(testDir, 'hooks.json');
|
||||
fs.writeFileSync(hooksFile, '{not valid json!!!');
|
||||
|
||||
const result = runValidatorWithDir('validate-hooks', 'HOOKS_FILE', hooksFile);
|
||||
assert.strictEqual(result.code, 1,
|
||||
`Expected exit 1 for invalid JSON, got ${result.code}`);
|
||||
assert.ok(result.stderr.includes('Invalid JSON'),
|
||||
`stderr should mention Invalid JSON, got: ${result.stderr}`);
|
||||
cleanupTestDir(testDir);
|
||||
})) passed++; else failed++;
|
||||
|
||||
// ── Round 78: validate-hooks.js wrapped { hooks: { ... } } format ──
|
||||
console.log('\nRound 78: validate-hooks.js (wrapped hooks format):');
|
||||
|
||||
if (test('validates wrapped format { hooks: { PreToolUse: [...] } }', () => {
|
||||
const testDir = createTestDir();
|
||||
const hooksFile = path.join(testDir, 'hooks.json');
|
||||
// The production hooks.json uses this wrapped format — { hooks: { ... } }
|
||||
// data.hooks is the object with event types, not data itself
|
||||
fs.writeFileSync(hooksFile, JSON.stringify({
|
||||
"$schema": "https://json.schemastore.org/claude-code-settings.json",
|
||||
hooks: {
|
||||
PreToolUse: [{ matcher: 'Write', hooks: [{ type: 'command', command: 'echo ok' }] }],
|
||||
PostToolUse: [{ matcher: 'Read', hooks: [{ type: 'command', command: 'echo done' }] }]
|
||||
}
|
||||
}));
|
||||
|
||||
const result = runValidatorWithDir('validate-hooks', 'HOOKS_FILE', hooksFile);
|
||||
assert.strictEqual(result.code, 0,
|
||||
`Should pass wrapped hooks format, got exit ${result.code}. stderr: ${result.stderr}`);
|
||||
assert.ok(result.stdout.includes('Validated 2'),
|
||||
`Should validate 2 matchers, got: ${result.stdout}`);
|
||||
cleanupTestDir(testDir);
|
||||
})) passed++; else failed++;
|
||||
|
||||
// ── Round 79: validate-commands.js warnings count suffix in output ──
|
||||
console.log('\nRound 79: validate-commands.js (warnings count in output):');
|
||||
|
||||
if (test('output includes (N warnings) suffix when skill references produce warnings', () => {
|
||||
const testDir = createTestDir();
|
||||
const agentsDir = createTestDir();
|
||||
const skillsDir = createTestDir();
|
||||
// Create a command that references 2 non-existent skill directories
|
||||
// Each triggers a WARN (not error) — warnCount should be 2
|
||||
fs.writeFileSync(path.join(testDir, 'cmd-warn.md'),
|
||||
'# Command\nSee skills/fake-skill-a/ and skills/fake-skill-b/ for details.');
|
||||
|
||||
const result = runValidatorWithDirs('validate-commands', {
|
||||
COMMANDS_DIR: testDir, AGENTS_DIR: agentsDir, SKILLS_DIR: skillsDir
|
||||
});
|
||||
assert.strictEqual(result.code, 0, 'Skill warnings should not cause error exit');
|
||||
// The validate-commands output appends "(N warnings)" when warnCount > 0
|
||||
assert.ok(result.stdout.includes('(2 warnings)'),
|
||||
`Output should include "(2 warnings)" suffix, got: ${result.stdout}`);
|
||||
cleanupTestDir(testDir); cleanupTestDir(agentsDir); cleanupTestDir(skillsDir);
|
||||
})) passed++; else failed++;
|
||||
|
||||
// ── Round 80: validate-hooks.js legacy array format (lines 115-135) ──
|
||||
console.log('\nRound 80: validate-hooks.js (legacy array format):');
|
||||
|
||||
if (test('validates hooks in legacy array format (hooks is an array, not object)', () => {
|
||||
const testDir = createTestDir();
|
||||
// The legacy array format wraps hooks as { hooks: [...] } where the array
|
||||
// contains matcher objects directly. This exercises lines 115-135 of
|
||||
// validate-hooks.js which use "Hook ${i}" error labels instead of "${eventType}[${i}]".
|
||||
const hooksJson = JSON.stringify({
|
||||
hooks: [
|
||||
{
|
||||
matcher: 'Edit',
|
||||
hooks: [{ type: 'command', command: 'echo legacy test' }]
|
||||
}
|
||||
]
|
||||
});
|
||||
fs.writeFileSync(path.join(testDir, 'hooks.json'), hooksJson);
|
||||
|
||||
const result = runValidatorWithDir('validate-hooks', 'HOOKS_FILE', path.join(testDir, 'hooks.json'));
|
||||
assert.strictEqual(result.code, 0, 'Should pass on valid legacy array format');
|
||||
assert.ok(result.stdout.includes('Validated 1 hook'),
|
||||
`Should report 1 validated matcher, got: ${result.stdout}`);
|
||||
cleanupTestDir(testDir);
|
||||
})) passed++; else failed++;
|
||||
|
||||
// ── Round 82: Notification and SubagentStop event types ──
|
||||
|
||||
console.log('\nRound 82: validate-hooks (Notification and SubagentStop event types):');
|
||||
|
||||
if (test('accepts Notification and SubagentStop as valid event types', () => {
|
||||
const testDir = createTestDir();
|
||||
const hooksJson = JSON.stringify({
|
||||
hooks: [
|
||||
{
|
||||
matcher: { type: 'Notification' },
|
||||
hooks: [{ type: 'command', command: 'echo notification' }]
|
||||
},
|
||||
{
|
||||
matcher: { type: 'SubagentStop' },
|
||||
hooks: [{ type: 'command', command: 'echo subagent stopped' }]
|
||||
}
|
||||
]
|
||||
});
|
||||
fs.writeFileSync(path.join(testDir, 'hooks.json'), hooksJson);
|
||||
|
||||
const result = runValidatorWithDir('validate-hooks', 'HOOKS_FILE', path.join(testDir, 'hooks.json'));
|
||||
assert.strictEqual(result.code, 0, 'Should pass with Notification and SubagentStop events');
|
||||
assert.ok(result.stdout.includes('Validated 2 hook'),
|
||||
`Should report 2 validated matchers, got: ${result.stdout}`);
|
||||
cleanupTestDir(testDir);
|
||||
})) passed++; else failed++;
|
||||
|
||||
// ── Round 83: validate-agents whitespace-only field, validate-skills empty SKILL.md ──
|
||||
|
||||
console.log('\nRound 83: validate-agents (whitespace-only frontmatter field value):');
|
||||
|
||||
if (test('rejects agent with whitespace-only model field (trim guard)', () => {
|
||||
const testDir = createTestDir();
|
||||
// model has only whitespace — extractFrontmatter produces { model: ' ', tools: 'Read' }
|
||||
// The condition: typeof frontmatter[field] === 'string' && !frontmatter[field].trim()
|
||||
// evaluates to true for model → "Missing required field: model"
|
||||
fs.writeFileSync(path.join(testDir, 'ws.md'), '---\nmodel: \ntools: Read\n---\n# Whitespace model');
|
||||
|
||||
const result = runValidatorWithDir('validate-agents', 'AGENTS_DIR', testDir);
|
||||
assert.strictEqual(result.code, 1, 'Should reject whitespace-only model');
|
||||
assert.ok(result.stderr.includes('model'), 'Should report missing model field');
|
||||
assert.ok(!result.stderr.includes('tools'), 'tools field is valid and should NOT be flagged');
|
||||
cleanupTestDir(testDir);
|
||||
})) passed++; else failed++;
|
||||
|
||||
console.log('\nRound 83: validate-skills (empty SKILL.md file):');
|
||||
|
||||
if (test('rejects skill directory with empty SKILL.md file', () => {
|
||||
const testDir = createTestDir();
|
||||
const skillDir = path.join(testDir, 'empty-skill');
|
||||
fs.mkdirSync(skillDir, { recursive: true });
|
||||
// Create SKILL.md with only whitespace (trim to zero length)
|
||||
fs.writeFileSync(path.join(skillDir, 'SKILL.md'), ' \n \n');
|
||||
|
||||
const result = runValidatorWithDir('validate-skills', 'SKILLS_DIR', testDir);
|
||||
assert.strictEqual(result.code, 1, 'Should reject empty SKILL.md');
|
||||
assert.ok(result.stderr.includes('Empty file'),
|
||||
`Should report "Empty file", got: ${result.stderr}`);
|
||||
cleanupTestDir(testDir);
|
||||
})) passed++; else failed++;
|
||||
|
||||
// Summary
|
||||
console.log(`\nResults: Passed: ${passed}, Failed: ${failed}`);
|
||||
process.exit(failed > 0 ? 1 : 0);
|
||||
|
||||
@@ -11,7 +11,7 @@ const assert = require('assert');
|
||||
const path = require('path');
|
||||
const fs = require('fs');
|
||||
const os = require('os');
|
||||
const { spawnSync, execFileSync } = require('child_process');
|
||||
const { spawnSync } = require('child_process');
|
||||
|
||||
const evaluateScript = path.join(__dirname, '..', '..', 'scripts', 'hooks', 'evaluate-session.js');
|
||||
|
||||
@@ -258,6 +258,159 @@ function runTests() {
|
||||
assert.strictEqual(result.status, 0, 'Should exit 0 on empty stdin');
|
||||
})) passed++; else failed++;
|
||||
|
||||
// ── Round 53: env var fallback path ──
|
||||
console.log('\nRound 53: CLAUDE_TRANSCRIPT_PATH fallback:');
|
||||
|
||||
if (test('falls back to CLAUDE_TRANSCRIPT_PATH env var when stdin is invalid JSON', () => {
|
||||
const testDir = createTestDir();
|
||||
const transcript = createTranscript(testDir, 15);
|
||||
|
||||
const result = spawnSync('node', [evaluateScript], {
|
||||
encoding: 'utf8',
|
||||
input: 'invalid json {{{',
|
||||
timeout: 10000,
|
||||
env: { ...process.env, CLAUDE_TRANSCRIPT_PATH: transcript }
|
||||
});
|
||||
|
||||
assert.strictEqual(result.status, 0, 'Should exit 0');
|
||||
assert.ok(
|
||||
result.stderr.includes('15 messages'),
|
||||
'Should evaluate using env var fallback path'
|
||||
);
|
||||
assert.ok(
|
||||
result.stderr.includes('evaluate'),
|
||||
'Should indicate session evaluation'
|
||||
);
|
||||
cleanupTestDir(testDir);
|
||||
})) passed++; else failed++;
|
||||
|
||||
// ── Round 65: regex whitespace tolerance in countInFile ──
|
||||
console.log('\nRound 65: regex whitespace tolerance around colon:');
|
||||
|
||||
if (test('counts user messages when JSON has spaces around colon ("type" : "user")', () => {
|
||||
const testDir = createTestDir();
|
||||
const filePath = path.join(testDir, 'spaced.jsonl');
|
||||
// Manually write JSON with spaces around the colon — NOT JSON.stringify
|
||||
// The regex /"type"\s*:\s*"user"/g should match these
|
||||
const lines = [];
|
||||
for (let i = 0; i < 12; i++) {
|
||||
lines.push(`{"type" : "user", "content": "msg ${i}"}`);
|
||||
lines.push(`{"type" : "assistant", "content": "resp ${i}"}`);
|
||||
}
|
||||
fs.writeFileSync(filePath, lines.join('\n') + '\n');
|
||||
|
||||
const result = runEvaluate({ transcript_path: filePath });
|
||||
assert.strictEqual(result.code, 0);
|
||||
// 12 user messages >= 10 threshold → should evaluate (not "too short")
|
||||
assert.ok(!result.stderr.includes('too short'),
|
||||
'Should NOT say too short for 12 spaced-colon user messages');
|
||||
assert.ok(
|
||||
result.stderr.includes('12 messages') || result.stderr.includes('evaluate'),
|
||||
`Should evaluate session with spaced-colon JSON. Got stderr: ${result.stderr}`
|
||||
);
|
||||
cleanupTestDir(testDir);
|
||||
})) passed++; else failed++;
|
||||
|
||||
// ── Round 85: config file parse error (corrupt JSON) ──
|
||||
console.log('\nRound 85: config parse error catch block:');
|
||||
|
||||
if (test('falls back to defaults when config file contains invalid JSON', () => {
|
||||
// The evaluate-session.js script reads config from:
|
||||
// path.join(__dirname, '..', '..', 'skills', 'continuous-learning', 'config.json')
|
||||
// where __dirname = scripts/hooks/ → config = repo_root/skills/continuous-learning/config.json
|
||||
const configPath = path.join(__dirname, '..', '..', 'skills', 'continuous-learning', 'config.json');
|
||||
let originalContent = null;
|
||||
try {
|
||||
originalContent = fs.readFileSync(configPath, 'utf8');
|
||||
} catch {
|
||||
// Config file may not exist — that's fine
|
||||
}
|
||||
|
||||
try {
|
||||
// Write corrupt JSON to the config file
|
||||
fs.writeFileSync(configPath, 'NOT VALID JSON {{{ corrupt data !!!', 'utf8');
|
||||
|
||||
// Create a transcript with 12 user messages (above default threshold of 10)
|
||||
const testDir = createTestDir();
|
||||
const transcript = createTranscript(testDir, 12);
|
||||
const result = runEvaluate({ transcript_path: transcript });
|
||||
|
||||
assert.strictEqual(result.code, 0, 'Should exit 0 despite corrupt config');
|
||||
// With corrupt config, defaults apply: min_session_length = 10
|
||||
// 12 >= 10 → should evaluate (not "too short")
|
||||
assert.ok(!result.stderr.includes('too short'),
|
||||
`Should NOT say too short — corrupt config falls back to default min=10. Got: ${result.stderr}`);
|
||||
assert.ok(
|
||||
result.stderr.includes('12 messages') || result.stderr.includes('evaluate'),
|
||||
`Should evaluate with 12 messages using default threshold. Got: ${result.stderr}`
|
||||
);
|
||||
// The catch block logs "Failed to parse config" — verify that log message
|
||||
assert.ok(result.stderr.includes('Failed to parse config'),
|
||||
`Should log config parse error. Got: ${result.stderr}`);
|
||||
|
||||
cleanupTestDir(testDir);
|
||||
} finally {
|
||||
// Restore original config file
|
||||
if (originalContent !== null) {
|
||||
fs.writeFileSync(configPath, originalContent, 'utf8');
|
||||
} else {
|
||||
// Config didn't exist before — remove the corrupt one we created
|
||||
try { fs.unlinkSync(configPath); } catch { /* best-effort */ }
|
||||
}
|
||||
}
|
||||
})) passed++; else failed++;
|
||||
|
||||
// ── Round 86: config learned_skills_path override with ~ expansion ──
|
||||
console.log('\nRound 86: config learned_skills_path override:');
|
||||
|
||||
if (test('uses learned_skills_path from config with ~ expansion', () => {
|
||||
// evaluate-session.js lines 69-72:
|
||||
// if (config.learned_skills_path) {
|
||||
// learnedSkillsPath = config.learned_skills_path.replace(/^~/, require('os').homedir());
|
||||
// }
|
||||
// This branch was never tested — only the parse error (Round 85) and default path.
|
||||
const configPath = path.join(__dirname, '..', '..', 'skills', 'continuous-learning', 'config.json');
|
||||
let originalContent = null;
|
||||
try {
|
||||
originalContent = fs.readFileSync(configPath, 'utf8');
|
||||
} catch {
|
||||
// Config file may not exist
|
||||
}
|
||||
|
||||
try {
|
||||
// Write config with a custom learned_skills_path using ~ prefix
|
||||
fs.writeFileSync(configPath, JSON.stringify({
|
||||
min_session_length: 10,
|
||||
learned_skills_path: '~/custom-learned-skills-dir'
|
||||
}));
|
||||
|
||||
// Create a transcript with 12 user messages (above threshold)
|
||||
const testDir = createTestDir();
|
||||
const transcript = createTranscript(testDir, 12);
|
||||
const result = runEvaluate({ transcript_path: transcript });
|
||||
|
||||
assert.strictEqual(result.code, 0, 'Should exit 0');
|
||||
// The script logs "Save learned skills to: <path>" where <path> should
|
||||
// be the expanded home directory, NOT the literal "~"
|
||||
assert.ok(!result.stderr.includes('~/custom-learned-skills-dir'),
|
||||
'Should NOT contain literal ~ in output (should be expanded)');
|
||||
assert.ok(result.stderr.includes('custom-learned-skills-dir'),
|
||||
`Should reference the custom learned skills dir. Got: ${result.stderr}`);
|
||||
// The ~ should have been replaced with os.homedir()
|
||||
assert.ok(result.stderr.includes(os.homedir()),
|
||||
`Should contain expanded home directory. Got: ${result.stderr}`);
|
||||
|
||||
cleanupTestDir(testDir);
|
||||
} finally {
|
||||
// Restore original config file
|
||||
if (originalContent !== null) {
|
||||
fs.writeFileSync(configPath, originalContent, 'utf8');
|
||||
} else {
|
||||
try { fs.unlinkSync(configPath); } catch { /* best-effort */ }
|
||||
}
|
||||
}
|
||||
})) passed++; else failed++;
|
||||
|
||||
// Summary
|
||||
console.log(`\nResults: Passed: ${passed}, Failed: ${failed}`);
|
||||
process.exit(failed > 0 ? 1 : 0);
|
||||
|
||||
File diff suppressed because it is too large
Load Diff
@@ -19,11 +19,11 @@ const compactScript = path.join(__dirname, '..', '..', 'scripts', 'hooks', 'sugg
|
||||
function test(name, fn) {
|
||||
try {
|
||||
fn();
|
||||
console.log(` \u2713 ${name}`);
|
||||
console.log(` \u2713 ${name}`);
|
||||
return true;
|
||||
} catch (err) {
|
||||
console.log(` \u2717 ${name}`);
|
||||
console.log(` Error: ${err.message}`);
|
||||
} catch (_err) {
|
||||
console.log(` \u2717 ${name}`);
|
||||
console.log(` Error: ${_err.message}`);
|
||||
return false;
|
||||
}
|
||||
}
|
||||
@@ -66,7 +66,11 @@ function runTests() {
|
||||
|
||||
// Cleanup helper
|
||||
function cleanupCounter() {
|
||||
try { fs.unlinkSync(counterFile); } catch {}
|
||||
try {
|
||||
fs.unlinkSync(counterFile);
|
||||
} catch (_err) {
|
||||
// Ignore error
|
||||
}
|
||||
}
|
||||
|
||||
// Basic functionality
|
||||
@@ -80,7 +84,8 @@ function runTests() {
|
||||
const count = parseInt(fs.readFileSync(counterFile, 'utf8').trim(), 10);
|
||||
assert.strictEqual(count, 1, 'Counter should be 1 after first run');
|
||||
cleanupCounter();
|
||||
})) passed++; else failed++;
|
||||
})) passed++;
|
||||
else failed++;
|
||||
|
||||
if (test('increments counter on subsequent runs', () => {
|
||||
cleanupCounter();
|
||||
@@ -90,7 +95,8 @@ function runTests() {
|
||||
const count = parseInt(fs.readFileSync(counterFile, 'utf8').trim(), 10);
|
||||
assert.strictEqual(count, 3, 'Counter should be 3 after three runs');
|
||||
cleanupCounter();
|
||||
})) passed++; else failed++;
|
||||
})) passed++;
|
||||
else failed++;
|
||||
|
||||
// Threshold suggestion
|
||||
console.log('\nThreshold suggestion:');
|
||||
@@ -106,7 +112,8 @@ function runTests() {
|
||||
`Should suggest compact at threshold. Got stderr: ${result.stderr}`
|
||||
);
|
||||
cleanupCounter();
|
||||
})) passed++; else failed++;
|
||||
})) passed++;
|
||||
else failed++;
|
||||
|
||||
if (test('does NOT suggest compact before threshold', () => {
|
||||
cleanupCounter();
|
||||
@@ -117,7 +124,8 @@ function runTests() {
|
||||
'Should NOT suggest compact before threshold'
|
||||
);
|
||||
cleanupCounter();
|
||||
})) passed++; else failed++;
|
||||
})) passed++;
|
||||
else failed++;
|
||||
|
||||
// Interval suggestion (every 25 calls after threshold)
|
||||
console.log('\nInterval suggestion:');
|
||||
@@ -135,7 +143,8 @@ function runTests() {
|
||||
`Should suggest at threshold+25 interval. Got stderr: ${result.stderr}`
|
||||
);
|
||||
cleanupCounter();
|
||||
})) passed++; else failed++;
|
||||
})) passed++;
|
||||
else failed++;
|
||||
|
||||
// Environment variable handling
|
||||
console.log('\nEnvironment variable handling:');
|
||||
@@ -151,7 +160,8 @@ function runTests() {
|
||||
`Should use default threshold of 50. Got stderr: ${result.stderr}`
|
||||
);
|
||||
cleanupCounter();
|
||||
})) passed++; else failed++;
|
||||
})) passed++;
|
||||
else failed++;
|
||||
|
||||
if (test('ignores invalid COMPACT_THRESHOLD (negative)', () => {
|
||||
cleanupCounter();
|
||||
@@ -163,7 +173,8 @@ function runTests() {
|
||||
`Should fallback to 50 for negative threshold. Got stderr: ${result.stderr}`
|
||||
);
|
||||
cleanupCounter();
|
||||
})) passed++; else failed++;
|
||||
})) passed++;
|
||||
else failed++;
|
||||
|
||||
if (test('ignores non-numeric COMPACT_THRESHOLD', () => {
|
||||
cleanupCounter();
|
||||
@@ -175,7 +186,8 @@ function runTests() {
|
||||
`Should fallback to 50 for non-numeric threshold. Got stderr: ${result.stderr}`
|
||||
);
|
||||
cleanupCounter();
|
||||
})) passed++; else failed++;
|
||||
})) passed++;
|
||||
else failed++;
|
||||
|
||||
// Corrupted counter file
|
||||
console.log('\nCorrupted counter file:');
|
||||
@@ -189,7 +201,8 @@ function runTests() {
|
||||
const count = parseInt(fs.readFileSync(counterFile, 'utf8').trim(), 10);
|
||||
assert.strictEqual(count, 1, 'Should reset to 1 on corrupted file');
|
||||
cleanupCounter();
|
||||
})) passed++; else failed++;
|
||||
})) passed++;
|
||||
else failed++;
|
||||
|
||||
if (test('resets counter on extremely large value', () => {
|
||||
cleanupCounter();
|
||||
@@ -200,7 +213,8 @@ function runTests() {
|
||||
const count = parseInt(fs.readFileSync(counterFile, 'utf8').trim(), 10);
|
||||
assert.strictEqual(count, 1, 'Should reset to 1 for value > 1000000');
|
||||
cleanupCounter();
|
||||
})) passed++; else failed++;
|
||||
})) passed++;
|
||||
else failed++;
|
||||
|
||||
if (test('handles empty counter file', () => {
|
||||
cleanupCounter();
|
||||
@@ -211,7 +225,8 @@ function runTests() {
|
||||
const count = parseInt(fs.readFileSync(counterFile, 'utf8').trim(), 10);
|
||||
assert.strictEqual(count, 1, 'Should start at 1 for empty file');
|
||||
cleanupCounter();
|
||||
})) passed++; else failed++;
|
||||
})) passed++;
|
||||
else failed++;
|
||||
|
||||
// Session isolation
|
||||
console.log('\nSession isolation:');
|
||||
@@ -230,10 +245,11 @@ function runTests() {
|
||||
assert.strictEqual(countA, 2, 'Session A should have count 2');
|
||||
assert.strictEqual(countB, 1, 'Session B should have count 1');
|
||||
} finally {
|
||||
try { fs.unlinkSync(fileA); } catch {}
|
||||
try { fs.unlinkSync(fileB); } catch {}
|
||||
try { fs.unlinkSync(fileA); } catch (_err) { /* ignore */ }
|
||||
try { fs.unlinkSync(fileB); } catch (_err) { /* ignore */ }
|
||||
}
|
||||
})) passed++; else failed++;
|
||||
})) passed++;
|
||||
else failed++;
|
||||
|
||||
// Always exits 0
|
||||
console.log('\nExit code:');
|
||||
@@ -243,7 +259,8 @@ function runTests() {
|
||||
const result = runCompact({ CLAUDE_SESSION_ID: testSession });
|
||||
assert.strictEqual(result.code, 0, 'Should always exit 0');
|
||||
cleanupCounter();
|
||||
})) passed++; else failed++;
|
||||
})) passed++;
|
||||
else failed++;
|
||||
|
||||
// ── Round 29: threshold boundary values ──
|
||||
console.log('\nThreshold boundary values:');
|
||||
@@ -258,7 +275,8 @@ function runTests() {
|
||||
`Should fallback to 50 for threshold=0. Got stderr: ${result.stderr}`
|
||||
);
|
||||
cleanupCounter();
|
||||
})) passed++; else failed++;
|
||||
})) passed++;
|
||||
else failed++;
|
||||
|
||||
if (test('accepts COMPACT_THRESHOLD=10000 (boundary max)', () => {
|
||||
cleanupCounter();
|
||||
@@ -270,7 +288,8 @@ function runTests() {
|
||||
`Should accept threshold=10000. Got stderr: ${result.stderr}`
|
||||
);
|
||||
cleanupCounter();
|
||||
})) passed++; else failed++;
|
||||
})) passed++;
|
||||
else failed++;
|
||||
|
||||
if (test('rejects COMPACT_THRESHOLD=10001 (falls back to 50)', () => {
|
||||
cleanupCounter();
|
||||
@@ -282,7 +301,8 @@ function runTests() {
|
||||
`Should fallback to 50 for threshold=10001. Got stderr: ${result.stderr}`
|
||||
);
|
||||
cleanupCounter();
|
||||
})) passed++; else failed++;
|
||||
})) passed++;
|
||||
else failed++;
|
||||
|
||||
if (test('rejects float COMPACT_THRESHOLD (e.g. 3.5)', () => {
|
||||
cleanupCounter();
|
||||
@@ -297,29 +317,58 @@ function runTests() {
|
||||
'Float threshold should be parseInt-ed to 3, no suggestion at count=50'
|
||||
);
|
||||
cleanupCounter();
|
||||
})) passed++; else failed++;
|
||||
})) passed++;
|
||||
else failed++;
|
||||
|
||||
if (test('counter value at exact boundary 1000000 is valid', () => {
|
||||
cleanupCounter();
|
||||
fs.writeFileSync(counterFile, '999999');
|
||||
const result = runCompact({ CLAUDE_SESSION_ID: testSession, COMPACT_THRESHOLD: '3' });
|
||||
runCompact({ CLAUDE_SESSION_ID: testSession, COMPACT_THRESHOLD: '3' });
|
||||
// 999999 is valid (> 0, <= 1000000), count becomes 1000000
|
||||
const count = parseInt(fs.readFileSync(counterFile, 'utf8').trim(), 10);
|
||||
assert.strictEqual(count, 1000000, 'Counter at 1000000 boundary should be valid');
|
||||
cleanupCounter();
|
||||
})) passed++; else failed++;
|
||||
})) passed++;
|
||||
else failed++;
|
||||
|
||||
if (test('counter value at 1000001 is clamped (reset to 1)', () => {
|
||||
cleanupCounter();
|
||||
fs.writeFileSync(counterFile, '1000001');
|
||||
const result = runCompact({ CLAUDE_SESSION_ID: testSession });
|
||||
runCompact({ CLAUDE_SESSION_ID: testSession });
|
||||
const count = parseInt(fs.readFileSync(counterFile, 'utf8').trim(), 10);
|
||||
assert.strictEqual(count, 1, 'Counter > 1000000 should be reset to 1');
|
||||
cleanupCounter();
|
||||
})) passed++; else failed++;
|
||||
})) passed++;
|
||||
else failed++;
|
||||
|
||||
// ── Round 64: default session ID fallback ──
|
||||
console.log('\nDefault session ID fallback (Round 64):');
|
||||
|
||||
if (test('uses "default" session ID when CLAUDE_SESSION_ID is empty', () => {
|
||||
const defaultCounterFile = getCounterFilePath('default');
|
||||
try { fs.unlinkSync(defaultCounterFile); } catch (_err) { /* ignore */ }
|
||||
try {
|
||||
// Pass empty CLAUDE_SESSION_ID — falsy, so script uses 'default'
|
||||
const env = { ...process.env, CLAUDE_SESSION_ID: '' };
|
||||
const result = spawnSync('node', [compactScript], {
|
||||
encoding: 'utf8',
|
||||
input: '{}',
|
||||
timeout: 10000,
|
||||
env,
|
||||
});
|
||||
assert.strictEqual(result.status || 0, 0, 'Should exit 0');
|
||||
assert.ok(fs.existsSync(defaultCounterFile), 'Counter file should use "default" session ID');
|
||||
const count = parseInt(fs.readFileSync(defaultCounterFile, 'utf8').trim(), 10);
|
||||
assert.strictEqual(count, 1, 'Counter should be 1 for first run with default session');
|
||||
} finally {
|
||||
try { fs.unlinkSync(defaultCounterFile); } catch (_err) { /* ignore */ }
|
||||
}
|
||||
})) passed++;
|
||||
else failed++;
|
||||
|
||||
// Summary
|
||||
console.log(`\nResults: Passed: ${passed}, Failed: ${failed}`);
|
||||
console.log(`
|
||||
Results: Passed: ${passed}, Failed: ${failed}`);
|
||||
process.exit(failed > 0 ? 1 : 0);
|
||||
}
|
||||
|
||||
|
||||
@@ -262,8 +262,13 @@ async function runTests() {
|
||||
});
|
||||
});
|
||||
|
||||
assert.ok(stderr.includes('BLOCKED'), 'Blocking hook should output BLOCKED');
|
||||
assert.strictEqual(code, 2, 'Blocking hook should exit with code 2');
|
||||
// Hook only blocks on non-Windows platforms (tmux is Unix-only)
|
||||
if (process.platform === 'win32') {
|
||||
assert.strictEqual(code, 0, 'On Windows, hook should not block (exit 0)');
|
||||
} else {
|
||||
assert.ok(stderr.includes('BLOCKED'), 'Blocking hook should output BLOCKED');
|
||||
assert.strictEqual(code, 2, 'Blocking hook should exit with code 2');
|
||||
}
|
||||
})) passed++; else failed++;
|
||||
|
||||
// ==========================================
|
||||
@@ -298,7 +303,12 @@ async function runTests() {
|
||||
});
|
||||
});
|
||||
|
||||
assert.strictEqual(code, 2, 'Blocking hook should exit 2');
|
||||
// Hook only blocks on non-Windows platforms (tmux is Unix-only)
|
||||
if (process.platform === 'win32') {
|
||||
assert.strictEqual(code, 0, 'On Windows, hook should not block (exit 0)');
|
||||
} else {
|
||||
assert.strictEqual(code, 2, 'Blocking hook should exit 2');
|
||||
}
|
||||
})) passed++; else failed++;
|
||||
|
||||
if (await asyncTest('hooks handle missing files gracefully', async () => {
|
||||
@@ -622,6 +632,76 @@ async function runTests() {
|
||||
assert.strictEqual(code, 0, 'Should not crash on truncated JSON');
|
||||
})) passed++; else failed++;
|
||||
|
||||
// ==========================================
|
||||
// Round 51: Timeout Enforcement
|
||||
// ==========================================
|
||||
console.log('\nRound 51: Timeout Enforcement:');
|
||||
|
||||
if (await asyncTest('runHookWithInput kills hanging hooks after timeout', async () => {
|
||||
const testDir = createTestDir();
|
||||
const hangingHookPath = path.join(testDir, 'hanging-hook.js');
|
||||
fs.writeFileSync(hangingHookPath, 'setInterval(() => {}, 100);');
|
||||
|
||||
try {
|
||||
const startTime = Date.now();
|
||||
let error = null;
|
||||
|
||||
try {
|
||||
await runHookWithInput(hangingHookPath, {}, {}, 500);
|
||||
} catch (err) {
|
||||
error = err;
|
||||
}
|
||||
|
||||
const elapsed = Date.now() - startTime;
|
||||
assert.ok(error, 'Should throw timeout error');
|
||||
assert.ok(error.message.includes('timed out'), 'Error should mention timeout');
|
||||
assert.ok(elapsed >= 450, `Should wait at least ~500ms, waited ${elapsed}ms`);
|
||||
assert.ok(elapsed < 2000, `Should not wait much longer than 500ms, waited ${elapsed}ms`);
|
||||
} finally {
|
||||
cleanupTestDir(testDir);
|
||||
}
|
||||
})) passed++; else failed++;
|
||||
|
||||
// ==========================================
|
||||
// Round 51: hooks.json Schema Validation
|
||||
// ==========================================
|
||||
console.log('\nRound 51: hooks.json Schema Validation:');
|
||||
|
||||
if (await asyncTest('hooks.json async hook has valid timeout field', async () => {
|
||||
const asyncHook = hooks.hooks.PostToolUse.find(h =>
|
||||
h.hooks && h.hooks[0] && h.hooks[0].async === true
|
||||
);
|
||||
|
||||
assert.ok(asyncHook, 'Should have at least one async hook defined');
|
||||
assert.strictEqual(asyncHook.hooks[0].async, true, 'async field should be true');
|
||||
assert.ok(asyncHook.hooks[0].timeout, 'Should have timeout field');
|
||||
assert.strictEqual(typeof asyncHook.hooks[0].timeout, 'number', 'Timeout should be a number');
|
||||
assert.ok(asyncHook.hooks[0].timeout > 0, 'Timeout should be positive');
|
||||
|
||||
const match = asyncHook.hooks[0].command.match(/^node -e "(.+)"$/s);
|
||||
assert.ok(match, 'Async hook command should be node -e format');
|
||||
})) passed++; else failed++;
|
||||
|
||||
if (await asyncTest('all hook commands in hooks.json are valid format', async () => {
|
||||
for (const [hookType, hookArray] of Object.entries(hooks.hooks)) {
|
||||
for (const hookDef of hookArray) {
|
||||
assert.ok(hookDef.hooks, `${hookType} entry should have hooks array`);
|
||||
|
||||
for (const hook of hookDef.hooks) {
|
||||
assert.ok(hook.command, `Hook in ${hookType} should have command field`);
|
||||
|
||||
const isInline = hook.command.startsWith('node -e');
|
||||
const isFilePath = hook.command.startsWith('node "');
|
||||
|
||||
assert.ok(
|
||||
isInline || isFilePath,
|
||||
`Hook command in ${hookType} should be inline (node -e) or file path (node "), got: ${hook.command.substring(0, 50)}`
|
||||
);
|
||||
}
|
||||
}
|
||||
}
|
||||
})) passed++; else failed++;
|
||||
|
||||
// Summary
|
||||
console.log('\n=== Test Results ===');
|
||||
console.log(`Passed: ${passed}`);
|
||||
|
||||
File diff suppressed because it is too large
Load Diff
@@ -787,7 +787,6 @@ function runTests() {
|
||||
// Verify the file exists
|
||||
const aliasesPath = path.join(tmpHome, '.claude', 'session-aliases.json');
|
||||
assert.ok(fs.existsSync(aliasesPath), 'Aliases file should exist');
|
||||
const contentBefore = fs.readFileSync(aliasesPath, 'utf8');
|
||||
|
||||
// Attempt to save circular data — will fail
|
||||
const circular = { aliases: {}, metadata: {} };
|
||||
@@ -839,6 +838,989 @@ function runTests() {
|
||||
// best-effort
|
||||
}
|
||||
|
||||
// ── Round 48: rapid sequential saves data integrity ──
|
||||
console.log('\nRound 48: rapid sequential saves:');
|
||||
|
||||
if (test('rapid sequential setAlias calls maintain data integrity', () => {
|
||||
resetAliases();
|
||||
for (let i = 0; i < 5; i++) {
|
||||
const result = aliases.setAlias(`rapid-${i}`, `/path/${i}`, `Title ${i}`);
|
||||
assert.strictEqual(result.success, true, `setAlias rapid-${i} should succeed`);
|
||||
}
|
||||
const data = aliases.loadAliases();
|
||||
for (let i = 0; i < 5; i++) {
|
||||
assert.ok(data.aliases[`rapid-${i}`], `rapid-${i} should exist after all saves`);
|
||||
assert.strictEqual(data.aliases[`rapid-${i}`].sessionPath, `/path/${i}`);
|
||||
}
|
||||
assert.strictEqual(data.metadata.totalCount, 5, 'Metadata count should match actual aliases');
|
||||
})) passed++; else failed++;
|
||||
|
||||
// ── Round 56: Windows platform unlink-before-rename code path ──
|
||||
console.log('\nRound 56: Windows platform atomic write path:');
|
||||
|
||||
if (test('Windows platform mock: unlinks existing file before rename', () => {
|
||||
resetAliases();
|
||||
// First create an alias so the file exists
|
||||
const r1 = aliases.setAlias('win-initial', '2026-01-01-abc123-session.tmp');
|
||||
assert.strictEqual(r1.success, true, 'Initial alias should succeed');
|
||||
const aliasesPath = aliases.getAliasesPath();
|
||||
assert.ok(fs.existsSync(aliasesPath), 'Aliases file should exist before win32 test');
|
||||
|
||||
// Mock process.platform to 'win32' to trigger the unlink-before-rename path
|
||||
const origPlatform = Object.getOwnPropertyDescriptor(process, 'platform');
|
||||
Object.defineProperty(process, 'platform', { value: 'win32', configurable: true });
|
||||
|
||||
try {
|
||||
// This save triggers the Windows code path: unlink existing → rename temp
|
||||
const r2 = aliases.setAlias('win-updated', '2026-02-01-def456-session.tmp');
|
||||
assert.strictEqual(r2.success, true, 'setAlias should succeed under win32 mock');
|
||||
|
||||
// Verify data integrity after the Windows path
|
||||
assert.ok(fs.existsSync(aliasesPath), 'Aliases file should exist after win32 save');
|
||||
const data = aliases.loadAliases();
|
||||
assert.ok(data.aliases['win-initial'], 'Original alias should still exist');
|
||||
assert.ok(data.aliases['win-updated'], 'New alias should exist');
|
||||
assert.strictEqual(data.aliases['win-updated'].sessionPath,
|
||||
'2026-02-01-def456-session.tmp', 'Session path should match');
|
||||
|
||||
// No .tmp or .bak files left behind
|
||||
assert.ok(!fs.existsSync(aliasesPath + '.tmp'), 'No temp file should remain');
|
||||
assert.ok(!fs.existsSync(aliasesPath + '.bak'), 'No backup file should remain');
|
||||
} finally {
|
||||
// Restore original platform descriptor
|
||||
if (origPlatform) {
|
||||
Object.defineProperty(process, 'platform', origPlatform);
|
||||
}
|
||||
resetAliases();
|
||||
}
|
||||
})) passed++; else failed++;
|
||||
|
||||
// ── Round 64: loadAliases backfills missing version and metadata ──
|
||||
console.log('\nRound 64: loadAliases version/metadata backfill:');
|
||||
|
||||
if (test('loadAliases backfills missing version and metadata fields', () => {
|
||||
resetAliases();
|
||||
const aliasesPath = aliases.getAliasesPath();
|
||||
// Write a file with valid aliases but NO version and NO metadata
|
||||
fs.writeFileSync(aliasesPath, JSON.stringify({
|
||||
aliases: {
|
||||
'backfill-test': {
|
||||
sessionPath: '/sessions/backfill',
|
||||
createdAt: '2026-01-15T00:00:00.000Z',
|
||||
updatedAt: '2026-01-15T00:00:00.000Z',
|
||||
title: 'Backfill Test'
|
||||
}
|
||||
}
|
||||
}));
|
||||
|
||||
const data = aliases.loadAliases();
|
||||
// Version should be backfilled to ALIAS_VERSION ('1.0')
|
||||
assert.strictEqual(data.version, '1.0', 'Should backfill missing version to 1.0');
|
||||
// Metadata should be backfilled with totalCount from aliases
|
||||
assert.ok(data.metadata, 'Should backfill missing metadata object');
|
||||
assert.strictEqual(data.metadata.totalCount, 1, 'Metadata totalCount should match alias count');
|
||||
assert.ok(data.metadata.lastUpdated, 'Metadata should have lastUpdated');
|
||||
// Alias data should be preserved
|
||||
assert.ok(data.aliases['backfill-test'], 'Alias data should be preserved');
|
||||
assert.strictEqual(data.aliases['backfill-test'].sessionPath, '/sessions/backfill');
|
||||
resetAliases();
|
||||
})) passed++; else failed++;
|
||||
|
||||
// ── Round 67: loadAliases empty file, resolveSessionAlias null, metadata-only backfill ──
|
||||
console.log('\nRound 67: loadAliases (empty 0-byte file):');
|
||||
|
||||
if (test('loadAliases returns default structure for empty (0-byte) file', () => {
|
||||
resetAliases();
|
||||
const aliasesPath = aliases.getAliasesPath();
|
||||
// Write a 0-byte file — readFile returns '', which is falsy → !content branch
|
||||
fs.writeFileSync(aliasesPath, '');
|
||||
const data = aliases.loadAliases();
|
||||
assert.ok(data.aliases, 'Should have aliases key');
|
||||
assert.strictEqual(Object.keys(data.aliases).length, 0, 'Should have no aliases');
|
||||
assert.strictEqual(data.version, '1.0', 'Should have default version');
|
||||
assert.ok(data.metadata, 'Should have metadata');
|
||||
assert.strictEqual(data.metadata.totalCount, 0, 'Should have totalCount 0');
|
||||
resetAliases();
|
||||
})) passed++; else failed++;
|
||||
|
||||
console.log('\nRound 67: resolveSessionAlias (null/falsy input):');
|
||||
|
||||
if (test('resolveSessionAlias returns null when given null input', () => {
|
||||
resetAliases();
|
||||
const result = aliases.resolveSessionAlias(null);
|
||||
assert.strictEqual(result, null, 'Should return null for null input');
|
||||
})) passed++; else failed++;
|
||||
|
||||
console.log('\nRound 67: loadAliases (metadata-only backfill, version present):');
|
||||
|
||||
if (test('loadAliases backfills only metadata when version already present', () => {
|
||||
resetAliases();
|
||||
const aliasesPath = aliases.getAliasesPath();
|
||||
// Write a file WITH version but WITHOUT metadata
|
||||
fs.writeFileSync(aliasesPath, JSON.stringify({
|
||||
version: '1.0',
|
||||
aliases: {
|
||||
'meta-only': {
|
||||
sessionPath: '/sessions/meta-only',
|
||||
createdAt: '2026-01-20T00:00:00.000Z',
|
||||
updatedAt: '2026-01-20T00:00:00.000Z',
|
||||
title: 'Metadata Only Test'
|
||||
}
|
||||
}
|
||||
}));
|
||||
|
||||
const data = aliases.loadAliases();
|
||||
// Version should remain as-is (NOT overwritten)
|
||||
assert.strictEqual(data.version, '1.0', 'Version should remain 1.0');
|
||||
// Metadata should be backfilled
|
||||
assert.ok(data.metadata, 'Should backfill missing metadata');
|
||||
assert.strictEqual(data.metadata.totalCount, 1, 'Metadata totalCount should be 1');
|
||||
assert.ok(data.metadata.lastUpdated, 'Metadata should have lastUpdated');
|
||||
// Alias data should be preserved
|
||||
assert.ok(data.aliases['meta-only'], 'Alias should be preserved');
|
||||
assert.strictEqual(data.aliases['meta-only'].title, 'Metadata Only Test');
|
||||
resetAliases();
|
||||
})) passed++; else failed++;
|
||||
|
||||
// ── Round 70: updateAliasTitle save failure path ──
|
||||
console.log('\nupdateAliasTitle save failure (Round 70):');
|
||||
|
||||
if (test('updateAliasTitle returns failure when saveAliases fails (read-only dir)', () => {
|
||||
if (process.platform === 'win32' || process.getuid?.() === 0) {
|
||||
console.log(' (skipped — chmod ineffective on Windows/root)');
|
||||
return;
|
||||
}
|
||||
// Use a fresh isolated HOME to avoid .tmp/.bak leftovers from other tests.
|
||||
// On macOS, overwriting an EXISTING file in a read-only dir succeeds,
|
||||
// so we must start clean with ONLY the .json file present.
|
||||
const isoHome = path.join(os.tmpdir(), `ecc-alias-r70-${Date.now()}`);
|
||||
const isoClaudeDir = path.join(isoHome, '.claude');
|
||||
fs.mkdirSync(isoClaudeDir, { recursive: true });
|
||||
const savedHome = process.env.HOME;
|
||||
const savedProfile = process.env.USERPROFILE;
|
||||
try {
|
||||
process.env.HOME = isoHome;
|
||||
process.env.USERPROFILE = isoHome;
|
||||
// Re-require to pick up new HOME
|
||||
delete require.cache[require.resolve('../../scripts/lib/session-aliases')];
|
||||
delete require.cache[require.resolve('../../scripts/lib/utils')];
|
||||
const freshAliases = require('../../scripts/lib/session-aliases');
|
||||
|
||||
// Set up a valid alias
|
||||
freshAliases.setAlias('title-save-fail', '/path/session', 'Original Title');
|
||||
// Verify no leftover .tmp/.bak
|
||||
const ap = freshAliases.getAliasesPath();
|
||||
assert.ok(fs.existsSync(ap), 'Alias file should exist after setAlias');
|
||||
|
||||
// Make .claude dir read-only so saveAliases fails when creating .bak
|
||||
fs.chmodSync(isoClaudeDir, 0o555);
|
||||
|
||||
const result = freshAliases.updateAliasTitle('title-save-fail', 'New Title');
|
||||
assert.strictEqual(result.success, false, 'Should fail when save is blocked');
|
||||
assert.ok(result.error.includes('Failed to update alias title'),
|
||||
`Should return save failure error, got: ${result.error}`);
|
||||
} finally {
|
||||
try { fs.chmodSync(isoClaudeDir, 0o755); } catch { /* best-effort */ }
|
||||
process.env.HOME = savedHome;
|
||||
process.env.USERPROFILE = savedProfile;
|
||||
delete require.cache[require.resolve('../../scripts/lib/session-aliases')];
|
||||
delete require.cache[require.resolve('../../scripts/lib/utils')];
|
||||
fs.rmSync(isoHome, { recursive: true, force: true });
|
||||
}
|
||||
})) passed++; else failed++;
|
||||
|
||||
// ── Round 72: deleteAlias save failure path ──
|
||||
console.log('\nRound 72: deleteAlias (save failure):');
|
||||
|
||||
if (test('deleteAlias returns failure when saveAliases fails (read-only dir)', () => {
|
||||
if (process.platform === 'win32' || process.getuid?.() === 0) {
|
||||
console.log(' (skipped — chmod ineffective on Windows/root)');
|
||||
return;
|
||||
}
|
||||
const isoHome = path.join(os.tmpdir(), `ecc-alias-r72-${Date.now()}`);
|
||||
const isoClaudeDir = path.join(isoHome, '.claude');
|
||||
fs.mkdirSync(isoClaudeDir, { recursive: true });
|
||||
const savedHome = process.env.HOME;
|
||||
const savedProfile = process.env.USERPROFILE;
|
||||
try {
|
||||
process.env.HOME = isoHome;
|
||||
process.env.USERPROFILE = isoHome;
|
||||
delete require.cache[require.resolve('../../scripts/lib/session-aliases')];
|
||||
delete require.cache[require.resolve('../../scripts/lib/utils')];
|
||||
const freshAliases = require('../../scripts/lib/session-aliases');
|
||||
|
||||
// Create an alias first (writes the file)
|
||||
freshAliases.setAlias('to-delete', '/path/session', 'Test');
|
||||
const ap = freshAliases.getAliasesPath();
|
||||
assert.ok(fs.existsSync(ap), 'Alias file should exist after setAlias');
|
||||
|
||||
// Make .claude directory read-only — save will fail (can't create temp file)
|
||||
fs.chmodSync(isoClaudeDir, 0o555);
|
||||
|
||||
const result = freshAliases.deleteAlias('to-delete');
|
||||
assert.strictEqual(result.success, false, 'Should fail when save is blocked');
|
||||
assert.ok(result.error.includes('Failed to delete alias'),
|
||||
`Should return delete failure error, got: ${result.error}`);
|
||||
} finally {
|
||||
try { fs.chmodSync(isoClaudeDir, 0o755); } catch { /* best-effort */ }
|
||||
process.env.HOME = savedHome;
|
||||
process.env.USERPROFILE = savedProfile;
|
||||
delete require.cache[require.resolve('../../scripts/lib/session-aliases')];
|
||||
delete require.cache[require.resolve('../../scripts/lib/utils')];
|
||||
fs.rmSync(isoHome, { recursive: true, force: true });
|
||||
}
|
||||
})) passed++; else failed++;
|
||||
|
||||
// ── Round 73: cleanupAliases save failure path ──
|
||||
console.log('\nRound 73: cleanupAliases (save failure):');
|
||||
|
||||
if (test('cleanupAliases returns failure when saveAliases fails after removing aliases', () => {
|
||||
if (process.platform === 'win32' || process.getuid?.() === 0) {
|
||||
console.log(' (skipped — chmod ineffective on Windows/root)');
|
||||
return;
|
||||
}
|
||||
const isoHome = path.join(os.tmpdir(), `ecc-alias-r73-cleanup-${Date.now()}`);
|
||||
const isoClaudeDir = path.join(isoHome, '.claude');
|
||||
fs.mkdirSync(isoClaudeDir, { recursive: true });
|
||||
const savedHome = process.env.HOME;
|
||||
const savedProfile = process.env.USERPROFILE;
|
||||
try {
|
||||
process.env.HOME = isoHome;
|
||||
process.env.USERPROFILE = isoHome;
|
||||
delete require.cache[require.resolve('../../scripts/lib/session-aliases')];
|
||||
delete require.cache[require.resolve('../../scripts/lib/utils')];
|
||||
const freshAliases = require('../../scripts/lib/session-aliases');
|
||||
|
||||
// Create aliases — one to keep, one to remove
|
||||
freshAliases.setAlias('keep-me', '/sessions/real', 'Kept');
|
||||
freshAliases.setAlias('remove-me', '/sessions/gone', 'Gone');
|
||||
|
||||
// Make .claude dir read-only so save will fail
|
||||
fs.chmodSync(isoClaudeDir, 0o555);
|
||||
|
||||
// Cleanup: "gone" session doesn't exist, so remove-me should be removed
|
||||
const result = freshAliases.cleanupAliases((p) => p === '/sessions/real');
|
||||
assert.strictEqual(result.success, false, 'Should fail when save is blocked');
|
||||
assert.ok(result.error.includes('Failed to save after cleanup'),
|
||||
`Should return cleanup save failure error, got: ${result.error}`);
|
||||
assert.strictEqual(result.removed, 1, 'Should report 1 removed alias');
|
||||
assert.ok(result.removedAliases.some(a => a.name === 'remove-me'),
|
||||
'Should report remove-me in removedAliases');
|
||||
} finally {
|
||||
try { fs.chmodSync(isoClaudeDir, 0o755); } catch { /* best-effort */ }
|
||||
process.env.HOME = savedHome;
|
||||
process.env.USERPROFILE = savedProfile;
|
||||
delete require.cache[require.resolve('../../scripts/lib/session-aliases')];
|
||||
delete require.cache[require.resolve('../../scripts/lib/utils')];
|
||||
fs.rmSync(isoHome, { recursive: true, force: true });
|
||||
}
|
||||
})) passed++; else failed++;
|
||||
|
||||
// ── Round 73: setAlias save failure path ──
|
||||
console.log('\nRound 73: setAlias (save failure):');
|
||||
|
||||
if (test('setAlias returns failure when saveAliases fails', () => {
|
||||
if (process.platform === 'win32' || process.getuid?.() === 0) {
|
||||
console.log(' (skipped — chmod ineffective on Windows/root)');
|
||||
return;
|
||||
}
|
||||
const isoHome = path.join(os.tmpdir(), `ecc-alias-r73-set-${Date.now()}`);
|
||||
const isoClaudeDir = path.join(isoHome, '.claude');
|
||||
fs.mkdirSync(isoClaudeDir, { recursive: true });
|
||||
const savedHome = process.env.HOME;
|
||||
const savedProfile = process.env.USERPROFILE;
|
||||
try {
|
||||
process.env.HOME = isoHome;
|
||||
process.env.USERPROFILE = isoHome;
|
||||
delete require.cache[require.resolve('../../scripts/lib/session-aliases')];
|
||||
delete require.cache[require.resolve('../../scripts/lib/utils')];
|
||||
const freshAliases = require('../../scripts/lib/session-aliases');
|
||||
|
||||
// Make .claude dir read-only BEFORE any setAlias call
|
||||
fs.chmodSync(isoClaudeDir, 0o555);
|
||||
|
||||
const result = freshAliases.setAlias('my-alias', '/sessions/test', 'Test');
|
||||
assert.strictEqual(result.success, false, 'Should fail when save is blocked');
|
||||
assert.ok(result.error.includes('Failed to save alias'),
|
||||
`Should return save failure error, got: ${result.error}`);
|
||||
} finally {
|
||||
try { fs.chmodSync(isoClaudeDir, 0o755); } catch { /* best-effort */ }
|
||||
process.env.HOME = savedHome;
|
||||
process.env.USERPROFILE = savedProfile;
|
||||
delete require.cache[require.resolve('../../scripts/lib/session-aliases')];
|
||||
delete require.cache[require.resolve('../../scripts/lib/utils')];
|
||||
fs.rmSync(isoHome, { recursive: true, force: true });
|
||||
}
|
||||
})) passed++; else failed++;
|
||||
|
||||
// ── Round 84: listAliases sort NaN date fallback (getTime() || 0) ──
|
||||
console.log('\nRound 84: listAliases (NaN date fallback in sort comparator):');
|
||||
|
||||
if (test('listAliases sorts entries with invalid/missing dates to the end via || 0 fallback', () => {
|
||||
// session-aliases.js line 257:
|
||||
// (new Date(b.updatedAt || b.createdAt || 0).getTime() || 0) - ...
|
||||
// When updatedAt and createdAt are both invalid strings, getTime() returns NaN.
|
||||
// The outer || 0 converts NaN to 0 (epoch time), pushing the entry to the end.
|
||||
resetAliases();
|
||||
const data = aliases.loadAliases();
|
||||
|
||||
// Entry with valid dates — should sort first (newest)
|
||||
data.aliases['valid-alias'] = {
|
||||
sessionPath: '/sessions/valid',
|
||||
createdAt: '2026-02-10T12:00:00.000Z',
|
||||
updatedAt: '2026-02-10T12:00:00.000Z',
|
||||
title: 'Valid'
|
||||
};
|
||||
|
||||
// Entry with invalid date strings — getTime() → NaN → || 0 → epoch (oldest)
|
||||
data.aliases['nan-alias'] = {
|
||||
sessionPath: '/sessions/nan',
|
||||
createdAt: 'not-a-date',
|
||||
updatedAt: 'also-invalid',
|
||||
title: 'NaN dates'
|
||||
};
|
||||
|
||||
// Entry with missing date fields — undefined || undefined || 0 → new Date(0) → epoch
|
||||
data.aliases['missing-alias'] = {
|
||||
sessionPath: '/sessions/missing',
|
||||
title: 'Missing dates'
|
||||
// No createdAt or updatedAt
|
||||
};
|
||||
|
||||
aliases.saveAliases(data);
|
||||
const list = aliases.listAliases();
|
||||
|
||||
assert.strictEqual(list.length, 3, 'Should list all 3 aliases');
|
||||
// Valid-dated entry should be first (newest by updatedAt)
|
||||
assert.strictEqual(list[0].name, 'valid-alias',
|
||||
'Entry with valid dates should sort first');
|
||||
// The two invalid-dated entries sort to epoch (0), so they come after
|
||||
assert.ok(
|
||||
(list[1].name === 'nan-alias' || list[1].name === 'missing-alias') &&
|
||||
(list[2].name === 'nan-alias' || list[2].name === 'missing-alias'),
|
||||
'Entries with invalid/missing dates should sort to the end');
|
||||
})) passed++; else failed++;
|
||||
|
||||
// ── Round 86: loadAliases with truthy non-object aliases field ──
|
||||
console.log('\nRound 86: loadAliases (truthy non-object aliases field):');
|
||||
|
||||
if (test('loadAliases resets to defaults when aliases field is a string (typeof !== object)', () => {
|
||||
// session-aliases.js line 58: if (!data.aliases || typeof data.aliases !== 'object')
|
||||
// Previous tests covered !data.aliases (undefined) via { noAliasesKey: true }.
|
||||
// This exercises the SECOND half: aliases is truthy but typeof !== 'object'.
|
||||
const aliasesPath = aliases.getAliasesPath();
|
||||
fs.writeFileSync(aliasesPath, JSON.stringify({
|
||||
version: '1.0',
|
||||
aliases: 'this-is-a-string-not-an-object',
|
||||
metadata: { totalCount: 0 }
|
||||
}));
|
||||
const data = aliases.loadAliases();
|
||||
assert.strictEqual(typeof data.aliases, 'object', 'Should reset aliases to object');
|
||||
assert.ok(!Array.isArray(data.aliases), 'Should be a plain object, not array');
|
||||
assert.strictEqual(Object.keys(data.aliases).length, 0, 'Should have no aliases');
|
||||
assert.strictEqual(data.version, '1.0', 'Should have version');
|
||||
resetAliases();
|
||||
})) passed++; else failed++;
|
||||
|
||||
// ── Round 90: saveAliases backup restore double failure (inner catch restoreErr) ──
|
||||
console.log('\nRound 90: saveAliases (backup restore double failure):');
|
||||
|
||||
if (test('saveAliases triggers inner restoreErr catch when both save and restore fail', () => {
|
||||
// session-aliases.js lines 131-137: When saveAliases fails (outer catch),
|
||||
// it tries to restore from backup. If the restore ALSO fails, the inner
|
||||
// catch at line 135 logs restoreErr. No existing test creates this double-fault.
|
||||
if (process.platform === 'win32') {
|
||||
console.log(' (skipped — chmod not reliable on Windows)');
|
||||
return;
|
||||
}
|
||||
const isoHome = path.join(os.tmpdir(), `ecc-r90-restore-fail-${Date.now()}`);
|
||||
const claudeDir = path.join(isoHome, '.claude');
|
||||
fs.mkdirSync(claudeDir, { recursive: true });
|
||||
|
||||
// Pre-create a backup file while directory is still writable
|
||||
const backupPath = path.join(claudeDir, 'session-aliases.json.bak');
|
||||
fs.writeFileSync(backupPath, JSON.stringify({ aliases: {}, version: '1.0' }));
|
||||
|
||||
// Make .claude directory read-only (0o555):
|
||||
// 1. writeFileSync(tempPath) → EACCES (can't create file in read-only dir) — outer catch
|
||||
// 2. copyFileSync(backupPath, aliasesPath) → EACCES (can't create target) — inner catch (line 135)
|
||||
fs.chmodSync(claudeDir, 0o555);
|
||||
|
||||
const origH = process.env.HOME;
|
||||
const origP = process.env.USERPROFILE;
|
||||
process.env.HOME = isoHome;
|
||||
process.env.USERPROFILE = isoHome;
|
||||
|
||||
try {
|
||||
delete require.cache[require.resolve('../../scripts/lib/session-aliases')];
|
||||
delete require.cache[require.resolve('../../scripts/lib/utils')];
|
||||
const freshAliases = require('../../scripts/lib/session-aliases');
|
||||
|
||||
const result = freshAliases.saveAliases({ aliases: { x: 1 }, version: '1.0' });
|
||||
assert.strictEqual(result, false, 'Should return false when save fails');
|
||||
|
||||
// Backup should still exist (restore also failed, so backup was not consumed)
|
||||
assert.ok(fs.existsSync(backupPath), 'Backup should still exist after double failure');
|
||||
} finally {
|
||||
process.env.HOME = origH;
|
||||
process.env.USERPROFILE = origP;
|
||||
delete require.cache[require.resolve('../../scripts/lib/session-aliases')];
|
||||
delete require.cache[require.resolve('../../scripts/lib/utils')];
|
||||
try { fs.chmodSync(claudeDir, 0o755); } catch { /* best-effort */ }
|
||||
fs.rmSync(isoHome, { recursive: true, force: true });
|
||||
}
|
||||
})) passed++; else failed++;
|
||||
|
||||
// ── Round 95: renameAlias with same old and new name (self-rename) ──
|
||||
console.log('\nRound 95: renameAlias (self-rename same name):');
|
||||
|
||||
if (test('renameAlias returns "already exists" error when renaming alias to itself', () => {
|
||||
resetAliases();
|
||||
// Create an alias first
|
||||
const created = aliases.setAlias('self-rename', '/path/session', 'Self Rename');
|
||||
assert.strictEqual(created.success, true, 'Setup: alias should be created');
|
||||
|
||||
// Attempt to rename to the same name
|
||||
const result = aliases.renameAlias('self-rename', 'self-rename');
|
||||
assert.strictEqual(result.success, false, 'Renaming to itself should fail');
|
||||
assert.ok(result.error.includes('already exists'),
|
||||
'Error should indicate alias already exists (line 333-334 check)');
|
||||
|
||||
// Verify original alias is still intact
|
||||
const resolved = aliases.resolveAlias('self-rename');
|
||||
assert.ok(resolved, 'Original alias should still exist after failed self-rename');
|
||||
assert.strictEqual(resolved.sessionPath, '/path/session',
|
||||
'Alias data should be preserved');
|
||||
})) passed++; else failed++;
|
||||
|
||||
// ── Round 100: cleanupAliases callback returning falsy non-boolean 0 ──
|
||||
console.log('\nRound 100: cleanupAliases (callback returns 0 — falsy non-boolean coercion):');
|
||||
if (test('cleanupAliases removes alias when callback returns 0 (falsy coercion: !0 === true)', () => {
|
||||
resetAliases();
|
||||
aliases.setAlias('zero-test', '/sessions/some-session', '2026-01-15');
|
||||
// callback returns 0 (a falsy value) — !0 === true → alias is removed
|
||||
const result = aliases.cleanupAliases(() => 0);
|
||||
assert.strictEqual(result.removed, 1,
|
||||
'Alias should be removed because !0 === true (JavaScript falsy coercion)');
|
||||
assert.strictEqual(result.success, true,
|
||||
'Cleanup should succeed');
|
||||
const resolved = aliases.resolveAlias('zero-test');
|
||||
assert.strictEqual(resolved, null,
|
||||
'Alias should no longer exist after removal');
|
||||
})) passed++; else failed++;
|
||||
|
||||
// ── Round 102: setAlias with title=0 (falsy number coercion) ──
|
||||
console.log('\nRound 102: setAlias (title=0 — falsy coercion silently converts to null):');
|
||||
if (test('setAlias with title=0 stores null (0 || null === null due to JavaScript falsy coercion)', () => {
|
||||
// session-aliases.js line 221: `title: title || null` — the value 0 is falsy
|
||||
// in JavaScript, so `0 || null` evaluates to `null`. This means numeric
|
||||
// titles like 0 are silently discarded.
|
||||
resetAliases();
|
||||
const result = aliases.setAlias('zero-title', '/sessions/test', 0);
|
||||
assert.strictEqual(result.success, true,
|
||||
'setAlias should succeed (0 is valid as a truthy check bypass)');
|
||||
assert.strictEqual(result.title, null,
|
||||
'Title should be null because 0 || null === null (falsy coercion)');
|
||||
const resolved = aliases.resolveAlias('zero-title');
|
||||
assert.strictEqual(resolved.title, null,
|
||||
'Persisted title should be null after round-trip through saveAliases/loadAliases');
|
||||
})) passed++; else failed++;
|
||||
|
||||
// ── Round 103: loadAliases with array aliases in JSON (typeof [] === 'object' bypass) ──
|
||||
console.log('\nRound 103: loadAliases (array aliases — typeof bypass):');
|
||||
if (test('loadAliases accepts array aliases because typeof [] === "object" passes validation', () => {
|
||||
// session-aliases.js line 58: `typeof data.aliases !== 'object'` is the guard.
|
||||
// Arrays are typeof 'object' in JavaScript, so {"aliases": [1,2,3]} passes
|
||||
// validation. The returned data.aliases is an array, not a plain object.
|
||||
// Downstream code (Object.keys, Object.entries, bracket access) behaves
|
||||
// differently on arrays vs objects but doesn't crash — it just produces
|
||||
// unexpected results like numeric string keys "0", "1", "2".
|
||||
resetAliases();
|
||||
const aliasesPath = aliases.getAliasesPath();
|
||||
fs.writeFileSync(aliasesPath, JSON.stringify({
|
||||
version: '1.0',
|
||||
aliases: ['item0', 'item1', 'item2'],
|
||||
metadata: { totalCount: 3, lastUpdated: new Date().toISOString() }
|
||||
}));
|
||||
const data = aliases.loadAliases();
|
||||
// The array passes the typeof 'object' check and is returned as-is
|
||||
assert.ok(Array.isArray(data.aliases),
|
||||
'data.aliases should be an array (typeof [] === "object" bypasses guard)');
|
||||
assert.strictEqual(data.aliases.length, 3,
|
||||
'Array should have 3 elements');
|
||||
// Object.keys on an array returns ["0", "1", "2"] — numeric index strings
|
||||
const keys = Object.keys(data.aliases);
|
||||
assert.deepStrictEqual(keys, ['0', '1', '2'],
|
||||
'Object.keys of array returns numeric string indices, not named alias keys');
|
||||
})) passed++; else failed++;
|
||||
|
||||
// ── Round 104: resolveSessionAlias with path-traversal input (passthrough without validation) ──
|
||||
console.log('\nRound 104: resolveSessionAlias (path-traversal input — returned unchanged):');
|
||||
if (test('resolveSessionAlias returns path-traversal input as-is when alias lookup fails', () => {
|
||||
// session-aliases.js lines 365-374: resolveSessionAlias first tries resolveAlias(),
|
||||
// which rejects '../etc/passwd' because the regex /^[a-zA-Z0-9_-]+$/ fails on dots
|
||||
// and slashes (returns null). Then the function falls through to line 373:
|
||||
// `return aliasOrId` — returning the potentially dangerous input unchanged.
|
||||
// Callers that blindly use this return value could be at risk.
|
||||
resetAliases();
|
||||
const traversal = '../etc/passwd';
|
||||
const result = aliases.resolveSessionAlias(traversal);
|
||||
assert.strictEqual(result, traversal,
|
||||
'Path-traversal input should be returned as-is (resolveAlias rejects it, fallback returns input)');
|
||||
// Also test with another invalid alias pattern
|
||||
const dotSlash = './../../secrets';
|
||||
const result2 = aliases.resolveSessionAlias(dotSlash);
|
||||
assert.strictEqual(result2, dotSlash,
|
||||
'Another path-traversal pattern also returned unchanged');
|
||||
})) passed++; else failed++;
|
||||
|
||||
// ── Round 107: setAlias with whitespace-only title (not trimmed unlike sessionPath) ──
|
||||
console.log('\nRound 107: setAlias (whitespace-only title — truthy string stored as-is, unlike sessionPath which is trim-checked):');
|
||||
if (test('setAlias stores whitespace-only title as-is (no trim validation, unlike sessionPath)', () => {
|
||||
resetAliases();
|
||||
// sessionPath with whitespace is rejected (line 195: sessionPath.trim().length === 0)
|
||||
const pathResult = aliases.setAlias('ws-path', ' ');
|
||||
assert.strictEqual(pathResult.success, false,
|
||||
'Whitespace-only sessionPath is rejected by trim check');
|
||||
// But title with whitespace is stored as-is (line 221: title || null — whitespace is truthy)
|
||||
const titleResult = aliases.setAlias('ws-title', '/valid/path', ' ');
|
||||
assert.strictEqual(titleResult.success, true,
|
||||
'Whitespace-only title is accepted (no trim check on title)');
|
||||
assert.strictEqual(titleResult.title, ' ',
|
||||
'Title stored as whitespace string (truthy, so title || null returns the whitespace)');
|
||||
// Verify persisted correctly
|
||||
const loaded = aliases.loadAliases();
|
||||
assert.strictEqual(loaded.aliases['ws-title'].title, ' ',
|
||||
'Whitespace title persists in JSON as-is');
|
||||
})) passed++; else failed++;
|
||||
|
||||
// ── Round 111: setAlias with exactly 128-character alias — off-by-one boundary ──
|
||||
console.log('\nRound 111: setAlias (128-char alias — exact boundary of > 128 check):');
|
||||
if (test('setAlias accepts alias of exactly 128 characters (128 is NOT > 128)', () => {
|
||||
// session-aliases.js line 199: if (alias.length > 128)
|
||||
// 128 is NOT > 128, so exactly 128 chars is ACCEPTED.
|
||||
// Existing test only checks 129 (rejected).
|
||||
resetAliases();
|
||||
const alias128 = 'a'.repeat(128);
|
||||
const result = aliases.setAlias(alias128, '/path/to/session');
|
||||
assert.strictEqual(result.success, true,
|
||||
'128-char alias should be accepted (128 is NOT > 128)');
|
||||
assert.strictEqual(result.isNew, true);
|
||||
// Verify it can be resolved
|
||||
const resolved = aliases.resolveAlias(alias128);
|
||||
assert.notStrictEqual(resolved, null, '128-char alias should be resolvable');
|
||||
assert.strictEqual(resolved.sessionPath, '/path/to/session');
|
||||
// Confirm 129 is rejected (boundary)
|
||||
const result129 = aliases.setAlias('b'.repeat(129), '/path');
|
||||
assert.strictEqual(result129.success, false, '129-char alias should be rejected');
|
||||
assert.ok(result129.error.includes('128'),
|
||||
'Error message should mention 128-char limit');
|
||||
})) passed++; else failed++;
|
||||
|
||||
// ── Round 112: resolveAlias rejects Unicode characters in alias name ──
|
||||
console.log('\nRound 112: resolveAlias (Unicode rejection):');
|
||||
if (test('resolveAlias returns null for alias names containing Unicode characters', () => {
|
||||
resetAliases();
|
||||
// First create a valid alias to ensure the store works
|
||||
aliases.setAlias('valid-alias', '/path/to/session');
|
||||
const validResult = aliases.resolveAlias('valid-alias');
|
||||
assert.notStrictEqual(validResult, null, 'Valid ASCII alias should resolve');
|
||||
|
||||
// Unicode accented characters — rejected by /^[a-zA-Z0-9_-]+$/
|
||||
const accentedResult = aliases.resolveAlias('café-session');
|
||||
assert.strictEqual(accentedResult, null,
|
||||
'Accented character "é" should be rejected by [a-zA-Z0-9_-]');
|
||||
|
||||
const umlautResult = aliases.resolveAlias('über-test');
|
||||
assert.strictEqual(umlautResult, null,
|
||||
'Umlaut "ü" should be rejected by [a-zA-Z0-9_-]');
|
||||
|
||||
// CJK characters
|
||||
const cjkResult = aliases.resolveAlias('会議-notes');
|
||||
assert.strictEqual(cjkResult, null,
|
||||
'CJK characters should be rejected');
|
||||
|
||||
// Emoji
|
||||
const emojiResult = aliases.resolveAlias('rocket-🚀');
|
||||
assert.strictEqual(emojiResult, null,
|
||||
'Emoji should be rejected by the ASCII-only regex');
|
||||
|
||||
// Cyrillic characters that look like Latin (homoglyphs)
|
||||
const cyrillicResult = aliases.resolveAlias('tеst'); // 'е' is Cyrillic U+0435
|
||||
assert.strictEqual(cyrillicResult, null,
|
||||
'Cyrillic homoglyph "е" (U+0435) should be rejected even though it looks like "e"');
|
||||
})) passed++; else failed++;
|
||||
|
||||
// ── Round 114: listAliases with non-string search (number) — TypeError on toLowerCase ──
|
||||
console.log('\nRound 114: listAliases (non-string search — number triggers TypeError):');
|
||||
if (test('listAliases throws TypeError when search option is a number (no toLowerCase method)', () => {
|
||||
resetAliases();
|
||||
|
||||
// Set up some aliases to search through
|
||||
aliases.setAlias('alpha-session', '/path/to/alpha');
|
||||
aliases.setAlias('beta-session', '/path/to/beta');
|
||||
|
||||
// String search works fine — baseline
|
||||
const stringResult = aliases.listAliases({ search: 'alpha' });
|
||||
assert.strictEqual(stringResult.length, 1, 'String search should find 1 match');
|
||||
assert.strictEqual(stringResult[0].name, 'alpha-session');
|
||||
|
||||
// Numeric search — search.toLowerCase() at line 261 of session-aliases.js
|
||||
// throws TypeError because Number.prototype has no toLowerCase method.
|
||||
// The code does NOT guard against non-string search values.
|
||||
assert.throws(
|
||||
() => aliases.listAliases({ search: 123 }),
|
||||
(err) => err instanceof TypeError && /toLowerCase/.test(err.message),
|
||||
'Numeric search value should throw TypeError from toLowerCase call'
|
||||
);
|
||||
|
||||
// Boolean search — also lacks toLowerCase
|
||||
assert.throws(
|
||||
() => aliases.listAliases({ search: true }),
|
||||
(err) => err instanceof TypeError && /toLowerCase/.test(err.message),
|
||||
'Boolean search value should also throw TypeError'
|
||||
);
|
||||
})) passed++; else failed++;
|
||||
|
||||
// ── Round 115: updateAliasTitle with empty string — stored as null via || but returned as "" ──
|
||||
console.log('\nRound 115: updateAliasTitle (empty string title — stored null, returned ""):');
|
||||
if (test('updateAliasTitle with empty string stores null but returns empty string (|| coercion mismatch)', () => {
|
||||
resetAliases();
|
||||
|
||||
// Create alias with a title
|
||||
aliases.setAlias('r115-alias', '/path/to/session', 'Original Title');
|
||||
const before = aliases.resolveAlias('r115-alias');
|
||||
assert.strictEqual(before.title, 'Original Title', 'Baseline: title should be set');
|
||||
|
||||
// Update title with empty string
|
||||
// Line 383: typeof "" === 'string' → passes validation
|
||||
// Line 393: "" || null → null (empty string is falsy in JS)
|
||||
// Line 400: returns { title: "" } (original parameter, not stored value)
|
||||
const result = aliases.updateAliasTitle('r115-alias', '');
|
||||
assert.strictEqual(result.success, true, 'Should succeed (empty string passes validation)');
|
||||
assert.strictEqual(result.title, '', 'Return value reflects the input parameter (empty string)');
|
||||
|
||||
// But what's actually stored?
|
||||
const after = aliases.resolveAlias('r115-alias');
|
||||
assert.strictEqual(after.title, null,
|
||||
'Stored title should be null because "" || null evaluates to null');
|
||||
|
||||
// Contrast: non-empty string is stored as-is
|
||||
aliases.updateAliasTitle('r115-alias', 'New Title');
|
||||
const withTitle = aliases.resolveAlias('r115-alias');
|
||||
assert.strictEqual(withTitle.title, 'New Title', 'Non-empty string stored as-is');
|
||||
|
||||
// null explicitly clears title
|
||||
aliases.updateAliasTitle('r115-alias', null);
|
||||
const cleared = aliases.resolveAlias('r115-alias');
|
||||
assert.strictEqual(cleared.title, null, 'null clears title');
|
||||
})) passed++; else failed++;
|
||||
|
||||
// ── Round 116: loadAliases with extra unknown fields — silently preserved ──
|
||||
console.log('\nRound 116: loadAliases (extra unknown JSON fields — preserved by loose validation):');
|
||||
if (test('loadAliases preserves extra unknown fields because only aliases key is validated', () => {
|
||||
resetAliases();
|
||||
|
||||
// Manually write an aliases file with extra fields
|
||||
const aliasesPath = aliases.getAliasesPath();
|
||||
const customData = {
|
||||
version: '1.0',
|
||||
aliases: {
|
||||
'test-session': {
|
||||
sessionPath: '/path/to/session',
|
||||
createdAt: '2026-01-01T00:00:00.000Z',
|
||||
updatedAt: '2026-01-01T00:00:00.000Z',
|
||||
title: 'Test'
|
||||
}
|
||||
},
|
||||
metadata: {
|
||||
totalCount: 1,
|
||||
lastUpdated: '2026-01-01T00:00:00.000Z'
|
||||
},
|
||||
customField: 'extra data',
|
||||
debugInfo: { level: 3, verbose: true },
|
||||
tags: ['important', 'test']
|
||||
};
|
||||
fs.writeFileSync(aliasesPath, JSON.stringify(customData, null, 2), 'utf8');
|
||||
|
||||
// loadAliases only validates data.aliases — extra fields pass through
|
||||
const loaded = aliases.loadAliases();
|
||||
assert.ok(loaded.aliases['test-session'], 'Should load the valid alias');
|
||||
assert.strictEqual(loaded.aliases['test-session'].title, 'Test');
|
||||
assert.strictEqual(loaded.customField, 'extra data',
|
||||
'Extra string field should be preserved');
|
||||
assert.deepStrictEqual(loaded.debugInfo, { level: 3, verbose: true },
|
||||
'Extra object field should be preserved');
|
||||
assert.deepStrictEqual(loaded.tags, ['important', 'test'],
|
||||
'Extra array field should be preserved');
|
||||
|
||||
// After saving, extra fields survive a round-trip (saveAliases only updates metadata)
|
||||
aliases.setAlias('new-alias', '/path/to/new');
|
||||
const reloaded = aliases.loadAliases();
|
||||
assert.ok(reloaded.aliases['new-alias'], 'New alias should be saved');
|
||||
assert.strictEqual(reloaded.customField, 'extra data',
|
||||
'Extra field should survive save/load round-trip');
|
||||
})) passed++; else failed++;
|
||||
|
||||
// ── Round 118: renameAlias to the same name — "already exists" because self-check ──
|
||||
console.log('\nRound 118: renameAlias (same name — "already exists" because data.aliases[newAlias] is truthy):');
|
||||
if (test('renameAlias to the same name returns "already exists" error (no self-rename short-circuit)', () => {
|
||||
resetAliases();
|
||||
aliases.setAlias('same-name', '/path/to/session');
|
||||
|
||||
// Rename 'same-name' → 'same-name'
|
||||
// Line 333: data.aliases[newAlias] → truthy (the alias exists under that name)
|
||||
// Returns error before checking if oldAlias === newAlias
|
||||
const result = aliases.renameAlias('same-name', 'same-name');
|
||||
assert.strictEqual(result.success, false, 'Should fail');
|
||||
assert.ok(result.error.includes('already exists'),
|
||||
'Error should say "already exists" (not "same name" or a no-op success)');
|
||||
|
||||
// Verify alias is unchanged
|
||||
const resolved = aliases.resolveAlias('same-name');
|
||||
assert.ok(resolved, 'Original alias should still exist');
|
||||
assert.strictEqual(resolved.sessionPath, '/path/to/session');
|
||||
})) passed++; else failed++;
|
||||
|
||||
// ── Round 118: setAlias reserved names — case-insensitive rejection ──
|
||||
console.log('\nRound 118: setAlias (reserved names — case-insensitive rejection):');
|
||||
if (test('setAlias rejects all reserved names case-insensitively (list, help, remove, delete, create, set)', () => {
|
||||
resetAliases();
|
||||
|
||||
// All reserved names in lowercase
|
||||
const reserved = ['list', 'help', 'remove', 'delete', 'create', 'set'];
|
||||
for (const name of reserved) {
|
||||
const result = aliases.setAlias(name, '/path/to/session');
|
||||
assert.strictEqual(result.success, false,
|
||||
`'${name}' should be rejected as reserved`);
|
||||
assert.ok(result.error.includes('reserved'),
|
||||
`Error for '${name}' should mention "reserved"`);
|
||||
}
|
||||
|
||||
// Case-insensitive: uppercase variants also rejected
|
||||
const upperResult = aliases.setAlias('LIST', '/path/to/session');
|
||||
assert.strictEqual(upperResult.success, false,
|
||||
'"LIST" (uppercase) should be rejected (toLowerCase check)');
|
||||
|
||||
const mixedResult = aliases.setAlias('Help', '/path/to/session');
|
||||
assert.strictEqual(mixedResult.success, false,
|
||||
'"Help" (mixed case) should be rejected');
|
||||
|
||||
const allCapsResult = aliases.setAlias('DELETE', '/path/to/session');
|
||||
assert.strictEqual(allCapsResult.success, false,
|
||||
'"DELETE" (all caps) should be rejected');
|
||||
|
||||
// Non-reserved names work fine
|
||||
const validResult = aliases.setAlias('my-session', '/path/to/session');
|
||||
assert.strictEqual(validResult.success, true,
|
||||
'Non-reserved name should succeed');
|
||||
})) passed++; else failed++;
|
||||
|
||||
// ── Round 119: renameAlias with reserved newAlias name — parallel reserved check ──
|
||||
console.log('\nRound 119: renameAlias (reserved newAlias name — parallel check to setAlias):');
|
||||
if (test('renameAlias rejects reserved names for newAlias (same reserved list as setAlias)', () => {
|
||||
resetAliases();
|
||||
aliases.setAlias('my-alias', '/path/to/session');
|
||||
|
||||
// Rename to reserved name 'list' — should fail
|
||||
const listResult = aliases.renameAlias('my-alias', 'list');
|
||||
assert.strictEqual(listResult.success, false, '"list" should be rejected');
|
||||
assert.ok(listResult.error.includes('reserved'),
|
||||
'Error should mention "reserved"');
|
||||
|
||||
// Rename to reserved name 'help' (uppercase) — should fail
|
||||
const helpResult = aliases.renameAlias('my-alias', 'Help');
|
||||
assert.strictEqual(helpResult.success, false, '"Help" should be rejected');
|
||||
|
||||
// Rename to reserved name 'delete' — should fail
|
||||
const deleteResult = aliases.renameAlias('my-alias', 'DELETE');
|
||||
assert.strictEqual(deleteResult.success, false, '"DELETE" should be rejected');
|
||||
|
||||
// Verify alias is unchanged
|
||||
const resolved = aliases.resolveAlias('my-alias');
|
||||
assert.ok(resolved, 'Original alias should still exist after failed renames');
|
||||
assert.strictEqual(resolved.sessionPath, '/path/to/session');
|
||||
|
||||
// Valid rename works
|
||||
const validResult = aliases.renameAlias('my-alias', 'new-valid-name');
|
||||
assert.strictEqual(validResult.success, true, 'Non-reserved name should succeed');
|
||||
})) passed++; else failed++;
|
||||
|
||||
// ── Round 120: setAlias max length boundary — 128 accepted, 129 rejected ──
|
||||
console.log('\nRound 120: setAlias (max alias length boundary — 128 ok, 129 rejected):');
|
||||
if (test('setAlias accepts exactly 128-char alias name but rejects 129 chars (> 128 boundary)', () => {
|
||||
resetAliases();
|
||||
|
||||
// 128 characters — exactly at limit (alias.length > 128 is false)
|
||||
const name128 = 'a'.repeat(128);
|
||||
const result128 = aliases.setAlias(name128, '/path/to/session');
|
||||
assert.strictEqual(result128.success, true,
|
||||
'128-char alias should be accepted (128 > 128 is false)');
|
||||
|
||||
// 129 characters — just over limit
|
||||
const name129 = 'a'.repeat(129);
|
||||
const result129 = aliases.setAlias(name129, '/path/to/session');
|
||||
assert.strictEqual(result129.success, false,
|
||||
'129-char alias should be rejected (129 > 128 is true)');
|
||||
assert.ok(result129.error.includes('128'),
|
||||
'Error should mention the 128 character limit');
|
||||
|
||||
// 1 character — minimum valid
|
||||
const name1 = 'x';
|
||||
const result1 = aliases.setAlias(name1, '/path/to/session');
|
||||
assert.strictEqual(result1.success, true,
|
||||
'Single character alias should be accepted');
|
||||
|
||||
// Verify the 128-char alias was actually stored
|
||||
const resolved = aliases.resolveAlias(name128);
|
||||
assert.ok(resolved, '128-char alias should be resolvable');
|
||||
assert.strictEqual(resolved.sessionPath, '/path/to/session');
|
||||
})) passed++; else failed++;
|
||||
|
||||
// ── Round 121: setAlias sessionPath validation — null, empty, whitespace, non-string ──
|
||||
console.log('\nRound 121: setAlias (sessionPath validation — null, empty, whitespace, non-string):');
|
||||
if (test('setAlias rejects invalid sessionPath: null, empty, whitespace-only, and non-string types', () => {
|
||||
resetAliases();
|
||||
|
||||
// null sessionPath → falsy → rejected
|
||||
const nullResult = aliases.setAlias('test-alias', null);
|
||||
assert.strictEqual(nullResult.success, false, 'null path should fail');
|
||||
assert.ok(nullResult.error.includes('empty'), 'Error should mention empty');
|
||||
|
||||
// undefined sessionPath → falsy → rejected
|
||||
const undefResult = aliases.setAlias('test-alias', undefined);
|
||||
assert.strictEqual(undefResult.success, false, 'undefined path should fail');
|
||||
|
||||
// empty string → falsy → rejected
|
||||
const emptyResult = aliases.setAlias('test-alias', '');
|
||||
assert.strictEqual(emptyResult.success, false, 'Empty string path should fail');
|
||||
|
||||
// whitespace-only → passes falsy check but trim().length === 0 → rejected
|
||||
const wsResult = aliases.setAlias('test-alias', ' ');
|
||||
assert.strictEqual(wsResult.success, false, 'Whitespace-only path should fail');
|
||||
|
||||
// number → typeof !== 'string' → rejected
|
||||
const numResult = aliases.setAlias('test-alias', 42);
|
||||
assert.strictEqual(numResult.success, false, 'Number path should fail');
|
||||
|
||||
// boolean → typeof !== 'string' → rejected
|
||||
const boolResult = aliases.setAlias('test-alias', true);
|
||||
assert.strictEqual(boolResult.success, false, 'Boolean path should fail');
|
||||
|
||||
// Valid path works
|
||||
const validResult = aliases.setAlias('test-alias', '/valid/path');
|
||||
assert.strictEqual(validResult.success, true, 'Valid string path should succeed');
|
||||
})) passed++; else failed++;
|
||||
|
||||
// ── Round 122: listAliases limit edge cases — limit=0, negative, NaN bypassed (JS falsy) ──
|
||||
console.log('\nRound 122: listAliases (limit edge cases — 0/negative/NaN are falsy, return all):');
|
||||
if (test('listAliases limit=0 returns all aliases because 0 is falsy in JS (no slicing)', () => {
|
||||
resetAliases();
|
||||
aliases.setAlias('alias-a', '/path/a');
|
||||
aliases.setAlias('alias-b', '/path/b');
|
||||
aliases.setAlias('alias-c', '/path/c');
|
||||
|
||||
// limit=0: 0 is falsy → `if (0 && 0 > 0)` short-circuits → no slicing → ALL returned
|
||||
const zeroResult = aliases.listAliases({ limit: 0 });
|
||||
assert.strictEqual(zeroResult.length, 3,
|
||||
'limit=0 should return ALL aliases (0 is falsy in JS)');
|
||||
|
||||
// limit=-1: -1 is truthy but -1 > 0 is false → no slicing → ALL returned
|
||||
const negResult = aliases.listAliases({ limit: -1 });
|
||||
assert.strictEqual(negResult.length, 3,
|
||||
'limit=-1 should return ALL aliases (-1 > 0 is false)');
|
||||
|
||||
// limit=NaN: NaN is falsy → no slicing → ALL returned
|
||||
const nanResult = aliases.listAliases({ limit: NaN });
|
||||
assert.strictEqual(nanResult.length, 3,
|
||||
'limit=NaN should return ALL aliases (NaN is falsy)');
|
||||
|
||||
// limit=1: normal case — returns exactly 1
|
||||
const oneResult = aliases.listAliases({ limit: 1 });
|
||||
assert.strictEqual(oneResult.length, 1,
|
||||
'limit=1 should return exactly 1 alias');
|
||||
|
||||
// limit=2: returns exactly 2
|
||||
const twoResult = aliases.listAliases({ limit: 2 });
|
||||
assert.strictEqual(twoResult.length, 2,
|
||||
'limit=2 should return exactly 2 aliases');
|
||||
|
||||
// limit=100 (more than total): returns all 3
|
||||
const bigResult = aliases.listAliases({ limit: 100 });
|
||||
assert.strictEqual(bigResult.length, 3,
|
||||
'limit > total should return all aliases');
|
||||
})) passed++; else failed++;
|
||||
|
||||
// ── Round 125: loadAliases with __proto__ key in JSON — no prototype pollution ──
|
||||
console.log('\nRound 125: loadAliases (__proto__ key in JSON — safe, no prototype pollution):');
|
||||
if (test('loadAliases with __proto__ alias key does not pollute Object prototype', () => {
|
||||
// JSON.parse('{"__proto__":...}') creates a normal property named "__proto__",
|
||||
// it does NOT modify Object.prototype. This is safe but worth documenting.
|
||||
// The alias would be accessible via data.aliases['__proto__'] and iterable
|
||||
// via Object.entries, but it won't affect other objects.
|
||||
resetAliases();
|
||||
|
||||
// Write raw JSON string with __proto__ as an alias name.
|
||||
// IMPORTANT: Cannot use JSON.stringify(obj) because {'__proto__':...} in JS
|
||||
// sets the prototype rather than creating an own property, so stringify drops it.
|
||||
// Must write the JSON string directly to simulate a maliciously crafted file.
|
||||
const aliasesPath = aliases.getAliasesPath();
|
||||
const now = new Date().toISOString();
|
||||
const rawJson = `{
|
||||
"version": "1.0.0",
|
||||
"aliases": {
|
||||
"__proto__": {
|
||||
"sessionPath": "/evil/path",
|
||||
"createdAt": "${now}",
|
||||
"title": "Prototype Pollution Attempt"
|
||||
},
|
||||
"normal": {
|
||||
"sessionPath": "/normal/path",
|
||||
"createdAt": "${now}",
|
||||
"title": "Normal Alias"
|
||||
}
|
||||
},
|
||||
"metadata": { "totalCount": 2, "lastUpdated": "${now}" }
|
||||
}`;
|
||||
fs.writeFileSync(aliasesPath, rawJson);
|
||||
|
||||
// Load aliases — should NOT pollute prototype
|
||||
const data = aliases.loadAliases();
|
||||
|
||||
// Verify __proto__ did NOT pollute Object.prototype
|
||||
const freshObj = {};
|
||||
assert.strictEqual(freshObj.sessionPath, undefined,
|
||||
'Object.prototype should NOT have sessionPath (no pollution)');
|
||||
assert.strictEqual(freshObj.title, undefined,
|
||||
'Object.prototype should NOT have title (no pollution)');
|
||||
|
||||
// The __proto__ key IS accessible as a normal property
|
||||
assert.ok(data.aliases['__proto__'],
|
||||
'__proto__ key exists as normal property in parsed aliases');
|
||||
assert.strictEqual(data.aliases['__proto__'].sessionPath, '/evil/path',
|
||||
'__proto__ alias data is accessible normally');
|
||||
|
||||
// Normal alias also works
|
||||
assert.ok(data.aliases['normal'],
|
||||
'Normal alias coexists with __proto__ key');
|
||||
|
||||
// resolveAlias with '__proto__' — rejected by regex (underscores ok but __ prefix works)
|
||||
// Actually ^[a-zA-Z0-9_-]+$ would ACCEPT '__proto__' since _ is allowed
|
||||
const resolved = aliases.resolveAlias('__proto__');
|
||||
// If the regex accepts it, it should find the alias
|
||||
if (resolved) {
|
||||
assert.strictEqual(resolved.sessionPath, '/evil/path',
|
||||
'resolveAlias can access __proto__ alias (regex allows underscores)');
|
||||
}
|
||||
|
||||
// Object.keys should enumerate __proto__ from JSON.parse
|
||||
const keys = Object.keys(data.aliases);
|
||||
assert.ok(keys.includes('__proto__'),
|
||||
'Object.keys includes __proto__ from JSON.parse (normal property)');
|
||||
assert.ok(keys.includes('normal'),
|
||||
'Object.keys includes normal alias');
|
||||
})) passed++; else failed++;
|
||||
|
||||
// Summary
|
||||
console.log(`\nResults: Passed: ${passed}, Failed: ${failed}`);
|
||||
process.exit(failed > 0 ? 1 : 0);
|
||||
|
||||
File diff suppressed because it is too large
Load Diff
File diff suppressed because it is too large
Load Diff
@@ -8,6 +8,8 @@
|
||||
|
||||
const assert = require('assert');
|
||||
const path = require('path');
|
||||
const fs = require('fs');
|
||||
const os = require('os');
|
||||
const { execFileSync } = require('child_process');
|
||||
|
||||
const SCRIPT = path.join(__dirname, '..', '..', 'scripts', 'setup-package-manager.js');
|
||||
@@ -256,6 +258,137 @@ function runTests() {
|
||||
assert.strictEqual(installCount, 4, `Expected 4 "Install:" entries, found ${installCount}`);
|
||||
})) passed++; else failed++;
|
||||
|
||||
// ── Round 62: --global success path and bare PM name ──
|
||||
console.log('\n--global success path (Round 62):');
|
||||
|
||||
if (test('--global npm writes config and succeeds', () => {
|
||||
const tmpDir = path.join(os.tmpdir(), `spm-test-global-${Date.now()}`);
|
||||
fs.mkdirSync(tmpDir, { recursive: true });
|
||||
try {
|
||||
const result = run(['--global', 'npm'], { HOME: tmpDir, USERPROFILE: tmpDir });
|
||||
assert.strictEqual(result.code, 0, `Expected exit 0, got ${result.code}. stderr: ${result.stderr}`);
|
||||
assert.ok(result.stdout.includes('Global preference set to'), 'Should show success message');
|
||||
assert.ok(result.stdout.includes('npm'), 'Should mention npm');
|
||||
// Verify config file was created
|
||||
const configPath = path.join(tmpDir, '.claude', 'package-manager.json');
|
||||
assert.ok(fs.existsSync(configPath), 'Config file should be created');
|
||||
const config = JSON.parse(fs.readFileSync(configPath, 'utf8'));
|
||||
assert.strictEqual(config.packageManager, 'npm', 'Config should contain npm');
|
||||
} finally {
|
||||
fs.rmSync(tmpDir, { recursive: true, force: true });
|
||||
}
|
||||
})) passed++; else failed++;
|
||||
|
||||
console.log('\nbare PM name success (Round 62):');
|
||||
|
||||
if (test('bare npm sets global preference and succeeds', () => {
|
||||
const tmpDir = path.join(os.tmpdir(), `spm-test-bare-${Date.now()}`);
|
||||
fs.mkdirSync(tmpDir, { recursive: true });
|
||||
try {
|
||||
const result = run(['npm'], { HOME: tmpDir, USERPROFILE: tmpDir });
|
||||
assert.strictEqual(result.code, 0, `Expected exit 0, got ${result.code}. stderr: ${result.stderr}`);
|
||||
assert.ok(result.stdout.includes('Global preference set to'), 'Should show success message');
|
||||
// Verify config file was created
|
||||
const configPath = path.join(tmpDir, '.claude', 'package-manager.json');
|
||||
assert.ok(fs.existsSync(configPath), 'Config file should be created');
|
||||
const config = JSON.parse(fs.readFileSync(configPath, 'utf8'));
|
||||
assert.strictEqual(config.packageManager, 'npm', 'Config should contain npm');
|
||||
} finally {
|
||||
fs.rmSync(tmpDir, { recursive: true, force: true });
|
||||
}
|
||||
})) passed++; else failed++;
|
||||
|
||||
console.log('\n--detect source label (Round 62):');
|
||||
|
||||
if (test('--detect with env var shows source as environment', () => {
|
||||
const result = run(['--detect'], { CLAUDE_PACKAGE_MANAGER: 'pnpm' });
|
||||
assert.strictEqual(result.code, 0);
|
||||
assert.ok(result.stdout.includes('Source: environment'), 'Should show environment as source');
|
||||
})) passed++; else failed++;
|
||||
|
||||
// ── Round 68: --project success path and --list (current) marker ──
|
||||
console.log('\n--project success path (Round 68):');
|
||||
|
||||
if (test('--project npm writes project config and succeeds', () => {
|
||||
const tmpDir = path.join(os.tmpdir(), `spm-test-project-${Date.now()}`);
|
||||
fs.mkdirSync(tmpDir, { recursive: true });
|
||||
try {
|
||||
const result = require('child_process').spawnSync('node', [SCRIPT, '--project', 'npm'], {
|
||||
encoding: 'utf8',
|
||||
stdio: ['pipe', 'pipe', 'pipe'],
|
||||
env: { ...process.env },
|
||||
timeout: 10000,
|
||||
cwd: tmpDir
|
||||
});
|
||||
assert.strictEqual(result.status, 0, `Expected exit 0, got ${result.status}. stderr: ${result.stderr}`);
|
||||
assert.ok(result.stdout.includes('Project preference set to'), 'Should show project success message');
|
||||
assert.ok(result.stdout.includes('npm'), 'Should mention npm');
|
||||
// Verify config file was created in the project CWD
|
||||
const configPath = path.join(tmpDir, '.claude', 'package-manager.json');
|
||||
assert.ok(fs.existsSync(configPath), 'Project config file should be created in CWD');
|
||||
const config = JSON.parse(fs.readFileSync(configPath, 'utf8'));
|
||||
assert.strictEqual(config.packageManager, 'npm', 'Config should contain npm');
|
||||
} finally {
|
||||
fs.rmSync(tmpDir, { recursive: true, force: true });
|
||||
}
|
||||
})) passed++; else failed++;
|
||||
|
||||
console.log('\n--list (current) marker (Round 68):');
|
||||
|
||||
if (test('--list output includes (current) marker for active PM', () => {
|
||||
const result = run(['--list']);
|
||||
assert.strictEqual(result.code, 0);
|
||||
assert.ok(result.stdout.includes('(current)'), '--list should mark the active PM with (current)');
|
||||
// The (current) marker should appear exactly once
|
||||
const currentCount = (result.stdout.match(/\(current\)/g) || []).length;
|
||||
assert.strictEqual(currentCount, 1, `Expected exactly 1 "(current)" in --list, found ${currentCount}`);
|
||||
})) passed++; else failed++;
|
||||
|
||||
// ── Round 74: setGlobal catch — setPreferredPackageManager throws ──
|
||||
console.log('\nRound 74: setGlobal catch (save failure):');
|
||||
|
||||
if (test('--global npm fails when HOME is not a directory', () => {
|
||||
if (process.platform === 'win32') {
|
||||
console.log(' (skipped — /dev/null not available on Windows)');
|
||||
return;
|
||||
}
|
||||
// HOME=/dev/null causes ensureDir to throw ENOTDIR when creating ~/.claude/
|
||||
const result = run(['--global', 'npm'], { HOME: '/dev/null', USERPROFILE: '/dev/null' });
|
||||
assert.strictEqual(result.code, 1, `Expected exit 1, got ${result.code}`);
|
||||
assert.ok(result.stderr.includes('Error:'),
|
||||
`stderr should contain Error:, got: ${result.stderr}`);
|
||||
})) passed++; else failed++;
|
||||
|
||||
// ── Round 74: setProject catch — setProjectPackageManager throws ──
|
||||
console.log('\nRound 74: setProject catch (save failure):');
|
||||
|
||||
if (test('--project npm fails when CWD is read-only', () => {
|
||||
if (process.platform === 'win32' || process.getuid?.() === 0) {
|
||||
console.log(' (skipped — chmod ineffective on Windows/root)');
|
||||
return;
|
||||
}
|
||||
const tmpDir = path.join(os.tmpdir(), `spm-test-ro-${Date.now()}`);
|
||||
fs.mkdirSync(tmpDir, { recursive: true });
|
||||
try {
|
||||
// Make CWD read-only so .claude/ dir creation fails with EACCES
|
||||
fs.chmodSync(tmpDir, 0o555);
|
||||
const result = require('child_process').spawnSync('node', [SCRIPT, '--project', 'npm'], {
|
||||
encoding: 'utf8',
|
||||
stdio: ['pipe', 'pipe', 'pipe'],
|
||||
env: { ...process.env },
|
||||
timeout: 10000,
|
||||
cwd: tmpDir
|
||||
});
|
||||
assert.strictEqual(result.status, 1,
|
||||
`Expected exit 1, got ${result.status}. stderr: ${result.stderr}`);
|
||||
assert.ok(result.stderr.includes('Error:'),
|
||||
`stderr should contain Error:, got: ${result.stderr}`);
|
||||
} finally {
|
||||
try { fs.chmodSync(tmpDir, 0o755); } catch { /* best-effort */ }
|
||||
fs.rmSync(tmpDir, { recursive: true, force: true });
|
||||
}
|
||||
})) passed++; else failed++;
|
||||
|
||||
// Summary
|
||||
console.log(`\nResults: Passed: ${passed}, Failed: ${failed}`);
|
||||
process.exit(failed > 0 ? 1 : 0);
|
||||
|
||||
@@ -451,6 +451,80 @@ function runTests() {
|
||||
});
|
||||
})) passed++; else failed++;
|
||||
|
||||
// ── Round 54: analysisResults with zero values ──
|
||||
console.log('\nanalysisResults zero values (Round 54):');
|
||||
|
||||
if (test('analysisResults handles zero values for all data fields', () => {
|
||||
const output = new SkillCreateOutput('repo');
|
||||
const logs = captureLog(() => output.analysisResults({
|
||||
commits: 0, timeRange: '', contributors: 0, files: 0,
|
||||
}));
|
||||
const combined = logs.join('\n');
|
||||
assert.ok(combined.includes('0'), 'Should display zero values');
|
||||
assert.ok(logs.length > 0, 'Should produce output without crash');
|
||||
// Box lines should still be 60 chars wide
|
||||
const boxLines = combined.split('\n').filter(l => {
|
||||
const s = stripAnsi(l).trim();
|
||||
return s.startsWith('\u256D') || s.startsWith('\u2502') || s.startsWith('\u2570');
|
||||
});
|
||||
assert.ok(boxLines.length >= 3, 'Should render a complete box');
|
||||
})) passed++; else failed++;
|
||||
|
||||
// ── Round 68: demo function export ──
|
||||
console.log('\ndemo export (Round 68):');
|
||||
|
||||
if (test('module exports demo function alongside SkillCreateOutput', () => {
|
||||
const mod = require('../../scripts/skill-create-output');
|
||||
assert.ok(mod.demo, 'Should export demo function');
|
||||
assert.strictEqual(typeof mod.demo, 'function', 'demo should be a function');
|
||||
assert.ok(mod.SkillCreateOutput, 'Should also export SkillCreateOutput');
|
||||
assert.strictEqual(typeof mod.SkillCreateOutput, 'function', 'SkillCreateOutput should be a constructor');
|
||||
})) passed++; else failed++;
|
||||
|
||||
// ── Round 85: patterns() confidence=0 uses ?? (not ||) ──
|
||||
console.log('\nRound 85: patterns() confidence=0 nullish coalescing:');
|
||||
|
||||
if (test('patterns() with confidence=0 shows 0%, not 80% (nullish coalescing fix)', () => {
|
||||
const output = new SkillCreateOutput('repo');
|
||||
const logs = captureLog(() => output.patterns([
|
||||
{ name: 'Zero Confidence', trigger: 'never', confidence: 0, evidence: 'none' },
|
||||
]));
|
||||
const combined = stripAnsi(logs.join('\n'));
|
||||
// With ?? operator: 0 ?? 0.8 = 0 → Math.round(0 * 100) = 0 → shows "0%"
|
||||
// With || operator (bug): 0 || 0.8 = 0.8 → shows "80%"
|
||||
assert.ok(combined.includes('0%'), 'Should show 0% for zero confidence');
|
||||
assert.ok(!combined.includes('80%'),
|
||||
'Should NOT show 80% — confidence=0 is explicitly provided, not missing');
|
||||
})) passed++; else failed++;
|
||||
|
||||
// ── Round 87: analyzePhase() async method (untested) ──
|
||||
console.log('\nRound 87: analyzePhase() async method:');
|
||||
|
||||
if (test('analyzePhase completes without error and writes to stdout', () => {
|
||||
const output = new SkillCreateOutput('test-repo');
|
||||
// analyzePhase is async and calls animateProgress which uses sleep() and
|
||||
// process.stdout.write/clearLine/cursorTo. In non-TTY environments clearLine
|
||||
// and cursorTo are undefined, but the code uses optional chaining (?.) to
|
||||
// handle this safely. We verify it resolves without throwing.
|
||||
// Capture stdout.write to verify output was produced.
|
||||
const writes = [];
|
||||
const origWrite = process.stdout.write;
|
||||
process.stdout.write = function(str) { writes.push(String(str)); return true; };
|
||||
try {
|
||||
// Call synchronously by accessing the returned promise — we just need to
|
||||
// verify it doesn't throw during setup. The sleeps total 1.9s so we
|
||||
// verify the promise is a thenable (async function returns Promise).
|
||||
const promise = output.analyzePhase({ commits: 42 });
|
||||
assert.ok(promise && typeof promise.then === 'function',
|
||||
'analyzePhase should return a Promise');
|
||||
} finally {
|
||||
process.stdout.write = origWrite;
|
||||
}
|
||||
// Verify that process.stdout.write was called (the header line is written synchronously)
|
||||
assert.ok(writes.length > 0, 'Should have written output via process.stdout.write');
|
||||
assert.ok(writes.some(w => w.includes('Analyzing')), 'Should include "Analyzing" label');
|
||||
})) passed++; else failed++;
|
||||
|
||||
// Summary
|
||||
console.log(`\nResults: Passed: ${passed}, Failed: ${failed}`);
|
||||
process.exit(failed > 0 ? 1 : 0);
|
||||
|
||||
Reference in New Issue
Block a user