From 911d38f686996383d163b679fba2cf4495afe6e7 Mon Sep 17 00:00:00 2001 From: Affaan Mustafa Date: Thu, 12 Feb 2026 15:24:28 -0800 Subject: [PATCH] feat: add 3 new skills, JS syntax validation in hooks CI, and edge case tests - New skills: api-design, database-migrations, deployment-patterns - validate-hooks.js: validate inline JS syntax in node -e hook commands - utils.test.js: edge case tests for findFiles with null/undefined inputs - README: update skill count to 37, add new skills to directory tree --- README.md | 7 +- scripts/ci/validate-hooks.js | 23 ++ skills/api-design/SKILL.md | 522 ++++++++++++++++++++++++++++ skills/database-migrations/SKILL.md | 334 ++++++++++++++++++ skills/deployment-patterns/SKILL.md | 426 +++++++++++++++++++++++ tests/lib/utils.test.js | 125 +++++++ 6 files changed, 1435 insertions(+), 2 deletions(-) create mode 100644 skills/api-design/SKILL.md create mode 100644 skills/database-migrations/SKILL.md create mode 100644 skills/deployment-patterns/SKILL.md diff --git a/README.md b/README.md index eee146e9..37b10bb4 100644 --- a/README.md +++ b/README.md @@ -143,7 +143,7 @@ For manual install instructions see the README in the `rules/` folder. /plugin list everything-claude-code@everything-claude-code ``` -✨ **That's it!** You now have access to 13 agents, 34 skills, and 31 commands. +✨ **That's it!** You now have access to 13 agents, 37 skills, and 31 commands. 
--- @@ -239,6 +239,9 @@ everything-claude-code/ | |-- postgres-patterns/ # PostgreSQL optimization patterns (NEW) | |-- nutrient-document-processing/ # Document processing with Nutrient API (NEW) | |-- project-guidelines-example/ # Template for project-specific skills +| |-- database-migrations/ # Migration patterns (Prisma, Drizzle, Django, Go) (NEW) +| |-- api-design/ # REST API design, pagination, error responses (NEW) +| |-- deployment-patterns/ # CI/CD, Docker, health checks, rollbacks (NEW) | |-- commands/ # Slash commands for quick execution | |-- tdd.md # /tdd - Test-driven development @@ -794,7 +797,7 @@ The configuration is automatically detected from `.opencode/opencode.json`. |---------|-------------|----------|--------| | Agents | ✅ 13 agents | ✅ 12 agents | **Claude Code leads** | | Commands | ✅ 31 commands | ✅ 24 commands | **Claude Code leads** | -| Skills | ✅ 34 skills | ✅ 16 skills | **Claude Code leads** | +| Skills | ✅ 37 skills | ✅ 16 skills | **Claude Code leads** | | Hooks | ✅ 3 phases | ✅ 20+ events | **OpenCode has more!** | | Rules | ✅ 8 rules | ✅ 8 rules | **Full parity** | | MCP Servers | ✅ Full | ✅ Full | **Full parity** | diff --git a/scripts/ci/validate-hooks.js b/scripts/ci/validate-hooks.js index bc07a9d9..68e0e45f 100644 --- a/scripts/ci/validate-hooks.js +++ b/scripts/ci/validate-hooks.js @@ -5,6 +5,7 @@ const fs = require('fs'); const path = require('path'); +const vm = require('vm'); const HOOKS_FILE = path.join(__dirname, '../../hooks/hooks.json'); const VALID_EVENTS = ['PreToolUse', 'PostToolUse', 'PreCompact', 'SessionStart', 'SessionEnd', 'Stop', 'Notification', 'SubagentStop']; @@ -68,6 +69,17 @@ function validateHooks() { if (!hook.command || (typeof hook.command !== 'string' && !Array.isArray(hook.command))) { console.error(`ERROR: ${eventType}[${i}].hooks[${j}] missing or invalid 'command' field`); hasErrors = true; + } else if (typeof hook.command === 'string') { + // Validate inline JS syntax in node -e commands + 
const nodeEMatch = hook.command.match(/^node -e "(.*)"$/s); + if (nodeEMatch) { + try { + new vm.Script(nodeEMatch[1].replace(/\\"/g, '"').replace(/\\n/g, '\n')); + } catch (syntaxErr) { + console.error(`ERROR: ${eventType}[${i}].hooks[${j}] has invalid inline JS: ${syntaxErr.message}`); + hasErrors = true; + } + } } } } @@ -96,6 +108,17 @@ function validateHooks() { if (!h.command || (typeof h.command !== 'string' && !Array.isArray(h.command))) { console.error(`ERROR: Hook ${i}.hooks[${j}] missing or invalid 'command' field`); hasErrors = true; + } else if (typeof h.command === 'string') { + // Validate inline JS syntax in node -e commands + const nodeEMatch = h.command.match(/^node -e "(.*)"$/s); + if (nodeEMatch) { + try { + new vm.Script(nodeEMatch[1].replace(/\\"/g, '"').replace(/\\n/g, '\n')); + } catch (syntaxErr) { + console.error(`ERROR: Hook ${i}.hooks[${j}] has invalid inline JS: ${syntaxErr.message}`); + hasErrors = true; + } + } } } } diff --git a/skills/api-design/SKILL.md b/skills/api-design/SKILL.md new file mode 100644 index 00000000..4a9aa417 --- /dev/null +++ b/skills/api-design/SKILL.md @@ -0,0 +1,522 @@ +--- +name: api-design +description: REST API design patterns including resource naming, status codes, pagination, filtering, error responses, versioning, and rate limiting for production APIs. +--- + +# API Design Patterns + +Conventions and best practices for designing consistent, developer-friendly REST APIs. 
+ +## When to Activate + +- Designing new API endpoints +- Reviewing existing API contracts +- Adding pagination, filtering, or sorting +- Implementing error handling for APIs +- Planning API versioning strategy +- Building public or partner-facing APIs + +## Resource Design + +### URL Structure + +``` +# Resources are nouns, plural, lowercase, kebab-case +GET /api/v1/users +GET /api/v1/users/:id +POST /api/v1/users +PUT /api/v1/users/:id +PATCH /api/v1/users/:id +DELETE /api/v1/users/:id + +# Sub-resources for relationships +GET /api/v1/users/:id/orders +POST /api/v1/users/:id/orders + +# Actions that don't map to CRUD (use verbs sparingly) +POST /api/v1/orders/:id/cancel +POST /api/v1/auth/login +POST /api/v1/auth/refresh +``` + +### Naming Rules + +``` +# GOOD +/api/v1/team-members # kebab-case for multi-word resources +/api/v1/orders?status=active # query params for filtering +/api/v1/users/123/orders # nested resources for ownership + +# BAD +/api/v1/getUsers # verb in URL +/api/v1/user # singular (use plural) +/api/v1/team_members # snake_case in URLs +/api/v1/users/123/getOrders # verb in nested resource +``` + +## HTTP Methods and Status Codes + +### Method Semantics + +| Method | Idempotent | Safe | Use For | +|--------|-----------|------|---------| +| GET | Yes | Yes | Retrieve resources | +| POST | No | No | Create resources, trigger actions | +| PUT | Yes | No | Full replacement of a resource | +| PATCH | No* | No | Partial update of a resource | +| DELETE | Yes | No | Remove a resource | + +*PATCH can be made idempotent with proper implementation + +### Status Code Reference + +``` +# Success +200 OK — GET, PUT, PATCH (with response body) +201 Created — POST (include Location header) +204 No Content — DELETE, PUT (no response body) + +# Client Errors +400 Bad Request — Validation failure, malformed JSON +401 Unauthorized — Missing or invalid authentication +403 Forbidden — Authenticated but not authorized +404 Not Found — Resource doesn't exist +409 
Conflict — Duplicate entry, state conflict +422 Unprocessable Entity — Semantically invalid (valid JSON, bad data) +429 Too Many Requests — Rate limit exceeded + +# Server Errors +500 Internal Server Error — Unexpected failure (never expose details) +502 Bad Gateway — Upstream service failed +503 Service Unavailable — Temporary overload, include Retry-After +``` + +### Common Mistakes + +``` +# BAD: 200 for everything +{ "status": 200, "success": false, "error": "Not found" } + +# GOOD: Use HTTP status codes semantically +HTTP/1.1 404 Not Found +{ "error": { "code": "not_found", "message": "User not found" } } + +# BAD: 500 for validation errors +# GOOD: 400 or 422 with field-level details + +# BAD: 200 for created resources +# GOOD: 201 with Location header +HTTP/1.1 201 Created +Location: /api/v1/users/abc-123 +``` + +## Response Format + +### Success Response + +```json +{ + "data": { + "id": "abc-123", + "email": "alice@example.com", + "name": "Alice", + "created_at": "2025-01-15T10:30:00Z" + } +} +``` + +### Collection Response (with Pagination) + +```json +{ + "data": [ + { "id": "abc-123", "name": "Alice" }, + { "id": "def-456", "name": "Bob" } + ], + "meta": { + "total": 142, + "page": 1, + "per_page": 20, + "total_pages": 8 + }, + "links": { + "self": "/api/v1/users?page=1&per_page=20", + "next": "/api/v1/users?page=2&per_page=20", + "last": "/api/v1/users?page=8&per_page=20" + } +} +``` + +### Error Response + +```json +{ + "error": { + "code": "validation_error", + "message": "Request validation failed", + "details": [ + { + "field": "email", + "message": "Must be a valid email address", + "code": "invalid_format" + }, + { + "field": "age", + "message": "Must be between 0 and 150", + "code": "out_of_range" + } + ] + } +} +``` + +### Response Envelope Variants + +```typescript +// Option A: Envelope with data wrapper (recommended for public APIs) +interface ApiResponse<T> { + data: T; + meta?: PaginationMeta; + links?: PaginationLinks; +} + +interface 
ApiError { + error: { + code: string; + message: string; + details?: FieldError[]; + }; +} + +// Option B: Flat response (simpler, common for internal APIs) +// Success: just return the resource directly +// Error: return error object +// Distinguish by HTTP status code +``` + +## Pagination + +### Offset-Based (Simple) + +``` +GET /api/v1/users?page=2&per_page=20 + +# Implementation +SELECT * FROM users +ORDER BY created_at DESC +LIMIT 20 OFFSET 20; +``` + +**Pros:** Easy to implement, supports "jump to page N" +**Cons:** Slow on large offsets (OFFSET 100000), inconsistent with concurrent inserts + +### Cursor-Based (Scalable) + +``` +GET /api/v1/users?cursor=eyJpZCI6MTIzfQ&limit=20 + +# Implementation +SELECT * FROM users +WHERE id > :cursor_id +ORDER BY id ASC +LIMIT 21; -- fetch one extra to determine has_next +``` + +```json +{ + "data": [...], + "meta": { + "has_next": true, + "next_cursor": "eyJpZCI6MTQzfQ" + } +} +``` + +**Pros:** Consistent performance regardless of position, stable with concurrent inserts +**Cons:** Cannot jump to arbitrary page, cursor is opaque + +### When to Use Which + +| Use Case | Pagination Type | +|----------|----------------| +| Admin dashboards, small datasets (<10K) | Offset | +| Infinite scroll, feeds, large datasets | Cursor | +| Public APIs | Cursor (default) with offset (optional) | +| Search results | Offset (users expect page numbers) | + +## Filtering, Sorting, and Search + +### Filtering + +``` +# Simple equality +GET /api/v1/orders?status=active&customer_id=abc-123 + +# Comparison operators (use bracket notation) +GET /api/v1/products?price[gte]=10&price[lte]=100 +GET /api/v1/orders?created_at[after]=2025-01-01 + +# Multiple values (comma-separated) +GET /api/v1/products?category=electronics,clothing + +# Nested fields (dot notation) +GET /api/v1/orders?customer.country=US +``` + +### Sorting + +``` +# Single field (prefix - for descending) +GET /api/v1/products?sort=-created_at + +# Multiple fields (comma-separated) 
+GET /api/v1/products?sort=-featured,price,-created_at +``` + +### Full-Text Search + +``` +# Search query parameter +GET /api/v1/products?q=wireless+headphones + +# Field-specific search +GET /api/v1/users?email=alice +``` + +### Sparse Fieldsets + +``` +# Return only specified fields (reduces payload) +GET /api/v1/users?fields=id,name,email +GET /api/v1/orders?fields=id,total,status&include=customer.name +``` + +## Authentication and Authorization + +### Token-Based Auth + +``` +# Bearer token in Authorization header +GET /api/v1/users +Authorization: Bearer eyJhbGciOiJIUzI1NiIs... + +# API key (for server-to-server) +GET /api/v1/data +X-API-Key: sk_live_abc123 +``` + +### Authorization Patterns + +```typescript +// Resource-level: check ownership +app.get("/api/v1/orders/:id", async (req, res) => { + const order = await Order.findById(req.params.id); + if (!order) return res.status(404).json({ error: { code: "not_found" } }); + if (order.userId !== req.user.id) return res.status(403).json({ error: { code: "forbidden" } }); + return res.json({ data: order }); +}); + +// Role-based: check permissions +app.delete("/api/v1/users/:id", requireRole("admin"), async (req, res) => { + await User.delete(req.params.id); + return res.status(204).send(); +}); +``` + +## Rate Limiting + +### Headers + +``` +HTTP/1.1 200 OK +X-RateLimit-Limit: 100 +X-RateLimit-Remaining: 95 +X-RateLimit-Reset: 1640000000 + +# When exceeded +HTTP/1.1 429 Too Many Requests +Retry-After: 60 +{ + "error": { + "code": "rate_limit_exceeded", + "message": "Rate limit exceeded. Try again in 60 seconds." 
+ } +} +``` + +### Rate Limit Tiers + +| Tier | Limit | Window | Use Case | +|------|-------|--------|----------| +| Anonymous | 30/min | Per IP | Public endpoints | +| Authenticated | 100/min | Per user | Standard API access | +| Premium | 1000/min | Per API key | Paid API plans | +| Internal | 10000/min | Per service | Service-to-service | + +## Versioning + +### URL Path Versioning (Recommended) + +``` +/api/v1/users +/api/v2/users +``` + +**Pros:** Explicit, easy to route, cacheable +**Cons:** URL changes between versions + +### Header Versioning + +``` +GET /api/users +Accept: application/vnd.myapp.v2+json +``` + +**Pros:** Clean URLs +**Cons:** Harder to test, easy to forget + +### Versioning Strategy + +``` +1. Start with /api/v1/ — don't version until you need to +2. Maintain at most 2 active versions (current + previous) +3. Deprecation timeline: + - Announce deprecation (6 months notice for public APIs) + - Add Sunset header: Sunset: Sat, 01 Jan 2026 00:00:00 GMT + - Return 410 Gone after sunset date +4. Non-breaking changes don't need a new version: + - Adding new fields to responses + - Adding new optional query parameters + - Adding new endpoints +5. 
Breaking changes require a new version: + - Removing or renaming fields + - Changing field types + - Changing URL structure + - Changing authentication method +``` + +## Implementation Patterns + +### TypeScript (Next.js API Route) + +```typescript +import { z } from "zod"; +import { NextRequest, NextResponse } from "next/server"; + +const createUserSchema = z.object({ + email: z.string().email(), + name: z.string().min(1).max(100), +}); + +export async function POST(req: NextRequest) { + const body = await req.json(); + const parsed = createUserSchema.safeParse(body); + + if (!parsed.success) { + return NextResponse.json({ + error: { + code: "validation_error", + message: "Request validation failed", + details: parsed.error.issues.map(i => ({ + field: i.path.join("."), + message: i.message, + code: i.code, + })), + }, + }, { status: 422 }); + } + + const user = await createUser(parsed.data); + + return NextResponse.json( + { data: user }, + { + status: 201, + headers: { Location: `/api/v1/users/${user.id}` }, + }, + ); +} +``` + +### Python (Django REST Framework) + +```python +from rest_framework import serializers, viewsets, status +from rest_framework.response import Response + +class CreateUserSerializer(serializers.Serializer): + email = serializers.EmailField() + name = serializers.CharField(max_length=100) + +class UserSerializer(serializers.ModelSerializer): + class Meta: + model = User + fields = ["id", "email", "name", "created_at"] + +class UserViewSet(viewsets.ModelViewSet): + serializer_class = UserSerializer + permission_classes = [IsAuthenticated] + + def get_serializer_class(self): + if self.action == "create": + return CreateUserSerializer + return UserSerializer + + def create(self, request): + serializer = CreateUserSerializer(data=request.data) + serializer.is_valid(raise_exception=True) + user = UserService.create(**serializer.validated_data) + return Response( + {"data": UserSerializer(user).data}, + status=status.HTTP_201_CREATED, + 
headers={"Location": f"/api/v1/users/{user.id}"}, + ) +``` + +### Go (net/http) + +```go +func (h *UserHandler) CreateUser(w http.ResponseWriter, r *http.Request) { + var req CreateUserRequest + if err := json.NewDecoder(r.Body).Decode(&req); err != nil { + writeError(w, http.StatusBadRequest, "invalid_json", "Invalid request body") + return + } + + if err := req.Validate(); err != nil { + writeError(w, http.StatusUnprocessableEntity, "validation_error", err.Error()) + return + } + + user, err := h.service.Create(r.Context(), req) + if err != nil { + switch { + case errors.Is(err, domain.ErrEmailTaken): + writeError(w, http.StatusConflict, "email_taken", "Email already registered") + default: + writeError(w, http.StatusInternalServerError, "internal_error", "Internal error") + } + return + } + + w.Header().Set("Location", fmt.Sprintf("/api/v1/users/%s", user.ID)) + writeJSON(w, http.StatusCreated, map[string]any{"data": user}) +} +``` + +## API Design Checklist + +Before shipping a new endpoint: + +- [ ] Resource URL follows naming conventions (plural, kebab-case, no verbs) +- [ ] Correct HTTP method used (GET for reads, POST for creates, etc.) 
+- [ ] Appropriate status codes returned (not 200 for everything) +- [ ] Input validated with schema (Zod, Pydantic, Bean Validation) +- [ ] Error responses follow standard format with codes and messages +- [ ] Pagination implemented for list endpoints (cursor or offset) +- [ ] Authentication required (or explicitly marked as public) +- [ ] Authorization checked (user can only access their own resources) +- [ ] Rate limiting configured +- [ ] Response does not leak internal details (stack traces, SQL errors) +- [ ] Consistent naming with existing endpoints (camelCase vs snake_case) +- [ ] Documented (OpenAPI/Swagger spec updated) diff --git a/skills/database-migrations/SKILL.md b/skills/database-migrations/SKILL.md new file mode 100644 index 00000000..789d2bba --- /dev/null +++ b/skills/database-migrations/SKILL.md @@ -0,0 +1,334 @@ +--- +name: database-migrations +description: Database migration best practices for schema changes, data migrations, rollbacks, and zero-downtime deployments across PostgreSQL, MySQL, and common ORMs (Prisma, Drizzle, Django, TypeORM, golang-migrate). +--- + +# Database Migration Patterns + +Safe, reversible database schema changes for production systems. + +## When to Activate + +- Creating or altering database tables +- Adding/removing columns or indexes +- Running data migrations (backfill, transform) +- Planning zero-downtime schema changes +- Setting up migration tooling for a new project + +## Core Principles + +1. **Every change is a migration** — never alter production databases manually +2. **Migrations are forward-only in production** — rollbacks use new forward migrations +3. **Schema and data migrations are separate** — never mix DDL and DML in one migration +4. **Test migrations against production-sized data** — a migration that works on 100 rows may lock on 10M +5. 
**Migrations are immutable once deployed** — never edit a migration that has run in production + +## Migration Safety Checklist + +Before applying any migration: + +- [ ] Migration has both UP and DOWN (or is explicitly marked irreversible) +- [ ] No full table locks on large tables (use concurrent operations) +- [ ] New columns have defaults or are nullable (never add NOT NULL without default) +- [ ] Indexes created concurrently (not inline with CREATE TABLE for existing tables) +- [ ] Data backfill is a separate migration from schema change +- [ ] Tested against a copy of production data +- [ ] Rollback plan documented + +## PostgreSQL Patterns + +### Adding a Column Safely + +```sql +-- GOOD: Nullable column, no lock +ALTER TABLE users ADD COLUMN avatar_url TEXT; + +-- GOOD: Column with default (Postgres 11+ is instant, no rewrite) +ALTER TABLE users ADD COLUMN is_active BOOLEAN NOT NULL DEFAULT true; + +-- BAD: NOT NULL without default on existing table (requires full rewrite) +ALTER TABLE users ADD COLUMN role TEXT NOT NULL; +-- This locks the table and rewrites every row +``` + +### Adding an Index Without Downtime + +```sql +-- BAD: Blocks writes on large tables +CREATE INDEX idx_users_email ON users (email); + +-- GOOD: Non-blocking, allows concurrent writes +CREATE INDEX CONCURRENTLY idx_users_email ON users (email); + +-- Note: CONCURRENTLY cannot run inside a transaction block +-- Most migration tools need special handling for this +``` + +### Renaming a Column (Zero-Downtime) + +Never rename directly in production. 
Use the expand-contract pattern: + +```sql +-- Step 1: Add new column (migration 001) +ALTER TABLE users ADD COLUMN display_name TEXT; + +-- Step 2: Backfill data (migration 002, data migration) +UPDATE users SET display_name = username WHERE display_name IS NULL; + +-- Step 3: Update application code to read/write both columns +-- Deploy application changes + +-- Step 4: Stop writing to old column, drop it (migration 003) +ALTER TABLE users DROP COLUMN username; +``` + +### Removing a Column Safely + +```sql +-- Step 1: Remove all application references to the column +-- Step 2: Deploy application without the column reference +-- Step 3: Drop column in next migration +ALTER TABLE orders DROP COLUMN legacy_status; + +-- For Django: use SeparateDatabaseAndState to remove from model +-- without generating DROP COLUMN (then drop in next migration) +``` + +### Large Data Migrations + +```sql +-- BAD: Updates all rows in one transaction (locks table) +UPDATE users SET normalized_email = LOWER(email); + +-- GOOD: Batch update with progress +DO $$ +DECLARE + batch_size INT := 10000; + rows_updated INT; +BEGIN + LOOP + UPDATE users + SET normalized_email = LOWER(email) + WHERE id IN ( + SELECT id FROM users + WHERE normalized_email IS NULL + LIMIT batch_size + FOR UPDATE SKIP LOCKED + ); + GET DIAGNOSTICS rows_updated = ROW_COUNT; + RAISE NOTICE 'Updated % rows', rows_updated; + EXIT WHEN rows_updated = 0; + COMMIT; + END LOOP; +END $$; +``` + +## Prisma (TypeScript/Node.js) + +### Workflow + +```bash +# Create migration from schema changes +npx prisma migrate dev --name add_user_avatar + +# Apply pending migrations in production +npx prisma migrate deploy + +# Reset database (dev only) +npx prisma migrate reset + +# Generate client after schema changes +npx prisma generate +``` + +### Schema Example + +```prisma +model User { + id String @id @default(cuid()) + email String @unique + name String? + avatarUrl String? 
@map("avatar_url") + createdAt DateTime @default(now()) @map("created_at") + updatedAt DateTime @updatedAt @map("updated_at") + orders Order[] + + @@map("users") + @@index([email]) +} +``` + +### Custom SQL Migration + +For operations Prisma cannot express (concurrent indexes, data backfills): + +```bash +# Create empty migration, then edit the SQL manually +npx prisma migrate dev --create-only --name add_email_index +``` + +```sql +-- migrations/20240115_add_email_index/migration.sql +-- Prisma cannot generate CONCURRENTLY, so we write it manually +CREATE INDEX CONCURRENTLY IF NOT EXISTS idx_users_email ON users (email); +``` + +## Drizzle (TypeScript/Node.js) + +### Workflow + +```bash +# Generate migration from schema changes +npx drizzle-kit generate + +# Apply migrations +npx drizzle-kit migrate + +# Push schema directly (dev only, no migration file) +npx drizzle-kit push +``` + +### Schema Example + +```typescript +import { pgTable, text, timestamp, uuid, boolean } from "drizzle-orm/pg-core"; + +export const users = pgTable("users", { + id: uuid("id").primaryKey().defaultRandom(), + email: text("email").notNull().unique(), + name: text("name"), + isActive: boolean("is_active").notNull().default(true), + createdAt: timestamp("created_at").notNull().defaultNow(), + updatedAt: timestamp("updated_at").notNull().defaultNow(), +}); +``` + +## Django (Python) + +### Workflow + +```bash +# Generate migration from model changes +python manage.py makemigrations + +# Apply migrations +python manage.py migrate + +# Show migration status +python manage.py showmigrations + +# Generate empty migration for custom SQL +python manage.py makemigrations --empty app_name -n description +``` + +### Data Migration + +```python +from django.db import migrations + +def backfill_display_names(apps, schema_editor): + User = apps.get_model("accounts", "User") + batch_size = 5000 + users = User.objects.filter(display_name="") + while users.exists(): + batch = list(users[:batch_size]) + 
for user in batch: + user.display_name = user.username + User.objects.bulk_update(batch, ["display_name"], batch_size=batch_size) + +def reverse_backfill(apps, schema_editor): + pass # Data migration, no reverse needed + +class Migration(migrations.Migration): + dependencies = [("accounts", "0015_add_display_name")] + + operations = [ + migrations.RunPython(backfill_display_names, reverse_backfill), + ] +``` + +### SeparateDatabaseAndState + +Remove a column from the Django model without dropping it from the database immediately: + +```python +class Migration(migrations.Migration): + operations = [ + migrations.SeparateDatabaseAndState( + state_operations=[ + migrations.RemoveField(model_name="user", name="legacy_field"), + ], + database_operations=[], # Don't touch the DB yet + ), + ] +``` + +## golang-migrate (Go) + +### Workflow + +```bash +# Create migration pair +migrate create -ext sql -dir migrations -seq add_user_avatar + +# Apply all pending migrations +migrate -path migrations -database "$DATABASE_URL" up + +# Rollback last migration +migrate -path migrations -database "$DATABASE_URL" down 1 + +# Force version (fix dirty state) +migrate -path migrations -database "$DATABASE_URL" force VERSION +``` + +### Migration Files + +```sql +-- migrations/000003_add_user_avatar.up.sql +ALTER TABLE users ADD COLUMN avatar_url TEXT; +CREATE INDEX CONCURRENTLY idx_users_avatar ON users (avatar_url) WHERE avatar_url IS NOT NULL; + +-- migrations/000003_add_user_avatar.down.sql +DROP INDEX IF EXISTS idx_users_avatar; +ALTER TABLE users DROP COLUMN IF EXISTS avatar_url; +``` + +## Zero-Downtime Migration Strategy + +For critical production changes, follow the expand-contract pattern: + +``` +Phase 1: EXPAND + - Add new column/table (nullable or with default) + - Deploy: app writes to BOTH old and new + - Backfill existing data + +Phase 2: MIGRATE + - Deploy: app reads from NEW, writes to BOTH + - Verify data consistency + +Phase 3: CONTRACT + - Deploy: app only uses NEW + 
- Drop old column/table in separate migration +``` + +### Timeline Example + +``` +Day 1: Migration adds new_status column (nullable) +Day 1: Deploy app v2 — writes to both status and new_status +Day 2: Run backfill migration for existing rows +Day 3: Deploy app v3 — reads from new_status only +Day 7: Migration drops old status column +``` + +## Anti-Patterns + +| Anti-Pattern | Why It Fails | Better Approach | +|-------------|-------------|-----------------| +| Manual SQL in production | No audit trail, unrepeatable | Always use migration files | +| Editing deployed migrations | Causes drift between environments | Create new migration instead | +| NOT NULL without default | Locks table, rewrites all rows | Add nullable, backfill, then add constraint | +| Inline index on large table | Blocks writes during build | CREATE INDEX CONCURRENTLY | +| Schema + data in one migration | Hard to rollback, long transactions | Separate migrations | +| Dropping column before removing code | Application errors on missing column | Remove code first, drop column next deploy | diff --git a/skills/deployment-patterns/SKILL.md b/skills/deployment-patterns/SKILL.md new file mode 100644 index 00000000..8bed7a23 --- /dev/null +++ b/skills/deployment-patterns/SKILL.md @@ -0,0 +1,426 @@ +--- +name: deployment-patterns +description: Deployment workflows, CI/CD pipeline patterns, Docker containerization, health checks, rollback strategies, and production readiness checklists for web applications. +--- + +# Deployment Patterns + +Production deployment workflows and CI/CD best practices. 
+ +## When to Activate + +- Setting up CI/CD pipelines +- Dockerizing an application +- Planning deployment strategy (blue-green, canary, rolling) +- Implementing health checks and readiness probes +- Preparing for a production release +- Configuring environment-specific settings + +## Deployment Strategies + +### Rolling Deployment (Default) + +Replace instances gradually — old and new versions run simultaneously during rollout. + +``` +Instance 1: v1 → v2 (update first) +Instance 2: v1 (still running v1) +Instance 3: v1 (still running v1) + +Instance 1: v2 +Instance 2: v1 → v2 (update second) +Instance 3: v1 + +Instance 1: v2 +Instance 2: v2 +Instance 3: v1 → v2 (update last) +``` + +**Pros:** Zero downtime, gradual rollout +**Cons:** Two versions run simultaneously — requires backward-compatible changes +**Use when:** Standard deployments, backward-compatible changes + +### Blue-Green Deployment + +Run two identical environments. Switch traffic atomically. + +``` +Blue (v1) ← traffic +Green (v2) idle, running new version + +# After verification: +Blue (v1) idle (becomes standby) +Green (v2) ← traffic +``` + +**Pros:** Instant rollback (switch back to blue), clean cutover +**Cons:** Requires 2x infrastructure during deployment +**Use when:** Critical services, zero-tolerance for issues + +### Canary Deployment + +Route a small percentage of traffic to the new version first. 
+ +``` +v1: 95% of traffic +v2: 5% of traffic (canary) + +# If metrics look good: +v1: 50% of traffic +v2: 50% of traffic + +# Final: +v2: 100% of traffic +``` + +**Pros:** Catches issues with real traffic before full rollout +**Cons:** Requires traffic splitting infrastructure, monitoring +**Use when:** High-traffic services, risky changes, feature flags + +## Docker + +### Multi-Stage Dockerfile (Node.js) + +```dockerfile +# Stage 1: Install dependencies +FROM node:22-alpine AS deps +WORKDIR /app +COPY package.json package-lock.json ./ +RUN npm ci --production=false + +# Stage 2: Build +FROM node:22-alpine AS builder +WORKDIR /app +COPY --from=deps /app/node_modules ./node_modules +COPY . . +RUN npm run build +RUN npm prune --production + +# Stage 3: Production image +FROM node:22-alpine AS runner +WORKDIR /app + +RUN addgroup -g 1001 -S appgroup && adduser -S appuser -u 1001 +USER appuser + +COPY --from=builder --chown=appuser:appgroup /app/node_modules ./node_modules +COPY --from=builder --chown=appuser:appgroup /app/dist ./dist +COPY --from=builder --chown=appuser:appgroup /app/package.json ./ + +ENV NODE_ENV=production +EXPOSE 3000 + +HEALTHCHECK --interval=30s --timeout=3s --start-period=5s --retries=3 \ + CMD wget --no-verbose --tries=1 --spider http://localhost:3000/health || exit 1 + +CMD ["node", "dist/server.js"] +``` + +### Multi-Stage Dockerfile (Go) + +```dockerfile +FROM golang:1.22-alpine AS builder +WORKDIR /app +COPY go.mod go.sum ./ +RUN go mod download +COPY . . 
+RUN CGO_ENABLED=0 GOOS=linux go build -ldflags="-s -w" -o /server ./cmd/server + +FROM alpine:3.19 AS runner +RUN apk --no-cache add ca-certificates +RUN adduser -D -u 1001 appuser +USER appuser + +COPY --from=builder /server /server + +EXPOSE 8080 +HEALTHCHECK --interval=30s --timeout=3s CMD wget -qO- http://localhost:8080/health || exit 1 +CMD ["/server"] +``` + +### Multi-Stage Dockerfile (Python/Django) + +```dockerfile +FROM python:3.12-slim AS builder +WORKDIR /app +RUN pip install --no-cache-dir uv +COPY requirements.txt . +RUN uv pip install --system --no-cache -r requirements.txt + +FROM python:3.12-slim AS runner +WORKDIR /app + +RUN useradd -r -u 1001 appuser +USER appuser + +COPY --from=builder /usr/local/lib/python3.12/site-packages /usr/local/lib/python3.12/site-packages +COPY --from=builder /usr/local/bin /usr/local/bin +COPY . . + +ENV PYTHONUNBUFFERED=1 +EXPOSE 8000 + +HEALTHCHECK --interval=30s --timeout=3s CMD python -c "import urllib.request; urllib.request.urlopen('http://localhost:8000/health/')" || exit 1 +CMD ["gunicorn", "config.wsgi:application", "--bind", "0.0.0.0:8000", "--workers", "4"] +``` + +### Docker Best Practices + +``` +# GOOD practices +- Use specific version tags (node:22-alpine, not node:latest) +- Multi-stage builds to minimize image size +- Run as non-root user +- Copy dependency files first (layer caching) +- Use .dockerignore to exclude node_modules, .git, tests +- Add HEALTHCHECK instruction +- Set resource limits in docker-compose or k8s + +# BAD practices +- Running as root +- Using :latest tags +- Copying entire repo in one COPY layer +- Installing dev dependencies in production image +- Storing secrets in image (use env vars or secrets manager) +``` + +## CI/CD Pipeline + +### GitHub Actions (Standard Pipeline) + +```yaml +name: CI/CD + +on: + push: + branches: [main] + pull_request: + branches: [main] + +jobs: + test: + runs-on: ubuntu-latest + steps: + - uses: actions/checkout@v4 + - uses: actions/setup-node@v4 + 
with: + node-version: 22 + cache: npm + - run: npm ci + - run: npm run lint + - run: npm run typecheck + - run: npm test -- --coverage + - uses: actions/upload-artifact@v4 + if: always() + with: + name: coverage + path: coverage/ + + build: + needs: test + runs-on: ubuntu-latest + if: github.ref == 'refs/heads/main' + steps: + - uses: actions/checkout@v4 + - uses: docker/setup-buildx-action@v3 + - uses: docker/login-action@v3 + with: + registry: ghcr.io + username: ${{ github.actor }} + password: ${{ secrets.GITHUB_TOKEN }} + - uses: docker/build-push-action@v5 + with: + push: true + tags: ghcr.io/${{ github.repository }}:${{ github.sha }} + cache-from: type=gha + cache-to: type=gha,mode=max + + deploy: + needs: build + runs-on: ubuntu-latest + if: github.ref == 'refs/heads/main' + environment: production + steps: + - name: Deploy to production + run: | + # Platform-specific deployment command + # Railway: railway up + # Vercel: vercel --prod + # K8s: kubectl set image deployment/app app=ghcr.io/${{ github.repository }}:${{ github.sha }} + echo "Deploying ${{ github.sha }}" +``` + +### Pipeline Stages + +``` +PR opened: + lint → typecheck → unit tests → integration tests → preview deploy + +Merged to main: + lint → typecheck → unit tests → integration tests → build image → deploy staging → smoke tests → deploy production +``` + +## Health Checks + +### Health Check Endpoint + +```typescript +// Simple health check +app.get("/health", (req, res) => { + res.status(200).json({ status: "ok" }); +}); + +// Detailed health check (for internal monitoring) +app.get("/health/detailed", async (req, res) => { + const checks = { + database: await checkDatabase(), + redis: await checkRedis(), + externalApi: await checkExternalApi(), + }; + + const allHealthy = Object.values(checks).every(c => c.status === "ok"); + + res.status(allHealthy ? 200 : 503).json({ + status: allHealthy ? 
"ok" : "degraded", + timestamp: new Date().toISOString(), + version: process.env.APP_VERSION || "unknown", + uptime: process.uptime(), + checks, + }); +}); + +async function checkDatabase(): Promise { + try { + await db.query("SELECT 1"); + return { status: "ok", latency_ms: 2 }; + } catch (err) { + return { status: "error", message: "Database unreachable" }; + } +} +``` + +### Kubernetes Probes + +```yaml +livenessProbe: + httpGet: + path: /health + port: 3000 + initialDelaySeconds: 10 + periodSeconds: 30 + failureThreshold: 3 + +readinessProbe: + httpGet: + path: /health + port: 3000 + initialDelaySeconds: 5 + periodSeconds: 10 + failureThreshold: 2 + +startupProbe: + httpGet: + path: /health + port: 3000 + initialDelaySeconds: 0 + periodSeconds: 5 + failureThreshold: 30 # 30 * 5s = 150s max startup time +``` + +## Environment Configuration + +### Twelve-Factor App Pattern + +```bash +# All config via environment variables — never in code +DATABASE_URL=postgres://user:pass@host:5432/db +REDIS_URL=redis://host:6379/0 +API_KEY=${API_KEY} # injected by secrets manager +LOG_LEVEL=info +PORT=3000 + +# Environment-specific behavior +NODE_ENV=production # or staging, development +APP_ENV=production # explicit app environment +``` + +### Configuration Validation + +```typescript +import { z } from "zod"; + +const envSchema = z.object({ + NODE_ENV: z.enum(["development", "staging", "production"]), + PORT: z.coerce.number().default(3000), + DATABASE_URL: z.string().url(), + REDIS_URL: z.string().url(), + JWT_SECRET: z.string().min(32), + LOG_LEVEL: z.enum(["debug", "info", "warn", "error"]).default("info"), +}); + +// Validate at startup — fail fast if config is wrong +export const env = envSchema.parse(process.env); +``` + +## Rollback Strategy + +### Instant Rollback + +```bash +# Docker/Kubernetes: point to previous image +kubectl rollout undo deployment/app + +# Vercel: promote previous deployment +vercel rollback + +# Railway: redeploy previous commit +railway up 
--commit
+
+# Database: rollback migration (if reversible)
+npx prisma migrate resolve --rolled-back <migration_name>
+```
+
+### Rollback Checklist
+
+- [ ] Previous image/artifact is available and tagged
+- [ ] Database migrations are backward-compatible (no destructive changes)
+- [ ] Feature flags can disable new features without deploy
+- [ ] Monitoring alerts configured for error rate spikes
+- [ ] Rollback tested in staging before production release
+
+## Production Readiness Checklist
+
+Before any production deployment:
+
+### Application
+- [ ] All tests pass (unit, integration, E2E)
+- [ ] No hardcoded secrets in code or config files
+- [ ] Error handling covers all edge cases
+- [ ] Logging is structured (JSON) and does not contain PII
+- [ ] Health check endpoint returns meaningful status
+
+### Infrastructure
+- [ ] Docker image builds reproducibly (pinned versions)
+- [ ] Environment variables documented and validated at startup
+- [ ] Resource limits set (CPU, memory)
+- [ ] Horizontal scaling configured (min/max instances)
+- [ ] SSL/TLS enabled on all endpoints
+
+### Monitoring
+- [ ] Application metrics exported (request rate, latency, errors)
+- [ ] Alerts configured for error rate > threshold
+- [ ] Log aggregation set up (structured logs, searchable)
+- [ ] Uptime monitoring on health endpoint
+
+### Security
+- [ ] Dependencies scanned for CVEs
+- [ ] CORS configured for allowed origins only
+- [ ] Rate limiting enabled on public endpoints
+- [ ] Authentication and authorization verified
+- [ ] Security headers set (CSP, HSTS, X-Frame-Options)
+
+### Operations
+- [ ] Rollback plan documented and tested
+- [ ] Database migration tested against production-sized data
+- [ ] Runbook for common failure scenarios
+- [ ] On-call rotation and escalation path defined
diff --git a/tests/lib/utils.test.js b/tests/lib/utils.test.js
index 208ef15c..cbeef9a5 100644
--- a/tests/lib/utils.test.js
+++ b/tests/lib/utils.test.js
@@ -248,6 +248,131 @@ function runTests() {
 }
})) passed++; else failed++; + // Edge case tests for defensive code + console.log('\nEdge Cases:'); + + if (test('findFiles returns empty for null/undefined dir', () => { + assert.deepStrictEqual(utils.findFiles(null, '*.txt'), []); + assert.deepStrictEqual(utils.findFiles(undefined, '*.txt'), []); + assert.deepStrictEqual(utils.findFiles('', '*.txt'), []); + })) passed++; else failed++; + + if (test('findFiles returns empty for null/undefined pattern', () => { + assert.deepStrictEqual(utils.findFiles('/tmp', null), []); + assert.deepStrictEqual(utils.findFiles('/tmp', undefined), []); + assert.deepStrictEqual(utils.findFiles('/tmp', ''), []); + })) passed++; else failed++; + + if (test('findFiles supports maxAge filter', () => { + const testDir = path.join(utils.getTempDir(), `utils-test-maxage-${Date.now()}`); + try { + fs.mkdirSync(testDir); + fs.writeFileSync(path.join(testDir, 'recent.txt'), 'content'); + const results = utils.findFiles(testDir, '*.txt', { maxAge: 1 }); + assert.strictEqual(results.length, 1); + assert.ok(results[0].path.endsWith('recent.txt')); + } finally { + fs.rmSync(testDir, { recursive: true, force: true }); + } + })) passed++; else failed++; + + if (test('findFiles supports recursive option', () => { + const testDir = path.join(utils.getTempDir(), `utils-test-recursive-${Date.now()}`); + const subDir = path.join(testDir, 'sub'); + try { + fs.mkdirSync(subDir, { recursive: true }); + fs.writeFileSync(path.join(testDir, 'top.txt'), 'content'); + fs.writeFileSync(path.join(subDir, 'nested.txt'), 'content'); + // Without recursive: only top level + const shallow = utils.findFiles(testDir, '*.txt', { recursive: false }); + assert.strictEqual(shallow.length, 1); + // With recursive: finds nested too + const deep = utils.findFiles(testDir, '*.txt', { recursive: true }); + assert.strictEqual(deep.length, 2); + } finally { + fs.rmSync(testDir, { recursive: true, force: true }); + } + })) passed++; else failed++; + + if (test('countInFile 
handles invalid regex pattern', () => { + const testFile = path.join(utils.getTempDir(), `utils-test-${Date.now()}.txt`); + try { + utils.writeFile(testFile, 'test content'); + const count = utils.countInFile(testFile, '(unclosed'); + assert.strictEqual(count, 0); + } finally { + fs.unlinkSync(testFile); + } + })) passed++; else failed++; + + if (test('countInFile handles non-string non-regex pattern', () => { + const testFile = path.join(utils.getTempDir(), `utils-test-${Date.now()}.txt`); + try { + utils.writeFile(testFile, 'test content'); + const count = utils.countInFile(testFile, 42); + assert.strictEqual(count, 0); + } finally { + fs.unlinkSync(testFile); + } + })) passed++; else failed++; + + if (test('countInFile enforces global flag on RegExp', () => { + const testFile = path.join(utils.getTempDir(), `utils-test-${Date.now()}.txt`); + try { + utils.writeFile(testFile, 'foo bar foo baz foo'); + // RegExp without global flag — countInFile should still count all + const count = utils.countInFile(testFile, /foo/); + assert.strictEqual(count, 3); + } finally { + fs.unlinkSync(testFile); + } + })) passed++; else failed++; + + if (test('grepFile handles invalid regex pattern', () => { + const testFile = path.join(utils.getTempDir(), `utils-test-${Date.now()}.txt`); + try { + utils.writeFile(testFile, 'test content'); + const matches = utils.grepFile(testFile, '[invalid'); + assert.deepStrictEqual(matches, []); + } finally { + fs.unlinkSync(testFile); + } + })) passed++; else failed++; + + if (test('replaceInFile returns false for non-existent file', () => { + const result = utils.replaceInFile('/non/existent/file.txt', 'foo', 'bar'); + assert.strictEqual(result, false); + })) passed++; else failed++; + + if (test('countInFile returns 0 for non-existent file', () => { + const count = utils.countInFile('/non/existent/file.txt', /foo/g); + assert.strictEqual(count, 0); + })) passed++; else failed++; + + if (test('grepFile returns empty for non-existent file', () => 
{ + const matches = utils.grepFile('/non/existent/file.txt', /foo/); + assert.deepStrictEqual(matches, []); + })) passed++; else failed++; + + if (test('commandExists rejects unsafe command names', () => { + assert.strictEqual(utils.commandExists('cmd; rm -rf'), false); + assert.strictEqual(utils.commandExists('$(whoami)'), false); + assert.strictEqual(utils.commandExists('cmd && echo hi'), false); + })) passed++; else failed++; + + if (test('ensureDir is idempotent', () => { + const testDir = path.join(utils.getTempDir(), `utils-test-idem-${Date.now()}`); + try { + const result1 = utils.ensureDir(testDir); + const result2 = utils.ensureDir(testDir); + assert.strictEqual(result1, testDir); + assert.strictEqual(result2, testDir); + assert.ok(fs.existsSync(testDir)); + } finally { + fs.rmSync(testDir, { recursive: true, force: true }); + } + })) passed++; else failed++; + // System functions tests console.log('\nSystem Functions:');