mirror of
https://github.com/affaan-m/everything-claude-code.git
synced 2026-04-03 07:33:31 +08:00
* fix(ci): resolve cross-platform test failures - Sanity check script (check-codex-global-state.sh) now falls back to grep -E when ripgrep is not available, fixing the codex-hooks sync test on all CI platforms. Patterns converted to POSIX ERE for portability. - Unicode safety test accepts both / and \ path separators so the executable-file assertion passes on Windows. - Gacha test sets PYTHONUTF8=1 so Python uses UTF-8 stdout encoding on Windows instead of cp1252, preventing UnicodeEncodeError on box-drawing characters. - Quoted-hook-path test skipped on Windows where NTFS disallows double-quote characters in filenames. * feat: port remotion-video-creation skill (29 rules), restore missing files New skill: - remotion-video-creation: 29 domain-specific Remotion rules covering 3D/Three.js, animations, audio, captions, charts, compositions, fonts, GIFs, Lottie, measuring, sequencing, tailwind, text animations, timing, transitions, trimming, and video embedding. Ported from personal skills. Restored: - autonomous-agent-harness/SKILL.md (was in commit but missing from worktree) - lead-intelligence/ (full directory restored from branch commit) Updated: - manifests/install-modules.json: added remotion-video-creation to media-generation - README.md + AGENTS.md: synced counts to 139 skills Catalog validates: 30 agents, 60 commands, 139 skills. 
* fix(security): pin MCP server versions, add dependabot, pin github-script SHA Critical: - Pin all npx -y MCP server packages to specific versions in .mcp.json to prevent supply chain attacks via version hijacking: - @modelcontextprotocol/server-github@2025.4.8 - @modelcontextprotocol/server-memory@2026.1.26 - @modelcontextprotocol/server-sequential-thinking@2025.12.18 - @playwright/mcp@0.0.69 (was 0.0.68) Medium: - Add .github/dependabot.yml for weekly npm + github-actions updates with grouped minor/patch PRs - Pin actions/github-script to SHA (was @v7 tag, now pinned to commit) * feat: add social-graph-ranker skill — weighted network proximity scoring New skill: social-graph-ranker - Weighted social graph traversal with exponential decay across hops - Bridge Score: B(m) = Σ w(t) · λ^(d(m,t)-1) ranks mutuals by target proximity - Extended Score incorporates 2nd-order network (mutual-of-mutual connections) - Final ranking includes engagement bonus for responsive connections - Runs in parallel with lead-intelligence skill for combined warm+cold outreach - Supports X API + LinkedIn CSV for graph harvesting - Outputs tiered action list: warm intros, direct outreach, network gap analysis Added to business-content install module. Catalog validates: 30/60/140. * fix(security): npm audit fix — resolve all dependency vulnerabilities Applied npm audit fix --force to resolve: - minimatch ReDoS (3 vulnerabilities, HIGH) - smol-toml DoS (MODERATE) - brace-expansion memory exhaustion (MODERATE) - markdownlint-cli upgraded from 0.47.0 to 0.48.0 npm audit now reports 0 vulnerabilities. 
* fix: resolve markdown lint and yarn lockfile sync - MD047: ensure single trailing newline on all remotion rule files - MD012: remove consecutive blank lines in lottie, measuring-dom-nodes, trimming - MD034: wrap bare URLs in angle brackets (tailwind, transcribe-captions) - yarn.lock: regenerated to sync with npm audit changes in package.json * fix: replace unicode arrows in lead-intelligence (CI unicode safety check)
230 lines
5.4 KiB
Markdown
---
name: extract-frames
description: Extract frames from videos at specific timestamps using Mediabunny
metadata:
  tags: frames, extract, video, thumbnail, filmstrip, canvas
---

# Extracting frames from videos

Use Mediabunny to extract frames from videos at specific timestamps. This is useful for generating thumbnails, filmstrips, or processing individual frames.

## The `extractFrames()` function

This function can be copy-pasted into any project.

```tsx
import {
|
|
ALL_FORMATS,
|
|
Input,
|
|
UrlSource,
|
|
VideoSample,
|
|
VideoSampleSink,
|
|
} from "mediabunny";
|
|
|
|
type Options = {
|
|
track: { width: number; height: number };
|
|
container: string;
|
|
durationInSeconds: number | null;
|
|
};
|
|
|
|
export type ExtractFramesTimestampsInSecondsFn = (
|
|
options: Options
|
|
) => Promise<number[]> | number[];
|
|
|
|
export type ExtractFramesProps = {
|
|
src: string;
|
|
timestampsInSeconds: number[] | ExtractFramesTimestampsInSecondsFn;
|
|
onVideoSample: (sample: VideoSample) => void;
|
|
signal?: AbortSignal;
|
|
};
|
|
|
|
export async function extractFrames({
|
|
src,
|
|
timestampsInSeconds,
|
|
onVideoSample,
|
|
signal,
|
|
}: ExtractFramesProps): Promise<void> {
|
|
using input = new Input({
|
|
formats: ALL_FORMATS,
|
|
source: new UrlSource(src),
|
|
});
|
|
|
|
const [durationInSeconds, format, videoTrack] = await Promise.all([
|
|
input.computeDuration(),
|
|
input.getFormat(),
|
|
input.getPrimaryVideoTrack(),
|
|
]);
|
|
|
|
if (!videoTrack) {
|
|
throw new Error("No video track found in the input");
|
|
}
|
|
|
|
if (signal?.aborted) {
|
|
throw new Error("Aborted");
|
|
}
|
|
|
|
const timestamps =
|
|
typeof timestampsInSeconds === "function"
|
|
? await timestampsInSeconds({
|
|
track: {
|
|
width: videoTrack.displayWidth,
|
|
height: videoTrack.displayHeight,
|
|
},
|
|
container: format.name,
|
|
durationInSeconds,
|
|
})
|
|
: timestampsInSeconds;
|
|
|
|
if (timestamps.length === 0) {
|
|
return;
|
|
}
|
|
|
|
if (signal?.aborted) {
|
|
throw new Error("Aborted");
|
|
}
|
|
|
|
const sink = new VideoSampleSink(videoTrack);
|
|
|
|
for await (using videoSample of sink.samplesAtTimestamps(timestamps)) {
|
|
if (signal?.aborted) {
|
|
break;
|
|
}
|
|
|
|
if (!videoSample) {
|
|
continue;
|
|
}
|
|
|
|
onVideoSample(videoSample);
|
|
}
|
|
}
```

## Basic usage

Extract frames at specific timestamps:

```tsx
await extractFrames({
|
|
src: "https://remotion.media/video.mp4",
|
|
timestampsInSeconds: [0, 1, 2, 3, 4],
|
|
onVideoSample: (sample) => {
|
|
const canvas = document.createElement("canvas");
|
|
canvas.width = sample.displayWidth;
|
|
canvas.height = sample.displayHeight;
|
|
const ctx = canvas.getContext("2d");
|
|
sample.draw(ctx!, 0, 0);
|
|
},
|
|
});
```

## Creating a filmstrip

Use a callback function to dynamically calculate timestamps based on video metadata:

```tsx
const canvasWidth = 500;
|
|
const canvasHeight = 80;
|
|
const fromSeconds = 0;
|
|
const toSeconds = 10;
|
|
|
|
await extractFrames({
|
|
src: "https://remotion.media/video.mp4",
|
|
timestampsInSeconds: async ({ track, durationInSeconds }) => {
|
|
const aspectRatio = track.width / track.height;
|
|
const amountOfFramesFit = Math.ceil(
|
|
canvasWidth / (canvasHeight * aspectRatio)
|
|
);
|
|
const segmentDuration = toSeconds - fromSeconds;
|
|
const timestamps: number[] = [];
|
|
|
|
for (let i = 0; i < amountOfFramesFit; i++) {
|
|
timestamps.push(
|
|
fromSeconds + (segmentDuration / amountOfFramesFit) * (i + 0.5)
|
|
);
|
|
}
|
|
|
|
return timestamps;
|
|
},
|
|
onVideoSample: (sample) => {
|
|
console.log(`Frame at ${sample.timestamp}s`);
|
|
|
|
const canvas = document.createElement("canvas");
|
|
canvas.width = sample.displayWidth;
|
|
canvas.height = sample.displayHeight;
|
|
const ctx = canvas.getContext("2d");
|
|
sample.draw(ctx!, 0, 0);
|
|
},
|
|
});
```

## Cancellation with AbortSignal

Cancel frame extraction after a timeout:

```tsx
const controller = new AbortController();
|
|
|
|
setTimeout(() => controller.abort(), 5000);
|
|
|
|
try {
|
|
await extractFrames({
|
|
src: "https://remotion.media/video.mp4",
|
|
timestampsInSeconds: [0, 1, 2, 3, 4],
|
|
onVideoSample: (sample) => {
|
|
using frame = sample;
|
|
const canvas = document.createElement("canvas");
|
|
canvas.width = frame.displayWidth;
|
|
canvas.height = frame.displayHeight;
|
|
const ctx = canvas.getContext("2d");
|
|
frame.draw(ctx!, 0, 0);
|
|
},
|
|
signal: controller.signal,
|
|
});
|
|
|
|
console.log("Frame extraction complete!");
|
|
} catch (error) {
|
|
console.error("Frame extraction was aborted or failed:", error);
|
|
}
```

## Timeout with Promise.race

```tsx
const controller = new AbortController();
|
|
|
|
const timeoutPromise = new Promise<never>((_, reject) => {
|
|
const timeoutId = setTimeout(() => {
|
|
controller.abort();
|
|
reject(new Error("Frame extraction timed out after 10 seconds"));
|
|
}, 10000);
|
|
|
|
controller.signal.addEventListener("abort", () => clearTimeout(timeoutId), {
|
|
once: true,
|
|
});
|
|
});
|
|
|
|
try {
|
|
await Promise.race([
|
|
extractFrames({
|
|
src: "https://remotion.media/video.mp4",
|
|
timestampsInSeconds: [0, 1, 2, 3, 4],
|
|
onVideoSample: (sample) => {
|
|
using frame = sample;
|
|
const canvas = document.createElement("canvas");
|
|
canvas.width = frame.displayWidth;
|
|
canvas.height = frame.displayHeight;
|
|
const ctx = canvas.getContext("2d");
|
|
frame.draw(ctx!, 0, 0);
|
|
},
|
|
signal: controller.signal,
|
|
}),
|
|
timeoutPromise,
|
|
]);
|
|
|
|
console.log("Frame extraction complete!");
|
|
} catch (error) {
|
|
console.error("Frame extraction was aborted or failed:", error);
|
|
}
```