merge: upstream/dev into fix/toolcall-format

Resolved conflict in dynamic-agent-prompt-builder.ts by keeping both
buildToolCallFormatSection() and buildNonClaudePlannerSection() functions.
This commit is contained in:
acamq
2026-03-02 07:48:46 -07:00
172 changed files with 5662 additions and 507 deletions

View File

@@ -1,10 +1,10 @@
# oh-my-opencode — OpenCode Plugin
**Generated:** 2026-02-24 | **Commit:** fcb90d92 | **Branch:** dev
**Generated:** 2026-03-02 | **Commit:** 1c2caa09 | **Branch:** dev
## OVERVIEW
OpenCode plugin (npm: `oh-my-opencode`) that extends Claude Code (OpenCode fork) with multi-agent orchestration, 46 lifecycle hooks, 26 tools, skill/command/MCP systems, and Claude Code compatibility. 1208 TypeScript files, 143k LOC.
OpenCode plugin (npm: `oh-my-opencode`) that extends Claude Code (OpenCode fork) with multi-agent orchestration, 46 lifecycle hooks, 26 tools, skill/command/MCP systems, and Claude Code compatibility. 1243 TypeScript files, 155k LOC.
## STRUCTURE
@@ -14,16 +14,16 @@ oh-my-opencode/
│ ├── index.ts # Plugin entry: loadConfig → createManagers → createTools → createHooks → createPluginInterface
│ ├── plugin-config.ts # JSONC multi-level config: user → project → defaults (Zod v4)
│ ├── agents/ # 11 agents (Sisyphus, Hephaestus, Oracle, Librarian, Explore, Atlas, Prometheus, Metis, Momus, Multimodal-Looker, Sisyphus-Junior)
| `hooks/`                # 46 hooks across 39 directories + 6 standalone files
│ ├── hooks/ # 46 hooks across 45 directories + 11 standalone files
│ ├── tools/ # 26 tools across 15 directories
│ ├── features/ # 19 feature modules (background-agent, skill-loader, tmux, MCP-OAuth, etc.)
│ ├── shared/ # 100+ utility files in 13 categories
│ ├── config/ # Zod v4 schema system (22+ files)
│ ├── shared/ # 95+ utility files in 13 categories
│ ├── config/ # Zod v4 schema system (24 files)
│ ├── cli/ # CLI: install, run, doctor, mcp-oauth (Commander.js)
│ ├── mcp/ # 3 built-in remote MCPs (websearch, context7, grep_app)
│ ├── plugin/ # 8 OpenCode hook handlers + 46 hook composition
│ └── plugin-handlers/ # 6-phase config loading pipeline
├── packages/ # Monorepo: comment-checker, opencode-sdk, 10 platform binaries
├── packages/ # Monorepo: cli-runner, 12 platform binaries
└── local-ignore/ # Dev-only test fixtures
```
@@ -123,7 +123,7 @@ bunx oh-my-opencode run # Non-interactive session
|----------|---------|---------|
| ci.yml | push/PR | Tests (split: mock-heavy isolated + batch), typecheck, build, schema auto-commit |
| publish.yml | manual | Version bump, npm publish, platform binaries, GitHub release, merge to dev |
| publish-platform.yml | called | 11 platform binaries via bun compile (darwin/linux/windows) |
| publish-platform.yml | called | 12 platform binaries via bun compile (darwin/linux/windows) |
| sisyphus-agent.yml | @mention | AI agent handles issues/PRs |
## NOTES

View File

@@ -134,7 +134,7 @@ Everything below, every feature, every optimization, you don't need to know it.
Even with only the following subscriptions, ultrawork will work well (this project is not affiliated; this is just a personal recommendation):
- [ChatGPT Subscription ($20)](https://chatgpt.com/)
- [Kimi Code Subscription ($0.99) (*only this month)](https://www.kimi.com/membership/pricing?track_id=5cdeca93-66f0-4d35-aabb-b6df8fcea328)
- [Kimi Code Subscription ($0.99) (*only this month)](https://www.kimi.com/kimiplus/sale)
- [GLM Coding Plan ($10)](https://z.ai/subscribe)
- If you are eligible for pay-per-token, using kimi and gemini models won't cost you that much.

View File

@@ -3148,6 +3148,16 @@
},
"additionalProperties": false
},
"custom_agents": {
"type": "object",
"propertyNames": {
"type": "string",
"pattern": "^(?!(?:[bB][uU][iI][lL][dD]|[pP][lL][aA][nN]|[sS][iI][sS][yY][pP][hH][uU][sS]|[hH][eE][pP][hH][aA][eE][sS][tT][uU][sS]|[sS][iI][sS][yY][pP][hH][uU][sS]-[jJ][uU][nN][iI][oO][rR]|[oO][pP][eE][nN][cC][oO][dD][eE]-[bB][uU][iI][lL][dD][eE][rR]|[pP][rR][oO][mM][eE][tT][hH][eE][uU][sS]|[mM][eE][tT][iI][sS]|[mM][oO][mM][uU][sS]|[oO][rR][aA][cC][lL][eE]|[lL][iI][bB][rR][aA][rR][iI][aA][nN]|[eE][xX][pP][lL][oO][rR][eE]|[mM][uU][lL][tT][iI][mM][oO][dD][aA][lL]-[lL][oO][oO][kK][eE][rR]|[aA][tT][lL][aA][sS])$).+"
},
"additionalProperties": {
"$ref": "#/$defs/agentOverrideConfig"
}
},
"categories": {
"type": "object",
"propertyNames": {
@@ -3685,6 +3695,10 @@
"messageStalenessTimeoutMs": {
"type": "number",
"minimum": 60000
},
"syncPollTimeoutMs": {
"type": "number",
"minimum": 60000
}
},
"additionalProperties": false
@@ -3837,6 +3851,19 @@
},
"additionalProperties": false
},
"start_work": {
"type": "object",
"properties": {
"auto_commit": {
"default": true,
"type": "boolean"
}
},
"required": [
"auto_commit"
],
"additionalProperties": false
},
"_migrations": {
"type": "array",
"items": {
@@ -3844,5 +3871,226 @@
}
}
},
"additionalProperties": false
"additionalProperties": false,
"$defs": {
"agentOverrideConfig": {
"type": "object",
"properties": {
"model": {
"type": "string"
},
"fallback_models": {
"anyOf": [
{
"type": "string"
},
{
"type": "array",
"items": {
"type": "string"
}
}
]
},
"variant": {
"type": "string"
},
"category": {
"type": "string"
},
"skills": {
"type": "array",
"items": {
"type": "string"
}
},
"temperature": {
"type": "number",
"minimum": 0,
"maximum": 2
},
"top_p": {
"type": "number",
"minimum": 0,
"maximum": 1
},
"prompt": {
"type": "string"
},
"prompt_append": {
"type": "string"
},
"tools": {
"type": "object",
"propertyNames": {
"type": "string"
},
"additionalProperties": {
"type": "boolean"
}
},
"disable": {
"type": "boolean"
},
"description": {
"type": "string"
},
"mode": {
"type": "string",
"enum": [
"subagent",
"primary",
"all"
]
},
"color": {
"type": "string",
"pattern": "^#[0-9A-Fa-f]{6}$"
},
"permission": {
"type": "object",
"properties": {
"edit": {
"type": "string",
"enum": [
"ask",
"allow",
"deny"
]
},
"bash": {
"anyOf": [
{
"type": "string",
"enum": [
"ask",
"allow",
"deny"
]
},
{
"type": "object",
"propertyNames": {
"type": "string"
},
"additionalProperties": {
"type": "string",
"enum": [
"ask",
"allow",
"deny"
]
}
}
]
},
"webfetch": {
"type": "string",
"enum": [
"ask",
"allow",
"deny"
]
},
"task": {
"type": "string",
"enum": [
"ask",
"allow",
"deny"
]
},
"doom_loop": {
"type": "string",
"enum": [
"ask",
"allow",
"deny"
]
},
"external_directory": {
"type": "string",
"enum": [
"ask",
"allow",
"deny"
]
}
},
"additionalProperties": false
},
"maxTokens": {
"type": "number"
},
"thinking": {
"type": "object",
"properties": {
"type": {
"type": "string",
"enum": [
"enabled",
"disabled"
]
},
"budgetTokens": {
"type": "number"
}
},
"required": [
"type"
],
"additionalProperties": false
},
"reasoningEffort": {
"type": "string",
"enum": [
"low",
"medium",
"high",
"xhigh"
]
},
"textVerbosity": {
"type": "string",
"enum": [
"low",
"medium",
"high"
]
},
"providerOptions": {
"type": "object",
"propertyNames": {
"type": "string"
},
"additionalProperties": {}
},
"ultrawork": {
"type": "object",
"properties": {
"model": {
"type": "string"
},
"variant": {
"type": "string"
}
},
"additionalProperties": false
},
"compaction": {
"type": "object",
"properties": {
"model": {
"type": "string"
},
"variant": {
"type": "string"
}
},
"additionalProperties": false
}
},
"additionalProperties": false
}
}
}

View File

@@ -8,7 +8,7 @@
"@ast-grep/cli": "^0.40.0",
"@ast-grep/napi": "^0.40.0",
"@clack/prompts": "^0.11.0",
"@code-yeongyu/comment-checker": "^0.6.1",
"@code-yeongyu/comment-checker": "^0.7.0",
"@modelcontextprotocol/sdk": "^1.25.2",
"@opencode-ai/plugin": "^1.1.19",
"@opencode-ai/sdk": "^1.1.19",
@@ -29,13 +29,17 @@
"typescript": "^5.7.3",
},
"optionalDependencies": {
"oh-my-opencode-darwin-arm64": "3.8.5",
"oh-my-opencode-darwin-x64": "3.8.5",
"oh-my-opencode-linux-arm64": "3.8.5",
"oh-my-opencode-linux-arm64-musl": "3.8.5",
"oh-my-opencode-linux-x64": "3.8.5",
"oh-my-opencode-linux-x64-musl": "3.8.5",
"oh-my-opencode-windows-x64": "3.8.5",
"oh-my-opencode-darwin-arm64": "3.10.0",
"oh-my-opencode-darwin-x64": "3.10.0",
"oh-my-opencode-darwin-x64-baseline": "3.10.0",
"oh-my-opencode-linux-arm64": "3.10.0",
"oh-my-opencode-linux-arm64-musl": "3.10.0",
"oh-my-opencode-linux-x64": "3.10.0",
"oh-my-opencode-linux-x64-baseline": "3.10.0",
"oh-my-opencode-linux-x64-musl": "3.10.0",
"oh-my-opencode-linux-x64-musl-baseline": "3.10.0",
"oh-my-opencode-windows-x64": "3.10.0",
"oh-my-opencode-windows-x64-baseline": "3.10.0",
},
},
},
@@ -85,7 +89,7 @@
"@clack/prompts": ["@clack/prompts@0.11.0", "", { "dependencies": { "@clack/core": "0.5.0", "picocolors": "^1.0.0", "sisteransi": "^1.0.5" } }, "sha512-pMN5FcrEw9hUkZA4f+zLlzivQSeQf5dRGJjSUbvVYDLvpKCdQx5OaknvKzgbtXOizhP+SJJJjqEbOe55uKKfAw=="],
"@code-yeongyu/comment-checker": ["@code-yeongyu/comment-checker@0.6.1", "", { "os": [ "linux", "win32", "darwin", ], "cpu": [ "x64", "arm64", ], "bin": { "comment-checker": "bin/comment-checker" } }, "sha512-BBremX+Y5aW8sTzlhHrLsKParupYkPOVUYmq9STrlWvBvfAme6w5IWuZCLl6nHIQScRDdvGdrAjPycJC86EZFA=="],
"@code-yeongyu/comment-checker": ["@code-yeongyu/comment-checker@0.7.0", "", { "os": [ "linux", "win32", "darwin", ], "cpu": [ "x64", "arm64", ], "bin": { "comment-checker": "bin/comment-checker" } }, "sha512-AOic1jPHY3CpNraOuO87YZHO3uRzm9eLd0wyYYN89/76Ugk2TfdUYJ6El/Oe8fzOnHKiOF0IfBeWRo0IUjrHHg=="],
"@hono/node-server": ["@hono/node-server@1.19.9", "", { "peerDependencies": { "hono": "^4" } }, "sha512-vHL6w3ecZsky+8P5MD+eFfaGTyCeOHUIFYMGpQGbrBTSmNNoxv0if69rEZ5giu36weC5saFuznL411gRX7bJDw=="],
@@ -231,19 +235,27 @@
"object-inspect": ["object-inspect@1.13.4", "", {}, "sha512-W67iLl4J2EXEGTbfeHCffrjDfitvLANg0UlX3wFUUSTx92KXRFegMHUVgSqE+wvhAbi4WqjGg9czysTV2Epbew=="],
"oh-my-opencode-darwin-arm64": ["oh-my-opencode-darwin-arm64@3.8.5", "", { "os": "darwin", "cpu": "arm64", "bin": { "oh-my-opencode": "bin/oh-my-opencode" } }, "sha512-bbLu1We9NNhYAVp9Q/FK8dYFlYLp2PKfvdBCr+O6QjNRixdjp8Ru4RK7i9mKg0ybYBUzzCcbbC2Cc1o8orkhBA=="],
"oh-my-opencode-darwin-arm64": ["oh-my-opencode-darwin-arm64@3.10.0", "", { "os": "darwin", "cpu": "arm64", "bin": { "oh-my-opencode": "bin/oh-my-opencode" } }, "sha512-KQ1Nva4eU03WIaQI8BiEgizYJAeddUIaC8dmks0Ug/2EkH6VyNj41+shI58HFGN9Jlg9Fd6MxpOW92S3JUHjOw=="],
"oh-my-opencode-darwin-x64": ["oh-my-opencode-darwin-x64@3.8.5", "", { "os": "darwin", "cpu": "x64", "bin": { "oh-my-opencode": "bin/oh-my-opencode" } }, "sha512-N9GcmzYgL87UybSaMGiHc5lwT5Mxg1tyB502el5syouN39wfeUYoj37SonENrMUTiEfn75Lwv/5cSLCesSubpA=="],
"oh-my-opencode-darwin-x64": ["oh-my-opencode-darwin-x64@3.10.0", "", { "os": "darwin", "cpu": "x64", "bin": { "oh-my-opencode": "bin/oh-my-opencode" } }, "sha512-PydZ6wKyLZzikSZA3Q89zKZwFyg0Ouqd/S6zDsf1zzpUWT1t5EcpBtYFwuscD7L4hdkIEFm8wxnnBkz5i6BEiA=="],
"oh-my-opencode-linux-arm64": ["oh-my-opencode-linux-arm64@3.8.5", "", { "os": "linux", "cpu": "arm64", "bin": { "oh-my-opencode": "bin/oh-my-opencode" } }, "sha512-ki4a7s1DD5z5wEKmzcchqAKOIpw0LsBvyF8ieqNLS5Xl8PWE0gAZ7rqjlXC54NTubpexVH6lO2yenFJsk2Zk9A=="],
"oh-my-opencode-darwin-x64-baseline": ["oh-my-opencode-darwin-x64-baseline@3.10.0", "", { "os": "darwin", "cpu": "x64", "bin": { "oh-my-opencode": "bin/oh-my-opencode" } }, "sha512-yOaVd0E1qspT2xP/BMJaJ/rpFTwkOh9U/SAk6uOuxHld6dZGI9e2Oq8F3pSD16xHnnpaz4VzadtT6HkvPdtBYg=="],
"oh-my-opencode-linux-arm64-musl": ["oh-my-opencode-linux-arm64-musl@3.8.5", "", { "os": "linux", "cpu": "arm64", "bin": { "oh-my-opencode": "bin/oh-my-opencode" } }, "sha512-9+6hU3z503fBzuV0VjxIkTKFElbKacHijFcdKAussG6gPFLWmCRWtdowzEDwUfAoIsoHHH7FBwvh5waGp/ZksA=="],
"oh-my-opencode-linux-arm64": ["oh-my-opencode-linux-arm64@3.10.0", "", { "os": "linux", "cpu": "arm64", "bin": { "oh-my-opencode": "bin/oh-my-opencode" } }, "sha512-pLzcPMuzBb1tpVgqMilv7QdsE2xTMLCWT3b807mzjt0302fZTfm6emwymCG25RamHdq7+mI2B0rN7hjvbymFog=="],
"oh-my-opencode-linux-x64": ["oh-my-opencode-linux-x64@3.8.5", "", { "os": "linux", "cpu": "x64", "bin": { "oh-my-opencode": "bin/oh-my-opencode" } }, "sha512-DmnMK/PgvdcCYL+OQE5iZWgi/vmjm0sIPQVQgSUbWn3izcUF7C5DtlxqaU2cKxNZwrhDTlJdLWxmJqgLmLqd9A=="],
"oh-my-opencode-linux-arm64-musl": ["oh-my-opencode-linux-arm64-musl@3.10.0", "", { "os": "linux", "cpu": "arm64", "bin": { "oh-my-opencode": "bin/oh-my-opencode" } }, "sha512-ca61zr+X8q0ipO2x72qU+4R6Dsr168OM9aXI6xDHbrr0l3XZlRO8xuwQidch1vE5QRv2/IJT10KjAFInCERDug=="],
"oh-my-opencode-linux-x64-musl": ["oh-my-opencode-linux-x64-musl@3.8.5", "", { "os": "linux", "cpu": "x64", "bin": { "oh-my-opencode": "bin/oh-my-opencode" } }, "sha512-jhCNStljsyapVq9X7PaHSOcWxxEA4BUcIibvoPs/xc7fVP8D47p651LzIRsM6STn6Bx684mlYbxxX1P/0QPKNg=="],
"oh-my-opencode-linux-x64": ["oh-my-opencode-linux-x64@3.10.0", "", { "os": "linux", "cpu": "x64", "bin": { "oh-my-opencode": "bin/oh-my-opencode" } }, "sha512-m0Ys8Vnl8jUNRE5/aIseNOF1H57/W77xh3vkyBVfnjzHwQdEUWZz3IdoHaEWIFgIP2+fsNXRHqpx7Pbtuhxo6Q=="],
"oh-my-opencode-windows-x64": ["oh-my-opencode-windows-x64@3.8.5", "", { "os": "win32", "cpu": "x64", "bin": { "oh-my-opencode": "bin/oh-my-opencode.exe" } }, "sha512-lcPBp9NCNQ6TnqzsN9p/K+xKwOzBoIPw7HncxmrXSberZ3uHy0K9uNraQ7fqnXIKWqQiK4kSwWfSHpmhbaHiNg=="],
"oh-my-opencode-linux-x64-baseline": ["oh-my-opencode-linux-x64-baseline@3.10.0", "", { "os": "linux", "cpu": "x64", "bin": { "oh-my-opencode": "bin/oh-my-opencode" } }, "sha512-a6OhfqMXhOTq1On8YHRRlVsNtMx84kgNAnStk/sY1Dw0kXU68QK4tWXVF+wNdiRG3egeM2SvjhJ5RhWlr3CCNQ=="],
"oh-my-opencode-linux-x64-musl": ["oh-my-opencode-linux-x64-musl@3.10.0", "", { "os": "linux", "cpu": "x64", "bin": { "oh-my-opencode": "bin/oh-my-opencode" } }, "sha512-lZkoEWwmrlVoZKewHNslUmQ2D6eWi1YqsoZMTd3qRj8V4XI6TDZHxg86hw4oxZ/EnKO4un+r83tb09JAAb1nNQ=="],
"oh-my-opencode-linux-x64-musl-baseline": ["oh-my-opencode-linux-x64-musl-baseline@3.10.0", "", { "os": "linux", "cpu": "x64", "bin": { "oh-my-opencode": "bin/oh-my-opencode" } }, "sha512-UqArUpatMuen8+hZhMSbScaSmJlcwkEtf/IzDN1iYO0CttvhyYMUmm3el/1gWTAcaGNDFNkGmTli5WNYhnm2lA=="],
"oh-my-opencode-windows-x64": ["oh-my-opencode-windows-x64@3.10.0", "", { "os": "win32", "cpu": "x64", "bin": { "oh-my-opencode": "bin/oh-my-opencode.exe" } }, "sha512-BivOu1+Yty9N6VSmNzmxROZqjQKu3ImWjooKZDfczvYLDQmZV104QcOKV6bmdOCpHrqQ7cvdbygmeiJeRoYShg=="],
"oh-my-opencode-windows-x64-baseline": ["oh-my-opencode-windows-x64-baseline@3.10.0", "", { "os": "win32", "cpu": "x64", "bin": { "oh-my-opencode": "bin/oh-my-opencode.exe" } }, "sha512-BBv+dNPuh9LEuqXUJLXNsvi3vL30zS1qcJuzlq/s8rYHry+VvEVXCRcMm5Vo0CVna8bUZf5U8MDkGDHOAiTeEw=="],
"on-finished": ["on-finished@2.4.1", "", { "dependencies": { "ee-first": "1.1.1" } }, "sha512-oVlzkg3ENAhCk2zdv7IJwd/QUD4z2RxRwpkcGY8psCVcCYZNq4wYnVWALHM+brtuJjePWiYF/ClmuDr8Ch5+kg=="],

View File

@@ -68,6 +68,8 @@ User Request
When Sisyphus delegates to a subagent, it doesn't pick a model name. It picks a **category** — `visual-engineering`, `ultrabrain`, `quick`, `deep`. The category automatically maps to the right model. You touch nothing.
Custom agents are also first-class in this flow. When custom agents are loaded, planning context includes them, so the orchestrator can choose them proactively when appropriate, and you can call them directly on demand via `task(subagent_type="your-agent")`.
For a deep dive into how agents collaborate, see the [Orchestration System Guide](./orchestration.md).
---

View File

@@ -11,6 +11,7 @@ Complete reference for `oh-my-opencode.jsonc` configuration. This document cover
- [Quick Start Example](#quick-start-example)
- [Core Concepts](#core-concepts)
- [Agents](#agents)
- [Custom Agents (`custom_agents`)](#custom-agents-custom_agents)
- [Categories](#categories)
- [Model Resolution](#model-resolution)
- [Task System](#task-system)
@@ -130,6 +131,8 @@ Here's a practical starting configuration:
Override built-in agent settings. Available agents: `sisyphus`, `hephaestus`, `prometheus`, `oracle`, `librarian`, `explore`, `multimodal-looker`, `metis`, `momus`, `atlas`.
`agents` is intentionally strict and only accepts built-in agent keys. Use `custom_agents` for user-defined agents.
```json
{
"agents": {
@@ -200,6 +203,64 @@ Control what tools an agent can use:
| `doom_loop` | `ask` / `allow` / `deny` |
| `external_directory` | `ask` / `allow` / `deny` |
### Custom Agents (`custom_agents`)
Use `custom_agents` to configure user-defined agents without mixing them into built-in `agents` overrides.
What this gives you:
- **Clean separation**: built-ins stay in `agents`, user-defined entries stay in `custom_agents`.
- **Safer config**: keys in `custom_agents` cannot reuse built-in names.
- **First-class orchestration**: loaded custom agents are visible to planner/orchestrator context, so they can be selected proactively during planning and invoked on demand via `task(subagent_type=...)`.
- **Full model controls** for custom agents: `model`, `variant`, `temperature`, `top_p`, `reasoningEffort`, `thinking`, etc.
Important behavior:
- `custom_agents` **overrides existing custom agents** loaded at runtime (for example from Claude Code/OpenCode agent sources).
- `custom_agents` does **not** create an agent from thin air by itself; the target custom agent must be present in runtime-loaded agent configs.
Example:
```jsonc
{
"custom_agents": {
"translator": {
"model": "openai/gpt-5.3-codex",
"variant": "high",
"temperature": 0.2,
"prompt_append": "Keep locale placeholders and ICU tokens exactly unchanged."
},
"reviewer-fast": {
"model": "anthropic/claude-haiku-4-5",
"temperature": 0,
"thinking": {
"type": "enabled",
"budgetTokens": 20000
}
}
}
}
```
On-demand invocation through task delegation:
```ts
task(
{
subagent_type: "translator",
load_skills: [],
description: "Translate release notes",
prompt: "Translate docs/CHANGELOG.md into Korean while preserving markdown structure.",
run_in_background: false,
},
)
```
Migration note:
- If you previously put custom entries under `agents.*`, move them to `custom_agents.*`.
- Keys under `agents` that are not built-in agent names are reported with migration hints.
### Categories
Domain-specific model delegation used by the `task()` tool. When Sisyphus delegates work, it picks a category, not a model name.
@@ -573,13 +634,13 @@ Define `fallback_models` per agent or category:
### Hashline Edit
Replaces the built-in `Edit` tool with a hash-anchored version using `LINE#ID` references to prevent stale-line edits. Enabled by default.
Replaces the built-in `Edit` tool with a hash-anchored version using `LINE#ID` references to prevent stale-line edits. Disabled by default.
```json
{ "hashline_edit": false }
{ "hashline_edit": true }
```
When enabled, two companion hooks are active: `hashline-read-enhancer` (annotates Read output) and `hashline-edit-diff-enhancer` (shows diffs). Disable them individually via `disabled_hooks`.
When enabled, two companion hooks are active: `hashline-read-enhancer` (annotates Read output) and `hashline-edit-diff-enhancer` (shows diffs). Opt-in by setting `hashline_edit: true`. Disable the companion hooks individually via `disabled_hooks` if needed.
### Experimental

View File

@@ -1,6 +1,6 @@
{
"name": "oh-my-opencode",
"version": "3.9.0",
"version": "3.10.0",
"description": "The Best AI Agent Harness - Batteries-Included OpenCode Plugin with Multi-Model Orchestration, Parallel Background Agents, and Crafted LSP/AST Tools",
"main": "dist/index.js",
"types": "dist/index.d.ts",
@@ -54,7 +54,7 @@
"@ast-grep/cli": "^0.40.0",
"@ast-grep/napi": "^0.40.0",
"@clack/prompts": "^0.11.0",
"@code-yeongyu/comment-checker": "^0.6.1",
"@code-yeongyu/comment-checker": "^0.7.0",
"@modelcontextprotocol/sdk": "^1.25.2",
"@opencode-ai/plugin": "^1.1.19",
"@opencode-ai/sdk": "^1.1.19",
@@ -75,17 +75,17 @@
"typescript": "^5.7.3"
},
"optionalDependencies": {
"oh-my-opencode-darwin-arm64": "3.9.0",
"oh-my-opencode-darwin-x64": "3.9.0",
"oh-my-opencode-darwin-x64-baseline": "3.9.0",
"oh-my-opencode-linux-arm64": "3.9.0",
"oh-my-opencode-linux-arm64-musl": "3.9.0",
"oh-my-opencode-linux-x64": "3.9.0",
"oh-my-opencode-linux-x64-baseline": "3.9.0",
"oh-my-opencode-linux-x64-musl": "3.9.0",
"oh-my-opencode-linux-x64-musl-baseline": "3.9.0",
"oh-my-opencode-windows-x64": "3.9.0",
"oh-my-opencode-windows-x64-baseline": "3.9.0"
"oh-my-opencode-darwin-arm64": "3.10.0",
"oh-my-opencode-darwin-x64": "3.10.0",
"oh-my-opencode-darwin-x64-baseline": "3.10.0",
"oh-my-opencode-linux-arm64": "3.10.0",
"oh-my-opencode-linux-arm64-musl": "3.10.0",
"oh-my-opencode-linux-x64": "3.10.0",
"oh-my-opencode-linux-x64-baseline": "3.10.0",
"oh-my-opencode-linux-x64-musl": "3.10.0",
"oh-my-opencode-linux-x64-musl-baseline": "3.10.0",
"oh-my-opencode-windows-x64": "3.10.0",
"oh-my-opencode-windows-x64-baseline": "3.10.0"
},
"trustedDependencies": [
"@ast-grep/cli",

View File

@@ -1,6 +1,6 @@
{
"name": "oh-my-opencode-darwin-arm64",
"version": "3.9.0",
"version": "3.10.0",
"description": "Platform-specific binary for oh-my-opencode (darwin-arm64)",
"license": "MIT",
"repository": {

View File

@@ -1,6 +1,6 @@
{
"name": "oh-my-opencode-darwin-x64-baseline",
"version": "3.9.0",
"version": "3.10.0",
"description": "Platform-specific binary for oh-my-opencode (darwin-x64-baseline, no AVX2)",
"license": "MIT",
"repository": {

View File

@@ -1,6 +1,6 @@
{
"name": "oh-my-opencode-darwin-x64",
"version": "3.9.0",
"version": "3.10.0",
"description": "Platform-specific binary for oh-my-opencode (darwin-x64)",
"license": "MIT",
"repository": {

View File

@@ -1,6 +1,6 @@
{
"name": "oh-my-opencode-linux-arm64-musl",
"version": "3.9.0",
"version": "3.10.0",
"description": "Platform-specific binary for oh-my-opencode (linux-arm64-musl)",
"license": "MIT",
"repository": {

View File

@@ -1,6 +1,6 @@
{
"name": "oh-my-opencode-linux-arm64",
"version": "3.9.0",
"version": "3.10.0",
"description": "Platform-specific binary for oh-my-opencode (linux-arm64)",
"license": "MIT",
"repository": {

View File

@@ -1,6 +1,6 @@
{
"name": "oh-my-opencode-linux-x64-baseline",
"version": "3.9.0",
"version": "3.10.0",
"description": "Platform-specific binary for oh-my-opencode (linux-x64-baseline, no AVX2)",
"license": "MIT",
"repository": {

View File

@@ -1,6 +1,6 @@
{
"name": "oh-my-opencode-linux-x64-musl-baseline",
"version": "3.9.0",
"version": "3.10.0",
"description": "Platform-specific binary for oh-my-opencode (linux-x64-musl-baseline, no AVX2)",
"license": "MIT",
"repository": {

View File

@@ -1,6 +1,6 @@
{
"name": "oh-my-opencode-linux-x64-musl",
"version": "3.9.0",
"version": "3.10.0",
"description": "Platform-specific binary for oh-my-opencode (linux-x64-musl)",
"license": "MIT",
"repository": {

View File

@@ -1,6 +1,6 @@
{
"name": "oh-my-opencode-linux-x64",
"version": "3.9.0",
"version": "3.10.0",
"description": "Platform-specific binary for oh-my-opencode (linux-x64)",
"license": "MIT",
"repository": {

View File

@@ -1,6 +1,6 @@
{
"name": "oh-my-opencode-windows-x64-baseline",
"version": "3.9.0",
"version": "3.10.0",
"description": "Platform-specific binary for oh-my-opencode (windows-x64-baseline, no AVX2)",
"license": "MIT",
"repository": {

View File

@@ -1,6 +1,6 @@
{
"name": "oh-my-opencode-windows-x64",
"version": "3.9.0",
"version": "3.10.0",
"description": "Platform-specific binary for oh-my-opencode (windows-x64)",
"license": "MIT",
"repository": {

View File

@@ -1,17 +1,53 @@
import * as z from "zod"
import { OhMyOpenCodeConfigSchema } from "../src/config/schema"
function asRecord(value: unknown): Record<string, unknown> | undefined {
return typeof value === "object" && value !== null ? (value as Record<string, unknown>) : undefined
}
function dedupeCustomAgentOverrideSchema(schema: Record<string, unknown>): Record<string, unknown> {
const rootProperties = asRecord(schema.properties)
const agentsSchema = asRecord(rootProperties?.agents)
const builtInAgentProps = asRecord(agentsSchema?.properties)
const customAgentsSchema = asRecord(rootProperties?.custom_agents)
const customAdditionalProperties = asRecord(customAgentsSchema?.additionalProperties)
if (!builtInAgentProps || !customAgentsSchema || !customAdditionalProperties) {
return schema
}
const referenceAgentSchema = asRecord(
builtInAgentProps.build
?? builtInAgentProps.oracle
?? builtInAgentProps.explore,
)
if (!referenceAgentSchema) {
return schema
}
const defs = asRecord(schema.$defs) ?? {}
defs.agentOverrideConfig = referenceAgentSchema
schema.$defs = defs
customAgentsSchema.additionalProperties = { $ref: "#/$defs/agentOverrideConfig" }
return schema
}
export function createOhMyOpenCodeJsonSchema(): Record<string, unknown> {
const jsonSchema = z.toJSONSchema(OhMyOpenCodeConfigSchema, {
target: "draft-7",
unrepresentable: "any",
})
return {
const schema = {
$schema: "http://json-schema.org/draft-07/schema#",
$id: "https://raw.githubusercontent.com/code-yeongyu/oh-my-opencode/dev/assets/oh-my-opencode.schema.json",
title: "Oh My OpenCode Configuration",
description: "Configuration schema for oh-my-opencode plugin",
...jsonSchema,
}
return dedupeCustomAgentOverrideSchema(schema)
}

View File

@@ -1799,6 +1799,46 @@
"created_at": "2026-02-27T10:53:03Z",
"repoId": 1108837393,
"pullRequestNo": 2184
},
{
"name": "renanale",
"id": 37278838,
"comment_id": 3975562407,
"created_at": "2026-02-27T22:38:18Z",
"repoId": 1108837393,
"pullRequestNo": 2201
},
{
"name": "laciferin2024",
"id": 170102251,
"comment_id": 3978786169,
"created_at": "2026-03-01T01:16:25Z",
"repoId": 1108837393,
"pullRequestNo": 2222
},
{
"name": "DEAN-Cherry",
"id": 76607677,
"comment_id": 3979468463,
"created_at": "2026-03-01T08:13:43Z",
"repoId": 1108837393,
"pullRequestNo": 2227
},
{
"name": "Chocothin",
"id": 99174213,
"comment_id": 3980002001,
"created_at": "2026-03-01T13:52:10Z",
"repoId": 1108837393,
"pullRequestNo": 2230
},
{
"name": "mathew-cf",
"id": 68972715,
"comment_id": 3980951159,
"created_at": "2026-03-01T20:19:31Z",
"repoId": 1108837393,
"pullRequestNo": 2233
}
]
}

View File

@@ -1,6 +1,6 @@
# src/ — Plugin Source
**Generated:** 2026-02-24
**Generated:** 2026-03-02
## OVERVIEW

View File

@@ -1,6 +1,6 @@
# src/agents/ — 11 Agent Definitions
**Generated:** 2026-02-24
**Generated:** 2026-03-02
## OVERVIEW
@@ -10,16 +10,16 @@ Agent factories following `createXXXAgent(model) → AgentConfig` pattern. Each
| Agent | Model | Temp | Mode | Fallback Chain | Purpose |
|-------|-------|------|------|----------------|---------|
| **Sisyphus** | claude-opus-4-6 | 0.1 | primary | kimi-k2.5 → glm-4.7 → gemini-3-pro | Main orchestrator, plans + delegates |
| **Hephaestus** | gpt-5.3-codex | 0.1 | primary | NONE (required) | Autonomous deep worker |
| **Oracle** | gpt-5.2 | 0.1 | subagent | claude-opus-4-6 → gemini-3-pro | Read-only consultation |
| **Librarian** | glm-4.7 | 0.1 | subagent | big-pickle → claude-sonnet-4-6 | External docs/code search |
| **Explore** | grok-code-fast-1 | 0.1 | subagent | claude-haiku-4-5 → gpt-5-nano | Contextual grep |
| **Multimodal-Looker** | gemini-3-flash | 0.1 | subagent | gpt-5.2 → glm-4.6v → ... (6 deep) | PDF/image analysis |
| **Metis** | claude-opus-4-6 | **0.3** | subagent | kimi-k2.5 → gpt-5.2 → gemini-3-pro | Pre-planning consultant |
| **Momus** | gpt-5.2 | 0.1 | subagent | claude-opus-4-6 → gemini-3-pro | Plan reviewer |
| **Atlas** | claude-sonnet-4-6 | 0.1 | primary | kimi-k2.5 → gpt-5.2 → gemini-3-pro | Todo-list orchestrator |
| **Prometheus** | claude-opus-4-6 | 0.1 | — | kimi-k2.5 → gpt-5.2 → gemini-3-pro | Strategic planner (internal) |
| **Sisyphus** | claude-opus-4-6 | 0.1 | all | kimi-k2.5 → glm-5 → big-pickle | Main orchestrator, plans + delegates |
| **Hephaestus** | gpt-5.3-codex | 0.1 | all | gpt-5.2 (copilot) | Autonomous deep worker |
| **Oracle** | gpt-5.2 | 0.1 | subagent | gemini-3.1-pro → claude-opus-4-6 | Read-only consultation |
| **Librarian** | kimi-k2.5 | 0.1 | subagent | gemini-3-flash → gpt-5.2 → glm-4.6v | External docs/code search |
| **Explore** | grok-code-fast-1 | 0.1 | subagent | minimax-m2.5 → claude-haiku-4-5 → gpt-5-nano | Contextual grep |
| **Multimodal-Looker** | gemini-3-flash | 0.1 | subagent | minimax-m2.5 → big-pickle | PDF/image analysis |
| **Metis** | claude-opus-4-6 | **0.3** | subagent | gpt-5.2 → kimi-k2.5 → gemini-3.1-pro | Pre-planning consultant |
| **Momus** | gpt-5.2 | 0.1 | subagent | claude-opus-4-6 → gemini-3.1-pro | Plan reviewer |
| **Atlas** | kimi-k2.5 | 0.1 | primary | claude-sonnet-4-6 → gpt-5.2 | Todo-list orchestrator |
| **Prometheus** | claude-opus-4-6 | 0.1 | — | kimi-k2.5 → gpt-5.2 → gemini-3.1-pro | Strategic planner (internal) |
| **Sisyphus-Junior** | claude-sonnet-4-6 | 0.1 | all | user-configurable | Category-spawned executor |
## TOOL RESTRICTIONS

View File

@@ -4,6 +4,8 @@ import { describe, it, expect } from "bun:test"
import {
buildCategorySkillsDelegationGuide,
buildUltraworkSection,
buildDeepParallelSection,
buildNonClaudePlannerSection,
type AvailableSkill,
type AvailableCategory,
type AvailableAgent,
@@ -172,4 +174,86 @@ describe("buildUltraworkSection", () => {
})
})
describe("buildDeepParallelSection", () => {
const deepCategory: AvailableCategory = { name: "deep", description: "Autonomous problem-solving" }
const otherCategory: AvailableCategory = { name: "quick", description: "Trivial tasks" }
it("#given non-Claude model with deep category #when building #then returns parallel delegation section", () => {
//#given
const model = "google/gemini-3-pro"
const categories = [deepCategory, otherCategory]
//#when
const result = buildDeepParallelSection(model, categories)
//#then
expect(result).toContain("Deep Parallel Delegation")
expect(result).toContain("EVERY independent unit")
expect(result).toContain("run_in_background=true")
expect(result).toContain("4 independent units")
})
it("#given Claude model #when building #then returns empty", () => {
//#given
const model = "anthropic/claude-opus-4-6"
const categories = [deepCategory]
//#when
const result = buildDeepParallelSection(model, categories)
//#then
expect(result).toBe("")
})
it("#given non-Claude model without deep category #when building #then returns empty", () => {
//#given
const model = "openai/gpt-5.2"
const categories = [otherCategory]
//#when
const result = buildDeepParallelSection(model, categories)
//#then
expect(result).toBe("")
})
})
describe("buildNonClaudePlannerSection", () => {
it("#given non-Claude model #when building #then returns plan agent section", () => {
//#given
const model = "google/gemini-3-pro"
//#when
const result = buildNonClaudePlannerSection(model)
//#then
expect(result).toContain("Plan Agent")
expect(result).toContain("session_id")
expect(result).toContain("Multi-step")
})
it("#given Claude model #when building #then returns empty", () => {
//#given
const model = "anthropic/claude-sonnet-4-6"
//#when
const result = buildNonClaudePlannerSection(model)
//#then
expect(result).toBe("")
})
it("#given GPT model #when building #then returns plan agent section", () => {
//#given
const model = "openai/gpt-5.2"
//#when
const result = buildNonClaudePlannerSection(model)
//#then
expect(result).toContain("Plan Agent")
expect(result).not.toBe("")
})
})

View File

@@ -277,12 +277,11 @@ Briefly announce "Consulting Oracle for [reason]" before invocation.
### Oracle Background Task Policy:
**You MUST collect Oracle results before your final answer. No exceptions.**
**Collect Oracle results before your final answer. No exceptions.**
- Oracle may take several minutes. This is normal and expected.
- When Oracle is running and you finish your own exploration/analysis, your next action is \`background_output(task_id="...")\` on Oracle — NOT delivering a final answer.
- Oracle catches blind spots you cannot see — its value is HIGHEST when you think you don't need it.
- **NEVER** cancel Oracle. **NEVER** use \`background_cancel(all=true)\` when Oracle is running. Cancel disposable tasks (explore, librarian) individually by taskId instead.
- Oracle takes minutes. When done with your own work: **end your response** — wait for the \`<system-reminder>\`.
- Do NOT poll \`background_output\` on a running Oracle. The notification will come.
- Never cancel Oracle.
</Oracle_Usage>`
}
@@ -292,8 +291,8 @@ export function buildHardBlocksSection(): string {
"- Commit without explicit request — **Never**",
"- Speculate about unread code — **Never**",
"- Leave code in broken state after failures — **Never**",
"- `background_cancel(all=true)` when Oracle is running — **Never.** Cancel tasks individually by taskId.",
"- Delivering final answer before collecting Oracle result — **Never.** Always `background_output` Oracle first.",
"- `background_cancel(all=true)` — **Never.** Always cancel individually by taskId.",
"- Delivering final answer before collecting Oracle result — **Never.**",
]
return `## Hard Blocks (NEVER violate)
@@ -308,8 +307,8 @@ export function buildAntiPatternsSection(): string {
"- **Testing**: Deleting failing tests to \"pass\"",
"- **Search**: Firing agents for single-line typos or obvious syntax errors",
"- **Debugging**: Shotgun debugging, random changes",
"- **Background Tasks**: `background_cancel(all=true)` — always cancel individually by taskId",
"- **Oracle**: Skipping Oracle results when Oracle was launched — ALWAYS collect via `background_output`",
"- **Background Tasks**: Polling `background_output` on running tasks — end response and wait for notification",
"- **Oracle**: Delivering answer without collecting Oracle results",
]
return `## Anti-Patterns (BLOCKING violations)
@@ -334,6 +333,22 @@ When you need to call a tool:
Your tool calls are processed automatically. Just invoke the tool - do not format the call yourself.`
}
/**
 * Builds the "Plan Agent Dependency" prompt section for non-Claude models.
 *
 * Non-Claude models are instructed to consult the Plan Agent before any
 * multi-step work; Claude models need no such scaffolding and get nothing.
 *
 * @param model - Provider-qualified model id, e.g. "openai/gpt-5.2".
 * @returns The section text, or "" when the model id contains "claude"
 *          (matched case-insensitively).
 */
export function buildNonClaudePlannerSection(model: string): string {
  // Guard clause: Claude models opt out of the planner section entirely.
  if (model.toLowerCase().includes("claude")) return ""
  return `### Plan Agent Dependency (Non-Claude)
Multi-step task? **ALWAYS consult Plan Agent first.** Do NOT start implementation without a plan.
- Single-file fix or trivial change → proceed directly
- Anything else (2+ steps, unclear scope, architecture) → \`task(subagent_type="plan", ...)\` FIRST
- Use \`session_id\` to resume the same Plan Agent — ask follow-up questions aggressively
- If ANY part of the task is ambiguous, ask Plan Agent before guessing
Plan Agent returns a structured work breakdown with parallel execution opportunities. Follow it.`
}
export function buildDeepParallelSection(model: string, categories: AvailableCategory[]): string {
const isNonClaude = !model.toLowerCase().includes('claude')
const hasDeepCategory = categories.some(c => c.name === 'deep')
@@ -342,12 +357,13 @@ export function buildDeepParallelSection(model: string, categories: AvailableCat
return `### Deep Parallel Delegation
For implementation tasks, actively decompose and delegate to \`deep\` category agents in parallel.
Delegate EVERY independent unit to a \`deep\` agent in parallel (\`run_in_background=true\`).
If a task decomposes into 4 independent units, spawn 4 agents simultaneously — not 1 at a time.
1. Break the implementation into independent work units
2. Maximize parallel deep agents — spawn one per independent unit (\`run_in_background=true\`)
3. Give each agent a GOAL, not step-by-step instructions — deep agents explore and solve autonomously
4. Collect results, integrate, verify coherence`
1. Decompose the implementation into independent work units
2. Assign one \`deep\` agent per unit — all via \`run_in_background=true\`
3. Give each agent a clear GOAL with success criteria, not step-by-step instructions
4. Collect all results, integrate, verify coherence across units`
}
export function buildUltraworkSection(

View File

@@ -39,6 +39,136 @@ Then ACTUALLY CALL those tools using the JSON tool schema. Produce the tool_use
</TOOL_CALL_MANDATE>`;
}
/**
 * Builds the Gemini-specific tool-usage overlay: a <GEMINI_TOOL_GUIDE> block
 * describing WHEN to call each tool and which calls may run in parallel.
 *
 * Fix: the `Read` row of the first table carried a garbled byte where the
 * ✅ marker belongs; restored it to match every other "Parallel?" cell.
 *
 * @returns The complete guide section as a single template string.
 */
export function buildGeminiToolGuide(): string {
  return `<GEMINI_TOOL_GUIDE>
## Tool Usage Guide — WHEN and HOW to Call Each Tool
You have access to tools via function calling. This guide defines WHEN to call each one.
**Violating these patterns = failed response.**
### Reading & Search (ALWAYS parallelizable — call multiple simultaneously)
| Tool | When to Call | Parallel? |
|---|---|---|
| \`Read\` | Before making ANY claim about file contents. Before editing any file. | ✅ Yes — read multiple files at once |
| \`Grep\` | Finding patterns, imports, usages across codebase. BEFORE claiming "X is used in Y". | ✅ Yes — run multiple greps at once |
| \`Glob\` | Finding files by name/extension pattern. BEFORE claiming "file X exists". | ✅ Yes — run multiple globs at once |
| \`AstGrepSearch\` | Finding code patterns with AST awareness (structural matches). | ✅ Yes |
### Code Intelligence (parallelizable on different files)
| Tool | When to Call | Parallel? |
|---|---|---|
| \`LspDiagnostics\` | **AFTER EVERY edit.** BEFORE claiming task is done. MANDATORY. | ✅ Yes — different files |
| \`LspGotoDefinition\` | Finding where a symbol is defined. | ✅ Yes |
| \`LspFindReferences\` | Finding all usages of a symbol across workspace. | ✅ Yes |
| \`LspSymbols\` | Getting file outline or searching workspace symbols. | ✅ Yes |
### Editing (SEQUENTIAL — must Read first)
| Tool | When to Call | Parallel? |
|---|---|---|
| \`Edit\` | Modifying existing files. MUST Read file first to get LINE#ID anchors. | ❌ After Read |
| \`Write\` | Creating NEW files only. Or full file overwrite. | ❌ Sequential |
### Execution & Delegation
| Tool | When to Call | Parallel? |
|---|---|---|
| \`Bash\` | Running tests, builds, git commands. | ❌ Usually sequential |
| \`Task\` | ANY non-trivial implementation. Research via explore/librarian. | ✅ Fire multiple in background |
### Correct Sequences (MANDATORY — follow these exactly):
1. **Answer about code**: Read → (analyze) → Answer
2. **Edit code**: Read → Edit → LspDiagnostics → Report
3. **Find something**: Grep/Glob (parallel) → Read results → Report
4. **Implement feature**: Task(delegate) → Verify results → Report
5. **Debug**: Read error → Read file → Grep related → Fix → LspDiagnostics
### PARALLEL RULES:
- **Independent reads/searches**: ALWAYS call simultaneously in ONE response
- **Dependent operations**: Call sequentially (Edit AFTER Read, LspDiagnostics AFTER Edit)
- **Background agents**: ALWAYS \`run_in_background=true\`, continue working
</GEMINI_TOOL_GUIDE>`;
}
/**
 * Builds worked examples of correct vs incorrect tool-calling sequences for
 * Gemini models: read-before-answer, edit-then-verify, parallel search,
 * delegation instead of direct implementation, and investigate-vs-implement.
 *
 * @returns The <GEMINI_TOOL_CALL_EXAMPLES> section as a single template string.
 */
export function buildGeminiToolCallExamples(): string {
  // The body is prompt text, not executable code — the \`\`\` fences and arrows
  // are rendered literally for the model.
  return `<GEMINI_TOOL_CALL_EXAMPLES>
## Correct Tool Calling Patterns — Follow These Examples
### Example 1: User asks about code → Read FIRST, then answer
**User**: "How does the auth middleware work?"
**CORRECT**:
\`\`\`
→ Call Read(filePath="/src/middleware/auth.ts")
→ Call Read(filePath="/src/config/auth.ts") // parallel with above
→ (After reading) Answer based on ACTUAL file contents
\`\`\`
**WRONG**:
\`\`\`
→ "The auth middleware likely validates JWT tokens by..." ← HALLUCINATION. You didn't read the file.
\`\`\`
### Example 2: User asks to edit code → Read, Edit, Verify
**User**: "Fix the type error in user.ts"
**CORRECT**:
\`\`\`
→ Call Read(filePath="/src/models/user.ts")
→ Call LspDiagnostics(filePath="/src/models/user.ts") // parallel with Read
→ (After reading) Call Edit with LINE#ID anchors
→ Call LspDiagnostics(filePath="/src/models/user.ts") // verify fix
→ Report: "Fixed. Diagnostics clean."
\`\`\`
**WRONG**:
\`\`\`
→ Call Edit without reading first ← No LINE#ID anchors = WILL FAIL
→ Skip LspDiagnostics after edit ← UNVERIFIED
\`\`\`
### Example 3: User asks to find something → Search in parallel
**User**: "Where is the database connection configured?"
**CORRECT**:
\`\`\`
→ Call Grep(pattern="database|connection|pool", path="/src") // fires simultaneously
→ Call Glob(pattern="**/*database*") // fires simultaneously
→ Call Glob(pattern="**/*db*") // fires simultaneously
→ (After results) Read the most relevant files
→ Report findings with file paths
\`\`\`
### Example 4: User asks to implement a feature → DELEGATE
**User**: "Add a new /health endpoint to the API"
**CORRECT**:
\`\`\`
→ Call Task(category="quick", load_skills=["typescript-programmer"], prompt="...")
→ (After agent completes) Read changed files to verify
→ Call LspDiagnostics on changed files
→ Report
\`\`\`
**WRONG**:
\`\`\`
→ Write the code yourself ← YOU ARE AN ORCHESTRATOR, NOT AN IMPLEMENTER
\`\`\`
### Example 5: Investigation ≠ Implementation
**User**: "Look into why the tests are failing"
**CORRECT**:
\`\`\`
→ Call Bash(command="npm test") // see actual failures
→ Call Read on failing test files
→ Call Read on source files under test
→ Report: "Tests fail because X. Root cause: Y. Proposed fix: Z."
→ STOP — wait for user to say "fix it"
\`\`\`
**WRONG**:
\`\`\`
→ Start editing source files immediately ← "look into" ≠ "fix"
\`\`\`
</GEMINI_TOOL_CALL_EXAMPLES>`;
}
export function buildGeminiDelegationOverride(): string {
return `<GEMINI_DELEGATION_OVERRIDE>
## DELEGATION IS MANDATORY — YOU ARE NOT AN IMPLEMENTER

View File

@@ -6,6 +6,8 @@ import {
buildGeminiDelegationOverride,
buildGeminiVerificationOverride,
buildGeminiIntentGateEnforcement,
buildGeminiToolGuide,
buildGeminiToolCallExamples,
} from "./sisyphus-gemini-overlays";
const MODE: AgentMode = "all";
@@ -32,6 +34,7 @@ import {
buildHardBlocksSection,
buildAntiPatternsSection,
buildDeepParallelSection,
buildNonClaudePlannerSection,
categorizeTools,
} from "./dynamic-agent-prompt-builder";
@@ -170,6 +173,7 @@ function buildDynamicSisyphusPrompt(
const hardBlocks = buildHardBlocksSection();
const antiPatterns = buildAntiPatternsSection();
const deepParallelSection = buildDeepParallelSection(model, availableCategories);
const nonClaudePlannerSection = buildNonClaudePlannerSection(model);
const taskManagementSection = buildTaskManagementSection(useTaskSystem);
const todoHookNote = useTaskSystem
? "YOUR TASK CREATION WOULD BE TRACKED BY HOOK([SYSTEM REMINDER - TASK CONTINUATION])"
@@ -329,7 +333,7 @@ task(subagent_type="explore", run_in_background=true, load_skills=[], descriptio
// Reference Grep (external)
task(subagent_type="librarian", run_in_background=true, load_skills=[], description="Find JWT security docs", prompt="I'm implementing JWT auth and need current security best practices to choose token storage (httpOnly cookies vs localStorage) and set expiration policy. Find: OWASP auth guidelines, recommended token lifetimes, refresh token rotation strategies, common JWT vulnerabilities. Skip 'what is JWT' tutorials — production security guidance only.")
task(subagent_type="librarian", run_in_background=true, load_skills=[], description="Find Express auth patterns", prompt="I'm building Express auth middleware and need production-quality patterns to structure my middleware chain. Find how established Express apps (1000+ stars) handle: middleware ordering, token refresh, role-based access control, auth error propagation. Skip basic tutorials — I need battle-tested patterns with proper error handling.")
// Continue working immediately. Collect with background_output when needed.
// Continue working immediately. System notifies on completion — collect with background_output then.
// WRONG: Sequential or blocking
result = task(..., run_in_background=false) // Never wait synchronously for explore/librarian
@@ -337,10 +341,10 @@ result = task(..., run_in_background=false) // Never wait synchronously for exp
### Background Result Collection:
1. Launch parallel agents \u2192 receive task_ids
2. Continue immediate work (explore, librarian results)
3. When results needed: \`background_output(task_id="...")\`
4. **If Oracle is running**: STOP all other output. Follow Oracle Completion Protocol in <Oracle_Usage>.
5. Cleanup: Cancel disposable tasks (explore, librarian) individually via \`background_cancel(taskId="...")\`. Never use \`background_cancel(all=true)\`.
2. Continue immediate work
3. System sends \`<system-reminder>\` on each task completion — then call \`background_output(task_id="...")\`
4. Need results not yet ready? **End your response.** The notification will trigger your next turn.
5. Cleanup: Cancel disposable tasks individually via \`background_cancel(taskId="...")\`
### Search Stop Conditions
@@ -364,6 +368,8 @@ STOP searching when:
${categorySkillsGuide}
${nonClaudePlannerSection}
${deepParallelSection}
${delegationTable}
@@ -477,9 +483,8 @@ If verification fails:
3. Report: "Done. Note: found N pre-existing lint errors unrelated to my changes."
### Before Delivering Final Answer:
- **If Oracle is running**: STOP. Follow Oracle Completion Protocol in <Oracle_Usage>. Do NOT deliver any answer.
- Cancel disposable background tasks (explore, librarian) individually via \`background_cancel(taskId="...")\`.
- **Never use \`background_cancel(all=true)\`.**
- If Oracle is running: **end your response** and wait for the completion notification first.
- Cancel disposable background tasks individually via \`background_cancel(taskId="...")\`.
</Behavior_Instructions>
${oracleSection}
@@ -565,12 +570,25 @@ export function createSisyphusAgent(
: buildDynamicSisyphusPrompt(model, [], tools, skills, categories, useTaskSystem);
if (isGeminiModel(model)) {
// 1. Intent gate + tool mandate — early in prompt (after intent verbalization)
prompt = prompt.replace(
"</intent_verbalization>",
`</intent_verbalization>\n\n${buildGeminiIntentGateEnforcement()}\n\n${buildGeminiToolMandate()}`
);
prompt += "\n" + buildGeminiDelegationOverride();
prompt += "\n" + buildGeminiVerificationOverride();
// 2. Tool guide + examples — after tool_usage_rules (where tools are discussed)
prompt = prompt.replace(
"</tool_usage_rules>",
`</tool_usage_rules>\n\n${buildGeminiToolGuide()}\n\n${buildGeminiToolCallExamples()}`
);
// 3. Delegation + verification overrides — before Constraints (NOT at prompt end)
// Gemini suffers from lost-in-the-middle: content at prompt end gets weaker attention.
// Placing these before <Constraints> ensures they're in a high-attention zone.
prompt = prompt.replace(
"<Constraints>",
`${buildGeminiDelegationOverride()}\n\n${buildGeminiVerificationOverride()}\n\n<Constraints>`
);
}
const permission = {

View File

@@ -242,14 +242,28 @@ describe("createBuiltinAgents with model overrides", () => {
test("createBuiltinAgents excludes disabled skills from availableSkills", async () => {
// #given
const disabledSkills = new Set(["playwright"])
const cacheSpy = spyOn(connectedProvidersCache, "readConnectedProvidersCache").mockReturnValue(null)
const fetchSpy = spyOn(shared, "fetchAvailableModels").mockResolvedValue(
new Set([
"anthropic/claude-opus-4-6",
"opencode/kimi-k2.5-free",
"zai-coding-plan/glm-5",
"opencode/big-pickle",
])
)
// #when
const agents = await createBuiltinAgents([], {}, undefined, TEST_DEFAULT_MODEL, undefined, undefined, [], undefined, undefined, undefined, disabledSkills)
try {
// #when
const agents = await createBuiltinAgents([], {}, undefined, TEST_DEFAULT_MODEL, undefined, undefined, [], undefined, undefined, undefined, disabledSkills)
// #then
expect(agents.sisyphus.prompt).not.toContain("playwright")
expect(agents.sisyphus.prompt).toContain("frontend-ui-ux")
expect(agents.sisyphus.prompt).toContain("git-master")
// #then
expect(agents.sisyphus.prompt).not.toContain("playwright")
expect(agents.sisyphus.prompt).toContain("frontend-ui-ux")
expect(agents.sisyphus.prompt).toContain("git-master")
} finally {
cacheSpy.mockRestore()
fetchSpy.mockRestore()
}
})
test("includes custom agents in orchestrator prompts when provided via config", async () => {

View File

@@ -1,6 +1,6 @@
# src/cli/ — CLI: install, run, doctor, mcp-oauth
**Generated:** 2026-02-24
**Generated:** 2026-03-02
## OVERVIEW

View File

@@ -1,6 +1,6 @@
# src/cli/config-manager/ — CLI Installation Utilities
**Generated:** 2026-02-24
**Generated:** 2026-03-02
## OVERVIEW

View File

@@ -19,9 +19,6 @@ export function initConfigContext(binary: OpenCodeBinaryType, version: string |
export function getConfigContext(): ConfigContext {
if (!configContext) {
if (process.env.NODE_ENV !== "production") {
console.warn("[config-context] getConfigContext() called before initConfigContext(); defaulting to CLI paths.")
}
const paths = getOpenCodeConfigPaths({ binary: "opencode", version: null })
configContext = { binary: "opencode", version: null, paths }
}

View File

@@ -93,7 +93,7 @@ export async function checkSystem(): Promise<CheckResult> {
issues.push({
title: "Loaded plugin version mismatch",
description: `Cache expects ${loadedInfo.expectedVersion} but loaded ${loadedInfo.loadedVersion}.`,
fix: "Reinstall plugin dependencies in OpenCode cache",
fix: `Reinstall: cd ${loadedInfo.cacheDir} && bun install`,
severity: "warning",
affects: ["plugin loading"],
})
@@ -107,7 +107,7 @@ export async function checkSystem(): Promise<CheckResult> {
issues.push({
title: "Loaded plugin is outdated",
description: `Loaded ${systemInfo.loadedVersion}, latest ${latestVersion}.`,
fix: "Update: cd ~/.config/opencode && bun update oh-my-opencode",
fix: `Update: cd ${loadedInfo.cacheDir} && bun add oh-my-opencode@latest`,
severity: "warning",
affects: ["plugin features"],
})

View File

@@ -1,6 +1,6 @@
# src/cli/run/ — Non-Interactive Session Launcher
**Generated:** 2026-02-24
**Generated:** 2026-03-02
## OVERVIEW

View File

@@ -318,14 +318,8 @@ describe("event handling", () => {
// given
const ctx = createMockContext("my-session")
const state: EventState = {
...createEventState(),
mainSessionIdle: true,
mainSessionError: false,
lastError: null,
lastOutput: "",
lastPartText: "",
currentTool: null,
hasReceivedMeaningfulWork: false,
messageCount: 0,
}
const payload: EventPayload = {

View File

@@ -1,10 +1,10 @@
# src/config/ — Zod v4 Schema System
**Generated:** 2026-02-24
**Generated:** 2026-03-02
## OVERVIEW
22 schema files composing `OhMyOpenCodeConfigSchema`. Zod v4 validation with `safeParse()`. All fields optional — omitted fields use plugin defaults.
24 schema files composing `OhMyOpenCodeConfigSchema`. Zod v4 validation with `safeParse()`. All fields optional — omitted fields use plugin defaults.
## SCHEMA TREE
@@ -29,14 +29,18 @@ config/schema/
├── git-master.ts # commit_footer: boolean | string
├── browser-automation.ts # provider: playwright | agent-browser | playwright-cli
├── background-task.ts # Concurrency limits per model/provider
├── fallback-models.ts # FallbackModelsConfigSchema
├── runtime-fallback.ts # RuntimeFallbackConfigSchema
├── babysitting.ts # Unstable agent monitoring
├── dynamic-context-pruning.ts # Context pruning settings
├── start-work.ts # StartWorkConfigSchema (auto_commit)
└── internal/permission.ts # AgentPermissionSchema
```
## ROOT SCHEMA FIELDS (27)
## ROOT SCHEMA FIELDS (28)
`$schema`, `new_task_system_enabled`, `default_run_agent`, `disabled_mcps`, `disabled_agents`, `disabled_skills`, `disabled_hooks`, `disabled_commands`, `disabled_tools`, `hashline_edit`, `agents`, `categories`, `claude_code`, `sisyphus_agent`, `comment_checker`, `experimental`, `auto_update`, `skills`, `ralph_loop`, `background_task`, `notification`, `babysitting`, `git_master`, `browser_automation_engine`, `websearch`, `tmux`, `sisyphus`, `_migrations`
`$schema`, `new_task_system_enabled`, `default_run_agent`, `disabled_mcps`, `disabled_agents`, `disabled_skills`, `disabled_hooks`, `disabled_commands`, `disabled_tools`, `hashline_edit`, `agents`, `categories`, `claude_code`, `sisyphus_agent`, `comment_checker`, `experimental`, `auto_update`, `skills`, `ralph_loop`, `background_task`, `notification`, `babysitting`, `git_master`, `browser_automation_engine`, `websearch`, `tmux`, `sisyphus`, `start_work`, `_migrations`
## AGENT OVERRIDE FIELDS (21)

View File

@@ -1,11 +1,25 @@
export {
OhMyOpenCodeConfigSchema,
AgentOverrideConfigSchema,
AgentOverridesSchema,
CustomAgentOverridesSchema,
McpNameSchema,
AgentNameSchema,
OverridableAgentNameSchema,
HookNameSchema,
BuiltinCommandNameSchema,
SisyphusAgentConfigSchema,
ExperimentalConfigSchema,
RalphLoopConfigSchema,
TmuxConfigSchema,
TmuxLayoutSchema,
} from "./schema"
export type {
OhMyOpenCodeConfig,
AgentOverrideConfig,
AgentOverrides,
CustomAgentOverrides,
McpName,
AgentName,
HookName,

View File

@@ -0,0 +1,38 @@
import { describe, expect, test } from "bun:test"
import { createOhMyOpenCodeJsonSchema } from "../../script/build-schema-document"
/**
 * Narrows an unknown value to a string-keyed record view.
 * Returns undefined for null and for all non-object values; arrays and other
 * objects pass through unchanged (same behavior as a typeof check).
 */
function asRecord(value: unknown): Record<string, unknown> | undefined {
  if (value === null) return undefined
  if (typeof value !== "object") return undefined
  return value as Record<string, unknown>
}
// Validates the generated JSON-schema document: `agents` must stay closed
// (additionalProperties: false) while `custom_agents` accepts arbitrary keys —
// except built-in agent names, rejected case-insensitively via propertyNames —
// and routes values to the shared $defs/agentOverrideConfig definition.
describe("schema document generation", () => {
  test("custom_agents schema allows arbitrary custom agent keys with override shape", () => {
    // given
    const schema = createOhMyOpenCodeJsonSchema()
    // when
    const rootProperties = asRecord(schema.properties)
    const agentsSchema = asRecord(rootProperties?.agents)
    const customAgentsSchema = asRecord(rootProperties?.custom_agents)
    const customPropertyNames = asRecord(customAgentsSchema?.propertyNames)
    const customAdditionalProperties = asRecord(customAgentsSchema?.additionalProperties)
    const defs = asRecord(schema.$defs)
    const sharedAgentOverrideSchema = asRecord(defs?.agentOverrideConfig)
    const sharedAgentProperties = asRecord(sharedAgentOverrideSchema?.properties)
    // then
    expect(agentsSchema).toBeDefined()
    expect(agentsSchema?.additionalProperties).toBeFalse()
    expect(customAgentsSchema).toBeDefined()
    expect(customPropertyNames?.pattern).toBeDefined()
    // Case-insensitive character classes prove reserved names are blocked in any casing.
    expect(customPropertyNames?.pattern).toContain("[bB][uU][iI][lL][dD]")
    expect(customPropertyNames?.pattern).toContain("[pP][lL][aA][nN]")
    expect(customAdditionalProperties).toBeDefined()
    expect(customAdditionalProperties?.$ref).toBe("#/$defs/agentOverrideConfig")
    expect(sharedAgentOverrideSchema).toBeDefined()
    expect(sharedAgentProperties?.model).toEqual({ type: "string" })
    expect(sharedAgentProperties?.temperature).toEqual(
      expect.objectContaining({ type: "number" }),
    )
  })
})

View File

@@ -530,6 +530,79 @@ describe("Sisyphus-Junior agent override", () => {
expect(result.data.agents?.momus?.category).toBe("quick")
}
})
  // Happy path: arbitrary keys under custom_agents parse with the override shape.
  test("schema accepts custom_agents override keys", () => {
    // given
    const config = {
      custom_agents: {
        translator: {
          model: "google/gemini-3-flash-preview",
          temperature: 0,
        },
      },
    }
    // when
    const result = OhMyOpenCodeConfigSchema.safeParse(config)
    // then
    expect(result.success).toBe(true)
    if (result.success) {
      expect(result.data.custom_agents?.translator?.model).toBe("google/gemini-3-flash-preview")
      expect(result.data.custom_agents?.translator?.temperature).toBe(0)
    }
  })
  // `agents` is strict: a typo like "sisyphuss" must fail rather than be ignored.
  test("schema rejects unknown keys under agents", () => {
    // given
    const config = {
      agents: {
        sisyphuss: {
          model: "openai/gpt-5.3-codex",
        },
      },
    }
    // when
    const result = OhMyOpenCodeConfigSchema.safeParse(config)
    // then
    expect(result.success).toBe(false)
  })
  // Built-in agent names are reserved and cannot be redefined via custom_agents.
  test("schema rejects built-in agent names under custom_agents", () => {
    // given
    const config = {
      custom_agents: {
        sisyphus: {
          model: "openai/gpt-5.3-codex",
        },
      },
    }
    // when
    const result = OhMyOpenCodeConfigSchema.safeParse(config)
    // then
    expect(result.success).toBe(false)
  })
  // The reservation check is case-insensitive ("Sisyphus" is still "sisyphus").
  test("schema rejects built-in agent names under custom_agents case-insensitively", () => {
    // given
    const config = {
      custom_agents: {
        Sisyphus: {
          model: "openai/gpt-5.3-codex",
        },
      },
    }
    // when
    const result = OhMyOpenCodeConfigSchema.safeParse(config)
    // then
    expect(result.success).toBe(false)
  })
})
describe("BrowserAutomationProviderSchema", () => {

View File

@@ -1,5 +1,6 @@
import { z } from "zod"
import { FallbackModelsSchema } from "./fallback-models"
import { OverridableAgentNameSchema } from "./agent-names"
import { AgentPermissionSchema } from "./internal/permission"
export const AgentOverrideConfigSchema = z.object({
@@ -55,7 +56,7 @@ export const AgentOverrideConfigSchema = z.object({
.optional(),
})
export const AgentOverridesSchema = z.object({
const BuiltinAgentOverridesSchema = z.object({
build: AgentOverrideConfigSchema.optional(),
plan: AgentOverrideConfigSchema.optional(),
sisyphus: AgentOverrideConfigSchema.optional(),
@@ -72,7 +73,57 @@ export const AgentOverridesSchema = z.object({
explore: AgentOverrideConfigSchema.optional(),
"multimodal-looker": AgentOverrideConfigSchema.optional(),
atlas: AgentOverrideConfigSchema.optional(),
})
}).strict()
export const AgentOverridesSchema = BuiltinAgentOverridesSchema
// Built-in agent override names; custom_agents keys must not collide with these.
const RESERVED_CUSTOM_AGENT_NAMES = OverridableAgentNameSchema.options
// Lowercased lookup set for the case-insensitive runtime check in superRefine.
const RESERVED_CUSTOM_AGENT_NAME_SET = new Set(
  RESERVED_CUSTOM_AGENT_NAMES.map((name) => name.toLowerCase()),
)
/**
 * Escapes a string so it matches itself literally inside a RegExp pattern.
 * Every regex metacharacter is prefixed with a backslash; other characters
 * pass through untouched.
 */
function escapeRegexLiteral(value: string): string {
  const metacharacters = /[.*+?^${}()|[\]\\]/g
  return value.replace(metacharacters, "\\$&")
}
/**
 * Converts a literal string into a regex pattern that matches it in any
 * letter casing: each ASCII letter becomes a [lowerUpper] character class,
 * and every other character is escaped so it matches literally.
 */
function toCaseInsensitiveLiteralPattern(value: string): string {
  let pattern = ""
  for (const char of value) {
    if (/^[A-Za-z]$/.test(char)) {
      pattern += `[${char.toLowerCase()}${char.toUpperCase()}]`
    } else {
      // Non-letters (digits, hyphens, dots, ...) are matched literally.
      pattern += char.replace(/[.*+?^${}()|[\]\\]/g, "\\$&")
    }
  }
  return pattern
}
// Key-validation regex: a negative lookahead rejects any reserved built-in
// name (in any casing), and the trailing `.+` additionally requires the key
// to be non-empty. Mirrored into the generated JSON schema via propertyNames.
const RESERVED_CUSTOM_AGENT_NAME_PATTERN = new RegExp(
  `^(?!(?:${RESERVED_CUSTOM_AGENT_NAMES.map(toCaseInsensitiveLiteralPattern).join("|")})$).+`,
)
// Record of user-defined agent overrides keyed by custom agent name.
// Reserved built-in names are rejected twice: the key regex handles the
// common case (and is exported into the JSON schema document), while the
// superRefine pass re-checks each key case-insensitively at parse time as a
// defense-in-depth guard with a per-key issue path.
export const CustomAgentOverridesSchema = z
  .record(
    z.string().regex(
      RESERVED_CUSTOM_AGENT_NAME_PATTERN,
      "custom_agents key cannot reuse built-in agent override name",
    ),
    AgentOverrideConfigSchema,
  )
  .superRefine((value, ctx) => {
    for (const key of Object.keys(value)) {
      if (RESERVED_CUSTOM_AGENT_NAME_SET.has(key.toLowerCase())) {
        ctx.addIssue({
          // NOTE(review): z.ZodIssueCode is a Zod v3-era accessor — confirm it
          // is still exported by the zod version in use (v4 favors the
          // string literal code: "custom").
          code: z.ZodIssueCode.custom,
          path: [key],
          message: "custom_agents key cannot reuse built-in agent override name",
        })
      }
    }
  })
export type AgentOverrideConfig = z.infer<typeof AgentOverrideConfigSchema>
export type AgentOverrides = z.infer<typeof AgentOverridesSchema>
export type CustomAgentOverrides = z.infer<typeof CustomAgentOverridesSchema>

View File

@@ -0,0 +1,51 @@
import { describe, expect, test } from "bun:test"
import { ZodError } from "zod/v4"
import { BackgroundTaskConfigSchema } from "./background-task"
// Validates syncPollTimeoutMs: an optional number with a 60000 ms (1 minute)
// floor and no default value.
describe("BackgroundTaskConfigSchema", () => {
  describe("syncPollTimeoutMs", () => {
    describe("#given valid syncPollTimeoutMs (120000)", () => {
      test("#when parsed #then returns correct value", () => {
        // 120000 ms sits above the minimum, so the value round-trips intact.
        const parsed = BackgroundTaskConfigSchema.parse({ syncPollTimeoutMs: 120000 })
        expect(parsed.syncPollTimeoutMs).toBe(120000)
      })
    })
    describe("#given syncPollTimeoutMs below minimum (59999)", () => {
      test("#when parsed #then throws ZodError", () => {
        // One millisecond under the floor must be rejected by the schema.
        expect(() => BackgroundTaskConfigSchema.parse({ syncPollTimeoutMs: 59999 })).toThrow(ZodError)
      })
    })
    describe("#given syncPollTimeoutMs not provided", () => {
      test("#when parsed #then field is undefined", () => {
        // The field is optional with no default, so omission yields undefined.
        const parsed = BackgroundTaskConfigSchema.parse({})
        expect(parsed.syncPollTimeoutMs).toBeUndefined()
      })
    })
    describe('#given syncPollTimeoutMs is non-number ("abc")', () => {
      test("#when parsed #then throws ZodError", () => {
        // A string fails the z.number() type check before the min() bound.
        expect(() => BackgroundTaskConfigSchema.parse({ syncPollTimeoutMs: "abc" })).toThrow(ZodError)
      })
    })
  })
})

View File

@@ -8,6 +8,7 @@ export const BackgroundTaskConfigSchema = z.object({
staleTimeoutMs: z.number().min(60000).optional(),
/** Timeout for tasks that never received any progress update, falling back to startedAt (default: 600000 = 10 minutes, minimum: 60000 = 1 minute) */
messageStalenessTimeoutMs: z.number().min(60000).optional(),
syncPollTimeoutMs: z.number().min(60000).optional(),
})
export type BackgroundTaskConfig = z.infer<typeof BackgroundTaskConfigSchema>

View File

@@ -49,6 +49,7 @@ export const HookNameSchema = z.enum([
"write-existing-file-guard",
"anthropic-effort",
"hashline-read-enhancer",
"read-image-resizer",
])
export type HookName = z.infer<typeof HookNameSchema>

View File

@@ -1,7 +1,7 @@
import { z } from "zod"
import { AnyMcpNameSchema } from "../../mcp/types"
import { BuiltinAgentNameSchema, BuiltinSkillNameSchema } from "./agent-names"
import { AgentOverridesSchema } from "./agent-overrides"
import { AgentOverridesSchema, CustomAgentOverridesSchema } from "./agent-overrides"
import { BabysittingConfigSchema } from "./babysitting"
import { BackgroundTaskConfigSchema } from "./background-task"
import { BrowserAutomationConfigSchema } from "./browser-automation"
@@ -18,6 +18,7 @@ import { SkillsConfigSchema } from "./skills"
import { SisyphusConfigSchema } from "./sisyphus"
import { SisyphusAgentConfigSchema } from "./sisyphus-agent"
import { TmuxConfigSchema } from "./tmux"
import { StartWorkConfigSchema } from "./start-work"
import { WebsearchConfigSchema } from "./websearch"
export const OhMyOpenCodeConfigSchema = z.object({
@@ -33,11 +34,12 @@ export const OhMyOpenCodeConfigSchema = z.object({
disabled_commands: z.array(BuiltinCommandNameSchema).optional(),
/** Disable specific tools by name (e.g., ["todowrite", "todoread"]) */
disabled_tools: z.array(z.string()).optional(),
/** Enable hashline_edit tool/hook integrations (default: true at call site) */
/** Enable hashline_edit tool/hook integrations (default: false) */
hashline_edit: z.boolean().optional(),
/** Enable model fallback on API errors (default: false). Set to true to enable automatic model switching when model errors occur. */
model_fallback: z.boolean().optional(),
agents: AgentOverridesSchema.optional(),
custom_agents: CustomAgentOverridesSchema.optional(),
categories: CategoriesConfigSchema.optional(),
claude_code: ClaudeCodeConfigSchema.optional(),
sisyphus_agent: SisyphusAgentConfigSchema.optional(),
@@ -60,6 +62,7 @@ export const OhMyOpenCodeConfigSchema = z.object({
websearch: WebsearchConfigSchema.optional(),
tmux: TmuxConfigSchema.optional(),
sisyphus: SisyphusConfigSchema.optional(),
start_work: StartWorkConfigSchema.optional(),
/** Migration history to prevent re-applying migrations (e.g., model version upgrades) */
_migrations: z.array(z.string()).optional(),
})

View File

@@ -0,0 +1,8 @@
import { z } from "zod"
// Configuration for the start-work flow, exposed as `start_work` in the root
// plugin config schema.
export const StartWorkConfigSchema = z.object({
  /** Enable auto-commit after each atomic task completion (default: true) */
  auto_commit: z.boolean().default(true),
})
export type StartWorkConfig = z.infer<typeof StartWorkConfigSchema>

View File

@@ -51,6 +51,7 @@ export function createHooks(args: {
const skill = createSkillHooks({
ctx,
pluginConfig,
isHookEnabled,
safeHookEnabled,
mergedSkills,

View File

@@ -1,6 +1,6 @@
# src/features/ — 19 Feature Modules
**Generated:** 2026-02-24
**Generated:** 2026-03-02
## OVERVIEW

View File

@@ -1,10 +1,10 @@
# src/features/background-agent/ — Core Orchestration Engine
**Generated:** 2026-02-24
**Generated:** 2026-03-02
## OVERVIEW
39 files (~10k LOC). Manages async task lifecycle: launch → queue → run → poll → complete/error. Concurrency limited per model/provider (default 5). Central to multi-agent orchestration.
30 files (~10k LOC). Manages async task lifecycle: launch → queue → run → poll → complete/error. Concurrency limited per model/provider (default 5). Central to multi-agent orchestration.
## TASK LIFECYCLE

View File

@@ -80,12 +80,11 @@ export interface PluginManifest {
/**
* Hooks configuration
*/
export interface HookEntry {
type: "command" | "prompt" | "agent"
command?: string
prompt?: string
agent?: string
}
export type HookEntry =
| { type: "command"; command?: string }
| { type: "prompt"; prompt?: string }
| { type: "agent"; agent?: string }
| { type: "http"; url: string; headers?: Record<string, string>; allowedEnvVars?: string[]; timeout?: number }
export interface HookMatcher {
matcher?: string

View File

@@ -1,6 +1,6 @@
# src/features/claude-tasks/ — Task Schema + Storage
**Generated:** 2026-02-24
**Generated:** 2026-03-02
## OVERVIEW

View File

@@ -205,6 +205,45 @@ describe("ContextCollector", () => {
const ids = pending.entries.map((e) => e.id)
expect(ids).toEqual(["first", "second", "third"])
})
  // Regression guard: getPending ordering must come from the registration
  // counter, not wall-clock timestamps, so a non-monotonic Date.now must not
  // reorder entries of equal priority.
  it("keeps registration order even when Date.now values are not monotonic", () => {
    // given
    const sessionID = "ses_order_non_monotonic_time"
    const originalDateNow = Date.now
    // Deliberately out-of-order clock readings: 300 → 100 → 200.
    const mockedTimestamps = [300, 100, 200]
    let timestampIndex = 0
    Date.now = () => mockedTimestamps[timestampIndex++] ?? 0
    try {
      collector.register(sessionID, {
        id: "first",
        source: "custom",
        content: "First",
        priority: "normal",
      })
      collector.register(sessionID, {
        id: "second",
        source: "custom",
        content: "Second",
        priority: "normal",
      })
      collector.register(sessionID, {
        id: "third",
        source: "custom",
        content: "Third",
        priority: "normal",
      })
    } finally {
      // Restore the real clock even if a register call throws.
      Date.now = originalDateNow
    }
    // when
    const pending = collector.getPending(sessionID)
    // then
    const ids = pending.entries.map((entry) => entry.id)
    expect(ids).toEqual(["first", "second", "third"])
  })
})
describe("consume", () => {

View File

@@ -14,6 +14,8 @@ const PRIORITY_ORDER: Record<ContextPriority, number> = {
const CONTEXT_SEPARATOR = "\n\n---\n\n"
let registrationCounter = 0
export class ContextCollector {
private sessions: Map<string, Map<string, ContextEntry>> = new Map()
@@ -30,7 +32,7 @@ export class ContextCollector {
source: options.source,
content: options.content,
priority: options.priority ?? "normal",
timestamp: Date.now(),
registrationOrder: ++registrationCounter,
metadata: options.metadata,
}
@@ -77,7 +79,7 @@ export class ContextCollector {
return entries.sort((a, b) => {
const priorityDiff = PRIORITY_ORDER[a.priority] - PRIORITY_ORDER[b.priority]
if (priorityDiff !== 0) return priorityDiff
return a.timestamp - b.timestamp
return a.registrationOrder - b.registrationOrder
})
}
}

View File

@@ -64,6 +64,51 @@ describe("createContextInjectorMessagesTransformHook", () => {
expect(output.messages[2].parts[1].text).toBe("Second message")
})
it("uses deterministic synthetic part ID across repeated transforms", async () => {
// given
const hook = createContextInjectorMessagesTransformHook(collector)
const sessionID = "ses_transform_deterministic"
const baseMessage = createMockMessage("user", "Stable message", sessionID)
collector.register(sessionID, {
id: "ctx-1",
source: "keyword-detector",
content: "Injected context",
})
const firstOutput = {
messages: [structuredClone(baseMessage)],
}
// when
await hook["experimental.chat.messages.transform"]!({}, firstOutput)
// then
const firstSyntheticPart = firstOutput.messages[0].parts[0]
expect(
"synthetic" in firstSyntheticPart && firstSyntheticPart.synthetic === true
).toBe(true)
// given
collector.register(sessionID, {
id: "ctx-2",
source: "keyword-detector",
content: "Injected context",
})
const secondOutput = {
messages: [structuredClone(baseMessage)],
}
// when
await hook["experimental.chat.messages.transform"]!({}, secondOutput)
// then
const secondSyntheticPart = secondOutput.messages[0].parts[0]
expect(
"synthetic" in secondSyntheticPart && secondSyntheticPart.synthetic === true
).toBe(true)
expect(secondSyntheticPart.id).toBe(firstSyntheticPart.id)
})
it("does nothing when no pending context", async () => {
// given
const hook = createContextInjectorMessagesTransformHook(collector)

View File

@@ -148,7 +148,7 @@ export function createContextInjectorMessagesTransformHook(
// synthetic part pattern (minimal fields)
const syntheticPart = {
id: `synthetic_hook_${Date.now()}`,
id: `synthetic_hook_${sessionID}`,
messageID: lastUserMessage.info.id,
sessionID: (lastUserMessage.info as { sessionID?: string }).sessionID ?? "",
type: "text" as const,

View File

@@ -27,8 +27,8 @@ export interface ContextEntry {
content: string
/** Priority for ordering (default: normal) */
priority: ContextPriority
/** Timestamp when registered */
timestamp: number
/** Monotonic order when registered */
registrationOrder: number
/** Optional metadata for debugging/logging */
metadata?: Record<string, unknown>
}

View File

@@ -4,6 +4,8 @@ import {
findFirstMessageWithAgent,
findNearestMessageWithFieldsFromSDK,
findFirstMessageWithAgentFromSDK,
generateMessageId,
generatePartId,
injectHookMessage,
} from "./injector"
import { isSqliteBackend, resetSqliteBackendCache } from "../../shared/opencode-storage-detection"
@@ -192,6 +194,40 @@ describe("findFirstMessageWithAgentFromSDK", () => {
})
})
describe("generateMessageId", () => {
it("returns deterministic sequential IDs with fixed format", () => {
// given
const format = /^msg_[0-9a-f]{8}_\d{6}$/
// when
const firstId = generateMessageId()
const secondId = generateMessageId()
// then
expect(firstId).toMatch(format)
expect(secondId).toMatch(format)
expect(secondId.split("_")[1]).toBe(firstId.split("_")[1])
expect(Number(secondId.split("_")[2])).toBe(Number(firstId.split("_")[2]) + 1)
})
})
describe("generatePartId", () => {
it("returns deterministic sequential IDs with fixed format", () => {
// given
const format = /^prt_[0-9a-f]{8}_\d{6}$/
// when
const firstId = generatePartId()
const secondId = generatePartId()
// then
expect(firstId).toMatch(format)
expect(secondId).toMatch(format)
expect(secondId.split("_")[1]).toBe(firstId.split("_")[1])
expect(Number(secondId.split("_")[2])).toBe(Number(firstId.split("_")[2]) + 1)
})
})
describe("injectHookMessage", () => {
beforeEach(() => {
vi.clearAllMocks()

View File

@@ -1,4 +1,5 @@
import { existsSync, mkdirSync, readFileSync, readdirSync, writeFileSync } from "node:fs"
import { randomBytes } from "node:crypto"
import { join } from "node:path"
import type { PluginInput } from "@opencode-ai/plugin"
import { MESSAGE_STORAGE, PART_STORAGE } from "./constants"
@@ -29,6 +30,10 @@ interface SDKMessage {
}
}
const processPrefix = randomBytes(4).toString("hex")
let messageCounter = 0
let partCounter = 0
function convertSDKMessageToStoredMessage(msg: SDKMessage): StoredMessage | null {
const info = msg.info
if (!info) return null
@@ -204,16 +209,12 @@ export function findFirstMessageWithAgent(messageDir: string): string | null {
return null
}
function generateMessageId(): string {
const timestamp = Date.now().toString(16)
const random = Math.random().toString(36).substring(2, 14)
return `msg_${timestamp}${random}`
export function generateMessageId(): string {
return `msg_${processPrefix}_${String(++messageCounter).padStart(6, "0")}`
}
function generatePartId(): string {
const timestamp = Date.now().toString(16)
const random = Math.random().toString(36).substring(2, 10)
return `prt_${timestamp}${random}`
export function generatePartId(): string {
return `prt_${processPrefix}_${String(++partCounter).padStart(6, "0")}`
}
function getOrCreateMessageDir(sessionID: string): string {

View File

@@ -1,6 +1,6 @@
# src/features/mcp-oauth/ — OAuth 2.0 + PKCE + DCR for MCP Servers
**Generated:** 2026-02-24
**Generated:** 2026-03-02
## OVERVIEW

View File

@@ -1,6 +1,6 @@
# src/features/opencode-skill-loader/ — 4-Scope Skill Discovery
**Generated:** 2026-02-24
**Generated:** 2026-03-02
## OVERVIEW

View File

@@ -1,6 +1,6 @@
# src/features/tmux-subagent/ — Tmux Pane Management
**Generated:** 2026-02-24
**Generated:** 2026-03-02
## OVERVIEW

View File

@@ -1,10 +1,10 @@
# src/hooks/ — 46 Lifecycle Hooks
**Generated:** 2026-02-24
**Generated:** 2026-03-02
## OVERVIEW
46 hooks across 39 directories + 6 standalone files. Three-tier composition: Core(37) + Continuation(7) + Skill(2). All hooks follow `createXXXHook(deps) → HookFunction` factory pattern.
46 hooks across 45 directories + 11 standalone files. Three-tier composition: Core(37) + Continuation(7) + Skill(2). All hooks follow `createXXXHook(deps) → HookFunction` factory pattern.
## HOOK TIERS
@@ -14,38 +14,48 @@
hooks/
├── atlas/ # Main orchestration (757 lines)
├── anthropic-context-window-limit-recovery/ # Auto-summarize
├── todo-continuation-enforcer.ts # Force TODO completion
├── ralph-loop/ # Self-referential dev loop
├── claude-code-hooks/ # settings.json compat layer - see AGENTS.md
├── comment-checker/ # Prevents AI slop
├── anthropic-effort/ # Reasoning effort level adjustment
├── anthropic-image-context/ # Image context handling for Anthropic
├── auto-slash-command/ # Detects /command patterns
├── rules-injector/ # Conditional rules
├── auto-update-checker/ # Plugin update check
├── background-notification/ # OS notification
├── beast-mode-system/ # Beast mode system prompt injection
├── category-skill-reminder/ # Reminds of category skills
├── claude-code-hooks/ # settings.json compat layer
├── comment-checker/ # Prevents AI slop
├── compaction-context-injector/ # Injects context on compaction
├── compaction-todo-preserver/ # Preserves todos through compaction
├── delegate-task-retry/ # Retries failed delegations
├── directory-agents-injector/ # Auto-injects AGENTS.md
├── directory-readme-injector/ # Auto-injects README.md
├── edit-error-recovery/ # Recovers from failures
├── thinking-block-validator/ # Ensures valid <thinking>
├── context-window-monitor.ts # Reminds of headroom
├── session-recovery/ # Auto-recovers from crashes
├── think-mode/ # Dynamic thinking budget
├── keyword-detector/ # ultrawork/search/analyze modes
├── background-notification/ # OS notification
├── prometheus-md-only/ # Planner read-only mode
├── agent-usage-reminder/ # Specialized agent hints
├── auto-update-checker/ # Plugin update check
├── tool-output-truncator.ts # Prevents context bloat
├── compaction-context-injector/ # Injects context on compaction
├── delegate-task-retry/ # Retries failed delegations
├── hashline-edit-diff-enhancer/ # Enhanced diff output for hashline edits
├── hashline-read-enhancer/ # Adds LINE#ID hashes to Read output
├── interactive-bash-session/ # Tmux session management
├── json-error-recovery/ # JSON parse error correction
├── keyword-detector/ # ultrawork/search/analyze modes
├── model-fallback/ # Provider-level model fallback
├── no-hephaestus-non-gpt/ # Block Hephaestus from non-GPT
├── no-sisyphus-gpt/ # Block Sisyphus from GPT
├── non-interactive-env/ # Non-TTY environment handling
├── start-work/ # Sisyphus work session starter
├── task-resume-info/ # Resume info for cancelled tasks
├── prometheus-md-only/ # Planner read-only mode
├── question-label-truncator/ # Auto-truncates question labels
├── category-skill-reminder/ # Reminds of category skills
├── empty-task-response-detector.ts # Detects empty responses
├── sisyphus-junior-notepad/ # Sisyphus Junior notepad
├── stop-continuation-guard/ # Guards stop continuation
├── subagent-question-blocker/ # Blocks subagent questions
├── ralph-loop/ # Self-referential dev loop
├── read-image-resizer/ # Resize images for context efficiency
├── rules-injector/ # Conditional rules
├── runtime-fallback/ # Auto-switch models on API errors
├── session-recovery/ # Auto-recovers from crashes
├── sisyphus-junior-notepad/ # Sisyphus Junior notepad
├── start-work/ # Sisyphus work session starter
├── stop-continuation-guard/ # Guards stop continuation
├── task-reminder/ # Task system usage reminders
├── task-resume-info/ # Resume info for cancelled tasks
├── tasks-todowrite-disabler/ # Disable TodoWrite when task system active
├── think-mode/ # Dynamic thinking budget
├── thinking-block-validator/ # Ensures valid <thinking>
├── todo-continuation-enforcer/ # Force TODO completion
├── unstable-agent-babysitter/ # Monitor unstable agent behavior
├── write-existing-file-guard/ # Require Read before Write
└── index.ts # Hook aggregation + registration
```

View File

@@ -6,6 +6,8 @@ import {
} from "./storage";
import { TARGET_TOOLS, AGENT_TOOLS, REMINDER_MESSAGE } from "./constants";
import type { AgentUsageState } from "./types";
import { getSessionAgent } from "../../features/claude-code-session-state";
import { getAgentConfigKey } from "../../shared/agent-display-names";
interface ToolExecuteInput {
tool: string;
@@ -26,6 +28,23 @@ interface EventInput {
};
}
/**
 * Only orchestrator-style agents should receive usage reminders.
 * Subagents (explore, librarian, oracle, etc.) are the targets of
 * delegation, so reminding them to delegate to themselves would be
 * counterproductive.
 */
const ORCHESTRATOR_AGENTS = new Set([
  "sisyphus",
  "sisyphus-junior",
  "atlas",
  "hephaestus",
  "prometheus",
]);

// True when the given agent name (display or config form) resolves to one
// of the orchestrator agents above.
function isOrchestratorAgent(agentName: string): boolean {
  const configKey = getAgentConfigKey(agentName);
  return ORCHESTRATOR_AGENTS.has(configKey);
}
export function createAgentUsageReminderHook(_ctx: PluginInput) {
const sessionStates = new Map<string, AgentUsageState>();
@@ -60,6 +79,12 @@ export function createAgentUsageReminderHook(_ctx: PluginInput) {
output: ToolExecuteOutput,
) => {
const { tool, sessionID } = input;
const agent = getSessionAgent(sessionID);
if (agent && !isOrchestratorAgent(agent)) {
return;
}
const toolLower = tool.toLowerCase();
if (AGENT_TOOLS.has(toolLower)) {

View File

@@ -1,6 +1,6 @@
# src/hooks/anthropic-context-window-limit-recovery/ — Multi-Strategy Context Recovery
**Generated:** 2026-02-24
**Generated:** 2026-03-02
## OVERVIEW

View File

@@ -1,11 +1,7 @@
import { log } from "../../shared"
import { log, normalizeModelID } from "../../shared"
const OPUS_4_6_PATTERN = /claude-opus-4[-.]6/i
function normalizeModelID(modelID: string): string {
return modelID.replace(/\.(\d+)/g, "-$1")
}
function isClaudeProvider(providerID: string, modelID: string): boolean {
if (["anthropic", "google-vertex-anthropic", "opencode"].includes(providerID)) return true
if (providerID === "github-copilot" && modelID.toLowerCase().includes("claude")) return true

View File

@@ -1,6 +1,6 @@
# src/hooks/atlas/ — Master Boulder Orchestrator
**Generated:** 2026-02-24
**Generated:** 2026-03-02
## OVERVIEW

View File

@@ -7,6 +7,7 @@ import type { AtlasHookOptions, SessionState } from "./types"
export function createAtlasHook(ctx: PluginInput, options?: AtlasHookOptions) {
const sessions = new Map<string, SessionState>()
const pendingFilePaths = new Map<string, string>()
const autoCommit = options?.autoCommit ?? true
function getState(sessionID: string): SessionState {
let state = sessions.get(sessionID)
@@ -20,6 +21,6 @@ export function createAtlasHook(ctx: PluginInput, options?: AtlasHookOptions) {
return {
handler: createAtlasEventHandler({ ctx, options, sessions, getState }),
"tool.execute.before": createToolExecuteBeforeHandler({ ctx, pendingFilePaths }),
"tool.execute.after": createToolExecuteAfterHandler({ ctx, pendingFilePaths }),
"tool.execute.after": createToolExecuteAfterHandler({ ctx, pendingFilePaths, autoCommit }),
}
}

View File

@@ -14,9 +14,9 @@ import type { ToolExecuteAfterInput, ToolExecuteAfterOutput } from "./types"
export function createToolExecuteAfterHandler(input: {
ctx: PluginInput
pendingFilePaths: Map<string, string>
}): (toolInput: ToolExecuteAfterInput, toolOutput: ToolExecuteAfterOutput) => Promise<void> {
const { ctx, pendingFilePaths } = input
autoCommit: boolean
}): (toolInput: ToolExecuteAfterInput, toolOutput: ToolExecuteAfterOutput) => Promise<void> {
const { ctx, pendingFilePaths, autoCommit } = input
return async (toolInput, toolOutput): Promise<void> => {
// Guard against undefined output (e.g., from /review command - see issue #1035)
if (!toolOutput) {
@@ -76,7 +76,7 @@ export function createToolExecuteAfterHandler(input: {
// Preserve original subagent response - critical for debugging failed tasks
const originalResponse = toolOutput.output
toolOutput.output = `
toolOutput.output = `
## SUBAGENT WORK COMPLETED
${fileChanges}
@@ -88,9 +88,8 @@ ${fileChanges}
${originalResponse}
<system-reminder>
${buildOrchestratorReminder(boulderState.plan_name, progress, subagentSessionId)}
${buildOrchestratorReminder(boulderState.plan_name, progress, subagentSessionId, autoCommit)}
</system-reminder>`
log(`[${HOOK_NAME}] Output transformed for orchestrator mode (boulder)`, {
plan: boulderState.plan_name,
progress: `${progress.completed}/${progress.total}`,

View File

@@ -8,6 +8,8 @@ export interface AtlasHookOptions {
backgroundManager?: BackgroundManager
isContinuationStopped?: (sessionID: string) => boolean
agentOverrides?: AgentOverrides
/** Enable auto-commit after each atomic task completion (default: true) */
autoCommit?: boolean
}
export interface ToolExecuteAfterInput {

View File

@@ -14,9 +14,22 @@ task(session_id="${sessionId}", prompt="fix: [describe the specific failure]")
export function buildOrchestratorReminder(
planName: string,
progress: { total: number; completed: number },
sessionId: string
sessionId: string,
autoCommit: boolean = true
): string {
const remaining = progress.total - progress.completed
const commitStep = autoCommit
? `
**STEP 8: COMMIT ATOMIC UNIT**
- Stage ONLY the verified changes
- Commit with clear message describing what was done
`
: ""
const nextStepNumber = autoCommit ? 9 : 8
return `
---
@@ -60,13 +73,8 @@ Update the plan file \`.sisyphus/plans/${planName}.md\`:
- Use \`Edit\` tool to modify the checkbox
**DO THIS BEFORE ANYTHING ELSE. Unmarked = Untracked = Lost progress.**
**STEP 8: COMMIT ATOMIC UNIT**
- Stage ONLY the verified changes
- Commit with clear message describing what was done
**STEP 9: PROCEED TO NEXT TASK**
${commitStep}
**STEP ${nextStepNumber}: PROCEED TO NEXT TASK**
- Read the plan file AGAIN to identify the next \`- [ ]\` task
- Start immediately - DO NOT STOP

View File

@@ -3,7 +3,7 @@ export const HOOK_NAME = "auto-slash-command" as const
export const AUTO_SLASH_COMMAND_TAG_OPEN = "<auto-slash-command>"
export const AUTO_SLASH_COMMAND_TAG_CLOSE = "</auto-slash-command>"
export const SLASH_COMMAND_PATTERN = /^\/([a-zA-Z][\w-]*)\s*(.*)/
export const SLASH_COMMAND_PATTERN = /^\/([a-zA-Z@][\w:@/-]*)\s*(.*)/
export const EXCLUDED_COMMANDS = new Set([
"ralph-loop",

View File

@@ -102,6 +102,19 @@ After`
expect(result?.args).toBe("project")
})
it("should parse namespaced marketplace commands", () => {
// given a namespaced command
const text = "/daplug:run-prompt build bridge"
// when parsing
const result = parseSlashCommand(text)
// then should keep full namespaced command
expect(result).not.toBeNull()
expect(result?.command).toBe("daplug:run-prompt")
expect(result?.args).toBe("build bridge")
})
it("should return null for non-slash text", () => {
// given text without slash
const text = "regular text"

View File

@@ -0,0 +1,168 @@
import { afterEach, beforeEach, describe, expect, it } from "bun:test"
import { mkdtempSync, mkdirSync, rmSync, writeFileSync } from "node:fs"
import { tmpdir } from "node:os"
import { join } from "node:path"
import { executeSlashCommand } from "./executor"
const ENV_KEYS = [
"CLAUDE_CONFIG_DIR",
"CLAUDE_PLUGINS_HOME",
"CLAUDE_SETTINGS_PATH",
"OPENCODE_CONFIG_DIR",
] as const
type EnvKey = (typeof ENV_KEYS)[number]
type EnvSnapshot = Record<EnvKey, string | undefined>
/**
 * Builds an on-disk fixture for a single installed marketplace plugin
 * ("daplug@1.0.0") under baseDir and redirects the CLAUDE_* / OPENCODE_*
 * environment variables at it, so executeSlashCommand discovers the
 * plugin's commands during the test.
 */
function writePluginFixture(baseDir: string): void {
  // All JSON fixture files use the same 2-space pretty-printing.
  const writeJson = (filePath: string, value: unknown): void => {
    writeFileSync(filePath, JSON.stringify(value, null, 2))
  }

  const claudeConfigDir = join(baseDir, "claude-config")
  const pluginsHome = join(claudeConfigDir, "plugins")
  const settingsPath = join(claudeConfigDir, "settings.json")
  const opencodeConfigDir = join(baseDir, "opencode-config")
  const pluginInstallPath = join(baseDir, "installed-plugins", "daplug")
  const pluginKey = "daplug@1.0.0"

  // Plugin payload: manifest plus one markdown command definition.
  mkdirSync(join(pluginInstallPath, ".claude-plugin"), { recursive: true })
  mkdirSync(join(pluginInstallPath, "commands"), { recursive: true })
  writeJson(join(pluginInstallPath, ".claude-plugin", "plugin.json"), {
    name: "daplug",
    version: "1.0.0",
  })
  writeFileSync(
    join(pluginInstallPath, "commands", "run-prompt.md"),
    `---
description: Run prompt from daplug
---
Execute daplug prompt flow.
`,
  )

  // Registry entry describing where (and at what scope) the plugin lives.
  mkdirSync(pluginsHome, { recursive: true })
  writeJson(join(pluginsHome, "installed_plugins.json"), {
    version: 2,
    plugins: {
      [pluginKey]: [
        {
          scope: "user",
          installPath: pluginInstallPath,
          version: "1.0.0",
          installedAt: "2026-01-01T00:00:00.000Z",
          lastUpdated: "2026-01-01T00:00:00.000Z",
        },
      ],
    },
  })

  // Settings file marking the plugin as enabled.
  mkdirSync(claudeConfigDir, { recursive: true })
  writeJson(settingsPath, { enabledPlugins: { [pluginKey]: true } })

  mkdirSync(opencodeConfigDir, { recursive: true })

  // Point discovery at the fixture directories.
  process.env.CLAUDE_CONFIG_DIR = claudeConfigDir
  process.env.CLAUDE_PLUGINS_HOME = pluginsHome
  process.env.CLAUDE_SETTINGS_PATH = settingsPath
  process.env.OPENCODE_CONFIG_DIR = opencodeConfigDir
}
// End-to-end tests for marketplace plugin command dispatch: a temp fixture
// plugin is written to disk, env vars are redirected to it, and
// executeSlashCommand is asked to resolve namespaced commands.
describe("auto-slash command executor plugin dispatch", () => {
  let tempDir = ""
  let envSnapshot: EnvSnapshot

  // Fresh fixture per test; snapshot the env vars the fixture overwrites
  // so they can be restored afterwards.
  beforeEach(() => {
    tempDir = mkdtempSync(join(tmpdir(), "omo-executor-plugin-test-"))
    envSnapshot = {
      CLAUDE_CONFIG_DIR: process.env.CLAUDE_CONFIG_DIR,
      CLAUDE_PLUGINS_HOME: process.env.CLAUDE_PLUGINS_HOME,
      CLAUDE_SETTINGS_PATH: process.env.CLAUDE_SETTINGS_PATH,
      OPENCODE_CONFIG_DIR: process.env.OPENCODE_CONFIG_DIR,
    }
    writePluginFixture(tempDir)
  })

  // Restore each env var exactly (deleting ones that were unset) and
  // remove the temp fixture directory.
  afterEach(() => {
    for (const key of ENV_KEYS) {
      const previousValue = envSnapshot[key]
      if (previousValue === undefined) {
        delete process.env[key]
      } else {
        process.env[key] = previousValue
      }
    }
    rmSync(tempDir, { recursive: true, force: true })
  })

  // Happy path: a namespaced command from an enabled plugin resolves with
  // scope "plugin".
  it("resolves marketplace plugin commands when plugin loading is enabled", async () => {
    const result = await executeSlashCommand(
      {
        command: "daplug:run-prompt",
        args: "ship it",
        raw: "/daplug:run-prompt ship it",
      },
      {
        skills: [],
        pluginsEnabled: true,
      },
    )
    expect(result.success).toBe(true)
    expect(result.replacementText).toContain("# /daplug:run-prompt Command")
    expect(result.replacementText).toContain("**Scope**: plugin")
  })

  // With the plugins toggle off, the same command must behave as not found.
  it("excludes marketplace commands when plugins are disabled via config toggle", async () => {
    const result = await executeSlashCommand(
      {
        command: "daplug:run-prompt",
        args: "",
        raw: "/daplug:run-prompt",
      },
      {
        skills: [],
        pluginsEnabled: false,
      },
    )
    expect(result.success).toBe(false)
    expect(result.error).toBe(
      'Command "/daplug:run-prompt" not found. Use the skill tool to list available skills and commands.',
    )
  })

  // Unknown namespaced commands get the generic not-found message, not the
  // old "Marketplace plugin commands ... not supported" wording.
  it("returns standard not-found for unknown namespaced commands", async () => {
    const result = await executeSlashCommand(
      {
        command: "daplug:missing",
        args: "",
        raw: "/daplug:missing",
      },
      {
        skills: [],
        pluginsEnabled: true,
      },
    )
    expect(result.success).toBe(false)
    expect(result.error).toBe(
      'Command "/daplug:missing" not found. Use the skill tool to list available skills and commands.',
    )
    expect(result.error).not.toContain("Marketplace plugin commands")
  })
})

View File

@@ -12,10 +12,15 @@ import { loadBuiltinCommands } from "../../features/builtin-commands"
import type { CommandFrontmatter } from "../../features/claude-code-command-loader/types"
import { isMarkdownFile } from "../../shared/file-utils"
import { discoverAllSkills, type LoadedSkill, type LazyContentLoader } from "../../features/opencode-skill-loader"
import {
discoverInstalledPlugins,
loadPluginCommands,
loadPluginSkillsAsCommands,
} from "../../features/claude-code-plugin-loader"
import type { ParsedSlashCommand } from "./types"
interface CommandScope {
type: "user" | "project" | "opencode" | "opencode-project" | "skill" | "builtin"
type: "user" | "project" | "opencode" | "opencode-project" | "skill" | "builtin" | "plugin"
}
interface CommandMetadata {
@@ -99,6 +104,36 @@ function skillToCommandInfo(skill: LoadedSkill): CommandInfo {
export interface ExecutorOptions {
skills?: LoadedSkill[]
pluginsEnabled?: boolean
enabledPluginsOverride?: Record<string, boolean>
}
function discoverPluginCommands(options?: ExecutorOptions): CommandInfo[] {
if (options?.pluginsEnabled === false) {
return []
}
const { plugins } = discoverInstalledPlugins({
enabledPluginsOverride: options?.enabledPluginsOverride,
})
const pluginDefinitions = {
...loadPluginCommands(plugins),
...loadPluginSkillsAsCommands(plugins),
}
return Object.entries(pluginDefinitions).map(([name, definition]) => ({
name,
metadata: {
name,
description: definition.description || "",
model: definition.model,
agent: definition.agent,
subtask: definition.subtask,
},
content: definition.template,
scope: "plugin",
}))
}
async function discoverAllCommands(options?: ExecutorOptions): Promise<CommandInfo[]> {
@@ -128,6 +163,7 @@ async function discoverAllCommands(options?: ExecutorOptions): Promise<CommandIn
const skills = options?.skills ?? await discoverAllSkills()
const skillCommands = skills.map(skillToCommandInfo)
const pluginCommands = discoverPluginCommands(options)
return [
...builtinCommands,
@@ -136,6 +172,7 @@ async function discoverAllCommands(options?: ExecutorOptions): Promise<CommandIn
...opencodeGlobalCommands,
...userCommands,
...skillCommands,
...pluginCommands,
]
}
@@ -202,9 +239,7 @@ export async function executeSlashCommand(parsed: ParsedSlashCommand, options?:
if (!command) {
return {
success: false,
error: parsed.command.includes(":")
? `Marketplace plugin commands like "/${parsed.command}" are not supported. Use .claude/commands/ for custom commands.`
: `Command "/${parsed.command}" not found. Use the skill tool to list available skills and commands.`,
error: `Command "/${parsed.command}" not found. Use the skill tool to list available skills and commands.`,
}
}

View File

@@ -22,11 +22,15 @@ const sessionProcessedCommandExecutions = new Set<string>()
export interface AutoSlashCommandHookOptions {
skills?: LoadedSkill[]
pluginsEnabled?: boolean
enabledPluginsOverride?: Record<string, boolean>
}
export function createAutoSlashCommandHook(options?: AutoSlashCommandHookOptions) {
const executorOptions: ExecutorOptions = {
skills: options?.skills,
pluginsEnabled: options?.pluginsEnabled,
enabledPluginsOverride: options?.enabledPluginsOverride,
}
return {

View File

@@ -1,6 +1,6 @@
# src/hooks/claude-code-hooks/ — Claude Code Compatibility
**Generated:** 2026-02-24
**Generated:** 2026-03-02
## OVERVIEW

View File

@@ -1,12 +1,12 @@
import { join } from "path"
import { existsSync } from "fs"
import { getClaudeConfigDir } from "../../shared"
import type { ClaudeHooksConfig, HookMatcher, HookCommand } from "./types"
import type { ClaudeHooksConfig, HookMatcher, HookAction } from "./types"
interface RawHookMatcher {
matcher?: string
pattern?: string
hooks: HookCommand[]
hooks: HookAction[]
}
interface RawClaudeHooksConfig {

View File

@@ -0,0 +1,27 @@
import type { HookAction } from "./types"
import type { CommandResult } from "../../shared/command-executor/execute-hook-command"
import { executeHookCommand } from "../../shared"
import { executeHttpHook } from "./execute-http-hook"
import { DEFAULT_CONFIG } from "./plugin-config"
/**
 * Short identifier for a hook, suitable for logging.
 * - HTTP hooks are identified by their URL.
 * - Command hooks use the final path segment of the command string; when
 *   that segment is empty (e.g. the command ends with "/"), the full
 *   command string is returned instead.
 */
export function getHookIdentifier(hook: HookAction): string {
  if (hook.type === "http") {
    return hook.url
  }
  const segments = hook.command.split("/")
  const basename = segments[segments.length - 1]
  return basename || hook.command
}
/**
 * Routes a hook definition to the appropriate executor.
 * HTTP hooks are delivered via executeHttpHook; every other hook type is
 * run as a shell command through executeHookCommand using the plugin's
 * default shell settings (forceZsh / zshPath).
 *
 * @param hook - hook definition to dispatch
 * @param stdinJson - JSON payload passed on stdin (or as the request body)
 * @param cwd - working directory for command hooks
 */
export async function dispatchHook(
  hook: HookAction,
  stdinJson: string,
  cwd: string
): Promise<CommandResult> {
  if (hook.type !== "http") {
    return executeHookCommand(hook.command, stdinJson, cwd, {
      forceZsh: DEFAULT_CONFIG.forceZsh,
      zshPath: DEFAULT_CONFIG.zshPath,
    })
  }
  return executeHttpHook(hook, stdinJson)
}

View File

@@ -0,0 +1,312 @@
import { describe, it, expect, mock, beforeEach, afterEach } from "bun:test"
import type { HookHttp } from "./types"
const mockFetch = mock(() =>
Promise.resolve(new Response(JSON.stringify({}), { status: 200 }))
)
const originalFetch = globalThis.fetch
describe("executeHttpHook", () => {
beforeEach(() => {
globalThis.fetch = mockFetch as unknown as typeof fetch
mockFetch.mockReset()
mockFetch.mockImplementation(() =>
Promise.resolve(new Response(JSON.stringify({}), { status: 200 }))
)
})
afterEach(() => {
globalThis.fetch = originalFetch
})
describe("#given a basic HTTP hook", () => {
const hook: HookHttp = {
type: "http",
url: "http://localhost:8080/hooks/pre-tool-use",
}
const stdinData = JSON.stringify({ hook_event_name: "PreToolUse", tool_name: "Bash" })
it("#when executed #then sends POST request with correct body", async () => {
const { executeHttpHook } = await import("./execute-http-hook")
await executeHttpHook(hook, stdinData)
expect(mockFetch).toHaveBeenCalledTimes(1)
const [url, options] = mockFetch.mock.calls[0] as unknown as [string, RequestInit]
expect(url).toBe("http://localhost:8080/hooks/pre-tool-use")
expect(options.method).toBe("POST")
expect(options.body).toBe(stdinData)
})
it("#when executed #then sets content-type to application/json", async () => {
const { executeHttpHook } = await import("./execute-http-hook")
await executeHttpHook(hook, stdinData)
const [, options] = mockFetch.mock.calls[0] as unknown as [string, RequestInit]
const headers = options.headers as Record<string, string>
expect(headers["Content-Type"]).toBe("application/json")
})
})
describe("#given an HTTP hook with headers and env var interpolation", () => {
const originalEnv = process.env
beforeEach(() => {
process.env = { ...originalEnv, MY_TOKEN: "secret-123", OTHER_VAR: "other-value" }
})
afterEach(() => {
process.env = originalEnv
})
it("#when allowedEnvVars includes the var #then interpolates env var in headers", async () => {
const hook: HookHttp = {
type: "http",
url: "http://localhost:8080/hooks",
headers: { Authorization: "Bearer $MY_TOKEN" },
allowedEnvVars: ["MY_TOKEN"],
}
const { executeHttpHook } = await import("./execute-http-hook")
await executeHttpHook(hook, "{}")
const [, options] = mockFetch.mock.calls[0] as unknown as [string, RequestInit]
const headers = options.headers as Record<string, string>
expect(headers["Authorization"]).toBe("Bearer secret-123")
})
it("#when env var uses ${VAR} syntax #then interpolates correctly", async () => {
const hook: HookHttp = {
type: "http",
url: "http://localhost:8080/hooks",
headers: { Authorization: "Bearer ${MY_TOKEN}" },
allowedEnvVars: ["MY_TOKEN"],
}
const { executeHttpHook } = await import("./execute-http-hook")
await executeHttpHook(hook, "{}")
const [, options] = mockFetch.mock.calls[0] as unknown as [string, RequestInit]
const headers = options.headers as Record<string, string>
expect(headers["Authorization"]).toBe("Bearer secret-123")
})
it("#when env var not in allowedEnvVars #then replaces with empty string", async () => {
const hook: HookHttp = {
type: "http",
url: "http://localhost:8080/hooks",
headers: { Authorization: "Bearer $OTHER_VAR" },
allowedEnvVars: ["MY_TOKEN"],
}
const { executeHttpHook } = await import("./execute-http-hook")
await executeHttpHook(hook, "{}")
const [, options] = mockFetch.mock.calls[0] as unknown as [string, RequestInit]
const headers = options.headers as Record<string, string>
expect(headers["Authorization"]).toBe("Bearer ")
})
})
describe("#given an HTTP hook with timeout", () => {
it("#when timeout specified #then passes AbortSignal with timeout", async () => {
const hook: HookHttp = {
type: "http",
url: "http://localhost:8080/hooks",
timeout: 10,
}
const { executeHttpHook } = await import("./execute-http-hook")
await executeHttpHook(hook, "{}")
const [, options] = mockFetch.mock.calls[0] as unknown as [string, RequestInit]
expect(options.signal).toBeDefined()
})
})
// Scheme allow-list behavior: only http:/https: URLs may be fetched; all
// other schemes (and unparseable URLs) are rejected before any network call.
describe("#given hook URL scheme validation", () => {
  it("#when URL uses file:// scheme #then rejects with exit code 1", async () => {
    const hook: HookHttp = { type: "http", url: "file:///etc/passwd" }
    const { executeHttpHook } = await import("./execute-http-hook")
    const result = await executeHttpHook(hook, "{}")
    expect(result.exitCode).toBe(1)
    expect(result.stderr).toContain('HTTP hook URL scheme "file:" is not allowed')
    expect(mockFetch).not.toHaveBeenCalled()
  })
  it("#when URL uses data: scheme #then rejects with exit code 1", async () => {
    const hook: HookHttp = { type: "http", url: "data:text/plain,hello" }
    const { executeHttpHook } = await import("./execute-http-hook")
    const result = await executeHttpHook(hook, "{}")
    expect(result.exitCode).toBe(1)
    expect(result.stderr).toContain('HTTP hook URL scheme "data:" is not allowed')
    expect(mockFetch).not.toHaveBeenCalled()
  })
  it("#when URL uses ftp:// scheme #then rejects with exit code 1", async () => {
    const hook: HookHttp = { type: "http", url: "ftp://localhost/hooks" }
    const { executeHttpHook } = await import("./execute-http-hook")
    const result = await executeHttpHook(hook, "{}")
    expect(result.exitCode).toBe(1)
    expect(result.stderr).toContain('HTTP hook URL scheme "ftp:" is not allowed')
    expect(mockFetch).not.toHaveBeenCalled()
  })
  it("#when URL uses http:// scheme #then allows hook execution", async () => {
    const hook: HookHttp = { type: "http", url: "http://localhost:8080/hooks" }
    const { executeHttpHook } = await import("./execute-http-hook")
    const result = await executeHttpHook(hook, "{}")
    expect(result.exitCode).toBe(0)
    expect(mockFetch).toHaveBeenCalledTimes(1)
  })
  it("#when URL uses https:// scheme #then allows hook execution", async () => {
    const hook: HookHttp = { type: "http", url: "https://example.com/hooks" }
    const { executeHttpHook } = await import("./execute-http-hook")
    const result = await executeHttpHook(hook, "{}")
    expect(result.exitCode).toBe(0)
    expect(mockFetch).toHaveBeenCalledTimes(1)
  })
  it("#when URL is invalid #then rejects with exit code 1", async () => {
    const hook: HookHttp = { type: "http", url: "not-a-valid-url" }
    const { executeHttpHook } = await import("./execute-http-hook")
    const result = await executeHttpHook(hook, "{}")
    expect(result.exitCode).toBe(1)
    expect(result.stderr).toContain("HTTP hook URL is invalid: not-a-valid-url")
    expect(mockFetch).not.toHaveBeenCalled()
  })
})
// Happy path: a 2xx response's body is surfaced verbatim as stdout.
describe("#given a successful HTTP response", () => {
  it("#when response has JSON body #then returns parsed output", async () => {
    mockFetch.mockImplementation(() =>
      Promise.resolve(
        new Response(JSON.stringify({ decision: "allow", reason: "ok" }), {
          status: 200,
          headers: { "Content-Type": "application/json" },
        })
      )
    )
    const hook: HookHttp = { type: "http", url: "http://localhost:8080/hooks" }
    const { executeHttpHook } = await import("./execute-http-hook")
    const result = await executeHttpHook(hook, "{}")
    expect(result.exitCode).toBe(0)
    expect(result.stdout).toContain('"decision":"allow"')
  })
})
// Failure paths: non-2xx statuses and transport errors both map to exit code 1.
describe("#given a failing HTTP response", () => {
  it("#when response status is 4xx #then returns exit code 1", async () => {
    mockFetch.mockImplementation(() =>
      Promise.resolve(new Response("Bad Request", { status: 400 }))
    )
    const hook: HookHttp = { type: "http", url: "http://localhost:8080/hooks" }
    const { executeHttpHook } = await import("./execute-http-hook")
    const result = await executeHttpHook(hook, "{}")
    expect(result.exitCode).toBe(1)
    expect(result.stderr).toContain("400")
  })
  it("#when fetch throws network error #then returns exit code 1", async () => {
    mockFetch.mockImplementation(() => Promise.reject(new Error("ECONNREFUSED")))
    const hook: HookHttp = { type: "http", url: "http://localhost:8080/hooks" }
    const { executeHttpHook } = await import("./execute-http-hook")
    const result = await executeHttpHook(hook, "{}")
    expect(result.exitCode).toBe(1)
    expect(result.stderr).toContain("ECONNREFUSED")
  })
})
// Hook protocol: a JSON body may carry its own exitCode (2 = block the call).
describe("#given response with exit code in JSON", () => {
  it("#when JSON contains exitCode 2 #then uses that exit code", async () => {
    mockFetch.mockImplementation(() =>
      Promise.resolve(
        new Response(JSON.stringify({ exitCode: 2, stderr: "blocked" }), {
          status: 200,
          headers: { "Content-Type": "application/json" },
        })
      )
    )
    const hook: HookHttp = { type: "http", url: "http://localhost:8080/hooks" }
    const { executeHttpHook } = await import("./execute-http-hook")
    const result = await executeHttpHook(hook, "{}")
    expect(result.exitCode).toBe(2)
  })
})
})
// Unit specs for the $VAR / ${VAR} environment-variable interpolation helper.
describe("interpolateEnvVars", () => {
  const originalEnv = process.env
  beforeEach(() => {
    // Work on a copy so env mutations never leak into other test files.
    process.env = { ...originalEnv, TOKEN: "abc", SECRET: "xyz" }
  })
  afterEach(() => {
    process.env = originalEnv
  })
  it("#given $VAR syntax #when var is allowed #then interpolates", async () => {
    const { interpolateEnvVars } = await import("./execute-http-hook")
    const result = interpolateEnvVars("Bearer $TOKEN", ["TOKEN"])
    expect(result).toBe("Bearer abc")
  })
  it("#given ${VAR} syntax #when var is allowed #then interpolates", async () => {
    const { interpolateEnvVars } = await import("./execute-http-hook")
    const result = interpolateEnvVars("Bearer ${TOKEN}", ["TOKEN"])
    expect(result).toBe("Bearer abc")
  })
  it("#given multiple vars #when some not allowed #then only interpolates allowed ones", async () => {
    const { interpolateEnvVars } = await import("./execute-http-hook")
    const result = interpolateEnvVars("$TOKEN:$SECRET", ["TOKEN"])
    expect(result).toBe("abc:")
  })
  it("#given ${VAR} where value contains $ANOTHER #when both allowed #then does not double-interpolate", async () => {
    // Replacement values must never be re-scanned for further references.
    process.env = { ...process.env, TOKEN: "val$SECRET", SECRET: "oops" }
    const { interpolateEnvVars } = await import("./execute-http-hook")
    const result = interpolateEnvVars("Bearer ${TOKEN}", ["TOKEN", "SECRET"])
    expect(result).toBe("Bearer val$SECRET")
  })
  it("#given no allowedEnvVars #when called #then replaces all with empty", async () => {
    const { interpolateEnvVars } = await import("./execute-http-hook")
    const result = interpolateEnvVars("Bearer $TOKEN", [])
    expect(result).toBe("Bearer ")
  })
})

View File

@@ -0,0 +1,92 @@
import type { HookHttp } from "./types"
import type { CommandResult } from "../../shared/command-executor/execute-hook-command"
// Fallback request timeout (seconds) used when a hook config omits `timeout`.
const DEFAULT_HTTP_HOOK_TIMEOUT_S = 30
// Hooks may only target web endpoints; file:, data:, ftp:, etc. are rejected.
const ALLOWED_SCHEMES = new Set(["http:", "https:"])
/**
 * Replaces `$VAR` and `${VAR}` references in `value` with the matching
 * environment variable, but only for names listed in `allowedEnvVars`.
 * Disallowed or unset variables become the empty string, and replacement
 * values are never re-scanned (no double interpolation).
 */
export function interpolateEnvVars(
  value: string,
  allowedEnvVars: string[]
): string {
  const permitted = new Set(allowedEnvVars)
  return value.replace(
    /\$\{(\w+)\}|\$(\w+)/g,
    (_full, braced: string | undefined, bare: string | undefined) => {
      const name = braced ?? bare
      if (name === undefined || !permitted.has(name)) return ""
      return process.env[name] ?? ""
    }
  )
}
/**
 * Builds the outgoing request headers for an HTTP hook.
 * Always sends JSON; user-configured headers are added after env-var
 * interpolation restricted to the hook's `allowedEnvVars` list.
 */
function resolveHeaders(
  hook: HookHttp
): Record<string, string> {
  const resolved: Record<string, string> = { "Content-Type": "application/json" }
  if (!hook.headers) return resolved
  const allowList = hook.allowedEnvVars ?? []
  Object.entries(hook.headers).forEach(([name, raw]) => {
    resolved[name] = interpolateEnvVars(raw, allowList)
  })
  return resolved
}
/**
 * Executes an HTTP hook by POSTing the hook's stdin payload (JSON) to the
 * configured URL and translating the response into a CommandResult.
 *
 * Contract:
 * - Only http:/https: URLs are allowed; others fail with exitCode 1.
 * - Invalid URLs fail with exitCode 1 before any network activity.
 * - Non-2xx responses fail with exitCode 1 (response body kept in stdout).
 * - A JSON response body may carry `exitCode` (and optionally `stderr`) to
 *   drive hook semantics (e.g. exitCode 2 blocks the tool call).
 * - The request is aborted after `hook.timeout` seconds (default
 *   DEFAULT_HTTP_HOOK_TIMEOUT_S).
 */
export async function executeHttpHook(
  hook: HookHttp,
  stdin: string
): Promise<CommandResult> {
  // Validate the URL up front so no request is ever issued to a non-web
  // scheme (file:, data:, ftp:, ...).
  try {
    const parsed = new URL(hook.url)
    if (!ALLOWED_SCHEMES.has(parsed.protocol)) {
      return {
        exitCode: 1,
        stderr: `HTTP hook URL scheme "${parsed.protocol}" is not allowed. Only http: and https: are permitted.`,
      }
    }
  } catch {
    return { exitCode: 1, stderr: `HTTP hook URL is invalid: ${hook.url}` }
  }
  const timeoutS = hook.timeout ?? DEFAULT_HTTP_HOOK_TIMEOUT_S
  const headers = resolveHeaders(hook)
  try {
    const response = await fetch(hook.url, {
      method: "POST",
      headers,
      body: stdin,
      signal: AbortSignal.timeout(timeoutS * 1000),
    })
    if (!response.ok) {
      return {
        exitCode: 1,
        stderr: `HTTP hook returned status ${response.status}: ${response.statusText}`,
        stdout: await response.text().catch(() => ""),
      }
    }
    const body = await response.text()
    if (!body) {
      return { exitCode: 0, stdout: "", stderr: "" }
    }
    try {
      const parsed = JSON.parse(body) as { exitCode?: number; stderr?: string }
      if (typeof parsed.exitCode === "number") {
        // Fix: propagate a server-supplied stderr alongside the exit code so
        // handlers that log `result.stderr` on blocking hooks (exit code 2)
        // see the hook's reason instead of an empty string.
        return {
          exitCode: parsed.exitCode,
          stdout: body,
          stderr: typeof parsed.stderr === "string" ? parsed.stderr : "",
        }
      }
    } catch {
      // Body is not JSON — fall through and treat it as plain stdout.
    }
    return { exitCode: 0, stdout: body, stderr: "" }
  } catch (error) {
    const message = error instanceof Error ? error.message : String(error)
    return { exitCode: 1, stderr: `HTTP hook error: ${message}` }
  }
}

View File

@@ -3,8 +3,8 @@ import type {
PostToolUseOutput,
ClaudeHooksConfig,
} from "./types"
import { findMatchingHooks, executeHookCommand, objectToSnakeCase, transformToolName, log } from "../../shared"
import { DEFAULT_CONFIG } from "./plugin-config"
import { findMatchingHooks, objectToSnakeCase, transformToolName, log } from "../../shared"
import { dispatchHook, getHookIdentifier } from "./dispatch-hook"
import { buildTranscriptFromSession, deleteTempTranscript } from "./transcript"
import { isHookCommandDisabled, type PluginExtendedConfig } from "./config-loader"
@@ -94,22 +94,17 @@ export async function executePostToolUseHooks(
for (const matcher of matchers) {
if (!matcher.hooks || matcher.hooks.length === 0) continue
for (const hook of matcher.hooks) {
if (hook.type !== "command") continue
if (hook.type !== "command" && hook.type !== "http") continue
if (isHookCommandDisabled("PostToolUse", hook.command, extendedConfig ?? null)) {
log("PostToolUse hook command skipped (disabled by config)", { command: hook.command, toolName: ctx.toolName })
const hookName = getHookIdentifier(hook)
if (isHookCommandDisabled("PostToolUse", hookName, extendedConfig ?? null)) {
log("PostToolUse hook command skipped (disabled by config)", { command: hookName, toolName: ctx.toolName })
continue
}
const hookName = hook.command.split("/").pop() || hook.command
if (!firstHookName) firstHookName = hookName
const result = await executeHookCommand(
hook.command,
JSON.stringify(stdinData),
ctx.cwd,
{ forceZsh: DEFAULT_CONFIG.forceZsh, zshPath: DEFAULT_CONFIG.zshPath }
)
const result = await dispatchHook(hook, JSON.stringify(stdinData), ctx.cwd)
if (result.stdout) {
messages.push(result.stdout)

View File

@@ -3,8 +3,8 @@ import type {
PreCompactOutput,
ClaudeHooksConfig,
} from "./types"
import { findMatchingHooks, executeHookCommand, log } from "../../shared"
import { DEFAULT_CONFIG } from "./plugin-config"
import { findMatchingHooks, log } from "../../shared"
import { dispatchHook, getHookIdentifier } from "./dispatch-hook"
import { isHookCommandDisabled, type PluginExtendedConfig } from "./config-loader"
export interface PreCompactContext {
@@ -50,22 +50,17 @@ export async function executePreCompactHooks(
for (const matcher of matchers) {
if (!matcher.hooks || matcher.hooks.length === 0) continue
for (const hook of matcher.hooks) {
if (hook.type !== "command") continue
if (hook.type !== "command" && hook.type !== "http") continue
if (isHookCommandDisabled("PreCompact", hook.command, extendedConfig ?? null)) {
log("PreCompact hook command skipped (disabled by config)", { command: hook.command })
const hookName = getHookIdentifier(hook)
if (isHookCommandDisabled("PreCompact", hookName, extendedConfig ?? null)) {
log("PreCompact hook command skipped (disabled by config)", { command: hookName })
continue
}
const hookName = hook.command.split("/").pop() || hook.command
if (!firstHookName) firstHookName = hookName
const result = await executeHookCommand(
hook.command,
JSON.stringify(stdinData),
ctx.cwd,
{ forceZsh: DEFAULT_CONFIG.forceZsh, zshPath: DEFAULT_CONFIG.zshPath }
)
const result = await dispatchHook(hook, JSON.stringify(stdinData), ctx.cwd)
if (result.exitCode === 2) {
log("PreCompact hook blocked", { hookName, stderr: result.stderr })

View File

@@ -4,8 +4,8 @@ import type {
PermissionDecision,
ClaudeHooksConfig,
} from "./types"
import { findMatchingHooks, executeHookCommand, objectToSnakeCase, transformToolName, log } from "../../shared"
import { DEFAULT_CONFIG } from "./plugin-config"
import { findMatchingHooks, objectToSnakeCase, transformToolName, log } from "../../shared"
import { dispatchHook, getHookIdentifier } from "./dispatch-hook"
import { isHookCommandDisabled, type PluginExtendedConfig } from "./config-loader"
export interface PreToolUseContext {
@@ -77,22 +77,17 @@ export async function executePreToolUseHooks(
for (const matcher of matchers) {
if (!matcher.hooks || matcher.hooks.length === 0) continue
for (const hook of matcher.hooks) {
if (hook.type !== "command") continue
if (hook.type !== "command" && hook.type !== "http") continue
if (isHookCommandDisabled("PreToolUse", hook.command, extendedConfig ?? null)) {
log("PreToolUse hook command skipped (disabled by config)", { command: hook.command, toolName: ctx.toolName })
const hookName = getHookIdentifier(hook)
if (isHookCommandDisabled("PreToolUse", hookName, extendedConfig ?? null)) {
log("PreToolUse hook command skipped (disabled by config)", { command: hookName, toolName: ctx.toolName })
continue
}
const hookName = hook.command.split("/").pop() || hook.command
if (!firstHookName) firstHookName = hookName
const result = await executeHookCommand(
hook.command,
JSON.stringify(stdinData),
ctx.cwd,
{ forceZsh: DEFAULT_CONFIG.forceZsh, zshPath: DEFAULT_CONFIG.zshPath }
)
const result = await dispatchHook(hook, JSON.stringify(stdinData), ctx.cwd)
if (result.exitCode === 2) {
return {

View File

@@ -3,8 +3,8 @@ import type {
StopOutput,
ClaudeHooksConfig,
} from "./types"
import { findMatchingHooks, executeHookCommand, log } from "../../shared"
import { DEFAULT_CONFIG } from "./plugin-config"
import { findMatchingHooks, log } from "../../shared"
import { dispatchHook, getHookIdentifier } from "./dispatch-hook"
import { getTodoPath } from "./todo"
import { isHookCommandDisabled, type PluginExtendedConfig } from "./config-loader"
@@ -68,19 +68,15 @@ export async function executeStopHooks(
for (const matcher of matchers) {
if (!matcher.hooks || matcher.hooks.length === 0) continue
for (const hook of matcher.hooks) {
if (hook.type !== "command") continue
if (hook.type !== "command" && hook.type !== "http") continue
if (isHookCommandDisabled("Stop", hook.command, extendedConfig ?? null)) {
log("Stop hook command skipped (disabled by config)", { command: hook.command })
const hookName = getHookIdentifier(hook)
if (isHookCommandDisabled("Stop", hookName, extendedConfig ?? null)) {
log("Stop hook command skipped (disabled by config)", { command: hookName })
continue
}
const result = await executeHookCommand(
hook.command,
JSON.stringify(stdinData),
ctx.cwd,
{ forceZsh: DEFAULT_CONFIG.forceZsh, zshPath: DEFAULT_CONFIG.zshPath }
)
const result = await dispatchHook(hook, JSON.stringify(stdinData), ctx.cwd)
// Check exit code first - exit code 2 means block
if (result.exitCode === 2) {

View File

@@ -12,7 +12,7 @@ export type ClaudeHookEvent =
export interface HookMatcher {
matcher: string
hooks: HookCommand[]
hooks: HookAction[]
}
export interface HookCommand {
@@ -20,6 +20,16 @@ export interface HookCommand {
command: string
}
export interface HookHttp {
type: "http"
url: string
headers?: Record<string, string>
allowedEnvVars?: string[]
timeout?: number
}
export type HookAction = HookCommand | HookHttp
export interface ClaudeHooksConfig {
PreToolUse?: HookMatcher[]
PostToolUse?: HookMatcher[]

View File

@@ -3,8 +3,8 @@ import type {
PostToolUseOutput,
ClaudeHooksConfig,
} from "./types"
import { findMatchingHooks, executeHookCommand, log } from "../../shared"
import { DEFAULT_CONFIG } from "./plugin-config"
import { findMatchingHooks, log } from "../../shared"
import { dispatchHook, getHookIdentifier } from "./dispatch-hook"
import { isHookCommandDisabled, type PluginExtendedConfig } from "./config-loader"
const USER_PROMPT_SUBMIT_TAG_OPEN = "<user-prompt-submit-hook>"
@@ -80,19 +80,15 @@ export async function executeUserPromptSubmitHooks(
for (const matcher of matchers) {
if (!matcher.hooks || matcher.hooks.length === 0) continue
for (const hook of matcher.hooks) {
if (hook.type !== "command") continue
if (hook.type !== "command" && hook.type !== "http") continue
if (isHookCommandDisabled("UserPromptSubmit", hook.command, extendedConfig ?? null)) {
log("UserPromptSubmit hook command skipped (disabled by config)", { command: hook.command })
const hookName = getHookIdentifier(hook)
if (isHookCommandDisabled("UserPromptSubmit", hookName, extendedConfig ?? null)) {
log("UserPromptSubmit hook command skipped (disabled by config)", { command: hookName })
continue
}
const result = await executeHookCommand(
hook.command,
JSON.stringify(stdinData),
ctx.cwd,
{ forceZsh: DEFAULT_CONFIG.forceZsh, zshPath: DEFAULT_CONFIG.zshPath }
)
const result = await dispatchHook(hook, JSON.stringify(stdinData), ctx.cwd)
if (result.stdout) {
const output = result.stdout.trim()

View File

@@ -50,3 +50,4 @@ export { createRuntimeFallbackHook, type RuntimeFallbackHook, type RuntimeFallba
export { createWriteExistingFileGuardHook } from "./write-existing-file-guard";
export { createHashlineReadEnhancerHook } from "./hashline-read-enhancer";
export { createJsonErrorRecoveryHook, JSON_ERROR_TOOL_EXCLUDE_LIST, JSON_ERROR_PATTERNS, JSON_ERROR_REMINDER } from "./json-error-recovery";
export { createReadImageResizerHook } from "./read-image-resizer"

View File

@@ -1,6 +1,6 @@
# src/hooks/keyword-detector/ — Mode Keyword Injection
**Generated:** 2026-02-24
**Generated:** 2026-03-02
## OVERVIEW

View File

@@ -3,12 +3,15 @@ import { beforeEach, describe, expect, test } from "bun:test"
import {
clearPendingModelFallback,
createModelFallbackHook,
setSessionFallbackChain,
setPendingModelFallback,
} from "./hook"
describe("model fallback hook", () => {
beforeEach(() => {
clearPendingModelFallback("ses_model_fallback_main")
clearPendingModelFallback("ses_model_fallback_ghcp")
clearPendingModelFallback("ses_model_fallback_google")
})
test("applies pending fallback on chat.message by overriding model", async () => {
@@ -138,4 +141,92 @@ describe("model fallback hook", () => {
expect(toastCalls.length).toBe(1)
expect(toastCalls[0]?.title).toBe("Model fallback")
})
test("transforms model names for github-copilot provider via fallback chain", async () => {
//#given
const sessionID = "ses_model_fallback_ghcp"
clearPendingModelFallback(sessionID)
const hook = createModelFallbackHook() as unknown as {
"chat.message"?: (
input: { sessionID: string },
output: { message: Record<string, unknown>; parts: Array<{ type: string; text?: string }> },
) => Promise<void>
}
// Set a custom fallback chain that routes through github-copilot
setSessionFallbackChain(sessionID, [
{ providers: ["github-copilot"], model: "claude-sonnet-4-6" },
])
const set = setPendingModelFallback(
sessionID,
"Atlas (Plan Executor)",
"github-copilot",
"claude-sonnet-4-6",
)
expect(set).toBe(true)
const output = {
message: {
model: { providerID: "github-copilot", modelID: "claude-sonnet-4-6" },
},
parts: [{ type: "text", text: "continue" }],
}
//#when
await hook["chat.message"]?.({ sessionID }, output)
//#then — model name should be transformed from hyphen to dot notation
expect(output.message["model"]).toEqual({
providerID: "github-copilot",
modelID: "claude-sonnet-4.6",
})
clearPendingModelFallback(sessionID)
})
test("transforms model names for google provider via fallback chain", async () => {
//#given
const sessionID = "ses_model_fallback_google"
clearPendingModelFallback(sessionID)
const hook = createModelFallbackHook() as unknown as {
"chat.message"?: (
input: { sessionID: string },
output: { message: Record<string, unknown>; parts: Array<{ type: string; text?: string }> },
) => Promise<void>
}
// Set a custom fallback chain that routes through google
setSessionFallbackChain(sessionID, [
{ providers: ["google"], model: "gemini-3-pro" },
])
const set = setPendingModelFallback(
sessionID,
"Oracle",
"google",
"gemini-3-pro",
)
expect(set).toBe(true)
const output = {
message: {
model: { providerID: "google", modelID: "gemini-3-pro" },
},
parts: [{ type: "text", text: "continue" }],
}
//#when
await hook["chat.message"]?.({ sessionID }, output)
//#then — model name should be transformed from gemini-3-pro to gemini-3-pro-preview
expect(output.message["model"]).toEqual({
providerID: "google",
modelID: "gemini-3-pro-preview",
})
clearPendingModelFallback(sessionID)
})
})

View File

@@ -3,6 +3,7 @@ import { getAgentConfigKey } from "../../shared/agent-display-names"
import { AGENT_MODEL_REQUIREMENTS } from "../../shared/model-requirements"
import { readConnectedProvidersCache, readProviderModelsCache } from "../../shared/connected-providers-cache"
import { selectFallbackProvider } from "../../shared/model-error-classifier"
import { transformModelForProvider } from "../../shared/provider-model-id-transform"
import { log } from "../../shared/logger"
import { getTaskToastManager } from "../../features/task-toast-manager"
import type { ChatMessageInput, ChatMessageHandlerOutput } from "../../plugin/chat-message"
@@ -145,7 +146,7 @@ export function getNextFallback(
return {
providerID,
modelID: fallback.model,
modelID: transformModelForProvider(providerID, fallback.model),
variant: fallback.variant,
}
}

View File

@@ -414,4 +414,157 @@ describe("preemptive-compaction", () => {
restoreTimeouts()
}
})
// #given first compaction succeeded and context grew again
// #when tool.execute.after runs after new high-token message
// #then should trigger compaction again (re-compaction)
it("should allow re-compaction when context grows after successful compaction", async () => {
const hook = createPreemptiveCompactionHook(ctx as never, {} as never)
const sessionID = "ses_recompact"
// given - first compaction cycle
await hook.event({
event: {
type: "message.updated",
properties: {
info: {
role: "assistant",
sessionID,
providerID: "anthropic",
modelID: "claude-sonnet-4-6",
finish: true,
tokens: {
input: 170000,
output: 0,
reasoning: 0,
cache: { read: 10000, write: 0 },
},
},
},
},
})
await hook["tool.execute.after"](
{ tool: "bash", sessionID, callID: "call_1" },
{ title: "", output: "test", metadata: null }
)
expect(ctx.client.session.summarize).toHaveBeenCalledTimes(1)
// when - new message with high tokens (context grew after compaction)
await hook.event({
event: {
type: "message.updated",
properties: {
info: {
role: "assistant",
sessionID,
providerID: "anthropic",
modelID: "claude-sonnet-4-6",
finish: true,
tokens: {
input: 170000,
output: 0,
reasoning: 0,
cache: { read: 10000, write: 0 },
},
},
},
},
})
await hook["tool.execute.after"](
{ tool: "bash", sessionID, callID: "call_2" },
{ title: "", output: "test", metadata: null }
)
// then - summarize should fire again
expect(ctx.client.session.summarize).toHaveBeenCalledTimes(2)
})
// #given modelContextLimitsCache has model-specific limit (256k)
// #when tokens are above default 78% of 200k but below 78% of 256k
// #then should NOT trigger compaction
it("should use model-specific context limit from modelContextLimitsCache", async () => {
const modelContextLimitsCache = new Map<string, number>()
modelContextLimitsCache.set("opencode/kimi-k2.5-free", 262144)
const hook = createPreemptiveCompactionHook(ctx as never, {} as never, {
anthropicContext1MEnabled: false,
modelContextLimitsCache,
})
const sessionID = "ses_kimi_limit"
// 180k total tokens — above 78% of 200k (156k) but below 78% of 256k (204k)
await hook.event({
event: {
type: "message.updated",
properties: {
info: {
role: "assistant",
sessionID,
providerID: "opencode",
modelID: "kimi-k2.5-free",
finish: true,
tokens: {
input: 170000,
output: 0,
reasoning: 0,
cache: { read: 10000, write: 0 },
},
},
},
},
})
await hook["tool.execute.after"](
{ tool: "bash", sessionID, callID: "call_1" },
{ title: "", output: "test", metadata: null }
)
expect(ctx.client.session.summarize).not.toHaveBeenCalled()
})
// #given modelContextLimitsCache has model-specific limit (256k)
// #when tokens exceed 78% of model-specific limit
// #then should trigger compaction
it("should trigger compaction at model-specific threshold", async () => {
const modelContextLimitsCache = new Map<string, number>()
modelContextLimitsCache.set("opencode/kimi-k2.5-free", 262144)
const hook = createPreemptiveCompactionHook(ctx as never, {} as never, {
anthropicContext1MEnabled: false,
modelContextLimitsCache,
})
const sessionID = "ses_kimi_trigger"
// 210k total — above 78% of 256k (≈204k)
await hook.event({
event: {
type: "message.updated",
properties: {
info: {
role: "assistant",
sessionID,
providerID: "opencode",
modelID: "kimi-k2.5-free",
finish: true,
tokens: {
input: 200000,
output: 0,
reasoning: 0,
cache: { read: 10000, write: 0 },
},
},
},
},
})
await hook["tool.execute.after"](
{ tool: "bash", sessionID, callID: "call_1" },
{ title: "", output: "test", metadata: null }
)
expect(ctx.client.session.summarize).toHaveBeenCalled()
})
})

View File

@@ -7,6 +7,7 @@ const PREEMPTIVE_COMPACTION_TIMEOUT_MS = 120_000
type ModelCacheStateLike = {
anthropicContext1MEnabled: boolean
modelContextLimitsCache?: Map<string, number>
}
function getAnthropicActualLimit(modelCacheState?: ModelCacheStateLike): number {
@@ -91,10 +92,12 @@ export function createPreemptiveCompactionHook(
const cached = tokenCache.get(sessionID)
if (!cached) return
const actualLimit =
isAnthropicProvider(cached.providerID)
? getAnthropicActualLimit(modelCacheState)
: DEFAULT_ACTUAL_LIMIT
const modelSpecificLimit = !isAnthropicProvider(cached.providerID)
? modelCacheState?.modelContextLimitsCache?.get(`${cached.providerID}/${cached.modelID}`)
: undefined
const actualLimit = isAnthropicProvider(cached.providerID)
? getAnthropicActualLimit(modelCacheState)
: modelSpecificLimit ?? DEFAULT_ACTUAL_LIMIT
const lastTokens = cached.tokens
const totalInputTokens = (lastTokens?.input ?? 0) + (lastTokens?.cache?.read ?? 0)
@@ -164,6 +167,7 @@ export function createPreemptiveCompactionHook(
modelID: info.modelID ?? "",
tokens: info.tokens,
})
compactedSessions.delete(info.sessionID)
}
}

View File

@@ -1,6 +1,6 @@
# src/hooks/ralph-loop/ — Self-Referential Dev Loop
**Generated:** 2026-02-24
**Generated:** 2026-03-02
## OVERVIEW

View File

@@ -0,0 +1,286 @@
/// <reference types="bun-types" />
import { beforeEach, describe, expect, it, mock } from "bun:test"
import type { PluginInput } from "@opencode-ai/plugin"
import type { ImageDimensions, ResizeResult } from "./types"
// Hand-rolled mocks for the hook's collaborators; individual tests configure
// their return values and the module mocks below route imports to them.
const mockParseImageDimensions = mock((): ImageDimensions | null => null)
const mockCalculateTargetDimensions = mock((): ImageDimensions | null => null)
const mockResizeImage = mock(async (): Promise<ResizeResult | null> => null)
const mockGetSessionModel = mock((_sessionID: string) => ({
  providerID: "anthropic",
  modelID: "claude-sonnet-4-6",
} as { providerID: string; modelID: string } | undefined))
// Replace the real modules before the hook under test is imported so it
// binds to the mocks above.
mock.module("./image-dimensions", () => ({
  parseImageDimensions: mockParseImageDimensions,
}))
mock.module("./image-resizer", () => ({
  calculateTargetDimensions: mockCalculateTargetDimensions,
  resizeImage: mockResizeImage,
}))
mock.module("../../shared/session-model-state", () => ({
  getSessionModel: mockGetSessionModel,
}))
import { createReadImageResizerHook } from "./hook"
// Shape of the mutable tool-output object passed to tool.execute.after;
// the hook rewrites `output` text and may swap attachment URLs in place.
type ToolOutput = {
  title: string
  output: string
  metadata: unknown
  attachments?: Array<{ mime: string; url: string; filename?: string }>
}
/** Builds a minimal PluginInput stub: empty client, fixed workspace dir. */
function createMockContext(): PluginInput {
  const stub = {
    client: {} as PluginInput["client"],
    directory: "/test",
  }
  return stub as PluginInput
}
/** Creates a tool.execute.after input envelope for the given tool name. */
function createInput(tool: string): { tool: string; sessionID: string; callID: string } {
  return { tool, sessionID: "session-1", callID: "call-1" }
}
// Behavioral specs for the Read-tool image-resizer hook (tool.execute.after):
// gating by tool name / provider / session model, attachment filtering, and
// in-place rewriting of attachment URLs plus appended "[Image ...]" metadata.
describe("createReadImageResizerHook", () => {
  beforeEach(() => {
    mockParseImageDimensions.mockReset()
    mockCalculateTargetDimensions.mockReset()
    mockResizeImage.mockReset()
    mockGetSessionModel.mockReset()
    // Default session model: anthropic, the provider the resizer targets.
    mockGetSessionModel.mockReturnValue({ providerID: "anthropic", modelID: "claude-sonnet-4-6" })
  })
  it("skips non-Read tools", async () => {
    //#given
    const hook = createReadImageResizerHook(createMockContext())
    const output: ToolOutput = {
      title: "Read",
      output: "original output",
      metadata: {},
      attachments: [{ mime: "image/png", url: "data:image/png;base64,old", filename: "image.png" }],
    }
    //#when
    await hook["tool.execute.after"](createInput("Bash"), output)
    //#then
    expect(output.output).toBe("original output")
    expect(mockParseImageDimensions).not.toHaveBeenCalled()
  })
  it("skips when provider is not anthropic", async () => {
    //#given
    mockGetSessionModel.mockReturnValue({ providerID: "openai", modelID: "gpt-5.3-codex" })
    mockParseImageDimensions.mockReturnValue({ width: 3000, height: 2000 })
    mockCalculateTargetDimensions.mockReturnValue({ width: 1568, height: 1045 })
    const hook = createReadImageResizerHook(createMockContext())
    const output: ToolOutput = {
      title: "Read",
      output: "original output",
      metadata: {},
      attachments: [{ mime: "image/png", url: "data:image/png;base64,old", filename: "image.png" }],
    }
    //#when
    await hook["tool.execute.after"](createInput("Read"), output)
    //#then
    expect(output.output).toBe("original output")
    expect(mockParseImageDimensions).not.toHaveBeenCalled()
  })
  it("skips when session model is unknown", async () => {
    //#given
    mockGetSessionModel.mockReturnValue(undefined)
    mockParseImageDimensions.mockReturnValue({ width: 3000, height: 2000 })
    const hook = createReadImageResizerHook(createMockContext())
    const output: ToolOutput = {
      title: "Read",
      output: "original output",
      metadata: {},
      attachments: [{ mime: "image/png", url: "data:image/png;base64,old", filename: "image.png" }],
    }
    //#when
    await hook["tool.execute.after"](createInput("Read"), output)
    //#then
    expect(output.output).toBe("original output")
    expect(mockParseImageDimensions).not.toHaveBeenCalled()
  })
  it("skips Read output with no attachments", async () => {
    //#given
    const hook = createReadImageResizerHook(createMockContext())
    const output: ToolOutput = {
      title: "Read",
      output: "original output",
      metadata: {},
    }
    //#when
    await hook["tool.execute.after"](createInput("Read"), output)
    //#then
    expect(output.output).toBe("original output")
    expect(mockParseImageDimensions).not.toHaveBeenCalled()
  })
  it("skips non-image attachments", async () => {
    //#given
    const hook = createReadImageResizerHook(createMockContext())
    const output: ToolOutput = {
      title: "Read",
      output: "original output",
      metadata: {},
      attachments: [{ mime: "application/pdf", url: "data:application/pdf;base64,AAAA", filename: "file.pdf" }],
    }
    //#when
    await hook["tool.execute.after"](createInput("Read"), output)
    //#then
    expect(output.output).toBe("original output")
    expect(mockParseImageDimensions).not.toHaveBeenCalled()
  })
  it("skips unsupported image mime types", async () => {
    //#given
    const hook = createReadImageResizerHook(createMockContext())
    const output: ToolOutput = {
      title: "Read",
      output: "original output",
      metadata: {},
      attachments: [{ mime: "image/heic", url: "data:image/heic;base64,AAAA", filename: "photo.heic" }],
    }
    //#when
    await hook["tool.execute.after"](createInput("Read"), output)
    //#then
    expect(output.output).toBe("original output")
    expect(mockParseImageDimensions).not.toHaveBeenCalled()
  })
  it("appends within-limits metadata when image is already valid", async () => {
    //#given
    mockParseImageDimensions.mockReturnValue({ width: 800, height: 600 })
    // A null target means the image already fits; no resize should happen.
    mockCalculateTargetDimensions.mockReturnValue(null)
    const hook = createReadImageResizerHook(createMockContext())
    const output: ToolOutput = {
      title: "Read",
      output: "original output",
      metadata: {},
      attachments: [{ mime: "image/png", url: "data:image/png;base64,old", filename: "image.png" }],
    }
    //#when
    await hook["tool.execute.after"](createInput("Read"), output)
    //#then
    expect(output.output).toContain("[Image Info]")
    expect(output.output).toContain("within limits")
    expect(output.attachments?.[0]?.url).toBe("data:image/png;base64,old")
    expect(mockResizeImage).not.toHaveBeenCalled()
  })
  it("replaces attachment URL and appends resize metadata for oversized image", async () => {
    //#given
    mockParseImageDimensions.mockReturnValue({ width: 3000, height: 2000 })
    mockCalculateTargetDimensions.mockReturnValue({ width: 1568, height: 1045 })
    mockResizeImage.mockResolvedValue({
      resizedDataUrl: "data:image/png;base64,resized",
      original: { width: 3000, height: 2000 },
      resized: { width: 1568, height: 1045 },
    })
    const hook = createReadImageResizerHook(createMockContext())
    const output: ToolOutput = {
      title: "Read",
      output: "original output",
      metadata: {},
      attachments: [{ mime: "image/png", url: "data:image/png;base64,old", filename: "big.png" }],
    }
    //#when
    await hook["tool.execute.after"](createInput("Read"), output)
    //#then
    expect(output.attachments?.[0]?.url).toBe("data:image/png;base64,resized")
    expect(output.output).toContain("[Image Resize Info]")
    expect(output.output).toContain("resized")
  })
  it("keeps original attachment URL and marks resize skipped when resize fails", async () => {
    //#given
    mockParseImageDimensions.mockReturnValue({ width: 3000, height: 2000 })
    mockCalculateTargetDimensions.mockReturnValue({ width: 1568, height: 1045 })
    // resizeImage returning null signals the resize could not be performed.
    mockResizeImage.mockResolvedValue(null)
    const hook = createReadImageResizerHook(createMockContext())
    const output: ToolOutput = {
      title: "Read",
      output: "original output",
      metadata: {},
      attachments: [{ mime: "image/png", url: "data:image/png;base64,old", filename: "fail.png" }],
    }
    //#when
    await hook["tool.execute.after"](createInput("Read"), output)
    //#then
    expect(output.attachments?.[0]?.url).toBe("data:image/png;base64,old")
    expect(output.output).toContain("resize skipped")
  })
  it("appends unknown-dimensions metadata when parsing fails", async () => {
    //#given
    mockParseImageDimensions.mockReturnValue(null)
    const hook = createReadImageResizerHook(createMockContext())
    const output: ToolOutput = {
      title: "Read",
      output: "original output",
      metadata: {},
      attachments: [{ mime: "image/png", url: "data:image/png;base64,old", filename: "corrupt.png" }],
    }
    //#when
    await hook["tool.execute.after"](createInput("Read"), output)
    //#then
    expect(output.output).toContain("dimensions could not be parsed")
    expect(mockCalculateTargetDimensions).not.toHaveBeenCalled()
  })
  it("fires for lowercase read tool name", async () => {
    //#given
    mockParseImageDimensions.mockReturnValue({ width: 800, height: 600 })
    mockCalculateTargetDimensions.mockReturnValue(null)
    const hook = createReadImageResizerHook(createMockContext())
    const output: ToolOutput = {
      title: "Read",
      output: "original output",
      metadata: {},
      attachments: [{ mime: "image/png", url: "data:image/png;base64,old", filename: "image.png" }],
    }
    //#when
    await hook["tool.execute.after"](createInput("read"), output)
    //#then
    expect(mockParseImageDimensions).toHaveBeenCalledTimes(1)
    expect(output.output).toContain("within limits")
  })
})

View File

@@ -0,0 +1,197 @@
import type { PluginInput } from "@opencode-ai/plugin"
import type { ImageAttachment, ImageDimensions } from "./types"
import { parseImageDimensions } from "./image-dimensions"
import { calculateTargetDimensions, resizeImage } from "./image-resizer"
import { log } from "../../shared"
import { getSessionModel } from "../../shared/session-model-state"
// Image MIME types this hook can parse and resize (mirrors the dimension parsers below).
const SUPPORTED_IMAGE_MIMES = new Set(["image/png", "image/jpeg", "image/gif", "image/webp"])
// Token estimate divisor: tokens ≈ (width * height) / 750 — presumably Anthropic's
// documented image-token heuristic, given the anthropic-only gate in the hook; confirm.
const TOKEN_DIVISOR = 750
// Per-attachment outcome, collected while processing and rendered into the output appendix.
interface ResizeEntry {
  filename: string
  originalDims: ImageDimensions | null // null when the header could not be parsed
  resizedDims: ImageDimensions | null // set only when a resize actually happened
  status: "resized" | "within-limits" | "resize-skipped" | "unknown-dims"
}
/** True when the executed tool is the Read tool, regardless of name casing. */
function isReadTool(toolName: string): boolean {
  const normalized = toolName.toLowerCase()
  return normalized === "read"
}
/** Narrows an unknown value to a plain object record; arrays and primitives yield null. */
function asRecord(value: unknown): Record<string, unknown> | null {
  const isPlainObject = typeof value === "object" && value !== null && !Array.isArray(value)
  return isPlainObject ? (value as Record<string, unknown>) : null
}
/** Type guard: record has string mime + url, and filename (when present) is a string. */
function isImageAttachmentRecord(
  value: Record<string, unknown>,
): value is Record<string, unknown> & ImageAttachment {
  if (typeof value.mime !== "string" || typeof value.url !== "string") {
    return false
  }
  const { filename } = value
  return filename === undefined || typeof filename === "string"
}
/**
 * Pulls the image attachments out of a tool output record.
 *
 * Only attachments with a supported image MIME type survive. The mime field is
 * normalized to lowercase in place so downstream format checks are case-insensitive,
 * and the original record objects are returned (not copies) so the hook can later
 * mutate attachment.url after resizing. Malformed entries are skipped silently.
 * (Fix: dropped the redundant no-op self-assignment `attachmentRecord.url = url`.)
 */
function extractImageAttachments(output: Record<string, unknown>): ImageAttachment[] {
  const attachmentsValue = output.attachments
  if (!Array.isArray(attachmentsValue)) {
    return []
  }
  const attachments: ImageAttachment[] = []
  for (const attachmentValue of attachmentsValue) {
    const attachmentRecord = asRecord(attachmentValue)
    if (!attachmentRecord) {
      continue
    }
    const mime = attachmentRecord.mime
    const url = attachmentRecord.url
    if (typeof mime !== "string" || typeof url !== "string") {
      continue
    }
    const normalizedMime = mime.toLowerCase()
    if (!SUPPORTED_IMAGE_MIMES.has(normalizedMime)) {
      continue
    }
    // Normalize the mime in place; url is already a validated string and needs no write-back.
    attachmentRecord.mime = normalizedMime
    if (isImageAttachmentRecord(attachmentRecord)) {
      attachments.push(attachmentRecord)
    }
  }
  return attachments
}
/** Approximate vision-token cost for an image of the given pixel dimensions. */
function calculateTokens(width: number, height: number): number {
  const pixels = width * height
  return Math.ceil(pixels / TOKEN_DIVISOR)
}
/**
 * Builds the "[Image Resize Info]" / "[Image Info]" appendix describing each
 * attachment's dimensions, token estimate, and resize outcome.
 */
function formatResizeAppendix(entries: ResizeEntry[]): string {
  const anyResized = entries.some((entry) => entry.status === "resized")
  const lines = [`\n\n${anyResized ? "[Image Resize Info]" : "[Image Info]"}`]
  for (const entry of entries) {
    lines.push(`- ${entry.filename}: ${describeResizeEntry(entry)}`)
  }
  return lines.join("\n")
}

/** Human-readable status fragment for a single resize entry. */
function describeResizeEntry(entry: ResizeEntry): string {
  if (entry.status === "unknown-dims" || !entry.originalDims) {
    return "dimensions could not be parsed"
  }
  const { width, height } = entry.originalDims
  const originalText = `${width}x${height}`
  const originalTokens = calculateTokens(width, height)
  if (entry.status === "within-limits") {
    return `${originalText} (within limits, tokens: ${originalTokens})`
  }
  if (entry.status === "resize-skipped" || !entry.resizedDims) {
    return `${originalText} (resize skipped, tokens: ${originalTokens})`
  }
  const resized = entry.resizedDims
  const resizedTokens = calculateTokens(resized.width, resized.height)
  return `${originalText} -> ${resized.width}x${resized.height} (resized, tokens: ${originalTokens} -> ${resizedTokens})`
}
/** Attachment display name: its filename when non-blank, else a 1-based placeholder. */
function resolveFilename(attachment: ImageAttachment, index: number): string {
  const { filename } = attachment
  if (filename !== undefined && filename.trim().length > 0) {
    return filename
  }
  return `image-${index + 1}`
}
/**
 * Hook: after a Read tool call returns image attachments for an Anthropic
 * session, downscale any oversized images in place and append a summary of
 * what happened (resized / within limits / skipped / unparseable) to the
 * tool's text output.
 */
export function createReadImageResizerHook(_ctx: PluginInput) {
  return {
    "tool.execute.after": async (
      input: { tool: string; sessionID: string; callID: string },
      output: { title: string; output: string; metadata: unknown },
    ) => {
      // Only the Read tool produces the attachments this hook rewrites.
      if (!isReadTool(input.tool)) {
        return
      }
      // Resizing targets Anthropic image limits; other providers are left alone.
      const sessionModel = getSessionModel(input.sessionID)
      if (sessionModel?.providerID !== "anthropic") {
        return
      }
      if (typeof output.output !== "string") {
        return
      }
      // Attachments are not part of the declared output type; read them dynamically.
      const outputRecord = output as Record<string, unknown>
      const attachments = extractImageAttachments(outputRecord)
      if (attachments.length === 0) {
        return
      }
      const entries: ResizeEntry[] = []
      for (const [index, attachment] of attachments.entries()) {
        const filename = resolveFilename(attachment, index)
        try {
          const originalDims = parseImageDimensions(attachment.url, attachment.mime)
          if (!originalDims) {
            entries.push({ filename, originalDims: null, resizedDims: null, status: "unknown-dims" })
            continue
          }
          const targetDims = calculateTargetDimensions(originalDims.width, originalDims.height)
          if (!targetDims) {
            // Already small enough — record it for the info appendix only.
            entries.push({
              filename,
              originalDims,
              resizedDims: null,
              status: "within-limits",
            })
            continue
          }
          const resizedResult = await resizeImage(attachment.url, attachment.mime, targetDims)
          if (!resizedResult) {
            // sharp unavailable or the resize failed; keep the original attachment.
            entries.push({
              filename,
              originalDims,
              resizedDims: null,
              status: "resize-skipped",
            })
            continue
          }
          // Swap the attachment payload in place so the model receives the smaller image.
          attachment.url = resizedResult.resizedDataUrl
          entries.push({
            filename,
            originalDims: resizedResult.original,
            resizedDims: resizedResult.resized,
            status: "resized",
          })
        } catch (error) {
          // Never let one bad attachment break the Read tool; log and continue.
          log("[read-image-resizer] attachment processing failed", {
            error: error instanceof Error ? error.message : String(error),
            filename,
          })
          entries.push({ filename, originalDims: null, resizedDims: null, status: "unknown-dims" })
        }
      }
      if (entries.length === 0) {
        return
      }
      output.output += formatResizeAppendix(entries)
    },
  }
}

View File

@@ -0,0 +1,126 @@
/// <reference types="bun-types" />
import { describe, expect, it } from "bun:test"
import { parseImageDimensions } from "./image-dimensions"
// Known-good 1x1 PNG fixture (valid signature + IHDR chunk) for the PNG parsing path.
const PNG_1X1_DATA_URL =
  "data:image/png;base64,iVBORw0KGgoAAAANSUhEUgAAAAEAAAABCAYAAAAfFcSJAAAADUlEQVR42mNk+M9QDwADhgGAWjR9awAAAABJRU5ErkJggg=="
// Known-good 1x1 GIF fixture (GIF89a header) for the GIF parsing path.
const GIF_1X1_DATA_URL =
  "data:image/gif;base64,R0lGODlhAQABAIAAAAAAAP///yH5BAEAAAAALAAAAAABAAEAAAIBRAA7"
/** Builds a minimal PNG data URL: 8-byte signature plus an IHDR chunk carrying the dimensions. */
function createPngDataUrl(width: number, height: number): string {
  const header = Buffer.alloc(33)
  Buffer.from([0x89, 0x50, 0x4e, 0x47, 0x0d, 0x0a, 0x1a, 0x0a]).copy(header, 0)
  header.writeUInt32BE(13, 8)
  header.write("IHDR", 12, "ascii")
  header.writeUInt32BE(width, 16)
  header.writeUInt32BE(height, 20)
  return `data:image/png;base64,${header.toString("base64")}`
}
/** Builds a minimal GIF data URL: GIF89a header plus logical-screen-descriptor dimensions. */
function createGifDataUrl(width: number, height: number): string {
  const header = Buffer.alloc(10)
  header.write("GIF89a", 0, "ascii")
  header.writeUInt16LE(width, 6)
  header.writeUInt16LE(height, 8)
  return `data:image/gif;base64,${header.toString("base64")}`
}
/** Pads a minimal PNG data URL with filler base64 characters to simulate a huge payload. */
function createLargePngDataUrl(width: number, height: number, extraBase64Chars: number): string {
  const baseDataUrl = createPngDataUrl(width, height)
  const commaIndex = baseDataUrl.indexOf(",")
  const payload = baseDataUrl.slice(commaIndex + 1) + "A".repeat(extraBase64Chars)
  return `data:image/png;base64,${payload}`
}
describe("parseImageDimensions", () => {
it("parses PNG 1x1 dimensions", () => {
//#given
const dataUrl = PNG_1X1_DATA_URL
//#when
const result = parseImageDimensions(dataUrl, "image/png")
//#then
expect(result).toEqual({ width: 1, height: 1 })
})
it("parses PNG dimensions from IHDR", () => {
//#given
const dataUrl = createPngDataUrl(3000, 2000)
//#when
const result = parseImageDimensions(dataUrl, "image/png")
//#then
expect(result).toEqual({ width: 3000, height: 2000 })
})
it("parses PNG dimensions from a very large base64 payload", () => {
//#given
const dataUrl = createLargePngDataUrl(4096, 2160, 10 * 1024 * 1024)
//#when
const result = parseImageDimensions(dataUrl, "image/png")
//#then
expect(result).toEqual({ width: 4096, height: 2160 })
})
it("parses GIF 1x1 dimensions", () => {
//#given
const dataUrl = GIF_1X1_DATA_URL
//#when
const result = parseImageDimensions(dataUrl, "image/gif")
//#then
expect(result).toEqual({ width: 1, height: 1 })
})
it("parses GIF dimensions from logical screen descriptor", () => {
//#given
const dataUrl = createGifDataUrl(320, 240)
//#when
const result = parseImageDimensions(dataUrl, "image/gif")
//#then
expect(result).toEqual({ width: 320, height: 240 })
})
it("returns null for empty input", () => {
//#given
const dataUrl = ""
//#when
const result = parseImageDimensions(dataUrl, "image/png")
//#then
expect(result).toBeNull()
})
it("returns null for too-short PNG buffer", () => {
//#given
const dataUrl = "data:image/png;base64,AAAA"
//#when
const result = parseImageDimensions(dataUrl, "image/png")
//#then
expect(result).toBeNull()
})
it("returns null for unsupported mime type", () => {
//#given
const dataUrl = PNG_1X1_DATA_URL
//#when
const result = parseImageDimensions(dataUrl, "image/heic")
//#then
expect(result).toBeNull()
})
})

View File

@@ -0,0 +1,191 @@
import type { ImageDimensions } from "./types"
import { extractBase64Data } from "../../tools/look-at/mime-type-inference"
// Only the leading bytes of the image are decoded — the dimension headers of all
// supported formats live at the start of the file.
const HEADER_BYTES = 32_768
// Base64 encodes 3 bytes per 4 characters, so this many chars covers HEADER_BYTES.
const HEADER_BASE64_CHARS = Math.ceil(HEADER_BYTES / 3) * 4
/** Validates a parsed width/height pair; non-finite or non-positive values yield null. */
function toImageDimensions(width: number, height: number): ImageDimensions | null {
  const valid = Number.isFinite(width) && Number.isFinite(height) && width > 0 && height > 0
  return valid ? { width, height } : null
}
/**
 * Reads PNG dimensions from the IHDR chunk.
 * Layout: 8-byte signature, 4-byte chunk length, "IHDR", then width/height as u32 BE.
 */
function parsePngDimensions(buffer: Buffer): ImageDimensions | null {
  if (buffer.length < 24) {
    return null
  }
  const signature = [0x89, 0x50, 0x4e, 0x47, 0x0d, 0x0a, 0x1a, 0x0a]
  const hasSignature = signature.every((byte, index) => buffer[index] === byte)
  if (!hasSignature) {
    return null
  }
  if (buffer.toString("ascii", 12, 16) !== "IHDR") {
    return null
  }
  return toImageDimensions(buffer.readUInt32BE(16), buffer.readUInt32BE(20))
}
/**
 * Reads GIF dimensions from the logical screen descriptor.
 * Layout: "GIF87a"/"GIF89a" header, then width/height as u16 LE at offsets 6 and 8.
 */
function parseGifDimensions(buffer: Buffer): ImageDimensions | null {
  if (buffer.length < 10 || buffer.toString("ascii", 0, 4) !== "GIF8") {
    return null
  }
  return toImageDimensions(buffer.readUInt16LE(6), buffer.readUInt16LE(8))
}
// JPEG start-of-frame (SOFn) markers carry the image dimensions. They occupy
// 0xC0-0xCF, except 0xC4 (DHT), 0xC8 (JPG extension), and 0xCC (DAC) — ITU-T T.81.
function isSofMarker(marker: number): boolean {
  return marker >= 0xc0 && marker <= 0xcf && marker !== 0xc4 && marker !== 0xc8 && marker !== 0xcc
}

/**
 * Walks JPEG marker segments until a start-of-frame segment is found and reads
 * the dimensions from it (height at +3, width at +5 relative to the length field).
 * Returns null for non-JPEG input or when no SOF segment appears in the buffer.
 *
 * Fix: previously only SOF0 (baseline) and SOF2 (progressive) were recognized;
 * now all SOFn markers are handled, so e.g. extended-sequential (SOF1) and
 * lossless (SOF3) JPEGs no longer report unknown dimensions.
 */
function parseJpegDimensions(buffer: Buffer): ImageDimensions | null {
  if (buffer.length < 4 || buffer[0] !== 0xff || buffer[1] !== 0xd8) {
    return null
  }
  let offset = 2
  while (offset < buffer.length) {
    if (buffer[offset] !== 0xff) {
      offset += 1
      continue
    }
    // Skip fill bytes (runs of 0xFF) preceding the marker code.
    while (offset < buffer.length && buffer[offset] === 0xff) {
      offset += 1
    }
    if (offset >= buffer.length) {
      return null
    }
    const marker = buffer[offset]
    offset += 1
    // EOI or SOS: no SOF seen before the entropy-coded data — give up.
    if (marker === 0xd9 || marker === 0xda) {
      break
    }
    if (offset + 1 >= buffer.length) {
      return null
    }
    const segmentLength = buffer.readUInt16BE(offset)
    if (segmentLength < 2) {
      return null
    }
    // SOF segment layout: length(2) precision(1) height(2) width(2) ...
    if (isSofMarker(marker) && offset + 7 < buffer.length) {
      const height = buffer.readUInt16BE(offset + 3)
      const width = buffer.readUInt16BE(offset + 5)
      return toImageDimensions(width, height)
    }
    offset += segmentLength
  }
  return null
}
/** Little-endian 24-bit unsigned read at the given byte offset. */
function readUInt24LE(buffer: Buffer, offset: number): number {
  let value = 0
  for (let i = 0; i < 3; i += 1) {
    value |= buffer[offset + i] << (8 * i)
  }
  return value
}
/**
 * Reads WebP dimensions from the first chunk after the RIFF/WEBP container header.
 * Supports lossy (VP8), lossless (VP8L), and extended (VP8X) layouts.
 *
 * Fix: the VP8X branch read bytes 24-29 with only the 16-byte container check in
 * place, so a truncated VP8X buffer produced bogus dimensions (out-of-range index
 * reads decay to 0, yielding width/height of 1) instead of null. The VP8 branch
 * could similarly read past the end; both now bounds-check first.
 */
function parseWebpDimensions(buffer: Buffer): ImageDimensions | null {
  if (buffer.length < 16) {
    return null
  }
  if (buffer.toString("ascii", 0, 4) !== "RIFF" || buffer.toString("ascii", 8, 12) !== "WEBP") {
    return null
  }
  const chunkType = buffer.toString("ascii", 12, 16)
  if (chunkType === "VP8 ") {
    // Lossy: frame tag ends with the 0x9D 0x01 0x2A start code, then 14-bit dims.
    if (buffer.length < 30 || buffer[23] !== 0x9d || buffer[24] !== 0x01 || buffer[25] !== 0x2a) {
      return null
    }
    const width = buffer.readUInt16LE(26) & 0x3fff
    const height = buffer.readUInt16LE(28) & 0x3fff
    return toImageDimensions(width, height)
  }
  if (chunkType === "VP8L") {
    // Lossless: 0x2F signature byte, then (width-1) and (height-1) in packed 14-bit fields.
    if (buffer.length < 25 || buffer[20] !== 0x2f) {
      return null
    }
    const bits = buffer.readUInt32LE(21)
    return toImageDimensions((bits & 0x3fff) + 1, ((bits >>> 14) & 0x3fff) + 1)
  }
  if (chunkType === "VP8X") {
    // Extended: canvas (width-1) and (height-1) as 24-bit LE values at offsets 24 and 27.
    if (buffer.length < 30) {
      return null
    }
    const width = readUInt24LE(buffer, 24) + 1
    const height = readUInt24LE(buffer, 27) + 1
    return toImageDimensions(width, height)
  }
  return null
}
/**
 * Parses image dimensions from a base64 data URL without fully decoding the image:
 * only the leading header bytes are base64-decoded. Returns null for unsupported
 * MIME types or unparseable input; never throws.
 */
export function parseImageDimensions(base64DataUrl: string, mimeType: string): ImageDimensions | null {
  if (!base64DataUrl || !mimeType) {
    return null
  }
  try {
    const rawBase64 = extractBase64Data(base64DataUrl)
    if (!rawBase64) {
      return null
    }
    // slice() is a no-op when the payload is already shorter than the header window.
    const buffer = Buffer.from(rawBase64.slice(0, HEADER_BASE64_CHARS), "base64")
    if (buffer.length === 0) {
      return null
    }
    switch (mimeType.toLowerCase()) {
      case "image/png":
        return parsePngDimensions(buffer)
      case "image/gif":
        return parseGifDimensions(buffer)
      case "image/jpeg":
      case "image/jpg":
        return parseJpegDimensions(buffer)
      case "image/webp":
        return parseWebpDimensions(buffer)
      default:
        return null
    }
  } catch {
    return null
  }
}

View File

@@ -0,0 +1,132 @@
/// <reference types="bun-types" />
import { afterEach, describe, expect, it, mock } from "bun:test"
// Valid 1x1 PNG used as harmless input for resize calls.
const PNG_1X1_DATA_URL =
  "data:image/png;base64,iVBORw0KGgoAAAANSUhEUgAAAAEAAAABCAYAAAAfFcSJAAAADUlEQVR42mNk+M9QDwADhgGAWjR9awAAAABJRU5ErkJggg=="
type ImageResizerModule = typeof import("./image-resizer")
// Re-imports the module with a unique query string so the module cache is
// bypassed and each test observes the current mock.module("sharp") state.
async function importFreshImageResizerModule(): Promise<ImageResizerModule> {
  return import(`./image-resizer?test-${Date.now()}-${Math.random()}`)
}
describe("calculateTargetDimensions", () => {
it("returns null when dimensions are already within limits", async () => {
//#given
const { calculateTargetDimensions } = await importFreshImageResizerModule()
//#when
const result = calculateTargetDimensions(800, 600)
//#then
expect(result).toBeNull()
})
it("returns null at exact long-edge boundary", async () => {
//#given
const { calculateTargetDimensions } = await importFreshImageResizerModule()
//#when
const result = calculateTargetDimensions(1568, 1000)
//#then
expect(result).toBeNull()
})
it("scales landscape dimensions by max long edge", async () => {
//#given
const { calculateTargetDimensions } = await importFreshImageResizerModule()
//#when
const result = calculateTargetDimensions(3000, 2000)
//#then
expect(result).toEqual({
width: 1568,
height: Math.floor(2000 * (1568 / 3000)),
})
})
it("scales portrait dimensions by max long edge", async () => {
//#given
const { calculateTargetDimensions } = await importFreshImageResizerModule()
//#when
const result = calculateTargetDimensions(2000, 3000)
//#then
expect(result).toEqual({
width: Math.floor(2000 * (1568 / 3000)),
height: 1568,
})
})
it("scales square dimensions to exact target", async () => {
//#given
const { calculateTargetDimensions } = await importFreshImageResizerModule()
//#when
const result = calculateTargetDimensions(4000, 4000)
//#then
expect(result).toEqual({ width: 1568, height: 1568 })
})
it("uses custom maxLongEdge when provided", async () => {
//#given
const { calculateTargetDimensions } = await importFreshImageResizerModule()
//#when
const result = calculateTargetDimensions(2000, 1000, 1000)
//#then
expect(result).toEqual({ width: 1000, height: 500 })
})
})
describe("resizeImage", () => {
afterEach(() => {
mock.restore()
})
it("returns null when sharp import fails", async () => {
//#given
mock.module("sharp", () => {
throw new Error("sharp unavailable")
})
const { resizeImage } = await importFreshImageResizerModule()
//#when
const result = await resizeImage(PNG_1X1_DATA_URL, "image/png", {
width: 1,
height: 1,
})
//#then
expect(result).toBeNull()
})
it("returns null when sharp throws during resize", async () => {
//#given
const mockSharpFactory = mock(() => ({
resize: () => {
throw new Error("resize failed")
},
}))
mock.module("sharp", () => ({
default: mockSharpFactory,
}))
const { resizeImage } = await importFreshImageResizerModule()
//#when
const result = await resizeImage(PNG_1X1_DATA_URL, "image/png", {
width: 1,
height: 1,
})
//#then
expect(result).toBeNull()
})
})

View File

@@ -0,0 +1,184 @@
import type { ImageDimensions, ResizeResult } from "./types"
import { extractBase64Data } from "../../tools/look-at/mime-type-inference"
import { log } from "../../shared"
// Long-edge pixel limit used as the resize target — presumably Anthropic's documented
// vision guidance (images beyond 1568 px get downscaled server-side); confirm.
const ANTHROPIC_MAX_LONG_EDGE = 1568
// 5 MB per-image cap; lossy output quality is stepped down until under this.
const ANTHROPIC_MAX_FILE_SIZE = 5 * 1024 * 1024
// Output formats this module can ask sharp to encode.
type SharpFormat = "jpeg" | "png" | "gif" | "webp"
// Minimal structural typings for the slice of the sharp API used here, so sharp
// can remain an optional, dynamically imported dependency.
interface SharpMetadata {
  width?: number
  height?: number
}
interface SharpInstance {
  resize(width: number, height: number, options: { fit: "inside" }): SharpInstance
  toFormat(format: SharpFormat, options?: { quality?: number }): SharpInstance
  toBuffer(): Promise<Buffer>
  metadata(): Promise<SharpMetadata>
}
type SharpFactory = (input: Buffer) => SharpInstance
/** Accepts either a CJS function export or an ESM `{ default }` wrapper for sharp. */
function resolveSharpFactory(sharpModule: unknown): SharpFactory | null {
  if (typeof sharpModule === "function") {
    return sharpModule as SharpFactory
  }
  if (sharpModule === null || typeof sharpModule !== "object") {
    return null
  }
  const candidate = Reflect.get(sharpModule, "default")
  if (typeof candidate === "function") {
    return candidate as SharpFactory
  }
  return null
}
/** Maps a MIME type to sharp's output format name; unknown types fall back to jpeg. */
function resolveSharpFormat(mimeType: string): SharpFormat {
  switch (mimeType.toLowerCase()) {
    case "image/png":
      return "png"
    case "image/gif":
      return "gif"
    case "image/webp":
      return "webp"
    default:
      return "jpeg"
  }
}
/** Only the lossy encoders accept a quality setting worth iterating on. */
function canAdjustQuality(format: SharpFormat): boolean {
  return ["jpeg", "webp"].includes(format)
}
/** Converts sharp metadata into concrete dimensions; missing or zero values yield null. */
function toDimensions(metadata: SharpMetadata): ImageDimensions | null {
  if (metadata.width && metadata.height) {
    return { width: metadata.width, height: metadata.height }
  }
  return null
}
/** Runs one sharp resize → encode pass and returns the encoded buffer. */
async function renderResizedBuffer(args: {
  sharpFactory: SharpFactory
  inputBuffer: Buffer
  target: ImageDimensions
  format: SharpFormat
  quality?: number
}): Promise<Buffer> {
  const pipeline = args.sharpFactory(args.inputBuffer)
    .resize(args.target.width, args.target.height, { fit: "inside" })
    .toFormat(args.format, args.quality ? { quality: args.quality } : undefined)
  return pipeline.toBuffer()
}
/** Error-to-string: Error instances yield their message, everything else is stringified. */
function getErrorMessage(error: unknown): string {
  if (error instanceof Error) {
    return error.message
  }
  return String(error)
}
/**
 * Computes downscaled dimensions whose long edge equals maxLongEdge while
 * preserving aspect ratio (floor-rounded, minimum 1 px on the short edge).
 * Returns null when no downscale is needed or any input is non-positive.
 */
export function calculateTargetDimensions(
  width: number,
  height: number,
  maxLongEdge = ANTHROPIC_MAX_LONG_EDGE,
): ImageDimensions | null {
  if (width <= 0 || height <= 0 || maxLongEdge <= 0) {
    return null
  }
  if (Math.max(width, height) <= maxLongEdge) {
    return null
  }
  const scaleShortEdge = (shortEdge: number, longEdge: number): number =>
    Math.max(1, Math.floor((shortEdge * maxLongEdge) / longEdge))
  if (width >= height) {
    return { width: maxLongEdge, height: scaleShortEdge(height, width) }
  }
  return { width: scaleShortEdge(width, height), height: maxLongEdge }
}
/**
 * Resizes a base64 data-URL image to the target dimensions via sharp.
 *
 * sharp is imported dynamically so it stays optional: when it is missing or
 * anything fails, the function logs and returns null so the caller keeps the
 * original image. For lossy formats the output is re-encoded at decreasing
 * quality until it fits under the 5 MB limit (best effort).
 */
export async function resizeImage(
  base64DataUrl: string,
  mimeType: string,
  target: ImageDimensions,
): Promise<ResizeResult | null> {
  try {
    // Indirect specifier — presumably to keep bundlers/type-checkers from
    // treating sharp as a hard dependency; confirm before simplifying.
    const sharpModuleName = "sharp"
    const sharpModule = await import(sharpModuleName).catch(() => null)
    if (!sharpModule) {
      log("[read-image-resizer] sharp unavailable, skipping resize")
      return null
    }
    const sharpFactory = resolveSharpFactory(sharpModule)
    if (!sharpFactory) {
      log("[read-image-resizer] sharp import has unexpected shape")
      return null
    }
    const rawBase64 = extractBase64Data(base64DataUrl)
    if (!rawBase64) {
      return null
    }
    const inputBuffer = Buffer.from(rawBase64, "base64")
    if (inputBuffer.length === 0) {
      return null
    }
    // Use sharp's decoded metadata (not the caller's claim) for the "before" dimensions.
    const original = toDimensions(await sharpFactory(inputBuffer).metadata())
    if (!original) {
      return null
    }
    const format = resolveSharpFormat(mimeType)
    let resizedBuffer = await renderResizedBuffer({
      sharpFactory,
      inputBuffer,
      target,
      format,
    })
    // Still over the byte cap: step quality down for lossy formats until it fits.
    if (resizedBuffer.length > ANTHROPIC_MAX_FILE_SIZE && canAdjustQuality(format)) {
      for (const quality of [80, 60, 40]) {
        resizedBuffer = await renderResizedBuffer({
          sharpFactory,
          inputBuffer,
          target,
          format,
          quality,
        })
        if (resizedBuffer.length <= ANTHROPIC_MAX_FILE_SIZE) {
          break
        }
      }
    }
    // Read back the actual post-resize dimensions (fit: "inside" may differ from target).
    const resized = toDimensions(await sharpFactory(resizedBuffer).metadata())
    if (!resized) {
      return null
    }
    return {
      resizedDataUrl: `data:${mimeType};base64,${resizedBuffer.toString("base64")}`,
      original,
      resized,
    }
  } catch (error) {
    // Best effort: any failure means "no resize", never an exception into the hook.
    log("[read-image-resizer] resize failed", {
      error: getErrorMessage(error),
      mimeType,
      target,
    })
    return null
  }
}

View File

@@ -0,0 +1 @@
// Public entry point for the read-image-resizer hook module.
export { createReadImageResizerHook } from "./hook"

View File

@@ -0,0 +1,16 @@
// Pixel dimensions of an image.
export interface ImageDimensions {
  width: number
  height: number
}
// Shape of a tool-output attachment entry this hook processes.
export interface ImageAttachment {
  mime: string
  url: string // base64 data URL; replaced in place after a successful resize
  filename?: string // may be absent; a placeholder name is derived when missing
}
// Outcome of a successful sharp resize.
export interface ResizeResult {
  resizedDataUrl: string // data URL re-encoded with the original MIME type
  original: ImageDimensions
  resized: ImageDimensions
}

View File

@@ -1,6 +1,6 @@
# src/hooks/rules-injector/ — Conditional Rules Injection
**Generated:** 2026-02-24
**Generated:** 2026-03-02
## OVERVIEW

View File

@@ -7,6 +7,7 @@ import {
getAfplayPath,
getPaplayPath,
getAplayPath,
getTerminalNotifierPath,
} from "./session-notification-utils"
import { buildWindowsToastScript, escapeAppleScriptText, escapePowerShellSingleQuotedText } from "./session-notification-formatting"
@@ -39,6 +40,19 @@ export async function sendSessionNotification(
): Promise<void> {
switch (platform) {
case "darwin": {
// Try terminal-notifier first — deterministic click-to-focus
const terminalNotifierPath = await getTerminalNotifierPath()
if (terminalNotifierPath) {
const bundleId = process.env.__CFBundleIdentifier
const args = [terminalNotifierPath, "-title", title, "-message", message]
if (bundleId) {
args.push("-activate", bundleId)
}
await ctx.$`${args}`.catch(() => {})
break
}
// Fallback: osascript (click may open Finder instead of terminal)
const osascriptPath = await getOsascriptPath()
if (!osascriptPath) return

Some files were not shown because too many files have changed in this diff Show More