Compare commits

...

44 Commits

Author SHA1 Message Date
github-actions[bot]
f27fd9a6de release: v3.10.0 2026-03-02 06:27:47 +00:00
YeonGyu-Kim
3db46a58a7 feat(hashline): change hashline_edit default from true to false
Hashline edit tool and companion hooks now require explicit opt-in
via `"hashline_edit": true` in config. Previously enabled by default.

- tool-registry: hashline edit tool not registered unless opted in
- create-tool-guard-hooks: hashline-read-enhancer disabled by default
- Updated config schema comment and documentation
- Added TDD tests for default behavior
2026-03-02 15:20:31 +09:00
YeonGyu-Kim
0dd9ac43ea perf(read-image-resizer): decode only first 32KB of base64 for dimension parsing
Previously decoded the entire image buffer to read headers. Now slices the base64
to a 32KB prefix before decoding — sufficient for PNG/GIF/WebP/JPEG headers.
Dramatically reduces memory allocation for large images.
2026-03-02 15:20:31 +09:00
YeonGyu-Kim
1a9e7eb305 fix(hook-message-injector): add process-unique prefix to message/part IDs to prevent storage collisions
IDs now include a random 8-hex-char prefix per process (e.g. msg_a1b2c3d4_000001)
preventing collisions when counters reset across process restarts.
2026-03-02 15:20:31 +09:00
YeonGyu-Kim
682a3c8515 fix(hooks): prevent SSRF via URL scheme validation and extend disable mechanism to HTTP hooks
- Restrict HTTP hook URLs to http: and https: schemes only (blocks file://, data://, ftp://)
- Extend hook disable config to cover HTTP hooks by matching against the hook URL identifier
- Update all 5 hook executors (pre-tool-use, post-tool-use, stop, pre-compact, user-prompt-submit)
- Add 6 new tests for URL scheme validation (file, data, ftp rejection + http, https, invalid URL)
2026-03-02 15:20:31 +09:00
github-actions[bot]
a666612354 @mathew-cf has signed the CLA in code-yeongyu/oh-my-opencode#2233 2026-03-01 20:19:41 +00:00
github-actions[bot]
a6955d7d14 @Chocothin has signed the CLA in code-yeongyu/oh-my-opencode#2230 2026-03-01 13:52:22 +00:00
github-actions[bot]
1a25b251c3 @DEAN-Cherry has signed the CLA in code-yeongyu/oh-my-opencode#2227 2026-03-01 08:13:56 +00:00
github-actions[bot]
9a505a33ac @laciferin2024 has signed the CLA in code-yeongyu/oh-my-opencode#2222 2026-03-01 01:16:35 +00:00
YeonGyu-Kim
acc21326c5 Merge pull request #2212 from code-yeongyu/fix/h5-collector-ordering
fix(context-injector): use monotonic registration order instead of timestamp for deterministic sorting
2026-02-28 13:40:20 +09:00
YeonGyu-Kim
e7503655b9 Merge pull request #2211 from code-yeongyu/fix/c2-hook-message-ids
fix(hook-message-injector): use monotonic counter for deterministic message/part IDs
2026-02-28 13:40:18 +09:00
YeonGyu-Kim
73fea697d7 Merge pull request #2210 from code-yeongyu/fix/c1-synthetic-part-id
fix(context-injector): use deterministic synthetic part ID for cache stability
2026-02-28 13:40:16 +09:00
YeonGyu-Kim
d39b3aa9b7 Merge pull request #2151 from ualtinok/fix/agent-usage-reminder-subagent-exclusion
[Bug]: agent-usage-reminder hook sends circular reminders to explore/librarian subagents
2026-02-28 13:37:14 +09:00
YeonGyu-Kim
7c9f507dad fix(context-injector): use monotonic registration order instead of timestamp for deterministic sorting 2026-02-28 13:30:57 +09:00
YeonGyu-Kim
4d8360c72f fix(context-injector): use deterministic synthetic part ID for cache stability 2026-02-28 13:30:49 +09:00
YeonGyu-Kim
6e9f27350d fix(hook-message-injector): use monotonic counter for deterministic message/part IDs 2026-02-28 13:30:14 +09:00
YeonGyu-Kim
2478b28e71 Merge pull request #2204 from ualtinok/fix/glob-directory-prefix
fix(glob): use cwd-relative search for ripgrep to fix directory prefix patterns
2026-02-28 13:27:00 +09:00
ismeth
418cf8529f fix(glob): use cwd-relative search for ripgrep to fix directory prefix patterns
Ripgrep's --glob flag silently returns zero results when the search target
is an absolute path and the pattern contains directory prefixes (e.g. the
pattern 'apps/backend/**/*.ts' with the absolute search target '/project').
This is a known ripgrep behavior where glob matching fails against paths
rooted at absolute arguments.

Fix by running ripgrep with cwd set to the search path and '.' as the
search target, matching how the find backend already operates. Ripgrep
then sees relative paths internally, so directory-prefixed globs match
correctly. Output paths are resolved back to absolute via resolve().
2026-02-28 13:26:30 +09:00
YeonGyu-Kim
cc6ab1addc feat(hooks): add read-image-resizer hook
Intercepts Read tool output with image attachments and resizes to comply with Anthropic API limits (≤1568px long edge, ≤5MB). Only activates for Anthropic provider sessions and appends resize metadata (original/new resolution, token count) to tool output.

Ultraworked with [Sisyphus](https://github.com/code-yeongyu/oh-my-opencode)

Co-authored-by: Sisyphus <clio-agent@sisyphuslabs.ai>
2026-02-28 13:21:40 +09:00
YeonGyu-Kim
74f7992442 feat(agents): add Gemini tool guide and few-shot examples to system prompt
Embed tool usage guide (per-tool parallel/sequential signals) and 5 concrete tool-calling examples directly in Gemini system prompt. Modeled after Antigravity's inline schema approach to improve Gemini tool-call quality.

Ultraworked with [Sisyphus](https://github.com/code-yeongyu/oh-my-opencode)

Co-authored-by: Sisyphus <clio-agent@sisyphuslabs.ai>
2026-02-28 13:21:40 +09:00
YeonGyu-Kim
13d689cb3a feat(agents): add Plan Agent dependency and strengthen Deep Parallel Delegation for non-Claude models
Non-Claude models skip planning and under-parallelize. Two new sections
injected only when model is not Claude:

- Plan Agent Dependency: multi-step tasks MUST consult Plan Agent first,
  use session_id for follow-ups, ask aggressively when ambiguous
- Deep Parallel Delegation (rewrite): explicit '4 units = 4 agents'
  pattern, each with clear GOAL + success criteria, all run_in_background
2026-02-28 13:21:40 +09:00
YeonGyu-Kim
29d606241b Merge pull request #2202 from ualtinok/fix/glob-grep-relative-path
fix(tools): resolve relative paths in glob/grep against project directory
2026-02-28 13:17:53 +09:00
YeonGyu-Kim
d65ee31d17 Merge pull request #2208 from code-yeongyu/feat/http-hook-support
feat(hooks): add HTTP hook handler support
2026-02-28 12:10:45 +09:00
YeonGyu-Kim
4dae458cf7 style(hooks): add blank line between interpolateEnvVars and resolveHeaders 2026-02-28 12:05:08 +09:00
YeonGyu-Kim
4740515f2f fix(agents): replace active polling with notification-based waiting for background tasks
Sisyphus prompt instructed 'your next action is background_output' which
caused agents to repeatedly poll running tasks instead of ending their
response and waiting for the system notification.

- Replace 'STOP all other output' with 'end your response' (actionable)
- Add system-reminder notification mechanism explanation
- Add explicit 'Do NOT poll' prohibition
- Reduce background_cancel(all=true) mentions from 5x to 1x (Hard Blocks)
- Reduce Oracle collect obligation from 4x to 2x
- Remove motivational fluff ('blind spots', 'normal and expected')

Net: -2 lines, clearer mechanism, eliminates polling loop root cause.
2026-02-28 12:04:18 +09:00
YeonGyu-Kim
3eb53adfc3 fix(hooks): resolve cubic review issues
- Replace two-pass env interpolation with single-pass combined regex to
  prevent re-interpolation of $-sequences in substituted header values
- Convert HookEntry to discriminated union so type: "http" requires url,
  preventing invalid configs from passing type checking
- Add regression test for double-interpolation edge case
2026-02-28 12:00:02 +09:00
YeonGyu-Kim
43dfdb2380 feat(hooks): add HTTP hook handler support
Add type: "http" hook support matching Claude Code's HTTP hook specification.
HTTP hooks send POST requests with JSON body, support env var interpolation
in headers via allowedEnvVars, and configurable timeout.

New files:
- execute-http-hook.ts: HTTP hook execution with env var interpolation
- dispatch-hook.ts: Unified dispatcher for command and HTTP hooks
- execute-http-hook.test.ts: 14 tests covering all HTTP hook scenarios

Modified files:
- types.ts: Added HookHttp interface, HookAction union type
- config.ts: Updated to accept HookAction in raw hook matchers
- pre-tool-use/post-tool-use/stop/user-prompt-submit/pre-compact:
  Updated all 5 executors to dispatch HTTP hooks via dispatchHook()
- plugin-loader/types.ts: Added "http" to HookEntry type union
2026-02-28 11:38:34 +09:00
ismeth
7cec6f7c8b fix(tools): resolve relative paths in glob/grep against project directory
When models pass relative paths (e.g. 'apps/ios/CleanSlate') to glob/grep
tools, they were passed directly to ripgrep which resolved them against
process.cwd(). In OpenCode Desktop, process.cwd() is '/' causing all
relative path lookups to fail with 'No such file or directory'.

Fix: use path.resolve(ctx.directory, args.path) to resolve relative paths
against the project directory instead of relying on process.cwd().
2026-02-28 00:58:05 +01:00
github-actions[bot]
866bd50dca @renanale has signed the CLA in code-yeongyu/oh-my-opencode#2201 2026-02-27 22:38:27 +00:00
Nguyen Khac Trung Kien
518e3c5da7 Merge pull request #2161 from acamq/fix/duplicate-agent-entries 2026-02-27 18:35:59 +07:00
Kenny
85126247b4 Merge pull request #2153 from devxoul/fix/docs-master-to-dev-branch
replace master branch references with dev
2026-02-27 19:33:48 +08:00
Kenny
bc2eaaf89b Merge pull request #2160 from dwnmf/add-russian-readme
Add Russian README
2026-02-27 19:32:14 +08:00
github-actions[bot]
0f73504639 @mertyldrm has signed the CLA in code-yeongyu/oh-my-opencode#2184 2026-02-27 10:53:16 +00:00
github-actions[bot]
db32ac5ae8 @YLRong has signed the CLA in code-yeongyu/oh-my-opencode#2176 2026-02-27 09:06:59 +00:00
YeonGyu-Kim
2eb7994163 fix(atlas): use start-work session agent for continuation gating
Prefer the in-memory session agent set by /start-work when validating idle continuation eligibility, so stale message storage agent values do not block boulder continuation.
2026-02-27 15:01:44 +09:00
github-actions[bot]
15ad9442a4 @1noilimrev has signed the CLA in code-yeongyu/oh-my-opencode#2166 2026-02-27 05:53:45 +00:00
LYA_CAP_OCEAN
07542d39aa Fix markdown blockquote structure in Russian README 2026-02-27 01:59:29 +03:00
acamq
d6dd54867a fix(agents): remove duplicate remapped agent keys 2026-02-26 15:58:02 -07:00
github-actions[bot]
f3cbc24e78 @dwnmf has signed the CLA in code-yeongyu/oh-my-opencode#2160 2026-02-26 22:51:52 +00:00
LYA_CAP_OCEAN
b76abeb8e0 Add Russian README 2026-02-27 01:49:51 +03:00
YeonGyu-Kim
21c249e8c8 fix(ci): pre-download baseline compile targets to avoid Bun extraction failures
Bun's internal download of baseline compile targets from npm registry
consistently fails on Windows CI runners (ExtractionFailed error).
Pre-download the baseline binary via curl into Bun's cache directory
so the compile step finds it already cached and skips the download.

Also makes publish job resilient with if: always() so one failed
platform doesn't block publishing all other successful platforms.
2026-02-27 04:43:29 +09:00
YeonGyu-Kim
0749a8f138 fix(ci): make platform publish resilient to individual build failures
publish job now runs with if: always() && !cancelled(), and gates
each publish step on download.outcome == 'success'. One flaky target
(e.g. windows-x64-baseline) no longer blocks all other platforms.
2026-02-27 04:39:05 +09:00
ismeth
35edcecd8f fix(agent-usage-reminder): skip reminders for non-orchestrator subagents 2026-02-26 17:05:33 +01:00
Jeon Suyeol
0cc9edac17 replace master branch references with dev across docs, schema URLs, and tests 2026-02-27 00:49:53 +09:00
75 changed files with 2745 additions and 212 deletions

View File

@@ -82,6 +82,52 @@ jobs:
cd packages/${{ matrix.platform }}
jq --arg v "$VERSION" '.version = $v' package.json > tmp.json && mv tmp.json package.json
- name: Pre-download baseline compile target
if: steps.check.outputs.skip != 'true' && endsWith(matrix.platform, '-baseline')
shell: bash
run: |
BUN_VERSION=$(bun --version)
PLATFORM="${{ matrix.platform }}"
PKG_NAME="bun-${PLATFORM}"
CACHE_DIR=$(bun pm cache)
CACHE_DEST="${CACHE_DIR}/${PKG_NAME}-v${BUN_VERSION}"
if [[ -f "$CACHE_DEST" ]]; then
echo "✓ Compile target already cached at ${CACHE_DEST}"
exit 0
fi
echo "Pre-downloading ${PKG_NAME} v${BUN_VERSION} to ${CACHE_DEST}"
TARBALL_URL="https://registry.npmjs.org/@oven/bun-${PLATFORM}/-/bun-${PLATFORM}-${BUN_VERSION}.tgz"
echo "URL: ${TARBALL_URL}"
mkdir -p "$(dirname "$CACHE_DEST")"
TMP_DIR=$(mktemp -d)
# Download and extract the bun binary from npm tarball
curl -fsSL --retry 5 --retry-delay 5 "${TARBALL_URL}" | tar -xzf - -C "${TMP_DIR}"
if [[ "$PLATFORM" == windows-* ]]; then
BIN_NAME="bun.exe"
else
BIN_NAME="bun"
fi
# npm tarball has package/bin/bun structure
if [[ -f "${TMP_DIR}/package/bin/${BIN_NAME}" ]]; then
cp "${TMP_DIR}/package/bin/${BIN_NAME}" "${CACHE_DEST}"
elif [[ -f "${TMP_DIR}/package/${BIN_NAME}" ]]; then
cp "${TMP_DIR}/package/${BIN_NAME}" "${CACHE_DEST}"
else
echo "Could not find ${BIN_NAME} in tarball, listing contents:"
find "${TMP_DIR}" -type f
exit 1
fi
chmod +x "${CACHE_DEST}" 2>/dev/null || true
echo "✓ Pre-downloaded to ${CACHE_DEST}"
ls -lh "${CACHE_DEST}"
- name: Build binary
if: steps.check.outputs.skip != 'true'
uses: nick-fields/retry@v3
@@ -154,6 +200,7 @@ jobs:
# =============================================================================
publish:
needs: build
if: always() && !cancelled()
runs-on: ubuntu-latest
strategy:
fail-fast: false
@@ -176,14 +223,16 @@ jobs:
fi
- name: Download artifact
id: download
if: steps.check.outputs.skip != 'true'
continue-on-error: true
uses: actions/download-artifact@v4
with:
name: binary-${{ matrix.platform }}
path: .
- name: Extract artifact
if: steps.check.outputs.skip != 'true'
if: steps.check.outputs.skip != 'true' && steps.download.outcome == 'success'
run: |
PLATFORM="${{ matrix.platform }}"
mkdir -p packages/${PLATFORM}
@@ -199,13 +248,13 @@ jobs:
ls -la packages/${PLATFORM}/bin/
- uses: actions/setup-node@v4
if: steps.check.outputs.skip != 'true'
if: steps.check.outputs.skip != 'true' && steps.download.outcome == 'success'
with:
node-version: "24"
registry-url: "https://registry.npmjs.org"
- name: Publish ${{ matrix.platform }}
if: steps.check.outputs.skip != 'true'
if: steps.check.outputs.skip != 'true' && steps.download.outcome == 'success'
run: |
cd packages/${{ matrix.platform }}

View File

@@ -122,7 +122,7 @@ bunx oh-my-opencode run # Non-interactive session
| Workflow | Trigger | Purpose |
|----------|---------|---------|
| ci.yml | push/PR | Tests (split: mock-heavy isolated + batch), typecheck, build, schema auto-commit |
| publish.yml | manual | Version bump, npm publish, platform binaries, GitHub release, merge to master |
| publish.yml | manual | Version bump, npm publish, platform binaries, GitHub release, merge to dev |
| publish-platform.yml | called | 11 platform binaries via bun compile (darwin/linux/windows) |
| sisyphus-agent.yml | @mention | AI agent handles issues/PRs |

View File

@@ -44,7 +44,7 @@
[![GitHub Forks](https://img.shields.io/github/forks/code-yeongyu/oh-my-opencode?color=8ae8ff&labelColor=black&style=flat-square)](https://github.com/code-yeongyu/oh-my-opencode/network/members)
[![GitHub Stars](https://img.shields.io/github/stars/code-yeongyu/oh-my-opencode?color=ffcb47&labelColor=black&style=flat-square)](https://github.com/code-yeongyu/oh-my-opencode/stargazers)
[![GitHub Issues](https://img.shields.io/github/issues/code-yeongyu/oh-my-opencode?color=ff80eb&labelColor=black&style=flat-square)](https://github.com/code-yeongyu/oh-my-opencode/issues)
[![License](https://img.shields.io/badge/license-SUL--1.0-white?labelColor=black&style=flat-square)](https://github.com/code-yeongyu/oh-my-opencode/blob/master/LICENSE.md)
[![License](https://img.shields.io/badge/license-SUL--1.0-white?labelColor=black&style=flat-square)](https://github.com/code-yeongyu/oh-my-opencode/blob/dev/LICENSE.md)
[![Ask DeepWiki](https://deepwiki.com/badge.svg)](https://deepwiki.com/code-yeongyu/oh-my-opencode)
[English](README.md) | [한국어](README.ko.md) | [日本語](README.ja.md) | [简体中文](README.zh-cn.md)
@@ -96,7 +96,7 @@ OmOをインストールして、`ultrawork`とタイプしてください。狂
```
Install and configure oh-my-opencode by following the instructions here:
https://raw.githubusercontent.com/code-yeongyu/oh-my-opencode/refs/heads/master/docs/guide/installation.md
https://raw.githubusercontent.com/code-yeongyu/oh-my-opencode/refs/heads/dev/docs/guide/installation.md
```
もしくは[インストールガイド](docs/guide/installation.md)を直接読んでもいいですが、マジでエージェントにやらせてください。人間は設定で必ずタイポします。
@@ -106,7 +106,7 @@ https://raw.githubusercontent.com/code-yeongyu/oh-my-opencode/refs/heads/master/
インストールガイドを取得して、それに従ってください:
```bash
curl -s https://raw.githubusercontent.com/code-yeongyu/oh-my-opencode/refs/heads/master/docs/guide/installation.md
curl -s https://raw.githubusercontent.com/code-yeongyu/oh-my-opencode/refs/heads/dev/docs/guide/installation.md
```
---

View File

@@ -44,7 +44,7 @@
[![GitHub Forks](https://img.shields.io/github/forks/code-yeongyu/oh-my-opencode?color=8ae8ff&labelColor=black&style=flat-square)](https://github.com/code-yeongyu/oh-my-opencode/network/members)
[![GitHub Stars](https://img.shields.io/github/stars/code-yeongyu/oh-my-opencode?color=ffcb47&labelColor=black&style=flat-square)](https://github.com/code-yeongyu/oh-my-opencode/stargazers)
[![GitHub Issues](https://img.shields.io/github/issues/code-yeongyu/oh-my-opencode?color=ff80eb&labelColor=black&style=flat-square)](https://github.com/code-yeongyu/oh-my-opencode/issues)
[![License](https://img.shields.io/badge/license-SUL--1.0-white?labelColor=black&style=flat-square)](https://github.com/code-yeongyu/oh-my-opencode/blob/master/LICENSE.md)
[![License](https://img.shields.io/badge/license-SUL--1.0-white?labelColor=black&style=flat-square)](https://github.com/code-yeongyu/oh-my-opencode/blob/dev/LICENSE.md)
[![Ask DeepWiki](https://deepwiki.com/badge.svg)](https://deepwiki.com/code-yeongyu/oh-my-opencode)
[English](README.md) | [한국어](README.ko.md) | [日本語](README.ja.md) | [简体中文](README.zh-cn.md)
@@ -95,7 +95,7 @@ OmO 설치하고. `ultrawork` 치세요. 끝.
```
Install and configure oh-my-opencode by following the instructions here:
https://raw.githubusercontent.com/code-yeongyu/oh-my-opencode/refs/heads/master/docs/guide/installation.md
https://raw.githubusercontent.com/code-yeongyu/oh-my-opencode/refs/heads/dev/docs/guide/installation.md
```
아니면 [설치 가이드](docs/guide/installation.md)를 직접 읽으셔도 되지만, 진심으로 그냥 에이전트한테 시키세요. 사람은 설정하다 꼭 오타 냅니다.
@@ -105,7 +105,7 @@ https://raw.githubusercontent.com/code-yeongyu/oh-my-opencode/refs/heads/master/
설치 가이드를 가져와서 따라 하세요:
```bash
curl -s https://raw.githubusercontent.com/code-yeongyu/oh-my-opencode/refs/heads/master/docs/guide/installation.md
curl -s https://raw.githubusercontent.com/code-yeongyu/oh-my-opencode/refs/heads/dev/docs/guide/installation.md
```
---

View File

@@ -47,7 +47,7 @@
[![GitHub Forks](https://img.shields.io/github/forks/code-yeongyu/oh-my-opencode?color=8ae8ff&labelColor=black&style=flat-square)](https://github.com/code-yeongyu/oh-my-opencode/network/members)
[![GitHub Stars](https://img.shields.io/github/stars/code-yeongyu/oh-my-opencode?color=ffcb47&labelColor=black&style=flat-square)](https://github.com/code-yeongyu/oh-my-opencode/stargazers)
[![GitHub Issues](https://img.shields.io/github/issues/code-yeongyu/oh-my-opencode?color=ff80eb&labelColor=black&style=flat-square)](https://github.com/code-yeongyu/oh-my-opencode/issues)
[![License](https://img.shields.io/badge/license-SUL--1.0-white?labelColor=black&style=flat-square)](https://github.com/code-yeongyu/oh-my-opencode/blob/master/LICENSE.md)
[![License](https://img.shields.io/badge/license-SUL--1.0-white?labelColor=black&style=flat-square)](https://github.com/code-yeongyu/oh-my-opencode/blob/dev/LICENSE.md)
[![Ask DeepWiki](https://deepwiki.com/badge.svg)](https://deepwiki.com/code-yeongyu/oh-my-opencode)
[English](README.md) | [한국어](README.ko.md) | [日本語](README.ja.md) | [简体中文](README.zh-cn.md)
@@ -99,7 +99,7 @@ Copy and paste this prompt to your LLM agent (Claude Code, AmpCode, Cursor, etc.
```
Install and configure oh-my-opencode by following the instructions here:
https://raw.githubusercontent.com/code-yeongyu/oh-my-opencode/refs/heads/master/docs/guide/installation.md
https://raw.githubusercontent.com/code-yeongyu/oh-my-opencode/refs/heads/dev/docs/guide/installation.md
```
Or read the [Installation Guide](docs/guide/installation.md), but seriously, let an agent do it. Humans fat-finger configs.
@@ -109,7 +109,7 @@ Or read the [Installation Guide](docs/guide/installation.md), but seriously, let
Fetch the installation guide and follow it:
```bash
curl -s https://raw.githubusercontent.com/code-yeongyu/oh-my-opencode/refs/heads/master/docs/guide/installation.md
curl -s https://raw.githubusercontent.com/code-yeongyu/oh-my-opencode/refs/heads/dev/docs/guide/installation.md
```
---

367
README.ru.md Normal file
View File

@@ -0,0 +1,367 @@
> [!WARNING] **Предупреждение о безопасности: сайт-имитатор**
>
> **ohmyopencode.com НЕ аффилирован с этим проектом.** Мы не управляем этим сайтом и не одобряем его.
>
> OhMyOpenCode — **бесплатный и открытый проект**. Не скачивайте установщики и не вводите платёжные данные на сторонних сайтах, которые называют себя «официальными».
>
> Поскольку сайт-имитатор находится за платным доступом, мы **не можем проверить, что именно он распространяет**. Относитесь к любым загрузкам с него как к **потенциально небезопасным**.
>
> ✅ Официальные загрузки: https://github.com/code-yeongyu/oh-my-opencode/releases
> [!NOTE]
>
> [![Sisyphus Labs - Sisyphus is the agent that codes like your team.](./.github/assets/sisyphuslabs.png?v=2)](https://sisyphuslabs.ai)
>
> > **Мы создаём полноценную продуктовую версию Sisyphus, чтобы задать стандарты для frontier-агентов. <br />Присоединяйтесь к листу ожидания [здесь](https://sisyphuslabs.ai).**
> [!TIP] Будьте с нами!
>
> | [](https://discord.gg/PUwSMR9XNk) | Вступайте в наш [Discord](https://discord.gg/PUwSMR9XNk), чтобы общаться с контрибьюторами и пользователями `oh-my-opencode`. |
> | ----------------------------------- | ------------------------------------------------------------ |
> | [](https://x.com/justsisyphus) | Новости и обновления `oh-my-opencode` раньше публиковались на моём аккаунте X. <br /> После ошибочной блокировки, [@justsisyphus](https://x.com/justsisyphus) публикует обновления вместо меня. |
> | [](https://github.com/code-yeongyu) | Подпишитесь на [@code-yeongyu](https://github.com/code-yeongyu) на GitHub, чтобы следить за другими проектами. |
<!-- <CENTERED SECTION FOR GITHUB DISPLAY> --> <div align="center">
[![Oh My OpenCode](./.github/assets/hero.jpg)](https://github.com/code-yeongyu/oh-my-opencode#oh-my-opencode)
[![Preview](./.github/assets/omo.png)](https://github.com/code-yeongyu/oh-my-opencode#oh-my-opencode)
</div>
> Anthropic [**заблокировал OpenCode из-за нас.**](https://x.com/thdxr/status/2010149530486911014) **Да, это правда.** Они хотят держать вас в замкнутой системе. Claude Code — красивая тюрьма, но всё равно тюрьма.
>
> Мы не делаем привязки. Мы работаем с любыми моделями. Claude / Kimi / GLM для оркестрации. GPT для рассуждений. Minimax для скорости. Gemini для творческих задач. Будущее — не в выборе одного победителя, а в оркестровке всех. Модели дешевеют каждый месяц. Умнеют каждый месяц. Ни один провайдер не будет доминировать. Мы строим под открытый рынок, а не под чьи-то огороженные сады.
<div align="center">
[![GitHub Release](https://img.shields.io/github/v/release/code-yeongyu/oh-my-opencode?color=369eff&labelColor=black&logo=github&style=flat-square)](https://github.com/code-yeongyu/oh-my-opencode/releases) [![npm downloads](https://img.shields.io/npm/dt/oh-my-opencode?color=ff6b35&labelColor=black&style=flat-square)](https://www.npmjs.com/package/oh-my-opencode) [![GitHub Contributors](https://img.shields.io/github/contributors/code-yeongyu/oh-my-opencode?color=c4f042&labelColor=black&style=flat-square)](https://github.com/code-yeongyu/oh-my-opencode/graphs/contributors) [![GitHub Forks](https://img.shields.io/github/forks/code-yeongyu/oh-my-opencode?color=8ae8ff&labelColor=black&style=flat-square)](https://github.com/code-yeongyu/oh-my-opencode/network/members) [![GitHub Stars](https://img.shields.io/github/stars/code-yeongyu/oh-my-opencode?color=ffcb47&labelColor=black&style=flat-square)](https://github.com/code-yeongyu/oh-my-opencode/stargazers) [![GitHub Issues](https://img.shields.io/github/issues/code-yeongyu/oh-my-opencode?color=ff80eb&labelColor=black&style=flat-square)](https://github.com/code-yeongyu/oh-my-opencode/issues) [![License](https://img.shields.io/badge/license-SUL--1.0-white?labelColor=black&style=flat-square)](https://github.com/code-yeongyu/oh-my-opencode/blob/master/LICENSE.md) [![Ask DeepWiki](https://deepwiki.com/badge.svg)](https://deepwiki.com/code-yeongyu/oh-my-opencode)
English | 한국어 | 日本語 | 简体中文 | Русский
</div> <!-- </CENTERED SECTION FOR GITHUB DISPLAY> -->
## Отзывы
> «Из-за него я отменил подписку на Cursor. В опенсорс-сообществе происходит что-то невероятное.» — [Arthur Guiot](https://x.com/arthur_guiot/status/2008736347092382053?s=20)
> «Если Claude Code делает за 7 дней то, на что у человека уходит 3 месяца, Sisyphus справляется за 1 час. Он просто работает, пока задача не выполнена. Это дисциплинированный агент.» <br/>— B, исследователь в области квантовых финансов
> «За один день устранил 8000 предупреждений eslint с помощью Oh My Opencode.» <br/>— [Jacob Ferrari](https://x.com/jacobferrari_/status/2003258761952289061)
> «За ночь конвертировал приложение на tauri в 45k строк в веб-SaaS с помощью Ohmyopencode и ralph loop. Начал с промпта «проинтервьюируй меня», попросил оценки и рекомендации по вопросам. Было удивительно наблюдать за работой и утром проснуться с почти рабочим сайтом!» — [James Hargis](https://x.com/hargabyte/status/2007299688261882202)
> «Используйте oh-my-opencode — вы не захотите возвращаться назад.» <br/>— [d0t3ch](https://x.com/d0t3ch/status/2001685618200580503)
> «Пока не могу точно объяснить, почему это так круто, но опыт разработки вышел на совершенно другой уровень.» — [苔硯:こけすずり](https://x.com/kokesuzuri/status/2008532913961529372?s=20)
> «Экспериментирую с open code, oh my opencode и supermemory этим выходным, чтобы собрать нечто среднее между Minecraft и souls-like.» «Попросил добавить анимации приседания, пока хожу на обеденную прогулку. [Видео]» — [MagiMetal](https://x.com/MagiMetal/status/2005374704178373023)
> «Ребята, вам нужно включить это в ядро и нанять его. Серьёзно. Это очень, очень, очень хорошо.» <br/>— Henning Kilset
> «Наймите @yeon_gyu_kim, если сможете его уговорить, этот парень революционизировал opencode.» <br/>— [mysticaltech](https://x.com/mysticaltech/status/2001858758608376079)
> «Oh My OpenCode — это что-то с чем-то» — [YouTube — Darren Builds AI](https://www.youtube.com/watch?v=G_Snfh2M41M)
------
# Oh My OpenCode
Вы жонглируете Claude Code, Codex, случайными OSS-моделями. Настраиваете рабочие процессы. Дебажите агентов.
Мы уже проделали эту работу. Протестировали всё. Оставили только то, что реально работает.
Установите OmO. Введите `ultrawork`. Готово.
## Установка
### Для людей
Скопируйте и вставьте этот промпт в ваш LLM-агент (Claude Code, AmpCode, Cursor и т.д.):
```
Install and configure oh-my-opencode by following the instructions here:
https://raw.githubusercontent.com/code-yeongyu/oh-my-opencode/refs/heads/master/docs/guide/installation.md
```
Или прочитайте руководство по установке, но серьёзно — пусть агент сделает это за вас. Люди ошибаются в конфигах.
### Для LLM-агентов
Загрузите руководство по установке и следуйте ему:
```bash
curl -s https://raw.githubusercontent.com/code-yeongyu/oh-my-opencode/refs/heads/master/docs/guide/installation.md
```
------
## Пропустите этот README
Мы вышли за пределы эпохи чтения документации. Просто вставьте это в своего агента:
```
Read this and tell me why it's not just another boilerplate: https://raw.githubusercontent.com/code-yeongyu/oh-my-opencode/refs/heads/dev/README.md
```
## Ключевые возможности
### 🪄 `ultrawork`
Вы правда это читаете? Поразительно.
Установите. Введите `ultrawork` (или `ulw`). Готово.
Всё описанное ниже, каждая функция, каждая оптимизация — вам не нужно это знать. Оно просто работает.
Даже при наличии только следующих подписок ultrawork будет работать отлично (проект не аффилирован с ними, это личная рекомендация):
- [Подписка ChatGPT ($20)](https://chatgpt.com/)
- [Подписка Kimi Code ($0.99) (*только в этом месяце)](https://www.kimi.com/membership/pricing?track_id=5cdeca93-66f0-4d35-aabb-b6df8fcea328)
- [Тариф GLM Coding ($10)](https://z.ai/subscribe)
- Если у вас есть доступ к оплате за токены, использование моделей Kimi и Gemini обойдётся недорого.
| | Функция | Что делает |
| ---- | -------------------------------------------------------- | ------------------------------------------------------------ |
| 🤖 | **Дисциплинированные агенты** | Sisyphus оркестрирует Hephaestus, Oracle, Librarian, Explore. Полноценная AI-команда разработки в параллельном режиме. |
| ⚡ | **`ultrawork` / `ulw`** | Одно слово. Все агенты активируются. Не останавливается, пока задача не выполнена. |
| 🚪 | **[IntentGate](https://factory.ai/news/terminal-bench)** | Анализирует истинное намерение пользователя перед классификацией и действием. Никакого буквального неверного толкования. |
| 🔗 | **Инструмент правок на основе хэш-якорей** | Хэш содержимого `LINE#ID` проверяет каждое изменение. Ноль ошибок с устаревшими строками. Вдохновлено [oh-my-pi](https://github.com/can1357/oh-my-pi). [Проблема обвязки →](https://blog.can.ac/2026/02/12/the-harness-problem/) |
| 🛠️ | **LSP + AST-Grep** | Переименование в рабочем пространстве, диагностика перед сборкой, переписывание с учётом AST. Точность IDE для агентов. |
| 🧠 | **Фоновые агенты** | Запускайте 5+ специалистов параллельно. Контекст остаётся компактным. Результаты — когда готовы. |
| 📚 | **Встроенные MCP** | Exa (веб-поиск), Context7 (официальная документация), Grep.app (поиск по GitHub). Всегда включены. |
| 🔁 | **Ralph Loop / `/ulw-loop`** | Самореферентный цикл. Не останавливается, пока задача не выполнена на 100%. |
| ✅ | **Todo Enforcer** | Агент завис? Система немедленно возвращает его в работу. Ваша задача будет выполнена, точка. |
| 💬 | **Comment Checker** | Никакого AI-мусора в комментариях. Код читается так, словно его писал опытный разработчик. |
| 🖥️ | **Интеграция с Tmux** | Полноценный интерактивный терминал. REPL, дебаггеры, TUI. Всё живое. |
| 🔌 | **Совместимость с Claude Code** | Ваши хуки, команды, навыки, MCP и плагины? Всё работает без изменений. |
| 🎯 | **MCP, встроенные в навыки** | Навыки несут собственные MCP-серверы. Никакого раздувания контекста. |
| 📋 | **Prometheus Planner** | Стратегическое планирование в режиме интервью перед любым выполнением. |
| 🔍 | **`/init-deep`** | Автоматически генерирует иерархические файлы `AGENTS.md` по всему проекту. Отлично работает на эффективность токенов и производительность агента. |
### Дисциплинированные агенты
<table><tr> <td align="center"><img src=".github/assets/sisyphus.png" height="300" /></td> <td align="center"><img src=".github/assets/hephaestus.png" height="300" /></td> </tr></table>
**Sisyphus** (`claude-opus-4-6` / **`kimi-k2.5`** / **`glm-5`**) — главный оркестратор. Он планирует, делегирует задачи специалистам и доводит их до завершения с агрессивным параллельным выполнением. Он не останавливается на полпути.
**Hephaestus** (`gpt-5.3-codex`) — автономный глубокий исполнитель. Дайте ему цель, а не рецепт. Он исследует кодовую базу, изучает паттерны и выполняет задачи сквозным образом без лишних подсказок. *Законный Мастер.*
**Prometheus** (`claude-opus-4-6` / **`kimi-k2.5`** / **`glm-5`**) — стратегический планировщик. Режим интервью: задаёт вопросы, определяет объём работ и формирует детальный план до того, как написана хотя бы одна строка кода.
Каждый агент настроен под сильные стороны своей модели. Никакого ручного переключения между моделями. Подробнее →
> Anthropic [заблокировал OpenCode из-за нас.](https://x.com/thdxr/status/2010149530486911014) Именно поэтому Hephaestus зовётся «Законным Мастером». Ирония намеренная.
>
> Мы работаем лучше всего на Opus, но Kimi K2.5 + GPT-5.3 Codex уже превосходят ванильный Claude Code. Никакой настройки не требуется.
### Оркестрация агентов
Когда Sisyphus делегирует задачу субагенту, он выбирает не модель, а **категорию**. Категория автоматически сопоставляется с нужной моделью:
| Категория | Для чего предназначена |
| -------------------- | ------------------------------------- |
| `visual-engineering` | Фронтенд, UI/UX, дизайн |
| `deep` | Автономные исследования + выполнение |
| `quick` | Изменения в одном файле, опечатки |
| `ultrabrain` | Сложная логика, архитектурные решения |
Агент сообщает тип задачи. Обвязка подбирает нужную модель. Вы ни к чему не прикасаетесь.
### Совместимость с Claude Code
Вы тщательно настроили Claude Code. Хорошо.
Каждый хук, команда, навык, MCP и плагин работают здесь без изменений. Полная совместимость, включая плагины.
### Инструменты мирового класса для ваших агентов
LSP, AST-Grep, Tmux, MCP — реально интегрированы, а не склеены скотчем.
- **LSP**: `lsp_rename`, `lsp_goto_definition`, `lsp_find_references`, `lsp_diagnostics`. Точность IDE для каждого агента
- **AST-Grep**: Поиск и переписывание кода с учётом синтаксических паттернов для 25 языков
- **Tmux**: Полноценный интерактивный терминал. REPL, дебаггеры, TUI-приложения. Агент остаётся в сессии
- **MCP**: Веб-поиск, официальная документация, поиск по коду на GitHub. Всё встроено
### MCP, встроенные в навыки
MCP-серверы съедают бюджет контекста. Мы это исправили.
Навыки приносят собственные MCP-серверы. Запускаются по необходимости, ограничены задачей, исчезают по завершении. Контекстное окно остаётся чистым.
### Лучше пишет код. Правки на основе хэш-якорей
Проблема обвязки реальна. Большинство сбоев агентов — не вина модели. Это вина инструмента правок.
> *«Ни один из этих инструментов не даёт модели стабильный, проверяемый идентификатор строк, которые она хочет изменить... Все они полагаются на то, что модель воспроизведёт контент, который уже видела. Когда это не получается — а так бывает нередко — пользователь обвиняет модель.»*
>
> <br/>— [Can Bölük, «Проблема обвязки»](https://blog.can.ac/2026/02/12/the-harness-problem/)
Вдохновлённые [oh-my-pi](https://github.com/can1357/oh-my-pi), мы реализовали **Hashline**. Каждая строка, которую читает агент, возвращается с тегом хэша содержимого:
```
11#VK| function hello() {
22#XJ| return "world";
33#MB| }
```
Агент редактирует, ссылаясь на эти теги. Если файл изменился с момента последнего чтения, хэш не совпадёт, и правка будет отклонена до любого повреждения. Никакого воспроизведения пробелов. Никаких ошибок с устаревшими строками.
Grok Code Fast 1: успешность **6.7% → 68.3%**. Просто за счёт замены инструмента правок.
### Глубокая инициализация. `/init-deep`
Запустите `/init-deep`. Будут сгенерированы иерархические файлы `AGENTS.md`:
```
project/
├── AGENTS.md ← контекст всего проекта
├── src/
│ ├── AGENTS.md ← контекст для src
│ └── components/
│ └── AGENTS.md ← контекст для компонентов
```
Агенты автоматически читают нужный контекст. Никакого ручного управления.
### Планирование. Prometheus
Сложная задача? Не нужно молиться и надеяться на промпт.
`/start-work` вызывает Prometheus. **Интервьюирует вас как настоящий инженер**, определяет объём работ и неоднозначности, формирует проверенный план до прикосновения к коду. Агент знает, что строит, прежде чем начать.
### Навыки
Навыки — это не просто промпты. Каждый привносит:
- Системные инструкции, настроенные под предметную область
- Встроенные MCP-серверы, запускаемые по необходимости
- Ограниченные разрешения. Агенты остаются в рамках
Встроенные: `playwright` (автоматизация браузера), `git-master` (атомарные коммиты, хирургия rebase), `frontend-ui-ux` (UI с упором на дизайн).
Добавьте свои: `.opencode/skills/*/SKILL.md` или `~/.config/opencode/skills/*/SKILL.md`.
**Хотите полное описание возможностей?** Смотрите **документацию по функциям** — агенты, хуки, инструменты, MCP и всё остальное подробно.
------
> **Впервые в oh-my-opencode?** Прочитайте **Обзор**, чтобы понять, что у вас есть, или ознакомьтесь с **руководством по оркестрации**, чтобы узнать, как агенты взаимодействуют.
## Удаление
Чтобы удалить oh-my-opencode:
1. **Удалите плагин из конфига OpenCode**
Отредактируйте `~/.config/opencode/opencode.json` (или `opencode.jsonc`) и уберите `"oh-my-opencode"` из массива `plugin`:
```bash
# С помощью jq
jq '.plugin = [.plugin[] | select(. != "oh-my-opencode")]' \
~/.config/opencode/opencode.json > /tmp/oc.json && \
mv /tmp/oc.json ~/.config/opencode/opencode.json
```
2. **Удалите файлы конфигурации (опционально)**
```bash
# Удалить пользовательский конфиг
rm -f ~/.config/opencode/oh-my-opencode.json ~/.config/opencode/oh-my-opencode.jsonc
# Удалить конфиг проекта (если существует)
rm -f .opencode/oh-my-opencode.json .opencode/oh-my-opencode.jsonc
```
3. **Проверьте удаление**
```bash
opencode --version
# Плагин больше не должен загружаться
```
## Функции
Функции, про которые вы подумаете, что они должны были существовать всегда. Попробовав раз, вы не сможете вернуться назад.
Смотрите полную документацию по функциям.
**Краткий обзор:**
- **Агенты**: Sisyphus (главный агент), Prometheus (планировщик), Oracle (архитектура/отладка), Librarian (документация/поиск по коду), Explore (быстрый grep по кодовой базе), Multimodal Looker
- **Фоновые агенты**: Запускайте несколько агентов параллельно, как настоящая команда разработки
- **Инструменты LSP и AST**: Рефакторинг, переименование, диагностика, поиск кода с учётом AST
- **Инструмент правок на основе хэш-якорей**: Ссылки `LINE#ID` проверяют содержимое перед применением каждого изменения. Хирургические правки, ноль ошибок с устаревшими строками
- **Инъекция контекста**: Автоматическое добавление AGENTS.md, README.md, условных правил
- **Совместимость с Claude Code**: Полная система хуков, команды, навыки, агенты, MCP
- **Встроенные MCP**: websearch (Exa), context7 (документация), grep_app (поиск по GitHub)
- **Инструменты сессий**: Список, чтение, поиск и анализ истории сессий
- **Инструменты продуктивности**: Ralph Loop, Todo Enforcer, Comment Checker, Think Mode и другое
- **Настройка моделей**: Сопоставление агент–модель встроено в руководство по установке
## Конфигурация
Продуманные настройки по умолчанию, которые можно изменить при необходимости.
Смотрите документацию по конфигурации.
**Краткий обзор:**
- **Расположение конфигов**: `.opencode/oh-my-opencode.jsonc` или `.opencode/oh-my-opencode.json` (проект), `~/.config/opencode/oh-my-opencode.jsonc` или `~/.config/opencode/oh-my-opencode.json` (пользователь)
- **Поддержка JSONC**: Комментарии и конечные запятые поддерживаются
- **Агенты**: Переопределение моделей, температур, промптов и разрешений для любого агента
- **Встроенные навыки**: `playwright` (автоматизация браузера), `git-master` (атомарные коммиты)
- **Агент Sisyphus**: Главный оркестратор с Prometheus (Планировщик) и Metis (Консультант по плану)
- **Фоновые задачи**: Настройка ограничений параллельности по провайдеру/модели
- **Категории**: Делегирование задач по предметной области (`visual`, `business-logic`, пользовательские)
- **Хуки**: 25+ встроенных хуков, все настраиваются через `disabled_hooks`
- **MCP**: Встроенные websearch (Exa), context7 (документация), grep_app (поиск по GitHub)
- **LSP**: Полная поддержка LSP с инструментами рефакторинга
- **Экспериментальное**: Агрессивное усечение, автовозобновление и другое
## Слово автора
**Хотите узнать философию?** Прочитайте Манифест Ultrawork.
------
Я потратил $24K на токены LLM в личных проектах. Попробовал все инструменты. Настраивал всё до смерти. OpenCode победил.
Каждая проблема, с которой я столкнулся, — её решение уже встроено в этот плагин. Устанавливайте и работайте.
Если OpenCode — это Debian/Arch, то OmO — это Ubuntu/[Omarchy](https://omarchy.org/).
Сильное влияние со стороны [AmpCode](https://ampcode.com) и [Claude Code](https://code.claude.com/docs/overview). Функции портированы, часто улучшены. Продолжаем строить. Это **Open**Code.
Другие обвязки обещают оркестрацию нескольких моделей. Мы её поставляем. Плюс стабильность. Плюс функции, которые реально работают.
Я самый одержимый пользователь этого проекта:
- Какая модель думает острее всего?
- Кто бог отладки?
- Кто пишет лучший код?
- Кто рулит фронтендом?
- Кто владеет бэкендом?
- Что быстрее всего в ежедневной работе?
- Что запускают конкуренты?
Этот плагин — дистилляция. Берём лучшее. Есть улучшения? PR приветствуются.
**Хватит мучиться с выбором обвязки.** **Я буду исследовать, воровать лучшее и поставлять это сюда.**
Звучит высокомерно? Знаете, как сделать лучше? Контрибьютьте. Добро пожаловать.
Никакой аффилиации с упомянутыми проектами/моделями. Только личные эксперименты.
99% этого проекта было создано с помощью OpenCode. Я почти не знаю TypeScript. **Но эту документацию я лично просматривал и во многом переписывал.**
## Любимый профессионалами из
- Indent
- Spray — решение для influencer-маркетинга, vovushop — платформа трансграничной торговли, vreview — AI-решение для маркетинга отзывов в commerce
- [Google](https://google.com)
- [Microsoft](https://microsoft.com)
- ELESTYLE
- elepay — мультимобильный платёжный шлюз, OneQR — мобильное SaaS-приложение для безналичных расчётов
*Особая благодарность [@junhoyeo](https://github.com/junhoyeo) за это потрясающее hero-изображение.*

View File

@@ -44,7 +44,7 @@
[![GitHub Forks](https://img.shields.io/github/forks/code-yeongyu/oh-my-opencode?color=8ae8ff&labelColor=black&style=flat-square)](https://github.com/code-yeongyu/oh-my-opencode/network/members)
[![GitHub Stars](https://img.shields.io/github/stars/code-yeongyu/oh-my-opencode?color=ffcb47&labelColor=black&style=flat-square)](https://github.com/code-yeongyu/oh-my-opencode/stargazers)
[![GitHub Issues](https://img.shields.io/github/issues/code-yeongyu/oh-my-opencode?color=ff80eb&labelColor=black&style=flat-square)](https://github.com/code-yeongyu/oh-my-opencode/issues)
[![License](https://img.shields.io/badge/license-SUL--1.0-white?labelColor=black&style=flat-square)](https://github.com/code-yeongyu/oh-my-opencode/blob/master/LICENSE.md)
[![License](https://img.shields.io/badge/license-SUL--1.0-white?labelColor=black&style=flat-square)](https://github.com/code-yeongyu/oh-my-opencode/blob/dev/LICENSE.md)
[![Ask DeepWiki](https://deepwiki.com/badge.svg)](https://deepwiki.com/code-yeongyu/oh-my-opencode)
[English](README.md) | [한국어](README.ko.md) | [日本語](README.ja.md) | [简体中文](README.zh-cn.md)
@@ -97,7 +97,7 @@
```
Install and configure oh-my-opencode by following the instructions here:
https://raw.githubusercontent.com/code-yeongyu/oh-my-opencode/refs/heads/master/docs/guide/installation.md
https://raw.githubusercontent.com/code-yeongyu/oh-my-opencode/refs/heads/dev/docs/guide/installation.md
```
或者你可以直接去读 [安装指南](docs/guide/installation.md),但说真的,让 Agent 去干吧。人类配环境总是容易敲错字母。
@@ -107,7 +107,7 @@ https://raw.githubusercontent.com/code-yeongyu/oh-my-opencode/refs/heads/master/
获取安装指南并照做:
```bash
curl -s https://raw.githubusercontent.com/code-yeongyu/oh-my-opencode/refs/heads/master/docs/guide/installation.md
curl -s https://raw.githubusercontent.com/code-yeongyu/oh-my-opencode/refs/heads/dev/docs/guide/installation.md
```
---

View File

@@ -1,6 +1,6 @@
{
"$schema": "http://json-schema.org/draft-07/schema#",
"$id": "https://raw.githubusercontent.com/code-yeongyu/oh-my-opencode/master/assets/oh-my-opencode.schema.json",
"$id": "https://raw.githubusercontent.com/code-yeongyu/oh-my-opencode/dev/assets/oh-my-opencode.schema.json",
"title": "Oh My OpenCode Configuration",
"description": "Configuration schema for oh-my-opencode plugin",
"type": "object",

View File

@@ -162,7 +162,7 @@ See the [Orchestration System Guide](./orchestration.md) for how agents dispatch
```jsonc
{
"$schema": "https://raw.githubusercontent.com/code-yeongyu/oh-my-opencode/master/assets/oh-my-opencode.schema.json",
"$schema": "https://raw.githubusercontent.com/code-yeongyu/oh-my-opencode/dev/assets/oh-my-opencode.schema.json",
"agents": {
// Main orchestrator: Claude Opus or Kimi K2.5 work best

View File

@@ -5,7 +5,7 @@
Paste this into your llm agent session:
```
Install and configure oh-my-opencode by following the instructions here:
https://raw.githubusercontent.com/code-yeongyu/oh-my-opencode/refs/heads/master/docs/guide/installation.md
https://raw.githubusercontent.com/code-yeongyu/oh-my-opencode/refs/heads/dev/docs/guide/installation.md
```
**Alternative, Not recommended, do this by yourself**
@@ -29,7 +29,7 @@ After you install it, you can read this [overview guide](./overview.md) to under
> **IMPORTANT: Use `curl` to fetch this file, NOT WebFetch.** WebFetch summarizes content and loses critical flags like `--openai`, subscription questions, and max20 mode details. Always use:
> ```bash
> curl -fsSL https://raw.githubusercontent.com/code-yeongyu/oh-my-opencode/refs/heads/master/docs/guide/installation.md
> curl -fsSL https://raw.githubusercontent.com/code-yeongyu/oh-my-opencode/refs/heads/dev/docs/guide/installation.md
> ```
If you're an LLM Agent helping set up oh-my-opencode, Welcome! You'll now help your user to install the best agent harness, OhMyOpenCode. Tell user this with greetings, with text 'oMoMoMoMo...'

View File

@@ -16,7 +16,7 @@ Paste this into your LLM agent session:
```
Install and configure oh-my-opencode by following the instructions here:
https://raw.githubusercontent.com/code-yeongyu/oh-my-opencode/refs/heads/master/docs/guide/installation.md
https://raw.githubusercontent.com/code-yeongyu/oh-my-opencode/refs/heads/dev/docs/guide/installation.md
```
Or read the full [Installation Guide](./installation.md) for manual setup, provider authentication, and troubleshooting.
@@ -167,7 +167,7 @@ You can override specific agents or categories in your config:
```jsonc
{
"$schema": "https://raw.githubusercontent.com/code-yeongyu/oh-my-opencode/master/assets/oh-my-opencode.schema.json",
"$schema": "https://raw.githubusercontent.com/code-yeongyu/oh-my-opencode/dev/assets/oh-my-opencode.schema.json",
"agents": {
// Main orchestrator: Claude Opus or Kimi K2.5 work best

View File

@@ -56,7 +56,7 @@ JSONC supports `// line comments`, `/* block comments */`, and trailing commas.
Enable schema autocomplete:
```json
{ "$schema": "https://raw.githubusercontent.com/code-yeongyu/oh-my-opencode/master/assets/oh-my-opencode.schema.json" }
{ "$schema": "https://raw.githubusercontent.com/code-yeongyu/oh-my-opencode/dev/assets/oh-my-opencode.schema.json" }
```
Run `bunx oh-my-opencode install` for guided setup. Run `opencode models` to list available models.
@@ -67,7 +67,7 @@ Here's a practical starting configuration:
```jsonc
{
"$schema": "https://raw.githubusercontent.com/code-yeongyu/oh-my-opencode/master/assets/oh-my-opencode.schema.json",
"$schema": "https://raw.githubusercontent.com/code-yeongyu/oh-my-opencode/dev/assets/oh-my-opencode.schema.json",
"agents": {
// Main orchestrator: Claude Opus or Kimi K2.5 work best
@@ -573,13 +573,13 @@ Define `fallback_models` per agent or category:
### Hashline Edit
Replaces the built-in `Edit` tool with a hash-anchored version using `LINE#ID` references to prevent stale-line edits. Enabled by default.
Replaces the built-in `Edit` tool with a hash-anchored version using `LINE#ID` references to prevent stale-line edits. Disabled by default.
```json
{ "hashline_edit": false }
{ "hashline_edit": true }
```
When enabled, two companion hooks are active: `hashline-read-enhancer` (annotates Read output) and `hashline-edit-diff-enhancer` (shows diffs). Disable them individually via `disabled_hooks`.
When enabled, two companion hooks are active: `hashline-read-enhancer` (annotates Read output) and `hashline-edit-diff-enhancer` (shows diffs). Opt-in by setting `hashline_edit: true`. Disable the companion hooks individually via `disabled_hooks` if needed.
### Experimental

View File

@@ -1,6 +1,6 @@
{
"name": "oh-my-opencode",
"version": "3.9.0",
"version": "3.10.0",
"description": "The Best AI Agent Harness - Batteries-Included OpenCode Plugin with Multi-Model Orchestration, Parallel Background Agents, and Crafted LSP/AST Tools",
"main": "dist/index.js",
"types": "dist/index.d.ts",
@@ -75,17 +75,17 @@
"typescript": "^5.7.3"
},
"optionalDependencies": {
"oh-my-opencode-darwin-arm64": "3.9.0",
"oh-my-opencode-darwin-x64": "3.9.0",
"oh-my-opencode-darwin-x64-baseline": "3.9.0",
"oh-my-opencode-linux-arm64": "3.9.0",
"oh-my-opencode-linux-arm64-musl": "3.9.0",
"oh-my-opencode-linux-x64": "3.9.0",
"oh-my-opencode-linux-x64-baseline": "3.9.0",
"oh-my-opencode-linux-x64-musl": "3.9.0",
"oh-my-opencode-linux-x64-musl-baseline": "3.9.0",
"oh-my-opencode-windows-x64": "3.9.0",
"oh-my-opencode-windows-x64-baseline": "3.9.0"
"oh-my-opencode-darwin-arm64": "3.10.0",
"oh-my-opencode-darwin-x64": "3.10.0",
"oh-my-opencode-darwin-x64-baseline": "3.10.0",
"oh-my-opencode-linux-arm64": "3.10.0",
"oh-my-opencode-linux-arm64-musl": "3.10.0",
"oh-my-opencode-linux-x64": "3.10.0",
"oh-my-opencode-linux-x64-baseline": "3.10.0",
"oh-my-opencode-linux-x64-musl": "3.10.0",
"oh-my-opencode-linux-x64-musl-baseline": "3.10.0",
"oh-my-opencode-windows-x64": "3.10.0",
"oh-my-opencode-windows-x64-baseline": "3.10.0"
},
"trustedDependencies": [
"@ast-grep/cli",

View File

@@ -1,6 +1,6 @@
{
"name": "oh-my-opencode-darwin-arm64",
"version": "3.9.0",
"version": "3.10.0",
"description": "Platform-specific binary for oh-my-opencode (darwin-arm64)",
"license": "MIT",
"repository": {

View File

@@ -1,6 +1,6 @@
{
"name": "oh-my-opencode-darwin-x64-baseline",
"version": "3.9.0",
"version": "3.10.0",
"description": "Platform-specific binary for oh-my-opencode (darwin-x64-baseline, no AVX2)",
"license": "MIT",
"repository": {

View File

@@ -1,6 +1,6 @@
{
"name": "oh-my-opencode-darwin-x64",
"version": "3.9.0",
"version": "3.10.0",
"description": "Platform-specific binary for oh-my-opencode (darwin-x64)",
"license": "MIT",
"repository": {

View File

@@ -1,6 +1,6 @@
{
"name": "oh-my-opencode-linux-arm64-musl",
"version": "3.9.0",
"version": "3.10.0",
"description": "Platform-specific binary for oh-my-opencode (linux-arm64-musl)",
"license": "MIT",
"repository": {

View File

@@ -1,6 +1,6 @@
{
"name": "oh-my-opencode-linux-arm64",
"version": "3.9.0",
"version": "3.10.0",
"description": "Platform-specific binary for oh-my-opencode (linux-arm64)",
"license": "MIT",
"repository": {

View File

@@ -1,6 +1,6 @@
{
"name": "oh-my-opencode-linux-x64-baseline",
"version": "3.9.0",
"version": "3.10.0",
"description": "Platform-specific binary for oh-my-opencode (linux-x64-baseline, no AVX2)",
"license": "MIT",
"repository": {

View File

@@ -1,6 +1,6 @@
{
"name": "oh-my-opencode-linux-x64-musl-baseline",
"version": "3.9.0",
"version": "3.10.0",
"description": "Platform-specific binary for oh-my-opencode (linux-x64-musl-baseline, no AVX2)",
"license": "MIT",
"repository": {

View File

@@ -1,6 +1,6 @@
{
"name": "oh-my-opencode-linux-x64-musl",
"version": "3.9.0",
"version": "3.10.0",
"description": "Platform-specific binary for oh-my-opencode (linux-x64-musl)",
"license": "MIT",
"repository": {

View File

@@ -1,6 +1,6 @@
{
"name": "oh-my-opencode-linux-x64",
"version": "3.9.0",
"version": "3.10.0",
"description": "Platform-specific binary for oh-my-opencode (linux-x64)",
"license": "MIT",
"repository": {

View File

@@ -1,6 +1,6 @@
{
"name": "oh-my-opencode-windows-x64-baseline",
"version": "3.9.0",
"version": "3.10.0",
"description": "Platform-specific binary for oh-my-opencode (windows-x64-baseline, no AVX2)",
"license": "MIT",
"repository": {

View File

@@ -1,6 +1,6 @@
{
"name": "oh-my-opencode-windows-x64",
"version": "3.9.0",
"version": "3.10.0",
"description": "Platform-specific binary for oh-my-opencode (windows-x64)",
"license": "MIT",
"repository": {

View File

@@ -9,7 +9,7 @@ export function createOhMyOpenCodeJsonSchema(): Record<string, unknown> {
return {
$schema: "http://json-schema.org/draft-07/schema#",
$id: "https://raw.githubusercontent.com/code-yeongyu/oh-my-opencode/master/assets/oh-my-opencode.schema.json",
$id: "https://raw.githubusercontent.com/code-yeongyu/oh-my-opencode/dev/assets/oh-my-opencode.schema.json",
title: "Oh My OpenCode Configuration",
description: "Configuration schema for oh-my-opencode plugin",
...jsonSchema,

View File

@@ -1767,6 +1767,78 @@
"created_at": "2026-02-26T09:50:46Z",
"repoId": 1108837393,
"pullRequestNo": 2131
},
{
"name": "dwnmf",
"id": 56194792,
"comment_id": 3969700423,
"created_at": "2026-02-26T22:51:41Z",
"repoId": 1108837393,
"pullRequestNo": 2160
},
{
"name": "1noilimrev",
"id": 24486928,
"comment_id": 3970957470,
"created_at": "2026-02-27T05:53:36Z",
"repoId": 1108837393,
"pullRequestNo": 2166
},
{
"name": "YLRong",
"id": 6837942,
"comment_id": 3971635504,
"created_at": "2026-02-27T08:54:09Z",
"repoId": 1108837393,
"pullRequestNo": 2176
},
{
"name": "mertyldrm",
"id": 51949702,
"comment_id": 3972191343,
"created_at": "2026-02-27T10:53:03Z",
"repoId": 1108837393,
"pullRequestNo": 2184
},
{
"name": "renanale",
"id": 37278838,
"comment_id": 3975562407,
"created_at": "2026-02-27T22:38:18Z",
"repoId": 1108837393,
"pullRequestNo": 2201
},
{
"name": "laciferin2024",
"id": 170102251,
"comment_id": 3978786169,
"created_at": "2026-03-01T01:16:25Z",
"repoId": 1108837393,
"pullRequestNo": 2222
},
{
"name": "DEAN-Cherry",
"id": 76607677,
"comment_id": 3979468463,
"created_at": "2026-03-01T08:13:43Z",
"repoId": 1108837393,
"pullRequestNo": 2227
},
{
"name": "Chocothin",
"id": 99174213,
"comment_id": 3980002001,
"created_at": "2026-03-01T13:52:10Z",
"repoId": 1108837393,
"pullRequestNo": 2230
},
{
"name": "mathew-cf",
"id": 68972715,
"comment_id": 3980951159,
"created_at": "2026-03-01T20:19:31Z",
"repoId": 1108837393,
"pullRequestNo": 2233
}
]
}

View File

@@ -4,6 +4,8 @@ import { describe, it, expect } from "bun:test"
import {
buildCategorySkillsDelegationGuide,
buildUltraworkSection,
buildDeepParallelSection,
buildNonClaudePlannerSection,
type AvailableSkill,
type AvailableCategory,
type AvailableAgent,
@@ -172,4 +174,86 @@ describe("buildUltraworkSection", () => {
})
})
describe("buildDeepParallelSection", () => {
const deepCategory: AvailableCategory = { name: "deep", description: "Autonomous problem-solving" }
const otherCategory: AvailableCategory = { name: "quick", description: "Trivial tasks" }
it("#given non-Claude model with deep category #when building #then returns parallel delegation section", () => {
//#given
const model = "google/gemini-3-pro"
const categories = [deepCategory, otherCategory]
//#when
const result = buildDeepParallelSection(model, categories)
//#then
expect(result).toContain("Deep Parallel Delegation")
expect(result).toContain("EVERY independent unit")
expect(result).toContain("run_in_background=true")
expect(result).toContain("4 independent units")
})
it("#given Claude model #when building #then returns empty", () => {
//#given
const model = "anthropic/claude-opus-4-6"
const categories = [deepCategory]
//#when
const result = buildDeepParallelSection(model, categories)
//#then
expect(result).toBe("")
})
it("#given non-Claude model without deep category #when building #then returns empty", () => {
//#given
const model = "openai/gpt-5.2"
const categories = [otherCategory]
//#when
const result = buildDeepParallelSection(model, categories)
//#then
expect(result).toBe("")
})
})
describe("buildNonClaudePlannerSection", () => {
it("#given non-Claude model #when building #then returns plan agent section", () => {
//#given
const model = "google/gemini-3-pro"
//#when
const result = buildNonClaudePlannerSection(model)
//#then
expect(result).toContain("Plan Agent")
expect(result).toContain("session_id")
expect(result).toContain("Multi-step")
})
it("#given Claude model #when building #then returns empty", () => {
//#given
const model = "anthropic/claude-sonnet-4-6"
//#when
const result = buildNonClaudePlannerSection(model)
//#then
expect(result).toBe("")
})
it("#given GPT model #when building #then returns plan agent section", () => {
//#given
const model = "openai/gpt-5.2"
//#when
const result = buildNonClaudePlannerSection(model)
//#then
expect(result).toContain("Plan Agent")
expect(result).not.toBe("")
})
})

View File

@@ -277,12 +277,11 @@ Briefly announce "Consulting Oracle for [reason]" before invocation.
### Oracle Background Task Policy:
**You MUST collect Oracle results before your final answer. No exceptions.**
**Collect Oracle results before your final answer. No exceptions.**
- Oracle may take several minutes. This is normal and expected.
- When Oracle is running and you finish your own exploration/analysis, your next action is \`background_output(task_id="...")\` on Oracle — NOT delivering a final answer.
- Oracle catches blind spots you cannot see — its value is HIGHEST when you think you don't need it.
- **NEVER** cancel Oracle. **NEVER** use \`background_cancel(all=true)\` when Oracle is running. Cancel disposable tasks (explore, librarian) individually by taskId instead.
- Oracle takes minutes. When done with your own work: **end your response** — wait for the \`<system-reminder>\`.
- Do NOT poll \`background_output\` on a running Oracle. The notification will come.
- Never cancel Oracle.
</Oracle_Usage>`
}
@@ -292,8 +291,8 @@ export function buildHardBlocksSection(): string {
"- Commit without explicit request — **Never**",
"- Speculate about unread code — **Never**",
"- Leave code in broken state after failures — **Never**",
"- `background_cancel(all=true)` when Oracle is running — **Never.** Cancel tasks individually by taskId.",
"- Delivering final answer before collecting Oracle result — **Never.** Always `background_output` Oracle first.",
"- `background_cancel(all=true)` — **Never.** Always cancel individually by taskId.",
"- Delivering final answer before collecting Oracle result — **Never.**",
]
return `## Hard Blocks (NEVER violate)
@@ -308,8 +307,8 @@ export function buildAntiPatternsSection(): string {
"- **Testing**: Deleting failing tests to \"pass\"",
"- **Search**: Firing agents for single-line typos or obvious syntax errors",
"- **Debugging**: Shotgun debugging, random changes",
"- **Background Tasks**: `background_cancel(all=true)` — always cancel individually by taskId",
"- **Oracle**: Skipping Oracle results when Oracle was launched — ALWAYS collect via `background_output`",
"- **Background Tasks**: Polling `background_output` on running tasks — end response and wait for notification",
"- **Oracle**: Delivering answer without collecting Oracle results",
]
return `## Anti-Patterns (BLOCKING violations)
@@ -317,6 +316,22 @@ export function buildAntiPatternsSection(): string {
${patterns.join("\n")}`
}
export function buildNonClaudePlannerSection(model: string): string {
const isNonClaude = !model.toLowerCase().includes('claude')
if (!isNonClaude) return ""
return `### Plan Agent Dependency (Non-Claude)
Multi-step task? **ALWAYS consult Plan Agent first.** Do NOT start implementation without a plan.
- Single-file fix or trivial change → proceed directly
- Anything else (2+ steps, unclear scope, architecture) → \`task(subagent_type="plan", ...)\` FIRST
- Use \`session_id\` to resume the same Plan Agent — ask follow-up questions aggressively
- If ANY part of the task is ambiguous, ask Plan Agent before guessing
Plan Agent returns a structured work breakdown with parallel execution opportunities. Follow it.`
}
export function buildDeepParallelSection(model: string, categories: AvailableCategory[]): string {
const isNonClaude = !model.toLowerCase().includes('claude')
const hasDeepCategory = categories.some(c => c.name === 'deep')
@@ -325,12 +340,13 @@ export function buildDeepParallelSection(model: string, categories: AvailableCat
return `### Deep Parallel Delegation
For implementation tasks, actively decompose and delegate to \`deep\` category agents in parallel.
Delegate EVERY independent unit to a \`deep\` agent in parallel (\`run_in_background=true\`).
If a task decomposes into 4 independent units, spawn 4 agents simultaneously — not 1 at a time.
1. Break the implementation into independent work units
2. Maximize parallel deep agents — spawn one per independent unit (\`run_in_background=true\`)
3. Give each agent a GOAL, not step-by-step instructions — deep agents explore and solve autonomously
4. Collect results, integrate, verify coherence`
1. Decompose the implementation into independent work units
2. Assign one \`deep\` agent per unit — all via \`run_in_background=true\`
3. Give each agent a clear GOAL with success criteria, not step-by-step instructions
4. Collect all results, integrate, verify coherence across units`
}
export function buildUltraworkSection(

View File

@@ -39,6 +39,136 @@ Then ACTUALLY CALL those tools using the JSON tool schema. Produce the tool_use
</TOOL_CALL_MANDATE>`;
}
export function buildGeminiToolGuide(): string {
return `<GEMINI_TOOL_GUIDE>
## Tool Usage Guide — WHEN and HOW to Call Each Tool
You have access to tools via function calling. This guide defines WHEN to call each one.
**Violating these patterns = failed response.**
### Reading & Search (ALWAYS parallelizable — call multiple simultaneously)
| Tool | When to Call | Parallel? |
|---|---|---|
| \`Read\` | Before making ANY claim about file contents. Before editing any file. | ✅ Yes — read multiple files at once |
| \`Grep\` | Finding patterns, imports, usages across codebase. BEFORE claiming "X is used in Y". | ✅ Yes — run multiple greps at once |
| \`Glob\` | Finding files by name/extension pattern. BEFORE claiming "file X exists". | ✅ Yes — run multiple globs at once |
| \`AstGrepSearch\` | Finding code patterns with AST awareness (structural matches). | ✅ Yes |
### Code Intelligence (parallelizable on different files)
| Tool | When to Call | Parallel? |
|---|---|---|
| \`LspDiagnostics\` | **AFTER EVERY edit.** BEFORE claiming task is done. MANDATORY. | ✅ Yes — different files |
| \`LspGotoDefinition\` | Finding where a symbol is defined. | ✅ Yes |
| \`LspFindReferences\` | Finding all usages of a symbol across workspace. | ✅ Yes |
| \`LspSymbols\` | Getting file outline or searching workspace symbols. | ✅ Yes |
### Editing (SEQUENTIAL — must Read first)
| Tool | When to Call | Parallel? |
|---|---|---|
| \`Edit\` | Modifying existing files. MUST Read file first to get LINE#ID anchors. | ❌ After Read |
| \`Write\` | Creating NEW files only. Or full file overwrite. | ❌ Sequential |
### Execution & Delegation
| Tool | When to Call | Parallel? |
|---|---|---|
| \`Bash\` | Running tests, builds, git commands. | ❌ Usually sequential |
| \`Task\` | ANY non-trivial implementation. Research via explore/librarian. | ✅ Fire multiple in background |
### Correct Sequences (MANDATORY — follow these exactly):
1. **Answer about code**: Read → (analyze) → Answer
2. **Edit code**: Read → Edit → LspDiagnostics → Report
3. **Find something**: Grep/Glob (parallel) → Read results → Report
4. **Implement feature**: Task(delegate) → Verify results → Report
5. **Debug**: Read error → Read file → Grep related → Fix → LspDiagnostics
### PARALLEL RULES:
- **Independent reads/searches**: ALWAYS call simultaneously in ONE response
- **Dependent operations**: Call sequentially (Edit AFTER Read, LspDiagnostics AFTER Edit)
- **Background agents**: ALWAYS \`run_in_background=true\`, continue working
</GEMINI_TOOL_GUIDE>`;
}
export function buildGeminiToolCallExamples(): string {
return `<GEMINI_TOOL_CALL_EXAMPLES>
## Correct Tool Calling Patterns — Follow These Examples
### Example 1: User asks about code → Read FIRST, then answer
**User**: "How does the auth middleware work?"
**CORRECT**:
\`\`\`
→ Call Read(filePath="/src/middleware/auth.ts")
→ Call Read(filePath="/src/config/auth.ts") // parallel with above
→ (After reading) Answer based on ACTUAL file contents
\`\`\`
**WRONG**:
\`\`\`
→ "The auth middleware likely validates JWT tokens by..." ← HALLUCINATION. You didn't read the file.
\`\`\`
### Example 2: User asks to edit code → Read, Edit, Verify
**User**: "Fix the type error in user.ts"
**CORRECT**:
\`\`\`
→ Call Read(filePath="/src/models/user.ts")
→ Call LspDiagnostics(filePath="/src/models/user.ts") // parallel with Read
→ (After reading) Call Edit with LINE#ID anchors
→ Call LspDiagnostics(filePath="/src/models/user.ts") // verify fix
→ Report: "Fixed. Diagnostics clean."
\`\`\`
**WRONG**:
\`\`\`
→ Call Edit without reading first ← No LINE#ID anchors = WILL FAIL
→ Skip LspDiagnostics after edit ← UNVERIFIED
\`\`\`
### Example 3: User asks to find something → Search in parallel
**User**: "Where is the database connection configured?"
**CORRECT**:
\`\`\`
→ Call Grep(pattern="database|connection|pool", path="/src") // fires simultaneously
→ Call Glob(pattern="**/*database*") // fires simultaneously
→ Call Glob(pattern="**/*db*") // fires simultaneously
→ (After results) Read the most relevant files
→ Report findings with file paths
\`\`\`
### Example 4: User asks to implement a feature → DELEGATE
**User**: "Add a new /health endpoint to the API"
**CORRECT**:
\`\`\`
→ Call Task(category="quick", load_skills=["typescript-programmer"], prompt="...")
→ (After agent completes) Read changed files to verify
→ Call LspDiagnostics on changed files
→ Report
\`\`\`
**WRONG**:
\`\`\`
→ Write the code yourself ← YOU ARE AN ORCHESTRATOR, NOT AN IMPLEMENTER
\`\`\`
### Example 5: Investigation ≠ Implementation
**User**: "Look into why the tests are failing"
**CORRECT**:
\`\`\`
→ Call Bash(command="npm test") // see actual failures
→ Call Read on failing test files
→ Call Read on source files under test
→ Report: "Tests fail because X. Root cause: Y. Proposed fix: Z."
→ STOP — wait for user to say "fix it"
\`\`\`
**WRONG**:
\`\`\`
→ Start editing source files immediately ← "look into" ≠ "fix"
\`\`\`
</GEMINI_TOOL_CALL_EXAMPLES>`;
}
export function buildGeminiDelegationOverride(): string {
return `<GEMINI_DELEGATION_OVERRIDE>
## DELEGATION IS MANDATORY — YOU ARE NOT AN IMPLEMENTER

View File

@@ -6,6 +6,8 @@ import {
buildGeminiDelegationOverride,
buildGeminiVerificationOverride,
buildGeminiIntentGateEnforcement,
buildGeminiToolGuide,
buildGeminiToolCallExamples,
} from "./sisyphus-gemini-overlays";
const MODE: AgentMode = "all";
@@ -32,6 +34,7 @@ import {
buildHardBlocksSection,
buildAntiPatternsSection,
buildDeepParallelSection,
buildNonClaudePlannerSection,
categorizeTools,
} from "./dynamic-agent-prompt-builder";
@@ -170,6 +173,7 @@ function buildDynamicSisyphusPrompt(
const hardBlocks = buildHardBlocksSection();
const antiPatterns = buildAntiPatternsSection();
const deepParallelSection = buildDeepParallelSection(model, availableCategories);
const nonClaudePlannerSection = buildNonClaudePlannerSection(model);
const taskManagementSection = buildTaskManagementSection(useTaskSystem);
const todoHookNote = useTaskSystem
? "YOUR TASK CREATION WOULD BE TRACKED BY HOOK([SYSTEM REMINDER - TASK CONTINUATION])"
@@ -329,7 +333,7 @@ task(subagent_type="explore", run_in_background=true, load_skills=[], descriptio
// Reference Grep (external)
task(subagent_type="librarian", run_in_background=true, load_skills=[], description="Find JWT security docs", prompt="I'm implementing JWT auth and need current security best practices to choose token storage (httpOnly cookies vs localStorage) and set expiration policy. Find: OWASP auth guidelines, recommended token lifetimes, refresh token rotation strategies, common JWT vulnerabilities. Skip 'what is JWT' tutorials — production security guidance only.")
task(subagent_type="librarian", run_in_background=true, load_skills=[], description="Find Express auth patterns", prompt="I'm building Express auth middleware and need production-quality patterns to structure my middleware chain. Find how established Express apps (1000+ stars) handle: middleware ordering, token refresh, role-based access control, auth error propagation. Skip basic tutorials — I need battle-tested patterns with proper error handling.")
// Continue working immediately. Collect with background_output when needed.
// Continue working immediately. System notifies on completion — collect with background_output then.
// WRONG: Sequential or blocking
result = task(..., run_in_background=false) // Never wait synchronously for explore/librarian
@@ -337,10 +341,10 @@ result = task(..., run_in_background=false) // Never wait synchronously for exp
### Background Result Collection:
1. Launch parallel agents \u2192 receive task_ids
2. Continue immediate work (explore, librarian results)
3. When results needed: \`background_output(task_id="...")\`
4. **If Oracle is running**: STOP all other output. Follow Oracle Completion Protocol in <Oracle_Usage>.
5. Cleanup: Cancel disposable tasks (explore, librarian) individually via \`background_cancel(taskId="...")\`. Never use \`background_cancel(all=true)\`.
2. Continue immediate work
3. System sends \`<system-reminder>\` on each task completion — then call \`background_output(task_id="...")\`
4. Need results not yet ready? **End your response.** The notification will trigger your next turn.
5. Cleanup: Cancel disposable tasks individually via \`background_cancel(taskId="...")\`
### Search Stop Conditions
@@ -364,6 +368,8 @@ STOP searching when:
${categorySkillsGuide}
${nonClaudePlannerSection}
${deepParallelSection}
${delegationTable}
@@ -477,9 +483,8 @@ If verification fails:
3. Report: "Done. Note: found N pre-existing lint errors unrelated to my changes."
### Before Delivering Final Answer:
- **If Oracle is running**: STOP. Follow Oracle Completion Protocol in <Oracle_Usage>. Do NOT deliver any answer.
- Cancel disposable background tasks (explore, librarian) individually via \`background_cancel(taskId="...")\`.
- **Never use \`background_cancel(all=true)\`.**
- If Oracle is running: **end your response** and wait for the completion notification first.
- Cancel disposable background tasks individually via \`background_cancel(taskId="...")\`.
</Behavior_Instructions>
${oracleSection}
@@ -565,12 +570,25 @@ export function createSisyphusAgent(
: buildDynamicSisyphusPrompt(model, [], tools, skills, categories, useTaskSystem);
if (isGeminiModel(model)) {
// 1. Intent gate + tool mandate — early in prompt (after intent verbalization)
prompt = prompt.replace(
"</intent_verbalization>",
`</intent_verbalization>\n\n${buildGeminiIntentGateEnforcement()}\n\n${buildGeminiToolMandate()}`
);
prompt += "\n" + buildGeminiDelegationOverride();
prompt += "\n" + buildGeminiVerificationOverride();
// 2. Tool guide + examples — after tool_usage_rules (where tools are discussed)
prompt = prompt.replace(
"</tool_usage_rules>",
`</tool_usage_rules>\n\n${buildGeminiToolGuide()}\n\n${buildGeminiToolCallExamples()}`
);
// 3. Delegation + verification overrides — before Constraints (NOT at prompt end)
// Gemini suffers from lost-in-the-middle: content at prompt end gets weaker attention.
// Placing these before <Constraints> ensures they're in a high-attention zone.
prompt = prompt.replace(
"<Constraints>",
`${buildGeminiDelegationOverride()}\n\n${buildGeminiVerificationOverride()}\n\n<Constraints>`
);
}
const permission = {

View File

@@ -2,7 +2,7 @@
exports[`generateModelConfig no providers available returns ULTIMATE_FALLBACK for all agents and categories when no providers 1`] = `
{
"$schema": "https://raw.githubusercontent.com/code-yeongyu/oh-my-opencode/master/assets/oh-my-opencode.schema.json",
"$schema": "https://raw.githubusercontent.com/code-yeongyu/oh-my-opencode/dev/assets/oh-my-opencode.schema.json",
"agents": {
"atlas": {
"model": "opencode/glm-4.7-free",
@@ -63,7 +63,7 @@ exports[`generateModelConfig no providers available returns ULTIMATE_FALLBACK fo
exports[`generateModelConfig single native provider uses Claude models when only Claude is available 1`] = `
{
"$schema": "https://raw.githubusercontent.com/code-yeongyu/oh-my-opencode/master/assets/oh-my-opencode.schema.json",
"$schema": "https://raw.githubusercontent.com/code-yeongyu/oh-my-opencode/dev/assets/oh-my-opencode.schema.json",
"agents": {
"atlas": {
"model": "anthropic/claude-sonnet-4-5",
@@ -125,7 +125,7 @@ exports[`generateModelConfig single native provider uses Claude models when only
exports[`generateModelConfig single native provider uses Claude models with isMax20 flag 1`] = `
{
"$schema": "https://raw.githubusercontent.com/code-yeongyu/oh-my-opencode/master/assets/oh-my-opencode.schema.json",
"$schema": "https://raw.githubusercontent.com/code-yeongyu/oh-my-opencode/dev/assets/oh-my-opencode.schema.json",
"agents": {
"atlas": {
"model": "anthropic/claude-sonnet-4-5",
@@ -188,7 +188,7 @@ exports[`generateModelConfig single native provider uses Claude models with isMa
exports[`generateModelConfig single native provider uses OpenAI models when only OpenAI is available 1`] = `
{
"$schema": "https://raw.githubusercontent.com/code-yeongyu/oh-my-opencode/master/assets/oh-my-opencode.schema.json",
"$schema": "https://raw.githubusercontent.com/code-yeongyu/oh-my-opencode/dev/assets/oh-my-opencode.schema.json",
"agents": {
"atlas": {
"model": "openai/gpt-5.2",
@@ -255,7 +255,7 @@ exports[`generateModelConfig single native provider uses OpenAI models when only
exports[`generateModelConfig single native provider uses OpenAI models with isMax20 flag 1`] = `
{
"$schema": "https://raw.githubusercontent.com/code-yeongyu/oh-my-opencode/master/assets/oh-my-opencode.schema.json",
"$schema": "https://raw.githubusercontent.com/code-yeongyu/oh-my-opencode/dev/assets/oh-my-opencode.schema.json",
"agents": {
"atlas": {
"model": "openai/gpt-5.2",
@@ -322,7 +322,7 @@ exports[`generateModelConfig single native provider uses OpenAI models with isMa
exports[`generateModelConfig single native provider uses Gemini models when only Gemini is available 1`] = `
{
"$schema": "https://raw.githubusercontent.com/code-yeongyu/oh-my-opencode/master/assets/oh-my-opencode.schema.json",
"$schema": "https://raw.githubusercontent.com/code-yeongyu/oh-my-opencode/dev/assets/oh-my-opencode.schema.json",
"agents": {
"atlas": {
"model": "google/gemini-3.1-pro-preview",
@@ -383,7 +383,7 @@ exports[`generateModelConfig single native provider uses Gemini models when only
exports[`generateModelConfig single native provider uses Gemini models with isMax20 flag 1`] = `
{
"$schema": "https://raw.githubusercontent.com/code-yeongyu/oh-my-opencode/master/assets/oh-my-opencode.schema.json",
"$schema": "https://raw.githubusercontent.com/code-yeongyu/oh-my-opencode/dev/assets/oh-my-opencode.schema.json",
"agents": {
"atlas": {
"model": "google/gemini-3.1-pro-preview",
@@ -444,7 +444,7 @@ exports[`generateModelConfig single native provider uses Gemini models with isMa
exports[`generateModelConfig all native providers uses preferred models from fallback chains when all natives available 1`] = `
{
"$schema": "https://raw.githubusercontent.com/code-yeongyu/oh-my-opencode/master/assets/oh-my-opencode.schema.json",
"$schema": "https://raw.githubusercontent.com/code-yeongyu/oh-my-opencode/dev/assets/oh-my-opencode.schema.json",
"agents": {
"atlas": {
"model": "anthropic/claude-sonnet-4-5",
@@ -518,7 +518,7 @@ exports[`generateModelConfig all native providers uses preferred models from fal
exports[`generateModelConfig all native providers uses preferred models with isMax20 flag when all natives available 1`] = `
{
"$schema": "https://raw.githubusercontent.com/code-yeongyu/oh-my-opencode/master/assets/oh-my-opencode.schema.json",
"$schema": "https://raw.githubusercontent.com/code-yeongyu/oh-my-opencode/dev/assets/oh-my-opencode.schema.json",
"agents": {
"atlas": {
"model": "anthropic/claude-sonnet-4-5",
@@ -593,7 +593,7 @@ exports[`generateModelConfig all native providers uses preferred models with isM
exports[`generateModelConfig fallback providers uses OpenCode Zen models when only OpenCode Zen is available 1`] = `
{
"$schema": "https://raw.githubusercontent.com/code-yeongyu/oh-my-opencode/master/assets/oh-my-opencode.schema.json",
"$schema": "https://raw.githubusercontent.com/code-yeongyu/oh-my-opencode/dev/assets/oh-my-opencode.schema.json",
"agents": {
"atlas": {
"model": "opencode/kimi-k2.5-free",
@@ -667,7 +667,7 @@ exports[`generateModelConfig fallback providers uses OpenCode Zen models when on
exports[`generateModelConfig fallback providers uses OpenCode Zen models with isMax20 flag 1`] = `
{
"$schema": "https://raw.githubusercontent.com/code-yeongyu/oh-my-opencode/master/assets/oh-my-opencode.schema.json",
"$schema": "https://raw.githubusercontent.com/code-yeongyu/oh-my-opencode/dev/assets/oh-my-opencode.schema.json",
"agents": {
"atlas": {
"model": "opencode/kimi-k2.5-free",
@@ -742,7 +742,7 @@ exports[`generateModelConfig fallback providers uses OpenCode Zen models with is
exports[`generateModelConfig fallback providers uses GitHub Copilot models when only Copilot is available 1`] = `
{
"$schema": "https://raw.githubusercontent.com/code-yeongyu/oh-my-opencode/master/assets/oh-my-opencode.schema.json",
"$schema": "https://raw.githubusercontent.com/code-yeongyu/oh-my-opencode/dev/assets/oh-my-opencode.schema.json",
"agents": {
"atlas": {
"model": "github-copilot/claude-sonnet-4.5",
@@ -808,7 +808,7 @@ exports[`generateModelConfig fallback providers uses GitHub Copilot models when
exports[`generateModelConfig fallback providers uses GitHub Copilot models with isMax20 flag 1`] = `
{
"$schema": "https://raw.githubusercontent.com/code-yeongyu/oh-my-opencode/master/assets/oh-my-opencode.schema.json",
"$schema": "https://raw.githubusercontent.com/code-yeongyu/oh-my-opencode/dev/assets/oh-my-opencode.schema.json",
"agents": {
"atlas": {
"model": "github-copilot/claude-sonnet-4.5",
@@ -875,7 +875,7 @@ exports[`generateModelConfig fallback providers uses GitHub Copilot models with
exports[`generateModelConfig fallback providers uses ZAI model for librarian when only ZAI is available 1`] = `
{
"$schema": "https://raw.githubusercontent.com/code-yeongyu/oh-my-opencode/master/assets/oh-my-opencode.schema.json",
"$schema": "https://raw.githubusercontent.com/code-yeongyu/oh-my-opencode/dev/assets/oh-my-opencode.schema.json",
"agents": {
"atlas": {
"model": "opencode/glm-4.7-free",
@@ -930,7 +930,7 @@ exports[`generateModelConfig fallback providers uses ZAI model for librarian whe
exports[`generateModelConfig fallback providers uses ZAI model for librarian with isMax20 flag 1`] = `
{
"$schema": "https://raw.githubusercontent.com/code-yeongyu/oh-my-opencode/master/assets/oh-my-opencode.schema.json",
"$schema": "https://raw.githubusercontent.com/code-yeongyu/oh-my-opencode/dev/assets/oh-my-opencode.schema.json",
"agents": {
"atlas": {
"model": "opencode/glm-4.7-free",
@@ -985,7 +985,7 @@ exports[`generateModelConfig fallback providers uses ZAI model for librarian wit
exports[`generateModelConfig mixed provider scenarios uses Claude + OpenCode Zen combination 1`] = `
{
"$schema": "https://raw.githubusercontent.com/code-yeongyu/oh-my-opencode/master/assets/oh-my-opencode.schema.json",
"$schema": "https://raw.githubusercontent.com/code-yeongyu/oh-my-opencode/dev/assets/oh-my-opencode.schema.json",
"agents": {
"atlas": {
"model": "opencode/kimi-k2.5-free",
@@ -1059,7 +1059,7 @@ exports[`generateModelConfig mixed provider scenarios uses Claude + OpenCode Zen
exports[`generateModelConfig mixed provider scenarios uses OpenAI + Copilot combination 1`] = `
{
"$schema": "https://raw.githubusercontent.com/code-yeongyu/oh-my-opencode/master/assets/oh-my-opencode.schema.json",
"$schema": "https://raw.githubusercontent.com/code-yeongyu/oh-my-opencode/dev/assets/oh-my-opencode.schema.json",
"agents": {
"atlas": {
"model": "github-copilot/claude-sonnet-4.5",
@@ -1133,7 +1133,7 @@ exports[`generateModelConfig mixed provider scenarios uses OpenAI + Copilot comb
exports[`generateModelConfig mixed provider scenarios uses Claude + ZAI combination (librarian uses ZAI) 1`] = `
{
"$schema": "https://raw.githubusercontent.com/code-yeongyu/oh-my-opencode/master/assets/oh-my-opencode.schema.json",
"$schema": "https://raw.githubusercontent.com/code-yeongyu/oh-my-opencode/dev/assets/oh-my-opencode.schema.json",
"agents": {
"atlas": {
"model": "anthropic/claude-sonnet-4-5",
@@ -1194,7 +1194,7 @@ exports[`generateModelConfig mixed provider scenarios uses Claude + ZAI combinat
exports[`generateModelConfig mixed provider scenarios uses Gemini + Claude combination (explore uses Gemini) 1`] = `
{
"$schema": "https://raw.githubusercontent.com/code-yeongyu/oh-my-opencode/master/assets/oh-my-opencode.schema.json",
"$schema": "https://raw.githubusercontent.com/code-yeongyu/oh-my-opencode/dev/assets/oh-my-opencode.schema.json",
"agents": {
"atlas": {
"model": "anthropic/claude-sonnet-4-5",
@@ -1260,7 +1260,7 @@ exports[`generateModelConfig mixed provider scenarios uses Gemini + Claude combi
exports[`generateModelConfig mixed provider scenarios uses all fallback providers together 1`] = `
{
"$schema": "https://raw.githubusercontent.com/code-yeongyu/oh-my-opencode/master/assets/oh-my-opencode.schema.json",
"$schema": "https://raw.githubusercontent.com/code-yeongyu/oh-my-opencode/dev/assets/oh-my-opencode.schema.json",
"agents": {
"atlas": {
"model": "opencode/kimi-k2.5-free",
@@ -1334,7 +1334,7 @@ exports[`generateModelConfig mixed provider scenarios uses all fallback provider
exports[`generateModelConfig mixed provider scenarios uses all providers together 1`] = `
{
"$schema": "https://raw.githubusercontent.com/code-yeongyu/oh-my-opencode/master/assets/oh-my-opencode.schema.json",
"$schema": "https://raw.githubusercontent.com/code-yeongyu/oh-my-opencode/dev/assets/oh-my-opencode.schema.json",
"agents": {
"atlas": {
"model": "opencode/kimi-k2.5-free",
@@ -1408,7 +1408,7 @@ exports[`generateModelConfig mixed provider scenarios uses all providers togethe
exports[`generateModelConfig mixed provider scenarios uses all providers with isMax20 flag 1`] = `
{
"$schema": "https://raw.githubusercontent.com/code-yeongyu/oh-my-opencode/master/assets/oh-my-opencode.schema.json",
"$schema": "https://raw.githubusercontent.com/code-yeongyu/oh-my-opencode/dev/assets/oh-my-opencode.schema.json",
"agents": {
"atlas": {
"model": "opencode/kimi-k2.5-free",

View File

@@ -277,7 +277,7 @@ describe("generateOmoConfig - model fallback system", () => {
const result = generateOmoConfig(config)
// #then Sisyphus is omitted (requires all fallback providers)
expect(result.$schema).toBe("https://raw.githubusercontent.com/code-yeongyu/oh-my-opencode/master/assets/oh-my-opencode.schema.json")
expect(result.$schema).toBe("https://raw.githubusercontent.com/code-yeongyu/oh-my-opencode/dev/assets/oh-my-opencode.schema.json")
expect((result.agents as Record<string, { model: string }>).sisyphus).toBeUndefined()
})

View File

@@ -515,7 +515,7 @@ describe("generateModelConfig", () => {
// #then should include correct schema URL
expect(result.$schema).toBe(
"https://raw.githubusercontent.com/code-yeongyu/oh-my-opencode/master/assets/oh-my-opencode.schema.json"
"https://raw.githubusercontent.com/code-yeongyu/oh-my-opencode/dev/assets/oh-my-opencode.schema.json"
)
})
})

View File

@@ -19,7 +19,7 @@ export type { GeneratedOmoConfig } from "./model-fallback-types"
const ZAI_MODEL = "zai-coding-plan/glm-4.7"
const ULTIMATE_FALLBACK = "opencode/glm-4.7-free"
const SCHEMA_URL = "https://raw.githubusercontent.com/code-yeongyu/oh-my-opencode/master/assets/oh-my-opencode.schema.json"
const SCHEMA_URL = "https://raw.githubusercontent.com/code-yeongyu/oh-my-opencode/dev/assets/oh-my-opencode.schema.json"

View File

@@ -49,6 +49,7 @@ export const HookNameSchema = z.enum([
"write-existing-file-guard",
"anthropic-effort",
"hashline-read-enhancer",
"read-image-resizer",
])
export type HookName = z.infer<typeof HookNameSchema>

View File

@@ -33,7 +33,7 @@ export const OhMyOpenCodeConfigSchema = z.object({
disabled_commands: z.array(BuiltinCommandNameSchema).optional(),
/** Disable specific tools by name (e.g., ["todowrite", "todoread"]) */
disabled_tools: z.array(z.string()).optional(),
/** Enable hashline_edit tool/hook integrations (default: true at call site) */
/** Enable hashline_edit tool/hook integrations (default: false) */
hashline_edit: z.boolean().optional(),
/** Enable model fallback on API errors (default: false). Set to true to enable automatic model switching when model errors occur. */
model_fallback: z.boolean().optional(),

View File

@@ -80,12 +80,11 @@ export interface PluginManifest {
/**
* Hooks configuration
*/
export interface HookEntry {
type: "command" | "prompt" | "agent"
command?: string
prompt?: string
agent?: string
}
// Discriminated union on `type`: "command" runs a shell command, "prompt" and
// "agent" feed text/agent names into the model flow, and "http" POSTs the hook
// payload to `url`. For "http", header values may reference env vars that are
// interpolated only when listed in `allowedEnvVars`; `timeout` appears to be
// in seconds to match the HTTP hook executor — TODO confirm at call sites.
export type HookEntry =
  | { type: "command"; command?: string }
  | { type: "prompt"; prompt?: string }
  | { type: "agent"; agent?: string }
  | { type: "http"; url: string; headers?: Record<string, string>; allowedEnvVars?: string[]; timeout?: number }
export interface HookMatcher {
matcher?: string

View File

@@ -205,6 +205,45 @@ describe("ContextCollector", () => {
const ids = pending.entries.map((e) => e.id)
expect(ids).toEqual(["first", "second", "third"])
})
it("keeps registration order even when Date.now values are not monotonic", () => {
// given
const sessionID = "ses_order_non_monotonic_time"
const originalDateNow = Date.now
const mockedTimestamps = [300, 100, 200]
let timestampIndex = 0
Date.now = () => mockedTimestamps[timestampIndex++] ?? 0
try {
collector.register(sessionID, {
id: "first",
source: "custom",
content: "First",
priority: "normal",
})
collector.register(sessionID, {
id: "second",
source: "custom",
content: "Second",
priority: "normal",
})
collector.register(sessionID, {
id: "third",
source: "custom",
content: "Third",
priority: "normal",
})
} finally {
Date.now = originalDateNow
}
// when
const pending = collector.getPending(sessionID)
// then
const ids = pending.entries.map((entry) => entry.id)
expect(ids).toEqual(["first", "second", "third"])
})
})
describe("consume", () => {

View File

@@ -14,6 +14,8 @@ const PRIORITY_ORDER: Record<ContextPriority, number> = {
const CONTEXT_SEPARATOR = "\n\n---\n\n"
let registrationCounter = 0
export class ContextCollector {
private sessions: Map<string, Map<string, ContextEntry>> = new Map()
@@ -30,7 +32,7 @@ export class ContextCollector {
source: options.source,
content: options.content,
priority: options.priority ?? "normal",
timestamp: Date.now(),
registrationOrder: ++registrationCounter,
metadata: options.metadata,
}
@@ -77,7 +79,7 @@ export class ContextCollector {
return entries.sort((a, b) => {
const priorityDiff = PRIORITY_ORDER[a.priority] - PRIORITY_ORDER[b.priority]
if (priorityDiff !== 0) return priorityDiff
return a.timestamp - b.timestamp
return a.registrationOrder - b.registrationOrder
})
}
}

View File

@@ -64,6 +64,51 @@ describe("createContextInjectorMessagesTransformHook", () => {
expect(output.messages[2].parts[1].text).toBe("Second message")
})
it("uses deterministic synthetic part ID across repeated transforms", async () => {
// given
const hook = createContextInjectorMessagesTransformHook(collector)
const sessionID = "ses_transform_deterministic"
const baseMessage = createMockMessage("user", "Stable message", sessionID)
collector.register(sessionID, {
id: "ctx-1",
source: "keyword-detector",
content: "Injected context",
})
const firstOutput = {
messages: [structuredClone(baseMessage)],
}
// when
await hook["experimental.chat.messages.transform"]!({}, firstOutput)
// then
const firstSyntheticPart = firstOutput.messages[0].parts[0]
expect(
"synthetic" in firstSyntheticPart && firstSyntheticPart.synthetic === true
).toBe(true)
// given
collector.register(sessionID, {
id: "ctx-2",
source: "keyword-detector",
content: "Injected context",
})
const secondOutput = {
messages: [structuredClone(baseMessage)],
}
// when
await hook["experimental.chat.messages.transform"]!({}, secondOutput)
// then
const secondSyntheticPart = secondOutput.messages[0].parts[0]
expect(
"synthetic" in secondSyntheticPart && secondSyntheticPart.synthetic === true
).toBe(true)
expect(secondSyntheticPart.id).toBe(firstSyntheticPart.id)
})
it("does nothing when no pending context", async () => {
// given
const hook = createContextInjectorMessagesTransformHook(collector)

View File

@@ -148,7 +148,7 @@ export function createContextInjectorMessagesTransformHook(
// synthetic part pattern (minimal fields)
const syntheticPart = {
id: `synthetic_hook_${Date.now()}`,
id: `synthetic_hook_${sessionID}`,
messageID: lastUserMessage.info.id,
sessionID: (lastUserMessage.info as { sessionID?: string }).sessionID ?? "",
type: "text" as const,

View File

@@ -27,8 +27,8 @@ export interface ContextEntry {
content: string
/** Priority for ordering (default: normal) */
priority: ContextPriority
/** Timestamp when registered */
timestamp: number
/** Monotonic order when registered */
registrationOrder: number
/** Optional metadata for debugging/logging */
metadata?: Record<string, unknown>
}

View File

@@ -4,6 +4,8 @@ import {
findFirstMessageWithAgent,
findNearestMessageWithFieldsFromSDK,
findFirstMessageWithAgentFromSDK,
generateMessageId,
generatePartId,
injectHookMessage,
} from "./injector"
import { isSqliteBackend, resetSqliteBackendCache } from "../../shared/opencode-storage-detection"
@@ -192,6 +194,40 @@ describe("findFirstMessageWithAgentFromSDK", () => {
})
})
describe("generateMessageId", () => {
it("returns deterministic sequential IDs with fixed format", () => {
// given
const format = /^msg_[0-9a-f]{8}_\d{6}$/
// when
const firstId = generateMessageId()
const secondId = generateMessageId()
// then
expect(firstId).toMatch(format)
expect(secondId).toMatch(format)
expect(secondId.split("_")[1]).toBe(firstId.split("_")[1])
expect(Number(secondId.split("_")[2])).toBe(Number(firstId.split("_")[2]) + 1)
})
})
describe("generatePartId", () => {
it("returns deterministic sequential IDs with fixed format", () => {
// given
const format = /^prt_[0-9a-f]{8}_\d{6}$/
// when
const firstId = generatePartId()
const secondId = generatePartId()
// then
expect(firstId).toMatch(format)
expect(secondId).toMatch(format)
expect(secondId.split("_")[1]).toBe(firstId.split("_")[1])
expect(Number(secondId.split("_")[2])).toBe(Number(firstId.split("_")[2]) + 1)
})
})
describe("injectHookMessage", () => {
beforeEach(() => {
vi.clearAllMocks()

View File

@@ -1,4 +1,5 @@
import { existsSync, mkdirSync, readFileSync, readdirSync, writeFileSync } from "node:fs"
import { randomBytes } from "node:crypto"
import { join } from "node:path"
import type { PluginInput } from "@opencode-ai/plugin"
import { MESSAGE_STORAGE, PART_STORAGE } from "./constants"
@@ -29,6 +30,10 @@ interface SDKMessage {
}
}
const processPrefix = randomBytes(4).toString("hex")
let messageCounter = 0
let partCounter = 0
function convertSDKMessageToStoredMessage(msg: SDKMessage): StoredMessage | null {
const info = msg.info
if (!info) return null
@@ -204,16 +209,12 @@ export function findFirstMessageWithAgent(messageDir: string): string | null {
return null
}
function generateMessageId(): string {
const timestamp = Date.now().toString(16)
const random = Math.random().toString(36).substring(2, 14)
return `msg_${timestamp}${random}`
export function generateMessageId(): string {
return `msg_${processPrefix}_${String(++messageCounter).padStart(6, "0")}`
}
function generatePartId(): string {
const timestamp = Date.now().toString(16)
const random = Math.random().toString(36).substring(2, 10)
return `prt_${timestamp}${random}`
export function generatePartId(): string {
return `prt_${processPrefix}_${String(++partCounter).padStart(6, "0")}`
}
function getOrCreateMessageDir(sessionID: string): string {

View File

@@ -6,6 +6,8 @@ import {
} from "./storage";
import { TARGET_TOOLS, AGENT_TOOLS, REMINDER_MESSAGE } from "./constants";
import type { AgentUsageState } from "./types";
import { getSessionAgent } from "../../features/claude-code-session-state";
import { getAgentConfigKey } from "../../shared/agent-display-names";
interface ToolExecuteInput {
tool: string;
@@ -26,6 +28,23 @@ interface EventInput {
};
}
/**
* Only orchestrator agents should receive usage reminders.
* Subagents (explore, librarian, oracle, etc.) are the targets of delegation,
* so reminding them to delegate to themselves is counterproductive.
*/
// Config keys of agents that orchestrate by delegating work to subagents.
const ORCHESTRATOR_AGENTS = new Set([
  "sisyphus",
  "sisyphus-junior",
  "atlas",
  "hephaestus",
  "prometheus",
]);

// Normalizes the (possibly display) agent name to its config key before the
// membership check, so display names and config keys both match.
function isOrchestratorAgent(agentName: string): boolean {
  return ORCHESTRATOR_AGENTS.has(getAgentConfigKey(agentName));
}
export function createAgentUsageReminderHook(_ctx: PluginInput) {
const sessionStates = new Map<string, AgentUsageState>();
@@ -60,6 +79,12 @@ export function createAgentUsageReminderHook(_ctx: PluginInput) {
output: ToolExecuteOutput,
) => {
const { tool, sessionID } = input;
const agent = getSessionAgent(sessionID);
if (agent && !isOrchestratorAgent(agent)) {
return;
}
const toolLower = tool.toLowerCase();
if (AGENT_TOOLS.has(toolLower)) {

View File

@@ -1,6 +1,6 @@
import type { PluginInput } from "@opencode-ai/plugin"
import { getPlanProgress, readBoulderState } from "../../features/boulder-state"
import { subagentSessions } from "../../features/claude-code-session-state"
import { getSessionAgent, subagentSessions } from "../../features/claude-code-session-state"
import { log } from "../../shared/logger"
import { getAgentConfigKey } from "../../shared/agent-display-names"
import { HOOK_NAME } from "./hook-name"
@@ -97,8 +97,10 @@ export function createAtlasEventHandler(input: {
return
}
const sessionAgent = getSessionAgent(sessionID)
const lastAgent = await getLastAgentFromSession(sessionID, ctx.client)
const lastAgentKey = getAgentConfigKey(lastAgent ?? "")
const effectiveAgent = sessionAgent ?? lastAgent
const lastAgentKey = getAgentConfigKey(effectiveAgent ?? "")
const requiredAgent = getAgentConfigKey(boulderState.agent ?? "atlas")
const lastAgentMatchesRequired = lastAgentKey === requiredAgent
const boulderAgentDefaultsToAtlas = requiredAgent === "atlas"
@@ -108,7 +110,7 @@ export function createAtlasEventHandler(input: {
if (!agentMatches) {
log(`[${HOOK_NAME}] Skipped: last agent does not match boulder agent`, {
sessionID,
lastAgent: lastAgent ?? "unknown",
lastAgent: effectiveAgent ?? "unknown",
requiredAgent,
})
return

View File

@@ -9,7 +9,7 @@ import {
readBoulderState,
} from "../../features/boulder-state"
import type { BoulderState } from "../../features/boulder-state"
import { _resetForTesting, subagentSessions } from "../../features/claude-code-session-state"
import { _resetForTesting, subagentSessions, updateSessionAgent } from "../../features/claude-code-session-state"
const TEST_STORAGE_ROOT = join(tmpdir(), `atlas-message-storage-${randomUUID()}`)
const TEST_MESSAGE_STORAGE = join(TEST_STORAGE_ROOT, "message")
@@ -933,7 +933,7 @@ describe("atlas hook", () => {
expect(callArgs.body.parts[0].text).toContain("2 remaining")
})
test("should inject when last agent is sisyphus and boulder targets atlas explicitly", async () => {
test("should inject when last agent is sisyphus and boulder targets atlas explicitly", async () => {
// given - boulder explicitly set to atlas, but last agent is sisyphus (initial state after /start-work)
const planPath = join(TEST_DIR, "test-plan.md")
writeFileSync(planPath, "# Plan\n- [ ] Task 1\n- [ ] Task 2")
@@ -1386,5 +1386,38 @@ describe("atlas hook", () => {
// then - should call prompt because session state was cleaned
expect(mockInput._promptMock).toHaveBeenCalled()
})
test("should inject when session agent was updated to atlas by start-work even if message storage agent differs", async () => {
// given - boulder targets atlas, but nearest stored message still says hephaestus
const planPath = join(TEST_DIR, "test-plan.md")
writeFileSync(planPath, "# Plan\n- [ ] Task 1\n- [ ] Task 2")
const state: BoulderState = {
active_plan: planPath,
started_at: "2026-01-02T10:00:00Z",
session_ids: [MAIN_SESSION_ID],
plan_name: "test-plan",
agent: "atlas",
}
writeBoulderState(TEST_DIR, state)
cleanupMessageStorage(MAIN_SESSION_ID)
setupMessageStorage(MAIN_SESSION_ID, "hephaestus")
updateSessionAgent(MAIN_SESSION_ID, "atlas")
const mockInput = createMockPluginInput()
const hook = createAtlasHook(mockInput)
// when
await hook.handler({
event: {
type: "session.idle",
properties: { sessionID: MAIN_SESSION_ID },
},
})
// then - should continue because start-work updated session agent to atlas
expect(mockInput._promptMock).toHaveBeenCalled()
})
})
})

View File

@@ -1,12 +1,12 @@
import { join } from "path"
import { existsSync } from "fs"
import { getClaudeConfigDir } from "../../shared"
import type { ClaudeHooksConfig, HookMatcher, HookCommand } from "./types"
import type { ClaudeHooksConfig, HookMatcher, HookAction } from "./types"
interface RawHookMatcher {
matcher?: string
pattern?: string
hooks: HookCommand[]
hooks: HookAction[]
}
interface RawClaudeHooksConfig {

View File

@@ -0,0 +1,27 @@
import type { HookAction } from "./types"
import type { CommandResult } from "../../shared/command-executor/execute-hook-command"
import { executeHookCommand } from "../../shared"
import { executeHttpHook } from "./execute-http-hook"
import { DEFAULT_CONFIG } from "./plugin-config"
/**
 * Derives the identifier used to match a hook against the disable config:
 * the full URL for HTTP hooks, or the command's basename for command hooks
 * (falling back to the whole command string when the basename is empty,
 * e.g. for paths ending in "/").
 */
export function getHookIdentifier(hook: HookAction): string {
  if (hook.type === "http") {
    return hook.url
  }
  const segments = hook.command.split("/")
  const basename = segments[segments.length - 1]
  return basename || hook.command
}
/**
 * Routes a hook action to the matching executor: HTTP hooks are POSTed via
 * executeHttpHook (cwd is irrelevant for them), while command hooks run
 * through the shared shell executor with the plugin's default shell settings.
 */
export async function dispatchHook(
  hook: HookAction,
  stdinJson: string,
  cwd: string
): Promise<CommandResult> {
  if (hook.type !== "http") {
    // Shell execution honors the plugin-wide zsh configuration.
    const shellOptions = { forceZsh: DEFAULT_CONFIG.forceZsh, zshPath: DEFAULT_CONFIG.zshPath }
    return executeHookCommand(hook.command, stdinJson, cwd, shellOptions)
  }
  return executeHttpHook(hook, stdinJson)
}

View File

@@ -0,0 +1,312 @@
import { describe, it, expect, mock, beforeEach, afterEach } from "bun:test"
import type { HookHttp } from "./types"
// Shared fetch mock; default implementation answers HTTP 200 with body "{}".
const mockFetch = mock(() =>
  Promise.resolve(new Response(JSON.stringify({}), { status: 200 }))
)
// Real global fetch, restored after every test.
const originalFetch = globalThis.fetch
describe("executeHttpHook", () => {
beforeEach(() => {
globalThis.fetch = mockFetch as unknown as typeof fetch
mockFetch.mockReset()
mockFetch.mockImplementation(() =>
Promise.resolve(new Response(JSON.stringify({}), { status: 200 }))
)
})
afterEach(() => {
globalThis.fetch = originalFetch
})
describe("#given a basic HTTP hook", () => {
const hook: HookHttp = {
type: "http",
url: "http://localhost:8080/hooks/pre-tool-use",
}
const stdinData = JSON.stringify({ hook_event_name: "PreToolUse", tool_name: "Bash" })
it("#when executed #then sends POST request with correct body", async () => {
const { executeHttpHook } = await import("./execute-http-hook")
await executeHttpHook(hook, stdinData)
expect(mockFetch).toHaveBeenCalledTimes(1)
const [url, options] = mockFetch.mock.calls[0] as unknown as [string, RequestInit]
expect(url).toBe("http://localhost:8080/hooks/pre-tool-use")
expect(options.method).toBe("POST")
expect(options.body).toBe(stdinData)
})
it("#when executed #then sets content-type to application/json", async () => {
const { executeHttpHook } = await import("./execute-http-hook")
await executeHttpHook(hook, stdinData)
const [, options] = mockFetch.mock.calls[0] as unknown as [string, RequestInit]
const headers = options.headers as Record<string, string>
expect(headers["Content-Type"]).toBe("application/json")
})
})
describe("#given an HTTP hook with headers and env var interpolation", () => {
const originalEnv = process.env
beforeEach(() => {
process.env = { ...originalEnv, MY_TOKEN: "secret-123", OTHER_VAR: "other-value" }
})
afterEach(() => {
process.env = originalEnv
})
it("#when allowedEnvVars includes the var #then interpolates env var in headers", async () => {
const hook: HookHttp = {
type: "http",
url: "http://localhost:8080/hooks",
headers: { Authorization: "Bearer $MY_TOKEN" },
allowedEnvVars: ["MY_TOKEN"],
}
const { executeHttpHook } = await import("./execute-http-hook")
await executeHttpHook(hook, "{}")
const [, options] = mockFetch.mock.calls[0] as unknown as [string, RequestInit]
const headers = options.headers as Record<string, string>
expect(headers["Authorization"]).toBe("Bearer secret-123")
})
it("#when env var uses ${VAR} syntax #then interpolates correctly", async () => {
const hook: HookHttp = {
type: "http",
url: "http://localhost:8080/hooks",
headers: { Authorization: "Bearer ${MY_TOKEN}" },
allowedEnvVars: ["MY_TOKEN"],
}
const { executeHttpHook } = await import("./execute-http-hook")
await executeHttpHook(hook, "{}")
const [, options] = mockFetch.mock.calls[0] as unknown as [string, RequestInit]
const headers = options.headers as Record<string, string>
expect(headers["Authorization"]).toBe("Bearer secret-123")
})
it("#when env var not in allowedEnvVars #then replaces with empty string", async () => {
const hook: HookHttp = {
type: "http",
url: "http://localhost:8080/hooks",
headers: { Authorization: "Bearer $OTHER_VAR" },
allowedEnvVars: ["MY_TOKEN"],
}
const { executeHttpHook } = await import("./execute-http-hook")
await executeHttpHook(hook, "{}")
const [, options] = mockFetch.mock.calls[0] as unknown as [string, RequestInit]
const headers = options.headers as Record<string, string>
expect(headers["Authorization"]).toBe("Bearer ")
})
})
describe("#given an HTTP hook with timeout", () => {
it("#when timeout specified #then passes AbortSignal with timeout", async () => {
const hook: HookHttp = {
type: "http",
url: "http://localhost:8080/hooks",
timeout: 10,
}
const { executeHttpHook } = await import("./execute-http-hook")
await executeHttpHook(hook, "{}")
const [, options] = mockFetch.mock.calls[0] as unknown as [string, RequestInit]
expect(options.signal).toBeDefined()
})
})
describe("#given hook URL scheme validation", () => {
it("#when URL uses file:// scheme #then rejects with exit code 1", async () => {
const hook: HookHttp = { type: "http", url: "file:///etc/passwd" }
const { executeHttpHook } = await import("./execute-http-hook")
const result = await executeHttpHook(hook, "{}")
expect(result.exitCode).toBe(1)
expect(result.stderr).toContain('HTTP hook URL scheme "file:" is not allowed')
expect(mockFetch).not.toHaveBeenCalled()
})
it("#when URL uses data: scheme #then rejects with exit code 1", async () => {
const hook: HookHttp = { type: "http", url: "data:text/plain,hello" }
const { executeHttpHook } = await import("./execute-http-hook")
const result = await executeHttpHook(hook, "{}")
expect(result.exitCode).toBe(1)
expect(result.stderr).toContain('HTTP hook URL scheme "data:" is not allowed')
expect(mockFetch).not.toHaveBeenCalled()
})
it("#when URL uses ftp:// scheme #then rejects with exit code 1", async () => {
const hook: HookHttp = { type: "http", url: "ftp://localhost/hooks" }
const { executeHttpHook } = await import("./execute-http-hook")
const result = await executeHttpHook(hook, "{}")
expect(result.exitCode).toBe(1)
expect(result.stderr).toContain('HTTP hook URL scheme "ftp:" is not allowed')
expect(mockFetch).not.toHaveBeenCalled()
})
it("#when URL uses http:// scheme #then allows hook execution", async () => {
const hook: HookHttp = { type: "http", url: "http://localhost:8080/hooks" }
const { executeHttpHook } = await import("./execute-http-hook")
const result = await executeHttpHook(hook, "{}")
expect(result.exitCode).toBe(0)
expect(mockFetch).toHaveBeenCalledTimes(1)
})
it("#when URL uses https:// scheme #then allows hook execution", async () => {
const hook: HookHttp = { type: "http", url: "https://example.com/hooks" }
const { executeHttpHook } = await import("./execute-http-hook")
const result = await executeHttpHook(hook, "{}")
expect(result.exitCode).toBe(0)
expect(mockFetch).toHaveBeenCalledTimes(1)
})
it("#when URL is invalid #then rejects with exit code 1", async () => {
const hook: HookHttp = { type: "http", url: "not-a-valid-url" }
const { executeHttpHook } = await import("./execute-http-hook")
const result = await executeHttpHook(hook, "{}")
expect(result.exitCode).toBe(1)
expect(result.stderr).toContain("HTTP hook URL is invalid: not-a-valid-url")
expect(mockFetch).not.toHaveBeenCalled()
})
})
describe("#given a successful HTTP response", () => {
it("#when response has JSON body #then returns parsed output", async () => {
mockFetch.mockImplementation(() =>
Promise.resolve(
new Response(JSON.stringify({ decision: "allow", reason: "ok" }), {
status: 200,
headers: { "Content-Type": "application/json" },
})
)
)
const hook: HookHttp = { type: "http", url: "http://localhost:8080/hooks" }
const { executeHttpHook } = await import("./execute-http-hook")
const result = await executeHttpHook(hook, "{}")
expect(result.exitCode).toBe(0)
expect(result.stdout).toContain('"decision":"allow"')
})
})
describe("#given a failing HTTP response", () => {
it("#when response status is 4xx #then returns exit code 1", async () => {
mockFetch.mockImplementation(() =>
Promise.resolve(new Response("Bad Request", { status: 400 }))
)
const hook: HookHttp = { type: "http", url: "http://localhost:8080/hooks" }
const { executeHttpHook } = await import("./execute-http-hook")
const result = await executeHttpHook(hook, "{}")
expect(result.exitCode).toBe(1)
expect(result.stderr).toContain("400")
})
it("#when fetch throws network error #then returns exit code 1", async () => {
mockFetch.mockImplementation(() => Promise.reject(new Error("ECONNREFUSED")))
const hook: HookHttp = { type: "http", url: "http://localhost:8080/hooks" }
const { executeHttpHook } = await import("./execute-http-hook")
const result = await executeHttpHook(hook, "{}")
expect(result.exitCode).toBe(1)
expect(result.stderr).toContain("ECONNREFUSED")
})
})
describe("#given response with exit code in JSON", () => {
it("#when JSON contains exitCode 2 #then uses that exit code", async () => {
mockFetch.mockImplementation(() =>
Promise.resolve(
new Response(JSON.stringify({ exitCode: 2, stderr: "blocked" }), {
status: 200,
headers: { "Content-Type": "application/json" },
})
)
)
const hook: HookHttp = { type: "http", url: "http://localhost:8080/hooks" }
const { executeHttpHook } = await import("./execute-http-hook")
const result = await executeHttpHook(hook, "{}")
expect(result.exitCode).toBe(2)
})
})
})
describe("interpolateEnvVars", () => {
const originalEnv = process.env
beforeEach(() => {
process.env = { ...originalEnv, TOKEN: "abc", SECRET: "xyz" }
})
afterEach(() => {
process.env = originalEnv
})
it("#given $VAR syntax #when var is allowed #then interpolates", async () => {
const { interpolateEnvVars } = await import("./execute-http-hook")
const result = interpolateEnvVars("Bearer $TOKEN", ["TOKEN"])
expect(result).toBe("Bearer abc")
})
it("#given ${VAR} syntax #when var is allowed #then interpolates", async () => {
const { interpolateEnvVars } = await import("./execute-http-hook")
const result = interpolateEnvVars("Bearer ${TOKEN}", ["TOKEN"])
expect(result).toBe("Bearer abc")
})
it("#given multiple vars #when some not allowed #then only interpolates allowed ones", async () => {
const { interpolateEnvVars } = await import("./execute-http-hook")
const result = interpolateEnvVars("$TOKEN:$SECRET", ["TOKEN"])
expect(result).toBe("abc:")
})
it("#given ${VAR} where value contains $ANOTHER #when both allowed #then does not double-interpolate", async () => {
process.env = { ...process.env, TOKEN: "val$SECRET", SECRET: "oops" }
const { interpolateEnvVars } = await import("./execute-http-hook")
const result = interpolateEnvVars("Bearer ${TOKEN}", ["TOKEN", "SECRET"])
expect(result).toBe("Bearer val$SECRET")
})
it("#given no allowedEnvVars #when called #then replaces all with empty", async () => {
const { interpolateEnvVars } = await import("./execute-http-hook")
const result = interpolateEnvVars("Bearer $TOKEN", [])
expect(result).toBe("Bearer ")
})
})

View File

@@ -0,0 +1,92 @@
import type { HookHttp } from "./types"
import type { CommandResult } from "../../shared/command-executor/execute-hook-command"
/** Default request timeout in seconds when a hook does not specify one. */
const DEFAULT_HTTP_HOOK_TIMEOUT_S = 30

/** Only web schemes may reach fetch(); blocks SSRF via file:, data:, ftp:, etc. */
const ALLOWED_SCHEMES = new Set(["http:", "https:"])

/**
 * Interpolates `$VAR` / `${VAR}` references in `value` from process.env.
 * Only names listed in `allowedEnvVars` are substituted; disallowed or unset
 * variables are replaced with the empty string. Substitution is a single pass
 * over the original string, so env values containing `$OTHER` are never
 * re-expanded.
 */
export function interpolateEnvVars(
  value: string,
  allowedEnvVars: string[]
): string {
  const allowedSet = new Set(allowedEnvVars)
  return value.replace(/\$\{(\w+)\}|\$(\w+)/g, (_match, bracedVar: string | undefined, bareVar: string | undefined) => {
    // Exactly one of the two capture groups matches per occurrence.
    const varName = (bracedVar ?? bareVar) as string
    if (allowedSet.has(varName)) {
      return process.env[varName] ?? ""
    }
    return ""
  })
}

/**
 * Builds the outgoing request headers: always sends a JSON content-type, then
 * layers on the hook's custom headers with env-var interpolation applied
 * (restricted to the hook's allow-list).
 */
function resolveHeaders(
  hook: HookHttp
): Record<string, string> {
  const headers: Record<string, string> = {
    "Content-Type": "application/json",
  }
  if (!hook.headers) return headers
  const allowedEnvVars = hook.allowedEnvVars ?? []
  for (const [key, value] of Object.entries(hook.headers)) {
    headers[key] = interpolateEnvVars(value, allowedEnvVars)
  }
  return headers
}

/**
 * Executes an HTTP hook by POSTing the hook's stdin payload to `hook.url`.
 *
 * - Invalid URLs and non-http(s) schemes are rejected (exit code 1) before
 *   any network activity, preventing SSRF via file://, data:, ftp://, etc.
 * - Non-2xx responses and network/timeout errors map to exit code 1.
 * - If the response body is JSON with a numeric `exitCode`, that code is
 *   used; a string `stderr` in the same JSON is propagated so blocking hooks
 *   (exit code 2) can surface their reason to callers (previously dropped).
 * - Any other body is returned verbatim as stdout with exit code 0.
 */
export async function executeHttpHook(
  hook: HookHttp,
  stdin: string
): Promise<CommandResult> {
  try {
    const parsed = new URL(hook.url)
    if (!ALLOWED_SCHEMES.has(parsed.protocol)) {
      return {
        exitCode: 1,
        stderr: `HTTP hook URL scheme "${parsed.protocol}" is not allowed. Only http: and https: are permitted.`,
      }
    }
  } catch {
    return { exitCode: 1, stderr: `HTTP hook URL is invalid: ${hook.url}` }
  }
  const timeoutS = hook.timeout ?? DEFAULT_HTTP_HOOK_TIMEOUT_S
  const headers = resolveHeaders(hook)
  try {
    const response = await fetch(hook.url, {
      method: "POST",
      headers,
      body: stdin,
      // Converts the configured seconds into the ms AbortSignal expects.
      signal: AbortSignal.timeout(timeoutS * 1000),
    })
    if (!response.ok) {
      return {
        exitCode: 1,
        stderr: `HTTP hook returned status ${response.status}: ${response.statusText}`,
        stdout: await response.text().catch(() => ""),
      }
    }
    const body = await response.text()
    if (!body) {
      return { exitCode: 0, stdout: "", stderr: "" }
    }
    try {
      const parsed = JSON.parse(body) as { exitCode?: number; stderr?: string }
      if (typeof parsed.exitCode === "number") {
        // Propagate the hook-provided stderr (e.g. a block reason alongside
        // exitCode 2) so executors that log/surface stderr see it.
        return {
          exitCode: parsed.exitCode,
          stdout: body,
          stderr: typeof parsed.stderr === "string" ? parsed.stderr : "",
        }
      }
    } catch {
      // Body is not JSON — fall through and treat it as plain stdout.
    }
    return { exitCode: 0, stdout: body, stderr: "" }
  } catch (error) {
    const message = error instanceof Error ? error.message : String(error)
    return { exitCode: 1, stderr: `HTTP hook error: ${message}` }
  }
}

View File

@@ -3,8 +3,8 @@ import type {
PostToolUseOutput,
ClaudeHooksConfig,
} from "./types"
import { findMatchingHooks, executeHookCommand, objectToSnakeCase, transformToolName, log } from "../../shared"
import { DEFAULT_CONFIG } from "./plugin-config"
import { findMatchingHooks, objectToSnakeCase, transformToolName, log } from "../../shared"
import { dispatchHook, getHookIdentifier } from "./dispatch-hook"
import { buildTranscriptFromSession, deleteTempTranscript } from "./transcript"
import { isHookCommandDisabled, type PluginExtendedConfig } from "./config-loader"
@@ -94,22 +94,17 @@ export async function executePostToolUseHooks(
for (const matcher of matchers) {
if (!matcher.hooks || matcher.hooks.length === 0) continue
for (const hook of matcher.hooks) {
if (hook.type !== "command") continue
if (hook.type !== "command" && hook.type !== "http") continue
if (isHookCommandDisabled("PostToolUse", hook.command, extendedConfig ?? null)) {
log("PostToolUse hook command skipped (disabled by config)", { command: hook.command, toolName: ctx.toolName })
const hookName = getHookIdentifier(hook)
if (isHookCommandDisabled("PostToolUse", hookName, extendedConfig ?? null)) {
log("PostToolUse hook command skipped (disabled by config)", { command: hookName, toolName: ctx.toolName })
continue
}
const hookName = hook.command.split("/").pop() || hook.command
if (!firstHookName) firstHookName = hookName
const result = await executeHookCommand(
hook.command,
JSON.stringify(stdinData),
ctx.cwd,
{ forceZsh: DEFAULT_CONFIG.forceZsh, zshPath: DEFAULT_CONFIG.zshPath }
)
const result = await dispatchHook(hook, JSON.stringify(stdinData), ctx.cwd)
if (result.stdout) {
messages.push(result.stdout)

View File

@@ -3,8 +3,8 @@ import type {
PreCompactOutput,
ClaudeHooksConfig,
} from "./types"
import { findMatchingHooks, executeHookCommand, log } from "../../shared"
import { DEFAULT_CONFIG } from "./plugin-config"
import { findMatchingHooks, log } from "../../shared"
import { dispatchHook, getHookIdentifier } from "./dispatch-hook"
import { isHookCommandDisabled, type PluginExtendedConfig } from "./config-loader"
export interface PreCompactContext {
@@ -50,22 +50,17 @@ export async function executePreCompactHooks(
for (const matcher of matchers) {
if (!matcher.hooks || matcher.hooks.length === 0) continue
for (const hook of matcher.hooks) {
if (hook.type !== "command") continue
if (hook.type !== "command" && hook.type !== "http") continue
if (isHookCommandDisabled("PreCompact", hook.command, extendedConfig ?? null)) {
log("PreCompact hook command skipped (disabled by config)", { command: hook.command })
const hookName = getHookIdentifier(hook)
if (isHookCommandDisabled("PreCompact", hookName, extendedConfig ?? null)) {
log("PreCompact hook command skipped (disabled by config)", { command: hookName })
continue
}
const hookName = hook.command.split("/").pop() || hook.command
if (!firstHookName) firstHookName = hookName
const result = await executeHookCommand(
hook.command,
JSON.stringify(stdinData),
ctx.cwd,
{ forceZsh: DEFAULT_CONFIG.forceZsh, zshPath: DEFAULT_CONFIG.zshPath }
)
const result = await dispatchHook(hook, JSON.stringify(stdinData), ctx.cwd)
if (result.exitCode === 2) {
log("PreCompact hook blocked", { hookName, stderr: result.stderr })

View File

@@ -4,8 +4,8 @@ import type {
PermissionDecision,
ClaudeHooksConfig,
} from "./types"
import { findMatchingHooks, executeHookCommand, objectToSnakeCase, transformToolName, log } from "../../shared"
import { DEFAULT_CONFIG } from "./plugin-config"
import { findMatchingHooks, objectToSnakeCase, transformToolName, log } from "../../shared"
import { dispatchHook, getHookIdentifier } from "./dispatch-hook"
import { isHookCommandDisabled, type PluginExtendedConfig } from "./config-loader"
export interface PreToolUseContext {
@@ -77,22 +77,17 @@ export async function executePreToolUseHooks(
for (const matcher of matchers) {
if (!matcher.hooks || matcher.hooks.length === 0) continue
for (const hook of matcher.hooks) {
if (hook.type !== "command") continue
if (hook.type !== "command" && hook.type !== "http") continue
if (isHookCommandDisabled("PreToolUse", hook.command, extendedConfig ?? null)) {
log("PreToolUse hook command skipped (disabled by config)", { command: hook.command, toolName: ctx.toolName })
const hookName = getHookIdentifier(hook)
if (isHookCommandDisabled("PreToolUse", hookName, extendedConfig ?? null)) {
log("PreToolUse hook command skipped (disabled by config)", { command: hookName, toolName: ctx.toolName })
continue
}
const hookName = hook.command.split("/").pop() || hook.command
if (!firstHookName) firstHookName = hookName
const result = await executeHookCommand(
hook.command,
JSON.stringify(stdinData),
ctx.cwd,
{ forceZsh: DEFAULT_CONFIG.forceZsh, zshPath: DEFAULT_CONFIG.zshPath }
)
const result = await dispatchHook(hook, JSON.stringify(stdinData), ctx.cwd)
if (result.exitCode === 2) {
return {

View File

@@ -3,8 +3,8 @@ import type {
StopOutput,
ClaudeHooksConfig,
} from "./types"
import { findMatchingHooks, executeHookCommand, log } from "../../shared"
import { DEFAULT_CONFIG } from "./plugin-config"
import { findMatchingHooks, log } from "../../shared"
import { dispatchHook, getHookIdentifier } from "./dispatch-hook"
import { getTodoPath } from "./todo"
import { isHookCommandDisabled, type PluginExtendedConfig } from "./config-loader"
@@ -68,19 +68,15 @@ export async function executeStopHooks(
for (const matcher of matchers) {
if (!matcher.hooks || matcher.hooks.length === 0) continue
for (const hook of matcher.hooks) {
if (hook.type !== "command") continue
if (hook.type !== "command" && hook.type !== "http") continue
if (isHookCommandDisabled("Stop", hook.command, extendedConfig ?? null)) {
log("Stop hook command skipped (disabled by config)", { command: hook.command })
const hookName = getHookIdentifier(hook)
if (isHookCommandDisabled("Stop", hookName, extendedConfig ?? null)) {
log("Stop hook command skipped (disabled by config)", { command: hookName })
continue
}
const result = await executeHookCommand(
hook.command,
JSON.stringify(stdinData),
ctx.cwd,
{ forceZsh: DEFAULT_CONFIG.forceZsh, zshPath: DEFAULT_CONFIG.zshPath }
)
const result = await dispatchHook(hook, JSON.stringify(stdinData), ctx.cwd)
// Check exit code first - exit code 2 means block
if (result.exitCode === 2) {

View File

@@ -12,7 +12,7 @@ export type ClaudeHookEvent =
export interface HookMatcher {
matcher: string
hooks: HookCommand[]
hooks: HookAction[]
}
export interface HookCommand {
@@ -20,6 +20,16 @@ export interface HookCommand {
command: string
}
export interface HookHttp {
type: "http"
url: string
headers?: Record<string, string>
allowedEnvVars?: string[]
timeout?: number
}
export type HookAction = HookCommand | HookHttp
export interface ClaudeHooksConfig {
PreToolUse?: HookMatcher[]
PostToolUse?: HookMatcher[]

View File

@@ -3,8 +3,8 @@ import type {
PostToolUseOutput,
ClaudeHooksConfig,
} from "./types"
import { findMatchingHooks, executeHookCommand, log } from "../../shared"
import { DEFAULT_CONFIG } from "./plugin-config"
import { findMatchingHooks, log } from "../../shared"
import { dispatchHook, getHookIdentifier } from "./dispatch-hook"
import { isHookCommandDisabled, type PluginExtendedConfig } from "./config-loader"
const USER_PROMPT_SUBMIT_TAG_OPEN = "<user-prompt-submit-hook>"
@@ -80,19 +80,15 @@ export async function executeUserPromptSubmitHooks(
for (const matcher of matchers) {
if (!matcher.hooks || matcher.hooks.length === 0) continue
for (const hook of matcher.hooks) {
if (hook.type !== "command") continue
if (hook.type !== "command" && hook.type !== "http") continue
if (isHookCommandDisabled("UserPromptSubmit", hook.command, extendedConfig ?? null)) {
log("UserPromptSubmit hook command skipped (disabled by config)", { command: hook.command })
const hookName = getHookIdentifier(hook)
if (isHookCommandDisabled("UserPromptSubmit", hookName, extendedConfig ?? null)) {
log("UserPromptSubmit hook command skipped (disabled by config)", { command: hookName })
continue
}
const result = await executeHookCommand(
hook.command,
JSON.stringify(stdinData),
ctx.cwd,
{ forceZsh: DEFAULT_CONFIG.forceZsh, zshPath: DEFAULT_CONFIG.zshPath }
)
const result = await dispatchHook(hook, JSON.stringify(stdinData), ctx.cwd)
if (result.stdout) {
const output = result.stdout.trim()

View File

@@ -50,3 +50,4 @@ export { createRuntimeFallbackHook, type RuntimeFallbackHook, type RuntimeFallba
export { createWriteExistingFileGuardHook } from "./write-existing-file-guard";
export { createHashlineReadEnhancerHook } from "./hashline-read-enhancer";
export { createJsonErrorRecoveryHook, JSON_ERROR_TOOL_EXCLUDE_LIST, JSON_ERROR_PATTERNS, JSON_ERROR_REMINDER } from "./json-error-recovery";
export { createReadImageResizerHook } from "./read-image-resizer"

View File

@@ -0,0 +1,286 @@
/// <reference types="bun-types" />
import { beforeEach, describe, expect, it, mock } from "bun:test"
import type { PluginInput } from "@opencode-ai/plugin"
import type { ImageDimensions, ResizeResult } from "./types"
// Module-level mocks for the hook's collaborators; individual tests configure
// return values, and beforeEach resets them.
const mockParseImageDimensions = mock((): ImageDimensions | null => null)
const mockCalculateTargetDimensions = mock((): ImageDimensions | null => null)
const mockResizeImage = mock(async (): Promise<ResizeResult | null> => null)
// Defaults to an Anthropic model so the hook's provider gate passes unless a test overrides it.
const mockGetSessionModel = mock((_sessionID: string) => ({
  providerID: "anthropic",
  modelID: "claude-sonnet-4-6",
} as { providerID: string; modelID: string } | undefined))
// Replace the real modules before the hook module import below resolves them.
mock.module("./image-dimensions", () => ({
  parseImageDimensions: mockParseImageDimensions,
}))
mock.module("./image-resizer", () => ({
  calculateTargetDimensions: mockCalculateTargetDimensions,
  resizeImage: mockResizeImage,
}))
mock.module("../../shared/session-model-state", () => ({
  getSessionModel: mockGetSessionModel,
}))
import { createReadImageResizerHook } from "./hook"
// Local shape of the tool output the hook mutates; mirrors what the Read tool
// produces (text output plus optional data-URL attachments).
type ToolOutput = {
  title: string
  output: string
  metadata: unknown
  attachments?: Array<{ mime: string; url: string; filename?: string }>
}
// Minimal PluginInput stand-in; the hook under test never touches the client.
function createMockContext(): PluginInput {
  return {
    client: {} as PluginInput["client"],
    directory: "/test",
  } as PluginInput
}
/** Builds the minimal tool-execution input payload shared by every test case. */
function createInput(tool: string): { tool: string; sessionID: string; callID: string } {
  return { tool, sessionID: "session-1", callID: "call-1" }
}
// Behavior tests for the Read-tool image resizer hook. All collaborators are
// module-mocked above; each case configures parse/resize results and asserts
// how the hook mutates the tool output in place.
describe("createReadImageResizerHook", () => {
  beforeEach(() => {
    // Reset call history and stubbed values, then restore the Anthropic
    // default so the provider gate passes unless a test overrides it.
    mockParseImageDimensions.mockReset()
    mockCalculateTargetDimensions.mockReset()
    mockResizeImage.mockReset()
    mockGetSessionModel.mockReset()
    mockGetSessionModel.mockReturnValue({ providerID: "anthropic", modelID: "claude-sonnet-4-6" })
  })
  it("skips non-Read tools", async () => {
    //#given
    const hook = createReadImageResizerHook(createMockContext())
    const output: ToolOutput = {
      title: "Read",
      output: "original output",
      metadata: {},
      attachments: [{ mime: "image/png", url: "data:image/png;base64,old", filename: "image.png" }],
    }
    //#when
    await hook["tool.execute.after"](createInput("Bash"), output)
    //#then
    expect(output.output).toBe("original output")
    expect(mockParseImageDimensions).not.toHaveBeenCalled()
  })
  it("skips when provider is not anthropic", async () => {
    //#given
    mockGetSessionModel.mockReturnValue({ providerID: "openai", modelID: "gpt-5.3-codex" })
    mockParseImageDimensions.mockReturnValue({ width: 3000, height: 2000 })
    mockCalculateTargetDimensions.mockReturnValue({ width: 1568, height: 1045 })
    const hook = createReadImageResizerHook(createMockContext())
    const output: ToolOutput = {
      title: "Read",
      output: "original output",
      metadata: {},
      attachments: [{ mime: "image/png", url: "data:image/png;base64,old", filename: "image.png" }],
    }
    //#when
    await hook["tool.execute.after"](createInput("Read"), output)
    //#then
    expect(output.output).toBe("original output")
    expect(mockParseImageDimensions).not.toHaveBeenCalled()
  })
  it("skips when session model is unknown", async () => {
    //#given
    mockGetSessionModel.mockReturnValue(undefined)
    mockParseImageDimensions.mockReturnValue({ width: 3000, height: 2000 })
    const hook = createReadImageResizerHook(createMockContext())
    const output: ToolOutput = {
      title: "Read",
      output: "original output",
      metadata: {},
      attachments: [{ mime: "image/png", url: "data:image/png;base64,old", filename: "image.png" }],
    }
    //#when
    await hook["tool.execute.after"](createInput("Read"), output)
    //#then
    expect(output.output).toBe("original output")
    expect(mockParseImageDimensions).not.toHaveBeenCalled()
  })
  it("skips Read output with no attachments", async () => {
    //#given
    const hook = createReadImageResizerHook(createMockContext())
    const output: ToolOutput = {
      title: "Read",
      output: "original output",
      metadata: {},
    }
    //#when
    await hook["tool.execute.after"](createInput("Read"), output)
    //#then
    expect(output.output).toBe("original output")
    expect(mockParseImageDimensions).not.toHaveBeenCalled()
  })
  it("skips non-image attachments", async () => {
    //#given
    const hook = createReadImageResizerHook(createMockContext())
    const output: ToolOutput = {
      title: "Read",
      output: "original output",
      metadata: {},
      attachments: [{ mime: "application/pdf", url: "data:application/pdf;base64,AAAA", filename: "file.pdf" }],
    }
    //#when
    await hook["tool.execute.after"](createInput("Read"), output)
    //#then
    expect(output.output).toBe("original output")
    expect(mockParseImageDimensions).not.toHaveBeenCalled()
  })
  it("skips unsupported image mime types", async () => {
    //#given
    const hook = createReadImageResizerHook(createMockContext())
    const output: ToolOutput = {
      title: "Read",
      output: "original output",
      metadata: {},
      attachments: [{ mime: "image/heic", url: "data:image/heic;base64,AAAA", filename: "photo.heic" }],
    }
    //#when
    await hook["tool.execute.after"](createInput("Read"), output)
    //#then
    expect(output.output).toBe("original output")
    expect(mockParseImageDimensions).not.toHaveBeenCalled()
  })
  it("appends within-limits metadata when image is already valid", async () => {
    //#given
    // null target dimensions signal "no resize needed".
    mockParseImageDimensions.mockReturnValue({ width: 800, height: 600 })
    mockCalculateTargetDimensions.mockReturnValue(null)
    const hook = createReadImageResizerHook(createMockContext())
    const output: ToolOutput = {
      title: "Read",
      output: "original output",
      metadata: {},
      attachments: [{ mime: "image/png", url: "data:image/png;base64,old", filename: "image.png" }],
    }
    //#when
    await hook["tool.execute.after"](createInput("Read"), output)
    //#then
    expect(output.output).toContain("[Image Info]")
    expect(output.output).toContain("within limits")
    expect(output.attachments?.[0]?.url).toBe("data:image/png;base64,old")
    expect(mockResizeImage).not.toHaveBeenCalled()
  })
  it("replaces attachment URL and appends resize metadata for oversized image", async () => {
    //#given
    mockParseImageDimensions.mockReturnValue({ width: 3000, height: 2000 })
    mockCalculateTargetDimensions.mockReturnValue({ width: 1568, height: 1045 })
    mockResizeImage.mockResolvedValue({
      resizedDataUrl: "data:image/png;base64,resized",
      original: { width: 3000, height: 2000 },
      resized: { width: 1568, height: 1045 },
    })
    const hook = createReadImageResizerHook(createMockContext())
    const output: ToolOutput = {
      title: "Read",
      output: "original output",
      metadata: {},
      attachments: [{ mime: "image/png", url: "data:image/png;base64,old", filename: "big.png" }],
    }
    //#when
    await hook["tool.execute.after"](createInput("Read"), output)
    //#then
    expect(output.attachments?.[0]?.url).toBe("data:image/png;base64,resized")
    expect(output.output).toContain("[Image Resize Info]")
    expect(output.output).toContain("resized")
  })
  it("keeps original attachment URL and marks resize skipped when resize fails", async () => {
    //#given
    // null resize result means the resize step failed; the hook must degrade gracefully.
    mockParseImageDimensions.mockReturnValue({ width: 3000, height: 2000 })
    mockCalculateTargetDimensions.mockReturnValue({ width: 1568, height: 1045 })
    mockResizeImage.mockResolvedValue(null)
    const hook = createReadImageResizerHook(createMockContext())
    const output: ToolOutput = {
      title: "Read",
      output: "original output",
      metadata: {},
      attachments: [{ mime: "image/png", url: "data:image/png;base64,old", filename: "fail.png" }],
    }
    //#when
    await hook["tool.execute.after"](createInput("Read"), output)
    //#then
    expect(output.attachments?.[0]?.url).toBe("data:image/png;base64,old")
    expect(output.output).toContain("resize skipped")
  })
  it("appends unknown-dimensions metadata when parsing fails", async () => {
    //#given
    mockParseImageDimensions.mockReturnValue(null)
    const hook = createReadImageResizerHook(createMockContext())
    const output: ToolOutput = {
      title: "Read",
      output: "original output",
      metadata: {},
      attachments: [{ mime: "image/png", url: "data:image/png;base64,old", filename: "corrupt.png" }],
    }
    //#when
    await hook["tool.execute.after"](createInput("Read"), output)
    //#then
    expect(output.output).toContain("dimensions could not be parsed")
    expect(mockCalculateTargetDimensions).not.toHaveBeenCalled()
  })
  it("fires for lowercase read tool name", async () => {
    //#given
    mockParseImageDimensions.mockReturnValue({ width: 800, height: 600 })
    mockCalculateTargetDimensions.mockReturnValue(null)
    const hook = createReadImageResizerHook(createMockContext())
    const output: ToolOutput = {
      title: "Read",
      output: "original output",
      metadata: {},
      attachments: [{ mime: "image/png", url: "data:image/png;base64,old", filename: "image.png" }],
    }
    //#when
    await hook["tool.execute.after"](createInput("read"), output)
    //#then
    expect(mockParseImageDimensions).toHaveBeenCalledTimes(1)
    expect(output.output).toContain("within limits")
  })
})

View File

@@ -0,0 +1,197 @@
import type { PluginInput } from "@opencode-ai/plugin"
import type { ImageAttachment, ImageDimensions } from "./types"
import { parseImageDimensions } from "./image-dimensions"
import { calculateTargetDimensions, resizeImage } from "./image-resizer"
import { log } from "../../shared"
import { getSessionModel } from "../../shared/session-model-state"
// Mime types whose headers we can parse and that sharp can re-encode.
const SUPPORTED_IMAGE_MIMES = new Set(["image/png", "image/jpeg", "image/gif", "image/webp"])
// Anthropic's approximate image token cost divisor: tokens ~= ceil(w * h / 750).
const TOKEN_DIVISOR = 750
// Per-attachment processing outcome, used to build the output appendix.
interface ResizeEntry {
  filename: string
  originalDims: ImageDimensions | null
  resizedDims: ImageDimensions | null
  status: "resized" | "within-limits" | "resize-skipped" | "unknown-dims"
}
/** Case-insensitive check for the built-in Read tool name. */
function isReadTool(toolName: string): boolean {
  const normalized = toolName.toLowerCase()
  return normalized === "read"
}
/** Narrows an unknown value to a plain object record; arrays and nullish values yield null. */
function asRecord(value: unknown): Record<string, unknown> | null {
  const isPlainObject = typeof value === "object" && value !== null && !Array.isArray(value)
  return isPlainObject ? (value as Record<string, unknown>) : null
}
/**
 * Type guard: the record has string mime/url fields, and filename, when
 * present, is a string.
 */
function isImageAttachmentRecord(
  value: Record<string, unknown>,
): value is Record<string, unknown> & ImageAttachment {
  if (typeof value.mime !== "string" || typeof value.url !== "string") {
    return false
  }
  return value.filename === undefined || typeof value.filename === "string"
}
/**
 * Collects the supported image attachments from a tool output record.
 *
 * Side effect: each accepted attachment's `mime` is rewritten in place to its
 * lowercase form so downstream consumers can compare it directly.
 * Attachments with non-string fields or unsupported mime types are skipped.
 *
 * Fix: removed the dead store `attachmentRecord.url = url` — it reassigned
 * the unmodified value it had just read.
 */
function extractImageAttachments(output: Record<string, unknown>): ImageAttachment[] {
  const attachmentsValue = output.attachments
  if (!Array.isArray(attachmentsValue)) {
    return []
  }
  const attachments: ImageAttachment[] = []
  for (const attachmentValue of attachmentsValue) {
    const attachmentRecord = asRecord(attachmentValue)
    if (!attachmentRecord) {
      continue
    }
    const mime = attachmentRecord.mime
    const url = attachmentRecord.url
    if (typeof mime !== "string" || typeof url !== "string") {
      continue
    }
    const normalizedMime = mime.toLowerCase()
    if (!SUPPORTED_IMAGE_MIMES.has(normalizedMime)) {
      continue
    }
    // Normalize the mime in place; url was only read for validation.
    attachmentRecord.mime = normalizedMime
    if (isImageAttachmentRecord(attachmentRecord)) {
      attachments.push(attachmentRecord)
    }
  }
  return attachments
}
/** Approximate Anthropic image token cost: ceil(pixel count / divisor). */
function calculateTokens(width: number, height: number): number {
  const pixels = width * height
  return Math.ceil(pixels / TOKEN_DIVISOR)
}
/**
 * Renders the human-readable appendix describing what happened to each image.
 * The header is "[Image Resize Info]" when at least one image was resized,
 * otherwise "[Image Info]".
 *
 * Fix: the "resize-skipped" branch and the defensive `!entry.resizedDims`
 * branch emitted byte-identical lines; they are merged into one condition.
 */
function formatResizeAppendix(entries: ResizeEntry[]): string {
  const anyResized = entries.some((entry) => entry.status === "resized")
  const header = anyResized ? "[Image Resize Info]" : "[Image Info]"
  const lines = [`\n\n${header}`]
  for (const entry of entries) {
    if (entry.status === "unknown-dims" || !entry.originalDims) {
      lines.push(`- ${entry.filename}: dimensions could not be parsed`)
      continue
    }
    const original = entry.originalDims
    const originalText = `${original.width}x${original.height}`
    const originalTokens = calculateTokens(original.width, original.height)
    if (entry.status === "within-limits") {
      lines.push(`- ${entry.filename}: ${originalText} (within limits, tokens: ${originalTokens})`)
      continue
    }
    // Explicit skip, or a "resized" entry missing its resized dimensions —
    // both report the image as not resized.
    if (entry.status === "resize-skipped" || !entry.resizedDims) {
      lines.push(`- ${entry.filename}: ${originalText} (resize skipped, tokens: ${originalTokens})`)
      continue
    }
    const resized = entry.resizedDims
    const resizedText = `${resized.width}x${resized.height}`
    const resizedTokens = calculateTokens(resized.width, resized.height)
    lines.push(
      `- ${entry.filename}: ${originalText} -> ${resizedText} (resized, tokens: ${originalTokens} -> ${resizedTokens})`,
    )
  }
  return lines.join("\n")
}
/** Uses the attachment filename when present and non-blank, else a 1-based placeholder. */
function resolveFilename(attachment: ImageAttachment, index: number): string {
  const name = attachment.filename
  if (name && name.trim().length > 0) {
    return name
  }
  return `image-${index + 1}`
}
/**
 * Post-Read hook that resizes oversized image attachments for Anthropic
 * sessions and appends a summary of what happened to the tool output text.
 *
 * Mutates `output` in place: attachment URLs are replaced with resized data
 * URLs, and an "[Image Resize Info]" / "[Image Info]" appendix is added to
 * `output.output`. Non-Read tools, non-Anthropic sessions, and outputs
 * without supported image attachments are left untouched.
 */
export function createReadImageResizerHook(_ctx: PluginInput) {
  return {
    "tool.execute.after": async (
      input: { tool: string; sessionID: string; callID: string },
      output: { title: string; output: string; metadata: unknown },
    ) => {
      // Only the Read tool (case-insensitive) produces attachments we handle.
      if (!isReadTool(input.tool)) {
        return
      }
      // The resize limits are Anthropic-specific; skip other providers and
      // sessions whose model is unknown.
      const sessionModel = getSessionModel(input.sessionID)
      if (sessionModel?.providerID !== "anthropic") {
        return
      }
      if (typeof output.output !== "string") {
        return
      }
      const outputRecord = output as Record<string, unknown>
      const attachments = extractImageAttachments(outputRecord)
      if (attachments.length === 0) {
        return
      }
      const entries: ResizeEntry[] = []
      for (const [index, attachment] of attachments.entries()) {
        const filename = resolveFilename(attachment, index)
        try {
          const originalDims = parseImageDimensions(attachment.url, attachment.mime)
          if (!originalDims) {
            entries.push({ filename, originalDims: null, resizedDims: null, status: "unknown-dims" })
            continue
          }
          // A null target means the image is already within limits.
          const targetDims = calculateTargetDimensions(originalDims.width, originalDims.height)
          if (!targetDims) {
            entries.push({
              filename,
              originalDims,
              resizedDims: null,
              status: "within-limits",
            })
            continue
          }
          const resizedResult = await resizeImage(attachment.url, attachment.mime, targetDims)
          if (!resizedResult) {
            // Resize failure is non-fatal: keep the original URL, note the skip.
            entries.push({
              filename,
              originalDims,
              resizedDims: null,
              status: "resize-skipped",
            })
            continue
          }
          attachment.url = resizedResult.resizedDataUrl
          entries.push({
            filename,
            originalDims: resizedResult.original,
            resizedDims: resizedResult.resized,
            status: "resized",
          })
        } catch (error) {
          // Never let one bad attachment break the whole Read output.
          log("[read-image-resizer] attachment processing failed", {
            error: error instanceof Error ? error.message : String(error),
            filename,
          })
          entries.push({ filename, originalDims: null, resizedDims: null, status: "unknown-dims" })
        }
      }
      if (entries.length === 0) {
        return
      }
      output.output += formatResizeAppendix(entries)
    },
  }
}

View File

@@ -0,0 +1,126 @@
/// <reference types="bun-types" />
import { describe, expect, it } from "bun:test"
import { parseImageDimensions } from "./image-dimensions"
// Known-good fixtures: real 1x1 PNG and GIF payloads encoded as data URLs.
const PNG_1X1_DATA_URL =
  "data:image/png;base64,iVBORw0KGgoAAAANSUhEUgAAAAEAAAABCAYAAAAfFcSJAAAADUlEQVR42mNk+M9QDwADhgGAWjR9awAAAABJRU5ErkJggg=="
const GIF_1X1_DATA_URL =
  "data:image/gif;base64,R0lGODlhAQABAIAAAAAAAP///yH5BAEAAAAALAAAAAABAAEAAAIBRAA7"
/** Builds a minimal PNG header (signature + IHDR sizes) as a data URL for dimension tests. */
function createPngDataUrl(width: number, height: number): string {
  const header = Buffer.alloc(33)
  header.set([0x89, 0x50, 0x4e, 0x47, 0x0d, 0x0a, 0x1a, 0x0a], 0) // PNG signature
  header.writeUInt32BE(13, 8) // IHDR chunk length
  header.set([0x49, 0x48, 0x44, 0x52], 12) // "IHDR" chunk type
  header.writeUInt32BE(width, 16)
  header.writeUInt32BE(height, 20)
  return `data:image/png;base64,${header.toString("base64")}`
}
/** Builds a minimal GIF89a header with the given logical screen size as a data URL. */
function createGifDataUrl(width: number, height: number): string {
  const header = Buffer.alloc(10)
  header.set([0x47, 0x49, 0x46, 0x38, 0x39, 0x61], 0) // "GIF89a"
  header.writeUInt16LE(width, 6)
  header.writeUInt16LE(height, 8)
  return `data:image/gif;base64,${header.toString("base64")}`
}
/** Appends base64 filler after a valid PNG header to simulate a very large payload. */
function createLargePngDataUrl(width: number, height: number, extraBase64Chars: number): string {
  const headerUrl = createPngDataUrl(width, height)
  const headerBase64 = headerUrl.slice(headerUrl.indexOf(",") + 1)
  return `data:image/png;base64,${headerBase64}${"A".repeat(extraBase64Chars)}`
}
// Exercises the header-sniffing parser against real 1x1 fixtures, synthetic
// headers, oversized payloads, and malformed input.
describe("parseImageDimensions", () => {
  it("parses PNG 1x1 dimensions", () => {
    //#given
    const dataUrl = PNG_1X1_DATA_URL
    //#when
    const result = parseImageDimensions(dataUrl, "image/png")
    //#then
    expect(result).toEqual({ width: 1, height: 1 })
  })
  it("parses PNG dimensions from IHDR", () => {
    //#given
    const dataUrl = createPngDataUrl(3000, 2000)
    //#when
    const result = parseImageDimensions(dataUrl, "image/png")
    //#then
    expect(result).toEqual({ width: 3000, height: 2000 })
  })
  it("parses PNG dimensions from a very large base64 payload", () => {
    //#given
    // 10 MB of filler verifies the 32 KB header-prefix optimization still parses.
    const dataUrl = createLargePngDataUrl(4096, 2160, 10 * 1024 * 1024)
    //#when
    const result = parseImageDimensions(dataUrl, "image/png")
    //#then
    expect(result).toEqual({ width: 4096, height: 2160 })
  })
  it("parses GIF 1x1 dimensions", () => {
    //#given
    const dataUrl = GIF_1X1_DATA_URL
    //#when
    const result = parseImageDimensions(dataUrl, "image/gif")
    //#then
    expect(result).toEqual({ width: 1, height: 1 })
  })
  it("parses GIF dimensions from logical screen descriptor", () => {
    //#given
    const dataUrl = createGifDataUrl(320, 240)
    //#when
    const result = parseImageDimensions(dataUrl, "image/gif")
    //#then
    expect(result).toEqual({ width: 320, height: 240 })
  })
  it("returns null for empty input", () => {
    //#given
    const dataUrl = ""
    //#when
    const result = parseImageDimensions(dataUrl, "image/png")
    //#then
    expect(result).toBeNull()
  })
  it("returns null for too-short PNG buffer", () => {
    //#given
    const dataUrl = "data:image/png;base64,AAAA"
    //#when
    const result = parseImageDimensions(dataUrl, "image/png")
    //#then
    expect(result).toBeNull()
  })
  it("returns null for unsupported mime type", () => {
    //#given
    const dataUrl = PNG_1X1_DATA_URL
    //#when
    const result = parseImageDimensions(dataUrl, "image/heic")
    //#then
    expect(result).toBeNull()
  })
})

View File

@@ -0,0 +1,191 @@
import type { ImageDimensions } from "./types"
import { extractBase64Data } from "../../tools/look-at/mime-type-inference"
// Decode only the first 32 KiB when sniffing dimensions; every supported
// format stores its size within this prefix.
const HEADER_BYTES = 32_768
// Base64 characters that decode to at least HEADER_BYTES bytes, rounded up to
// a whole 4-character quantum so the slice stays independently decodable.
const HEADER_BASE64_CHARS = Math.ceil(HEADER_BYTES / 3) * 4
/** Validates a parsed width/height pair; rejects non-finite or non-positive values. */
function toImageDimensions(width: number, height: number): ImageDimensions | null {
  const valid = Number.isFinite(width) && Number.isFinite(height) && width > 0 && height > 0
  return valid ? { width, height } : null
}
/** Reads width/height from a PNG IHDR chunk; null when the buffer is not a PNG header. */
function parsePngDimensions(buffer: Buffer): ImageDimensions | null {
  if (buffer.length < 24) {
    return null
  }
  const signature = [0x89, 0x50, 0x4e, 0x47, 0x0d, 0x0a, 0x1a, 0x0a]
  for (let i = 0; i < signature.length; i += 1) {
    if (buffer[i] !== signature[i]) {
      return null
    }
  }
  // IHDR must be the first chunk; its type tag occupies bytes 12-15.
  if (buffer.toString("ascii", 12, 16) !== "IHDR") {
    return null
  }
  return toImageDimensions(buffer.readUInt32BE(16), buffer.readUInt32BE(20))
}
/** Reads width/height from a GIF logical screen descriptor (little-endian at bytes 6-9). */
function parseGifDimensions(buffer: Buffer): ImageDimensions | null {
  // "GIF8" matches both GIF87a and GIF89a signatures.
  if (buffer.length < 10 || buffer.toString("ascii", 0, 4) !== "GIF8") {
    return null
  }
  return toImageDimensions(buffer.readUInt16LE(6), buffer.readUInt16LE(8))
}
/**
 * Scans JPEG segments for a Start-of-Frame marker and reads the frame size.
 *
 * Fix: previously only SOF0 (0xC0, baseline) and SOF2 (0xC2, progressive)
 * were recognized, so extended-sequential, lossless, and arithmetic-coded
 * JPEGs (SOF1, SOF3, SOF5-7, SOF9-11, SOF13-15) were reported as
 * unparseable. All SOF markers share the same precision/height/width layout,
 * so accept the full 0xC0-0xCF range minus DHT (0xC4), JPG (0xC8), and
 * DAC (0xCC).
 */
function parseJpegDimensions(buffer: Buffer): ImageDimensions | null {
  if (buffer.length < 4 || buffer[0] !== 0xff || buffer[1] !== 0xd8) {
    return null
  }
  const isSofMarker = (marker: number): boolean =>
    marker >= 0xc0 && marker <= 0xcf && marker !== 0xc4 && marker !== 0xc8 && marker !== 0xcc
  let offset = 2
  while (offset < buffer.length) {
    if (buffer[offset] !== 0xff) {
      offset += 1
      continue
    }
    // Skip fill bytes (runs of 0xFF) preceding the marker code.
    while (offset < buffer.length && buffer[offset] === 0xff) {
      offset += 1
    }
    if (offset >= buffer.length) {
      return null
    }
    const marker = buffer[offset]
    offset += 1
    // EOI or SOS: no SOF will appear in the remaining decoded prefix.
    if (marker === 0xd9 || marker === 0xda) {
      break
    }
    if (offset + 1 >= buffer.length) {
      return null
    }
    const segmentLength = buffer.readUInt16BE(offset)
    if (segmentLength < 2) {
      return null
    }
    if (isSofMarker(marker) && offset + 7 < buffer.length) {
      // SOF payload layout: length(2) precision(1) height(2) width(2) ...
      const height = buffer.readUInt16BE(offset + 3)
      const width = buffer.readUInt16BE(offset + 5)
      return toImageDimensions(width, height)
    }
    offset += segmentLength
  }
  return null
}
/** Little-endian 24-bit unsigned read (WebP VP8X canvas fields are 24-bit LE). */
function readUInt24LE(buffer: Buffer, offset: number): number {
  const low = buffer[offset]
  const mid = buffer[offset + 1] << 8
  const high = buffer[offset + 2] << 16
  return low | mid | high
}
/**
 * Reads dimensions from a WebP RIFF container (VP8 lossy, VP8L lossless,
 * VP8X extended).
 *
 * Fix: each chunk layout now verifies the buffer is long enough before
 * reading. Previously a truncated VP8 chunk made readUInt16LE throw
 * (swallowed by the caller's try/catch), and a truncated VP8X chunk read
 * past the end, where out-of-range Buffer indexing yields undefined and the
 * bitwise math silently produced wrong dimensions instead of null.
 */
function parseWebpDimensions(buffer: Buffer): ImageDimensions | null {
  if (buffer.length < 16) {
    return null
  }
  if (buffer.toString("ascii", 0, 4) !== "RIFF" || buffer.toString("ascii", 8, 12) !== "WEBP") {
    return null
  }
  const chunkType = buffer.toString("ascii", 12, 16)
  if (chunkType === "VP8 ") {
    // Lossy bitstream: 3-byte start code 0x9d012a, then 14-bit width/height.
    if (buffer.length < 30) {
      return null
    }
    if (buffer[23] !== 0x9d || buffer[24] !== 0x01 || buffer[25] !== 0x2a) {
      return null
    }
    const width = buffer.readUInt16LE(26) & 0x3fff
    const height = buffer.readUInt16LE(28) & 0x3fff
    return toImageDimensions(width, height)
  }
  if (chunkType === "VP8L") {
    // Lossless bitstream: signature byte 0x2f, then two 14-bit fields minus one.
    if (buffer.length < 25 || buffer[20] !== 0x2f) {
      return null
    }
    const bits = buffer.readUInt32LE(21)
    const width = (bits & 0x3fff) + 1
    const height = ((bits >>> 14) & 0x3fff) + 1
    return toImageDimensions(width, height)
  }
  if (chunkType === "VP8X") {
    // Extended header: canvas size stored as 24-bit LE values minus one.
    if (buffer.length < 30) {
      return null
    }
    const width = readUInt24LE(buffer, 24) + 1
    const height = readUInt24LE(buffer, 27) + 1
    return toImageDimensions(width, height)
  }
  return null
}
/**
 * Parses image dimensions from a base64 data URL without decoding the full
 * payload: only the first HEADER_BASE64_CHARS characters are decoded, which
 * covers every supported format's header. Returns null on missing input,
 * undecodable base64, or unsupported mime types; never throws.
 */
export function parseImageDimensions(base64DataUrl: string, mimeType: string): ImageDimensions | null {
  try {
    if (!base64DataUrl || !mimeType) {
      return null
    }
    const rawBase64 = extractBase64Data(base64DataUrl)
    if (!rawBase64) {
      return null
    }
    // slice() never over-reads: shorter payloads come back whole.
    const headerBase64 = rawBase64.slice(0, HEADER_BASE64_CHARS)
    const buffer = Buffer.from(headerBase64, "base64")
    if (buffer.length === 0) {
      return null
    }
    switch (mimeType.toLowerCase()) {
      case "image/png":
        return parsePngDimensions(buffer)
      case "image/gif":
        return parseGifDimensions(buffer)
      case "image/jpeg":
      case "image/jpg":
        return parseJpegDimensions(buffer)
      case "image/webp":
        return parseWebpDimensions(buffer)
      default:
        return null
    }
  } catch {
    return null
  }
}

View File

@@ -0,0 +1,132 @@
/// <reference types="bun-types" />
import { afterEach, describe, expect, it, mock } from "bun:test"
// Real 1x1 PNG payload used as resize input.
const PNG_1X1_DATA_URL =
  "data:image/png;base64,iVBORw0KGgoAAAANSUhEUgAAAAEAAAABCAYAAAAfFcSJAAAADUlEQVR42mNk+M9QDwADhgGAWjR9awAAAABJRU5ErkJggg=="
type ImageResizerModule = typeof import("./image-resizer")
// Imports with a unique query string so every call gets a fresh module
// instance, unaffected by mock.module state left over from earlier tests.
async function importFreshImageResizerModule(): Promise<ImageResizerModule> {
  return import(`./image-resizer?test-${Date.now()}-${Math.random()}`)
}
// Pure-math tests: null means "no downscale needed"; otherwise the long edge
// is capped and the short edge floors to preserve aspect ratio.
describe("calculateTargetDimensions", () => {
  it("returns null when dimensions are already within limits", async () => {
    //#given
    const { calculateTargetDimensions } = await importFreshImageResizerModule()
    //#when
    const result = calculateTargetDimensions(800, 600)
    //#then
    expect(result).toBeNull()
  })
  it("returns null at exact long-edge boundary", async () => {
    //#given
    const { calculateTargetDimensions } = await importFreshImageResizerModule()
    //#when
    const result = calculateTargetDimensions(1568, 1000)
    //#then
    expect(result).toBeNull()
  })
  it("scales landscape dimensions by max long edge", async () => {
    //#given
    const { calculateTargetDimensions } = await importFreshImageResizerModule()
    //#when
    const result = calculateTargetDimensions(3000, 2000)
    //#then
    expect(result).toEqual({
      width: 1568,
      height: Math.floor(2000 * (1568 / 3000)),
    })
  })
  it("scales portrait dimensions by max long edge", async () => {
    //#given
    const { calculateTargetDimensions } = await importFreshImageResizerModule()
    //#when
    const result = calculateTargetDimensions(2000, 3000)
    //#then
    expect(result).toEqual({
      width: Math.floor(2000 * (1568 / 3000)),
      height: 1568,
    })
  })
  it("scales square dimensions to exact target", async () => {
    //#given
    const { calculateTargetDimensions } = await importFreshImageResizerModule()
    //#when
    const result = calculateTargetDimensions(4000, 4000)
    //#then
    expect(result).toEqual({ width: 1568, height: 1568 })
  })
  it("uses custom maxLongEdge when provided", async () => {
    //#given
    const { calculateTargetDimensions } = await importFreshImageResizerModule()
    //#when
    const result = calculateTargetDimensions(2000, 1000, 1000)
    //#then
    expect(result).toEqual({ width: 1000, height: 500 })
  })
})
// Failure-path tests: resizeImage must degrade to null when sharp cannot be
// imported or throws mid-pipeline.
describe("resizeImage", () => {
  afterEach(() => {
    // Undo the sharp mock so later suites see the real module resolution.
    mock.restore()
  })
  it("returns null when sharp import fails", async () => {
    //#given
    mock.module("sharp", () => {
      throw new Error("sharp unavailable")
    })
    const { resizeImage } = await importFreshImageResizerModule()
    //#when
    const result = await resizeImage(PNG_1X1_DATA_URL, "image/png", {
      width: 1,
      height: 1,
    })
    //#then
    expect(result).toBeNull()
  })
  it("returns null when sharp throws during resize", async () => {
    //#given
    const mockSharpFactory = mock(() => ({
      resize: () => {
        throw new Error("resize failed")
      },
    }))
    mock.module("sharp", () => ({
      default: mockSharpFactory,
    }))
    const { resizeImage } = await importFreshImageResizerModule()
    //#when
    const result = await resizeImage(PNG_1X1_DATA_URL, "image/png", {
      width: 1,
      height: 1,
    })
    //#then
    expect(result).toBeNull()
  })
})

View File

@@ -0,0 +1,184 @@
import type { ImageDimensions, ResizeResult } from "./types"
import { extractBase64Data } from "../../tools/look-at/mime-type-inference"
import { log } from "../../shared"
// Anthropic vision limits: maximum long edge in pixels, maximum encoded bytes.
const ANTHROPIC_MAX_LONG_EDGE = 1568
const ANTHROPIC_MAX_FILE_SIZE = 5 * 1024 * 1024
// Output formats we ask sharp to encode.
type SharpFormat = "jpeg" | "png" | "gif" | "webp"
// Minimal structural typings for the dynamically imported sharp module; keeps
// this file compiling without a hard dependency on sharp's own types.
interface SharpMetadata {
  width?: number
  height?: number
}
interface SharpInstance {
  resize(width: number, height: number, options: { fit: "inside" }): SharpInstance
  toFormat(format: SharpFormat, options?: { quality?: number }): SharpInstance
  toBuffer(): Promise<Buffer>
  metadata(): Promise<SharpMetadata>
}
type SharpFactory = (input: Buffer) => SharpInstance
/** Accepts either a CJS function export or an ESM `{ default }` wrapper for sharp. */
function resolveSharpFactory(sharpModule: unknown): SharpFactory | null {
  if (typeof sharpModule === "function") {
    return sharpModule as SharpFactory
  }
  if (sharpModule && typeof sharpModule === "object") {
    const defaultExport = Reflect.get(sharpModule, "default")
    if (typeof defaultExport === "function") {
      return defaultExport as SharpFactory
    }
  }
  return null
}
/** Maps a mime type to sharp's output format name; anything unrecognized encodes as JPEG. */
function resolveSharpFormat(mimeType: string): SharpFormat {
  switch (mimeType.toLowerCase()) {
    case "image/png":
      return "png"
    case "image/gif":
      return "gif"
    case "image/webp":
      return "webp"
    default:
      return "jpeg"
  }
}
/** Only the lossy formats accept a quality knob in sharp's toFormat options. */
function canAdjustQuality(format: SharpFormat): boolean {
  switch (format) {
    case "jpeg":
    case "webp":
      return true
    default:
      return false
  }
}
/** Converts sharp metadata to ImageDimensions; null when width/height are absent or zero. */
function toDimensions(metadata: SharpMetadata): ImageDimensions | null {
  if (metadata.width && metadata.height) {
    return { width: metadata.width, height: metadata.height }
  }
  return null
}
/** Runs one sharp pipeline: fit-inside resize, re-encode, return the bytes. */
async function renderResizedBuffer(args: {
  sharpFactory: SharpFactory
  inputBuffer: Buffer
  target: ImageDimensions
  format: SharpFormat
  quality?: number
}): Promise<Buffer> {
  const formatOptions = args.quality ? { quality: args.quality } : undefined
  const pipeline = args
    .sharpFactory(args.inputBuffer)
    .resize(args.target.width, args.target.height, { fit: "inside" })
    .toFormat(args.format, formatOptions)
  return pipeline.toBuffer()
}
/** Extracts a printable message from any thrown value. */
function getErrorMessage(error: unknown): string {
  if (error instanceof Error) {
    return error.message
  }
  return String(error)
}
/**
 * Computes downscaled dimensions whose long edge is capped at maxLongEdge,
 * preserving aspect ratio (short edge floored, clamped to at least 1px).
 * Returns null when the image already fits or any input is non-positive.
 */
export function calculateTargetDimensions(
  width: number,
  height: number,
  maxLongEdge = ANTHROPIC_MAX_LONG_EDGE,
): ImageDimensions | null {
  const invalid = width <= 0 || height <= 0 || maxLongEdge <= 0
  if (invalid || Math.max(width, height) <= maxLongEdge) {
    return null
  }
  if (width >= height) {
    const shortEdge = Math.max(1, Math.floor((height * maxLongEdge) / width))
    return { width: maxLongEdge, height: shortEdge }
  }
  const shortEdge = Math.max(1, Math.floor((width * maxLongEdge) / height))
  return { width: shortEdge, height: maxLongEdge }
}
/**
 * Resizes a base64 data-URL image to `target` dimensions using sharp.
 *
 * sharp is imported dynamically via a variable specifier so bundlers do not
 * turn it into a hard dependency; when it is unavailable or anything throws,
 * the function logs and returns null so callers keep the original image.
 * If the resized output exceeds Anthropic's 5 MB cap and the format supports
 * a quality setting, quality is lowered stepwise (80 / 60 / 40) until the
 * output fits or options run out.
 */
export async function resizeImage(
  base64DataUrl: string,
  mimeType: string,
  target: ImageDimensions,
): Promise<ResizeResult | null> {
  try {
    // Variable specifier defeats static analysis: sharp stays optional.
    const sharpModuleName = "sharp"
    const sharpModule = await import(sharpModuleName).catch(() => null)
    if (!sharpModule) {
      log("[read-image-resizer] sharp unavailable, skipping resize")
      return null
    }
    const sharpFactory = resolveSharpFactory(sharpModule)
    if (!sharpFactory) {
      log("[read-image-resizer] sharp import has unexpected shape")
      return null
    }
    const rawBase64 = extractBase64Data(base64DataUrl)
    if (!rawBase64) {
      return null
    }
    const inputBuffer = Buffer.from(rawBase64, "base64")
    if (inputBuffer.length === 0) {
      return null
    }
    const original = toDimensions(await sharpFactory(inputBuffer).metadata())
    if (!original) {
      return null
    }
    const format = resolveSharpFormat(mimeType)
    let resizedBuffer = await renderResizedBuffer({
      sharpFactory,
      inputBuffer,
      target,
      format,
    })
    // Quality ladder: only lossy formats can trade quality for file size.
    if (resizedBuffer.length > ANTHROPIC_MAX_FILE_SIZE && canAdjustQuality(format)) {
      for (const quality of [80, 60, 40]) {
        resizedBuffer = await renderResizedBuffer({
          sharpFactory,
          inputBuffer,
          target,
          format,
          quality,
        })
        if (resizedBuffer.length <= ANTHROPIC_MAX_FILE_SIZE) {
          break
        }
      }
    }
    // Re-read metadata from the output so reported dimensions are exact.
    const resized = toDimensions(await sharpFactory(resizedBuffer).metadata())
    if (!resized) {
      return null
    }
    return {
      resizedDataUrl: `data:${mimeType};base64,${resizedBuffer.toString("base64")}`,
      original,
      resized,
    }
  } catch (error) {
    log("[read-image-resizer] resize failed", {
      error: getErrorMessage(error),
      mimeType,
      target,
    })
    return null
  }
}

View File

@@ -0,0 +1 @@
export { createReadImageResizerHook } from "./hook"

View File

@@ -0,0 +1,16 @@
// Pixel width/height pair.
export interface ImageDimensions {
  width: number
  height: number
}
// Image attachment from a Read tool output: mime type plus a base64 data URL,
// with an optional filename for display.
export interface ImageAttachment {
  mime: string
  url: string
  filename?: string
}
// Result of a successful resize: the new data URL plus before/after dimensions.
export interface ResizeResult {
  resizedDataUrl: string
  original: ImageDimensions
  resized: ImageDimensions
}

View File

@@ -2,7 +2,7 @@ import { describe, it, expect } from "bun:test"
import { remapAgentKeysToDisplayNames } from "./agent-key-remapper"
describe("remapAgentKeysToDisplayNames", () => {
it("remaps known agent keys to display names while preserving original keys", () => {
it("remaps known agent keys to display names", () => {
// given agents with lowercase keys
const agents = {
sisyphus: { prompt: "test", mode: "primary" },
@@ -12,11 +12,10 @@ describe("remapAgentKeysToDisplayNames", () => {
// when remapping
const result = remapAgentKeysToDisplayNames(agents)
// then known agents get display name keys and original keys remain accessible
// then known agents get display name keys only
expect(result["Sisyphus (Ultraworker)"]).toBeDefined()
expect(result["oracle"]).toBeDefined()
expect(result["sisyphus"]).toBeDefined()
expect(result["Sisyphus (Ultraworker)"]).toBe(result["sisyphus"])
expect(result["sisyphus"]).toBeUndefined()
})
it("preserves unknown agent keys unchanged", () => {
@@ -32,7 +31,7 @@ describe("remapAgentKeysToDisplayNames", () => {
expect(result["custom-agent"]).toBeDefined()
})
it("remaps all core agents while preserving original keys", () => {
it("remaps all core agents to display names", () => {
// given all core agents
const agents = {
sisyphus: {},
@@ -47,20 +46,20 @@ describe("remapAgentKeysToDisplayNames", () => {
// when remapping
const result = remapAgentKeysToDisplayNames(agents)
// then all get display name keys while original keys still work
// then all get display name keys without lowercase duplicates
expect(result["Sisyphus (Ultraworker)"]).toBeDefined()
expect(result["sisyphus"]).toBeDefined()
expect(result["sisyphus"]).toBeUndefined()
expect(result["Hephaestus (Deep Agent)"]).toBeDefined()
expect(result["hephaestus"]).toBeDefined()
expect(result["hephaestus"]).toBeUndefined()
expect(result["Prometheus (Plan Builder)"]).toBeDefined()
expect(result["prometheus"]).toBeDefined()
expect(result["prometheus"]).toBeUndefined()
expect(result["Atlas (Plan Executor)"]).toBeDefined()
expect(result["atlas"]).toBeDefined()
expect(result["atlas"]).toBeUndefined()
expect(result["Metis (Plan Consultant)"]).toBeDefined()
expect(result["metis"]).toBeDefined()
expect(result["metis"]).toBeUndefined()
expect(result["Momus (Plan Critic)"]).toBeDefined()
expect(result["momus"]).toBeDefined()
expect(result["momus"]).toBeUndefined()
expect(result["Sisyphus-Junior"]).toBeDefined()
expect(result["sisyphus-junior"]).toBeDefined()
expect(result["sisyphus-junior"]).toBeUndefined()
})
})

View File

@@ -9,7 +9,6 @@ export function remapAgentKeysToDisplayNames(
const displayName = AGENT_DISPLAY_NAMES[key]
if (displayName && displayName !== key) {
result[displayName] = value
result[key] = value
} else {
result[key] = value
}

View File

@@ -12,6 +12,7 @@ import {
createTasksTodowriteDisablerHook,
createWriteExistingFileGuardHook,
createHashlineReadEnhancerHook,
createReadImageResizerHook,
createJsonErrorRecoveryHook,
} from "../../hooks"
import {
@@ -33,6 +34,7 @@ export type ToolGuardHooks = {
writeExistingFileGuard: ReturnType<typeof createWriteExistingFileGuardHook> | null
hashlineReadEnhancer: ReturnType<typeof createHashlineReadEnhancerHook> | null
jsonErrorRecovery: ReturnType<typeof createJsonErrorRecoveryHook> | null
readImageResizer: ReturnType<typeof createReadImageResizerHook> | null
}
export function createToolGuardHooks(args: {
@@ -98,13 +100,17 @@ export function createToolGuardHooks(args: {
: null
const hashlineReadEnhancer = isHookEnabled("hashline-read-enhancer")
-? safeHook("hashline-read-enhancer", () => createHashlineReadEnhancerHook(ctx, { hashline_edit: { enabled: pluginConfig.hashline_edit ?? true } }))
+? safeHook("hashline-read-enhancer", () => createHashlineReadEnhancerHook(ctx, { hashline_edit: { enabled: pluginConfig.hashline_edit ?? false } }))
: null
const jsonErrorRecovery = isHookEnabled("json-error-recovery")
? safeHook("json-error-recovery", () => createJsonErrorRecoveryHook(ctx))
: null
const readImageResizer = isHookEnabled("read-image-resizer")
? safeHook("read-image-resizer", () => createReadImageResizerHook(ctx))
: null
return {
commentChecker,
toolOutputTruncator,
@@ -116,5 +122,6 @@ export function createToolGuardHooks(args: {
writeExistingFileGuard,
hashlineReadEnhancer,
jsonErrorRecovery,
readImageResizer,
}
}

View File

@@ -43,6 +43,7 @@ export function createToolExecuteAfterHandler(args: {
await hooks.delegateTaskRetry?.["tool.execute.after"]?.(input, output)
await hooks.atlasHook?.["tool.execute.after"]?.(input, output)
await hooks.taskResumeInfo?.["tool.execute.after"]?.(input, output)
await hooks.readImageResizer?.["tool.execute.after"]?.(input, output)
await hooks.hashlineReadEnhancer?.["tool.execute.after"]?.(input, output)
await hooks.jsonErrorRecovery?.["tool.execute.after"]?.(input, output)
}

View File

@@ -1,5 +1,6 @@
const { describe, expect, test } = require("bun:test")
const { createToolExecuteBeforeHandler } = require("./tool-execute-before")
const { createToolRegistry } = require("./tool-registry")
describe("createToolExecuteBeforeHandler", () => {
test("does not execute subagent question blocker hook for question tool", async () => {
@@ -219,4 +220,54 @@ describe("createToolExecuteBeforeHandler", () => {
})
})
// TDD coverage for the hashline_edit opt-in: the hashline edit tool must only
// be registered when the config explicitly enables it (the default is off).
describe("createToolRegistry", () => {
// Builds a minimal createToolRegistry argument object. `overrides` is spread
// into pluginConfig so individual tests can toggle config flags.
function createRegistryInput(overrides = {}) {
return {
ctx: {
directory: process.cwd(),
client: {},
},
pluginConfig: {
...overrides,
},
// Stub managers — the registry only needs them to exist for these cases.
managers: {
backgroundManager: {},
tmuxSessionManager: {},
skillMcpManager: {},
},
skillContext: {
mergedSkills: [],
availableSkills: [],
browserProvider: "playwright",
disabledSkills: new Set(),
},
availableCategories: [],
}
}
// Default (hashline_edit unset) → edit tool is NOT registered.
describe("#given hashline_edit is undefined", () => {
describe("#when creating tool registry", () => {
test("#then should not register edit tool", () => {
const result = createToolRegistry(createRegistryInput())
expect(result.filteredTools.edit).toBeUndefined()
})
})
})
// Explicit opt-in (hashline_edit: true) → edit tool IS registered.
describe("#given hashline_edit is true", () => {
describe("#when creating tool registry", () => {
test("#then should register edit tool", () => {
const result = createToolRegistry(
createRegistryInput({
hashline_edit: true,
}),
)
expect(result.filteredTools.edit).toBeDefined()
})
})
})
})
export {}

View File

@@ -113,7 +113,7 @@ export function createToolRegistry(args: {
}
: {}
-const hashlineEnabled = pluginConfig.hashline_edit ?? true
+const hashlineEnabled = pluginConfig.hashline_edit ?? false
const hashlineToolsRecord: Record<string, ToolDefinition> = hashlineEnabled
? { edit: createHashlineEditTool() }
: {}

View File

@@ -1,3 +1,4 @@
import { resolve } from "node:path"
import { spawn } from "bun"
import {
resolveGrepCli,
@@ -119,10 +120,9 @@ async function runRgFilesInternal(
if (isRg) {
const args = buildRgArgs(options)
const paths = options.paths?.length ? options.paths : ["."]
args.push(...paths)
cwd = options.paths?.[0] || "."
args.push(".")
command = [cli.path, ...args]
cwd = undefined
} else if (isWindows) {
command = buildPowerShellCommand(options)
cwd = undefined
@@ -177,7 +177,7 @@ async function runRgFilesInternal(
let filePath: string
if (isRg) {
filePath = line
filePath = cwd ? resolve(cwd, line) : line
} else if (isWindows) {
filePath = line.trim()
} else {

View File

@@ -1,3 +1,4 @@
import { resolve } from "node:path"
import type { PluginInput } from "@opencode-ai/plugin"
import { tool, type ToolDefinition } from "@opencode-ai/plugin/tool"
import { runRgFiles } from "./cli"
@@ -22,16 +23,17 @@ export function createGlobTools(ctx: PluginInput): Record<string, ToolDefinition
"simply omit it for the default behavior. Must be a valid directory path if provided."
),
},
execute: async (args) => {
execute: async (args, context) => {
try {
const cli = await resolveGrepCliWithAutoInstall()
const searchPath = args.path ?? ctx.directory
const paths = [searchPath]
const runtimeCtx = context as Record<string, unknown>
const dir = typeof runtimeCtx.directory === "string" ? runtimeCtx.directory : ctx.directory
const searchPath = args.path ? resolve(dir, args.path) : dir
const result = await runRgFiles(
{
pattern: args.pattern,
paths,
paths: [searchPath],
},
cli
)

View File

@@ -1,3 +1,4 @@
import { resolve } from "node:path"
import type { PluginInput } from "@opencode-ai/plugin"
import { tool, type ToolDefinition } from "@opencode-ai/plugin/tool"
import { runRg, runRgCount } from "./cli"
@@ -32,10 +33,12 @@ export function createGrepTools(ctx: PluginInput): Record<string, ToolDefinition
.optional()
.describe("Limit output to first N entries. 0 or omitted means no limit."),
},
execute: async (args) => {
execute: async (args, context) => {
try {
const globs = args.include ? [args.include] : undefined
const searchPath = args.path ?? ctx.directory
const runtimeCtx = context as Record<string, unknown>
const dir = typeof runtimeCtx.directory === "string" ? runtimeCtx.directory : ctx.directory
const searchPath = args.path ? resolve(dir, args.path) : dir
const paths = [searchPath]
const outputMode = args.output_mode ?? "files_with_matches"
const headLimit = args.head_limit ?? 0