Compare commits

...

59 Commits

Author SHA1 Message Date
github-actions[bot]
819c5b5d29 release: v3.2.3 2026-02-04 06:38:00 +00:00
YeonGyu-Kim
224afadbdb fix(skill-loader): respect disabledSkills in async skill resolution 2026-02-04 15:03:57 +09:00
YeonGyu-Kim
953b1f98c9 fix(ci): use regex variables for bash 5.2+ compatibility in changelog generation 2026-02-04 15:00:31 +09:00
YeonGyu-Kim
e073412da1 fix(auth): add graceful fallback for server auth injection
Ultraworked with [Sisyphus](https://github.com/code-yeongyu/oh-my-opencode)

Co-authored-by: Sisyphus <clio-agent@sisyphuslabs.ai>
2026-02-04 14:52:31 +09:00
YeonGyu-Kim
0dd42e2901 fix(non-interactive-env): force unix export syntax for bash env prefix
Ultraworked with [Sisyphus](https://github.com/code-yeongyu/oh-my-opencode)

Co-authored-by: Sisyphus <clio-agent@sisyphuslabs.ai>
2026-02-04 14:52:13 +09:00
YeonGyu-Kim
85932fadc7 test(skill-loader): fix test isolation by resetting skill content
Ultraworked with [Sisyphus](https://github.com/code-yeongyu/oh-my-opencode)

Co-authored-by: Sisyphus <clio-agent@sisyphuslabs.ai>
2026-02-04 14:51:56 +09:00
YeonGyu-Kim
65043a7e94 fix: remove broken TOC links in translated READMEs
Remove outdated configuration section links that no longer exist.
Applies changes from PR #1386 (pierrecorsini).

Co-authored-by: Pierre CORSINI <pierrecorsini@users.noreply.github.com>
2026-02-04 13:54:50 +09:00
YeonGyu-Kim
ffcf1b5715 Merge pull request #1371 from YanzheL/feat/websearch-multi-provider
feat(mcp): add multi-provider websearch support (Exa + Tavily)
2026-02-04 13:52:36 +09:00
YeonGyu-Kim
d14f32f2d5 Merge pull request #1470 from Lynricsy/fix/categories-model-precedence
fix(delegate-task): honor explicit category model over sisyphus-junior
2026-02-04 13:52:25 +09:00
YeonGyu-Kim
f79f164cd5 fix(skill-loader): deterministic collision handling for skill names
- Separate directory and file entries, process directories first
- Use Map to deduplicate skills by name (first-wins)
- Directory skills (SKILL.md, {dir}.md) take precedence over file skills (*.md)
- Add test for collision scenario

Addresses Oracle P2 review feedback from PR #1254
2026-02-04 13:52:06 +09:00
YeonGyu-Kim
dee8cf1720 Merge pull request #1370 from misyuari/fix/refactor-skills
fix: update skill resolution to support disabled skills functionality
2026-02-04 13:47:26 +09:00
YeonGyu-Kim
8098e48658 Merge pull request #1254 from LeekJay/fix/nested-skill-discovery
feat(skill-loader): support nested skill directories
2026-02-04 13:40:03 +09:00
YeonGyu-Kim
0dad85ead7 hephaestus color improvement 2026-02-04 13:36:45 +09:00
YeonGyu-Kim
1e383f44d9 fix(background-agent): abort session on model suggestion retry failure
When promptWithModelSuggestionRetry() fails, the session was not being aborted, causing the polling loop to wait forever for an idle state. Added session.abort() calls in startTask() and resume() catch blocks.

Ultraworked with [Sisyphus](https://github.com/code-yeongyu/oh-my-opencode)

Co-authored-by: Sisyphus <clio-agent@sisyphuslabs.ai>
2026-02-04 13:36:45 +09:00
YeonGyu-Kim
30990f7f59 style(agents): update Hephaestus and Prometheus colors
- Hephaestus: #FF4500 (Magma Orange) → #708090 (Slate Gray)
  Blacksmith's hammer/iron theme, visible in both light and dark modes

- Prometheus: #9D4EDD (Amethyst Purple) → #FF5722 (Deep Orange)
  Fire/flame theme, restoring the original fire color concept
2026-02-04 13:36:45 +09:00
YeonGyu-Kim
51c7fee34c Merge pull request #1280 from Zacks-Zhang/fix/fix-stale-lsp-diagnostics
fix(lsp): prevent stale diagnostics by syncing didChange
2026-02-04 13:35:07 +09:00
YeonGyu-Kim
80e970cf36 Merge pull request #1297 from khduy/fix/deduplicate-settings-paths
fix(claude-code-hooks): deduplicate settings paths to prevent double hook execution
2026-02-04 13:35:06 +09:00
YeonGyu-Kim
b7b466f4f2 Merge pull request #1289 from KonaEspresso94/fix/agent-tools-bug
fix: honor tools overrides via permission migration
2026-02-04 13:34:53 +09:00
YeonGyu-Kim
5dabb8a198 Merge pull request #1393 from ualtinok/dev
fix: grep and glob tools usage without path param under Opencode Desktop
2026-02-04 13:34:52 +09:00
YeonGyu-Kim
d11f0685be Merge pull request #1388 from boguan/dev
fix: remove redundant removeCodeBlocks call
2026-02-04 13:34:51 +09:00
YeonGyu-Kim
814e14edf7 Merge pull request #1384 from devxoul/fix/readme-toc-links
fix: remove broken TOC links in README
2026-02-04 13:34:40 +09:00
Lynricsy
1411ca255a fix(delegate-task): honor explicit category model over sisyphus-junior 2026-02-04 11:51:20 +08:00
YeonGyu-Kim
4330f25fee revert(call-omo-agent): remove metis/momus from ALLOWED_AGENTS
call_omo_agent is for lightweight exploration agents (explore, librarian).
metis/momus are consultation agents that should be invoked via delegate_task.

Reverts part of #1462 that incorrectly added metis/momus to call_omo_agent.
2026-02-04 11:38:24 +09:00
YeonGyu-Kim
737fac4345 fix(agent-restrictions): add read-only restrictions for metis and momus
- Add metis and momus to AGENT_RESTRICTIONS with same pattern as oracle
- Deny write, edit, task, and delegate_task tools
- Enforces read-only design for these advisor agents
- Addresses cubic review feedback on #1462
2026-02-04 11:36:34 +09:00
YeonGyu-Kim
49a4a1bf9e fix(call-omo-agent): allow Prometheus to call Metis and Momus (#1462)
* fix(call-omo-agent): allow Prometheus to call Metis and Momus

* fix(call-omo-agent): update help text and remove unrelated bun.lock

- Update subagent_type description to include metis and momus
- Remove unrelated bun.lock changes (keeps PR scope tight)
- Addresses Oracle review feedback
2026-02-04 11:27:14 +09:00
YeonGyu-Kim
5ffecb60c9 fix(skill-mcp): avoid propertyNames for Gemini compatibility (#1465)
- Replace record(string, unknown) with object({}) in arguments schema
- record() generates propertyNames which Gemini rejects with 400 error
- object({}) generates plain { type: 'object' } without propertyNames
- Runtime parseArguments() already handles arbitrary object keys

Fixes #1315
2026-02-04 11:26:34 +09:00
YeonGyu-Kim
b954afca90 fix(model-requirements): use supported variant for gemini-3-pro (#1463)
* fix(model-requirements): use supported variant for gemini-3-pro

* fix(delegate-task): update artistry variant to high for gemini-3-pro

- Update DEFAULT_CATEGORIES artistry variant from 'max' to 'high'
- Update related test comment
- gemini-3-pro only supports low/high thinking levels, not max
- Addresses Oracle review feedback
2026-02-04 11:26:17 +09:00
YeonGyu-Kim
faae3d0f32 fix(model-availability): prefer exact model ID match in fuzzyMatchModel (#1460)
* fix(model-availability): prefer exact model ID match in fuzzyMatchModel

* fix(model-availability): use filter+shortest for multi-provider tie-break

- Change Priority 2 from find() to filter()+reduce()
- Preserves shortest-match tie-break when multiple providers share model ID
- Add test for multi-provider same model ID case
- Addresses Oracle review feedback
2026-02-04 11:25:59 +09:00
YeonGyu-Kim
c57c0a6bcb docs: clarify Prometheus invocation workflow (#1466) 2026-02-04 11:25:46 +09:00
YeonGyu-Kim
6a66bfccec fix(doctor): respect user-configured agent variant (#1464)
* fix(doctor): respect user-configured agent variant

* fix(doctor): align variant resolution with agent-variant.ts

- Add case-insensitive agent key lookup (matches canonical logic)
- Support category-based variant inheritance (agent.category -> categories[cat].variant)
- Separate getCategoryEffectiveVariant for category-specific resolution
- Addresses Oracle review feedback
2026-02-04 11:25:37 +09:00
YeonGyu-Kim
b19bc857e3 fix(docs): instruct curl over WebFetch for installation (#1461) 2026-02-04 11:25:25 +09:00
dan
2f9004f076 fix(auth): opencode desktop server unauthorized bugfix on subagent spawn (#1399)
* fix(auth): opencode desktop server unauthorized bugfix on subagent spawn

* refactor(auth): add runtime guard and throw on SDK mismatch

- Add JSDoc with SDK API documentation reference
- Replace silent failure with explicit Error throw when OPENCODE_SERVER_PASSWORD is set but client structure is incompatible
- Add runtime type guard for SDK client structure
- Add tests for error cases (missing _client, missing setConfig)
- Remove unrelated bun.lock changes

Co-authored-by: dan-myles <dan-myles@users.noreply.github.com>

---------

Co-authored-by: YeonGyu-Kim <code.yeon.gyu@gmail.com>
Co-authored-by: dan-myles <dan-myles@users.noreply.github.com>
2026-02-04 11:07:02 +09:00
Rishi Vhavle
6151d1cb5e fix: block bash commands in Prometheus mode to respect permission config (#1449)
Fixes #1428 - Prometheus bash bypass security issue
2026-02-04 11:06:54 +09:00
YeonGyu-Kim
13e1d7cbd7 fix(non-interactive-env): use detectShellType() instead of hardcoded 'unix' (#1459)
The shellType was hardcoded to 'unix' which breaks on native Windows shells
(cmd.exe, PowerShell) when running without Git Bash or WSL.

This change uses the existing detectShellType() function to dynamically
determine the correct shell type, enabling proper env var syntax for all
supported shell environments.
2026-02-04 10:52:46 +09:00
github-actions[bot]
5361cd0a5f @kaizen403 has signed the CLA in code-yeongyu/oh-my-opencode#1449 2026-02-03 20:44:35 +00:00
github-actions[bot]
437abd8c17 @wydrox has signed the CLA in code-yeongyu/oh-my-opencode#1436 2026-02-03 16:39:46 +00:00
YanzheL
9a2a6a695a fix(test): use try/finally for guaranteed env restoration 2026-02-03 23:37:12 +08:00
YanzheL
5a2ab0095d fix(mcp): lazy evaluation prevents crash when websearch disabled
createWebsearchConfig was called eagerly before checking disabledMcps,
causing Tavily missing-key error even when websearch was disabled.
Now each MCP is only created if not in disabledMcps list.
2026-02-03 23:37:12 +08:00
YanzheL
17cb49543a fix(mcp): rewrite tests to call createWebsearchConfig directly
Previously tests were tautological - they defined local logic
instead of invoking the actual implementation. Now all tests
properly exercise createWebsearchConfig.
2026-02-03 23:37:12 +08:00
YanzheL
fea7bd2dcf docs(mcp): document websearch provider configuration 2026-02-03 23:37:12 +08:00
YanzheL
ef3d0afa32 test(mcp): add websearch provider tests 2026-02-03 23:37:12 +08:00
YanzheL
00f576868b feat(mcp): add multi-provider websearch support 2026-02-03 23:37:12 +08:00
YanzheL
4840864ed8 feat(config): add websearch provider schema 2026-02-03 23:37:12 +08:00
github-actions[bot]
9f50947795 @filipemsilv4 has signed the CLA in code-yeongyu/oh-my-opencode#1435 2026-02-03 14:38:23 +00:00
github-actions[bot]
45290b5b8f @sk0x0y has signed the CLA in code-yeongyu/oh-my-opencode#1434 2026-02-03 14:21:40 +00:00
github-actions[bot]
9343f38479 @Stranmor has signed the CLA in code-yeongyu/oh-my-opencode#1432 2026-02-03 13:53:27 +00:00
github-actions[bot]
bf83712ae1 @ualtinok has signed the CLA in code-yeongyu/oh-my-opencode#1393 2026-02-03 12:43:21 +00:00
Muhammad Noor Misyuari
374acb3ac6 fix: update tests to reflect changes in skill resolution for async handling and disabled skills 2026-02-03 15:19:08 +07:00
Muhammad Noor Misyuari
ba2a9a9051 fix: update skill resolution to support disabled skills functionality 2026-02-03 15:19:08 +07:00
Muhammad Noor Misyuari
2236a940f8 fix: implement disabled skills functionality in skill resolution 2026-02-03 15:19:01 +07:00
github-actions[bot]
976ffaeb0d @ilarvne has signed the CLA in code-yeongyu/oh-my-opencode#1422 2026-02-03 08:15:51 +00:00
ismeth
527c21ea90 fix(tools): for overridden tools (glob, grep) path should use ctx.directory. OpenCode Desktop might not send path as a param and cwd might resolve to "/" 2026-02-02 11:34:33 +01:00
BoGuan
f68a6f7d1b fix: remove redundant removeCodeBlocks call
Remove duplicate removeCodeBlocks() call in keyword-detector/index.ts.

The detectKeywordsWithType() function already calls removeCodeBlocks() internally, so calling it before passing the text was redundant and caused unnecessary double processing.
2026-02-02 15:18:25 +08:00
konaespresso94
8a5b131c7f chore: tracking merge origin/dev 2026-02-02 15:56:00 +09:00
Suyeol Jeon
ce62da92c6 fix: remove broken TOC links pointing to non-existent sections 2026-02-02 15:16:55 +09:00
khduy
4c40c3adb1 fix(claude-code-hooks): deduplicate settings paths to prevent double hook execution
When cwd equals home directory, ~/.claude/settings.json was being loaded
twice (once as home config and once as cwd config), causing hooks like
Stop to execute twice.

This adds deduplication using Set to ensure each config file is only
loaded once.
2026-01-31 01:30:28 +07:00
konaespresso94
ba129784f5 fix(agents): honor tools overrides via permission migration 2026-01-31 00:29:11 +09:00
Zacks Zhang
3bb4289b18 fix(lsp): prevent stale diagnostics by syncing didChange 2026-01-30 16:39:55 +08:00
LeekJay
64b29ea097 feat(skill-loader): support nested skill directories
Add recursive directory scanning to discover skills in nested directories
like superpowers (e.g., skills/superpowers/brainstorming/SKILL.md).

Changes:
- Add namePrefix, depth, and maxDepth parameters to loadSkillsFromDir
- Recurse into subdirectories when no SKILL.md found at current level
- Construct hierarchical skill names (e.g., 'superpowers/brainstorming')
- Limit recursion depth to 2 levels to prevent infinite loops

This enables compatibility with the superpowers plugin which installs
skills as: ~/.config/opencode/skills/superpowers/ -> superpowers/skills/

Fixes skill discovery for nested directory structures.
2026-01-30 00:39:43 +08:00
65 changed files with 1684 additions and 285 deletions

View File

@@ -255,35 +255,43 @@ jobs:
DOCS=""
OTHER=""
# Store regexes in variables for bash 5.2+ compatibility
# (bash 5.2 changed how parentheses are parsed inside [[ =~ ]])
re_skip='^(chore|ci|release|test|ignore)'
re_feat_scoped='^feat\(([^)]+)\): (.+)$'
re_fix_scoped='^fix\(([^)]+)\): (.+)$'
re_refactor_scoped='^refactor\(([^)]+)\): (.+)$'
re_docs_scoped='^docs\(([^)]+)\): (.+)$'
while IFS= read -r commit; do
[ -z "$commit" ] && continue
# Skip chore, ci, release, test commits
[[ "$commit" =~ ^(chore|ci|release|test|ignore) ]] && continue
[[ "$commit" =~ $re_skip ]] && continue
if [[ "$commit" =~ ^feat ]]; then
# Extract scope and message: feat(scope): message -> **scope**: message
if [[ "$commit" =~ ^feat\(([^)]+)\):\ (.+)$ ]]; then
if [[ "$commit" =~ $re_feat_scoped ]]; then
FEATURES="${FEATURES}\n- **${BASH_REMATCH[1]}**: ${BASH_REMATCH[2]}"
else
MSG="${commit#feat: }"
FEATURES="${FEATURES}\n- ${MSG}"
fi
elif [[ "$commit" =~ ^fix ]]; then
if [[ "$commit" =~ ^fix\(([^)]+)\):\ (.+)$ ]]; then
if [[ "$commit" =~ $re_fix_scoped ]]; then
FIXES="${FIXES}\n- **${BASH_REMATCH[1]}**: ${BASH_REMATCH[2]}"
else
MSG="${commit#fix: }"
FIXES="${FIXES}\n- ${MSG}"
fi
elif [[ "$commit" =~ ^refactor ]]; then
if [[ "$commit" =~ ^refactor\(([^)]+)\):\ (.+)$ ]]; then
if [[ "$commit" =~ $re_refactor_scoped ]]; then
REFACTOR="${REFACTOR}\n- **${BASH_REMATCH[1]}**: ${BASH_REMATCH[2]}"
else
MSG="${commit#refactor: }"
REFACTOR="${REFACTOR}\n- ${MSG}"
fi
elif [[ "$commit" =~ ^docs ]]; then
if [[ "$commit" =~ ^docs\(([^)]+)\):\ (.+)$ ]]; then
if [[ "$commit" =~ $re_docs_scoped ]]; then
DOCS="${DOCS}\n- **${BASH_REMATCH[1]}**: ${BASH_REMATCH[2]}"
else
MSG="${commit#docs: }"

View File

@@ -121,16 +121,6 @@
- [アンインストール](#アンインストール)
- [機能](#機能)
- [設定](#設定)
- [JSONC のサポート](#jsonc-のサポート)
- [Google Auth](#google-auth)
- [Agents](#agents)
- [Permission オプション](#permission-オプション)
- [Sisyphus Agent](#sisyphus-agent)
- [Background Tasks](#background-tasks)
- [Hooks](#hooks)
- [MCPs](#mcps)
- [LSP](#lsp)
- [Experimental](#experimental)
- [作者のノート](#作者のノート)
- [注意](#注意)
- [こちらの企業の専門家にご愛用いただいています](#こちらの企業の専門家にご愛用いただいています)

View File

@@ -123,20 +123,6 @@
- [제거](#제거)
- [기능](#기능)
- [구성](#구성)
- [JSONC 지원](#jsonc-지원)
- [Google 인증](#google-인증)
- [에이전트](#에이전트)
- [권한 옵션](#권한-옵션)
- [내장 스킬](#내장-스킬)
- [Git Master](#git-master)
- [Sisyphus 에이전트](#sisyphus-에이전트)
- [백그라운드 작업](#백그라운드-작업)
- [카테고리](#카테고리)
- [훅](#훅)
- [MCP](#mcp)
- [LSP](#lsp)
- [실험적 기능](#실험적-기능)
- [환경 변수](#환경-변수)
- [작성자의 메모](#작성자의-메모)
- [경고](#경고)
- [다음 기업 전문가들이 사랑합니다](#다음-기업-전문가들이-사랑합니다)

View File

@@ -121,21 +121,7 @@ Yes, technically possible. But I cannot recommend using it.
- [For LLM Agents](#for-llm-agents)
- [Uninstallation](#uninstallation)
- [Features](#features)
- [Configuration](#configuration)
- [JSONC Support](#jsonc-support)
- [Google Auth](#google-auth)
- [Agents](#agents)
- [Permission Options](#permission-options)
- [Built-in Skills](#built-in-skills)
- [Git Master](#git-master)
- [Sisyphus Agent](#sisyphus-agent)
- [Background Tasks](#background-tasks)
- [Categories](#categories)
- [Hooks](#hooks)
- [MCPs](#mcps)
- [LSP](#lsp)
- [Experimental](#experimental)
- [Environment Variables](#environment-variables)
- [Configuration](#configuration)
- [Author's Note](#authors-note)
- [Warnings](#warnings)
- [Loved by professionals at](#loved-by-professionals-at)

View File

@@ -122,20 +122,6 @@
- [卸载](#卸载)
- [功能特性](#功能特性)
- [配置](#配置)
- [JSONC 支持](#jsonc-支持)
- [Google 认证](#google-认证)
- [智能体](#智能体)
- [权限选项](#权限选项)
- [内置技能](#内置技能)
- [Git Master](#git-master)
- [Sisyphus 智能体](#sisyphus-智能体)
- [后台任务](#后台任务)
- [类别](#类别)
- [钩子](#钩子)
- [MCP](#mcp)
- [LSP](#lsp)
- [实验性功能](#实验性功能)
- [环境变量](#环境变量)
- [作者札记](#作者札记)
- [警告](#警告)
- [受到以下专业人士的喜爱](#受到以下专业人士的喜爱)

View File

@@ -2977,6 +2977,18 @@
}
}
},
"websearch": {
"type": "object",
"properties": {
"provider": {
"type": "string",
"enum": [
"exa",
"tavily"
]
}
}
},
"tmux": {
"type": "object",
"properties": {

0
bin/oh-my-opencode.js Normal file → Executable file
View File

View File

@@ -28,13 +28,13 @@
"typescript": "^5.7.3",
},
"optionalDependencies": {
"oh-my-opencode-darwin-arm64": "3.2.1",
"oh-my-opencode-darwin-x64": "3.2.1",
"oh-my-opencode-linux-arm64": "3.2.1",
"oh-my-opencode-linux-arm64-musl": "3.2.1",
"oh-my-opencode-linux-x64": "3.2.1",
"oh-my-opencode-linux-x64-musl": "3.2.1",
"oh-my-opencode-windows-x64": "3.2.1",
"oh-my-opencode-darwin-arm64": "3.2.2",
"oh-my-opencode-darwin-x64": "3.2.2",
"oh-my-opencode-linux-arm64": "3.2.2",
"oh-my-opencode-linux-arm64-musl": "3.2.2",
"oh-my-opencode-linux-x64": "3.2.2",
"oh-my-opencode-linux-x64-musl": "3.2.2",
"oh-my-opencode-windows-x64": "3.2.2",
},
},
},
@@ -226,19 +226,19 @@
"object-inspect": ["object-inspect@1.13.4", "", {}, "sha512-W67iLl4J2EXEGTbfeHCffrjDfitvLANg0UlX3wFUUSTx92KXRFegMHUVgSqE+wvhAbi4WqjGg9czysTV2Epbew=="],
"oh-my-opencode-darwin-arm64": ["oh-my-opencode-darwin-arm64@3.2.1", "", { "os": "darwin", "cpu": "arm64", "bin": { "oh-my-opencode": "bin/oh-my-opencode" } }, "sha512-IvhHRUXTr/g/hJlkKTU2oCdgRl2BDl/Qre31Rukhs4NumlvME6iDmdnm8mM7bTxugfCBkfUUr7QJLxxLhzjdLA=="],
"oh-my-opencode-darwin-arm64": ["oh-my-opencode-darwin-arm64@3.2.2", "", { "os": "darwin", "cpu": "arm64", "bin": { "oh-my-opencode": "bin/oh-my-opencode" } }, "sha512-KyfoWcANfcvpfanrrX+Wc8vH8vr9mvr7dJMHBe2bkvuhdtHnLHOG18hQwLg6jk4HhdoZAeBEmkolOsK2k4XajA=="],
"oh-my-opencode-darwin-x64": ["oh-my-opencode-darwin-x64@3.2.1", "", { "os": "darwin", "cpu": "x64", "bin": { "oh-my-opencode": "bin/oh-my-opencode" } }, "sha512-V2JbAdThAVfhBOcb+wBPZrAI0vBxPPRBdvmAixAxBOFC49CIJUrEFIRBUYFKhSQGHYWrNy8z0zJYoNQm4oQPog=="],
"oh-my-opencode-darwin-x64": ["oh-my-opencode-darwin-x64@3.2.2", "", { "os": "darwin", "cpu": "x64", "bin": { "oh-my-opencode": "bin/oh-my-opencode" } }, "sha512-ajZ1E36Ixwdz6rvSUKUI08M2xOaNIl1ZsdVjknZTrPRtct9xgS+BEFCoSCov9bnV/9DrZD3mlZtO/+FFDbseUg=="],
"oh-my-opencode-linux-arm64": ["oh-my-opencode-linux-arm64@3.2.1", "", { "os": "linux", "cpu": "arm64", "bin": { "oh-my-opencode": "bin/oh-my-opencode" } }, "sha512-SeT8P7Icq5YH/AIaEF28J4q+ifUnOqO2UgMFtdFusr8JLadYFy+6dTdeAuD2uGGToDQ3ZNKuaG+lo84KzEhA5w=="],
"oh-my-opencode-linux-arm64": ["oh-my-opencode-linux-arm64@3.2.2", "", { "os": "linux", "cpu": "arm64", "bin": { "oh-my-opencode": "bin/oh-my-opencode" } }, "sha512-ItJsYfigXcOa8/ejTjopC4qk5BCeYioMQ693kPTpeYHK3ByugTjJk8aamE7bHlVnmrdgWldz91QFzaP82yOAdg=="],
"oh-my-opencode-linux-arm64-musl": ["oh-my-opencode-linux-arm64-musl@3.2.1", "", { "os": "linux", "cpu": "arm64", "bin": { "oh-my-opencode": "bin/oh-my-opencode" } }, "sha512-wJUEVVUn1gyVIFNV4mxWg9cYo1rQdTKUXdGLfiqPiyQhWhZLRfPJ+9qpghvIVv7Dne6rzkbhYWdwdk/tew5RtQ=="],
"oh-my-opencode-linux-arm64-musl": ["oh-my-opencode-linux-arm64-musl@3.2.2", "", { "os": "linux", "cpu": "arm64", "bin": { "oh-my-opencode": "bin/oh-my-opencode" } }, "sha512-/TvjYe/Kb//ZSHnJzgRj0QPKpS5Y2nermVTSaMTGS2btObXQyQWzuphDhsVRu60SVrNLbflHzfuTdqb3avDjyA=="],
"oh-my-opencode-linux-x64": ["oh-my-opencode-linux-x64@3.2.1", "", { "os": "linux", "cpu": "x64", "bin": { "oh-my-opencode": "bin/oh-my-opencode" } }, "sha512-p/XValXi1RRTZV8mEsdStXwZBkyQpgZjB41HLf0VfizPMAKRr6/bhuFZ9BDZFIhcDnLYcGV54MAVEsWms5yC2A=="],
"oh-my-opencode-linux-x64": ["oh-my-opencode-linux-x64@3.2.2", "", { "os": "linux", "cpu": "x64", "bin": { "oh-my-opencode": "bin/oh-my-opencode" } }, "sha512-Ka5j+tjuQkNnpESVzcTzW5tZMlBhOfP9F12+UaR72cIcwFpSoLMBp84rV6R0vXM0zUcrrN7mPeW66DvQ6A0XQQ=="],
"oh-my-opencode-linux-x64-musl": ["oh-my-opencode-linux-x64-musl@3.2.1", "", { "os": "linux", "cpu": "x64", "bin": { "oh-my-opencode": "bin/oh-my-opencode" } }, "sha512-G7aNMqAMO2P+wUUaaAV8sXymm59cX4G9aVNXKAd/PM6RgFWh2F4HkXkOhOdHKYZzCl1QRhjh672mNillYsvebg=="],
"oh-my-opencode-linux-x64-musl": ["oh-my-opencode-linux-x64-musl@3.2.2", "", { "os": "linux", "cpu": "x64", "bin": { "oh-my-opencode": "bin/oh-my-opencode" } }, "sha512-ISl0sTNShKCgPFO+rsDqEDsvVHQAMfOSAxO0KuWbHFKaH+KaRV4d3N/ihgxZ2M94CZjJLzZEuln+6kLZ93cvzQ=="],
"oh-my-opencode-windows-x64": ["oh-my-opencode-windows-x64@3.2.1", "", { "os": "win32", "cpu": "x64", "bin": { "oh-my-opencode": "bin/oh-my-opencode.exe" } }, "sha512-pyqTGlNxirKxQgXx9YJBq2y8KN/1oIygVupClmws7dDPj9etI1l8fs/SBEnMsYzMqTlGbLVeJ5+kj9p+yg7YDA=="],
"oh-my-opencode-windows-x64": ["oh-my-opencode-windows-x64@3.2.2", "", { "os": "win32", "cpu": "x64", "bin": { "oh-my-opencode": "bin/oh-my-opencode.exe" } }, "sha512-KeiJLQvJuZ+UYf/+eMsQXvCiHDRPk6tD15lL+qruLvU19va62JqMNvTuOv97732uF19iG0ZMiiVhqIMbSyVPqQ=="],
"on-finished": ["on-finished@2.4.1", "", { "dependencies": { "ee-first": "1.1.1" } }, "sha512-oVlzkg3ENAhCk2zdv7IJwd/QUD4z2RxRwpkcGY8psCVcCYZNq4wYnVWALHM+brtuJjePWiYF/ClmuDr8Ch5+kg=="],

View File

@@ -27,6 +27,11 @@ After you install it, you can read this [overview guide](./overview.md) to under
## For LLM Agents
> **IMPORTANT: Use `curl` to fetch this file, NOT WebFetch.** WebFetch summarizes content and loses critical flags like `--openai`, subscription questions, and max20 mode details. Always use:
> ```bash
> curl -fsSL https://raw.githubusercontent.com/code-yeongyu/oh-my-opencode/refs/heads/master/docs/guide/installation.md
> ```
If you're an LLM Agent helping set up oh-my-opencode, Welcome! You'll now help your user to install the best agent harness, OhMyOpenCode. Tell user this with greetings, with text 'oMoMoMoMo...'
follow these steps:

View File

@@ -35,7 +35,216 @@ Oh-My-OpenCode solves this by clearly separating two roles:
---
## 2. Overall Architecture
## 2. Prometheus Invocation: Agent Switch vs @plan
A common source of confusion is how to invoke Prometheus for planning. **Both methods achieve the same result** - use whichever feels natural.
### Method 1: Switch to Prometheus Agent (Tab → Select Prometheus)
```
1. Press Tab at the prompt
2. Select "Prometheus" from the agent list
3. Describe your work: "I want to refactor the auth system"
4. Answer interview questions
5. Prometheus creates plan in .sisyphus/plans/{name}.md
```
### Method 2: Use @plan Command (in Sisyphus)
```
1. Stay in Sisyphus (default agent)
2. Type: @plan "I want to refactor the auth system"
3. The @plan command automatically switches to Prometheus
4. Answer interview questions
5. Prometheus creates plan in .sisyphus/plans/{name}.md
```
### Which Should You Use?
| Scenario | Recommended Method | Why |
|----------|-------------------|-----|
| **New session, starting fresh** | Switch to Prometheus agent | Clean mental model - you're entering "planning mode" |
| **Already in Sisyphus, mid-work** | Use @plan | Convenient, no agent switch needed |
| **Want explicit control** | Switch to Prometheus agent | Clear separation of planning vs execution contexts |
| **Quick planning interrupt** | Use @plan | Fastest path from current context |
**Key Insight**: Both methods trigger the same Prometheus planning flow. The @plan command is simply a convenience shortcut that:
1. Detects the `@plan` keyword in your message
2. Routes the request to Prometheus automatically
3. Returns you to Sisyphus after planning completes
---
## 3. /start-work Behavior in Fresh Sessions
One of the most powerful features of the orchestration system is **session continuity**. Understanding how `/start-work` behaves across sessions prevents confusion.
### What Happens When You Run /start-work
```
User: /start-work
[start-work hook activates]
Check: Does .sisyphus/boulder.json exist?
├─ YES (existing work) → RESUME MODE
│ - Read the existing boulder state
│ - Calculate progress (checked vs unchecked boxes)
│ - Inject continuation prompt with remaining tasks
│ - Atlas continues where you left off
└─ NO (fresh start) → INIT MODE
- Find the most recent plan in .sisyphus/plans/
- Create new boulder.json tracking this plan
- Switch session agent to Atlas
- Begin execution from task 1
```
### Session Continuity Explained
The `boulder.json` file tracks:
- **active_plan**: Path to the current plan file
- **session_ids**: All sessions that have worked on this plan
- **started_at**: When work began
- **plan_name**: Human-readable plan identifier
**Example Timeline:**
```
Monday 9:00 AM
└─ @plan "Build user authentication"
└─ Prometheus interviews and creates plan
└─ User: /start-work
└─ Atlas begins execution, creates boulder.json
└─ Task 1 complete, Task 2 in progress...
└─ [Session ends - computer crash, user logout, etc.]
Monday 2:00 PM (NEW SESSION)
└─ User opens new session (agent = Sisyphus by default)
└─ User: /start-work
└─ [start-work hook reads boulder.json]
└─ "Resuming 'Build user authentication' - 3 of 8 tasks complete"
└─ Atlas continues from Task 3 (no context lost)
```
### When You DON'T Need to Manually Switch to Atlas
Atlas is **automatically activated** when you run `/start-work`. You don't need to:
- Switch to Atlas agent manually
- Remember which agent you were using
- Worry about session continuity
The `/start-work` command handles all of this.
### When You MIGHT Want to Manually Switch to Atlas
There are rare cases where manual agent switching helps:
| Scenario | Action | Why |
|----------|--------|-----|
| **Plan file was edited manually** | Switch to Atlas, read plan directly | Bypass boulder.json resume logic |
| **Debugging orchestration issues** | Switch to Atlas for visibility | See Atlas-specific system prompts |
| **Force fresh execution** | Delete boulder.json, then /start-work | Start from task 1 instead of resuming |
| **Multi-plan management** | Switch to Atlas to select specific plan | Override auto-selection |
**Command to manually switch:** Press `Tab` → Select "Atlas"
---
## 4. Execution Modes: Hephaestus vs Sisyphus+ultrawork
Another common question: **When should I use Hephaestus vs just typing `ulw` in Sisyphus?**
### Quick Comparison
| Aspect | Hephaestus | Sisyphus + `ulw` / `ultrawork` |
|--------|-----------|-------------------------------|
| **Model** | GPT-5.2 Codex (medium reasoning) | Claude Opus 4.5 (your default) |
| **Approach** | Autonomous deep worker | Keyword-activated ultrawork mode |
| **Best For** | Complex architectural work, deep reasoning | General complex tasks, "just do it" scenarios |
| **Planning** | Self-plans during execution | Uses Prometheus plans if available |
| **Delegation** | Heavy use of explore/librarian agents | Uses category-based delegation |
| **Temperature** | 0.1 | 0.1 |
### When to Use Hephaestus
Switch to Hephaestus (Tab → Select Hephaestus) when:
1. **Deep architectural reasoning needed**
- "Design a new plugin system"
- "Refactor this monolith into microservices"
2. **Complex debugging requiring inference chains**
- "Why does this race condition only happen on Tuesdays?"
- "Trace this memory leak through 15 files"
3. **Cross-domain knowledge synthesis**
- "Integrate our Rust core with the TypeScript frontend"
- "Migrate from MongoDB to PostgreSQL with zero downtime"
4. **You specifically want GPT-5.2 Codex reasoning**
- Some problems benefit from GPT-5.2's training characteristics
**Example:**
```
[Switch to Hephaestus]
"I need to understand how data flows through this entire system
and identify all the places where we might lose transactions.
Explore thoroughly before proposing fixes."
```
### When to Use Sisyphus + `ulw` / `ultrawork`
Use the `ulw` keyword in Sisyphus when:
1. **You want the agent to figure it out**
- "ulw fix the failing tests"
- "ulw add input validation to the API"
2. **Complex but well-scoped tasks**
- "ulw implement JWT authentication following our patterns"
- "ulw create a new CLI command for deployments"
3. **You're feeling lazy** (officially supported use case)
- Don't want to write detailed requirements
- Trust the agent to explore and decide
4. **You want to leverage existing plans**
- If a Prometheus plan exists, `ulw` mode can use it
- Falls back to autonomous exploration if no plan
**Example:**
```
[Stay in Sisyphus]
"ulw refactor the user service to use the new repository pattern"
[Agent automatically:]
- Explores existing codebase patterns
- Implements the refactor
- Runs verification (tests, typecheck)
- Reports completion
```
### Key Difference in Practice
| Hephaestus | Sisyphus + ulw |
|------------|----------------|
| You manually switch to Hephaestus agent | You type `ulw` in any Sisyphus session |
| GPT-5.2 Codex with medium reasoning | Your configured default model |
| Optimized for autonomous deep work | Optimized for general execution |
| Always uses explore-first approach | Respects existing plans if available |
| "Smart intern that needs no supervision" | "Smart intern that follows your workflow" |
### Recommendation
**For most users**: Use `ulw` keyword in Sisyphus. It's the default path and works excellently for 90% of complex tasks.
**For power users**: Switch to Hephaestus when you specifically need GPT-5.2 Codex's reasoning style or want the "AmpCode deep mode" experience of fully autonomous exploration and execution.
---
## 5. Overall Architecture
```mermaid
flowchart TD
@@ -62,7 +271,7 @@ flowchart TD
---
## 3. Key Components
## 6. Key Components
### 🔮 Prometheus (The Planner)
@@ -85,13 +294,13 @@ flowchart TD
### ⚡ Atlas (The Plan Executor)
- **Model**: `anthropic/claude-opus-4-5` (Extended Thinking 32k)
- **Model**: `anthropic/claude-sonnet-4-5` (Extended Thinking 32k)
- **Role**: Execution and delegation
- **Characteristic**: Doesn't do everything directly, actively delegates to specialized agents (Frontend, Librarian, etc.).
---
## 4. Workflow
## 7. Workflow
### Phase 1: Interview and Planning (Interview Mode)
@@ -113,31 +322,44 @@ When the user requests "Make it a plan", plan generation begins.
When the user enters `/start-work`, the execution phase begins.
1. **State Management**: Creates `boulder.json` file to track current plan and session ID.
1. **State Management**: Creates/reads `boulder.json` file to track current plan and session ID.
2. **Task Execution**: Atlas reads the plan and processes TODOs one by one.
3. **Delegation**: UI work is delegated to Frontend agent, complex logic to Oracle.
4. **Continuity**: Even if the session is interrupted, work continues in the next session through `boulder.json`.
---
## 5. Commands and Usage
## 8. Commands and Usage
### `@plan [request]`
Invokes Prometheus to start a planning session.
Invokes Prometheus to start a planning session from Sisyphus.
- Example: `@plan "I want to refactor the authentication system to NextAuth"`
- Effect: Routes to Prometheus, then returns to Sisyphus when planning completes
### `/start-work`
Executes the generated plan.
- Function: Finds plan in `.sisyphus/plans/` and enters execution mode.
- If there's interrupted work, automatically resumes from where it left off.
- **Fresh session**: Finds plan in `.sisyphus/plans/` and enters execution mode
- **Existing boulder**: Resumes from where you left off (reads boulder.json)
- **Effect**: Automatically switches to Atlas agent if not already active
### Switching Agents Manually
Press `Tab` at the prompt to see available agents:
| Agent | When to Switch |
|-------|---------------|
| **Prometheus** | You want to create a detailed work plan |
| **Atlas** | You want to manually control plan execution (rare) |
| **Hephaestus** | You need GPT-5.2 Codex for deep autonomous work |
| **Sisyphus** | Return to default agent for normal prompting |
---
## 6. Configuration Guide
## 9. Configuration Guide
You can control related features in `oh-my-opencode.json`.
@@ -157,8 +379,46 @@ You can control related features in `oh-my-opencode.json`.
}
```
## 7. Best Practices
---
## 10. Best Practices
1. **Don't Rush Planning**: Invest sufficient time in the interview with Prometheus. The more perfect the plan, the faster the execution.
1. **Don't Rush**: Invest sufficient time in the interview with Prometheus. The more perfect the plan, the faster the execution.
2. **Single Plan Principle**: No matter how large the task, contain all TODOs in one plan file (`.md`). This prevents context fragmentation.
3. **Active Delegation**: During execution, delegate to specialized agents via `delegate_task` rather than modifying code directly.
4. **Trust /start-work Continuity**: Don't worry about session interruptions. `/start-work` will always resume your work from boulder.json.
5. **Use `ulw` for Convenience**: When in doubt, type `ulw` and let the system figure out the best approach.
6. **Reserve Hephaestus for Deep Work**: Don't overthink agent selection. Hephaestus shines for genuinely complex architectural challenges.
---
## 11. Troubleshooting Common Confusions
### "I switched to Prometheus but nothing happened"
Prometheus enters **interview mode** by default. It will ask you questions about your requirements. Answer them, then say "make it a plan" when ready.
### "/start-work says 'no active plan found'"
Either:
- No plans exist in `.sisyphus/plans/` → Create one with Prometheus first
- Plans exist but boulder.json points elsewhere → Delete `.sisyphus/boulder.json` and retry
### "I'm in Atlas but I want to switch back to normal mode"
Type `exit` or start a new session. Atlas is primarily entered via `/start-work` - you don't typically "switch to Atlas" manually.
### "What's the difference between @plan and just switching to Prometheus?"
**Nothing functional.** Both invoke Prometheus. @plan is a convenience command while switching agents is explicit control. Use whichever feels natural.
### "Should I use Hephaestus or type ulw?"
**For most tasks**: Type `ulw` in Sisyphus.
**Use Hephaestus when**: You specifically need GPT-5.2 Codex's reasoning style for deep architectural work or complex debugging.

View File

@@ -1,6 +1,6 @@
{
"name": "oh-my-opencode",
"version": "3.2.2",
"version": "3.2.3",
"description": "The Best AI Agent Harness - Batteries-Included OpenCode Plugin with Multi-Model Orchestration, Parallel Background Agents, and Crafted LSP/AST Tools",
"main": "dist/index.js",
"types": "dist/index.d.ts",
@@ -74,13 +74,13 @@
"typescript": "^5.7.3"
},
"optionalDependencies": {
"oh-my-opencode-darwin-arm64": "3.2.2",
"oh-my-opencode-darwin-x64": "3.2.2",
"oh-my-opencode-linux-arm64": "3.2.2",
"oh-my-opencode-linux-arm64-musl": "3.2.2",
"oh-my-opencode-linux-x64": "3.2.2",
"oh-my-opencode-linux-x64-musl": "3.2.2",
"oh-my-opencode-windows-x64": "3.2.2"
"oh-my-opencode-darwin-arm64": "3.2.3",
"oh-my-opencode-darwin-x64": "3.2.3",
"oh-my-opencode-linux-arm64": "3.2.3",
"oh-my-opencode-linux-arm64-musl": "3.2.3",
"oh-my-opencode-linux-x64": "3.2.3",
"oh-my-opencode-linux-x64-musl": "3.2.3",
"oh-my-opencode-windows-x64": "3.2.3"
},
"trustedDependencies": [
"@ast-grep/cli",

View File

@@ -1,6 +1,6 @@
{
"name": "oh-my-opencode-darwin-arm64",
"version": "3.2.2",
"version": "3.2.3",
"description": "Platform-specific binary for oh-my-opencode (darwin-arm64)",
"license": "MIT",
"repository": {

View File

@@ -1,6 +1,6 @@
{
"name": "oh-my-opencode-darwin-x64",
"version": "3.2.2",
"version": "3.2.3",
"description": "Platform-specific binary for oh-my-opencode (darwin-x64)",
"license": "MIT",
"repository": {

View File

@@ -1,6 +1,6 @@
{
"name": "oh-my-opencode-linux-arm64-musl",
"version": "3.2.2",
"version": "3.2.3",
"description": "Platform-specific binary for oh-my-opencode (linux-arm64-musl)",
"license": "MIT",
"repository": {

View File

@@ -1,6 +1,6 @@
{
"name": "oh-my-opencode-linux-arm64",
"version": "3.2.2",
"version": "3.2.3",
"description": "Platform-specific binary for oh-my-opencode (linux-arm64)",
"license": "MIT",
"repository": {

View File

@@ -1,6 +1,6 @@
{
"name": "oh-my-opencode-linux-x64-musl",
"version": "3.2.2",
"version": "3.2.3",
"description": "Platform-specific binary for oh-my-opencode (linux-x64-musl)",
"license": "MIT",
"repository": {

View File

@@ -1,6 +1,6 @@
{
"name": "oh-my-opencode-linux-x64",
"version": "3.2.2",
"version": "3.2.3",
"description": "Platform-specific binary for oh-my-opencode (linux-x64)",
"license": "MIT",
"repository": {

View File

@@ -1,6 +1,6 @@
{
"name": "oh-my-opencode-windows-x64",
"version": "3.2.2",
"version": "3.2.3",
"description": "Platform-specific binary for oh-my-opencode (windows-x64)",
"license": "MIT",
"repository": {

View File

@@ -1127,6 +1127,62 @@
"created_at": "2026-02-02T16:58:50Z",
"repoId": 1108837393,
"pullRequestNo": 1399
},
{
"name": "ilarvne",
"id": 99905590,
"comment_id": 3839771590,
"created_at": "2026-02-03T08:15:37Z",
"repoId": 1108837393,
"pullRequestNo": 1422
},
{
"name": "ualtinok",
"id": 94532,
"comment_id": 3841078284,
"created_at": "2026-02-03T12:39:59Z",
"repoId": 1108837393,
"pullRequestNo": 1393
},
{
"name": "Stranmor",
"id": 49376798,
"comment_id": 3841465375,
"created_at": "2026-02-03T13:53:13Z",
"repoId": 1108837393,
"pullRequestNo": 1432
},
{
"name": "sk0x0y",
"id": 35445665,
"comment_id": 3841625993,
"created_at": "2026-02-03T14:21:26Z",
"repoId": 1108837393,
"pullRequestNo": 1434
},
{
"name": "filipemsilv4",
"id": 59426206,
"comment_id": 3841722121,
"created_at": "2026-02-03T14:38:07Z",
"repoId": 1108837393,
"pullRequestNo": 1435
},
{
"name": "wydrox",
"id": 79707825,
"comment_id": 3842392636,
"created_at": "2026-02-03T16:39:35Z",
"repoId": 1108837393,
"pullRequestNo": 1436
},
{
"name": "kaizen403",
"id": 134706404,
"comment_id": 3843559932,
"created_at": "2026-02-03T20:44:25Z",
"repoId": 1108837393,
"pullRequestNo": 1449
}
]
}

View File

@@ -584,7 +584,7 @@ export function createHephaestusAgent(
model,
maxTokens: 32000,
prompt,
color: "#FF4500", // Magma Orange - forge heat, distinct from Prometheus purple
color: "#D97706", // Forged Amber - Golden heated metal, divine craftsman
permission: { question: "allow", call_omo_agent: "deny" } as AgentConfig["permission"],
reasoningEffort: "medium",
}

View File

@@ -41,7 +41,7 @@ describe("createBuiltinAgents with model overrides", () => {
}
// #when
const agents = await createBuiltinAgents([], overrides, undefined, TEST_DEFAULT_MODEL)
const agents = await createBuiltinAgents([], overrides, undefined, TEST_DEFAULT_MODEL, undefined, undefined, [], undefined, undefined)
// #then
expect(agents.sisyphus.model).toBe("github-copilot/gpt-5.2")
@@ -103,7 +103,7 @@ describe("createBuiltinAgents with model overrides", () => {
const cacheSpy = spyOn(connectedProvidersCache, "readConnectedProvidersCache").mockReturnValue(["openai"])
// #when
const agents = await createBuiltinAgents([], {}, undefined, TEST_DEFAULT_MODEL)
const agents = await createBuiltinAgents([], {}, undefined, TEST_DEFAULT_MODEL, undefined, undefined, [], undefined, undefined)
// #then - oracle resolves via connected cache fallback to openai/gpt-5.2 (not system default)
expect(agents.oracle.model).toBe("openai/gpt-5.2")
@@ -132,7 +132,7 @@ describe("createBuiltinAgents with model overrides", () => {
}
// #when
const agents = await createBuiltinAgents([], overrides, undefined, TEST_DEFAULT_MODEL)
const agents = await createBuiltinAgents([], overrides, undefined, TEST_DEFAULT_MODEL, undefined, undefined, [], undefined, undefined)
// #then
expect(agents.oracle.model).toBe("openai/gpt-5.2")
@@ -148,7 +148,7 @@ describe("createBuiltinAgents with model overrides", () => {
}
// #when
const agents = await createBuiltinAgents([], overrides, undefined, TEST_DEFAULT_MODEL)
const agents = await createBuiltinAgents([], overrides, undefined, TEST_DEFAULT_MODEL, undefined, undefined, [], undefined, undefined)
// #then
expect(agents.oracle.model).toBe("anthropic/claude-sonnet-4")
@@ -164,12 +164,25 @@ describe("createBuiltinAgents with model overrides", () => {
}
// #when
const agents = await createBuiltinAgents([], overrides, undefined, TEST_DEFAULT_MODEL)
const agents = await createBuiltinAgents([], overrides, undefined, TEST_DEFAULT_MODEL, undefined, undefined, [], undefined, undefined)
// #then
expect(agents.sisyphus.model).toBe("github-copilot/gpt-5.2")
expect(agents.sisyphus.temperature).toBe(0.5)
})
test("createBuiltinAgents excludes disabled skills from availableSkills", async () => {
// #given
const disabledSkills = new Set(["playwright"])
// #when
const agents = await createBuiltinAgents([], {}, undefined, TEST_DEFAULT_MODEL, undefined, undefined, [], undefined, undefined, undefined, disabledSkills)
// #then
expect(agents.sisyphus.prompt).not.toContain("playwright")
expect(agents.sisyphus.prompt).toContain("frontend-ui-ux")
expect(agents.sisyphus.prompt).toContain("git-master")
})
})
describe("createBuiltinAgents without systemDefaultModel", () => {
@@ -740,6 +753,52 @@ describe("override.category expansion in createBuiltinAgents", () => {
})
})
describe("agent override tools migration", () => {
test("tools: { x: false } is migrated to permission: { x: deny }", async () => {
// #given
const overrides = {
explore: { tools: { "jetbrains_*": false } } as any,
}
// #when
const agents = await createBuiltinAgents([], overrides, undefined, TEST_DEFAULT_MODEL)
// #then
expect(agents.explore).toBeDefined()
const permission = agents.explore.permission as Record<string, string>
expect(permission["jetbrains_*"]).toBe("deny")
})
test("tools: { x: true } is migrated to permission: { x: allow }", async () => {
// #given
const overrides = {
librarian: { tools: { "jetbrains_get_*": true } } as any,
}
// #when
const agents = await createBuiltinAgents([], overrides, undefined, TEST_DEFAULT_MODEL)
// #then
expect(agents.librarian).toBeDefined()
const permission = agents.librarian.permission as Record<string, string>
expect(permission["jetbrains_get_*"]).toBe("allow")
})
test("tools config is removed after migration", async () => {
// #given
const overrides = {
explore: { tools: { "some_tool": false } } as any,
}
// #when
const agents = await createBuiltinAgents([], overrides, undefined, TEST_DEFAULT_MODEL)
// #then
expect(agents.explore).toBeDefined()
expect((agents.explore as any).tools).toBeUndefined()
})
})
describe("Deadlock prevention - fetchAvailableModels must not receive client", () => {
test("createBuiltinAgents should call fetchAvailableModels with undefined client to prevent deadlock", async () => {
// #given - This test ensures we don't regress on issue #1301

View File

@@ -11,7 +11,7 @@ import { createAtlasAgent, atlasPromptMetadata } from "./atlas"
import { createMomusAgent, momusPromptMetadata } from "./momus"
import { createHephaestusAgent } from "./hephaestus"
import type { AvailableAgent, AvailableCategory, AvailableSkill } from "./dynamic-agent-prompt-builder"
import { deepMerge, fetchAvailableModels, resolveModelPipeline, AGENT_MODEL_REQUIREMENTS, readConnectedProvidersCache, isModelAvailable, isAnyFallbackModelAvailable } from "../shared"
import { deepMerge, fetchAvailableModels, resolveModelPipeline, AGENT_MODEL_REQUIREMENTS, readConnectedProvidersCache, isModelAvailable, isAnyFallbackModelAvailable, migrateAgentConfig } from "../shared"
import { DEFAULT_CATEGORIES, CATEGORY_DESCRIPTIONS } from "../tools/delegate-task/constants"
import { resolveMultipleSkills } from "../features/opencode-skill-loader/skill-content"
import { createBuiltinSkills } from "../features/builtin-skills"
@@ -57,7 +57,8 @@ export function buildAgent(
model: string,
categories?: CategoriesConfig,
gitMasterConfig?: GitMasterConfig,
browserProvider?: BrowserAutomationProvider
browserProvider?: BrowserAutomationProvider,
disabledSkills?: Set<string>
): AgentConfig {
const base = isFactory(source) ? source(model) : source
const categoryConfigs: Record<string, CategoryConfig> = categories
@@ -81,7 +82,7 @@ export function buildAgent(
}
if (agentWithCategory.skills?.length) {
const { resolved } = resolveMultipleSkills(agentWithCategory.skills, { gitMasterConfig, browserProvider })
const { resolved } = resolveMultipleSkills(agentWithCategory.skills, { gitMasterConfig, browserProvider, disabledSkills })
if (resolved.size > 0) {
const skillContent = Array.from(resolved.values()).join("\n\n")
base.prompt = skillContent + (base.prompt ? "\n\n" + base.prompt : "")
@@ -207,7 +208,8 @@ function mergeAgentConfig(
base: AgentConfig,
override: AgentOverrideConfig
): AgentConfig {
const { prompt_append, ...rest } = override
const migratedOverride = migrateAgentConfig(override as Record<string, unknown>) as AgentOverrideConfig
const { prompt_append, ...rest } = migratedOverride
const merged = deepMerge(base, rest as Partial<AgentConfig>)
if (prompt_append && merged.prompt) {
@@ -233,7 +235,8 @@ export async function createBuiltinAgents(
discoveredSkills: LoadedSkill[] = [],
client?: any,
browserProvider?: BrowserAutomationProvider,
uiSelectedModel?: string
uiSelectedModel?: string,
disabledSkills?: Set<string>
): Promise<Record<string, AgentConfig>> {
const connectedProviders = readConnectedProvidersCache()
// IMPORTANT: Do NOT pass client to fetchAvailableModels during plugin initialization.
@@ -257,7 +260,7 @@ export async function createBuiltinAgents(
description: categories?.[name]?.description ?? CATEGORY_DESCRIPTIONS[name] ?? "General tasks",
}))
const builtinSkills = createBuiltinSkills({ browserProvider })
const builtinSkills = createBuiltinSkills({ browserProvider, disabledSkills })
const builtinSkillNames = new Set(builtinSkills.map(s => s.name))
const builtinAvailable: AvailableSkill[] = builtinSkills.map((skill) => ({
@@ -290,16 +293,16 @@ export async function createBuiltinAgents(
const override = agentOverrides[agentName]
?? Object.entries(agentOverrides).find(([key]) => key.toLowerCase() === agentName.toLowerCase())?.[1]
const requirement = AGENT_MODEL_REQUIREMENTS[agentName]
// Check if agent requires a specific model
if (requirement?.requiresModel && availableModels) {
if (!isModelAvailable(requirement.requiresModel, availableModels)) {
continue
}
}
const isPrimaryAgent = isFactory(source) && source.mode === "primary"
const resolution = applyModelResolution({
uiSelectedModel: isPrimaryAgent ? uiSelectedModel : undefined,
userModel: override?.model,
@@ -310,7 +313,7 @@ export async function createBuiltinAgents(
if (!resolution) continue
const { model, variant: resolvedVariant } = resolution
let config = buildAgent(source, model, mergedCategories, gitMasterConfig, browserProvider)
let config = buildAgent(source, model, mergedCategories, gitMasterConfig, browserProvider, disabledSkills)
// Apply resolved variant from model fallback chain
if (resolvedVariant) {
@@ -374,7 +377,7 @@ export async function createBuiltinAgents(
availableSkills,
availableCategories
)
if (sisyphusResolvedVariant) {
sisyphusConfig = { ...sisyphusConfig, variant: sisyphusResolvedVariant }
}
@@ -419,7 +422,7 @@ export async function createBuiltinAgents(
availableSkills,
availableCategories
)
hephaestusConfig = { ...hephaestusConfig, variant: hephaestusResolvedVariant ?? "medium" }
const hepOverrideCategory = (hephaestusOverride as Record<string, unknown> | undefined)?.category as string | undefined
@@ -467,7 +470,7 @@ export async function createBuiltinAgents(
availableSkills,
userCategories: categories,
})
if (atlasResolvedVariant) {
orchestratorConfig = { ...orchestratorConfig, variant: atlasResolvedVariant }
}

View File

@@ -335,18 +335,18 @@ exports[`generateModelConfig single native provider uses Gemini models when only
},
"metis": {
"model": "google/gemini-3-pro",
"variant": "max",
"variant": "high",
},
"momus": {
"model": "google/gemini-3-pro",
"variant": "max",
"variant": "high",
},
"multimodal-looker": {
"model": "google/gemini-3-flash",
},
"oracle": {
"model": "google/gemini-3-pro",
"variant": "max",
"variant": "high",
},
"prometheus": {
"model": "google/gemini-3-pro",
@@ -355,14 +355,14 @@ exports[`generateModelConfig single native provider uses Gemini models when only
"categories": {
"artistry": {
"model": "google/gemini-3-pro",
"variant": "max",
"variant": "high",
},
"quick": {
"model": "google/gemini-3-flash",
},
"ultrabrain": {
"model": "google/gemini-3-pro",
"variant": "max",
"variant": "high",
},
"unspecified-high": {
"model": "google/gemini-3-flash",
@@ -395,18 +395,18 @@ exports[`generateModelConfig single native provider uses Gemini models with isMa
},
"metis": {
"model": "google/gemini-3-pro",
"variant": "max",
"variant": "high",
},
"momus": {
"model": "google/gemini-3-pro",
"variant": "max",
"variant": "high",
},
"multimodal-looker": {
"model": "google/gemini-3-flash",
},
"oracle": {
"model": "google/gemini-3-pro",
"variant": "max",
"variant": "high",
},
"prometheus": {
"model": "google/gemini-3-pro",
@@ -415,14 +415,14 @@ exports[`generateModelConfig single native provider uses Gemini models with isMa
"categories": {
"artistry": {
"model": "google/gemini-3-pro",
"variant": "max",
"variant": "high",
},
"quick": {
"model": "google/gemini-3-flash",
},
"ultrabrain": {
"model": "google/gemini-3-pro",
"variant": "max",
"variant": "high",
},
"unspecified-high": {
"model": "google/gemini-3-pro",
@@ -484,7 +484,7 @@ exports[`generateModelConfig all native providers uses preferred models from fal
"categories": {
"artistry": {
"model": "google/gemini-3-pro",
"variant": "max",
"variant": "high",
},
"deep": {
"model": "openai/gpt-5.2-codex",
@@ -557,7 +557,7 @@ exports[`generateModelConfig all native providers uses preferred models with isM
"categories": {
"artistry": {
"model": "google/gemini-3-pro",
"variant": "max",
"variant": "high",
},
"deep": {
"model": "openai/gpt-5.2-codex",
@@ -631,7 +631,7 @@ exports[`generateModelConfig fallback providers uses OpenCode Zen models when on
"categories": {
"artistry": {
"model": "opencode/gemini-3-pro",
"variant": "max",
"variant": "high",
},
"deep": {
"model": "opencode/gpt-5.2-codex",
@@ -704,7 +704,7 @@ exports[`generateModelConfig fallback providers uses OpenCode Zen models with is
"categories": {
"artistry": {
"model": "opencode/gemini-3-pro",
"variant": "max",
"variant": "high",
},
"deep": {
"model": "opencode/gpt-5.2-codex",
@@ -778,7 +778,7 @@ exports[`generateModelConfig fallback providers uses GitHub Copilot models when
"categories": {
"artistry": {
"model": "github-copilot/gemini-3-pro-preview",
"variant": "max",
"variant": "high",
},
"deep": {
"model": "github-copilot/gpt-5.2-codex",
@@ -851,7 +851,7 @@ exports[`generateModelConfig fallback providers uses GitHub Copilot models with
"categories": {
"artistry": {
"model": "github-copilot/gemini-3-pro-preview",
"variant": "max",
"variant": "high",
},
"deep": {
"model": "github-copilot/gpt-5.2-codex",
@@ -1035,7 +1035,7 @@ exports[`generateModelConfig mixed provider scenarios uses Claude + OpenCode Zen
"categories": {
"artistry": {
"model": "opencode/gemini-3-pro",
"variant": "max",
"variant": "high",
},
"deep": {
"model": "opencode/gpt-5.2-codex",
@@ -1108,7 +1108,7 @@ exports[`generateModelConfig mixed provider scenarios uses OpenAI + Copilot comb
"categories": {
"artistry": {
"model": "github-copilot/gemini-3-pro-preview",
"variant": "max",
"variant": "high",
},
"deep": {
"model": "openai/gpt-5.2-codex",
@@ -1225,7 +1225,7 @@ exports[`generateModelConfig mixed provider scenarios uses Gemini + Claude combi
},
"oracle": {
"model": "google/gemini-3-pro",
"variant": "max",
"variant": "high",
},
"prometheus": {
"model": "anthropic/claude-opus-4-5",
@@ -1239,14 +1239,14 @@ exports[`generateModelConfig mixed provider scenarios uses Gemini + Claude combi
"categories": {
"artistry": {
"model": "google/gemini-3-pro",
"variant": "max",
"variant": "high",
},
"quick": {
"model": "anthropic/claude-haiku-4-5",
},
"ultrabrain": {
"model": "google/gemini-3-pro",
"variant": "max",
"variant": "high",
},
"unspecified-high": {
"model": "anthropic/claude-sonnet-4-5",
@@ -1308,7 +1308,7 @@ exports[`generateModelConfig mixed provider scenarios uses all fallback provider
"categories": {
"artistry": {
"model": "github-copilot/gemini-3-pro-preview",
"variant": "max",
"variant": "high",
},
"deep": {
"model": "github-copilot/gpt-5.2-codex",
@@ -1381,7 +1381,7 @@ exports[`generateModelConfig mixed provider scenarios uses all providers togethe
"categories": {
"artistry": {
"model": "google/gemini-3-pro",
"variant": "max",
"variant": "high",
},
"deep": {
"model": "openai/gpt-5.2-codex",
@@ -1454,7 +1454,7 @@ exports[`generateModelConfig mixed provider scenarios uses all providers with is
"categories": {
"artistry": {
"model": "google/gemini-3-pro",
"variant": "max",
"variant": "high",
},
"deep": {
"model": "openai/gpt-5.2-codex",

View File

@@ -69,8 +69,8 @@ export interface ModelResolutionInfo {
}
interface OmoConfig {
agents?: Record<string, { model?: string }>
categories?: Record<string, { model?: string }>
agents?: Record<string, { model?: string; variant?: string; category?: string }>
categories?: Record<string, { model?: string; variant?: string }>
}
function loadConfig(): OmoConfig | null {
@@ -182,7 +182,44 @@ function formatModelWithVariant(model: string, variant?: string): string {
return variant ? `${model} (${variant})` : model
}
function getEffectiveVariant(requirement: ModelRequirement): string | undefined {
/**
 * Looks up the per-agent override entry from the loaded config.
 * Tries an exact key match first, then falls back to a case-insensitive
 * scan over all entries (mirrors the lookup order in agent-variant.ts).
 * Returns undefined when the config has no agents section or no match.
 */
function getAgentOverride(
  agentName: string,
  config: OmoConfig,
): { variant?: string; category?: string } | undefined {
  const overrides = config.agents
  if (!overrides) return undefined
  const direct = overrides[agentName]
  if (direct != null) return direct
  const wanted = agentName.toLowerCase()
  for (const [key, value] of Object.entries(overrides)) {
    if (key.toLowerCase() === wanted) return value
  }
  return undefined
}
function getEffectiveVariant(
name: string,
requirement: ModelRequirement,
config: OmoConfig,
): string | undefined {
const agentOverride = getAgentOverride(name, config)
// Priority 1: Agent's direct variant override
if (agentOverride?.variant) {
return agentOverride.variant
}
// Priority 2: Agent's category -> category's variant (matches agent-variant.ts)
const categoryName = agentOverride?.category
if (categoryName) {
const categoryVariant = config.categories?.[categoryName]?.variant
if (categoryVariant) {
return categoryVariant
}
}
// Priority 3: Fall back to requirement's fallback chain
const firstEntry = requirement.fallbackChain[0]
return firstEntry?.variant ?? requirement.variant
}
@@ -193,7 +230,20 @@ interface AvailableModelsInfo {
cacheExists: boolean
}
function buildDetailsArray(info: ModelResolutionInfo, available: AvailableModelsInfo): string[] {
/**
 * Resolves the variant to display for a category: a user-supplied
 * category override wins; otherwise fall back to the first entry of
 * the requirement's fallback chain, then the requirement's own variant.
 */
function getCategoryEffectiveVariant(
  categoryName: string,
  requirement: ModelRequirement,
  config: OmoConfig,
): string | undefined {
  const overridden = config.categories?.[categoryName]?.variant
  return overridden || (requirement.fallbackChain[0]?.variant ?? requirement.variant)
}
function buildDetailsArray(info: ModelResolutionInfo, available: AvailableModelsInfo, config: OmoConfig): string[] {
const details: string[] = []
details.push("═══ Available Models (from cache) ═══")
@@ -215,14 +265,17 @@ function buildDetailsArray(info: ModelResolutionInfo, available: AvailableModels
details.push("Agents:")
for (const agent of info.agents) {
const marker = agent.userOverride ? "●" : "○"
const display = formatModelWithVariant(agent.effectiveModel, getEffectiveVariant(agent.requirement))
const display = formatModelWithVariant(agent.effectiveModel, getEffectiveVariant(agent.name, agent.requirement, config))
details.push(` ${marker} ${agent.name}: ${display}`)
}
details.push("")
details.push("Categories:")
for (const category of info.categories) {
const marker = category.userOverride ? "●" : "○"
const display = formatModelWithVariant(category.effectiveModel, getEffectiveVariant(category.requirement))
const display = formatModelWithVariant(
category.effectiveModel,
getCategoryEffectiveVariant(category.name, category.requirement, config)
)
details.push(` ${marker} ${category.name}: ${display}`)
}
details.push("")
@@ -249,7 +302,7 @@ export async function checkModelResolution(): Promise<CheckResult> {
name: CHECK_NAMES[CHECK_IDS.MODEL_RESOLUTION],
status: available.cacheExists ? "pass" : "warn",
message: `${agentCount} agents, ${categoryCount} categories${overrideNote}${cacheNote}`,
details: buildDetailsArray(info, available),
details: buildDetailsArray(info, available, config),
}
}

View File

@@ -340,6 +340,17 @@ export const BrowserAutomationConfigSchema = z.object({
provider: BrowserAutomationProviderSchema.default("playwright"),
})
export const WebsearchProviderSchema = z.enum(["exa", "tavily"])
export const WebsearchConfigSchema = z.object({
/**
* Websearch provider to use.
* - "exa": Uses Exa websearch (default, works without API key)
* - "tavily": Uses Tavily websearch (requires TAVILY_API_KEY)
*/
provider: WebsearchProviderSchema.optional(),
})
export const TmuxLayoutSchema = z.enum([
'main-horizontal', // main pane top, agent panes bottom stack
'main-vertical', // main pane left, agent panes right stack (default)
@@ -393,6 +404,7 @@ export const OhMyOpenCodeConfigSchema = z.object({
babysitting: BabysittingConfigSchema.optional(),
git_master: GitMasterConfigSchema.optional(),
browser_automation_engine: BrowserAutomationConfigSchema.optional(),
websearch: WebsearchConfigSchema.optional(),
tmux: TmuxConfigSchema.optional(),
sisyphus: SisyphusConfigSchema.optional(),
})
@@ -420,6 +432,8 @@ export type BuiltinCategoryName = z.infer<typeof BuiltinCategoryNameSchema>
export type GitMasterConfig = z.infer<typeof GitMasterConfigSchema>
export type BrowserAutomationProvider = z.infer<typeof BrowserAutomationProviderSchema>
export type BrowserAutomationConfig = z.infer<typeof BrowserAutomationConfigSchema>
export type WebsearchProvider = z.infer<typeof WebsearchProviderSchema>
export type WebsearchConfig = z.infer<typeof WebsearchConfigSchema>
export type TmuxConfig = z.infer<typeof TmuxConfigSchema>
export type TmuxLayout = z.infer<typeof TmuxLayoutSchema>
export type SisyphusTasksConfig = z.infer<typeof SisyphusTasksConfigSchema>

View File

@@ -351,6 +351,11 @@ export class BackgroundManager {
existingTask.concurrencyKey = undefined
}
// Abort the session to prevent infinite polling hang
this.client.session.abort({
path: { id: sessionID },
}).catch(() => {})
this.markForNotification(existingTask)
this.notifyParentSession(existingTask).catch(err => {
log("[background-agent] Failed to notify on error:", err)
@@ -600,6 +605,14 @@ export class BackgroundManager {
this.concurrencyManager.release(existingTask.concurrencyKey)
existingTask.concurrencyKey = undefined
}
// Abort the session to prevent infinite polling hang
if (existingTask.sessionID) {
this.client.session.abort({
path: { id: existingTask.sessionID },
}).catch(() => {})
}
this.markForNotification(existingTask)
this.notifyParentSession(existingTask).catch(err => {
log("[background-agent] Failed to notify on resume error:", err)

View File

@@ -86,4 +86,58 @@ describe("createBuiltinSkills", () => {
expect(defaultSkills).toHaveLength(4)
expect(agentBrowserSkills).toHaveLength(4)
})
test("should exclude playwright when it is in disabledSkills", () => {
// #given
const options = { disabledSkills: new Set(["playwright"]) }
// #when
const skills = createBuiltinSkills(options)
// #then
expect(skills.map((s) => s.name)).not.toContain("playwright")
expect(skills.map((s) => s.name)).toContain("frontend-ui-ux")
expect(skills.map((s) => s.name)).toContain("git-master")
expect(skills.map((s) => s.name)).toContain("dev-browser")
expect(skills.length).toBe(3)
})
test("should exclude multiple skills when they are in disabledSkills", () => {
// #given
const options = { disabledSkills: new Set(["playwright", "git-master"]) }
// #when
const skills = createBuiltinSkills(options)
// #then
expect(skills.map((s) => s.name)).not.toContain("playwright")
expect(skills.map((s) => s.name)).not.toContain("git-master")
expect(skills.map((s) => s.name)).toContain("frontend-ui-ux")
expect(skills.map((s) => s.name)).toContain("dev-browser")
expect(skills.length).toBe(2)
})
test("should return an empty array when all skills are disabled", () => {
// #given
const options = {
disabledSkills: new Set(["playwright", "frontend-ui-ux", "git-master", "dev-browser"]),
}
// #when
const skills = createBuiltinSkills(options)
// #then
expect(skills.length).toBe(0)
})
test("should return all skills when disabledSkills set is empty", () => {
// #given
const options = { disabledSkills: new Set<string>() }
// #when
const skills = createBuiltinSkills(options)
// #then
expect(skills.length).toBe(4)
})
})

View File

@@ -11,12 +11,19 @@ import {
export interface CreateBuiltinSkillsOptions {
browserProvider?: BrowserAutomationProvider
disabledSkills?: Set<string>
}
export function createBuiltinSkills(options: CreateBuiltinSkillsOptions = {}): BuiltinSkill[] {
const { browserProvider = "playwright" } = options
const { browserProvider = "playwright", disabledSkills } = options
const browserSkill = browserProvider === "agent-browser" ? agentBrowserSkill : playwrightSkill
return [browserSkill, frontendUiUxSkill, gitMasterSkill, devBrowserSkill]
const skills = [browserSkill, frontendUiUxSkill, gitMasterSkill, devBrowserSkill]
if (!disabledSkills) {
return skills
}
return skills.filter((skill) => !disabledSkills.has(skill.name))
}

View File

@@ -387,4 +387,171 @@ Skill body.
}
})
})
describe("nested skill discovery", () => {
it("discovers skills in nested directories (superpowers pattern)", async () => {
// #given - simulate superpowers structure: skills/superpowers/brainstorming/SKILL.md
const nestedDir = join(SKILLS_DIR, "superpowers", "brainstorming")
mkdirSync(nestedDir, { recursive: true })
const skillContent = `---
name: brainstorming
description: A nested skill for brainstorming
---
This is a nested skill.
`
writeFileSync(join(nestedDir, "SKILL.md"), skillContent)
// #when
const { discoverSkills } = await import("./loader")
const originalCwd = process.cwd()
process.chdir(TEST_DIR)
try {
const skills = await discoverSkills({ includeClaudeCodePaths: false })
const skill = skills.find(s => s.name === "superpowers/brainstorming")
// #then
expect(skill).toBeDefined()
expect(skill?.name).toBe("superpowers/brainstorming")
expect(skill?.definition.description).toContain("brainstorming")
} finally {
process.chdir(originalCwd)
}
})
it("discovers multiple skills in nested directories", async () => {
// #given - multiple nested skills
const skills = ["brainstorming", "debugging", "testing"]
for (const skillName of skills) {
const nestedDir = join(SKILLS_DIR, "superpowers", skillName)
mkdirSync(nestedDir, { recursive: true })
writeFileSync(join(nestedDir, "SKILL.md"), `---
name: ${skillName}
description: ${skillName} skill
---
Content for ${skillName}.
`)
}
// #when
const { discoverSkills } = await import("./loader")
const originalCwd = process.cwd()
process.chdir(TEST_DIR)
try {
const discoveredSkills = await discoverSkills({ includeClaudeCodePaths: false })
// #then
for (const skillName of skills) {
const skill = discoveredSkills.find(s => s.name === `superpowers/${skillName}`)
expect(skill).toBeDefined()
}
} finally {
process.chdir(originalCwd)
}
})
it("respects max depth limit", async () => {
// #given - deeply nested skill (3 levels deep, beyond default maxDepth of 2)
const deepDir = join(SKILLS_DIR, "level1", "level2", "level3", "deep-skill")
mkdirSync(deepDir, { recursive: true })
writeFileSync(join(deepDir, "SKILL.md"), `---
name: deep-skill
description: A deeply nested skill
---
Too deep.
`)
// #when
const { discoverSkills } = await import("./loader")
const originalCwd = process.cwd()
process.chdir(TEST_DIR)
try {
const skills = await discoverSkills({ includeClaudeCodePaths: false })
const skill = skills.find(s => s.name.includes("deep-skill"))
// #then - should not find skill beyond maxDepth
expect(skill).toBeUndefined()
} finally {
process.chdir(originalCwd)
}
})
it("flat skills still work alongside nested skills", async () => {
// #given - both flat and nested skills
const flatSkillDir = join(SKILLS_DIR, "flat-skill")
mkdirSync(flatSkillDir, { recursive: true })
writeFileSync(join(flatSkillDir, "SKILL.md"), `---
name: flat-skill
description: A flat skill
---
Flat content.
`)
const nestedDir = join(SKILLS_DIR, "nested", "nested-skill")
mkdirSync(nestedDir, { recursive: true })
writeFileSync(join(nestedDir, "SKILL.md"), `---
name: nested-skill
description: A nested skill
---
Nested content.
`)
// #when
const { discoverSkills } = await import("./loader")
const originalCwd = process.cwd()
process.chdir(TEST_DIR)
try {
const skills = await discoverSkills({ includeClaudeCodePaths: false })
// #then - both should be found
const flatSkill = skills.find(s => s.name === "flat-skill")
const nestedSkill = skills.find(s => s.name === "nested/nested-skill")
expect(flatSkill).toBeDefined()
expect(nestedSkill).toBeDefined()
} finally {
process.chdir(originalCwd)
}
})
it("prefers directory skill (SKILL.md) over file skill (*.md) on name collision", async () => {
// #given - both foo.md file AND foo/SKILL.md directory exist
// Directory skill should win (deterministic precedence: SKILL.md > {dir}.md > *.md)
const dirSkillDir = join(SKILLS_DIR, "collision-test")
mkdirSync(dirSkillDir, { recursive: true })
writeFileSync(join(dirSkillDir, "SKILL.md"), `---
name: collision-test
description: Directory-based skill (should win)
---
I am the directory skill.
`)
// Also create a file with same base name at parent level
writeFileSync(join(SKILLS_DIR, "collision-test.md"), `---
name: collision-test
description: File-based skill (should lose)
---
I am the file skill.
`)
// #when
const { discoverSkills } = await import("./loader")
const originalCwd = process.cwd()
process.chdir(TEST_DIR)
try {
const skills = await discoverSkills({ includeClaudeCodePaths: false })
// #then - only one skill should exist, and it should be the directory-based one
const matchingSkills = skills.filter(s => s.name === "collision-test")
expect(matchingSkills).toHaveLength(1)
expect(matchingSkills[0]?.definition.description).toContain("Directory-based skill")
} finally {
process.chdir(originalCwd)
}
})
})
})

View File

@@ -66,7 +66,8 @@ async function loadSkillFromPath(
skillPath: string,
resolvedPath: string,
defaultName: string,
scope: SkillScope
scope: SkillScope,
namePrefix: string = ""
): Promise<LoadedSkill | null> {
try {
const content = await fs.readFile(skillPath, "utf-8")
@@ -75,7 +76,10 @@ async function loadSkillFromPath(
const mcpJsonMcp = await loadMcpJsonFromDir(resolvedPath)
const mcpConfig = mcpJsonMcp || frontmatterMcp
const skillName = data.name || defaultName
// For nested skills, use the full path as the name (e.g., "superpowers/brainstorming")
// For flat skills, use frontmatter name or directory name
const baseName = data.name || defaultName
const skillName = namePrefix ? `${namePrefix}/${baseName}` : baseName
const originalDescription = data.description || ""
const isOpencodeSource = scope === "opencode" || scope === "opencode-project"
const formattedDescription = `(${scope} - Skill) ${originalDescription}`
@@ -128,48 +132,67 @@ $ARGUMENTS
}
}
async function loadSkillsFromDir(skillsDir: string, scope: SkillScope): Promise<LoadedSkill[]> {
async function loadSkillsFromDir(
skillsDir: string,
scope: SkillScope,
namePrefix: string = "",
depth: number = 0,
maxDepth: number = 2
): Promise<LoadedSkill[]> {
const entries = await fs.readdir(skillsDir, { withFileTypes: true }).catch(() => [])
const skills: LoadedSkill[] = []
const skillMap = new Map<string, LoadedSkill>()
for (const entry of entries) {
if (entry.name.startsWith(".")) continue
const directories = entries.filter(e => !e.name.startsWith(".") && (e.isDirectory() || e.isSymbolicLink()))
const files = entries.filter(e => !e.name.startsWith(".") && !e.isDirectory() && !e.isSymbolicLink() && isMarkdownFile(e))
for (const entry of directories) {
const entryPath = join(skillsDir, entry.name)
const resolvedPath = await resolveSymlinkAsync(entryPath)
const dirName = entry.name
if (entry.isDirectory() || entry.isSymbolicLink()) {
const resolvedPath = await resolveSymlinkAsync(entryPath)
const dirName = entry.name
const skillMdPath = join(resolvedPath, "SKILL.md")
try {
await fs.access(skillMdPath)
const skill = await loadSkillFromPath(skillMdPath, resolvedPath, dirName, scope)
if (skill) skills.push(skill)
continue
} catch {
const skillMdPath = join(resolvedPath, "SKILL.md")
try {
await fs.access(skillMdPath)
const skill = await loadSkillFromPath(skillMdPath, resolvedPath, dirName, scope, namePrefix)
if (skill && !skillMap.has(skill.name)) {
skillMap.set(skill.name, skill)
}
const namedSkillMdPath = join(resolvedPath, `${dirName}.md`)
try {
await fs.access(namedSkillMdPath)
const skill = await loadSkillFromPath(namedSkillMdPath, resolvedPath, dirName, scope)
if (skill) skills.push(skill)
continue
} catch {
}
continue
} catch {
}
if (isMarkdownFile(entry)) {
const skillName = basename(entry.name, ".md")
const skill = await loadSkillFromPath(entryPath, skillsDir, skillName, scope)
if (skill) skills.push(skill)
const namedSkillMdPath = join(resolvedPath, `${dirName}.md`)
try {
await fs.access(namedSkillMdPath)
const skill = await loadSkillFromPath(namedSkillMdPath, resolvedPath, dirName, scope, namePrefix)
if (skill && !skillMap.has(skill.name)) {
skillMap.set(skill.name, skill)
}
continue
} catch {
}
if (depth < maxDepth) {
const newPrefix = namePrefix ? `${namePrefix}/${dirName}` : dirName
const nestedSkills = await loadSkillsFromDir(resolvedPath, scope, newPrefix, depth + 1, maxDepth)
for (const nestedSkill of nestedSkills) {
if (!skillMap.has(nestedSkill.name)) {
skillMap.set(nestedSkill.name, nestedSkill)
}
}
}
}
return skills
for (const entry of files) {
const entryPath = join(skillsDir, entry.name)
const baseName = basename(entry.name, ".md")
const skill = await loadSkillFromPath(entryPath, skillsDir, baseName, scope, namePrefix)
if (skill && !skillMap.has(skill.name)) {
skillMap.set(skill.name, skill)
}
}
return Array.from(skillMap.values())
}
function skillsToRecord(skills: LoadedSkill[]): Record<string, CommandDefinition> {

View File

@@ -1,6 +1,34 @@
import { describe, it, expect } from "bun:test"
/// <reference types="bun-types" />
import { describe, it, expect, beforeEach, afterEach } from "bun:test"
import { join } from "node:path"
import { tmpdir } from "node:os"
import { resolveSkillContent, resolveMultipleSkills, resolveSkillContentAsync, resolveMultipleSkillsAsync } from "./skill-content"
let originalEnv: Record<string, string | undefined>
let testConfigDir: string
beforeEach(() => {
originalEnv = {
CLAUDE_CONFIG_DIR: process.env.CLAUDE_CONFIG_DIR,
OPENCODE_CONFIG_DIR: process.env.OPENCODE_CONFIG_DIR,
}
const unique = `skill-content-test-${Date.now()}-${Math.random().toString(16).slice(2)}`
testConfigDir = join(tmpdir(), unique)
process.env.CLAUDE_CONFIG_DIR = testConfigDir
process.env.OPENCODE_CONFIG_DIR = testConfigDir
})
afterEach(() => {
for (const [key, value] of Object.entries(originalEnv)) {
if (value !== undefined) {
process.env[key] = value
} else {
delete process.env[key]
}
}
})
describe("resolveSkillContent", () => {
it("should return template for existing skill", () => {
// given: builtin skills with 'frontend-ui-ux' skill
@@ -33,10 +61,12 @@ describe("resolveSkillContent", () => {
expect(result).toBeNull()
})
it("should return null for empty string", () => {
// given: builtin skills
// when: resolving content for empty string
const result = resolveSkillContent("")
it("should return null for disabled skill", () => {
// given: frontend-ui-ux skill disabled
const options = { disabledSkills: new Set(["frontend-ui-ux"]) }
// when: resolving content for disabled skill
const result = resolveSkillContent("frontend-ui-ux", options)
// then: returns null
expect(result).toBeNull()
@@ -96,6 +126,20 @@ describe("resolveMultipleSkills", () => {
expect(result.notFound).toEqual(["skill-one", "skill-two", "skill-three"])
})
it("should treat disabled skills as not found", () => {
// #given: frontend-ui-ux disabled, playwright not disabled
const skillNames = ["frontend-ui-ux", "playwright"]
const options = { disabledSkills: new Set(["frontend-ui-ux"]) }
// #when: resolving multiple skills with disabled one
const result = resolveMultipleSkills(skillNames, options)
// #then: frontend-ui-ux in notFound, playwright resolved
expect(result.resolved.size).toBe(1)
expect(result.resolved.has("playwright")).toBe(true)
expect(result.notFound).toEqual(["frontend-ui-ux"])
})
it("should preserve skill order in resolved map", () => {
// given: list of skill names in specific order
const skillNames = ["playwright", "frontend-ui-ux"]
@@ -111,21 +155,24 @@ describe("resolveMultipleSkills", () => {
})
describe("resolveSkillContentAsync", () => {
it("should return template for builtin skill", async () => {
it("should return template for builtin skill async", async () => {
// given: builtin skill 'frontend-ui-ux'
// when: resolving content async
const result = await resolveSkillContentAsync("frontend-ui-ux")
const options = { disabledSkills: new Set(["frontend-ui-ux"]) }
const result = await resolveSkillContentAsync("git-master", options)
// then: returns template string
expect(result).not.toBeNull()
expect(typeof result).toBe("string")
expect(result).toContain("Role: Designer-Turned-Developer")
expect(result).toContain("Git Master Agent")
})
it("should return null for non-existent skill", async () => {
// given: non-existent skill name
// when: resolving content async
const result = await resolveSkillContentAsync("definitely-not-a-skill-12345")
it("should return null for disabled skill async", async () => {
// given: frontend-ui-ux disabled
const options = { disabledSkills: new Set(["frontend-ui-ux"]) }
// when: resolving content async for disabled skill
const result = await resolveSkillContentAsync("frontend-ui-ux", options)
// then: returns null
expect(result).toBeNull()
@@ -133,9 +180,9 @@ describe("resolveSkillContentAsync", () => {
})
describe("resolveMultipleSkillsAsync", () => {
it("should resolve builtin skills", async () => {
it("should resolve builtin skills async", async () => {
// given: builtin skill names
const skillNames = ["playwright", "frontend-ui-ux"]
const skillNames = ["playwright", "git-master"]
// when: resolving multiple skills async
const result = await resolveMultipleSkillsAsync(skillNames)
@@ -144,10 +191,10 @@ describe("resolveMultipleSkillsAsync", () => {
expect(result.resolved.size).toBe(2)
expect(result.notFound).toEqual([])
expect(result.resolved.get("playwright")).toContain("Playwright Browser Automation")
expect(result.resolved.get("frontend-ui-ux")).toContain("Designer-Turned-Developer")
expect(result.resolved.get("git-master")).toContain("Git Master Agent")
})
it("should handle partial success with non-existent skills", async () => {
it("should handle partial success with non-existent skills async", async () => {
// given: mix of existing and non-existing skills
const skillNames = ["playwright", "nonexistent-skill-12345"]
@@ -160,6 +207,20 @@ describe("resolveMultipleSkillsAsync", () => {
expect(result.resolved.get("playwright")).toContain("Playwright Browser Automation")
})
it("should treat disabled skills as not found async", async () => {
// #given: frontend-ui-ux disabled
const skillNames = ["frontend-ui-ux", "playwright"]
const options = { disabledSkills: new Set(["frontend-ui-ux"]) }
// #when: resolving multiple skills async with disabled one
const result = await resolveMultipleSkillsAsync(skillNames, options)
// #then: frontend-ui-ux in notFound, playwright resolved
expect(result.resolved.size).toBe(1)
expect(result.resolved.has("playwright")).toBe(true)
expect(result.notFound).toEqual(["frontend-ui-ux"])
})
it("should NOT inject watermark when both options are disabled", async () => {
// given: git-master skill with watermark disabled
const skillNames = ["git-master"]

View File

@@ -8,6 +8,7 @@ import type { GitMasterConfig, BrowserAutomationProvider } from "../../config/sc
export interface SkillResolutionOptions {
gitMasterConfig?: GitMasterConfig
browserProvider?: BrowserAutomationProvider
disabledSkills?: Set<string>
}
const cachedSkillsByProvider = new Map<string, LoadedSkill[]>()
@@ -18,12 +19,22 @@ function clearSkillCache(): void {
async function getAllSkills(options?: SkillResolutionOptions): Promise<LoadedSkill[]> {
const cacheKey = options?.browserProvider ?? "playwright"
const cached = cachedSkillsByProvider.get(cacheKey)
if (cached) return cached
const hasDisabledSkills = options?.disabledSkills && options.disabledSkills.size > 0
// Skip cache if disabledSkills is provided (varies between calls)
if (!hasDisabledSkills) {
const cached = cachedSkillsByProvider.get(cacheKey)
if (cached) return cached
}
const [discoveredSkills, builtinSkillDefs] = await Promise.all([
discoverSkills({ includeClaudeCodePaths: true }),
Promise.resolve(createBuiltinSkills({ browserProvider: options?.browserProvider })),
Promise.resolve(
createBuiltinSkills({
browserProvider: options?.browserProvider,
disabledSkills: options?.disabledSkills,
})
),
])
const builtinSkillsAsLoaded: LoadedSkill[] = builtinSkillDefs.map((skill) => ({
@@ -47,8 +58,15 @@ async function getAllSkills(options?: SkillResolutionOptions): Promise<LoadedSki
const discoveredNames = new Set(discoveredSkills.map((s) => s.name))
const uniqueBuiltins = builtinSkillsAsLoaded.filter((s) => !discoveredNames.has(s.name))
const allSkills = [...discoveredSkills, ...uniqueBuiltins]
cachedSkillsByProvider.set(cacheKey, allSkills)
let allSkills = [...discoveredSkills, ...uniqueBuiltins]
// Filter discovered skills by disabledSkills (builtin skills are already filtered by createBuiltinSkills)
if (hasDisabledSkills) {
allSkills = allSkills.filter((s) => !options!.disabledSkills!.has(s.name))
} else {
cachedSkillsByProvider.set(cacheKey, allSkills)
}
return allSkills
}
@@ -122,7 +140,10 @@ export function injectGitMasterConfig(template: string, config?: GitMasterConfig
}
export function resolveSkillContent(skillName: string, options?: SkillResolutionOptions): string | null {
const skills = createBuiltinSkills({ browserProvider: options?.browserProvider })
const skills = createBuiltinSkills({
browserProvider: options?.browserProvider,
disabledSkills: options?.disabledSkills,
})
const skill = skills.find((s) => s.name === skillName)
if (!skill) return null
@@ -137,7 +158,10 @@ export function resolveMultipleSkills(skillNames: string[], options?: SkillResol
resolved: Map<string, string>
notFound: string[]
} {
const skills = createBuiltinSkills({ browserProvider: options?.browserProvider })
const skills = createBuiltinSkills({
browserProvider: options?.browserProvider,
disabledSkills: options?.disabledSkills,
})
const skillMap = new Map(skills.map((s) => [s.name, s.template]))
const resolved = new Map<string, string>()

View File

@@ -55,7 +55,9 @@ export function getClaudeSettingsPaths(customPath?: string): string[] {
paths.unshift(customPath)
}
return paths
// Deduplicate paths to prevent loading the same file multiple times
// (e.g., when cwd is the home directory)
return [...new Set(paths)]
}
function mergeHooksConfig(

View File

@@ -36,7 +36,7 @@ export function createKeywordDetectorHook(ctx: PluginInput, collector?: ContextC
// Remove system-reminder content to prevent automated system messages from triggering mode keywords
const cleanText = removeSystemReminders(promptText)
const modelID = input.model?.modelID
let detectedKeywords = detectKeywordsWithType(removeCodeBlocks(cleanText), currentAgent, modelID)
let detectedKeywords = detectKeywordsWithType(cleanText, currentAgent, modelID)
if (isPlannerAgent(currentAgent)) {
detectedKeywords = detectedKeywords.filter((k) => k.type !== "ultrawork")

View File

@@ -1,5 +1,4 @@
import type { PluginInput } from "@opencode-ai/plugin"
import type { ShellType } from "../../shared"
import { HOOK_NAME, NON_INTERACTIVE_ENV, SHELL_COMMAND_PATTERNS } from "./constants"
import { log, buildEnvPrefix } from "../../shared"
@@ -54,10 +53,8 @@ export function createNonInteractiveEnvHook(_ctx: PluginInput) {
// for git commands to prevent interactive prompts.
// The bash tool always runs in a Unix-like shell (bash/sh), even on Windows
// (via Git Bash, WSL, etc.), so we always use unix export syntax.
// This fixes GitHub issues #983 and #889.
const shellType: ShellType = "unix"
const envPrefix = buildEnvPrefix(NON_INTERACTIVE_ENV, shellType)
// (via Git Bash, WSL, etc.), so always use unix export syntax.
const envPrefix = buildEnvPrefix(NON_INTERACTIVE_ENV, "unix")
output.args.command = `${envPrefix} ${command}`
log(`[${HOOK_NAME}] Prepended non-interactive env vars to git command`, {

View File

@@ -9,7 +9,7 @@ export const ALLOWED_EXTENSIONS = [".md"]
export const ALLOWED_PATH_PREFIX = ".sisyphus"
export const BLOCKED_TOOLS = ["Write", "Edit", "write", "edit"]
export const BLOCKED_TOOLS = ["Write", "Edit", "write", "edit", "bash"]
export const PLANNING_CONSULT_WARNING = `

View File

@@ -173,7 +173,25 @@ describe("prometheus-md-only", () => {
).rejects.toThrow("can only write/edit .md files")
})
test("should not affect non-Write/Edit tools", async () => {
test("should block bash commands from Prometheus", async () => {
// given
const hook = createPrometheusMdOnlyHook(createMockPluginInput())
const input = {
tool: "bash",
sessionID: TEST_SESSION_ID,
callID: "call-1",
}
const output = {
args: { command: "echo test" },
}
// when / #then
await expect(
hook["tool.execute.before"](input, output)
).rejects.toThrow("cannot execute bash commands")
})
test("should not affect non-blocked tools", async () => {
// given
const hook = createPrometheusMdOnlyHook(createMockPluginInput())
const input = {

View File

@@ -106,6 +106,20 @@ export function createPrometheusMdOnlyHook(ctx: PluginInput) {
return
}
// Block bash commands completely - Prometheus is read-only
if (toolName === "bash") {
log(`[${HOOK_NAME}] Blocked: Prometheus cannot execute bash commands`, {
sessionID: input.sessionID,
tool: toolName,
agent: agentName,
})
throw new Error(
`[${HOOK_NAME}] ${getAgentDisplayName("prometheus")} cannot execute bash commands. ` +
`${getAgentDisplayName("prometheus")} is a READ-ONLY planner. Use /start-work to execute the plan. ` +
`APOLOGIZE TO THE USER, REMIND OF YOUR PLAN WRITING PROCESSES, TELL USER WHAT YOU WILL GOING TO DO AS THE PROCESS, WRITE THE PLAN`
)
}
const filePath = (output.args.filePath ?? output.args.path ?? output.args.file) as string | undefined
if (!filePath) {
return

View File

@@ -98,6 +98,7 @@ import {
getOpenCodeVersion,
isOpenCodeVersionAtLeast,
OPENCODE_NATIVE_AGENTS_INJECTION_VERSION,
injectServerAuthIntoClient,
} from "./shared";
import { loadPluginConfig } from "./plugin-config";
import { createModelCacheState } from "./plugin-state";
@@ -107,6 +108,7 @@ const OhMyOpenCodePlugin: Plugin = async (ctx) => {
log("[OhMyOpenCodePlugin] ENTRY - plugin loading", {
directory: ctx.directory,
});
injectServerAuthIntoClient(ctx.client);
// Start background tmux check immediately
startTmuxCheck();
@@ -386,6 +388,7 @@ const OhMyOpenCodePlugin: Plugin = async (ctx) => {
const lookAt = isMultimodalLookerEnabled ? createLookAt(ctx) : null;
const browserProvider =
pluginConfig.browser_automation_engine?.provider ?? "playwright";
const disabledSkills = new Set<string>(pluginConfig.disabled_skills ?? []);
const delegateTask = createDelegateTask({
manager: backgroundManager,
client: ctx.client,
@@ -394,6 +397,7 @@ const OhMyOpenCodePlugin: Plugin = async (ctx) => {
gitMasterConfig: pluginConfig.git_master,
sisyphusJuniorModel: pluginConfig.agents?.["sisyphus-junior"]?.model,
browserProvider,
disabledSkills,
onSyncSessionCreated: async (event) => {
log("[index] onSyncSessionCreated callback", {
sessionID: event.sessionID,
@@ -412,11 +416,8 @@ const OhMyOpenCodePlugin: Plugin = async (ctx) => {
});
},
});
const disabledSkills = new Set(pluginConfig.disabled_skills ?? []);
const systemMcpNames = getSystemMcpServerNames();
const builtinSkills = createBuiltinSkills({ browserProvider }).filter(
(skill) => {
if (disabledSkills.has(skill.name as never)) return false;
const builtinSkills = createBuiltinSkills({ browserProvider, disabledSkills }).filter((skill) => {
if (skill.mcpConfig) {
for (const mcpName of Object.keys(skill.mcpConfig)) {
if (systemMcpNames.has(mcpName)) return false;
@@ -448,6 +449,7 @@ const OhMyOpenCodePlugin: Plugin = async (ctx) => {
mcpManager: skillMcpManager,
getSessionID: getSessionIDForMcp,
gitMasterConfig: pluginConfig.git_master,
disabledSkills
});
const skillMcpTool = createSkillMcpTool({
manager: skillMcpManager,

View File

@@ -25,7 +25,7 @@ mcp/
| Name | URL | Purpose | Auth |
|------|-----|---------|------|
| websearch | mcp.exa.ai/mcp?tools=web_search_exa | Real-time web search | EXA_API_KEY |
| websearch | mcp.exa.ai / mcp.tavily.com | Real-time web search | EXA_API_KEY / TAVILY_API_KEY |
| context7 | mcp.context7.com/mcp | Library docs | CONTEXT7_API_KEY |
| grep_app | mcp.grep.app | GitHub code search | None |
@@ -35,6 +35,36 @@ mcp/
2. **Claude Code compat**: `.mcp.json` with `${VAR}` expansion
3. **Skill-embedded**: YAML frontmatter in skills (handled by skill-mcp-manager)
## Websearch Provider Configuration
The `websearch` MCP supports multiple providers. Exa is the default for backward compatibility and works without an API key.
| Provider | URL | Auth | API Key Required |
|----------|-----|------|------------------|
| exa (default) | mcp.exa.ai | x-api-key header | No (optional) |
| tavily | mcp.tavily.com | Authorization Bearer | Yes |
### Configuration Example
```jsonc
{
"websearch": {
"provider": "tavily" // or "exa" (default)
}
}
```
### Environment Variables
- `EXA_API_KEY`: Optional. Used when provider is `exa`.
- `TAVILY_API_KEY`: Required when provider is `tavily`.
### Priority and Behavior
- **Default**: Exa is used if no provider is specified.
- **Backward Compatibility**: Existing setups using `EXA_API_KEY` continue to work without changes.
- **Validation**: Selecting `tavily` without providing `TAVILY_API_KEY` will result in a configuration error.
## CONFIG PATTERN
```typescript
@@ -68,3 +98,4 @@ const mcps = createBuiltinMcps(["websearch"]) // Disable specific
- **Disable**: User can set `disabled_mcps: ["name"]` in config
- **Context7**: Optional auth using `CONTEXT7_API_KEY` env var
- **Exa**: Optional auth using `EXA_API_KEY` env var
- **Tavily**: Requires `TAVILY_API_KEY` env var

View File

@@ -83,4 +83,24 @@ describe("createBuiltinMcps", () => {
expect(result).toHaveProperty("grep_app")
expect(Object.keys(result)).toHaveLength(3)
})
test("should not throw when websearch disabled even if tavily configured without API key", () => {
// given
const originalTavilyKey = process.env.TAVILY_API_KEY
delete process.env.TAVILY_API_KEY
const disabledMcps = ["websearch"]
const config = { websearch: { provider: "tavily" as const } }
try {
// when
const createMcps = () => createBuiltinMcps(disabledMcps, config)
// then
expect(createMcps).not.toThrow()
const result = createMcps()
expect(result).not.toHaveProperty("websearch")
} finally {
if (originalTavilyKey) process.env.TAVILY_API_KEY = originalTavilyKey
}
})
})

View File

@@ -1,7 +1,8 @@
import { websearch } from "./websearch"
import { createWebsearchConfig } from "./websearch"
import { context7 } from "./context7"
import { grep_app } from "./grep-app"
import type { McpName } from "./types"
import type { OhMyOpenCodeConfig } from "../config/schema"
export { McpNameSchema, type McpName } from "./types"
@@ -13,19 +14,19 @@ type RemoteMcpConfig = {
oauth?: false
}
const allBuiltinMcps: Record<McpName, RemoteMcpConfig> = {
websearch,
context7,
grep_app,
}
export function createBuiltinMcps(disabledMcps: string[] = []) {
export function createBuiltinMcps(disabledMcps: string[] = [], config?: OhMyOpenCodeConfig) {
const mcps: Record<string, RemoteMcpConfig> = {}
for (const [name, config] of Object.entries(allBuiltinMcps)) {
if (!disabledMcps.includes(name)) {
mcps[name] = config
}
if (!disabledMcps.includes("websearch")) {
mcps.websearch = createWebsearchConfig(config?.websearch)
}
if (!disabledMcps.includes("context7")) {
mcps.context7 = context7
}
if (!disabledMcps.includes("grep_app")) {
mcps.grep_app = grep_app
}
return mcps

116
src/mcp/websearch.test.ts Normal file
View File

@@ -0,0 +1,116 @@
import { describe, expect, test, beforeEach, afterEach } from "bun:test"
import { createWebsearchConfig } from "./websearch"
describe("websearch MCP provider configuration", () => {
const originalEnv = { ...process.env }
beforeEach(() => {
delete process.env.EXA_API_KEY
delete process.env.TAVILY_API_KEY
})
afterEach(() => {
process.env = { ...originalEnv }
})
test("returns Exa config when no config provided", () => {
//#given - no config
//#when
const result = createWebsearchConfig()
//#then
expect(result.url).toContain("mcp.exa.ai")
expect(result.type).toBe("remote")
expect(result.enabled).toBe(true)
})
test("returns Exa config when provider is 'exa'", () => {
//#given
const config = { provider: "exa" as const }
//#when
const result = createWebsearchConfig(config)
//#then
expect(result.url).toContain("mcp.exa.ai")
expect(result.type).toBe("remote")
})
test("includes x-api-key header when EXA_API_KEY is set", () => {
//#given
const apiKey = "test-exa-key-12345"
process.env.EXA_API_KEY = apiKey
//#when
const result = createWebsearchConfig()
//#then
expect(result.headers).toEqual({ "x-api-key": apiKey })
})
test("returns Tavily config when provider is 'tavily' and TAVILY_API_KEY set", () => {
//#given
const tavilyKey = "test-tavily-key-67890"
process.env.TAVILY_API_KEY = tavilyKey
const config = { provider: "tavily" as const }
//#when
const result = createWebsearchConfig(config)
//#then
expect(result.url).toContain("mcp.tavily.com")
expect(result.headers).toEqual({ Authorization: `Bearer ${tavilyKey}` })
})
test("throws error when provider is 'tavily' but TAVILY_API_KEY missing", () => {
//#given
delete process.env.TAVILY_API_KEY
const config = { provider: "tavily" as const }
//#when
const createTavilyConfig = () => createWebsearchConfig(config)
//#then
expect(createTavilyConfig).toThrow("TAVILY_API_KEY environment variable is required")
})
test("returns Exa when both keys present but no explicit provider", () => {
//#given
process.env.EXA_API_KEY = "test-exa-key"
process.env.TAVILY_API_KEY = "test-tavily-key"
//#when
const result = createWebsearchConfig()
//#then
expect(result.url).toContain("mcp.exa.ai")
expect(result.headers).toEqual({ "x-api-key": "test-exa-key" })
})
test("Tavily config uses Authorization Bearer header format", () => {
//#given
const tavilyKey = "tavily-secret-key-xyz"
process.env.TAVILY_API_KEY = tavilyKey
const config = { provider: "tavily" as const }
//#when
const result = createWebsearchConfig(config)
//#then
expect(result.headers?.Authorization).toMatch(/^Bearer /)
expect(result.headers?.Authorization).toBe(`Bearer ${tavilyKey}`)
})
test("Exa config has no headers when EXA_API_KEY not set", () => {
//#given
delete process.env.EXA_API_KEY
//#when
const result = createWebsearchConfig()
//#then
expect(result.url).toContain("mcp.exa.ai")
expect(result.headers).toBeUndefined()
})
})

View File

@@ -1,10 +1,44 @@
export const websearch = {
type: "remote" as const,
url: "https://mcp.exa.ai/mcp?tools=web_search_exa",
enabled: true,
headers: process.env.EXA_API_KEY
? { "x-api-key": process.env.EXA_API_KEY }
: undefined,
// Disable OAuth auto-detection - Exa uses API key header, not OAuth
oauth: false as const,
import type { WebsearchConfig } from "../config/schema"
type RemoteMcpConfig = {
type: "remote"
url: string
enabled: boolean
headers?: Record<string, string>
oauth?: false
}
export function createWebsearchConfig(config?: WebsearchConfig): RemoteMcpConfig {
const provider = config?.provider || "exa"
if (provider === "tavily") {
const tavilyKey = process.env.TAVILY_API_KEY
if (!tavilyKey) {
throw new Error("TAVILY_API_KEY environment variable is required for Tavily provider")
}
return {
type: "remote" as const,
url: "https://mcp.tavily.com/mcp/",
enabled: true,
headers: {
Authorization: `Bearer ${tavilyKey}`,
},
oauth: false as const,
}
}
// Default to Exa
return {
type: "remote" as const,
url: "https://mcp.exa.ai/mcp?tools=web_search_exa",
enabled: true,
headers: process.env.EXA_API_KEY
? { "x-api-key": process.env.EXA_API_KEY }
: undefined,
oauth: false as const,
}
}
// Backward compatibility: export static instance using default config
export const websearch = createWebsearchConfig()

View File

@@ -157,6 +157,7 @@ export function createConfigHandler(deps: ConfigHandlerDeps) {
// config.model represents the currently active model in OpenCode (including UI selection)
// Pass it as uiSelectedModel so it takes highest priority in model resolution
const currentModel = config.model as string | undefined;
const disabledSkills = new Set<string>(pluginConfig.disabled_skills ?? []);
const builtinAgents = await createBuiltinAgents(
migratedDisabledAgents,
pluginConfig.agents,
@@ -167,7 +168,8 @@ export function createConfigHandler(deps: ConfigHandlerDeps) {
allDiscoveredSkills,
ctx.client,
browserProvider,
currentModel // uiSelectedModel - takes highest priority
currentModel, // uiSelectedModel - takes highest priority
disabledSkills
);
// Claude Code agents: Do NOT apply permission migration
@@ -305,7 +307,7 @@ export function createConfigHandler(deps: ConfigHandlerDeps) {
prompt: PROMETHEUS_SYSTEM_PROMPT,
permission: PROMETHEUS_PERMISSION,
description: `${configAgent?.plan?.description ?? "Plan agent"} (Prometheus - OhMyOpenCode)`,
color: (configAgent?.plan?.color as string) ?? "#9D4EDD", // Amethyst Purple - wisdom/foresight
color: (configAgent?.plan?.color as string) ?? "#FF5722", // Deep Orange - Fire/Flame theme
...(temperatureToUse !== undefined ? { temperature: temperatureToUse } : {}),
...(topPToUse !== undefined ? { top_p: topPToUse } : {}),
...(maxTokensToUse !== undefined ? { maxTokens: maxTokensToUse } : {}),
@@ -358,7 +360,8 @@ export function createConfigHandler(deps: ConfigHandlerDeps) {
: {};
const planDemoteConfig = shouldDemotePlan
? { mode: "subagent" as const }
? { mode: "subagent" as const
}
: undefined;
config.agent = {
@@ -447,7 +450,7 @@ export function createConfigHandler(deps: ConfigHandlerDeps) {
: { servers: {} };
config.mcp = {
...createBuiltinMcps(pluginConfig.disabled_mcps),
...createBuiltinMcps(pluginConfig.disabled_mcps, pluginConfig),
...(config.mcp as Record<string, unknown>),
...mcpResult.servers,
...pluginComponents.mcpServers,

View File

@@ -24,6 +24,20 @@ const AGENT_RESTRICTIONS: Record<string, Record<string, boolean>> = {
delegate_task: false,
},
metis: {
write: false,
edit: false,
task: false,
delegate_task: false,
},
momus: {
write: false,
edit: false,
task: false,
delegate_task: false,
},
"multimodal-looker": {
read: true,
},

View File

@@ -39,3 +39,4 @@ export * from "./connected-providers-cache"
export * from "./session-utils"
export * from "./tmux"
export * from "./model-suggestion-retry"
export * from "./opencode-server-auth"

View File

@@ -277,6 +277,42 @@ describe("fuzzyMatchModel", () => {
expect(result).toBe("anthropic/claude-opus-4-5")
})
// given available models with similar model IDs (e.g., glm-4.7 and glm-4.7-free)
// when searching for the longer variant (glm-4.7-free)
// then return exact model ID match, not the shorter one
it("should prefer exact model ID match over shorter substring match", () => {
const available = new Set([
"zai-coding-plan/glm-4.7",
"zai-coding-plan/glm-4.7-free",
])
const result = fuzzyMatchModel("glm-4.7-free", available)
expect(result).toBe("zai-coding-plan/glm-4.7-free")
})
// given available models with similar model IDs
// when searching for the shorter variant
// then return the shorter match (existing behavior preserved)
it("should still prefer shorter match when searching for shorter variant", () => {
const available = new Set([
"zai-coding-plan/glm-4.7",
"zai-coding-plan/glm-4.7-free",
])
const result = fuzzyMatchModel("glm-4.7", available)
expect(result).toBe("zai-coding-plan/glm-4.7")
})
// given same model ID from multiple providers
// when searching for exact model ID
// then return shortest full string (preserves tie-break behavior)
it("should use shortest tie-break when multiple providers have same model ID", () => {
const available = new Set([
"opencode/gpt-5.2",
"openai/gpt-5.2",
])
const result = fuzzyMatchModel("gpt-5.2", available)
expect(result).toBe("openai/gpt-5.2")
})
// given available models with multiple providers
// when multiple providers are specified
// then search all specified providers

View File

@@ -72,14 +72,29 @@ export function fuzzyMatchModel(
return null
}
// Priority 1: Exact match (normalized)
// Priority 1: Exact match (normalized full model string)
const exactMatch = matches.find((model) => normalizeModelName(model) === targetNormalized)
if (exactMatch) {
log("[fuzzyMatchModel] exact match found", { exactMatch })
return exactMatch
}
// Priority 2: Shorter model name (more specific)
// Priority 2: Exact model ID match (part after provider/)
// This ensures "glm-4.7-free" matches "zai-coding-plan/glm-4.7-free" over "zai-coding-plan/glm-4.7"
// Use filter + shortest to handle multi-provider cases (e.g., openai/gpt-5.2 + opencode/gpt-5.2)
const exactModelIdMatches = matches.filter((model) => {
const modelId = model.split("/").slice(1).join("/")
return normalizeModelName(modelId) === targetNormalized
})
if (exactModelIdMatches.length > 0) {
const result = exactModelIdMatches.reduce((shortest, current) =>
current.length < shortest.length ? current : shortest,
)
log("[fuzzyMatchModel] exact model ID match found", { result, candidateCount: exactModelIdMatches.length })
return result
}
// Priority 3: Shorter model name (more specific, fallback for partial matches)
const result = matches.reduce((shortest, current) =>
current.length < shortest.length ? current : shortest,
)

View File

@@ -313,7 +313,7 @@ describe("CATEGORY_MODEL_REQUIREMENTS", () => {
const primary = artistry.fallbackChain[0]
expect(primary.model).toBe("gemini-3-pro")
expect(primary.variant).toBe("max")
expect(primary.variant).toBe("high")
expect(primary.providers[0]).toBe("google")
})

View File

@@ -31,7 +31,7 @@ export const AGENT_MODEL_REQUIREMENTS: Record<string, ModelRequirement> = {
oracle: {
fallbackChain: [
{ providers: ["openai", "github-copilot", "opencode"], model: "gpt-5.2", variant: "high" },
{ providers: ["google", "github-copilot", "opencode"], model: "gemini-3-pro", variant: "max" },
{ providers: ["google", "github-copilot", "opencode"], model: "gemini-3-pro", variant: "high" },
{ providers: ["anthropic", "github-copilot", "opencode"], model: "claude-opus-4-5", variant: "max" },
],
},
@@ -75,14 +75,14 @@ export const AGENT_MODEL_REQUIREMENTS: Record<string, ModelRequirement> = {
{ providers: ["kimi-for-coding"], model: "k2p5" },
{ providers: ["opencode"], model: "kimi-k2.5-free" },
{ providers: ["openai", "github-copilot", "opencode"], model: "gpt-5.2", variant: "high" },
{ providers: ["google", "github-copilot", "opencode"], model: "gemini-3-pro", variant: "max" },
{ providers: ["google", "github-copilot", "opencode"], model: "gemini-3-pro", variant: "high" },
],
},
momus: {
fallbackChain: [
{ providers: ["openai", "github-copilot", "opencode"], model: "gpt-5.2", variant: "medium" },
{ providers: ["anthropic", "github-copilot", "opencode"], model: "claude-opus-4-5", variant: "max" },
{ providers: ["google", "github-copilot", "opencode"], model: "gemini-3-pro", variant: "max" },
{ providers: ["google", "github-copilot", "opencode"], model: "gemini-3-pro", variant: "high" },
],
},
atlas: {
@@ -107,7 +107,7 @@ export const CATEGORY_MODEL_REQUIREMENTS: Record<string, ModelRequirement> = {
ultrabrain: {
fallbackChain: [
{ providers: ["openai", "github-copilot", "opencode"], model: "gpt-5.2-codex", variant: "xhigh" },
{ providers: ["google", "github-copilot", "opencode"], model: "gemini-3-pro", variant: "max" },
{ providers: ["google", "github-copilot", "opencode"], model: "gemini-3-pro", variant: "high" },
{ providers: ["anthropic", "github-copilot", "opencode"], model: "claude-opus-4-5", variant: "max" },
],
},
@@ -115,18 +115,18 @@ export const CATEGORY_MODEL_REQUIREMENTS: Record<string, ModelRequirement> = {
fallbackChain: [
{ providers: ["openai", "github-copilot", "opencode"], model: "gpt-5.2-codex", variant: "medium" },
{ providers: ["anthropic", "github-copilot", "opencode"], model: "claude-opus-4-5", variant: "max" },
{ providers: ["google", "github-copilot", "opencode"], model: "gemini-3-pro", variant: "max" },
{ providers: ["google", "github-copilot", "opencode"], model: "gemini-3-pro", variant: "high" },
],
requiresModel: "gpt-5.2-codex",
},
artistry: {
fallbackChain: [
{ providers: ["google", "github-copilot", "opencode"], model: "gemini-3-pro", variant: "max" },
{ providers: ["anthropic", "github-copilot", "opencode"], model: "claude-opus-4-5", variant: "max" },
{ providers: ["openai", "github-copilot", "opencode"], model: "gpt-5.2" },
],
requiresModel: "gemini-3-pro",
},
artistry: {
fallbackChain: [
{ providers: ["google", "github-copilot", "opencode"], model: "gemini-3-pro", variant: "high" },
{ providers: ["anthropic", "github-copilot", "opencode"], model: "claude-opus-4-5", variant: "max" },
{ providers: ["openai", "github-copilot", "opencode"], model: "gpt-5.2" },
],
requiresModel: "gemini-3-pro",
},
quick: {
fallbackChain: [
{ providers: ["anthropic", "github-copilot", "opencode"], model: "claude-haiku-4-5" },

View File

@@ -0,0 +1,94 @@
/// <reference types="bun-types" />
import { describe, test, expect, beforeEach, afterEach } from "bun:test"
import { getServerBasicAuthHeader, injectServerAuthIntoClient } from "./opencode-server-auth"
describe("opencode-server-auth", () => {
  // Environment variables the module under test reads; snapshot and restore
  // them around every test so cases cannot leak state into each other.
  const ENV_KEYS = ["OPENCODE_SERVER_PASSWORD", "OPENCODE_SERVER_USERNAME"] as const

  let savedEnv: Record<string, string | undefined>

  beforeEach(() => {
    savedEnv = Object.fromEntries(ENV_KEYS.map((key) => [key, process.env[key]]))
  })

  afterEach(() => {
    for (const key of ENV_KEYS) {
      const previous = savedEnv[key]
      if (previous === undefined) {
        delete process.env[key]
      } else {
        process.env[key] = previous
      }
    }
  })

  test("#given no server password #when building auth header #then returns undefined", () => {
    delete process.env.OPENCODE_SERVER_PASSWORD
    expect(getServerBasicAuthHeader()).toBeUndefined()
  })

  test("#given server password without username #when building auth header #then uses default username", () => {
    process.env.OPENCODE_SERVER_PASSWORD = "secret"
    delete process.env.OPENCODE_SERVER_USERNAME
    // base64("opencode:secret")
    expect(getServerBasicAuthHeader()).toBe("Basic b3BlbmNvZGU6c2VjcmV0")
  })

  test("#given server password and username #when building auth header #then uses provided username", () => {
    process.env.OPENCODE_SERVER_PASSWORD = "secret"
    process.env.OPENCODE_SERVER_USERNAME = "dan"
    // base64("dan:secret")
    expect(getServerBasicAuthHeader()).toBe("Basic ZGFuOnNlY3JldA==")
  })

  test("#given server password #when injecting into client #then updates client headers", () => {
    process.env.OPENCODE_SERVER_PASSWORD = "secret"
    delete process.env.OPENCODE_SERVER_USERNAME
    let captured: { headers: Record<string, string> } | undefined
    const fakeClient = {
      _client: {
        setConfig: (config: { headers: Record<string, string> }) => {
          captured = config
        },
      },
    }
    injectServerAuthIntoClient(fakeClient)
    expect(captured).toEqual({
      headers: {
        Authorization: "Basic b3BlbmNvZGU6c2VjcmV0",
      },
    })
  })

  test("#given server password #when client has no _client #then does not throw", () => {
    process.env.OPENCODE_SERVER_PASSWORD = "secret"
    expect(() => injectServerAuthIntoClient({})).not.toThrow()
  })

  test("#given server password #when client._client has no setConfig #then does not throw", () => {
    process.env.OPENCODE_SERVER_PASSWORD = "secret"
    expect(() => injectServerAuthIntoClient({ _client: {} })).not.toThrow()
  })

  test("#given no server password #when client is invalid #then does not throw", () => {
    delete process.env.OPENCODE_SERVER_PASSWORD
    expect(() => injectServerAuthIntoClient({})).not.toThrow()
  })
})

View File

@@ -0,0 +1,69 @@
/**
 * Builds an HTTP Basic Auth header from environment variables.
 *
 * @returns Basic Auth header string, or undefined if OPENCODE_SERVER_PASSWORD is not set
 */
export function getServerBasicAuthHeader(): string | undefined {
  const password = process.env.OPENCODE_SERVER_PASSWORD
  if (!password) {
    return undefined
  }
  // Default account name used when no explicit username is configured.
  const username = process.env.OPENCODE_SERVER_USERNAME ?? "opencode"
  const token = Buffer.from(`${username}:${password}`, "utf8").toString("base64")
  return `Basic ${token}`
}

/**
 * Injects HTTP Basic Auth header into the OpenCode SDK client.
 *
 * This function accesses the SDK's internal `_client.setConfig()` method.
 * While `_client` has an underscore prefix (suggesting internal use), this is actually
 * a stable public API from `@hey-api/openapi-ts` generated client:
 * - `setConfig()` MERGES headers (does not replace existing ones)
 * - This is the documented way to update client config at runtime
 *
 * This function never throws: if OPENCODE_SERVER_PASSWORD is set but the client
 * structure is incompatible (e.g. an SDK version mismatch), the failure is logged
 * via `console.warn` and the call becomes a no-op, so startup is not blocked.
 *
 * @see https://github.com/sst/opencode/blob/main/packages/sdk/js/src/gen/client/client.gen.ts
 */
export function injectServerAuthIntoClient(client: unknown): void {
  const auth = getServerBasicAuthHeader()
  if (!auth) {
    // No password configured — nothing to inject.
    return
  }
  try {
    // Narrow step by step: client must be an object carrying a non-null `_client`.
    const internal =
      typeof client === "object" && client !== null && "_client" in client
        ? (client as { _client: unknown })._client
        : null
    if (typeof internal !== "object" || internal === null) {
      throw new Error(
        "[opencode-server-auth] OPENCODE_SERVER_PASSWORD is set but SDK client structure is incompatible. " +
          "This may indicate an OpenCode SDK version mismatch."
      )
    }
    const setConfig = (internal as { setConfig?: (config: { headers: Record<string, string> }) => void }).setConfig
    if (typeof setConfig !== "function") {
      throw new Error(
        "[opencode-server-auth] OPENCODE_SERVER_PASSWORD is set but SDK client._client.setConfig is not a function. " +
          "This may indicate an OpenCode SDK version mismatch."
      )
    }
    // Invoke with the internal client as `this` to match a direct method call.
    setConfig.call(internal, {
      headers: {
        Authorization: auth,
      },
    })
  } catch (error) {
    const message = error instanceof Error ? error.message : String(error)
    console.warn(`[opencode-server-auth] Failed to inject server auth: ${message}`)
  }
}

View File

@@ -196,7 +196,7 @@ export const DEFAULT_CATEGORIES: Record<string, CategoryConfig> = {
"visual-engineering": { model: "google/gemini-3-pro" },
ultrabrain: { model: "openai/gpt-5.2-codex", variant: "xhigh" },
deep: { model: "openai/gpt-5.2-codex", variant: "medium" },
artistry: { model: "google/gemini-3-pro", variant: "max" },
artistry: { model: "google/gemini-3-pro", variant: "high" },
quick: { model: "anthropic/claude-haiku-4-5" },
"unspecified-low": { model: "anthropic/claude-sonnet-4-5" },
"unspecified-high": { model: "anthropic/claude-opus-4-5", variant: "max" },

View File

@@ -44,7 +44,7 @@ interface SessionMessage {
export async function resolveSkillContent(
skills: string[],
options: { gitMasterConfig?: GitMasterConfig; browserProvider?: BrowserAutomationProvider }
options: { gitMasterConfig?: GitMasterConfig; browserProvider?: BrowserAutomationProvider, disabledSkills?: Set<string> }
): Promise<{ content: string | undefined; error: string | null }> {
if (skills.length === 0) {
return { content: undefined, error: null }
@@ -794,18 +794,22 @@ export async function resolveCategoryExecution(
let categoryModel: { providerID: string; modelID: string; variant?: string } | undefined
const overrideModel = sisyphusJuniorModel
const explicitCategoryModel = userCategories?.[args.category!]?.model
if (!requirement) {
actualModel = overrideModel ?? resolved.model
// Precedence: explicit category model > sisyphus-junior default > category resolved model
// This keeps `sisyphus-junior.model` useful as a global default while allowing
// per-category overrides via `categories[category].model`.
actualModel = explicitCategoryModel ?? overrideModel ?? resolved.model
if (actualModel) {
modelInfo = overrideModel
modelInfo = explicitCategoryModel || overrideModel
? { model: actualModel, type: "user-defined", source: "override" }
: { model: actualModel, type: "system-default", source: "system-default" }
}
} else {
const resolution = resolveModelPipeline({
intent: {
userModel: overrideModel ?? userCategories?.[args.category!]?.model,
userModel: explicitCategoryModel ?? overrideModel,
categoryDefaultModel: resolved.model,
},
constraints: { availableModels },

View File

@@ -1492,7 +1492,7 @@ describe("sisyphus-task", () => {
abort: new AbortController().signal,
}
// when - artistry category (gemini-3-pro with max variant)
// when - artistry category (gemini-3-pro with high variant)
const result = await tool.execute(
{
description: "Test artistry forced background",
@@ -1770,6 +1770,68 @@ describe("sisyphus-task", () => {
expect(launchInput.model.providerID).toBe("anthropic")
expect(launchInput.model.modelID).toBe("claude-sonnet-4-5")
})
test("explicit category model takes precedence over sisyphus-junior model", async () => {
// given - explicit category model differs from sisyphus-junior override
const { createDelegateTask } = require("./tools")
let launchInput: any
const mockManager = {
launch: async (input: any) => {
launchInput = input
return {
id: "task-category-precedence",
sessionID: "ses_category_precedence_test",
description: "Category precedence test",
agent: "sisyphus-junior",
status: "running",
}
},
}
const mockClient = {
app: { agents: async () => ({ data: [] }) },
config: { get: async () => ({ data: { model: SYSTEM_DEFAULT_MODEL } }) },
model: { list: async () => [] },
session: {
create: async () => ({ data: { id: "test-session" } }),
prompt: async () => ({ data: {} }),
messages: async () => ({ data: [] }),
},
}
const tool = createDelegateTask({
manager: mockManager,
client: mockClient,
sisyphusJuniorModel: "anthropic/claude-sonnet-4-5",
userCategories: {
ultrabrain: { model: "openai/gpt-5.2-codex" },
},
})
const toolContext = {
sessionID: "parent-session",
messageID: "parent-message",
agent: "sisyphus",
abort: new AbortController().signal,
}
// when - using ultrabrain category with explicit model override
await tool.execute(
{
description: "Category precedence test",
prompt: "Do something",
category: "ultrabrain",
run_in_background: true,
load_skills: [],
},
toolContext
)
// then - explicit category model should win
expect(launchInput.model.providerID).toBe("openai")
expect(launchInput.model.modelID).toBe("gpt-5.2-codex")
})
})
describe("browserProvider propagation", () => {

View File

@@ -83,6 +83,7 @@ Prompts MUST be in English.`
const { content: skillContent, error: skillError } = await resolveSkillContent(args.load_skills, {
gitMasterConfig: options.gitMasterConfig,
browserProvider: options.browserProvider,
disabledSkills: options.disabledSkills,
})
if (skillError) {
return skillError

View File

@@ -41,6 +41,7 @@ export interface DelegateTaskToolOptions {
gitMasterConfig?: GitMasterConfig
sisyphusJuniorModel?: string
browserProvider?: BrowserAutomationProvider
disabledSkills?: Set<string>
onSyncSessionCreated?: (event: SyncSessionCreatedEvent) => Promise<void>
}

View File

@@ -20,10 +20,12 @@ export const glob: ToolDefinition = tool({
"simply omit it for the default behavior. Must be a valid directory path if provided."
),
},
execute: async (args) => {
execute: async (args, ctx) => {
try {
const cli = await resolveGrepCliWithAutoInstall()
const paths = args.path ? [args.path] : undefined
// Use ctx.directory as the default search path when no path is provided
const searchPath = args.path ?? ctx.directory
const paths = [searchPath]
const result = await runRgFiles(
{

View File

@@ -20,10 +20,12 @@ export const grep: ToolDefinition = tool({
.optional()
.describe("The directory to search in. Defaults to the current working directory."),
},
execute: async (args) => {
execute: async (args, ctx) => {
try {
const globs = args.include ? [args.include] : undefined
const paths = args.path ? [args.path] : undefined
// Use ctx.directory as the default search path when no path is provided
const searchPath = args.path ?? ctx.directory
const paths = [searchPath]
const result = await runRg({
pattern: args.pattern,

View File

@@ -0,0 +1,63 @@
import { mkdtempSync, rmSync, writeFileSync } from "node:fs"
import { join } from "node:path"
import { tmpdir } from "node:os"
import { describe, it, expect, spyOn, mock } from "bun:test"
// Replace the vscode-jsonrpc transport with stubs before `./client` is imported:
// these unit tests only assert on LSPClient's outgoing notifications, so a real
// JSON-RPC connection must never be created (createMessageConnection throws to
// make any accidental use fail loudly).
mock.module("vscode-jsonrpc/node", () => ({
  createMessageConnection: () => {
    throw new Error("not used in unit test")
  },
  StreamMessageReader: function StreamMessageReader() {},
  StreamMessageWriter: function StreamMessageWriter() {},
}))
import { LSPClient } from "./client"
import type { ResolvedServer } from "./types"
describe("LSPClient", () => {
  describe("openFile", () => {
    // Regression test: re-opening a file whose on-disk content changed must emit
    // textDocument/didChange, not just the initial textDocument/didOpen.
    it("sends didChange when a previously opened file changes on disk", async () => {
      // #given — a temp workspace containing one TypeScript file
      const dir = mkdtempSync(join(tmpdir(), "lsp-client-test-"))
      const filePath = join(dir, "test.ts")
      writeFileSync(filePath, "const a = 1\n")
      // openFile() waits via setTimeout; collapse those delays so the test runs
      // instantly. Restored in the finally block below.
      const originalSetTimeout = globalThis.setTimeout
      globalThis.setTimeout = ((fn: (...args: unknown[]) => void, _ms?: number) => {
        fn()
        return 0 as unknown as ReturnType<typeof setTimeout>
      }) as typeof setTimeout
      const server: ResolvedServer = {
        id: "typescript",
        command: ["typescript-language-server", "--stdio"],
        extensions: [".ts"],
        priority: 0,
      }
      const client = new LSPClient(dir, server)
      // Stub protocol output: we only want to assert on which notifications
      // were sent, never to talk to a real language server.
      const sendNotificationSpy = spyOn(
        client as unknown as { sendNotification: (m: string, p?: unknown) => void },
        "sendNotification"
      )
      try {
        // #when — open the file, change it on disk, then open it again
        await client.openFile(filePath)
        writeFileSync(filePath, "const a = 2\n")
        await client.openFile(filePath)
        // #then — both lifecycle notifications were observed
        const methods = sendNotificationSpy.mock.calls.map((c) => c[0])
        expect(methods).toContain("textDocument/didOpen")
        expect(methods).toContain("textDocument/didChange")
      } finally {
        // Always restore the real setTimeout and remove the temp workspace,
        // even when an assertion above fails.
        globalThis.setTimeout = originalSetTimeout
        rmSync(dir, { recursive: true, force: true })
      }
    })
  })
})

View File

@@ -255,6 +255,8 @@ export class LSPClient {
private proc: Subprocess<"pipe", "pipe", "pipe"> | null = null
private connection: MessageConnection | null = null
private openedFiles = new Set<string>()
private documentVersions = new Map<string, number>()
private lastSyncedText = new Map<string, string>()
private stderrBuffer: string[] = []
private processExited = false
private diagnosticsStore = new Map<string, Diagnostic[]>()
@@ -479,23 +481,50 @@ export class LSPClient {
async openFile(filePath: string): Promise<void> {
const absPath = resolve(filePath)
if (this.openedFiles.has(absPath)) return
const uri = pathToFileURL(absPath).href
const text = readFileSync(absPath, "utf-8")
const ext = extname(absPath)
const languageId = getLanguageId(ext)
this.sendNotification("textDocument/didOpen", {
textDocument: {
uri: pathToFileURL(absPath).href,
languageId,
version: 1,
text,
},
if (!this.openedFiles.has(absPath)) {
const ext = extname(absPath)
const languageId = getLanguageId(ext)
const version = 1
this.sendNotification("textDocument/didOpen", {
textDocument: {
uri,
languageId,
version,
text,
},
})
this.openedFiles.add(absPath)
this.documentVersions.set(uri, version)
this.lastSyncedText.set(uri, text)
await new Promise((r) => setTimeout(r, 1000))
return
}
const prevText = this.lastSyncedText.get(uri)
if (prevText === text) {
return
}
const nextVersion = (this.documentVersions.get(uri) ?? 1) + 1
this.documentVersions.set(uri, nextVersion)
this.lastSyncedText.set(uri, text)
this.sendNotification("textDocument/didChange", {
textDocument: { uri, version: nextVersion },
contentChanges: [{ text }],
})
this.openedFiles.add(absPath)
await new Promise((r) => setTimeout(r, 1000))
// Some servers update diagnostics only after save
this.sendNotification("textDocument/didSave", {
textDocument: { uri },
text,
})
}
async definition(filePath: string, line: number, character: number): Promise<unknown> {

View File

@@ -118,7 +118,7 @@ export function createSkillMcpTool(options: SkillMcpToolOptions): ToolDefinition
resource_name: tool.schema.string().optional().describe("MCP resource URI to read"),
prompt_name: tool.schema.string().optional().describe("MCP prompt to get"),
arguments: tool.schema
.union([tool.schema.string(), tool.schema.record(tool.schema.string(), tool.schema.unknown())])
.union([tool.schema.string(), tool.schema.object({})])
.optional()
.describe("JSON string or object of arguments"),
grep: tool.schema

View File

@@ -133,7 +133,7 @@ export function createSkillTool(options: SkillLoadOptions = {}): ToolDefinition
const getSkills = async (): Promise<LoadedSkill[]> => {
if (options.skills) return options.skills
if (cachedSkills) return cachedSkills
cachedSkills = await getAllSkills()
cachedSkills = await getAllSkills({disabledSkills: options?.disabledSkills})
return cachedSkills
}

View File

@@ -28,4 +28,5 @@ export interface SkillLoadOptions {
getSessionID?: () => string
/** Git master configuration for watermark/co-author settings */
gitMasterConfig?: GitMasterConfig
disabledSkills?: Set<string>
}