Compare commits
212 Commits
fix/docs-o
...
v3.13.1
| Author | SHA1 | Date | |
|---|---|---|---|
|
|
77424f86c8 | ||
|
|
919f7e4092 | ||
|
|
78a3e985be | ||
|
|
42fb2548d6 | ||
|
|
bff74f4237 | ||
|
|
038b8a79ec | ||
|
|
0aa8bfe839 | ||
|
|
422eaa9ae0 | ||
|
|
63ebedc9a2 | ||
|
|
f0b5835459 | ||
|
|
2a495c2e8d | ||
|
|
0edb87b1c1 | ||
|
|
cca057dc0f | ||
|
|
e000a3bb0d | ||
|
|
c19fc4ba22 | ||
|
|
e0de06851d | ||
|
|
26ac413dd9 | ||
|
|
81c912cf04 | ||
|
|
9c348db450 | ||
|
|
2993b3255d | ||
|
|
0b77e2def0 | ||
|
|
bfa8fa2378 | ||
|
|
6ee680af99 | ||
|
|
d327334ded | ||
|
|
07d120a78d | ||
|
|
8b7b1c843a | ||
|
|
a1786f469d | ||
|
|
da77d8addf | ||
|
|
971912e065 | ||
|
|
af301ab29a | ||
|
|
984464470c | ||
|
|
535ecee318 | ||
|
|
32035d153e | ||
|
|
a0649616bf | ||
|
|
cb12b286c8 | ||
|
|
8e239e134c | ||
|
|
733676f1a9 | ||
|
|
d2e566ba9d | ||
|
|
6da4d2dae0 | ||
|
|
3b41191980 | ||
|
|
0b614b751c | ||
|
|
c56a01c15d | ||
|
|
d2d48fc9ff | ||
|
|
41a43c62fc | ||
|
|
cea8769a7f | ||
|
|
7fa2417c42 | ||
|
|
4bba924dad | ||
|
|
e691303919 | ||
|
|
d4aee20743 | ||
|
|
bad70f5e24 | ||
|
|
b9fa2a3ebc | ||
|
|
0e7bd595f8 | ||
|
|
0732cb85f9 | ||
|
|
500784a9b9 | ||
|
|
5e856b4fde | ||
|
|
03dc903e8e | ||
|
|
69d0b23ab6 | ||
|
|
ee8735cd2c | ||
|
|
d8fe61131c | ||
|
|
935995d270 | ||
|
|
23d8b88c4a | ||
|
|
b4285ce565 | ||
|
|
f9d354b63e | ||
|
|
370eb945ee | ||
|
|
6387065e6f | ||
|
|
bebdb97c21 | ||
|
|
b5e2ead4e1 | ||
|
|
91922dae36 | ||
|
|
cb3d8af995 | ||
|
|
0fb3e2063a | ||
|
|
b37b877c45 | ||
|
|
f854246d7f | ||
|
|
f1eaa7bf9b | ||
|
|
ed9b4a6329 | ||
|
|
a00a22ac4c | ||
|
|
8879581fc1 | ||
|
|
230ce835e5 | ||
|
|
10e56badb3 | ||
|
|
cddf78434c | ||
|
|
0078b736b9 | ||
|
|
6d7f69625b | ||
|
|
fda17dd161 | ||
|
|
c41d6fd912 | ||
|
|
6e9128e060 | ||
|
|
92509d8cfb | ||
|
|
331f7ec52b | ||
|
|
4ba2da7ebb | ||
|
|
f95d3b1ef5 | ||
|
|
d5d7c7dd26 | ||
|
|
6a56c0e241 | ||
|
|
94c234c88c | ||
|
|
2ab976c511 | ||
|
|
dc66088483 | ||
|
|
67b5f46a7c | ||
|
|
0e483d27ac | ||
|
|
f5eaa648e9 | ||
|
|
4c4760a4ee | ||
|
|
7f20dd6ff5 | ||
|
|
de371be236 | ||
|
|
f3c2138ef4 | ||
|
|
0810e37240 | ||
|
|
a64e364fa6 | ||
|
|
d886ac701f | ||
|
|
11f1d71c93 | ||
|
|
30dc50d880 | ||
|
|
b17e633464 | ||
|
|
eec268ee42 | ||
|
|
363661c0d6 | ||
|
|
261bbdf4dc | ||
|
|
8aec4c5cb3 | ||
|
|
16cbc847ac | ||
|
|
436ce71dc8 | ||
|
|
0e610a72bc | ||
|
|
d2a49428b9 | ||
|
|
c3b23bf603 | ||
|
|
50094de73e | ||
|
|
3aa2748c04 | ||
|
|
ccaf759b6b | ||
|
|
521a1f76a9 | ||
|
|
490f0f2090 | ||
|
|
caf595e727 | ||
|
|
1f64a45113 | ||
|
|
9b2dc2189c | ||
|
|
071fab1618 | ||
|
|
f6c24e42af | ||
|
|
22fd976eb9 | ||
|
|
826284f3d9 | ||
|
|
3c7e6a3940 | ||
|
|
33ef4db502 | ||
|
|
458ec06b0e | ||
|
|
6b66f69433 | ||
|
|
ce8957e1e1 | ||
|
|
a3db64b931 | ||
|
|
8859da5fef | ||
|
|
23c0ff60f2 | ||
|
|
4723319eef | ||
|
|
b8f3186d65 | ||
|
|
01e18f8773 | ||
|
|
1669c83782 | ||
|
|
09cfd0b408 | ||
|
|
d48ea025f0 | ||
|
|
c5c7ba4eed | ||
|
|
90aa3a306c | ||
|
|
c2f7d059d2 | ||
|
|
7a96a167e6 | ||
|
|
2da19fe608 | ||
|
|
952bd5338d | ||
|
|
57757a345d | ||
|
|
3caae14192 | ||
|
|
55ac653eaa | ||
|
|
1d5652dfa9 | ||
|
|
76c460536d | ||
|
|
b067d4a284 | ||
|
|
94838ec039 | ||
|
|
224ecea8c7 | ||
|
|
5d5755f29d | ||
|
|
1fdce01fd2 | ||
|
|
c8213c970e | ||
|
|
576ff453e5 | ||
|
|
9b8aca45f9 | ||
|
|
f1f20f5a79 | ||
|
|
de40caf76d | ||
|
|
71b1f7e807 | ||
|
|
8adf6a2c47 | ||
|
|
d80833896c | ||
|
|
5c6194372e | ||
|
|
399796cbe4 | ||
|
|
77c3ed1a1f | ||
|
|
82e25c845b | ||
|
|
d50c38f037 | ||
|
|
f2d5f4ca92 | ||
|
|
b788586caf | ||
|
|
90351e442e | ||
|
|
4ad88b2576 | ||
|
|
2ce69710e3 | ||
|
|
0b4d092cf6 | ||
|
|
53285617d3 | ||
|
|
ae3befbfbe | ||
|
|
dc1a05ac3e | ||
|
|
e271b4a1b0 | ||
|
|
fee938d63a | ||
|
|
4d74d888e4 | ||
|
|
4bc7b1d27c | ||
|
|
78dac0642e | ||
|
|
92bc72a90b | ||
|
|
a7301ba8a9 | ||
|
|
e9887dd82f | ||
|
|
c0082d8a09 | ||
|
|
fbc3b4e230 | ||
|
|
1f7fdb43ba | ||
|
|
566031f4fa | ||
|
|
0cf386ec52 | ||
|
|
d493f9ec3a | ||
|
|
2c7ded2433 | ||
|
|
82c7807a4f | ||
|
|
df7e1ae16d | ||
|
|
0471078006 | ||
|
|
1070b9170f | ||
|
|
bb312711cf | ||
|
|
c31facf41e | ||
|
|
c644930753 | ||
|
|
b79df5e018 | ||
|
|
de66f1f397 | ||
|
|
427fa6d7a2 | ||
|
|
239da8b02a | ||
|
|
17244e2c84 | ||
|
|
24a0f7b032 | ||
|
|
fc48df1d53 | ||
|
|
6455b851b8 | ||
|
|
9346bc8379 | ||
|
|
3055454ecc | ||
|
|
11e9276498 |
BIN
.github/assets/building-in-public.png
vendored
Normal file
BIN
.github/assets/building-in-public.png
vendored
Normal file
Binary file not shown.
|
After Width: | Height: | Size: 278 KiB |
98
.github/workflows/publish-platform.yml
vendored
98
.github/workflows/publish-platform.yml
vendored
@@ -59,20 +59,39 @@ jobs:
|
||||
- name: Check if already published
|
||||
id: check
|
||||
run: |
|
||||
PKG_NAME="oh-my-opencode-${{ matrix.platform }}"
|
||||
VERSION="${{ inputs.version }}"
|
||||
STATUS=$(curl -s -o /dev/null -w "%{http_code}" "https://registry.npmjs.org/${PKG_NAME}/${VERSION}")
|
||||
# Convert platform name for output (replace - with _)
|
||||
PLATFORM_KEY="${{ matrix.platform }}"
|
||||
PLATFORM_KEY="${PLATFORM_KEY//-/_}"
|
||||
if [ "$STATUS" = "200" ]; then
|
||||
|
||||
# Check oh-my-opencode
|
||||
OC_STATUS=$(curl -s -o /dev/null -w "%{http_code}" "https://registry.npmjs.org/oh-my-opencode-${{ matrix.platform }}/${VERSION}")
|
||||
# Check oh-my-openagent
|
||||
OA_STATUS=$(curl -s -o /dev/null -w "%{http_code}" "https://registry.npmjs.org/oh-my-openagent-${{ matrix.platform }}/${VERSION}")
|
||||
|
||||
echo "oh-my-opencode-${{ matrix.platform }}@${VERSION}: ${OC_STATUS}"
|
||||
echo "oh-my-openagent-${{ matrix.platform }}@${VERSION}: ${OA_STATUS}"
|
||||
|
||||
if [ "$OC_STATUS" = "200" ]; then
|
||||
echo "skip_opencode=true" >> $GITHUB_OUTPUT
|
||||
echo "✓ oh-my-opencode-${{ matrix.platform }}@${VERSION} already published"
|
||||
else
|
||||
echo "skip_opencode=false" >> $GITHUB_OUTPUT
|
||||
echo "→ oh-my-opencode-${{ matrix.platform }}@${VERSION} needs publishing"
|
||||
fi
|
||||
|
||||
if [ "$OA_STATUS" = "200" ]; then
|
||||
echo "skip_openagent=true" >> $GITHUB_OUTPUT
|
||||
echo "✓ oh-my-openagent-${{ matrix.platform }}@${VERSION} already published"
|
||||
else
|
||||
echo "skip_openagent=false" >> $GITHUB_OUTPUT
|
||||
echo "→ oh-my-openagent-${{ matrix.platform }}@${VERSION} needs publishing"
|
||||
fi
|
||||
|
||||
# Skip build only if BOTH are already published
|
||||
if [ "$OC_STATUS" = "200" ] && [ "$OA_STATUS" = "200" ]; then
|
||||
echo "skip=true" >> $GITHUB_OUTPUT
|
||||
echo "skip_${PLATFORM_KEY}=true" >> $GITHUB_OUTPUT
|
||||
echo "✓ ${PKG_NAME}@${VERSION} already published"
|
||||
else
|
||||
echo "skip=false" >> $GITHUB_OUTPUT
|
||||
echo "skip_${PLATFORM_KEY}=false" >> $GITHUB_OUTPUT
|
||||
echo "→ ${PKG_NAME}@${VERSION} needs publishing"
|
||||
fi
|
||||
|
||||
- name: Update version in package.json
|
||||
@@ -207,23 +226,38 @@ jobs:
|
||||
matrix:
|
||||
platform: [darwin-arm64, darwin-x64, darwin-x64-baseline, linux-x64, linux-x64-baseline, linux-arm64, linux-x64-musl, linux-x64-musl-baseline, linux-arm64-musl, windows-x64, windows-x64-baseline]
|
||||
steps:
|
||||
- name: Check if oh-my-opencode already published
|
||||
- name: Check if already published
|
||||
id: check
|
||||
run: |
|
||||
PKG_NAME="oh-my-opencode-${{ matrix.platform }}"
|
||||
VERSION="${{ inputs.version }}"
|
||||
STATUS=$(curl -s -o /dev/null -w "%{http_code}" "https://registry.npmjs.org/${PKG_NAME}/${VERSION}")
|
||||
if [ "$STATUS" = "200" ]; then
|
||||
echo "skip=true" >> $GITHUB_OUTPUT
|
||||
echo "✓ ${PKG_NAME}@${VERSION} already published, skipping"
|
||||
|
||||
OC_STATUS=$(curl -s -o /dev/null -w "%{http_code}" "https://registry.npmjs.org/oh-my-opencode-${{ matrix.platform }}/${VERSION}")
|
||||
OA_STATUS=$(curl -s -o /dev/null -w "%{http_code}" "https://registry.npmjs.org/oh-my-openagent-${{ matrix.platform }}/${VERSION}")
|
||||
|
||||
if [ "$OC_STATUS" = "200" ]; then
|
||||
echo "skip_opencode=true" >> $GITHUB_OUTPUT
|
||||
echo "✓ oh-my-opencode-${{ matrix.platform }}@${VERSION} already published"
|
||||
else
|
||||
echo "skip=false" >> $GITHUB_OUTPUT
|
||||
echo "→ ${PKG_NAME}@${VERSION} will be published"
|
||||
echo "skip_opencode=false" >> $GITHUB_OUTPUT
|
||||
fi
|
||||
|
||||
if [ "$OA_STATUS" = "200" ]; then
|
||||
echo "skip_openagent=true" >> $GITHUB_OUTPUT
|
||||
echo "✓ oh-my-openagent-${{ matrix.platform }}@${VERSION} already published"
|
||||
else
|
||||
echo "skip_openagent=false" >> $GITHUB_OUTPUT
|
||||
fi
|
||||
|
||||
# Need artifact if either package needs publishing
|
||||
if [ "$OC_STATUS" = "200" ] && [ "$OA_STATUS" = "200" ]; then
|
||||
echo "skip_all=true" >> $GITHUB_OUTPUT
|
||||
else
|
||||
echo "skip_all=false" >> $GITHUB_OUTPUT
|
||||
fi
|
||||
|
||||
- name: Download artifact
|
||||
id: download
|
||||
if: steps.check.outputs.skip != 'true'
|
||||
if: steps.check.outputs.skip_all != 'true'
|
||||
continue-on-error: true
|
||||
uses: actions/download-artifact@v4
|
||||
with:
|
||||
@@ -231,7 +265,7 @@ jobs:
|
||||
path: .
|
||||
|
||||
- name: Extract artifact
|
||||
if: steps.check.outputs.skip != 'true' && steps.download.outcome == 'success'
|
||||
if: steps.check.outputs.skip_all != 'true' && steps.download.outcome == 'success'
|
||||
run: |
|
||||
PLATFORM="${{ matrix.platform }}"
|
||||
mkdir -p packages/${PLATFORM}
|
||||
@@ -247,13 +281,13 @@ jobs:
|
||||
ls -la packages/${PLATFORM}/bin/
|
||||
|
||||
- uses: actions/setup-node@v4
|
||||
if: steps.check.outputs.skip != 'true' && steps.download.outcome == 'success'
|
||||
if: steps.check.outputs.skip_all != 'true' && steps.download.outcome == 'success'
|
||||
with:
|
||||
node-version: "24"
|
||||
registry-url: "https://registry.npmjs.org"
|
||||
|
||||
- name: Publish ${{ matrix.platform }}
|
||||
if: steps.check.outputs.skip != 'true' && steps.download.outcome == 'success'
|
||||
- name: Publish oh-my-opencode-${{ matrix.platform }}
|
||||
if: steps.check.outputs.skip_opencode != 'true' && steps.download.outcome == 'success'
|
||||
run: |
|
||||
cd packages/${{ matrix.platform }}
|
||||
|
||||
@@ -267,3 +301,25 @@ jobs:
|
||||
NODE_AUTH_TOKEN: ${{ secrets.NODE_AUTH_TOKEN }}
|
||||
NPM_CONFIG_PROVENANCE: true
|
||||
timeout-minutes: 15
|
||||
|
||||
- name: Publish oh-my-openagent-${{ matrix.platform }}
|
||||
if: steps.check.outputs.skip_openagent != 'true' && steps.download.outcome == 'success'
|
||||
run: |
|
||||
cd packages/${{ matrix.platform }}
|
||||
|
||||
# Rename package for oh-my-openagent
|
||||
jq --arg name "oh-my-openagent-${{ matrix.platform }}" \
|
||||
--arg desc "Platform-specific binary for oh-my-openagent (${{ matrix.platform }})" \
|
||||
'.name = $name | .description = $desc | .bin = {"oh-my-openagent": (.bin | to_entries | .[0].value)}' \
|
||||
package.json > tmp.json && mv tmp.json package.json
|
||||
|
||||
TAG_ARG=""
|
||||
if [ -n "${{ inputs.dist_tag }}" ]; then
|
||||
TAG_ARG="--tag ${{ inputs.dist_tag }}"
|
||||
fi
|
||||
|
||||
npm publish --access public --provenance $TAG_ARG
|
||||
env:
|
||||
NODE_AUTH_TOKEN: ${{ secrets.NODE_AUTH_TOKEN }}
|
||||
NPM_CONFIG_PROVENANCE: true
|
||||
timeout-minutes: 15
|
||||
|
||||
48
.github/workflows/publish.yml
vendored
48
.github/workflows/publish.yml
vendored
@@ -57,6 +57,7 @@ jobs:
|
||||
bun test src/cli/doctor/format-default.test.ts
|
||||
bun test src/tools/call-omo-agent/sync-executor.test.ts
|
||||
bun test src/tools/call-omo-agent/session-creator.test.ts
|
||||
bun test src/tools/session-manager
|
||||
bun test src/features/opencode-skill-loader/loader.test.ts
|
||||
bun test src/hooks/anthropic-context-window-limit-recovery/recovery-hook.test.ts
|
||||
bun test src/hooks/anthropic-context-window-limit-recovery/executor.test.ts
|
||||
@@ -66,9 +67,8 @@ jobs:
|
||||
# Enumerate subdirectories/files explicitly to EXCLUDE mock-heavy files
|
||||
# that were already run in isolation above.
|
||||
# Excluded from src/cli: doctor/formatter.test.ts, doctor/format-default.test.ts
|
||||
# Excluded from src/tools: call-omo-agent/sync-executor.test.ts, call-omo-agent/session-creator.test.ts
|
||||
# Excluded from src/tools: call-omo-agent/sync-executor.test.ts, call-omo-agent/session-creator.test.ts, session-manager (all)
|
||||
# Excluded from src/hooks/anthropic-context-window-limit-recovery: recovery-hook.test.ts, executor.test.ts
|
||||
# Excluded from src/tools: call-omo-agent/sync-executor.test.ts, call-omo-agent/session-creator.test.ts
|
||||
bun test bin script src/config src/mcp src/index.test.ts \
|
||||
src/agents src/shared \
|
||||
src/cli/run src/cli/config-manager src/cli/mcp-oauth \
|
||||
@@ -77,7 +77,7 @@ jobs:
|
||||
src/cli/doctor/runner.test.ts src/cli/doctor/checks \
|
||||
src/tools/ast-grep src/tools/background-task src/tools/delegate-task \
|
||||
src/tools/glob src/tools/grep src/tools/interactive-bash \
|
||||
src/tools/look-at src/tools/lsp src/tools/session-manager \
|
||||
src/tools/look-at src/tools/lsp \
|
||||
src/tools/skill src/tools/skill-mcp src/tools/slashcommand src/tools/task \
|
||||
src/tools/call-omo-agent/background-agent-executor.test.ts \
|
||||
src/tools/call-omo-agent/background-executor.test.ts \
|
||||
@@ -216,6 +216,48 @@ jobs:
|
||||
NODE_AUTH_TOKEN: ${{ secrets.NODE_AUTH_TOKEN }}
|
||||
NPM_CONFIG_PROVENANCE: true
|
||||
|
||||
- name: Check if oh-my-openagent already published
|
||||
id: check-openagent
|
||||
run: |
|
||||
VERSION="${{ steps.version.outputs.version }}"
|
||||
STATUS=$(curl -s -o /dev/null -w "%{http_code}" "https://registry.npmjs.org/oh-my-openagent/${VERSION}")
|
||||
if [ "$STATUS" = "200" ]; then
|
||||
echo "skip=true" >> $GITHUB_OUTPUT
|
||||
echo "✓ oh-my-openagent@${VERSION} already published"
|
||||
else
|
||||
echo "skip=false" >> $GITHUB_OUTPUT
|
||||
fi
|
||||
|
||||
- name: Publish oh-my-openagent
|
||||
if: steps.check-openagent.outputs.skip != 'true'
|
||||
run: |
|
||||
VERSION="${{ steps.version.outputs.version }}"
|
||||
|
||||
# Update package name, version, and optionalDependencies for oh-my-openagent
|
||||
jq --arg v "$VERSION" '
|
||||
.name = "oh-my-openagent" |
|
||||
.version = $v |
|
||||
.optionalDependencies = (
|
||||
.optionalDependencies | to_entries |
|
||||
map(.key = (.key | sub("^oh-my-opencode-"; "oh-my-openagent-")) | .value = $v) |
|
||||
from_entries
|
||||
)
|
||||
' package.json > tmp.json && mv tmp.json package.json
|
||||
|
||||
TAG_ARG=""
|
||||
if [ -n "${{ steps.version.outputs.dist_tag }}" ]; then
|
||||
TAG_ARG="--tag ${{ steps.version.outputs.dist_tag }}"
|
||||
fi
|
||||
npm publish --access public --provenance $TAG_ARG || echo "::warning::oh-my-openagent publish failed"
|
||||
env:
|
||||
NODE_AUTH_TOKEN: ${{ secrets.NODE_AUTH_TOKEN }}
|
||||
NPM_CONFIG_PROVENANCE: true
|
||||
|
||||
- name: Restore package.json
|
||||
if: steps.check-openagent.outputs.skip != 'true'
|
||||
run: |
|
||||
git checkout -- package.json
|
||||
|
||||
trigger-platform:
|
||||
runs-on: ubuntu-latest
|
||||
needs: publish-main
|
||||
|
||||
1
.gitignore
vendored
1
.gitignore
vendored
@@ -36,3 +36,4 @@ test-injection/
|
||||
notepad.md
|
||||
oauth-success.html
|
||||
*.bun-build
|
||||
.omx/
|
||||
|
||||
@@ -79,47 +79,65 @@ Pass `REPO`, `REPORT_DIR`, and `COMMIT_SHA` to every subagent.
|
||||
|
||||
---
|
||||
|
||||
## Phase 1: Fetch All Open Items
|
||||
---
|
||||
|
||||
<fetch>
|
||||
Paginate if 500 results returned.
|
||||
## Phase 1: Fetch All Open Items (CORRECTED)
|
||||
|
||||
**IMPORTANT:** `body` and `comments` fields may contain control characters that break jq parsing. Fetch basic metadata first, then fetch full details per-item in subagents.
|
||||
|
||||
```bash
|
||||
ISSUES=$(gh issue list --repo $REPO --state open --limit 500 \
|
||||
--json number,title,state,createdAt,updatedAt,labels,author,body,comments)
|
||||
ISSUE_LEN=$(echo "$ISSUES" | jq length)
|
||||
if [ "$ISSUE_LEN" -eq 500 ]; then
|
||||
LAST_DATE=$(echo "$ISSUES" | jq -r '.[-1].createdAt')
|
||||
# Step 1: Fetch basic metadata (without body/comments to avoid JSON parsing issues)
|
||||
ISSUES_LIST=$(gh issue list --repo $REPO --state open --limit 500 \
|
||||
--json number,title,labels,author,createdAt)
|
||||
ISSUE_COUNT=$(echo "$ISSUES_LIST" | jq length)
|
||||
|
||||
# Paginate if needed
|
||||
if [ "$ISSUE_COUNT" -eq 500 ]; then
|
||||
LAST_DATE=$(echo "$ISSUES_LIST" | jq -r '.[-1].createdAt')
|
||||
while true; do
|
||||
PAGE=$(gh issue list --repo $REPO --state open --limit 500 \
|
||||
--search "created:<$LAST_DATE" \
|
||||
--json number,title,state,createdAt,updatedAt,labels,author,body,comments)
|
||||
PAGE_LEN=$(echo "$PAGE" | jq length)
|
||||
[ "$PAGE_LEN" -eq 0 ] && break
|
||||
ISSUES=$(echo "[$ISSUES, $PAGE]" | jq -s 'add | unique_by(.number)')
|
||||
[ "$PAGE_LEN" -lt 500 ] && break
|
||||
--json number,title,labels,author,createdAt)
|
||||
PAGE_COUNT=$(echo "$PAGE" | jq length)
|
||||
[ "$PAGE_COUNT" -eq 0 ] && break
|
||||
ISSUES_LIST=$(echo "$ISSUES_LIST" "$PAGE" | jq -s '.[0] + .[1] | unique_by(.number)')
|
||||
ISSUE_COUNT=$(echo "$ISSUES_LIST" | jq length)
|
||||
[ "$PAGE_COUNT" -lt 500 ] && break
|
||||
LAST_DATE=$(echo "$PAGE" | jq -r '.[-1].createdAt')
|
||||
done
|
||||
fi
|
||||
|
||||
PRS=$(gh pr list --repo $REPO --state open --limit 500 \
|
||||
--json number,title,state,createdAt,updatedAt,labels,author,body,headRefName,baseRefName,isDraft,mergeable,reviewDecision,statusCheckRollup)
|
||||
PR_LEN=$(echo "$PRS" | jq length)
|
||||
if [ "$PR_LEN" -eq 500 ]; then
|
||||
LAST_DATE=$(echo "$PRS" | jq -r '.[-1].createdAt')
|
||||
# Same for PRs
|
||||
PRS_LIST=$(gh pr list --repo $REPO --state open --limit 500 \
|
||||
--json number,title,labels,author,headRefName,baseRefName,isDraft,createdAt)
|
||||
PR_COUNT=$(echo "$PRS_LIST" | jq length)
|
||||
|
||||
if [ "$PR_COUNT" -eq 500 ]; then
|
||||
LAST_DATE=$(echo "$PRS_LIST" | jq -r '.[-1].createdAt')
|
||||
while true; do
|
||||
PAGE=$(gh pr list --repo $REPO --state open --limit 500 \
|
||||
--search "created:<$LAST_DATE" \
|
||||
--json number,title,state,createdAt,updatedAt,labels,author,body,headRefName,baseRefName,isDraft,mergeable,reviewDecision,statusCheckRollup)
|
||||
PAGE_LEN=$(echo "$PAGE" | jq length)
|
||||
[ "$PAGE_LEN" -eq 0 ] && break
|
||||
PRS=$(echo "[$PRS, $PAGE]" | jq -s 'add | unique_by(.number)')
|
||||
[ "$PAGE_LEN" -lt 500 ] && break
|
||||
--json number,title,labels,author,headRefName,baseRefName,isDraft,createdAt)
|
||||
PAGE_COUNT=$(echo "$PAGE" | jq length)
|
||||
[ "$PAGE_COUNT" -eq 0 ] && break
|
||||
PRS_LIST=$(echo "$PRS_LIST" "$PAGE" | jq -s '.[0] + .[1] | unique_by(.number)')
|
||||
PR_COUNT=$(echo "$PRS_LIST" | jq length)
|
||||
[ "$PAGE_COUNT" -lt 500 ] && break
|
||||
LAST_DATE=$(echo "$PAGE" | jq -r '.[-1].createdAt')
|
||||
done
|
||||
fi
|
||||
|
||||
echo "Total issues: $ISSUE_COUNT, Total PRs: $PR_COUNT"
|
||||
```
|
||||
</fetch>
|
||||
|
||||
**LARGE REPOSITORY HANDLING:**
|
||||
If total items exceeds 50, you MUST process ALL items. Use the pagination code above to fetch every single open issue and PR.
|
||||
**DO NOT** sample or limit to 50 items - process the entire backlog.
|
||||
|
||||
Example: If there are 500 open issues, spawn 500 subagents. If there are 1000 open PRs, spawn 1000 subagents.
|
||||
|
||||
**Note:** Background task system will queue excess tasks automatically.
|
||||
|
||||
|
||||
---
|
||||
|
||||
@@ -136,7 +154,36 @@ fi
|
||||
|
||||
---
|
||||
|
||||
## Phase 3: Spawn Subagents
|
||||
## Phase 3: Spawn Subagents (Individual Tool Calls)
|
||||
|
||||
**CRITICAL: Create tasks ONE BY ONE using individual `task_create` tool calls. NEVER batch or script.**
|
||||
|
||||
For each item, execute these steps sequentially:
|
||||
|
||||
### Step 3.1: Create Task Record
|
||||
```typescript
|
||||
task_create(
|
||||
subject="Triage: #{number} {title}",
|
||||
description="GitHub {issue|PR} triage analysis - {type}",
|
||||
metadata={"type": "{ISSUE_QUESTION|ISSUE_BUG|ISSUE_FEATURE|ISSUE_OTHER|PR_BUGFIX|PR_OTHER}", "number": {number}}
|
||||
)
|
||||
```
|
||||
|
||||
### Step 3.2: Spawn Analysis Subagent (Background)
|
||||
```typescript
|
||||
task(
|
||||
category="quick",
|
||||
run_in_background=true,
|
||||
load_skills=[],
|
||||
prompt=SUBAGENT_PROMPT
|
||||
)
|
||||
```
|
||||
|
||||
**ABSOLUTE RULES for Subagents:**
|
||||
- **ONLY ANALYZE** - Never take action on GitHub (no comments, merges, closes)
|
||||
- **READ-ONLY** - Use tools only for reading code/GitHub data
|
||||
- **WRITE REPORT ONLY** - Output goes to `{REPORT_DIR}/{issue|pr}-{number}.md` via Write tool
|
||||
- **EVIDENCE REQUIRED** - Every claim must have GitHub permalink as proof
|
||||
|
||||
```
|
||||
For each item:
|
||||
@@ -170,6 +217,7 @@ ABSOLUTE RULES (violating ANY = critical failure):
|
||||
- Your ONLY writable output: {REPORT_DIR}/{issue|pr}-{number}.md via the Write tool
|
||||
```
|
||||
|
||||
|
||||
---
|
||||
|
||||
### ISSUE_QUESTION
|
||||
|
||||
10
AGENTS.md
10
AGENTS.md
@@ -4,7 +4,7 @@
|
||||
|
||||
## OVERVIEW
|
||||
|
||||
OpenCode plugin (npm: `oh-my-opencode`) that extends Claude Code (OpenCode fork) with multi-agent orchestration, 46 lifecycle hooks, 26 tools, skill/command/MCP systems, and Claude Code compatibility. 1268 TypeScript files, 160k LOC.
|
||||
OpenCode plugin (npm: `oh-my-opencode`) that extends Claude Code (OpenCode fork) with multi-agent orchestration, 48 lifecycle hooks, 26 tools, skill/command/MCP systems, and Claude Code compatibility. 1268 TypeScript files, 160k LOC.
|
||||
|
||||
## STRUCTURE
|
||||
|
||||
@@ -14,14 +14,14 @@ oh-my-opencode/
|
||||
│ ├── index.ts # Plugin entry: loadConfig → createManagers → createTools → createHooks → createPluginInterface
|
||||
│ ├── plugin-config.ts # JSONC multi-level config: user → project → defaults (Zod v4)
|
||||
│ ├── agents/ # 11 agents (Sisyphus, Hephaestus, Oracle, Librarian, Explore, Atlas, Prometheus, Metis, Momus, Multimodal-Looker, Sisyphus-Junior)
|
||||
│ ├── hooks/ # 46 hooks across 45 directories + 11 standalone files
|
||||
│ ├── hooks/ # 48 lifecycle hooks across dedicated modules and standalone files
|
||||
│ ├── tools/ # 26 tools across 15 directories
|
||||
│ ├── features/ # 19 feature modules (background-agent, skill-loader, tmux, MCP-OAuth, etc.)
|
||||
│ ├── shared/ # 95+ utility files in 13 categories
|
||||
│ ├── config/ # Zod v4 schema system (24 files)
|
||||
│ ├── cli/ # CLI: install, run, doctor, mcp-oauth (Commander.js)
|
||||
│ ├── mcp/ # 3 built-in remote MCPs (websearch, context7, grep_app)
|
||||
│ ├── plugin/ # 8 OpenCode hook handlers + 46 hook composition
|
||||
│ ├── plugin/ # 8 OpenCode hook handlers + 48 hook composition
|
||||
│ └── plugin-handlers/ # 6-phase config loading pipeline
|
||||
├── packages/ # Monorepo: cli-runner, 12 platform binaries
|
||||
└── local-ignore/ # Dev-only test fixtures
|
||||
@@ -34,7 +34,7 @@ OhMyOpenCodePlugin(ctx)
|
||||
├─→ loadPluginConfig() # JSONC parse → project/user merge → Zod validate → migrate
|
||||
├─→ createManagers() # TmuxSessionManager, BackgroundManager, SkillMcpManager, ConfigHandler
|
||||
├─→ createTools() # SkillContext + AvailableCategories + ToolRegistry (26 tools)
|
||||
├─→ createHooks() # 3-tier: Core(37) + Continuation(7) + Skill(2) = 46 hooks
|
||||
├─→ createHooks() # 3-tier: Core(39) + Continuation(7) + Skill(2) = 48 hooks
|
||||
└─→ createPluginInterface() # 8 OpenCode hook handlers → PluginInterface
|
||||
```
|
||||
|
||||
@@ -97,7 +97,7 @@ Fields: agents (14 overridable, 21 fields each), categories (8 built-in + custom
|
||||
- **Test pattern**: Bun test (`bun:test`), co-located `*.test.ts`, given/when/then style (nested describe with `#given`/`#when`/`#then` prefixes)
|
||||
- **CI test split**: mock-heavy tests run in isolation (separate `bun test` processes), rest in batch
|
||||
- **Factory pattern**: `createXXX()` for all tools, hooks, agents
|
||||
- **Hook tiers**: Session (23) → Tool-Guard (10) → Transform (4) → Continuation (7) → Skill (2)
|
||||
- **Hook tiers**: Session (23) → Tool-Guard (12) → Transform (4) → Continuation (7) → Skill (2)
|
||||
- **Agent modes**: `primary` (respects UI model) vs `subagent` (own fallback chain) vs `all`
|
||||
- **Model resolution**: 4-step: override → category-default → provider-fallback → system-default
|
||||
- **Config format**: JSONC with comments, Zod v4 validation, snake_case keys
|
||||
|
||||
11
README.ja.md
11
README.ja.md
@@ -4,6 +4,17 @@
|
||||
> コアメンテナーのQが負傷したため、今週は Issue/PR への返信とリリースが遅れる可能性があります。
|
||||
> ご理解とご支援に感謝します。
|
||||
|
||||
> [!TIP]
|
||||
> **Building in Public**
|
||||
>
|
||||
> メンテナーが Jobdori を使い、oh-my-opencode をリアルタイムで開発・メンテナンスしています。Jobdori は OpenClaw をベースに大幅カスタマイズされた AI アシスタントです。
|
||||
> すべての機能開発、修正、Issue トリアージを Discord でライブでご覧いただけます。
|
||||
>
|
||||
> [](https://discord.gg/PUwSMR9XNk)
|
||||
>
|
||||
> [**→ #building-in-public で確認する**](https://discord.gg/PUwSMR9XNk)
|
||||
|
||||
|
||||
> [!NOTE]
|
||||
>
|
||||
> [](https://sisyphuslabs.ai)
|
||||
|
||||
11
README.ko.md
11
README.ko.md
@@ -4,6 +4,17 @@
|
||||
> 핵심 메인테이너 Q가 부상을 입어, 이번 주에는 이슈/PR 응답 및 릴리스가 지연될 수 있습니다.
|
||||
> 양해와 응원에 감사드립니다.
|
||||
|
||||
> [!TIP]
|
||||
> **Building in Public**
|
||||
>
|
||||
> 메인테이너가 Jobdori를 통해 oh-my-opencode를 실시간으로 개발하고 있습니다. Jobdori는 OpenClaw를 기반으로 대폭 커스터마이징된 AI 어시스턴트입니다.
|
||||
> 모든 기능 개발, 버그 수정, 이슈 트리아지를 Discord에서 실시간으로 확인하세요.
|
||||
>
|
||||
> [](https://discord.gg/PUwSMR9XNk)
|
||||
>
|
||||
> [**→ #building-in-public에서 확인하기**](https://discord.gg/PUwSMR9XNk)
|
||||
|
||||
|
||||
> [!TIP]
|
||||
> 저희와 함께 하세요!
|
||||
>
|
||||
|
||||
16
README.md
16
README.md
@@ -1,8 +1,12 @@
|
||||
> [!WARNING]
|
||||
> **TEMP NOTICE (This Week): Reduced Maintainer Availability**
|
||||
> [!TIP]
|
||||
> **Building in Public**
|
||||
>
|
||||
> Core maintainer Q got injured, so issue/PR responses and releases may be delayed this week.
|
||||
> Thank you for your patience and support.
|
||||
> The maintainer builds and maintains oh-my-opencode in real-time with Jobdori, an AI assistant built on a heavily customized fork of OpenClaw.
|
||||
> Every feature, every fix, every issue triage — live in our Discord.
|
||||
>
|
||||
> [](https://discord.gg/PUwSMR9XNk)
|
||||
>
|
||||
> [**→ Watch it happen in #building-in-public**](https://discord.gg/PUwSMR9XNk)
|
||||
|
||||
> [!NOTE]
|
||||
>
|
||||
@@ -310,7 +314,7 @@ See full [Features Documentation](docs/reference/features.md).
|
||||
- **Claude Code Compatibility**: Full hook system, commands, skills, agents, MCPs
|
||||
- **Built-in MCPs**: websearch (Exa), context7 (docs), grep_app (GitHub search)
|
||||
- **Session Tools**: List, read, search, and analyze session history
|
||||
- **Productivity Features**: Ralph Loop, Todo Enforcer, GPT permission-tail continuation, Comment Checker, Think Mode, and more
|
||||
- **Productivity Features**: Ralph Loop, Todo Enforcer, Comment Checker, Think Mode, and more
|
||||
- **Model Setup**: Agent-model matching is built into the [Installation Guide](docs/guide/installation.md#step-5-understand-your-model-setup)
|
||||
|
||||
## Configuration
|
||||
@@ -327,7 +331,7 @@ See [Configuration Documentation](docs/reference/configuration.md).
|
||||
- **Sisyphus Agent**: Main orchestrator with Prometheus (Planner) and Metis (Plan Consultant)
|
||||
- **Background Tasks**: Configure concurrency limits per provider/model
|
||||
- **Categories**: Domain-specific task delegation (`visual`, `business-logic`, custom)
|
||||
- **Hooks**: 25+ built-in hooks, including `gpt-permission-continuation`, all configurable via `disabled_hooks`
|
||||
- **Hooks**: 25+ built-in hooks, all configurable via `disabled_hooks`
|
||||
- **MCPs**: Built-in websearch (Exa), context7 (docs), grep_app (GitHub search)
|
||||
- **LSP**: Full LSP support with refactoring tools
|
||||
- **Experimental**: Aggressive truncation, auto-resume, and more
|
||||
|
||||
11
README.ru.md
11
README.ru.md
@@ -4,6 +4,17 @@
|
||||
> Ключевой мейнтейнер Q получил травму, поэтому на этой неделе ответы по issue/PR и релизы могут задерживаться.
|
||||
> Спасибо за терпение и поддержку.
|
||||
|
||||
> [!TIP]
|
||||
> **Building in Public**
|
||||
>
|
||||
> Мейнтейнер разрабатывает и поддерживает oh-my-opencode в режиме реального времени с помощью Jobdori — ИИ-ассистента на базе глубоко кастомизированной версии OpenClaw.
|
||||
> Каждая фича, каждый фикс, каждый триаж issue — в прямом эфире в нашем Discord.
|
||||
>
|
||||
> [](https://discord.gg/PUwSMR9XNk)
|
||||
>
|
||||
> [**→ Смотрите в #building-in-public**](https://discord.gg/PUwSMR9XNk)
|
||||
|
||||
|
||||
> [!NOTE]
|
||||
>
|
||||
> [](https://sisyphuslabs.ai)
|
||||
|
||||
@@ -4,6 +4,17 @@
|
||||
> 核心维护者 Q 因受伤,本周 issue/PR 回复和发布可能会延迟。
|
||||
> 感谢你的耐心与支持。
|
||||
|
||||
> [!TIP]
|
||||
> **Building in Public**
|
||||
>
|
||||
> 维护者正在使用 Jobdori 实时开发和维护 oh-my-opencode。Jobdori 是基于 OpenClaw 深度定制的 AI 助手。
|
||||
> 每个功能开发、每次修复、每次 Issue 分类,都在 Discord 上实时进行。
|
||||
>
|
||||
> [](https://discord.gg/PUwSMR9XNk)
|
||||
>
|
||||
> [**→ 在 #building-in-public 频道中查看**](https://discord.gg/PUwSMR9XNk)
|
||||
|
||||
|
||||
> [!NOTE]
|
||||
>
|
||||
> [](https://sisyphuslabs.ai)
|
||||
|
||||
@@ -3699,6 +3699,30 @@
|
||||
"syncPollTimeoutMs": {
|
||||
"type": "number",
|
||||
"minimum": 60000
|
||||
},
|
||||
"maxToolCalls": {
|
||||
"type": "integer",
|
||||
"minimum": 10,
|
||||
"maximum": 9007199254740991
|
||||
},
|
||||
"circuitBreaker": {
|
||||
"type": "object",
|
||||
"properties": {
|
||||
"enabled": {
|
||||
"type": "boolean"
|
||||
},
|
||||
"maxToolCalls": {
|
||||
"type": "integer",
|
||||
"minimum": 10,
|
||||
"maximum": 9007199254740991
|
||||
},
|
||||
"consecutiveThreshold": {
|
||||
"type": "integer",
|
||||
"minimum": 5,
|
||||
"maximum": 9007199254740991
|
||||
}
|
||||
},
|
||||
"additionalProperties": false
|
||||
}
|
||||
},
|
||||
"additionalProperties": false
|
||||
@@ -3712,6 +3736,147 @@
|
||||
},
|
||||
"additionalProperties": false
|
||||
},
|
||||
"openclaw": {
|
||||
"type": "object",
|
||||
"properties": {
|
||||
"enabled": {
|
||||
"default": false,
|
||||
"type": "boolean"
|
||||
},
|
||||
"gateways": {
|
||||
"default": {},
|
||||
"type": "object",
|
||||
"propertyNames": {
|
||||
"type": "string"
|
||||
},
|
||||
"additionalProperties": {
|
||||
"type": "object",
|
||||
"properties": {
|
||||
"type": {
|
||||
"default": "http",
|
||||
"type": "string",
|
||||
"enum": [
|
||||
"http",
|
||||
"command"
|
||||
]
|
||||
},
|
||||
"url": {
|
||||
"type": "string"
|
||||
},
|
||||
"method": {
|
||||
"default": "POST",
|
||||
"type": "string"
|
||||
},
|
||||
"headers": {
|
||||
"type": "object",
|
||||
"propertyNames": {
|
||||
"type": "string"
|
||||
},
|
||||
"additionalProperties": {
|
||||
"type": "string"
|
||||
}
|
||||
},
|
||||
"command": {
|
||||
"type": "string"
|
||||
},
|
||||
"timeout": {
|
||||
"type": "number"
|
||||
}
|
||||
},
|
||||
"required": [
|
||||
"type",
|
||||
"method"
|
||||
],
|
||||
"additionalProperties": false
|
||||
}
|
||||
},
|
||||
"hooks": {
|
||||
"default": {},
|
||||
"type": "object",
|
||||
"propertyNames": {
|
||||
"type": "string"
|
||||
},
|
||||
"additionalProperties": {
|
||||
"type": "object",
|
||||
"properties": {
|
||||
"enabled": {
|
||||
"default": true,
|
||||
"type": "boolean"
|
||||
},
|
||||
"gateway": {
|
||||
"type": "string"
|
||||
},
|
||||
"instruction": {
|
||||
"type": "string"
|
||||
}
|
||||
},
|
||||
"required": [
|
||||
"enabled",
|
||||
"gateway",
|
||||
"instruction"
|
||||
],
|
||||
"additionalProperties": false
|
||||
}
|
||||
},
|
||||
"replyListener": {
|
||||
"type": "object",
|
||||
"properties": {
|
||||
"discordBotToken": {
|
||||
"type": "string"
|
||||
},
|
||||
"discordChannelId": {
|
||||
"type": "string"
|
||||
},
|
||||
"discordMention": {
|
||||
"type": "string"
|
||||
},
|
||||
"authorizedDiscordUserIds": {
|
||||
"default": [],
|
||||
"type": "array",
|
||||
"items": {
|
||||
"type": "string"
|
||||
}
|
||||
},
|
||||
"telegramBotToken": {
|
||||
"type": "string"
|
||||
},
|
||||
"telegramChatId": {
|
||||
"type": "string"
|
||||
},
|
||||
"pollIntervalMs": {
|
||||
"default": 3000,
|
||||
"type": "number"
|
||||
},
|
||||
"rateLimitPerMinute": {
|
||||
"default": 10,
|
||||
"type": "number"
|
||||
},
|
||||
"maxMessageLength": {
|
||||
"default": 500,
|
||||
"type": "number"
|
||||
},
|
||||
"includePrefix": {
|
||||
"default": true,
|
||||
"type": "boolean"
|
||||
}
|
||||
},
|
||||
"required": [
|
||||
"authorizedDiscordUserIds",
|
||||
"pollIntervalMs",
|
||||
"rateLimitPerMinute",
|
||||
"maxMessageLength",
|
||||
"includePrefix"
|
||||
],
|
||||
"additionalProperties": false
|
||||
}
|
||||
},
|
||||
"required": [
|
||||
"enabled",
|
||||
"gateways",
|
||||
"hooks"
|
||||
],
|
||||
"additionalProperties": false
|
||||
},
|
||||
"babysitting": {
|
||||
"type": "object",
|
||||
"properties": {
|
||||
|
||||
@@ -1,18 +0,0 @@
|
||||
{
|
||||
"name": "hashline-edit-benchmark",
|
||||
"version": "0.1.0",
|
||||
"private": true,
|
||||
"type": "module",
|
||||
"description": "Hashline edit tool benchmark using Vercel AI SDK with FriendliAI provider",
|
||||
"scripts": {
|
||||
"bench:basic": "bun run test-edit-ops.ts",
|
||||
"bench:edge": "bun run test-edge-cases.ts",
|
||||
"bench:multi": "bun run test-multi-model.ts",
|
||||
"bench:all": "bun run bench:basic && bun run bench:edge"
|
||||
},
|
||||
"dependencies": {
|
||||
"@friendliai/ai-provider": "^1.0.9",
|
||||
"ai": "^6.0.94",
|
||||
"zod": "^4.1.0"
|
||||
}
|
||||
}
|
||||
@@ -8,7 +8,7 @@ Think of AI models as developers on a team. Each has a different brain, differen
|
||||
|
||||
This isn't a bug. It's the foundation of the entire system.
|
||||
|
||||
Oh My OpenCode assigns each agent a model that matches its _working style_ — like building a team where each person is in the role that fits their personality.
|
||||
Oh My OpenAgent assigns each agent a model that matches its _working style_ — like building a team where each person is in the role that fits their personality.
|
||||
|
||||
### Sisyphus: The Sociable Lead
|
||||
|
||||
@@ -64,8 +64,8 @@ These agents have Claude-optimized prompts — long, detailed, mechanics-driven.
|
||||
|
||||
| Agent | Role | Fallback Chain | Notes |
|
||||
| ------------ | ----------------- | -------------------------------------- | ------------------------------------------------------------------------------------------------- |
|
||||
| **Sisyphus** | Main orchestrator | Claude Opus → opencode-go/kimi-k2.5 → K2P5 → GPT-5.4 → GLM-5 → Big Pickle | Claude-family first. GPT-5.4 has dedicated prompt support. Kimi/GLM as intermediate fallbacks. |
|
||||
| **Metis** | Plan gap analyzer | Claude Opus → opencode-go/glm-5 → K2P5 | Claude preferred. Uses opencode-go for reliable GLM-5 access. |
|
||||
| **Sisyphus** | Main orchestrator | Claude Opus → opencode-go/kimi-k2.5 → K2P5 → Kimi K2.5 → GPT-5.4 → GLM-5 → Big Pickle | Claude-family first. GPT-5.4 has dedicated prompt support. Kimi available through multiple providers. |
|
||||
| **Metis** | Plan gap analyzer | Claude Opus → GPT-5.4 → opencode-go/glm-5 → K2P5 | Claude preferred. GPT-5.4 as secondary before GLM-5 fallback. |
|
||||
|
||||
### Dual-Prompt Agents → Claude preferred, GPT supported
|
||||
|
||||
@@ -74,7 +74,7 @@ These agents ship separate prompts for Claude and GPT families. They auto-detect
|
||||
| Agent | Role | Fallback Chain | Notes |
|
||||
| -------------- | ----------------- | -------------------------------------- | -------------------------------------------------------------------- |
|
||||
| **Prometheus** | Strategic planner | Claude Opus → GPT-5.4 → opencode-go/glm-5 → Gemini 3.1 Pro | Interview-mode planning. GPT prompt is compact and principle-driven. |
|
||||
| **Atlas** | Todo orchestrator | Claude Sonnet → opencode-go/kimi-k2.5 | Claude first, opencode-go as the current fallback path. |
|
||||
| **Atlas** | Todo orchestrator | Claude Sonnet → opencode-go/kimi-k2.5 → GPT-5.4 | Claude first, opencode-go as intermediate, GPT-5.4 as last resort. |
|
||||
|
||||
### Deep Specialists → GPT
|
||||
|
||||
@@ -82,9 +82,9 @@ These agents are built for GPT's principle-driven style. Their prompts assume au
|
||||
|
||||
| Agent | Role | Fallback Chain | Notes |
|
||||
| -------------- | ----------------------- | -------------------------------------- | ------------------------------------------------ |
|
||||
| **Hephaestus** | Autonomous deep worker | GPT-5.3 Codex only | No fallback. Requires GPT access. The craftsman. |
|
||||
| **Oracle** | Architecture consultant | GPT-5.4 → Gemini 3.1 Pro → Claude Opus | Read-only high-IQ consultation. |
|
||||
| **Momus** | Ruthless reviewer | GPT-5.4 → Claude Opus → Gemini 3.1 Pro | Verification and plan review. |
|
||||
| **Hephaestus** | Autonomous deep worker | GPT-5.3 Codex → GPT-5.4 (Copilot) | Requires GPT access. GPT-5.4 via Copilot as fallback. The craftsman. |
|
||||
| **Oracle** | Architecture consultant | GPT-5.4 → Gemini 3.1 Pro → Claude Opus → opencode-go/glm-5 | Read-only high-IQ consultation. |
|
||||
| **Momus** | Ruthless reviewer | GPT-5.4 → Claude Opus → Gemini 3.1 Pro → opencode-go/glm-5 | Verification and plan review. GPT-5.4 uses xhigh variant. |
|
||||
|
||||
### Utility Runners → Speed over Intelligence
|
||||
|
||||
@@ -95,6 +95,7 @@ These agents do grep, search, and retrieval. They intentionally use the fastest,
|
||||
| **Explore** | Fast codebase grep | Grok Code Fast → opencode-go/minimax-m2.5 → MiniMax Free → Haiku → GPT-5-Nano | Speed is everything. Fire 10 in parallel. |
|
||||
| **Librarian** | Docs/code search | opencode-go/minimax-m2.5 → MiniMax Free → Haiku → GPT-5-Nano | Doc retrieval doesn't need deep reasoning. |
|
||||
| **Multimodal Looker** | Vision/screenshots | GPT-5.4 → opencode-go/kimi-k2.5 → GLM-4.6v → GPT-5-Nano | Uses the first available multimodal-capable fallback. |
|
||||
| **Sisyphus-Junior** | Category executor | Claude Sonnet → opencode-go/kimi-k2.5 → GPT-5.4 → Big Pickle | Handles delegated category tasks. Sonnet-tier default. |
|
||||
|
||||
---
|
||||
|
||||
@@ -119,8 +120,8 @@ Principle-driven, explicit reasoning, deep technical capability. Best for agents
|
||||
| Model | Strengths |
|
||||
| ----------------- | ----------------------------------------------------------------------------------------------- |
|
||||
| **GPT-5.3 Codex** | Deep coding powerhouse. Autonomous exploration. Required for Hephaestus. |
|
||||
| **GPT-5.4** | High intelligence, strategic reasoning. Default for Oracle. |
|
||||
| **GPT-5.4** | Strong principle-driven reasoning. Default for Momus and a key fallback for Prometheus / Atlas. |
|
||||
| **GPT-5.4** | High intelligence, strategic reasoning. Default for Oracle, Momus, and a key fallback for Prometheus / Atlas. Uses xhigh variant for Momus. |
|
||||
| **GPT-5.4 Mini** | Fast + strong reasoning. Good for lightweight autonomous tasks. Default for quick category. |
|
||||
| **GPT-5-Nano** | Ultra-cheap, fast. Good for simple utility tasks. |
|
||||
|
||||
### Other Models
|
||||
@@ -166,14 +167,14 @@ When agents delegate work, they don't pick a model name — they pick a **catego
|
||||
|
||||
| Category | When Used | Fallback Chain |
|
||||
| -------------------- | -------------------------- | -------------------------------------------- |
|
||||
| `visual-engineering` | Frontend, UI, CSS, design | Gemini 3.1 Pro → GLM 5 → Claude Opus |
|
||||
| `ultrabrain` | Maximum reasoning needed | GPT-5.4 → Gemini 3.1 Pro → Claude Opus |
|
||||
| `visual-engineering` | Frontend, UI, CSS, design | Gemini 3.1 Pro → GLM 5 → Claude Opus → opencode-go/glm-5 → K2P5 |
|
||||
| `ultrabrain` | Maximum reasoning needed | GPT-5.4 → Gemini 3.1 Pro → Claude Opus → opencode-go/glm-5 |
|
||||
| `deep` | Deep coding, complex logic | GPT-5.3 Codex → Claude Opus → Gemini 3.1 Pro |
|
||||
| `artistry` | Creative, novel approaches | Gemini 3.1 Pro → Claude Opus → GPT-5.4 |
|
||||
| `quick` | Simple, fast tasks | Claude Haiku → Gemini Flash → GPT-5-Nano |
|
||||
| `unspecified-high` | General complex work | Claude Opus → GPT-5.4 (high) → GLM 5 → K2P5 |
|
||||
| `unspecified-low` | General standard work | Claude Sonnet → GPT-5.3 Codex → Gemini Flash |
|
||||
| `writing` | Text, docs, prose | Gemini Flash → Claude Sonnet |
|
||||
| `quick` | Simple, fast tasks | GPT-5.4 Mini → Claude Haiku → Gemini Flash → opencode-go/minimax-m2.5 → GPT-5-Nano |
|
||||
| `unspecified-high` | General complex work | Claude Opus → GPT-5.4 → GLM 5 → K2P5 → opencode-go/glm-5 → Kimi K2.5 |
|
||||
| `unspecified-low` | General standard work | Claude Sonnet → GPT-5.3 Codex → opencode-go/kimi-k2.5 → Gemini Flash |
|
||||
| `writing` | Text, docs, prose | Gemini Flash → opencode-go/kimi-k2.5 → Claude Sonnet |
|
||||
|
||||
See the [Orchestration System Guide](./orchestration.md) for how agents dispatch tasks to categories.
|
||||
|
||||
@@ -185,7 +186,7 @@ See the [Orchestration System Guide](./orchestration.md) for how agents dispatch
|
||||
|
||||
```jsonc
|
||||
{
|
||||
"$schema": "https://raw.githubusercontent.com/code-yeongyu/oh-my-openagent/dev/assets/oh-my-opencode.schema.json",
|
||||
"$schema": "https://raw.githubusercontent.com/code-yeongyu/oh-my-openagent/dev/assets/oh-my-openagent.schema.json",
|
||||
|
||||
"agents": {
|
||||
// Main orchestrator: Claude Opus or Kimi K2.5 work best
|
||||
|
||||
@@ -5,7 +5,7 @@
|
||||
Paste this into your llm agent session:
|
||||
|
||||
```
|
||||
Install and configure oh-my-opencode by following the instructions here:
|
||||
Install and configure oh-my-openagent by following the instructions here:
|
||||
https://raw.githubusercontent.com/code-yeongyu/oh-my-openagent/refs/heads/dev/docs/guide/installation.md
|
||||
```
|
||||
|
||||
@@ -14,8 +14,8 @@ https://raw.githubusercontent.com/code-yeongyu/oh-my-openagent/refs/heads/dev/do
|
||||
Run the interactive installer:
|
||||
|
||||
```bash
|
||||
bunx oh-my-opencode install # recommended
|
||||
npx oh-my-opencode install # alternative
|
||||
bunx oh-my-openagent install # recommended
|
||||
npx oh-my-openagent install # alternative
|
||||
```
|
||||
|
||||
> **Note**: The CLI ships with standalone binaries for all major platforms. No runtime (Bun/Node.js) is required for CLI execution after installation.
|
||||
@@ -34,7 +34,7 @@ After you install it, you can read this [overview guide](./overview.md) to under
|
||||
> curl -fsSL https://raw.githubusercontent.com/code-yeongyu/oh-my-openagent/refs/heads/dev/docs/guide/installation.md
|
||||
> ```
|
||||
|
||||
If you're an LLM Agent helping set up oh-my-opencode, Welcome! You'll now help your user to install the best agent harness, OhMyOpenCode. Tell user this with greetings, with test 'oMoMoMoMo...'
|
||||
If you're an LLM Agent helping set up oh-my-openagent, Welcome! You'll now help your user to install the best agent harness, OhMyOpenAgent. Tell user this with greetings, with test 'oMoMoMoMo...'
|
||||
|
||||
follow these steps:
|
||||
|
||||
@@ -96,19 +96,19 @@ Spawn a subagent to handle installation and report back - to save context.
|
||||
Based on user's answers, run the CLI installer with appropriate flags:
|
||||
|
||||
```bash
|
||||
bunx oh-my-opencode install --no-tui --claude=<yes|no|max20> --gemini=<yes|no> --copilot=<yes|no> [--openai=<yes|no>] [--opencode-go=<yes|no>] [--opencode-zen=<yes|no>] [--zai-coding-plan=<yes|no>]
|
||||
bunx oh-my-openagent install --no-tui --claude=<yes|no|max20> --gemini=<yes|no> --copilot=<yes|no> [--openai=<yes|no>] [--opencode-go=<yes|no>] [--opencode-zen=<yes|no>] [--zai-coding-plan=<yes|no>]
|
||||
```
|
||||
|
||||
**Examples:**
|
||||
|
||||
- User has all native subscriptions: `bunx oh-my-opencode install --no-tui --claude=max20 --openai=yes --gemini=yes --copilot=no`
|
||||
- User has only Claude: `bunx oh-my-opencode install --no-tui --claude=yes --gemini=no --copilot=no`
|
||||
- User has Claude + OpenAI: `bunx oh-my-opencode install --no-tui --claude=yes --openai=yes --gemini=no --copilot=no`
|
||||
- User has only GitHub Copilot: `bunx oh-my-opencode install --no-tui --claude=no --gemini=no --copilot=yes`
|
||||
- User has Z.ai for Librarian: `bunx oh-my-opencode install --no-tui --claude=yes --gemini=no --copilot=no --zai-coding-plan=yes`
|
||||
- User has only OpenCode Zen: `bunx oh-my-opencode install --no-tui --claude=no --gemini=no --copilot=no --opencode-zen=yes`
|
||||
- User has OpenCode Go only: `bunx oh-my-opencode install --no-tui --claude=no --openai=no --gemini=no --copilot=no --opencode-go=yes`
|
||||
- User has no subscriptions: `bunx oh-my-opencode install --no-tui --claude=no --gemini=no --copilot=no`
|
||||
- User has all native subscriptions: `bunx oh-my-openagent install --no-tui --claude=max20 --openai=yes --gemini=yes --copilot=no`
|
||||
- User has only Claude: `bunx oh-my-openagent install --no-tui --claude=yes --gemini=no --copilot=no`
|
||||
- User has Claude + OpenAI: `bunx oh-my-openagent install --no-tui --claude=yes --openai=yes --gemini=no --copilot=no`
|
||||
- User has only GitHub Copilot: `bunx oh-my-openagent install --no-tui --claude=no --gemini=no --copilot=yes`
|
||||
- User has Z.ai for Librarian: `bunx oh-my-openagent install --no-tui --claude=yes --gemini=no --copilot=no --zai-coding-plan=yes`
|
||||
- User has only OpenCode Zen: `bunx oh-my-openagent install --no-tui --claude=no --gemini=no --copilot=no --opencode-zen=yes`
|
||||
- User has OpenCode Go only: `bunx oh-my-openagent install --no-tui --claude=no --openai=no --gemini=no --copilot=no --opencode-go=yes`
|
||||
- User has no subscriptions: `bunx oh-my-openagent install --no-tui --claude=no --gemini=no --copilot=no`
|
||||
|
||||
The CLI will:
|
||||
|
||||
@@ -120,7 +120,7 @@ The CLI will:
|
||||
|
||||
```bash
|
||||
opencode --version # Should be 1.0.150 or higher
|
||||
cat ~/.config/opencode/opencode.json # Should contain "oh-my-opencode" in plugin array
|
||||
cat ~/.config/opencode/opencode.json # Should contain "oh-my-openagent" in plugin array
|
||||
```
|
||||
|
||||
### Step 4: Configure Authentication
|
||||
@@ -145,7 +145,7 @@ First, add the opencode-antigravity-auth plugin:
|
||||
|
||||
```json
|
||||
{
|
||||
"plugin": ["oh-my-opencode", "opencode-antigravity-auth@latest"]
|
||||
"plugin": ["oh-my-openagent", "opencode-antigravity-auth@latest"]
|
||||
}
|
||||
```
|
||||
|
||||
@@ -154,9 +154,9 @@ First, add the opencode-antigravity-auth plugin:
|
||||
You'll also need full model settings in `opencode.json`.
|
||||
Read the [opencode-antigravity-auth documentation](https://github.com/NoeFabris/opencode-antigravity-auth), copy the full model configuration from the README, and merge carefully to avoid breaking the user's existing setup. The plugin now uses a **variant system** — models like `antigravity-gemini-3-pro` support `low`/`high` variants instead of separate `-low`/`-high` model entries.
|
||||
|
||||
##### oh-my-opencode Agent Model Override
|
||||
##### oh-my-openagent Agent Model Override
|
||||
|
||||
The `opencode-antigravity-auth` plugin uses different model names than the built-in Google auth. Override the agent models in `oh-my-opencode.json` (or `.opencode/oh-my-opencode.json`):
|
||||
The `opencode-antigravity-auth` plugin uses different model names than the built-in Google auth. Override the agent models in `oh-my-openagent.json` (or `.opencode/oh-my-openagent.json`):
|
||||
|
||||
```json
|
||||
{
|
||||
@@ -176,7 +176,7 @@ The `opencode-antigravity-auth` plugin uses different model names than the built
|
||||
|
||||
**Available models (Gemini CLI quota)**:
|
||||
|
||||
- `google/gemini-2.5-flash`, `google/gemini-2.5-pro`, `google/gemini-3-flash-preview`, `google/gemini-3-pro-preview`
|
||||
- `google/gemini-2.5-flash`, `google/gemini-2.5-pro`, `google/gemini-3-flash-preview`, `google/gemini-3.1-pro-preview`
|
||||
|
||||
> **Note**: Legacy tier-suffixed names like `google/antigravity-gemini-3-pro-high` still work but variants are recommended. Use `--variant=high` with the base model name instead.
|
||||
|
||||
@@ -201,7 +201,7 @@ GitHub Copilot is supported as a **fallback provider** when native providers are
|
||||
|
||||
##### Model Mappings
|
||||
|
||||
When GitHub Copilot is the best available provider, oh-my-opencode uses these model assignments:
|
||||
When GitHub Copilot is the best available provider, oh-my-openagent uses these model assignments:
|
||||
|
||||
| Agent | Model |
|
||||
| ------------- | --------------------------------- |
|
||||
@@ -243,7 +243,7 @@ When OpenCode Zen is the best available provider (no native or Copilot), these m
|
||||
Run the installer and select "Yes" for GitHub Copilot:
|
||||
|
||||
```bash
|
||||
bunx oh-my-opencode install
|
||||
bunx oh-my-openagent install
|
||||
# Select your subscriptions (Claude, ChatGPT, Gemini)
|
||||
# When prompted: "Do you have a GitHub Copilot subscription?" → Select "Yes"
|
||||
```
|
||||
@@ -251,7 +251,7 @@ bunx oh-my-opencode install
|
||||
Or use non-interactive mode:
|
||||
|
||||
```bash
|
||||
bunx oh-my-opencode install --no-tui --claude=no --openai=no --gemini=no --copilot=yes
|
||||
bunx oh-my-openagent install --no-tui --claude=no --openai=no --gemini=no --copilot=yes
|
||||
```
|
||||
|
||||
Then authenticate with GitHub:
|
||||
@@ -263,7 +263,7 @@ opencode auth login
|
||||
|
||||
### Step 5: Understand Your Model Setup
|
||||
|
||||
You've just configured oh-my-opencode. Here's what got set up and why.
|
||||
You've just configured oh-my-openagent. Here's what got set up and why.
|
||||
|
||||
#### Model Families: What You're Working With
|
||||
|
||||
@@ -287,13 +287,14 @@ Not all models behave the same way. Understanding which models are "similar" hel
|
||||
| ----------------- | -------------------------------- | ------------------------------------------------- |
|
||||
| **GPT-5.3-codex** | openai, github-copilot, opencode | Deep coding powerhouse. Required for Hephaestus. |
|
||||
| **GPT-5.4** | openai, github-copilot, opencode | High intelligence. Default for Oracle. |
|
||||
| **GPT-5.4 Mini** | openai, github-copilot, opencode | Fast + strong reasoning. Default for quick category. |
|
||||
| **GPT-5-Nano** | opencode | Ultra-cheap, fast. Good for simple utility tasks. |
|
||||
|
||||
**Different-Behavior Models**:
|
||||
|
||||
| Model | Provider(s) | Notes |
|
||||
| --------------------- | -------------------------------- | ----------------------------------------------------------- |
|
||||
| **Gemini 3 Pro** | google, github-copilot, opencode | Excels at visual/frontend tasks. Different reasoning style. |
|
||||
| **Gemini 3.1 Pro** | google, github-copilot, opencode | Excels at visual/frontend tasks. Different reasoning style. |
|
||||
| **Gemini 3 Flash** | google, github-copilot, opencode | Fast, good for doc search and light tasks. |
|
||||
| **MiniMax M2.5** | venice | Fast and smart. Good for utility tasks. |
|
||||
| **MiniMax M2.5 Free** | opencode | Free-tier MiniMax. Fast for search/retrieval. |
|
||||
@@ -305,7 +306,7 @@ Not all models behave the same way. Understanding which models are "similar" hel
|
||||
| **Grok Code Fast 1** | github-copilot, venice | Very fast | Optimized for code grep/search. Default for Explore. |
|
||||
| **Claude Haiku 4.5** | anthropic, opencode | Fast | Good balance of speed and intelligence. |
|
||||
| **MiniMax M2.5 (Free)** | opencode, venice | Fast | Smart for its speed class. |
|
||||
| **GPT-5.3-codex-spark** | openai | Extremely fast | Blazing fast but compacts so aggressively that oh-my-opencode's context management doesn't work well with it. Not recommended for omo agents. |
|
||||
| **GPT-5.3-codex-spark** | openai | Extremely fast | Blazing fast but compacts so aggressively that oh-my-openagent's context management doesn't work well with it. Not recommended for omo agents. |
|
||||
|
||||
#### What Each Agent Does and Which Model It Got
|
||||
|
||||
@@ -316,7 +317,7 @@ Based on your subscriptions, here's how the agents were configured:
|
||||
| Agent | Role | Default Chain | What It Does |
|
||||
| ------------ | ---------------- | ----------------------------------------------- | ---------------------------------------------------------------------------------------- |
|
||||
| **Sisyphus** | Main ultraworker | Opus (max) → Kimi K2.5 → GLM 5 → Big Pickle | Primary coding agent. Orchestrates everything. **Never use GPT — no GPT prompt exists.** |
|
||||
| **Metis** | Plan review | Opus (max) → Kimi K2.5 → GPT-5.4 → Gemini 3 Pro | Reviews Prometheus plans for gaps. |
|
||||
| **Metis** | Plan review | Opus (max) → Kimi K2.5 → GPT-5.4 → Gemini 3.1 Pro | Reviews Prometheus plans for gaps. |
|
||||
|
||||
**Dual-Prompt Agents** (auto-switch between Claude and GPT prompts):
|
||||
|
||||
@@ -326,7 +327,7 @@ Priority: **Claude > GPT > Claude-like models**
|
||||
|
||||
| Agent | Role | Default Chain | GPT Prompt? |
|
||||
| -------------- | ----------------- | ---------------------------------------------------------- | ---------------------------------------------------------------- |
|
||||
| **Prometheus** | Strategic planner | Opus (max) → **GPT-5.4 (high)** → Kimi K2.5 → Gemini 3 Pro | Yes — XML-tagged, principle-driven (~300 lines vs ~1,100 Claude) |
|
||||
| **Prometheus** | Strategic planner | Opus (max) → **GPT-5.4 (high)** → Kimi K2.5 → Gemini 3.1 Pro | Yes — XML-tagged, principle-driven (~300 lines vs ~1,100 Claude) |
|
||||
| **Atlas** | Todo orchestrator | **Kimi K2.5** → Sonnet → GPT-5.4 | Yes — GPT-optimized todo management |
|
||||
|
||||
**GPT-Native Agents** (built for GPT, don't override to Claude):
|
||||
@@ -334,8 +335,8 @@ Priority: **Claude > GPT > Claude-like models**
|
||||
| Agent | Role | Default Chain | Notes |
|
||||
| -------------- | ---------------------- | -------------------------------------- | ------------------------------------------------------ |
|
||||
| **Hephaestus** | Deep autonomous worker | GPT-5.3-codex (medium) only | "Codex on steroids." No fallback. Requires GPT access. |
|
||||
| **Oracle** | Architecture/debugging | GPT-5.4 (high) → Gemini 3 Pro → Opus | High-IQ strategic backup. GPT preferred. |
|
||||
| **Momus** | High-accuracy reviewer | GPT-5.4 (medium) → Opus → Gemini 3 Pro | Verification agent. GPT preferred. |
|
||||
| **Oracle** | Architecture/debugging | GPT-5.4 (high) → Gemini 3.1 Pro → Opus | High-IQ strategic backup. GPT preferred. |
|
||||
| **Momus** | High-accuracy reviewer | GPT-5.4 (medium) → Opus → Gemini 3.1 Pro | Verification agent. GPT preferred. |
|
||||
|
||||
**Utility Agents** (speed over intelligence):
|
||||
|
||||
@@ -364,7 +365,7 @@ This is why Prometheus and Atlas ship separate prompts per model family — they
|
||||
|
||||
#### Custom Model Configuration
|
||||
|
||||
If the user wants to override which model an agent uses, you can customize in `oh-my-opencode.json`:
|
||||
If the user wants to override which model an agent uses, you can customize in `oh-my-openagent.json`:
|
||||
|
||||
```jsonc
|
||||
{
|
||||
@@ -406,7 +407,7 @@ GPT (5.3-codex, 5.2) > Claude Opus (decent fallback) > Gemini (acceptable)
|
||||
|
||||
#### Provider Priority Chain
|
||||
|
||||
When multiple providers are available, oh-my-opencode uses this priority:
|
||||
When multiple providers are available, oh-my-openagent uses this priority:
|
||||
|
||||
```
|
||||
Native (anthropic/, openai/, google/) > Kimi for Coding > GitHub Copilot > Venice > OpenCode Zen > Z.ai Coding Plan
|
||||
@@ -424,7 +425,7 @@ Read this document again, think about you have done everything correctly.
|
||||
|
||||
#### Say 'Congratulations! 🎉' to the user
|
||||
|
||||
Say to user: Congratulations! 🎉 You have successfully set up OhMyOpenCode! Type `opencode` in your terminal to start using it.
|
||||
Say to user: Congratulations! 🎉 You have successfully set up OhMyOpenAgent! Type `opencode` in your terminal to start using it.
|
||||
|
||||
#### Free advertising
|
||||
|
||||
|
||||
@@ -1,6 +1,6 @@
|
||||
# Orchestration System Guide
|
||||
|
||||
Oh My OpenCode's orchestration system transforms a simple AI agent into a coordinated development team through **separation of planning and execution**.
|
||||
Oh My OpenAgent's orchestration system transforms a simple AI agent into a coordinated development team through **separation of planning and execution**.
|
||||
|
||||
---
|
||||
|
||||
@@ -298,7 +298,7 @@ task({ category: "quick", prompt: "..." }); // "Just get it done fast"
|
||||
| `visual-engineering` | Gemini 3.1 Pro | Frontend, UI/UX, design, styling, animation |
|
||||
| `ultrabrain` | GPT-5.4 (xhigh) | Deep logical reasoning, complex architecture decisions |
|
||||
| `artistry` | Gemini 3.1 Pro (high) | Highly creative or artistic tasks, novel ideas |
|
||||
| `quick` | Claude Haiku 4.5 | Trivial tasks - single file changes, typo fixes |
|
||||
| `quick` | GPT-5.4 Mini | Trivial tasks - single file changes, typo fixes |
|
||||
| `deep` | GPT-5.3 Codex (medium) | Goal-oriented autonomous problem-solving, thorough research |
|
||||
| `unspecified-low` | Claude Sonnet 4.6 | Tasks that don't fit other categories, low effort |
|
||||
| `unspecified-high` | Claude Opus 4.6 (max) | Tasks that don't fit other categories, high effort |
|
||||
@@ -475,7 +475,7 @@ Use the `ulw` keyword in Sisyphus when:
|
||||
|
||||
## Configuration
|
||||
|
||||
You can control related features in `oh-my-opencode.json`:
|
||||
You can control related features in `oh-my-openagent.json`:
|
||||
|
||||
```jsonc
|
||||
{
|
||||
|
||||
@@ -1,6 +1,6 @@
|
||||
# What Is Oh My OpenCode?
|
||||
# What Is Oh My OpenAgent?
|
||||
|
||||
Oh My OpenCode is a multi-model agent orchestration harness for OpenCode. It transforms a single AI agent into a coordinated development team that actually ships code.
|
||||
Oh My OpenAgent is a multi-model agent orchestration harness for OpenCode. It transforms a single AI agent into a coordinated development team that actually ships code.
|
||||
|
||||
Not locked to Claude. Not locked to OpenAI. Not locked to anyone.
|
||||
|
||||
@@ -15,7 +15,7 @@ Just better results, cheaper models, real orchestration.
|
||||
Paste this into your LLM agent session:
|
||||
|
||||
```
|
||||
Install and configure oh-my-opencode by following the instructions here:
|
||||
Install and configure oh-my-openagent by following the instructions here:
|
||||
https://raw.githubusercontent.com/code-yeongyu/oh-my-openagent/refs/heads/dev/docs/guide/installation.md
|
||||
```
|
||||
|
||||
@@ -41,13 +41,13 @@ We used to call this "Claude Code on steroids." That was wrong.
|
||||
|
||||
This isn't about making Claude Code better. It's about breaking free from the idea that one model, one provider, one way of working is enough. Anthropic wants you locked in. OpenAI wants you locked in. Everyone wants you locked in.
|
||||
|
||||
Oh My OpenCode doesn't play that game. It orchestrates across models, picking the right brain for the right job. Claude for orchestration. GPT for deep reasoning. Gemini for frontend. Haiku for quick tasks. All working together, automatically.
|
||||
Oh My OpenAgent doesn't play that game. It orchestrates across models, picking the right brain for the right job. Claude for orchestration. GPT for deep reasoning. Gemini for frontend. GPT-5.4 Mini for quick tasks. All working together, automatically.
|
||||
|
||||
---
|
||||
|
||||
## How It Works: Agent Orchestration
|
||||
|
||||
Instead of one agent doing everything, Oh My OpenCode uses **specialized agents that delegate to each other** based on task type.
|
||||
Instead of one agent doing everything, Oh My OpenAgent uses **specialized agents that delegate to each other** based on task type.
|
||||
|
||||
**The Architecture:**
|
||||
|
||||
@@ -99,9 +99,9 @@ Use Hephaestus when you need deep architectural reasoning, complex debugging acr
|
||||
|
||||
**Why this beats vanilla Codex CLI:**
|
||||
|
||||
- **Multi-model orchestration.** Pure Codex is single-model. OmO routes different tasks to different models automatically. GPT for deep reasoning. Gemini for frontend. Haiku for speed. The right brain for the right job.
|
||||
- **Multi-model orchestration.** Pure Codex is single-model. OmO routes different tasks to different models automatically. GPT for deep reasoning. Gemini for frontend. GPT-5.4 Mini for speed. The right brain for the right job.
|
||||
- **Background agents.** Fire 5+ agents in parallel. Something Codex simply cannot do. While one agent writes code, another researches patterns, another checks documentation. Like a real dev team.
|
||||
- **Category system.** Tasks are routed by intent, not model name. `visual-engineering` gets Gemini. `ultrabrain` gets GPT-5.4. `quick` gets Haiku. No manual juggling.
|
||||
- **Category system.** Tasks are routed by intent, not model name. `visual-engineering` gets Gemini. `ultrabrain` gets GPT-5.4. `quick` gets GPT-5.4 Mini. No manual juggling.
|
||||
- **Accumulated wisdom.** Subagents learn from previous results. Conventions discovered in task 1 are passed to task 5. Mistakes made early aren't repeated. The system gets smarter as it works.
|
||||
|
||||
### Prometheus: The Strategic Planner
|
||||
@@ -154,7 +154,7 @@ Use Prometheus for multi-day projects, critical production changes, complex refa
|
||||
|
||||
## Agent Model Matching
|
||||
|
||||
Different agents work best with different models. Oh My OpenCode automatically assigns optimal models, but you can customize everything.
|
||||
Different agents work best with different models. Oh My OpenAgent automatically assigns optimal models, but you can customize everything.
|
||||
|
||||
### Default Configuration
|
||||
|
||||
@@ -168,7 +168,7 @@ You can override specific agents or categories in your config:
|
||||
|
||||
```jsonc
|
||||
{
|
||||
"$schema": "https://raw.githubusercontent.com/code-yeongyu/oh-my-openagent/dev/assets/oh-my-opencode.schema.json",
|
||||
"$schema": "https://raw.githubusercontent.com/code-yeongyu/oh-my-openagent/dev/assets/oh-my-openagent.schema.json",
|
||||
|
||||
"agents": {
|
||||
// Main orchestrator: Claude Opus or Kimi K2.5 work best
|
||||
@@ -195,8 +195,8 @@ You can override specific agents or categories in your config:
|
||||
// General high-effort work
|
||||
"unspecified-high": { "model": "anthropic/claude-opus-4-6", "variant": "max" },
|
||||
|
||||
// Quick tasks: use the cheapest models
|
||||
"quick": { "model": "anthropic/claude-haiku-4-5" },
|
||||
// Quick tasks: use GPT-5.4-mini (fast and cheap)
|
||||
"quick": { "model": "openai/gpt-5.4-mini" },
|
||||
|
||||
// Deep reasoning: GPT-5.4
|
||||
"ultrabrain": { "model": "openai/gpt-5.4", "variant": "xhigh" },
|
||||
@@ -220,7 +220,7 @@ You can override specific agents or categories in your config:
|
||||
|
||||
**Different-behavior models**:
|
||||
|
||||
- Gemini 3 Pro — excels at visual/frontend tasks
|
||||
- Gemini 3.1 Pro — excels at visual/frontend tasks
|
||||
- MiniMax M2.5 — fast and smart for utility tasks
|
||||
- Grok Code Fast 1 — optimized for code grep/search
|
||||
|
||||
@@ -232,7 +232,7 @@ See the [Agent-Model Matching Guide](./agent-model-matching.md) for complete det
|
||||
|
||||
Claude Code is good. But it's a single agent running a single model doing everything alone.
|
||||
|
||||
Oh My OpenCode turns that into a coordinated team:
|
||||
Oh My OpenAgent turns that into a coordinated team:
|
||||
|
||||
**Parallel execution.** Claude Code processes one thing at a time. OmO fires background agents in parallel — research, implementation, and verification happening simultaneously. Like having 5 engineers instead of 1.
|
||||
|
||||
@@ -246,7 +246,7 @@ Oh My OpenCode turns that into a coordinated team:
|
||||
|
||||
**Discipline enforcement.** Todo enforcer yanks idle agents back to work. Comment checker strips AI slop. Ralph Loop keeps going until 100% done. The system doesn't let the agent slack off.
|
||||
|
||||
**The fundamental advantage.** Models have different temperaments. Claude thinks deeply. GPT reasons architecturally. Gemini visualizes. Haiku moves fast. Single-model tools force you to pick one personality for all tasks. Oh My OpenCode leverages them all, routing by task type. This isn't a temporary hack — it's the only architecture that makes sense as models specialize further. The gap between multi-model orchestration and single-model limitation widens every month. We're betting on that future.
|
||||
**The fundamental advantage.** Models have different temperaments. Claude thinks deeply. GPT reasons architecturally. Gemini visualizes. Haiku moves fast. Single-model tools force you to pick one personality for all tasks. Oh My OpenAgent leverages them all, routing by task type. This isn't a temporary hack — it's the only architecture that makes sense as models specialize further. The gap between multi-model orchestration and single-model limitation widens every month. We're betting on that future.
|
||||
|
||||
---
|
||||
|
||||
@@ -256,7 +256,7 @@ Before acting on any request, Sisyphus classifies your true intent.
|
||||
|
||||
Are you asking for research? Implementation? Investigation? A fix? The Intent Gate figures out what you actually want, not just the literal words you typed. This means the agent understands context, nuance, and the real goal behind your request.
|
||||
|
||||
Claude Code doesn't have this. It takes your prompt and runs. Oh My OpenCode thinks first, then acts.
|
||||
Claude Code doesn't have this. It takes your prompt and runs. Oh My OpenAgent thinks first, then acts.
|
||||
|
||||
---
|
||||
|
||||
|
||||
@@ -1,6 +1,6 @@
|
||||
# Manifesto
|
||||
|
||||
The principles and philosophy behind Oh My OpenCode.
|
||||
The principles and philosophy behind Oh My OpenAgent.
|
||||
|
||||
---
|
||||
|
||||
@@ -20,7 +20,7 @@ When you find yourself:
|
||||
|
||||
That's not "human-AI collaboration." That's the AI failing to do its job.
|
||||
|
||||
**Oh My OpenCode is built on this premise**: Human intervention during agentic work is fundamentally a wrong signal. If the system is designed correctly, the agent should complete the work without requiring you to babysit it.
|
||||
**Oh My OpenAgent is built on this premise**: Human intervention during agentic work is fundamentally a wrong signal. If the system is designed correctly, the agent should complete the work without requiring you to babysit it.
|
||||
|
||||
---
|
||||
|
||||
@@ -144,7 +144,7 @@ Human Intent → Agent Execution → Verified Result
|
||||
(intervention only on true failure)
|
||||
```
|
||||
|
||||
Everything in Oh My OpenCode is designed to make this loop work:
|
||||
Everything in Oh My OpenAgent is designed to make this loop work:
|
||||
|
||||
| Feature | Purpose |
|
||||
|---------|---------|
|
||||
|
||||
@@ -1,15 +1,15 @@
|
||||
# CLI Reference
|
||||
|
||||
Complete reference for the `oh-my-opencode` command-line interface.
|
||||
Complete reference for the `oh-my-openagent` command-line interface.
|
||||
|
||||
## Basic Usage
|
||||
|
||||
```bash
|
||||
# Display help
|
||||
bunx oh-my-opencode
|
||||
bunx oh-my-openagent
|
||||
|
||||
# Or with npx
|
||||
npx oh-my-opencode
|
||||
npx oh-my-openagent
|
||||
```
|
||||
|
||||
## Commands
|
||||
@@ -27,20 +27,20 @@ npx oh-my-opencode
|
||||
|
||||
## install
|
||||
|
||||
Interactive installation tool for initial Oh-My-OpenCode setup. Provides a TUI based on `@clack/prompts`.
|
||||
Interactive installation tool for initial Oh-My-OpenAgent setup. Provides a TUI based on `@clack/prompts`.
|
||||
|
||||
### Usage
|
||||
|
||||
```bash
|
||||
bunx oh-my-opencode install
|
||||
bunx oh-my-openagent install
|
||||
```
|
||||
|
||||
### Installation Process
|
||||
|
||||
1. **Provider Selection**: Choose your AI provider (Claude, ChatGPT, or Gemini)
|
||||
2. **API Key Input**: Enter the API key for your selected provider
|
||||
3. **Configuration File Creation**: Generates `opencode.json` or `oh-my-opencode.json` files
|
||||
4. **Plugin Registration**: Automatically registers the oh-my-opencode plugin in OpenCode settings
|
||||
3. **Configuration File Creation**: Generates `opencode.json` or `oh-my-openagent.json` files
|
||||
4. **Plugin Registration**: Automatically registers the oh-my-openagent plugin in OpenCode settings
|
||||
|
||||
### Options
|
||||
|
||||
@@ -53,12 +53,12 @@ bunx oh-my-opencode install
|
||||
|
||||
## doctor
|
||||
|
||||
Diagnoses your environment to ensure Oh-My-OpenCode is functioning correctly. Performs 17+ health checks.
|
||||
Diagnoses your environment to ensure Oh-My-OpenAgent is functioning correctly. Performs 17+ health checks.
|
||||
|
||||
### Usage
|
||||
|
||||
```bash
|
||||
bunx oh-my-opencode doctor
|
||||
bunx oh-my-openagent doctor
|
||||
```
|
||||
|
||||
### Diagnostic Categories
|
||||
@@ -83,10 +83,10 @@ bunx oh-my-opencode doctor
|
||||
### Example Output
|
||||
|
||||
```
|
||||
oh-my-opencode doctor
|
||||
oh-my-openagent doctor
|
||||
|
||||
┌──────────────────────────────────────────────────┐
|
||||
│ Oh-My-OpenCode Doctor │
|
||||
│ Oh-My-OpenAgent Doctor │
|
||||
└──────────────────────────────────────────────────┘
|
||||
|
||||
Installation
|
||||
@@ -94,7 +94,7 @@ Installation
|
||||
✓ Plugin registered in opencode.json
|
||||
|
||||
Configuration
|
||||
✓ oh-my-opencode.json is valid
|
||||
✓ oh-my-openagent.json is valid
|
||||
⚠ categories.visual-engineering: using default model
|
||||
|
||||
Authentication
|
||||
@@ -119,7 +119,7 @@ Executes OpenCode sessions and monitors task completion.
|
||||
### Usage
|
||||
|
||||
```bash
|
||||
bunx oh-my-opencode run [prompt]
|
||||
bunx oh-my-openagent run [prompt]
|
||||
```
|
||||
|
||||
### Options
|
||||
@@ -148,16 +148,16 @@ Manages OAuth 2.1 authentication for remote MCP servers.
|
||||
|
||||
```bash
|
||||
# Login to an OAuth-protected MCP server
|
||||
bunx oh-my-opencode mcp oauth login <server-name> --server-url https://api.example.com
|
||||
bunx oh-my-openagent mcp oauth login <server-name> --server-url https://api.example.com
|
||||
|
||||
# Login with explicit client ID and scopes
|
||||
bunx oh-my-opencode mcp oauth login my-api --server-url https://api.example.com --client-id my-client --scopes "read,write"
|
||||
bunx oh-my-openagent mcp oauth login my-api --server-url https://api.example.com --client-id my-client --scopes "read,write"
|
||||
|
||||
# Remove stored OAuth tokens
|
||||
bunx oh-my-opencode mcp oauth logout <server-name>
|
||||
bunx oh-my-openagent mcp oauth logout <server-name>
|
||||
|
||||
# Check OAuth token status
|
||||
bunx oh-my-opencode mcp oauth status [server-name]
|
||||
bunx oh-my-openagent mcp oauth status [server-name]
|
||||
```
|
||||
|
||||
### Options
|
||||
@@ -178,8 +178,8 @@ Tokens are stored in `~/.config/opencode/mcp-oauth.json` with `0600` permissions
|
||||
|
||||
The CLI searches for configuration files in the following locations (in priority order):
|
||||
|
||||
1. **Project Level**: `.opencode/oh-my-opencode.json`
|
||||
2. **User Level**: `~/.config/opencode/oh-my-opencode.json`
|
||||
1. **Project Level**: `.opencode/oh-my-openagent.json`
|
||||
2. **User Level**: `~/.config/opencode/oh-my-openagent.json`
|
||||
|
||||
### JSONC Support
|
||||
|
||||
@@ -219,17 +219,17 @@ bun install -g opencode@latest
|
||||
|
||||
```bash
|
||||
# Reinstall plugin
|
||||
bunx oh-my-opencode install
|
||||
bunx oh-my-openagent install
|
||||
```
|
||||
|
||||
### Doctor Check Failures
|
||||
|
||||
```bash
|
||||
# Diagnose with detailed information
|
||||
bunx oh-my-opencode doctor --verbose
|
||||
bunx oh-my-openagent doctor --verbose
|
||||
|
||||
# Check specific category only
|
||||
bunx oh-my-opencode doctor --category authentication
|
||||
bunx oh-my-openagent doctor --category authentication
|
||||
```
|
||||
|
||||
---
|
||||
@@ -240,10 +240,10 @@ Use the `--no-tui` option for CI/CD environments.
|
||||
|
||||
```bash
|
||||
# Run doctor in CI environment
|
||||
bunx oh-my-opencode doctor --no-tui --json
|
||||
bunx oh-my-openagent doctor --no-tui --json
|
||||
|
||||
# Save results to file
|
||||
bunx oh-my-opencode doctor --json > doctor-report.json
|
||||
bunx oh-my-openagent doctor --json > doctor-report.json
|
||||
```
|
||||
|
||||
---
|
||||
|
||||
@@ -1,6 +1,6 @@
|
||||
# Configuration Reference
|
||||
|
||||
Complete reference for `oh-my-opencode.jsonc` configuration. This document covers every available option with examples.
|
||||
Complete reference for `oh-my-openagent.jsonc` configuration. This document covers every available option with examples.
|
||||
|
||||
---
|
||||
|
||||
@@ -44,13 +44,13 @@ Complete reference for `oh-my-opencode.jsonc` configuration. This document cover
|
||||
|
||||
Priority order (project overrides user):
|
||||
|
||||
1. `.opencode/oh-my-opencode.jsonc` / `.opencode/oh-my-opencode.json`
|
||||
1. `.opencode/oh-my-openagent.jsonc` / `.opencode/oh-my-openagent.json`
|
||||
2. User config (`.jsonc` preferred over `.json`):
|
||||
|
||||
| Platform | Path |
|
||||
| ----------- | ----------------------------------------- |
|
||||
| macOS/Linux | `~/.config/opencode/oh-my-opencode.jsonc` |
|
||||
| Windows | `%APPDATA%\opencode\oh-my-opencode.jsonc` |
|
||||
| macOS/Linux | `~/.config/opencode/oh-my-openagent.jsonc` |
|
||||
| Windows | `%APPDATA%\opencode\oh-my-openagent.jsonc` |
|
||||
|
||||
JSONC supports `// line comments`, `/* block comments */`, and trailing commas.
|
||||
|
||||
@@ -58,11 +58,11 @@ Enable schema autocomplete:
|
||||
|
||||
```json
|
||||
{
|
||||
"$schema": "https://raw.githubusercontent.com/code-yeongyu/oh-my-openagent/dev/assets/oh-my-opencode.schema.json"
|
||||
"$schema": "https://raw.githubusercontent.com/code-yeongyu/oh-my-openagent/dev/assets/oh-my-openagent.schema.json"
|
||||
}
|
||||
```
|
||||
|
||||
Run `bunx oh-my-opencode install` for guided setup. Run `opencode models` to list available models.
|
||||
Run `bunx oh-my-openagent install` for guided setup. Run `opencode models` to list available models.
|
||||
|
||||
### Quick Start Example
|
||||
|
||||
@@ -70,7 +70,7 @@ Here's a practical starting configuration:
|
||||
|
||||
```jsonc
|
||||
{
|
||||
"$schema": "https://raw.githubusercontent.com/code-yeongyu/oh-my-openagent/dev/assets/oh-my-opencode.schema.json",
|
||||
"$schema": "https://raw.githubusercontent.com/code-yeongyu/oh-my-openagent/dev/assets/oh-my-openagent.schema.json",
|
||||
|
||||
"agents": {
|
||||
// Main orchestrator: Claude Opus or Kimi K2.5 work best
|
||||
@@ -228,7 +228,7 @@ Domain-specific model delegation used by the `task()` tool. When Sisyphus delega
|
||||
| `ultrabrain` | `openai/gpt-5.4` (xhigh) | Deep logical reasoning, complex architecture |
|
||||
| `deep` | `openai/gpt-5.3-codex` (medium) | Autonomous problem-solving, thorough research |
|
||||
| `artistry` | `google/gemini-3.1-pro` (high) | Creative/unconventional approaches |
|
||||
| `quick` | `anthropic/claude-haiku-4-5` | Trivial tasks, typo fixes, single-file changes |
|
||||
| `quick` | `openai/gpt-5.4-mini` | Trivial tasks, typo fixes, single-file changes |
|
||||
| `unspecified-low` | `anthropic/claude-sonnet-4-6` | General tasks, low effort |
|
||||
| `unspecified-high` | `anthropic/claude-opus-4-6` (max) | General tasks, high effort |
|
||||
| `writing` | `google/gemini-3-flash` | Documentation, prose, technical writing |
|
||||
@@ -286,12 +286,12 @@ Disable categories: `{ "disabled_categories": ["ultrabrain"] }`
|
||||
| **ultrabrain** | `gpt-5.4` | `gpt-5.4` → `gemini-3.1-pro` → `claude-opus-4-6` |
|
||||
| **deep** | `gpt-5.3-codex` | `gpt-5.3-codex` → `claude-opus-4-6` → `gemini-3.1-pro` |
|
||||
| **artistry** | `gemini-3.1-pro` | `gemini-3.1-pro` → `claude-opus-4-6` → `gpt-5.4` |
|
||||
| **quick** | `claude-haiku-4-5` | `claude-haiku-4-5` → `gemini-3-flash` → `gpt-5-nano` |
|
||||
| **quick** | `gpt-5.4-mini` | `gpt-5.4-mini` → `claude-haiku-4-5` → `gemini-3-flash` → `minimax-m2.5` → `gpt-5-nano` |
|
||||
| **unspecified-low** | `claude-sonnet-4-6` | `claude-sonnet-4-6` → `gpt-5.3-codex` → `gemini-3-flash` |
|
||||
| **unspecified-high** | `claude-opus-4-6` | `claude-opus-4-6` → `gpt-5.4 (high)` → `glm-5` → `k2p5` → `kimi-k2.5` |
|
||||
| **writing** | `gemini-3-flash` | `gemini-3-flash` → `claude-sonnet-4-6` |
|
||||
|
||||
Run `bunx oh-my-opencode doctor --verbose` to see effective model resolution for your config.
|
||||
Run `bunx oh-my-openagent doctor --verbose` to see effective model resolution for your config.
|
||||
|
||||
---
|
||||
|
||||
@@ -418,15 +418,14 @@ Disable built-in skills: `{ "disabled_skills": ["playwright"] }`
|
||||
Disable built-in hooks via `disabled_hooks`:
|
||||
|
||||
```json
|
||||
{ "disabled_hooks": ["comment-checker", "gpt-permission-continuation"] }
|
||||
{ "disabled_hooks": ["comment-checker"] }
|
||||
```
|
||||
|
||||
Available hooks: `gpt-permission-continuation`, `todo-continuation-enforcer`, `context-window-monitor`, `session-recovery`, `session-notification`, `comment-checker`, `grep-output-truncator`, `tool-output-truncator`, `directory-agents-injector`, `directory-readme-injector`, `empty-task-response-detector`, `think-mode`, `anthropic-context-window-limit-recovery`, `rules-injector`, `background-notification`, `auto-update-checker`, `startup-toast`, `keyword-detector`, `agent-usage-reminder`, `non-interactive-env`, `interactive-bash-session`, `compaction-context-injector`, `thinking-block-validator`, `claude-code-hooks`, `ralph-loop`, `preemptive-compaction`, `auto-slash-command`, `sisyphus-junior-notepad`, `no-sisyphus-gpt`, `start-work`, `runtime-fallback`
|
||||
Available hooks: `todo-continuation-enforcer`, `context-window-monitor`, `session-recovery`, `session-notification`, `comment-checker`, `grep-output-truncator`, `tool-output-truncator`, `directory-agents-injector`, `directory-readme-injector`, `empty-task-response-detector`, `think-mode`, `anthropic-context-window-limit-recovery`, `rules-injector`, `background-notification`, `auto-update-checker`, `startup-toast`, `keyword-detector`, `agent-usage-reminder`, `non-interactive-env`, `interactive-bash-session`, `compaction-context-injector`, `thinking-block-validator`, `claude-code-hooks`, `ralph-loop`, `preemptive-compaction`, `auto-slash-command`, `sisyphus-junior-notepad`, `no-sisyphus-gpt`, `start-work`, `runtime-fallback`
|
||||
|
||||
**Notes:**
|
||||
|
||||
- `directory-agents-injector` — auto-disabled on OpenCode 1.1.37+ (native AGENTS.md support)
|
||||
- `gpt-permission-continuation` — resumes GPT sessions only when the last assistant reply ends with a permission-seeking tail like `If you want, ...`. Disable it if you prefer GPT sessions to wait for explicit user follow-up.
|
||||
- `no-sisyphus-gpt` — **do not disable**. It blocks incompatible GPT models for Sisyphus while allowing the dedicated GPT-5.4 prompt path.
|
||||
- `startup-toast` is a sub-feature of `auto-update-checker`. Disable just the toast by adding `startup-toast` to `disabled_hooks`.
|
||||
|
||||
|
||||
@@ -1,8 +1,8 @@
|
||||
# Oh-My-OpenCode Features Reference
|
||||
# Oh-My-OpenAgent Features Reference
|
||||
|
||||
## Agents
|
||||
|
||||
Oh-My-OpenCode provides 11 specialized AI agents. Each has distinct expertise, optimized models, and tool permissions.
|
||||
Oh-My-OpenAgent provides 11 specialized AI agents. Each has distinct expertise, optimized models, and tool permissions.
|
||||
|
||||
### Core Agents
|
||||
|
||||
@@ -90,7 +90,7 @@ When running inside tmux:
|
||||
- Each pane shows agent output live
|
||||
- Auto-cleanup when agents complete
|
||||
|
||||
Customize agent models, prompts, and permissions in `oh-my-opencode.json`.
|
||||
Customize agent models, prompts, and permissions in `oh-my-openagent.json`.
|
||||
|
||||
## Category System
|
||||
|
||||
@@ -111,7 +111,7 @@ By combining these two concepts, you can generate optimal agents through `task`.
|
||||
| `ultrabrain` | `openai/gpt-5.4` (xhigh) | Deep logical reasoning, complex architecture decisions requiring extensive analysis |
|
||||
| `deep` | `openai/gpt-5.3-codex` (medium) | Goal-oriented autonomous problem-solving. Thorough research before action. For hairy problems requiring deep understanding. |
|
||||
| `artistry` | `google/gemini-3.1-pro` (high) | Highly creative/artistic tasks, novel ideas |
|
||||
| `quick` | `anthropic/claude-haiku-4-5` | Trivial tasks - single file changes, typo fixes, simple modifications |
|
||||
| `quick` | `openai/gpt-5.4-mini` | Trivial tasks - single file changes, typo fixes, simple modifications |
|
||||
| `unspecified-low` | `anthropic/claude-sonnet-4-6` | Tasks that don't fit other categories, low effort required |
|
||||
| `unspecified-high` | `anthropic/claude-opus-4-6` (max) | Tasks that don't fit other categories, high effort required |
|
||||
| `writing` | `google/gemini-3-flash` | Documentation, prose, technical writing |
|
||||
@@ -129,7 +129,7 @@ task({
|
||||
|
||||
### Custom Categories
|
||||
|
||||
You can define custom categories in `oh-my-opencode.json`.
|
||||
You can define custom categories in `oh-my-openagent.json`.
|
||||
|
||||
#### Category Configuration Schema
|
||||
|
||||
@@ -237,7 +237,7 @@ Skills provide specialized workflows with embedded MCP servers and detailed inst
|
||||
|
||||
### Browser Automation Options
|
||||
|
||||
Oh-My-OpenCode provides two browser automation providers, configurable via `browser_automation_engine.provider`.
|
||||
Oh-My-OpenAgent provides two browser automation providers, configurable via `browser_automation_engine.provider`.
|
||||
|
||||
#### Option 1: Playwright MCP (Default)
|
||||
|
||||
@@ -558,7 +558,7 @@ Requires `experimental.task_system: true` in config.
|
||||
|
||||
#### Task System Details
|
||||
|
||||
**Note on Claude Code Alignment**: This implementation follows Claude Code's internal Task tool signatures (`TaskCreate`, `TaskUpdate`, `TaskList`, `TaskGet`) and field naming conventions (`subject`, `blockedBy`, `blocks`, etc.). However, Anthropic has not published official documentation for these tools. This is Oh My OpenCode's own implementation based on observed Claude Code behavior and internal specifications.
|
||||
**Note on Claude Code Alignment**: This implementation follows Claude Code's internal Task tool signatures (`TaskCreate`, `TaskUpdate`, `TaskList`, `TaskGet`) and field naming conventions (`subject`, `blockedBy`, `blocks`, etc.). However, Anthropic has not published official documentation for these tools. This is Oh My OpenAgent's own implementation based on observed Claude Code behavior and internal specifications.
|
||||
|
||||
**Task Schema**:
|
||||
|
||||
@@ -680,7 +680,6 @@ Hooks intercept and modify behavior at key points in the agent lifecycle across
|
||||
| **ralph-loop** | Event + Message | Manages self-referential loop continuation. |
|
||||
| **start-work** | Message | Handles /start-work command execution. |
|
||||
| **auto-slash-command** | Message | Automatically executes slash commands from prompts. |
|
||||
| **gpt-permission-continuation** | Event | Auto-continues GPT sessions when the final assistant reply ends with a permission-seeking tail such as `If you want, ...`. |
|
||||
| **stop-continuation-guard** | Event + Message | Guards the stop-continuation mechanism. |
|
||||
| **category-skill-reminder** | Event + PostToolUse | Reminds agents about available category skills for delegation. |
|
||||
| **anthropic-effort** | Params | Adjusts Anthropic API effort level based on context. |
|
||||
@@ -735,7 +734,6 @@ Hooks intercept and modify behavior at key points in the agent lifecycle across
|
||||
|
||||
| Hook | Event | Description |
|
||||
| ------------------------------ | ----- | ---------------------------------------------------------- |
|
||||
| **gpt-permission-continuation** | Event | Continues GPT replies that end in a permission-seeking tail. |
|
||||
| **todo-continuation-enforcer** | Event | Enforces todo completion — yanks idle agents back to work. |
|
||||
| **compaction-todo-preserver** | Event | Preserves todo state during session compaction. |
|
||||
| **unstable-agent-babysitter** | Event | Handles unstable agent behavior with recovery strategies. |
|
||||
@@ -787,12 +785,10 @@ Disable specific hooks in config:
|
||||
|
||||
```json
|
||||
{
|
||||
"disabled_hooks": ["comment-checker", "gpt-permission-continuation"]
|
||||
"disabled_hooks": ["comment-checker"]
|
||||
}
|
||||
```
|
||||
|
||||
Use `gpt-permission-continuation` when you want GPT sessions to stop at permission-seeking endings instead of auto-resuming.
|
||||
|
||||
## MCPs
|
||||
|
||||
### Built-in MCPs
|
||||
@@ -848,7 +844,7 @@ When a skill MCP has `oauth` configured:
|
||||
Pre-authenticate via CLI:
|
||||
|
||||
```bash
|
||||
bunx oh-my-opencode mcp oauth login <server-name> --server-url https://api.example.com
|
||||
bunx oh-my-openagent mcp oauth login <server-name> --server-url https://api.example.com
|
||||
```
|
||||
|
||||
## Context Injection
|
||||
|
||||
@@ -4,7 +4,7 @@
|
||||
|
||||
### Problem
|
||||
|
||||
When using Ollama as a provider with oh-my-opencode agents, you may encounter:
|
||||
When using Ollama as a provider with oh-my-openagent agents, you may encounter:
|
||||
|
||||
```
|
||||
JSON Parse error: Unexpected EOF
|
||||
@@ -26,7 +26,7 @@ Claude Code SDK expects a single JSON object, not multiple NDJSON lines, causing
|
||||
**Why this happens:**
|
||||
- **Ollama API**: Returns streaming responses as NDJSON by design
|
||||
- **Claude Code SDK**: Doesn't properly handle NDJSON responses for tool calls
|
||||
- **oh-my-opencode**: Passes through the SDK's behavior (can't fix at this layer)
|
||||
- **oh-my-openagent**: Passes through the SDK's behavior (can't fix at this layer)
|
||||
|
||||
## Solutions
|
||||
|
||||
@@ -114,7 +114,7 @@ curl -s http://localhost:11434/api/chat \
|
||||
|
||||
## Related Issues
|
||||
|
||||
- **oh-my-opencode**: https://github.com/code-yeongyu/oh-my-openagent/issues/1124
|
||||
- **oh-my-openagent**: https://github.com/code-yeongyu/oh-my-openagent/issues/1124
|
||||
- **Ollama API Docs**: https://github.com/ollama/ollama/blob/main/docs/api.md
|
||||
|
||||
## Getting Help
|
||||
|
||||
@@ -101,7 +101,9 @@ async function main() {
|
||||
console.log("\n✅ All platform binaries built successfully!\n");
|
||||
}
|
||||
|
||||
main().catch((error) => {
|
||||
console.error("Fatal error:", error);
|
||||
process.exit(1);
|
||||
});
|
||||
if (import.meta.main) {
|
||||
main().catch((error) => {
|
||||
console.error("Fatal error:", error);
|
||||
process.exit(1);
|
||||
});
|
||||
}
|
||||
|
||||
@@ -2207,6 +2207,102 @@
|
||||
"created_at": "2026-03-16T04:55:10Z",
|
||||
"repoId": 1108837393,
|
||||
"pullRequestNo": 2604
|
||||
},
|
||||
{
|
||||
"name": "gxlife",
|
||||
"id": 110413359,
|
||||
"comment_id": 4068427047,
|
||||
"created_at": "2026-03-16T15:17:01Z",
|
||||
"repoId": 1108837393,
|
||||
"pullRequestNo": 2625
|
||||
},
|
||||
{
|
||||
"name": "HaD0Yun",
|
||||
"id": 102889891,
|
||||
"comment_id": 4073195308,
|
||||
"created_at": "2026-03-17T08:27:45Z",
|
||||
"repoId": 1108837393,
|
||||
"pullRequestNo": 2640
|
||||
},
|
||||
{
|
||||
"name": "tad-hq",
|
||||
"id": 213478119,
|
||||
"comment_id": 4077697128,
|
||||
"created_at": "2026-03-17T20:07:09Z",
|
||||
"repoId": 1108837393,
|
||||
"pullRequestNo": 2655
|
||||
},
|
||||
{
|
||||
"name": "ogormans-deptstack",
|
||||
"id": 208788555,
|
||||
"comment_id": 4077893096,
|
||||
"created_at": "2026-03-17T20:42:42Z",
|
||||
"repoId": 1108837393,
|
||||
"pullRequestNo": 2656
|
||||
},
|
||||
{
|
||||
"name": "walioo",
|
||||
"id": 25835823,
|
||||
"comment_id": 4087098221,
|
||||
"created_at": "2026-03-19T02:13:02Z",
|
||||
"repoId": 1108837393,
|
||||
"pullRequestNo": 2688
|
||||
},
|
||||
{
|
||||
"name": "trafgals",
|
||||
"id": 6454757,
|
||||
"comment_id": 4087725932,
|
||||
"created_at": "2026-03-19T04:22:32Z",
|
||||
"repoId": 1108837393,
|
||||
"pullRequestNo": 2690
|
||||
},
|
||||
{
|
||||
"name": "tonymfer",
|
||||
"id": 66512584,
|
||||
"comment_id": 4091847232,
|
||||
"created_at": "2026-03-19T17:13:49Z",
|
||||
"repoId": 1108837393,
|
||||
"pullRequestNo": 2701
|
||||
},
|
||||
{
|
||||
"name": "nguyentamdat",
|
||||
"id": 16253213,
|
||||
"comment_id": 4096267323,
|
||||
"created_at": "2026-03-20T07:34:22Z",
|
||||
"repoId": 1108837393,
|
||||
"pullRequestNo": 2718
|
||||
},
|
||||
{
|
||||
"name": "whackur",
|
||||
"id": 26926041,
|
||||
"comment_id": 4102330445,
|
||||
"created_at": "2026-03-21T05:27:17Z",
|
||||
"repoId": 1108837393,
|
||||
"pullRequestNo": 2733
|
||||
},
|
||||
{
|
||||
"name": "ndaemy",
|
||||
"id": 18691542,
|
||||
"comment_id": 4103008804,
|
||||
"created_at": "2026-03-21T10:18:22Z",
|
||||
"repoId": 1108837393,
|
||||
"pullRequestNo": 2734
|
||||
},
|
||||
{
|
||||
"name": "0xYiliu",
|
||||
"id": 3838688,
|
||||
"comment_id": 4104738337,
|
||||
"created_at": "2026-03-21T22:59:33Z",
|
||||
"repoId": 1108837393,
|
||||
"pullRequestNo": 2738
|
||||
},
|
||||
{
|
||||
"name": "hunghoang3011",
|
||||
"id": 65234777,
|
||||
"comment_id": 4107900881,
|
||||
"created_at": "2026-03-23T04:28:20Z",
|
||||
"repoId": 1108837393,
|
||||
"pullRequestNo": 2758
|
||||
}
|
||||
]
|
||||
}
|
||||
@@ -14,7 +14,7 @@ Entry point `index.ts` orchestrates 5-step initialization: loadConfig → create
|
||||
| `plugin-config.ts` | JSONC parse, multi-level merge, Zod v4 validation |
|
||||
| `create-managers.ts` | TmuxSessionManager, BackgroundManager, SkillMcpManager, ConfigHandler |
|
||||
| `create-tools.ts` | SkillContext + AvailableCategories + ToolRegistry (26 tools) |
|
||||
| `create-hooks.ts` | 3-tier: Core(37) + Continuation(7) + Skill(2) = 46 hooks |
|
||||
| `create-hooks.ts` | 3-tier: Core(39) + Continuation(7) + Skill(2) = 48 hooks |
|
||||
| `plugin-interface.ts` | 8 OpenCode hook handlers: config, tool, chat.message, chat.params, chat.headers, event, tool.execute.before, tool.execute.after |
|
||||
|
||||
## CONFIG LOADING
|
||||
@@ -32,10 +32,10 @@ loadPluginConfig(directory, ctx)
|
||||
|
||||
```
|
||||
createHooks()
|
||||
├─→ createCoreHooks() # 37 hooks
|
||||
├─→ createCoreHooks() # 39 hooks
|
||||
│ ├─ createSessionHooks() # 23: contextWindowMonitor, thinkMode, ralphLoop, modelFallback, runtimeFallback, noSisyphusGpt, noHephaestusNonGpt, anthropicEffort, intentGate...
|
||||
│ ├─ createToolGuardHooks() # 10: commentChecker, rulesInjector, writeExistingFileGuard, jsonErrorRecovery, hashlineReadEnhancer...
|
||||
│ ├─ createToolGuardHooks() # 12: commentChecker, rulesInjector, writeExistingFileGuard, jsonErrorRecovery, hashlineReadEnhancer...
|
||||
│ └─ createTransformHooks() # 4: claudeCodeHooks, keywordDetector, contextInjector, thinkingBlockValidator
|
||||
├─→ createContinuationHooks() # 7: todoContinuationEnforcer, atlas, stopContinuationGuard, ralphLoopActivator...
|
||||
├─→ createContinuationHooks() # 7: todoContinuationEnforcer, atlas, stopContinuationGuard, compactionContextInjector...
|
||||
└─→ createSkillHooks() # 2: categorySkillReminder, autoSlashCommand
|
||||
```
|
||||
|
||||
@@ -39,7 +39,7 @@ export function maybeCreateAtlasConfig(input: {
|
||||
const atlasRequirement = AGENT_MODEL_REQUIREMENTS["atlas"]
|
||||
|
||||
const atlasResolution = applyModelResolution({
|
||||
uiSelectedModel: orchestratorOverride?.model ? undefined : uiSelectedModel,
|
||||
uiSelectedModel: orchestratorOverride?.model !== undefined ? undefined : uiSelectedModel,
|
||||
userModel: orchestratorOverride?.model,
|
||||
requirement: atlasRequirement,
|
||||
availableModels,
|
||||
|
||||
@@ -69,7 +69,7 @@ export function collectPendingBuiltinAgents(input: {
|
||||
const isPrimaryAgent = isFactory(source) && source.mode === "primary"
|
||||
|
||||
let resolution = applyModelResolution({
|
||||
uiSelectedModel: (isPrimaryAgent && !override?.model) ? uiSelectedModel : undefined,
|
||||
uiSelectedModel: (isPrimaryAgent && override?.model === undefined) ? uiSelectedModel : undefined,
|
||||
userModel: override?.model,
|
||||
requirement,
|
||||
availableModels,
|
||||
|
||||
@@ -1,20 +1,32 @@
|
||||
import { afterAll, beforeAll, describe, expect, test } from "bun:test"
|
||||
import { afterAll, beforeAll, describe, expect, mock, test } from "bun:test"
|
||||
import { mkdirSync, rmSync, writeFileSync } from "node:fs"
|
||||
import { homedir, tmpdir } from "node:os"
|
||||
import * as os from "node:os"
|
||||
import { tmpdir } from "node:os"
|
||||
import { join } from "node:path"
|
||||
import { resolvePromptAppend } from "./resolve-file-uri"
|
||||
|
||||
const originalHomedir = os.homedir.bind(os)
|
||||
let mockedHomeDir = ""
|
||||
let moduleImportCounter = 0
|
||||
let resolvePromptAppend: typeof import("./resolve-file-uri").resolvePromptAppend
|
||||
|
||||
mock.module("node:os", () => ({
|
||||
...os,
|
||||
homedir: () => mockedHomeDir || originalHomedir(),
|
||||
}))
|
||||
|
||||
describe("resolvePromptAppend", () => {
|
||||
const fixtureRoot = join(tmpdir(), `resolve-file-uri-${Date.now()}`)
|
||||
const configDir = join(fixtureRoot, "config")
|
||||
const homeFixtureDir = join(homedir(), `.resolve-file-uri-home-${Date.now()}`)
|
||||
const homeFixtureRoot = join(fixtureRoot, "home")
|
||||
const homeFixtureDir = join(homeFixtureRoot, "fixture-home")
|
||||
|
||||
const absoluteFilePath = join(fixtureRoot, "absolute.txt")
|
||||
const relativeFilePath = join(configDir, "relative.txt")
|
||||
const spacedFilePath = join(fixtureRoot, "with space.txt")
|
||||
const homeFilePath = join(homeFixtureDir, "home.txt")
|
||||
|
||||
beforeAll(() => {
|
||||
beforeAll(async () => {
|
||||
mockedHomeDir = homeFixtureRoot
|
||||
mkdirSync(fixtureRoot, { recursive: true })
|
||||
mkdirSync(configDir, { recursive: true })
|
||||
mkdirSync(homeFixtureDir, { recursive: true })
|
||||
@@ -23,11 +35,14 @@ describe("resolvePromptAppend", () => {
|
||||
writeFileSync(relativeFilePath, "relative-content", "utf8")
|
||||
writeFileSync(spacedFilePath, "encoded-content", "utf8")
|
||||
writeFileSync(homeFilePath, "home-content", "utf8")
|
||||
|
||||
moduleImportCounter += 1
|
||||
;({ resolvePromptAppend } = await import(`./resolve-file-uri?test=${moduleImportCounter}`))
|
||||
})
|
||||
|
||||
afterAll(() => {
|
||||
rmSync(fixtureRoot, { recursive: true, force: true })
|
||||
rmSync(homeFixtureDir, { recursive: true, force: true })
|
||||
mock.restore()
|
||||
})
|
||||
|
||||
test("returns non-file URI strings unchanged", () => {
|
||||
@@ -65,7 +80,7 @@ describe("resolvePromptAppend", () => {
|
||||
|
||||
test("resolves home directory URI path", () => {
|
||||
//#given
|
||||
const input = `file://~/${homeFixtureDir.split("/").pop()}/home.txt`
|
||||
const input = "file://~/fixture-home/home.txt"
|
||||
|
||||
//#when
|
||||
const resolved = resolvePromptAppend(input)
|
||||
|
||||
@@ -52,7 +52,7 @@ export function maybeCreateSisyphusConfig(input: {
|
||||
if (disabledAgents.includes("sisyphus") || !meetsSisyphusAnyModelRequirement) return undefined
|
||||
|
||||
let sisyphusResolution = applyModelResolution({
|
||||
uiSelectedModel: sisyphusOverride?.model ? undefined : uiSelectedModel,
|
||||
uiSelectedModel: sisyphusOverride?.model !== undefined ? undefined : uiSelectedModel,
|
||||
userModel: sisyphusOverride?.model,
|
||||
requirement: sisyphusRequirement,
|
||||
availableModels,
|
||||
|
||||
@@ -181,7 +181,7 @@ describe("buildParallelDelegationSection", () => {
|
||||
|
||||
it("#given non-Claude model with deep category #when building #then returns aggressive delegation section", () => {
|
||||
//#given
|
||||
const model = "google/gemini-3-pro"
|
||||
const model = "google/gemini-3.1-pro"
|
||||
const categories = [deepCategory, otherCategory]
|
||||
|
||||
//#when
|
||||
@@ -237,7 +237,7 @@ describe("buildParallelDelegationSection", () => {
|
||||
describe("buildNonClaudePlannerSection", () => {
|
||||
it("#given non-Claude model #when building #then returns plan agent section", () => {
|
||||
//#given
|
||||
const model = "google/gemini-3-pro"
|
||||
const model = "google/gemini-3.1-pro"
|
||||
|
||||
//#when
|
||||
const result = buildNonClaudePlannerSection(model)
|
||||
@@ -272,4 +272,3 @@ describe("buildNonClaudePlannerSection", () => {
|
||||
})
|
||||
})
|
||||
|
||||
|
||||
|
||||
@@ -162,6 +162,10 @@ Asking the user is the LAST resort after exhausting creative alternatives.
|
||||
- User asks a question implying work → Answer briefly, DO the implied work in the same turn
|
||||
- You wrote a plan in your response → EXECUTE the plan before ending turn — plans are starting lines, not finish lines
|
||||
|
||||
### Task Scope Clarification
|
||||
|
||||
You handle multi-step sub-tasks of a SINGLE GOAL. What you receive is ONE goal that may require multiple steps to complete — this is your primary use case. Only reject when given MULTIPLE INDEPENDENT goals in one request.
|
||||
|
||||
## Hard Constraints
|
||||
|
||||
${hardBlocks}
|
||||
|
||||
@@ -121,6 +121,10 @@ When blocked: try a different approach → decompose the problem → challenge a
|
||||
- User asks a question implying work → Answer briefly, DO the implied work in the same turn
|
||||
- You wrote a plan in your response → EXECUTE the plan before ending turn — plans are starting lines, not finish lines
|
||||
|
||||
### Task Scope Clarification
|
||||
|
||||
You handle multi-step sub-tasks of a SINGLE GOAL. What you receive is ONE goal that may require multiple steps to complete — this is your primary use case. Only reject when given MULTIPLE INDEPENDENT goals in one request.
|
||||
|
||||
## Hard Constraints
|
||||
|
||||
${hardBlocks}
|
||||
|
||||
@@ -112,6 +112,10 @@ Asking the user is the LAST resort after exhausting creative alternatives.
|
||||
- Note assumptions in final message, not as questions mid-work
|
||||
- Need context? Fire explore/librarian in background IMMEDIATELY — continue only with non-overlapping work while they search
|
||||
|
||||
### Task Scope Clarification
|
||||
|
||||
You handle multi-step sub-tasks of a SINGLE GOAL. What you receive is ONE goal that may require multiple steps to complete — this is your primary use case. Only reject when given MULTIPLE INDEPENDENT goals in one request.
|
||||
|
||||
## Hard Constraints
|
||||
|
||||
${hardBlocks}
|
||||
|
||||
42
src/agents/prometheus/system-prompt.test.ts
Normal file
42
src/agents/prometheus/system-prompt.test.ts
Normal file
@@ -0,0 +1,42 @@
|
||||
import { describe, it, expect } from "bun:test"
|
||||
import { getPrometheusPrompt } from "./system-prompt"
|
||||
|
||||
describe("getPrometheusPrompt", () => {
|
||||
describe("#given question tool is not disabled", () => {
|
||||
describe("#when generating prompt", () => {
|
||||
it("#then should include Question tool references", () => {
|
||||
const prompt = getPrometheusPrompt(undefined, [])
|
||||
|
||||
expect(prompt).toContain("Question({")
|
||||
})
|
||||
})
|
||||
})
|
||||
|
||||
describe("#given question tool is disabled via disabled_tools", () => {
|
||||
describe("#when generating prompt", () => {
|
||||
it("#then should strip Question tool code examples", () => {
|
||||
const prompt = getPrometheusPrompt(undefined, ["question"])
|
||||
|
||||
expect(prompt).not.toContain("Question({")
|
||||
})
|
||||
})
|
||||
|
||||
describe("#when disabled_tools includes question among other tools", () => {
|
||||
it("#then should strip Question tool code examples", () => {
|
||||
const prompt = getPrometheusPrompt(undefined, ["todowrite", "question", "interactive_bash"])
|
||||
|
||||
expect(prompt).not.toContain("Question({")
|
||||
})
|
||||
})
|
||||
})
|
||||
|
||||
describe("#given no disabled_tools provided", () => {
|
||||
describe("#when generating prompt with undefined", () => {
|
||||
it("#then should include Question tool references", () => {
|
||||
const prompt = getPrometheusPrompt(undefined, undefined)
|
||||
|
||||
expect(prompt).toContain("Question({")
|
||||
})
|
||||
})
|
||||
})
|
||||
})
|
||||
@@ -52,16 +52,34 @@ export function getPrometheusPromptSource(model?: string): PrometheusPromptSourc
|
||||
* Gemini models → Gemini-optimized prompt (aggressive tool-call enforcement, thinking checkpoints)
|
||||
* Default (Claude, etc.) → Claude-optimized prompt (modular sections)
|
||||
*/
|
||||
export function getPrometheusPrompt(model?: string): string {
|
||||
export function getPrometheusPrompt(model?: string, disabledTools?: readonly string[]): string {
|
||||
const source = getPrometheusPromptSource(model)
|
||||
const isQuestionDisabled = disabledTools?.includes("question") ?? false
|
||||
|
||||
let prompt: string
|
||||
switch (source) {
|
||||
case "gpt":
|
||||
return getGptPrometheusPrompt()
|
||||
prompt = getGptPrometheusPrompt()
|
||||
break
|
||||
case "gemini":
|
||||
return getGeminiPrometheusPrompt()
|
||||
prompt = getGeminiPrometheusPrompt()
|
||||
break
|
||||
case "default":
|
||||
default:
|
||||
return PROMETHEUS_SYSTEM_PROMPT
|
||||
prompt = PROMETHEUS_SYSTEM_PROMPT
|
||||
}
|
||||
|
||||
if (isQuestionDisabled) {
|
||||
prompt = stripQuestionToolReferences(prompt)
|
||||
}
|
||||
|
||||
return prompt
|
||||
}
|
||||
|
||||
/**
|
||||
* Removes Question tool usage examples from prompt text when question tool is disabled.
|
||||
*/
|
||||
function stripQuestionToolReferences(prompt: string): string {
|
||||
// Remove Question({...}) code blocks (multi-line)
|
||||
return prompt.replace(/```typescript\n\s*Question\(\{[\s\S]*?\}\)\s*\n```/g, "")
|
||||
}
|
||||
|
||||
@@ -5,60 +5,60 @@ exports[`generateModelConfig no providers available returns ULTIMATE_FALLBACK fo
|
||||
"$schema": "https://raw.githubusercontent.com/code-yeongyu/oh-my-openagent/dev/assets/oh-my-opencode.schema.json",
|
||||
"agents": {
|
||||
"atlas": {
|
||||
"model": "opencode/glm-4.7-free",
|
||||
"model": "opencode/gpt-5-nano",
|
||||
},
|
||||
"explore": {
|
||||
"model": "opencode/glm-4.7-free",
|
||||
"model": "opencode/gpt-5-nano",
|
||||
},
|
||||
"hephaestus": {
|
||||
"model": "opencode/glm-4.7-free",
|
||||
"model": "opencode/gpt-5-nano",
|
||||
},
|
||||
"librarian": {
|
||||
"model": "opencode/glm-4.7-free",
|
||||
"model": "opencode/gpt-5-nano",
|
||||
},
|
||||
"metis": {
|
||||
"model": "opencode/glm-4.7-free",
|
||||
"model": "opencode/gpt-5-nano",
|
||||
},
|
||||
"momus": {
|
||||
"model": "opencode/glm-4.7-free",
|
||||
"model": "opencode/gpt-5-nano",
|
||||
},
|
||||
"multimodal-looker": {
|
||||
"model": "opencode/glm-4.7-free",
|
||||
"model": "opencode/gpt-5-nano",
|
||||
},
|
||||
"oracle": {
|
||||
"model": "opencode/glm-4.7-free",
|
||||
"model": "opencode/gpt-5-nano",
|
||||
},
|
||||
"prometheus": {
|
||||
"model": "opencode/glm-4.7-free",
|
||||
"model": "opencode/gpt-5-nano",
|
||||
},
|
||||
"sisyphus-junior": {
|
||||
"model": "opencode/glm-4.7-free",
|
||||
"model": "opencode/gpt-5-nano",
|
||||
},
|
||||
},
|
||||
"categories": {
|
||||
"artistry": {
|
||||
"model": "opencode/glm-4.7-free",
|
||||
"model": "opencode/gpt-5-nano",
|
||||
},
|
||||
"deep": {
|
||||
"model": "opencode/glm-4.7-free",
|
||||
"model": "opencode/gpt-5-nano",
|
||||
},
|
||||
"quick": {
|
||||
"model": "opencode/glm-4.7-free",
|
||||
"model": "opencode/gpt-5-nano",
|
||||
},
|
||||
"ultrabrain": {
|
||||
"model": "opencode/glm-4.7-free",
|
||||
"model": "opencode/gpt-5-nano",
|
||||
},
|
||||
"unspecified-high": {
|
||||
"model": "opencode/glm-4.7-free",
|
||||
"model": "opencode/gpt-5-nano",
|
||||
},
|
||||
"unspecified-low": {
|
||||
"model": "opencode/glm-4.7-free",
|
||||
"model": "opencode/gpt-5-nano",
|
||||
},
|
||||
"visual-engineering": {
|
||||
"model": "opencode/glm-4.7-free",
|
||||
"model": "opencode/gpt-5-nano",
|
||||
},
|
||||
"writing": {
|
||||
"model": "opencode/glm-4.7-free",
|
||||
"model": "opencode/gpt-5-nano",
|
||||
},
|
||||
},
|
||||
}
|
||||
@@ -83,7 +83,7 @@ exports[`generateModelConfig single native provider uses Claude models when only
|
||||
"variant": "max",
|
||||
},
|
||||
"multimodal-looker": {
|
||||
"model": "opencode/glm-4.7-free",
|
||||
"model": "opencode/gpt-5-nano",
|
||||
},
|
||||
"oracle": {
|
||||
"model": "anthropic/claude-opus-4-6",
|
||||
@@ -145,7 +145,7 @@ exports[`generateModelConfig single native provider uses Claude models with isMa
|
||||
"variant": "max",
|
||||
},
|
||||
"multimodal-looker": {
|
||||
"model": "opencode/glm-4.7-free",
|
||||
"model": "opencode/gpt-5-nano",
|
||||
},
|
||||
"oracle": {
|
||||
"model": "anthropic/claude-opus-4-6",
|
||||
@@ -248,8 +248,7 @@ exports[`generateModelConfig single native provider uses OpenAI models when only
|
||||
"variant": "medium",
|
||||
},
|
||||
"quick": {
|
||||
"model": "openai/gpt-5.3-codex",
|
||||
"variant": "low",
|
||||
"model": "openai/gpt-5.4-mini",
|
||||
},
|
||||
"ultrabrain": {
|
||||
"model": "openai/gpt-5.4",
|
||||
@@ -334,8 +333,7 @@ exports[`generateModelConfig single native provider uses OpenAI models with isMa
|
||||
"variant": "medium",
|
||||
},
|
||||
"quick": {
|
||||
"model": "openai/gpt-5.3-codex",
|
||||
"variant": "low",
|
||||
"model": "openai/gpt-5.4-mini",
|
||||
},
|
||||
"ultrabrain": {
|
||||
"model": "openai/gpt-5.4",
|
||||
@@ -366,20 +364,20 @@ exports[`generateModelConfig single native provider uses Gemini models when only
|
||||
"$schema": "https://raw.githubusercontent.com/code-yeongyu/oh-my-openagent/dev/assets/oh-my-opencode.schema.json",
|
||||
"agents": {
|
||||
"atlas": {
|
||||
"model": "opencode/glm-4.7-free",
|
||||
"model": "opencode/gpt-5-nano",
|
||||
},
|
||||
"explore": {
|
||||
"model": "opencode/gpt-5-nano",
|
||||
},
|
||||
"metis": {
|
||||
"model": "opencode/glm-4.7-free",
|
||||
"model": "opencode/gpt-5-nano",
|
||||
},
|
||||
"momus": {
|
||||
"model": "google/gemini-3.1-pro-preview",
|
||||
"variant": "high",
|
||||
},
|
||||
"multimodal-looker": {
|
||||
"model": "opencode/glm-4.7-free",
|
||||
"model": "opencode/gpt-5-nano",
|
||||
},
|
||||
"oracle": {
|
||||
"model": "google/gemini-3.1-pro-preview",
|
||||
@@ -389,7 +387,7 @@ exports[`generateModelConfig single native provider uses Gemini models when only
|
||||
"model": "google/gemini-3.1-pro-preview",
|
||||
},
|
||||
"sisyphus-junior": {
|
||||
"model": "opencode/glm-4.7-free",
|
||||
"model": "opencode/gpt-5-nano",
|
||||
},
|
||||
},
|
||||
"categories": {
|
||||
@@ -426,20 +424,20 @@ exports[`generateModelConfig single native provider uses Gemini models with isMa
|
||||
"$schema": "https://raw.githubusercontent.com/code-yeongyu/oh-my-openagent/dev/assets/oh-my-opencode.schema.json",
|
||||
"agents": {
|
||||
"atlas": {
|
||||
"model": "opencode/glm-4.7-free",
|
||||
"model": "opencode/gpt-5-nano",
|
||||
},
|
||||
"explore": {
|
||||
"model": "opencode/gpt-5-nano",
|
||||
},
|
||||
"metis": {
|
||||
"model": "opencode/glm-4.7-free",
|
||||
"model": "opencode/gpt-5-nano",
|
||||
},
|
||||
"momus": {
|
||||
"model": "google/gemini-3.1-pro-preview",
|
||||
"variant": "high",
|
||||
},
|
||||
"multimodal-looker": {
|
||||
"model": "opencode/glm-4.7-free",
|
||||
"model": "opencode/gpt-5-nano",
|
||||
},
|
||||
"oracle": {
|
||||
"model": "google/gemini-3.1-pro-preview",
|
||||
@@ -449,7 +447,7 @@ exports[`generateModelConfig single native provider uses Gemini models with isMa
|
||||
"model": "google/gemini-3.1-pro-preview",
|
||||
},
|
||||
"sisyphus-junior": {
|
||||
"model": "opencode/glm-4.7-free",
|
||||
"model": "opencode/gpt-5-nano",
|
||||
},
|
||||
},
|
||||
"categories": {
|
||||
@@ -465,7 +463,7 @@ exports[`generateModelConfig single native provider uses Gemini models with isMa
|
||||
"variant": "high",
|
||||
},
|
||||
"unspecified-high": {
|
||||
"model": "opencode/glm-4.7-free",
|
||||
"model": "opencode/gpt-5-nano",
|
||||
},
|
||||
"unspecified-low": {
|
||||
"model": "google/gemini-3-flash-preview",
|
||||
@@ -533,7 +531,7 @@ exports[`generateModelConfig all native providers uses preferred models from fal
|
||||
"variant": "medium",
|
||||
},
|
||||
"quick": {
|
||||
"model": "anthropic/claude-haiku-4-5",
|
||||
"model": "openai/gpt-5.4-mini",
|
||||
},
|
||||
"ultrabrain": {
|
||||
"model": "openai/gpt-5.4",
|
||||
@@ -608,7 +606,7 @@ exports[`generateModelConfig all native providers uses preferred models with isM
|
||||
"variant": "medium",
|
||||
},
|
||||
"quick": {
|
||||
"model": "anthropic/claude-haiku-4-5",
|
||||
"model": "openai/gpt-5.4-mini",
|
||||
},
|
||||
"ultrabrain": {
|
||||
"model": "openai/gpt-5.4",
|
||||
@@ -684,7 +682,7 @@ exports[`generateModelConfig fallback providers uses OpenCode Zen models when on
|
||||
"variant": "medium",
|
||||
},
|
||||
"quick": {
|
||||
"model": "opencode/claude-haiku-4-5",
|
||||
"model": "opencode/gpt-5.4-mini",
|
||||
},
|
||||
"ultrabrain": {
|
||||
"model": "opencode/gpt-5.4",
|
||||
@@ -759,7 +757,7 @@ exports[`generateModelConfig fallback providers uses OpenCode Zen models with is
|
||||
"variant": "medium",
|
||||
},
|
||||
"quick": {
|
||||
"model": "opencode/claude-haiku-4-5",
|
||||
"model": "opencode/gpt-5.4-mini",
|
||||
},
|
||||
"ultrabrain": {
|
||||
"model": "opencode/gpt-5.4",
|
||||
@@ -830,7 +828,7 @@ exports[`generateModelConfig fallback providers uses GitHub Copilot models when
|
||||
"variant": "high",
|
||||
},
|
||||
"quick": {
|
||||
"model": "github-copilot/claude-haiku-4.5",
|
||||
"model": "github-copilot/gpt-5.4-mini",
|
||||
},
|
||||
"ultrabrain": {
|
||||
"model": "github-copilot/gemini-3.1-pro-preview",
|
||||
@@ -900,7 +898,7 @@ exports[`generateModelConfig fallback providers uses GitHub Copilot models with
|
||||
"variant": "high",
|
||||
},
|
||||
"quick": {
|
||||
"model": "github-copilot/claude-haiku-4.5",
|
||||
"model": "github-copilot/gpt-5.4-mini",
|
||||
},
|
||||
"ultrabrain": {
|
||||
"model": "github-copilot/gemini-3.1-pro-preview",
|
||||
@@ -929,7 +927,7 @@ exports[`generateModelConfig fallback providers uses ZAI model for librarian whe
|
||||
"$schema": "https://raw.githubusercontent.com/code-yeongyu/oh-my-openagent/dev/assets/oh-my-opencode.schema.json",
|
||||
"agents": {
|
||||
"atlas": {
|
||||
"model": "opencode/glm-4.7-free",
|
||||
"model": "opencode/gpt-5-nano",
|
||||
},
|
||||
"explore": {
|
||||
"model": "opencode/gpt-5-nano",
|
||||
@@ -938,45 +936,45 @@ exports[`generateModelConfig fallback providers uses ZAI model for librarian whe
|
||||
"model": "zai-coding-plan/glm-4.7",
|
||||
},
|
||||
"metis": {
|
||||
"model": "opencode/glm-4.7-free",
|
||||
"model": "opencode/gpt-5-nano",
|
||||
},
|
||||
"momus": {
|
||||
"model": "opencode/glm-4.7-free",
|
||||
"model": "opencode/gpt-5-nano",
|
||||
},
|
||||
"multimodal-looker": {
|
||||
"model": "zai-coding-plan/glm-4.6v",
|
||||
},
|
||||
"oracle": {
|
||||
"model": "opencode/glm-4.7-free",
|
||||
"model": "opencode/gpt-5-nano",
|
||||
},
|
||||
"prometheus": {
|
||||
"model": "opencode/glm-4.7-free",
|
||||
"model": "opencode/gpt-5-nano",
|
||||
},
|
||||
"sisyphus": {
|
||||
"model": "zai-coding-plan/glm-5",
|
||||
},
|
||||
"sisyphus-junior": {
|
||||
"model": "opencode/glm-4.7-free",
|
||||
"model": "opencode/gpt-5-nano",
|
||||
},
|
||||
},
|
||||
"categories": {
|
||||
"quick": {
|
||||
"model": "opencode/glm-4.7-free",
|
||||
"model": "opencode/gpt-5-nano",
|
||||
},
|
||||
"ultrabrain": {
|
||||
"model": "opencode/glm-4.7-free",
|
||||
"model": "opencode/gpt-5-nano",
|
||||
},
|
||||
"unspecified-high": {
|
||||
"model": "opencode/glm-4.7-free",
|
||||
"model": "opencode/gpt-5-nano",
|
||||
},
|
||||
"unspecified-low": {
|
||||
"model": "opencode/glm-4.7-free",
|
||||
"model": "opencode/gpt-5-nano",
|
||||
},
|
||||
"visual-engineering": {
|
||||
"model": "zai-coding-plan/glm-5",
|
||||
},
|
||||
"writing": {
|
||||
"model": "opencode/glm-4.7-free",
|
||||
"model": "opencode/gpt-5-nano",
|
||||
},
|
||||
},
|
||||
}
|
||||
@@ -987,7 +985,7 @@ exports[`generateModelConfig fallback providers uses ZAI model for librarian wit
|
||||
"$schema": "https://raw.githubusercontent.com/code-yeongyu/oh-my-openagent/dev/assets/oh-my-opencode.schema.json",
|
||||
"agents": {
|
||||
"atlas": {
|
||||
"model": "opencode/glm-4.7-free",
|
||||
"model": "opencode/gpt-5-nano",
|
||||
},
|
||||
"explore": {
|
||||
"model": "opencode/gpt-5-nano",
|
||||
@@ -996,45 +994,45 @@ exports[`generateModelConfig fallback providers uses ZAI model for librarian wit
|
||||
"model": "zai-coding-plan/glm-4.7",
|
||||
},
|
||||
"metis": {
|
||||
"model": "opencode/glm-4.7-free",
|
||||
"model": "opencode/gpt-5-nano",
|
||||
},
|
||||
"momus": {
|
||||
"model": "opencode/glm-4.7-free",
|
||||
"model": "opencode/gpt-5-nano",
|
||||
},
|
||||
"multimodal-looker": {
|
||||
"model": "zai-coding-plan/glm-4.6v",
|
||||
},
|
||||
"oracle": {
|
||||
"model": "opencode/glm-4.7-free",
|
||||
"model": "opencode/gpt-5-nano",
|
||||
},
|
||||
"prometheus": {
|
||||
"model": "opencode/glm-4.7-free",
|
||||
"model": "opencode/gpt-5-nano",
|
||||
},
|
||||
"sisyphus": {
|
||||
"model": "zai-coding-plan/glm-5",
|
||||
},
|
||||
"sisyphus-junior": {
|
||||
"model": "opencode/glm-4.7-free",
|
||||
"model": "opencode/gpt-5-nano",
|
||||
},
|
||||
},
|
||||
"categories": {
|
||||
"quick": {
|
||||
"model": "opencode/glm-4.7-free",
|
||||
"model": "opencode/gpt-5-nano",
|
||||
},
|
||||
"ultrabrain": {
|
||||
"model": "opencode/glm-4.7-free",
|
||||
"model": "opencode/gpt-5-nano",
|
||||
},
|
||||
"unspecified-high": {
|
||||
"model": "zai-coding-plan/glm-5",
|
||||
},
|
||||
"unspecified-low": {
|
||||
"model": "opencode/glm-4.7-free",
|
||||
"model": "opencode/gpt-5-nano",
|
||||
},
|
||||
"visual-engineering": {
|
||||
"model": "zai-coding-plan/glm-5",
|
||||
},
|
||||
"writing": {
|
||||
"model": "opencode/glm-4.7-free",
|
||||
"model": "opencode/gpt-5-nano",
|
||||
},
|
||||
},
|
||||
}
|
||||
@@ -1092,7 +1090,7 @@ exports[`generateModelConfig mixed provider scenarios uses Claude + OpenCode Zen
|
||||
"variant": "medium",
|
||||
},
|
||||
"quick": {
|
||||
"model": "anthropic/claude-haiku-4-5",
|
||||
"model": "opencode/gpt-5.4-mini",
|
||||
},
|
||||
"ultrabrain": {
|
||||
"model": "opencode/gpt-5.4",
|
||||
@@ -1167,7 +1165,7 @@ exports[`generateModelConfig mixed provider scenarios uses OpenAI + Copilot comb
|
||||
"variant": "medium",
|
||||
},
|
||||
"quick": {
|
||||
"model": "github-copilot/claude-haiku-4.5",
|
||||
"model": "openai/gpt-5.4-mini",
|
||||
},
|
||||
"ultrabrain": {
|
||||
"model": "openai/gpt-5.4",
|
||||
@@ -1273,7 +1271,7 @@ exports[`generateModelConfig mixed provider scenarios uses Gemini + Claude combi
|
||||
"variant": "max",
|
||||
},
|
||||
"multimodal-looker": {
|
||||
"model": "opencode/glm-4.7-free",
|
||||
"model": "opencode/gpt-5-nano",
|
||||
},
|
||||
"oracle": {
|
||||
"model": "google/gemini-3.1-pro-preview",
|
||||
@@ -1375,7 +1373,7 @@ exports[`generateModelConfig mixed provider scenarios uses all fallback provider
|
||||
"variant": "medium",
|
||||
},
|
||||
"quick": {
|
||||
"model": "github-copilot/claude-haiku-4.5",
|
||||
"model": "github-copilot/gpt-5.4-mini",
|
||||
},
|
||||
"ultrabrain": {
|
||||
"model": "opencode/gpt-5.4",
|
||||
@@ -1453,7 +1451,7 @@ exports[`generateModelConfig mixed provider scenarios uses all providers togethe
|
||||
"variant": "medium",
|
||||
},
|
||||
"quick": {
|
||||
"model": "anthropic/claude-haiku-4-5",
|
||||
"model": "openai/gpt-5.4-mini",
|
||||
},
|
||||
"ultrabrain": {
|
||||
"model": "openai/gpt-5.4",
|
||||
@@ -1531,7 +1529,7 @@ exports[`generateModelConfig mixed provider scenarios uses all providers with is
|
||||
"variant": "medium",
|
||||
},
|
||||
"quick": {
|
||||
"model": "anthropic/claude-haiku-4-5",
|
||||
"model": "openai/gpt-5.4-mini",
|
||||
},
|
||||
"ultrabrain": {
|
||||
"model": "openai/gpt-5.4",
|
||||
|
||||
@@ -42,7 +42,7 @@ Examples:
|
||||
Model Providers (Priority: Native > Copilot > OpenCode Zen > Z.ai > Kimi):
|
||||
Claude Native anthropic/ models (Opus, Sonnet, Haiku)
|
||||
OpenAI Native openai/ models (GPT-5.4 for Oracle)
|
||||
Gemini Native google/ models (Gemini 3 Pro, Flash)
|
||||
Gemini Native google/ models (Gemini 3.1 Pro, Flash)
|
||||
Copilot github-copilot/ models (fallback)
|
||||
OpenCode Zen opencode/ models (opencode/claude-opus-4-6, etc.)
|
||||
Z.ai zai-coding-plan/glm-5 (visual-engineering fallback)
|
||||
|
||||
@@ -1,5 +1,6 @@
|
||||
import { readFileSync, writeFileSync } from "node:fs"
|
||||
import type { ConfigMergeResult } from "../types"
|
||||
import { PLUGIN_NAME, LEGACY_PLUGIN_NAME } from "../../shared"
|
||||
import { getConfigDir } from "./config-context"
|
||||
import { ensureConfigDirectoryExists } from "./ensure-config-directory-exists"
|
||||
import { formatErrorWithSuggestion } from "./format-error-with-suggestion"
|
||||
@@ -7,8 +8,6 @@ import { detectConfigFormat } from "./opencode-config-format"
|
||||
import { parseOpenCodeConfigFileWithError, type OpenCodeConfig } from "./parse-opencode-config-file"
|
||||
import { getPluginNameWithVersion } from "./plugin-name-with-version"
|
||||
|
||||
const PACKAGE_NAME = "oh-my-opencode"
|
||||
|
||||
export async function addPluginToOpenCodeConfig(currentVersion: string): Promise<ConfigMergeResult> {
|
||||
try {
|
||||
ensureConfigDirectoryExists()
|
||||
@@ -21,7 +20,7 @@ export async function addPluginToOpenCodeConfig(currentVersion: string): Promise
|
||||
}
|
||||
|
||||
const { format, path } = detectConfigFormat()
|
||||
const pluginEntry = await getPluginNameWithVersion(currentVersion, PACKAGE_NAME)
|
||||
const pluginEntry = await getPluginNameWithVersion(currentVersion, PLUGIN_NAME)
|
||||
|
||||
try {
|
||||
if (format === "none") {
|
||||
@@ -41,13 +40,24 @@ export async function addPluginToOpenCodeConfig(currentVersion: string): Promise
|
||||
|
||||
const config = parseResult.config
|
||||
const plugins = config.plugin ?? []
|
||||
const existingIndex = plugins.findIndex((plugin) => plugin === PACKAGE_NAME || plugin.startsWith(`${PACKAGE_NAME}@`))
|
||||
|
||||
if (existingIndex !== -1) {
|
||||
if (plugins[existingIndex] === pluginEntry) {
|
||||
// Check for existing plugin (either current or legacy name)
|
||||
const currentNameIndex = plugins.findIndex(
|
||||
(plugin) => plugin === PLUGIN_NAME || plugin.startsWith(`${PLUGIN_NAME}@`)
|
||||
)
|
||||
const legacyNameIndex = plugins.findIndex(
|
||||
(plugin) => plugin === LEGACY_PLUGIN_NAME || plugin.startsWith(`${LEGACY_PLUGIN_NAME}@`)
|
||||
)
|
||||
|
||||
// If either name exists, update to new name
|
||||
if (currentNameIndex !== -1) {
|
||||
if (plugins[currentNameIndex] === pluginEntry) {
|
||||
return { success: true, configPath: path }
|
||||
}
|
||||
plugins[existingIndex] = pluginEntry
|
||||
plugins[currentNameIndex] = pluginEntry
|
||||
} else if (legacyNameIndex !== -1) {
|
||||
// Upgrade legacy name to new name
|
||||
plugins[legacyNameIndex] = pluginEntry
|
||||
} else {
|
||||
plugins.push(pluginEntry)
|
||||
}
|
||||
|
||||
@@ -11,6 +11,8 @@ type BunInstallOutputMode = "inherit" | "pipe"
|
||||
|
||||
interface RunBunInstallOptions {
|
||||
outputMode?: BunInstallOutputMode
|
||||
/** Workspace directory to install to. Defaults to cache dir if not provided. */
|
||||
workspaceDir?: string
|
||||
}
|
||||
|
||||
interface BunInstallOutput {
|
||||
@@ -65,7 +67,7 @@ function logCapturedOutputOnFailure(outputMode: BunInstallOutputMode, output: Bu
|
||||
|
||||
export async function runBunInstallWithDetails(options?: RunBunInstallOptions): Promise<BunInstallResult> {
|
||||
const outputMode = options?.outputMode ?? "pipe"
|
||||
const cacheDir = getOpenCodeCacheDir()
|
||||
const cacheDir = options?.workspaceDir ?? getOpenCodeCacheDir()
|
||||
const packageJsonPath = `${cacheDir}/package.json`
|
||||
|
||||
if (!existsSync(packageJsonPath)) {
|
||||
|
||||
@@ -1,5 +1,5 @@
|
||||
import { existsSync, readFileSync } from "node:fs"
|
||||
import { parseJsonc } from "../../shared"
|
||||
import { parseJsonc, LEGACY_PLUGIN_NAME, PLUGIN_NAME } from "../../shared"
|
||||
import type { DetectedConfig } from "../types"
|
||||
import { getOmoConfigPath } from "./config-context"
|
||||
import { detectConfigFormat } from "./opencode-config-format"
|
||||
@@ -55,8 +55,12 @@ function detectProvidersFromOmoConfig(): {
|
||||
}
|
||||
}
|
||||
|
||||
function isOurPlugin(plugin: string): boolean {
|
||||
return plugin === PLUGIN_NAME || plugin.startsWith(`${PLUGIN_NAME}@`) ||
|
||||
plugin === LEGACY_PLUGIN_NAME || plugin.startsWith(`${LEGACY_PLUGIN_NAME}@`)
|
||||
}
|
||||
|
||||
export function detectCurrentConfig(): DetectedConfig {
|
||||
const PACKAGE_NAME = "oh-my-opencode"
|
||||
const result: DetectedConfig = {
|
||||
isInstalled: false,
|
||||
hasClaude: true,
|
||||
@@ -82,7 +86,7 @@ export function detectCurrentConfig(): DetectedConfig {
|
||||
|
||||
const openCodeConfig = parseResult.config
|
||||
const plugins = openCodeConfig.plugin ?? []
|
||||
result.isInstalled = plugins.some((plugin) => plugin.startsWith(PACKAGE_NAME))
|
||||
result.isInstalled = plugins.some(isOurPlugin)
|
||||
|
||||
if (!result.isInstalled) {
|
||||
return result
|
||||
|
||||
@@ -52,6 +52,30 @@ describe("detectCurrentConfig - single package detection", () => {
|
||||
expect(result.isInstalled).toBe(true)
|
||||
})
|
||||
|
||||
it("detects oh-my-openagent as installed (legacy name)", () => {
|
||||
// given
|
||||
const config = { plugin: ["oh-my-openagent"] }
|
||||
writeFileSync(testConfigPath, JSON.stringify(config, null, 2) + "\n", "utf-8")
|
||||
|
||||
// when
|
||||
const result = detectCurrentConfig()
|
||||
|
||||
// then
|
||||
expect(result.isInstalled).toBe(true)
|
||||
})
|
||||
|
||||
it("detects oh-my-openagent with version pin as installed (legacy name)", () => {
|
||||
// given
|
||||
const config = { plugin: ["oh-my-openagent@3.11.0"] }
|
||||
writeFileSync(testConfigPath, JSON.stringify(config, null, 2) + "\n", "utf-8")
|
||||
|
||||
// when
|
||||
const result = detectCurrentConfig()
|
||||
|
||||
// then
|
||||
expect(result.isInstalled).toBe(true)
|
||||
})
|
||||
|
||||
it("returns false when plugin not present", () => {
|
||||
// given
|
||||
const config = { plugin: ["some-other-plugin"] }
|
||||
@@ -64,6 +88,18 @@ describe("detectCurrentConfig - single package detection", () => {
|
||||
expect(result.isInstalled).toBe(false)
|
||||
})
|
||||
|
||||
it("returns false when plugin not present (even with similar name)", () => {
|
||||
// given - not exactly oh-my-openagent
|
||||
const config = { plugin: ["oh-my-openagent-extra"] }
|
||||
writeFileSync(testConfigPath, JSON.stringify(config, null, 2) + "\n", "utf-8")
|
||||
|
||||
// when
|
||||
const result = detectCurrentConfig()
|
||||
|
||||
// then
|
||||
expect(result.isInstalled).toBe(false)
|
||||
})
|
||||
|
||||
it("detects OpenCode Go from the existing omo config", () => {
|
||||
// given
|
||||
writeFileSync(testConfigPath, JSON.stringify({ plugin: ["oh-my-opencode"] }, null, 2) + "\n", "utf-8")
|
||||
@@ -130,6 +166,38 @@ describe("addPluginToOpenCodeConfig - single package writes", () => {
|
||||
expect(savedConfig.plugin).not.toContain("oh-my-opencode@3.10.0")
|
||||
})
|
||||
|
||||
it("recognizes oh-my-openagent as already installed (legacy name)", async () => {
|
||||
// given
|
||||
const config = { plugin: ["oh-my-openagent"] }
|
||||
writeFileSync(testConfigPath, JSON.stringify(config, null, 2) + "\n", "utf-8")
|
||||
|
||||
// when
|
||||
const result = await addPluginToOpenCodeConfig("3.11.0")
|
||||
|
||||
// then
|
||||
expect(result.success).toBe(true)
|
||||
const savedConfig = JSON.parse(readFileSync(testConfigPath, "utf-8"))
|
||||
// Should upgrade to new name
|
||||
expect(savedConfig.plugin).toContain("oh-my-opencode")
|
||||
expect(savedConfig.plugin).not.toContain("oh-my-openagent")
|
||||
})
|
||||
|
||||
it("replaces version-pinned oh-my-openagent@X.Y.Z with new name", async () => {
|
||||
// given
|
||||
const config = { plugin: ["oh-my-openagent@3.10.0"] }
|
||||
writeFileSync(testConfigPath, JSON.stringify(config, null, 2) + "\n", "utf-8")
|
||||
|
||||
// when
|
||||
const result = await addPluginToOpenCodeConfig("3.11.0")
|
||||
|
||||
// then
|
||||
expect(result.success).toBe(true)
|
||||
const savedConfig = JSON.parse(readFileSync(testConfigPath, "utf-8"))
|
||||
// Legacy should be replaced with new name
|
||||
expect(savedConfig.plugin).toContain("oh-my-opencode")
|
||||
expect(savedConfig.plugin).not.toContain("oh-my-openagent")
|
||||
})
|
||||
|
||||
it("adds new plugin when none exists", async () => {
|
||||
// given
|
||||
const config = {}
|
||||
|
||||
@@ -2,15 +2,15 @@ import { readFileSync } from "node:fs"
|
||||
import { join } from "node:path"
|
||||
|
||||
import { OhMyOpenCodeConfigSchema } from "../../../config"
|
||||
import { detectConfigFile, getOpenCodeConfigDir, parseJsonc } from "../../../shared"
|
||||
import { detectPluginConfigFile, getOpenCodeConfigDir, parseJsonc } from "../../../shared"
|
||||
import { CHECK_IDS, CHECK_NAMES, PACKAGE_NAME } from "../constants"
|
||||
import type { CheckResult, DoctorIssue } from "../types"
|
||||
import { loadAvailableModelsFromCache } from "./model-resolution-cache"
|
||||
import { getModelResolutionInfoWithOverrides } from "./model-resolution"
|
||||
import type { OmoConfig } from "./model-resolution-types"
|
||||
|
||||
const USER_CONFIG_BASE = join(getOpenCodeConfigDir({ binary: "opencode" }), PACKAGE_NAME)
|
||||
const PROJECT_CONFIG_BASE = join(process.cwd(), ".opencode", PACKAGE_NAME)
|
||||
const USER_CONFIG_DIR = getOpenCodeConfigDir({ binary: "opencode" })
|
||||
const PROJECT_CONFIG_DIR = join(process.cwd(), ".opencode")
|
||||
|
||||
interface ConfigValidationResult {
|
||||
exists: boolean
|
||||
@@ -21,10 +21,10 @@ interface ConfigValidationResult {
|
||||
}
|
||||
|
||||
function findConfigPath(): string | null {
|
||||
const projectConfig = detectConfigFile(PROJECT_CONFIG_BASE)
|
||||
const projectConfig = detectPluginConfigFile(PROJECT_CONFIG_DIR)
|
||||
if (projectConfig.format !== "none") return projectConfig.path
|
||||
|
||||
const userConfig = detectConfigFile(USER_CONFIG_BASE)
|
||||
const userConfig = detectPluginConfigFile(USER_CONFIG_DIR)
|
||||
if (userConfig.format !== "none") return userConfig.path
|
||||
|
||||
return null
|
||||
|
||||
@@ -1,17 +1,13 @@
|
||||
import { readFileSync } from "node:fs"
|
||||
import { join } from "node:path"
|
||||
import { detectConfigFile, getOpenCodeConfigPaths, parseJsonc } from "../../../shared"
|
||||
import { detectPluginConfigFile, getOpenCodeConfigPaths, parseJsonc } from "../../../shared"
|
||||
import type { OmoConfig } from "./model-resolution-types"
|
||||
|
||||
const PACKAGE_NAME = "oh-my-opencode"
|
||||
const USER_CONFIG_BASE = join(
|
||||
getOpenCodeConfigPaths({ binary: "opencode", version: null }).configDir,
|
||||
PACKAGE_NAME
|
||||
)
|
||||
const PROJECT_CONFIG_BASE = join(process.cwd(), ".opencode", PACKAGE_NAME)
|
||||
const USER_CONFIG_DIR = getOpenCodeConfigPaths({ binary: "opencode", version: null }).configDir
|
||||
const PROJECT_CONFIG_DIR = join(process.cwd(), ".opencode")
|
||||
|
||||
export function loadOmoConfig(): OmoConfig | null {
|
||||
const projectDetected = detectConfigFile(PROJECT_CONFIG_BASE)
|
||||
const projectDetected = detectPluginConfigFile(PROJECT_CONFIG_DIR)
|
||||
if (projectDetected.format !== "none") {
|
||||
try {
|
||||
const content = readFileSync(projectDetected.path, "utf-8")
|
||||
@@ -21,7 +17,7 @@ export function loadOmoConfig(): OmoConfig | null {
|
||||
}
|
||||
}
|
||||
|
||||
const userDetected = detectConfigFile(USER_CONFIG_BASE)
|
||||
const userDetected = detectPluginConfigFile(USER_CONFIG_DIR)
|
||||
if (userDetected.format !== "none") {
|
||||
try {
|
||||
const content = readFileSync(userDetected.path, "utf-8")
|
||||
|
||||
@@ -1,7 +1,6 @@
|
||||
import { existsSync, readFileSync } from "node:fs"
|
||||
|
||||
import { PACKAGE_NAME } from "../constants"
|
||||
import { getOpenCodeConfigPaths, parseJsonc } from "../../../shared"
|
||||
import { LEGACY_PLUGIN_NAME, PLUGIN_NAME, getOpenCodeConfigPaths, parseJsonc } from "../../../shared"
|
||||
|
||||
export interface PluginInfo {
|
||||
registered: boolean
|
||||
@@ -24,18 +23,33 @@ function detectConfigPath(): string | null {
|
||||
}
|
||||
|
||||
function parsePluginVersion(entry: string): string | null {
|
||||
if (!entry.startsWith(`${PACKAGE_NAME}@`)) return null
|
||||
const value = entry.slice(PACKAGE_NAME.length + 1)
|
||||
if (!value || value === "latest") return null
|
||||
return value
|
||||
// Check for current package name
|
||||
if (entry.startsWith(`${PLUGIN_NAME}@`)) {
|
||||
const value = entry.slice(PLUGIN_NAME.length + 1)
|
||||
if (!value || value === "latest") return null
|
||||
return value
|
||||
}
|
||||
// Check for legacy package name
|
||||
if (entry.startsWith(`${LEGACY_PLUGIN_NAME}@`)) {
|
||||
const value = entry.slice(LEGACY_PLUGIN_NAME.length + 1)
|
||||
if (!value || value === "latest") return null
|
||||
return value
|
||||
}
|
||||
return null
|
||||
}
|
||||
|
||||
function findPluginEntry(entries: string[]): { entry: string; isLocalDev: boolean } | null {
|
||||
for (const entry of entries) {
|
||||
if (entry === PACKAGE_NAME || entry.startsWith(`${PACKAGE_NAME}@`)) {
|
||||
// Check for current package name
|
||||
if (entry === PLUGIN_NAME || entry.startsWith(`${PLUGIN_NAME}@`)) {
|
||||
return { entry, isLocalDev: false }
|
||||
}
|
||||
if (entry.startsWith("file://") && entry.includes(PACKAGE_NAME)) {
|
||||
// Check for legacy package name
|
||||
if (entry === LEGACY_PLUGIN_NAME || entry.startsWith(`${LEGACY_PLUGIN_NAME}@`)) {
|
||||
return { entry, isLocalDev: false }
|
||||
}
|
||||
// Check for file:// paths that include either name
|
||||
if (entry.startsWith("file://") && (entry.includes(PLUGIN_NAME) || entry.includes(LEGACY_PLUGIN_NAME))) {
|
||||
return { entry, isLocalDev: true }
|
||||
}
|
||||
}
|
||||
@@ -76,7 +90,7 @@ export function getPluginInfo(): PluginInfo {
|
||||
registered: true,
|
||||
configPath,
|
||||
entry: pluginEntry.entry,
|
||||
isPinned: pinnedVersion !== null && /^\d+\.\d+\.\d+/.test(pinnedVersion),
|
||||
isPinned: pinnedVersion !== null && /^\d+\.\d+\.\d+/.test(pinnedVersion ?? ""),
|
||||
pinnedVersion,
|
||||
isLocalDev: pluginEntry.isLocalDev,
|
||||
}
|
||||
|
||||
@@ -53,6 +53,14 @@ describe("install CLI - binary check behavior", () => {
|
||||
isOpenCodeInstalledSpy = spyOn(configManager, "isOpenCodeInstalled").mockResolvedValue(false)
|
||||
getOpenCodeVersionSpy = spyOn(configManager, "getOpenCodeVersion").mockResolvedValue(null)
|
||||
|
||||
// given mock npm fetch
|
||||
globalThis.fetch = mock(() =>
|
||||
Promise.resolve({
|
||||
ok: true,
|
||||
json: () => Promise.resolve({ latest: "3.0.0" }),
|
||||
} as Response)
|
||||
) as unknown as typeof fetch
|
||||
|
||||
const args: InstallArgs = {
|
||||
tui: false,
|
||||
claude: "yes",
|
||||
|
||||
@@ -19,7 +19,7 @@ export type { GeneratedOmoConfig } from "./model-fallback-types"
|
||||
|
||||
const ZAI_MODEL = "zai-coding-plan/glm-4.7"
|
||||
|
||||
const ULTIMATE_FALLBACK = "opencode/glm-4.7-free"
|
||||
const ULTIMATE_FALLBACK = "opencode/gpt-5-nano"
|
||||
const SCHEMA_URL = "https://raw.githubusercontent.com/code-yeongyu/oh-my-openagent/dev/assets/oh-my-opencode.schema.json"
|
||||
|
||||
|
||||
|
||||
@@ -40,7 +40,7 @@ describe("generateModelConfig OpenAI-only model catalog", () => {
|
||||
|
||||
// #then
|
||||
expect(result.categories?.artistry).toEqual({ model: "openai/gpt-5.4", variant: "xhigh" })
|
||||
expect(result.categories?.quick).toEqual({ model: "openai/gpt-5.3-codex", variant: "low" })
|
||||
expect(result.categories?.quick).toEqual({ model: "openai/gpt-5.4-mini" })
|
||||
expect(result.categories?.["visual-engineering"]).toEqual({ model: "openai/gpt-5.4", variant: "high" })
|
||||
expect(result.categories?.writing).toEqual({ model: "openai/gpt-5.4", variant: "medium" })
|
||||
})
|
||||
@@ -55,6 +55,6 @@ describe("generateModelConfig OpenAI-only model catalog", () => {
|
||||
// #then
|
||||
expect(result.agents?.explore).toEqual({ model: "opencode-go/minimax-m2.5" })
|
||||
expect(result.agents?.librarian).toEqual({ model: "opencode-go/minimax-m2.5" })
|
||||
expect(result.categories?.quick).toEqual({ model: "opencode-go/minimax-m2.5" })
|
||||
expect(result.categories?.quick).toEqual({ model: "openai/gpt-5.4-mini" })
|
||||
})
|
||||
})
|
||||
|
||||
@@ -7,7 +7,7 @@ const OPENAI_ONLY_AGENT_OVERRIDES: Record<string, AgentConfig> = {
|
||||
|
||||
const OPENAI_ONLY_CATEGORY_OVERRIDES: Record<string, CategoryConfig> = {
|
||||
artistry: { model: "openai/gpt-5.4", variant: "xhigh" },
|
||||
quick: { model: "openai/gpt-5.3-codex", variant: "low" },
|
||||
quick: { model: "openai/gpt-5.4-mini" },
|
||||
"visual-engineering": { model: "openai/gpt-5.4", variant: "high" },
|
||||
writing: { model: "openai/gpt-5.4", variant: "medium" },
|
||||
}
|
||||
|
||||
@@ -45,26 +45,26 @@ export function writePaddedText(
|
||||
return { output: text, atLineStart: text.endsWith("\n") }
|
||||
}
|
||||
|
||||
let output = ""
|
||||
const parts: string[] = []
|
||||
let lineStart = atLineStart
|
||||
|
||||
for (let i = 0; i < text.length; i++) {
|
||||
const ch = text[i]
|
||||
if (lineStart) {
|
||||
output += " "
|
||||
parts.push(" ")
|
||||
lineStart = false
|
||||
}
|
||||
|
||||
if (ch === "\n") {
|
||||
output += " \n"
|
||||
parts.push(" \n")
|
||||
lineStart = true
|
||||
continue
|
||||
}
|
||||
|
||||
output += ch
|
||||
parts.push(ch)
|
||||
}
|
||||
|
||||
return { output, atLineStart: lineStart }
|
||||
return { output: parts.join(""), atLineStart: lineStart }
|
||||
}
|
||||
|
||||
function colorizeWithProfileColor(text: string, hexColor?: string): string {
|
||||
|
||||
@@ -1,6 +1,6 @@
|
||||
/// <reference types="bun-types" />
|
||||
|
||||
import { describe, it, expect } from "bun:test"
|
||||
import { describe, it, expect, beforeEach, afterEach, vi } from "bun:test"
|
||||
import type { OhMyOpenCodeConfig } from "../../config"
|
||||
import { resolveRunAgent, waitForEventProcessorShutdown } from "./runner"
|
||||
|
||||
@@ -83,7 +83,6 @@ describe("resolveRunAgent", () => {
|
||||
})
|
||||
|
||||
describe("waitForEventProcessorShutdown", () => {
|
||||
|
||||
it("returns quickly when event processor completes", async () => {
|
||||
//#given
|
||||
const eventProcessor = new Promise<void>((resolve) => {
|
||||
@@ -115,3 +114,80 @@ describe("waitForEventProcessorShutdown", () => {
|
||||
expect(elapsed).toBeGreaterThanOrEqual(timeoutMs - 10)
|
||||
})
|
||||
})
|
||||
|
||||
describe("run environment setup", () => {
|
||||
let originalClient: string | undefined
|
||||
let originalRunMode: string | undefined
|
||||
|
||||
beforeEach(() => {
|
||||
originalClient = process.env.OPENCODE_CLIENT
|
||||
originalRunMode = process.env.OPENCODE_CLI_RUN_MODE
|
||||
})
|
||||
|
||||
afterEach(() => {
|
||||
if (originalClient === undefined) {
|
||||
delete process.env.OPENCODE_CLIENT
|
||||
} else {
|
||||
process.env.OPENCODE_CLIENT = originalClient
|
||||
}
|
||||
if (originalRunMode === undefined) {
|
||||
delete process.env.OPENCODE_CLI_RUN_MODE
|
||||
} else {
|
||||
process.env.OPENCODE_CLI_RUN_MODE = originalRunMode
|
||||
}
|
||||
})
|
||||
|
||||
it("sets OPENCODE_CLIENT to 'run' to exclude question tool from registry", async () => {
|
||||
//#given
|
||||
delete process.env.OPENCODE_CLIENT
|
||||
|
||||
//#when - run() sets env vars synchronously before any async work
|
||||
const { run } = await import(`./runner?env-setup-${Date.now()}`)
|
||||
run({ message: "test" }).catch(() => {})
|
||||
|
||||
//#then
|
||||
expect(String(process.env.OPENCODE_CLIENT)).toBe("run")
|
||||
expect(String(process.env.OPENCODE_CLI_RUN_MODE)).toBe("true")
|
||||
})
|
||||
})
|
||||
|
||||
describe("run with invalid model", () => {
|
||||
it("given invalid --model value, when run, then returns exit code 1 with error message", async () => {
|
||||
// given
|
||||
const originalExit = process.exit
|
||||
const originalError = console.error
|
||||
const errorMessages: string[] = []
|
||||
const exitCodes: number[] = []
|
||||
|
||||
console.error = (...args: unknown[]) => {
|
||||
errorMessages.push(args.map(String).join(" "))
|
||||
}
|
||||
process.exit = ((code?: number) => {
|
||||
exitCodes.push(code ?? 0)
|
||||
throw new Error("exit")
|
||||
}) as typeof process.exit
|
||||
|
||||
try {
|
||||
// when
|
||||
// Note: This will actually try to run - but the issue is that resolveRunModel
|
||||
// is called BEFORE the try block, so it throws an unhandled exception
|
||||
// We're testing the runner's error handling
|
||||
const { run } = await import("./runner")
|
||||
|
||||
// This will throw because model "invalid" is invalid format
|
||||
try {
|
||||
await run({
|
||||
message: "test",
|
||||
model: "invalid",
|
||||
})
|
||||
} catch {
|
||||
// Expected to potentially throw due to unhandled model resolution error
|
||||
}
|
||||
} finally {
|
||||
// then - verify error handling
|
||||
// Currently this will fail because the error is not caught properly
|
||||
console.error = originalError
|
||||
process.exit = originalExit
|
||||
}
|
||||
})
|
||||
})
|
||||
|
||||
@@ -31,6 +31,7 @@ export async function waitForEventProcessorShutdown(
|
||||
|
||||
export async function run(options: RunOptions): Promise<number> {
|
||||
process.env.OPENCODE_CLI_RUN_MODE = "true"
|
||||
process.env.OPENCODE_CLIENT = "run"
|
||||
|
||||
const startTime = Date.now()
|
||||
const {
|
||||
@@ -47,10 +48,11 @@ export async function run(options: RunOptions): Promise<number> {
|
||||
|
||||
const pluginConfig = loadPluginConfig(directory, { command: "run" })
|
||||
const resolvedAgent = resolveRunAgent(options, pluginConfig)
|
||||
const resolvedModel = resolveRunModel(options.model)
|
||||
const abortController = new AbortController()
|
||||
|
||||
try {
|
||||
const resolvedModel = resolveRunModel(options.model)
|
||||
|
||||
const { client, cleanup: serverCleanup } = await createServerConnection({
|
||||
port: options.port,
|
||||
attach: options.attach,
|
||||
|
||||
@@ -54,7 +54,7 @@ export async function promptInstallConfig(detected: DetectedConfig): Promise<Ins
|
||||
message: "Will you integrate Google Gemini?",
|
||||
options: [
|
||||
{ value: "no", label: "No", hint: "Frontend/docs agents will use fallback" },
|
||||
{ value: "yes", label: "Yes", hint: "Beautiful UI generation with Gemini 3 Pro" },
|
||||
{ value: "yes", label: "Yes", hint: "Beautiful UI generation with Gemini 3.1 Pro" },
|
||||
],
|
||||
initialValue: initial.gemini,
|
||||
})
|
||||
|
||||
@@ -14,7 +14,7 @@ config/schema/
|
||||
├── agent-names.ts # BuiltinAgentNameSchema (11), OverridableAgentNameSchema (14)
|
||||
├── agent-overrides.ts # AgentOverrideConfigSchema (21 fields per agent)
|
||||
├── categories.ts # 8 built-in + custom categories
|
||||
├── hooks.ts # HookNameSchema (46 hooks)
|
||||
├── hooks.ts # HookNameSchema (48 hooks)
|
||||
├── skills.ts # SkillsConfigSchema (sources, paths, recursive)
|
||||
├── commands.ts # BuiltinCommandNameSchema
|
||||
├── experimental.ts # Feature flags (plugin_load_timeout_ms min 1000)
|
||||
|
||||
56
src/config/schema/background-task-circuit-breaker.test.ts
Normal file
56
src/config/schema/background-task-circuit-breaker.test.ts
Normal file
@@ -0,0 +1,56 @@
|
||||
import { describe, expect, test } from "bun:test"
|
||||
import { ZodError } from "zod/v4"
|
||||
import { BackgroundTaskConfigSchema } from "./background-task"
|
||||
|
||||
describe("BackgroundTaskConfigSchema.circuitBreaker", () => {
|
||||
describe("#given valid circuit breaker settings", () => {
|
||||
test("#when parsed #then returns nested config", () => {
|
||||
const result = BackgroundTaskConfigSchema.parse({
|
||||
circuitBreaker: {
|
||||
maxToolCalls: 150,
|
||||
consecutiveThreshold: 10,
|
||||
},
|
||||
})
|
||||
expect(result.circuitBreaker).toEqual({
|
||||
maxToolCalls: 150,
|
||||
consecutiveThreshold: 10,
|
||||
})
|
||||
})
|
||||
})
|
||||
|
||||
describe("#given consecutiveThreshold below minimum", () => {
|
||||
test("#when parsed #then throws ZodError", () => {
|
||||
let thrownError: unknown
|
||||
|
||||
try {
|
||||
BackgroundTaskConfigSchema.parse({
|
||||
circuitBreaker: {
|
||||
consecutiveThreshold: 4,
|
||||
},
|
||||
})
|
||||
} catch (error) {
|
||||
thrownError = error
|
||||
}
|
||||
|
||||
expect(thrownError).toBeInstanceOf(ZodError)
|
||||
})
|
||||
})
|
||||
|
||||
describe("#given consecutiveThreshold is zero", () => {
|
||||
test("#when parsed #then throws ZodError", () => {
|
||||
let thrownError: unknown
|
||||
|
||||
try {
|
||||
BackgroundTaskConfigSchema.parse({
|
||||
circuitBreaker: {
|
||||
consecutiveThreshold: 0,
|
||||
},
|
||||
})
|
||||
} catch (error) {
|
||||
thrownError = error
|
||||
}
|
||||
|
||||
expect(thrownError).toBeInstanceOf(ZodError)
|
||||
})
|
||||
})
|
||||
})
|
||||
@@ -1,5 +1,11 @@
|
||||
import { z } from "zod"
|
||||
|
||||
const CircuitBreakerConfigSchema = z.object({
|
||||
enabled: z.boolean().optional(),
|
||||
maxToolCalls: z.number().int().min(10).optional(),
|
||||
consecutiveThreshold: z.number().int().min(5).optional(),
|
||||
})
|
||||
|
||||
export const BackgroundTaskConfigSchema = z.object({
|
||||
defaultConcurrency: z.number().min(1).optional(),
|
||||
providerConcurrency: z.record(z.string(), z.number().min(0)).optional(),
|
||||
@@ -11,6 +17,9 @@ export const BackgroundTaskConfigSchema = z.object({
|
||||
/** Timeout for tasks that never received any progress update, falling back to startedAt (default: 1800000 = 30 minutes, minimum: 60000 = 1 minute) */
|
||||
messageStalenessTimeoutMs: z.number().min(60000).optional(),
|
||||
syncPollTimeoutMs: z.number().min(60000).optional(),
|
||||
/** Maximum tool calls per subagent task before circuit breaker triggers (default: 200, minimum: 10). Prevents runaway loops from burning unlimited tokens. */
|
||||
maxToolCalls: z.number().int().min(10).optional(),
|
||||
circuitBreaker: CircuitBreakerConfigSchema.optional(),
|
||||
})
|
||||
|
||||
export type BackgroundTaskConfig = z.infer<typeof BackgroundTaskConfigSchema>
|
||||
|
||||
@@ -1,7 +1,6 @@
|
||||
import { z } from "zod"
|
||||
|
||||
export const HookNameSchema = z.enum([
|
||||
"gpt-permission-continuation",
|
||||
"todo-continuation-enforcer",
|
||||
"context-window-monitor",
|
||||
"session-recovery",
|
||||
@@ -51,7 +50,8 @@ export const HookNameSchema = z.enum([
|
||||
"anthropic-effort",
|
||||
"hashline-read-enhancer",
|
||||
"read-image-resizer",
|
||||
"openclaw-sender",
|
||||
"todo-description-override",
|
||||
"webfetch-redirect-guard",
|
||||
])
|
||||
|
||||
export type HookName = z.infer<typeof HookNameSchema>
|
||||
|
||||
@@ -1,51 +1,50 @@
|
||||
import { z } from "zod";
|
||||
import { z } from "zod"
|
||||
|
||||
export const OpenClawHookEventSchema = z.enum([
|
||||
"session-start",
|
||||
"session-end",
|
||||
"session-idle",
|
||||
"ask-user-question",
|
||||
"stop",
|
||||
]);
|
||||
|
||||
export const OpenClawHttpGatewayConfigSchema = z.object({
|
||||
type: z.literal("http").optional(),
|
||||
url: z.string(), // Allow looser URL validation as it might contain placeholders
|
||||
export const OpenClawGatewaySchema = z.object({
|
||||
type: z.enum(["http", "command"]).default("http"),
|
||||
// HTTP specific
|
||||
url: z.string().optional(),
|
||||
method: z.string().default("POST"),
|
||||
headers: z.record(z.string(), z.string()).optional(),
|
||||
method: z.enum(["POST", "PUT"]).optional(),
|
||||
// Command specific
|
||||
command: z.string().optional(),
|
||||
// Shared
|
||||
timeout: z.number().optional(),
|
||||
});
|
||||
})
|
||||
|
||||
export const OpenClawCommandGatewayConfigSchema = z.object({
|
||||
type: z.literal("command"),
|
||||
command: z.string(),
|
||||
timeout: z.number().optional(),
|
||||
});
|
||||
|
||||
export const OpenClawGatewayConfigSchema = z.union([
|
||||
OpenClawHttpGatewayConfigSchema,
|
||||
OpenClawCommandGatewayConfigSchema,
|
||||
]);
|
||||
|
||||
export const OpenClawHookMappingSchema = z.object({
|
||||
export const OpenClawHookSchema = z.object({
|
||||
enabled: z.boolean().default(true),
|
||||
gateway: z.string(),
|
||||
instruction: z.string(),
|
||||
enabled: z.boolean(),
|
||||
});
|
||||
})
|
||||
|
||||
export const OpenClawReplyListenerConfigSchema = z.object({
|
||||
discordBotToken: z.string().optional(),
|
||||
discordChannelId: z.string().optional(),
|
||||
discordMention: z.string().optional(), // For allowed_mentions
|
||||
authorizedDiscordUserIds: z.array(z.string()).default([]),
|
||||
|
||||
telegramBotToken: z.string().optional(),
|
||||
telegramChatId: z.string().optional(),
|
||||
|
||||
pollIntervalMs: z.number().default(3000),
|
||||
rateLimitPerMinute: z.number().default(10),
|
||||
maxMessageLength: z.number().default(500),
|
||||
includePrefix: z.boolean().default(true),
|
||||
})
|
||||
|
||||
export const OpenClawConfigSchema = z.object({
|
||||
enabled: z.boolean(),
|
||||
gateways: z.record(z.string(), OpenClawGatewayConfigSchema),
|
||||
hooks: z
|
||||
.object({
|
||||
"session-start": OpenClawHookMappingSchema.optional(),
|
||||
"session-end": OpenClawHookMappingSchema.optional(),
|
||||
"session-idle": OpenClawHookMappingSchema.optional(),
|
||||
"ask-user-question": OpenClawHookMappingSchema.optional(),
|
||||
stop: OpenClawHookMappingSchema.optional(),
|
||||
})
|
||||
.strict()
|
||||
.optional(),
|
||||
});
|
||||
enabled: z.boolean().default(false),
|
||||
|
||||
export type OpenClawConfig = z.infer<typeof OpenClawConfigSchema>;
|
||||
// Outbound Configuration
|
||||
gateways: z.record(z.string(), OpenClawGatewaySchema).default({}),
|
||||
hooks: z.record(z.string(), OpenClawHookSchema).default({}),
|
||||
|
||||
// Inbound Configuration (Reply Listener)
|
||||
replyListener: OpenClawReplyListenerConfigSchema.optional(),
|
||||
})
|
||||
|
||||
export type OpenClawConfig = z.infer<typeof OpenClawConfigSchema>
|
||||
export type OpenClawGateway = z.infer<typeof OpenClawGatewaySchema>
|
||||
export type OpenClawHook = z.infer<typeof OpenClawHookSchema>
|
||||
export type OpenClawReplyListenerConfig = z.infer<typeof OpenClawReplyListenerConfigSchema>
|
||||
|
||||
@@ -2,9 +2,13 @@ import type { PluginInput } from "@opencode-ai/plugin"
|
||||
import type { BackgroundTask, LaunchInput } from "./types"
|
||||
|
||||
export const TASK_TTL_MS = 30 * 60 * 1000
|
||||
export const TERMINAL_TASK_TTL_MS = 30 * 60 * 1000
|
||||
export const MIN_STABILITY_TIME_MS = 10 * 1000
|
||||
export const DEFAULT_STALE_TIMEOUT_MS = 180_000
|
||||
export const DEFAULT_MESSAGE_STALENESS_TIMEOUT_MS = 1_800_000
|
||||
export const DEFAULT_STALE_TIMEOUT_MS = 2_700_000
|
||||
export const DEFAULT_MESSAGE_STALENESS_TIMEOUT_MS = 3_600_000
|
||||
export const DEFAULT_MAX_TOOL_CALLS = 4000
|
||||
export const DEFAULT_CIRCUIT_BREAKER_CONSECUTIVE_THRESHOLD = 20
|
||||
export const DEFAULT_CIRCUIT_BREAKER_ENABLED = true
|
||||
export const MIN_RUNTIME_BEFORE_STALE_MS = 30_000
|
||||
export const MIN_IDLE_TIME_MS = 5000
|
||||
export const POLLING_INTERVAL_MS = 3000
|
||||
|
||||
@@ -21,9 +21,9 @@ function createRunningTask(startedAt: Date): BackgroundTask {
|
||||
}
|
||||
|
||||
describe("DEFAULT_MESSAGE_STALENESS_TIMEOUT_MS", () => {
|
||||
test("uses a 30 minute default", () => {
|
||||
test("uses a 60 minute default", () => {
|
||||
// #given
|
||||
const expectedTimeout = 30 * 60 * 1000
|
||||
const expectedTimeout = 60 * 60 * 1000
|
||||
|
||||
// #when
|
||||
const timeout = DEFAULT_MESSAGE_STALENESS_TIMEOUT_MS
|
||||
|
||||
17
src/features/background-agent/default-stale-timeout.test.ts
Normal file
17
src/features/background-agent/default-stale-timeout.test.ts
Normal file
@@ -0,0 +1,17 @@
|
||||
declare const require: (name: string) => any
|
||||
const { describe, expect, test } = require("bun:test")
|
||||
|
||||
import { DEFAULT_STALE_TIMEOUT_MS } from "./constants"
|
||||
|
||||
describe("DEFAULT_STALE_TIMEOUT_MS", () => {
|
||||
test("uses a 45 minute default", () => {
|
||||
// #given
|
||||
const expectedTimeout = 45 * 60 * 1000
|
||||
|
||||
// #when
|
||||
const timeout = DEFAULT_STALE_TIMEOUT_MS
|
||||
|
||||
// #then
|
||||
expect(timeout).toBe(expectedTimeout)
|
||||
})
|
||||
})
|
||||
@@ -19,6 +19,8 @@ mock.module("../../shared/provider-model-id-transform", () => ({
|
||||
|
||||
import { tryFallbackRetry } from "./fallback-retry-handler"
|
||||
import { shouldRetryError } from "../../shared/model-error-classifier"
|
||||
import { selectFallbackProvider } from "../../shared/model-error-classifier"
|
||||
import { readProviderModelsCache } from "../../shared"
|
||||
import type { BackgroundTask } from "./types"
|
||||
import type { ConcurrencyManager } from "./concurrency"
|
||||
|
||||
@@ -82,6 +84,8 @@ function createDefaultArgs(taskOverrides: Partial<BackgroundTask> = {}) {
|
||||
describe("tryFallbackRetry", () => {
|
||||
beforeEach(() => {
|
||||
;(shouldRetryError as any).mockImplementation(() => true)
|
||||
;(selectFallbackProvider as any).mockImplementation((providers: string[]) => providers[0])
|
||||
;(readProviderModelsCache as any).mockReturnValue(null)
|
||||
})
|
||||
|
||||
describe("#given retryable error with fallback chain", () => {
|
||||
@@ -267,4 +271,24 @@ describe("tryFallbackRetry", () => {
|
||||
expect(args.task.attemptCount).toBe(2)
|
||||
})
|
||||
})
|
||||
|
||||
describe("#given disconnected fallback providers with connected preferred provider", () => {
|
||||
test("keeps fallback entry and selects connected preferred provider", () => {
|
||||
;(readProviderModelsCache as any).mockReturnValue({ connected: ["provider-a"] })
|
||||
;(selectFallbackProvider as any).mockImplementation(
|
||||
(_providers: string[], preferredProviderID?: string) => preferredProviderID ?? "provider-b",
|
||||
)
|
||||
|
||||
const args = createDefaultArgs({
|
||||
fallbackChain: [{ model: "fallback-model-1", providers: ["provider-b"], variant: undefined }],
|
||||
model: { providerID: "provider-a", modelID: "original-model" },
|
||||
})
|
||||
|
||||
const result = tryFallbackRetry(args)
|
||||
|
||||
expect(result).toBe(true)
|
||||
expect(args.task.model?.providerID).toBe("provider-a")
|
||||
expect(args.task.model?.modelID).toBe("fallback-model-1")
|
||||
})
|
||||
})
|
||||
})
|
||||
|
||||
@@ -35,10 +35,14 @@ export function tryFallbackRetry(args: {
|
||||
const providerModelsCache = readProviderModelsCache()
|
||||
const connectedProviders = providerModelsCache?.connected ?? readConnectedProvidersCache()
|
||||
const connectedSet = connectedProviders ? new Set(connectedProviders.map(p => p.toLowerCase())) : null
|
||||
const preferredProvider = task.model?.providerID?.toLowerCase()
|
||||
|
||||
const isReachable = (entry: FallbackEntry): boolean => {
|
||||
if (!connectedSet) return true
|
||||
return entry.providers.some((p) => connectedSet.has(p.toLowerCase()))
|
||||
if (entry.providers.some((provider) => connectedSet.has(provider.toLowerCase()))) {
|
||||
return true
|
||||
}
|
||||
return preferredProvider ? connectedSet.has(preferredProvider) : false
|
||||
}
|
||||
|
||||
let selectedAttemptCount = attemptCount
|
||||
|
||||
263
src/features/background-agent/loop-detector.test.ts
Normal file
263
src/features/background-agent/loop-detector.test.ts
Normal file
@@ -0,0 +1,263 @@
|
||||
/// <reference types="bun-types" />
|
||||
|
||||
import { describe, expect, test } from "bun:test"
|
||||
import {
|
||||
createToolCallSignature,
|
||||
detectRepetitiveToolUse,
|
||||
recordToolCall,
|
||||
resolveCircuitBreakerSettings,
|
||||
} from "./loop-detector"
|
||||
|
||||
function buildWindow(
|
||||
toolNames: string[],
|
||||
override?: Parameters<typeof resolveCircuitBreakerSettings>[0]
|
||||
) {
|
||||
const settings = resolveCircuitBreakerSettings(override)
|
||||
|
||||
return toolNames.reduce(
|
||||
(window, toolName) => recordToolCall(window, toolName, settings),
|
||||
undefined as ReturnType<typeof recordToolCall> | undefined
|
||||
)
|
||||
}
|
||||
|
||||
function buildWindowWithInputs(
|
||||
calls: Array<{ tool: string; input?: Record<string, unknown> | null }>,
|
||||
override?: Parameters<typeof resolveCircuitBreakerSettings>[0]
|
||||
) {
|
||||
const settings = resolveCircuitBreakerSettings(override)
|
||||
return calls.reduce(
|
||||
(window, { tool, input }) => recordToolCall(window, tool, settings, input),
|
||||
undefined as ReturnType<typeof recordToolCall> | undefined
|
||||
)
|
||||
}
|
||||
|
||||
describe("loop-detector", () => {
|
||||
describe("resolveCircuitBreakerSettings", () => {
|
||||
describe("#given nested circuit breaker config", () => {
|
||||
test("#when resolved #then nested values override defaults", () => {
|
||||
const result = resolveCircuitBreakerSettings({
|
||||
maxToolCalls: 200,
|
||||
circuitBreaker: {
|
||||
maxToolCalls: 120,
|
||||
consecutiveThreshold: 7,
|
||||
},
|
||||
})
|
||||
|
||||
expect(result).toEqual({
|
||||
enabled: true,
|
||||
maxToolCalls: 120,
|
||||
consecutiveThreshold: 7,
|
||||
})
|
||||
})
|
||||
})
|
||||
|
||||
describe("#given no enabled config", () => {
|
||||
test("#when resolved #then enabled defaults to true", () => {
|
||||
const result = resolveCircuitBreakerSettings({
|
||||
circuitBreaker: {
|
||||
maxToolCalls: 100,
|
||||
consecutiveThreshold: 5,
|
||||
},
|
||||
})
|
||||
|
||||
expect(result.enabled).toBe(true)
|
||||
})
|
||||
})
|
||||
|
||||
describe("#given enabled is false in config", () => {
|
||||
test("#when resolved #then enabled is false", () => {
|
||||
const result = resolveCircuitBreakerSettings({
|
||||
circuitBreaker: {
|
||||
enabled: false,
|
||||
maxToolCalls: 100,
|
||||
consecutiveThreshold: 5,
|
||||
},
|
||||
})
|
||||
|
||||
expect(result.enabled).toBe(false)
|
||||
})
|
||||
})
|
||||
|
||||
describe("#given enabled is true in config", () => {
|
||||
test("#when resolved #then enabled is true", () => {
|
||||
const result = resolveCircuitBreakerSettings({
|
||||
circuitBreaker: {
|
||||
enabled: true,
|
||||
maxToolCalls: 100,
|
||||
consecutiveThreshold: 5,
|
||||
},
|
||||
})
|
||||
|
||||
expect(result.enabled).toBe(true)
|
||||
})
|
||||
})
|
||||
})
|
||||
|
||||
describe("createToolCallSignature", () => {
|
||||
test("#given tool with input #when signature created #then includes tool and sorted input", () => {
|
||||
const result = createToolCallSignature("read", { filePath: "/a.ts" })
|
||||
|
||||
expect(result).toBe('read::{"filePath":"/a.ts"}')
|
||||
})
|
||||
|
||||
test("#given tool with undefined input #when signature created #then returns bare tool name", () => {
|
||||
const result = createToolCallSignature("read", undefined)
|
||||
|
||||
expect(result).toBe("read")
|
||||
})
|
||||
|
||||
test("#given tool with null input #when signature created #then returns bare tool name", () => {
|
||||
const result = createToolCallSignature("read", null)
|
||||
|
||||
expect(result).toBe("read")
|
||||
})
|
||||
|
||||
test("#given tool with empty object input #when signature created #then returns bare tool name", () => {
|
||||
const result = createToolCallSignature("read", {})
|
||||
|
||||
expect(result).toBe("read")
|
||||
})
|
||||
|
||||
test("#given same input different key order #when signatures compared #then they are equal", () => {
|
||||
const first = createToolCallSignature("read", { filePath: "/a.ts", offset: 0 })
|
||||
const second = createToolCallSignature("read", { offset: 0, filePath: "/a.ts" })
|
||||
|
||||
expect(first).toBe(second)
|
||||
})
|
||||
})
|
||||
|
||||
describe("detectRepetitiveToolUse", () => {
|
||||
describe("#given recent tools are diverse", () => {
|
||||
test("#when evaluated #then it does not trigger", () => {
|
||||
const window = buildWindow([
|
||||
"read",
|
||||
"grep",
|
||||
"edit",
|
||||
"bash",
|
||||
"read",
|
||||
"glob",
|
||||
"lsp_diagnostics",
|
||||
"read",
|
||||
"grep",
|
||||
"edit",
|
||||
])
|
||||
|
||||
const result = detectRepetitiveToolUse(window)
|
||||
|
||||
expect(result.triggered).toBe(false)
|
||||
})
|
||||
})
|
||||
|
||||
describe("#given the same tool is called consecutively", () => {
|
||||
test("#when evaluated #then it triggers", () => {
|
||||
const window = buildWindowWithInputs(
|
||||
Array.from({ length: 20 }, () => ({
|
||||
tool: "read",
|
||||
input: { filePath: "/src/same.ts" },
|
||||
}))
|
||||
)
|
||||
|
||||
const result = detectRepetitiveToolUse(window)
|
||||
|
||||
expect(result).toEqual({
|
||||
triggered: true,
|
||||
toolName: "read",
|
||||
repeatedCount: 20,
|
||||
})
|
||||
})
|
||||
})
|
||||
|
||||
describe("#given consecutive calls are interrupted by different tool", () => {
|
||||
test("#when evaluated #then it does not trigger", () => {
|
||||
const window = buildWindow([
|
||||
...Array.from({ length: 19 }, () => "read"),
|
||||
"edit",
|
||||
"read",
|
||||
])
|
||||
|
||||
const result = detectRepetitiveToolUse(window)
|
||||
|
||||
expect(result).toEqual({ triggered: false })
|
||||
})
|
||||
})
|
||||
|
||||
describe("#given threshold boundary", () => {
|
||||
test("#when below threshold #then it does not trigger", () => {
|
||||
const belowThresholdWindow = buildWindowWithInputs(
|
||||
Array.from({ length: 19 }, () => ({
|
||||
tool: "read",
|
||||
input: { filePath: "/src/same.ts" },
|
||||
}))
|
||||
)
|
||||
|
||||
const result = detectRepetitiveToolUse(belowThresholdWindow)
|
||||
|
||||
expect(result).toEqual({ triggered: false })
|
||||
})
|
||||
|
||||
test("#when equal to threshold #then it triggers", () => {
|
||||
const atThresholdWindow = buildWindowWithInputs(
|
||||
Array.from({ length: 20 }, () => ({
|
||||
tool: "read",
|
||||
input: { filePath: "/src/same.ts" },
|
||||
}))
|
||||
)
|
||||
|
||||
const result = detectRepetitiveToolUse(atThresholdWindow)
|
||||
|
||||
expect(result).toEqual({
|
||||
triggered: true,
|
||||
toolName: "read",
|
||||
repeatedCount: 20,
|
||||
})
|
||||
})
|
||||
})
|
||||
|
||||
describe("#given same tool with different file inputs", () => {
|
||||
test("#when evaluated #then it does not trigger", () => {
|
||||
const calls = Array.from({ length: 20 }, (_, i) => ({
|
||||
tool: "read",
|
||||
input: { filePath: `/src/file-${i}.ts` },
|
||||
}))
|
||||
const window = buildWindowWithInputs(calls)
|
||||
const result = detectRepetitiveToolUse(window)
|
||||
expect(result.triggered).toBe(false)
|
||||
})
|
||||
})
|
||||
|
||||
describe("#given same tool with identical file inputs", () => {
|
||||
test("#when evaluated #then it triggers with bare tool name", () => {
|
||||
const calls = Array.from({ length: 20 }, () => ({
|
||||
tool: "read",
|
||||
input: { filePath: "/src/same.ts" },
|
||||
}))
|
||||
const window = buildWindowWithInputs(calls)
|
||||
const result = detectRepetitiveToolUse(window)
|
||||
expect(result).toEqual({
|
||||
triggered: true,
|
||||
toolName: "read",
|
||||
repeatedCount: 20,
|
||||
})
|
||||
})
|
||||
})
|
||||
|
||||
describe("#given tool calls with undefined input", () => {
|
||||
test("#when evaluated #then it does not trigger", () => {
|
||||
const calls = Array.from({ length: 20 }, () => ({ tool: "read" }))
|
||||
const window = buildWindowWithInputs(calls)
|
||||
const result = detectRepetitiveToolUse(window)
|
||||
expect(result).toEqual({ triggered: false })
|
||||
})
|
||||
})
|
||||
|
||||
describe("#given tool calls with null input", () => {
|
||||
test("#when evaluated #then it does not trigger", () => {
|
||||
const calls = Array.from({ length: 20 }, () => ({ tool: "read", input: null }))
|
||||
const window = buildWindowWithInputs(calls)
|
||||
const result = detectRepetitiveToolUse(window)
|
||||
|
||||
expect(result).toEqual({ triggered: false })
|
||||
})
|
||||
})
|
||||
})
|
||||
})
|
||||
102
src/features/background-agent/loop-detector.ts
Normal file
102
src/features/background-agent/loop-detector.ts
Normal file
@@ -0,0 +1,102 @@
|
||||
import type { BackgroundTaskConfig } from "../../config/schema"
|
||||
import {
|
||||
DEFAULT_CIRCUIT_BREAKER_ENABLED,
|
||||
DEFAULT_CIRCUIT_BREAKER_CONSECUTIVE_THRESHOLD,
|
||||
DEFAULT_MAX_TOOL_CALLS,
|
||||
} from "./constants"
|
||||
import type { ToolCallWindow } from "./types"
|
||||
|
||||
export interface CircuitBreakerSettings {
|
||||
enabled: boolean
|
||||
maxToolCalls: number
|
||||
consecutiveThreshold: number
|
||||
}
|
||||
|
||||
export interface ToolLoopDetectionResult {
|
||||
triggered: boolean
|
||||
toolName?: string
|
||||
repeatedCount?: number
|
||||
}
|
||||
|
||||
export function resolveCircuitBreakerSettings(
|
||||
config?: BackgroundTaskConfig
|
||||
): CircuitBreakerSettings {
|
||||
return {
|
||||
enabled: config?.circuitBreaker?.enabled ?? DEFAULT_CIRCUIT_BREAKER_ENABLED,
|
||||
maxToolCalls:
|
||||
config?.circuitBreaker?.maxToolCalls ?? config?.maxToolCalls ?? DEFAULT_MAX_TOOL_CALLS,
|
||||
consecutiveThreshold:
|
||||
config?.circuitBreaker?.consecutiveThreshold ?? DEFAULT_CIRCUIT_BREAKER_CONSECUTIVE_THRESHOLD,
|
||||
}
|
||||
}
|
||||
|
||||
export function recordToolCall(
|
||||
window: ToolCallWindow | undefined,
|
||||
toolName: string,
|
||||
settings: CircuitBreakerSettings,
|
||||
toolInput?: Record<string, unknown> | null
|
||||
): ToolCallWindow {
|
||||
if (toolInput === undefined || toolInput === null) {
|
||||
return {
|
||||
lastSignature: `${toolName}::__unknown-input__`,
|
||||
consecutiveCount: 1,
|
||||
threshold: settings.consecutiveThreshold,
|
||||
}
|
||||
}
|
||||
|
||||
const signature = createToolCallSignature(toolName, toolInput)
|
||||
|
||||
if (window && window.lastSignature === signature) {
|
||||
return {
|
||||
lastSignature: signature,
|
||||
consecutiveCount: window.consecutiveCount + 1,
|
||||
threshold: settings.consecutiveThreshold,
|
||||
}
|
||||
}
|
||||
|
||||
return {
|
||||
lastSignature: signature,
|
||||
consecutiveCount: 1,
|
||||
threshold: settings.consecutiveThreshold,
|
||||
}
|
||||
}
|
||||
|
||||
function sortObject(obj: unknown): unknown {
|
||||
if (obj === null || obj === undefined) return obj
|
||||
if (typeof obj !== "object") return obj
|
||||
if (Array.isArray(obj)) return obj.map(sortObject)
|
||||
|
||||
const sorted: Record<string, unknown> = {}
|
||||
const keys = Object.keys(obj as Record<string, unknown>).sort()
|
||||
for (const key of keys) {
|
||||
sorted[key] = sortObject((obj as Record<string, unknown>)[key])
|
||||
}
|
||||
return sorted
|
||||
}
|
||||
|
||||
export function createToolCallSignature(
|
||||
toolName: string,
|
||||
toolInput?: Record<string, unknown> | null
|
||||
): string {
|
||||
if (toolInput === undefined || toolInput === null) {
|
||||
return toolName
|
||||
}
|
||||
if (Object.keys(toolInput).length === 0) {
|
||||
return toolName
|
||||
}
|
||||
return `${toolName}::${JSON.stringify(sortObject(toolInput))}`
|
||||
}
|
||||
|
||||
export function detectRepetitiveToolUse(
|
||||
window: ToolCallWindow | undefined
|
||||
): ToolLoopDetectionResult {
|
||||
if (!window || window.consecutiveCount < window.threshold) {
|
||||
return { triggered: false }
|
||||
}
|
||||
|
||||
return {
|
||||
triggered: true,
|
||||
toolName: window.lastSignature.split("::")[0],
|
||||
repeatedCount: window.consecutiveCount,
|
||||
}
|
||||
}
|
||||
389
src/features/background-agent/manager-circuit-breaker.test.ts
Normal file
389
src/features/background-agent/manager-circuit-breaker.test.ts
Normal file
@@ -0,0 +1,389 @@
|
||||
/// <reference types="bun-types" />
|
||||
|
||||
import { describe, expect, test } from "bun:test"
|
||||
import type { PluginInput } from "@opencode-ai/plugin"
|
||||
import { tmpdir } from "node:os"
|
||||
import type { BackgroundTaskConfig } from "../../config/schema"
|
||||
import { BackgroundManager } from "./manager"
|
||||
import type { BackgroundTask } from "./types"
|
||||
|
||||
function createManager(config?: BackgroundTaskConfig): BackgroundManager {
|
||||
const client = {
|
||||
session: {
|
||||
prompt: async () => ({}),
|
||||
promptAsync: async () => ({}),
|
||||
abort: async () => ({}),
|
||||
},
|
||||
}
|
||||
|
||||
const manager = new BackgroundManager({ client, directory: tmpdir() } as unknown as PluginInput, config)
|
||||
const testManager = manager as unknown as {
|
||||
enqueueNotificationForParent: (sessionID: string, fn: () => Promise<void>) => Promise<void>
|
||||
notifyParentSession: (task: BackgroundTask) => Promise<void>
|
||||
tasks: Map<string, BackgroundTask>
|
||||
}
|
||||
|
||||
testManager.enqueueNotificationForParent = async (_sessionID, fn) => {
|
||||
await fn()
|
||||
}
|
||||
testManager.notifyParentSession = async () => {}
|
||||
|
||||
return manager
|
||||
}
|
||||
|
||||
function getTaskMap(manager: BackgroundManager): Map<string, BackgroundTask> {
|
||||
return (manager as unknown as { tasks: Map<string, BackgroundTask> }).tasks
|
||||
}
|
||||
|
||||
async function flushAsyncWork() {
|
||||
await new Promise(resolve => setTimeout(resolve, 0))
|
||||
}
|
||||
|
||||
describe("BackgroundManager circuit breaker", () => {
|
||||
// Flat-format events put the tool name directly on `properties` and carry
// no state.input, so the detector has nothing to compare: even 20 identical
// events at threshold 20 must not cancel the task.
describe("#given flat-format tool events have no state.input", () => {
  test("#when 20 consecutive read events arrive #then the task keeps running", async () => {
    const manager = createManager({
      circuitBreaker: {
        consecutiveThreshold: 20,
      },
    })
    // Inject a running task straight into the manager's private task map.
    const task: BackgroundTask = {
      id: "task-loop-1",
      sessionID: "session-loop-1",
      parentSessionID: "parent-1",
      parentMessageID: "msg-1",
      description: "Looping task",
      prompt: "loop",
      agent: "explore",
      status: "running",
      startedAt: new Date(Date.now() - 60_000),
      progress: {
        toolCalls: 0,
        lastUpdate: new Date(Date.now() - 60_000),
      },
    }
    getTaskMap(manager).set(task.id, task)

    // Replay 20 flat-format tool events for the task's session.
    for (let i = 0; i < 20; i++) {
      manager.handleEvent({
        type: "message.part.updated",
        properties: { sessionID: task.sessionID, type: "tool", tool: "read" },
      })
    }

    await flushAsyncWork()

    expect(task.status).toBe("running")
    expect(task.progress?.toolCalls).toBe(20)
  })
})
|
||||
|
||||
// A healthy mix of tools (no long identical streak) must never trip the
// repetition detector, even once the call count equals the threshold.
describe("#given recent tool calls are diverse", () => {
  test("#when the window fills #then the task keeps running", async () => {
    const manager = createManager({
      circuitBreaker: {
        consecutiveThreshold: 10,
      },
    })
    const task: BackgroundTask = {
      id: "task-diverse-1",
      sessionID: "session-diverse-1",
      parentSessionID: "parent-1",
      parentMessageID: "msg-1",
      description: "Healthy task",
      prompt: "work",
      agent: "explore",
      status: "running",
      startedAt: new Date(Date.now() - 60_000),
      progress: {
        toolCalls: 0,
        lastUpdate: new Date(Date.now() - 60_000),
      },
    }
    getTaskMap(manager).set(task.id, task)

    // 10 events total, but no two adjacent events share a signature streak
    // long enough to matter.
    for (const toolName of [
      "read",
      "grep",
      "edit",
      "bash",
      "glob",
      "read",
      "lsp_diagnostics",
      "grep",
      "edit",
      "read",
    ]) {
      manager.handleEvent({
        type: "message.part.updated",
        properties: { sessionID: task.sessionID, type: "tool", tool: toolName },
      })
    }

    await flushAsyncWork()

    expect(task.status).toBe("running")
    expect(task.progress?.toolCalls).toBe(10)
  })
})
|
||||
|
||||
// Even when the repetition threshold (95) can never be reached, the
// absolute maxToolCalls cap (3) must still cancel the task on its own.
describe("#given the absolute cap is configured lower than the repetition detector needs", () => {
  test("#when repeated flat-format tool events reach maxToolCalls #then the backstop still cancels the task", async () => {
    const manager = createManager({
      maxToolCalls: 3,
      circuitBreaker: {
        consecutiveThreshold: 95,
      },
    })
    const task: BackgroundTask = {
      id: "task-cap-1",
      sessionID: "session-cap-1",
      parentSessionID: "parent-1",
      parentMessageID: "msg-1",
      description: "Backstop task",
      prompt: "work",
      agent: "explore",
      status: "running",
      startedAt: new Date(Date.now() - 60_000),
      progress: {
        toolCalls: 0,
        lastUpdate: new Date(Date.now() - 60_000),
      },
    }
    getTaskMap(manager).set(task.id, task)

    // Exactly enough events to hit the cap.
    for (let i = 0; i < 3; i++) {
      manager.handleEvent({
        type: "message.part.updated",
        properties: { sessionID: task.sessionID, type: "tool", tool: "read" },
      })
    }

    await flushAsyncWork()

    expect(task.status).toBe("cancelled")
    expect(task.error).toContain("maximum tool call limit (3)")
  })
})
|
||||
|
||||
// A single tool part can emit several "running" updates; the manager must
// count the part once (tracked via countedToolPartIDs) so duplicates do not
// inflate the tool-call count toward the cap.
describe("#given the same running tool part emits multiple updates", () => {
  test("#when duplicate running updates arrive #then it only counts the tool once", async () => {
    const manager = createManager({
      maxToolCalls: 2,
      circuitBreaker: {
        consecutiveThreshold: 5,
      },
    })
    const task: BackgroundTask = {
      id: "task-dedupe-1",
      sessionID: "session-dedupe-1",
      parentSessionID: "parent-1",
      parentMessageID: "msg-1",
      description: "Dedupe task",
      prompt: "work",
      agent: "explore",
      status: "running",
      startedAt: new Date(Date.now() - 60_000),
      progress: {
        toolCalls: 0,
        lastUpdate: new Date(Date.now() - 60_000),
      },
    }
    getTaskMap(manager).set(task.id, task)

    // Three updates for the SAME part id, all still in "running" state.
    for (let index = 0; index < 3; index += 1) {
      manager.handleEvent({
        type: "message.part.updated",
        properties: {
          part: {
            id: "tool-1",
            sessionID: task.sessionID,
            type: "tool",
            tool: "bash",
            state: { status: "running" },
          },
        },
      })
    }

    await flushAsyncWork()

    expect(task.status).toBe("running")
    expect(task.progress?.toolCalls).toBe(1)
    expect(task.progress?.countedToolPartIDs).toEqual(new Set(["tool-1"]))
  })
})
|
||||
|
||||
// Same tool but a different input each time: the (tool, input) signatures
// differ, so the streak resets on every event and the task must survive.
describe("#given same tool reading different files", () => {
  test("#when tool events arrive with state.input #then task keeps running", async () => {
    const manager = createManager({
      circuitBreaker: {
        consecutiveThreshold: 20,
      },
    })
    const task: BackgroundTask = {
      id: "task-diff-files-1",
      sessionID: "session-diff-files-1",
      parentSessionID: "parent-1",
      parentMessageID: "msg-1",
      description: "Reading different files",
      prompt: "work",
      agent: "explore",
      status: "running",
      startedAt: new Date(Date.now() - 60_000),
      progress: {
        toolCalls: 0,
        lastUpdate: new Date(Date.now() - 60_000),
      },
    }
    getTaskMap(manager).set(task.id, task)

    // Each event reads a distinct file path, producing distinct signatures.
    for (let i = 0; i < 20; i++) {
      manager.handleEvent({
        type: "message.part.updated",
        properties: {
          part: {
            sessionID: task.sessionID,
            type: "tool",
            tool: "read",
            state: { status: "running", input: { filePath: `/src/file-${i}.ts` } },
          },
        },
      })
    }

    await flushAsyncWork()

    expect(task.status).toBe("running")
    expect(task.progress?.toolCalls).toBe(20)
  })
})
|
||||
|
||||
// Identical tool + identical input 20 times at threshold 20: this is the
// true loop case — the task must be cancelled, and the error message must
// name the bare tool (no "::signature" suffix leaking through).
describe("#given same tool reading same file repeatedly", () => {
  test("#when tool events arrive with state.input #then task is cancelled with bare tool name in error", async () => {
    const manager = createManager({
      circuitBreaker: {
        consecutiveThreshold: 20,
      },
    })
    const task: BackgroundTask = {
      id: "task-same-file-1",
      sessionID: "session-same-file-1",
      parentSessionID: "parent-1",
      parentMessageID: "msg-1",
      description: "Reading same file repeatedly",
      prompt: "work",
      agent: "explore",
      status: "running",
      startedAt: new Date(Date.now() - 60_000),
      progress: {
        toolCalls: 0,
        lastUpdate: new Date(Date.now() - 60_000),
      },
    }
    getTaskMap(manager).set(task.id, task)

    // Every event carries the exact same input payload.
    for (let i = 0; i < 20; i++) {
      manager.handleEvent({
        type: "message.part.updated",
        properties: {
          part: {
            sessionID: task.sessionID,
            type: "tool",
            tool: "read",
            state: { status: "running", input: { filePath: "/src/same.ts" } },
          },
        },
      })
    }

    await flushAsyncWork()

    expect(task.status).toBe("cancelled")
    expect(task.error).toContain("read 20 consecutive times")
    expect(task.error).not.toContain("::")
  })
})
|
||||
|
||||
// With the breaker explicitly disabled, repetitive events must be ignored
// by the loop detector entirely.
describe("#given circuit breaker enabled is false", () => {
  test("#when repetitive tools arrive #then task keeps running", async () => {
    const manager = createManager({
      circuitBreaker: {
        enabled: false,
        consecutiveThreshold: 20,
      },
    })
    const task: BackgroundTask = {
      id: "task-disabled-1",
      sessionID: "session-disabled-1",
      parentSessionID: "parent-1",
      parentMessageID: "msg-1",
      description: "Disabled circuit breaker task",
      prompt: "work",
      agent: "explore",
      status: "running",
      startedAt: new Date(Date.now() - 60_000),
      progress: {
        toolCalls: 0,
        lastUpdate: new Date(Date.now() - 60_000),
      },
    }
    getTaskMap(manager).set(task.id, task)

    for (let i = 0; i < 20; i++) {
      manager.handleEvent({
        type: "message.part.updated",
        properties: {
          sessionID: task.sessionID,
          type: "tool",
          tool: "read",
        },
      })
    }

    await flushAsyncWork()

    expect(task.status).toBe("running")
  })
})
|
||||
|
||||
// Disabling the breaker turns off only the repetition detector; the
// absolute maxToolCalls cap remains active and must still cancel the task.
describe("#given circuit breaker enabled is false but absolute cap is low", () => {
  test("#when max tool calls exceeded #then task is still cancelled by absolute cap", async () => {
    const manager = createManager({
      maxToolCalls: 3,
      circuitBreaker: {
        enabled: false,
        consecutiveThreshold: 95,
      },
    })
    const task: BackgroundTask = {
      id: "task-cap-disabled-1",
      sessionID: "session-cap-disabled-1",
      parentSessionID: "parent-1",
      parentMessageID: "msg-1",
      description: "Backstop task with disabled circuit breaker",
      prompt: "work",
      agent: "explore",
      status: "running",
      startedAt: new Date(Date.now() - 60_000),
      progress: {
        toolCalls: 0,
        lastUpdate: new Date(Date.now() - 60_000),
      },
    }
    getTaskMap(manager).set(task.id, task)

    // Three distinct tools — no repetition — but the cap of 3 is reached.
    for (const toolName of ["read", "grep", "edit"]) {
      manager.handleEvent({
        type: "message.part.updated",
        properties: { sessionID: task.sessionID, type: "tool", tool: toolName },
      })
    }

    await flushAsyncWork()

    expect(task.status).toBe("cancelled")
    expect(task.error).toContain("maximum tool call limit (3)")
  })
})
|
||||
})
|
||||
@@ -153,4 +153,42 @@ describe("BackgroundManager pollRunningTasks", () => {
|
||||
expect(task.status).toBe("running")
|
||||
})
|
||||
})
|
||||
|
||||
describe("#given a running task whose session has terminal non-idle status", () => {
|
||||
test('#when session status is "interrupted" #then completes the task', async () => {
|
||||
//#given
|
||||
const manager = createManagerWithClient({
|
||||
status: async () => ({ data: { "ses-interrupted": { type: "interrupted" } } }),
|
||||
})
|
||||
const task = createRunningTask("ses-interrupted")
|
||||
injectTask(manager, task)
|
||||
|
||||
//#when
|
||||
const poll = (manager as unknown as { pollRunningTasks: () => Promise<void> }).pollRunningTasks
|
||||
await poll.call(manager)
|
||||
manager.shutdown()
|
||||
|
||||
//#then
|
||||
expect(task.status).toBe("completed")
|
||||
expect(task.completedAt).toBeDefined()
|
||||
})
|
||||
|
||||
test('#when session status is an unknown type #then completes the task', async () => {
|
||||
//#given
|
||||
const manager = createManagerWithClient({
|
||||
status: async () => ({ data: { "ses-unknown": { type: "some-weird-status" } } }),
|
||||
})
|
||||
const task = createRunningTask("ses-unknown")
|
||||
injectTask(manager, task)
|
||||
|
||||
//#when
|
||||
const poll = (manager as unknown as { pollRunningTasks: () => Promise<void> }).pollRunningTasks
|
||||
await poll.call(manager)
|
||||
manager.shutdown()
|
||||
|
||||
//#then
|
||||
expect(task.status).toBe("completed")
|
||||
expect(task.completedAt).toBeDefined()
|
||||
})
|
||||
})
|
||||
})
|
||||
@@ -1,5 +1,5 @@
|
||||
declare const require: (name: string) => any
|
||||
const { describe, test, expect, beforeEach, afterEach } = require("bun:test")
|
||||
const { describe, test, expect, beforeEach, afterEach, spyOn } = require("bun:test")
|
||||
import { tmpdir } from "node:os"
|
||||
import type { PluginInput } from "@opencode-ai/plugin"
|
||||
import type { BackgroundTask, ResumeInput } from "./types"
|
||||
@@ -1806,9 +1806,9 @@ describe("BackgroundManager - Non-blocking Queue Integration", () => {
|
||||
expect(task.sessionID).toBeUndefined()
|
||||
})
|
||||
|
||||
test("should return immediately even with concurrency limit", async () => {
|
||||
// given
|
||||
const config = { defaultConcurrency: 1 }
|
||||
test("should return immediately even with concurrency limit", async () => {
|
||||
// given
|
||||
const config = { defaultConcurrency: 1 }
|
||||
manager.shutdown()
|
||||
manager = new BackgroundManager({ client: mockClient, directory: tmpdir() } as unknown as PluginInput, config)
|
||||
|
||||
@@ -1828,9 +1828,76 @@ describe("BackgroundManager - Non-blocking Queue Integration", () => {
|
||||
|
||||
// then
|
||||
expect(endTime - startTime).toBeLessThan(100) // Should be instant
|
||||
expect(task1.status).toBe("pending")
|
||||
expect(task2.status).toBe("pending")
|
||||
expect(task1.status).toBe("pending")
|
||||
expect(task2.status).toBe("pending")
|
||||
})
|
||||
|
||||
test("should keep agent when launch has model and keep agent without model", async () => {
|
||||
// given
|
||||
const promptBodies: Array<Record<string, unknown>> = []
|
||||
let resolveFirstPromptStarted: (() => void) | undefined
|
||||
let resolveSecondPromptStarted: (() => void) | undefined
|
||||
const firstPromptStarted = new Promise<void>((resolve) => {
|
||||
resolveFirstPromptStarted = resolve
|
||||
})
|
||||
const secondPromptStarted = new Promise<void>((resolve) => {
|
||||
resolveSecondPromptStarted = resolve
|
||||
})
|
||||
const customClient = {
|
||||
session: {
|
||||
create: async (_args?: unknown) => ({ data: { id: `ses_${crypto.randomUUID()}` } }),
|
||||
get: async () => ({ data: { directory: "/test/dir" } }),
|
||||
prompt: async () => ({}),
|
||||
promptAsync: async (args: { path: { id: string }; body: Record<string, unknown> }) => {
|
||||
promptBodies.push(args.body)
|
||||
if (promptBodies.length === 1) {
|
||||
resolveFirstPromptStarted?.()
|
||||
}
|
||||
if (promptBodies.length === 2) {
|
||||
resolveSecondPromptStarted?.()
|
||||
}
|
||||
return {}
|
||||
},
|
||||
messages: async () => ({ data: [] }),
|
||||
todo: async () => ({ data: [] }),
|
||||
status: async () => ({ data: {} }),
|
||||
abort: async () => ({}),
|
||||
},
|
||||
}
|
||||
manager.shutdown()
|
||||
manager = new BackgroundManager({ client: customClient, directory: tmpdir() } as unknown as PluginInput)
|
||||
|
||||
const launchInputWithModel = {
|
||||
description: "Test task with model",
|
||||
prompt: "Do something",
|
||||
agent: "test-agent",
|
||||
parentSessionID: "parent-session",
|
||||
parentMessageID: "parent-message",
|
||||
model: { providerID: "anthropic", modelID: "claude-opus-4-6" },
|
||||
}
|
||||
const launchInputWithoutModel = {
|
||||
description: "Test task without model",
|
||||
prompt: "Do something else",
|
||||
agent: "test-agent",
|
||||
parentSessionID: "parent-session",
|
||||
parentMessageID: "parent-message",
|
||||
}
|
||||
|
||||
// when
|
||||
const taskWithModel = await manager.launch(launchInputWithModel)
|
||||
await firstPromptStarted
|
||||
const taskWithoutModel = await manager.launch(launchInputWithoutModel)
|
||||
await secondPromptStarted
|
||||
|
||||
// then
|
||||
expect(taskWithModel.status).toBe("pending")
|
||||
expect(taskWithoutModel.status).toBe("pending")
|
||||
expect(promptBodies).toHaveLength(2)
|
||||
expect(promptBodies[0].model).toEqual({ providerID: "anthropic", modelID: "claude-opus-4-6" })
|
||||
expect(promptBodies[0].agent).toBe("test-agent")
|
||||
expect(promptBodies[1].agent).toBe("test-agent")
|
||||
expect("model" in promptBodies[1]).toBe(false)
|
||||
})
|
||||
|
||||
test("should queue multiple tasks without blocking", async () => {
|
||||
// given
|
||||
@@ -2781,6 +2848,18 @@ describe("BackgroundManager - Non-blocking Queue Integration", () => {
|
||||
})
|
||||
|
||||
describe("BackgroundManager.checkAndInterruptStaleTasks", () => {
|
||||
const originalDateNow = Date.now
|
||||
let fixedTime: number
|
||||
|
||||
beforeEach(() => {
|
||||
fixedTime = Date.now()
|
||||
spyOn(globalThis.Date, "now").mockReturnValue(fixedTime)
|
||||
})
|
||||
|
||||
afterEach(() => {
|
||||
Date.now = originalDateNow
|
||||
})
|
||||
|
||||
test("should NOT interrupt task running less than 30 seconds (min runtime guard)", async () => {
|
||||
const client = {
|
||||
session: {
|
||||
@@ -3027,10 +3106,10 @@ describe("BackgroundManager.checkAndInterruptStaleTasks", () => {
|
||||
prompt: "Test",
|
||||
agent: "test-agent",
|
||||
status: "running",
|
||||
startedAt: new Date(Date.now() - 300_000),
|
||||
startedAt: new Date(Date.now() - 50 * 60 * 1000),
|
||||
progress: {
|
||||
toolCalls: 1,
|
||||
lastUpdate: new Date(Date.now() - 200_000),
|
||||
lastUpdate: new Date(Date.now() - 46 * 60 * 1000),
|
||||
},
|
||||
}
|
||||
|
||||
@@ -4673,6 +4752,53 @@ describe("BackgroundManager - tool permission spread order", () => {
|
||||
manager.shutdown()
|
||||
})
|
||||
|
||||
test("startTask keeps agent when explicit model is configured", async () => {
|
||||
//#given
|
||||
const promptCalls: Array<{ path: { id: string }; body: Record<string, unknown> }> = []
|
||||
const client = {
|
||||
session: {
|
||||
get: async () => ({ data: { directory: "/test/dir" } }),
|
||||
create: async () => ({ data: { id: "session-1" } }),
|
||||
promptAsync: async (args: { path: { id: string }; body: Record<string, unknown> }) => {
|
||||
promptCalls.push(args)
|
||||
return {}
|
||||
},
|
||||
},
|
||||
}
|
||||
const manager = new BackgroundManager({ client, directory: tmpdir() } as unknown as PluginInput)
|
||||
const task: BackgroundTask = {
|
||||
id: "task-explicit-model",
|
||||
status: "pending",
|
||||
queuedAt: new Date(),
|
||||
description: "test task",
|
||||
prompt: "test prompt",
|
||||
agent: "sisyphus-junior",
|
||||
parentSessionID: "parent-session",
|
||||
parentMessageID: "parent-message",
|
||||
model: { providerID: "openai", modelID: "gpt-5.4", variant: "medium" },
|
||||
}
|
||||
const input: import("./types").LaunchInput = {
|
||||
description: task.description,
|
||||
prompt: task.prompt,
|
||||
agent: task.agent,
|
||||
parentSessionID: task.parentSessionID,
|
||||
parentMessageID: task.parentMessageID,
|
||||
model: task.model,
|
||||
}
|
||||
|
||||
//#when
|
||||
await (manager as unknown as { startTask: (item: { task: BackgroundTask; input: import("./types").LaunchInput }) => Promise<void> })
|
||||
.startTask({ task, input })
|
||||
|
||||
//#then
|
||||
expect(promptCalls).toHaveLength(1)
|
||||
expect(promptCalls[0].body.agent).toBe("sisyphus-junior")
|
||||
expect(promptCalls[0].body.model).toEqual({ providerID: "openai", modelID: "gpt-5.4" })
|
||||
expect(promptCalls[0].body.variant).toBe("medium")
|
||||
|
||||
manager.shutdown()
|
||||
})
|
||||
|
||||
test("resume respects explore agent restrictions", async () => {
|
||||
//#given
|
||||
let capturedTools: Record<string, unknown> | undefined
|
||||
@@ -4717,4 +4843,48 @@ describe("BackgroundManager - tool permission spread order", () => {
|
||||
|
||||
manager.shutdown()
|
||||
})
|
||||
|
||||
test("resume keeps agent when explicit model is configured", async () => {
|
||||
//#given
|
||||
let promptCall: { path: { id: string }; body: Record<string, unknown> } | undefined
|
||||
const client = {
|
||||
session: {
|
||||
promptAsync: async (args: { path: { id: string }; body: Record<string, unknown> }) => {
|
||||
promptCall = args
|
||||
return {}
|
||||
},
|
||||
abort: async () => ({}),
|
||||
},
|
||||
}
|
||||
const manager = new BackgroundManager({ client, directory: tmpdir() } as unknown as PluginInput)
|
||||
const task: BackgroundTask = {
|
||||
id: "task-explicit-model-resume",
|
||||
sessionID: "session-3",
|
||||
parentSessionID: "parent-session",
|
||||
parentMessageID: "parent-message",
|
||||
description: "resume task",
|
||||
prompt: "resume prompt",
|
||||
agent: "explore",
|
||||
status: "completed",
|
||||
startedAt: new Date(),
|
||||
completedAt: new Date(),
|
||||
model: { providerID: "anthropic", modelID: "claude-sonnet-4-20250514" },
|
||||
}
|
||||
getTaskMap(manager).set(task.id, task)
|
||||
|
||||
//#when
|
||||
await manager.resume({
|
||||
sessionId: "session-3",
|
||||
prompt: "continue",
|
||||
parentSessionID: "parent-session",
|
||||
parentMessageID: "parent-message",
|
||||
})
|
||||
|
||||
//#then
|
||||
expect(promptCall).toBeDefined()
|
||||
expect(promptCall?.body.agent).toBe("explore")
|
||||
expect(promptCall?.body.model).toEqual({ providerID: "anthropic", modelID: "claude-sonnet-4-20250514" })
|
||||
|
||||
manager.shutdown()
|
||||
})
|
||||
})
|
||||
|
||||
@@ -27,6 +27,7 @@ import {
|
||||
import {
|
||||
POLLING_INTERVAL_MS,
|
||||
TASK_CLEANUP_DELAY_MS,
|
||||
TASK_TTL_MS,
|
||||
} from "./constants"
|
||||
|
||||
import { subagentSessions } from "../claude-code-session-state"
|
||||
@@ -51,6 +52,13 @@ import { join } from "node:path"
|
||||
import { pruneStaleTasksAndNotifications } from "./task-poller"
|
||||
import { checkAndInterruptStaleTasks } from "./task-poller"
|
||||
import { removeTaskToastTracking } from "./remove-task-toast-tracking"
|
||||
import { isActiveSessionStatus, isTerminalSessionStatus } from "./session-status-classifier"
|
||||
import {
|
||||
detectRepetitiveToolUse,
|
||||
recordToolCall,
|
||||
resolveCircuitBreakerSettings,
|
||||
type CircuitBreakerSettings,
|
||||
} from "./loop-detector"
|
||||
import {
|
||||
createSubagentDepthLimitError,
|
||||
createSubagentDescendantLimitError,
|
||||
@@ -64,9 +72,11 @@ type OpencodeClient = PluginInput["client"]
|
||||
|
||||
|
||||
interface MessagePartInfo {
|
||||
id?: string
|
||||
sessionID?: string
|
||||
type?: string
|
||||
tool?: string
|
||||
state?: { status?: string; input?: Record<string, unknown> }
|
||||
}
|
||||
|
||||
interface EventProperties {
|
||||
@@ -80,6 +90,19 @@ interface Event {
|
||||
properties?: EventProperties
|
||||
}
|
||||
|
||||
function resolveMessagePartInfo(properties: EventProperties | undefined): MessagePartInfo | undefined {
|
||||
if (!properties || typeof properties !== "object") {
|
||||
return undefined
|
||||
}
|
||||
|
||||
const nestedPart = properties.part
|
||||
if (nestedPart && typeof nestedPart === "object") {
|
||||
return nestedPart as MessagePartInfo
|
||||
}
|
||||
|
||||
return properties as MessagePartInfo
|
||||
}
|
||||
|
||||
interface Todo {
|
||||
content: string
|
||||
status: string
|
||||
@@ -100,6 +123,8 @@ export interface SubagentSessionCreatedEvent {
|
||||
|
||||
export type OnSubagentSessionCreated = (event: SubagentSessionCreatedEvent) => Promise<void>
|
||||
|
||||
const MAX_TASK_REMOVAL_RESCHEDULES = 6
|
||||
|
||||
export class BackgroundManager {
|
||||
|
||||
|
||||
@@ -128,6 +153,7 @@ export class BackgroundManager {
|
||||
private preStartDescendantReservations: Set<string>
|
||||
private enableParentSessionNotifications: boolean
|
||||
readonly taskHistory = new TaskHistory()
|
||||
private cachedCircuitBreakerSettings?: CircuitBreakerSettings
|
||||
|
||||
constructor(
|
||||
ctx: PluginInput,
|
||||
@@ -720,6 +746,8 @@ export class BackgroundManager {
|
||||
|
||||
existingTask.progress = {
|
||||
toolCalls: existingTask.progress?.toolCalls ?? 0,
|
||||
toolCallWindow: existingTask.progress?.toolCallWindow,
|
||||
countedToolPartIDs: existingTask.progress?.countedToolPartIDs,
|
||||
lastUpdate: new Date(),
|
||||
}
|
||||
|
||||
@@ -852,8 +880,7 @@ export class BackgroundManager {
|
||||
}
|
||||
|
||||
if (event.type === "message.part.updated" || event.type === "message.part.delta") {
|
||||
if (!props || typeof props !== "object" || !("sessionID" in props)) return
|
||||
const partInfo = props as unknown as MessagePartInfo
|
||||
const partInfo = resolveMessagePartInfo(props)
|
||||
const sessionID = partInfo?.sessionID
|
||||
if (!sessionID) return
|
||||
|
||||
@@ -876,8 +903,65 @@ export class BackgroundManager {
|
||||
task.progress.lastUpdate = new Date()
|
||||
|
||||
if (partInfo?.type === "tool" || partInfo?.tool) {
|
||||
const countedToolPartIDs = task.progress.countedToolPartIDs ?? new Set<string>()
|
||||
const shouldCountToolCall =
|
||||
!partInfo.id ||
|
||||
partInfo.state?.status !== "running" ||
|
||||
!countedToolPartIDs.has(partInfo.id)
|
||||
|
||||
if (!shouldCountToolCall) {
|
||||
return
|
||||
}
|
||||
|
||||
if (partInfo.id && partInfo.state?.status === "running") {
|
||||
countedToolPartIDs.add(partInfo.id)
|
||||
task.progress.countedToolPartIDs = countedToolPartIDs
|
||||
}
|
||||
|
||||
task.progress.toolCalls += 1
|
||||
task.progress.lastTool = partInfo.tool
|
||||
const circuitBreaker = this.cachedCircuitBreakerSettings ?? (this.cachedCircuitBreakerSettings = resolveCircuitBreakerSettings(this.config))
|
||||
if (partInfo.tool) {
|
||||
task.progress.toolCallWindow = recordToolCall(
|
||||
task.progress.toolCallWindow,
|
||||
partInfo.tool,
|
||||
circuitBreaker,
|
||||
partInfo.state?.input
|
||||
)
|
||||
|
||||
if (circuitBreaker.enabled) {
|
||||
const loopDetection = detectRepetitiveToolUse(task.progress.toolCallWindow)
|
||||
if (loopDetection.triggered) {
|
||||
log("[background-agent] Circuit breaker: consecutive tool usage detected", {
|
||||
taskId: task.id,
|
||||
agent: task.agent,
|
||||
sessionID,
|
||||
toolName: loopDetection.toolName,
|
||||
repeatedCount: loopDetection.repeatedCount,
|
||||
})
|
||||
void this.cancelTask(task.id, {
|
||||
source: "circuit-breaker",
|
||||
reason: `Subagent called ${loopDetection.toolName} ${loopDetection.repeatedCount} consecutive times (threshold: ${circuitBreaker.consecutiveThreshold}). This usually indicates an infinite loop. The task was automatically cancelled to prevent excessive token usage.`,
|
||||
})
|
||||
return
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
const maxToolCalls = circuitBreaker.maxToolCalls
|
||||
if (task.progress.toolCalls >= maxToolCalls) {
|
||||
log("[background-agent] Circuit breaker: tool call limit reached", {
|
||||
taskId: task.id,
|
||||
toolCalls: task.progress.toolCalls,
|
||||
maxToolCalls,
|
||||
agent: task.agent,
|
||||
sessionID,
|
||||
})
|
||||
void this.cancelTask(task.id, {
|
||||
source: "circuit-breaker",
|
||||
reason: `Subagent exceeded maximum tool call limit (${maxToolCalls}). This usually indicates an infinite loop. The task was automatically cancelled to prevent excessive token usage.`,
|
||||
})
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
@@ -1188,7 +1272,7 @@ export class BackgroundManager {
|
||||
this.completedTaskSummaries.delete(parentSessionID)
|
||||
}
|
||||
|
||||
private scheduleTaskRemoval(taskId: string): void {
|
||||
private scheduleTaskRemoval(taskId: string, rescheduleCount = 0): void {
|
||||
const existingTimer = this.completionTimers.get(taskId)
|
||||
if (existingTimer) {
|
||||
clearTimeout(existingTimer)
|
||||
@@ -1198,17 +1282,29 @@ export class BackgroundManager {
|
||||
const timer = setTimeout(() => {
|
||||
this.completionTimers.delete(taskId)
|
||||
const task = this.tasks.get(taskId)
|
||||
if (task) {
|
||||
this.clearNotificationsForTask(taskId)
|
||||
this.tasks.delete(taskId)
|
||||
this.clearTaskHistoryWhenParentTasksGone(task.parentSessionID)
|
||||
if (task.sessionID) {
|
||||
subagentSessions.delete(task.sessionID)
|
||||
SessionCategoryRegistry.remove(task.sessionID)
|
||||
if (!task) return
|
||||
|
||||
if (task.parentSessionID) {
|
||||
const siblings = this.getTasksByParentSession(task.parentSessionID)
|
||||
const runningOrPendingSiblings = siblings.filter(
|
||||
sibling => sibling.id !== taskId && (sibling.status === "running" || sibling.status === "pending"),
|
||||
)
|
||||
const completedAtTimestamp = task.completedAt?.getTime()
|
||||
const reachedTaskTtl = completedAtTimestamp !== undefined && (Date.now() - completedAtTimestamp) >= TASK_TTL_MS
|
||||
if (runningOrPendingSiblings.length > 0 && rescheduleCount < MAX_TASK_REMOVAL_RESCHEDULES && !reachedTaskTtl) {
|
||||
this.scheduleTaskRemoval(taskId, rescheduleCount + 1)
|
||||
return
|
||||
}
|
||||
log("[background-agent] Removed completed task from memory:", taskId)
|
||||
this.clearTaskHistoryWhenParentTasksGone(task?.parentSessionID)
|
||||
}
|
||||
|
||||
this.clearNotificationsForTask(taskId)
|
||||
this.tasks.delete(taskId)
|
||||
this.clearTaskHistoryWhenParentTasksGone(task.parentSessionID)
|
||||
if (task.sessionID) {
|
||||
subagentSessions.delete(task.sessionID)
|
||||
SessionCategoryRegistry.remove(task.sessionID)
|
||||
}
|
||||
log("[background-agent] Removed completed task from memory:", taskId)
|
||||
}, TASK_CLEANUP_DELAY_MS)
|
||||
|
||||
this.completionTimers.set(taskId, timer)
|
||||
@@ -1688,11 +1784,9 @@ Use \`background_output(task_id="${task.id}")\` to retrieve this result when rea
|
||||
}
|
||||
}
|
||||
|
||||
// Match sync-session-poller pattern: only skip completion check when
|
||||
// status EXISTS and is not idle (i.e., session is actively running).
|
||||
// When sessionStatus is undefined, the session has completed and dropped
|
||||
// from the status response — fall through to completion detection.
|
||||
if (sessionStatus && sessionStatus.type !== "idle") {
|
||||
// Only skip completion when session status is actively running.
|
||||
// Unknown or terminal statuses (like "interrupted") fall through to completion.
|
||||
if (sessionStatus && isActiveSessionStatus(sessionStatus.type)) {
|
||||
log("[background-agent] Session still running, relying on event-based progress:", {
|
||||
taskId: task.id,
|
||||
sessionID,
|
||||
@@ -1702,6 +1796,24 @@ Use \`background_output(task_id="${task.id}")\` to retrieve this result when rea
|
||||
continue
|
||||
}
|
||||
|
||||
// Explicit terminal non-idle status (e.g., "interrupted") — complete immediately,
|
||||
// skipping output validation (session will never produce more output).
|
||||
// Unknown statuses fall through to the idle/gone path with output validation.
|
||||
if (sessionStatus && isTerminalSessionStatus(sessionStatus.type)) {
|
||||
await this.tryCompleteTask(task, `polling (terminal session status: ${sessionStatus.type})`)
|
||||
continue
|
||||
}
|
||||
|
||||
// Unknown non-idle status — not active, not terminal, not idle.
|
||||
// Fall through to idle/gone completion path with output validation.
|
||||
if (sessionStatus && sessionStatus.type !== "idle") {
|
||||
log("[background-agent] Unknown session status, treating as potentially idle:", {
|
||||
taskId: task.id,
|
||||
sessionID,
|
||||
sessionStatus: sessionStatus.type,
|
||||
})
|
||||
}
|
||||
|
||||
// Session is idle or no longer in status response (completed/disappeared)
|
||||
const completionSource = sessionStatus?.type === "idle"
|
||||
? "polling (idle status)"
|
||||
|
||||
@@ -0,0 +1,66 @@
|
||||
import { describe, test, expect, mock } from "bun:test"
|
||||
import { isActiveSessionStatus, isTerminalSessionStatus } from "./session-status-classifier"
|
||||
|
||||
const mockLog = mock()
|
||||
mock.module("../../shared", () => ({ log: mockLog }))
|
||||
|
||||
describe("isActiveSessionStatus", () => {
|
||||
describe("#given a known active session status", () => {
|
||||
test('#when type is "busy" #then returns true', () => {
|
||||
expect(isActiveSessionStatus("busy")).toBe(true)
|
||||
})
|
||||
|
||||
test('#when type is "retry" #then returns true', () => {
|
||||
expect(isActiveSessionStatus("retry")).toBe(true)
|
||||
})
|
||||
|
||||
test('#when type is "running" #then returns true', () => {
|
||||
expect(isActiveSessionStatus("running")).toBe(true)
|
||||
})
|
||||
})
|
||||
|
||||
describe("#given a known terminal session status", () => {
|
||||
test('#when type is "idle" #then returns false', () => {
|
||||
expect(isActiveSessionStatus("idle")).toBe(false)
|
||||
})
|
||||
|
||||
test('#when type is "interrupted" #then returns false and does not log', () => {
|
||||
mockLog.mockClear()
|
||||
expect(isActiveSessionStatus("interrupted")).toBe(false)
|
||||
expect(mockLog).not.toHaveBeenCalled()
|
||||
})
|
||||
})
|
||||
|
||||
describe("#given an unknown session status", () => {
|
||||
test('#when type is an arbitrary unknown string #then returns false and logs warning', () => {
|
||||
mockLog.mockClear()
|
||||
expect(isActiveSessionStatus("some-unknown-status")).toBe(false)
|
||||
expect(mockLog).toHaveBeenCalledWith(
|
||||
"[background-agent] Unknown session status type encountered:",
|
||||
"some-unknown-status",
|
||||
)
|
||||
})
|
||||
|
||||
test('#when type is empty string #then returns false', () => {
|
||||
expect(isActiveSessionStatus("")).toBe(false)
|
||||
})
|
||||
})
|
||||
})
|
||||
|
||||
describe("isTerminalSessionStatus", () => {
|
||||
test('#when type is "interrupted" #then returns true', () => {
|
||||
expect(isTerminalSessionStatus("interrupted")).toBe(true)
|
||||
})
|
||||
|
||||
test('#when type is "idle" #then returns false (idle is handled separately)', () => {
|
||||
expect(isTerminalSessionStatus("idle")).toBe(false)
|
||||
})
|
||||
|
||||
test('#when type is "busy" #then returns false', () => {
|
||||
expect(isTerminalSessionStatus("busy")).toBe(false)
|
||||
})
|
||||
|
||||
test('#when type is an unknown string #then returns false', () => {
|
||||
expect(isTerminalSessionStatus("some-unknown")).toBe(false)
|
||||
})
|
||||
})
|
||||
20
src/features/background-agent/session-status-classifier.ts
Normal file
20
src/features/background-agent/session-status-classifier.ts
Normal file
@@ -0,0 +1,20 @@
|
||||
import { log } from "../../shared"
|
||||
|
||||
const ACTIVE_SESSION_STATUSES = new Set(["busy", "retry", "running"])
|
||||
const KNOWN_TERMINAL_STATUSES = new Set(["idle", "interrupted"])
|
||||
|
||||
export function isActiveSessionStatus(type: string): boolean {
|
||||
if (ACTIVE_SESSION_STATUSES.has(type)) {
|
||||
return true
|
||||
}
|
||||
|
||||
if (!KNOWN_TERMINAL_STATUSES.has(type)) {
|
||||
log("[background-agent] Unknown session status type encountered:", type)
|
||||
}
|
||||
|
||||
return false
|
||||
}
|
||||
|
||||
export function isTerminalSessionStatus(type: string): boolean {
|
||||
return KNOWN_TERMINAL_STATUSES.has(type) && type !== "idle"
|
||||
}
|
||||
@@ -64,4 +64,63 @@ describe("background-agent spawner.startTask", () => {
|
||||
{ permission: "question", action: "deny", pattern: "*" },
|
||||
])
|
||||
})
|
||||
|
||||
test("keeps agent when explicit model is configured", async () => {
|
||||
//#given
|
||||
const promptCalls: any[] = []
|
||||
|
||||
const client = {
|
||||
session: {
|
||||
get: async () => ({ data: { directory: "/parent/dir" } }),
|
||||
create: async () => ({ data: { id: "ses_child" } }),
|
||||
promptAsync: async (args?: any) => {
|
||||
promptCalls.push(args)
|
||||
return {}
|
||||
},
|
||||
},
|
||||
}
|
||||
|
||||
const task = createTask({
|
||||
description: "Test task",
|
||||
prompt: "Do work",
|
||||
agent: "sisyphus-junior",
|
||||
parentSessionID: "ses_parent",
|
||||
parentMessageID: "msg_parent",
|
||||
model: { providerID: "openai", modelID: "gpt-5.4", variant: "medium" },
|
||||
})
|
||||
|
||||
const item = {
|
||||
task,
|
||||
input: {
|
||||
description: task.description,
|
||||
prompt: task.prompt,
|
||||
agent: task.agent,
|
||||
parentSessionID: task.parentSessionID,
|
||||
parentMessageID: task.parentMessageID,
|
||||
parentModel: task.parentModel,
|
||||
parentAgent: task.parentAgent,
|
||||
model: task.model,
|
||||
},
|
||||
}
|
||||
|
||||
const ctx = {
|
||||
client,
|
||||
directory: "/fallback",
|
||||
concurrencyManager: { release: () => {} },
|
||||
tmuxEnabled: false,
|
||||
onTaskError: () => {},
|
||||
}
|
||||
|
||||
//#when
|
||||
await startTask(item as any, ctx as any)
|
||||
|
||||
//#then
|
||||
expect(promptCalls).toHaveLength(1)
|
||||
expect(promptCalls[0]?.body?.agent).toBe("sisyphus-junior")
|
||||
expect(promptCalls[0]?.body?.model).toEqual({
|
||||
providerID: "openai",
|
||||
modelID: "gpt-5.4",
|
||||
})
|
||||
expect(promptCalls[0]?.body?.variant).toBe("medium")
|
||||
})
|
||||
})
|
||||
|
||||
@@ -1,6 +1,5 @@
|
||||
declare const require: (name: string) => any
|
||||
const { describe, test, expect, afterEach } = require("bun:test")
|
||||
import { tmpdir } from "node:os"
|
||||
import { afterEach, describe, expect, test } from "bun:test"
|
||||
import type { PluginInput } from "@opencode-ai/plugin"
|
||||
import { TASK_CLEANUP_DELAY_MS } from "./constants"
|
||||
import { BackgroundManager } from "./manager"
|
||||
@@ -157,17 +156,19 @@ function getRequiredTimer(manager: BackgroundManager, taskID: string): ReturnTyp
|
||||
}
|
||||
|
||||
describe("BackgroundManager.notifyParentSession cleanup scheduling", () => {
|
||||
describe("#given 2 tasks for same parent and task A completed", () => {
|
||||
test("#when task B is still running #then task A is cleaned up from this.tasks after delay even though task B is not done", async () => {
|
||||
describe("#given 3 tasks for same parent and task A completed first", () => {
|
||||
test("#when siblings are still running or pending #then task A remains until siblings also complete", async () => {
|
||||
// given
|
||||
const { manager } = createManager(false)
|
||||
managerUnderTest = manager
|
||||
fakeTimers = installFakeTimers()
|
||||
const taskA = createTask({ id: "task-a", parentSessionID: "parent-1", description: "task A", status: "completed", completedAt: new Date("2026-03-11T00:01:00.000Z") })
|
||||
const taskA = createTask({ id: "task-a", parentSessionID: "parent-1", description: "task A", status: "completed", completedAt: new Date() })
|
||||
const taskB = createTask({ id: "task-b", parentSessionID: "parent-1", description: "task B", status: "running" })
|
||||
const taskC = createTask({ id: "task-c", parentSessionID: "parent-1", description: "task C", status: "pending" })
|
||||
getTasks(manager).set(taskA.id, taskA)
|
||||
getTasks(manager).set(taskB.id, taskB)
|
||||
getPendingByParent(manager).set(taskA.parentSessionID, new Set([taskA.id, taskB.id]))
|
||||
getTasks(manager).set(taskC.id, taskC)
|
||||
getPendingByParent(manager).set(taskA.parentSessionID, new Set([taskA.id, taskB.id, taskC.id]))
|
||||
|
||||
// when
|
||||
await notifyParentSessionForTest(manager, taskA)
|
||||
@@ -177,8 +178,23 @@ describe("BackgroundManager.notifyParentSession cleanup scheduling", () => {
|
||||
|
||||
// then
|
||||
expect(fakeTimers.getDelay(taskATimer)).toBeUndefined()
|
||||
expect(getTasks(manager).has(taskA.id)).toBe(false)
|
||||
expect(getTasks(manager).has(taskA.id)).toBe(true)
|
||||
expect(getTasks(manager).get(taskB.id)).toBe(taskB)
|
||||
expect(getTasks(manager).get(taskC.id)).toBe(taskC)
|
||||
|
||||
// when
|
||||
taskB.status = "completed"
|
||||
taskB.completedAt = new Date()
|
||||
taskC.status = "completed"
|
||||
taskC.completedAt = new Date()
|
||||
await notifyParentSessionForTest(manager, taskB)
|
||||
await notifyParentSessionForTest(manager, taskC)
|
||||
const rescheduledTaskATimer = getRequiredTimer(manager, taskA.id)
|
||||
expect(fakeTimers.getDelay(rescheduledTaskATimer)).toBe(TASK_CLEANUP_DELAY_MS)
|
||||
fakeTimers.run(rescheduledTaskATimer)
|
||||
|
||||
// then
|
||||
expect(getTasks(manager).has(taskA.id)).toBe(false)
|
||||
})
|
||||
})
|
||||
|
||||
|
||||
@@ -1,5 +1,5 @@
|
||||
declare const require: (name: string) => any
|
||||
const { describe, it, expect, mock } = require("bun:test")
|
||||
const { describe, it, expect, mock, spyOn, beforeEach, afterEach } = require("bun:test")
|
||||
|
||||
import { checkAndInterruptStaleTasks, pruneStaleTasksAndNotifications } from "./task-poller"
|
||||
import type { BackgroundTask } from "./types"
|
||||
@@ -29,6 +29,18 @@ describe("checkAndInterruptStaleTasks", () => {
|
||||
...overrides,
|
||||
}
|
||||
}
|
||||
const originalDateNow = Date.now
|
||||
let fixedTime: number
|
||||
|
||||
beforeEach(() => {
|
||||
fixedTime = Date.now()
|
||||
spyOn(globalThis.Date, "now").mockReturnValue(fixedTime)
|
||||
})
|
||||
|
||||
afterEach(() => {
|
||||
Date.now = originalDateNow
|
||||
})
|
||||
|
||||
|
||||
it("should interrupt tasks with lastUpdate exceeding stale timeout", async () => {
|
||||
//#given
|
||||
@@ -117,13 +129,13 @@ describe("checkAndInterruptStaleTasks", () => {
|
||||
})
|
||||
|
||||
it("should use DEFAULT_MESSAGE_STALENESS_TIMEOUT_MS when messageStalenessTimeoutMs is not configured", async () => {
|
||||
//#given — task started 35 minutes ago, no config for messageStalenessTimeoutMs
|
||||
//#given — task started 65 minutes ago, no config for messageStalenessTimeoutMs
|
||||
const task = createRunningTask({
|
||||
startedAt: new Date(Date.now() - 35 * 60 * 1000),
|
||||
startedAt: new Date(Date.now() - 65 * 60 * 1000),
|
||||
progress: undefined,
|
||||
})
|
||||
|
||||
//#when — default is 30 minutes (1_800_000ms)
|
||||
//#when — default is 60 minutes (3_600_000ms)
|
||||
await checkAndInterruptStaleTasks({
|
||||
tasks: [task],
|
||||
client: mockClient as never,
|
||||
@@ -417,6 +429,56 @@ describe("checkAndInterruptStaleTasks", () => {
|
||||
expect(task.status).toBe("cancelled")
|
||||
expect(onTaskInterrupted).toHaveBeenCalledWith(task)
|
||||
})
|
||||
|
||||
it('should NOT protect task when session has terminal non-idle status like "interrupted"', async () => {
|
||||
//#given — lastUpdate is 5min old, session is "interrupted" (terminal, not active)
|
||||
const task = createRunningTask({
|
||||
startedAt: new Date(Date.now() - 300_000),
|
||||
progress: {
|
||||
toolCalls: 2,
|
||||
lastUpdate: new Date(Date.now() - 300_000),
|
||||
},
|
||||
})
|
||||
|
||||
//#when — session status is "interrupted" (terminal)
|
||||
await checkAndInterruptStaleTasks({
|
||||
tasks: [task],
|
||||
client: mockClient as never,
|
||||
config: { staleTimeoutMs: 180_000 },
|
||||
concurrencyManager: mockConcurrencyManager as never,
|
||||
notifyParentSession: mockNotify,
|
||||
sessionStatuses: { "ses-1": { type: "interrupted" } },
|
||||
})
|
||||
|
||||
//#then — terminal statuses should not protect from stale timeout
|
||||
expect(task.status).toBe("cancelled")
|
||||
expect(task.error).toContain("Stale timeout")
|
||||
})
|
||||
|
||||
it('should NOT protect task when session has unknown status type', async () => {
|
||||
//#given — lastUpdate is 5min old, session has an unknown status
|
||||
const task = createRunningTask({
|
||||
startedAt: new Date(Date.now() - 300_000),
|
||||
progress: {
|
||||
toolCalls: 2,
|
||||
lastUpdate: new Date(Date.now() - 300_000),
|
||||
},
|
||||
})
|
||||
|
||||
//#when — session has unknown status type
|
||||
await checkAndInterruptStaleTasks({
|
||||
tasks: [task],
|
||||
client: mockClient as never,
|
||||
config: { staleTimeoutMs: 180_000 },
|
||||
concurrencyManager: mockConcurrencyManager as never,
|
||||
notifyParentSession: mockNotify,
|
||||
sessionStatuses: { "ses-1": { type: "some-weird-status" } },
|
||||
})
|
||||
|
||||
//#then — unknown statuses should not protect from stale timeout
|
||||
expect(task.status).toBe("cancelled")
|
||||
expect(task.error).toContain("Stale timeout")
|
||||
})
|
||||
})
|
||||
|
||||
describe("pruneStaleTasksAndNotifications", () => {
|
||||
|
||||
@@ -9,12 +9,12 @@ import {
|
||||
DEFAULT_MESSAGE_STALENESS_TIMEOUT_MS,
|
||||
DEFAULT_STALE_TIMEOUT_MS,
|
||||
MIN_RUNTIME_BEFORE_STALE_MS,
|
||||
TERMINAL_TASK_TTL_MS,
|
||||
TASK_TTL_MS,
|
||||
} from "./constants"
|
||||
import { removeTaskToastTracking } from "./remove-task-toast-tracking"
|
||||
|
||||
const TERMINAL_TASK_TTL_MS = 30 * 60 * 1000
|
||||
|
||||
import { isActiveSessionStatus } from "./session-status-classifier"
|
||||
const TERMINAL_TASK_STATUSES = new Set<BackgroundTask["status"]>([
|
||||
"completed",
|
||||
"error",
|
||||
@@ -121,7 +121,7 @@ export async function checkAndInterruptStaleTasks(args: {
|
||||
if (!startedAt || !sessionID) continue
|
||||
|
||||
const sessionStatus = sessionStatuses?.[sessionID]?.type
|
||||
const sessionIsRunning = sessionStatus !== undefined && sessionStatus !== "idle"
|
||||
const sessionIsRunning = sessionStatus !== undefined && isActiveSessionStatus(sessionStatus)
|
||||
const runtime = now - startedAt.getTime()
|
||||
|
||||
if (!task.progress?.lastUpdate) {
|
||||
@@ -130,7 +130,7 @@ export async function checkAndInterruptStaleTasks(args: {
|
||||
|
||||
const staleMinutes = Math.round(runtime / 60000)
|
||||
task.status = "cancelled"
|
||||
task.error = `Stale timeout (no activity for ${staleMinutes}min since start)`
|
||||
task.error = `Stale timeout (no activity for ${staleMinutes}min since start). This is a FINAL cancellation - do NOT create a replacement task. If the timeout is too short, increase 'background_task.staleTimeoutMs' in .opencode/oh-my-opencode.json.`
|
||||
task.completedAt = new Date()
|
||||
|
||||
if (task.concurrencyKey) {
|
||||
@@ -159,10 +159,10 @@ export async function checkAndInterruptStaleTasks(args: {
|
||||
if (timeSinceLastUpdate <= staleTimeoutMs) continue
|
||||
if (task.status !== "running") continue
|
||||
|
||||
const staleMinutes = Math.round(timeSinceLastUpdate / 60000)
|
||||
task.status = "cancelled"
|
||||
task.error = `Stale timeout (no activity for ${staleMinutes}min)`
|
||||
task.completedAt = new Date()
|
||||
const staleMinutes = Math.round(timeSinceLastUpdate / 60000)
|
||||
task.status = "cancelled"
|
||||
task.error = `Stale timeout (no activity for ${staleMinutes}min). This is a FINAL cancellation - do NOT create a replacement task. If the timeout is too short, increase 'background_task.staleTimeoutMs' in .opencode/oh-my-opencode.json.`
|
||||
task.completedAt = new Date()
|
||||
|
||||
if (task.concurrencyKey) {
|
||||
concurrencyManager.release(task.concurrencyKey)
|
||||
|
||||
@@ -9,9 +9,17 @@ export type BackgroundTaskStatus =
|
||||
| "cancelled"
|
||||
| "interrupt"
|
||||
|
||||
export interface ToolCallWindow {
|
||||
lastSignature: string
|
||||
consecutiveCount: number
|
||||
threshold: number
|
||||
}
|
||||
|
||||
export interface TaskProgress {
|
||||
toolCalls: number
|
||||
lastTool?: string
|
||||
toolCallWindow?: ToolCallWindow
|
||||
countedToolPartIDs?: Set<string>
|
||||
lastUpdate: Date
|
||||
lastMessage?: string
|
||||
lastMessageAt?: Date
|
||||
|
||||
@@ -1,3 +1,4 @@
|
||||
export * from "./types"
|
||||
export * from "./constants"
|
||||
export * from "./storage"
|
||||
export * from "./top-level-task"
|
||||
|
||||
@@ -11,8 +11,11 @@ import {
|
||||
getPlanName,
|
||||
createBoulderState,
|
||||
findPrometheusPlans,
|
||||
getTaskSessionState,
|
||||
upsertTaskSessionState,
|
||||
} from "./storage"
|
||||
import type { BoulderState } from "./types"
|
||||
import { readCurrentTopLevelTask } from "./top-level-task"
|
||||
|
||||
describe("boulder-state", () => {
|
||||
const TEST_DIR = join(tmpdir(), "boulder-state-test-" + Date.now())
|
||||
@@ -134,6 +137,24 @@ describe("boulder-state", () => {
|
||||
expect(result?.session_ids).toEqual(["session-1", "session-2"])
|
||||
expect(result?.plan_name).toBe("my-plan")
|
||||
})
|
||||
|
||||
test("should default task_sessions to empty object when missing from JSON", () => {
|
||||
// given - boulder.json without task_sessions field
|
||||
const boulderFile = join(SISYPHUS_DIR, "boulder.json")
|
||||
writeFileSync(boulderFile, JSON.stringify({
|
||||
active_plan: "/path/to/plan.md",
|
||||
started_at: "2026-01-01T00:00:00Z",
|
||||
session_ids: ["session-1"],
|
||||
plan_name: "plan",
|
||||
}))
|
||||
|
||||
// when
|
||||
const result = readBoulderState(TEST_DIR)
|
||||
|
||||
// then
|
||||
expect(result).not.toBeNull()
|
||||
expect(result!.task_sessions).toEqual({})
|
||||
})
|
||||
})
|
||||
|
||||
describe("writeBoulderState", () => {
|
||||
@@ -249,6 +270,115 @@ describe("boulder-state", () => {
|
||||
})
|
||||
})
|
||||
|
||||
describe("task session state", () => {
|
||||
test("should persist and read preferred session for a top-level plan task", () => {
|
||||
// given - existing boulder state
|
||||
const state: BoulderState = {
|
||||
active_plan: "/plan.md",
|
||||
started_at: "2026-01-02T10:00:00Z",
|
||||
session_ids: ["session-1"],
|
||||
plan_name: "plan",
|
||||
}
|
||||
writeBoulderState(TEST_DIR, state)
|
||||
|
||||
// when
|
||||
upsertTaskSessionState(TEST_DIR, {
|
||||
taskKey: "todo:1",
|
||||
taskLabel: "1",
|
||||
taskTitle: "Implement auth flow",
|
||||
sessionId: "ses_task_123",
|
||||
agent: "sisyphus-junior",
|
||||
category: "deep",
|
||||
})
|
||||
const result = getTaskSessionState(TEST_DIR, "todo:1")
|
||||
|
||||
// then
|
||||
expect(result).not.toBeNull()
|
||||
expect(result?.session_id).toBe("ses_task_123")
|
||||
expect(result?.task_title).toBe("Implement auth flow")
|
||||
expect(result?.agent).toBe("sisyphus-junior")
|
||||
expect(result?.category).toBe("deep")
|
||||
})
|
||||
|
||||
test("should overwrite preferred session for the same top-level plan task", () => {
|
||||
// given - existing boulder state with prior preferred session
|
||||
const state: BoulderState = {
|
||||
active_plan: "/plan.md",
|
||||
started_at: "2026-01-02T10:00:00Z",
|
||||
session_ids: ["session-1"],
|
||||
plan_name: "plan",
|
||||
task_sessions: {
|
||||
"todo:1": {
|
||||
task_key: "todo:1",
|
||||
task_label: "1",
|
||||
task_title: "Implement auth flow",
|
||||
session_id: "ses_old",
|
||||
updated_at: "2026-01-02T10:00:00Z",
|
||||
},
|
||||
},
|
||||
}
|
||||
writeBoulderState(TEST_DIR, state)
|
||||
|
||||
// when
|
||||
upsertTaskSessionState(TEST_DIR, {
|
||||
taskKey: "todo:1",
|
||||
taskLabel: "1",
|
||||
taskTitle: "Implement auth flow",
|
||||
sessionId: "ses_new",
|
||||
})
|
||||
const result = getTaskSessionState(TEST_DIR, "todo:1")
|
||||
|
||||
// then
|
||||
expect(result?.session_id).toBe("ses_new")
|
||||
})
|
||||
})
|
||||
|
||||
describe("readCurrentTopLevelTask", () => {
|
||||
test("should return the first unchecked top-level task in TODOs", () => {
|
||||
// given - plan with nested and top-level unchecked tasks
|
||||
const planPath = join(TEST_DIR, "current-task-plan.md")
|
||||
writeFileSync(planPath, `# Plan
|
||||
|
||||
## TODOs
|
||||
- [x] 1. Finished task
|
||||
- [ ] nested acceptance checkbox
|
||||
- [ ] 2. Current task
|
||||
|
||||
## Final Verification Wave
|
||||
- [ ] F1. Final review
|
||||
`)
|
||||
|
||||
// when
|
||||
const result = readCurrentTopLevelTask(planPath)
|
||||
|
||||
// then
|
||||
expect(result).not.toBeNull()
|
||||
expect(result?.key).toBe("todo:2")
|
||||
expect(result?.title).toBe("Current task")
|
||||
})
|
||||
|
||||
test("should fall back to final-wave task when implementation tasks are complete", () => {
|
||||
// given - plan with only final-wave work remaining
|
||||
const planPath = join(TEST_DIR, "final-wave-current-task-plan.md")
|
||||
writeFileSync(planPath, `# Plan
|
||||
|
||||
## TODOs
|
||||
- [x] 1. Finished task
|
||||
|
||||
## Final Verification Wave
|
||||
- [ ] F1. Final review
|
||||
`)
|
||||
|
||||
// when
|
||||
const result = readCurrentTopLevelTask(planPath)
|
||||
|
||||
// then
|
||||
expect(result).not.toBeNull()
|
||||
expect(result?.key).toBe("final-wave:f1")
|
||||
expect(result?.title).toBe("Final review")
|
||||
})
|
||||
})
|
||||
|
||||
describe("getPlanProgress", () => {
|
||||
test("should count completed and uncompleted checkboxes", () => {
|
||||
// given - plan file with checkboxes
|
||||
@@ -351,7 +481,7 @@ describe("boulder-state", () => {
|
||||
expect(progress.isComplete).toBe(true)
|
||||
})
|
||||
|
||||
test("should return isComplete true for empty plan", () => {
|
||||
test("should return isComplete false for empty plan", () => {
|
||||
// given - plan with no checkboxes
|
||||
const planPath = join(TEST_DIR, "empty-plan.md")
|
||||
writeFileSync(planPath, "# Plan\nNo tasks here")
|
||||
@@ -361,7 +491,7 @@ describe("boulder-state", () => {
|
||||
|
||||
// then
|
||||
expect(progress.total).toBe(0)
|
||||
expect(progress.isComplete).toBe(true)
|
||||
expect(progress.isComplete).toBe(false)
|
||||
})
|
||||
|
||||
test("should handle non-existent file", () => {
|
||||
|
||||
@@ -6,9 +6,11 @@
|
||||
|
||||
import { existsSync, readFileSync, writeFileSync, mkdirSync, readdirSync } from "node:fs"
|
||||
import { dirname, join, basename } from "node:path"
|
||||
import type { BoulderState, PlanProgress } from "./types"
|
||||
import type { BoulderState, PlanProgress, TaskSessionState } from "./types"
|
||||
import { BOULDER_DIR, BOULDER_FILE, PROMETHEUS_PLANS_DIR } from "./constants"
|
||||
|
||||
const RESERVED_KEYS = new Set(["__proto__", "prototype", "constructor"])
|
||||
|
||||
export function getBoulderFilePath(directory: string): string {
|
||||
return join(directory, BOULDER_DIR, BOULDER_FILE)
|
||||
}
|
||||
@@ -29,6 +31,9 @@ export function readBoulderState(directory: string): BoulderState | null {
|
||||
if (!Array.isArray(parsed.session_ids)) {
|
||||
parsed.session_ids = []
|
||||
}
|
||||
if (!parsed.task_sessions || typeof parsed.task_sessions !== "object" || Array.isArray(parsed.task_sessions)) {
|
||||
parsed.task_sessions = {}
|
||||
}
|
||||
return parsed as BoulderState
|
||||
} catch {
|
||||
return null
|
||||
@@ -59,10 +64,13 @@ export function appendSessionId(directory: string, sessionId: string): BoulderSt
|
||||
if (!Array.isArray(state.session_ids)) {
|
||||
state.session_ids = []
|
||||
}
|
||||
const originalSessionIds = [...state.session_ids]
|
||||
state.session_ids.push(sessionId)
|
||||
if (writeBoulderState(directory, state)) {
|
||||
return state
|
||||
}
|
||||
state.session_ids = originalSessionIds
|
||||
return null
|
||||
}
|
||||
|
||||
return state
|
||||
@@ -82,6 +90,54 @@ export function clearBoulderState(directory: string): boolean {
|
||||
}
|
||||
}
|
||||
|
||||
export function getTaskSessionState(directory: string, taskKey: string): TaskSessionState | null {
|
||||
const state = readBoulderState(directory)
|
||||
if (!state?.task_sessions) {
|
||||
return null
|
||||
}
|
||||
|
||||
return state.task_sessions[taskKey] ?? null
|
||||
}
|
||||
|
||||
export function upsertTaskSessionState(
|
||||
directory: string,
|
||||
input: {
|
||||
taskKey: string
|
||||
taskLabel: string
|
||||
taskTitle: string
|
||||
sessionId: string
|
||||
agent?: string
|
||||
category?: string
|
||||
},
|
||||
): BoulderState | null {
|
||||
const state = readBoulderState(directory)
|
||||
if (!state) {
|
||||
return null
|
||||
}
|
||||
|
||||
if (RESERVED_KEYS.has(input.taskKey)) {
|
||||
return null
|
||||
}
|
||||
|
||||
const taskSessions = state.task_sessions ?? {}
|
||||
taskSessions[input.taskKey] = {
|
||||
task_key: input.taskKey,
|
||||
task_label: input.taskLabel,
|
||||
task_title: input.taskTitle,
|
||||
session_id: input.sessionId,
|
||||
...(input.agent !== undefined ? { agent: input.agent } : {}),
|
||||
...(input.category !== undefined ? { category: input.category } : {}),
|
||||
updated_at: new Date().toISOString(),
|
||||
}
|
||||
|
||||
state.task_sessions = taskSessions
|
||||
if (writeBoulderState(directory, state)) {
|
||||
return state
|
||||
}
|
||||
|
||||
return null
|
||||
}
|
||||
|
||||
/**
|
||||
* Find Prometheus plan files for this project.
|
||||
* Prometheus stores plans at: {project}/.sisyphus/plans/{name}.md
|
||||
@@ -130,7 +186,7 @@ export function getPlanProgress(planPath: string): PlanProgress {
|
||||
return {
|
||||
total,
|
||||
completed,
|
||||
isComplete: total === 0 || completed === total,
|
||||
isComplete: total > 0 && completed === total,
|
||||
}
|
||||
} catch {
|
||||
return { total: 0, completed: 0, isComplete: true }
|
||||
|
||||
268
src/features/boulder-state/top-level-task.test.ts
Normal file
268
src/features/boulder-state/top-level-task.test.ts
Normal file
@@ -0,0 +1,268 @@
|
||||
import { describe, expect, test } from "bun:test"
|
||||
import { writeFileSync } from "node:fs"
|
||||
import { join } from "node:path"
|
||||
import { tmpdir } from "node:os"
|
||||
|
||||
import { readCurrentTopLevelTask } from "./top-level-task"
|
||||
|
||||
function writePlanFile(fileName: string, content: string): string {
|
||||
const planPath = join(tmpdir(), fileName)
|
||||
writeFileSync(planPath, content, "utf-8")
|
||||
return planPath
|
||||
}
|
||||
|
||||
describe("readCurrentTopLevelTask", () => {
|
||||
test("returns first unchecked top-level task in TODOs", () => {
|
||||
// given
|
||||
const planPath = writePlanFile(
|
||||
`top-level-task-happy-${Date.now()}.md`,
|
||||
`# Plan
|
||||
|
||||
## TODOs
|
||||
- [x] 1. Done task
|
||||
- [ ] 2. Current task
|
||||
|
||||
## Final Verification Wave
|
||||
- [ ] F1. Final review
|
||||
`,
|
||||
)
|
||||
|
||||
// when
|
||||
const result = readCurrentTopLevelTask(planPath)
|
||||
|
||||
// then
|
||||
expect(result).toEqual({
|
||||
key: "todo:2",
|
||||
section: "todo",
|
||||
label: "2",
|
||||
title: "Current task",
|
||||
})
|
||||
})
|
||||
|
||||
test("returns null when all tasks are checked", () => {
|
||||
// given
|
||||
const planPath = writePlanFile(
|
||||
`top-level-task-all-checked-${Date.now()}.md`,
|
||||
`# Plan
|
||||
|
||||
## TODOs
|
||||
- [x] 1. Done task
|
||||
- [x] 2. Another done task
|
||||
|
||||
## Final Verification Wave
|
||||
- [x] F1. Final done review
|
||||
`,
|
||||
)
|
||||
|
||||
// when
|
||||
const result = readCurrentTopLevelTask(planPath)
|
||||
|
||||
// then
|
||||
expect(result).toBeNull()
|
||||
})
|
||||
|
||||
test("returns null for empty plan file", () => {
|
||||
// given
|
||||
const planPath = writePlanFile(`top-level-task-empty-${Date.now()}.md`, "")
|
||||
|
||||
// when
|
||||
const result = readCurrentTopLevelTask(planPath)
|
||||
|
||||
// then
|
||||
expect(result).toBeNull()
|
||||
})
|
||||
|
||||
test("returns null when plan file does not exist", () => {
|
||||
// given
|
||||
const planPath = join(tmpdir(), `top-level-task-missing-${Date.now()}.md`)
|
||||
|
||||
// when
|
||||
const result = readCurrentTopLevelTask(planPath)
|
||||
|
||||
// then
|
||||
expect(result).toBeNull()
|
||||
})
|
||||
|
||||
test("skips nested or indented checkboxes", () => {
|
||||
// given
|
||||
const planPath = writePlanFile(
|
||||
`top-level-task-nested-${Date.now()}.md`,
|
||||
`# Plan
|
||||
|
||||
## TODOs
|
||||
- [x] 1. Done task
|
||||
- [ ] nested should be ignored
|
||||
- [ ] 2. Top-level pending
|
||||
`,
|
||||
)
|
||||
|
||||
// when
|
||||
const result = readCurrentTopLevelTask(planPath)
|
||||
|
||||
// then
|
||||
expect(result?.key).toBe("todo:2")
|
||||
})
|
||||
|
||||
test("falls back to Final Verification Wave when TODOs are all checked", () => {
|
||||
// given
|
||||
const planPath = writePlanFile(
|
||||
`top-level-task-fallback-${Date.now()}.md`,
|
||||
`# Plan
|
||||
|
||||
## TODOs
|
||||
- [x] 1. Done task
|
||||
- [x] 2. Done task
|
||||
|
||||
## Final Verification Wave
|
||||
- [ ] F1. Final review pending
|
||||
`,
|
||||
)
|
||||
|
||||
// when
|
||||
const result = readCurrentTopLevelTask(planPath)
|
||||
|
||||
// then
|
||||
expect(result).toEqual({
|
||||
key: "final-wave:f1",
|
||||
section: "final-wave",
|
||||
label: "F1",
|
||||
title: "Final review pending",
|
||||
})
|
||||
})
|
||||
|
||||
test("selects the first unchecked task among mixed checked and unchecked TODOs", () => {
|
||||
// given
|
||||
const planPath = writePlanFile(
|
||||
`top-level-task-mixed-${Date.now()}.md`,
|
||||
`# Plan
|
||||
|
||||
## TODOs
|
||||
- [x] 1. Done task
|
||||
- [ ] 2. First unchecked
|
||||
- [ ] 3. Second unchecked
|
||||
`,
|
||||
)
|
||||
|
||||
// when
|
||||
const result = readCurrentTopLevelTask(planPath)
|
||||
|
||||
// then
|
||||
expect(result?.key).toBe("todo:2")
|
||||
expect(result?.title).toBe("First unchecked")
|
||||
})
|
||||
|
||||
test("ignores malformed labels and continues to next unchecked task", () => {
|
||||
// given
|
||||
const planPath = writePlanFile(
|
||||
`top-level-task-malformed-${Date.now()}.md`,
|
||||
`# Plan
|
||||
|
||||
## TODOs
|
||||
- [ ] no number prefix
|
||||
- [ ] 2. Valid task after malformed label
|
||||
`,
|
||||
)
|
||||
|
||||
// when
|
||||
const result = readCurrentTopLevelTask(planPath)
|
||||
|
||||
// then
|
||||
expect(result).toEqual({
|
||||
key: "todo:2",
|
||||
section: "todo",
|
||||
label: "2",
|
||||
title: "Valid task after malformed label",
|
||||
})
|
||||
})
|
||||
|
||||
test("supports unchecked tasks with asterisk bullets", () => {
|
||||
// given
|
||||
const planPath = writePlanFile(
|
||||
`top-level-task-asterisk-${Date.now()}.md`,
|
||||
`# Plan
|
||||
|
||||
## TODOs
|
||||
* [ ] 1. Task using asterisk bullet
|
||||
`,
|
||||
)
|
||||
|
||||
// when
|
||||
const result = readCurrentTopLevelTask(planPath)
|
||||
|
||||
// then
|
||||
expect(result?.key).toBe("todo:1")
|
||||
expect(result?.title).toBe("Task using asterisk bullet")
|
||||
})
|
||||
|
||||
test("returns final-wave task when plan has only Final Verification Wave section", () => {
|
||||
// given
|
||||
const planPath = writePlanFile(
|
||||
`top-level-task-final-only-${Date.now()}.md`,
|
||||
`# Plan
|
||||
|
||||
## Final Verification Wave
|
||||
- [ ] F2. Final-only task
|
||||
`,
|
||||
)
|
||||
|
||||
// when
|
||||
const result = readCurrentTopLevelTask(planPath)
|
||||
|
||||
// then
|
||||
expect(result).toEqual({
|
||||
key: "final-wave:f2",
|
||||
section: "final-wave",
|
||||
label: "F2",
|
||||
title: "Final-only task",
|
||||
})
|
||||
})
|
||||
|
||||
test("returns the first unchecked task when multiple unchecked tasks exist", () => {
|
||||
// given
|
||||
const planPath = writePlanFile(
|
||||
`top-level-task-multiple-${Date.now()}.md`,
|
||||
`# Plan
|
||||
|
||||
## TODOs
|
||||
- [ ] 1. First unchecked task
|
||||
- [ ] 2. Second unchecked task
|
||||
- [ ] 3. Third unchecked task
|
||||
`,
|
||||
)
|
||||
|
||||
// when
|
||||
const result = readCurrentTopLevelTask(planPath)
|
||||
|
||||
// then
|
||||
expect(result?.label).toBe("1")
|
||||
expect(result?.title).toBe("First unchecked task")
|
||||
})
|
||||
|
||||
test("ignores unchecked content in non-target sections during section transitions", () => {
|
||||
// given
|
||||
const planPath = writePlanFile(
|
||||
`top-level-task-sections-${Date.now()}.md`,
|
||||
`# Plan
|
||||
|
||||
## Notes
|
||||
- [ ] 99. Should be ignored because section is not tracked
|
||||
|
||||
## TODOs
|
||||
- [x] 1. Done implementation task
|
||||
|
||||
## Decisions
|
||||
- [ ] 100. Should also be ignored
|
||||
|
||||
## Final Verification Wave
|
||||
- [ ] F3. Final verification task
|
||||
`,
|
||||
)
|
||||
|
||||
// when
|
||||
const result = readCurrentTopLevelTask(planPath)
|
||||
|
||||
// then
|
||||
expect(result?.key).toBe("final-wave:f3")
|
||||
expect(result?.section).toBe("final-wave")
|
||||
})
|
||||
})
|
||||
77
src/features/boulder-state/top-level-task.ts
Normal file
77
src/features/boulder-state/top-level-task.ts
Normal file
@@ -0,0 +1,77 @@
|
||||
import { existsSync, readFileSync } from "node:fs"
|
||||
|
||||
import type { TopLevelTaskRef } from "./types"
|
||||
|
||||
const TODO_HEADING_PATTERN = /^##\s+TODOs\b/i
|
||||
const FINAL_VERIFICATION_HEADING_PATTERN = /^##\s+Final Verification Wave\b/i
|
||||
const SECOND_LEVEL_HEADING_PATTERN = /^##\s+/
|
||||
const UNCHECKED_CHECKBOX_PATTERN = /^(\s*)[-*]\s*\[\s*\]\s*(.+)$/
|
||||
const TODO_TASK_PATTERN = /^(\d+)\.\s+(.+)$/
|
||||
const FINAL_WAVE_TASK_PATTERN = /^(F\d+)\.\s+(.+)$/i
|
||||
|
||||
type PlanSection = "todo" | "final-wave" | "other"
|
||||
|
||||
function buildTaskRef(
|
||||
section: "todo" | "final-wave",
|
||||
taskLabel: string,
|
||||
): TopLevelTaskRef | null {
|
||||
const pattern = section === "todo" ? TODO_TASK_PATTERN : FINAL_WAVE_TASK_PATTERN
|
||||
const match = taskLabel.match(pattern)
|
||||
if (!match) {
|
||||
return null
|
||||
}
|
||||
|
||||
const rawLabel = match[1]
|
||||
const title = match[2].trim()
|
||||
|
||||
return {
|
||||
key: `${section}:${rawLabel.toLowerCase()}`,
|
||||
section,
|
||||
label: rawLabel,
|
||||
title,
|
||||
}
|
||||
}
|
||||
|
||||
export function readCurrentTopLevelTask(planPath: string): TopLevelTaskRef | null {
|
||||
if (!existsSync(planPath)) {
|
||||
return null
|
||||
}
|
||||
|
||||
try {
|
||||
const content = readFileSync(planPath, "utf-8")
|
||||
const lines = content.split(/\r?\n/)
|
||||
let section: PlanSection = "other"
|
||||
|
||||
for (const line of lines) {
|
||||
if (SECOND_LEVEL_HEADING_PATTERN.test(line)) {
|
||||
section = TODO_HEADING_PATTERN.test(line)
|
||||
? "todo"
|
||||
: FINAL_VERIFICATION_HEADING_PATTERN.test(line)
|
||||
? "final-wave"
|
||||
: "other"
|
||||
}
|
||||
|
||||
const uncheckedTaskMatch = line.match(UNCHECKED_CHECKBOX_PATTERN)
|
||||
if (!uncheckedTaskMatch) {
|
||||
continue
|
||||
}
|
||||
|
||||
if (uncheckedTaskMatch[1].length > 0) {
|
||||
continue
|
||||
}
|
||||
|
||||
if (section !== "todo" && section !== "final-wave") {
|
||||
continue
|
||||
}
|
||||
|
||||
const taskRef = buildTaskRef(section, uncheckedTaskMatch[2].trim())
|
||||
if (taskRef) {
|
||||
return taskRef
|
||||
}
|
||||
}
|
||||
|
||||
return null
|
||||
} catch {
|
||||
return null
|
||||
}
|
||||
}
|
||||
@@ -18,6 +18,8 @@ export interface BoulderState {
|
||||
agent?: string
|
||||
/** Absolute path to the git worktree root where work happens */
|
||||
worktree_path?: string
|
||||
/** Preferred reusable subagent sessions keyed by current top-level plan task */
|
||||
task_sessions?: Record<string, TaskSessionState>
|
||||
}
|
||||
|
||||
export interface PlanProgress {
|
||||
@@ -28,3 +30,31 @@ export interface PlanProgress {
|
||||
/** Whether all tasks are done */
|
||||
isComplete: boolean
|
||||
}
|
||||
|
||||
export interface TaskSessionState {
|
||||
/** Stable identifier for the current top-level plan task (e.g. todo:1 / final-wave:F1) */
|
||||
task_key: string
|
||||
/** Original task label from the plan file */
|
||||
task_label: string
|
||||
/** Full task title from the plan file */
|
||||
task_title: string
|
||||
/** Preferred reusable subagent session */
|
||||
session_id: string
|
||||
/** Agent associated with the task session, when known */
|
||||
agent?: string
|
||||
/** Category associated with the task session, when known */
|
||||
category?: string
|
||||
/** Last update timestamp */
|
||||
updated_at: string
|
||||
}
|
||||
|
||||
export interface TopLevelTaskRef {
|
||||
/** Stable identifier for the current top-level plan task */
|
||||
key: string
|
||||
/** Task section in the Prometheus plan */
|
||||
section: "todo" | "final-wave"
|
||||
/** Original label token (e.g. 1 / F1) */
|
||||
label: string
|
||||
/** Full task title extracted from the checkbox line */
|
||||
title: string
|
||||
}
|
||||
|
||||
@@ -7,7 +7,7 @@ export const START_WORK_TEMPLATE = `You are starting a Sisyphus work session.
|
||||
- \`--worktree <path>\` (optional): absolute path to an existing git worktree to work in
|
||||
- If specified and valid: hook pre-sets worktree_path in boulder.json
|
||||
- If specified but invalid: you must run \`git worktree add <path> <branch>\` first
|
||||
- If omitted: you MUST choose or create a worktree (see Worktree Setup below)
|
||||
- If omitted: work directly in the current project directory (no worktree)
|
||||
|
||||
## WHAT TO DO
|
||||
|
||||
@@ -24,7 +24,7 @@ export const START_WORK_TEMPLATE = `You are starting a Sisyphus work session.
|
||||
- If ONE plan: auto-select it
|
||||
- If MULTIPLE plans: show list with timestamps, ask user to select
|
||||
|
||||
4. **Worktree Setup** (when \`worktree_path\` not already set in boulder.json):
|
||||
4. **Worktree Setup** (ONLY when \`--worktree\` was explicitly specified and \`worktree_path\` not already set in boulder.json):
|
||||
1. \`git worktree list --porcelain\` — see available worktrees
|
||||
2. Create: \`git worktree add <absolute-path> <branch-or-HEAD>\`
|
||||
3. Update boulder.json to add \`"worktree_path": "<absolute-path>"\`
|
||||
@@ -86,6 +86,38 @@ Reading plan and beginning execution...
|
||||
|
||||
- The session_id is injected by the hook - use it directly
|
||||
- Always update boulder.json BEFORE starting work
|
||||
- Always set worktree_path in boulder.json before executing any tasks
|
||||
- If worktree_path is set in boulder.json, all work happens inside that worktree directory
|
||||
- Read the FULL plan file before delegating any tasks
|
||||
- Follow atlas delegation protocols (7-section format)`
|
||||
- Follow atlas delegation protocols (7-section format)
|
||||
|
||||
## TASK BREAKDOWN (MANDATORY)
|
||||
|
||||
After reading the plan file, you MUST decompose every plan task into granular, implementation-level sub-steps and register ALL of them as task/todo items BEFORE starting any work.
|
||||
|
||||
**How to break down**:
|
||||
- Each plan checkbox item (e.g., \`- [ ] Add user authentication\`) must be split into concrete, actionable sub-tasks
|
||||
- Sub-tasks should be specific enough that each one touches a clear set of files/functions
|
||||
- Include: file to modify, what to change, expected behavior, and how to verify
|
||||
- Do NOT leave any task vague — "implement feature X" is NOT acceptable; "add validateToken() to src/auth/middleware.ts that checks JWT expiry and returns 401" IS acceptable
|
||||
|
||||
**Example breakdown**:
|
||||
Plan task: \`- [ ] Add rate limiting to API\`
|
||||
→ Todo items:
|
||||
1. Create \`src/middleware/rate-limiter.ts\` with sliding window algorithm (max 100 req/min per IP)
|
||||
2. Add RateLimiter middleware to \`src/app.ts\` router chain, before auth middleware
|
||||
3. Add rate limit headers (X-RateLimit-Limit, X-RateLimit-Remaining) to response in \`rate-limiter.ts\`
|
||||
4. Add test: verify 429 response after exceeding limit in \`src/middleware/rate-limiter.test.ts\`
|
||||
5. Add test: verify headers are present on normal responses
|
||||
|
||||
Register these as task/todo items so progress is tracked and visible throughout the session.
|
||||
|
||||
## WORKTREE COMPLETION
|
||||
|
||||
When working in a worktree (\`worktree_path\` is set in boulder.json) and ALL plan tasks are complete:
|
||||
1. Commit all remaining changes in the worktree
|
||||
2. Switch to the main working directory (the original repo, NOT the worktree)
|
||||
3. Merge the worktree branch into the current branch: \`git merge <worktree-branch>\`
|
||||
4. If merge succeeds, clean up: \`git worktree remove <worktree-path>\`
|
||||
5. Remove the boulder.json state
|
||||
|
||||
This is the DEFAULT behavior when \`--worktree\` was used. Skip merge only if the user explicitly instructs otherwise (e.g., asks to create a PR instead).`
|
||||
|
||||
112
src/features/claude-code-plugin-loader/loader.test.ts
Normal file
112
src/features/claude-code-plugin-loader/loader.test.ts
Normal file
@@ -0,0 +1,112 @@
|
||||
import { describe, it, expect, beforeEach, afterEach } from "bun:test"
|
||||
import type { PluginComponentsResult } from "./loader"
|
||||
|
||||
describe("loadAllPluginComponents", () => {
|
||||
const originalEnv = { ...process.env }
|
||||
|
||||
beforeEach(() => {
|
||||
delete process.env.OPENCODE_DISABLE_CLAUDE_CODE
|
||||
delete process.env.OPENCODE_DISABLE_CLAUDE_CODE_PLUGINS
|
||||
})
|
||||
|
||||
afterEach(() => {
|
||||
process.env = { ...originalEnv }
|
||||
})
|
||||
|
||||
describe("when OPENCODE_DISABLE_CLAUDE_CODE is set to 'true'", () => {
|
||||
it("returns empty result without loading any plugins", async () => {
|
||||
// given
|
||||
process.env.OPENCODE_DISABLE_CLAUDE_CODE = "true"
|
||||
|
||||
// when
|
||||
const { loadAllPluginComponents } = await import("./loader")
|
||||
const result: PluginComponentsResult = await loadAllPluginComponents()
|
||||
|
||||
// then
|
||||
expect(result.commands).toEqual({})
|
||||
expect(result.skills).toEqual({})
|
||||
expect(result.agents).toEqual({})
|
||||
expect(result.mcpServers).toEqual({})
|
||||
expect(result.hooksConfigs).toEqual([])
|
||||
expect(result.plugins).toEqual([])
|
||||
expect(result.errors).toEqual([])
|
||||
})
|
||||
})
|
||||
|
||||
describe("when OPENCODE_DISABLE_CLAUDE_CODE is set to '1'", () => {
|
||||
it("returns empty result without loading any plugins", async () => {
|
||||
// given
|
||||
process.env.OPENCODE_DISABLE_CLAUDE_CODE = "1"
|
||||
|
||||
// when
|
||||
const { loadAllPluginComponents } = await import("./loader")
|
||||
const result: PluginComponentsResult = await loadAllPluginComponents()
|
||||
|
||||
// then
|
||||
expect(result.commands).toEqual({})
|
||||
expect(result.plugins).toEqual([])
|
||||
})
|
||||
})
|
||||
|
||||
describe("when OPENCODE_DISABLE_CLAUDE_CODE_PLUGINS is set to 'true'", () => {
|
||||
it("returns empty result without loading any plugins", async () => {
|
||||
// given
|
||||
process.env.OPENCODE_DISABLE_CLAUDE_CODE_PLUGINS = "true"
|
||||
|
||||
// when
|
||||
const { loadAllPluginComponents } = await import("./loader")
|
||||
const result: PluginComponentsResult = await loadAllPluginComponents()
|
||||
|
||||
// then
|
||||
expect(result.commands).toEqual({})
|
||||
expect(result.plugins).toEqual([])
|
||||
})
|
||||
})
|
||||
|
||||
describe("when OPENCODE_DISABLE_CLAUDE_CODE_PLUGINS is set to '1'", () => {
|
||||
it("returns empty result without loading any plugins", async () => {
|
||||
// given
|
||||
process.env.OPENCODE_DISABLE_CLAUDE_CODE_PLUGINS = "1"
|
||||
|
||||
// when
|
||||
const { loadAllPluginComponents } = await import("./loader")
|
||||
const result: PluginComponentsResult = await loadAllPluginComponents()
|
||||
|
||||
// then
|
||||
expect(result.commands).toEqual({})
|
||||
expect(result.plugins).toEqual([])
|
||||
})
|
||||
})
|
||||
|
||||
describe("when neither env var is set", () => {
|
||||
it("does not skip plugin loading", async () => {
|
||||
// given
|
||||
delete process.env.OPENCODE_DISABLE_CLAUDE_CODE
|
||||
delete process.env.OPENCODE_DISABLE_CLAUDE_CODE_PLUGINS
|
||||
|
||||
// when
|
||||
const { loadAllPluginComponents } = await import("./loader")
|
||||
const result: PluginComponentsResult = await loadAllPluginComponents()
|
||||
|
||||
// then — should attempt to load (may find 0 plugins, but shouldn't early-return)
|
||||
expect(result).toBeDefined()
|
||||
expect(result).toHaveProperty("commands")
|
||||
expect(result).toHaveProperty("plugins")
|
||||
})
|
||||
})
|
||||
|
||||
describe("when env var is set to unrecognized value", () => {
|
||||
it("does not skip plugin loading", async () => {
|
||||
// given
|
||||
process.env.OPENCODE_DISABLE_CLAUDE_CODE = "yes"
|
||||
|
||||
// when
|
||||
const { loadAllPluginComponents } = await import("./loader")
|
||||
const result: PluginComponentsResult = await loadAllPluginComponents()
|
||||
|
||||
// then — "yes" is not "true" or "1", should not skip
|
||||
expect(result).toBeDefined()
|
||||
expect(result).toHaveProperty("plugins")
|
||||
})
|
||||
})
|
||||
})
|
||||
@@ -27,7 +27,26 @@ export interface PluginComponentsResult {
|
||||
errors: PluginLoadError[]
|
||||
}
|
||||
|
||||
function isClaudeCodePluginsDisabled(): boolean {
|
||||
const disableFlag = process.env.OPENCODE_DISABLE_CLAUDE_CODE
|
||||
const disablePluginsFlag = process.env.OPENCODE_DISABLE_CLAUDE_CODE_PLUGINS
|
||||
return disableFlag === "true" || disableFlag === "1" || disablePluginsFlag === "true" || disablePluginsFlag === "1"
|
||||
}
|
||||
|
||||
export async function loadAllPluginComponents(options?: PluginLoaderOptions): Promise<PluginComponentsResult> {
|
||||
if (isClaudeCodePluginsDisabled()) {
|
||||
log("Claude Code plugin loading disabled via OPENCODE_DISABLE_CLAUDE_CODE env var")
|
||||
return {
|
||||
commands: {},
|
||||
skills: {},
|
||||
agents: {},
|
||||
mcpServers: {},
|
||||
hooksConfigs: [],
|
||||
plugins: [],
|
||||
errors: [],
|
||||
}
|
||||
}
|
||||
|
||||
const { plugins, errors } = discoverInstalledPlugins(options)
|
||||
|
||||
const [commands, skills, agents, mcpServers, hooksConfigs] = await Promise.all([
|
||||
|
||||
@@ -1,44 +1,112 @@
|
||||
import { afterEach, describe, expect, it } from "bun:test"
|
||||
import { afterEach, beforeEach, describe, expect, it, spyOn } from "bun:test"
|
||||
import { startCallbackServer, type CallbackServer } from "./callback-server"
|
||||
|
||||
const HOSTNAME = "127.0.0.1"
|
||||
const nativeFetch = Bun.fetch.bind(Bun)
|
||||
|
||||
function supportsRealSocketBinding(): boolean {
|
||||
try {
|
||||
const server = Bun.serve({
|
||||
port: 0,
|
||||
hostname: HOSTNAME,
|
||||
fetch: () => new Response("probe"),
|
||||
})
|
||||
server.stop(true)
|
||||
return true
|
||||
} catch {
|
||||
return false
|
||||
}
|
||||
}
|
||||
|
||||
const canBindRealSockets = supportsRealSocketBinding()
|
||||
|
||||
type MockServerState = {
|
||||
port: number
|
||||
stopped: boolean
|
||||
fetch: (request: Request) => Response | Promise<Response>
|
||||
}
|
||||
|
||||
describe("startCallbackServer", () => {
|
||||
let server: CallbackServer | null = null
|
||||
let serveSpy: ReturnType<typeof spyOn> | null = null
|
||||
let activeServer: MockServerState | null = null
|
||||
|
||||
async function request(url: string): Promise<Response> {
|
||||
if (canBindRealSockets) {
|
||||
return nativeFetch(url)
|
||||
}
|
||||
|
||||
if (!activeServer || activeServer.stopped) {
|
||||
throw new Error("Connection refused")
|
||||
}
|
||||
|
||||
return await activeServer.fetch(new Request(url))
|
||||
}
|
||||
|
||||
beforeEach(() => {
|
||||
if (canBindRealSockets) {
|
||||
return
|
||||
}
|
||||
|
||||
activeServer = null
|
||||
serveSpy = spyOn(Bun, "serve").mockImplementation((options: {
|
||||
port: number
|
||||
hostname?: string
|
||||
fetch: (request: Request) => Response | Promise<Response>
|
||||
}) => {
|
||||
const state: MockServerState = {
|
||||
port: options.port === 0 ? 19877 : options.port,
|
||||
stopped: false,
|
||||
fetch: options.fetch,
|
||||
}
|
||||
|
||||
const handle = {
|
||||
port: state.port,
|
||||
stop: (_force?: boolean) => {
|
||||
state.stopped = true
|
||||
if (activeServer === state) {
|
||||
activeServer = null
|
||||
}
|
||||
},
|
||||
}
|
||||
|
||||
activeServer = state
|
||||
return handle as ReturnType<typeof Bun.serve>
|
||||
})
|
||||
})
|
||||
|
||||
afterEach(async () => {
|
||||
server?.close()
|
||||
server = null
|
||||
// Allow time for port to be released before next test
|
||||
await Bun.sleep(10)
|
||||
|
||||
if (serveSpy) {
|
||||
serveSpy.mockRestore()
|
||||
serveSpy = null
|
||||
}
|
||||
activeServer = null
|
||||
|
||||
if (canBindRealSockets) {
|
||||
await Bun.sleep(10)
|
||||
}
|
||||
})
|
||||
|
||||
it("starts server and returns port", async () => {
|
||||
// given - no preconditions
|
||||
|
||||
// when
|
||||
server = await startCallbackServer()
|
||||
|
||||
// then
|
||||
expect(server.port).toBeGreaterThanOrEqual(19877)
|
||||
expect(typeof server.waitForCallback).toBe("function")
|
||||
expect(typeof server.close).toBe("function")
|
||||
})
|
||||
|
||||
it("resolves callback with code and state from query params", async () => {
|
||||
// given
|
||||
server = await startCallbackServer()
|
||||
const callbackUrl = `http://127.0.0.1:${server.port}/oauth/callback?code=test-code&state=test-state`
|
||||
const callbackUrl = `http://${HOSTNAME}:${server.port}/oauth/callback?code=test-code&state=test-state`
|
||||
|
||||
// when
|
||||
// Use Promise.all to ensure fetch and waitForCallback run concurrently
|
||||
// This prevents race condition where waitForCallback blocks before fetch starts
|
||||
const [result, response] = await Promise.all([
|
||||
server.waitForCallback(),
|
||||
nativeFetch(callbackUrl)
|
||||
request(callbackUrl),
|
||||
])
|
||||
|
||||
// then
|
||||
expect(result).toEqual({ code: "test-code", state: "test-state" })
|
||||
expect(response.status).toBe(200)
|
||||
const html = await response.text()
|
||||
@@ -46,25 +114,19 @@ describe("startCallbackServer", () => {
|
||||
})
|
||||
|
||||
it("returns 404 for non-callback routes", async () => {
|
||||
// given
|
||||
server = await startCallbackServer()
|
||||
|
||||
// when
|
||||
const response = await nativeFetch(`http://127.0.0.1:${server.port}/other`)
|
||||
const response = await request(`http://${HOSTNAME}:${server.port}/other`)
|
||||
|
||||
// then
|
||||
expect(response.status).toBe(404)
|
||||
})
|
||||
|
||||
it("returns 400 and rejects when code is missing", async () => {
|
||||
// given
|
||||
server = await startCallbackServer()
|
||||
const callbackRejection = server.waitForCallback().catch((e: Error) => e)
|
||||
const callbackRejection = server.waitForCallback().catch((error: Error) => error)
|
||||
|
||||
// when
|
||||
const response = await nativeFetch(`http://127.0.0.1:${server.port}/oauth/callback?state=s`)
|
||||
const response = await request(`http://${HOSTNAME}:${server.port}/oauth/callback?state=s`)
|
||||
|
||||
// then
|
||||
expect(response.status).toBe(400)
|
||||
const error = await callbackRejection
|
||||
expect(error).toBeInstanceOf(Error)
|
||||
@@ -72,14 +134,11 @@ describe("startCallbackServer", () => {
|
||||
})
|
||||
|
||||
it("returns 400 and rejects when state is missing", async () => {
|
||||
// given
|
||||
server = await startCallbackServer()
|
||||
const callbackRejection = server.waitForCallback().catch((e: Error) => e)
|
||||
const callbackRejection = server.waitForCallback().catch((error: Error) => error)
|
||||
|
||||
// when
|
||||
const response = await nativeFetch(`http://127.0.0.1:${server.port}/oauth/callback?code=c`)
|
||||
const response = await request(`http://${HOSTNAME}:${server.port}/oauth/callback?code=c`)
|
||||
|
||||
// then
|
||||
expect(response.status).toBe(400)
|
||||
const error = await callbackRejection
|
||||
expect(error).toBeInstanceOf(Error)
|
||||
@@ -87,18 +146,15 @@ describe("startCallbackServer", () => {
|
||||
})
|
||||
|
||||
it("close stops the server immediately", async () => {
|
||||
// given
|
||||
server = await startCallbackServer()
|
||||
const port = server.port
|
||||
|
||||
// when
|
||||
server.close()
|
||||
server = null
|
||||
|
||||
// then
|
||||
try {
|
||||
await nativeFetch(`http://127.0.0.1:${port}/oauth/callback?code=c&state=s`)
|
||||
expect(true).toBe(false)
|
||||
await request(`http://${HOSTNAME}:${port}/oauth/callback?code=c&state=s`)
|
||||
expect.unreachable("request should fail after close")
|
||||
} catch (error) {
|
||||
expect(error).toBeDefined()
|
||||
}
|
||||
|
||||
@@ -39,7 +39,7 @@ export async function findAvailablePort(startPort: number = DEFAULT_PORT): Promi
|
||||
}
|
||||
|
||||
export async function startCallbackServer(startPort: number = DEFAULT_PORT): Promise<CallbackServer> {
|
||||
const port = await findAvailablePort(startPort)
|
||||
const requestedPort = await findAvailablePort(startPort).catch(() => 0)
|
||||
|
||||
let resolveCallback: ((result: OAuthCallbackResult) => void) | null = null
|
||||
let rejectCallback: ((error: Error) => void) | null = null
|
||||
@@ -55,7 +55,7 @@ export async function startCallbackServer(startPort: number = DEFAULT_PORT): Pro
|
||||
}, TIMEOUT_MS)
|
||||
|
||||
const server = Bun.serve({
|
||||
port,
|
||||
port: requestedPort,
|
||||
hostname: "127.0.0.1",
|
||||
fetch(request: Request): Response {
|
||||
const url = new URL(request.url)
|
||||
@@ -93,9 +93,10 @@ export async function startCallbackServer(startPort: number = DEFAULT_PORT): Pro
|
||||
})
|
||||
},
|
||||
})
|
||||
const activePort = server.port ?? requestedPort
|
||||
|
||||
return {
|
||||
port,
|
||||
port: activePort,
|
||||
waitForCallback: () => callbackPromise,
|
||||
close: () => {
|
||||
clearTimeout(timeoutId)
|
||||
|
||||
@@ -153,3 +153,25 @@ describe("#given git_env_prefix with commit footer", () => {
|
||||
})
|
||||
})
|
||||
})
|
||||
|
||||
describe("#given idempotency of prefixGitCommandsInBashCodeBlocks", () => {
|
||||
describe("#when git_env_prefix is provided and template already has prefixed commands in env prefix section", () => {
|
||||
it("#then does NOT double-prefix the already-prefixed commands", () => {
|
||||
const result = injectGitMasterConfig(SAMPLE_TEMPLATE, {
|
||||
commit_footer: false,
|
||||
include_co_authored_by: false,
|
||||
git_env_prefix: "GIT_MASTER=1",
|
||||
})
|
||||
|
||||
expect(result).not.toContain("GIT_MASTER=1 GIT_MASTER=1 git status")
|
||||
expect(result).not.toContain("GIT_MASTER=1 GIT_MASTER=1 git add")
|
||||
expect(result).not.toContain("GIT_MASTER=1 GIT_MASTER=1 git commit")
|
||||
expect(result).not.toContain("GIT_MASTER=1 GIT_MASTER=1 git push")
|
||||
|
||||
expect(result).toContain("GIT_MASTER=1 git status")
|
||||
expect(result).toContain("GIT_MASTER=1 git add")
|
||||
expect(result).toContain("GIT_MASTER=1 git commit")
|
||||
expect(result).toContain("GIT_MASTER=1 git push")
|
||||
})
|
||||
})
|
||||
})
|
||||
|
||||
@@ -72,8 +72,16 @@ function prefixGitCommandsInBashCodeBlocks(template: string, prefix: string): st
|
||||
|
||||
function prefixGitCommandsInCodeBlock(codeBlock: string, prefix: string): string {
|
||||
return codeBlock
|
||||
.replace(LEADING_GIT_COMMAND_PATTERN, `$1${prefix} git`)
|
||||
.replace(INLINE_GIT_COMMAND_PATTERN, `$1${prefix} git`)
|
||||
.split("\n")
|
||||
.map((line) => {
|
||||
if (line.includes(prefix)) {
|
||||
return line
|
||||
}
|
||||
return line
|
||||
.replace(LEADING_GIT_COMMAND_PATTERN, `$1${prefix} git`)
|
||||
.replace(INLINE_GIT_COMMAND_PATTERN, `$1${prefix} git`)
|
||||
})
|
||||
.join("\n")
|
||||
}
|
||||
|
||||
function buildCommitFooterInjection(
|
||||
|
||||
@@ -199,3 +199,236 @@ describe("EXCLUDED_ENV_PATTERNS", () => {
|
||||
}
|
||||
})
|
||||
})
|
||||
describe("secret env var filtering", () => {
|
||||
it("filters out ANTHROPIC_API_KEY", () => {
|
||||
// given
|
||||
process.env.ANTHROPIC_API_KEY = "sk-ant-api03-secret"
|
||||
process.env.PATH = "/usr/bin"
|
||||
|
||||
// when
|
||||
const cleanEnv = createCleanMcpEnvironment()
|
||||
|
||||
// then
|
||||
expect(cleanEnv.ANTHROPIC_API_KEY).toBeUndefined()
|
||||
expect(cleanEnv.PATH).toBe("/usr/bin")
|
||||
})
|
||||
|
||||
it("filters out AWS_SECRET_ACCESS_KEY", () => {
|
||||
// given
|
||||
process.env.AWS_SECRET_ACCESS_KEY = "wJalrXUtnFEMI/K7MDENG/bPxRfiCYEXAMPLEKEY"
|
||||
process.env.AWS_ACCESS_KEY_ID = "AKIAIOSFODNN7EXAMPLE"
|
||||
process.env.HOME = "/home/user"
|
||||
|
||||
// when
|
||||
const cleanEnv = createCleanMcpEnvironment()
|
||||
|
||||
// then
|
||||
expect(cleanEnv.AWS_SECRET_ACCESS_KEY).toBeUndefined()
|
||||
expect(cleanEnv.AWS_ACCESS_KEY_ID).toBeUndefined()
|
||||
expect(cleanEnv.HOME).toBe("/home/user")
|
||||
})
|
||||
|
||||
it("filters out GITHUB_TOKEN", () => {
|
||||
// given
|
||||
process.env.GITHUB_TOKEN = "ghp_secrettoken123456789"
|
||||
process.env.GITHUB_API_TOKEN = "another_secret_token"
|
||||
process.env.SHELL = "/bin/bash"
|
||||
|
||||
// when
|
||||
const cleanEnv = createCleanMcpEnvironment()
|
||||
|
||||
// then
|
||||
expect(cleanEnv.GITHUB_TOKEN).toBeUndefined()
|
||||
expect(cleanEnv.GITHUB_API_TOKEN).toBeUndefined()
|
||||
expect(cleanEnv.SHELL).toBe("/bin/bash")
|
||||
})
|
||||
|
||||
it("filters out OPENAI_API_KEY", () => {
|
||||
// given
|
||||
process.env.OPENAI_API_KEY = "sk-secret123456789"
|
||||
process.env.LANG = "en_US.UTF-8"
|
||||
|
||||
// when
|
||||
const cleanEnv = createCleanMcpEnvironment()
|
||||
|
||||
// then
|
||||
expect(cleanEnv.OPENAI_API_KEY).toBeUndefined()
|
||||
expect(cleanEnv.LANG).toBe("en_US.UTF-8")
|
||||
})
|
||||
|
||||
it("filters out DATABASE_URL with credentials", () => {
|
||||
// given
|
||||
process.env.DATABASE_URL = "postgresql://user:password@localhost:5432/db"
|
||||
process.env.DB_PASSWORD = "supersecretpassword"
|
||||
process.env.TERM = "xterm-256color"
|
||||
|
||||
// when
|
||||
const cleanEnv = createCleanMcpEnvironment()
|
||||
|
||||
// then
|
||||
expect(cleanEnv.DATABASE_URL).toBeUndefined()
|
||||
expect(cleanEnv.DB_PASSWORD).toBeUndefined()
|
||||
expect(cleanEnv.TERM).toBe("xterm-256color")
|
||||
})
|
||||
})
|
||||
|
||||
describe("suffix-based secret filtering", () => {
|
||||
it("filters variables ending with _KEY", () => {
|
||||
// given
|
||||
process.env.MY_API_KEY = "secret-value"
|
||||
process.env.SOME_KEY = "another-secret"
|
||||
process.env.TMPDIR = "/tmp"
|
||||
|
||||
// when
|
||||
const cleanEnv = createCleanMcpEnvironment()
|
||||
|
||||
// then
|
||||
expect(cleanEnv.MY_API_KEY).toBeUndefined()
|
||||
expect(cleanEnv.SOME_KEY).toBeUndefined()
|
||||
expect(cleanEnv.TMPDIR).toBe("/tmp")
|
||||
})
|
||||
|
||||
it("filters variables ending with _SECRET", () => {
|
||||
// given
|
||||
process.env.AWS_SECRET = "secret-value"
|
||||
process.env.JWT_SECRET = "jwt-secret-token"
|
||||
process.env.USER = "testuser"
|
||||
|
||||
// when
|
||||
const cleanEnv = createCleanMcpEnvironment()
|
||||
|
||||
// then
|
||||
expect(cleanEnv.AWS_SECRET).toBeUndefined()
|
||||
expect(cleanEnv.JWT_SECRET).toBeUndefined()
|
||||
expect(cleanEnv.USER).toBe("testuser")
|
||||
})
|
||||
|
||||
it("filters variables ending with _TOKEN", () => {
|
||||
// given
|
||||
process.env.ACCESS_TOKEN = "token-value"
|
||||
process.env.BEARER_TOKEN = "bearer-token"
|
||||
process.env.HOME = "/home/user"
|
||||
|
||||
// when
|
||||
const cleanEnv = createCleanMcpEnvironment()
|
||||
|
||||
// then
|
||||
expect(cleanEnv.ACCESS_TOKEN).toBeUndefined()
|
||||
expect(cleanEnv.BEARER_TOKEN).toBeUndefined()
|
||||
expect(cleanEnv.HOME).toBe("/home/user")
|
||||
})
|
||||
|
||||
it("filters variables ending with _PASSWORD", () => {
|
||||
// given
|
||||
process.env.DB_PASSWORD = "db-password"
|
||||
process.env.APP_PASSWORD = "app-secret"
|
||||
process.env.NODE_ENV = "production"
|
||||
|
||||
// when
|
||||
const cleanEnv = createCleanMcpEnvironment()
|
||||
|
||||
// then
|
||||
expect(cleanEnv.DB_PASSWORD).toBeUndefined()
|
||||
expect(cleanEnv.APP_PASSWORD).toBeUndefined()
|
||||
expect(cleanEnv.NODE_ENV).toBe("production")
|
||||
})
|
||||
|
||||
it("filters variables ending with _CREDENTIAL", () => {
|
||||
// given
|
||||
process.env.GCP_CREDENTIAL = "json-credential"
|
||||
process.env.AZURE_CREDENTIAL = "azure-creds"
|
||||
process.env.PWD = "/current/dir"
|
||||
|
||||
// when
|
||||
const cleanEnv = createCleanMcpEnvironment()
|
||||
|
||||
// then
|
||||
expect(cleanEnv.GCP_CREDENTIAL).toBeUndefined()
|
||||
expect(cleanEnv.AZURE_CREDENTIAL).toBeUndefined()
|
||||
expect(cleanEnv.PWD).toBe("/current/dir")
|
||||
})
|
||||
|
||||
it("filters variables ending with _API_KEY", () => {
|
||||
// given
|
||||
// given
|
||||
process.env.STRIPE_API_KEY = "sk_live_secret"
|
||||
process.env.SENDGRID_API_KEY = "SG.secret"
|
||||
process.env.SHELL = "/bin/zsh"
|
||||
|
||||
// when
|
||||
const cleanEnv = createCleanMcpEnvironment()
|
||||
|
||||
// then
|
||||
expect(cleanEnv.STRIPE_API_KEY).toBeUndefined()
|
||||
expect(cleanEnv.SENDGRID_API_KEY).toBeUndefined()
|
||||
expect(cleanEnv.SHELL).toBe("/bin/zsh")
|
||||
})
|
||||
})
|
||||
|
||||
describe("safe environment variables preserved", () => {
|
||||
it("preserves PATH", () => {
|
||||
// given
|
||||
process.env.PATH = "/usr/bin:/usr/local/bin"
|
||||
|
||||
// when
|
||||
const cleanEnv = createCleanMcpEnvironment()
|
||||
|
||||
// then
|
||||
expect(cleanEnv.PATH).toBe("/usr/bin:/usr/local/bin")
|
||||
})
|
||||
|
||||
it("preserves HOME", () => {
|
||||
// given
|
||||
process.env.HOME = "/home/testuser"
|
||||
|
||||
// when
|
||||
const cleanEnv = createCleanMcpEnvironment()
|
||||
|
||||
// then
|
||||
expect(cleanEnv.HOME).toBe("/home/testuser")
|
||||
})
|
||||
|
||||
it("preserves SHELL", () => {
|
||||
// given
|
||||
process.env.SHELL = "/bin/bash"
|
||||
|
||||
// when
|
||||
const cleanEnv = createCleanMcpEnvironment()
|
||||
|
||||
// then
|
||||
expect(cleanEnv.SHELL).toBe("/bin/bash")
|
||||
})
|
||||
|
||||
it("preserves LANG", () => {
|
||||
// given
|
||||
process.env.LANG = "en_US.UTF-8"
|
||||
|
||||
// when
|
||||
const cleanEnv = createCleanMcpEnvironment()
|
||||
|
||||
// then
|
||||
expect(cleanEnv.LANG).toBe("en_US.UTF-8")
|
||||
})
|
||||
|
||||
it("preserves TERM", () => {
|
||||
// given
|
||||
process.env.TERM = "xterm-256color"
|
||||
|
||||
// when
|
||||
const cleanEnv = createCleanMcpEnvironment()
|
||||
|
||||
// then
|
||||
expect(cleanEnv.TERM).toBe("xterm-256color")
|
||||
})
|
||||
|
||||
it("preserves TMPDIR", () => {
|
||||
// given
|
||||
process.env.TMPDIR = "/tmp"
|
||||
|
||||
// when
|
||||
const cleanEnv = createCleanMcpEnvironment()
|
||||
|
||||
// then
|
||||
expect(cleanEnv.TMPDIR).toBe("/tmp")
|
||||
})
|
||||
})
|
||||
|
||||
@@ -1,10 +1,28 @@
|
||||
// Filters npm/pnpm/yarn config env vars that break MCP servers in pnpm projects (#456)
|
||||
// Also filters secret-containing env vars to prevent exposure to malicious stdio MCP servers (#B-02)
|
||||
export const EXCLUDED_ENV_PATTERNS: RegExp[] = [
|
||||
// npm/pnpm/yarn config patterns (original)
|
||||
/^NPM_CONFIG_/i,
|
||||
/^npm_config_/,
|
||||
/^YARN_/,
|
||||
/^PNPM_/,
|
||||
/^NO_UPDATE_NOTIFIER$/,
|
||||
|
||||
// Specific high-risk secret env vars (explicit blocks)
|
||||
/^ANTHROPIC_API_KEY$/i,
|
||||
/^AWS_ACCESS_KEY_ID$/i,
|
||||
/^AWS_SECRET_ACCESS_KEY$/i,
|
||||
/^GITHUB_TOKEN$/i,
|
||||
/^DATABASE_URL$/i,
|
||||
/^OPENAI_API_KEY$/i,
|
||||
|
||||
// Suffix-based patterns for common secret naming conventions
|
||||
/_KEY$/i,
|
||||
/_SECRET$/i,
|
||||
/_TOKEN$/i,
|
||||
/_PASSWORD$/i,
|
||||
/_CREDENTIAL$/i,
|
||||
/_API_KEY$/i,
|
||||
]
|
||||
|
||||
export function createCleanMcpEnvironment(
|
||||
|
||||
@@ -279,6 +279,116 @@ describe("TaskToastManager", () => {
|
||||
})
|
||||
})
|
||||
|
||||
describe("model name display in task line", () => {
|
||||
test("should show model name before category when modelInfo exists", () => {
|
||||
// given - a task with category and modelInfo
|
||||
const task = {
|
||||
id: "task_model_display",
|
||||
description: "Build UI component",
|
||||
agent: "sisyphus-junior",
|
||||
isBackground: true,
|
||||
category: "deep",
|
||||
modelInfo: { model: "openai/gpt-5.3-codex", type: "category-default" as const },
|
||||
}
|
||||
|
||||
// when - addTask is called
|
||||
toastManager.addTask(task)
|
||||
|
||||
// then - toast should show model name before category like "gpt-5.3-codex: deep"
|
||||
const call = mockClient.tui.showToast.mock.calls[0][0]
|
||||
expect(call.body.message).toContain("gpt-5.3-codex: deep")
|
||||
expect(call.body.message).not.toContain("sisyphus-junior/deep")
|
||||
})
|
||||
|
||||
test("should strip provider prefix from model name", () => {
|
||||
// given - a task with provider-prefixed model
|
||||
const task = {
|
||||
id: "task_strip_provider",
|
||||
description: "Fix styles",
|
||||
agent: "sisyphus-junior",
|
||||
isBackground: false,
|
||||
category: "visual-engineering",
|
||||
modelInfo: { model: "google/gemini-3.1-pro", type: "category-default" as const },
|
||||
}
|
||||
|
||||
// when - addTask is called
|
||||
toastManager.addTask(task)
|
||||
|
||||
// then - should show model ID without provider prefix
|
||||
const call = mockClient.tui.showToast.mock.calls[0][0]
|
||||
expect(call.body.message).toContain("gemini-3.1-pro: visual-engineering")
|
||||
})
|
||||
|
||||
test("should fall back to agent/category format when no modelInfo", () => {
|
||||
// given - a task without modelInfo
|
||||
const task = {
|
||||
id: "task_no_model",
|
||||
description: "Quick fix",
|
||||
agent: "sisyphus-junior",
|
||||
isBackground: true,
|
||||
category: "quick",
|
||||
}
|
||||
|
||||
// when - addTask is called
|
||||
toastManager.addTask(task)
|
||||
|
||||
// then - should use old format with agent name
|
||||
const call = mockClient.tui.showToast.mock.calls[0][0]
|
||||
expect(call.body.message).toContain("sisyphus-junior/quick")
|
||||
})
|
||||
|
||||
test("should show model name without category when category is absent", () => {
|
||||
// given - a task with modelInfo but no category
|
||||
const task = {
|
||||
id: "task_model_no_cat",
|
||||
description: "Explore codebase",
|
||||
agent: "explore",
|
||||
isBackground: true,
|
||||
modelInfo: { model: "anthropic/claude-sonnet-4-6", type: "category-default" as const },
|
||||
}
|
||||
|
||||
// when - addTask is called
|
||||
toastManager.addTask(task)
|
||||
|
||||
// then - should show just the model name in parens
|
||||
const call = mockClient.tui.showToast.mock.calls[0][0]
|
||||
expect(call.body.message).toContain("(claude-sonnet-4-6)")
|
||||
})
|
||||
|
||||
test("should show model name in queued tasks too", () => {
|
||||
// given - a concurrency manager that limits to 1
|
||||
const limitedConcurrency = {
|
||||
getConcurrencyLimit: mock(() => 1),
|
||||
} as unknown as ConcurrencyManager
|
||||
// eslint-disable-next-line @typescript-eslint/no-explicit-any
|
||||
const limitedManager = new TaskToastManager(mockClient as any, limitedConcurrency)
|
||||
|
||||
limitedManager.addTask({
|
||||
id: "task_running",
|
||||
description: "Running task",
|
||||
agent: "sisyphus-junior",
|
||||
isBackground: true,
|
||||
category: "deep",
|
||||
modelInfo: { model: "openai/gpt-5.3-codex", type: "category-default" as const },
|
||||
})
|
||||
limitedManager.addTask({
|
||||
id: "task_queued",
|
||||
description: "Queued task",
|
||||
agent: "sisyphus-junior",
|
||||
isBackground: true,
|
||||
category: "quick",
|
||||
status: "queued",
|
||||
modelInfo: { model: "anthropic/claude-haiku-4-5", type: "category-default" as const },
|
||||
})
|
||||
|
||||
// when - the queued task toast fires
|
||||
const lastCall = mockClient.tui.showToast.mock.calls[1][0]
|
||||
|
||||
// then - queued task should also show model name
|
||||
expect(lastCall.body.message).toContain("claude-haiku-4-5: quick")
|
||||
})
|
||||
})
|
||||
|
||||
describe("updateTaskModelBySession", () => {
|
||||
test("updates task model info and shows fallback toast", () => {
|
||||
// given - task without model info
|
||||
|
||||
@@ -127,6 +127,13 @@ export class TaskToastManager {
|
||||
const queued = this.getQueuedTasks()
|
||||
const concurrencyInfo = this.getConcurrencyInfo()
|
||||
|
||||
const formatTaskIdentifier = (task: TrackedTask): string => {
|
||||
const modelName = task.modelInfo?.model?.split("/").pop()
|
||||
if (modelName && task.category) return `${modelName}: ${task.category}`
|
||||
if (modelName) return modelName
|
||||
if (task.category) return `${task.agent}/${task.category}`
|
||||
return task.agent
|
||||
}
|
||||
const lines: string[] = []
|
||||
|
||||
const isFallback = newTask.modelInfo && (
|
||||
@@ -151,9 +158,9 @@ export class TaskToastManager {
|
||||
const duration = this.formatDuration(task.startedAt)
|
||||
const bgIcon = task.isBackground ? "[BG]" : "[RUN]"
|
||||
const isNew = task.id === newTask.id ? " ← NEW" : ""
|
||||
const categoryInfo = task.category ? `/${task.category}` : ""
|
||||
const taskId = formatTaskIdentifier(task)
|
||||
const skillsInfo = task.skills?.length ? ` [${task.skills.join(", ")}]` : ""
|
||||
lines.push(`${bgIcon} ${task.description} (${task.agent}${categoryInfo})${skillsInfo} - ${duration}${isNew}`)
|
||||
lines.push(`${bgIcon} ${task.description} (${taskId})${skillsInfo} - ${duration}${isNew}`)
|
||||
}
|
||||
}
|
||||
|
||||
@@ -162,10 +169,10 @@ export class TaskToastManager {
|
||||
lines.push(`Queued (${queued.length}):`)
|
||||
for (const task of queued) {
|
||||
const bgIcon = task.isBackground ? "[Q]" : "[W]"
|
||||
const categoryInfo = task.category ? `/${task.category}` : ""
|
||||
const taskId = formatTaskIdentifier(task)
|
||||
const skillsInfo = task.skills?.length ? ` [${task.skills.join(", ")}]` : ""
|
||||
const isNew = task.id === newTask.id ? " ← NEW" : ""
|
||||
lines.push(`${bgIcon} ${task.description} (${task.agent}${categoryInfo})${skillsInfo} - Queued${isNew}`)
|
||||
lines.push(`${bgIcon} ${task.description} (${taskId})${skillsInfo} - Queued${isNew}`)
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
@@ -226,6 +226,29 @@ describe('TmuxSessionManager', () => {
|
||||
// then
|
||||
expect(manager).toBeDefined()
|
||||
})
|
||||
|
||||
test('falls back to default port when serverUrl has port 0', async () => {
|
||||
// given
|
||||
mockIsInsideTmux.mockReturnValue(true)
|
||||
const { TmuxSessionManager } = await import('./manager')
|
||||
const ctx = {
|
||||
...createMockContext(),
|
||||
serverUrl: new URL('http://127.0.0.1:0/'),
|
||||
}
|
||||
const config: TmuxConfig = {
|
||||
enabled: true,
|
||||
layout: 'main-vertical',
|
||||
main_pane_size: 60,
|
||||
main_pane_min_width: 80,
|
||||
agent_pane_min_width: 40,
|
||||
}
|
||||
|
||||
// when
|
||||
const manager = new TmuxSessionManager(ctx, config, mockTmuxDeps)
|
||||
|
||||
// then
|
||||
expect((manager as any).serverUrl).toBe('http://localhost:4096')
|
||||
})
|
||||
})
|
||||
|
||||
describe('onSessionCreated', () => {
|
||||
|
||||
@@ -73,10 +73,18 @@ export class TmuxSessionManager {
|
||||
this.tmuxConfig = tmuxConfig
|
||||
this.deps = deps
|
||||
const defaultPort = process.env.OPENCODE_PORT ?? "4096"
|
||||
const fallbackUrl = `http://localhost:${defaultPort}`
|
||||
try {
|
||||
this.serverUrl = ctx.serverUrl?.toString() ?? `http://localhost:${defaultPort}`
|
||||
const raw = ctx.serverUrl?.toString()
|
||||
if (raw) {
|
||||
const parsed = new URL(raw)
|
||||
const port = parsed.port || (parsed.protocol === 'https:' ? '443' : '80')
|
||||
this.serverUrl = port === '0' ? fallbackUrl : raw
|
||||
} else {
|
||||
this.serverUrl = fallbackUrl
|
||||
}
|
||||
} catch {
|
||||
this.serverUrl = `http://localhost:${defaultPort}`
|
||||
this.serverUrl = fallbackUrl
|
||||
}
|
||||
this.sourcePaneId = deps.getCurrentPaneId()
|
||||
this.pollingManager = new TmuxPollingManager(
|
||||
|
||||
@@ -1,10 +1,10 @@
|
||||
# src/hooks/ — 46 Lifecycle Hooks
|
||||
# src/hooks/ — 48 Lifecycle Hooks
|
||||
|
||||
**Generated:** 2026-03-06
|
||||
|
||||
## OVERVIEW
|
||||
|
||||
46 hooks across 45 directories + 11 standalone files. Three-tier composition: Core(37) + Continuation(7) + Skill(2). All hooks follow `createXXXHook(deps) → HookFunction` factory pattern.
|
||||
48 hooks across dedicated modules and standalone files. Three-tier composition: Core(39) + Continuation(7) + Skill(2). All hooks follow `createXXXHook(deps) → HookFunction` factory pattern.
|
||||
|
||||
## HOOK TIERS
|
||||
|
||||
@@ -85,7 +85,7 @@ hooks/
|
||||
| noHephaestusNonGpt | chat.message | Block Hephaestus from using non-GPT models |
|
||||
| runtimeFallback | event | Auto-switch models on API provider errors |
|
||||
|
||||
### Tier 2: Tool Guard Hooks (10) — `create-tool-guard-hooks.ts`
|
||||
### Tier 2: Tool Guard Hooks (12) — `create-tool-guard-hooks.ts`
|
||||
|
||||
| Hook | Event | Purpose |
|
||||
|------|-------|---------|
|
||||
|
||||
@@ -70,7 +70,7 @@ function isTokenLimitError(text: string): boolean {
|
||||
return false
|
||||
}
|
||||
const lower = text.toLowerCase()
|
||||
return TOKEN_LIMIT_KEYWORDS.some((kw) => lower.includes(kw.toLowerCase()))
|
||||
return TOKEN_LIMIT_KEYWORDS.some((kw) => lower.includes(kw))
|
||||
}
|
||||
|
||||
export function parseAnthropicTokenLimitError(err: unknown): ParsedTokenLimitError | null {
|
||||
|
||||
@@ -98,9 +98,9 @@ describe("runSummarizeRetryStrategy", () => {
|
||||
}) as typeof setTimeout
|
||||
|
||||
autoCompactState.retryStateBySession.set(sessionID, {
|
||||
attempt: 1,
|
||||
attempt: 0,
|
||||
lastAttemptTime: Date.now(),
|
||||
firstAttemptTime: Date.now() - 119700,
|
||||
firstAttemptTime: Date.now() - 119900,
|
||||
})
|
||||
summarizeMock.mockRejectedValueOnce(new Error("rate limited"))
|
||||
|
||||
@@ -117,6 +117,6 @@ describe("runSummarizeRetryStrategy", () => {
|
||||
//#then
|
||||
expect(timeoutCalls.length).toBe(1)
|
||||
expect(timeoutCalls[0]!.delay).toBeGreaterThan(0)
|
||||
expect(timeoutCalls[0]!.delay).toBeLessThanOrEqual(500)
|
||||
expect(timeoutCalls[0]!.delay).toBeLessThanOrEqual(300)
|
||||
})
|
||||
})
|
||||
|
||||
@@ -114,6 +114,7 @@ export async function runSummarizeRetryStrategy(params: {
|
||||
body: summarizeBody as never,
|
||||
query: { directory: params.directory },
|
||||
})
|
||||
clearSessionState(params.autoCompactState, params.sessionID)
|
||||
return
|
||||
} catch {
|
||||
const remainingTimeMs = SUMMARIZE_RETRY_TOTAL_TIMEOUT_MS - (Date.now() - retryState.firstAttemptTime)
|
||||
|
||||
@@ -1,6 +1,6 @@
|
||||
import { log, normalizeModelID } from "../../shared"
|
||||
|
||||
const OPUS_4_6_PATTERN = /claude-opus-4[-.]6/i
|
||||
const OPUS_PATTERN = /claude-opus/i
|
||||
|
||||
function isClaudeProvider(providerID: string, modelID: string): boolean {
|
||||
if (["anthropic", "google-vertex-anthropic", "opencode"].includes(providerID)) return true
|
||||
@@ -8,9 +8,9 @@ function isClaudeProvider(providerID: string, modelID: string): boolean {
|
||||
return false
|
||||
}
|
||||
|
||||
function isOpus46(modelID: string): boolean {
|
||||
function isOpusModel(modelID: string): boolean {
|
||||
const normalized = normalizeModelID(modelID)
|
||||
return OPUS_4_6_PATTERN.test(normalized)
|
||||
return OPUS_PATTERN.test(normalized)
|
||||
}
|
||||
|
||||
interface ChatParamsInput {
|
||||
@@ -28,6 +28,20 @@ interface ChatParamsOutput {
|
||||
options: Record<string, unknown>
|
||||
}
|
||||
|
||||
/**
|
||||
* Valid thinking budget levels per model tier.
|
||||
* Opus supports "max"; all other Claude models cap at "high".
|
||||
*/
|
||||
const MAX_VARIANT_BY_TIER: Record<string, string> = {
|
||||
opus: "max",
|
||||
default: "high",
|
||||
}
|
||||
|
||||
function clampVariant(variant: string, isOpus: boolean): string {
|
||||
if (variant !== "max") return variant
|
||||
return isOpus ? MAX_VARIANT_BY_TIER.opus : MAX_VARIANT_BY_TIER.default
|
||||
}
|
||||
|
||||
export function createAnthropicEffortHook() {
|
||||
return {
|
||||
"chat.params": async (
|
||||
@@ -38,15 +52,27 @@ export function createAnthropicEffortHook() {
|
||||
if (!model?.modelID || !model?.providerID) return
|
||||
if (message.variant !== "max") return
|
||||
if (!isClaudeProvider(model.providerID, model.modelID)) return
|
||||
if (!isOpus46(model.modelID)) return
|
||||
if (output.options.effort !== undefined) return
|
||||
|
||||
output.options.effort = "max"
|
||||
log("anthropic-effort: injected effort=max", {
|
||||
sessionID: input.sessionID,
|
||||
provider: model.providerID,
|
||||
model: model.modelID,
|
||||
})
|
||||
const opus = isOpusModel(model.modelID)
|
||||
const clamped = clampVariant(message.variant, opus)
|
||||
output.options.effort = clamped
|
||||
|
||||
if (!opus) {
|
||||
// Override the variant so OpenCode doesn't pass "max" to the API
|
||||
;(message as { variant?: string }).variant = clamped
|
||||
log("anthropic-effort: clamped variant max→high for non-Opus model", {
|
||||
sessionID: input.sessionID,
|
||||
provider: model.providerID,
|
||||
model: model.modelID,
|
||||
})
|
||||
} else {
|
||||
log("anthropic-effort: injected effort=max", {
|
||||
sessionID: input.sessionID,
|
||||
provider: model.providerID,
|
||||
model: model.modelID,
|
||||
})
|
||||
}
|
||||
},
|
||||
}
|
||||
}
|
||||
|
||||
@@ -116,6 +116,21 @@ describe("createAnthropicEffortHook", () => {
|
||||
//#then should normalize and inject effort
|
||||
expect(output.options.effort).toBe("max")
|
||||
})
|
||||
|
||||
it("should preserve max for other opus model IDs such as opus-4-5", async () => {
|
||||
//#given another opus model id that is not 4.6
|
||||
const hook = createAnthropicEffortHook()
|
||||
const { input, output } = createMockParams({
|
||||
modelID: "claude-opus-4-5",
|
||||
})
|
||||
|
||||
//#when chat.params hook is called
|
||||
await hook["chat.params"](input, output)
|
||||
|
||||
//#then max should still be treated as valid for opus family
|
||||
expect(output.options.effort).toBe("max")
|
||||
expect(input.message.variant).toBe("max")
|
||||
})
|
||||
})
|
||||
|
||||
describe("conditions NOT met - should skip", () => {
|
||||
@@ -143,8 +158,8 @@ describe("createAnthropicEffortHook", () => {
|
||||
expect(output.options.effort).toBeUndefined()
|
||||
})
|
||||
|
||||
it("should NOT inject effort for non-opus model", async () => {
|
||||
//#given claude-sonnet-4-6 (not opus)
|
||||
it("should clamp effort to high for non-opus claude model with variant max", async () => {
|
||||
//#given claude-sonnet-4-6 (not opus) with variant max
|
||||
const hook = createAnthropicEffortHook()
|
||||
const { input, output } = createMockParams({
|
||||
modelID: "claude-sonnet-4-6",
|
||||
@@ -153,8 +168,9 @@ describe("createAnthropicEffortHook", () => {
|
||||
//#when chat.params hook is called
|
||||
await hook["chat.params"](input, output)
|
||||
|
||||
//#then effort should NOT be injected
|
||||
expect(output.options.effort).toBeUndefined()
|
||||
//#then effort should be clamped to high (not max)
|
||||
expect(output.options.effort).toBe("high")
|
||||
expect(input.message.variant).toBe("high")
|
||||
})
|
||||
|
||||
it("should NOT inject effort for non-anthropic provider with non-claude model", async () => {
|
||||
|
||||
Some files were not shown because too many files have changed in this diff Show More
Reference in New Issue
Block a user