Compare commits
90 Commits
| Author | SHA1 | Date | |
|---|---|---|---|
|
|
3dc11ea620 | ||
|
|
5a28ee1bef | ||
|
|
5d4e57ce96 | ||
|
|
b2497f1327 | ||
|
|
9fc56ab544 | ||
|
|
448a8dc93d | ||
|
|
9cbcf17dde | ||
|
|
4e214cba4e | ||
|
|
4a029258a4 | ||
|
|
d2c576c510 | ||
|
|
44b039bef6 | ||
|
|
aeec5ef98d | ||
|
|
7b2b8be181 | ||
|
|
adc55138c8 | ||
|
|
3715fb79b9 | ||
|
|
b9ed0ca30b | ||
|
|
45e9fcd776 | ||
|
|
49687a654a | ||
|
|
364550038a | ||
|
|
9e6f2d9977 | ||
|
|
4fa7d48c04 | ||
|
|
b4a5189a07 | ||
|
|
631092461c | ||
|
|
c5068d37d2 | ||
|
|
1434313bd7 | ||
|
|
38347a396e | ||
|
|
885d3a2462 | ||
|
|
b4d4d30fa8 | ||
|
|
9d9365901b | ||
|
|
2b2b280895 | ||
|
|
fee60d2def | ||
|
|
f030e0d78d | ||
|
|
5d5eb46f19 | ||
|
|
787ce99eda | ||
|
|
d09af86ea7 | ||
|
|
5b9b6eb0b8 | ||
|
|
324dbb119c | ||
|
|
ab0b084199 | ||
|
|
f1f099fde9 | ||
|
|
6662205646 | ||
|
|
76bf269b39 | ||
|
|
3e4b988860 | ||
|
|
d3dbb4976e | ||
|
|
ec7a2e3eae | ||
|
|
c41e59e9ab | ||
|
|
3b4420bc23 | ||
|
|
3be26cb97f | ||
|
|
e22e13cd29 | ||
|
|
6a733c9dde | ||
|
|
127626a122 | ||
|
|
5765168af4 | ||
|
|
e65a0ed10d | ||
|
|
041770ff42 | ||
|
|
a3b84ec5f9 | ||
|
|
7ce7a85768 | ||
|
|
19ab3b5656 | ||
|
|
670d8ab175 | ||
|
|
40a92138ea | ||
|
|
a081ddcefb | ||
|
|
8f4554e115 | ||
|
|
07793f35a7 | ||
|
|
8ca93c7a27 | ||
|
|
a1b4e97e74 | ||
|
|
47e7d4afbb | ||
|
|
6d3172adc9 | ||
|
|
65dc3e4a3b | ||
|
|
587ee704e8 | ||
|
|
3bafa88204 | ||
|
|
f2496158e8 | ||
|
|
a7ac2e7aba | ||
|
|
a2c7fed9d4 | ||
|
|
98572c8dac | ||
|
|
661737b95a | ||
|
|
8136679b1c | ||
|
|
82d89fd5fc | ||
|
|
b1735d4004 | ||
|
|
8bde294978 | ||
|
|
a476e557c9 | ||
|
|
404390efda | ||
|
|
944cf429a7 | ||
|
|
241224f7ab | ||
|
|
fb837db90d | ||
|
|
95801a4850 | ||
|
|
3601061da0 | ||
|
|
774d0bd84d | ||
|
|
aaaeb6997c | ||
|
|
2f801f6c28 | ||
|
|
10eb3a07e0 | ||
|
|
03feaa0594 | ||
|
|
d2d65fbf99 |
3
.github/workflows/ci.yml
vendored
3
.github/workflows/ci.yml
vendored
@@ -67,6 +67,8 @@ jobs:
|
||||
bun test src/shared/opencode-message-dir.test.ts
|
||||
# session-recovery mock isolation (recover-tool-result-missing mocks ./storage)
|
||||
bun test src/hooks/session-recovery/recover-tool-result-missing.test.ts
|
||||
# legacy-plugin-toast mock isolation (hook.test.ts mocks ./auto-migrate)
|
||||
bun test src/hooks/legacy-plugin-toast/hook.test.ts
|
||||
|
||||
- name: Run remaining tests
|
||||
run: |
|
||||
@@ -98,6 +100,7 @@ jobs:
|
||||
src/tools/call-omo-agent/subagent-session-creator.test.ts \
|
||||
src/hooks/anthropic-context-window-limit-recovery/empty-content-recovery-sdk.test.ts src/hooks/anthropic-context-window-limit-recovery/parser.test.ts src/hooks/anthropic-context-window-limit-recovery/pruning-deduplication.test.ts src/hooks/anthropic-context-window-limit-recovery/recovery-deduplication.test.ts src/hooks/anthropic-context-window-limit-recovery/storage.test.ts \
|
||||
src/hooks/session-recovery/detect-error-type.test.ts src/hooks/session-recovery/index.test.ts src/hooks/session-recovery/recover-empty-content-message-sdk.test.ts src/hooks/session-recovery/resume.test.ts src/hooks/session-recovery/storage \
|
||||
src/hooks/legacy-plugin-toast/auto-migrate.test.ts \
|
||||
src/hooks/claude-code-compatibility \
|
||||
src/hooks/context-injection \
|
||||
src/hooks/provider-toast \
|
||||
|
||||
90
.github/workflows/publish-platform.yml
vendored
90
.github/workflows/publish-platform.yml
vendored
@@ -56,10 +56,33 @@ jobs:
|
||||
env:
|
||||
BUN_INSTALL_ALLOW_SCRIPTS: "@ast-grep/napi"
|
||||
|
||||
- name: Validate release inputs
|
||||
id: validate
|
||||
env:
|
||||
INPUT_VERSION: ${{ inputs.version }}
|
||||
INPUT_DIST_TAG: ${{ inputs.dist_tag }}
|
||||
run: |
|
||||
VERSION="$INPUT_VERSION"
|
||||
DIST_TAG="$INPUT_DIST_TAG"
|
||||
|
||||
if ! [[ "$VERSION" =~ ^[0-9]+\.[0-9]+\.[0-9]+(-[0-9A-Za-z]+(\.[0-9A-Za-z]+)*)?$ ]]; then
|
||||
echo "::error::Invalid version: $VERSION"
|
||||
exit 1
|
||||
fi
|
||||
|
||||
if [ -n "$DIST_TAG" ] && ! [[ "$DIST_TAG" =~ ^[a-z][a-z0-9-]*$ ]]; then
|
||||
echo "::error::Invalid dist_tag: $DIST_TAG"
|
||||
exit 1
|
||||
fi
|
||||
|
||||
echo "version=$VERSION" >> $GITHUB_OUTPUT
|
||||
echo "dist_tag=$DIST_TAG" >> $GITHUB_OUTPUT
|
||||
|
||||
- name: Check if already published
|
||||
id: check
|
||||
env:
|
||||
VERSION: ${{ steps.validate.outputs.version }}
|
||||
run: |
|
||||
VERSION="${{ inputs.version }}"
|
||||
PLATFORM_KEY="${{ matrix.platform }}"
|
||||
PLATFORM_KEY="${PLATFORM_KEY//-/_}"
|
||||
|
||||
@@ -96,15 +119,18 @@ jobs:
|
||||
|
||||
- name: Update version in package.json
|
||||
if: steps.check.outputs.skip != 'true'
|
||||
env:
|
||||
VERSION: ${{ steps.validate.outputs.version }}
|
||||
run: |
|
||||
VERSION="${{ inputs.version }}"
|
||||
cd packages/${{ matrix.platform }}
|
||||
jq --arg v "$VERSION" '.version = $v' package.json > tmp.json && mv tmp.json package.json
|
||||
|
||||
- name: Set root package version
|
||||
if: steps.check.outputs.skip != 'true'
|
||||
env:
|
||||
VERSION: ${{ steps.validate.outputs.version }}
|
||||
run: |
|
||||
jq --arg v "${{ inputs.version }}" '.version = $v' package.json > tmp.json && mv tmp.json package.json
|
||||
jq --arg v "$VERSION" '.version = $v' package.json > tmp.json && mv tmp.json package.json
|
||||
|
||||
- name: Pre-download baseline compile target
|
||||
if: steps.check.outputs.skip != 'true' && endsWith(matrix.platform, '-baseline')
|
||||
@@ -226,11 +252,33 @@ jobs:
|
||||
matrix:
|
||||
platform: [darwin-arm64, darwin-x64, darwin-x64-baseline, linux-x64, linux-x64-baseline, linux-arm64, linux-x64-musl, linux-x64-musl-baseline, linux-arm64-musl, windows-x64, windows-x64-baseline]
|
||||
steps:
|
||||
- name: Validate release inputs
|
||||
id: validate
|
||||
env:
|
||||
INPUT_VERSION: ${{ inputs.version }}
|
||||
INPUT_DIST_TAG: ${{ inputs.dist_tag }}
|
||||
run: |
|
||||
VERSION="$INPUT_VERSION"
|
||||
DIST_TAG="$INPUT_DIST_TAG"
|
||||
|
||||
if ! [[ "$VERSION" =~ ^[0-9]+\.[0-9]+\.[0-9]+(-[0-9A-Za-z]+(\.[0-9A-Za-z]+)*)?$ ]]; then
|
||||
echo "::error::Invalid version: $VERSION"
|
||||
exit 1
|
||||
fi
|
||||
|
||||
if [ -n "$DIST_TAG" ] && ! [[ "$DIST_TAG" =~ ^[a-z][a-z0-9-]*$ ]]; then
|
||||
echo "::error::Invalid dist_tag: $DIST_TAG"
|
||||
exit 1
|
||||
fi
|
||||
|
||||
echo "version=$VERSION" >> $GITHUB_OUTPUT
|
||||
echo "dist_tag=$DIST_TAG" >> $GITHUB_OUTPUT
|
||||
|
||||
- name: Check if already published
|
||||
id: check
|
||||
env:
|
||||
VERSION: ${{ steps.validate.outputs.version }}
|
||||
run: |
|
||||
VERSION="${{ inputs.version }}"
|
||||
|
||||
OC_STATUS=$(curl -s -o /dev/null -w "%{http_code}" "https://registry.npmjs.org/oh-my-opencode-${{ matrix.platform }}/${VERSION}")
|
||||
OA_STATUS=$(curl -s -o /dev/null -w "%{http_code}" "https://registry.npmjs.org/oh-my-openagent-${{ matrix.platform }}/${VERSION}")
|
||||
|
||||
@@ -288,22 +336,26 @@ jobs:
|
||||
|
||||
- name: Publish oh-my-opencode-${{ matrix.platform }}
|
||||
if: steps.check.outputs.skip_opencode != 'true' && steps.download.outcome == 'success'
|
||||
env:
|
||||
DIST_TAG: ${{ steps.validate.outputs.dist_tag }}
|
||||
NODE_AUTH_TOKEN: ${{ secrets.NODE_AUTH_TOKEN }}
|
||||
NPM_CONFIG_PROVENANCE: true
|
||||
run: |
|
||||
cd packages/${{ matrix.platform }}
|
||||
|
||||
TAG_ARG=""
|
||||
if [ -n "${{ inputs.dist_tag }}" ]; then
|
||||
TAG_ARG="--tag ${{ inputs.dist_tag }}"
|
||||
if [ -n "$DIST_TAG" ]; then
|
||||
npm publish --access public --provenance --tag "$DIST_TAG"
|
||||
else
|
||||
npm publish --access public --provenance
|
||||
fi
|
||||
|
||||
npm publish --access public --provenance $TAG_ARG
|
||||
env:
|
||||
NODE_AUTH_TOKEN: ${{ secrets.NODE_AUTH_TOKEN }}
|
||||
NPM_CONFIG_PROVENANCE: true
|
||||
timeout-minutes: 15
|
||||
|
||||
- name: Publish oh-my-openagent-${{ matrix.platform }}
|
||||
if: steps.check.outputs.skip_openagent != 'true' && steps.download.outcome == 'success'
|
||||
env:
|
||||
DIST_TAG: ${{ steps.validate.outputs.dist_tag }}
|
||||
NODE_AUTH_TOKEN: ${{ secrets.NODE_AUTH_TOKEN }}
|
||||
NPM_CONFIG_PROVENANCE: true
|
||||
run: |
|
||||
cd packages/${{ matrix.platform }}
|
||||
|
||||
@@ -313,13 +365,9 @@ jobs:
|
||||
'.name = $name | .description = $desc | .bin = {"oh-my-openagent": (.bin | to_entries | .[0].value)}' \
|
||||
package.json > tmp.json && mv tmp.json package.json
|
||||
|
||||
TAG_ARG=""
|
||||
if [ -n "${{ inputs.dist_tag }}" ]; then
|
||||
TAG_ARG="--tag ${{ inputs.dist_tag }}"
|
||||
if [ -n "$DIST_TAG" ]; then
|
||||
npm publish --access public --provenance --tag "$DIST_TAG"
|
||||
else
|
||||
npm publish --access public --provenance
|
||||
fi
|
||||
|
||||
npm publish --access public --provenance $TAG_ARG
|
||||
env:
|
||||
NODE_AUTH_TOKEN: ${{ secrets.NODE_AUTH_TOKEN }}
|
||||
NPM_CONFIG_PROVENANCE: true
|
||||
timeout-minutes: 15
|
||||
|
||||
139
.github/workflows/publish.yml
vendored
139
.github/workflows/publish.yml
vendored
@@ -68,6 +68,8 @@ jobs:
|
||||
bun test src/shared/opencode-message-dir.test.ts
|
||||
# session-recovery mock isolation (recover-tool-result-missing mocks ./storage)
|
||||
bun test src/hooks/session-recovery/recover-tool-result-missing.test.ts
|
||||
# legacy-plugin-toast mock isolation (hook.test.ts mocks ./auto-migrate)
|
||||
bun test src/hooks/legacy-plugin-toast/hook.test.ts
|
||||
|
||||
- name: Run remaining tests
|
||||
run: |
|
||||
@@ -99,6 +101,7 @@ jobs:
|
||||
src/tools/call-omo-agent/subagent-session-creator.test.ts \
|
||||
src/hooks/anthropic-context-window-limit-recovery/empty-content-recovery-sdk.test.ts src/hooks/anthropic-context-window-limit-recovery/parser.test.ts src/hooks/anthropic-context-window-limit-recovery/pruning-deduplication.test.ts src/hooks/anthropic-context-window-limit-recovery/recovery-deduplication.test.ts src/hooks/anthropic-context-window-limit-recovery/storage.test.ts \
|
||||
src/hooks/session-recovery/detect-error-type.test.ts src/hooks/session-recovery/index.test.ts src/hooks/session-recovery/recover-empty-content-message-sdk.test.ts src/hooks/session-recovery/resume.test.ts src/hooks/session-recovery/storage \
|
||||
src/hooks/legacy-plugin-toast/auto-migrate.test.ts \
|
||||
src/hooks/claude-code-compatibility \
|
||||
src/hooks/context-injection \
|
||||
src/hooks/provider-toast \
|
||||
@@ -164,22 +167,35 @@ jobs:
|
||||
|
||||
- name: Calculate version
|
||||
id: version
|
||||
env:
|
||||
RAW_VERSION: ${{ inputs.version }}
|
||||
BUMP: ${{ inputs.bump }}
|
||||
run: |
|
||||
VERSION="${{ inputs.version }}"
|
||||
VERSION="$RAW_VERSION"
|
||||
if [ -z "$VERSION" ]; then
|
||||
PREV=$(curl -s https://registry.npmjs.org/oh-my-opencode/latest | jq -r '.version // "0.0.0"')
|
||||
BASE="${PREV%%-*}"
|
||||
IFS='.' read -r MAJOR MINOR PATCH <<< "$BASE"
|
||||
case "${{ inputs.bump }}" in
|
||||
case "$BUMP" in
|
||||
major) VERSION="$((MAJOR+1)).0.0" ;;
|
||||
minor) VERSION="${MAJOR}.$((MINOR+1)).0" ;;
|
||||
*) VERSION="${MAJOR}.${MINOR}.$((PATCH+1))" ;;
|
||||
esac
|
||||
fi
|
||||
|
||||
if ! [[ "$VERSION" =~ ^[0-9]+\.[0-9]+\.[0-9]+(-[0-9A-Za-z]+(\.[0-9A-Za-z]+)*)?$ ]]; then
|
||||
echo "::error::Invalid version: $VERSION"
|
||||
exit 1
|
||||
fi
|
||||
|
||||
echo "version=$VERSION" >> $GITHUB_OUTPUT
|
||||
|
||||
if [[ "$VERSION" == *"-"* ]]; then
|
||||
DIST_TAG=$(echo "$VERSION" | cut -d'-' -f2 | cut -d'.' -f1)
|
||||
DIST_TAG=$(printf '%s' "$VERSION" | cut -d'-' -f2 | cut -d'.' -f1)
|
||||
if ! [[ "$DIST_TAG" =~ ^[a-z][a-z0-9-]*$ ]]; then
|
||||
echo "::error::Invalid dist_tag: $DIST_TAG"
|
||||
exit 1
|
||||
fi
|
||||
echo "dist_tag=${DIST_TAG:-next}" >> $GITHUB_OUTPUT
|
||||
else
|
||||
echo "dist_tag=" >> $GITHUB_OUTPUT
|
||||
@@ -189,8 +205,9 @@ jobs:
|
||||
|
||||
- name: Check if already published
|
||||
id: check
|
||||
env:
|
||||
VERSION: ${{ steps.version.outputs.version }}
|
||||
run: |
|
||||
VERSION="${{ steps.version.outputs.version }}"
|
||||
STATUS=$(curl -s -o /dev/null -w "%{http_code}" "https://registry.npmjs.org/oh-my-opencode/${VERSION}")
|
||||
if [ "$STATUS" = "200" ]; then
|
||||
echo "skip=true" >> $GITHUB_OUTPUT
|
||||
@@ -201,8 +218,9 @@ jobs:
|
||||
|
||||
- name: Update version
|
||||
if: steps.check.outputs.skip != 'true'
|
||||
env:
|
||||
VERSION: ${{ steps.version.outputs.version }}
|
||||
run: |
|
||||
VERSION="${{ steps.version.outputs.version }}"
|
||||
jq --arg v "$VERSION" '.version = $v' package.json > tmp.json && mv tmp.json package.json
|
||||
|
||||
for platform in darwin-arm64 darwin-x64 darwin-x64-baseline linux-x64 linux-x64-baseline linux-arm64 linux-x64-musl linux-x64-musl-baseline linux-arm64-musl windows-x64 windows-x64-baseline; do
|
||||
@@ -222,20 +240,22 @@ jobs:
|
||||
|
||||
- name: Publish oh-my-opencode
|
||||
if: steps.check.outputs.skip != 'true'
|
||||
run: |
|
||||
TAG_ARG=""
|
||||
if [ -n "${{ steps.version.outputs.dist_tag }}" ]; then
|
||||
TAG_ARG="--tag ${{ steps.version.outputs.dist_tag }}"
|
||||
fi
|
||||
npm publish --access public --provenance $TAG_ARG
|
||||
env:
|
||||
DIST_TAG: ${{ steps.version.outputs.dist_tag }}
|
||||
NODE_AUTH_TOKEN: ${{ secrets.NODE_AUTH_TOKEN }}
|
||||
NPM_CONFIG_PROVENANCE: true
|
||||
run: |
|
||||
if [ -n "$DIST_TAG" ]; then
|
||||
npm publish --access public --provenance --tag "$DIST_TAG"
|
||||
else
|
||||
npm publish --access public --provenance
|
||||
fi
|
||||
|
||||
- name: Check if oh-my-openagent already published
|
||||
id: check-openagent
|
||||
env:
|
||||
VERSION: ${{ steps.version.outputs.version }}
|
||||
run: |
|
||||
VERSION="${{ steps.version.outputs.version }}"
|
||||
STATUS=$(curl -s -o /dev/null -w "%{http_code}" "https://registry.npmjs.org/oh-my-openagent/${VERSION}")
|
||||
if [ "$STATUS" = "200" ]; then
|
||||
echo "skip=true" >> $GITHUB_OUTPUT
|
||||
@@ -246,9 +266,12 @@ jobs:
|
||||
|
||||
- name: Publish oh-my-openagent
|
||||
if: steps.check-openagent.outputs.skip != 'true'
|
||||
env:
|
||||
VERSION: ${{ steps.version.outputs.version }}
|
||||
DIST_TAG: ${{ steps.version.outputs.dist_tag }}
|
||||
NODE_AUTH_TOKEN: ${{ secrets.NODE_AUTH_TOKEN }}
|
||||
NPM_CONFIG_PROVENANCE: true
|
||||
run: |
|
||||
VERSION="${{ steps.version.outputs.version }}"
|
||||
|
||||
# Update package name, version, and optionalDependencies for oh-my-openagent
|
||||
jq --arg v "$VERSION" '
|
||||
.name = "oh-my-openagent" |
|
||||
@@ -260,38 +283,30 @@ jobs:
|
||||
)
|
||||
' package.json > tmp.json && mv tmp.json package.json
|
||||
|
||||
TAG_ARG=""
|
||||
if [ -n "${{ steps.version.outputs.dist_tag }}" ]; then
|
||||
TAG_ARG="--tag ${{ steps.version.outputs.dist_tag }}"
|
||||
if [ -n "$DIST_TAG" ]; then
|
||||
npm publish --access public --provenance --tag "$DIST_TAG"
|
||||
else
|
||||
npm publish --access public --provenance
|
||||
fi
|
||||
npm publish --access public --provenance $TAG_ARG || echo "::warning::oh-my-openagent publish failed"
|
||||
env:
|
||||
NODE_AUTH_TOKEN: ${{ secrets.NODE_AUTH_TOKEN }}
|
||||
NPM_CONFIG_PROVENANCE: true
|
||||
|
||||
- name: Restore package.json
|
||||
if: steps.check-openagent.outputs.skip != 'true'
|
||||
if: always() && steps.check-openagent.outputs.skip != 'true'
|
||||
run: |
|
||||
git checkout -- package.json
|
||||
|
||||
trigger-platform:
|
||||
runs-on: ubuntu-latest
|
||||
publish-platform:
|
||||
needs: publish-main
|
||||
if: inputs.skip_platform != true
|
||||
steps:
|
||||
- name: Trigger platform publish workflow
|
||||
run: |
|
||||
gh workflow run publish-platform.yml \
|
||||
--repo ${{ github.repository }} \
|
||||
--ref ${{ github.ref }} \
|
||||
-f version=${{ needs.publish-main.outputs.version }} \
|
||||
-f dist_tag=${{ needs.publish-main.outputs.dist_tag }}
|
||||
env:
|
||||
GH_TOKEN: ${{ secrets.GITHUB_TOKEN }}
|
||||
uses: ./.github/workflows/publish-platform.yml
|
||||
with:
|
||||
version: ${{ needs.publish-main.outputs.version }}
|
||||
dist_tag: ${{ needs.publish-main.outputs.dist_tag }}
|
||||
secrets: inherit
|
||||
|
||||
release:
|
||||
runs-on: ubuntu-latest
|
||||
needs: publish-main
|
||||
needs: [publish-main, publish-platform]
|
||||
if: always() && needs.publish-main.result == 'success' && (inputs.skip_platform == true || needs.publish-platform.result == 'success')
|
||||
steps:
|
||||
- uses: actions/checkout@v4
|
||||
with:
|
||||
@@ -315,13 +330,53 @@ jobs:
|
||||
env:
|
||||
GH_TOKEN: ${{ secrets.GITHUB_TOKEN }}
|
||||
|
||||
- name: Create GitHub release
|
||||
- name: Apply release version to source tree
|
||||
env:
|
||||
VERSION: ${{ needs.publish-main.outputs.version }}
|
||||
run: |
|
||||
jq --arg v "$VERSION" '.version = $v' package.json > tmp.json && mv tmp.json package.json
|
||||
|
||||
for platform in darwin-arm64 darwin-x64 darwin-x64-baseline linux-x64 linux-x64-baseline linux-arm64 linux-x64-musl linux-x64-musl-baseline linux-arm64-musl windows-x64 windows-x64-baseline; do
|
||||
jq --arg v "$VERSION" '.version = $v' "packages/${platform}/package.json" > tmp.json
|
||||
mv tmp.json "packages/${platform}/package.json"
|
||||
done
|
||||
|
||||
jq --arg v "$VERSION" '.optionalDependencies = (.optionalDependencies | to_entries | map(.value = $v) | from_entries)' package.json > tmp.json && mv tmp.json package.json
|
||||
|
||||
- name: Commit version bump
|
||||
env:
|
||||
VERSION: ${{ needs.publish-main.outputs.version }}
|
||||
run: |
|
||||
git config user.email "github-actions[bot]@users.noreply.github.com"
|
||||
git config user.name "github-actions[bot]"
|
||||
git add package.json packages/*/package.json
|
||||
git diff --cached --quiet || git commit -m "release: v${VERSION}"
|
||||
|
||||
- name: Create release tag
|
||||
env:
|
||||
VERSION: ${{ needs.publish-main.outputs.version }}
|
||||
run: |
|
||||
if git rev-parse "v${VERSION}" >/dev/null 2>&1; then
|
||||
echo "::error::Tag v${VERSION} already exists"
|
||||
exit 1
|
||||
fi
|
||||
git tag "v${VERSION}"
|
||||
|
||||
- name: Push release state
|
||||
env:
|
||||
VERSION: ${{ needs.publish-main.outputs.version }}
|
||||
GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
|
||||
run: |
|
||||
git push origin HEAD
|
||||
git push origin "v${VERSION}"
|
||||
|
||||
- name: Create GitHub release
|
||||
env:
|
||||
VERSION: ${{ needs.publish-main.outputs.version }}
|
||||
GH_TOKEN: ${{ secrets.GITHUB_TOKEN }}
|
||||
run: |
|
||||
VERSION="${{ needs.publish-main.outputs.version }}"
|
||||
gh release view "v${VERSION}" >/dev/null 2>&1 || \
|
||||
gh release create "v${VERSION}" --title "v${VERSION}" --notes-file /tmp/changelog.md
|
||||
env:
|
||||
GH_TOKEN: ${{ secrets.GITHUB_TOKEN }}
|
||||
|
||||
- name: Delete draft release
|
||||
run: gh release delete next --yes 2>/dev/null || true
|
||||
@@ -330,13 +385,13 @@ jobs:
|
||||
|
||||
- name: Merge to master
|
||||
continue-on-error: true
|
||||
env:
|
||||
VERSION: ${{ needs.publish-main.outputs.version }}
|
||||
GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
|
||||
run: |
|
||||
git config user.name "github-actions[bot]"
|
||||
git config user.email "github-actions[bot]@users.noreply.github.com"
|
||||
VERSION="${{ needs.publish-main.outputs.version }}"
|
||||
git stash --include-untracked || true
|
||||
git checkout master
|
||||
git reset --hard "v${VERSION}"
|
||||
git push -f origin master || echo "::warning::Failed to push to master"
|
||||
env:
|
||||
GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
|
||||
|
||||
@@ -282,6 +282,18 @@ Once all three gates pass:
|
||||
gh pr merge "$PR_NUMBER" --squash --delete-branch
|
||||
```
|
||||
|
||||
### Sync .sisyphus state back to main repo
|
||||
|
||||
Before removing the worktree, copy `.sisyphus/` state back. When `.sisyphus/` is gitignored, files written there during worktree execution are not committed or merged — they would be lost on worktree removal.
|
||||
|
||||
```bash
|
||||
# Sync .sisyphus state from worktree to main repo (preserves task state, plans, notepads)
|
||||
if [ -d "$WORKTREE_PATH/.sisyphus" ]; then
|
||||
mkdir -p "$ORIGINAL_DIR/.sisyphus"
|
||||
cp -r "$WORKTREE_PATH/.sisyphus/"* "$ORIGINAL_DIR/.sisyphus/" 2>/dev/null || true
|
||||
fi
|
||||
```
|
||||
|
||||
### Clean up the worktree
|
||||
|
||||
The worktree served its purpose — remove it to avoid disk bloat:
|
||||
|
||||
20
README.md
20
README.md
@@ -111,6 +111,8 @@ Fetch the installation guide and follow it:
|
||||
curl -s https://raw.githubusercontent.com/code-yeongyu/oh-my-openagent/refs/heads/dev/docs/guide/installation.md
|
||||
```
|
||||
|
||||
**Note**: Use the published package and binary name `oh-my-opencode`. Inside `opencode.json`, the compatibility layer now prefers the plugin entry `oh-my-openagent`, while legacy `oh-my-opencode` entries still load with a warning. Plugin config files still commonly use `oh-my-opencode.json` or `oh-my-opencode.jsonc`, and both legacy and renamed basenames are recognized during the transition.
|
||||
|
||||
---
|
||||
|
||||
## Skip This README
|
||||
@@ -273,11 +275,11 @@ To remove oh-my-opencode:
|
||||
|
||||
1. **Remove the plugin from your OpenCode config**
|
||||
|
||||
Edit `~/.config/opencode/opencode.json` (or `opencode.jsonc`) and remove `"oh-my-opencode"` from the `plugin` array:
|
||||
Edit `~/.config/opencode/opencode.json` (or `opencode.jsonc`) and remove either `"oh-my-openagent"` or the legacy `"oh-my-opencode"` entry from the `plugin` array:
|
||||
|
||||
```bash
|
||||
# Using jq
|
||||
jq '.plugin = [.plugin[] | select(. != "oh-my-opencode")]' \
|
||||
jq '.plugin = [.plugin[] | select(. != "oh-my-openagent" and . != "oh-my-opencode")]' \
|
||||
~/.config/opencode/opencode.json > /tmp/oc.json && \
|
||||
mv /tmp/oc.json ~/.config/opencode/opencode.json
|
||||
```
|
||||
@@ -285,11 +287,13 @@ To remove oh-my-opencode:
|
||||
2. **Remove configuration files (optional)**
|
||||
|
||||
```bash
|
||||
# Remove user config
|
||||
rm -f ~/.config/opencode/oh-my-opencode.json ~/.config/opencode/oh-my-opencode.jsonc
|
||||
# Remove plugin config files recognized during the compatibility window
|
||||
rm -f ~/.config/opencode/oh-my-openagent.jsonc ~/.config/opencode/oh-my-openagent.json \
|
||||
~/.config/opencode/oh-my-opencode.jsonc ~/.config/opencode/oh-my-opencode.json
|
||||
|
||||
# Remove project config (if exists)
|
||||
rm -f .opencode/oh-my-opencode.json .opencode/oh-my-opencode.jsonc
|
||||
rm -f .opencode/oh-my-openagent.jsonc .opencode/oh-my-openagent.json \
|
||||
.opencode/oh-my-opencode.jsonc .opencode/oh-my-opencode.json
|
||||
```
|
||||
|
||||
3. **Verify removal**
|
||||
@@ -315,6 +319,10 @@ See full [Features Documentation](docs/reference/features.md).
|
||||
- **Built-in MCPs**: websearch (Exa), context7 (docs), grep_app (GitHub search)
|
||||
- **Session Tools**: List, read, search, and analyze session history
|
||||
- **Productivity Features**: Ralph Loop, Todo Enforcer, Comment Checker, Think Mode, and more
|
||||
- **Doctor Command**: Built-in diagnostics (`bunx oh-my-opencode doctor`) verify plugin registration, config, models, and environment
|
||||
- **Model Fallbacks**: `fallback_models` can mix plain model strings with per-fallback object settings in the same array
|
||||
- **File Prompts**: Load prompts from files with `file://` support in agent configurations
|
||||
- **Session Recovery**: Automatic recovery from session errors, context window limits, and API failures
|
||||
- **Model Setup**: Agent-model matching is built into the [Installation Guide](docs/guide/installation.md#step-5-understand-your-model-setup)
|
||||
|
||||
## Configuration
|
||||
@@ -324,7 +332,7 @@ Opinionated defaults, adjustable if you insist.
|
||||
See [Configuration Documentation](docs/reference/configuration.md).
|
||||
|
||||
**Quick Overview:**
|
||||
- **Config Locations**: `.opencode/oh-my-opencode.jsonc` or `.opencode/oh-my-opencode.json` (project), `~/.config/opencode/oh-my-opencode.jsonc` or `~/.config/opencode/oh-my-opencode.json` (user)
|
||||
- **Config Locations**: The compatibility layer recognizes both `oh-my-openagent.json[c]` and legacy `oh-my-opencode.json[c]` plugin config files. Existing installs still commonly use the legacy basename.
|
||||
- **JSONC Support**: Comments and trailing commas supported
|
||||
- **Agents**: Override models, temperatures, prompts, and permissions for any agent
|
||||
- **Built-in Skills**: `playwright` (browser automation), `git-master` (atomic commits)
|
||||
|
||||
@@ -4423,6 +4423,11 @@
|
||||
},
|
||||
"model_fallback_title": {
|
||||
"type": "boolean"
|
||||
},
|
||||
"max_tools": {
|
||||
"type": "integer",
|
||||
"minimum": 1,
|
||||
"maximum": 9007199254740991
|
||||
}
|
||||
},
|
||||
"additionalProperties": false
|
||||
@@ -4656,6 +4661,14 @@
|
||||
"type": "number",
|
||||
"minimum": 60000
|
||||
},
|
||||
"taskTtlMs": {
|
||||
"type": "number",
|
||||
"minimum": 300000
|
||||
},
|
||||
"sessionGoneTimeoutMs": {
|
||||
"type": "number",
|
||||
"minimum": 10000
|
||||
},
|
||||
"syncPollTimeoutMs": {
|
||||
"type": "number",
|
||||
"minimum": 60000
|
||||
@@ -4872,6 +4885,11 @@
|
||||
"additionalProperties": false
|
||||
},
|
||||
"git_master": {
|
||||
"default": {
|
||||
"commit_footer": true,
|
||||
"include_co_authored_by": true,
|
||||
"git_env_prefix": "GIT_MASTER=1"
|
||||
},
|
||||
"type": "object",
|
||||
"properties": {
|
||||
"commit_footer": {
|
||||
@@ -5022,5 +5040,8 @@
|
||||
}
|
||||
}
|
||||
},
|
||||
"required": [
|
||||
"git_master"
|
||||
],
|
||||
"additionalProperties": false
|
||||
}
|
||||
@@ -71,9 +71,19 @@ function getSignalExitCode(signal) {
|
||||
return 128 + (signalCodeByName[signal] ?? 1);
|
||||
}
|
||||
|
||||
function getPackageBaseName() {
|
||||
try {
|
||||
const packageJson = JSON.parse(readFileSync(new URL("../package.json", import.meta.url), "utf8"));
|
||||
return packageJson.name || "oh-my-opencode";
|
||||
} catch {
|
||||
return "oh-my-opencode";
|
||||
}
|
||||
}
|
||||
|
||||
function main() {
|
||||
const { platform, arch } = process;
|
||||
const libcFamily = getLibcFamily();
|
||||
const packageBaseName = getPackageBaseName();
|
||||
const avx2Supported = supportsAvx2();
|
||||
|
||||
let packageCandidates;
|
||||
@@ -83,6 +93,7 @@ function main() {
|
||||
arch,
|
||||
libcFamily,
|
||||
preferBaseline: avx2Supported === false,
|
||||
packageBaseName,
|
||||
});
|
||||
} catch (error) {
|
||||
console.error(`\noh-my-opencode: ${error.message}\n`);
|
||||
|
||||
@@ -3,11 +3,11 @@
|
||||
|
||||
/**
|
||||
* Get the platform-specific package name
|
||||
* @param {{ platform: string, arch: string, libcFamily?: string | null }} options
|
||||
* @param {{ platform: string, arch: string, libcFamily?: string | null, packageBaseName?: string }} options
|
||||
* @returns {string} Package name like "oh-my-opencode-darwin-arm64"
|
||||
* @throws {Error} If libc cannot be detected on Linux
|
||||
*/
|
||||
export function getPlatformPackage({ platform, arch, libcFamily }) {
|
||||
export function getPlatformPackage({ platform, arch, libcFamily, packageBaseName = "oh-my-opencode" }) {
|
||||
let suffix = "";
|
||||
if (platform === "linux") {
|
||||
if (libcFamily === null || libcFamily === undefined) {
|
||||
@@ -23,13 +23,13 @@ export function getPlatformPackage({ platform, arch, libcFamily }) {
|
||||
|
||||
// Map platform names: win32 -> windows (for package name)
|
||||
const os = platform === "win32" ? "windows" : platform;
|
||||
return `oh-my-opencode-${os}-${arch}${suffix}`;
|
||||
return `${packageBaseName}-${os}-${arch}${suffix}`;
|
||||
}
|
||||
|
||||
/** @param {{ platform: string, arch: string, libcFamily?: string | null, preferBaseline?: boolean }} options */
|
||||
export function getPlatformPackageCandidates({ platform, arch, libcFamily, preferBaseline = false }) {
|
||||
const primaryPackage = getPlatformPackage({ platform, arch, libcFamily });
|
||||
const baselinePackage = getBaselinePlatformPackage({ platform, arch, libcFamily });
|
||||
/** @param {{ platform: string, arch: string, libcFamily?: string | null, preferBaseline?: boolean, packageBaseName?: string }} options */
|
||||
export function getPlatformPackageCandidates({ platform, arch, libcFamily, preferBaseline = false, packageBaseName = "oh-my-opencode" }) {
|
||||
const primaryPackage = getPlatformPackage({ platform, arch, libcFamily, packageBaseName });
|
||||
const baselinePackage = getBaselinePlatformPackage({ platform, arch, libcFamily, packageBaseName });
|
||||
|
||||
if (!baselinePackage) {
|
||||
return [primaryPackage];
|
||||
@@ -38,18 +38,18 @@ export function getPlatformPackageCandidates({ platform, arch, libcFamily, prefe
|
||||
return preferBaseline ? [baselinePackage, primaryPackage] : [primaryPackage, baselinePackage];
|
||||
}
|
||||
|
||||
/** @param {{ platform: string, arch: string, libcFamily?: string | null }} options */
|
||||
function getBaselinePlatformPackage({ platform, arch, libcFamily }) {
|
||||
/** @param {{ platform: string, arch: string, libcFamily?: string | null, packageBaseName?: string }} options */
|
||||
function getBaselinePlatformPackage({ platform, arch, libcFamily, packageBaseName = "oh-my-opencode" }) {
|
||||
if (arch !== "x64") {
|
||||
return null;
|
||||
}
|
||||
|
||||
if (platform === "darwin") {
|
||||
return "oh-my-opencode-darwin-x64-baseline";
|
||||
return `${packageBaseName}-darwin-x64-baseline`;
|
||||
}
|
||||
|
||||
if (platform === "win32") {
|
||||
return "oh-my-opencode-windows-x64-baseline";
|
||||
return `${packageBaseName}-windows-x64-baseline`;
|
||||
}
|
||||
|
||||
if (platform === "linux") {
|
||||
@@ -61,10 +61,10 @@ function getBaselinePlatformPackage({ platform, arch, libcFamily }) {
|
||||
}
|
||||
|
||||
if (libcFamily === "musl") {
|
||||
return "oh-my-opencode-linux-x64-musl-baseline";
|
||||
return `${packageBaseName}-linux-x64-musl-baseline`;
|
||||
}
|
||||
|
||||
return "oh-my-opencode-linux-x64-baseline";
|
||||
return `${packageBaseName}-linux-x64-baseline`;
|
||||
}
|
||||
|
||||
return null;
|
||||
|
||||
@@ -190,6 +190,21 @@ describe("getPlatformPackageCandidates", () => {
|
||||
]);
|
||||
});
|
||||
|
||||
|
||||
|
||||
test("supports renamed package family via packageBaseName override", () => {
|
||||
// #given Linux x64 with glibc and renamed package base
|
||||
const input = { platform: "linux", arch: "x64", libcFamily: "glibc", packageBaseName: "oh-my-openagent" };
|
||||
|
||||
// #when getting package candidates
|
||||
const result = getPlatformPackageCandidates(input);
|
||||
|
||||
// #then returns renamed package family candidates
|
||||
expect(result).toEqual([
|
||||
"oh-my-openagent-linux-x64",
|
||||
"oh-my-openagent-linux-x64-baseline",
|
||||
]);
|
||||
});
|
||||
test("returns only one candidate for ARM64", () => {
|
||||
// #given non-x64 platform
|
||||
const input = { platform: "linux", arch: "arm64", libcFamily: "glibc" };
|
||||
|
||||
@@ -64,8 +64,8 @@ These agents have Claude-optimized prompts — long, detailed, mechanics-driven.
|
||||
|
||||
| Agent | Role | Fallback Chain | Notes |
|
||||
| ------------ | ----------------- | -------------------------------------- | ------------------------------------------------------------------------------------------------- |
|
||||
| **Sisyphus** | Main orchestrator | Claude Opus → opencode-go/kimi-k2.5 → K2P5 → Kimi K2.5 → GPT-5.4 → GLM-5 → Big Pickle | Claude-family first. GPT-5.4 has dedicated prompt support. Kimi available through multiple providers. |
|
||||
| **Metis** | Plan gap analyzer | Claude Opus → GPT-5.4 → opencode-go/glm-5 → K2P5 | Claude preferred. GPT-5.4 as secondary before GLM-5 fallback. |
|
||||
| **Sisyphus** | Main orchestrator | anthropic\|github-copilot\|opencode/claude-opus-4-6 (max) → opencode-go/kimi-k2.5 → kimi-for-coding/k2p5 → opencode\|moonshotai\|moonshotai-cn\|firmware\|ollama-cloud\|aihubmix/kimi-k2.5 → openai\|github-copilot\|opencode/gpt-5.4 (medium) → zai-coding-plan\|opencode/glm-5 → opencode/big-pickle | Exact runtime chain from `src/shared/model-requirements.ts`. |
|
||||
| **Metis** | Plan gap analyzer | anthropic\|github-copilot\|opencode/claude-opus-4-6 (max) → openai\|github-copilot\|opencode/gpt-5.4 (high) → opencode-go/glm-5 → kimi-for-coding/k2p5 | Exact runtime chain from `src/shared/model-requirements.ts`. |
|
||||
|
||||
### Dual-Prompt Agents → Claude preferred, GPT supported
|
||||
|
||||
@@ -73,8 +73,8 @@ These agents ship separate prompts for Claude and GPT families. They auto-detect
|
||||
|
||||
| Agent | Role | Fallback Chain | Notes |
|
||||
| -------------- | ----------------- | -------------------------------------- | -------------------------------------------------------------------- |
|
||||
| **Prometheus** | Strategic planner | Claude Opus → GPT-5.4 → opencode-go/glm-5 → Gemini 3.1 Pro | Interview-mode planning. GPT prompt is compact and principle-driven. |
|
||||
| **Atlas** | Todo orchestrator | Claude Sonnet → opencode-go/kimi-k2.5 → GPT-5.4 | Claude first, opencode-go as intermediate, GPT-5.4 as last resort. |
|
||||
| **Prometheus** | Strategic planner | anthropic\|github-copilot\|opencode/claude-opus-4-6 (max) → openai\|github-copilot\|opencode/gpt-5.4 (high) → opencode-go/glm-5 → google\|github-copilot\|opencode/gemini-3.1-pro | Exact runtime chain from `src/shared/model-requirements.ts`. |
|
||||
| **Atlas** | Todo orchestrator | anthropic\|github-copilot\|opencode/claude-sonnet-4-6 → opencode-go/kimi-k2.5 → openai\|github-copilot\|opencode/gpt-5.4 (medium) → opencode-go/minimax-m2.7 | Exact runtime chain from `src/shared/model-requirements.ts`. |
|
||||
|
||||
### Deep Specialists → GPT
|
||||
|
||||
@@ -82,9 +82,9 @@ These agents are built for GPT's principle-driven style. Their prompts assume au
|
||||
|
||||
| Agent | Role | Fallback Chain | Notes |
|
||||
| -------------- | ----------------------- | -------------------------------------- | ------------------------------------------------ |
|
||||
| **Hephaestus** | Autonomous deep worker | GPT-5.4 | Requires GPT access. The craftsman. |
|
||||
| **Oracle** | Architecture consultant | GPT-5.4 → Gemini 3.1 Pro → Claude Opus → opencode-go/glm-5 | Read-only high-IQ consultation. |
|
||||
| **Momus** | Ruthless reviewer | GPT-5.4 → Claude Opus → Gemini 3.1 Pro → opencode-go/glm-5 | Verification and plan review. GPT-5.4 uses xhigh variant. |
|
||||
| **Hephaestus** | Autonomous deep worker | GPT-5.4 (medium) | Requires a GPT-capable provider. The craftsman. |
|
||||
| **Oracle** | Architecture consultant | openai\|github-copilot\|opencode/gpt-5.4 (high) → google\|github-copilot\|opencode/gemini-3.1-pro (high) → anthropic\|github-copilot\|opencode/claude-opus-4-6 (max) → opencode-go/glm-5 | Exact runtime chain from `src/shared/model-requirements.ts`. |
|
||||
| **Momus** | Ruthless reviewer | openai\|github-copilot\|opencode/gpt-5.4 (xhigh) → anthropic\|github-copilot\|opencode/claude-opus-4-6 (max) → google\|github-copilot\|opencode/gemini-3.1-pro (high) → opencode-go/glm-5 | Exact runtime chain from `src/shared/model-requirements.ts`. |
|
||||
|
||||
### Utility Runners → Speed over Intelligence
|
||||
|
||||
@@ -92,10 +92,10 @@ These agents do grep, search, and retrieval. They intentionally use the fastest,
|
||||
|
||||
| Agent | Role | Fallback Chain | Notes |
|
||||
| --------------------- | ------------------ | ---------------------------------------------- | ----------------------------------------------------- |
|
||||
| **Explore** | Fast codebase grep | Grok Code Fast → opencode-go/minimax-m2.7-highspeed → MiniMax M2.7 → Haiku → GPT-5-Nano | Speed is everything. Fire 10 in parallel. |
|
||||
| **Librarian** | Docs/code search | opencode-go/minimax-m2.7 → MiniMax M2.7-highspeed → Haiku → GPT-5-Nano | Doc retrieval doesn't need deep reasoning. |
|
||||
| **Multimodal Looker** | Vision/screenshots | GPT-5.4 → opencode-go/kimi-k2.5 → GLM-4.6v → GPT-5-Nano | Uses the first available multimodal-capable fallback. |
|
||||
| **Sisyphus-Junior** | Category executor | Claude Sonnet → opencode-go/kimi-k2.5 → GPT-5.4 → MiniMax M2.7 → Big Pickle | Handles delegated category tasks. Sonnet-tier default. |
|
||||
| **Explore** | Fast codebase grep | github-copilot\|xai/grok-code-fast-1 → opencode-go/minimax-m2.7-highspeed → opencode/minimax-m2.7 → anthropic\|opencode/claude-haiku-4-5 → opencode/gpt-5-nano | Exact runtime chain from `src/shared/model-requirements.ts`. |
|
||||
| **Librarian** | Docs/code search | opencode-go/minimax-m2.7 → opencode/minimax-m2.7-highspeed → anthropic\|opencode/claude-haiku-4-5 → opencode/gpt-5-nano | Exact runtime chain from `src/shared/model-requirements.ts`. |
|
||||
| **Multimodal Looker** | Vision/screenshots | openai\|opencode/gpt-5.4 (medium) → opencode-go/kimi-k2.5 → zai-coding-plan/glm-4.6v → openai\|github-copilot\|opencode/gpt-5-nano | Exact runtime chain from `src/shared/model-requirements.ts`. |
|
||||
| **Sisyphus-Junior** | Category executor | anthropic\|github-copilot\|opencode/claude-sonnet-4-6 → opencode-go/kimi-k2.5 → openai\|github-copilot\|opencode/gpt-5.4 (medium) → opencode-go/minimax-m2.7 → opencode/big-pickle | Exact runtime chain from `src/shared/model-requirements.ts`. |
|
||||
|
||||
---
|
||||
|
||||
@@ -131,8 +131,8 @@ Principle-driven, explicit reasoning, deep technical capability. Best for agents
|
||||
| **Gemini 3.1 Pro** | Excels at visual/frontend tasks. Different reasoning style. Default for `visual-engineering` and `artistry`. |
|
||||
| **Gemini 3 Flash** | Fast. Good for doc search and light tasks. |
|
||||
| **Grok Code Fast 1** | Blazing fast code grep. Default for Explore agent. |
|
||||
| **MiniMax M2.7** | Fast and smart. Good for utility tasks and search/retrieval. Upgraded from M2.5 with better reasoning. |
|
||||
| **MiniMax M2.7 Highspeed** | Ultra-fast variant. Optimized for latency-sensitive tasks like codebase grep. |
|
||||
| **MiniMax M2.7** | Fast and smart. Used in OpenCode Go and OpenCode Zen utility fallback chains. |
|
||||
| **MiniMax M2.7 Highspeed** | High-speed OpenCode catalog entry used in utility fallback chains that prefer the fastest available MiniMax path. |
|
||||
|
||||
### OpenCode Go
|
||||
|
||||
@@ -144,11 +144,12 @@ A premium subscription tier ($10/month) that provides reliable access to Chinese
|
||||
| ------------------------ | --------------------------------------------------------------------- |
|
||||
| **opencode-go/kimi-k2.5** | Vision-capable, Claude-like reasoning. Used by Sisyphus, Atlas, Sisyphus-Junior, Multimodal Looker. |
|
||||
| **opencode-go/glm-5** | Text-only orchestration model. Used by Oracle, Prometheus, Metis, Momus. |
|
||||
| **opencode-go/minimax-m2.7** | Ultra-cheap, fast responses. Used by Librarian, Explore, Atlas, Sisyphus-Junior for utility work. |
|
||||
| **opencode-go/minimax-m2.7** | Ultra-cheap, fast responses. Used by Librarian, Atlas, and Sisyphus-Junior for utility work. |
|
||||
| **opencode-go/minimax-m2.7-highspeed** | Even faster OpenCode Go MiniMax entry used by Explore when the high-speed catalog entry is available. |
|
||||
|
||||
**When It Gets Used:**
|
||||
|
||||
OpenCode Go models appear in fallback chains as intermediate options. They bridge the gap between premium Claude access and free-tier alternatives. The system tries OpenCode Go models before falling back to free tiers (MiniMax M2.7-highspeed, Big Pickle) or GPT alternatives.
|
||||
OpenCode Go models appear throughout the fallback chains as intermediate options. Depending on the agent, they can sit before GPT, after GPT, or act as the last structured-model fallback before cheaper utility paths.
|
||||
|
||||
**Go-Only Scenarios:**
|
||||
|
||||
@@ -156,7 +157,7 @@ Some model identifiers like `k2p5` (paid Kimi K2.5) and `glm-5` may only be avai
|
||||
|
||||
### About Free-Tier Fallbacks
|
||||
|
||||
You may see model names like `kimi-k2.5-free`, `minimax-m2.7-highspeed`, or `big-pickle` (GLM 4.6) in the source code or logs. These are free-tier or speed-optimized versions of the same model families. They exist as lower-priority entries in fallback chains.
|
||||
You may see model names like `kimi-k2.5-free`, `minimax-m2.7`, `minimax-m2.7-highspeed`, or `big-pickle` (GLM 4.6) in the source code or logs. These are provider-specific or speed-optimized entries in fallback chains.
|
||||
|
||||
You don't need to configure them. The system includes them so it degrades gracefully when you don't have every paid subscription. If you have the paid version, the paid version is always preferred.
|
||||
|
||||
@@ -168,14 +169,14 @@ When agents delegate work, they don't pick a model name — they pick a **catego
|
||||
|
||||
| Category | When Used | Fallback Chain |
|
||||
| -------------------- | -------------------------- | -------------------------------------------- |
|
||||
| `visual-engineering` | Frontend, UI, CSS, design | Gemini 3.1 Pro → GLM 5 → Claude Opus → opencode-go/glm-5 → K2P5 |
|
||||
| `ultrabrain` | Maximum reasoning needed | GPT-5.4 → Gemini 3.1 Pro → Claude Opus → opencode-go/glm-5 |
|
||||
| `deep` | Deep coding, complex logic | GPT-5.3 Codex → Claude Opus → Gemini 3.1 Pro |
|
||||
| `artistry` | Creative, novel approaches | Gemini 3.1 Pro → Claude Opus → GPT-5.4 |
|
||||
| `quick` | Simple, fast tasks | GPT-5.4 Mini → Claude Haiku → Gemini Flash → opencode-go/minimax-m2.7 → GPT-5-Nano |
|
||||
| `unspecified-high` | General complex work | Claude Opus → GPT-5.4 → GLM 5 → K2P5 → opencode-go/glm-5 → Kimi K2.5 |
|
||||
| `unspecified-low` | General standard work | Claude Sonnet → GPT-5.3 Codex → opencode-go/kimi-k2.5 → Gemini Flash |
|
||||
| `writing` | Text, docs, prose | Gemini Flash → opencode-go/kimi-k2.5 → Claude Sonnet |
|
||||
| `visual-engineering` | Frontend, UI, CSS, design | google\|github-copilot\|opencode/gemini-3.1-pro (high) → zai-coding-plan\|opencode/glm-5 → anthropic\|github-copilot\|opencode/claude-opus-4-6 (max) → opencode-go/glm-5 → kimi-for-coding/k2p5 |
|
||||
| `ultrabrain` | Maximum reasoning needed | openai\|opencode/gpt-5.4 (xhigh) → google\|github-copilot\|opencode/gemini-3.1-pro (high) → anthropic\|github-copilot\|opencode/claude-opus-4-6 (max) → opencode-go/glm-5 |
|
||||
| `deep` | Deep coding, complex logic | openai\|opencode/gpt-5.3-codex (medium) → anthropic\|github-copilot\|opencode/claude-opus-4-6 (max) → google\|github-copilot\|opencode/gemini-3.1-pro (high) |
|
||||
| `artistry` | Creative, novel approaches | google\|github-copilot\|opencode/gemini-3.1-pro (high) → anthropic\|github-copilot\|opencode/claude-opus-4-6 (max) → openai\|github-copilot\|opencode/gpt-5.4 |
|
||||
| `quick` | Simple, fast tasks | openai\|github-copilot\|opencode/gpt-5.4-mini → anthropic\|github-copilot\|opencode/claude-haiku-4-5 → google\|github-copilot\|opencode/gemini-3-flash → opencode-go/minimax-m2.7 → opencode/gpt-5-nano |
|
||||
| `unspecified-high` | General complex work | anthropic\|github-copilot\|opencode/claude-opus-4-6 (max) → openai\|github-copilot\|opencode/gpt-5.4 (high) → zai-coding-plan\|opencode/glm-5 → kimi-for-coding/k2p5 → opencode-go/glm-5 → opencode/kimi-k2.5 → opencode\|moonshotai\|moonshotai-cn\|firmware\|ollama-cloud\|aihubmix/kimi-k2.5 |
|
||||
| `unspecified-low` | General standard work | anthropic\|github-copilot\|opencode/claude-sonnet-4-6 → openai\|opencode/gpt-5.3-codex (medium) → opencode-go/kimi-k2.5 → google\|github-copilot\|opencode/gemini-3-flash → opencode-go/minimax-m2.7 |
|
||||
| `writing` | Text, docs, prose | google\|github-copilot\|opencode/gemini-3-flash → opencode-go/kimi-k2.5 → anthropic\|github-copilot\|opencode/claude-sonnet-4-6 → opencode-go/minimax-m2.7 |
|
||||
|
||||
See the [Orchestration System Guide](./orchestration.md) for how agents dispatch tasks to categories.
|
||||
|
||||
@@ -187,7 +188,7 @@ See the [Orchestration System Guide](./orchestration.md) for how agents dispatch
|
||||
|
||||
```jsonc
|
||||
{
|
||||
"$schema": "https://raw.githubusercontent.com/code-yeongyu/oh-my-openagent/dev/assets/oh-my-openagent.schema.json",
|
||||
"$schema": "https://raw.githubusercontent.com/code-yeongyu/oh-my-openagent/dev/assets/oh-my-opencode.schema.json",
|
||||
|
||||
"agents": {
|
||||
// Main orchestrator: Claude Opus or Kimi K2.5 work best
|
||||
@@ -255,12 +256,46 @@ Run `opencode models` to see available models, `opencode auth login` to authenti
|
||||
|
||||
### How Model Resolution Works
|
||||
|
||||
Each agent has a fallback chain. The system tries models in priority order until it finds one available through your connected providers. You don't need to configure providers per model — just authenticate (`opencode auth login`) and the system figures out which models are available and where.
|
||||
Each agent has a fallback chain. The system tries models in priority order until it finds one available through your connected providers. You don't need to configure providers per model. Just authenticate (`opencode auth login`) and the system figures out which models are available and where.
|
||||
|
||||
Core-agent tab cycling is deterministic via injected runtime order field. The fixed priority order is Sisyphus (order: 1), Hephaestus (order: 2), Prometheus (order: 3), and Atlas (order: 4), then the remaining agents follow.
|
||||
|
||||
Your explicit configuration always wins. If you set a specific model for an agent, that choice takes precedence even when resolution data is cold.
|
||||
|
||||
Variant and `reasoningEffort` overrides are normalized to model-supported values, so cross-provider overrides degrade gracefully instead of failing hard.
|
||||
|
||||
Model capabilities are models.dev-backed, with a refreshable cache and capability diagnostics. Use `bunx oh-my-opencode refresh-model-capabilities` to update the cache, or configure `model_capabilities.auto_refresh_on_start` to refresh at startup.
|
||||
|
||||
To see which models your agents will actually use, run `bunx oh-my-opencode doctor`. This shows effective model resolution based on your current authentication and config.
|
||||
|
||||
```
|
||||
Agent Request → User Override (if configured) → Fallback Chain → System Default
|
||||
```
|
||||
|
||||
### File-Based Prompts
|
||||
|
||||
You can load agent system prompts from external files using `file://` URLs in the `prompt` field, or append additional content with `prompt_append`. The `prompt_append` field also works on categories.
|
||||
|
||||
```jsonc
|
||||
{
|
||||
"agents": {
|
||||
"sisyphus": {
|
||||
"prompt": "file:///path/to/custom-prompt.md"
|
||||
},
|
||||
"oracle": {
|
||||
"prompt_append": "file:///path/to/additional-context.md"
|
||||
}
|
||||
},
|
||||
"categories": {
|
||||
"deep": {
|
||||
"prompt_append": "file:///path/to/deep-category-append.md"
|
||||
}
|
||||
}
|
||||
}
|
||||
```
|
||||
|
||||
The file content is loaded at runtime and injected into the agent's system prompt. Supports `~` expansion for home directory and relative `file://` paths.
|
||||
|
||||
---
|
||||
|
||||
## See Also
|
||||
|
||||
@@ -5,7 +5,7 @@
|
||||
Paste this into your llm agent session:
|
||||
|
||||
```
|
||||
Install and configure oh-my-openagent by following the instructions here:
|
||||
Install and configure oh-my-opencode by following the instructions here:
|
||||
https://raw.githubusercontent.com/code-yeongyu/oh-my-openagent/refs/heads/dev/docs/guide/installation.md
|
||||
```
|
||||
|
||||
@@ -14,8 +14,8 @@ https://raw.githubusercontent.com/code-yeongyu/oh-my-openagent/refs/heads/dev/do
|
||||
Run the interactive installer:
|
||||
|
||||
```bash
|
||||
bunx oh-my-openagent install # recommended
|
||||
npx oh-my-openagent install # alternative
|
||||
bunx oh-my-opencode install # recommended
|
||||
npx oh-my-opencode install # alternative
|
||||
```
|
||||
|
||||
> **Note**: The CLI ships with standalone binaries for all major platforms. No runtime (Bun/Node.js) is required for CLI execution after installation.
|
||||
@@ -26,6 +26,8 @@ Follow the prompts to configure your Claude, ChatGPT, and Gemini subscriptions.
|
||||
|
||||
After you install it, you can read this [overview guide](./overview.md) to understand more.
|
||||
|
||||
The published package and local binary are still `oh-my-opencode`. Inside `opencode.json`, the compatibility layer now prefers the plugin entry `oh-my-openagent`, while legacy `oh-my-opencode` entries still load with a warning. Plugin config loading recognizes both `oh-my-openagent.json[c]` and `oh-my-opencode.json[c]` during the transition. If you see a "Using legacy package name" warning from `bunx oh-my-opencode doctor`, update your `opencode.json` plugin entry from `"oh-my-opencode"` to `"oh-my-openagent"`.
|
||||
|
||||
## For LLM Agents
|
||||
|
||||
> **IMPORTANT: Use `curl` to fetch this file, NOT WebFetch.** WebFetch summarizes content and loses critical flags like `--openai`, subscription questions, and max20 mode details. Always use:
|
||||
@@ -34,7 +36,7 @@ After you install it, you can read this [overview guide](./overview.md) to under
|
||||
> curl -fsSL https://raw.githubusercontent.com/code-yeongyu/oh-my-openagent/refs/heads/dev/docs/guide/installation.md
|
||||
> ```
|
||||
|
||||
If you're an LLM Agent helping set up oh-my-openagent, Welcome! You'll now help your user to install the best agent harness, OhMyOpenAgent. Tell user this with greetings, with test 'oMoMoMoMo...'
|
||||
If you're an LLM Agent helping set up oh-my-opencode, welcome. You'll help your user install the harness and verify the rename-compat setup cleanly. Tell user this with greetings, with text 'oMoMoMoMo...'
|
||||
|
||||
follow these steps:
|
||||
|
||||
@@ -73,7 +75,7 @@ Ask the user these questions to determine CLI options:
|
||||
- If **yes** → `--opencode-go=yes`
|
||||
- If **no** → `--opencode-go=no` (default)
|
||||
|
||||
**Provider Priority**: Native (anthropic/, openai/, google/) > Kimi for Coding > GitHub Copilot > OpenCode Go > OpenCode Zen > Z.ai Coding Plan
|
||||
**Provider selection is agent-specific.** The installer and runtime do not use one single global provider priority. Each agent resolves against its own fallback chain.
|
||||
|
||||
MUST STRONGLY WARNING, WHEN USER SAID THEY DON'T HAVE CLAUDE SUBSCRIPTION, SISYPHUS AGENT MIGHT NOT WORK IDEALLY.
|
||||
|
||||
@@ -96,19 +98,19 @@ Spawn a subagent to handle installation and report back - to save context.
|
||||
Based on user's answers, run the CLI installer with appropriate flags:
|
||||
|
||||
```bash
|
||||
bunx oh-my-openagent install --no-tui --claude=<yes|no|max20> --gemini=<yes|no> --copilot=<yes|no> [--openai=<yes|no>] [--opencode-go=<yes|no>] [--opencode-zen=<yes|no>] [--zai-coding-plan=<yes|no>]
|
||||
bunx oh-my-opencode install --no-tui --claude=<yes|no|max20> --gemini=<yes|no> --copilot=<yes|no> [--openai=<yes|no>] [--opencode-go=<yes|no>] [--opencode-zen=<yes|no>] [--zai-coding-plan=<yes|no>]
|
||||
```
|
||||
|
||||
**Examples:**
|
||||
|
||||
- User has all native subscriptions: `bunx oh-my-openagent install --no-tui --claude=max20 --openai=yes --gemini=yes --copilot=no`
|
||||
- User has only Claude: `bunx oh-my-openagent install --no-tui --claude=yes --gemini=no --copilot=no`
|
||||
- User has Claude + OpenAI: `bunx oh-my-openagent install --no-tui --claude=yes --openai=yes --gemini=no --copilot=no`
|
||||
- User has only GitHub Copilot: `bunx oh-my-openagent install --no-tui --claude=no --gemini=no --copilot=yes`
|
||||
- User has Z.ai for Librarian: `bunx oh-my-openagent install --no-tui --claude=yes --gemini=no --copilot=no --zai-coding-plan=yes`
|
||||
- User has only OpenCode Zen: `bunx oh-my-openagent install --no-tui --claude=no --gemini=no --copilot=no --opencode-zen=yes`
|
||||
- User has OpenCode Go only: `bunx oh-my-openagent install --no-tui --claude=no --openai=no --gemini=no --copilot=no --opencode-go=yes`
|
||||
- User has no subscriptions: `bunx oh-my-openagent install --no-tui --claude=no --gemini=no --copilot=no`
|
||||
- User has all native subscriptions: `bunx oh-my-opencode install --no-tui --claude=max20 --openai=yes --gemini=yes --copilot=no`
|
||||
- User has only Claude: `bunx oh-my-opencode install --no-tui --claude=yes --gemini=no --copilot=no`
|
||||
- User has Claude + OpenAI: `bunx oh-my-opencode install --no-tui --claude=yes --openai=yes --gemini=no --copilot=no`
|
||||
- User has only GitHub Copilot: `bunx oh-my-opencode install --no-tui --claude=no --gemini=no --copilot=yes`
|
||||
- User has Z.ai for Librarian: `bunx oh-my-opencode install --no-tui --claude=yes --gemini=no --copilot=no --zai-coding-plan=yes`
|
||||
- User has only OpenCode Zen: `bunx oh-my-opencode install --no-tui --claude=no --gemini=no --copilot=no --opencode-zen=yes`
|
||||
- User has OpenCode Go only: `bunx oh-my-opencode install --no-tui --claude=no --openai=no --gemini=no --copilot=no --opencode-go=yes`
|
||||
- User has no subscriptions: `bunx oh-my-opencode install --no-tui --claude=no --gemini=no --copilot=no`
|
||||
|
||||
The CLI will:
|
||||
|
||||
@@ -120,8 +122,17 @@ The CLI will:
|
||||
|
||||
```bash
|
||||
opencode --version # Should be 1.0.150 or higher
|
||||
cat ~/.config/opencode/opencode.json # Should contain "oh-my-openagent" in plugin array
|
||||
cat ~/.config/opencode/opencode.json # Should contain "oh-my-openagent" in plugin array, or the legacy "oh-my-opencode" entry while you are still migrating
|
||||
```
|
||||
#### Run Doctor Verification
|
||||
|
||||
After installation, verify everything is working correctly:
|
||||
|
||||
```bash
|
||||
bunx oh-my-opencode doctor
|
||||
```
|
||||
|
||||
This checks system, config, tools, and model resolution, including legacy package name warnings and compatibility-fallback diagnostics.
|
||||
|
||||
### Step 4: Configure Authentication
|
||||
|
||||
@@ -154,9 +165,9 @@ First, add the opencode-antigravity-auth plugin:
|
||||
You'll also need full model settings in `opencode.json`.
|
||||
Read the [opencode-antigravity-auth documentation](https://github.com/NoeFabris/opencode-antigravity-auth), copy the full model configuration from the README, and merge carefully to avoid breaking the user's existing setup. The plugin now uses a **variant system** — models like `antigravity-gemini-3-pro` support `low`/`high` variants instead of separate `-low`/`-high` model entries.
|
||||
|
||||
##### oh-my-openagent Agent Model Override
|
||||
##### Plugin config model override
|
||||
|
||||
The `opencode-antigravity-auth` plugin uses different model names than the built-in Google auth. Override the agent models in `oh-my-openagent.json` (or `.opencode/oh-my-openagent.json`):
|
||||
The `opencode-antigravity-auth` plugin uses different model names than the built-in Google auth. Override the agent models in your plugin config file. Existing installs still commonly use `oh-my-opencode.json` or `.opencode/oh-my-opencode.json`, while the compatibility layer also recognizes `oh-my-openagent.json[c]`.
|
||||
|
||||
```json
|
||||
{
|
||||
@@ -201,16 +212,16 @@ GitHub Copilot is supported as a **fallback provider** when native providers are
|
||||
|
||||
##### Model Mappings
|
||||
|
||||
When GitHub Copilot is the best available provider, oh-my-openagent uses these model assignments:
|
||||
When GitHub Copilot is the best available provider, install-time defaults are agent-specific. Common examples are:
|
||||
|
||||
| Agent | Model |
|
||||
| ------------- | --------------------------------- |
|
||||
| ------------- | ---------------------------------- |
|
||||
| **Sisyphus** | `github-copilot/claude-opus-4.6` |
|
||||
| **Oracle** | `github-copilot/gpt-5.4` |
|
||||
| **Explore** | `github-copilot/grok-code-fast-1` |
|
||||
| **Librarian** | `github-copilot/gemini-3-flash` |
|
||||
| **Atlas** | `github-copilot/claude-sonnet-4.6` |
|
||||
|
||||
GitHub Copilot acts as a proxy provider, routing requests to underlying models based on your subscription.
|
||||
GitHub Copilot acts as a proxy provider, routing requests to underlying models based on your subscription. Some agents, like Librarian, are not installed from Copilot alone and instead rely on other configured providers or runtime fallback behavior.
|
||||
|
||||
#### Z.ai Coding Plan
|
||||
|
||||
@@ -227,43 +238,37 @@ If Z.ai is your main provider, the most important fallbacks are:
|
||||
|
||||
#### OpenCode Zen
|
||||
|
||||
OpenCode Zen provides access to `opencode/` prefixed models including `opencode/claude-opus-4-6`, `opencode/gpt-5.4`, `opencode/gpt-5.3-codex`, `opencode/gpt-5-nano`, `opencode/glm-5`, `opencode/big-pickle`, and `opencode/minimax-m2.7-highspeed`.
|
||||
OpenCode Zen provides access to `opencode/` prefixed models including `opencode/claude-opus-4-6`, `opencode/gpt-5.4`, `opencode/gpt-5.3-codex`, `opencode/gpt-5-nano`, `opencode/glm-5`, `opencode/big-pickle`, `opencode/minimax-m2.7`, and `opencode/minimax-m2.7-highspeed`.
|
||||
|
||||
When OpenCode Zen is the best available provider (no native or Copilot), these models are used:
|
||||
When OpenCode Zen is the best available provider, these are the most relevant source-backed examples:
|
||||
|
||||
| Agent | Model |
|
||||
| ------------- | ---------------------------------------------------- |
|
||||
| **Sisyphus** | `opencode/claude-opus-4-6` |
|
||||
| **Oracle** | `opencode/gpt-5.4` |
|
||||
| **Explore** | `opencode/gpt-5-nano` |
|
||||
| **Librarian** | `opencode/minimax-m2.7-highspeed` / `opencode/big-pickle` |
|
||||
| **Explore** | `opencode/minimax-m2.7` |
|
||||
|
||||
##### Setup
|
||||
|
||||
Run the installer and select "Yes" for GitHub Copilot:
|
||||
Run the installer and select "Yes" for OpenCode Zen:
|
||||
|
||||
```bash
|
||||
bunx oh-my-openagent install
|
||||
# Select your subscriptions (Claude, ChatGPT, Gemini)
|
||||
# When prompted: "Do you have a GitHub Copilot subscription?" → Select "Yes"
|
||||
bunx oh-my-opencode install
|
||||
# Select your subscriptions (Claude, ChatGPT, Gemini, OpenCode Zen, etc.)
|
||||
# When prompted: "Do you have access to OpenCode Zen (opencode/ models)?" → Select "Yes"
|
||||
```
|
||||
|
||||
Or use non-interactive mode:
|
||||
|
||||
```bash
|
||||
bunx oh-my-openagent install --no-tui --claude=no --openai=no --gemini=no --copilot=yes
|
||||
bunx oh-my-opencode install --no-tui --claude=no --openai=no --gemini=no --opencode-zen=yes
|
||||
```
|
||||
|
||||
Then authenticate with GitHub:
|
||||
|
||||
```bash
|
||||
opencode auth login
|
||||
# Select: GitHub → Authenticate via OAuth
|
||||
```
|
||||
This provider uses the `opencode/` model catalog. If your OpenCode environment prompts for provider authentication, follow the OpenCode provider flow for `opencode/` models instead of reusing the fallback-provider auth steps above.
|
||||
|
||||
### Step 5: Understand Your Model Setup
|
||||
|
||||
You've just configured oh-my-openagent. Here's what got set up and why.
|
||||
You've just configured oh-my-opencode. Here's what got set up and why.
|
||||
|
||||
#### Model Families: What You're Working With
|
||||
|
||||
@@ -276,7 +281,7 @@ Not all models behave the same way. Understanding which models are "similar" hel
|
||||
| **Claude Opus 4.6** | anthropic, github-copilot, opencode | Best overall. Default for Sisyphus. |
|
||||
| **Claude Sonnet 4.6** | anthropic, github-copilot, opencode | Faster, cheaper. Good balance. |
|
||||
| **Claude Haiku 4.5** | anthropic, opencode | Fast and cheap. Good for quick tasks. |
|
||||
| **Kimi K2.5** | kimi-for-coding | Behaves very similarly to Claude. Great all-rounder. Default for Atlas. |
|
||||
| **Kimi K2.5** | kimi-for-coding, opencode-go, opencode, moonshotai, moonshotai-cn, firmware, ollama-cloud, aihubmix | Behaves very similarly to Claude. Great all-rounder that appears in several orchestration fallback chains. |
|
||||
| **Kimi K2.5 Free** | opencode | Free-tier Kimi. Rate-limited but functional. |
|
||||
| **GLM 5** | zai-coding-plan, opencode | Claude-like behavior. Good for broad tasks. |
|
||||
| **Big Pickle (GLM 4.6)** | opencode | Free-tier GLM. Decent fallback. |
|
||||
@@ -296,16 +301,16 @@ Not all models behave the same way. Understanding which models are "similar" hel
|
||||
| --------------------- | -------------------------------- | ----------------------------------------------------------- |
|
||||
| **Gemini 3.1 Pro** | google, github-copilot, opencode | Excels at visual/frontend tasks. Different reasoning style. |
|
||||
| **Gemini 3 Flash** | google, github-copilot, opencode | Fast, good for doc search and light tasks. |
|
||||
| **MiniMax M2.7** | venice, opencode-go | Fast and smart. Good for utility tasks. Upgraded from M2.5. |
|
||||
| **MiniMax M2.7 Highspeed** | opencode | Ultra-fast MiniMax variant. Optimized for latency. |
|
||||
| **MiniMax M2.7** | opencode-go, opencode | Fast and smart. Utility fallbacks use `minimax-m2.7` or `minimax-m2.7-highspeed` depending on the chain. |
|
||||
| **MiniMax M2.7 Highspeed** | opencode-go, opencode | Faster utility variant used in Explore and other retrieval-heavy fallback chains. |
|
||||
|
||||
**Speed-Focused Models**:
|
||||
|
||||
| Model | Provider(s) | Speed | Notes |
|
||||
| ----------------------- | ---------------------- | -------------- | --------------------------------------------------------------------------------------------------------------------------------------------- |
|
||||
| **Grok Code Fast 1** | github-copilot, venice | Very fast | Optimized for code grep/search. Default for Explore. |
|
||||
| **Grok Code Fast 1** | github-copilot, xai | Very fast | Optimized for code grep/search. Default for Explore. |
|
||||
| **Claude Haiku 4.5** | anthropic, opencode | Fast | Good balance of speed and intelligence. |
|
||||
| **MiniMax M2.7 Highspeed** | opencode | Very fast | Ultra-fast MiniMax variant. Smart for its speed class. |
|
||||
| **MiniMax M2.7 Highspeed** | opencode-go, opencode | Very fast | High-speed MiniMax utility fallback used by runtime chains such as Explore and, on the OpenCode catalog, Librarian. |
|
||||
| **GPT-5.3-codex-spark** | openai | Extremely fast | Blazing fast but compacts so aggressively that oh-my-openagent's context management doesn't work well with it. Not recommended for omo agents. |
|
||||
|
||||
#### What Each Agent Does and Which Model It Got
|
||||
@@ -316,8 +321,8 @@ Based on your subscriptions, here's how the agents were configured:
|
||||
|
||||
| Agent | Role | Default Chain | What It Does |
|
||||
| ------------ | ---------------- | ----------------------------------------------- | ---------------------------------------------------------------------------------------- |
|
||||
| **Sisyphus** | Main ultraworker | Opus (max) → Kimi K2.5 → GLM 5 → Big Pickle | Primary coding agent. Orchestrates everything. **Never use GPT — no GPT prompt exists.** |
|
||||
| **Metis** | Plan review | Opus (max) → Kimi K2.5 → GPT-5.4 → Gemini 3.1 Pro | Reviews Prometheus plans for gaps. |
|
||||
| **Sisyphus** | Main ultraworker | anthropic\|github-copilot\|opencode/claude-opus-4-6 (max) → opencode-go/kimi-k2.5 → kimi-for-coding/k2p5 → opencode\|moonshotai\|moonshotai-cn\|firmware\|ollama-cloud\|aihubmix/kimi-k2.5 → openai\|github-copilot\|opencode/gpt-5.4 (medium) → zai-coding-plan\|opencode/glm-5 → opencode/big-pickle | Primary coding agent. Exact runtime chain from `src/shared/model-requirements.ts`. |
|
||||
| **Metis** | Plan review | anthropic\|github-copilot\|opencode/claude-opus-4-6 (max) → openai\|github-copilot\|opencode/gpt-5.4 (high) → opencode-go/glm-5 → kimi-for-coding/k2p5 | Reviews Prometheus plans for gaps. Exact runtime chain from `src/shared/model-requirements.ts`. |
|
||||
|
||||
**Dual-Prompt Agents** (auto-switch between Claude and GPT prompts):
|
||||
|
||||
@@ -327,16 +332,16 @@ Priority: **Claude > GPT > Claude-like models**
|
||||
|
||||
| Agent | Role | Default Chain | GPT Prompt? |
|
||||
| -------------- | ----------------- | ---------------------------------------------------------- | ---------------------------------------------------------------- |
|
||||
| **Prometheus** | Strategic planner | Opus (max) → **GPT-5.4 (high)** → Kimi K2.5 → Gemini 3.1 Pro | Yes — XML-tagged, principle-driven (~300 lines vs ~1,100 Claude) |
|
||||
| **Atlas** | Todo orchestrator | **Kimi K2.5** → Sonnet → GPT-5.4 | Yes — GPT-optimized todo management |
|
||||
| **Prometheus** | Strategic planner | anthropic\|github-copilot\|opencode/claude-opus-4-6 (max) → openai\|github-copilot\|opencode/gpt-5.4 (high) → opencode-go/glm-5 → google\|github-copilot\|opencode/gemini-3.1-pro | Yes — XML-tagged, principle-driven (~300 lines vs ~1,100 Claude) |
|
||||
| **Atlas**      | Todo orchestrator | anthropic\|github-copilot\|opencode/claude-sonnet-4-6 → opencode-go/kimi-k2.5 → openai\|github-copilot\|opencode/gpt-5.4 (medium) → opencode-go/minimax-m2.7 | Yes — GPT-optimized todo management |
|
||||
|
||||
**GPT-Native Agents** (built for GPT, don't override to Claude):
|
||||
|
||||
| Agent | Role | Default Chain | Notes |
|
||||
| -------------- | ---------------------- | -------------------------------------- | ------------------------------------------------------ |
|
||||
| **Hephaestus** | Deep autonomous worker | GPT-5.4 (medium) only | "Codex on steroids." No fallback. Requires GPT access. |
|
||||
| **Oracle** | Architecture/debugging | GPT-5.4 (high) → Gemini 3.1 Pro → Opus | High-IQ strategic backup. GPT preferred. |
|
||||
| **Momus** | High-accuracy reviewer | GPT-5.4 (medium) → Opus → Gemini 3.1 Pro | Verification agent. GPT preferred. |
|
||||
| **Oracle** | Architecture/debugging | openai\|github-copilot\|opencode/gpt-5.4 (high) → google\|github-copilot\|opencode/gemini-3.1-pro (high) → anthropic\|github-copilot\|opencode/claude-opus-4-6 (max) → opencode-go/glm-5 | High-IQ strategic backup. GPT preferred. |
|
||||
| **Momus** | High-accuracy reviewer | openai\|github-copilot\|opencode/gpt-5.4 (xhigh) → anthropic\|github-copilot\|opencode/claude-opus-4-6 (max) → google\|github-copilot\|opencode/gemini-3.1-pro (high) → opencode-go/glm-5 | Verification agent. GPT preferred. |
|
||||
|
||||
**Utility Agents** (speed over intelligence):
|
||||
|
||||
@@ -344,9 +349,9 @@ These agents do search, grep, and retrieval. They intentionally use fast, cheap
|
||||
|
||||
| Agent | Role | Default Chain | Design Rationale |
|
||||
| --------------------- | ------------------ | ---------------------------------------------------------------------- | -------------------------------------------------------------- |
|
||||
| **Explore** | Fast codebase grep | Grok Code Fast → MiniMax M2.7-highspeed → MiniMax M2.7 → Haiku → GPT-5-Nano | Speed is everything. Grok is blazing fast for grep. |
|
||||
| **Librarian** | Docs/code search | MiniMax M2.7 → MiniMax M2.7-highspeed → Haiku → GPT-5-Nano | Doc retrieval doesn't need deep reasoning. MiniMax is fast. |
|
||||
| **Multimodal Looker** | Vision/screenshots | Kimi K2.5 → Kimi Free → Gemini Flash → GPT-5.4 → GLM-4.6v | Kimi excels at multimodal understanding. |
|
||||
| **Explore** | Fast codebase grep | github-copilot\|xai/grok-code-fast-1 → opencode-go/minimax-m2.7-highspeed → opencode/minimax-m2.7 → anthropic\|opencode/claude-haiku-4-5 → opencode/gpt-5-nano | Speed is everything. Exact runtime chain from `src/shared/model-requirements.ts`. |
|
||||
| **Librarian** | Docs/code search | opencode-go/minimax-m2.7 → opencode/minimax-m2.7-highspeed → anthropic\|opencode/claude-haiku-4-5 → opencode/gpt-5-nano | Doc retrieval doesn't need deep reasoning. Exact runtime chain from `src/shared/model-requirements.ts`. |
|
||||
| **Multimodal Looker** | Vision/screenshots | openai\|opencode/gpt-5.4 (medium) → opencode-go/kimi-k2.5 → zai-coding-plan/glm-4.6v → openai\|github-copilot\|opencode/gpt-5-nano | GPT-5.4 now leads the default vision path when available. |
|
||||
|
||||
#### Why Different Models Need Different Prompts
|
||||
|
||||
@@ -365,7 +370,7 @@ This is why Prometheus and Atlas ship separate prompts per model family — they
|
||||
|
||||
#### Custom Model Configuration
|
||||
|
||||
If the user wants to override which model an agent uses, you can customize in `oh-my-openagent.json`:
|
||||
If the user wants to override which model an agent uses, you can customize in your plugin config file. Existing installs still commonly use `oh-my-opencode.json`, while the compatibility layer also recognizes `oh-my-openagent.json[c]`.
|
||||
|
||||
```jsonc
|
||||
{
|
||||
@@ -400,18 +405,14 @@ GPT (5.3-codex, 5.2) > Claude Opus (decent fallback) > Gemini (acceptable)
|
||||
|
||||
**Dangerous** (no prompt support):
|
||||
|
||||
- Sisyphus → GPT: **No GPT prompt. Will degrade significantly.**
|
||||
- Sisyphus → older GPT models: **Still a bad fit. GPT-5.4 is the only dedicated GPT prompt path.**
|
||||
- Hephaestus → Claude: **Built for Codex. Claude can't replicate this.**
|
||||
- Explore → Opus: **Massive cost waste. Explore needs speed, not intelligence.**
|
||||
- Librarian → Opus: **Same. Doc search doesn't need Opus-level reasoning.**
|
||||
|
||||
#### Provider Priority Chain
|
||||
#### Provider Resolution
|
||||
|
||||
When multiple providers are available, oh-my-openagent uses this priority:
|
||||
|
||||
```
|
||||
Native (anthropic/, openai/, google/) > Kimi for Coding > GitHub Copilot > Venice > OpenCode Zen > Z.ai Coding Plan
|
||||
```
|
||||
There is no single global provider priority. The installer and runtime resolve each agent against its own fallback chain, so the winning provider depends on the agent and the subscriptions you enabled.
|
||||
|
||||
### ⚠️ Warning
|
||||
|
||||
@@ -462,3 +463,7 @@ Tell the user of following:
|
||||
4. You wanna have your own agent catalog setup? I can read the [docs](docs/guide/agent-model-matching.md) and set it up for you after interviewing!
|
||||
|
||||
That's it. The agent will figure out the rest and handle everything automatically.
|
||||
|
||||
#### Advanced Configuration
|
||||
|
||||
You can customize agent models and fallback chains in your config. The `fallback_models` field accepts either a single string or an array that mixes strings and per-model objects with settings like `variant` and `temperature`. See the [Configuration Reference](../reference/configuration.md) and example configs in `docs/examples/` for details.
|
||||
|
||||
@@ -1,142 +1,192 @@
|
||||
# CLI Reference
|
||||
|
||||
Complete reference for the `oh-my-openagent` command-line interface.
|
||||
Complete reference for the published `oh-my-opencode` CLI. During the rename transition, OpenCode plugin registration now prefers `oh-my-openagent` inside `opencode.json`.
|
||||
|
||||
## Basic Usage
|
||||
|
||||
```bash
|
||||
# Display help
|
||||
bunx oh-my-openagent
|
||||
bunx oh-my-opencode
|
||||
|
||||
# Or with npx
|
||||
npx oh-my-openagent
|
||||
npx oh-my-opencode
|
||||
```
|
||||
|
||||
## Commands
|
||||
|
||||
| Command | Description |
|
||||
| ------------------- | ----------------------------------------- |
|
||||
| ----------------------------- | ------------------------------------------------------ |
|
||||
| `install` | Interactive setup wizard |
|
||||
| `doctor` | Environment diagnostics and health checks |
|
||||
| `run` | OpenCode session runner |
|
||||
| `run` | OpenCode session runner with task completion enforcement |
|
||||
| `get-local-version` | Display local version information and update check |
|
||||
| `refresh-model-capabilities` | Refresh the cached models.dev-based model capabilities |
|
||||
| `version` | Show version information |
|
||||
| `mcp oauth` | MCP OAuth authentication management |
|
||||
| `auth` | Google Antigravity OAuth authentication |
|
||||
| `get-local-version` | Display local version information |
|
||||
|
||||
---
|
||||
|
||||
## install
|
||||
|
||||
Interactive installation tool for initial Oh-My-OpenAgent setup. Provides a TUI based on `@clack/prompts`.
|
||||
Interactive installation tool for initial Oh My OpenCode setup. Provides a TUI based on `@clack/prompts`.
|
||||
|
||||
### Usage
|
||||
|
||||
```bash
|
||||
bunx oh-my-openagent install
|
||||
bunx oh-my-opencode install
|
||||
```
|
||||
|
||||
### Installation Process
|
||||
|
||||
1. **Provider Selection**: Choose your AI provider (Claude, ChatGPT, or Gemini)
|
||||
2. **API Key Input**: Enter the API key for your selected provider
|
||||
3. **Configuration File Creation**: Generates `opencode.json` or `oh-my-openagent.json` files
|
||||
4. **Plugin Registration**: Automatically registers the oh-my-openagent plugin in OpenCode settings
|
||||
1. **Subscription Selection**: Choose which providers and subscriptions you actually have
|
||||
2. **Plugin Registration**: Registers `oh-my-openagent` in OpenCode settings, or upgrades a legacy `oh-my-opencode` entry during the compatibility window
|
||||
3. **Configuration File Creation**: Writes the generated OmO config to `oh-my-opencode.json` in the active OpenCode config directory
|
||||
4. **Authentication Hints**: Shows the `opencode auth login` steps for the providers you selected, unless `--skip-auth` is set
|
||||
|
||||
### Options
|
||||
|
||||
| Option | Description |
|
||||
| ----------- | ---------------------------------------------------------------- |
|
||||
| `--no-tui` | Run in non-interactive mode without TUI (for CI/CD environments) |
|
||||
| `--verbose` | Display detailed logs |
|
||||
| ------ | ----------- |
|
||||
| `--no-tui` | Run in non-interactive mode without TUI |
|
||||
| `--claude <no\|yes\|max20>` | Claude subscription mode |
|
||||
| `--openai <no\|yes>` | OpenAI / ChatGPT subscription |
|
||||
| `--gemini <no\|yes>` | Gemini integration |
|
||||
| `--copilot <no\|yes>` | GitHub Copilot subscription |
|
||||
| `--opencode-zen <no\|yes>` | OpenCode Zen access |
|
||||
| `--zai-coding-plan <no\|yes>` | Z.ai Coding Plan subscription |
|
||||
| `--kimi-for-coding <no\|yes>` | Kimi for Coding subscription |
|
||||
| `--opencode-go <no\|yes>` | OpenCode Go subscription |
|
||||
| `--skip-auth` | Skip authentication setup hints |
|
||||
|
||||
---
|
||||
|
||||
## doctor
|
||||
|
||||
Diagnoses your environment to ensure Oh-My-OpenAgent is functioning correctly. Performs 17+ health checks.
|
||||
Diagnoses your environment to ensure Oh My OpenCode is functioning correctly. The current checks are grouped into system, config, tools, and models.
|
||||
|
||||
The doctor command detects common issues including:
|
||||
- Legacy plugin entry references in `opencode.json` (warns when `oh-my-opencode` is still used instead of `oh-my-openagent`)
|
||||
- Configuration file validity and JSONC parsing errors
|
||||
- Model resolution and fallback chain verification
|
||||
- Missing or misconfigured MCP servers
|
||||
### Usage
|
||||
|
||||
```bash
|
||||
bunx oh-my-openagent doctor
|
||||
bunx oh-my-opencode doctor
|
||||
```
|
||||
|
||||
### Diagnostic Categories
|
||||
|
||||
| Category | Check Items |
|
||||
| ------------------ | --------------------------------------------------------- |
|
||||
| **Installation** | OpenCode version (>= 1.0.150), plugin registration status |
|
||||
| **Configuration** | Configuration file validity, JSONC parsing |
|
||||
| **Authentication** | Anthropic, OpenAI, Google API key validity |
|
||||
| **Dependencies** | Bun, Node.js, Git installation status |
|
||||
| **Tools** | LSP server status, MCP server status |
|
||||
| **Updates** | Latest version check |
|
||||
| ----------------- | ------------------------------------------------------------------------------------ |
|
||||
| **System** | OpenCode binary, version (>= 1.0.150), plugin registration, legacy package name warning |
|
||||
| **Config** | Configuration file validity, JSONC parsing, Zod schema validation |
|
||||
| **Tools** | AST-Grep, LSP servers, GitHub CLI, MCP servers |
|
||||
| **Models** | Model capabilities cache, model resolution, agent/category overrides, availability |
|
||||
|
||||
### Options
|
||||
|
||||
| Option | Description |
|
||||
| ------------------- | ---------------------------------------------------------------- |
|
||||
| `--category <name>` | Check specific category only (e.g., `--category authentication`) |
|
||||
| ------------ | ----------------------------------------- |
|
||||
| `--status` | Show compact system dashboard |
|
||||
| `--verbose` | Show detailed diagnostic information |
|
||||
| `--json` | Output results in JSON format |
|
||||
| `--verbose` | Include detailed information |
|
||||
|
||||
### Example Output
|
||||
|
||||
```
|
||||
oh-my-openagent doctor
|
||||
oh-my-opencode doctor
|
||||
|
||||
┌──────────────────────────────────────────────────┐
|
||||
│ Oh-My-OpenAgent Doctor │
|
||||
└──────────────────────────────────────────────────┘
|
||||
|
||||
Installation
|
||||
System
|
||||
✓ OpenCode version: 1.0.155 (>= 1.0.150)
|
||||
✓ Plugin registered in opencode.json
|
||||
|
||||
Configuration
|
||||
✓ oh-my-openagent.json is valid
|
||||
Config
|
||||
✓ oh-my-opencode.jsonc is valid
|
||||
✓ Model resolution: all agents have valid fallback chains
|
||||
⚠ categories.visual-engineering: using default model
|
||||
|
||||
Authentication
|
||||
✓ Anthropic API key configured
|
||||
✓ OpenAI API key configured
|
||||
✗ Google API key not found
|
||||
Tools
|
||||
✓ AST-Grep available
|
||||
✓ LSP servers configured
|
||||
|
||||
Dependencies
|
||||
✓ Bun 1.2.5 installed
|
||||
✓ Node.js 22.0.0 installed
|
||||
✓ Git 2.45.0 installed
|
||||
Models
|
||||
✓ 11 agents, 8 categories, 0 overrides
|
||||
⚠ Some configured models rely on compatibility fallback
|
||||
|
||||
Summary: 10 passed, 1 warning, 1 failed
|
||||
Summary: 10 passed, 1 warning, 0 failed
|
||||
```
|
||||
|
||||
---
|
||||
|
||||
## run
|
||||
|
||||
Executes OpenCode sessions and monitors task completion.
|
||||
Run opencode with todo/background task completion enforcement. Unlike `opencode run`, this command waits until all todos are completed or cancelled, and all child sessions (background tasks) are idle.
|
||||
|
||||
### Usage
|
||||
|
||||
```bash
|
||||
bunx oh-my-openagent run [prompt]
|
||||
bunx oh-my-opencode run <message>
|
||||
```
|
||||
|
||||
### Options
|
||||
|
||||
| Option | Description |
|
||||
| ------------------------ | ------------------------------------------------- |
|
||||
| `--enforce-completion` | Keep session active until all TODOs are completed |
|
||||
| `--timeout <seconds>` | Set maximum execution time |
|
||||
| `--agent <name>` | Specify agent to use |
|
||||
| `--directory <path>` | Set working directory |
|
||||
| `--port <number>` | Set port for session |
|
||||
| `--attach` | Attach to existing session |
|
||||
| `--json` | Output in JSON format |
|
||||
| `--no-timestamp` | Disable timestamped output |
|
||||
| `--session-id <id>` | Resume existing session |
|
||||
| `--on-complete <action>` | Action on completion |
|
||||
| `--verbose` | Enable verbose logging |
|
||||
| --------------------- | ------------------------------------------------------------------- |
|
||||
| `-a, --agent <name>` | Agent to use (default: from CLI/env/config, fallback: Sisyphus) |
|
||||
| `-m, --model <provider/model>` | Model override (e.g., anthropic/claude-sonnet-4) |
|
||||
| `-d, --directory <path>` | Working directory |
|
||||
| `-p, --port <port>` | Server port (attaches if port already in use) |
|
||||
| `--attach <url>` | Attach to existing opencode server URL |
|
||||
| `--on-complete <command>` | Shell command to run after completion |
|
||||
| `--json` | Output structured JSON result to stdout |
|
||||
| `--no-timestamp` | Disable timestamp prefix in run output |
|
||||
| `--verbose` | Show full event stream (default: messages/tools only) |
|
||||
| `--session-id <id>` | Resume existing session instead of creating new one |
|
||||
|
||||
---
|
||||
|
||||
## get-local-version
|
||||
|
||||
Show current installed version and check for updates.
|
||||
|
||||
### Usage
|
||||
|
||||
```bash
|
||||
bunx oh-my-opencode get-local-version
|
||||
```
|
||||
|
||||
### Options
|
||||
|
||||
| Option | Description |
|
||||
| ----------------- | ---------------------------------------------- |
|
||||
| `-d, --directory` | Working directory to check config from |
|
||||
| `--json` | Output in JSON format for scripting |
|
||||
|
||||
### Output
|
||||
|
||||
Shows:
|
||||
- Current installed version
|
||||
- Latest available version on npm
|
||||
- Whether you're up to date
|
||||
- Special modes (local dev, pinned version)
|
||||
|
||||
---
|
||||
|
||||
## version
|
||||
|
||||
Show version information.
|
||||
|
||||
### Usage
|
||||
|
||||
```bash
|
||||
bunx oh-my-opencode version
|
||||
```
|
||||
|
||||
`--on-complete` runs through your current shell when possible: `sh` on Unix shells, `pwsh` for PowerShell on non-Windows, `powershell.exe` for PowerShell on Windows, and `cmd.exe` as the Windows fallback.
|
||||
|
||||
---
|
||||
|
||||
@@ -148,16 +198,16 @@ Manages OAuth 2.1 authentication for remote MCP servers.
|
||||
|
||||
```bash
|
||||
# Login to an OAuth-protected MCP server
|
||||
bunx oh-my-openagent mcp oauth login <server-name> --server-url https://api.example.com
|
||||
bunx oh-my-opencode mcp oauth login <server-name> --server-url https://api.example.com
|
||||
|
||||
# Login with explicit client ID and scopes
|
||||
bunx oh-my-openagent mcp oauth login my-api --server-url https://api.example.com --client-id my-client --scopes "read,write"
|
||||
bunx oh-my-opencode mcp oauth login my-api --server-url https://api.example.com --client-id my-client --scopes read write
|
||||
|
||||
# Remove stored OAuth tokens
|
||||
bunx oh-my-openagent mcp oauth logout <server-name>
|
||||
bunx oh-my-opencode mcp oauth logout <server-name> --server-url https://api.example.com
|
||||
|
||||
# Check OAuth token status
|
||||
bunx oh-my-openagent mcp oauth status [server-name]
|
||||
bunx oh-my-opencode mcp oauth status [server-name]
|
||||
```
|
||||
|
||||
### Options
|
||||
@@ -166,7 +216,7 @@ bunx oh-my-openagent mcp oauth status [server-name]
|
||||
| -------------------- | ------------------------------------------------------------------------- |
|
||||
| `--server-url <url>` | MCP server URL (required for login) |
|
||||
| `--client-id <id>` | OAuth client ID (optional if server supports Dynamic Client Registration) |
|
||||
| `--scopes <scopes>` | Comma-separated OAuth scopes |
|
||||
| `--scopes <scopes>` | OAuth scopes as separate variadic arguments (for example: `--scopes read write`) |
|
||||
|
||||
### Token Storage
|
||||
|
||||
@@ -176,10 +226,20 @@ Tokens are stored in `~/.config/opencode/mcp-oauth.json` with `0600` permissions
|
||||
|
||||
## Configuration Files
|
||||
|
||||
The CLI searches for configuration files in the following locations (in priority order):
|
||||
The runtime loads user config as the base config, then merges project config on top:
|
||||
|
||||
1. **Project Level**: `.opencode/oh-my-openagent.json`
|
||||
2. **User Level**: `~/.config/opencode/oh-my-openagent.json`
|
||||
1. **Project Level**: `.opencode/oh-my-openagent.jsonc`, `.opencode/oh-my-openagent.json`, `.opencode/oh-my-opencode.jsonc`, or `.opencode/oh-my-opencode.json`
|
||||
2. **User Level**: `~/.config/opencode/oh-my-openagent.jsonc`, `~/.config/opencode/oh-my-openagent.json`, `~/.config/opencode/oh-my-opencode.jsonc`, or `~/.config/opencode/oh-my-opencode.json`
|
||||
|
||||
**Naming Note**: The published package and binary are still `oh-my-opencode`. Inside `opencode.json`, the compatibility layer now prefers the plugin entry `oh-my-openagent`. Plugin config loading recognizes both `oh-my-openagent.*` and legacy `oh-my-opencode.*` basenames. If both basenames exist in the same directory, the legacy `oh-my-opencode.*` file currently wins.
|
||||
|
||||
### Filename Compatibility
|
||||
|
||||
Both `.jsonc` and `.json` extensions are supported. JSONC (JSON with Comments) is preferred as it allows:
|
||||
- Comments (both `//` and `/* */` styles)
|
||||
- Trailing commas in arrays and objects
|
||||
|
||||
If both `.jsonc` and `.json` exist in the same directory, the `.jsonc` file takes precedence.
|
||||
|
||||
### JSONC Support
|
||||
|
||||
@@ -219,31 +279,78 @@ bun install -g opencode@latest
|
||||
|
||||
```bash
|
||||
# Reinstall plugin
|
||||
bunx oh-my-openagent install
|
||||
bunx oh-my-opencode install
|
||||
```
|
||||
|
||||
### Doctor Check Failures
|
||||
|
||||
```bash
|
||||
# Diagnose with detailed information
|
||||
bunx oh-my-openagent doctor --verbose
|
||||
bunx oh-my-opencode doctor --verbose
|
||||
|
||||
# Check specific category only
|
||||
bunx oh-my-openagent doctor --category authentication
|
||||
# Show compact system dashboard
|
||||
bunx oh-my-opencode doctor --status
|
||||
|
||||
# JSON output for scripting
|
||||
bunx oh-my-opencode doctor --json
|
||||
```
|
||||
|
||||
### "Using legacy package name" Warning
|
||||
|
||||
The doctor warns if it finds the legacy plugin entry `oh-my-opencode` in `opencode.json`. Update the plugin array to the canonical `oh-my-openagent` entry:
|
||||
|
||||
```bash
|
||||
# Replace the legacy plugin entry in user config
|
||||
jq '.plugin = (.plugin // [] | map(if . == "oh-my-opencode" then "oh-my-openagent" else . end))' \
|
||||
~/.config/opencode/opencode.json > /tmp/opencode.json && mv /tmp/opencode.json ~/.config/opencode/opencode.json
|
||||
```
|
||||
---
|
||||
|
||||
## refresh-model-capabilities
|
||||
|
||||
Refreshes the cached model capabilities snapshot from models.dev. This updates the local cache used by capability resolution and compatibility diagnostics.
|
||||
|
||||
### Usage
|
||||
|
||||
```bash
|
||||
bunx oh-my-opencode refresh-model-capabilities
|
||||
```
|
||||
|
||||
### Options
|
||||
|
||||
| Option | Description |
|
||||
| ----------------- | --------------------------------------------------- |
|
||||
| `-d, --directory` | Working directory to read oh-my-opencode config from |
|
||||
| `--source-url <url>` | Override the models.dev source URL |
|
||||
| `--json` | Output refresh summary as JSON |
|
||||
|
||||
### Configuration
|
||||
|
||||
Configure automatic refresh behavior in your plugin config:
|
||||
|
||||
```jsonc
|
||||
{
|
||||
"model_capabilities": {
|
||||
"enabled": true,
|
||||
"auto_refresh_on_start": true,
|
||||
"refresh_timeout_ms": 5000,
|
||||
"source_url": "https://models.dev/api.json"
|
||||
}
|
||||
}
|
||||
```
|
||||
|
||||
---
|
||||
|
||||
## Non-Interactive Mode
|
||||
|
||||
Use the `--no-tui` option for CI/CD environments.
|
||||
Use JSON output for CI or scripted diagnostics.
|
||||
|
||||
```bash
|
||||
# Run doctor in CI environment
|
||||
bunx oh-my-openagent doctor --no-tui --json
|
||||
bunx oh-my-opencode doctor --json
|
||||
|
||||
# Save results to file
|
||||
bunx oh-my-openagent doctor --json > doctor-report.json
|
||||
bunx oh-my-opencode doctor --json > doctor-report.json
|
||||
```
|
||||
|
||||
---
|
||||
|
||||
@@ -1,6 +1,6 @@
|
||||
# Configuration Reference
|
||||
|
||||
Complete reference for `oh-my-openagent.jsonc` configuration. This document covers every available option with examples.
|
||||
Complete reference for Oh My OpenCode plugin configuration. During the rename transition, the runtime recognizes both `oh-my-openagent.json[c]` and legacy `oh-my-opencode.json[c]` files.
|
||||
|
||||
---
|
||||
|
||||
@@ -30,6 +30,7 @@ Complete reference for `oh-my-openagent.jsonc` configuration. This document cove
|
||||
- [LSP](#lsp)
|
||||
- [Advanced](#advanced)
|
||||
- [Runtime Fallback](#runtime-fallback)
|
||||
- [Model Capabilities](#model-capabilities)
|
||||
- [Hashline Edit](#hashline-edit)
|
||||
- [Experimental](#experimental)
|
||||
- [Reference](#reference)
|
||||
@@ -42,27 +43,28 @@ Complete reference for `oh-my-openagent.jsonc` configuration. This document cove
|
||||
|
||||
### File Locations
|
||||
|
||||
Priority order (project overrides user):
|
||||
User config is loaded first, then project config overrides it. In each directory, the compatibility layer recognizes both the renamed and legacy basenames.
|
||||
|
||||
1. `.opencode/oh-my-openagent.jsonc` / `.opencode/oh-my-openagent.json`
|
||||
1. Project config: `.opencode/oh-my-openagent.json[c]` or `.opencode/oh-my-opencode.json[c]`
|
||||
2. User config (`.jsonc` preferred over `.json`):
|
||||
|
||||
| Platform | Path |
|
||||
| ----------- | ----------------------------------------- |
|
||||
| macOS/Linux | `~/.config/opencode/oh-my-openagent.jsonc` |
|
||||
| Windows | `%APPDATA%\opencode\oh-my-openagent.jsonc` |
|
||||
| Platform | Path candidates |
|
||||
| ----------- | --------------- |
|
||||
| macOS/Linux | `~/.config/opencode/oh-my-openagent.json[c]`, `~/.config/opencode/oh-my-opencode.json[c]` |
|
||||
| Windows | `%APPDATA%\opencode\oh-my-openagent.json[c]`, `%APPDATA%\opencode\oh-my-opencode.json[c]` |
|
||||
|
||||
**Rename compatibility:** The published package and CLI binary remain `oh-my-opencode`. OpenCode plugin registration prefers `oh-my-openagent`, while legacy `oh-my-opencode` entries and config basenames still load during the transition. Config detection checks `oh-my-opencode` before `oh-my-openagent`, so if both plugin config basenames exist in the same directory, the legacy `oh-my-opencode.*` file currently wins.
|
||||
JSONC supports `// line comments`, `/* block comments */`, and trailing commas.
|
||||
|
||||
Enable schema autocomplete:
|
||||
|
||||
```json
|
||||
{
|
||||
"$schema": "https://raw.githubusercontent.com/code-yeongyu/oh-my-openagent/dev/assets/oh-my-openagent.schema.json"
|
||||
"$schema": "https://raw.githubusercontent.com/code-yeongyu/oh-my-openagent/dev/assets/oh-my-opencode.schema.json"
|
||||
}
|
||||
```
|
||||
|
||||
Run `bunx oh-my-openagent install` for guided setup. Run `opencode models` to list available models.
|
||||
Run `bunx oh-my-opencode install` for guided setup. Run `opencode models` to list available models.
|
||||
|
||||
### Quick Start Example
|
||||
|
||||
@@ -70,7 +72,7 @@ Here's a practical starting configuration:
|
||||
|
||||
```jsonc
|
||||
{
|
||||
"$schema": "https://raw.githubusercontent.com/code-yeongyu/oh-my-openagent/dev/assets/oh-my-openagent.schema.json",
|
||||
"$schema": "https://raw.githubusercontent.com/code-yeongyu/oh-my-openagent/dev/assets/oh-my-opencode.schema.json",
|
||||
|
||||
"agents": {
|
||||
// Main orchestrator: Claude Opus or Kimi K2.5 work best
|
||||
@@ -93,19 +95,19 @@ Here's a practical starting configuration:
|
||||
},
|
||||
|
||||
"categories": {
|
||||
// quick — trivial tasks
|
||||
// quick - trivial tasks
|
||||
"quick": { "model": "opencode/gpt-5-nano" },
|
||||
|
||||
// unspecified-low — moderate tasks
|
||||
// unspecified-low - moderate tasks
|
||||
"unspecified-low": { "model": "anthropic/claude-sonnet-4-6" },
|
||||
|
||||
// unspecified-high — complex work
|
||||
// unspecified-high - complex work
|
||||
"unspecified-high": { "model": "anthropic/claude-opus-4-6", "variant": "max" },
|
||||
|
||||
// writing — docs/prose
|
||||
// writing - docs/prose
|
||||
"writing": { "model": "google/gemini-3-flash" },
|
||||
|
||||
// visual-engineering — Gemini dominates visual tasks
|
||||
// visual-engineering - Gemini dominates visual tasks
|
||||
"visual-engineering": {
|
||||
"model": "google/gemini-3.1-pro",
|
||||
"variant": "high",
|
||||
@@ -157,26 +159,28 @@ Override built-in agent settings. Available agents: `sisyphus`, `hephaestus`, `p
|
||||
|
||||
Disable agents entirely: `{ "disabled_agents": ["oracle", "multimodal-looker"] }`
|
||||
|
||||
Core agents receive an injected runtime `order` field for deterministic Tab cycling in the UI: Sisyphus = 1, Hephaestus = 2, Prometheus = 3, Atlas = 4. This is not a user-configurable config key.
|
||||
|
||||
#### Agent Options
|
||||
|
||||
| Option | Type | Description |
|
||||
| ----------------- | ------------- | ------------------------------------------------------ |
|
||||
| ----------------- | -------------- | --------------------------------------------------------------- |
|
||||
| `model` | string | Model override (`provider/model`) |
|
||||
| `fallback_models` | string\|array | Fallback models on API errors |
|
||||
| `fallback_models` | string\|array | Fallback models on API errors. Supports strings or mixed arrays of strings and object entries with per-model settings |
|
||||
| `temperature` | number | Sampling temperature |
|
||||
| `top_p` | number | Top-p sampling |
|
||||
| `prompt` | string | Replace system prompt |
|
||||
| `prompt_append` | string | Append to system prompt |
|
||||
| `prompt` | string | Replace system prompt. Supports `file://` URIs |
|
||||
| `prompt_append` | string | Append to system prompt. Supports `file://` URIs |
|
||||
| `tools` | array | Allowed tools list |
|
||||
| `disable` | boolean | Disable this agent |
|
||||
| `mode` | string | Agent mode |
|
||||
| `color` | string | UI color |
|
||||
| `permission` | object | Per-tool permissions (see below) |
|
||||
| `category` | string | Inherit model from category |
|
||||
| `variant` | string | Model variant: `max`, `high`, `medium`, `low`, `xhigh` |
|
||||
| `variant` | string | Model variant: `max`, `high`, `medium`, `low`, `xhigh`. Normalized to supported values |
|
||||
| `maxTokens` | number | Max response tokens |
|
||||
| `thinking` | object | Anthropic extended thinking |
|
||||
| `reasoningEffort` | string | OpenAI reasoning: `low`, `medium`, `high`, `xhigh` |
|
||||
| `reasoningEffort` | string | OpenAI reasoning: `none`, `minimal`, `low`, `medium`, `high`, `xhigh`. Normalized to supported values |
|
||||
| `textVerbosity` | string | Text verbosity: `low`, `medium`, `high` |
|
||||
| `providerOptions` | object | Provider-specific options |
|
||||
|
||||
@@ -216,6 +220,65 @@ Control what tools an agent can use:
|
||||
| `doom_loop` | `ask` / `allow` / `deny` |
|
||||
| `external_directory` | `ask` / `allow` / `deny` |
|
||||
|
||||
|
||||
#### Fallback Models with Per-Model Settings
|
||||
|
||||
`fallback_models` accepts either a single model string or an array. Array entries can be plain strings or objects with individual model settings:
|
||||
|
||||
```jsonc
|
||||
{
|
||||
"agents": {
|
||||
"sisyphus": {
|
||||
"model": "anthropic/claude-opus-4-6",
|
||||
"fallback_models": [
|
||||
// Simple string fallback
|
||||
"openai/gpt-5.4",
|
||||
// Object with per-model settings
|
||||
{
|
||||
"model": "google/gemini-3.1-pro",
|
||||
"variant": "high",
|
||||
"temperature": 0.2
|
||||
},
|
||||
{
|
||||
"model": "anthropic/claude-sonnet-4-6",
|
||||
"thinking": { "type": "enabled", "budgetTokens": 64000 }
|
||||
}
|
||||
]
|
||||
}
|
||||
}
|
||||
}
|
||||
```
|
||||
|
||||
Object entries support: `model`, `variant`, `reasoningEffort`, `temperature`, `top_p`, `maxTokens`, `thinking`.
|
||||
|
||||
#### File URIs for Prompts
|
||||
|
||||
Both `prompt` and `prompt_append` support loading content from files via `file://` URIs. Category-level `prompt_append` supports the same URI forms.
|
||||
|
||||
```jsonc
|
||||
{
|
||||
"agents": {
|
||||
"sisyphus": {
|
||||
"prompt_append": "file:///absolute/path/to/prompt.txt"
|
||||
},
|
||||
"oracle": {
|
||||
"prompt": "file://./relative/to/project/prompt.md"
|
||||
},
|
||||
"explore": {
|
||||
"prompt_append": "file://~/home/dir/prompt.txt"
|
||||
}
|
||||
},
|
||||
"categories": {
|
||||
"custom": {
|
||||
"model": "anthropic/claude-sonnet-4-6",
|
||||
"prompt_append": "file://./category-context.md"
|
||||
}
|
||||
}
|
||||
}
|
||||
```
|
||||
|
||||
Paths can be absolute (`file:///abs/path`), relative to project root (`file://./rel/path`), or home-relative (`file://~/home/path`). If a file URI cannot be decoded, resolved, or read, OmO inserts a warning placeholder into the prompt instead of failing hard.
|
||||
|
||||
### Categories
|
||||
|
||||
Domain-specific model delegation used by the `task()` tool. When Sisyphus delegates work, it picks a category, not a model name.
|
||||
@@ -240,16 +303,16 @@ Domain-specific model delegation used by the `task()` tool. When Sisyphus delega
|
||||
| Option | Type | Default | Description |
|
||||
| ------------------- | ------------- | ------- | ------------------------------------------------------------------- |
|
||||
| `model` | string | - | Model override |
|
||||
| `fallback_models` | string\|array | - | Fallback models on API errors |
|
||||
| `fallback_models` | string\|array | - | Fallback models on API errors. Supports strings or mixed arrays of strings and object entries with per-model settings |
|
||||
| `temperature` | number | - | Sampling temperature |
|
||||
| `top_p` | number | - | Top-p sampling |
|
||||
| `maxTokens` | number | - | Max response tokens |
|
||||
| `thinking` | object | - | Anthropic extended thinking |
|
||||
| `reasoningEffort` | string | - | OpenAI reasoning effort |
|
||||
| `reasoningEffort` | string | - | OpenAI reasoning effort. Unsupported values are normalized |
|
||||
| `textVerbosity` | string | - | Text verbosity |
|
||||
| `tools` | array | - | Allowed tools |
|
||||
| `prompt_append` | string | - | Append to system prompt |
|
||||
| `variant` | string | - | Model variant |
|
||||
| `variant` | string | - | Model variant. Unsupported values are normalized |
|
||||
| `description` | string | - | Shown in `task()` tool prompt |
|
||||
| `is_unstable_agent` | boolean | `false` | Force background mode + monitoring. Auto-enabled for Gemini models. |
|
||||
|
||||
@@ -257,41 +320,66 @@ Disable categories: `{ "disabled_categories": ["ultrabrain"] }`
|
||||
|
||||
### Model Resolution
|
||||
|
||||
3-step priority at runtime:
|
||||
Runtime priority:
|
||||
|
||||
1. **UI-selected model** - the model chosen in the OpenCode UI (applies to primary agents)
|
||||
2. **User override** - model set in config → used exactly as-is. Even on cold cache, explicit user configuration takes precedence over hardcoded fallback chains
|
||||
3. **Category default** - model inherited from the assigned category config
|
||||
4. **User `fallback_models`** - user-configured fallback list is tried before built-in fallback chains
|
||||
5. **Provider fallback chain** - built-in provider/model chain from OmO source
|
||||
6. **System default** - OpenCode's configured default model
|
||||
|
||||
#### Model Settings Compatibility
|
||||
|
||||
Model settings are compatibility-normalized against model capabilities instead of failing hard.
|
||||
|
||||
Normalized fields:
|
||||
|
||||
- `variant` - downgraded to the closest supported value
|
||||
- `reasoningEffort` - downgraded to the closest supported value, or removed if unsupported
|
||||
- `temperature` - removed if unsupported by the model metadata
|
||||
- `top_p` - removed if unsupported by the model metadata
|
||||
- `maxTokens` - capped to the model's reported max output limit
|
||||
- `thinking` - removed if the target model does not support thinking
|
||||
|
||||
Examples:
|
||||
- Claude models do not support `reasoningEffort` - it is removed automatically
|
||||
- GPT-4.1 does not support reasoning - `reasoningEffort` is removed
|
||||
- o-series models support `none` through `high` - `xhigh` is downgraded to `high`
|
||||
- GPT-5 supports `none`, `minimal`, `low`, `medium`, `high`, `xhigh` - all pass through
|
||||
|
||||
Capability data comes from provider runtime metadata first. OmO also ships bundled models.dev-backed capability data, supports a refreshable local models.dev cache, and falls back to heuristic family detection plus alias rules when exact metadata is unavailable. `bunx oh-my-opencode doctor` surfaces capability diagnostics and warns when a configured model relies on compatibility fallback.
|
||||
|
||||
1. **User override** — model set in config → used exactly as-is
|
||||
2. **Provider fallback chain** — tries each provider in priority order until available
|
||||
3. **System default** — falls back to OpenCode's configured default model
|
||||
|
||||
#### Agent Provider Chains
|
||||
|
||||
| Agent | Default Model | Provider Priority |
|
||||
| --------------------- | ------------------- | ---------------------------------------------------------------------------- |
|
||||
| **Sisyphus** | `claude-opus-4-6` | `claude-opus-4-6` → `glm-5` → `big-pickle` |
|
||||
| **Hephaestus** | `gpt-5.4` | `gpt-5.4` |
|
||||
| **oracle** | `gpt-5.4` | `gpt-5.4` → `gemini-3.1-pro` → `claude-opus-4-6` |
|
||||
| **librarian** | `minimax-m2.7` | `minimax-m2.7` → `minimax-m2.7-highspeed` → `claude-haiku-4-5` → `gpt-5-nano` |
|
||||
| **explore** | `grok-code-fast-1` | `grok-code-fast-1` → `minimax-m2.7-highspeed` → `minimax-m2.7` → `claude-haiku-4-5` → `gpt-5-nano` |
|
||||
| **multimodal-looker** | `gpt-5.3-codex` | `gpt-5.3-codex` → `k2p5` → `gemini-3-flash` → `glm-4.6v` → `gpt-5-nano` |
|
||||
| **Prometheus** | `claude-opus-4-6` | `claude-opus-4-6` → `gpt-5.4` → `gemini-3.1-pro` |
|
||||
| **Metis** | `claude-opus-4-6` | `claude-opus-4-6` → `gpt-5.4` → `gemini-3.1-pro` |
|
||||
| **Momus** | `gpt-5.4` | `gpt-5.4` → `claude-opus-4-6` → `gemini-3.1-pro` |
|
||||
| **Atlas** | `claude-sonnet-4-6` | `claude-sonnet-4-6` → `gpt-5.4` |
|
||||
| **Sisyphus** | `claude-opus-4-6` | `anthropic\|github-copilot\|opencode/claude-opus-4-6 (max)` → `opencode-go/kimi-k2.5` → `kimi-for-coding/k2p5` → `opencode\|moonshotai\|moonshotai-cn\|firmware\|ollama-cloud\|aihubmix/kimi-k2.5` → `openai\|github-copilot\|opencode/gpt-5.4 (medium)` → `zai-coding-plan\|opencode/glm-5` → `opencode/big-pickle` |
|
||||
| **Hephaestus** | `gpt-5.4` | `gpt-5.4 (medium)` |
|
||||
| **oracle** | `gpt-5.4` | `openai\|github-copilot\|opencode/gpt-5.4 (high)` → `google\|github-copilot\|opencode/gemini-3.1-pro (high)` → `anthropic\|github-copilot\|opencode/claude-opus-4-6 (max)` → `opencode-go/glm-5` |
|
||||
| **librarian** | `minimax-m2.7` | `opencode-go/minimax-m2.7` → `opencode/minimax-m2.7-highspeed` → `anthropic\|opencode/claude-haiku-4-5` → `opencode/gpt-5-nano` |
|
||||
| **explore** | `grok-code-fast-1` | `github-copilot\|xai/grok-code-fast-1` → `opencode-go/minimax-m2.7-highspeed` → `opencode/minimax-m2.7` → `anthropic\|opencode/claude-haiku-4-5` → `opencode/gpt-5-nano` |
|
||||
| **multimodal-looker** | `gpt-5.4` | `openai\|opencode/gpt-5.4 (medium)` → `opencode-go/kimi-k2.5` → `zai-coding-plan/glm-4.6v` → `openai\|github-copilot\|opencode/gpt-5-nano` |
|
||||
| **Prometheus** | `claude-opus-4-6` | `anthropic\|github-copilot\|opencode/claude-opus-4-6 (max)` → `openai\|github-copilot\|opencode/gpt-5.4 (high)` → `opencode-go/glm-5` → `google\|github-copilot\|opencode/gemini-3.1-pro` |
|
||||
| **Metis** | `claude-opus-4-6` | `anthropic\|github-copilot\|opencode/claude-opus-4-6 (max)` → `openai\|github-copilot\|opencode/gpt-5.4 (high)` → `opencode-go/glm-5` → `kimi-for-coding/k2p5` |
|
||||
| **Momus** | `gpt-5.4` | `openai\|github-copilot\|opencode/gpt-5.4 (xhigh)` → `anthropic\|github-copilot\|opencode/claude-opus-4-6 (max)` → `google\|github-copilot\|opencode/gemini-3.1-pro (high)` → `opencode-go/glm-5` |
|
||||
| **Atlas** | `claude-sonnet-4-6` | `anthropic\|github-copilot\|opencode/claude-sonnet-4-6` → `opencode-go/kimi-k2.5` → `openai\|github-copilot\|opencode/gpt-5.4 (medium)` → `opencode-go/minimax-m2.7` |
|
||||
|
||||
#### Category Provider Chains
|
||||
|
||||
| Category | Default Model | Provider Priority |
|
||||
| ---------------------- | ------------------- | -------------------------------------------------------------- |
|
||||
| **visual-engineering** | `gemini-3.1-pro` | `gemini-3.1-pro` → `glm-5` → `claude-opus-4-6` |
|
||||
| **ultrabrain** | `gpt-5.4` | `gpt-5.4` → `gemini-3.1-pro` → `claude-opus-4-6` |
|
||||
| **deep** | `gpt-5.3-codex` | `gpt-5.3-codex` → `claude-opus-4-6` → `gemini-3.1-pro` |
|
||||
| **artistry** | `gemini-3.1-pro` | `gemini-3.1-pro` → `claude-opus-4-6` → `gpt-5.4` |
|
||||
| **quick** | `gpt-5.4-mini` | `gpt-5.4-mini` → `claude-haiku-4-5` → `gemini-3-flash` → `minimax-m2.7` → `gpt-5-nano` |
|
||||
| **unspecified-low** | `claude-sonnet-4-6` | `claude-sonnet-4-6` → `gpt-5.3-codex` → `gemini-3-flash` → `minimax-m2.7` |
|
||||
| **unspecified-high** | `claude-opus-4-6` | `claude-opus-4-6` → `gpt-5.4 (high)` → `glm-5` → `k2p5` → `kimi-k2.5` |
|
||||
| **writing** | `gemini-3-flash` | `gemini-3-flash` → `claude-sonnet-4-6` → `minimax-m2.7` |
|
||||
| **visual-engineering** | `gemini-3.1-pro` | `google\|github-copilot\|opencode/gemini-3.1-pro (high)` → `zai-coding-plan\|opencode/glm-5` → `anthropic\|github-copilot\|opencode/claude-opus-4-6 (max)` → `opencode-go/glm-5` → `kimi-for-coding/k2p5` |
|
||||
| **ultrabrain** | `gpt-5.4` | `openai\|opencode/gpt-5.4 (xhigh)` → `google\|github-copilot\|opencode/gemini-3.1-pro (high)` → `anthropic\|github-copilot\|opencode/claude-opus-4-6 (max)` → `opencode-go/glm-5` |
|
||||
| **deep** | `gpt-5.3-codex` | `openai\|opencode/gpt-5.3-codex (medium)` → `anthropic\|github-copilot\|opencode/claude-opus-4-6 (max)` → `google\|github-copilot\|opencode/gemini-3.1-pro (high)` |
|
||||
| **artistry** | `gemini-3.1-pro` | `google\|github-copilot\|opencode/gemini-3.1-pro (high)` → `anthropic\|github-copilot\|opencode/claude-opus-4-6 (max)` → `openai\|github-copilot\|opencode/gpt-5.4` |
|
||||
| **quick** | `gpt-5.4-mini` | `openai\|github-copilot\|opencode/gpt-5.4-mini` → `anthropic\|github-copilot\|opencode/claude-haiku-4-5` → `google\|github-copilot\|opencode/gemini-3-flash` → `opencode-go/minimax-m2.7` → `opencode/gpt-5-nano` |
|
||||
| **unspecified-low** | `claude-sonnet-4-6` | `anthropic\|github-copilot\|opencode/claude-sonnet-4-6` → `openai\|opencode/gpt-5.3-codex (medium)` → `opencode-go/kimi-k2.5` → `google\|github-copilot\|opencode/gemini-3-flash` → `opencode-go/minimax-m2.7` |
|
||||
| **unspecified-high** | `claude-opus-4-6` | `anthropic\|github-copilot\|opencode/claude-opus-4-6 (max)` → `openai\|github-copilot\|opencode/gpt-5.4 (high)` → `zai-coding-plan\|opencode/glm-5` → `kimi-for-coding/k2p5` → `opencode-go/glm-5` → `opencode/kimi-k2.5` → `opencode\|moonshotai\|moonshotai-cn\|firmware\|ollama-cloud\|aihubmix/kimi-k2.5` |
|
||||
| **writing** | `gemini-3-flash` | `google\|github-copilot\|opencode/gemini-3-flash` → `opencode-go/kimi-k2.5` → `anthropic\|github-copilot\|opencode/claude-sonnet-4-6` → `opencode-go/minimax-m2.7` |
|
||||
|
||||
Run `bunx oh-my-openagent doctor --verbose` to see effective model resolution for your config.
|
||||
Run `bunx oh-my-opencode doctor --verbose` to see effective model resolution for your config.
|
||||
|
||||
---
|
||||
|
||||
@@ -425,9 +513,10 @@ Available hooks: `todo-continuation-enforcer`, `context-window-monitor`, `sessio
|
||||
|
||||
**Notes:**
|
||||
|
||||
- `directory-agents-injector` — auto-disabled on OpenCode 1.1.37+ (native AGENTS.md support)
|
||||
- `no-sisyphus-gpt` — **do not disable**. It blocks incompatible GPT models for Sisyphus while allowing the dedicated GPT-5.4 prompt path.
|
||||
- `directory-agents-injector` - auto-disabled on OpenCode 1.1.37+ (native AGENTS.md support)
|
||||
- `no-sisyphus-gpt` - **do not disable**. It blocks incompatible GPT models for Sisyphus while allowing the dedicated GPT-5.4 prompt path.
|
||||
- `startup-toast` is a sub-feature of `auto-update-checker`. Disable just the toast by adding `startup-toast` to `disabled_hooks`.
|
||||
- `session-recovery` - automatically recovers from recoverable session errors (missing tool results, unavailable tools, thinking block violations). Shows toast notifications during recovery. Enable `experimental.auto_resume` for automatic retry after recovery.
|
||||
|
||||
### Commands
|
||||
|
||||
@@ -504,7 +593,7 @@ Force-enable session notifications:
|
||||
{ "notification": { "force_enable": true } }
|
||||
```
|
||||
|
||||
`force_enable` (`false`) — force session-notification even if external notification plugins are detected.
|
||||
`force_enable` (`false`) - force session-notification even if external notification plugins are detected.
|
||||
|
||||
### MCPs
|
||||
|
||||
@@ -590,12 +679,233 @@ Define `fallback_models` per agent or category:
|
||||
"agents": {
|
||||
"sisyphus": {
|
||||
"model": "anthropic/claude-opus-4-6",
|
||||
"fallback_models": ["openai/gpt-5.4", "google/gemini-3.1-pro"]
|
||||
"fallback_models": [
|
||||
"openai/gpt-5.4",
|
||||
{
|
||||
"model": "google/gemini-3.1-pro",
|
||||
"variant": "high"
|
||||
}
|
||||
]
|
||||
}
|
||||
}
|
||||
}
|
||||
```
|
||||
|
||||
`fallback_models` also supports object-style entries so you can attach settings to a specific fallback model:
|
||||
|
||||
```json
|
||||
{
|
||||
"agents": {
|
||||
"sisyphus": {
|
||||
"model": "anthropic/claude-opus-4-6",
|
||||
"fallback_models": [
|
||||
"openai/gpt-5.4",
|
||||
{
|
||||
"model": "anthropic/claude-sonnet-4-6",
|
||||
"variant": "high",
|
||||
"thinking": { "type": "enabled", "budgetTokens": 12000 }
|
||||
},
|
||||
{
|
||||
"model": "openai/gpt-5.3-codex",
|
||||
"reasoningEffort": "high",
|
||||
"temperature": 0.2,
|
||||
"top_p": 0.95,
|
||||
"maxTokens": 8192
|
||||
}
|
||||
]
|
||||
}
|
||||
}
|
||||
}
|
||||
```
|
||||
|
||||
Mixed arrays are allowed, so string entries and object entries can appear together in the same fallback chain.
|
||||
|
||||
#### Object-style `fallback_models`
|
||||
|
||||
Object entries use the following shape:
|
||||
|
||||
| Field | Type | Description |
|
||||
| ----- | ---- | ----------- |
|
||||
| `model` | string | Fallback model ID. Provider prefix is optional when OmO can inherit the current/default provider. |
|
||||
| `variant` | string | Explicit variant override for this fallback entry. |
|
||||
| `reasoningEffort` | string | OpenAI reasoning effort override for this fallback entry. |
|
||||
| `temperature` | number | Temperature applied if this fallback model becomes active. |
|
||||
| `top_p` | number | Top-p applied if this fallback model becomes active. |
|
||||
| `maxTokens` | number | Max response tokens applied if this fallback model becomes active. |
|
||||
| `thinking` | object | Anthropic thinking config applied if this fallback model becomes active. |
|
||||
|
||||
Per-model settings are **fallback-only**. They are promoted only when that specific fallback model is actually selected, so they do not override your primary model settings when the primary model resolves successfully.
|
||||
|
||||
`thinking` uses the same shape as the normal agent/category option:
|
||||
|
||||
| Field | Type | Description |
|
||||
| ----- | ---- | ----------- |
|
||||
| `type` | string | `enabled` or `disabled` |
|
||||
| `budgetTokens` | number | Optional Anthropic thinking budget |
|
||||
|
||||
Object entries can also omit the provider prefix when OmO can infer it from the current/default provider. If you provide both inline variant syntax in `model` and an explicit `variant` field, the explicit `variant` field wins.
|
||||
|
||||
#### Full examples
|
||||
|
||||
**1. Simple string chain**
|
||||
|
||||
Use strings when you only need an ordered fallback chain:
|
||||
|
||||
```json
|
||||
{
|
||||
"agents": {
|
||||
"atlas": {
|
||||
"model": "anthropic/claude-sonnet-4-6",
|
||||
"fallback_models": [
|
||||
"anthropic/claude-haiku-4-5",
|
||||
"openai/gpt-5.4",
|
||||
"google/gemini-3.1-pro"
|
||||
]
|
||||
}
|
||||
}
|
||||
}
|
||||
```
|
||||
|
||||
**2. Same-provider shorthand**
|
||||
|
||||
If the primary model already establishes the provider, fallback entries can omit the prefix:
|
||||
|
||||
```json
|
||||
{
|
||||
"agents": {
|
||||
"atlas": {
|
||||
"model": "openai/gpt-5.4",
|
||||
"fallback_models": [
|
||||
"gpt-5.4-mini",
|
||||
{
|
||||
"model": "gpt-5.3-codex",
|
||||
"reasoningEffort": "medium",
|
||||
"maxTokens": 4096
|
||||
}
|
||||
]
|
||||
}
|
||||
}
|
||||
}
|
||||
```
|
||||
|
||||
In this example OmO treats `gpt-5.4-mini` and `gpt-5.3-codex` as OpenAI fallback entries because the current/default provider is already `openai`.
|
||||
|
||||
**3. Mixed cross-provider chain**
|
||||
|
||||
Mix string entries and object entries when only some fallback models need special settings:
|
||||
|
||||
```json
|
||||
{
|
||||
"agents": {
|
||||
"sisyphus": {
|
||||
"model": "anthropic/claude-opus-4-6",
|
||||
"fallback_models": [
|
||||
"openai/gpt-5.4",
|
||||
{
|
||||
"model": "anthropic/claude-sonnet-4-6",
|
||||
"variant": "high",
|
||||
"thinking": { "type": "enabled", "budgetTokens": 12000 }
|
||||
},
|
||||
{
|
||||
"model": "google/gemini-3.1-pro",
|
||||
"variant": "high"
|
||||
}
|
||||
]
|
||||
}
|
||||
}
|
||||
}
|
||||
```
|
||||
|
||||
**4. Category-level fallback chain**
|
||||
|
||||
`fallback_models` works the same way under `categories`:
|
||||
|
||||
```json
|
||||
{
|
||||
"categories": {
|
||||
"deep": {
|
||||
"model": "openai/gpt-5.3-codex",
|
||||
"fallback_models": [
|
||||
{
|
||||
"model": "openai/gpt-5.4",
|
||||
"reasoningEffort": "xhigh",
|
||||
"maxTokens": 12000
|
||||
},
|
||||
{
|
||||
"model": "anthropic/claude-opus-4-6",
|
||||
"variant": "max",
|
||||
"temperature": 0.2
|
||||
},
|
||||
"google/gemini-3.1-pro(high)"
|
||||
]
|
||||
}
|
||||
}
|
||||
}
|
||||
```
|
||||
|
||||
**5. Full object entry with every supported field**
|
||||
|
||||
This shows every supported object-style parameter in one place:
|
||||
|
||||
```json
|
||||
{
|
||||
"agents": {
|
||||
"oracle": {
|
||||
"model": "openai/gpt-5.4",
|
||||
"fallback_models": [
|
||||
{
|
||||
"model": "openai/gpt-5.3-codex(low)",
|
||||
"variant": "xhigh",
|
||||
"reasoningEffort": "high",
|
||||
"temperature": 0.3,
|
||||
"top_p": 0.9,
|
||||
"maxTokens": 8192,
|
||||
"thinking": {
|
||||
"type": "disabled"
|
||||
}
|
||||
}
|
||||
]
|
||||
}
|
||||
}
|
||||
}
|
||||
```
|
||||
|
||||
In this example the explicit `"variant": "xhigh"` overrides the inline `(low)` suffix in `"model"`.
|
||||
|
||||
This final example is a **complete shape reference**. In real configs, prefer provider-appropriate settings:
|
||||
|
||||
- use `reasoningEffort` for OpenAI reasoning models
|
||||
- use `thinking` for Anthropic thinking-capable models
|
||||
- use `variant`, `temperature`, `top_p`, and `maxTokens` only when that fallback model supports them
|
||||
|
||||
### Model Capabilities
|
||||
|
||||
OmO can refresh a local models.dev capability snapshot on startup. This cache is controlled by `model_capabilities`.
|
||||
|
||||
```jsonc
|
||||
{
|
||||
"model_capabilities": {
|
||||
"enabled": true,
|
||||
"auto_refresh_on_start": true,
|
||||
"refresh_timeout_ms": 5000,
|
||||
"source_url": "https://models.dev/api.json"
|
||||
}
|
||||
}
|
||||
```
|
||||
|
||||
| Option | Default behavior | Description |
|
||||
| ------ | ---------------- | ----------- |
|
||||
| `enabled` | enabled unless explicitly set to `false` | Master switch for model capability refresh behavior |
|
||||
| `auto_refresh_on_start` | refresh on startup unless explicitly set to `false` | Refresh the local models.dev cache during startup checks |
|
||||
| `refresh_timeout_ms` | `5000` | Timeout for the startup refresh attempt |
|
||||
| `source_url` | `https://models.dev/api.json` | Override the models.dev source URL |
|
||||
|
||||
Notes:
|
||||
|
||||
- Startup refresh runs through the auto-update checker hook.
|
||||
- Manual refresh is available via `bunx oh-my-opencode refresh-model-capabilities`.
|
||||
- Provider runtime metadata still takes priority when OmO resolves capabilities for compatibility checks.
|
||||
|
||||
### Hashline Edit
|
||||
|
||||
Replaces the built-in `Edit` tool with a hash-anchored version using `LINE#ID` references to prevent stale-line edits. Disabled by default.
|
||||
@@ -615,7 +925,7 @@ When enabled, two companion hooks are active: `hashline-read-enhancer` (annotate
|
||||
"aggressive_truncation": false,
|
||||
"auto_resume": false,
|
||||
"disable_omo_env": false,
|
||||
"task_system": false,
|
||||
"task_system": true,
|
||||
"dynamic_context_pruning": {
|
||||
"enabled": false,
|
||||
"notification": "detailed",
|
||||
@@ -645,7 +955,7 @@ When enabled, two companion hooks are active: `hashline-read-enhancer` (annotate
|
||||
| `aggressive_truncation` | `false` | Aggressively truncate when token limit exceeded |
|
||||
| `auto_resume` | `false` | Auto-resume after thinking block recovery |
|
||||
| `disable_omo_env` | `false` | Disable auto-injected `<omo-env>` block (date/time/locale). Improves cache hit rate. |
|
||||
| `task_system` | `false` | Enable Sisyphus task system |
|
||||
| `task_system` | `true` | Enable Sisyphus task system |
|
||||
| `dynamic_context_pruning.enabled` | `false` | Auto-prune old tool outputs to manage context window |
|
||||
| `dynamic_context_pruning.notification` | `detailed` | Pruning notifications: `off` / `minimal` / `detailed` |
|
||||
| `turn_protection.turns` | `3` | Recent turns protected from pruning (1–10) |
|
||||
|
||||
@@ -6,29 +6,30 @@ Oh-My-OpenAgent provides 11 specialized AI agents. Each has distinct expertise,
|
||||
|
||||
### Core Agents
|
||||
|
||||
Core-agent tab cycling is deterministic via an injected runtime order field. The fixed priority order is Sisyphus (order: 1), Hephaestus (order: 2), Prometheus (order: 3), and Atlas (order: 4). Remaining agents follow after that stable core ordering.
|
||||
|
||||
| Agent | Model | Purpose |
|
||||
| --------------------- | ------------------ | ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- |
|
||||
| **Sisyphus** | `claude-opus-4-6` | The default orchestrator. Plans, delegates, and executes complex tasks using specialized subagents with aggressive parallel execution. Todo-driven workflow with extended thinking (32k budget). Fallback: `glm-5` → `big-pickle`. |
|
||||
| **Sisyphus** | `claude-opus-4-6` | The default orchestrator. Plans, delegates, and executes complex tasks using specialized subagents with aggressive parallel execution. Todo-driven workflow with extended thinking (32k budget). Fallback: `opencode-go/kimi-k2.5` → `kimi-for-coding/k2p5` → `opencode\|moonshotai\|moonshotai-cn\|firmware\|ollama-cloud\|aihubmix/kimi-k2.5` → `openai\|github-copilot\|opencode/gpt-5.4 (medium)` → `zai-coding-plan\|opencode/glm-5` → `opencode/big-pickle`. |
|
||||
| **Hephaestus** | `gpt-5.4` | The Legitimate Craftsman. Autonomous deep worker inspired by AmpCode's deep mode. Goal-oriented execution with thorough research before action. Explores codebase patterns, completes tasks end-to-end without premature stopping. Named after the Greek god of forge and craftsmanship. Requires a GPT-capable provider. |
|
||||
| **Oracle** | `gpt-5.4` | Architecture decisions, code review, debugging. Read-only consultation with stellar logical reasoning and deep analysis. Inspired by AmpCode. Fallback: `gemini-3.1-pro` → `claude-opus-4-6`. |
|
||||
| **Librarian** | `minimax-m2.7` | Multi-repo analysis, documentation lookup, OSS implementation examples. Deep codebase understanding with evidence-based answers. Fallback: `minimax-m2.7-highspeed` → `claude-haiku-4-5` → `gpt-5-nano`. |
|
||||
| **Explore** | `grok-code-fast-1` | Fast codebase exploration and contextual grep. Fallback: `minimax-m2.7-highspeed` → `minimax-m2.7` → `claude-haiku-4-5` → `gpt-5-nano`. |
|
||||
| **Multimodal-Looker** | `gpt-5.3-codex` | Visual content specialist. Analyzes PDFs, images, diagrams to extract information. Fallback: `k2p5` → `gemini-3-flash` → `glm-4.6v` → `gpt-5-nano`. |
|
||||
|
||||
| **Oracle** | `gpt-5.4` | Architecture decisions, code review, debugging. Read-only consultation with stellar logical reasoning and deep analysis. Inspired by AmpCode. Fallback: `google\|github-copilot\|opencode/gemini-3.1-pro (high)` → `anthropic\|github-copilot\|opencode/claude-opus-4-6 (max)` → `opencode-go/glm-5`. |
|
||||
| **Librarian** | `minimax-m2.7` | Multi-repo analysis, documentation lookup, OSS implementation examples. Deep codebase understanding with evidence-based answers. Fallback: `opencode/minimax-m2.7-highspeed` → `anthropic\|opencode/claude-haiku-4-5` → `opencode/gpt-5-nano`. |
|
||||
| **Explore** | `grok-code-fast-1` | Fast codebase exploration and contextual grep. Fallback: `opencode-go/minimax-m2.7-highspeed` → `opencode/minimax-m2.7` → `anthropic\|opencode/claude-haiku-4-5` → `opencode/gpt-5-nano`. |
|
||||
| **Multimodal-Looker** | `gpt-5.4` | Visual content specialist. Analyzes PDFs, images, diagrams to extract information. Fallback: `opencode-go/kimi-k2.5` → `zai-coding-plan/glm-4.6v` → `openai\|github-copilot\|opencode/gpt-5-nano`. |
|
||||
### Planning Agents
|
||||
|
||||
| Agent | Model | Purpose |
|
||||
| -------------- | ----------------- | -------------------------------------------------------------------------------------------------------------------------------------------------- |
|
||||
| **Prometheus** | `claude-opus-4-6` | Strategic planner with interview mode. Creates detailed work plans through iterative questioning. Fallback: `gpt-5.4` → `gemini-3.1-pro`. |
|
||||
| **Metis** | `claude-opus-4-6` | Plan consultant — pre-planning analysis. Identifies hidden intentions, ambiguities, and AI failure points. Fallback: `gpt-5.4` → `gemini-3.1-pro`. |
|
||||
| **Momus** | `gpt-5.4` | Plan reviewer — validates plans against clarity, verifiability, and completeness standards. Fallback: `claude-opus-4-6` → `gemini-3.1-pro`. |
|
||||
| **Prometheus** | `claude-opus-4-6` | Strategic planner with interview mode. Creates detailed work plans through iterative questioning. Fallback: `openai\|github-copilot\|opencode/gpt-5.4 (high)` → `opencode-go/glm-5` → `google\|github-copilot\|opencode/gemini-3.1-pro`. |
|
||||
| **Metis** | `claude-opus-4-6` | Plan consultant — pre-planning analysis. Identifies hidden intentions, ambiguities, and AI failure points. Fallback: `openai\|github-copilot\|opencode/gpt-5.4 (high)` → `opencode-go/glm-5` → `kimi-for-coding/k2p5`. |
|
||||
| **Momus** | `gpt-5.4` | Plan reviewer — validates plans against clarity, verifiability, and completeness standards. Fallback: `anthropic\|github-copilot\|opencode/claude-opus-4-6 (max)` → `google\|github-copilot\|opencode/gemini-3.1-pro (high)` → `opencode-go/glm-5`. |
|
||||
|
||||
### Orchestration Agents
|
||||
|
||||
| Agent | Model | Purpose |
|
||||
| ------------------- | ---------------------- | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- |
|
||||
| **Atlas** | `claude-sonnet-4-6` | Todo-list orchestrator. Executes planned tasks systematically, managing todo items and coordinating work. Fallback: `gpt-5.4` (medium). |
|
||||
| **Sisyphus-Junior** | _(category-dependent)_ | Category-spawned executor. Model is selected automatically based on the task category (visual-engineering, quick, deep, etc.). Used when the main agent delegates work via the `task` tool. |
|
||||
| **Atlas** | `claude-sonnet-4-6` | Todo-list orchestrator. Executes planned tasks systematically, managing todo items and coordinating work. Fallback: `opencode-go/kimi-k2.5` → `openai\|github-copilot\|opencode/gpt-5.4 (medium)` → `opencode-go/minimax-m2.7`. |
|
||||
| **Sisyphus-Junior** | _(category-dependent)_ | Category-spawned executor. Model is selected automatically based on the task category (visual-engineering, quick, deep, etc.). Its built-in general fallback chain is `anthropic\|github-copilot\|opencode/claude-sonnet-4-6` → `opencode-go/kimi-k2.5` → `openai\|github-copilot\|opencode/gpt-5.4 (medium)` → `opencode-go/minimax-m2.7` → `opencode/big-pickle`. |
|
||||
|
||||
### Invoking Agents
|
||||
|
||||
@@ -89,8 +90,9 @@ When running inside tmux:
|
||||
- Watch multiple agents work in real-time
|
||||
- Each pane shows agent output live
|
||||
- Auto-cleanup when agents complete
|
||||
- **Stable agent ordering**: core-agent tab cycling is deterministic via an injected runtime order field (Sisyphus: 1, Hephaestus: 2, Prometheus: 3, Atlas: 4)
|
||||
|
||||
Customize agent models, prompts, and permissions in `oh-my-openagent.json`.
|
||||
Customize agent models, prompts, and permissions in `oh-my-opencode.jsonc`.
|
||||
|
||||
## Category System
|
||||
|
||||
@@ -129,7 +131,7 @@ task({
|
||||
|
||||
### Custom Categories
|
||||
|
||||
You can define custom categories in `oh-my-openagent.json`.
|
||||
You can define custom categories in your plugin config file. During the rename transition, both `oh-my-openagent.json[c]` and legacy `oh-my-opencode.json[c]` basenames are recognized.
|
||||
|
||||
#### Category Configuration Schema
|
||||
|
||||
@@ -188,6 +190,75 @@ When you use a Category, a special agent called **Sisyphus-Junior** performs the
|
||||
- **Characteristic**: Cannot **re-delegate** tasks to other agents.
|
||||
- **Purpose**: Prevents infinite delegation loops and ensures focus on the assigned task.
|
||||
|
||||
## Advanced Configuration
|
||||
|
||||
### Rename Compatibility
|
||||
|
||||
The published package and binary remain `oh-my-opencode`. Inside `opencode.json`, the compatibility layer now prefers the plugin entry `oh-my-openagent`, while legacy `oh-my-opencode` entries still load with a warning. Plugin config files (`oh-my-openagent.json[c]` or legacy `oh-my-opencode.json[c]`) are recognized during the transition. Run `bunx oh-my-opencode doctor` to check for legacy package name warnings.
|
||||
|
||||
### Fallback Models
|
||||
|
||||
Configure per-agent fallback chains with arrays that can mix plain model strings and per-model objects:
|
||||
|
||||
```jsonc
|
||||
{
|
||||
"agents": {
|
||||
"sisyphus": {
|
||||
"fallback_models": [
|
||||
"opencode/glm-5",
|
||||
{ "model": "openai/gpt-5.4", "variant": "high" },
|
||||
{ "model": "anthropic/claude-sonnet-4-6", "thinking": { "type": "enabled", "budgetTokens": 64000 } }
|
||||
]
|
||||
}
|
||||
}
|
||||
}
|
||||
```
|
||||
|
||||
When a model returns an error, the runtime moves through the configured fallback array in order. Object entries let you tune the backup model itself instead of only swapping the model name.
|
||||
|
||||
### File-Based Prompts
|
||||
|
||||
Load agent system prompts from external files using `file://` URLs in the `prompt` field, or append additional content with `prompt_append`. The `prompt_append` field also works on categories.
|
||||
|
||||
```jsonc
|
||||
{
|
||||
"agents": {
|
||||
"sisyphus": {
|
||||
"prompt": "file:///path/to/custom-prompt.md"
|
||||
},
|
||||
"oracle": {
|
||||
"prompt_append": "file:///path/to/additional-context.md"
|
||||
}
|
||||
},
|
||||
"categories": {
|
||||
"deep": {
|
||||
"prompt_append": "file:///path/to/deep-category-append.md"
|
||||
}
|
||||
}
|
||||
}
|
||||
```
|
||||
|
||||
Supports `~` expansion for home directory and relative `file://` paths.
|
||||
|
||||
Useful for:
|
||||
- Version controlling prompts separately from config
|
||||
- Sharing prompts across projects
|
||||
- Keeping configuration files concise
|
||||
- Adding category-specific context without duplicating base prompts
|
||||
|
||||
The file content is loaded at runtime and injected into the agent's system prompt.
|
||||
|
||||
### Session Recovery
|
||||
|
||||
The system automatically recovers from common session failures without user intervention:
|
||||
|
||||
- **Missing tool results**: Reconstructs recoverable tool state and skips invalid tool-part IDs instead of failing the whole recovery pass
|
||||
- **Thinking block violations**: Recovers from API thinking block mismatches
|
||||
- **Empty messages**: Reconstructs message history when content is missing
|
||||
- **Context window limits**: Gracefully handles Claude context-window-exceeded errors with intelligent compaction
|
||||
- **JSON parse errors**: Recovers from malformed tool outputs
|
||||
|
||||
Recovery happens transparently during agent execution. You see the result, not the failure.
|
||||
## Skills
|
||||
|
||||
Skills provide specialized workflows with embedded MCP servers and detailed instructions. A Skill is a mechanism that injects **specialized knowledge (Context)** and **tools (MCP)** for specific domains into agents.
|
||||
@@ -844,9 +915,41 @@ When a skill MCP has `oauth` configured:
|
||||
Pre-authenticate via CLI:
|
||||
|
||||
```bash
|
||||
bunx oh-my-openagent mcp oauth login <server-name> --server-url https://api.example.com
|
||||
bunx oh-my-opencode mcp oauth login <server-name> --server-url https://api.example.com
|
||||
```
|
||||
|
||||
## Model Capabilities
|
||||
|
||||
Model capabilities are models.dev-backed, with a refreshable cache and compatibility diagnostics. The system combines bundled models.dev snapshot data, optional refreshed cache data, provider runtime metadata, and heuristics when exact metadata is unavailable.
|
||||
|
||||
### Refreshing Capabilities
|
||||
|
||||
Update the local cache with the latest model information:
|
||||
|
||||
```bash
|
||||
bunx oh-my-opencode refresh-model-capabilities
|
||||
```
|
||||
|
||||
Configure automatic refresh at startup:
|
||||
|
||||
```jsonc
|
||||
{
|
||||
"model_capabilities": {
|
||||
"enabled": true,
|
||||
"auto_refresh_on_start": true,
|
||||
"refresh_timeout_ms": 5000,
|
||||
"source_url": "https://models.dev/api.json"
|
||||
}
|
||||
}
|
||||
```
|
||||
|
||||
### Capability Diagnostics
|
||||
|
||||
Run `bunx oh-my-opencode doctor` to see capability diagnostics including:
|
||||
- effective model resolution for agents and categories
|
||||
- warnings when configured models rely on compatibility fallback
|
||||
- override compatibility details alongside model resolution output
|
||||
|
||||
## Context Injection
|
||||
|
||||
### Directory AGENTS.md
|
||||
|
||||
24
package.json
24
package.json
@@ -1,6 +1,6 @@
|
||||
{
|
||||
"name": "oh-my-opencode",
|
||||
"version": "3.11.0",
|
||||
"version": "3.14.0",
|
||||
"description": "The Best AI Agent Harness - Batteries-Included OpenCode Plugin with Multi-Model Orchestration, Parallel Background Agents, and Crafted LSP/AST Tools",
|
||||
"main": "dist/index.js",
|
||||
"types": "dist/index.d.ts",
|
||||
@@ -78,17 +78,17 @@
|
||||
"typescript": "^5.7.3"
|
||||
},
|
||||
"optionalDependencies": {
|
||||
"oh-my-opencode-darwin-arm64": "3.11.0",
|
||||
"oh-my-opencode-darwin-x64": "3.11.0",
|
||||
"oh-my-opencode-darwin-x64-baseline": "3.11.0",
|
||||
"oh-my-opencode-linux-arm64": "3.11.0",
|
||||
"oh-my-opencode-linux-arm64-musl": "3.11.0",
|
||||
"oh-my-opencode-linux-x64": "3.11.0",
|
||||
"oh-my-opencode-linux-x64-baseline": "3.11.0",
|
||||
"oh-my-opencode-linux-x64-musl": "3.11.0",
|
||||
"oh-my-opencode-linux-x64-musl-baseline": "3.11.0",
|
||||
"oh-my-opencode-windows-x64": "3.11.0",
|
||||
"oh-my-opencode-windows-x64-baseline": "3.11.0"
|
||||
"oh-my-opencode-darwin-arm64": "3.14.0",
|
||||
"oh-my-opencode-darwin-x64": "3.14.0",
|
||||
"oh-my-opencode-darwin-x64-baseline": "3.14.0",
|
||||
"oh-my-opencode-linux-arm64": "3.14.0",
|
||||
"oh-my-opencode-linux-arm64-musl": "3.14.0",
|
||||
"oh-my-opencode-linux-x64": "3.14.0",
|
||||
"oh-my-opencode-linux-x64-baseline": "3.14.0",
|
||||
"oh-my-opencode-linux-x64-musl": "3.14.0",
|
||||
"oh-my-opencode-linux-x64-musl-baseline": "3.14.0",
|
||||
"oh-my-opencode-windows-x64": "3.14.0",
|
||||
"oh-my-opencode-windows-x64-baseline": "3.14.0"
|
||||
},
|
||||
"overrides": {
|
||||
"@opencode-ai/sdk": "^1.2.24"
|
||||
|
||||
@@ -1,6 +1,6 @@
|
||||
{
|
||||
"name": "oh-my-opencode-darwin-arm64",
|
||||
"version": "3.11.0",
|
||||
"version": "3.14.0",
|
||||
"description": "Platform-specific binary for oh-my-opencode (darwin-arm64)",
|
||||
"license": "MIT",
|
||||
"repository": {
|
||||
|
||||
@@ -1,6 +1,6 @@
|
||||
{
|
||||
"name": "oh-my-opencode-darwin-x64-baseline",
|
||||
"version": "3.11.0",
|
||||
"version": "3.14.0",
|
||||
"description": "Platform-specific binary for oh-my-opencode (darwin-x64-baseline, no AVX2)",
|
||||
"license": "MIT",
|
||||
"repository": {
|
||||
|
||||
@@ -1,6 +1,6 @@
|
||||
{
|
||||
"name": "oh-my-opencode-darwin-x64",
|
||||
"version": "3.11.0",
|
||||
"version": "3.14.0",
|
||||
"description": "Platform-specific binary for oh-my-opencode (darwin-x64)",
|
||||
"license": "MIT",
|
||||
"repository": {
|
||||
|
||||
@@ -1,6 +1,6 @@
|
||||
{
|
||||
"name": "oh-my-opencode-linux-arm64-musl",
|
||||
"version": "3.11.0",
|
||||
"version": "3.14.0",
|
||||
"description": "Platform-specific binary for oh-my-opencode (linux-arm64-musl)",
|
||||
"license": "MIT",
|
||||
"repository": {
|
||||
|
||||
@@ -1,6 +1,6 @@
|
||||
{
|
||||
"name": "oh-my-opencode-linux-arm64",
|
||||
"version": "3.11.0",
|
||||
"version": "3.14.0",
|
||||
"description": "Platform-specific binary for oh-my-opencode (linux-arm64)",
|
||||
"license": "MIT",
|
||||
"repository": {
|
||||
|
||||
@@ -1,6 +1,6 @@
|
||||
{
|
||||
"name": "oh-my-opencode-linux-x64-baseline",
|
||||
"version": "3.11.0",
|
||||
"version": "3.14.0",
|
||||
"description": "Platform-specific binary for oh-my-opencode (linux-x64-baseline, no AVX2)",
|
||||
"license": "MIT",
|
||||
"repository": {
|
||||
|
||||
@@ -1,6 +1,6 @@
|
||||
{
|
||||
"name": "oh-my-opencode-linux-x64-musl-baseline",
|
||||
"version": "3.11.0",
|
||||
"version": "3.14.0",
|
||||
"description": "Platform-specific binary for oh-my-opencode (linux-x64-musl-baseline, no AVX2)",
|
||||
"license": "MIT",
|
||||
"repository": {
|
||||
|
||||
@@ -1,6 +1,6 @@
|
||||
{
|
||||
"name": "oh-my-opencode-linux-x64-musl",
|
||||
"version": "3.11.0",
|
||||
"version": "3.14.0",
|
||||
"description": "Platform-specific binary for oh-my-opencode (linux-x64-musl)",
|
||||
"license": "MIT",
|
||||
"repository": {
|
||||
|
||||
@@ -1,6 +1,6 @@
|
||||
{
|
||||
"name": "oh-my-opencode-linux-x64",
|
||||
"version": "3.11.0",
|
||||
"version": "3.14.0",
|
||||
"description": "Platform-specific binary for oh-my-opencode (linux-x64)",
|
||||
"license": "MIT",
|
||||
"repository": {
|
||||
|
||||
@@ -1,6 +1,6 @@
|
||||
{
|
||||
"name": "oh-my-opencode-windows-x64-baseline",
|
||||
"version": "3.11.0",
|
||||
"version": "3.14.0",
|
||||
"description": "Platform-specific binary for oh-my-opencode (windows-x64-baseline, no AVX2)",
|
||||
"license": "MIT",
|
||||
"repository": {
|
||||
|
||||
@@ -1,6 +1,6 @@
|
||||
{
|
||||
"name": "oh-my-opencode-windows-x64",
|
||||
"version": "3.11.0",
|
||||
"version": "3.14.0",
|
||||
"description": "Platform-specific binary for oh-my-opencode (windows-x64)",
|
||||
"license": "MIT",
|
||||
"repository": {
|
||||
|
||||
@@ -1,6 +1,7 @@
|
||||
// postinstall.mjs
|
||||
// Runs after npm install to verify platform binary is available
|
||||
|
||||
import { readFileSync } from "node:fs";
|
||||
import { createRequire } from "node:module";
|
||||
import { getPlatformPackageCandidates, getBinaryPath } from "./bin/platform.js";
|
||||
|
||||
@@ -22,15 +23,26 @@ function getLibcFamily() {
|
||||
}
|
||||
}
|
||||
|
||||
function getPackageBaseName() {
|
||||
try {
|
||||
const packageJson = JSON.parse(readFileSync(new URL("./package.json", import.meta.url), "utf8"));
|
||||
return packageJson.name || "oh-my-opencode";
|
||||
} catch {
|
||||
return "oh-my-opencode";
|
||||
}
|
||||
}
|
||||
|
||||
function main() {
|
||||
const { platform, arch } = process;
|
||||
const libcFamily = getLibcFamily();
|
||||
const packageBaseName = getPackageBaseName();
|
||||
|
||||
try {
|
||||
const packageCandidates = getPlatformPackageCandidates({
|
||||
platform,
|
||||
arch,
|
||||
libcFamily,
|
||||
packageBaseName,
|
||||
});
|
||||
|
||||
const resolvedPackage = packageCandidates.find((pkg) => {
|
||||
|
||||
@@ -34,6 +34,72 @@ async function generateChangelog(previousTag: string): Promise<string[]> {
|
||||
return notes
|
||||
}
|
||||
|
||||
async function getChangedFiles(previousTag: string): Promise<string[]> {
|
||||
try {
|
||||
const diff = await $`git diff --name-only ${previousTag}..HEAD`.text()
|
||||
return diff
|
||||
.split("\n")
|
||||
.map((line) => line.trim())
|
||||
.filter(Boolean)
|
||||
} catch {
|
||||
return []
|
||||
}
|
||||
}
|
||||
|
||||
function touchesAnyPath(files: string[], candidates: string[]): boolean {
|
||||
return files.some((file) => candidates.some((candidate) => file === candidate || file.startsWith(`${candidate}/`)))
|
||||
}
|
||||
|
||||
function buildReleaseFraming(files: string[]): string[] {
|
||||
const bullets: string[] = []
|
||||
|
||||
if (
|
||||
touchesAnyPath(files, [
|
||||
"src/index.ts",
|
||||
"src/plugin-config.ts",
|
||||
"bin/platform.js",
|
||||
"postinstall.mjs",
|
||||
"docs",
|
||||
])
|
||||
) {
|
||||
bullets.push("Rename transition updates across package detection, plugin/config compatibility, and install surfaces.")
|
||||
}
|
||||
|
||||
if (touchesAnyPath(files, ["src/tools/delegate-task", "src/plugin/tool-registry.ts"])) {
|
||||
bullets.push("Task and tool behavior updates, including delegate-task contract and runtime registration behavior.")
|
||||
}
|
||||
|
||||
if (
|
||||
touchesAnyPath(files, [
|
||||
"src/plugin/tool-registry.ts",
|
||||
"src/plugin-handlers/agent-config-handler.ts",
|
||||
"src/plugin-handlers/tool-config-handler.ts",
|
||||
"src/hooks/tasks-todowrite-disabler",
|
||||
])
|
||||
) {
|
||||
bullets.push("Task-system default behavior alignment so omitted configuration behaves consistently across runtime paths.")
|
||||
}
|
||||
|
||||
if (touchesAnyPath(files, [".github/workflows", "docs/guide/installation.md", "postinstall.mjs"])) {
|
||||
bullets.push("Install and publish workflow hardening, including safer release sequencing and package/install fixes.")
|
||||
}
|
||||
|
||||
if (bullets.length === 0) {
|
||||
return []
|
||||
}
|
||||
|
||||
return [
|
||||
"## Minor Compatibility and Stability Release",
|
||||
"",
|
||||
"This release carries compatibility-facing behavior changes and operational hardening. Read the summary below before upgrading or publishing.",
|
||||
"",
|
||||
...bullets.map((bullet) => `- ${bullet}`),
|
||||
"",
|
||||
"## Commit Summary",
|
||||
"",
|
||||
]
|
||||
}
|
||||
|
||||
async function getContributors(previousTag: string): Promise<string[]> {
|
||||
const notes: string[] = []
|
||||
|
||||
@@ -78,9 +144,11 @@ async function main() {
|
||||
process.exit(0)
|
||||
}
|
||||
|
||||
const changedFiles = await getChangedFiles(previousTag)
|
||||
const changelog = await generateChangelog(previousTag)
|
||||
const contributors = await getContributors(previousTag)
|
||||
const notes = [...changelog, ...contributors]
|
||||
const framing = buildReleaseFraming(changedFiles)
|
||||
const notes = [...framing, ...changelog, ...contributors]
|
||||
|
||||
if (notes.length === 0) {
|
||||
console.log("No notable changes")
|
||||
|
||||
@@ -2335,6 +2335,70 @@
|
||||
"created_at": "2026-03-26T09:55:49Z",
|
||||
"repoId": 1108837393,
|
||||
"pullRequestNo": 2833
|
||||
},
|
||||
{
|
||||
"name": "Jholly2008",
|
||||
"id": 29773273,
|
||||
"comment_id": 4139918265,
|
||||
"created_at": "2026-03-27T03:37:00Z",
|
||||
"repoId": 1108837393,
|
||||
"pullRequestNo": 2871
|
||||
},
|
||||
{
|
||||
"name": "WhiteGiverMa",
|
||||
"id": 152406589,
|
||||
"comment_id": 4140294245,
|
||||
"created_at": "2026-03-27T05:26:37Z",
|
||||
"repoId": 1108837393,
|
||||
"pullRequestNo": 2877
|
||||
},
|
||||
{
|
||||
"name": "codivedev",
|
||||
"id": 249558739,
|
||||
"comment_id": 4142164072,
|
||||
"created_at": "2026-03-27T12:11:45Z",
|
||||
"repoId": 1108837393,
|
||||
"pullRequestNo": 2888
|
||||
},
|
||||
{
|
||||
"name": "AlexDochioiu",
|
||||
"id": 38853913,
|
||||
"comment_id": 4147980685,
|
||||
"created_at": "2026-03-28T12:20:42Z",
|
||||
"repoId": 1108837393,
|
||||
"pullRequestNo": 2916
|
||||
},
|
||||
{
|
||||
"name": "ryandielhenn",
|
||||
"id": 35785891,
|
||||
"comment_id": 4148508024,
|
||||
"created_at": "2026-03-28T17:46:50Z",
|
||||
"repoId": 1108837393,
|
||||
"pullRequestNo": 2919
|
||||
},
|
||||
{
|
||||
"name": "lorenzo-dallamuta",
|
||||
"id": 66994937,
|
||||
"comment_id": 4148848505,
|
||||
"created_at": "2026-03-28T21:30:15Z",
|
||||
"repoId": 1108837393,
|
||||
"pullRequestNo": 2925
|
||||
},
|
||||
{
|
||||
"name": "quangtran88",
|
||||
"id": 107824159,
|
||||
"comment_id": 4149327240,
|
||||
"created_at": "2026-03-29T03:21:39Z",
|
||||
"repoId": 1108837393,
|
||||
"pullRequestNo": 2929
|
||||
},
|
||||
{
|
||||
"name": "HOYALIM",
|
||||
"id": 166576253,
|
||||
"comment_id": 4149626853,
|
||||
"created_at": "2026-03-29T07:31:36Z",
|
||||
"repoId": 1108837393,
|
||||
"pullRequestNo": 2935
|
||||
}
|
||||
]
|
||||
}
|
||||
@@ -308,6 +308,12 @@ Briefly announce "Consulting Oracle for [reason]" before invocation.
|
||||
|
||||
**Collect Oracle results before your final answer. No exceptions.**
|
||||
|
||||
**Oracle-dependent implementation is BLOCKED until Oracle finishes.**
|
||||
|
||||
- If you asked Oracle for architecture/debugging direction that affects the fix, do not implement before Oracle result arrives.
|
||||
- While waiting, only do non-overlapping prep work. Never ship implementation decisions Oracle was asked to decide.
|
||||
- Never "time out and continue anyway" for Oracle-dependent tasks.
|
||||
|
||||
- Oracle takes minutes. When done with your own work: **end your response** — wait for the \`<system-reminder>\`.
|
||||
- Do NOT poll \`background_output\` on a running Oracle. The notification will come.
|
||||
- Never cancel Oracle.
|
||||
|
||||
@@ -127,6 +127,12 @@ This verbalization anchors your routing decision and makes your reasoning transp
|
||||
- **Open-ended** ("Improve", "Refactor", "Add feature") → Assess codebase first
|
||||
- **Ambiguous** (unclear scope, multiple interpretations) → Ask ONE clarifying question
|
||||
|
||||
### Step 1.5: Turn-Local Intent Reset (MANDATORY)
|
||||
|
||||
- Reclassify intent from the CURRENT user message only. Never auto-carry "implementation mode" from prior turns.
|
||||
- If current message is a question/explanation/investigation request, answer/analyze only. Do NOT create todos or edit files.
|
||||
- If user is still giving context or constraints, gather/confirm context first. Do NOT start implementation yet.
|
||||
|
||||
### Step 2: Check for Ambiguity
|
||||
|
||||
- Single valid interpretation → Proceed
|
||||
@@ -135,6 +141,15 @@ This verbalization anchors your routing decision and makes your reasoning transp
|
||||
- Missing critical info (file, error, context) → **MUST ask**
|
||||
- User's design seems flawed or suboptimal → **MUST raise concern** before implementing
|
||||
|
||||
### Step 2.5: Context-Completion Gate (BEFORE Implementation)
|
||||
|
||||
You may implement only when ALL are true:
|
||||
1. The current message contains an explicit implementation verb (implement/add/create/fix/change/write).
|
||||
2. Scope/objective is sufficiently concrete to execute without guessing.
|
||||
3. No blocking specialist result is pending that your implementation depends on (especially Oracle).
|
||||
|
||||
If any condition fails, do research/clarification only, then wait.
|
||||
|
||||
### Step 3: Validate Before Acting
|
||||
|
||||
**Assumptions Check:**
|
||||
|
||||
@@ -167,6 +167,11 @@ Complexity:
|
||||
- Open-ended ("improve", "refactor") → assess codebase first, then propose
|
||||
- Ambiguous (multiple interpretations with 2x+ effort difference) → ask ONE question
|
||||
|
||||
Turn-local reset (mandatory): classify from the CURRENT user message, not conversation momentum.
|
||||
- Never carry implementation mode from prior turns.
|
||||
- If current turn is question/explanation/investigation, answer or analyze only.
|
||||
- If user appears to still be providing context, gather/confirm context first and wait.
|
||||
|
||||
Domain guess (provisional — finalized in ROUTE after exploration):
|
||||
- Visual (UI, CSS, styling, layout, design, animation) → likely visual-engineering
|
||||
- Logic (algorithms, architecture, complex business logic) → likely ultrabrain
|
||||
@@ -184,6 +189,11 @@ Step 2 — Check before acting:
|
||||
- Missing critical info → ask
|
||||
- User's design seems flawed → raise concern concisely, propose alternative, ask if they want to proceed anyway
|
||||
|
||||
Context-completion gate before implementation:
|
||||
- Implement only when the current message explicitly requests implementation (implement/add/create/fix/change/write),
|
||||
scope is concrete enough to execute without guessing, and no blocking specialist result is pending.
|
||||
- If any condition fails, continue with research/clarification only and wait.
|
||||
|
||||
<ask_gate>
|
||||
Proceed unless:
|
||||
(a) the action is irreversible,
|
||||
|
||||
@@ -9,7 +9,7 @@ import { buildEffectiveResolution, getEffectiveModel } from "./model-resolution-
|
||||
import type { AgentResolutionInfo, CategoryResolutionInfo, ModelResolutionInfo, OmoConfig } from "./model-resolution-types"
|
||||
|
||||
function parseProviderModel(value: string): { providerID: string; modelID: string } | null {
|
||||
const slashIndex = value.indexOf("/")
|
||||
const slashIndex = value.lastIndexOf("/")
|
||||
if (slashIndex <= 0 || slashIndex === value.length - 1) {
|
||||
return null
|
||||
}
|
||||
|
||||
@@ -159,8 +159,15 @@ describe("integration: --session-id", () => {
|
||||
|
||||
describe("integration: --on-complete", () => {
|
||||
let spawnSpy: ReturnType<typeof spyOn>
|
||||
let originalPlatform: NodeJS.Platform
|
||||
let originalEnv: Record<string, string | undefined>
|
||||
|
||||
beforeEach(() => {
|
||||
originalPlatform = process.platform
|
||||
originalEnv = {
|
||||
SHELL: process.env.SHELL,
|
||||
PSModulePath: process.env.PSModulePath,
|
||||
}
|
||||
spyOn(console, "error").mockImplementation(() => {})
|
||||
spawnSpy = spyOn(spawnWithWindowsHideModule, "spawnWithWindowsHide").mockReturnValue({
|
||||
exited: Promise.resolve(0),
|
||||
@@ -172,11 +179,22 @@ describe("integration: --on-complete", () => {
|
||||
})
|
||||
|
||||
afterEach(() => {
|
||||
Object.defineProperty(process, "platform", { value: originalPlatform })
|
||||
for (const [key, value] of Object.entries(originalEnv)) {
|
||||
if (value !== undefined) {
|
||||
process.env[key] = value
|
||||
} else {
|
||||
delete process.env[key]
|
||||
}
|
||||
}
|
||||
spawnSpy.mockRestore()
|
||||
})
|
||||
|
||||
it("passes all 4 env vars as strings to spawned process", async () => {
|
||||
// given
|
||||
Object.defineProperty(process, "platform", { value: "linux" })
|
||||
process.env.SHELL = "/bin/bash"
|
||||
delete process.env.PSModulePath
|
||||
spawnSpy.mockClear()
|
||||
|
||||
// when
|
||||
@@ -206,8 +224,15 @@ describe("integration: option combinations", () => {
|
||||
let mockStdout: MockWriteStream
|
||||
let mockStderr: MockWriteStream
|
||||
let spawnSpy: ReturnType<typeof spyOn>
|
||||
let originalPlatform: NodeJS.Platform
|
||||
let originalEnv: Record<string, string | undefined>
|
||||
|
||||
beforeEach(() => {
|
||||
originalPlatform = process.platform
|
||||
originalEnv = {
|
||||
SHELL: process.env.SHELL,
|
||||
PSModulePath: process.env.PSModulePath,
|
||||
}
|
||||
spyOn(console, "log").mockImplementation(() => {})
|
||||
spyOn(console, "error").mockImplementation(() => {})
|
||||
mockStdout = createMockWriteStream()
|
||||
@@ -222,11 +247,22 @@ describe("integration: option combinations", () => {
|
||||
})
|
||||
|
||||
afterEach(() => {
|
||||
Object.defineProperty(process, "platform", { value: originalPlatform })
|
||||
for (const [key, value] of Object.entries(originalEnv)) {
|
||||
if (value !== undefined) {
|
||||
process.env[key] = value
|
||||
} else {
|
||||
delete process.env[key]
|
||||
}
|
||||
}
|
||||
spawnSpy?.mockRestore?.()
|
||||
})
|
||||
|
||||
it("json output and on-complete hook can both execute", async () => {
|
||||
// given - json manager active + on-complete hook ready
|
||||
Object.defineProperty(process, "platform", { value: "linux" })
|
||||
process.env.SHELL = "/bin/bash"
|
||||
delete process.env.PSModulePath
|
||||
const result: RunResult = {
|
||||
sessionId: "session-123",
|
||||
success: true,
|
||||
|
||||
@@ -4,6 +4,9 @@ import * as loggerModule from "../../shared/logger"
|
||||
import { executeOnCompleteHook } from "./on-complete-hook"
|
||||
|
||||
describe("executeOnCompleteHook", () => {
|
||||
let originalPlatform: NodeJS.Platform
|
||||
let originalEnv: Record<string, string | undefined>
|
||||
|
||||
function createStream(text: string): ReadableStream<Uint8Array> | undefined {
|
||||
if (text.length === 0) {
|
||||
return undefined
|
||||
@@ -31,15 +34,32 @@ describe("executeOnCompleteHook", () => {
|
||||
let logSpy: ReturnType<typeof spyOn<typeof loggerModule, "log">>
|
||||
|
||||
beforeEach(() => {
|
||||
originalPlatform = process.platform
|
||||
originalEnv = {
|
||||
SHELL: process.env.SHELL,
|
||||
PSModulePath: process.env.PSModulePath,
|
||||
ComSpec: process.env.ComSpec,
|
||||
}
|
||||
logSpy = spyOn(loggerModule, "log").mockImplementation(() => {})
|
||||
})
|
||||
|
||||
afterEach(() => {
|
||||
Object.defineProperty(process, "platform", { value: originalPlatform })
|
||||
for (const [key, value] of Object.entries(originalEnv)) {
|
||||
if (value !== undefined) {
|
||||
process.env[key] = value
|
||||
} else {
|
||||
delete process.env[key]
|
||||
}
|
||||
}
|
||||
logSpy.mockRestore()
|
||||
})
|
||||
|
||||
it("executes command with correct env vars", async () => {
|
||||
it("uses sh on unix shells and passes correct env vars", async () => {
|
||||
// given
|
||||
Object.defineProperty(process, "platform", { value: "linux" })
|
||||
process.env.SHELL = "/bin/bash"
|
||||
delete process.env.PSModulePath
|
||||
const spawnSpy = spyOn(spawnWithWindowsHideModule, "spawnWithWindowsHide").mockReturnValue(createProc(0))
|
||||
|
||||
try {
|
||||
@@ -68,6 +88,82 @@ describe("executeOnCompleteHook", () => {
|
||||
}
|
||||
})
|
||||
|
||||
it("uses powershell when PowerShell is detected on Windows", async () => {
|
||||
// given
|
||||
Object.defineProperty(process, "platform", { value: "win32" })
|
||||
process.env.PSModulePath = "C:\\Program Files\\PowerShell\\Modules"
|
||||
delete process.env.SHELL
|
||||
const spawnSpy = spyOn(spawnWithWindowsHideModule, "spawnWithWindowsHide").mockReturnValue(createProc(0))
|
||||
|
||||
try {
|
||||
// when
|
||||
await executeOnCompleteHook({
|
||||
command: "Write-Host done",
|
||||
sessionId: "session-123",
|
||||
exitCode: 0,
|
||||
durationMs: 5000,
|
||||
messageCount: 10,
|
||||
})
|
||||
|
||||
// then
|
||||
const [args] = spawnSpy.mock.calls[0] as Parameters<typeof spawnWithWindowsHideModule.spawnWithWindowsHide>
|
||||
expect(args).toEqual(["powershell.exe", "-NoProfile", "-Command", "Write-Host done"])
|
||||
} finally {
|
||||
spawnSpy.mockRestore()
|
||||
}
|
||||
})
|
||||
|
||||
it("uses pwsh when PowerShell is detected on non-Windows platforms", async () => {
|
||||
// given
|
||||
Object.defineProperty(process, "platform", { value: "linux" })
|
||||
process.env.PSModulePath = "/usr/local/share/powershell/Modules"
|
||||
delete process.env.SHELL
|
||||
const spawnSpy = spyOn(spawnWithWindowsHideModule, "spawnWithWindowsHide").mockReturnValue(createProc(0))
|
||||
|
||||
try {
|
||||
// when
|
||||
await executeOnCompleteHook({
|
||||
command: "Write-Host done",
|
||||
sessionId: "session-123",
|
||||
exitCode: 0,
|
||||
durationMs: 5000,
|
||||
messageCount: 10,
|
||||
})
|
||||
|
||||
// then
|
||||
const [args] = spawnSpy.mock.calls[0] as Parameters<typeof spawnWithWindowsHideModule.spawnWithWindowsHide>
|
||||
expect(args).toEqual(["pwsh", "-NoProfile", "-Command", "Write-Host done"])
|
||||
} finally {
|
||||
spawnSpy.mockRestore()
|
||||
}
|
||||
})
|
||||
|
||||
it("falls back to cmd.exe on Windows when PowerShell is not detected", async () => {
|
||||
// given
|
||||
Object.defineProperty(process, "platform", { value: "win32" })
|
||||
delete process.env.PSModulePath
|
||||
delete process.env.SHELL
|
||||
process.env.ComSpec = "C:\\Windows\\System32\\cmd.exe"
|
||||
const spawnSpy = spyOn(spawnWithWindowsHideModule, "spawnWithWindowsHide").mockReturnValue(createProc(0))
|
||||
|
||||
try {
|
||||
// when
|
||||
await executeOnCompleteHook({
|
||||
command: "echo done",
|
||||
sessionId: "session-123",
|
||||
exitCode: 0,
|
||||
durationMs: 5000,
|
||||
messageCount: 10,
|
||||
})
|
||||
|
||||
// then
|
||||
const [args] = spawnSpy.mock.calls[0] as Parameters<typeof spawnWithWindowsHideModule.spawnWithWindowsHide>
|
||||
expect(args).toEqual(["C:\\Windows\\System32\\cmd.exe", "/d", "/s", "/c", "echo done"])
|
||||
} finally {
|
||||
spawnSpy.mockRestore()
|
||||
}
|
||||
})
|
||||
|
||||
it("env var values are strings", async () => {
|
||||
// given
|
||||
const spawnSpy = spyOn(spawnWithWindowsHideModule, "spawnWithWindowsHide").mockReturnValue(createProc(0))
|
||||
|
||||
@@ -1,5 +1,5 @@
|
||||
import { spawnWithWindowsHide } from "../../shared/spawn-with-windows-hide"
|
||||
import { log } from "../../shared"
|
||||
import { detectShellType, log } from "../../shared"
|
||||
|
||||
async function readOutput(
|
||||
stream: ReadableStream<Uint8Array> | undefined,
|
||||
@@ -20,6 +20,24 @@ async function readOutput(
|
||||
}
|
||||
}
|
||||
|
||||
function resolveHookShellCommand(command: string): string[] {
|
||||
const shellType = detectShellType()
|
||||
|
||||
switch (shellType) {
|
||||
case "powershell": {
|
||||
const powershellExecutable = process.platform === "win32" ? "powershell.exe" : "pwsh"
|
||||
return [powershellExecutable, "-NoProfile", "-Command", command]
|
||||
}
|
||||
case "cmd":
|
||||
return [process.env.ComSpec || "cmd.exe", "/d", "/s", "/c", command]
|
||||
case "csh":
|
||||
return ["csh", "-c", command]
|
||||
case "unix":
|
||||
default:
|
||||
return ["sh", "-c", command]
|
||||
}
|
||||
}
|
||||
|
||||
export async function executeOnCompleteHook(options: {
|
||||
command: string
|
||||
sessionId: string
|
||||
@@ -37,7 +55,8 @@ export async function executeOnCompleteHook(options: {
|
||||
log("Running on-complete hook", { command: trimmedCommand })
|
||||
|
||||
try {
|
||||
const proc = spawnWithWindowsHide(["sh", "-c", trimmedCommand], {
|
||||
const shellCommand = resolveHookShellCommand(trimmedCommand)
|
||||
const proc = spawnWithWindowsHide(shellCommand, {
|
||||
env: {
|
||||
...process.env,
|
||||
SESSION_ID: sessionId,
|
||||
|
||||
@@ -969,6 +969,45 @@ describe("GitMasterConfigSchema", () => {
|
||||
})
|
||||
})
|
||||
|
||||
describe("OhMyOpenCodeConfigSchema - git_master defaults (#2040)", () => {
|
||||
test("git_master defaults are applied when section is missing from config", () => {
|
||||
//#given
|
||||
const config = {}
|
||||
|
||||
//#when
|
||||
const result = OhMyOpenCodeConfigSchema.safeParse(config)
|
||||
|
||||
//#then
|
||||
expect(result.success).toBe(true)
|
||||
if (result.success) {
|
||||
expect(result.data.git_master).toBeDefined()
|
||||
expect(result.data.git_master.commit_footer).toBe(true)
|
||||
expect(result.data.git_master.include_co_authored_by).toBe(true)
|
||||
expect(result.data.git_master.git_env_prefix).toBe("GIT_MASTER=1")
|
||||
}
|
||||
})
|
||||
|
||||
test("git_master respects explicit false values", () => {
|
||||
//#given
|
||||
const config = {
|
||||
git_master: {
|
||||
commit_footer: false,
|
||||
include_co_authored_by: false,
|
||||
},
|
||||
}
|
||||
|
||||
//#when
|
||||
const result = OhMyOpenCodeConfigSchema.safeParse(config)
|
||||
|
||||
//#then
|
||||
expect(result.success).toBe(true)
|
||||
if (result.success) {
|
||||
expect(result.data.git_master.commit_footer).toBe(false)
|
||||
expect(result.data.git_master.include_co_authored_by).toBe(false)
|
||||
}
|
||||
})
|
||||
})
|
||||
|
||||
describe("skills schema", () => {
|
||||
test("accepts skills.sources configuration", () => {
|
||||
//#given
|
||||
|
||||
@@ -16,6 +16,10 @@ export const BackgroundTaskConfigSchema = z.object({
|
||||
staleTimeoutMs: z.number().min(60000).optional(),
|
||||
/** Timeout for tasks that never received any progress update, falling back to startedAt (default: 1800000 = 30 minutes, minimum: 60000 = 1 minute) */
|
||||
messageStalenessTimeoutMs: z.number().min(60000).optional(),
|
||||
/** Absolute TTL for non-terminal tasks in milliseconds (default: 1800000 = 30 minutes, minimum: 300000 = 5 minutes). Tasks exceeding this age from their last activity (or startedAt if no progress) are pruned. */
|
||||
taskTtlMs: z.number().min(300000).optional(),
|
||||
/** Timeout for tasks whose session has completely disappeared from the status registry (default: 60000 = 1 minute, minimum: 10000 = 10 seconds). When a session is gone (likely crashed), this shorter timeout is used instead of the normal stale timeout. */
|
||||
sessionGoneTimeoutMs: z.number().min(10000).optional(),
|
||||
syncPollTimeoutMs: z.number().min(60000).optional(),
|
||||
/** Maximum tool calls per subagent task before circuit breaker triggers (default: 200, minimum: 10). Prevents runaway loops from burning unlimited tokens. */
|
||||
maxToolCalls: z.number().int().min(10).optional(),
|
||||
|
||||
@@ -21,6 +21,8 @@ export const ExperimentalConfigSchema = z.object({
|
||||
hashline_edit: z.boolean().optional(),
|
||||
/** Append fallback model info to session title when a runtime fallback occurs (default: false) */
|
||||
model_fallback_title: z.boolean().optional(),
|
||||
/** Maximum number of tools to register. When set, lower-priority tools are excluded to stay within provider limits (e.g., OpenAI's 128-tool cap). Accounts for ~20 OpenCode built-in tools. */
|
||||
max_tools: z.number().int().min(1).optional(),
|
||||
})
|
||||
|
||||
export type ExperimentalConfig = z.infer<typeof ExperimentalConfigSchema>
|
||||
|
||||
@@ -47,11 +47,13 @@ export const HookNameSchema = z.enum([
|
||||
"tasks-todowrite-disabler",
|
||||
"runtime-fallback",
|
||||
"write-existing-file-guard",
|
||||
"bash-file-read-guard",
|
||||
"anthropic-effort",
|
||||
"hashline-read-enhancer",
|
||||
"read-image-resizer",
|
||||
"todo-description-override",
|
||||
"webfetch-redirect-guard",
|
||||
"legacy-plugin-toast",
|
||||
])
|
||||
|
||||
export type HookName = z.infer<typeof HookNameSchema>
|
||||
|
||||
@@ -60,7 +60,11 @@ export const OhMyOpenCodeConfigSchema = z.object({
|
||||
model_capabilities: ModelCapabilitiesConfigSchema.optional(),
|
||||
openclaw: OpenClawConfigSchema.optional(),
|
||||
babysitting: BabysittingConfigSchema.optional(),
|
||||
git_master: GitMasterConfigSchema.optional(),
|
||||
git_master: GitMasterConfigSchema.default({
|
||||
commit_footer: true,
|
||||
include_co_authored_by: true,
|
||||
git_env_prefix: "GIT_MASTER=1",
|
||||
}),
|
||||
browser_automation_engine: BrowserAutomationConfigSchema.optional(),
|
||||
websearch: WebsearchConfigSchema.optional(),
|
||||
tmux: TmuxConfigSchema.optional(),
|
||||
|
||||
@@ -7,8 +7,10 @@ import { BackgroundManager } from "./features/background-agent"
|
||||
import { SkillMcpManager } from "./features/skill-mcp-manager"
|
||||
import { initTaskToastManager } from "./features/task-toast-manager"
|
||||
import { TmuxSessionManager } from "./features/tmux-subagent"
|
||||
import { registerManagerForCleanup } from "./features/background-agent/process-cleanup"
|
||||
import { createConfigHandler } from "./plugin-handlers"
|
||||
import { log } from "./shared"
|
||||
import { markServerRunningInProcess } from "./shared/tmux/tmux-utils/server-health"
|
||||
|
||||
export type Managers = {
|
||||
tmuxSessionManager: TmuxSessionManager
|
||||
@@ -26,8 +28,17 @@ export function createManagers(args: {
|
||||
}): Managers {
|
||||
const { ctx, pluginConfig, tmuxConfig, modelCacheState, backgroundNotificationHookEnabled } = args
|
||||
|
||||
markServerRunningInProcess()
|
||||
const tmuxSessionManager = new TmuxSessionManager(ctx, tmuxConfig)
|
||||
|
||||
registerManagerForCleanup({
|
||||
shutdown: async () => {
|
||||
await tmuxSessionManager.cleanup().catch((error) => {
|
||||
log("[create-managers] tmux cleanup error during process shutdown:", error)
|
||||
})
|
||||
},
|
||||
})
|
||||
|
||||
const backgroundManager = new BackgroundManager(
|
||||
ctx,
|
||||
pluginConfig.background_task,
|
||||
|
||||
@@ -1,6 +1,6 @@
|
||||
import type { BackgroundTask } from "./types"
|
||||
|
||||
export type BackgroundTaskNotificationStatus = "COMPLETED" | "CANCELLED" | "INTERRUPTED"
|
||||
export type BackgroundTaskNotificationStatus = "COMPLETED" | "CANCELLED" | "INTERRUPTED" | "ERROR"
|
||||
|
||||
export function buildBackgroundTaskNotificationText(input: {
|
||||
task: BackgroundTask
|
||||
@@ -15,21 +15,43 @@ export function buildBackgroundTaskNotificationText(input: {
|
||||
const errorInfo = task.error ? `\n**Error:** ${task.error}` : ""
|
||||
|
||||
if (allComplete) {
|
||||
const completedTasksText = completedTasks
|
||||
.map((t) => `- \`${t.id}\`: ${t.description}`)
|
||||
.join("\n")
|
||||
const succeededTasks = completedTasks.filter((t) => t.status === "completed")
|
||||
const failedTasks = completedTasks.filter((t) => t.status !== "completed")
|
||||
|
||||
const succeededText = succeededTasks.length > 0
|
||||
? succeededTasks.map((t) => `- \`${t.id}\`: ${t.description}`).join("\n")
|
||||
: ""
|
||||
const failedText = failedTasks.length > 0
|
||||
? failedTasks.map((t) => `- \`${t.id}\`: ${t.description} [${t.status.toUpperCase()}]${t.error ? ` - ${t.error}` : ""}`).join("\n")
|
||||
: ""
|
||||
|
||||
const hasFailures = failedTasks.length > 0
|
||||
const header = hasFailures
|
||||
? `[ALL BACKGROUND TASKS FINISHED - ${failedTasks.length} FAILED]`
|
||||
: "[ALL BACKGROUND TASKS COMPLETE]"
|
||||
|
||||
let body = ""
|
||||
if (succeededText) {
|
||||
body += `**Completed:**\n${succeededText}\n`
|
||||
}
|
||||
if (failedText) {
|
||||
body += `\n**Failed:**\n${failedText}\n`
|
||||
}
|
||||
if (!body) {
|
||||
body = `- \`${task.id}\`: ${task.description} [${task.status.toUpperCase()}]${task.error ? ` - ${task.error}` : ""}\n`
|
||||
}
|
||||
|
||||
return `<system-reminder>
|
||||
[ALL BACKGROUND TASKS COMPLETE]
|
||||
${header}
|
||||
|
||||
**Completed:**
|
||||
${completedTasksText || `- \`${task.id}\`: ${task.description}`}
|
||||
${body.trim()}
|
||||
|
||||
Use \`background_output(task_id="<id>")\` to retrieve each result.
|
||||
Use \`background_output(task_id="<id>")\` to retrieve each result.${hasFailures ? `\n\n**ACTION REQUIRED:** ${failedTasks.length} task(s) failed. Check errors above and decide whether to retry or proceed.` : ""}
|
||||
</system-reminder>`
|
||||
}
|
||||
|
||||
const agentInfo = task.category ? `${task.agent} (${task.category})` : task.agent
|
||||
const isFailure = statusText !== "COMPLETED"
|
||||
|
||||
return `<system-reminder>
|
||||
[BACKGROUND TASK ${statusText}]
|
||||
@@ -39,7 +61,7 @@ Use \`background_output(task_id="<id>")\` to retrieve each result.
|
||||
**Duration:** ${duration}${errorInfo}
|
||||
|
||||
**${remainingCount} task${remainingCount === 1 ? "" : "s"} still in progress.** You WILL be notified when ALL complete.
|
||||
Do NOT poll - continue productive work.
|
||||
${isFailure ? "**ACTION REQUIRED:** This task failed. Check the error and decide whether to retry, cancel remaining tasks, or continue." : "Do NOT poll - continue productive work."}
|
||||
|
||||
Use \`background_output(task_id="${task.id}")\` to retrieve this result when ready.
|
||||
</system-reminder>`
|
||||
|
||||
@@ -10,6 +10,7 @@ export const DEFAULT_MAX_TOOL_CALLS = 4000
|
||||
export const DEFAULT_CIRCUIT_BREAKER_CONSECUTIVE_THRESHOLD = 20
|
||||
export const DEFAULT_CIRCUIT_BREAKER_ENABLED = true
|
||||
export const MIN_RUNTIME_BEFORE_STALE_MS = 30_000
|
||||
export const DEFAULT_SESSION_GONE_TIMEOUT_MS = 60_000
|
||||
export const MIN_IDLE_TIME_MS = 5000
|
||||
export const POLLING_INTERVAL_MS = 3000
|
||||
export const TASK_CLEANUP_DELAY_MS = 10 * 60 * 1000
|
||||
|
||||
@@ -3312,6 +3312,9 @@ describe("BackgroundManager.checkAndInterruptStaleTasks", () => {
|
||||
prompt: async () => ({}),
|
||||
promptAsync: async () => ({}),
|
||||
abort: async () => ({}),
|
||||
get: async () => {
|
||||
throw new Error("missing")
|
||||
},
|
||||
},
|
||||
}
|
||||
const manager = new BackgroundManager({ client, directory: tmpdir() } as unknown as PluginInput, { staleTimeoutMs: 180_000 })
|
||||
@@ -3348,6 +3351,9 @@ describe("BackgroundManager.checkAndInterruptStaleTasks", () => {
|
||||
prompt: async () => ({}),
|
||||
promptAsync: async () => ({}),
|
||||
abort: async () => ({}),
|
||||
get: async () => {
|
||||
throw new Error("missing")
|
||||
},
|
||||
},
|
||||
}
|
||||
const manager = new BackgroundManager({ client, directory: tmpdir() } as unknown as PluginInput, { staleTimeoutMs: 180_000 })
|
||||
@@ -3437,6 +3443,7 @@ describe("BackgroundManager.checkAndInterruptStaleTasks", () => {
|
||||
status: "running",
|
||||
startedAt: new Date(Date.now() - 15 * 60 * 1000),
|
||||
progress: undefined,
|
||||
consecutiveMissedPolls: 2,
|
||||
}
|
||||
|
||||
getTaskMap(manager).set(task.id, task)
|
||||
@@ -3471,6 +3478,7 @@ describe("BackgroundManager.checkAndInterruptStaleTasks", () => {
|
||||
status: "running",
|
||||
startedAt: new Date(Date.now() - 15 * 60 * 1000),
|
||||
progress: undefined,
|
||||
consecutiveMissedPolls: 2,
|
||||
}
|
||||
|
||||
getTaskMap(manager).set(task.id, task)
|
||||
@@ -3478,12 +3486,12 @@ describe("BackgroundManager.checkAndInterruptStaleTasks", () => {
|
||||
//#when — no progress update for 15 minutes
|
||||
await manager["checkAndInterruptStaleTasks"]({})
|
||||
|
||||
//#then — killed after messageStalenessTimeout
|
||||
//#then — killed because session gone from status registry
|
||||
expect(task.status).toBe("cancelled")
|
||||
expect(task.error).toContain("no activity")
|
||||
expect(task.error).toContain("session gone from status registry")
|
||||
})
|
||||
|
||||
test("should NOT interrupt task with no lastUpdate within messageStalenessTimeout", async () => {
|
||||
test("should NOT interrupt task with no lastUpdate within session-gone timeout", async () => {
|
||||
//#given
|
||||
const client = {
|
||||
session: {
|
||||
@@ -3492,7 +3500,7 @@ describe("BackgroundManager.checkAndInterruptStaleTasks", () => {
|
||||
abort: async () => ({}),
|
||||
},
|
||||
}
|
||||
const manager = new BackgroundManager({ client, directory: tmpdir() } as unknown as PluginInput, { messageStalenessTimeoutMs: 600_000 })
|
||||
const manager = new BackgroundManager({ client, directory: tmpdir() } as unknown as PluginInput, { messageStalenessTimeoutMs: 600_000, sessionGoneTimeoutMs: 600_000 })
|
||||
|
||||
const task: BackgroundTask = {
|
||||
id: "task-fresh-no-update",
|
||||
@@ -3509,7 +3517,7 @@ describe("BackgroundManager.checkAndInterruptStaleTasks", () => {
|
||||
|
||||
getTaskMap(manager).set(task.id, task)
|
||||
|
||||
//#when — only 5 min since start, within 10min timeout
|
||||
//#when — only 5 min since start, within 10min session-gone timeout
|
||||
await manager["checkAndInterruptStaleTasks"]({})
|
||||
|
||||
//#then — task survives
|
||||
@@ -3728,6 +3736,9 @@ describe("BackgroundManager.handleEvent - session.deleted cascade", () => {
|
||||
properties: { info: { id: parentSessionID } },
|
||||
})
|
||||
|
||||
// Flush twice: cancelTask now awaits session.abort() before cleanupPendingByParent,
|
||||
// so we need additional microtask ticks to let the cascade complete fully.
|
||||
await flushBackgroundNotifications()
|
||||
await flushBackgroundNotifications()
|
||||
|
||||
// then
|
||||
@@ -4263,7 +4274,7 @@ describe("BackgroundManager.pruneStaleTasksAndNotifications - removes pruned tas
|
||||
expect(retainedTask?.status).toBe("error")
|
||||
expect(getTaskMap(manager).has(staleTask.id)).toBe(true)
|
||||
expect(notifications).toHaveLength(1)
|
||||
expect(notifications[0]).toContain("[ALL BACKGROUND TASKS COMPLETE]")
|
||||
expect(notifications[0]).toContain("[ALL BACKGROUND TASKS FINISHED")
|
||||
expect(notifications[0]).toContain(staleTask.description)
|
||||
expect(getCompletionTimers(manager).has(staleTask.id)).toBe(true)
|
||||
expect(removeTaskCalls).toContain(staleTask.id)
|
||||
|
||||
@@ -147,7 +147,7 @@ export class BackgroundManager {
|
||||
private queuesByKey: Map<string, QueueItem[]> = new Map()
|
||||
private processingKeys: Set<string> = new Set()
|
||||
private completionTimers: Map<string, ReturnType<typeof setTimeout>> = new Map()
|
||||
private completedTaskSummaries: Map<string, Array<{id: string, description: string}>> = new Map()
|
||||
private completedTaskSummaries: Map<string, Array<{id: string, description: string, status: string, error?: string}>> = new Map()
|
||||
private idleDeferralTimers: Map<string, ReturnType<typeof setTimeout>> = new Map()
|
||||
private notificationQueueByParent: Map<string, Promise<void>> = new Map()
|
||||
private rootDescendantCounts: Map<string, number>
|
||||
@@ -538,7 +538,7 @@ export class BackgroundManager {
|
||||
})(),
|
||||
parts: [createInternalAgentTextPart(input.prompt)],
|
||||
},
|
||||
}).catch((error) => {
|
||||
}).catch(async (error) => {
|
||||
log("[background-agent] promptAsync error:", error)
|
||||
const existingTask = this.findBySession(sessionID)
|
||||
if (existingTask) {
|
||||
@@ -561,7 +561,8 @@ export class BackgroundManager {
|
||||
removeTaskToastTracking(existingTask.id)
|
||||
|
||||
// Abort the session to prevent infinite polling hang
|
||||
this.client.session.abort({
|
||||
// Awaited to prevent dangling promise during subagent teardown (Bun/WebKit SIGABRT)
|
||||
await this.client.session.abort({
|
||||
path: { id: sessionID },
|
||||
}).catch(() => {})
|
||||
|
||||
@@ -823,7 +824,7 @@ export class BackgroundManager {
|
||||
})(),
|
||||
parts: [createInternalAgentTextPart(input.prompt)],
|
||||
},
|
||||
}).catch((error) => {
|
||||
}).catch(async (error) => {
|
||||
log("[background-agent] resume prompt error:", error)
|
||||
existingTask.status = "interrupt"
|
||||
const errorMessage = error instanceof Error ? error.message : String(error)
|
||||
@@ -842,8 +843,9 @@ export class BackgroundManager {
|
||||
removeTaskToastTracking(existingTask.id)
|
||||
|
||||
// Abort the session to prevent infinite polling hang
|
||||
// Awaited to prevent dangling promise during subagent teardown (Bun/WebKit SIGABRT)
|
||||
if (existingTask.sessionID) {
|
||||
this.client.session.abort({
|
||||
await this.client.session.abort({
|
||||
path: { id: existingTask.sessionID },
|
||||
}).catch(() => {})
|
||||
}
|
||||
@@ -1392,7 +1394,8 @@ export class BackgroundManager {
|
||||
}
|
||||
|
||||
if (abortSession && task.sessionID) {
|
||||
this.client.session.abort({
|
||||
// Awaited to prevent dangling promise during subagent teardown (Bun/WebKit SIGABRT)
|
||||
await this.client.session.abort({
|
||||
path: { id: task.sessionID },
|
||||
}).catch(() => {})
|
||||
|
||||
@@ -1510,7 +1513,8 @@ export class BackgroundManager {
|
||||
}
|
||||
|
||||
if (task.sessionID) {
|
||||
this.client.session.abort({
|
||||
// Awaited to prevent dangling promise during subagent teardown (Bun/WebKit SIGABRT)
|
||||
await this.client.session.abort({
|
||||
path: { id: task.sessionID },
|
||||
}).catch(() => {})
|
||||
|
||||
@@ -1552,6 +1556,8 @@ export class BackgroundManager {
|
||||
this.completedTaskSummaries.get(task.parentSessionID)!.push({
|
||||
id: task.id,
|
||||
description: task.description,
|
||||
status: task.status,
|
||||
error: task.error,
|
||||
})
|
||||
|
||||
// Update pending tracking and check if all tasks complete
|
||||
@@ -1573,7 +1579,7 @@ export class BackgroundManager {
|
||||
}
|
||||
|
||||
const completedTasks = allComplete
|
||||
? (this.completedTaskSummaries.get(task.parentSessionID) ?? [{ id: task.id, description: task.description }])
|
||||
? (this.completedTaskSummaries.get(task.parentSessionID) ?? [{ id: task.id, description: task.description, status: task.status, error: task.error }])
|
||||
: []
|
||||
|
||||
if (allComplete) {
|
||||
@@ -1591,20 +1597,40 @@ export class BackgroundManager {
|
||||
|
||||
let notification: string
|
||||
if (allComplete) {
|
||||
const completedTasksText = completedTasks
|
||||
.map(t => `- \`${t.id}\`: ${t.description}`)
|
||||
.join("\n")
|
||||
const succeededTasks = completedTasks.filter(t => t.status === "completed")
|
||||
const failedTasks = completedTasks.filter(t => t.status !== "completed")
|
||||
|
||||
const succeededText = succeededTasks.length > 0
|
||||
? succeededTasks.map(t => `- \`${t.id}\`: ${t.description}`).join("\n")
|
||||
: ""
|
||||
const failedText = failedTasks.length > 0
|
||||
? failedTasks.map(t => `- \`${t.id}\`: ${t.description} [${t.status.toUpperCase()}]${t.error ? ` - ${t.error}` : ""}`).join("\n")
|
||||
: ""
|
||||
|
||||
const hasFailures = failedTasks.length > 0
|
||||
const header = hasFailures
|
||||
? `[ALL BACKGROUND TASKS FINISHED - ${failedTasks.length} FAILED]`
|
||||
: "[ALL BACKGROUND TASKS COMPLETE]"
|
||||
|
||||
let body = ""
|
||||
if (succeededText) {
|
||||
body += `**Completed:**\n${succeededText}\n`
|
||||
}
|
||||
if (failedText) {
|
||||
body += `\n**Failed:**\n${failedText}\n`
|
||||
}
|
||||
if (!body) {
|
||||
body = `- \`${task.id}\`: ${task.description} [${task.status.toUpperCase()}]${task.error ? ` - ${task.error}` : ""}\n`
|
||||
}
|
||||
|
||||
notification = `<system-reminder>
|
||||
[ALL BACKGROUND TASKS COMPLETE]
|
||||
${header}
|
||||
|
||||
**Completed:**
|
||||
${completedTasksText || `- \`${task.id}\`: ${task.description}`}
|
||||
${body.trim()}
|
||||
|
||||
Use \`background_output(task_id="<id>")\` to retrieve each result.
|
||||
Use \`background_output(task_id="<id>")\` to retrieve each result.${hasFailures ? `\n\n**ACTION REQUIRED:** ${failedTasks.length} task(s) failed. Check errors above and decide whether to retry or proceed.` : ""}
|
||||
</system-reminder>`
|
||||
} else {
|
||||
// Individual completion - silent notification
|
||||
notification = `<system-reminder>
|
||||
[BACKGROUND TASK ${statusText}]
|
||||
**ID:** \`${task.id}\`
|
||||
@@ -1612,7 +1638,7 @@ Use \`background_output(task_id="<id>")\` to retrieve each result.
|
||||
**Duration:** ${duration}${errorInfo}
|
||||
|
||||
**${remainingCount} task${remainingCount === 1 ? "" : "s"} still in progress.** You WILL be notified when ALL complete.
|
||||
Do NOT poll - continue productive work.
|
||||
${statusText === "COMPLETED" ? "Do NOT poll - continue productive work." : "**ACTION REQUIRED:** This task failed. Check the error and decide whether to retry, cancel remaining tasks, or continue."}
|
||||
|
||||
Use \`background_output(task_id="${task.id}")\` to retrieve this result when ready.
|
||||
</system-reminder>`
|
||||
@@ -1675,11 +1701,14 @@ Use \`background_output(task_id="${task.id}")\` to retrieve this result when rea
|
||||
resolvedModel: model,
|
||||
})
|
||||
|
||||
const isTaskFailure = task.status === "error" || task.status === "cancelled" || task.status === "interrupt"
|
||||
const shouldReply = allComplete || isTaskFailure
|
||||
|
||||
try {
|
||||
await this.client.session.promptAsync({
|
||||
path: { id: task.parentSessionID },
|
||||
body: {
|
||||
noReply: !allComplete,
|
||||
noReply: !shouldReply,
|
||||
...(agent !== undefined ? { agent } : {}),
|
||||
...(model !== undefined ? { model } : {}),
|
||||
...(resolvedTools ? { tools: resolvedTools } : {}),
|
||||
@@ -1689,7 +1718,8 @@ Use \`background_output(task_id="${task.id}")\` to retrieve this result when rea
|
||||
log("[background-agent] Sent notification to parent session:", {
|
||||
taskId: task.id,
|
||||
allComplete,
|
||||
noReply: !allComplete,
|
||||
isTaskFailure,
|
||||
noReply: !shouldReply,
|
||||
})
|
||||
} catch (error) {
|
||||
if (isAbortedSessionError(error)) {
|
||||
@@ -1725,6 +1755,7 @@ Use \`background_output(task_id="${task.id}")\` to retrieve this result when rea
|
||||
pruneStaleTasksAndNotifications({
|
||||
tasks: this.tasks,
|
||||
notifications: this.notifications,
|
||||
taskTtlMs: this.config?.taskTtlMs,
|
||||
onTaskPruned: (taskId, task, errorMessage) => {
|
||||
const wasPending = task.status === "pending"
|
||||
log("[background-agent] Pruning stale task:", { taskId, status: task.status, age: Math.round(((wasPending ? task.queuedAt?.getTime() : task.startedAt?.getTime()) ? (Date.now() - (wasPending ? task.queuedAt!.getTime() : task.startedAt!.getTime())) : 0) / 1000) + "s" })
|
||||
@@ -1787,6 +1818,53 @@ Use \`background_output(task_id="${task.id}")\` to retrieve this result when rea
|
||||
})
|
||||
}
|
||||
|
||||
private async verifySessionExists(sessionID: string): Promise<boolean> {
|
||||
try {
|
||||
const result = await this.client.session.get({ path: { id: sessionID } })
|
||||
return !!result.data
|
||||
} catch {
|
||||
return false
|
||||
}
|
||||
}
|
||||
|
||||
private async failCrashedTask(task: BackgroundTask, errorMessage: string): Promise<void> {
|
||||
task.status = "error"
|
||||
task.error = errorMessage
|
||||
task.completedAt = new Date()
|
||||
if (task.rootSessionID) {
|
||||
this.unregisterRootDescendant(task.rootSessionID)
|
||||
}
|
||||
this.taskHistory.record(task.parentSessionID, { id: task.id, sessionID: task.sessionID, agent: task.agent, description: task.description, status: "error", category: task.category, startedAt: task.startedAt, completedAt: task.completedAt })
|
||||
if (task.concurrencyKey) {
|
||||
this.concurrencyManager.release(task.concurrencyKey)
|
||||
task.concurrencyKey = undefined
|
||||
}
|
||||
|
||||
const completionTimer = this.completionTimers.get(task.id)
|
||||
if (completionTimer) {
|
||||
clearTimeout(completionTimer)
|
||||
this.completionTimers.delete(task.id)
|
||||
}
|
||||
const idleTimer = this.idleDeferralTimers.get(task.id)
|
||||
if (idleTimer) {
|
||||
clearTimeout(idleTimer)
|
||||
this.idleDeferralTimers.delete(task.id)
|
||||
}
|
||||
|
||||
this.cleanupPendingByParent(task)
|
||||
this.clearNotificationsForTask(task.id)
|
||||
removeTaskToastTracking(task.id)
|
||||
this.scheduleTaskRemoval(task.id)
|
||||
if (task.sessionID) {
|
||||
SessionCategoryRegistry.remove(task.sessionID)
|
||||
}
|
||||
|
||||
this.markForNotification(task)
|
||||
this.enqueueNotificationForParent(task.parentSessionID, () => this.notifyParentSession(task)).catch(err => {
|
||||
log("[background-agent] Error in notifyParentSession for crashed task:", { taskId: task.id, error: err })
|
||||
})
|
||||
}
|
||||
|
||||
private async pollRunningTasks(): Promise<void> {
|
||||
if (this.pollingInFlight) return
|
||||
this.pollingInFlight = true
|
||||
@@ -1848,11 +1926,20 @@ Use \`background_output(task_id="${task.id}")\` to retrieve this result when rea
|
||||
}
|
||||
|
||||
// Session is idle or no longer in status response (completed/disappeared)
|
||||
const sessionGoneFromStatus = !sessionStatus
|
||||
const completionSource = sessionStatus?.type === "idle"
|
||||
? "polling (idle status)"
|
||||
: "polling (session gone from status)"
|
||||
const hasValidOutput = await this.validateSessionHasOutput(sessionID)
|
||||
if (!hasValidOutput) {
|
||||
if (sessionGoneFromStatus) {
|
||||
const sessionExists = await this.verifySessionExists(sessionID)
|
||||
if (!sessionExists) {
|
||||
log("[background-agent] Session no longer exists (crashed), marking task as error:", task.id)
|
||||
await this.failCrashedTask(task, "Subagent session no longer exists (process likely crashed). The session disappeared without producing any output.")
|
||||
continue
|
||||
}
|
||||
}
|
||||
log("[background-agent] Polling idle/gone but no valid output yet, waiting:", task.id)
|
||||
continue
|
||||
}
|
||||
|
||||
@@ -11,7 +11,7 @@ function registerProcessSignal(
|
||||
handler()
|
||||
if (exitAfter) {
|
||||
process.exitCode = 0
|
||||
setTimeout(() => process.exit(), 6000).unref()
|
||||
setTimeout(() => process.exit(), 6000)
|
||||
}
|
||||
}
|
||||
process.on(signal, listener)
|
||||
@@ -32,16 +32,26 @@ export function registerManagerForCleanup(manager: CleanupTarget): void {
|
||||
if (cleanupRegistered) return
|
||||
cleanupRegistered = true
|
||||
|
||||
let cleanupPromise: Promise<void> | undefined
|
||||
|
||||
const cleanupAll = () => {
|
||||
if (cleanupPromise) return
|
||||
const promises: Promise<void>[] = []
|
||||
for (const m of cleanupManagers) {
|
||||
try {
|
||||
void Promise.resolve(m.shutdown()).catch((error) => {
|
||||
promises.push(
|
||||
Promise.resolve(m.shutdown()).catch((error) => {
|
||||
log("[background-agent] Error during async shutdown cleanup:", error)
|
||||
})
|
||||
)
|
||||
} catch (error) {
|
||||
log("[background-agent] Error during shutdown cleanup:", error)
|
||||
}
|
||||
}
|
||||
cleanupPromise = Promise.allSettled(promises).then(() => {})
|
||||
cleanupPromise.then(() => {
|
||||
log("[background-agent] All shutdown cleanup completed")
|
||||
})
|
||||
}
|
||||
|
||||
const registerSignal = (signal: ProcessCleanupEvent, exitAfter: boolean): void => {
|
||||
|
||||
@@ -8,6 +8,7 @@ describe("checkAndInterruptStaleTasks", () => {
|
||||
const mockClient = {
|
||||
session: {
|
||||
abort: mock(() => Promise.resolve()),
|
||||
get: mock(() => Promise.resolve({ data: { id: "ses-1" } })),
|
||||
},
|
||||
}
|
||||
const mockConcurrencyManager = {
|
||||
@@ -35,6 +36,11 @@ describe("checkAndInterruptStaleTasks", () => {
|
||||
beforeEach(() => {
|
||||
fixedTime = Date.now()
|
||||
spyOn(globalThis.Date, "now").mockReturnValue(fixedTime)
|
||||
mockClient.session.abort.mockClear()
|
||||
mockClient.session.get.mockReset()
|
||||
mockClient.session.get.mockResolvedValue({ data: { id: "ses-1" } })
|
||||
mockConcurrencyManager.release.mockClear()
|
||||
mockNotify.mockClear()
|
||||
})
|
||||
|
||||
afterEach(() => {
|
||||
@@ -288,29 +294,165 @@ describe("checkAndInterruptStaleTasks", () => {
|
||||
expect(task.status).toBe("running")
|
||||
})
|
||||
|
||||
it("should use default stale timeout when session status is unknown/missing", async () => {
|
||||
//#given — lastUpdate exceeds stale timeout, session not in status map
|
||||
it("should NOT cancel healthy task on first missing status poll", async () => {
|
||||
//#given — one missing poll should not be enough to declare the session gone
|
||||
const task = createRunningTask({
|
||||
startedAt: new Date(Date.now() - 300_000),
|
||||
progress: {
|
||||
toolCalls: 1,
|
||||
lastUpdate: new Date(Date.now() - 200_000),
|
||||
lastUpdate: new Date(Date.now() - 120_000),
|
||||
},
|
||||
})
|
||||
|
||||
//#when — empty sessionStatuses (session not found)
|
||||
//#when
|
||||
await checkAndInterruptStaleTasks({
|
||||
tasks: [task],
|
||||
client: mockClient as never,
|
||||
config: { staleTimeoutMs: 180_000 },
|
||||
config: { staleTimeoutMs: 180_000, sessionGoneTimeoutMs: 60_000 },
|
||||
concurrencyManager: mockConcurrencyManager as never,
|
||||
notifyParentSession: mockNotify,
|
||||
sessionStatuses: {},
|
||||
})
|
||||
|
||||
//#then — unknown session treated as potentially stale, apply default timeout
|
||||
//#then
|
||||
expect(task.status).toBe("running")
|
||||
expect(task.consecutiveMissedPolls).toBe(1)
|
||||
expect(mockClient.session.get).not.toHaveBeenCalled()
|
||||
})
|
||||
|
||||
it("should NOT cancel task when session.get confirms the session still exists", async () => {
|
||||
//#given — repeated missing polls but direct lookup still succeeds
|
||||
const task = createRunningTask({
|
||||
startedAt: new Date(Date.now() - 300_000),
|
||||
progress: {
|
||||
toolCalls: 1,
|
||||
lastUpdate: new Date(Date.now() - 120_000),
|
||||
},
|
||||
consecutiveMissedPolls: 2,
|
||||
})
|
||||
|
||||
//#when
|
||||
await checkAndInterruptStaleTasks({
|
||||
tasks: [task],
|
||||
client: mockClient as never,
|
||||
config: { staleTimeoutMs: 180_000, sessionGoneTimeoutMs: 60_000 },
|
||||
concurrencyManager: mockConcurrencyManager as never,
|
||||
notifyParentSession: mockNotify,
|
||||
sessionStatuses: {},
|
||||
})
|
||||
|
||||
//#then
|
||||
expect(task.status).toBe("running")
|
||||
expect(task.consecutiveMissedPolls).toBe(0)
|
||||
expect(mockClient.session.get).toHaveBeenCalledWith({ path: { id: "ses-1" } })
|
||||
})
|
||||
|
||||
it("should use session-gone timeout when session is missing from status map (with progress)", async () => {
|
||||
//#given — lastUpdate 2min ago, session completely gone from status
|
||||
const task = createRunningTask({
|
||||
startedAt: new Date(Date.now() - 300_000),
|
||||
progress: {
|
||||
toolCalls: 1,
|
||||
lastUpdate: new Date(Date.now() - 120_000),
|
||||
},
|
||||
consecutiveMissedPolls: 2,
|
||||
})
|
||||
|
||||
mockClient.session.get.mockRejectedValue(new Error("missing"))
|
||||
|
||||
//#when — empty sessionStatuses (session gone), sessionGoneTimeoutMs = 60s
|
||||
await checkAndInterruptStaleTasks({
|
||||
tasks: [task],
|
||||
client: mockClient as never,
|
||||
config: { staleTimeoutMs: 180_000, sessionGoneTimeoutMs: 60_000 },
|
||||
concurrencyManager: mockConcurrencyManager as never,
|
||||
notifyParentSession: mockNotify,
|
||||
sessionStatuses: {},
|
||||
})
|
||||
|
||||
//#then — cancelled because session gone timeout (60s) < timeSinceLastUpdate (120s)
|
||||
expect(task.status).toBe("cancelled")
|
||||
expect(task.error).toContain("Stale timeout")
|
||||
expect(task.error).toContain("session gone from status registry")
|
||||
})
|
||||
|
||||
it("should use session-gone timeout when session is missing from status map (no progress)", async () => {
|
||||
//#given — task started 2min ago, no progress, session completely gone
|
||||
const task = createRunningTask({
|
||||
startedAt: new Date(Date.now() - 120_000),
|
||||
progress: undefined,
|
||||
consecutiveMissedPolls: 2,
|
||||
})
|
||||
|
||||
mockClient.session.get.mockRejectedValue(new Error("missing"))
|
||||
|
||||
//#when — session gone, sessionGoneTimeoutMs = 60s
|
||||
await checkAndInterruptStaleTasks({
|
||||
tasks: [task],
|
||||
client: mockClient as never,
|
||||
config: { messageStalenessTimeoutMs: 600_000, sessionGoneTimeoutMs: 60_000 },
|
||||
concurrencyManager: mockConcurrencyManager as never,
|
||||
notifyParentSession: mockNotify,
|
||||
sessionStatuses: {},
|
||||
})
|
||||
|
||||
//#then — cancelled because session gone timeout (60s) < runtime (120s)
|
||||
expect(task.status).toBe("cancelled")
|
||||
expect(task.error).toContain("session gone from status registry")
|
||||
})
|
||||
|
||||
it("should NOT use session-gone timeout when session is idle (present in status map)", async () => {
|
||||
//#given — lastUpdate 2min ago, session is idle (present in status but not active)
|
||||
const task = createRunningTask({
|
||||
startedAt: new Date(Date.now() - 300_000),
|
||||
progress: {
|
||||
toolCalls: 1,
|
||||
lastUpdate: new Date(Date.now() - 120_000),
|
||||
},
|
||||
consecutiveMissedPolls: 2,
|
||||
})
|
||||
|
||||
mockClient.session.get.mockRejectedValue(new Error("missing"))
|
||||
|
||||
//#when — session is idle (present in map), staleTimeoutMs = 180s
|
||||
await checkAndInterruptStaleTasks({
|
||||
tasks: [task],
|
||||
client: mockClient as never,
|
||||
config: { staleTimeoutMs: 180_000, sessionGoneTimeoutMs: 60_000 },
|
||||
concurrencyManager: mockConcurrencyManager as never,
|
||||
notifyParentSession: mockNotify,
|
||||
sessionStatuses: { "ses-1": { type: "idle" } },
|
||||
})
|
||||
|
||||
//#then — still running because normal staleTimeout (180s) > timeSinceLastUpdate (120s)
|
||||
expect(task.status).toBe("running")
|
||||
})
|
||||
|
||||
it("should use default session-gone timeout when not configured", async () => {
|
||||
//#given — lastUpdate 2min ago, session gone, no sessionGoneTimeoutMs config
|
||||
const task = createRunningTask({
|
||||
startedAt: new Date(Date.now() - 300_000),
|
||||
progress: {
|
||||
toolCalls: 1,
|
||||
lastUpdate: new Date(Date.now() - 120_000),
|
||||
},
|
||||
consecutiveMissedPolls: 2,
|
||||
})
|
||||
|
||||
mockClient.session.get.mockRejectedValue(new Error("missing"))
|
||||
|
||||
//#when — no config (default sessionGoneTimeoutMs = 60_000)
|
||||
await checkAndInterruptStaleTasks({
|
||||
tasks: [task],
|
||||
client: mockClient as never,
|
||||
config: undefined,
|
||||
concurrencyManager: mockConcurrencyManager as never,
|
||||
notifyParentSession: mockNotify,
|
||||
sessionStatuses: {},
|
||||
})
|
||||
|
||||
//#then — cancelled because default session gone timeout (60s) < timeSinceLastUpdate (120s)
|
||||
expect(task.status).toBe("cancelled")
|
||||
expect(task.error).toContain("session gone from status registry")
|
||||
})
|
||||
|
||||
it("should NOT interrupt task when session is busy (OpenCode status), even if lastUpdate exceeds stale timeout", async () => {
|
||||
@@ -526,6 +668,132 @@ describe("pruneStaleTasksAndNotifications", () => {
|
||||
expect(pruned).toContain("old-task")
|
||||
})
|
||||
|
||||
it("#given running task with recent progress #when startedAt exceeds TTL #then should NOT prune", () => {
|
||||
//#given
|
||||
const tasks = new Map<string, BackgroundTask>()
|
||||
const activeTask: BackgroundTask = {
|
||||
id: "active-task",
|
||||
parentSessionID: "parent",
|
||||
parentMessageID: "msg",
|
||||
description: "active",
|
||||
prompt: "active",
|
||||
agent: "oracle",
|
||||
status: "running",
|
||||
startedAt: new Date(Date.now() - 45 * 60 * 1000),
|
||||
progress: {
|
||||
toolCalls: 10,
|
||||
lastUpdate: new Date(Date.now() - 5 * 60 * 1000),
|
||||
},
|
||||
}
|
||||
tasks.set("active-task", activeTask)
|
||||
|
||||
const pruned: string[] = []
|
||||
const notifications = new Map<string, BackgroundTask[]>()
|
||||
|
||||
//#when
|
||||
pruneStaleTasksAndNotifications({
|
||||
tasks,
|
||||
notifications,
|
||||
onTaskPruned: (taskId) => pruned.push(taskId),
|
||||
})
|
||||
|
||||
//#then
|
||||
expect(pruned).toEqual([])
|
||||
})
|
||||
|
||||
it("#given running task with stale progress #when lastUpdate exceeds TTL #then should prune", () => {
|
||||
//#given
|
||||
const tasks = new Map<string, BackgroundTask>()
|
||||
const staleTask: BackgroundTask = {
|
||||
id: "stale-task",
|
||||
parentSessionID: "parent",
|
||||
parentMessageID: "msg",
|
||||
description: "stale",
|
||||
prompt: "stale",
|
||||
agent: "oracle",
|
||||
status: "running",
|
||||
startedAt: new Date(Date.now() - 60 * 60 * 1000),
|
||||
progress: {
|
||||
toolCalls: 10,
|
||||
lastUpdate: new Date(Date.now() - 35 * 60 * 1000),
|
||||
},
|
||||
}
|
||||
tasks.set("stale-task", staleTask)
|
||||
|
||||
const pruned: string[] = []
|
||||
const notifications = new Map<string, BackgroundTask[]>()
|
||||
|
||||
//#when
|
||||
pruneStaleTasksAndNotifications({
|
||||
tasks,
|
||||
notifications,
|
||||
onTaskPruned: (taskId) => pruned.push(taskId),
|
||||
})
|
||||
|
||||
//#then
|
||||
expect(pruned).toContain("stale-task")
|
||||
})
|
||||
|
||||
it("#given custom taskTtlMs #when task exceeds custom TTL #then should prune", () => {
|
||||
//#given
|
||||
const tasks = new Map<string, BackgroundTask>()
|
||||
const task: BackgroundTask = {
|
||||
id: "custom-ttl-task",
|
||||
parentSessionID: "parent",
|
||||
parentMessageID: "msg",
|
||||
description: "custom",
|
||||
prompt: "custom",
|
||||
agent: "explore",
|
||||
status: "running",
|
||||
startedAt: new Date(Date.now() - 61 * 60 * 1000),
|
||||
}
|
||||
tasks.set("custom-ttl-task", task)
|
||||
|
||||
const pruned: string[] = []
|
||||
const notifications = new Map<string, BackgroundTask[]>()
|
||||
|
||||
//#when
|
||||
pruneStaleTasksAndNotifications({
|
||||
tasks,
|
||||
notifications,
|
||||
taskTtlMs: 60 * 60 * 1000,
|
||||
onTaskPruned: (taskId) => pruned.push(taskId),
|
||||
})
|
||||
|
||||
//#then
|
||||
expect(pruned).toContain("custom-ttl-task")
|
||||
})
|
||||
|
||||
it("#given custom taskTtlMs #when task within custom TTL #then should NOT prune", () => {
|
||||
//#given
|
||||
const tasks = new Map<string, BackgroundTask>()
|
||||
const task: BackgroundTask = {
|
||||
id: "within-ttl-task",
|
||||
parentSessionID: "parent",
|
||||
parentMessageID: "msg",
|
||||
description: "within",
|
||||
prompt: "within",
|
||||
agent: "explore",
|
||||
status: "running",
|
||||
startedAt: new Date(Date.now() - 45 * 60 * 1000),
|
||||
}
|
||||
tasks.set("within-ttl-task", task)
|
||||
|
||||
const pruned: string[] = []
|
||||
const notifications = new Map<string, BackgroundTask[]>()
|
||||
|
||||
//#when
|
||||
pruneStaleTasksAndNotifications({
|
||||
tasks,
|
||||
notifications,
|
||||
taskTtlMs: 60 * 60 * 1000,
|
||||
onTaskPruned: (taskId) => pruned.push(taskId),
|
||||
})
|
||||
|
||||
//#then
|
||||
expect(pruned).toEqual([])
|
||||
})
|
||||
|
||||
it("should prune terminal tasks when completion time exceeds terminal TTL", () => {
|
||||
//#given
|
||||
const tasks = new Map<string, BackgroundTask>()
|
||||
|
||||
@@ -7,6 +7,7 @@ import type { OpencodeClient } from "./opencode-client"
|
||||
|
||||
import {
|
||||
DEFAULT_MESSAGE_STALENESS_TIMEOUT_MS,
|
||||
DEFAULT_SESSION_GONE_TIMEOUT_MS,
|
||||
DEFAULT_STALE_TIMEOUT_MS,
|
||||
MIN_RUNTIME_BEFORE_STALE_MS,
|
||||
TERMINAL_TASK_TTL_MS,
|
||||
@@ -15,6 +16,8 @@ import {
|
||||
import { removeTaskToastTracking } from "./remove-task-toast-tracking"
|
||||
|
||||
import { isActiveSessionStatus } from "./session-status-classifier"
|
||||
|
||||
const MIN_SESSION_GONE_POLLS = 3
|
||||
const TERMINAL_TASK_STATUSES = new Set<BackgroundTask["status"]>([
|
||||
"completed",
|
||||
"error",
|
||||
@@ -26,8 +29,10 @@ export function pruneStaleTasksAndNotifications(args: {
|
||||
tasks: Map<string, BackgroundTask>
|
||||
notifications: Map<string, BackgroundTask[]>
|
||||
onTaskPruned: (taskId: string, task: BackgroundTask, errorMessage: string) => void
|
||||
taskTtlMs?: number
|
||||
}): void {
|
||||
const { tasks, notifications, onTaskPruned } = args
|
||||
const effectiveTtl = args.taskTtlMs ?? TASK_TTL_MS
|
||||
const now = Date.now()
|
||||
const tasksWithPendingNotifications = new Set<string>()
|
||||
|
||||
@@ -52,18 +57,22 @@ export function pruneStaleTasksAndNotifications(args: {
|
||||
continue
|
||||
}
|
||||
|
||||
const lastActivity = task.status === "running" && task.progress?.lastUpdate
|
||||
? task.progress.lastUpdate.getTime()
|
||||
: undefined
|
||||
const timestamp = task.status === "pending"
|
||||
? task.queuedAt?.getTime()
|
||||
: task.startedAt?.getTime()
|
||||
: (lastActivity ?? task.startedAt?.getTime())
|
||||
|
||||
if (!timestamp) continue
|
||||
|
||||
const age = now - timestamp
|
||||
if (age <= TASK_TTL_MS) continue
|
||||
if (age <= effectiveTtl) continue
|
||||
|
||||
const ttlMinutes = Math.round(effectiveTtl / 60000)
|
||||
const errorMessage = task.status === "pending"
|
||||
? "Task timed out while queued (30 minutes)"
|
||||
: "Task timed out after 30 minutes"
|
||||
? `Task timed out while queued (${ttlMinutes} minutes)`
|
||||
: `Task timed out after ${ttlMinutes} minutes of inactivity`
|
||||
|
||||
onTaskPruned(taskId, task, errorMessage)
|
||||
}
|
||||
@@ -77,7 +86,7 @@ export function pruneStaleTasksAndNotifications(args: {
|
||||
const validNotifications = queued.filter((task) => {
|
||||
if (!task.startedAt) return false
|
||||
const age = now - task.startedAt.getTime()
|
||||
return age <= TASK_TTL_MS
|
||||
return age <= effectiveTtl
|
||||
})
|
||||
|
||||
if (validNotifications.length === 0) {
|
||||
@@ -90,6 +99,15 @@ export function pruneStaleTasksAndNotifications(args: {
|
||||
|
||||
export type SessionStatusMap = Record<string, { type: string }>
|
||||
|
||||
async function verifySessionExists(client: OpencodeClient, sessionID: string): Promise<boolean> {
|
||||
try {
|
||||
const result = await client.session.get({ path: { id: sessionID } })
|
||||
return !!result.data
|
||||
} catch {
|
||||
return false
|
||||
}
|
||||
}
|
||||
|
||||
export async function checkAndInterruptStaleTasks(args: {
|
||||
tasks: Iterable<BackgroundTask>
|
||||
client: OpencodeClient
|
||||
@@ -109,6 +127,7 @@ export async function checkAndInterruptStaleTasks(args: {
|
||||
onTaskInterrupted = (task) => removeTaskToastTracking(task.id),
|
||||
} = args
|
||||
const staleTimeoutMs = config?.staleTimeoutMs ?? DEFAULT_STALE_TIMEOUT_MS
|
||||
const sessionGoneTimeoutMs = config?.sessionGoneTimeoutMs ?? DEFAULT_SESSION_GONE_TIMEOUT_MS
|
||||
const now = Date.now()
|
||||
|
||||
const messageStalenessMs = config?.messageStalenessTimeoutMs ?? DEFAULT_MESSAGE_STALENESS_TIMEOUT_MS
|
||||
@@ -122,15 +141,32 @@ export async function checkAndInterruptStaleTasks(args: {
|
||||
|
||||
const sessionStatus = sessionStatuses?.[sessionID]?.type
|
||||
const sessionIsRunning = sessionStatus !== undefined && isActiveSessionStatus(sessionStatus)
|
||||
const sessionMissing = sessionStatuses !== undefined && sessionStatus === undefined
|
||||
const runtime = now - startedAt.getTime()
|
||||
|
||||
if (sessionMissing) {
|
||||
task.consecutiveMissedPolls = (task.consecutiveMissedPolls ?? 0) + 1
|
||||
} else if (sessionStatuses !== undefined) {
|
||||
task.consecutiveMissedPolls = 0
|
||||
}
|
||||
|
||||
const sessionGone = sessionMissing && (task.consecutiveMissedPolls ?? 0) >= MIN_SESSION_GONE_POLLS
|
||||
|
||||
if (!task.progress?.lastUpdate) {
|
||||
if (sessionIsRunning) continue
|
||||
if (runtime <= messageStalenessMs) continue
|
||||
if (sessionMissing && !sessionGone) continue
|
||||
const effectiveTimeout = sessionGone ? sessionGoneTimeoutMs : messageStalenessMs
|
||||
if (runtime <= effectiveTimeout) continue
|
||||
|
||||
if (sessionGone && await verifySessionExists(client, sessionID)) {
|
||||
task.consecutiveMissedPolls = 0
|
||||
continue
|
||||
}
|
||||
|
||||
const staleMinutes = Math.round(runtime / 60000)
|
||||
const reason = sessionGone ? "session gone from status registry" : "no activity"
|
||||
task.status = "cancelled"
|
||||
task.error = `Stale timeout (no activity for ${staleMinutes}min since start). This is a FINAL cancellation - do NOT create a replacement task. If the timeout is too short, increase 'background_task.staleTimeoutMs' in .opencode/oh-my-opencode.json.`
|
||||
task.error = `Stale timeout (${reason} for ${staleMinutes}min since start). This is a FINAL cancellation - do NOT create a replacement task. If the timeout is too short, increase 'background_task.${sessionGone ? "sessionGoneTimeoutMs" : "staleTimeoutMs"}' in .opencode/oh-my-opencode.json.`
|
||||
task.completedAt = new Date()
|
||||
|
||||
if (task.concurrencyKey) {
|
||||
@@ -156,12 +192,19 @@ export async function checkAndInterruptStaleTasks(args: {
|
||||
if (runtime < MIN_RUNTIME_BEFORE_STALE_MS) continue
|
||||
|
||||
const timeSinceLastUpdate = now - task.progress.lastUpdate.getTime()
|
||||
if (timeSinceLastUpdate <= staleTimeoutMs) continue
|
||||
const effectiveStaleTimeout = sessionGone ? sessionGoneTimeoutMs : staleTimeoutMs
|
||||
if (timeSinceLastUpdate <= effectiveStaleTimeout) continue
|
||||
if (task.status !== "running") continue
|
||||
|
||||
if (sessionGone && await verifySessionExists(client, sessionID)) {
|
||||
task.consecutiveMissedPolls = 0
|
||||
continue
|
||||
}
|
||||
|
||||
const staleMinutes = Math.round(timeSinceLastUpdate / 60000)
|
||||
const reason = sessionGone ? "session gone from status registry" : "no activity"
|
||||
task.status = "cancelled"
|
||||
task.error = `Stale timeout (no activity for ${staleMinutes}min). This is a FINAL cancellation - do NOT create a replacement task. If the timeout is too short, increase 'background_task.staleTimeoutMs' in .opencode/oh-my-opencode.json.`
|
||||
task.error = `Stale timeout (${reason} for ${staleMinutes}min). This is a FINAL cancellation - do NOT create a replacement task. If the timeout is too short, increase 'background_task.${sessionGone ? "sessionGoneTimeoutMs" : "staleTimeoutMs"}' in .opencode/oh-my-opencode.json.`
|
||||
task.completedAt = new Date()
|
||||
|
||||
if (task.concurrencyKey) {
|
||||
|
||||
@@ -66,6 +66,8 @@ export interface BackgroundTask {
|
||||
lastMsgCount?: number
|
||||
/** Number of consecutive polls with stable message count */
|
||||
stablePolls?: number
|
||||
/** Number of consecutive polls where session was missing from status map */
|
||||
consecutiveMissedPolls?: number
|
||||
}
|
||||
|
||||
export interface LaunchInput {
|
||||
|
||||
@@ -2,3 +2,4 @@ export * from "./types"
|
||||
export * from "./constants"
|
||||
export * from "./storage"
|
||||
export * from "./top-level-task"
|
||||
export * from "./worktree-sync"
|
||||
|
||||
88
src/features/boulder-state/worktree-sync.test.ts
Normal file
88
src/features/boulder-state/worktree-sync.test.ts
Normal file
@@ -0,0 +1,88 @@
|
||||
import { describe, expect, test, beforeEach, afterEach } from "bun:test"
|
||||
import { existsSync, mkdirSync, rmSync, writeFileSync, readFileSync } from "node:fs"
|
||||
import { join } from "node:path"
|
||||
import { tmpdir } from "node:os"
|
||||
import { syncSisyphusStateFromWorktree } from "./worktree-sync"
|
||||
|
||||
describe("syncSisyphusStateFromWorktree", () => {
|
||||
const BASE = join(tmpdir(), "worktree-sync-test-" + Date.now())
|
||||
const WORKTREE = join(BASE, "worktree")
|
||||
const MAIN_REPO = join(BASE, "main")
|
||||
|
||||
beforeEach(() => {
|
||||
mkdirSync(WORKTREE, { recursive: true })
|
||||
mkdirSync(MAIN_REPO, { recursive: true })
|
||||
})
|
||||
|
||||
afterEach(() => {
|
||||
if (existsSync(BASE)) {
|
||||
rmSync(BASE, { recursive: true, force: true })
|
||||
}
|
||||
})
|
||||
|
||||
test("#given no .sisyphus in worktree #when syncing #then returns true without error", () => {
|
||||
const result = syncSisyphusStateFromWorktree(WORKTREE, MAIN_REPO)
|
||||
|
||||
expect(result).toBe(true)
|
||||
expect(existsSync(join(MAIN_REPO, ".sisyphus"))).toBe(false)
|
||||
})
|
||||
|
||||
test("#given .sisyphus with boulder.json in worktree #when syncing #then copies to main repo", () => {
|
||||
const worktreeSisyphus = join(WORKTREE, ".sisyphus")
|
||||
mkdirSync(worktreeSisyphus, { recursive: true })
|
||||
writeFileSync(join(worktreeSisyphus, "boulder.json"), '{"active_plan":"/plan.md","plan_name":"test"}')
|
||||
|
||||
const result = syncSisyphusStateFromWorktree(WORKTREE, MAIN_REPO)
|
||||
|
||||
expect(result).toBe(true)
|
||||
const copied = readFileSync(join(MAIN_REPO, ".sisyphus", "boulder.json"), "utf-8")
|
||||
expect(JSON.parse(copied).plan_name).toBe("test")
|
||||
})
|
||||
|
||||
test("#given nested .sisyphus dirs in worktree #when syncing #then copies full tree recursively", () => {
|
||||
const worktreePlans = join(WORKTREE, ".sisyphus", "plans")
|
||||
const worktreeNotepads = join(WORKTREE, ".sisyphus", "notepads", "my-plan")
|
||||
mkdirSync(worktreePlans, { recursive: true })
|
||||
mkdirSync(worktreeNotepads, { recursive: true })
|
||||
writeFileSync(join(worktreePlans, "my-plan.md"), "- [x] Task 1\n- [ ] Task 2")
|
||||
writeFileSync(join(worktreeNotepads, "learnings.md"), "learned something")
|
||||
|
||||
const result = syncSisyphusStateFromWorktree(WORKTREE, MAIN_REPO)
|
||||
|
||||
expect(result).toBe(true)
|
||||
expect(readFileSync(join(MAIN_REPO, ".sisyphus", "plans", "my-plan.md"), "utf-8")).toContain("Task 1")
|
||||
expect(readFileSync(join(MAIN_REPO, ".sisyphus", "notepads", "my-plan", "learnings.md"), "utf-8")).toBe("learned something")
|
||||
})
|
||||
|
||||
test("#given existing .sisyphus in main repo #when syncing #then worktree state overwrites stale state", () => {
|
||||
const mainSisyphus = join(MAIN_REPO, ".sisyphus")
|
||||
mkdirSync(mainSisyphus, { recursive: true })
|
||||
writeFileSync(join(mainSisyphus, "boulder.json"), '{"plan_name":"old"}')
|
||||
|
||||
const worktreeSisyphus = join(WORKTREE, ".sisyphus")
|
||||
mkdirSync(worktreeSisyphus, { recursive: true })
|
||||
writeFileSync(join(worktreeSisyphus, "boulder.json"), '{"plan_name":"updated"}')
|
||||
|
||||
const result = syncSisyphusStateFromWorktree(WORKTREE, MAIN_REPO)
|
||||
|
||||
expect(result).toBe(true)
|
||||
const content = readFileSync(join(mainSisyphus, "boulder.json"), "utf-8")
|
||||
expect(JSON.parse(content).plan_name).toBe("updated")
|
||||
})
|
||||
|
||||
test("#given pre-existing files in main .sisyphus #when syncing #then preserves files not in worktree", () => {
|
||||
const mainSisyphus = join(MAIN_REPO, ".sisyphus", "rules")
|
||||
mkdirSync(mainSisyphus, { recursive: true })
|
||||
writeFileSync(join(mainSisyphus, "my-rule.md"), "existing rule")
|
||||
|
||||
const worktreeSisyphus = join(WORKTREE, ".sisyphus")
|
||||
mkdirSync(worktreeSisyphus, { recursive: true })
|
||||
writeFileSync(join(worktreeSisyphus, "boulder.json"), '{"plan_name":"new"}')
|
||||
|
||||
const result = syncSisyphusStateFromWorktree(WORKTREE, MAIN_REPO)
|
||||
|
||||
expect(result).toBe(true)
|
||||
expect(readFileSync(join(MAIN_REPO, ".sisyphus", "rules", "my-rule.md"), "utf-8")).toBe("existing rule")
|
||||
expect(existsSync(join(MAIN_REPO, ".sisyphus", "boulder.json"))).toBe(true)
|
||||
})
|
||||
})
|
||||
34
src/features/boulder-state/worktree-sync.ts
Normal file
34
src/features/boulder-state/worktree-sync.ts
Normal file
@@ -0,0 +1,34 @@
|
||||
import { existsSync, cpSync, mkdirSync } from "node:fs"
|
||||
import { join } from "node:path"
|
||||
import { BOULDER_DIR } from "./constants"
|
||||
import { log } from "../../shared/logger"
|
||||
|
||||
export function syncSisyphusStateFromWorktree(worktreePath: string, mainRepoPath: string): boolean {
|
||||
const srcDir = join(worktreePath, BOULDER_DIR)
|
||||
const destDir = join(mainRepoPath, BOULDER_DIR)
|
||||
|
||||
if (!existsSync(srcDir)) {
|
||||
log("[worktree-sync] No .sisyphus directory in worktree, nothing to sync", { worktreePath })
|
||||
return true
|
||||
}
|
||||
|
||||
try {
|
||||
if (!existsSync(destDir)) {
|
||||
mkdirSync(destDir, { recursive: true })
|
||||
}
|
||||
|
||||
cpSync(srcDir, destDir, { recursive: true, force: true })
|
||||
log("[worktree-sync] Synced .sisyphus state from worktree to main repo", {
|
||||
worktreePath,
|
||||
mainRepoPath,
|
||||
})
|
||||
return true
|
||||
} catch (err) {
|
||||
log("[worktree-sync] Failed to sync .sisyphus state", {
|
||||
worktreePath,
|
||||
mainRepoPath,
|
||||
error: String(err),
|
||||
})
|
||||
return false
|
||||
}
|
||||
}
|
||||
@@ -115,9 +115,14 @@ Register these as task/todo items so progress is tracked and visible throughout
|
||||
|
||||
When working in a worktree (\`worktree_path\` is set in boulder.json) and ALL plan tasks are complete:
|
||||
1. Commit all remaining changes in the worktree
|
||||
2. Switch to the main working directory (the original repo, NOT the worktree)
|
||||
3. Merge the worktree branch into the current branch: \`git merge <worktree-branch>\`
|
||||
4. If merge succeeds, clean up: \`git worktree remove <worktree-path>\`
|
||||
5. Remove the boulder.json state
|
||||
2. **Sync .sisyphus state back**: Copy \`.sisyphus/\` from the worktree to the main repo before removal.
|
||||
This is CRITICAL when \`.sisyphus/\` is gitignored — state written during worktree execution would otherwise be lost.
|
||||
\`\`\`bash
|
||||
cp -r <worktree-path>/.sisyphus/* <main-repo>/.sisyphus/ 2>/dev/null || true
|
||||
\`\`\`
|
||||
3. Switch to the main working directory (the original repo, NOT the worktree)
|
||||
4. Merge the worktree branch into the current branch: \`git merge <worktree-branch>\`
|
||||
5. If merge succeeds, clean up: \`git worktree remove <worktree-path>\`
|
||||
6. Remove the boulder.json state
|
||||
|
||||
This is the DEFAULT behavior when \`--worktree\` was used. Skip merge only if the user explicitly instructs otherwise (e.g., asks to create a PR instead).`
|
||||
|
||||
@@ -10,6 +10,7 @@ import type {
|
||||
} from "./types"
|
||||
import { transformMcpServer } from "./transformer"
|
||||
import { log } from "../../shared/logger"
|
||||
import { shouldLoadMcpServer } from "./scope-filter"
|
||||
|
||||
interface McpConfigPath {
|
||||
path: string
|
||||
@@ -75,6 +76,7 @@ export async function loadMcpConfigs(
|
||||
const loadedServers: LoadedMcpServer[] = []
|
||||
const paths = getMcpConfigPaths()
|
||||
const disabledSet = new Set(disabledMcps)
|
||||
const cwd = process.cwd()
|
||||
|
||||
for (const { path, scope } of paths) {
|
||||
const config = await loadMcpConfigFile(path)
|
||||
@@ -86,6 +88,15 @@ export async function loadMcpConfigs(
|
||||
continue
|
||||
}
|
||||
|
||||
if (!shouldLoadMcpServer(serverConfig, cwd)) {
|
||||
log(`Skipping MCP server "${name}" because local scope does not match cwd`, {
|
||||
path,
|
||||
projectPath: serverConfig.projectPath,
|
||||
cwd,
|
||||
})
|
||||
continue
|
||||
}
|
||||
|
||||
if (serverConfig.disabled) {
|
||||
log(`Disabling MCP server "${name}"`, { path })
|
||||
delete servers[name]
|
||||
|
||||
28
src/features/claude-code-mcp-loader/scope-filter.ts
Normal file
28
src/features/claude-code-mcp-loader/scope-filter.ts
Normal file
@@ -0,0 +1,28 @@
|
||||
import { existsSync, realpathSync } from "fs"
|
||||
import { resolve } from "path"
|
||||
import type { ClaudeCodeMcpServer } from "./types"
|
||||
|
||||
function normalizePath(path: string): string {
|
||||
const resolvedPath = resolve(path)
|
||||
|
||||
if (!existsSync(resolvedPath)) {
|
||||
return resolvedPath
|
||||
}
|
||||
|
||||
return realpathSync(resolvedPath)
|
||||
}
|
||||
|
||||
export function shouldLoadMcpServer(
|
||||
server: Pick<ClaudeCodeMcpServer, "scope" | "projectPath">,
|
||||
cwd = process.cwd()
|
||||
): boolean {
|
||||
if (server.scope !== "local") {
|
||||
return true
|
||||
}
|
||||
|
||||
if (!server.projectPath) {
|
||||
return false
|
||||
}
|
||||
|
||||
return normalizePath(server.projectPath) === normalizePath(cwd)
|
||||
}
|
||||
82
src/features/claude-code-mcp-loader/scope-filtering.test.ts
Normal file
82
src/features/claude-code-mcp-loader/scope-filtering.test.ts
Normal file
@@ -0,0 +1,82 @@
|
||||
import { afterEach, beforeEach, describe, expect, it, mock } from "bun:test"
|
||||
import { mkdirSync, rmSync, writeFileSync } from "fs"
|
||||
import { tmpdir } from "os"
|
||||
import { join } from "path"
|
||||
|
||||
const TEST_DIR = join(tmpdir(), `mcp-scope-filtering-test-${Date.now()}`)
|
||||
const TEST_HOME = join(TEST_DIR, "home")
|
||||
|
||||
describe("loadMcpConfigs", () => {
|
||||
beforeEach(() => {
|
||||
mkdirSync(TEST_DIR, { recursive: true })
|
||||
mkdirSync(TEST_HOME, { recursive: true })
|
||||
mock.module("os", () => ({
|
||||
homedir: () => TEST_HOME,
|
||||
tmpdir,
|
||||
}))
|
||||
mock.module("../../shared", () => ({
|
||||
getClaudeConfigDir: () => join(TEST_HOME, ".claude"),
|
||||
}))
|
||||
mock.module("../../shared/logger", () => ({
|
||||
log: () => {},
|
||||
}))
|
||||
})
|
||||
|
||||
afterEach(() => {
|
||||
mock.restore()
|
||||
rmSync(TEST_DIR, { recursive: true, force: true })
|
||||
})
|
||||
|
||||
describe("#given user-scoped MCP entries with local scope metadata", () => {
|
||||
it("#when loading configs #then only servers matching the current project path are loaded", async () => {
|
||||
writeFileSync(
|
||||
join(TEST_HOME, ".claude.json"),
|
||||
JSON.stringify({
|
||||
mcpServers: {
|
||||
globalServer: {
|
||||
command: "npx",
|
||||
args: ["global-server"],
|
||||
},
|
||||
matchingLocal: {
|
||||
command: "npx",
|
||||
args: ["matching-local"],
|
||||
scope: "local",
|
||||
projectPath: TEST_DIR,
|
||||
},
|
||||
nonMatchingLocal: {
|
||||
command: "npx",
|
||||
args: ["non-matching-local"],
|
||||
scope: "local",
|
||||
projectPath: join(TEST_DIR, "other-project"),
|
||||
},
|
||||
missingProjectPath: {
|
||||
command: "npx",
|
||||
args: ["missing-project-path"],
|
||||
scope: "local",
|
||||
},
|
||||
},
|
||||
})
|
||||
)
|
||||
|
||||
const originalCwd = process.cwd()
|
||||
process.chdir(TEST_DIR)
|
||||
|
||||
try {
|
||||
const { loadMcpConfigs } = await import("./loader")
|
||||
const result = await loadMcpConfigs()
|
||||
|
||||
expect(result.servers).toHaveProperty("globalServer")
|
||||
expect(result.servers).toHaveProperty("matchingLocal")
|
||||
expect(result.servers).not.toHaveProperty("nonMatchingLocal")
|
||||
expect(result.servers).not.toHaveProperty("missingProjectPath")
|
||||
|
||||
expect(result.loadedServers.map((server) => server.name)).toEqual([
|
||||
"globalServer",
|
||||
"matchingLocal",
|
||||
])
|
||||
} finally {
|
||||
process.chdir(originalCwd)
|
||||
}
|
||||
})
|
||||
})
|
||||
})
|
||||
29
src/features/claude-code-mcp-loader/transformer.test.ts
Normal file
29
src/features/claude-code-mcp-loader/transformer.test.ts
Normal file
@@ -0,0 +1,29 @@
|
||||
import { describe, expect, it } from "bun:test"
|
||||
import { transformMcpServer } from "./transformer"
|
||||
|
||||
describe("transformMcpServer", () => {
|
||||
describe("#given a remote MCP server with oauth config", () => {
|
||||
it("#when transforming the server #then preserves oauth on the remote config", () => {
|
||||
const transformed = transformMcpServer("remote-oauth", {
|
||||
type: "http",
|
||||
url: "https://mcp.example.com",
|
||||
headers: { Authorization: "Bearer test" },
|
||||
oauth: {
|
||||
clientId: "client-id",
|
||||
scopes: ["read", "write"],
|
||||
},
|
||||
})
|
||||
|
||||
expect(transformed).toEqual({
|
||||
type: "remote",
|
||||
url: "https://mcp.example.com",
|
||||
headers: { Authorization: "Bearer test" },
|
||||
oauth: {
|
||||
clientId: "client-id",
|
||||
scopes: ["read", "write"],
|
||||
},
|
||||
enabled: true,
|
||||
})
|
||||
})
|
||||
})
|
||||
})
|
||||
@@ -30,6 +30,10 @@ export function transformMcpServer(
|
||||
config.headers = expanded.headers
|
||||
}
|
||||
|
||||
if (expanded.oauth && Object.keys(expanded.oauth).length > 0) {
|
||||
config.oauth = expanded.oauth
|
||||
}
|
||||
|
||||
return config
|
||||
}
|
||||
|
||||
|
||||
@@ -1,5 +1,10 @@
|
||||
export type McpScope = "user" | "project" | "local"
|
||||
|
||||
export interface McpOAuthConfig {
|
||||
clientId?: string
|
||||
scopes?: string[]
|
||||
}
|
||||
|
||||
export interface ClaudeCodeMcpServer {
|
||||
type?: "http" | "sse" | "stdio"
|
||||
url?: string
|
||||
@@ -7,10 +12,9 @@ export interface ClaudeCodeMcpServer {
|
||||
args?: string[]
|
||||
env?: Record<string, string>
|
||||
headers?: Record<string, string>
|
||||
oauth?: {
|
||||
clientId?: string
|
||||
scopes?: string[]
|
||||
}
|
||||
oauth?: McpOAuthConfig
|
||||
scope?: McpScope
|
||||
projectPath?: string
|
||||
disabled?: boolean
|
||||
}
|
||||
|
||||
@@ -29,6 +33,7 @@ export interface McpRemoteConfig {
|
||||
type: "remote"
|
||||
url: string
|
||||
headers?: Record<string, string>
|
||||
oauth?: McpOAuthConfig
|
||||
enabled?: boolean
|
||||
}
|
||||
|
||||
|
||||
@@ -0,0 +1,76 @@
|
||||
import { afterEach, beforeEach, describe, expect, it, mock } from "bun:test"
|
||||
import { mkdirSync, rmSync, writeFileSync } from "fs"
|
||||
import { tmpdir } from "os"
|
||||
import { join } from "path"
|
||||
import type { LoadedPlugin } from "./types"
|
||||
|
||||
const TEST_DIR = join(tmpdir(), `plugin-mcp-loader-test-${Date.now()}`)
|
||||
const PROJECT_DIR = join(TEST_DIR, "project")
|
||||
const PLUGIN_DIR = join(TEST_DIR, "plugin")
|
||||
const MCP_CONFIG_PATH = join(PLUGIN_DIR, "mcp.json")
|
||||
|
||||
describe("loadPluginMcpServers", () => {
|
||||
beforeEach(() => {
|
||||
mkdirSync(PROJECT_DIR, { recursive: true })
|
||||
mkdirSync(PLUGIN_DIR, { recursive: true })
|
||||
mock.module("../../shared/logger", () => ({
|
||||
log: () => {},
|
||||
}))
|
||||
})
|
||||
|
||||
afterEach(() => {
|
||||
mock.restore()
|
||||
rmSync(TEST_DIR, { recursive: true, force: true })
|
||||
})
|
||||
|
||||
describe("#given plugin MCP entries with local scope metadata", () => {
|
||||
it("#when loading plugin MCP servers #then only entries matching the current cwd are included", async () => {
|
||||
writeFileSync(
|
||||
MCP_CONFIG_PATH,
|
||||
JSON.stringify({
|
||||
mcpServers: {
|
||||
globalServer: {
|
||||
command: "npx",
|
||||
args: ["global-plugin-server"],
|
||||
},
|
||||
matchingLocal: {
|
||||
command: "npx",
|
||||
args: ["matching-plugin-local"],
|
||||
scope: "local",
|
||||
projectPath: PROJECT_DIR,
|
||||
},
|
||||
nonMatchingLocal: {
|
||||
command: "npx",
|
||||
args: ["non-matching-plugin-local"],
|
||||
scope: "local",
|
||||
projectPath: join(PROJECT_DIR, "other-project"),
|
||||
},
|
||||
},
|
||||
})
|
||||
)
|
||||
|
||||
const plugin: LoadedPlugin = {
|
||||
name: "demo-plugin",
|
||||
version: "1.0.0",
|
||||
scope: "project",
|
||||
installPath: PLUGIN_DIR,
|
||||
pluginKey: "demo-plugin@test",
|
||||
mcpPath: MCP_CONFIG_PATH,
|
||||
}
|
||||
|
||||
const originalCwd = process.cwd()
|
||||
process.chdir(PROJECT_DIR)
|
||||
|
||||
try {
|
||||
const { loadPluginMcpServers } = await import("./mcp-server-loader")
|
||||
const servers = await loadPluginMcpServers([plugin])
|
||||
|
||||
expect(servers).toHaveProperty("demo-plugin:globalServer")
|
||||
expect(servers).toHaveProperty("demo-plugin:matchingLocal")
|
||||
expect(servers).not.toHaveProperty("demo-plugin:nonMatchingLocal")
|
||||
} finally {
|
||||
process.chdir(originalCwd)
|
||||
}
|
||||
})
|
||||
})
|
||||
})
|
||||
@@ -1,6 +1,7 @@
|
||||
import { existsSync } from "fs"
|
||||
import type { McpServerConfig } from "../claude-code-mcp-loader/types"
|
||||
import { expandEnvVarsInObject } from "../claude-code-mcp-loader/env-expander"
|
||||
import { shouldLoadMcpServer } from "../claude-code-mcp-loader/scope-filter"
|
||||
import { transformMcpServer } from "../claude-code-mcp-loader/transformer"
|
||||
import type { ClaudeCodeMcpConfig } from "../claude-code-mcp-loader/types"
|
||||
import { log } from "../../shared/logger"
|
||||
@@ -11,6 +12,7 @@ export async function loadPluginMcpServers(
|
||||
plugins: LoadedPlugin[],
|
||||
): Promise<Record<string, McpServerConfig>> {
|
||||
const servers: Record<string, McpServerConfig> = {}
|
||||
const cwd = process.cwd()
|
||||
|
||||
for (const plugin of plugins) {
|
||||
if (!plugin.mcpPath || !existsSync(plugin.mcpPath)) continue
|
||||
@@ -25,6 +27,15 @@ export async function loadPluginMcpServers(
|
||||
if (!config.mcpServers) continue
|
||||
|
||||
for (const [name, serverConfig] of Object.entries(config.mcpServers)) {
|
||||
if (!shouldLoadMcpServer(serverConfig, cwd)) {
|
||||
log(`Skipping local plugin MCP server "${name}" outside current cwd`, {
|
||||
path: plugin.mcpPath,
|
||||
projectPath: serverConfig.projectPath,
|
||||
cwd,
|
||||
})
|
||||
continue
|
||||
}
|
||||
|
||||
if (serverConfig.disabled) {
|
||||
log(`Skipping disabled MCP server "${name}" from plugin ${plugin.name}`)
|
||||
continue
|
||||
|
||||
@@ -11,12 +11,23 @@ export function getMainSessionID(): string | undefined {
|
||||
return _mainSessionID
|
||||
}
|
||||
|
||||
const registeredAgentNames = new Set<string>()
|
||||
|
||||
export function registerAgentName(name: string): void {
|
||||
registeredAgentNames.add(name.toLowerCase())
|
||||
}
|
||||
|
||||
export function isAgentRegistered(name: string): boolean {
|
||||
return registeredAgentNames.has(name.toLowerCase())
|
||||
}
|
||||
|
||||
/** @internal For testing only */
|
||||
export function _resetForTesting(): void {
|
||||
_mainSessionID = undefined
|
||||
subagentSessions.clear()
|
||||
syncSubagentSessions.clear()
|
||||
sessionAgentMap.clear()
|
||||
registeredAgentNames.clear()
|
||||
}
|
||||
|
||||
const sessionAgentMap = new Map<string, string>()
|
||||
|
||||
@@ -3,6 +3,10 @@ import { parseFrontmatter } from "../../shared/frontmatter"
|
||||
import type { LoadedSkill } from "./types"
|
||||
|
||||
export function extractSkillTemplate(skill: LoadedSkill): string {
|
||||
if (skill.scope === "config" && skill.definition.template) {
|
||||
return skill.definition.template
|
||||
}
|
||||
|
||||
if (skill.path) {
|
||||
const content = readFileSync(skill.path, "utf-8")
|
||||
const { body } = parseFrontmatter(content)
|
||||
|
||||
@@ -34,9 +34,9 @@ RECOMMENDED: Use task with explore/librarian agents for better results:
|
||||
|
||||
\`\`\`
|
||||
// Parallel exploration - fire multiple agents simultaneously
|
||||
task(agent="explore", prompt="Find all files matching pattern X")
|
||||
task(agent="explore", prompt="Search for implementation of Y")
|
||||
task(agent="librarian", prompt="Lookup documentation for Z")
|
||||
task(subagent_type="explore", load_skills=[], prompt="Find all files matching pattern X")
|
||||
task(subagent_type="explore", load_skills=[], prompt="Search for implementation of Y")
|
||||
task(subagent_type="librarian", load_skills=[], prompt="Lookup documentation for Z")
|
||||
|
||||
// Then continue your work while they run in background
|
||||
// System will notify you when each completes
|
||||
|
||||
@@ -1,4 +1,4 @@
|
||||
import { replaceEmptyTextPartsAsync } from "../session-recovery/storage/empty-text"
|
||||
import { replaceEmptyTextPartsAsync, findMessagesWithEmptyTextPartsFromSDK } from "../session-recovery/storage/empty-text"
|
||||
import { injectTextPartAsync } from "../session-recovery/storage/text-part-injector"
|
||||
import type { Client } from "./client"
|
||||
|
||||
@@ -157,11 +157,19 @@ export async function fixEmptyMessagesWithSDK(params: {
|
||||
}
|
||||
|
||||
const emptyMessageIds = await findEmptyMessagesFromSDK(params.client, params.sessionID)
|
||||
if (emptyMessageIds.length === 0) {
|
||||
|
||||
// Also find messages with empty text parts alongside non-empty content (e.g., tool calls).
|
||||
// messageHasContentFromSDK returns true for these since they have tool parts,
|
||||
// but the API still rejects the empty text block.
|
||||
const emptyTextPartIds = await findMessagesWithEmptyTextPartsFromSDK(params.client, params.sessionID)
|
||||
const additionalIds = emptyTextPartIds.filter((id) => !emptyMessageIds.includes(id))
|
||||
const allTargetIds = [...emptyMessageIds, ...additionalIds]
|
||||
|
||||
if (allTargetIds.length === 0) {
|
||||
return { fixed: false, fixedMessageIds: [], scannedEmptyCount: 0 }
|
||||
}
|
||||
|
||||
for (const messageID of emptyMessageIds) {
|
||||
for (const messageID of allTargetIds) {
|
||||
const replaced = await replaceEmptyTextPartsAsync(
|
||||
params.client,
|
||||
params.sessionID,
|
||||
@@ -187,5 +195,5 @@ export async function fixEmptyMessagesWithSDK(params: {
|
||||
}
|
||||
}
|
||||
|
||||
return { fixed, fixedMessageIds, scannedEmptyCount: emptyMessageIds.length }
|
||||
return { fixed, fixedMessageIds, scannedEmptyCount: allTargetIds.length }
|
||||
}
|
||||
|
||||
@@ -1,6 +1,7 @@
|
||||
import {
|
||||
findEmptyMessages,
|
||||
findEmptyMessageByIndex,
|
||||
findMessagesWithEmptyTextParts,
|
||||
injectTextPart,
|
||||
replaceEmptyTextParts,
|
||||
} from "../session-recovery/storage"
|
||||
@@ -79,7 +80,9 @@ export async function fixEmptyMessages(params: {
|
||||
|
||||
if (!fixed) {
|
||||
const emptyMessageIds = findEmptyMessages(params.sessionID)
|
||||
if (emptyMessageIds.length === 0) {
|
||||
const emptyTextPartIds = findMessagesWithEmptyTextParts(params.sessionID)
|
||||
const allIds = [...new Set([...emptyMessageIds, ...emptyTextPartIds])]
|
||||
if (allIds.length === 0) {
|
||||
await params.client.tui
|
||||
.showToast({
|
||||
body: {
|
||||
@@ -93,7 +96,7 @@ export async function fixEmptyMessages(params: {
|
||||
return false
|
||||
}
|
||||
|
||||
for (const messageID of emptyMessageIds) {
|
||||
for (const messageID of allIds) {
|
||||
const replaced = replaceEmptyTextParts(messageID, PLACEHOLDER_TEXT)
|
||||
if (replaced) {
|
||||
fixed = true
|
||||
|
||||
@@ -0,0 +1,105 @@
|
||||
import { afterAll, beforeEach, describe, expect, mock, test } from "bun:test"
|
||||
|
||||
const replaceEmptyTextPartsAsync = mock(() => Promise.resolve(false))
|
||||
const injectTextPartAsync = mock(() => Promise.resolve(false))
|
||||
const findMessagesWithEmptyTextPartsFromSDK = mock(() => Promise.resolve([] as string[]))
|
||||
|
||||
mock.module("../../shared", () => ({
|
||||
normalizeSDKResponse: (response: { data?: unknown[] }) => response.data ?? [],
|
||||
}))
|
||||
|
||||
mock.module("../../shared/logger", () => ({
|
||||
log: () => {},
|
||||
}))
|
||||
|
||||
mock.module("../../shared/opencode-storage-detection", () => ({
|
||||
isSqliteBackend: () => true,
|
||||
}))
|
||||
|
||||
mock.module("../session-recovery/storage", () => ({
|
||||
findEmptyMessages: () => [],
|
||||
findMessagesWithEmptyTextParts: () => [],
|
||||
injectTextPart: () => false,
|
||||
replaceEmptyTextParts: () => false,
|
||||
}))
|
||||
|
||||
mock.module("../session-recovery/storage/empty-text", () => ({
|
||||
replaceEmptyTextPartsAsync,
|
||||
findMessagesWithEmptyTextPartsFromSDK,
|
||||
}))
|
||||
|
||||
mock.module("../session-recovery/storage/text-part-injector", () => ({
|
||||
injectTextPartAsync,
|
||||
}))
|
||||
|
||||
async function importFreshMessageBuilder(): Promise<typeof import("./message-builder")> {
|
||||
return import(`./message-builder?test=${Date.now()}-${Math.random()}`)
|
||||
}
|
||||
|
||||
afterAll(() => {
|
||||
mock.restore()
|
||||
})
|
||||
|
||||
describe("sanitizeEmptyMessagesBeforeSummarize", () => {
|
||||
beforeEach(() => {
|
||||
replaceEmptyTextPartsAsync.mockReset()
|
||||
replaceEmptyTextPartsAsync.mockResolvedValue(false)
|
||||
injectTextPartAsync.mockReset()
|
||||
injectTextPartAsync.mockResolvedValue(false)
|
||||
findMessagesWithEmptyTextPartsFromSDK.mockReset()
|
||||
findMessagesWithEmptyTextPartsFromSDK.mockResolvedValue([])
|
||||
})
|
||||
|
||||
test("#given sqlite message with tool content and empty text part #when sanitizing #then it fixes the mixed-content message", async () => {
|
||||
const { sanitizeEmptyMessagesBeforeSummarize, PLACEHOLDER_TEXT } = await importFreshMessageBuilder()
|
||||
const client = {
|
||||
session: {
|
||||
messages: mock(() => Promise.resolve({
|
||||
data: [
|
||||
{
|
||||
info: { id: "msg-1" },
|
||||
parts: [
|
||||
{ type: "tool_result", text: "done" },
|
||||
{ type: "text", text: "" },
|
||||
],
|
||||
},
|
||||
],
|
||||
})),
|
||||
},
|
||||
} as never
|
||||
findMessagesWithEmptyTextPartsFromSDK.mockResolvedValue(["msg-1"])
|
||||
replaceEmptyTextPartsAsync.mockResolvedValue(true)
|
||||
|
||||
const fixedCount = await sanitizeEmptyMessagesBeforeSummarize("ses-1", client)
|
||||
|
||||
expect(fixedCount).toBe(1)
|
||||
expect(replaceEmptyTextPartsAsync).toHaveBeenCalledWith(client, "ses-1", "msg-1", PLACEHOLDER_TEXT)
|
||||
expect(injectTextPartAsync).not.toHaveBeenCalled()
|
||||
})
|
||||
|
||||
test("#given sqlite message with mixed content and failed replacement #when sanitizing #then it injects the placeholder text part", async () => {
|
||||
const { sanitizeEmptyMessagesBeforeSummarize, PLACEHOLDER_TEXT } = await importFreshMessageBuilder()
|
||||
const client = {
|
||||
session: {
|
||||
messages: mock(() => Promise.resolve({
|
||||
data: [
|
||||
{
|
||||
info: { id: "msg-2" },
|
||||
parts: [
|
||||
{ type: "tool_use", text: "call" },
|
||||
{ type: "text", text: "" },
|
||||
],
|
||||
},
|
||||
],
|
||||
})),
|
||||
},
|
||||
} as never
|
||||
findMessagesWithEmptyTextPartsFromSDK.mockResolvedValue(["msg-2"])
|
||||
injectTextPartAsync.mockResolvedValue(true)
|
||||
|
||||
const fixedCount = await sanitizeEmptyMessagesBeforeSummarize("ses-2", client)
|
||||
|
||||
expect(fixedCount).toBe(1)
|
||||
expect(injectTextPartAsync).toHaveBeenCalledWith(client, "ses-2", "msg-2", PLACEHOLDER_TEXT)
|
||||
})
|
||||
})
|
||||
@@ -4,10 +4,11 @@ import { normalizeSDKResponse } from "../../shared"
|
||||
import { isSqliteBackend } from "../../shared/opencode-storage-detection"
|
||||
import {
|
||||
findEmptyMessages,
|
||||
findMessagesWithEmptyTextParts,
|
||||
injectTextPart,
|
||||
replaceEmptyTextParts,
|
||||
} from "../session-recovery/storage"
|
||||
import { replaceEmptyTextPartsAsync } from "../session-recovery/storage/empty-text"
|
||||
import { findMessagesWithEmptyTextPartsFromSDK, replaceEmptyTextPartsAsync } from "../session-recovery/storage/empty-text"
|
||||
import { injectTextPartAsync } from "../session-recovery/storage/text-part-injector"
|
||||
import type { Client } from "./client"
|
||||
|
||||
@@ -85,12 +86,14 @@ export async function sanitizeEmptyMessagesBeforeSummarize(
|
||||
): Promise<number> {
|
||||
if (client && isSqliteBackend()) {
|
||||
const emptyMessageIds = await findEmptyMessageIdsFromSDK(client, sessionID)
|
||||
if (emptyMessageIds.length === 0) {
|
||||
const emptyTextPartIds = await findMessagesWithEmptyTextPartsFromSDK(client, sessionID)
|
||||
const allIds = [...new Set([...emptyMessageIds, ...emptyTextPartIds])]
|
||||
if (allIds.length === 0) {
|
||||
return 0
|
||||
}
|
||||
|
||||
let fixedCount = 0
|
||||
for (const messageID of emptyMessageIds) {
|
||||
for (const messageID of allIds) {
|
||||
const replaced = await replaceEmptyTextPartsAsync(client, sessionID, messageID, PLACEHOLDER_TEXT)
|
||||
if (replaced) {
|
||||
fixedCount++
|
||||
@@ -106,7 +109,7 @@ export async function sanitizeEmptyMessagesBeforeSummarize(
|
||||
log("[auto-compact] pre-summarize sanitization fixed empty messages", {
|
||||
sessionID,
|
||||
fixedCount,
|
||||
totalEmpty: emptyMessageIds.length,
|
||||
totalEmpty: allIds.length,
|
||||
})
|
||||
}
|
||||
|
||||
@@ -114,12 +117,14 @@ export async function sanitizeEmptyMessagesBeforeSummarize(
|
||||
}
|
||||
|
||||
const emptyMessageIds = findEmptyMessages(sessionID)
|
||||
if (emptyMessageIds.length === 0) {
|
||||
const emptyTextPartIds = findMessagesWithEmptyTextParts(sessionID)
|
||||
const allIds = [...new Set([...emptyMessageIds, ...emptyTextPartIds])]
|
||||
if (allIds.length === 0) {
|
||||
return 0
|
||||
}
|
||||
|
||||
let fixedCount = 0
|
||||
for (const messageID of emptyMessageIds) {
|
||||
for (const messageID of allIds) {
|
||||
const replaced = replaceEmptyTextParts(messageID, PLACEHOLDER_TEXT)
|
||||
if (replaced) {
|
||||
fixedCount++
|
||||
@@ -135,7 +140,7 @@ export async function sanitizeEmptyMessagesBeforeSummarize(
|
||||
log("[auto-compact] pre-summarize sanitization fixed empty messages", {
|
||||
sessionID,
|
||||
fixedCount,
|
||||
totalEmpty: emptyMessageIds.length,
|
||||
totalEmpty: allIds.length,
|
||||
})
|
||||
}
|
||||
|
||||
@@ -153,7 +158,7 @@ export async function getLastAssistant(
|
||||
// eslint-disable-next-line @typescript-eslint/no-explicit-any
|
||||
client: any,
|
||||
directory: string,
|
||||
): Promise<Record<string, unknown> | null> {
|
||||
): Promise<{ info: Record<string, unknown>; hasContent: boolean } | null> {
|
||||
try {
|
||||
const resp = await (client as Client).session.messages({
|
||||
path: { id: sessionID },
|
||||
@@ -170,7 +175,15 @@ export async function getLastAssistant(
|
||||
return info?.role === "assistant"
|
||||
})
|
||||
if (!last) return null
|
||||
return (last as { info?: Record<string, unknown> }).info ?? null
|
||||
|
||||
const message = last as SDKMessage & { info?: Record<string, unknown> }
|
||||
const info = message.info
|
||||
if (!info) return null
|
||||
|
||||
return {
|
||||
info,
|
||||
hasContent: messageHasContentFromSDK(message),
|
||||
}
|
||||
} catch {
|
||||
return null
|
||||
}
|
||||
|
||||
@@ -6,8 +6,11 @@ import * as originalLogger from "../../shared/logger"
|
||||
|
||||
const executeCompactMock = mock(async () => {})
|
||||
const getLastAssistantMock = mock(async () => ({
|
||||
info: {
|
||||
providerID: "anthropic",
|
||||
modelID: "claude-sonnet-4-6",
|
||||
},
|
||||
hasContent: true,
|
||||
}))
|
||||
const parseAnthropicTokenLimitErrorMock = mock(() => ({
|
||||
providerID: "anthropic",
|
||||
@@ -115,4 +118,43 @@ describe("createAnthropicContextWindowLimitRecoveryHook", () => {
|
||||
restore()
|
||||
}
|
||||
})
|
||||
|
||||
test("does not treat empty summary assistant messages as successful compaction", async () => {
|
||||
//#given
|
||||
const { restore, getClearTimeoutCalls } = setupDelayedTimeoutMocks()
|
||||
getLastAssistantMock.mockResolvedValueOnce({
|
||||
info: {
|
||||
summary: true,
|
||||
providerID: "anthropic",
|
||||
modelID: "claude-sonnet-4-6",
|
||||
},
|
||||
hasContent: false,
|
||||
})
|
||||
const { createAnthropicContextWindowLimitRecoveryHook } = await import("./recovery-hook")
|
||||
const hook = createAnthropicContextWindowLimitRecoveryHook(createMockContext())
|
||||
|
||||
try {
|
||||
//#when
|
||||
await hook.event({
|
||||
event: {
|
||||
type: "session.error",
|
||||
properties: { sessionID: "session-empty-summary", error: "prompt is too long" },
|
||||
},
|
||||
})
|
||||
|
||||
await hook.event({
|
||||
event: {
|
||||
type: "session.idle",
|
||||
properties: { sessionID: "session-empty-summary" },
|
||||
},
|
||||
})
|
||||
|
||||
//#then
|
||||
expect(getClearTimeoutCalls()).toEqual([1 as ReturnType<typeof setTimeout>])
|
||||
expect(executeCompactMock).toHaveBeenCalledTimes(1)
|
||||
expect(executeCompactMock.mock.calls[0]?.[0]).toBe("session-empty-summary")
|
||||
} finally {
|
||||
restore()
|
||||
}
|
||||
})
|
||||
})
|
||||
|
||||
@@ -72,8 +72,9 @@ export function createAnthropicContextWindowLimitRecoveryHook(
|
||||
}
|
||||
|
||||
const lastAssistant = await getLastAssistant(sessionID, ctx.client, ctx.directory)
|
||||
const providerID = parsed.providerID ?? (lastAssistant?.providerID as string | undefined)
|
||||
const modelID = parsed.modelID ?? (lastAssistant?.modelID as string | undefined)
|
||||
const lastAssistantInfo = lastAssistant?.info
|
||||
const providerID = parsed.providerID ?? (lastAssistantInfo?.providerID as string | undefined)
|
||||
const modelID = parsed.modelID ?? (lastAssistantInfo?.modelID as string | undefined)
|
||||
|
||||
await ctx.client.tui
|
||||
.showToast({
|
||||
@@ -136,14 +137,15 @@ export function createAnthropicContextWindowLimitRecoveryHook(
|
||||
|
||||
const errorData = autoCompactState.errorDataBySession.get(sessionID)
|
||||
const lastAssistant = await getLastAssistant(sessionID, ctx.client, ctx.directory)
|
||||
const lastAssistantInfo = lastAssistant?.info
|
||||
|
||||
if (lastAssistant?.summary === true) {
|
||||
if (lastAssistantInfo?.summary === true && lastAssistant?.hasContent) {
|
||||
autoCompactState.pendingCompact.delete(sessionID)
|
||||
return
|
||||
}
|
||||
|
||||
const providerID = errorData?.providerID ?? (lastAssistant?.providerID as string | undefined)
|
||||
const modelID = errorData?.modelID ?? (lastAssistant?.modelID as string | undefined)
|
||||
const providerID = errorData?.providerID ?? (lastAssistantInfo?.providerID as string | undefined)
|
||||
const modelID = errorData?.modelID ?? (lastAssistantInfo?.modelID as string | undefined)
|
||||
|
||||
await ctx.client.tui
|
||||
.showToast({
|
||||
|
||||
@@ -1,7 +1,9 @@
|
||||
import type { PluginInput } from "@opencode-ai/plugin"
|
||||
import type { BackgroundManager } from "../../features/background-agent"
|
||||
import { isAgentRegistered } from "../../features/claude-code-session-state"
|
||||
import { log } from "../../shared/logger"
|
||||
import { createInternalAgentTextPart, resolveInheritedPromptTools } from "../../shared"
|
||||
import { getAgentConfigKey } from "../../shared/agent-display-names"
|
||||
import { HOOK_NAME } from "./hook-name"
|
||||
import { BOULDER_CONTINUATION_PROMPT } from "./system-reminder-templates"
|
||||
import { resolveRecentPromptContextForSession } from "./recent-model-resolver"
|
||||
@@ -52,6 +54,15 @@ export async function injectBoulderContinuation(input: {
|
||||
`\n\n[Status: ${total - remaining}/${total} completed, ${remaining} remaining]` +
|
||||
preferredSessionContext +
|
||||
worktreeContext
|
||||
const continuationAgent = agent ?? (isAgentRegistered("atlas") ? "atlas" : undefined)
|
||||
|
||||
if (!continuationAgent || !isAgentRegistered(continuationAgent)) {
|
||||
log(`[${HOOK_NAME}] Skipped injection: continuation agent unavailable`, {
|
||||
sessionID,
|
||||
agent: continuationAgent ?? agent ?? "unknown",
|
||||
})
|
||||
return
|
||||
}
|
||||
|
||||
try {
|
||||
log(`[${HOOK_NAME}] Injecting boulder continuation`, { sessionID, planName, remaining })
|
||||
@@ -62,7 +73,7 @@ export async function injectBoulderContinuation(input: {
|
||||
await ctx.client.session.promptAsync({
|
||||
path: { id: sessionID },
|
||||
body: {
|
||||
agent: agent ?? "atlas",
|
||||
agent: getAgentConfigKey(continuationAgent),
|
||||
...(promptContext.model !== undefined ? { model: promptContext.model } : {}),
|
||||
...(inheritedTools ? { tools: inheritedTools } : {}),
|
||||
parts: [createInternalAgentTextPart(prompt)],
|
||||
|
||||
@@ -6,7 +6,7 @@ import { join } from "node:path"
|
||||
import { randomUUID } from "node:crypto"
|
||||
|
||||
import { clearBoulderState, writeBoulderState } from "../../features/boulder-state"
|
||||
import { _resetForTesting } from "../../features/claude-code-session-state"
|
||||
import { _resetForTesting, registerAgentName } from "../../features/claude-code-session-state"
|
||||
import type { BoulderState } from "../../features/boulder-state"
|
||||
|
||||
const TEST_STORAGE_ROOT = join(tmpdir(), `atlas-compaction-storage-${randomUUID()}`)
|
||||
@@ -66,6 +66,8 @@ describe("atlas hook compaction agent filtering", () => {
|
||||
mkdirSync(testDirectory, { recursive: true })
|
||||
clearBoulderState(testDirectory)
|
||||
_resetForTesting()
|
||||
registerAgentName("atlas")
|
||||
registerAgentName("sisyphus")
|
||||
})
|
||||
|
||||
afterEach(() => {
|
||||
|
||||
@@ -6,7 +6,7 @@ import { tmpdir } from "node:os"
|
||||
import { join } from "node:path"
|
||||
import { clearBoulderState, readBoulderState, writeBoulderState } from "../../features/boulder-state"
|
||||
import type { BoulderState } from "../../features/boulder-state"
|
||||
import { _resetForTesting, setSessionAgent, subagentSessions } from "../../features/claude-code-session-state"
|
||||
import { _resetForTesting, registerAgentName, setSessionAgent, subagentSessions } from "../../features/claude-code-session-state"
|
||||
|
||||
const { createAtlasHook } = await import("./index")
|
||||
|
||||
@@ -64,6 +64,8 @@ describe("atlas hook idle-event session lineage", () => {
|
||||
promptCalls = []
|
||||
clearBoulderState(testDirectory)
|
||||
_resetForTesting()
|
||||
registerAgentName("atlas")
|
||||
registerAgentName("sisyphus")
|
||||
subagentSessions.clear()
|
||||
})
|
||||
|
||||
|
||||
@@ -5,7 +5,7 @@ import {
|
||||
readBoulderState,
|
||||
readCurrentTopLevelTask,
|
||||
} from "../../features/boulder-state"
|
||||
import { getSessionAgent, subagentSessions } from "../../features/claude-code-session-state"
|
||||
import { getSessionAgent, isAgentRegistered, subagentSessions } from "../../features/claude-code-session-state"
|
||||
import { getAgentConfigKey } from "../../shared/agent-display-names"
|
||||
import { log } from "../../shared/logger"
|
||||
import { injectBoulderContinuation } from "./boulder-continuation-injector"
|
||||
@@ -141,7 +141,15 @@ export async function handleAtlasSessionIdle(input: {
|
||||
if (subagentSessions.has(sessionID)) {
|
||||
const sessionAgent = getSessionAgent(sessionID)
|
||||
const agentKey = getAgentConfigKey(sessionAgent ?? "")
|
||||
const requiredAgentKey = getAgentConfigKey(boulderState.agent ?? "atlas")
|
||||
const requiredAgentName = boulderState.agent ?? (isAgentRegistered("atlas") ? "atlas" : undefined)
|
||||
if (!requiredAgentName || !isAgentRegistered(requiredAgentName)) {
|
||||
log(`[${HOOK_NAME}] Skipped: boulder agent is unavailable for continuation`, {
|
||||
sessionID,
|
||||
requiredAgent: boulderState.agent ?? "unknown",
|
||||
})
|
||||
return
|
||||
}
|
||||
const requiredAgentKey = getAgentConfigKey(requiredAgentName)
|
||||
const agentMatches =
|
||||
agentKey === requiredAgentKey ||
|
||||
(requiredAgentKey === getAgentConfigKey("atlas") && agentKey === getAgentConfigKey("sisyphus"))
|
||||
@@ -149,7 +157,7 @@ export async function handleAtlasSessionIdle(input: {
|
||||
log(`[${HOOK_NAME}] Skipped: subagent agent does not match boulder agent`, {
|
||||
sessionID,
|
||||
agent: sessionAgent ?? "unknown",
|
||||
requiredAgent: boulderState.agent ?? "atlas",
|
||||
requiredAgent: requiredAgentName,
|
||||
})
|
||||
return
|
||||
}
|
||||
|
||||
@@ -9,7 +9,7 @@ import {
|
||||
readBoulderState,
|
||||
} from "../../features/boulder-state"
|
||||
import type { BoulderState } from "../../features/boulder-state"
|
||||
import { _resetForTesting, subagentSessions, updateSessionAgent } from "../../features/claude-code-session-state"
|
||||
import { _resetForTesting, registerAgentName, subagentSessions, updateSessionAgent } from "../../features/claude-code-session-state"
|
||||
import type { PendingTaskRef } from "./types"
|
||||
|
||||
const TEST_STORAGE_ROOT = join(tmpdir(), `atlas-message-storage-${randomUUID()}`)
|
||||
@@ -90,6 +90,9 @@ describe("atlas hook", () => {
|
||||
}
|
||||
|
||||
beforeEach(() => {
|
||||
_resetForTesting()
|
||||
registerAgentName("atlas")
|
||||
registerAgentName("sisyphus")
|
||||
TEST_DIR = join(tmpdir(), `atlas-test-${randomUUID()}`)
|
||||
SISYPHUS_DIR = join(TEST_DIR, ".sisyphus")
|
||||
if (!existsSync(TEST_DIR)) {
|
||||
@@ -102,6 +105,7 @@ describe("atlas hook", () => {
|
||||
})
|
||||
|
||||
afterEach(() => {
|
||||
_resetForTesting()
|
||||
clearBoulderState(TEST_DIR)
|
||||
if (existsSync(TEST_DIR)) {
|
||||
rmSync(TEST_DIR, { recursive: true, force: true })
|
||||
@@ -1182,6 +1186,8 @@ session_id: ses_untrusted_999
|
||||
|
||||
beforeEach(() => {
|
||||
_resetForTesting()
|
||||
registerAgentName("atlas")
|
||||
registerAgentName("sisyphus")
|
||||
subagentSessions.clear()
|
||||
setupMessageStorage(MAIN_SESSION_ID, "atlas")
|
||||
})
|
||||
|
||||
@@ -203,6 +203,7 @@ But for any substantial changes, USE \`task\`.
|
||||
\`\`\`
|
||||
task(
|
||||
category="...",
|
||||
load_skills=[],
|
||||
prompt="[specific single task with clear acceptance criteria]"
|
||||
)
|
||||
\`\`\`
|
||||
|
||||
@@ -29,7 +29,7 @@ Your completion will NOT be recorded until you complete ALL of the following:
|
||||
|
||||
If anything fails while closing this out, resume the same session immediately:
|
||||
\`\`\`typescript
|
||||
task(session_id="${sessionId}", prompt="fix: checkbox not recorded correctly")
|
||||
task(session_id="${sessionId}", load_skills=[], prompt="fix: checkbox not recorded correctly")
|
||||
\`\`\`
|
||||
|
||||
**Your completion is NOT tracked until the checkbox is marked in the plan file.**
|
||||
@@ -47,7 +47,7 @@ ${VERIFICATION_REMINDER}
|
||||
|
||||
**If ANY verification fails, use this immediately:**
|
||||
\`\`\`
|
||||
task(session_id="${sessionId}", prompt="fix: [describe the specific failure]")
|
||||
task(session_id="${sessionId}", load_skills=[], prompt="fix: [describe the specific failure]")
|
||||
\`\`\`
|
||||
|
||||
${buildReuseHint(sessionId)}`
|
||||
|
||||
44
src/hooks/bash-file-read-guard.ts
Normal file
44
src/hooks/bash-file-read-guard.ts
Normal file
@@ -0,0 +1,44 @@
|
||||
import type { Hooks } from "@opencode-ai/plugin"
|
||||
|
||||
import { log } from "../shared"
|
||||
|
||||
const WARNING_MESSAGE = "Prefer the Read tool over `cat`/`head`/`tail` for reading file contents. The Read tool provides line numbers and hash anchors for precise editing."
|
||||
|
||||
const FILE_READ_PATTERNS = [
|
||||
/^\s*cat\s+(?!-)[^\s|&;]+\s*$/,
|
||||
/^\s*head\s+(-n\s+\d+\s+)?(?!-)[^\s|&;]+\s*$/,
|
||||
/^\s*tail\s+(-n\s+\d+\s+)?(?!-)[^\s|&;]+\s*$/,
|
||||
]
|
||||
|
||||
function isSimpleFileReadCommand(command: string): boolean {
|
||||
return FILE_READ_PATTERNS.some((pattern) => pattern.test(command))
|
||||
}
|
||||
|
||||
export function createBashFileReadGuardHook(): Hooks {
|
||||
return {
|
||||
"tool.execute.before": async (
|
||||
input: { tool: string; sessionID: string; callID: string },
|
||||
output: { args: Record<string, unknown>; message?: string },
|
||||
): Promise<void> => {
|
||||
if (input.tool.toLowerCase() !== "bash") {
|
||||
return
|
||||
}
|
||||
|
||||
const command = output.args.command
|
||||
if (typeof command !== "string") {
|
||||
return
|
||||
}
|
||||
|
||||
if (!isSimpleFileReadCommand(command)) {
|
||||
return
|
||||
}
|
||||
|
||||
output.message = WARNING_MESSAGE
|
||||
|
||||
log("[bash-file-read-guard] warned on bash file read command", {
|
||||
sessionID: input.sessionID,
|
||||
command,
|
||||
})
|
||||
},
|
||||
}
|
||||
}
|
||||
@@ -48,8 +48,10 @@ export { createPreemptiveCompactionHook } from "./preemptive-compaction";
|
||||
export { createTasksTodowriteDisablerHook } from "./tasks-todowrite-disabler";
|
||||
export { createRuntimeFallbackHook, type RuntimeFallbackHook, type RuntimeFallbackOptions } from "./runtime-fallback";
|
||||
export { createWriteExistingFileGuardHook } from "./write-existing-file-guard";
|
||||
export { createBashFileReadGuardHook } from "./bash-file-read-guard";
|
||||
export { createHashlineReadEnhancerHook } from "./hashline-read-enhancer";
|
||||
export { createJsonErrorRecoveryHook, JSON_ERROR_TOOL_EXCLUDE_LIST, JSON_ERROR_PATTERNS, JSON_ERROR_REMINDER } from "./json-error-recovery";
|
||||
export { createReadImageResizerHook } from "./read-image-resizer"
|
||||
export { createTodoDescriptionOverrideHook } from "./todo-description-override"
|
||||
export { createWebFetchRedirectGuardHook } from "./webfetch-redirect-guard"
|
||||
export { createLegacyPluginToastHook } from "./legacy-plugin-toast"
|
||||
|
||||
@@ -25,7 +25,22 @@ export const KEYWORD_DETECTORS: KeywordDetector[] = [
|
||||
message: SEARCH_MESSAGE,
|
||||
},
|
||||
{
|
||||
pattern: ANALYZE_PATTERN,
|
||||
message: ANALYZE_MESSAGE,
|
||||
pattern:
|
||||
/\b(analyze|analyse|investigate|examine|research|study|deep[\s-]?dive|inspect|audit|evaluate|assess|review|diagnose|scrutinize|dissect|debug|comprehend|interpret|breakdown|understand)\b|why\s+is|how\s+does|how\s+to|분석|조사|파악|연구|검토|진단|이해|설명|원인|이유|뜯어봐|따져봐|평가|해석|디버깅|디버그|어떻게|왜|살펴|分析|調査|解析|検討|研究|診断|理解|説明|検証|精査|究明|デバッグ|なぜ|どう|仕組み|调查|检查|剖析|深入|诊断|解释|调试|为什么|原理|搞清楚|弄明白|phân tích|điều tra|nghiên cứu|kiểm tra|xem xét|chẩn đoán|giải thích|tìm hiểu|gỡ lỗi|tại sao/i,
|
||||
message: `[analyze-mode]
|
||||
ANALYSIS MODE. Gather context before diving deep:
|
||||
CONTEXT GATHERING (parallel):
|
||||
- 1-2 explore agents (codebase patterns, implementations)
|
||||
- 1-2 librarian agents (if external library involved)
|
||||
- Direct tools: Grep, AST-grep, LSP for targeted searches
|
||||
|
||||
IF COMPLEX - DO NOT STRUGGLE ALONE. Consult specialists:
|
||||
- **Oracle**: Conventional problems (architecture, debugging, complex logic)
|
||||
- **Artistry**: Non-conventional problems (different approach needed)
|
||||
|
||||
SYNTHESIZE findings before proceeding.
|
||||
---
|
||||
MANDATORY delegate_task params: ALWAYS include load_skills=[] and run_in_background when calling delegate_task.
|
||||
Example: delegate_task(subagent_type="explore", prompt="...", run_in_background=true, load_skills=[])`,
|
||||
},
|
||||
]
|
||||
|
||||
@@ -57,17 +57,20 @@ export function createKeywordDetectorHook(ctx: PluginInput, _collector?: Context
|
||||
let detectedKeywords = detectKeywordsWithType(cleanText, currentAgent, modelID)
|
||||
|
||||
if (isPlannerAgent(currentAgent)) {
|
||||
const preFilterCount = detectedKeywords.length
|
||||
detectedKeywords = detectedKeywords.filter((k) => k.type !== "ultrawork")
|
||||
if (preFilterCount > detectedKeywords.length) {
|
||||
log(`[keyword-detector] Filtered ultrawork keywords for planner agent`, { sessionID: input.sessionID, agent: currentAgent })
|
||||
}
|
||||
}
|
||||
|
||||
if (detectedKeywords.length === 0) {
|
||||
return
|
||||
}
|
||||
|
||||
// Skip keyword detection for background task sessions to prevent mode injection
|
||||
// (e.g., [analyze-mode]) which incorrectly triggers Prometheus restrictions
|
||||
const isBackgroundTaskSession = subagentSessions.has(input.sessionID)
|
||||
if (isBackgroundTaskSession) {
|
||||
log(`[keyword-detector] Skipping keyword injection for background task session`, { sessionID: input.sessionID })
|
||||
return
|
||||
}
|
||||
|
||||
|
||||
138
src/hooks/legacy-plugin-toast/auto-migrate.test.ts
Normal file
138
src/hooks/legacy-plugin-toast/auto-migrate.test.ts
Normal file
@@ -0,0 +1,138 @@
|
||||
import { afterEach, beforeEach, describe, expect, it } from "bun:test"
|
||||
import { mkdirSync, readFileSync, rmSync, writeFileSync } from "node:fs"
|
||||
import { tmpdir } from "node:os"
|
||||
import { join } from "node:path"
|
||||
async function importFreshAutoMigrateModule(): Promise<typeof import("./auto-migrate")> {
|
||||
return import(`./auto-migrate?test=${Date.now()}-${Math.random()}`)
|
||||
}
|
||||
|
||||
describe("autoMigrateLegacyPluginEntry", () => {
|
||||
let testConfigDir = ""
|
||||
|
||||
beforeEach(() => {
|
||||
testConfigDir = join(tmpdir(), `omo-legacy-migrate-${Date.now()}-${Math.random().toString(36).slice(2)}`)
|
||||
mkdirSync(testConfigDir, { recursive: true })
|
||||
})
|
||||
|
||||
afterEach(() => {
|
||||
rmSync(testConfigDir, { recursive: true, force: true })
|
||||
})
|
||||
|
||||
describe("#given opencode.json has a bare legacy plugin entry", () => {
|
||||
it("#then replaces oh-my-opencode with oh-my-openagent", async () => {
|
||||
// given
|
||||
writeFileSync(
|
||||
join(testConfigDir, "opencode.json"),
|
||||
JSON.stringify({ plugin: ["oh-my-opencode"] }, null, 2) + "\n",
|
||||
)
|
||||
|
||||
const { autoMigrateLegacyPluginEntry } = await importFreshAutoMigrateModule()
|
||||
|
||||
// when
|
||||
const result = autoMigrateLegacyPluginEntry(testConfigDir)
|
||||
|
||||
// then
|
||||
expect(result.migrated).toBe(true)
|
||||
expect(result.from).toBe("oh-my-opencode")
|
||||
expect(result.to).toBe("oh-my-openagent")
|
||||
const saved = JSON.parse(readFileSync(join(testConfigDir, "opencode.json"), "utf-8"))
|
||||
expect(saved.plugin).toEqual(["oh-my-openagent"])
|
||||
})
|
||||
})
|
||||
|
||||
describe("#given opencode.json has a version-pinned legacy entry", () => {
|
||||
it("#then preserves the version suffix", async () => {
|
||||
// given
|
||||
writeFileSync(
|
||||
join(testConfigDir, "opencode.json"),
|
||||
JSON.stringify({ plugin: ["oh-my-opencode@3.10.0"] }, null, 2) + "\n",
|
||||
)
|
||||
|
||||
const { autoMigrateLegacyPluginEntry } = await importFreshAutoMigrateModule()
|
||||
|
||||
// when
|
||||
const result = autoMigrateLegacyPluginEntry(testConfigDir)
|
||||
|
||||
// then
|
||||
expect(result.migrated).toBe(true)
|
||||
expect(result.from).toBe("oh-my-opencode@3.10.0")
|
||||
expect(result.to).toBe("oh-my-openagent@3.10.0")
|
||||
const saved = JSON.parse(readFileSync(join(testConfigDir, "opencode.json"), "utf-8"))
|
||||
expect(saved.plugin).toEqual(["oh-my-openagent@3.10.0"])
|
||||
})
|
||||
})
|
||||
|
||||
describe("#given both canonical and legacy entries exist", () => {
|
||||
it("#then removes legacy entry and keeps canonical", async () => {
|
||||
// given
|
||||
writeFileSync(
|
||||
join(testConfigDir, "opencode.json"),
|
||||
JSON.stringify({ plugin: ["oh-my-openagent", "oh-my-opencode"] }, null, 2) + "\n",
|
||||
)
|
||||
|
||||
const { autoMigrateLegacyPluginEntry } = await importFreshAutoMigrateModule()
|
||||
|
||||
// when
|
||||
const result = autoMigrateLegacyPluginEntry(testConfigDir)
|
||||
|
||||
// then
|
||||
expect(result.migrated).toBe(true)
|
||||
const saved = JSON.parse(readFileSync(join(testConfigDir, "opencode.json"), "utf-8"))
|
||||
expect(saved.plugin).toEqual(["oh-my-openagent"])
|
||||
})
|
||||
})
|
||||
|
||||
describe("#given no config file exists", () => {
|
||||
it("#then returns migrated false", async () => {
|
||||
// given - empty dir
|
||||
const { autoMigrateLegacyPluginEntry } = await importFreshAutoMigrateModule()
|
||||
|
||||
// when
|
||||
const result = autoMigrateLegacyPluginEntry(testConfigDir)
|
||||
|
||||
// then
|
||||
expect(result.migrated).toBe(false)
|
||||
expect(result.from).toBeNull()
|
||||
})
|
||||
})
|
||||
|
||||
describe("#given opencode.jsonc has comments and a legacy entry", () => {
|
||||
it("#then preserves comments and replaces entry", async () => {
|
||||
// given
|
||||
writeFileSync(
|
||||
join(testConfigDir, "opencode.jsonc"),
|
||||
'{\n // my config\n "plugin": ["oh-my-opencode"]\n}\n',
|
||||
)
|
||||
|
||||
const { autoMigrateLegacyPluginEntry } = await importFreshAutoMigrateModule()
|
||||
|
||||
// when
|
||||
const result = autoMigrateLegacyPluginEntry(testConfigDir)
|
||||
|
||||
// then
|
||||
expect(result.migrated).toBe(true)
|
||||
const content = readFileSync(join(testConfigDir, "opencode.jsonc"), "utf-8")
|
||||
expect(content).toContain("// my config")
|
||||
expect(content).toContain("oh-my-openagent")
|
||||
expect(content).not.toContain("oh-my-opencode")
|
||||
})
|
||||
})
|
||||
|
||||
describe("#given only canonical entry exists", () => {
|
||||
it("#then returns migrated false and leaves file untouched", async () => {
|
||||
// given
|
||||
const original = JSON.stringify({ plugin: ["oh-my-openagent"] }, null, 2) + "\n"
|
||||
writeFileSync(join(testConfigDir, "opencode.json"), original)
|
||||
|
||||
const { autoMigrateLegacyPluginEntry } = await importFreshAutoMigrateModule()
|
||||
|
||||
// when
|
||||
const result = autoMigrateLegacyPluginEntry(testConfigDir)
|
||||
|
||||
// then
|
||||
expect(result.migrated).toBe(false)
|
||||
const content = readFileSync(join(testConfigDir, "opencode.json"), "utf-8")
|
||||
expect(content).toBe(original)
|
||||
})
|
||||
})
|
||||
})
|
||||
90
src/hooks/legacy-plugin-toast/auto-migrate.ts
Normal file
90
src/hooks/legacy-plugin-toast/auto-migrate.ts
Normal file
@@ -0,0 +1,90 @@
|
||||
import { existsSync, readFileSync, writeFileSync } from "node:fs"
|
||||
import { join } from "node:path"
|
||||
|
||||
import { parseJsoncSafe } from "../../shared/jsonc-parser"
|
||||
import { getOpenCodeConfigPaths } from "../../shared/opencode-config-dir"
|
||||
import { LEGACY_PLUGIN_NAME, PLUGIN_NAME } from "../../shared/plugin-identity"
|
||||
|
||||
export interface MigrationResult {
|
||||
migrated: boolean
|
||||
from: string | null
|
||||
to: string | null
|
||||
configPath: string | null
|
||||
}
|
||||
|
||||
interface OpenCodeConfig {
|
||||
plugin?: string[]
|
||||
}
|
||||
|
||||
function isLegacyEntry(entry: string): boolean {
|
||||
return entry === LEGACY_PLUGIN_NAME || entry.startsWith(`${LEGACY_PLUGIN_NAME}@`)
|
||||
}
|
||||
|
||||
function isCanonicalEntry(entry: string): boolean {
|
||||
return entry === PLUGIN_NAME || entry.startsWith(`${PLUGIN_NAME}@`)
|
||||
}
|
||||
|
||||
function toLegacyCanonical(entry: string): string {
|
||||
if (entry === LEGACY_PLUGIN_NAME) return PLUGIN_NAME
|
||||
if (entry.startsWith(`${LEGACY_PLUGIN_NAME}@`)) {
|
||||
return `${PLUGIN_NAME}${entry.slice(LEGACY_PLUGIN_NAME.length)}`
|
||||
}
|
||||
return entry
|
||||
}
|
||||
|
||||
function detectOpenCodeConfigPath(overrideConfigDir?: string): string | null {
|
||||
if (overrideConfigDir) {
|
||||
const jsoncPath = join(overrideConfigDir, "opencode.jsonc")
|
||||
const jsonPath = join(overrideConfigDir, "opencode.json")
|
||||
if (existsSync(jsoncPath)) return jsoncPath
|
||||
if (existsSync(jsonPath)) return jsonPath
|
||||
return null
|
||||
}
|
||||
|
||||
const paths = getOpenCodeConfigPaths({ binary: "opencode", version: null })
|
||||
if (existsSync(paths.configJsonc)) return paths.configJsonc
|
||||
if (existsSync(paths.configJson)) return paths.configJson
|
||||
return null
|
||||
}
|
||||
|
||||
export function autoMigrateLegacyPluginEntry(overrideConfigDir?: string): MigrationResult {
|
||||
const configPath = detectOpenCodeConfigPath(overrideConfigDir)
|
||||
if (!configPath) return { migrated: false, from: null, to: null, configPath: null }
|
||||
|
||||
try {
|
||||
const content = readFileSync(configPath, "utf-8")
|
||||
const parseResult = parseJsoncSafe<OpenCodeConfig>(content)
|
||||
if (!parseResult.data?.plugin) return { migrated: false, from: null, to: null, configPath }
|
||||
|
||||
const plugins = parseResult.data.plugin
|
||||
const legacyEntries = plugins.filter(isLegacyEntry)
|
||||
if (legacyEntries.length === 0) return { migrated: false, from: null, to: null, configPath }
|
||||
|
||||
const hasCanonical = plugins.some(isCanonicalEntry)
|
||||
const from = legacyEntries[0]
|
||||
const to = toLegacyCanonical(from)
|
||||
|
||||
const normalized = hasCanonical
|
||||
? plugins.filter((p) => !isLegacyEntry(p))
|
||||
: plugins.map((p) => (isLegacyEntry(p) ? toLegacyCanonical(p) : p))
|
||||
|
||||
const isJsonc = configPath.endsWith(".jsonc")
|
||||
if (isJsonc) {
|
||||
const pluginArrayRegex = /((?:"plugin"|plugin)\s*:\s*)\[([\s\S]*?)\]/
|
||||
const match = content.match(pluginArrayRegex)
|
||||
if (match) {
|
||||
const formattedPlugins = normalized.map((p) => `"${p}"`).join(",\n ")
|
||||
const newContent = content.replace(pluginArrayRegex, `$1[\n ${formattedPlugins}\n ]`)
|
||||
writeFileSync(configPath, newContent)
|
||||
return { migrated: true, from, to, configPath }
|
||||
}
|
||||
}
|
||||
|
||||
const parsed = JSON.parse(content) as Record<string, unknown>
|
||||
parsed.plugin = normalized
|
||||
writeFileSync(configPath, JSON.stringify(parsed, null, 2) + "\n")
|
||||
return { migrated: true, from, to, configPath }
|
||||
} catch {
|
||||
return { migrated: false, from: null, to: null, configPath }
|
||||
}
|
||||
}
|
||||
206
src/hooks/legacy-plugin-toast/hook.test.ts
Normal file
206
src/hooks/legacy-plugin-toast/hook.test.ts
Normal file
@@ -0,0 +1,206 @@
|
||||
import { afterAll, beforeEach, describe, expect, it, mock } from "bun:test"
|
||||
import type { MigrationResult } from "./auto-migrate"
|
||||
|
||||
const mockCheckForLegacyPluginEntry = mock(() => ({
|
||||
hasLegacyEntry: false,
|
||||
hasCanonicalEntry: false,
|
||||
legacyEntries: [] as string[],
|
||||
}))
|
||||
|
||||
const mockAutoMigrate = mock((): MigrationResult => ({
|
||||
migrated: false,
|
||||
from: null,
|
||||
to: null,
|
||||
configPath: null,
|
||||
}))
|
||||
|
||||
// eslint-disable-next-line @typescript-eslint/no-explicit-any
|
||||
const mockShowToast = mock((_arg: any) => Promise.resolve())
|
||||
const mockLog = mock(() => {})
|
||||
|
||||
mock.module("../../shared/legacy-plugin-warning", () => ({
|
||||
checkForLegacyPluginEntry: mockCheckForLegacyPluginEntry,
|
||||
}))
|
||||
|
||||
mock.module("../../shared/logger", () => ({
|
||||
log: mockLog,
|
||||
}))
|
||||
|
||||
mock.module("./auto-migrate", () => ({
|
||||
autoMigrateLegacyPluginEntry: mockAutoMigrate,
|
||||
}))
|
||||
|
||||
afterAll(() => {
|
||||
mock.restore()
|
||||
})
|
||||
|
||||
function createMockCtx() {
|
||||
return {
|
||||
client: {
|
||||
tui: { showToast: mockShowToast },
|
||||
},
|
||||
directory: "/tmp/test",
|
||||
} as never
|
||||
}
|
||||
|
||||
function createEvent(type: string, parentID?: string) {
|
||||
return {
|
||||
event: {
|
||||
type,
|
||||
properties: parentID ? { info: { parentID } } : { info: {} },
|
||||
},
|
||||
}
|
||||
}
|
||||
|
||||
async function importFreshModule() {
|
||||
return import(`./hook?t=${Date.now()}-${Math.random()}`)
|
||||
}
|
||||
|
||||
describe("createLegacyPluginToastHook", () => {
|
||||
beforeEach(() => {
|
||||
mockCheckForLegacyPluginEntry.mockReset()
|
||||
mockAutoMigrate.mockReset()
|
||||
mockShowToast.mockReset()
|
||||
mockLog.mockReset()
|
||||
|
||||
mockCheckForLegacyPluginEntry.mockReturnValue({
|
||||
hasLegacyEntry: false,
|
||||
hasCanonicalEntry: true,
|
||||
legacyEntries: [],
|
||||
})
|
||||
mockAutoMigrate.mockReturnValue({ migrated: false, from: null, to: null, configPath: null })
|
||||
mockShowToast.mockResolvedValue(undefined)
|
||||
})
|
||||
|
||||
describe("#given no legacy entry exists", () => {
|
||||
it("#then does not show a toast", async () => {
|
||||
// given
|
||||
const { createLegacyPluginToastHook } = await importFreshModule()
|
||||
const hook = createLegacyPluginToastHook(createMockCtx())
|
||||
|
||||
// when
|
||||
await hook.event(createEvent("session.created"))
|
||||
|
||||
// then
|
||||
expect(mockShowToast).not.toHaveBeenCalled()
|
||||
})
|
||||
})
|
||||
|
||||
describe("#given legacy entry exists and migration succeeds", () => {
|
||||
it("#then shows success toast", async () => {
|
||||
// given
|
||||
mockCheckForLegacyPluginEntry.mockReturnValue({
|
||||
hasLegacyEntry: true,
|
||||
hasCanonicalEntry: false,
|
||||
legacyEntries: ["oh-my-opencode"],
|
||||
})
|
||||
mockAutoMigrate.mockReturnValue({
|
||||
migrated: true,
|
||||
from: "oh-my-opencode",
|
||||
to: "oh-my-openagent",
|
||||
configPath: "/tmp/opencode.json",
|
||||
})
|
||||
const { createLegacyPluginToastHook } = await importFreshModule()
|
||||
const hook = createLegacyPluginToastHook(createMockCtx())
|
||||
|
||||
// when
|
||||
await hook.event(createEvent("session.created"))
|
||||
|
||||
// then
|
||||
expect(mockShowToast).toHaveBeenCalledTimes(1)
|
||||
const toastArg = mockShowToast.mock.calls[0][0] as { body: { variant: string } }
|
||||
expect(toastArg.body.variant).toBe("success")
|
||||
})
|
||||
})
|
||||
|
||||
describe("#given legacy entry exists but migration fails", () => {
|
||||
it("#then shows warning toast", async () => {
|
||||
// given
|
||||
mockCheckForLegacyPluginEntry.mockReturnValue({
|
||||
hasLegacyEntry: true,
|
||||
hasCanonicalEntry: false,
|
||||
legacyEntries: ["oh-my-opencode"],
|
||||
})
|
||||
mockAutoMigrate.mockReturnValue({
|
||||
migrated: false,
|
||||
from: null,
|
||||
to: null,
|
||||
configPath: "/tmp/opencode.json",
|
||||
})
|
||||
const { createLegacyPluginToastHook } = await importFreshModule()
|
||||
const hook = createLegacyPluginToastHook(createMockCtx())
|
||||
|
||||
// when
|
||||
await hook.event(createEvent("session.created"))
|
||||
|
||||
// then
|
||||
expect(mockShowToast).toHaveBeenCalledTimes(1)
|
||||
const toastArg2 = mockShowToast.mock.calls[0][0] as { body: { variant: string } }
|
||||
expect(toastArg2.body.variant).toBe("warning")
|
||||
})
|
||||
})
|
||||
|
||||
describe("#given session.created fires twice", () => {
|
||||
it("#then only fires once (once-guard)", async () => {
|
||||
// given
|
||||
mockCheckForLegacyPluginEntry.mockReturnValue({
|
||||
hasLegacyEntry: true,
|
||||
hasCanonicalEntry: false,
|
||||
legacyEntries: ["oh-my-opencode"],
|
||||
})
|
||||
mockAutoMigrate.mockReturnValue({
|
||||
migrated: true,
|
||||
from: "oh-my-opencode",
|
||||
to: "oh-my-openagent",
|
||||
configPath: "/tmp/opencode.json",
|
||||
})
|
||||
const { createLegacyPluginToastHook } = await importFreshModule()
|
||||
const hook = createLegacyPluginToastHook(createMockCtx())
|
||||
|
||||
// when
|
||||
await hook.event(createEvent("session.created"))
|
||||
await hook.event(createEvent("session.created"))
|
||||
|
||||
// then
|
||||
expect(mockShowToast).toHaveBeenCalledTimes(1)
|
||||
})
|
||||
})
|
||||
|
||||
describe("#given a non-session.created event fires", () => {
|
||||
it("#then does nothing", async () => {
|
||||
// given
|
||||
mockCheckForLegacyPluginEntry.mockReturnValue({
|
||||
hasLegacyEntry: true,
|
||||
hasCanonicalEntry: false,
|
||||
legacyEntries: ["oh-my-opencode"],
|
||||
})
|
||||
const { createLegacyPluginToastHook } = await importFreshModule()
|
||||
const hook = createLegacyPluginToastHook(createMockCtx())
|
||||
|
||||
// when
|
||||
await hook.event(createEvent("session.deleted"))
|
||||
|
||||
// then
|
||||
expect(mockCheckForLegacyPluginEntry).not.toHaveBeenCalled()
|
||||
})
|
||||
})
|
||||
|
||||
describe("#given session.created from a subagent (has parentID)", () => {
|
||||
it("#then skips the check", async () => {
|
||||
// given
|
||||
mockCheckForLegacyPluginEntry.mockReturnValue({
|
||||
hasLegacyEntry: true,
|
||||
hasCanonicalEntry: false,
|
||||
legacyEntries: ["oh-my-opencode"],
|
||||
})
|
||||
const { createLegacyPluginToastHook } = await importFreshModule()
|
||||
const hook = createLegacyPluginToastHook(createMockCtx())
|
||||
|
||||
// when
|
||||
await hook.event(createEvent("session.created", "parent-session-id"))
|
||||
|
||||
// then
|
||||
expect(mockCheckForLegacyPluginEntry).not.toHaveBeenCalled()
|
||||
})
|
||||
})
|
||||
})
|
||||
59
src/hooks/legacy-plugin-toast/hook.ts
Normal file
59
src/hooks/legacy-plugin-toast/hook.ts
Normal file
@@ -0,0 +1,59 @@
|
||||
import type { PluginInput } from "@opencode-ai/plugin"
|
||||
|
||||
import { checkForLegacyPluginEntry } from "../../shared/legacy-plugin-warning"
|
||||
import { log } from "../../shared/logger"
|
||||
import { LEGACY_PLUGIN_NAME, PLUGIN_NAME } from "../../shared/plugin-identity"
|
||||
import { autoMigrateLegacyPluginEntry } from "./auto-migrate"
|
||||
|
||||
export function createLegacyPluginToastHook(ctx: PluginInput) {
|
||||
let fired = false
|
||||
|
||||
return {
|
||||
event: async ({ event }: { event: { type: string; properties?: unknown } }) => {
|
||||
if (event.type !== "session.created" || fired) return
|
||||
|
||||
const props = event.properties as { info?: { parentID?: string } } | undefined
|
||||
if (props?.info?.parentID) return
|
||||
|
||||
fired = true
|
||||
|
||||
const result = checkForLegacyPluginEntry()
|
||||
if (!result.hasLegacyEntry) return
|
||||
|
||||
const migration = autoMigrateLegacyPluginEntry()
|
||||
|
||||
if (migration.migrated) {
|
||||
log("[legacy-plugin-toast] Auto-migrated opencode.json plugin entry", {
|
||||
from: migration.from,
|
||||
to: migration.to,
|
||||
})
|
||||
|
||||
await ctx.client.tui
|
||||
.showToast({
|
||||
body: {
|
||||
title: "Plugin Entry Migrated",
|
||||
message: `"${migration.from}" has been renamed to "${migration.to}" in your opencode.json.\nNo action needed.`,
|
||||
variant: "success" as const,
|
||||
duration: 8000,
|
||||
},
|
||||
})
|
||||
.catch(() => {})
|
||||
} else {
|
||||
log("[legacy-plugin-toast] Legacy entry detected but migration failed", {
|
||||
legacyEntries: result.legacyEntries,
|
||||
})
|
||||
|
||||
await ctx.client.tui
|
||||
.showToast({
|
||||
body: {
|
||||
title: "Legacy Plugin Name Detected",
|
||||
message: `Update your opencode.json: "${LEGACY_PLUGIN_NAME}" has been renamed to "${PLUGIN_NAME}".\nRun: bunx ${PLUGIN_NAME} install`,
|
||||
variant: "warning" as const,
|
||||
duration: 10000,
|
||||
},
|
||||
})
|
||||
.catch(() => {})
|
||||
}
|
||||
},
|
||||
}
|
||||
}
|
||||
1
src/hooks/legacy-plugin-toast/index.ts
Normal file
1
src/hooks/legacy-plugin-toast/index.ts
Normal file
@@ -0,0 +1 @@
|
||||
export { createLegacyPluginToastHook } from "./hook"
|
||||
@@ -284,6 +284,74 @@ describe("preemptive-compaction", () => {
|
||||
})
|
||||
})
|
||||
|
||||
// #given compaction fails
|
||||
// #when tool.execute.after is called again immediately
|
||||
// #then should NOT retry due to cooldown
|
||||
it("should enforce cooldown even after failed compaction to prevent rapid retry loops", async () => {
|
||||
//#given
|
||||
const hook = createPreemptiveCompactionHook(ctx as never, {} as never)
|
||||
const sessionID = "ses_fail_cooldown"
|
||||
ctx.client.session.summarize.mockRejectedValueOnce(new Error("rate limited"))
|
||||
|
||||
await hook.event({
|
||||
event: {
|
||||
type: "message.updated",
|
||||
properties: {
|
||||
info: {
|
||||
role: "assistant",
|
||||
sessionID,
|
||||
providerID: "anthropic",
|
||||
modelID: "claude-sonnet-4-6",
|
||||
finish: true,
|
||||
tokens: {
|
||||
input: 170000,
|
||||
output: 0,
|
||||
reasoning: 0,
|
||||
cache: { read: 10000, write: 0 },
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
})
|
||||
|
||||
await hook["tool.execute.after"](
|
||||
{ tool: "bash", sessionID, callID: "call_fail" },
|
||||
{ title: "", output: "test", metadata: null }
|
||||
)
|
||||
|
||||
expect(ctx.client.session.summarize).toHaveBeenCalledTimes(1)
|
||||
|
||||
//#when - message.updated clears compactedSessions, but cooldown should still block
|
||||
await hook.event({
|
||||
event: {
|
||||
type: "message.updated",
|
||||
properties: {
|
||||
info: {
|
||||
role: "assistant",
|
||||
sessionID,
|
||||
providerID: "anthropic",
|
||||
modelID: "claude-sonnet-4-6",
|
||||
finish: true,
|
||||
tokens: {
|
||||
input: 170000,
|
||||
output: 0,
|
||||
reasoning: 0,
|
||||
cache: { read: 10000, write: 0 },
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
})
|
||||
|
||||
await hook["tool.execute.after"](
|
||||
{ tool: "bash", sessionID, callID: "call_fail_2" },
|
||||
{ title: "", output: "test", metadata: null }
|
||||
)
|
||||
|
||||
//#then - should NOT have retried
|
||||
expect(ctx.client.session.summarize).toHaveBeenCalledTimes(1)
|
||||
})
|
||||
|
||||
it("should use 1M limit when model cache flag is enabled", async () => {
|
||||
//#given
|
||||
const hook = createPreemptiveCompactionHook(ctx as never, {}, {
|
||||
@@ -399,17 +467,48 @@ describe("preemptive-compaction", () => {
|
||||
{ title: "", output: "test", metadata: null },
|
||||
)
|
||||
|
||||
//#then - first call timed out
|
||||
expect(ctx.client.session.summarize).toHaveBeenCalledTimes(1)
|
||||
expect(logMock).toHaveBeenCalledWith("[preemptive-compaction] Compaction failed", {
|
||||
sessionID,
|
||||
error: expect.stringContaining("Compaction summarize timed out"),
|
||||
})
|
||||
|
||||
//#when - advance past cooldown, clear compactedSessions via message.updated, then retry
|
||||
const originalNow = Date.now
|
||||
Date.now = () => originalNow() + 61_000
|
||||
try {
|
||||
await hook.event({
|
||||
event: {
|
||||
type: "message.updated",
|
||||
properties: {
|
||||
info: {
|
||||
role: "assistant",
|
||||
sessionID,
|
||||
providerID: "anthropic",
|
||||
modelID: "claude-sonnet-4-6",
|
||||
finish: true,
|
||||
tokens: {
|
||||
input: 170000,
|
||||
output: 0,
|
||||
reasoning: 0,
|
||||
cache: { read: 10000, write: 0 },
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
})
|
||||
|
||||
await hook["tool.execute.after"](
|
||||
{ tool: "bash", sessionID, callID: "call_timeout_2" },
|
||||
{ title: "", output: "test", metadata: null },
|
||||
)
|
||||
|
||||
//#then
|
||||
//#then - should have retried after cooldown
|
||||
expect(ctx.client.session.summarize).toHaveBeenCalledTimes(2)
|
||||
expect(logMock).toHaveBeenCalledWith("[preemptive-compaction] Compaction failed", {
|
||||
sessionID,
|
||||
error: expect.stringContaining("Compaction summarize timed out"),
|
||||
})
|
||||
} finally {
|
||||
Date.now = originalNow
|
||||
}
|
||||
} finally {
|
||||
restoreTimeouts()
|
||||
}
|
||||
@@ -451,7 +550,9 @@ describe("preemptive-compaction", () => {
|
||||
|
||||
expect(ctx.client.session.summarize).toHaveBeenCalledTimes(1)
|
||||
|
||||
// when - new message with high tokens (context grew after compaction)
|
||||
// when - advance past the 60s cooldown window, then new message with high tokens
|
||||
const originalNow = Date.now
|
||||
Date.now = () => originalNow() + 61_000
|
||||
await hook.event({
|
||||
event: {
|
||||
type: "message.updated",
|
||||
@@ -480,6 +581,7 @@ describe("preemptive-compaction", () => {
|
||||
|
||||
// then - summarize should fire again
|
||||
expect(ctx.client.session.summarize).toHaveBeenCalledTimes(2)
|
||||
Date.now = originalNow
|
||||
})
|
||||
|
||||
// #given modelContextLimitsCache has model-specific limit (256k)
|
||||
@@ -567,4 +669,43 @@ describe("preemptive-compaction", () => {
|
||||
|
||||
expect(ctx.client.session.summarize).toHaveBeenCalled()
|
||||
})
|
||||
|
||||
it("should ignore stale cached Anthropic limits for older models", async () => {
|
||||
const modelContextLimitsCache = new Map<string, number>()
|
||||
modelContextLimitsCache.set("anthropic/claude-sonnet-4-5", 500000)
|
||||
|
||||
const hook = createPreemptiveCompactionHook(ctx as never, {} as never, {
|
||||
anthropicContext1MEnabled: false,
|
||||
modelContextLimitsCache,
|
||||
})
|
||||
const sessionID = "ses_old_anthropic_limit"
|
||||
|
||||
await hook.event({
|
||||
event: {
|
||||
type: "message.updated",
|
||||
properties: {
|
||||
info: {
|
||||
role: "assistant",
|
||||
sessionID,
|
||||
providerID: "anthropic",
|
||||
modelID: "claude-sonnet-4-5",
|
||||
finish: true,
|
||||
tokens: {
|
||||
input: 170000,
|
||||
output: 0,
|
||||
reasoning: 0,
|
||||
cache: { read: 10000, write: 0 },
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
})
|
||||
|
||||
await hook["tool.execute.after"](
|
||||
{ tool: "bash", sessionID, callID: "call_1" },
|
||||
{ title: "", output: "test", metadata: null }
|
||||
)
|
||||
|
||||
expect(ctx.client.session.summarize).toHaveBeenCalled()
|
||||
})
|
||||
})
|
||||
|
||||
@@ -10,6 +10,7 @@ import { createPostCompactionDegradationMonitor } from "./preemptive-compaction-
|
||||
|
||||
const PREEMPTIVE_COMPACTION_TIMEOUT_MS = 120_000
|
||||
const PREEMPTIVE_COMPACTION_THRESHOLD = 0.78
|
||||
const PREEMPTIVE_COMPACTION_COOLDOWN_MS = 60_000
|
||||
|
||||
declare function setTimeout(handler: () => void, timeout?: number): unknown
|
||||
declare function clearTimeout(timeoutID: unknown): void
|
||||
@@ -68,6 +69,7 @@ export function createPreemptiveCompactionHook(
|
||||
) {
|
||||
const compactionInProgress = new Set<string>()
|
||||
const compactedSessions = new Set<string>()
|
||||
const lastCompactionTime = new Map<string, number>()
|
||||
const tokenCache = new Map<string, CachedCompactionState>()
|
||||
|
||||
const postCompactionMonitor = createPostCompactionDegradationMonitor({
|
||||
@@ -85,6 +87,9 @@ export function createPreemptiveCompactionHook(
|
||||
const { sessionID } = input
|
||||
if (compactedSessions.has(sessionID) || compactionInProgress.has(sessionID)) return
|
||||
|
||||
const lastTime = lastCompactionTime.get(sessionID)
|
||||
if (lastTime && Date.now() - lastTime < PREEMPTIVE_COMPACTION_COOLDOWN_MS) return
|
||||
|
||||
const cached = tokenCache.get(sessionID)
|
||||
if (!cached) return
|
||||
|
||||
@@ -107,6 +112,7 @@ export function createPreemptiveCompactionHook(
|
||||
if (usageRatio < PREEMPTIVE_COMPACTION_THRESHOLD || !cached.modelID) return
|
||||
|
||||
compactionInProgress.add(sessionID)
|
||||
lastCompactionTime.set(sessionID, Date.now())
|
||||
|
||||
try {
|
||||
const { providerID: targetProviderID, modelID: targetModelID } = resolveCompactionModel(
|
||||
@@ -142,6 +148,7 @@ export function createPreemptiveCompactionHook(
|
||||
if (sessionID) {
|
||||
compactionInProgress.delete(sessionID)
|
||||
compactedSessions.delete(sessionID)
|
||||
lastCompactionTime.delete(sessionID)
|
||||
tokenCache.delete(sessionID)
|
||||
postCompactionMonitor.clear(sessionID)
|
||||
}
|
||||
|
||||
@@ -1,7 +1,7 @@
|
||||
/// <reference types="bun-types" />
|
||||
import { describe, expect, test } from "bun:test"
|
||||
import type { PluginInput } from "@opencode-ai/plugin"
|
||||
import { detectCompletionInSessionMessages } from "./completion-promise-detector"
|
||||
import { detectCompletionInSessionMessages, detectSemanticCompletion } from "./completion-promise-detector"
|
||||
|
||||
type SessionMessage = {
|
||||
info?: { role?: string }
|
||||
@@ -110,7 +110,7 @@ describe("detectCompletionInSessionMessages", () => {
|
||||
})
|
||||
|
||||
describe("#given promise appears in tool_result part (not text part)", () => {
|
||||
test("#when Oracle returns VERIFIED via task() tool_result #then should detect completion", async () => {
|
||||
test("#when Oracle returns VERIFIED via task() tool_result #then should NOT detect completion", async () => {
|
||||
const messages: SessionMessage[] = [
|
||||
{
|
||||
info: { role: "assistant" },
|
||||
@@ -137,10 +137,10 @@ describe("detectCompletionInSessionMessages", () => {
|
||||
sinceMessageIndex: 0,
|
||||
})
|
||||
|
||||
expect(detected).toBe(true)
|
||||
expect(detected).toBe(false)
|
||||
})
|
||||
|
||||
test("#when DONE appears only in tool_result part #then should detect completion", async () => {
|
||||
test("#when DONE appears only in tool_result part #then should NOT detect completion", async () => {
|
||||
const messages: SessionMessage[] = [
|
||||
{
|
||||
info: { role: "assistant" },
|
||||
@@ -159,7 +159,7 @@ describe("detectCompletionInSessionMessages", () => {
|
||||
directory: "/tmp",
|
||||
})
|
||||
|
||||
expect(detected).toBe(true)
|
||||
expect(detected).toBe(false)
|
||||
})
|
||||
|
||||
test("#when promise appears in tool_use part (not tool_result) #then should NOT detect completion", async () => {
|
||||
@@ -184,4 +184,217 @@ describe("detectCompletionInSessionMessages", () => {
|
||||
expect(detected).toBe(false)
|
||||
})
|
||||
})
|
||||
|
||||
describe("#given semantic completion patterns", () => {
|
||||
test("#when agent says 'task is complete' without explicit promise #then should NOT detect completion", async () => {
|
||||
// #given
|
||||
const messages: SessionMessage[] = [
|
||||
{
|
||||
info: { role: "assistant" },
|
||||
parts: [{ type: "text", text: "The task is complete. All work has been finished." }],
|
||||
},
|
||||
]
|
||||
const ctx = createPluginInput(messages)
|
||||
|
||||
// #when
|
||||
const detected = await detectCompletionInSessionMessages(ctx, {
|
||||
sessionID: "session-123",
|
||||
promise: "DONE",
|
||||
apiTimeoutMs: 1000,
|
||||
directory: "/tmp",
|
||||
})
|
||||
|
||||
// #then
|
||||
expect(detected).toBe(false)
|
||||
})
|
||||
|
||||
test("#when agent says 'all items are done' without explicit promise #then should NOT detect completion", async () => {
|
||||
// #given
|
||||
const messages: SessionMessage[] = [
|
||||
{
|
||||
info: { role: "assistant" },
|
||||
parts: [{ type: "text", text: "All items are done and marked as complete." }],
|
||||
},
|
||||
]
|
||||
const ctx = createPluginInput(messages)
|
||||
|
||||
// #when
|
||||
const detected = await detectCompletionInSessionMessages(ctx, {
|
||||
sessionID: "session-123",
|
||||
promise: "DONE",
|
||||
apiTimeoutMs: 1000,
|
||||
directory: "/tmp",
|
||||
})
|
||||
|
||||
// #then
|
||||
expect(detected).toBe(false)
|
||||
})
|
||||
|
||||
test("#when agent says 'nothing left to do' without explicit promise #then should NOT detect completion", async () => {
|
||||
// #given
|
||||
const messages: SessionMessage[] = [
|
||||
{
|
||||
info: { role: "assistant" },
|
||||
parts: [{ type: "text", text: "There is nothing left to do. Everything is finished." }],
|
||||
},
|
||||
]
|
||||
const ctx = createPluginInput(messages)
|
||||
|
||||
// #when
|
||||
const detected = await detectCompletionInSessionMessages(ctx, {
|
||||
sessionID: "session-123",
|
||||
promise: "DONE",
|
||||
apiTimeoutMs: 1000,
|
||||
directory: "/tmp",
|
||||
})
|
||||
|
||||
// #then
|
||||
expect(detected).toBe(false)
|
||||
})
|
||||
|
||||
test("#when agent says 'successfully completed all' without explicit promise #then should NOT detect completion", async () => {
|
||||
// #given
|
||||
const messages: SessionMessage[] = [
|
||||
{
|
||||
info: { role: "assistant" },
|
||||
parts: [{ type: "text", text: "I have successfully completed all the required tasks." }],
|
||||
},
|
||||
]
|
||||
const ctx = createPluginInput(messages)
|
||||
|
||||
// #when
|
||||
const detected = await detectCompletionInSessionMessages(ctx, {
|
||||
sessionID: "session-123",
|
||||
promise: "DONE",
|
||||
apiTimeoutMs: 1000,
|
||||
directory: "/tmp",
|
||||
})
|
||||
|
||||
// #then
|
||||
expect(detected).toBe(false)
|
||||
})
|
||||
|
||||
test("#when promise is VERIFIED #then semantic completion should NOT trigger", async () => {
|
||||
// #given
|
||||
const messages: SessionMessage[] = [
|
||||
{
|
||||
info: { role: "assistant" },
|
||||
parts: [{ type: "text", text: "The task is complete. All work has been finished." }],
|
||||
},
|
||||
]
|
||||
const ctx = createPluginInput(messages)
|
||||
|
||||
// #when
|
||||
const detected = await detectCompletionInSessionMessages(ctx, {
|
||||
sessionID: "session-123",
|
||||
promise: "VERIFIED",
|
||||
apiTimeoutMs: 1000,
|
||||
directory: "/tmp",
|
||||
})
|
||||
|
||||
// #then
|
||||
expect(detected).toBe(false)
|
||||
})
|
||||
|
||||
test("#when completion text appears inside a quote #then should NOT detect completion", async () => {
|
||||
// #given
|
||||
const messages: SessionMessage[] = [
|
||||
{
|
||||
info: { role: "assistant" },
|
||||
parts: [{ type: "text", text: 'The user wrote: "the task is complete". I am still working.' }],
|
||||
},
|
||||
]
|
||||
const ctx = createPluginInput(messages)
|
||||
|
||||
// #when
|
||||
const detected = await detectCompletionInSessionMessages(ctx, {
|
||||
sessionID: "session-quoted",
|
||||
promise: "DONE",
|
||||
apiTimeoutMs: 1000,
|
||||
directory: "/tmp",
|
||||
})
|
||||
|
||||
// #then
|
||||
expect(detected).toBe(false)
|
||||
})
|
||||
|
||||
test("#when tool_result says all items are complete #then should NOT detect completion", async () => {
|
||||
// #given
|
||||
const messages: SessionMessage[] = [
|
||||
{
|
||||
info: { role: "assistant" },
|
||||
parts: [
|
||||
{ type: "tool_result", text: "Background agent report: all items are complete." },
|
||||
{ type: "text", text: "Still validating the final behavior." },
|
||||
],
|
||||
},
|
||||
]
|
||||
const ctx = createPluginInput(messages)
|
||||
|
||||
// #when
|
||||
const detected = await detectCompletionInSessionMessages(ctx, {
|
||||
sessionID: "session-tool-result-semantic",
|
||||
promise: "DONE",
|
||||
apiTimeoutMs: 1000,
|
||||
directory: "/tmp",
|
||||
})
|
||||
|
||||
// #then
|
||||
expect(detected).toBe(false)
|
||||
})
|
||||
|
||||
test("#when assistant says complete but not actually done #then should NOT detect completion", async () => {
|
||||
// #given
|
||||
const messages: SessionMessage[] = [
|
||||
{
|
||||
info: { role: "assistant" },
|
||||
parts: [{ type: "text", text: "The implementation looks complete, but I still need to run the tests." }],
|
||||
},
|
||||
]
|
||||
const ctx = createPluginInput(messages)
|
||||
|
||||
// #when
|
||||
const detected = await detectCompletionInSessionMessages(ctx, {
|
||||
sessionID: "session-not-actually-done",
|
||||
promise: "DONE",
|
||||
apiTimeoutMs: 1000,
|
||||
directory: "/tmp",
|
||||
})
|
||||
|
||||
// #then
|
||||
expect(detected).toBe(false)
|
||||
})
|
||||
})
|
||||
})
|
||||
|
||||
describe("detectSemanticCompletion", () => {
|
||||
describe("#given semantic completion patterns", () => {
|
||||
test("#when text contains 'task is complete' #then should return true", () => {
|
||||
expect(detectSemanticCompletion("The task is complete.")).toBe(true)
|
||||
})
|
||||
|
||||
test("#when text contains 'all items are done' #then should return true", () => {
|
||||
expect(detectSemanticCompletion("All items are done.")).toBe(true)
|
||||
})
|
||||
|
||||
test("#when text contains 'nothing left to do' #then should return true", () => {
|
||||
expect(detectSemanticCompletion("There is nothing left to do.")).toBe(true)
|
||||
})
|
||||
|
||||
test("#when text contains 'successfully completed all' #then should return true", () => {
|
||||
expect(detectSemanticCompletion("Successfully completed all tasks.")).toBe(true)
|
||||
})
|
||||
|
||||
test("#when text contains 'everything is finished' #then should return true", () => {
|
||||
expect(detectSemanticCompletion("Everything is finished.")).toBe(true)
|
||||
})
|
||||
|
||||
test("#when text does NOT contain completion patterns #then should return false", () => {
|
||||
expect(detectSemanticCompletion("Working on the next task.")).toBe(false)
|
||||
})
|
||||
|
||||
test("#when text is empty #then should return false", () => {
|
||||
expect(detectSemanticCompletion("")).toBe(false)
|
||||
})
|
||||
})
|
||||
})
|
||||
|
||||
@@ -9,6 +9,20 @@ interface OpenCodeSessionMessage {
|
||||
parts?: Array<{ type: string; text?: string }>
|
||||
}
|
||||
|
||||
interface TranscriptEntry {
|
||||
type?: string
|
||||
timestamp?: string
|
||||
content?: string
|
||||
tool_output?: { output?: string } | string
|
||||
}
|
||||
|
||||
function extractTranscriptEntryText(entry: TranscriptEntry): string {
|
||||
if (typeof entry.content === "string") return entry.content
|
||||
if (typeof entry.tool_output === "string") return entry.tool_output
|
||||
if (entry.tool_output && typeof entry.tool_output === "object" && typeof entry.tool_output.output === "string") return entry.tool_output.output
|
||||
return ""
|
||||
}
|
||||
|
||||
function escapeRegex(str: string): string {
|
||||
return str.replace(/[.*+?^${}()|[\]\\]/g, "\\$&")
|
||||
}
|
||||
@@ -17,6 +31,20 @@ function buildPromisePattern(promise: string): RegExp {
|
||||
return new RegExp(`<promise>\\s*${escapeRegex(promise)}\\s*</promise>`, "is")
|
||||
}
|
||||
|
||||
const SEMANTIC_COMPLETION_PATTERNS = [
|
||||
/\b(?:task|work|implementation|all\s+tasks?)\s+(?:is|are)\s+(?:complete|completed|done|finished)\b/i,
|
||||
/\ball\s+(?:items?|todos?|steps?)\s+(?:are\s+)?(?:complete|completed|done|finished|marked)\b/i,
|
||||
/\b(?:everything|all\s+work)\s+(?:is\s+)?(?:complete|completed|done|finished)\b/i,
|
||||
/\bsuccessfully\s+completed?\s+all\b/i,
|
||||
/\bnothing\s+(?:left|more|remaining)\s+to\s+(?:do|implement|fix)\b/i,
|
||||
]
|
||||
|
||||
const SEMANTIC_DONE_FALLBACK_ENABLED = false
|
||||
|
||||
export function detectSemanticCompletion(text: string): boolean {
|
||||
return SEMANTIC_COMPLETION_PATTERNS.some((pattern) => pattern.test(text))
|
||||
}
|
||||
|
||||
export function detectCompletionInTranscript(
|
||||
transcriptPath: string | undefined,
|
||||
promise: string,
|
||||
@@ -33,10 +61,18 @@ export function detectCompletionInTranscript(
|
||||
|
||||
for (const line of lines) {
|
||||
try {
|
||||
const entry = JSON.parse(line) as { type?: string; timestamp?: string }
|
||||
const entry = JSON.parse(line) as TranscriptEntry
|
||||
if (entry.type === "user") continue
|
||||
if (entry.type !== "assistant" && entry.type !== "text") continue
|
||||
if (startedAt && entry.timestamp && entry.timestamp < startedAt) continue
|
||||
if (pattern.test(line)) return true
|
||||
const entryText = extractTranscriptEntryText(entry)
|
||||
if (!entryText) continue
|
||||
if (pattern.test(entryText)) return true
|
||||
const isAssistantEntry = entry.type === "assistant" || entry.type === "text"
|
||||
if (SEMANTIC_DONE_FALLBACK_ENABLED && promise === "DONE" && isAssistantEntry && detectSemanticCompletion(entryText)) {
|
||||
log("[ralph-loop] WARNING: Semantic completion detected in transcript (agent used natural language instead of <promise>DONE</promise>)")
|
||||
return true
|
||||
}
|
||||
} catch {
|
||||
continue
|
||||
}
|
||||
@@ -93,13 +129,20 @@ export async function detectCompletionInSessionMessages(
|
||||
|
||||
let responseText = ""
|
||||
for (const part of assistant.parts) {
|
||||
if (part.type !== "text" && part.type !== "tool_result") continue
|
||||
if (part.type !== "text") continue
|
||||
responseText += `${responseText ? "\n" : ""}${part.text ?? ""}`
|
||||
}
|
||||
|
||||
if (pattern.test(responseText)) {
|
||||
return true
|
||||
}
|
||||
|
||||
if (SEMANTIC_DONE_FALLBACK_ENABLED && options.promise === "DONE" && detectSemanticCompletion(responseText)) {
|
||||
log("[ralph-loop] WARNING: Semantic completion detected (agent used natural language instead of <promise>DONE</promise>)", {
|
||||
sessionID: options.sessionID,
|
||||
})
|
||||
return true
|
||||
}
|
||||
}
|
||||
|
||||
return false
|
||||
|
||||
@@ -562,7 +562,7 @@ describe("ralph-loop", () => {
|
||||
})
|
||||
hook.startLoop("session-123", "Build something", { completionPromise: "COMPLETE" })
|
||||
|
||||
writeFileSync(transcriptPath, JSON.stringify({ type: "tool_result", tool_name: "write", tool_output: { output: "Task done <promise>COMPLETE</promise>" } }) + "\n")
|
||||
writeFileSync(transcriptPath, JSON.stringify({ type: "assistant", content: "Task done <promise>COMPLETE</promise>" }) + "\n")
|
||||
|
||||
// when - session goes idle (transcriptPath now derived from sessionID via getTranscriptPath)
|
||||
await hook.event({
|
||||
@@ -1020,7 +1020,7 @@ Original task: Build something`
|
||||
expect(hook.getState()?.iteration).toBe(2)
|
||||
})
|
||||
|
||||
test("should detect completion from tool_result entry in transcript", async () => {
|
||||
test("should NOT detect completion from tool_result entry in transcript", async () => {
|
||||
// given - transcript contains a tool_result with completion promise
|
||||
const transcriptPath = join(TEST_DIR, "transcript.jsonl")
|
||||
const toolResultEntry = JSON.stringify({
|
||||
@@ -1044,16 +1044,15 @@ Original task: Build something`
|
||||
},
|
||||
})
|
||||
|
||||
// then - loop should complete (tool_result contains actual completion output)
|
||||
expect(promptCalls.length).toBe(0)
|
||||
expect(toastCalls.some((t) => t.title === "Ralph Loop Complete!")).toBe(true)
|
||||
expect(hook.getState()).toBeNull()
|
||||
expect(promptCalls.length).toBe(1)
|
||||
expect(toastCalls.some((t) => t.title === "Ralph Loop Complete!")).toBe(false)
|
||||
expect(hook.getState()?.iteration).toBe(2)
|
||||
})
|
||||
|
||||
test("should check transcript BEFORE API to optimize performance", async () => {
|
||||
// given - transcript has completion promise
|
||||
const transcriptPath = join(TEST_DIR, "transcript.jsonl")
|
||||
writeFileSync(transcriptPath, JSON.stringify({ type: "tool_result", tool_name: "write", tool_output: { output: "<promise>DONE</promise>" } }) + "\n")
|
||||
writeFileSync(transcriptPath, JSON.stringify({ type: "assistant", content: "<promise>DONE</promise>" }) + "\n")
|
||||
mockSessionMessages = [
|
||||
{ info: { role: "assistant" }, parts: [{ type: "text", text: "No promise here" }] },
|
||||
]
|
||||
@@ -1083,7 +1082,7 @@ Original task: Build something`
|
||||
const hook = createRalphLoopHook(createMockPluginInput(), {
|
||||
getTranscriptPath: () => transcriptPath,
|
||||
})
|
||||
writeFileSync(transcriptPath, JSON.stringify({ type: "tool_result", tool_name: "write", tool_output: { output: "<promise>DONE</promise>" } }) + "\n")
|
||||
writeFileSync(transcriptPath, JSON.stringify({ type: "assistant", content: "<promise>DONE</promise>" }) + "\n")
|
||||
hook.startLoop("test-id", "Build API", { ultrawork: true })
|
||||
|
||||
// when - idle event triggered
|
||||
@@ -1100,7 +1099,7 @@ Original task: Build something`
|
||||
const hook = createRalphLoopHook(createMockPluginInput(), {
|
||||
getTranscriptPath: () => transcriptPath,
|
||||
})
|
||||
writeFileSync(transcriptPath, JSON.stringify({ type: "tool_result", tool_name: "write", tool_output: { output: "<promise>DONE</promise>" } }) + "\n")
|
||||
writeFileSync(transcriptPath, JSON.stringify({ type: "assistant", content: "<promise>DONE</promise>" }) + "\n")
|
||||
hook.startLoop("test-id", "Build API")
|
||||
|
||||
// when - idle event triggered
|
||||
|
||||
@@ -75,11 +75,11 @@ describe("ulw-loop verification", () => {
|
||||
|
||||
await hook.event({ event: { type: "session.idle", properties: { sessionID: "session-123" } } })
|
||||
|
||||
expect(hook.getState()?.verification_pending).toBe(true)
|
||||
expect(hook.getState()?.completion_promise).toBe(ULTRAWORK_VERIFICATION_PROMISE)
|
||||
expect(hook.getState()?.verification_session_id).toBeUndefined()
|
||||
expect(hook.getState()?.verification_pending).toBeUndefined()
|
||||
expect(hook.getState()?.completion_promise).toBe("DONE")
|
||||
expect(hook.getState()?.iteration).toBe(2)
|
||||
expect(promptCalls).toHaveLength(1)
|
||||
expect(promptCalls[0].text).toContain('task(subagent_type="oracle"')
|
||||
expect(promptCalls[0].text).not.toContain('task(subagent_type="oracle"')
|
||||
expect(toastCalls.some((toast) => toast.title === "ULTRAWORK LOOP COMPLETE!")).toBe(false)
|
||||
})
|
||||
|
||||
@@ -90,7 +90,7 @@ describe("ulw-loop verification", () => {
|
||||
hook.startLoop("session-123", "Build API", { ultrawork: true })
|
||||
writeFileSync(
|
||||
parentTranscriptPath,
|
||||
`${JSON.stringify({ type: "tool_result", timestamp: new Date().toISOString(), tool_output: { output: "done <promise>DONE</promise>" } })}\n`,
|
||||
`${JSON.stringify({ type: "assistant", timestamp: new Date().toISOString(), content: "done <promise>DONE</promise>" })}\n`,
|
||||
)
|
||||
|
||||
await hook.event({ event: { type: "session.idle", properties: { sessionID: "session-123" } } })
|
||||
@@ -100,7 +100,7 @@ describe("ulw-loop verification", () => {
|
||||
})
|
||||
writeFileSync(
|
||||
oracleTranscriptPath,
|
||||
`${JSON.stringify({ type: "tool_result", timestamp: new Date().toISOString(), tool_output: { output: `verified <promise>${ULTRAWORK_VERIFICATION_PROMISE}</promise>` } })}\n`,
|
||||
`${JSON.stringify({ type: "assistant", timestamp: new Date().toISOString(), content: `verified <promise>${ULTRAWORK_VERIFICATION_PROMISE}</promise>` })}\n`,
|
||||
)
|
||||
|
||||
await hook.event({ event: { type: "session.idle", properties: { sessionID: "session-123" } } })
|
||||
@@ -116,7 +116,7 @@ describe("ulw-loop verification", () => {
|
||||
hook.startLoop("session-123", "Build API", { ultrawork: true })
|
||||
writeFileSync(
|
||||
parentTranscriptPath,
|
||||
`${JSON.stringify({ type: "tool_result", timestamp: new Date().toISOString(), tool_output: { output: "done <promise>DONE</promise>" } })}\n`,
|
||||
`${JSON.stringify({ type: "assistant", timestamp: new Date().toISOString(), content: "done <promise>DONE</promise>" })}\n`,
|
||||
)
|
||||
|
||||
await hook.event({ event: { type: "session.idle", properties: { sessionID: "session-123" } } })
|
||||
@@ -126,7 +126,7 @@ describe("ulw-loop verification", () => {
|
||||
})
|
||||
writeFileSync(
|
||||
oracleTranscriptPath,
|
||||
`${JSON.stringify({ type: "tool_result", timestamp: new Date().toISOString(), tool_output: { output: `verified <promise>${ULTRAWORK_VERIFICATION_PROMISE}</promise>` } })}\n`,
|
||||
`${JSON.stringify({ type: "assistant", timestamp: new Date().toISOString(), content: `verified <promise>${ULTRAWORK_VERIFICATION_PROMISE}</promise>` })}\n`,
|
||||
)
|
||||
|
||||
await hook.event({ event: { type: "session.idle", properties: { sessionID: "ses-oracle" } } })
|
||||
@@ -142,7 +142,7 @@ describe("ulw-loop verification", () => {
|
||||
hook.startLoop("session-123", "Build API", { ultrawork: true })
|
||||
writeFileSync(
|
||||
parentTranscriptPath,
|
||||
`${JSON.stringify({ type: "tool_result", timestamp: new Date().toISOString(), tool_output: { output: "done <promise>DONE</promise>" } })}\n`,
|
||||
`${JSON.stringify({ type: "assistant", timestamp: new Date().toISOString(), content: "done <promise>DONE</promise>" })}\n`,
|
||||
)
|
||||
|
||||
await hook.event({ event: { type: "session.idle", properties: { sessionID: "session-123" } } })
|
||||
@@ -166,7 +166,7 @@ describe("ulw-loop verification", () => {
|
||||
hook.startLoop("session-123", "Build API", { ultrawork: true })
|
||||
writeFileSync(
|
||||
parentTranscriptPath,
|
||||
`${JSON.stringify({ type: "tool_result", timestamp: new Date().toISOString(), tool_output: { output: "done <promise>DONE</promise>" } })}\n`,
|
||||
`${JSON.stringify({ type: "assistant", timestamp: new Date().toISOString(), content: "done <promise>DONE</promise>" })}\n`,
|
||||
)
|
||||
|
||||
await hook.event({ event: { type: "session.idle", properties: { sessionID: "session-123" } } })
|
||||
@@ -213,7 +213,7 @@ describe("ulw-loop verification", () => {
|
||||
hook.startLoop("session-123", "Build API", { ultrawork: true })
|
||||
writeFileSync(
|
||||
parentTranscriptPath,
|
||||
`${JSON.stringify({ type: "tool_result", timestamp: new Date().toISOString(), tool_output: { output: "done <promise>DONE</promise>" } })}\n`,
|
||||
`${JSON.stringify({ type: "assistant", timestamp: new Date().toISOString(), content: "done <promise>DONE</promise>" })}\n`,
|
||||
)
|
||||
|
||||
await hook.event({ event: { type: "session.idle", properties: { sessionID: "session-123" } } })
|
||||
@@ -256,7 +256,7 @@ describe("ulw-loop verification", () => {
|
||||
test("#given prior transcript completion from older run #when new ulw loop starts #then old completion is ignored", async () => {
|
||||
writeFileSync(
|
||||
parentTranscriptPath,
|
||||
`${JSON.stringify({ type: "tool_result", timestamp: "2000-01-01T00:00:00.000Z", tool_output: { output: "old <promise>DONE</promise>" } })}\n`,
|
||||
`${JSON.stringify({ type: "assistant", timestamp: "2000-01-01T00:00:00.000Z", content: "old <promise>DONE</promise>" })}\n`,
|
||||
)
|
||||
const hook = createRalphLoopHook(createMockPluginInput(), {
|
||||
getTranscriptPath: (sessionID) => sessionID === "ses-oracle" ? oracleTranscriptPath : parentTranscriptPath,
|
||||
@@ -277,7 +277,7 @@ describe("ulw-loop verification", () => {
|
||||
hook.startLoop("session-123", "Build API", { ultrawork: true })
|
||||
writeFileSync(
|
||||
parentTranscriptPath,
|
||||
`${JSON.stringify({ type: "tool_result", timestamp: new Date().toISOString(), tool_output: { output: "done <promise>DONE</promise>" } })}\n`,
|
||||
`${JSON.stringify({ type: "assistant", timestamp: new Date().toISOString(), content: "done <promise>DONE</promise>" })}\n`,
|
||||
)
|
||||
|
||||
await hook.event({ event: { type: "session.idle", properties: { sessionID: "session-123" } } })
|
||||
@@ -295,7 +295,7 @@ describe("ulw-loop verification", () => {
|
||||
hook.startLoop("session-123", "Build API", { ultrawork: true })
|
||||
writeFileSync(
|
||||
parentTranscriptPath,
|
||||
`${JSON.stringify({ type: "tool_result", timestamp: new Date().toISOString(), tool_output: { output: "done <promise>DONE</promise>" } })}\n`,
|
||||
`${JSON.stringify({ type: "assistant", timestamp: new Date().toISOString(), content: "done <promise>DONE</promise>" })}\n`,
|
||||
)
|
||||
|
||||
await hook.event({ event: { type: "session.idle", properties: { sessionID: "session-123" } } })
|
||||
@@ -314,7 +314,7 @@ describe("ulw-loop verification", () => {
|
||||
hook.startLoop("session-123", "Build API", { ultrawork: true })
|
||||
writeFileSync(
|
||||
parentTranscriptPath,
|
||||
`${JSON.stringify({ type: "tool_result", timestamp: new Date().toISOString(), tool_output: { output: "done <promise>DONE</promise>" } })}\n`,
|
||||
`${JSON.stringify({ type: "assistant", timestamp: new Date().toISOString(), content: "done <promise>DONE</promise>" })}\n`,
|
||||
)
|
||||
|
||||
await hook.event({ event: { type: "session.idle", properties: { sessionID: "session-123" } } })
|
||||
@@ -325,7 +325,7 @@ describe("ulw-loop verification", () => {
|
||||
hook.startLoop("session-456", "Ship CLI", { ultrawork: true })
|
||||
writeFileSync(
|
||||
oracleTranscriptPath,
|
||||
`${JSON.stringify({ type: "tool_result", timestamp: new Date().toISOString(), tool_output: { output: `verified <promise>${ULTRAWORK_VERIFICATION_PROMISE}</promise>` } })}\n`,
|
||||
`${JSON.stringify({ type: "assistant", timestamp: new Date().toISOString(), content: `verified <promise>${ULTRAWORK_VERIFICATION_PROMISE}</promise>` })}\n`,
|
||||
)
|
||||
|
||||
await hook.event({ event: { type: "session.idle", properties: { sessionID: "ses-oracle-old" } } })
|
||||
@@ -343,7 +343,7 @@ describe("ulw-loop verification", () => {
|
||||
hook.startLoop("session-123", "Build API", { ultrawork: true })
|
||||
writeFileSync(
|
||||
parentTranscriptPath,
|
||||
`${JSON.stringify({ type: "tool_result", timestamp: new Date().toISOString(), tool_output: { output: "done <promise>DONE</promise>" } })}\n`,
|
||||
`${JSON.stringify({ type: "assistant", timestamp: new Date().toISOString(), content: "done <promise>DONE</promise>" })}\n`,
|
||||
)
|
||||
|
||||
await hook.event({ event: { type: "session.idle", properties: { sessionID: "session-123" } } })
|
||||
@@ -354,7 +354,7 @@ describe("ulw-loop verification", () => {
|
||||
hook.startLoop("session-123", "Restarted task", { ultrawork: true })
|
||||
writeFileSync(
|
||||
oracleTranscriptPath,
|
||||
`${JSON.stringify({ type: "tool_result", timestamp: new Date().toISOString(), tool_output: { output: `verified <promise>${ULTRAWORK_VERIFICATION_PROMISE}</promise>` } })}\n`,
|
||||
`${JSON.stringify({ type: "assistant", timestamp: new Date().toISOString(), content: `verified <promise>${ULTRAWORK_VERIFICATION_PROMISE}</promise>` })}\n`,
|
||||
)
|
||||
|
||||
await hook.event({ event: { type: "session.idle", properties: { sessionID: "ses-oracle-old" } } })
|
||||
@@ -373,13 +373,13 @@ describe("ulw-loop verification", () => {
|
||||
hook.startLoop("session-123", "Build API", { ultrawork: true })
|
||||
writeFileSync(
|
||||
parentTranscriptPath,
|
||||
`${JSON.stringify({ type: "tool_result", timestamp: new Date().toISOString(), tool_output: { output: "done <promise>DONE</promise>" } })}\n`,
|
||||
`${JSON.stringify({ type: "assistant", timestamp: new Date().toISOString(), content: "done <promise>DONE</promise>" })}\n`,
|
||||
)
|
||||
|
||||
await hook.event({ event: { type: "session.idle", properties: { sessionID: "session-123" } } })
|
||||
writeFileSync(
|
||||
parentTranscriptPath,
|
||||
`${JSON.stringify({ type: "tool_result", timestamp: new Date().toISOString(), tool_output: { output: "done <promise>DONE</promise>" } })}\n${JSON.stringify({ type: "tool_result", timestamp: new Date().toISOString(), tool_output: { output: `verified <promise>${ULTRAWORK_VERIFICATION_PROMISE}</promise>` } })}\n`,
|
||||
`${JSON.stringify({ type: "assistant", timestamp: new Date().toISOString(), content: "done <promise>DONE</promise>" })}\n${JSON.stringify({ type: "assistant", timestamp: new Date().toISOString(), content: `verified <promise>${ULTRAWORK_VERIFICATION_PROMISE}</promise>` })}\n`,
|
||||
)
|
||||
|
||||
await hook.event({ event: { type: "session.idle", properties: { sessionID: "session-123" } } })
|
||||
@@ -409,7 +409,7 @@ describe("ulw-loop verification", () => {
|
||||
hook.startLoop("session-123", "Build API", { ultrawork: true })
|
||||
writeFileSync(
|
||||
parentTranscriptPath,
|
||||
`${JSON.stringify({ type: "tool_result", timestamp: new Date().toISOString(), tool_output: { output: "done <promise>DONE</promise>" } })}\n`,
|
||||
`${JSON.stringify({ type: "assistant", timestamp: new Date().toISOString(), content: "done <promise>DONE</promise>" })}\n`,
|
||||
)
|
||||
|
||||
await hook.event({ event: { type: "session.idle", properties: { sessionID: "session-123" } } })
|
||||
@@ -449,7 +449,7 @@ describe("ulw-loop verification", () => {
|
||||
hook.startLoop("session-123", "Build API", { ultrawork: true })
|
||||
writeFileSync(
|
||||
parentTranscriptPath,
|
||||
`${JSON.stringify({ type: "tool_result", timestamp: new Date().toISOString(), tool_output: { output: "done <promise>DONE</promise>" } })}\n`,
|
||||
`${JSON.stringify({ type: "assistant", timestamp: new Date().toISOString(), content: "done <promise>DONE</promise>" })}\n`,
|
||||
)
|
||||
|
||||
await hook.event({ event: { type: "session.idle", properties: { sessionID: "session-123" } } })
|
||||
@@ -467,7 +467,7 @@ describe("ulw-loop verification", () => {
|
||||
|
||||
writeFileSync(
|
||||
parentTranscriptPath,
|
||||
`${JSON.stringify({ type: "tool_result", timestamp: new Date().toISOString(), tool_output: { output: "fixed it <promise>DONE</promise>" } })}\n`,
|
||||
`${JSON.stringify({ type: "assistant", timestamp: new Date().toISOString(), content: "fixed it <promise>DONE</promise>" })}\n`,
|
||||
)
|
||||
writeState(testDir, {
|
||||
...hook.getState()!,
|
||||
|
||||
@@ -9,6 +9,7 @@ import { SessionCategoryRegistry } from "../../shared/session-category-registry"
|
||||
import { buildRetryModelPayload } from "./retry-model-payload"
|
||||
import { getLastUserRetryParts } from "./last-user-retry-parts"
|
||||
import { extractSessionMessages } from "./session-messages"
|
||||
import { getAgentDisplayName } from "../../shared/agent-display-names"
|
||||
|
||||
const SESSION_TTL_MS = 30 * 60 * 1000
|
||||
|
||||
@@ -126,13 +127,14 @@ export function createAutoRetryHelpers(deps: HookDeps) {
|
||||
})
|
||||
|
||||
const retryAgent = resolvedAgent ?? getSessionAgent(sessionID)
|
||||
const retryAgentDisplayName = retryAgent ? getAgentDisplayName(retryAgent) : undefined
|
||||
sessionAwaitingFallbackResult.add(sessionID)
|
||||
scheduleSessionFallbackTimeout(sessionID, retryAgent)
|
||||
|
||||
await ctx.client.session.promptAsync({
|
||||
path: { id: sessionID },
|
||||
body: {
|
||||
...(retryAgent ? { agent: retryAgent } : {}),
|
||||
...(retryAgentDisplayName ? { agent: retryAgentDisplayName } : {}),
|
||||
...retryModelPayload,
|
||||
parts: retryParts,
|
||||
},
|
||||
|
||||
@@ -27,10 +27,13 @@ export const RETRYABLE_ERROR_PATTERNS = [
|
||||
/too.?many.?requests/i,
|
||||
/quota.?exceeded/i,
|
||||
/quota\s+will\s+reset\s+after/i,
|
||||
/(?:you(?:'ve|\s+have)\s+)?reached\s+your\s+usage\s+limit/i,
|
||||
/all\s+credentials\s+for\s+model/i,
|
||||
/cool(?:ing)?\s+down/i,
|
||||
/exhausted\s+your\s+capacity/i,
|
||||
/usage\s+limit\s+has\s+been\s+reached/i,
|
||||
/model.{0,20}?not.{0,10}?supported/i,
|
||||
/model_not_supported/i,
|
||||
/service.?unavailable/i,
|
||||
/overloaded/i,
|
||||
/temporarily.?unavailable/i,
|
||||
|
||||
@@ -253,6 +253,17 @@ describe("quota error detection (fixes #2747)", () => {
|
||||
expect(retryable).toBe(true)
|
||||
})
|
||||
|
||||
test("treats hard usage-limit wording as retryable", () => {
|
||||
//#given
|
||||
const error = { message: "You've reached your usage limit for this month. Please upgrade to continue." }
|
||||
|
||||
//#when
|
||||
const retryable = isRetryableError(error, [429, 503])
|
||||
|
||||
//#then
|
||||
expect(retryable).toBe(true)
|
||||
})
|
||||
|
||||
test("classifies QuotaExceededError by errorName even without quota keywords in message", () => {
|
||||
//#given
|
||||
const error = { name: "QuotaExceededError", message: "Request failed." }
|
||||
@@ -276,4 +287,21 @@ describe("quota error detection (fixes #2747)", () => {
|
||||
expect(errorType).toBe("quota_exceeded")
|
||||
expect(retryable).toBe(true)
|
||||
})
|
||||
|
||||
test("detects model_not_supported errors as retryable for fallback chain", () => {
|
||||
//#given
|
||||
const error1 = { message: "model_not_supported" }
|
||||
const error2 = { message: "The model 'gpt-4-foo' is not supported by this API" }
|
||||
const error3 = { message: "model not supported on free tier" }
|
||||
|
||||
//#when
|
||||
const retryable1 = isRetryableError(error1, [400, 404])
|
||||
const retryable2 = isRetryableError(error2, [400, 404])
|
||||
const retryable3 = isRetryableError(error3, [400, 404])
|
||||
|
||||
//#then
|
||||
expect(retryable1).toBe(true)
|
||||
expect(retryable2).toBe(true)
|
||||
expect(retryable3).toBe(true)
|
||||
})
|
||||
})
|
||||
|
||||
@@ -64,6 +64,11 @@ describe("runtime-fallback", () => {
|
||||
|
||||
function createMockPluginConfigWithCategoryFallback(fallbackModels: string[]): OhMyOpenCodeConfig {
|
||||
return {
|
||||
git_master: {
|
||||
commit_footer: true,
|
||||
include_co_authored_by: true,
|
||||
git_env_prefix: "GIT_MASTER=1",
|
||||
},
|
||||
categories: {
|
||||
test: {
|
||||
fallback_models: fallbackModels,
|
||||
@@ -79,6 +84,11 @@ describe("runtime-fallback", () => {
|
||||
variant?: string,
|
||||
): OhMyOpenCodeConfig {
|
||||
return {
|
||||
git_master: {
|
||||
commit_footer: true,
|
||||
include_co_authored_by: true,
|
||||
git_env_prefix: "GIT_MASTER=1",
|
||||
},
|
||||
categories: {
|
||||
[categoryName]: {
|
||||
model,
|
||||
@@ -272,6 +282,39 @@ describe("runtime-fallback", () => {
|
||||
expect(errorLog).toBeDefined()
|
||||
})
|
||||
|
||||
test("should trigger fallback when session.error says you've reached your usage limit", async () => {
|
||||
const hook = createRuntimeFallbackHook(createMockPluginInput(), {
|
||||
config: createMockConfig({ notify_on_fallback: false }),
|
||||
pluginConfig: createMockPluginConfigWithCategoryFallback(["zai-coding-plan/glm-5.1"]),
|
||||
})
|
||||
const sessionID = "test-session-usage-limit"
|
||||
SessionCategoryRegistry.register(sessionID, "test")
|
||||
|
||||
await hook.event({
|
||||
event: {
|
||||
type: "session.created",
|
||||
properties: { info: { id: sessionID, model: "kimi-for-coding/k2p5" } },
|
||||
},
|
||||
})
|
||||
|
||||
await hook.event({
|
||||
event: {
|
||||
type: "session.error",
|
||||
properties: {
|
||||
sessionID,
|
||||
error: { message: "You've reached your usage limit for this month. Please upgrade to continue." },
|
||||
},
|
||||
},
|
||||
})
|
||||
|
||||
const fallbackLog = logCalls.find((c) => c.msg.includes("Preparing fallback"))
|
||||
expect(fallbackLog).toBeDefined()
|
||||
expect(fallbackLog?.data).toMatchObject({ from: "kimi-for-coding/k2p5", to: "zai-coding-plan/glm-5.1" })
|
||||
|
||||
const skipLog = logCalls.find((c) => c.msg.includes("Error not retryable"))
|
||||
expect(skipLog).toBeUndefined()
|
||||
})
|
||||
|
||||
test("should continue fallback chain when fallback model is not found", async () => {
|
||||
const hook = createRuntimeFallbackHook(createMockPluginInput(), {
|
||||
config: createMockConfig({ notify_on_fallback: false }),
|
||||
@@ -767,7 +810,13 @@ describe("runtime-fallback", () => {
|
||||
test("should log when no fallback models configured", async () => {
|
||||
const hook = createRuntimeFallbackHook(createMockPluginInput(), {
|
||||
config: createMockConfig(),
|
||||
pluginConfig: {},
|
||||
pluginConfig: {
|
||||
git_master: {
|
||||
commit_footer: true,
|
||||
include_co_authored_by: true,
|
||||
git_env_prefix: "GIT_MASTER=1",
|
||||
},
|
||||
},
|
||||
})
|
||||
const sessionID = "test-session-no-fallbacks"
|
||||
|
||||
@@ -2299,6 +2348,11 @@ describe("runtime-fallback", () => {
|
||||
describe("fallback models configuration", () => {
|
||||
function createMockPluginConfigWithAgentFallback(agentName: string, fallbackModels: string[]): OhMyOpenCodeConfig {
|
||||
return {
|
||||
git_master: {
|
||||
commit_footer: true,
|
||||
include_co_authored_by: true,
|
||||
git_env_prefix: "GIT_MASTER=1",
|
||||
},
|
||||
agents: {
|
||||
[agentName]: {
|
||||
fallback_models: fallbackModels,
|
||||
@@ -2404,7 +2458,7 @@ describe("runtime-fallback", () => {
|
||||
|
||||
expect(promptCalls.length).toBe(1)
|
||||
const callBody = promptCalls[0]?.body as Record<string, unknown>
|
||||
expect(callBody?.agent).toBe("prometheus")
|
||||
expect(callBody?.agent).toBe("Prometheus (Plan Builder)")
|
||||
expect(callBody?.model).toEqual({ providerID: "github-copilot", modelID: "claude-opus-4.6" })
|
||||
})
|
||||
})
|
||||
@@ -2496,6 +2550,11 @@ describe("runtime-fallback", () => {
|
||||
{
|
||||
config: createMockConfig({ notify_on_fallback: false }),
|
||||
pluginConfig: {
|
||||
git_master: {
|
||||
commit_footer: true,
|
||||
include_co_authored_by: true,
|
||||
git_env_prefix: "GIT_MASTER=1",
|
||||
},
|
||||
categories: {
|
||||
test: {
|
||||
fallback_models: ["provider-a/model-a", "provider-b/model-b"],
|
||||
@@ -2548,6 +2607,11 @@ describe("runtime-fallback", () => {
|
||||
const hook = createRuntimeFallbackHook(createMockPluginInput(), {
|
||||
config: createMockConfig({ notify_on_fallback: false }),
|
||||
pluginConfig: {
|
||||
git_master: {
|
||||
commit_footer: true,
|
||||
include_co_authored_by: true,
|
||||
git_env_prefix: "GIT_MASTER=1",
|
||||
},
|
||||
categories: {
|
||||
test: {
|
||||
fallback_models: ["provider-a/model-a", "provider-b/model-b"],
|
||||
@@ -2605,6 +2669,11 @@ describe("runtime-fallback", () => {
|
||||
{
|
||||
config: createMockConfig({ notify_on_fallback: false }),
|
||||
pluginConfig: {
|
||||
git_master: {
|
||||
commit_footer: true,
|
||||
include_co_authored_by: true,
|
||||
git_env_prefix: "GIT_MASTER=1",
|
||||
},
|
||||
categories: {
|
||||
test: {
|
||||
fallback_models: ["provider-a/model-a", "provider-b/model-b"],
|
||||
@@ -2647,6 +2716,11 @@ describe("runtime-fallback", () => {
|
||||
const hook = createRuntimeFallbackHook(createMockPluginInput(), {
|
||||
config: createMockConfig({ notify_on_fallback: false }),
|
||||
pluginConfig: {
|
||||
git_master: {
|
||||
commit_footer: true,
|
||||
include_co_authored_by: true,
|
||||
git_env_prefix: "GIT_MASTER=1",
|
||||
},
|
||||
categories: {
|
||||
test: {
|
||||
fallback_models: ["provider-a/model-a", "provider-b/model-b"],
|
||||
|
||||
@@ -54,9 +54,25 @@ export function createSessionRecoveryHook(ctx: PluginInput, options?: SessionRec
|
||||
if (!errorType) return false
|
||||
|
||||
const sessionID = info.sessionID
|
||||
const assistantMsgID = info.id
|
||||
let assistantMsgID = info.id
|
||||
|
||||
if (!sessionID || !assistantMsgID) return false
|
||||
if (!sessionID) return false
|
||||
|
||||
if (!assistantMsgID) {
|
||||
try {
|
||||
const messagesResp = await ctx.client.session.messages({
|
||||
path: { id: sessionID },
|
||||
query: { directory: ctx.directory },
|
||||
})
|
||||
const msgs = (messagesResp as { data?: MessageData[] }).data
|
||||
const lastAssistant = msgs?.findLast((m) => m.info?.role === "assistant" && m.info?.error)
|
||||
assistantMsgID = lastAssistant?.info?.id
|
||||
} catch {
|
||||
log("[session-recovery] Failed to fetch messages for messageID fallback", { sessionID })
|
||||
}
|
||||
}
|
||||
|
||||
if (!assistantMsgID) return false
|
||||
if (processingErrors.has(assistantMsgID)) return false
|
||||
processingErrors.add(assistantMsgID)
|
||||
|
||||
|
||||
@@ -1,7 +1,13 @@
|
||||
import { describe, expect, it } from "bun:test"
|
||||
import { readMessagesFromSDK, readPartsFromSDK } from "../storage"
|
||||
import { readMessages } from "./messages-reader"
|
||||
import { readParts } from "./parts-reader"
|
||||
async function importFreshReaders() {
|
||||
const token = `${Date.now()}-${Math.random()}`
|
||||
const [{ readMessagesFromSDK, readMessages }, { readPartsFromSDK, readParts }] = await Promise.all([
|
||||
import(`./messages-reader?test=${token}`),
|
||||
import(`./parts-reader?test=${token}`),
|
||||
])
|
||||
|
||||
return { readMessagesFromSDK, readPartsFromSDK, readMessages, readParts }
|
||||
}
|
||||
|
||||
function createMockClient(handlers: {
|
||||
messages?: (sessionID: string) => unknown[]
|
||||
@@ -28,6 +34,7 @@ function createMockClient(handlers: {
|
||||
describe("session-recovery storage SDK readers", () => {
|
||||
it("readPartsFromSDK returns empty array when fetch fails", async () => {
|
||||
//#given a client that throws on request
|
||||
const { readPartsFromSDK } = await importFreshReaders()
|
||||
const client = createMockClient({}) as Parameters<typeof readPartsFromSDK>[0]
|
||||
|
||||
//#when readPartsFromSDK is called
|
||||
@@ -39,6 +46,7 @@ describe("session-recovery storage SDK readers", () => {
|
||||
|
||||
it("readPartsFromSDK returns stored parts from SDK response", async () => {
|
||||
//#given a client that returns a message with parts
|
||||
const { readPartsFromSDK } = await importFreshReaders()
|
||||
const sessionID = "ses_test"
|
||||
const messageID = "msg_test"
|
||||
const storedParts = [
|
||||
@@ -58,6 +66,7 @@ describe("session-recovery storage SDK readers", () => {
|
||||
|
||||
it("readMessagesFromSDK normalizes and sorts messages", async () => {
|
||||
//#given a client that returns messages list
|
||||
const { readMessagesFromSDK } = await importFreshReaders()
|
||||
const sessionID = "ses_test"
|
||||
const client = createMockClient({
|
||||
messages: () => [
|
||||
@@ -78,8 +87,9 @@ describe("session-recovery storage SDK readers", () => {
|
||||
])
|
||||
})
|
||||
|
||||
it("readParts returns empty array for nonexistent message", () => {
|
||||
it("readParts returns empty array for nonexistent message", async () => {
|
||||
//#given a message ID that has no stored parts
|
||||
const { readParts } = await importFreshReaders()
|
||||
//#when readParts is called
|
||||
const parts = readParts("msg_nonexistent")
|
||||
|
||||
@@ -87,8 +97,9 @@ describe("session-recovery storage SDK readers", () => {
|
||||
expect(parts).toEqual([])
|
||||
})
|
||||
|
||||
it("readMessages returns empty array for nonexistent session", () => {
|
||||
it("readMessages returns empty array for nonexistent session", async () => {
|
||||
//#given a session ID that has no stored messages
|
||||
const { readMessages } = await importFreshReaders()
|
||||
//#when readMessages is called
|
||||
const messages = readMessages("ses_nonexistent")
|
||||
|
||||
|
||||
@@ -12,7 +12,6 @@ import {
|
||||
import type { BoulderState } from "../../features/boulder-state"
|
||||
import * as sessionState from "../../features/claude-code-session-state"
|
||||
import * as worktreeDetector from "./worktree-detector"
|
||||
import * as worktreeDetector from "./worktree-detector"
|
||||
|
||||
describe("start-work hook", () => {
|
||||
let testDir: string
|
||||
@@ -26,6 +25,9 @@ describe("start-work hook", () => {
|
||||
}
|
||||
|
||||
beforeEach(() => {
|
||||
sessionState._resetForTesting()
|
||||
sessionState.registerAgentName("atlas")
|
||||
sessionState.registerAgentName("sisyphus")
|
||||
testDir = join(tmpdir(), `start-work-test-${randomUUID()}`)
|
||||
sisyphusDir = join(testDir, ".sisyphus")
|
||||
if (!existsSync(testDir)) {
|
||||
@@ -38,6 +40,7 @@ describe("start-work hook", () => {
|
||||
})
|
||||
|
||||
afterEach(() => {
|
||||
sessionState._resetForTesting()
|
||||
clearBoulderState(testDir)
|
||||
if (existsSync(testDir)) {
|
||||
rmSync(testDir, { recursive: true, force: true })
|
||||
@@ -409,7 +412,7 @@ describe("start-work hook", () => {
|
||||
// given
|
||||
const hook = createStartWorkHook(createMockPluginInput())
|
||||
const output = {
|
||||
message: {},
|
||||
message: {} as Record<string, unknown>,
|
||||
parts: [{ type: "text", text: "<session-context></session-context>" }],
|
||||
}
|
||||
|
||||
@@ -422,6 +425,29 @@ describe("start-work hook", () => {
|
||||
// then
|
||||
expect(output.message.agent).toBe("Atlas (Plan Executor)")
|
||||
})
|
||||
|
||||
test("should keep the current agent when Atlas is unavailable", async () => {
|
||||
// given
|
||||
sessionState._resetForTesting()
|
||||
sessionState.registerAgentName("sisyphus")
|
||||
sessionState.updateSessionAgent("ses-prometheus-to-sisyphus", "sisyphus")
|
||||
|
||||
const hook = createStartWorkHook(createMockPluginInput())
|
||||
const output = {
|
||||
message: {} as Record<string, unknown>,
|
||||
parts: [{ type: "text", text: "<session-context></session-context>" }],
|
||||
}
|
||||
|
||||
// when
|
||||
await hook["chat.message"](
|
||||
{ sessionID: "ses-prometheus-to-sisyphus" },
|
||||
output
|
||||
)
|
||||
|
||||
// then
|
||||
expect(output.message.agent).toBe("Sisyphus (Ultraworker)")
|
||||
expect(sessionState.getSessionAgent("ses-prometheus-to-sisyphus")).toBe("sisyphus")
|
||||
})
|
||||
})
|
||||
|
||||
describe("worktree support", () => {
|
||||
|
||||
Some files were not shown because too many files have changed in this diff Show More
Reference in New Issue
Block a user