Compare commits

..

1 Commits

Author SHA1 Message Date
YeonGyu-Kim
6ba1d675b9 fix(installer): improve Windows compatibility for shell detection and paths
Closes #461
2026-02-24 21:42:04 +09:00
129 changed files with 1675 additions and 2983 deletions

View File

@@ -1,61 +0,0 @@
[sisyphus-bot]
## Confirmed Bug
We have identified the root cause of this issue. The bug is in the config writing logic during installation.
### Root Cause
**File:** `src/cli/config-manager/write-omo-config.ts` (line 46)
```typescript
const merged = deepMergeRecord(existing, newConfig)
```
When a user runs `oh-my-opencode install` (even just to update settings), the installer:
1. Reads the existing config (with user's custom model settings)
2. Generates a **new** config based on detected provider availability
3. Calls `deepMergeRecord(existing, newConfig)`
4. Writes the result back
**The problem:** `deepMergeRecord` overwrites values in `existing` with values from `newConfig`. This means your custom `"model": "openai/gpt-5.2-codex"` gets overwritten by the generated default model (e.g., `anthropic/claude-opus-4-6` if Claude is available).
### Why This Happens
Looking at `deepMergeRecord` (lines 24-25):
```typescript
} else if (sourceValue !== undefined) {
result[key] = sourceValue as TTarget[keyof TTarget]
}
```
Any defined value in the source (generated config) overwrites the target (user's config).
### Fix Approach
The merge direction should be reversed to respect user overrides:
```typescript
const merged = deepMergeRecord(newConfig, existing)
```
This ensures:
- User's explicit settings take precedence
- Only new/undefined keys get populated from generated defaults
- Custom model choices are preserved
### SEVERITY: HIGH
- **Impact:** User configuration is overwritten without consent
- **Affected Files:**
- `src/cli/config-manager/write-omo-config.ts`
- `src/cli/config-manager/deep-merge-record.ts`
- **Trigger:** Running `oh-my-opencode install` (even for unrelated updates)
### Workaround (Until Fix)
Backup your config before running install:
```bash
cp ~/.config/opencode/oh-my-opencode.jsonc ~/.config/opencode/oh-my-opencode.jsonc.backup
```
We're working on a fix that will preserve your explicit model configurations.

View File

@@ -1,10 +1,10 @@
# oh-my-opencode — OpenCode Plugin
**Generated:** 2026-02-24 | **Commit:** fcb90d92 | **Branch:** dev
**Generated:** 2026-02-21 | **Commit:** 86e3c7d1 | **Branch:** dev
## OVERVIEW
OpenCode plugin (npm: `oh-my-opencode`) that extends Claude Code (OpenCode fork) with multi-agent orchestration, 46 lifecycle hooks, 26 tools, skill/command/MCP systems, and Claude Code compatibility. 1208 TypeScript files, 143k LOC.
OpenCode plugin (npm: `oh-my-opencode`) that extends Claude Code (OpenCode fork) with multi-agent orchestration, 44 lifecycle hooks, 26 tools, skill/command/MCP systems, and Claude Code compatibility. 1208 TypeScript files, 143k LOC.
## STRUCTURE
@@ -14,14 +14,14 @@ oh-my-opencode/
│ ├── index.ts # Plugin entry: loadConfig → createManagers → createTools → createHooks → createPluginInterface
│ ├── plugin-config.ts # JSONC multi-level config: user → project → defaults (Zod v4)
│ ├── agents/ # 11 agents (Sisyphus, Hephaestus, Oracle, Librarian, Explore, Atlas, Prometheus, Metis, Momus, Multimodal-Looker, Sisyphus-Junior)
| `hooks/`                # 46 hooks across 39 directories + 6 standalone files
│ ├── hooks/ # 44 hooks across 39 directories + 6 standalone files
│ ├── tools/ # 26 tools across 15 directories
│ ├── features/ # 19 feature modules (background-agent, skill-loader, tmux, MCP-OAuth, etc.)
│ ├── shared/ # 100+ utility files in 13 categories
│ ├── config/ # Zod v4 schema system (22+ files)
│ ├── cli/ # CLI: install, run, doctor, mcp-oauth (Commander.js)
│ ├── mcp/ # 3 built-in remote MCPs (websearch, context7, grep_app)
│ ├── plugin/ # 8 OpenCode hook handlers + 46 hook composition
│ ├── plugin/ # 8 OpenCode hook handlers + 44 hook composition
│ └── plugin-handlers/ # 6-phase config loading pipeline
├── packages/ # Monorepo: comment-checker, opencode-sdk, 10 platform binaries
└── local-ignore/ # Dev-only test fixtures
@@ -34,7 +34,7 @@ OhMyOpenCodePlugin(ctx)
├─→ loadPluginConfig() # JSONC parse → project/user merge → Zod validate → migrate
├─→ createManagers() # TmuxSessionManager, BackgroundManager, SkillMcpManager, ConfigHandler
├─→ createTools() # SkillContext + AvailableCategories + ToolRegistry (26 tools)
├─→ createHooks() # 3-tier: Core(37) + Continuation(7) + Skill(2) = 46 hooks
├─→ createHooks() # 3-tier: Core(35) + Continuation(7) + Skill(2) = 44 hooks
└─→ createPluginInterface() # 8 OpenCode hook handlers → PluginInterface
```
@@ -87,7 +87,7 @@ Fields: agents (14 overridable, 21 fields each), categories (8 built-in + custom
- **Test pattern**: Bun test (`bun:test`), co-located `*.test.ts`, given/when/then style (nested describe with `#given`/`#when`/`#then` prefixes)
- **Factory pattern**: `createXXX()` for all tools, hooks, agents
- **Hook tiers**: Session (23) → Tool-Guard (10) → Transform (4) → Continuation (7) → Skill (2)
- **Hook tiers**: Session (21) → Tool-Guard (10) → Transform (4) → Continuation (7) → Skill (2)
- **Agent modes**: `primary` (respects UI model) vs `subagent` (own fallback chain) vs `all`
- **Model resolution**: 3-step: override → category-default → provider-fallback → system-default
- **Config format**: JSONC with comments, Zod v4 validation, snake_case keys

View File

@@ -960,9 +960,6 @@
}
},
"additionalProperties": false
},
"allow_non_gpt_model": {
"type": "boolean"
}
},
"additionalProperties": false
@@ -3251,11 +3248,6 @@
"prompt_append": {
"type": "string"
},
"max_prompt_tokens": {
"type": "integer",
"exclusiveMinimum": 0,
"maximum": 9007199254740991
},
"is_unstable_agent": {
"type": "boolean"
},

View File

@@ -14,7 +14,6 @@
"@opencode-ai/sdk": "^1.1.19",
"commander": "^14.0.2",
"detect-libc": "^2.0.0",
"diff": "^8.0.3",
"js-yaml": "^4.1.1",
"jsonc-parser": "^3.3.1",
"picocolors": "^1.1.1",
@@ -29,13 +28,13 @@
"typescript": "^5.7.3",
},
"optionalDependencies": {
"oh-my-opencode-darwin-arm64": "3.8.5",
"oh-my-opencode-darwin-x64": "3.8.5",
"oh-my-opencode-linux-arm64": "3.8.5",
"oh-my-opencode-linux-arm64-musl": "3.8.5",
"oh-my-opencode-linux-x64": "3.8.5",
"oh-my-opencode-linux-x64-musl": "3.8.5",
"oh-my-opencode-windows-x64": "3.8.5",
"oh-my-opencode-darwin-arm64": "3.8.1",
"oh-my-opencode-darwin-x64": "3.8.1",
"oh-my-opencode-linux-arm64": "3.8.1",
"oh-my-opencode-linux-arm64-musl": "3.8.1",
"oh-my-opencode-linux-x64": "3.8.1",
"oh-my-opencode-linux-x64-musl": "3.8.1",
"oh-my-opencode-windows-x64": "3.8.1",
},
},
},
@@ -139,8 +138,6 @@
"detect-libc": ["detect-libc@2.1.2", "", {}, "sha512-Btj2BOOO83o3WyH59e8MgXsxEQVcarkUOpEYrubB0urwnN10yQ364rsiByU11nZlqWYZm05i/of7io4mzihBtQ=="],
"diff": ["diff@8.0.3", "", {}, "sha512-qejHi7bcSD4hQAZE0tNAawRK1ZtafHDmMTMkrrIGgSLl7hTnQHmKCeB45xAcbfTqK2zowkM3j3bHt/4b/ARbYQ=="],
"dunder-proto": ["dunder-proto@1.0.1", "", { "dependencies": { "call-bind-apply-helpers": "^1.0.1", "es-errors": "^1.3.0", "gopd": "^1.2.0" } }, "sha512-KIN/nDJBQRcXw0MLVhZE9iQHmG68qAVIBg9CqmUYjmQIhgij9U5MFvrqkUL5FbtyyzZuOeOt0zdeRe4UY7ct+A=="],
"ee-first": ["ee-first@1.1.1", "", {}, "sha512-WMwm9LhRUo+WUaRN+vRuETqG89IgZphVSNkdFgeb6sS/E4OrDIN7t48CAewSHXc6C8lefD8KKfr5vY61brQlow=="],
@@ -231,19 +228,19 @@
"object-inspect": ["object-inspect@1.13.4", "", {}, "sha512-W67iLl4J2EXEGTbfeHCffrjDfitvLANg0UlX3wFUUSTx92KXRFegMHUVgSqE+wvhAbi4WqjGg9czysTV2Epbew=="],
"oh-my-opencode-darwin-arm64": ["oh-my-opencode-darwin-arm64@3.8.5", "", { "os": "darwin", "cpu": "arm64", "bin": { "oh-my-opencode": "bin/oh-my-opencode" } }, "sha512-bbLu1We9NNhYAVp9Q/FK8dYFlYLp2PKfvdBCr+O6QjNRixdjp8Ru4RK7i9mKg0ybYBUzzCcbbC2Cc1o8orkhBA=="],
"oh-my-opencode-darwin-arm64": ["oh-my-opencode-darwin-arm64@3.8.1", "", { "os": "darwin", "cpu": "arm64", "bin": { "oh-my-opencode": "bin/oh-my-opencode" } }, "sha512-vbtS0WUFOZpufKzlX2G83fIDry3rpiXej8zNuXNCkx7hF34rK04rj0zeBH9dL+kdNV0Ys0Wl1rR1Mjto28UcAw=="],
"oh-my-opencode-darwin-x64": ["oh-my-opencode-darwin-x64@3.8.5", "", { "os": "darwin", "cpu": "x64", "bin": { "oh-my-opencode": "bin/oh-my-opencode" } }, "sha512-N9GcmzYgL87UybSaMGiHc5lwT5Mxg1tyB502el5syouN39wfeUYoj37SonENrMUTiEfn75Lwv/5cSLCesSubpA=="],
"oh-my-opencode-darwin-x64": ["oh-my-opencode-darwin-x64@3.8.1", "", { "os": "darwin", "cpu": "x64", "bin": { "oh-my-opencode": "bin/oh-my-opencode" } }, "sha512-gLz6dLNg9hr7roqBjaqlxta6+XYCs032/FiE0CiwypIBtYOq5EAgDVJ95JY5DQ2M+3Un028d50yMfwsfNfGlSw=="],
"oh-my-opencode-linux-arm64": ["oh-my-opencode-linux-arm64@3.8.5", "", { "os": "linux", "cpu": "arm64", "bin": { "oh-my-opencode": "bin/oh-my-opencode" } }, "sha512-ki4a7s1DD5z5wEKmzcchqAKOIpw0LsBvyF8ieqNLS5Xl8PWE0gAZ7rqjlXC54NTubpexVH6lO2yenFJsk2Zk9A=="],
"oh-my-opencode-linux-arm64": ["oh-my-opencode-linux-arm64@3.8.1", "", { "os": "linux", "cpu": "arm64", "bin": { "oh-my-opencode": "bin/oh-my-opencode" } }, "sha512-teAIuHlR5xOAoUmA+e0bGzy3ikgIr+nCdyOPwHYm8jIp0aBUWAqbcdoQLeNTgenWpoM8vhHk+2xh4WcCeQzjEA=="],
"oh-my-opencode-linux-arm64-musl": ["oh-my-opencode-linux-arm64-musl@3.8.5", "", { "os": "linux", "cpu": "arm64", "bin": { "oh-my-opencode": "bin/oh-my-opencode" } }, "sha512-9+6hU3z503fBzuV0VjxIkTKFElbKacHijFcdKAussG6gPFLWmCRWtdowzEDwUfAoIsoHHH7FBwvh5waGp/ZksA=="],
"oh-my-opencode-linux-arm64-musl": ["oh-my-opencode-linux-arm64-musl@3.8.1", "", { "os": "linux", "cpu": "arm64", "bin": { "oh-my-opencode": "bin/oh-my-opencode" } }, "sha512-VzBEq1H5dllEloouIoLdbw1icNUW99qmvErFrNj66mX42DNXK+f1zTtvBG8U6eeFfUBRRJoUjdCsvO65f8BkFA=="],
"oh-my-opencode-linux-x64": ["oh-my-opencode-linux-x64@3.8.5", "", { "os": "linux", "cpu": "x64", "bin": { "oh-my-opencode": "bin/oh-my-opencode" } }, "sha512-DmnMK/PgvdcCYL+OQE5iZWgi/vmjm0sIPQVQgSUbWn3izcUF7C5DtlxqaU2cKxNZwrhDTlJdLWxmJqgLmLqd9A=="],
"oh-my-opencode-linux-x64": ["oh-my-opencode-linux-x64@3.8.1", "", { "os": "linux", "cpu": "x64", "bin": { "oh-my-opencode": "bin/oh-my-opencode" } }, "sha512-8hDcb8s+wdQpQObSmiyaaTV0P/js2Bs9Lu+HmzrkKjuMLXXj/Gk7K0kKWMoEnMbMGfj86GfBHHIWmu9juI/SjA=="],
"oh-my-opencode-linux-x64-musl": ["oh-my-opencode-linux-x64-musl@3.8.5", "", { "os": "linux", "cpu": "x64", "bin": { "oh-my-opencode": "bin/oh-my-opencode" } }, "sha512-jhCNStljsyapVq9X7PaHSOcWxxEA4BUcIibvoPs/xc7fVP8D47p651LzIRsM6STn6Bx684mlYbxxX1P/0QPKNg=="],
"oh-my-opencode-linux-x64-musl": ["oh-my-opencode-linux-x64-musl@3.8.1", "", { "os": "linux", "cpu": "x64", "bin": { "oh-my-opencode": "bin/oh-my-opencode" } }, "sha512-idyH5bdYn7wrLkIkYr83omN83E2BjA/9DUHCX2we8VXbhDVbBgmMpUg8B8nKnd5NK/SyLHgRs5QqQJw8XBC0cQ=="],
"oh-my-opencode-windows-x64": ["oh-my-opencode-windows-x64@3.8.5", "", { "os": "win32", "cpu": "x64", "bin": { "oh-my-opencode": "bin/oh-my-opencode.exe" } }, "sha512-lcPBp9NCNQ6TnqzsN9p/K+xKwOzBoIPw7HncxmrXSberZ3uHy0K9uNraQ7fqnXIKWqQiK4kSwWfSHpmhbaHiNg=="],
"oh-my-opencode-windows-x64": ["oh-my-opencode-windows-x64@3.8.1", "", { "os": "win32", "cpu": "x64", "bin": { "oh-my-opencode": "bin/oh-my-opencode.exe" } }, "sha512-O30L1PUF9aq1vSOyadcXQOLnDFSTvYn6cGd5huh0LAK/us0hGezoahtXegMdFtDXPIIREJlkRQhyJiafza7YgA=="],
"on-finished": ["on-finished@2.4.1", "", { "dependencies": { "ee-first": "1.1.1" } }, "sha512-oVlzkg3ENAhCk2zdv7IJwd/QUD4z2RxRwpkcGY8psCVcCYZNq4wYnVWALHM+brtuJjePWiYF/ClmuDr8Ch5+kg=="],

View File

@@ -60,7 +60,6 @@
"@opencode-ai/sdk": "^1.1.19",
"commander": "^14.0.2",
"detect-libc": "^2.0.0",
"diff": "^8.0.3",
"js-yaml": "^4.1.1",
"jsonc-parser": "^3.3.1",
"picocolors": "^1.1.1",

View File

@@ -1703,70 +1703,6 @@
"created_at": "2026-02-23T19:27:59Z",
"repoId": 1108837393,
"pullRequestNo": 2080
},
{
"name": "PHP-Expert",
"id": 12047666,
"comment_id": 3951828700,
"created_at": "2026-02-24T13:27:18Z",
"repoId": 1108837393,
"pullRequestNo": 2098
},
{
"name": "Pantoria",
"id": 37699442,
"comment_id": 3953543578,
"created_at": "2026-02-24T17:12:31Z",
"repoId": 1108837393,
"pullRequestNo": 1983
},
{
"name": "east-shine",
"id": 20237288,
"comment_id": 3957576758,
"created_at": "2026-02-25T08:19:34Z",
"repoId": 1108837393,
"pullRequestNo": 2113
},
{
"name": "SupenBysz",
"id": 3314033,
"comment_id": 3962352704,
"created_at": "2026-02-25T22:00:54Z",
"repoId": 1108837393,
"pullRequestNo": 2119
},
{
"name": "zhzy0077",
"id": 8717471,
"comment_id": 3964015975,
"created_at": "2026-02-26T04:45:23Z",
"repoId": 1108837393,
"pullRequestNo": 2125
},
{
"name": "spacecowboy0416",
"id": 239068998,
"comment_id": 3964320737,
"created_at": "2026-02-26T06:05:27Z",
"repoId": 1108837393,
"pullRequestNo": 2126
},
{
"name": "imwxc",
"id": 49653609,
"comment_id": 3965127447,
"created_at": "2026-02-26T09:00:16Z",
"repoId": 1108837393,
"pullRequestNo": 2129
},
{
"name": "maou-shonen",
"id": 22576780,
"comment_id": 3965445132,
"created_at": "2026-02-26T09:50:46Z",
"repoId": 1108837393,
"pullRequestNo": 2131
}
]
}

View File

@@ -1,6 +1,6 @@
# src/ — Plugin Source
**Generated:** 2026-02-24
**Generated:** 2026-02-21
## OVERVIEW
@@ -14,7 +14,7 @@ Root source directory. Entry point `index.ts` orchestrates 4-step initialization
| `plugin-config.ts` | JSONC parse, multi-level merge (user → project → defaults), Zod validation |
| `create-managers.ts` | TmuxSessionManager, BackgroundManager, SkillMcpManager, ConfigHandler |
| `create-tools.ts` | SkillContext + AvailableCategories + ToolRegistry |
| `create-hooks.ts` | 3-tier hook composition: Core(37) + Continuation(7) + Skill(2) |
| `create-hooks.ts` | 3-tier hook composition: Core(35) + Continuation(7) + Skill(2) |
| `plugin-interface.ts` | Assembles 8 OpenCode hook handlers into PluginInterface |
## CONFIG LOADING
@@ -32,9 +32,9 @@ loadPluginConfig(directory, ctx)
```
createHooks()
├─→ createCoreHooks() # 37 hooks
│ ├─ createSessionHooks() # 23: contextWindowMonitor, thinkMode, ralphLoop, modelFallback, runtimeFallback, noSisyphusGpt, noHephaestusNonGpt, anthropicEffort...
│ ├─ createToolGuardHooks() # 10: commentChecker, rulesInjector, writeExistingFileGuard, jsonErrorRecovery, hashlineReadEnhancer...
├─→ createCoreHooks() # 35 hooks
│ ├─ createSessionHooks() # 21: contextWindowMonitor, thinkMode, ralphLoop, sessionRecovery, jsonErrorRecovery, sisyphusGptHephaestusReminder, anthropicEffort...
│ ├─ createToolGuardHooks() # 10: commentChecker, rulesInjector, writeExistingFileGuard, hashlineEditDiffEnhancer...
│ └─ createTransformHooks() # 4: claudeCodeHooks, keywordDetector, contextInjector, thinkingBlockValidator
├─→ createContinuationHooks() # 7: todoContinuationEnforcer, atlas, stopContinuationGuard...
└─→ createSkillHooks() # 2: categorySkillReminder, autoSlashCommand

View File

@@ -1,6 +1,6 @@
# src/agents/ — 11 Agent Definitions
**Generated:** 2026-02-24
**Generated:** 2026-02-21
## OVERVIEW

View File

@@ -17,6 +17,7 @@ import type { AvailableAgent, AvailableSkill, AvailableCategory } from "../dynam
import { buildCategorySkillsDelegationGuide } from "../dynamic-agent-prompt-builder"
import type { CategoryConfig } from "../../config/schema"
import { mergeCategories } from "../../shared/merge-categories"
import { createAgentToolRestrictions } from "../../shared/permission-compat"
import { getDefaultAtlasPrompt } from "./default"
import { getGptAtlasPrompt } from "./gpt"
@@ -99,6 +100,11 @@ function buildDynamicOrchestratorPrompt(ctx?: OrchestratorContext): string {
}
export function createAtlasAgent(ctx: OrchestratorContext): AgentConfig {
const restrictions = createAgentToolRestrictions([
"task",
"call_omo_agent",
])
const baseConfig = {
description:
"Orchestrates work via task() to complete ALL tasks in a todo list until fully done. (Atlas - OhMyOpenCode)",
@@ -107,6 +113,7 @@ export function createAtlasAgent(ctx: OrchestratorContext): AgentConfig {
temperature: 0.1,
prompt: buildDynamicOrchestratorPrompt(ctx),
color: "#10B981",
...restrictions,
}
return baseConfig as AgentConfig

View File

@@ -1,41 +0,0 @@
/// <reference types="bun-types" />
import { describe, test, expect } from "bun:test"
import { createEnvContext } from "./env-context"
describe("createEnvContext", () => {
test("returns omo-env block with timezone and locale", () => {
// #given - no setup needed
// #when
const result = createEnvContext()
// #then
expect(result).toContain("<omo-env>")
expect(result).toContain("</omo-env>")
expect(result).toContain("Timezone:")
expect(result).toContain("Locale:")
expect(result).not.toContain("Current date:")
})
test("does not include time with seconds precision to preserve token cache", () => {
// #given - seconds-precision time changes every second, breaking cache on every request
// #when
const result = createEnvContext()
// #then - no HH:MM:SS pattern anywhere in the output
expect(result).not.toMatch(/\d{1,2}:\d{2}:\d{2}/)
})
test("does not include date or time fields since OpenCode already provides them", () => {
// #given - OpenCode's system.ts already injects date, platform, working directory
// #when
const result = createEnvContext()
// #then - only timezone and locale remain; both are stable across requests
expect(result).not.toContain("Current date:")
expect(result).not.toContain("Current time:")
})
})

View File

@@ -1,15 +1,32 @@
/**
* Creates OmO-specific environment context (timezone, locale).
* Creates OmO-specific environment context (time, timezone, locale).
 * Note: Working directory and platform are already provided by OpenCode's system.ts,
* so we only include fields that OpenCode doesn't provide to avoid duplication.
* See: https://github.com/code-yeongyu/oh-my-opencode/issues/379
*/
export function createEnvContext(): string {
const now = new Date()
const timezone = Intl.DateTimeFormat().resolvedOptions().timeZone
const locale = Intl.DateTimeFormat().resolvedOptions().locale
const dateStr = now.toLocaleDateString(locale, {
weekday: "short",
year: "numeric",
month: "short",
day: "numeric",
})
const timeStr = now.toLocaleTimeString(locale, {
hour: "2-digit",
minute: "2-digit",
second: "2-digit",
hour12: true,
})
return `
<omo-env>
Current date: ${dateStr}
Current time: ${timeStr}
Timezone: ${timezone}
Locale: ${locale}
</omo-env>`

View File

@@ -448,21 +448,6 @@ ${oracleSection}
4. **Run build** if applicable — exit code 0 required
5. **Tell user** what you verified and the results — keep it clear and helpful
### Auto-Commit Policy (MANDATORY for implementation/fix work)
1. **Auto-commit after implementation is complete** when the task includes feature/fix code changes
2. **Commit ONLY after verification gates pass**:
- \`lsp_diagnostics\` clean on all modified files
- Related tests pass
- Typecheck/build pass when applicable
3. **If any gate fails, DO NOT commit** — fix issues first, re-run verification, then commit
4. **Use Conventional Commits format** with meaningful intent-focused messages:
- \`feat(scope): add ...\` for new functionality
- \`fix(scope): resolve ...\` for bug fixes
- \`refactor(scope): simplify ...\` for internal restructuring
5. **Do not make placeholder commits** (\`wip\`, \`temp\`, \`update\`) or commit unverified code
6. **If user explicitly says not to commit**, skip commit and report that changes are left uncommitted
- **File edit** — \`lsp_diagnostics\` clean
- **Build** — Exit code 0
- **Tests** — Pass (or pre-existing failures noted)

View File

@@ -4,7 +4,6 @@ import { createLibrarianAgent } from "./librarian"
import { createExploreAgent } from "./explore"
import { createMomusAgent } from "./momus"
import { createMetisAgent } from "./metis"
import { createAtlasAgent } from "./atlas"
const TEST_MODEL = "anthropic/claude-sonnet-4-5"
@@ -97,18 +96,4 @@ describe("read-only agent tool restrictions", () => {
}
})
})
describe("Atlas", () => {
test("allows delegation tools for orchestration", () => {
// given
const agent = createAtlasAgent({ model: TEST_MODEL })
// when
const permission = (agent.permission ?? {}) as Record<string, string>
// then
expect(permission["task"]).toBeUndefined()
expect(permission["call_omo_agent"]).toBeUndefined()
})
})
})

View File

@@ -2,17 +2,11 @@ import { describe, test, expect } from "bun:test";
import { isGptModel, isGeminiModel } from "./types";
describe("isGptModel", () => {
test("standard openai provider gpt models", () => {
test("standard openai provider models", () => {
expect(isGptModel("openai/gpt-5.2")).toBe(true);
expect(isGptModel("openai/gpt-4o")).toBe(true);
});
test("o-series models are not gpt by name", () => {
expect(isGptModel("openai/o1")).toBe(false);
expect(isGptModel("openai/o3-mini")).toBe(false);
expect(isGptModel("litellm/o1")).toBe(false);
expect(isGptModel("litellm/o3-mini")).toBe(false);
expect(isGptModel("litellm/o4-mini")).toBe(false);
expect(isGptModel("openai/o1")).toBe(true);
expect(isGptModel("openai/o3-mini")).toBe(true);
});
test("github copilot gpt models", () => {
@@ -23,6 +17,9 @@ describe("isGptModel", () => {
test("litellm proxied gpt models", () => {
expect(isGptModel("litellm/gpt-5.2")).toBe(true);
expect(isGptModel("litellm/gpt-4o")).toBe(true);
expect(isGptModel("litellm/o1")).toBe(true);
expect(isGptModel("litellm/o3-mini")).toBe(true);
expect(isGptModel("litellm/o4-mini")).toBe(true);
});
test("other proxied gpt models", () => {
@@ -30,11 +27,6 @@ describe("isGptModel", () => {
expect(isGptModel("custom-provider/gpt-5.2")).toBe(true);
});
test("venice provider gpt models", () => {
expect(isGptModel("venice/gpt-5.2")).toBe(true);
expect(isGptModel("venice/gpt-4o")).toBe(true);
});
test("gpt4 prefix without hyphen (legacy naming)", () => {
expect(isGptModel("litellm/gpt4o")).toBe(true);
expect(isGptModel("ollama/gpt4")).toBe(true);

View File

@@ -70,9 +70,14 @@ function extractModelName(model: string): string {
return model.includes("/") ? model.split("/").pop() ?? model : model
}
const GPT_MODEL_PREFIXES = ["gpt-", "gpt4", "o1", "o3", "o4"]
export function isGptModel(model: string): boolean {
if (model.startsWith("openai/") || model.startsWith("github-copilot/gpt-"))
return true
const modelName = extractModelName(model).toLowerCase()
return modelName.includes("gpt")
return GPT_MODEL_PREFIXES.some((prefix) => modelName.startsWith(prefix))
}
const GEMINI_PROVIDERS = ["google/", "google-vertex/"]

View File

@@ -589,22 +589,20 @@ describe("createBuiltinAgents with requiresProvider gating (hephaestus)", () =>
}
})
test("hephaestus IS created when github-copilot is connected with a GPT model", async () => {
// #given - github-copilot provider has gpt-5.3-codex available
test("hephaestus is created when github-copilot provider is connected", async () => {
// #given - github-copilot provider has models available
const fetchSpy = spyOn(shared, "fetchAvailableModels").mockResolvedValue(
new Set(["github-copilot/gpt-5.3-codex"])
)
const cacheSpy = spyOn(connectedProvidersCache, "readConnectedProvidersCache").mockReturnValue(null)
try {
// #when
const agents = await createBuiltinAgents([], {}, undefined, TEST_DEFAULT_MODEL, undefined, undefined, [], {})
// #then - github-copilot is now a valid provider for hephaestus
// #then
expect(agents.hephaestus).toBeDefined()
} finally {
fetchSpy.mockRestore()
cacheSpy.mockRestore()
}
})

View File

@@ -1,6 +1,6 @@
# src/cli/ — CLI: install, run, doctor, mcp-oauth
**Generated:** 2026-02-24
**Generated:** 2026-02-21
## OVERVIEW

View File

@@ -750,6 +750,10 @@ exports[`generateModelConfig fallback providers uses GitHub Copilot models when
"explore": {
"model": "github-copilot/gpt-5-mini",
},
"hephaestus": {
"model": "github-copilot/gpt-5.3-codex",
"variant": "medium",
},
"librarian": {
"model": "github-copilot/claude-sonnet-4.5",
},
@@ -782,12 +786,16 @@ exports[`generateModelConfig fallback providers uses GitHub Copilot models when
"model": "github-copilot/gemini-3-pro-preview",
"variant": "high",
},
"deep": {
"model": "github-copilot/gpt-5.3-codex",
"variant": "medium",
},
"quick": {
"model": "github-copilot/claude-haiku-4.5",
},
"ultrabrain": {
"model": "github-copilot/gemini-3-pro-preview",
"variant": "high",
"model": "github-copilot/gpt-5.3-codex",
"variant": "xhigh",
},
"unspecified-high": {
"model": "github-copilot/claude-sonnet-4.5",
@@ -816,6 +824,10 @@ exports[`generateModelConfig fallback providers uses GitHub Copilot models with
"explore": {
"model": "github-copilot/gpt-5-mini",
},
"hephaestus": {
"model": "github-copilot/gpt-5.3-codex",
"variant": "medium",
},
"librarian": {
"model": "github-copilot/claude-sonnet-4.5",
},
@@ -848,12 +860,16 @@ exports[`generateModelConfig fallback providers uses GitHub Copilot models with
"model": "github-copilot/gemini-3-pro-preview",
"variant": "high",
},
"deep": {
"model": "github-copilot/gpt-5.3-codex",
"variant": "medium",
},
"quick": {
"model": "github-copilot/claude-haiku-4.5",
},
"ultrabrain": {
"model": "github-copilot/gemini-3-pro-preview",
"variant": "high",
"model": "github-copilot/gpt-5.3-codex",
"variant": "xhigh",
},
"unspecified-high": {
"model": "github-copilot/claude-opus-4.6",
@@ -1269,7 +1285,7 @@ exports[`generateModelConfig mixed provider scenarios uses all fallback provider
"model": "opencode/claude-haiku-4-5",
},
"hephaestus": {
"model": "opencode/gpt-5.3-codex",
"model": "github-copilot/gpt-5.3-codex",
"variant": "medium",
},
"librarian": {
@@ -1305,14 +1321,14 @@ exports[`generateModelConfig mixed provider scenarios uses all fallback provider
"variant": "high",
},
"deep": {
"model": "opencode/gpt-5.3-codex",
"model": "github-copilot/gpt-5.3-codex",
"variant": "medium",
},
"quick": {
"model": "github-copilot/claude-haiku-4.5",
},
"ultrabrain": {
"model": "opencode/gpt-5.3-codex",
"model": "github-copilot/gpt-5.3-codex",
"variant": "xhigh",
},
"unspecified-high": {

View File

@@ -1,6 +1,6 @@
# src/cli/config-manager/ — CLI Installation Utilities
**Generated:** 2026-02-24
**Generated:** 2026-02-21
## OVERVIEW

View File

@@ -0,0 +1,47 @@
import { afterEach, beforeEach, describe, expect, spyOn, test } from "bun:test"
import { initConfigContext, resetConfigContext } from "./config-context"
import { runBunInstallWithDetails } from "./bun-install"
describe("bun-install", () => {
let originalPlatform: NodeJS.Platform
beforeEach(() => {
originalPlatform = process.platform
resetConfigContext()
initConfigContext("opencode", null)
})
afterEach(() => {
Object.defineProperty(process, "platform", { value: originalPlatform })
resetConfigContext()
})
test("#given Windows with bun.exe on PATH #when runBunInstallWithDetails is called #then uses bun.exe", async () => {
Object.defineProperty(process, "platform", { value: "win32" })
const whichSpy = spyOn(Bun, "which")
.mockImplementation((binary: string) => {
if (binary === "bun.exe") {
return "C:\\Tools\\bun.exe"
}
return null
})
const spawnSpy = spyOn(Bun, "spawn").mockReturnValue({
exited: Promise.resolve(0),
exitCode: 0,
kill: () => {},
} as unknown as ReturnType<typeof Bun.spawn>)
try {
const result = await runBunInstallWithDetails()
expect(result.success).toBe(true)
expect(spawnSpy).toHaveBeenCalledTimes(1)
expect(spawnSpy.mock.calls[0]?.[0]).toEqual(["C:\\Tools\\bun.exe", "install"])
} finally {
spawnSpy.mockRestore()
whichSpy.mockRestore()
}
})
})

View File

@@ -9,6 +9,14 @@ export interface BunInstallResult {
error?: string
}
function resolveBunCommand(): string {
if (process.platform === "win32") {
return Bun.which("bun.exe") ?? Bun.which("bun") ?? "bun.exe"
}
return Bun.which("bun") ?? "bun"
}
export async function runBunInstall(): Promise<boolean> {
const result = await runBunInstallWithDetails()
return result.success
@@ -16,7 +24,8 @@ export async function runBunInstall(): Promise<boolean> {
export async function runBunInstallWithDetails(): Promise<BunInstallResult> {
try {
const proc = Bun.spawn(["bun", "install"], {
const bunCommand = resolveBunCommand()
const proc = Bun.spawn([bunCommand, "install"], {
cwd: getConfigDir(),
stdout: "inherit",
stderr: "inherit",
@@ -39,7 +48,7 @@ export async function runBunInstallWithDetails(): Promise<BunInstallResult> {
return {
success: false,
timedOut: true,
error: `bun install timed out after ${BUN_INSTALL_TIMEOUT_SECONDS} seconds. Try running manually: cd ${getConfigDir()} && bun i`,
error: `bun install timed out after ${BUN_INSTALL_TIMEOUT_SECONDS} seconds. Try running manually in ${getConfigDir()}: bun install`,
}
}
@@ -55,7 +64,7 @@ export async function runBunInstallWithDetails(): Promise<BunInstallResult> {
const message = err instanceof Error ? err.message : String(err)
return {
success: false,
error: `bun install failed: ${message}. Is bun installed? Try: curl -fsSL https://bun.sh/install | bash`,
error: `bun install failed: ${message}. Ensure Bun is installed and available in PATH: https://bun.sh/docs/installation`,
}
}
}

View File

@@ -1,80 +0,0 @@
import { afterEach, beforeEach, describe, expect, it } from "bun:test"
import { mkdirSync, readFileSync, rmSync, writeFileSync } from "node:fs"
import { tmpdir } from "node:os"
import { join } from "node:path"
import { parseJsonc } from "../../shared/jsonc-parser"
import type { InstallConfig } from "../types"
import { resetConfigContext } from "./config-context"
import { generateOmoConfig } from "./generate-omo-config"
import { writeOmoConfig } from "./write-omo-config"
const installConfig: InstallConfig = {
hasClaude: true,
isMax20: true,
hasOpenAI: true,
hasGemini: true,
hasCopilot: false,
hasOpencodeZen: false,
hasZaiCodingPlan: false,
hasKimiForCoding: false,
}
function getRecord(value: unknown): Record<string, unknown> {
if (value && typeof value === "object" && !Array.isArray(value)) {
return value as Record<string, unknown>
}
return {}
}
describe("writeOmoConfig", () => {
let testConfigDir = ""
let testConfigPath = ""
beforeEach(() => {
testConfigDir = join(tmpdir(), `omo-write-config-${Date.now()}-${Math.random().toString(36).slice(2)}`)
testConfigPath = join(testConfigDir, "oh-my-opencode.json")
mkdirSync(testConfigDir, { recursive: true })
process.env.OPENCODE_CONFIG_DIR = testConfigDir
resetConfigContext()
})
afterEach(() => {
rmSync(testConfigDir, { recursive: true, force: true })
resetConfigContext()
delete process.env.OPENCODE_CONFIG_DIR
})
it("preserves existing user values while adding new defaults", () => {
// given
const existingConfig = {
agents: {
sisyphus: {
model: "custom/provider-model",
},
},
disabled_hooks: ["comment-checker"],
}
writeFileSync(testConfigPath, JSON.stringify(existingConfig, null, 2) + "\n", "utf-8")
const generatedDefaults = generateOmoConfig(installConfig)
// when
const result = writeOmoConfig(installConfig)
// then
expect(result.success).toBe(true)
const savedConfig = parseJsonc<Record<string, unknown>>(readFileSync(testConfigPath, "utf-8"))
const savedAgents = getRecord(savedConfig.agents)
const savedSisyphus = getRecord(savedAgents.sisyphus)
expect(savedSisyphus.model).toBe("custom/provider-model")
expect(savedConfig.disabled_hooks).toEqual(["comment-checker"])
for (const defaultKey of Object.keys(generatedDefaults)) {
expect(savedConfig).toHaveProperty(defaultKey)
}
})
})

View File

@@ -43,7 +43,7 @@ export function writeOmoConfig(installConfig: InstallConfig): ConfigMergeResult
return { success: true, configPath: omoConfigPath }
}
const merged = deepMergeRecord(newConfig, existing)
const merged = deepMergeRecord(existing, newConfig)
writeFileSync(omoConfigPath, JSON.stringify(merged, null, 2) + "\n")
} catch (parseErr) {
if (parseErr instanceof SyntaxError) {

View File

@@ -17,9 +17,9 @@ export const CLI_AGENT_MODEL_REQUIREMENTS: Record<string, ModelRequirement> = {
},
hephaestus: {
fallbackChain: [
{ providers: ["openai", "opencode"], model: "gpt-5.3-codex", variant: "medium" },
{ providers: ["openai", "github-copilot", "opencode"], model: "gpt-5.3-codex", variant: "medium" },
],
requiresProvider: ["openai", "opencode"],
requiresProvider: ["openai", "github-copilot", "opencode"],
},
oracle: {
fallbackChain: [
@@ -100,14 +100,14 @@ export const CLI_CATEGORY_MODEL_REQUIREMENTS: Record<string, ModelRequirement> =
},
ultrabrain: {
fallbackChain: [
{ providers: ["openai", "opencode"], model: "gpt-5.3-codex", variant: "xhigh" },
{ providers: ["openai", "github-copilot", "opencode"], model: "gpt-5.3-codex", variant: "xhigh" },
{ providers: ["google", "github-copilot", "opencode"], model: "gemini-3-pro", variant: "high" },
{ providers: ["anthropic", "github-copilot", "opencode"], model: "claude-opus-4-6", variant: "max" },
],
},
deep: {
fallbackChain: [
{ providers: ["openai", "opencode"], model: "gpt-5.3-codex", variant: "medium" },
{ providers: ["openai", "github-copilot", "opencode"], model: "gpt-5.3-codex", variant: "medium" },
{ providers: ["anthropic", "github-copilot", "opencode"], model: "claude-opus-4-6", variant: "max" },
{ providers: ["google", "github-copilot", "opencode"], model: "gemini-3-pro", variant: "high" },
],
@@ -131,7 +131,7 @@ export const CLI_CATEGORY_MODEL_REQUIREMENTS: Record<string, ModelRequirement> =
"unspecified-low": {
fallbackChain: [
{ providers: ["anthropic", "github-copilot", "opencode"], model: "claude-sonnet-4-5" },
{ providers: ["openai", "opencode"], model: "gpt-5.3-codex", variant: "medium" },
{ providers: ["openai", "github-copilot", "opencode"], model: "gpt-5.3-codex", variant: "medium" },
{ providers: ["google", "github-copilot", "opencode"], model: "gemini-3-flash" },
],
},

View File

@@ -421,15 +421,16 @@ describe("generateModelConfig", () => {
expect(result.agents?.hephaestus?.variant).toBe("medium")
})
test("Hephaestus is NOT created when only Copilot is available (gpt-5.3-codex unavailable on github-copilot)", () => {
test("Hephaestus is created when Copilot is available (github-copilot provider connected)", () => {
// #given
const config = createConfig({ hasCopilot: true })
// #when
const result = generateModelConfig(config)
// #then - hephaestus is omitted because gpt-5.3-codex is not available on github-copilot
expect(result.agents?.hephaestus).toBeUndefined()
// #then
expect(result.agents?.hephaestus?.model).toBe("github-copilot/gpt-5.3-codex")
expect(result.agents?.hephaestus?.variant).toBe("medium")
})
test("Hephaestus is created when OpenCode Zen is available (opencode provider connected)", () => {

View File

@@ -1,6 +1,6 @@
# src/cli/run/ — Non-Interactive Session Launcher
**Generated:** 2026-02-24
**Generated:** 2026-02-21
## OVERVIEW

View File

@@ -1,4 +1,4 @@
const { describe, it, expect, spyOn } = require("bun:test")
import { describe, it, expect, spyOn } from "bun:test"
import type { RunContext } from "./types"
import { createEventState } from "./events"
import { handleSessionStatus, handleMessagePartUpdated, handleMessageUpdated, handleTuiToast } from "./event-handlers"
@@ -235,7 +235,9 @@ describe("handleMessagePartUpdated", () => {
it("prints completion metadata once when assistant text part is completed", () => {
// given
const nowSpy = spyOn(Date, "now").mockReturnValue(3400)
const nowSpy = spyOn(Date, "now")
nowSpy.mockReturnValueOnce(1000)
nowSpy.mockReturnValueOnce(3400)
const ctx = createMockContext("ses_main")
const state = createEventState()
@@ -257,7 +259,6 @@ describe("handleMessagePartUpdated", () => {
} as any,
state,
)
state.messageStartedAtById["msg_1"] = 1000
// when
handleMessagePartUpdated(

View File

@@ -7,8 +7,6 @@ export interface EventState {
currentTool: string | null
/** Set to true when the main session has produced meaningful work (text, tool call, or tool result) */
hasReceivedMeaningfulWork: boolean
/** Timestamp of the last received event (for watchdog detection) */
lastEventTimestamp: number
/** Count of assistant messages for the main session */
messageCount: number
/** Current agent name from the latest assistant message */
@@ -56,7 +54,6 @@ export function createEventState(): EventState {
lastPartText: "",
currentTool: null,
hasReceivedMeaningfulWork: false,
lastEventTimestamp: Date.now(),
messageCount: 0,
currentAgent: null,
currentModel: null,

View File

@@ -35,9 +35,6 @@ export async function processEvents(
logEventVerbose(ctx, payload)
}
// Update last event timestamp for watchdog detection
state.lastEventTimestamp = Date.now()
handleSessionError(ctx, payload, state)
handleSessionIdle(ctx, payload, state)
handleSessionStatus(ctx, payload, state)

View File

@@ -8,15 +8,11 @@ const DEFAULT_POLL_INTERVAL_MS = 500
const DEFAULT_REQUIRED_CONSECUTIVE = 1
const ERROR_GRACE_CYCLES = 3
const MIN_STABILIZATION_MS = 1_000
const DEFAULT_EVENT_WATCHDOG_MS = 30_000 // 30 seconds
const DEFAULT_SECONDARY_MEANINGFUL_WORK_TIMEOUT_MS = 60_000 // 60 seconds
export interface PollOptions {
pollIntervalMs?: number
requiredConsecutive?: number
minStabilizationMs?: number
eventWatchdogMs?: number
secondaryMeaningfulWorkTimeoutMs?: number
}
export async function pollForCompletion(
@@ -32,15 +28,9 @@ export async function pollForCompletion(
options.minStabilizationMs ?? MIN_STABILIZATION_MS
const minStabilizationMs =
rawMinStabilizationMs > 0 ? rawMinStabilizationMs : MIN_STABILIZATION_MS
const eventWatchdogMs =
options.eventWatchdogMs ?? DEFAULT_EVENT_WATCHDOG_MS
const secondaryMeaningfulWorkTimeoutMs =
options.secondaryMeaningfulWorkTimeoutMs ??
DEFAULT_SECONDARY_MEANINGFUL_WORK_TIMEOUT_MS
let consecutiveCompleteChecks = 0
let errorCycleCount = 0
let firstWorkTimestamp: number | null = null
let secondaryTimeoutChecked = false
const pollStartTimestamp = Date.now()
while (!abortController.signal.aborted) {
@@ -69,37 +59,7 @@ export async function pollForCompletion(
errorCycleCount = 0
}
// Watchdog: if no events received for N seconds, verify session status via API
let mainSessionStatus: "idle" | "busy" | "retry" | null = null
if (eventState.lastEventTimestamp !== null) {
const timeSinceLastEvent = Date.now() - eventState.lastEventTimestamp
if (timeSinceLastEvent > eventWatchdogMs) {
// Events stopped coming - verify actual session state
console.log(
pc.yellow(
`\n No events for ${Math.round(
timeSinceLastEvent / 1000
)}s, verifying session status...`
)
)
// Force check session status directly
mainSessionStatus = await getMainSessionStatus(ctx)
if (mainSessionStatus === "idle") {
eventState.mainSessionIdle = true
} else if (mainSessionStatus === "busy" || mainSessionStatus === "retry") {
eventState.mainSessionIdle = false
}
// Reset timestamp to avoid repeated checks
eventState.lastEventTimestamp = Date.now()
}
}
// Only call getMainSessionStatus if watchdog didn't already check
if (mainSessionStatus === null) {
mainSessionStatus = await getMainSessionStatus(ctx)
}
const mainSessionStatus = await getMainSessionStatus(ctx)
if (mainSessionStatus === "busy" || mainSessionStatus === "retry") {
eventState.mainSessionIdle = false
} else if (mainSessionStatus === "idle") {
@@ -121,50 +81,6 @@ export async function pollForCompletion(
consecutiveCompleteChecks = 0
continue
}
// Secondary timeout: if we've been polling for reasonable time but haven't
// received meaningful work via events, check if there's active work via API
// Only check once to avoid unnecessary API calls every poll cycle
if (
Date.now() - pollStartTimestamp > secondaryMeaningfulWorkTimeoutMs &&
!secondaryTimeoutChecked
) {
secondaryTimeoutChecked = true
// Check if session actually has pending work (children, todos, etc.)
const childrenRes = await ctx.client.session.children({
path: { id: ctx.sessionID },
query: { directory: ctx.directory },
})
const children = normalizeSDKResponse(childrenRes, [] as unknown[])
const todosRes = await ctx.client.session.todo({
path: { id: ctx.sessionID },
query: { directory: ctx.directory },
})
const todos = normalizeSDKResponse(todosRes, [] as unknown[])
const hasActiveChildren =
Array.isArray(children) && children.length > 0
const hasActiveTodos =
Array.isArray(todos) &&
todos.some(
(t: unknown) =>
(t as { status?: string })?.status !== "completed" &&
(t as { status?: string })?.status !== "cancelled"
)
const hasActiveWork = hasActiveChildren || hasActiveTodos
if (hasActiveWork) {
// Assume meaningful work is happening even without events
eventState.hasReceivedMeaningfulWork = true
console.log(
pc.yellow(
`\n No meaningful work events for ${Math.round(
secondaryMeaningfulWorkTimeoutMs / 1000
)}s but session has active work - assuming in progress`
)
)
}
}
} else {
// Track when first meaningful work was received
if (firstWorkTimestamp === null) {

View File

@@ -1,6 +1,6 @@
# src/config/ — Zod v4 Schema System
**Generated:** 2026-02-24
**Generated:** 2026-02-21
## OVERVIEW

View File

@@ -59,9 +59,7 @@ export const AgentOverridesSchema = z.object({
build: AgentOverrideConfigSchema.optional(),
plan: AgentOverrideConfigSchema.optional(),
sisyphus: AgentOverrideConfigSchema.optional(),
hephaestus: AgentOverrideConfigSchema.extend({
allow_non_gpt_model: z.boolean().optional(),
}).optional(),
hephaestus: AgentOverrideConfigSchema.optional(),
"sisyphus-junior": AgentOverrideConfigSchema.optional(),
"OpenCode-Builder": AgentOverrideConfigSchema.optional(),
prometheus: AgentOverrideConfigSchema.optional(),

View File

@@ -20,7 +20,6 @@ export const CategoryConfigSchema = z.object({
textVerbosity: z.enum(["low", "medium", "high"]).optional(),
tools: z.record(z.string(), z.boolean()).optional(),
prompt_append: z.string().optional(),
max_prompt_tokens: z.number().int().positive().optional(),
/** Mark agent as unstable - forces background mode for monitoring. Auto-enabled for gemini/minimax models. */
is_unstable_agent: z.boolean().optional(),
/** Disable this category. Disabled categories are excluded from task delegation. */

View File

@@ -1,6 +1,6 @@
# src/features/ — 19 Feature Modules
**Generated:** 2026-02-24
**Generated:** 2026-02-21
## OVERVIEW

View File

@@ -1,6 +1,6 @@
# src/features/background-agent/ — Core Orchestration Engine
**Generated:** 2026-02-24
**Generated:** 2026-02-21
## OVERVIEW

View File

@@ -191,10 +191,6 @@ function getPendingByParent(manager: BackgroundManager): Map<string, Set<string>
return (manager as unknown as { pendingByParent: Map<string, Set<string>> }).pendingByParent
}
function getPendingNotifications(manager: BackgroundManager): Map<string, string[]> {
return (manager as unknown as { pendingNotifications: Map<string, string[]> }).pendingNotifications
}
function getCompletionTimers(manager: BackgroundManager): Map<string, ReturnType<typeof setTimeout>> {
return (manager as unknown as { completionTimers: Map<string, ReturnType<typeof setTimeout>> }).completionTimers
}
@@ -1061,49 +1057,6 @@ describe("BackgroundManager.notifyParentSession - aborted parent", () => {
manager.shutdown()
})
test("should queue notification when promptAsync aborts while parent is idle", async () => {
//#given
const promptMock = async () => {
const error = new Error("Request aborted while waiting for input")
error.name = "MessageAbortedError"
throw error
}
const client = {
session: {
prompt: promptMock,
promptAsync: promptMock,
abort: async () => ({}),
messages: async () => ({ data: [] }),
},
}
const manager = new BackgroundManager({ client, directory: tmpdir() } as unknown as PluginInput)
const task: BackgroundTask = {
id: "task-aborted-idle-queue",
sessionID: "session-child",
parentSessionID: "session-parent",
parentMessageID: "msg-parent",
description: "task idle queue",
prompt: "test",
agent: "explore",
status: "completed",
startedAt: new Date(),
completedAt: new Date(),
}
getPendingByParent(manager).set("session-parent", new Set([task.id]))
//#when
await (manager as unknown as { notifyParentSession: (task: BackgroundTask) => Promise<void> })
.notifyParentSession(task)
//#then
const queuedNotifications = getPendingNotifications(manager).get("session-parent") ?? []
expect(queuedNotifications).toHaveLength(1)
expect(queuedNotifications[0]).toContain("<system-reminder>")
expect(queuedNotifications[0]).toContain("[ALL BACKGROUND TASKS COMPLETE]")
manager.shutdown()
})
})
describe("BackgroundManager.notifyParentSession - notifications toggle", () => {
@@ -1152,29 +1105,6 @@ describe("BackgroundManager.notifyParentSession - notifications toggle", () => {
})
})
describe("BackgroundManager.injectPendingNotificationsIntoChatMessage", () => {
test("should prepend queued notifications to first text part and clear queue", () => {
// given
const manager = createBackgroundManager()
manager.queuePendingNotification("session-parent", "<system-reminder>queued-one</system-reminder>")
manager.queuePendingNotification("session-parent", "<system-reminder>queued-two</system-reminder>")
const output = {
parts: [{ type: "text", text: "User prompt" }],
}
// when
manager.injectPendingNotificationsIntoChatMessage(output, "session-parent")
// then
expect(output.parts[0].text).toContain("<system-reminder>queued-one</system-reminder>")
expect(output.parts[0].text).toContain("<system-reminder>queued-two</system-reminder>")
expect(output.parts[0].text).toContain("User prompt")
expect(getPendingNotifications(manager).get("session-parent")).toBeUndefined()
manager.shutdown()
})
})
function buildNotificationPromptBody(
task: BackgroundTask,
currentMessage: CurrentMessage | null

View File

@@ -93,7 +93,6 @@ export class BackgroundManager {
private tasks: Map<string, BackgroundTask>
private notifications: Map<string, BackgroundTask[]>
private pendingNotifications: Map<string, string[]>
private pendingByParent: Map<string, Set<string>> // Track pending tasks per parent for batching
private client: OpencodeClient
private directory: string
@@ -126,7 +125,6 @@ export class BackgroundManager {
) {
this.tasks = new Map()
this.notifications = new Map()
this.pendingNotifications = new Map()
this.pendingByParent = new Map()
this.client = ctx.client
this.directory = ctx.directory
@@ -919,32 +917,6 @@ export class BackgroundManager {
this.notifications.delete(sessionID)
}
queuePendingNotification(sessionID: string | undefined, notification: string): void {
if (!sessionID) return
const existingNotifications = this.pendingNotifications.get(sessionID) ?? []
existingNotifications.push(notification)
this.pendingNotifications.set(sessionID, existingNotifications)
}
injectPendingNotificationsIntoChatMessage(output: { parts: Array<{ type: string; text?: string; [key: string]: unknown }> }, sessionID: string): void {
const pendingNotifications = this.pendingNotifications.get(sessionID)
if (!pendingNotifications || pendingNotifications.length === 0) {
return
}
this.pendingNotifications.delete(sessionID)
const notificationContent = pendingNotifications.join("\n\n")
const firstTextPartIndex = output.parts.findIndex((part) => part.type === "text")
if (firstTextPartIndex === -1) {
output.parts.unshift(createInternalAgentTextPart(notificationContent))
return
}
const originalText = output.parts[firstTextPartIndex].text ?? ""
output.parts[firstTextPartIndex].text = `${notificationContent}\n\n---\n\n${originalText}`
}
/**
* Validates that a session has actual assistant/tool output before marking complete.
* Prevents premature completion when session.idle fires before agent responds.
@@ -1368,7 +1340,6 @@ Use \`background_output(task_id="${task.id}")\` to retrieve this result when rea
taskId: task.id,
parentSessionID: task.parentSessionID,
})
this.queuePendingNotification(task.parentSessionID, notification)
} else {
log("[background-agent] Failed to send notification:", error)
}
@@ -1597,7 +1568,6 @@ Use \`background_output(task_id="${task.id}")\` to retrieve this result when rea
this.concurrencyManager.clear()
this.tasks.clear()
this.notifications.clear()
this.pendingNotifications.clear()
this.pendingByParent.clear()
this.notificationQueueByParent.clear()
this.queuesByKey.clear()

View File

@@ -269,71 +269,6 @@ describe("boulder-state", () => {
expect(progress.isComplete).toBe(false)
})
test("should count space-indented unchecked checkbox", () => {
// given - plan file with a two-space indented checkbox
const planPath = join(TEST_DIR, "space-indented-plan.md")
writeFileSync(planPath, `# Plan
- [ ] indented task
`)
// when
const progress = getPlanProgress(planPath)
// then
expect(progress.total).toBe(1)
expect(progress.completed).toBe(0)
expect(progress.isComplete).toBe(false)
})
test("should count tab-indented unchecked checkbox", () => {
// given - plan file with a tab-indented checkbox
const planPath = join(TEST_DIR, "tab-indented-plan.md")
writeFileSync(planPath, `# Plan
- [ ] tab-indented task
`)
// when
const progress = getPlanProgress(planPath)
// then
expect(progress.total).toBe(1)
expect(progress.completed).toBe(0)
expect(progress.isComplete).toBe(false)
})
test("should count mixed top-level checked and indented unchecked checkboxes", () => {
// given - plan file with checked top-level and unchecked indented task
const planPath = join(TEST_DIR, "mixed-indented-plan.md")
writeFileSync(planPath, `# Plan
- [x] top-level completed task
- [ ] nested unchecked task
`)
// when
const progress = getPlanProgress(planPath)
// then
expect(progress.total).toBe(2)
expect(progress.completed).toBe(1)
expect(progress.isComplete).toBe(false)
})
test("should count space-indented completed checkbox", () => {
// given - plan file with a two-space indented completed checkbox
const planPath = join(TEST_DIR, "indented-completed-plan.md")
writeFileSync(planPath, `# Plan
- [x] indented completed task
`)
// when
const progress = getPlanProgress(planPath)
// then
expect(progress.total).toBe(1)
expect(progress.completed).toBe(1)
expect(progress.isComplete).toBe(true)
})
test("should return isComplete true when all checked", () => {
// given - all tasks completed
const planPath = join(TEST_DIR, "complete-plan.md")

View File

@@ -121,8 +121,8 @@ export function getPlanProgress(planPath: string): PlanProgress {
const content = readFileSync(planPath, "utf-8")
// Match markdown checkboxes: - [ ] or - [x] or - [X]
const uncheckedMatches = content.match(/^\s*[-*]\s*\[\s*\]/gm) || []
const checkedMatches = content.match(/^\s*[-*]\s*\[[xX]\]/gm) || []
const uncheckedMatches = content.match(/^[-*]\s*\[\s*\]/gm) || []
const checkedMatches = content.match(/^[-*]\s*\[[xX]\]/gm) || []
const total = uncheckedMatches.length + checkedMatches.length
const completed = checkedMatches.length
@@ -150,8 +150,7 @@ export function getPlanName(planPath: string): string {
export function createBoulderState(
planPath: string,
sessionId: string,
agent?: string,
worktreePath?: string,
agent?: string
): BoulderState {
return {
active_plan: planPath,
@@ -159,6 +158,5 @@ export function createBoulderState(
session_ids: [sessionId],
plan_name: getPlanName(planPath),
...(agent !== undefined ? { agent } : {}),
...(worktreePath !== undefined ? { worktree_path: worktreePath } : {}),
}
}

View File

@@ -16,8 +16,6 @@ export interface BoulderState {
plan_name: string
/** Agent type to use when resuming (e.g., 'atlas') */
agent?: string
/** Absolute path to the git worktree root where work happens */
worktree_path?: string
}
export interface PlanProgress {

View File

@@ -1,14 +1,5 @@
export const START_WORK_TEMPLATE = `You are starting a Sisyphus work session.
## ARGUMENTS
- \`/start-work [plan-name] [--worktree <path>]\`
- \`plan-name\` (optional): name or partial match of the plan to start
- \`--worktree <path>\` (optional): absolute path to an existing git worktree to work in
- If specified and valid: hook pre-sets worktree_path in boulder.json
- If specified but invalid: you must run \`git worktree add <path> <branch>\` first
- If omitted: you MUST choose or create a worktree (see Worktree Setup below)
## WHAT TO DO
1. **Find available plans**: Search for Prometheus-generated plan files at \`.sisyphus/plans/\`
@@ -24,24 +15,17 @@ export const START_WORK_TEMPLATE = `You are starting a Sisyphus work session.
- If ONE plan: auto-select it
- If MULTIPLE plans: show list with timestamps, ask user to select
4. **Worktree Setup** (when \`worktree_path\` not already set in boulder.json):
1. \`git worktree list --porcelain\` — see available worktrees
2. Create: \`git worktree add <absolute-path> <branch-or-HEAD>\`
3. Update boulder.json to add \`"worktree_path": "<absolute-path>"\`
4. All work happens inside that worktree directory
5. **Create/Update boulder.json**:
4. **Create/Update boulder.json**:
\`\`\`json
{
"active_plan": "/absolute/path/to/plan.md",
"started_at": "ISO_TIMESTAMP",
"session_ids": ["session_id_1", "session_id_2"],
"plan_name": "plan-name",
"worktree_path": "/absolute/path/to/git/worktree"
"plan_name": "plan-name"
}
\`\`\`
6. **Read the plan file** and start executing tasks according to atlas workflow
5. **Read the plan file** and start executing tasks according to atlas workflow
## OUTPUT FORMAT
@@ -65,7 +49,6 @@ Resuming Work Session
Active Plan: {plan-name}
Progress: {completed}/{total} tasks
Sessions: {count} (appending current session)
Worktree: {worktree_path}
Reading plan and continuing from last incomplete task...
\`\`\`
@@ -77,7 +60,6 @@ Starting Work Session
Plan: {plan-name}
Session ID: {session_id}
Started: {timestamp}
Worktree: {worktree_path}
Reading plan and beginning execution...
\`\`\`
@@ -86,6 +68,5 @@ Reading plan and beginning execution...
- The session_id is injected by the hook - use it directly
- Always update boulder.json BEFORE starting work
- Always set worktree_path in boulder.json before executing any tasks
- Read the FULL plan file before delegating any tasks
- Follow atlas delegation protocols (7-section format)`

View File

@@ -1,6 +1,6 @@
# src/features/claude-tasks/ — Task Schema + Storage
**Generated:** 2026-02-24
**Generated:** 2026-02-21
## OVERVIEW

View File

@@ -1,6 +1,6 @@
# src/features/mcp-oauth/ — OAuth 2.0 + PKCE + DCR for MCP Servers
**Generated:** 2026-02-24
**Generated:** 2026-02-21
## OVERVIEW

View File

@@ -1,6 +1,6 @@
# src/features/opencode-skill-loader/ — 4-Scope Skill Discovery
**Generated:** 2026-02-24
**Generated:** 2026-02-21
## OVERVIEW

View File

@@ -1,6 +1,6 @@
# src/features/tmux-subagent/ — Tmux Pane Management
**Generated:** 2026-02-24
**Generated:** 2026-02-21
## OVERVIEW

View File

@@ -1,14 +1,14 @@
# src/hooks/ — 46 Lifecycle Hooks
# src/hooks/ — 44 Lifecycle Hooks
**Generated:** 2026-02-24
**Generated:** 2026-02-21
## OVERVIEW
46 hooks across 39 directories + 6 standalone files. Three-tier composition: Core(37) + Continuation(7) + Skill(2). All hooks follow `createXXXHook(deps) → HookFunction` factory pattern.
44 hooks across 39 directories + 6 standalone files. Three-tier composition: Core(35) + Continuation(7) + Skill(2). All hooks follow `createXXXHook(deps) → HookFunction` factory pattern.
## HOOK TIERS
### Tier 1: Session Hooks (23) — `create-session-hooks.ts`
### Tier 1: Session Hooks (22) — `create-session-hooks.ts`
## STRUCTURE
```
hooks/
@@ -70,12 +70,11 @@ hooks/
| questionLabelTruncator | tool.execute.before | Truncate long question labels |
| taskResumeInfo | chat.message | Inject task context on resume |
| anthropicEffort | chat.params | Adjust reasoning effort level |
| modelFallback | chat.params | Provider-level model fallback on errors |
| noSisyphusGpt | chat.message | Block Sisyphus from using GPT models (toast warning) |
| noHephaestusNonGpt | chat.message | Block Hephaestus from using non-GPT models |
| runtimeFallback | event | Auto-switch models on API provider errors |
| jsonErrorRecovery | tool.execute.after | Detect JSON parse errors, inject correction reminder |
| sisyphusGptHephaestusReminder | chat.message | Toast warning when Sisyphus uses GPT model |
| taskReminder | tool.execute.after | Remind about task tools after 10 turns without usage |
### Tier 2: Tool Guard Hooks (10) — `create-tool-guard-hooks.ts`
### Tier 2: Tool Guard Hooks (9) — `create-tool-guard-hooks.ts`
| Hook | Event | Purpose |
|------|-------|---------|
@@ -88,7 +87,6 @@ hooks/
| tasksTodowriteDisabler | tool.execute.before | Disable TodoWrite when task system active |
| writeExistingFileGuard | tool.execute.before | Require Read before Write on existing files |
| hashlineReadEnhancer | tool.execute.after | Enhance Read output with line hashes |
| jsonErrorRecovery | tool.execute.after | Detect JSON parse errors, inject correction reminder |
### Tier 3: Transform Hooks (4) — `create-transform-hooks.ts`

View File

@@ -1,6 +1,6 @@
# src/hooks/anthropic-context-window-limit-recovery/ — Multi-Strategy Context Recovery
**Generated:** 2026-02-24
**Generated:** 2026-02-21
## OVERVIEW

View File

@@ -6,7 +6,7 @@ export function getOrCreateRetryState(
): RetryState {
let state = autoCompactState.retryStateBySession.get(sessionID)
if (!state) {
state = { attempt: 0, lastAttemptTime: 0, firstAttemptTime: 0 }
state = { attempt: 0, lastAttemptTime: 0 }
autoCompactState.retryStateBySession.set(sessionID, state)
}
return state

View File

@@ -1,122 +0,0 @@
import { afterEach, beforeEach, describe, expect, mock, test } from "bun:test"
import { runSummarizeRetryStrategy } from "./summarize-retry-strategy"
import type { AutoCompactState, ParsedTokenLimitError, RetryState } from "./types"
import type { OhMyOpenCodeConfig } from "../../config"
type TimeoutCall = {
delay: number
}
function createAutoCompactState(): AutoCompactState {
return {
pendingCompact: new Set<string>(),
errorDataBySession: new Map<string, ParsedTokenLimitError>(),
retryStateBySession: new Map<string, RetryState>(),
truncateStateBySession: new Map(),
emptyContentAttemptBySession: new Map(),
compactionInProgress: new Set<string>(),
}
}
describe("runSummarizeRetryStrategy", () => {
const sessionID = "ses_retry_timeout"
const directory = "/tmp"
let autoCompactState: AutoCompactState
const summarizeMock = mock(() => Promise.resolve())
const showToastMock = mock(() => Promise.resolve())
const client = {
session: {
summarize: summarizeMock,
messages: mock(() => Promise.resolve({ data: [] })),
promptAsync: mock(() => Promise.resolve()),
revert: mock(() => Promise.resolve()),
},
tui: {
showToast: showToastMock,
},
}
beforeEach(() => {
autoCompactState = createAutoCompactState()
summarizeMock.mockReset()
showToastMock.mockReset()
summarizeMock.mockResolvedValue(undefined)
showToastMock.mockResolvedValue(undefined)
})
afterEach(() => {
globalThis.setTimeout = originalSetTimeout
})
const originalSetTimeout = globalThis.setTimeout
test("stops retries when total summarize timeout is exceeded", async () => {
//#given
autoCompactState.pendingCompact.add(sessionID)
autoCompactState.errorDataBySession.set(sessionID, {
currentTokens: 250000,
maxTokens: 200000,
errorType: "token_limit_exceeded",
})
autoCompactState.retryStateBySession.set(sessionID, {
attempt: 1,
lastAttemptTime: Date.now(),
firstAttemptTime: Date.now() - 130000,
})
//#when
await runSummarizeRetryStrategy({
sessionID,
msg: { providerID: "anthropic", modelID: "claude-sonnet-4-6" },
autoCompactState,
client: client as never,
directory,
pluginConfig: {} as OhMyOpenCodeConfig,
})
//#then
expect(summarizeMock).not.toHaveBeenCalled()
expect(autoCompactState.pendingCompact.has(sessionID)).toBe(false)
expect(autoCompactState.errorDataBySession.has(sessionID)).toBe(false)
expect(autoCompactState.retryStateBySession.has(sessionID)).toBe(false)
expect(showToastMock).toHaveBeenCalledWith(
expect.objectContaining({
body: expect.objectContaining({
title: "Auto Compact Timed Out",
}),
}),
)
})
test("caps retry delay by remaining total timeout window", async () => {
//#given
const timeoutCalls: TimeoutCall[] = []
globalThis.setTimeout = ((_: (...args: unknown[]) => void, delay?: number) => {
timeoutCalls.push({ delay: delay ?? 0 })
return 1 as unknown as ReturnType<typeof setTimeout>
}) as typeof setTimeout
autoCompactState.retryStateBySession.set(sessionID, {
attempt: 1,
lastAttemptTime: Date.now(),
firstAttemptTime: Date.now() - 119700,
})
summarizeMock.mockRejectedValueOnce(new Error("rate limited"))
//#when
await runSummarizeRetryStrategy({
sessionID,
msg: { providerID: "anthropic", modelID: "claude-sonnet-4-6" },
autoCompactState,
client: client as never,
directory,
pluginConfig: {} as OhMyOpenCodeConfig,
})
//#then
expect(timeoutCalls.length).toBe(1)
expect(timeoutCalls[0]!.delay).toBeGreaterThan(0)
expect(timeoutCalls[0]!.delay).toBeLessThanOrEqual(500)
})
})

View File

@@ -7,8 +7,6 @@ import { sanitizeEmptyMessagesBeforeSummarize } from "./message-builder"
import { fixEmptyMessages } from "./empty-content-recovery"
import { resolveCompactionModel } from "../shared/compaction-model-resolver"
const SUMMARIZE_RETRY_TOTAL_TIMEOUT_MS = 120_000
export async function runSummarizeRetryStrategy(params: {
sessionID: string
msg: Record<string, unknown>
@@ -20,27 +18,6 @@ export async function runSummarizeRetryStrategy(params: {
messageIndex?: number
}): Promise<void> {
const retryState = getOrCreateRetryState(params.autoCompactState, params.sessionID)
const now = Date.now()
if (retryState.firstAttemptTime === 0) {
retryState.firstAttemptTime = now
}
const elapsedTimeMs = now - retryState.firstAttemptTime
if (elapsedTimeMs >= SUMMARIZE_RETRY_TOTAL_TIMEOUT_MS) {
clearSessionState(params.autoCompactState, params.sessionID)
await params.client.tui
.showToast({
body: {
title: "Auto Compact Timed Out",
message: "Compaction retries exceeded the timeout window. Please start a new session.",
variant: "error",
duration: 5000,
},
})
.catch(() => {})
return
}
if (params.errorType?.includes("non-empty content")) {
const attempt = getEmptyContentAttempt(params.autoCompactState, params.sessionID)
@@ -75,7 +52,6 @@ export async function runSummarizeRetryStrategy(params: {
if (Date.now() - retryState.lastAttemptTime > 300000) {
retryState.attempt = 0
retryState.firstAttemptTime = Date.now()
params.autoCompactState.truncateStateBySession.delete(params.sessionID)
}
@@ -116,26 +92,10 @@ export async function runSummarizeRetryStrategy(params: {
})
return
} catch {
const remainingTimeMs = SUMMARIZE_RETRY_TOTAL_TIMEOUT_MS - (Date.now() - retryState.firstAttemptTime)
if (remainingTimeMs <= 0) {
clearSessionState(params.autoCompactState, params.sessionID)
await params.client.tui
.showToast({
body: {
title: "Auto Compact Timed Out",
message: "Compaction retries exceeded the timeout window. Please start a new session.",
variant: "error",
duration: 5000,
},
})
.catch(() => {})
return
}
const delay =
RETRY_CONFIG.initialDelayMs *
Math.pow(RETRY_CONFIG.backoffFactor, retryState.attempt - 1)
const cappedDelay = Math.min(delay, RETRY_CONFIG.maxDelayMs, remainingTimeMs)
const cappedDelay = Math.min(delay, RETRY_CONFIG.maxDelayMs)
setTimeout(() => {
void runSummarizeRetryStrategy(params)

View File

@@ -11,7 +11,6 @@ export interface ParsedTokenLimitError {
export interface RetryState {
attempt: number
lastAttemptTime: number
firstAttemptTime: number
}
export interface TruncateState {

View File

@@ -1,6 +1,6 @@
# src/hooks/atlas/ — Master Boulder Orchestrator
**Generated:** 2026-02-24
**Generated:** 2026-02-21
## OVERVIEW

View File

@@ -14,7 +14,6 @@ export async function injectBoulderContinuation(input: {
remaining: number
total: number
agent?: string
worktreePath?: string
backgroundManager?: BackgroundManager
sessionState: SessionState
}): Promise<void> {
@@ -25,7 +24,6 @@ export async function injectBoulderContinuation(input: {
remaining,
total,
agent,
worktreePath,
backgroundManager,
sessionState,
} = input
@@ -39,11 +37,9 @@ export async function injectBoulderContinuation(input: {
return
}
const worktreeContext = worktreePath ? `\n\n[Worktree: ${worktreePath}]` : ""
const prompt =
BOULDER_CONTINUATION_PROMPT.replace(/{PLAN_NAME}/g, planName) +
`\n\n[Status: ${total - remaining}/${total} completed, ${remaining} remaining]` +
worktreeContext
`\n\n[Status: ${total - remaining}/${total} completed, ${remaining} remaining]`
try {
log(`[${HOOK_NAME}] Injecting boulder continuation`, { sessionID, planName, remaining })
@@ -66,7 +62,6 @@ export async function injectBoulderContinuation(input: {
log(`[${HOOK_NAME}] Boulder continuation injected`, { sessionID })
} catch (err) {
sessionState.promptFailureCount += 1
sessionState.lastFailureAt = Date.now()
log(`[${HOOK_NAME}] Boulder continuation failed`, {
sessionID,
error: String(err),

View File

@@ -10,7 +10,6 @@ import { getLastAgentFromSession } from "./session-last-agent"
import type { AtlasHookOptions, SessionState } from "./types"
const CONTINUATION_COOLDOWN_MS = 5000
const FAILURE_BACKOFF_MS = 5 * 60 * 1000
export function createAtlasEventHandler(input: {
ctx: PluginInput
@@ -54,7 +53,6 @@ export function createAtlasEventHandler(input: {
}
const state = getState(sessionID)
const now = Date.now()
if (state.lastEventWasAbortError) {
state.lastEventWasAbortError = false
@@ -63,18 +61,11 @@ export function createAtlasEventHandler(input: {
}
if (state.promptFailureCount >= 2) {
const timeSinceLastFailure = state.lastFailureAt !== undefined ? now - state.lastFailureAt : Number.POSITIVE_INFINITY
if (timeSinceLastFailure < FAILURE_BACKOFF_MS) {
log(`[${HOOK_NAME}] Skipped: continuation in backoff after repeated failures`, {
sessionID,
promptFailureCount: state.promptFailureCount,
backoffRemaining: FAILURE_BACKOFF_MS - timeSinceLastFailure,
})
return
}
state.promptFailureCount = 0
state.lastFailureAt = undefined
log(`[${HOOK_NAME}] Skipped: continuation disabled after repeated prompt failures`, {
sessionID,
promptFailureCount: state.promptFailureCount,
})
return
}
const backgroundManager = options?.backgroundManager
@@ -101,15 +92,17 @@ export function createAtlasEventHandler(input: {
const lastAgentKey = getAgentConfigKey(lastAgent ?? "")
const requiredAgent = getAgentConfigKey(boulderState.agent ?? "atlas")
const lastAgentMatchesRequired = lastAgentKey === requiredAgent
const boulderAgentWasNotExplicitlySet = boulderState.agent === undefined
const boulderAgentDefaultsToAtlas = requiredAgent === "atlas"
const lastAgentIsSisyphus = lastAgentKey === "sisyphus"
const allowSisyphusForAtlasBoulder = boulderAgentDefaultsToAtlas && lastAgentIsSisyphus
const agentMatches = lastAgentMatchesRequired || allowSisyphusForAtlasBoulder
const allowSisyphusWhenDefaultAtlas = boulderAgentWasNotExplicitlySet && boulderAgentDefaultsToAtlas && lastAgentIsSisyphus
const agentMatches = lastAgentMatchesRequired || allowSisyphusWhenDefaultAtlas
if (!agentMatches) {
log(`[${HOOK_NAME}] Skipped: last agent does not match boulder agent`, {
sessionID,
lastAgent: lastAgent ?? "unknown",
requiredAgent,
boulderAgentExplicitlySet: boulderState.agent !== undefined,
})
return
}
@@ -120,6 +113,7 @@ export function createAtlasEventHandler(input: {
return
}
const now = Date.now()
if (state.lastContinuationInjectedAt && now - state.lastContinuationInjectedAt < CONTINUATION_COOLDOWN_MS) {
log(`[${HOOK_NAME}] Skipped: continuation cooldown active`, {
sessionID,
@@ -138,7 +132,6 @@ export function createAtlasEventHandler(input: {
remaining,
total: progress.total,
agent: boulderState.agent,
worktreePath: boulderState.worktree_path,
backgroundManager,
sessionState: state,
})

View File

@@ -933,8 +933,8 @@ describe("atlas hook", () => {
expect(callArgs.body.parts[0].text).toContain("2 remaining")
})
test("should inject when last agent is sisyphus and boulder targets atlas explicitly", async () => {
// given - boulder explicitly set to atlas, but last agent is sisyphus (initial state after /start-work)
test("should not inject when last agent does not match boulder agent", async () => {
// given - boulder state with incomplete plan, but last agent does NOT match
const planPath = join(TEST_DIR, "test-plan.md")
writeFileSync(planPath, "# Plan\n- [ ] Task 1\n- [ ] Task 2")
@@ -947,7 +947,7 @@ describe("atlas hook", () => {
}
writeBoulderState(TEST_DIR, state)
// given - last agent is sisyphus (typical state right after /start-work)
// given - last agent is NOT the boulder agent
cleanupMessageStorage(MAIN_SESSION_ID)
setupMessageStorage(MAIN_SESSION_ID, "sisyphus")
@@ -962,39 +962,7 @@ describe("atlas hook", () => {
},
})
// then - should call prompt because sisyphus is always allowed for atlas boulders
expect(mockInput._promptMock).toHaveBeenCalled()
})
test("should not inject when last agent is non-sisyphus and does not match boulder agent", async () => {
// given - boulder explicitly set to atlas, last agent is hephaestus (unrelated agent)
const planPath = join(TEST_DIR, "test-plan.md")
writeFileSync(planPath, "# Plan\n- [ ] Task 1\n- [ ] Task 2")
const state: BoulderState = {
active_plan: planPath,
started_at: "2026-01-02T10:00:00Z",
session_ids: [MAIN_SESSION_ID],
plan_name: "test-plan",
agent: "atlas",
}
writeBoulderState(TEST_DIR, state)
cleanupMessageStorage(MAIN_SESSION_ID)
setupMessageStorage(MAIN_SESSION_ID, "hephaestus")
const mockInput = createMockPluginInput()
const hook = createAtlasHook(mockInput)
// when
await hook.handler({
event: {
type: "session.idle",
properties: { sessionID: MAIN_SESSION_ID },
},
})
// then - should NOT call prompt because hephaestus does not match atlas or sisyphus
// then - should NOT call prompt because agent does not match
expect(mockInput._promptMock).not.toHaveBeenCalled()
})
@@ -1154,144 +1122,6 @@ describe("atlas hook", () => {
}
})
// Verifies that after two consecutive prompt failures, the continuation stays
// suppressed for the whole 5-minute backoff window (third idle is skipped).
test("should keep skipping continuation during 5-minute backoff after 2 consecutive failures", async () => {
//#given - boulder state with incomplete plan and prompt always fails
const planPath = join(TEST_DIR, "test-plan.md")
writeFileSync(planPath, "# Plan\n- [ ] Task 1\n- [ ] Task 2")
const state: BoulderState = {
active_plan: planPath,
started_at: "2026-01-02T10:00:00Z",
session_ids: [MAIN_SESSION_ID],
plan_name: "test-plan",
}
writeBoulderState(TEST_DIR, state)
// prompt mock rejects on every call, so each injection attempt counts as a failure
const promptMock = mock(() => Promise.reject(new Error("Bad Request")))
const mockInput = createMockPluginInput({ promptMock })
const hook = createAtlasHook(mockInput)
// stub Date.now so the cooldown/backoff arithmetic is fully deterministic
const originalDateNow = Date.now
let now = 0
Date.now = () => now
try {
//#when - third idle occurs inside 5-minute backoff window
await hook.handler({ event: { type: "session.idle", properties: { sessionID: MAIN_SESSION_ID } } })
await flushMicrotasks()
// advance past the 5s continuation cooldown so the second attempt is allowed
now += 6000
await hook.handler({ event: { type: "session.idle", properties: { sessionID: MAIN_SESSION_ID } } })
await flushMicrotasks()
// 60s later: still well inside the 5-minute failure backoff
now += 60000
await hook.handler({ event: { type: "session.idle", properties: { sessionID: MAIN_SESSION_ID } } })
await flushMicrotasks()
//#then - third attempt should still be skipped
expect(promptMock).toHaveBeenCalledTimes(2)
} finally {
// always restore the real clock, even if an assertion above throws
Date.now = originalDateNow
}
})
// Verifies that once the 5-minute failure backoff has fully elapsed, the hook
// attempts the continuation again (third prompt call is made).
test("should retry continuation after 5-minute backoff expires following 2 consecutive failures", async () => {
//#given - boulder state with incomplete plan and prompt always fails
const planPath = join(TEST_DIR, "test-plan.md")
writeFileSync(planPath, "# Plan\n- [ ] Task 1\n- [ ] Task 2")
const state: BoulderState = {
active_plan: planPath,
started_at: "2026-01-02T10:00:00Z",
session_ids: [MAIN_SESSION_ID],
plan_name: "test-plan",
}
writeBoulderState(TEST_DIR, state)
// prompt mock always rejects so the first two attempts register as failures
const promptMock = mock(() => Promise.reject(new Error("Bad Request")))
const mockInput = createMockPluginInput({ promptMock })
const hook = createAtlasHook(mockInput)
// deterministic clock: we manually advance `now` past cooldown and backoff
const originalDateNow = Date.now
let now = 0
Date.now = () => now
try {
//#when - third idle occurs after 5+ minutes
await hook.handler({ event: { type: "session.idle", properties: { sessionID: MAIN_SESSION_ID } } })
await flushMicrotasks()
// past the 5s cooldown → second failing attempt
now += 6000
await hook.handler({ event: { type: "session.idle", properties: { sessionID: MAIN_SESSION_ID } } })
await flushMicrotasks()
// exactly 300000ms = 5 minutes → backoff expired, retry allowed
now += 300000
await hook.handler({ event: { type: "session.idle", properties: { sessionID: MAIN_SESSION_ID } } })
await flushMicrotasks()
//#then - third attempt should run after backoff expiration
expect(promptMock).toHaveBeenCalledTimes(3)
} finally {
// restore the real clock regardless of assertion outcome
Date.now = originalDateNow
}
})
// Verifies that a successful injection after the backoff clears the failure
// counter, so two MORE failures are tolerated before the hook skips again
// (total of 5 prompt calls: fail, fail, success, fail, fail — 6th idle skipped).
test("should reset prompt failure counter after successful retry beyond backoff window", async () => {
//#given - boulder state with incomplete plan and success on first retry after backoff
const planPath = join(TEST_DIR, "test-plan.md")
writeFileSync(planPath, "# Plan\n- [ ] Task 1\n- [ ] Task 2")
const state: BoulderState = {
active_plan: planPath,
started_at: "2026-01-02T10:00:00Z",
session_ids: [MAIN_SESSION_ID],
plan_name: "test-plan",
}
writeBoulderState(TEST_DIR, state)
// default implementation rejects; the first three calls are scripted below:
// reject, reject, then one resolve (the post-backoff success)
const promptMock = mock((): Promise<void> => Promise.reject(new Error("Bad Request")))
promptMock.mockImplementationOnce(() => Promise.reject(new Error("Bad Request")))
promptMock.mockImplementationOnce(() => Promise.reject(new Error("Bad Request")))
promptMock.mockImplementationOnce(() => Promise.resolve(undefined))
const mockInput = createMockPluginInput({ promptMock })
const hook = createAtlasHook(mockInput)
// deterministic clock for cooldown/backoff control
const originalDateNow = Date.now
let now = 0
Date.now = () => now
try {
//#when - fail twice, recover after backoff with success, then fail twice again
await hook.handler({ event: { type: "session.idle", properties: { sessionID: MAIN_SESSION_ID } } })
await flushMicrotasks()
now += 6000
await hook.handler({ event: { type: "session.idle", properties: { sessionID: MAIN_SESSION_ID } } })
await flushMicrotasks()
// 5 minutes: backoff expires, scripted success resets the failure counter
now += 300000
await hook.handler({ event: { type: "session.idle", properties: { sessionID: MAIN_SESSION_ID } } })
await flushMicrotasks()
now += 6000
await hook.handler({ event: { type: "session.idle", properties: { sessionID: MAIN_SESSION_ID } } })
await flushMicrotasks()
now += 6000
await hook.handler({ event: { type: "session.idle", properties: { sessionID: MAIN_SESSION_ID } } })
await flushMicrotasks()
// 6th idle: two fresh failures have re-armed the skip, so no prompt call
now += 6000
await hook.handler({ event: { type: "session.idle", properties: { sessionID: MAIN_SESSION_ID } } })
await flushMicrotasks()
//#then - success retry resets counter, so two additional failures are allowed before skip
expect(promptMock).toHaveBeenCalledTimes(5)
} finally {
// always restore the real clock
Date.now = originalDateNow
}
})
test("should reset continuation failure state on session.compacted event", async () => {
//#given - boulder state with incomplete plan and prompt always fails
const planPath = join(TEST_DIR, "test-plan.md")

View File

@@ -26,5 +26,4 @@ export interface SessionState {
lastEventWasAbortError?: boolean
lastContinuationInjectedAt?: number
promptFailureCount: number
lastFailureAt?: number
}

View File

@@ -9,14 +9,6 @@ interface EventInput {
event: Event
}
interface ChatMessageInput {
sessionID: string
}
interface ChatMessageOutput {
parts: Array<{ type: string; text?: string; [key: string]: unknown }>
}
/**
* Background notification hook - handles event routing to BackgroundManager.
*
@@ -28,15 +20,7 @@ export function createBackgroundNotificationHook(manager: BackgroundManager) {
manager.handleEvent(event)
}
const chatMessageHandler = async (
input: ChatMessageInput,
output: ChatMessageOutput,
): Promise<void> => {
manager.injectPendingNotificationsIntoChatMessage(output, input.sessionID)
}
return {
"chat.message": chatMessageHandler,
event: eventHandler,
}
}

View File

@@ -1,6 +1,6 @@
# src/hooks/claude-code-hooks/ — Claude Code Compatibility
**Generated:** 2026-02-24
**Generated:** 2026-02-21
## OVERVIEW

View File

@@ -1,6 +1,6 @@
# src/hooks/keyword-detector/ — Mode Keyword Injection
**Generated:** 2026-02-24
**Generated:** 2026-02-21
## OVERVIEW

View File

@@ -12,16 +12,12 @@ const TOAST_MESSAGE = [
].join("\n")
const SISYPHUS_DISPLAY = getAgentDisplayName("sisyphus")
type NoHephaestusNonGptHookOptions = {
allowNonGptModel?: boolean
}
function showToast(ctx: PluginInput, sessionID: string, variant: "error" | "warning"): void {
function showToast(ctx: PluginInput, sessionID: string): void {
ctx.client.tui.showToast({
body: {
title: TOAST_TITLE,
message: TOAST_MESSAGE,
variant,
variant: "error",
duration: 10000,
},
}).catch((error) => {
@@ -32,10 +28,7 @@ function showToast(ctx: PluginInput, sessionID: string, variant: "error" | "warn
})
}
export function createNoHephaestusNonGptHook(
ctx: PluginInput,
options?: NoHephaestusNonGptHookOptions,
) {
export function createNoHephaestusNonGptHook(ctx: PluginInput) {
return {
"chat.message": async (input: {
sessionID: string
@@ -47,13 +40,9 @@ export function createNoHephaestusNonGptHook(
const rawAgent = input.agent ?? getSessionAgent(input.sessionID) ?? ""
const agentKey = getAgentConfigKey(rawAgent)
const modelID = input.model?.modelID
const allowNonGptModel = options?.allowNonGptModel === true
if (agentKey === "hephaestus" && modelID && !isGptModel(modelID)) {
showToast(ctx, input.sessionID, allowNonGptModel ? "warning" : "error")
if (allowNonGptModel) {
return
}
showToast(ctx, input.sessionID)
input.agent = SISYPHUS_DISPLAY
if (output?.message) {
output.message.agent = SISYPHUS_DISPLAY

View File

@@ -1,5 +1,3 @@
/// <reference types="bun-types" />
import { describe, expect, spyOn, test } from "bun:test"
import { _resetForTesting, updateSessionAgent } from "../../features/claude-code-session-state"
import { getAgentDisplayName } from "../../shared/agent-display-names"
@@ -10,7 +8,7 @@ const SISYPHUS_DISPLAY = getAgentDisplayName("sisyphus")
function createOutput() {
return {
message: {} as { agent?: string; [key: string]: unknown },
message: {},
parts: [],
}
}
@@ -18,7 +16,7 @@ function createOutput() {
describe("no-hephaestus-non-gpt hook", () => {
test("shows toast on every chat.message when hephaestus uses non-gpt model", async () => {
// given - hephaestus with claude model
const showToast = spyOn({ fn: async (_input: unknown) => ({}) }, "fn")
const showToast = spyOn({ fn: async () => ({}) }, "fn")
const hook = createNoHephaestusNonGptHook({
client: { tui: { showToast } },
} as any)
@@ -51,38 +49,9 @@ describe("no-hephaestus-non-gpt hook", () => {
})
})
test("shows warning and does not switch agent when allow_non_gpt_model is enabled", async () => {
// given - hephaestus with claude model and opt-out enabled
const showToast = spyOn({ fn: async (_input: unknown) => ({}) }, "fn")
const hook = createNoHephaestusNonGptHook({
client: { tui: { showToast } },
} as any, {
allowNonGptModel: true,
})
const output = createOutput()
// when - chat.message runs
await hook["chat.message"]?.({
sessionID: "ses_opt_out",
agent: HEPHAESTUS_DISPLAY,
model: { providerID: "anthropic", modelID: "claude-opus-4-6" },
}, output)
// then - warning toast is shown but agent is not switched
expect(showToast).toHaveBeenCalledTimes(1)
expect(output.message.agent).toBeUndefined()
expect(showToast.mock.calls[0]?.[0]).toMatchObject({
body: {
title: "NEVER Use Hephaestus with Non-GPT",
variant: "warning",
},
})
})
test("does not show toast when hephaestus uses gpt model", async () => {
// given - hephaestus with gpt model
const showToast = spyOn({ fn: async (_input: unknown) => ({}) }, "fn")
const showToast = spyOn({ fn: async () => ({}) }, "fn")
const hook = createNoHephaestusNonGptHook({
client: { tui: { showToast } },
} as any)
@@ -103,7 +72,7 @@ describe("no-hephaestus-non-gpt hook", () => {
test("does not show toast for non-hephaestus agent", async () => {
// given - sisyphus with claude model (non-gpt)
const showToast = spyOn({ fn: async (_input: unknown) => ({}) }, "fn")
const showToast = spyOn({ fn: async () => ({}) }, "fn")
const hook = createNoHephaestusNonGptHook({
client: { tui: { showToast } },
} as any)
@@ -126,7 +95,7 @@ describe("no-hephaestus-non-gpt hook", () => {
// given - session agent saved as hephaestus
_resetForTesting()
updateSessionAgent("ses_4", HEPHAESTUS_DISPLAY)
const showToast = spyOn({ fn: async (_input: unknown) => ({}) }, "fn")
const showToast = spyOn({ fn: async () => ({}) }, "fn")
const hook = createNoHephaestusNonGptHook({
client: { tui: { showToast } },
} as any)

View File

@@ -45,23 +45,6 @@ function createMockCtx() {
}
}
// Test helper: swaps the global timer functions so that any scheduled
// callback runs synchronously (delay ignored) and clearTimeout is a no-op.
// Returns a restore function that reinstates the original globals; callers
// should invoke it in a finally block.
function setupImmediateTimeouts(): () => void {
  const savedSetTimeout = globalThis.setTimeout
  const savedClearTimeout = globalThis.clearTimeout
  const immediateSetTimeout = ((callback: (...args: unknown[]) => void, _delay?: number, ...args: unknown[]) => {
    // fire synchronously, forwarding any extra args just like setTimeout would
    callback(...args)
    // return a dummy handle; clearTimeout below ignores it anyway
    return 1 as unknown as ReturnType<typeof setTimeout>
  }) as typeof setTimeout
  globalThis.setTimeout = immediateSetTimeout
  globalThis.clearTimeout = (() => {}) as typeof clearTimeout
  return () => {
    globalThis.setTimeout = savedSetTimeout
    globalThis.clearTimeout = savedClearTimeout
  }
}
describe("preemptive-compaction", () => {
let ctx: ReturnType<typeof createMockCtx>
@@ -80,7 +63,7 @@ describe("preemptive-compaction", () => {
// #when tool.execute.after is called
// #then session.messages() should NOT be called
it("should use cached token info instead of fetching session.messages()", async () => {
const hook = createPreemptiveCompactionHook(ctx as never, {} as never)
const hook = createPreemptiveCompactionHook(ctx as never)
const sessionID = "ses_test1"
// Simulate message.updated with token info below threshold
@@ -118,7 +101,7 @@ describe("preemptive-compaction", () => {
// #when tool.execute.after is called
// #then should skip without fetching
it("should skip gracefully when no cached token info exists", async () => {
const hook = createPreemptiveCompactionHook(ctx as never, {} as never)
const hook = createPreemptiveCompactionHook(ctx as never)
const output = { title: "", output: "test", metadata: null }
await hook["tool.execute.after"](
@@ -133,7 +116,7 @@ describe("preemptive-compaction", () => {
// #when tool.execute.after runs
// #then should trigger summarize
it("should trigger compaction when usage exceeds threshold", async () => {
const hook = createPreemptiveCompactionHook(ctx as never, {} as never)
const hook = createPreemptiveCompactionHook(ctx as never)
const sessionID = "ses_high"
// 170K input + 10K cache = 180K → 90% of 200K
@@ -170,7 +153,7 @@ describe("preemptive-compaction", () => {
it("should trigger compaction for google-vertex-anthropic provider", async () => {
//#given google-vertex-anthropic usage above threshold
const hook = createPreemptiveCompactionHook(ctx as never, {} as never)
const hook = createPreemptiveCompactionHook(ctx as never)
const sessionID = "ses_vertex_anthropic_high"
await hook.event({
@@ -208,7 +191,7 @@ describe("preemptive-compaction", () => {
// #given session deleted
// #then cache should be cleaned up
it("should clean up cache on session.deleted", async () => {
const hook = createPreemptiveCompactionHook(ctx as never, {} as never)
const hook = createPreemptiveCompactionHook(ctx as never)
const sessionID = "ses_del"
await hook.event({
@@ -245,7 +228,7 @@ describe("preemptive-compaction", () => {
it("should log summarize errors instead of swallowing them", async () => {
//#given
const hook = createPreemptiveCompactionHook(ctx as never, {} as never)
const hook = createPreemptiveCompactionHook(ctx as never)
const sessionID = "ses_log_error"
const summarizeError = new Error("summarize failed")
ctx.client.session.summarize.mockRejectedValueOnce(summarizeError)
@@ -360,58 +343,4 @@ describe("preemptive-compaction", () => {
//#then
expect(ctx.client.session.summarize).not.toHaveBeenCalled()
})
it("should clear in-progress lock when summarize times out", async () => {
//#given
const restoreTimeouts = setupImmediateTimeouts()
const hook = createPreemptiveCompactionHook(ctx as never, {} as never)
const sessionID = "ses_timeout"
ctx.client.session.summarize
.mockImplementationOnce(() => new Promise(() => {}))
.mockResolvedValueOnce({})
try {
await hook.event({
event: {
type: "message.updated",
properties: {
info: {
role: "assistant",
sessionID,
providerID: "anthropic",
modelID: "claude-sonnet-4-6",
finish: true,
tokens: {
input: 170000,
output: 0,
reasoning: 0,
cache: { read: 10000, write: 0 },
},
},
},
},
})
//#when
await hook["tool.execute.after"](
{ tool: "bash", sessionID, callID: "call_timeout_1" },
{ title: "", output: "test", metadata: null },
)
await hook["tool.execute.after"](
{ tool: "bash", sessionID, callID: "call_timeout_2" },
{ title: "", output: "test", metadata: null },
)
//#then
expect(ctx.client.session.summarize).toHaveBeenCalledTimes(2)
expect(logMock).toHaveBeenCalledWith("[preemptive-compaction] Compaction failed", {
sessionID,
error: expect.stringContaining("Compaction summarize timed out"),
})
} finally {
restoreTimeouts()
}
})
})

View File

@@ -3,7 +3,6 @@ import type { OhMyOpenCodeConfig } from "../config"
import { resolveCompactionModel } from "./shared/compaction-model-resolver"
const DEFAULT_ACTUAL_LIMIT = 200_000
const PREEMPTIVE_COMPACTION_TIMEOUT_MS = 120_000
type ModelCacheStateLike = {
anthropicContext1MEnabled: boolean
@@ -32,26 +31,6 @@ interface CachedCompactionState {
tokens: TokenInfo
}
// Races `promise` against a deadline of `timeoutMs` milliseconds.
// Resolves/rejects with the promise's own outcome if it settles first;
// otherwise rejects with an Error carrying `errorMessage`. The internal
// timer is always cleared once the race settles, so it never keeps the
// event loop alive.
function withTimeout<TValue>(
  promise: Promise<TValue>,
  timeoutMs: number,
  errorMessage: string,
): Promise<TValue> {
  let pendingTimer: ReturnType<typeof setTimeout> | undefined
  const failAfterDeadline = new Promise<never>((_, reject) => {
    pendingTimer = setTimeout(() => reject(new Error(errorMessage)), timeoutMs)
  })
  const clearTimer = () => {
    if (pendingTimer !== undefined) {
      clearTimeout(pendingTimer)
    }
  }
  return Promise.race([promise, failAfterDeadline]).finally(clearTimer)
}
// True for provider IDs that route to an Anthropic model, whether directly
// or via Google Vertex.
function isAnthropicProvider(providerID: string): boolean {
  return ["anthropic", "google-vertex-anthropic"].includes(providerID)
}
@@ -115,15 +94,11 @@ export function createPreemptiveCompactionHook(
modelID
)
await withTimeout(
ctx.client.session.summarize({
path: { id: sessionID },
body: { providerID: targetProviderID, modelID: targetModelID, auto: true } as never,
query: { directory: ctx.directory },
}),
PREEMPTIVE_COMPACTION_TIMEOUT_MS,
`Compaction summarize timed out after ${PREEMPTIVE_COMPACTION_TIMEOUT_MS}ms`,
)
await ctx.client.session.summarize({
path: { id: sessionID },
body: { providerID: targetProviderID, modelID: targetModelID, auto: true } as never,
query: { directory: ctx.directory },
})
compactedSessions.add(sessionID)
} catch (error) {

View File

@@ -1,6 +1,6 @@
# src/hooks/ralph-loop/ — Self-Referential Dev Loop
**Generated:** 2026-02-24
**Generated:** 2026-02-21
## OVERVIEW

View File

@@ -33,16 +33,6 @@ export async function continueIteration(
return
}
await injectContinuationPrompt(ctx, {
sessionID: newSessionID,
inheritFromSessionID: options.previousSessionID,
prompt: continuationPrompt,
directory: options.directory,
apiTimeoutMs: options.apiTimeoutMs,
})
await selectSessionInTui(ctx.client, newSessionID)
const boundState = options.loopState.setSessionID(newSessionID)
if (!boundState) {
log(`[${HOOK_NAME}] Failed to bind loop state to new session`, {
@@ -52,6 +42,15 @@ export async function continueIteration(
return
}
await injectContinuationPrompt(ctx, {
sessionID: newSessionID,
inheritFromSessionID: options.previousSessionID,
prompt: continuationPrompt,
directory: options.directory,
apiTimeoutMs: options.apiTimeoutMs,
})
await selectSessionInTui(ctx.client, newSessionID)
return
}

View File

@@ -1,113 +0,0 @@
/// <reference types="bun-types" />
import { describe, expect, test } from "bun:test"
import { createRalphLoopHook } from "./index"
// Builds a manually-settleable promise: `resolve` settles `promise` on
// demand. Safe to call `resolve` multiple times (extra calls are no-ops,
// matching Promise semantics).
function createDeferred(): {
  promise: Promise<void>
  resolve: () => void
} {
  let settle: (() => void) | undefined
  const promise = new Promise<void>((res) => {
    // executor runs synchronously, so `settle` is assigned before return
    settle = res
  })
  const resolve = () => {
    settle?.()
  }
  return { promise, resolve }
}
// Polls `condition` up to 100 times, yielding to the macrotask queue
// (setTimeout 0) between checks. Resolves as soon as the condition is true;
// rejects if it never becomes true within the polling budget.
async function waitUntil(condition: () => boolean): Promise<void> {
  const maxPolls = 100
  let polls = 0
  while (polls < maxPolls) {
    if (condition()) {
      return
    }
    // let pending timers/microtasks run before the next check
    await new Promise<void>((resolve) => setTimeout(resolve, 0))
    polls += 1
  }
  throw new Error("Condition was not met in time")
}
// Regression test for a race in the "reset" loop strategy: the TUI session
// switch is deliberately blocked (via a deferred) so a second idle event from
// the OLD session arrives mid-transition; both idles must still advance the
// loop rather than the second being dropped.
describe("ralph-loop reset strategy race condition", () => {
test("should continue iteration when old session idle arrives before TUI switch completes", async () => {
// given - reset strategy loop with blocked TUI session switch
const promptCalls: Array<{ sessionID: string; text: string }> = []
const createSessionCalls: Array<{ parentID?: string }> = []
let selectSessionCalls = 0
const selectSessionDeferred = createDeferred()
const hook = createRalphLoopHook({
directory: process.cwd(),
client: {
session: {
// record every prompt so we can assert one continuation per iteration
prompt: async (options: {
path: { id: string }
body: { parts: Array<{ type: string; text: string }> }
}) => {
promptCalls.push({
sessionID: options.path.id,
text: options.body.parts[0].text,
})
return {}
},
promptAsync: async (options: {
path: { id: string }
body: { parts: Array<{ type: string; text: string }> }
}) => {
promptCalls.push({
sessionID: options.path.id,
text: options.body.parts[0].text,
})
return {}
},
// each created session gets a deterministic id: new-session-1, new-session-2, …
create: async (options: {
body: { parentID?: string; title?: string }
query?: { directory?: string }
}) => {
createSessionCalls.push({ parentID: options.body.parentID })
return { data: { id: `new-session-${createSessionCalls.length}` } }
},
messages: async () => ({ data: [] }),
},
tui: {
showToast: async () => ({}),
// block here until the test resolves the deferred, simulating a slow TUI switch
selectSession: async () => {
selectSessionCalls += 1
await selectSessionDeferred.promise
return {}
},
},
},
} as Parameters<typeof createRalphLoopHook>[0])
hook.startLoop("session-old", "Build feature", { strategy: "reset" })
// when - first idle is in-flight and old session fires idle again before TUI switch resolves
const firstIdleEvent = hook.event({
event: { type: "session.idle", properties: { sessionID: "session-old" } },
})
await waitUntil(() => selectSessionCalls > 0)
const secondIdleEvent = hook.event({
event: { type: "session.idle", properties: { sessionID: "session-old" } },
})
await waitUntil(() => selectSessionCalls > 1)
// unblock both pending selectSession calls at once
selectSessionDeferred.resolve()
await Promise.all([firstIdleEvent, secondIdleEvent])
// then - second idle should not be skipped during reset transition
expect(createSessionCalls.length).toBe(2)
expect(promptCalls.length).toBe(2)
expect(hook.getState()?.iteration).toBe(3)
})
})

View File

@@ -1,6 +1,6 @@
# src/hooks/rules-injector/ — Conditional Rules Injection
**Generated:** 2026-02-24
**Generated:** 2026-02-21
## OVERVIEW

View File

@@ -3,7 +3,6 @@ const { describe, expect, test, beforeEach, afterEach, spyOn } = require("bun:te
const { createSessionNotification } = require("./session-notification")
const { setMainSession, subagentSessions, _resetForTesting } = require("../features/claude-code-session-state")
const utils = require("./session-notification-utils")
const sender = require("./session-notification-sender")
describe("session-notification input-needed events", () => {
let notificationCalls: string[]
@@ -38,10 +37,6 @@ describe("session-notification input-needed events", () => {
spyOn(utils, "getNotifySendPath").mockResolvedValue("/usr/bin/notify-send")
spyOn(utils, "getPowershellPath").mockResolvedValue("powershell")
spyOn(utils, "startBackgroundCheck").mockImplementation(() => {})
spyOn(sender, "detectPlatform").mockReturnValue("darwin")
spyOn(sender, "sendSessionNotification").mockImplementation(async (_ctx: unknown, _platform: unknown, _title: unknown, message: string) => {
notificationCalls.push(message)
})
})
afterEach(() => {
@@ -52,7 +47,7 @@ describe("session-notification input-needed events", () => {
test("sends question notification when question tool asks for input", async () => {
const sessionID = "main-question"
setMainSession(sessionID)
const hook = createSessionNotification(createMockPluginInput(), { enforceMainSessionFilter: false })
const hook = createSessionNotification(createMockPluginInput())
await hook({
event: {
@@ -79,7 +74,7 @@ describe("session-notification input-needed events", () => {
test("sends permission notification for permission events", async () => {
const sessionID = "main-permission"
setMainSession(sessionID)
const hook = createSessionNotification(createMockPluginInput(), { enforceMainSessionFilter: false })
const hook = createSessionNotification(createMockPluginInput())
await hook({
event: {

View File

@@ -1,9 +1,8 @@
const { describe, expect, test, beforeEach, afterEach, spyOn } = require("bun:test")
import { describe, expect, test, beforeEach, afterEach, spyOn } from "bun:test"
import { createSessionNotification } from "./session-notification"
import { setMainSession, subagentSessions, _resetForTesting } from "../features/claude-code-session-state"
import * as utils from "./session-notification-utils"
import * as sender from "./session-notification-sender"
describe("session-notification", () => {
let notificationCalls: string[]
@@ -41,10 +40,6 @@ describe("session-notification", () => {
spyOn(utils, "getPaplayPath").mockResolvedValue("/usr/bin/paplay")
spyOn(utils, "getAplayPath").mockResolvedValue("/usr/bin/aplay")
spyOn(utils, "startBackgroundCheck").mockImplementation(() => {})
spyOn(sender, "detectPlatform").mockReturnValue("darwin")
spyOn(sender, "sendSessionNotification").mockImplementation(async (_ctx, _platform, _title, message) => {
notificationCalls.push(message)
})
})
afterEach(() => {
@@ -110,7 +105,6 @@ describe("session-notification", () => {
const hook = createSessionNotification(createMockPluginInput(), {
idleConfirmationDelay: 10,
skipIfIncompleteTodos: false,
enforceMainSessionFilter: false,
})
// when - main session goes idle
@@ -338,7 +332,6 @@ describe("session-notification", () => {
const hook = createSessionNotification(createMockPluginInput(), {
idleConfirmationDelay: 10,
skipIfIncompleteTodos: false,
enforceMainSessionFilter: false,
})
// when - session goes idle twice

View File

@@ -4,9 +4,11 @@ import {
startBackgroundCheck,
} from "./session-notification-utils"
import {
type Platform,
detectPlatform,
getDefaultSoundPath,
playSessionNotificationSound,
sendSessionNotification,
} from "./session-notification-sender"
import * as sessionNotificationSender from "./session-notification-sender"
import { hasIncompleteTodos } from "./session-todo-status"
import { createIdleNotificationScheduler } from "./session-notification-scheduler"
@@ -23,14 +25,13 @@ interface SessionNotificationConfig {
skipIfIncompleteTodos?: boolean
/** Maximum number of sessions to track before cleanup (default: 100) */
maxTrackedSessions?: number
enforceMainSessionFilter?: boolean
}
export function createSessionNotification(
ctx: PluginInput,
config: SessionNotificationConfig = {}
) {
const currentPlatform: Platform = sessionNotificationSender.detectPlatform()
const defaultSoundPath = sessionNotificationSender.getDefaultSoundPath(currentPlatform)
const currentPlatform = detectPlatform()
const defaultSoundPath = getDefaultSoundPath(currentPlatform)
startBackgroundCheck(currentPlatform)
@@ -44,7 +45,6 @@ export function createSessionNotification(
idleConfirmationDelay: 1500,
skipIfIncompleteTodos: true,
maxTrackedSessions: 100,
enforceMainSessionFilter: true,
...config,
}
@@ -53,8 +53,8 @@ export function createSessionNotification(
platform: currentPlatform,
config: mergedConfig,
hasIncompleteTodos,
send: sessionNotificationSender.sendSessionNotification,
playSound: sessionNotificationSender.playSessionNotificationSound,
send: sendSessionNotification,
playSound: playSessionNotificationSound,
})
const QUESTION_TOOLS = new Set(["question", "ask_user_question", "askuserquestion"])
@@ -81,10 +81,8 @@ export function createSessionNotification(
const shouldNotifyForSession = (sessionID: string): boolean => {
if (subagentSessions.has(sessionID)) return false
if (mergedConfig.enforceMainSessionFilter) {
const mainSessionID = getMainSessionID()
if (mainSessionID && sessionID !== mainSessionID) return false
}
const mainSessionID = getMainSessionID()
if (mainSessionID && sessionID !== mainSessionID) return false
return true
}
@@ -148,14 +146,9 @@ export function createSessionNotification(
if (!shouldNotifyForSession(sessionID)) return
scheduler.markSessionActivity(sessionID)
await sessionNotificationSender.sendSessionNotification(
ctx,
currentPlatform,
mergedConfig.title,
mergedConfig.permissionMessage,
)
await sendSessionNotification(ctx, currentPlatform, mergedConfig.title, mergedConfig.permissionMessage)
if (mergedConfig.playSound && mergedConfig.soundPath) {
await sessionNotificationSender.playSessionNotificationSound(ctx, currentPlatform, mergedConfig.soundPath)
await playSessionNotificationSound(ctx, currentPlatform, mergedConfig.soundPath)
}
return
}
@@ -175,9 +168,9 @@ export function createSessionNotification(
? mergedConfig.permissionMessage
: mergedConfig.questionMessage
await sessionNotificationSender.sendSessionNotification(ctx, currentPlatform, mergedConfig.title, message)
await sendSessionNotification(ctx, currentPlatform, mergedConfig.title, message)
if (mergedConfig.playSound && mergedConfig.soundPath) {
await sessionNotificationSender.playSessionNotificationSound(ctx, currentPlatform, mergedConfig.soundPath)
await playSessionNotificationSound(ctx, currentPlatform, mergedConfig.soundPath)
}
}
}

View File

@@ -1,6 +1,6 @@
# src/hooks/session-recovery/ — Auto Session Error Recovery
**Generated:** 2026-02-24
**Generated:** 2026-02-21
## OVERVIEW

View File

@@ -7,12 +7,9 @@ import { createStartWorkHook } from "./index"
import {
writeBoulderState,
clearBoulderState,
readBoulderState,
} from "../../features/boulder-state"
import type { BoulderState } from "../../features/boulder-state"
import * as sessionState from "../../features/claude-code-session-state"
import * as worktreeDetector from "./worktree-detector"
import * as worktreeDetector from "./worktree-detector"
describe("start-work hook", () => {
let testDir: string
@@ -405,152 +402,4 @@ describe("start-work hook", () => {
updateSpy.mockRestore()
})
})
// Covers --worktree flag handling in the start-work hook.
// detectWorktreePath is spied so no real git invocation happens: by default it
// returns null (invalid path); individual tests override it to simulate a
// validated worktree. Relies on outer-scope fixtures: testDir,
// createMockPluginInput, and the boulder-state helpers.
describe("worktree support", () => {
  let detectSpy: ReturnType<typeof spyOn>

  beforeEach(() => {
    // Default: path validation fails (invalid / non-git path).
    detectSpy = spyOn(worktreeDetector, "detectWorktreePath").mockReturnValue(null)
  })

  afterEach(() => {
    detectSpy.mockRestore()
  })

  test("should inject model-decides instructions when no --worktree flag", async () => {
    // given - single plan, no worktree flag
    const plansDir = join(testDir, ".sisyphus", "plans")
    mkdirSync(plansDir, { recursive: true })
    writeFileSync(join(plansDir, "my-plan.md"), "# Plan\n- [ ] Task 1")
    const hook = createStartWorkHook(createMockPluginInput())
    const output = {
      parts: [{ type: "text", text: "<session-context></session-context>" }],
    }
    // when
    await hook["chat.message"]({ sessionID: "session-123" }, output)
    // then - model-decides instructions should appear
    expect(output.parts[0].text).toContain("Worktree Setup Required")
    expect(output.parts[0].text).toContain("git worktree list --porcelain")
    expect(output.parts[0].text).toContain("git worktree add")
  })

  test("should inject worktree path when --worktree flag is valid", async () => {
    // given - single plan + valid worktree path
    const plansDir = join(testDir, ".sisyphus", "plans")
    mkdirSync(plansDir, { recursive: true })
    writeFileSync(join(plansDir, "my-plan.md"), "# Plan\n- [ ] Task 1")
    detectSpy.mockReturnValue("/validated/worktree")
    const hook = createStartWorkHook(createMockPluginInput())
    const output = {
      parts: [{ type: "text", text: "<session-context>\n<user-request>--worktree /validated/worktree</user-request>\n</session-context>" }],
    }
    // when
    await hook["chat.message"]({ sessionID: "session-123" }, output)
    // then - validated path shown, no model-decides instructions
    expect(output.parts[0].text).toContain("**Worktree**: /validated/worktree")
    expect(output.parts[0].text).not.toContain("Worktree Setup Required")
  })

  test("should store worktree_path in boulder when --worktree is valid", async () => {
    // given - plan + valid worktree
    const plansDir = join(testDir, ".sisyphus", "plans")
    mkdirSync(plansDir, { recursive: true })
    writeFileSync(join(plansDir, "my-plan.md"), "# Plan\n- [ ] Task 1")
    detectSpy.mockReturnValue("/valid/wt")
    const hook = createStartWorkHook(createMockPluginInput())
    const output = {
      parts: [{ type: "text", text: "<session-context>\n<user-request>--worktree /valid/wt</user-request>\n</session-context>" }],
    }
    // when
    await hook["chat.message"]({ sessionID: "session-123" }, output)
    // then - boulder.json has worktree_path
    const state = readBoulderState(testDir)
    expect(state?.worktree_path).toBe("/valid/wt")
  })

  test("should NOT store worktree_path when --worktree path is invalid", async () => {
    // given - plan + invalid worktree path (detectWorktreePath returns null)
    const plansDir = join(testDir, ".sisyphus", "plans")
    mkdirSync(plansDir, { recursive: true })
    writeFileSync(join(plansDir, "my-plan.md"), "# Plan\n- [ ] Task 1")
    // detectSpy already returns null by default
    const hook = createStartWorkHook(createMockPluginInput())
    const output = {
      parts: [{ type: "text", text: "<session-context>\n<user-request>--worktree /nonexistent/wt</user-request>\n</session-context>" }],
    }
    // when
    await hook["chat.message"]({ sessionID: "session-123" }, output)
    // then - worktree_path absent, setup instructions present
    const state = readBoulderState(testDir)
    expect(state?.worktree_path).toBeUndefined()
    expect(output.parts[0].text).toContain("needs setup")
    expect(output.parts[0].text).toContain("git worktree add /nonexistent/wt")
  })

  test("should update boulder worktree_path on resume when new --worktree given", async () => {
    // given - existing boulder with old worktree, user provides new worktree
    const planPath = join(testDir, "plan.md")
    writeFileSync(planPath, "# Plan\n- [ ] Task 1")
    const existingState: BoulderState = {
      active_plan: planPath,
      started_at: "2026-01-01T00:00:00Z",
      session_ids: ["old-session"],
      plan_name: "plan",
      worktree_path: "/old/wt",
    }
    writeBoulderState(testDir, existingState)
    detectSpy.mockReturnValue("/new/wt")
    const hook = createStartWorkHook(createMockPluginInput())
    const output = {
      parts: [{ type: "text", text: "<session-context>\n<user-request>--worktree /new/wt</user-request>\n</session-context>" }],
    }
    // when
    await hook["chat.message"]({ sessionID: "session-456" }, output)
    // then - boulder reflects updated worktree and new session appended
    const state = readBoulderState(testDir)
    expect(state?.worktree_path).toBe("/new/wt")
    expect(state?.session_ids).toContain("session-456")
  })

  test("should show existing worktree on resume when no --worktree flag", async () => {
    // given - existing boulder already has worktree_path, no flag given
    const planPath = join(testDir, "plan.md")
    writeFileSync(planPath, "# Plan\n- [ ] Task 1")
    const existingState: BoulderState = {
      active_plan: planPath,
      started_at: "2026-01-01T00:00:00Z",
      session_ids: ["old-session"],
      plan_name: "plan",
      worktree_path: "/existing/wt",
    }
    writeBoulderState(testDir, existingState)
    const hook = createStartWorkHook(createMockPluginInput())
    const output = {
      parts: [{ type: "text", text: "<session-context></session-context>" }],
    }
    // when
    await hook["chat.message"]({ sessionID: "session-789" }, output)
    // then - shows existing worktree, no model-decides instructions
    expect(output.parts[0].text).toContain("/existing/wt")
    expect(output.parts[0].text).not.toContain("Worktree Setup Required")
  })
})
})

View File

@@ -1,4 +1 @@
export { HOOK_NAME, createStartWorkHook } from "./start-work-hook"
export { detectWorktreePath } from "./worktree-detector"
export type { ParsedUserRequest } from "./parse-user-request"
export { parseUserRequest } from "./parse-user-request"

View File

@@ -1,78 +0,0 @@
/// <reference types="bun-types" />
import { describe, expect, test } from "bun:test"
import { parseUserRequest } from "./parse-user-request"
// Unit tests for parseUserRequest: extraction of the plan name and the
// optional --worktree <path> flag from the <user-request> tag, including
// stripping of the "ultrawork"/"ulw" emphasis keywords.
describe("parseUserRequest", () => {
  describe("when no user-request tag", () => {
    test("#given prompt without tag #when parsing #then returns nulls", () => {
      const result = parseUserRequest("Just a regular message without any tags")
      expect(result.planName).toBeNull()
      expect(result.explicitWorktreePath).toBeNull()
    })
  })

  describe("when user-request tag is empty", () => {
    test("#given empty user-request tag #when parsing #then returns nulls", () => {
      // whitespace-only content is treated the same as an empty tag
      const result = parseUserRequest("<user-request> </user-request>")
      expect(result.planName).toBeNull()
      expect(result.explicitWorktreePath).toBeNull()
    })
  })

  describe("when only plan name given", () => {
    test("#given plan name without worktree flag #when parsing #then returns plan name with null worktree", () => {
      const result = parseUserRequest("<session-context>\n<user-request>my-plan</user-request>\n</session-context>")
      expect(result.planName).toBe("my-plan")
      expect(result.explicitWorktreePath).toBeNull()
    })
  })

  describe("when only --worktree flag given", () => {
    test("#given --worktree with path only #when parsing #then returns worktree path with null plan", () => {
      const result = parseUserRequest("<user-request>--worktree /home/user/repo-feat</user-request>")
      expect(result.planName).toBeNull()
      expect(result.explicitWorktreePath).toBe("/home/user/repo-feat")
    })
  })

  describe("when plan name and --worktree are both given", () => {
    // flag position must not matter: plan name may appear before or after it
    test("#given plan name before --worktree #when parsing #then returns both", () => {
      const result = parseUserRequest("<user-request>my-plan --worktree /path/to/worktree</user-request>")
      expect(result.planName).toBe("my-plan")
      expect(result.explicitWorktreePath).toBe("/path/to/worktree")
    })
    test("#given --worktree before plan name #when parsing #then returns both", () => {
      const result = parseUserRequest("<user-request>--worktree /path/to/worktree my-plan</user-request>")
      expect(result.planName).toBe("my-plan")
      expect(result.explicitWorktreePath).toBe("/path/to/worktree")
    })
  })

  describe("when --worktree flag has no path", () => {
    test("#given --worktree without path #when parsing #then worktree path is null", () => {
      const result = parseUserRequest("<user-request>--worktree</user-request>")
      expect(result.explicitWorktreePath).toBeNull()
    })
  })

  describe("when ultrawork keywords are present", () => {
    // "ultrawork"/"ulw" are emphasis keywords, not part of the plan name
    test("#given plan name with ultrawork keyword #when parsing #then strips keyword from plan name", () => {
      const result = parseUserRequest("<user-request>my-plan ultrawork</user-request>")
      expect(result.planName).toBe("my-plan")
    })
    test("#given plan name with ulw keyword and worktree #when parsing #then strips ulw, preserves worktree", () => {
      const result = parseUserRequest("<user-request>my-plan ulw --worktree /path/to/wt</user-request>")
      expect(result.planName).toBe("my-plan")
      expect(result.explicitWorktreePath).toBe("/path/to/wt")
    })
    test("#given only ultrawork keyword with worktree #when parsing #then plan name is null, worktree preserved", () => {
      const result = parseUserRequest("<user-request>ultrawork --worktree /wt</user-request>")
      expect(result.planName).toBeNull()
      expect(result.explicitWorktreePath).toBe("/wt")
    })
  })
})

View File

@@ -1,29 +0,0 @@
// Emphasis keywords ("ultrawork"/"ulw") are stripped from plan names.
const KEYWORD_PATTERN = /\b(ultrawork|ulw)\b/gi
// Matches the --worktree flag with an optional path argument captured in group 1.
const WORKTREE_FLAG_PATTERN = /--worktree(?:\s+(\S+))?/

export interface ParsedUserRequest {
  planName: string | null
  explicitWorktreePath: string | null
}

/**
 * Parses the <user-request> tag embedded in a prompt.
 *
 * Extracts an optional `--worktree <path>` flag and treats whatever remains
 * (minus the emphasis keywords) as the plan name. Returns nulls when the tag
 * is absent, empty, or reduces to nothing after stripping.
 */
export function parseUserRequest(promptText: string): ParsedUserRequest {
  const tagMatch = promptText.match(/<user-request>\s*([\s\S]*?)\s*<\/user-request>/i)
  if (!tagMatch) {
    return { planName: null, explicitWorktreePath: null }
  }

  let remainder = tagMatch[1].trim()
  if (!remainder) {
    return { planName: null, explicitWorktreePath: null }
  }

  // Pull the --worktree flag (and its path, if any) out of the request text.
  const flagMatch = remainder.match(WORKTREE_FLAG_PATTERN)
  let explicitWorktreePath: string | null = null
  if (flagMatch) {
    explicitWorktreePath = flagMatch[1] ?? null
    remainder = remainder.replace(flagMatch[0], "").trim()
  }

  // What is left after keyword stripping is the plan name ("" becomes null).
  const planName = remainder.replace(KEYWORD_PATTERN, "").trim()
  return {
    planName: planName || null,
    explicitWorktreePath,
  }
}

View File

@@ -1,4 +1,3 @@
import { statSync } from "node:fs"
import type { PluginInput } from "@opencode-ai/plugin"
import {
readBoulderState,
@@ -12,11 +11,11 @@ import {
} from "../../features/boulder-state"
import { log } from "../../shared/logger"
import { updateSessionAgent } from "../../features/claude-code-session-state"
import { detectWorktreePath } from "./worktree-detector"
import { parseUserRequest } from "./parse-user-request"
export const HOOK_NAME = "start-work" as const
const KEYWORD_PATTERN = /\b(ultrawork|ulw)\b/gi
interface StartWorkHookInput {
sessionID: string
messageID?: string
@@ -26,76 +25,73 @@ interface StartWorkHookOutput {
parts: Array<{ type: string; text?: string }>
}
function findPlanByName(plans: string[], requestedName: string): string | null {
const lowerName = requestedName.toLowerCase()
const exactMatch = plans.find((p) => getPlanName(p).toLowerCase() === lowerName)
if (exactMatch) return exactMatch
const partialMatch = plans.find((p) => getPlanName(p).toLowerCase().includes(lowerName))
return partialMatch || null
function extractUserRequestPlanName(promptText: string): string | null {
const userRequestMatch = promptText.match(/<user-request>\s*([\s\S]*?)\s*<\/user-request>/i)
if (!userRequestMatch) return null
const rawArg = userRequestMatch[1].trim()
if (!rawArg) return null
const cleanedArg = rawArg.replace(KEYWORD_PATTERN, "").trim()
return cleanedArg || null
}
const MODEL_DECIDES_WORKTREE_BLOCK = `
## Worktree Setup Required
No worktree specified. Before starting work, you MUST choose or create one:
1. \`git worktree list --porcelain\` — list existing worktrees
2. Create if needed: \`git worktree add <absolute-path> <branch-or-HEAD>\`
3. Update \`.sisyphus/boulder.json\` — add \`"worktree_path": "<absolute-path>"\`
4. Work exclusively inside that worktree directory`
function resolveWorktreeContext(
explicitWorktreePath: string | null,
): { worktreePath: string | undefined; block: string } {
if (explicitWorktreePath === null) {
return { worktreePath: undefined, block: MODEL_DECIDES_WORKTREE_BLOCK }
}
const validatedPath = detectWorktreePath(explicitWorktreePath)
if (validatedPath) {
return { worktreePath: validatedPath, block: `\n**Worktree**: ${validatedPath}` }
}
return {
worktreePath: undefined,
block: `\n**Worktree** (needs setup): \`git worktree add ${explicitWorktreePath} <branch>\`, then add \`"worktree_path"\` to boulder.json`,
}
function findPlanByName(plans: string[], requestedName: string): string | null {
const lowerName = requestedName.toLowerCase()
const exactMatch = plans.find(p => getPlanName(p).toLowerCase() === lowerName)
if (exactMatch) return exactMatch
const partialMatch = plans.find(p => getPlanName(p).toLowerCase().includes(lowerName))
return partialMatch || null
}
export function createStartWorkHook(ctx: PluginInput) {
return {
"chat.message": async (input: StartWorkHookInput, output: StartWorkHookOutput): Promise<void> => {
"chat.message": async (
input: StartWorkHookInput,
output: StartWorkHookOutput
): Promise<void> => {
const parts = output.parts
const promptText =
parts
?.filter((p) => p.type === "text" && p.text)
.map((p) => p.text)
.join("\n")
.trim() || ""
const promptText = parts
?.filter((p) => p.type === "text" && p.text)
.map((p) => p.text)
.join("\n")
.trim() || ""
if (!promptText.includes("<session-context>")) return
// Only trigger on actual command execution (contains <session-context> tag)
// NOT on description text like "Start Sisyphus work session from Prometheus plan"
const isStartWorkCommand = promptText.includes("<session-context>")
log(`[${HOOK_NAME}] Processing start-work command`, { sessionID: input.sessionID })
updateSessionAgent(input.sessionID, "atlas")
if (!isStartWorkCommand) {
return
}
log(`[${HOOK_NAME}] Processing start-work command`, {
sessionID: input.sessionID,
})
updateSessionAgent(input.sessionID, "atlas") // Always switch: fixes #1298
const existingState = readBoulderState(ctx.directory)
const sessionId = input.sessionID
const timestamp = new Date().toISOString()
const { planName: explicitPlanName, explicitWorktreePath } = parseUserRequest(promptText)
const { worktreePath, block: worktreeBlock } = resolveWorktreeContext(explicitWorktreePath)
let contextInfo = ""
const explicitPlanName = extractUserRequestPlanName(promptText)
if (explicitPlanName) {
log(`[${HOOK_NAME}] Explicit plan name requested: ${explicitPlanName}`, { sessionID: input.sessionID })
log(`[${HOOK_NAME}] Explicit plan name requested: ${explicitPlanName}`, {
sessionID: input.sessionID,
})
const allPlans = findPrometheusPlans(ctx.directory)
const matchedPlan = findPlanByName(allPlans, explicitPlanName)
if (matchedPlan) {
const progress = getPlanProgress(matchedPlan)
if (progress.isComplete) {
contextInfo = `
## Plan Already Complete
@@ -103,10 +99,12 @@ export function createStartWorkHook(ctx: PluginInput) {
The requested plan "${getPlanName(matchedPlan)}" has been completed.
All ${progress.total} tasks are done. Create a new plan with: /plan "your task"`
} else {
if (existingState) clearBoulderState(ctx.directory)
const newState = createBoulderState(matchedPlan, sessionId, "atlas", worktreePath)
if (existingState) {
clearBoulderState(ctx.directory)
}
const newState = createBoulderState(matchedPlan, sessionId, "atlas")
writeBoulderState(ctx.directory, newState)
contextInfo = `
## Auto-Selected Plan
@@ -115,20 +113,17 @@ All ${progress.total} tasks are done. Create a new plan with: /plan "your task"`
**Progress**: ${progress.completed}/${progress.total} tasks
**Session ID**: ${sessionId}
**Started**: ${timestamp}
${worktreeBlock}
boulder.json has been created. Read the plan and begin execution.`
}
} else {
const incompletePlans = allPlans.filter((p) => !getPlanProgress(p).isComplete)
const incompletePlans = allPlans.filter(p => !getPlanProgress(p).isComplete)
if (incompletePlans.length > 0) {
const planList = incompletePlans
.map((p, i) => {
const prog = getPlanProgress(p)
return `${i + 1}. [${getPlanName(p)}] - Progress: ${prog.completed}/${prog.total}`
})
.join("\n")
const planList = incompletePlans.map((p, i) => {
const prog = getPlanProgress(p)
return `${i + 1}. [${getPlanName(p)}] - Progress: ${prog.completed}/${prog.total}`
}).join("\n")
contextInfo = `
## Plan Not Found
@@ -148,25 +143,9 @@ No incomplete plans available. Create a new plan with: /plan "your task"`
}
} else if (existingState) {
const progress = getPlanProgress(existingState.active_plan)
if (!progress.isComplete) {
const effectiveWorktree = worktreePath ?? existingState.worktree_path
if (worktreePath !== undefined) {
const updatedSessions = existingState.session_ids.includes(sessionId)
? existingState.session_ids
: [...existingState.session_ids, sessionId]
writeBoulderState(ctx.directory, {
...existingState,
worktree_path: worktreePath,
session_ids: updatedSessions,
})
} else {
appendSessionId(ctx.directory, sessionId)
}
const worktreeDisplay = effectiveWorktree ? `\n**Worktree**: ${effectiveWorktree}` : worktreeBlock
appendSessionId(ctx.directory, sessionId)
contextInfo = `
## Active Work Session Found
@@ -176,7 +155,6 @@ No incomplete plans available. Create a new plan with: /plan "your task"`
**Progress**: ${progress.completed}/${progress.total} tasks completed
**Sessions**: ${existingState.session_ids.length + 1} (current session appended)
**Started**: ${existingState.started_at}
${worktreeDisplay}
The current session (${sessionId}) has been added to session_ids.
Read the plan file and continue from the first unchecked task.`
@@ -189,15 +167,13 @@ Looking for new plans...`
}
}
if (
(!existingState && !explicitPlanName) ||
(existingState && !explicitPlanName && getPlanProgress(existingState.active_plan).isComplete)
) {
if ((!existingState && !explicitPlanName) || (existingState && !explicitPlanName && getPlanProgress(existingState.active_plan).isComplete)) {
const plans = findPrometheusPlans(ctx.directory)
const incompletePlans = plans.filter((p) => !getPlanProgress(p).isComplete)
const incompletePlans = plans.filter(p => !getPlanProgress(p).isComplete)
if (plans.length === 0) {
contextInfo += `
## No Plans Found
No Prometheus plan files found at .sisyphus/plans/
@@ -211,7 +187,7 @@ All ${plans.length} plan(s) are complete. Create a new plan with: /plan "your ta
} else if (incompletePlans.length === 1) {
const planPath = incompletePlans[0]
const progress = getPlanProgress(planPath)
const newState = createBoulderState(planPath, sessionId, "atlas", worktreePath)
const newState = createBoulderState(planPath, sessionId, "atlas")
writeBoulderState(ctx.directory, newState)
contextInfo += `
@@ -223,17 +199,15 @@ All ${plans.length} plan(s) are complete. Create a new plan with: /plan "your ta
**Progress**: ${progress.completed}/${progress.total} tasks
**Session ID**: ${sessionId}
**Started**: ${timestamp}
${worktreeBlock}
boulder.json has been created. Read the plan and begin execution.`
} else {
const planList = incompletePlans
.map((p, i) => {
const progress = getPlanProgress(p)
const modified = new Date(statSync(p).mtimeMs).toISOString()
return `${i + 1}. [${getPlanName(p)}] - Modified: ${modified} - Progress: ${progress.completed}/${progress.total}`
})
.join("\n")
const planList = incompletePlans.map((p, i) => {
const progress = getPlanProgress(p)
const stat = require("node:fs").statSync(p)
const modified = new Date(stat.mtimeMs).toISOString()
return `${i + 1}. [${getPlanName(p)}] - Modified: ${modified} - Progress: ${progress.completed}/${progress.total}`
}).join("\n")
contextInfo += `
@@ -246,7 +220,6 @@ Session ID: ${sessionId}
${planList}
Ask the user which plan to work on. Present the options above and wait for their response.
${worktreeBlock}
</system-reminder>`
}
}
@@ -256,14 +229,13 @@ ${worktreeBlock}
output.parts[idx].text = output.parts[idx].text
.replace(/\$SESSION_ID/g, sessionId)
.replace(/\$TIMESTAMP/g, timestamp)
output.parts[idx].text += `\n\n---\n${contextInfo}`
}
log(`[${HOOK_NAME}] Context injected`, {
sessionID: input.sessionID,
hasExistingState: !!existingState,
worktreePath,
})
},
}

View File

@@ -1,79 +0,0 @@
/// <reference types="bun-types" />
import { describe, expect, test, spyOn, beforeEach, afterEach } from "bun:test"
import * as childProcess from "node:child_process"
import { detectWorktreePath } from "./worktree-detector"
// Unit tests for detectWorktreePath. execFileSync is spied so git is never
// actually executed; tests simulate its stdout (success) or a thrown error
// (not a repo / missing directory).
describe("detectWorktreePath", () => {
  let execFileSyncSpy: ReturnType<typeof spyOn>

  beforeEach(() => {
    execFileSyncSpy = spyOn(childProcess, "execFileSync").mockImplementation(
      ((_file: string, _args: string[]) => "") as typeof childProcess.execFileSync,
    )
  })

  afterEach(() => {
    execFileSyncSpy.mockRestore()
  })

  describe("when directory is a valid git worktree", () => {
    test("#given valid git dir #when detecting #then returns worktree root path", () => {
      // git prints the toplevel followed by a newline
      execFileSyncSpy.mockImplementation(
        ((_file: string, _args: string[]) => "/home/user/my-repo\n") as typeof childProcess.execFileSync,
      )
      // when
      const result = detectWorktreePath("/home/user/my-repo/src")
      // then
      expect(result).toBe("/home/user/my-repo")
    })

    test("#given git output with trailing newline #when detecting #then trims output", () => {
      execFileSyncSpy.mockImplementation(
        ((_file: string, _args: string[]) => "/projects/worktree-a\n\n") as typeof childProcess.execFileSync,
      )
      const result = detectWorktreePath("/projects/worktree-a")
      expect(result).toBe("/projects/worktree-a")
    })

    test("#given valid dir #when detecting #then calls git rev-parse with cwd", () => {
      execFileSyncSpy.mockImplementation(
        ((_file: string, _args: string[]) => "/repo\n") as typeof childProcess.execFileSync,
      )
      detectWorktreePath("/repo/some/subdir")
      // the queried directory must be passed as cwd, not as an argument
      expect(execFileSyncSpy).toHaveBeenCalledWith(
        "git",
        ["rev-parse", "--show-toplevel"],
        expect.objectContaining({ cwd: "/repo/some/subdir" }),
      )
    })
  })

  describe("when directory is not a git worktree", () => {
    test("#given non-git directory #when detecting #then returns null", () => {
      execFileSyncSpy.mockImplementation((_file: string, _args: string[]) => {
        throw new Error("not a git repository")
      })
      const result = detectWorktreePath("/tmp/not-a-repo")
      expect(result).toBeNull()
    })

    test("#given non-existent directory #when detecting #then returns null", () => {
      execFileSyncSpy.mockImplementation((_file: string, _args: string[]) => {
        throw new Error("ENOENT: no such file or directory")
      })
      const result = detectWorktreePath("/nonexistent/path")
      expect(result).toBeNull()
    })
  })
})

View File

@@ -1,14 +0,0 @@
import { execFileSync } from "node:child_process"
/**
 * Resolves the git worktree root for `directory` via
 * `git rev-parse --show-toplevel`, run with `directory` as cwd.
 *
 * @returns the trimmed toplevel path, or null when the directory does not
 *          exist, is not inside a git worktree, or git fails/times out (5s).
 */
export function detectWorktreePath(directory: string): string | null {
  const gitArgs = ["rev-parse", "--show-toplevel"]
  try {
    const stdout = execFileSync("git", gitArgs, {
      cwd: directory,
      encoding: "utf-8",
      timeout: 5000,
      stdio: ["pipe", "pipe", "pipe"],
    })
    return stdout.trim()
  } catch {
    // Any failure (ENOENT, not a repo, timeout) means "no worktree here".
    return null
  }
}

View File

@@ -1,6 +1,6 @@
import { detectThinkKeyword, extractPromptText } from "./detector"
import { getHighVariant, isAlreadyHighVariant } from "./switcher"
import type { ThinkModeState } from "./types"
import { getHighVariant, getThinkingConfig, isAlreadyHighVariant } from "./switcher"
import type { ThinkModeInput, ThinkModeState } from "./types"
import { log } from "../../shared"
const thinkModeState = new Map<string, ThinkModeState>()
@@ -10,24 +10,53 @@ export function clearThinkModeState(sessionID: string): void {
}
export function createThinkModeHook() {
return {
"chat.message": async (
input: {
sessionID: string
model?: { providerID: string; modelID: string }
},
output: {
message: Record<string, unknown>
parts: Array<{ type: string; text?: string; [key: string]: unknown }>
function isDisabledThinkingConfig(config: Record<string, unknown>): boolean {
const thinkingConfig = config.thinking
if (
typeof thinkingConfig === "object" &&
thinkingConfig !== null &&
"type" in thinkingConfig &&
(thinkingConfig as { type?: string }).type === "disabled"
) {
return true
}
const providerOptions = config.providerOptions
if (typeof providerOptions !== "object" || providerOptions === null) {
return false
}
return Object.values(providerOptions as Record<string, unknown>).some(
(providerConfig) => {
if (typeof providerConfig !== "object" || providerConfig === null) {
return false
}
const providerConfigMap = providerConfig as Record<string, unknown>
const extraBody = providerConfigMap.extra_body
if (typeof extraBody !== "object" || extraBody === null) {
return false
}
const extraBodyMap = extraBody as Record<string, unknown>
const extraThinking = extraBodyMap.thinking
return (
typeof extraThinking === "object" &&
extraThinking !== null &&
(extraThinking as { type?: string }).type === "disabled"
)
}
): Promise<void> => {
)
}
return {
"chat.params": async (output: ThinkModeInput, sessionID: string): Promise<void> => {
const promptText = extractPromptText(output.parts)
const sessionID = input.sessionID
const state: ThinkModeState = {
requested: false,
modelSwitched: false,
variantSet: false,
thinkingConfigInjected: false,
}
if (!detectThinkKeyword(promptText)) {
@@ -37,12 +66,7 @@ export function createThinkModeHook() {
state.requested = true
if (typeof output.message.variant === "string") {
thinkModeState.set(sessionID, state)
return
}
const currentModel = input.model
const currentModel = output.message.model
if (!currentModel) {
thinkModeState.set(sessionID, state)
return
@@ -57,15 +81,14 @@ export function createThinkModeHook() {
}
const highVariant = getHighVariant(currentModel.modelID)
const thinkingConfig = getThinkingConfig(currentModel.providerID, currentModel.modelID)
if (highVariant) {
output.message.model = {
providerID: currentModel.providerID,
modelID: highVariant,
}
output.message.variant = "high"
state.modelSwitched = true
state.variantSet = true
log("Think mode: model switched to high variant", {
sessionID,
from: currentModel.modelID,
@@ -73,6 +96,42 @@ export function createThinkModeHook() {
})
}
if (thinkingConfig) {
const messageData = output.message as Record<string, unknown>
const agentThinking = messageData.thinking as { type?: string } | undefined
const agentProviderOptions = messageData.providerOptions
const agentDisabledThinking = agentThinking?.type === "disabled"
const agentHasCustomProviderOptions = Boolean(agentProviderOptions)
if (agentDisabledThinking) {
log("Think mode: skipping - agent has thinking disabled", {
sessionID,
provider: currentModel.providerID,
})
} else if (agentHasCustomProviderOptions) {
log("Think mode: skipping - agent has custom providerOptions", {
sessionID,
provider: currentModel.providerID,
})
} else if (
!isDisabledThinkingConfig(thinkingConfig as Record<string, unknown>)
) {
Object.assign(output.message, thinkingConfig)
state.thinkingConfigInjected = true
log("Think mode: thinking config injected", {
sessionID,
provider: currentModel.providerID,
config: thinkingConfig,
})
} else {
log("Think mode: skipping disabled thinking config", {
sessionID,
provider: currentModel.providerID,
})
}
}
thinkModeState.set(sessionID, state)
},

View File

@@ -1,155 +1,452 @@
import { beforeEach, describe, expect, it } from "bun:test"
import { describe, expect, it, beforeEach } from "bun:test"
import type { ThinkModeInput } from "./types"
const { clearThinkModeState, createThinkModeHook } = await import("./index")
type ThinkModeHookInput = {
sessionID: string
model?: { providerID: string; modelID: string }
}
type ThinkModeHookOutput = {
message: Record<string, unknown>
parts: Array<{ type: string; text?: string; [key: string]: unknown }>
}
function createHookInput(args: {
sessionID?: string
providerID?: string
modelID?: string
}): ThinkModeHookInput {
const { sessionID = "test-session-id", providerID, modelID } = args
if (!providerID || !modelID) {
return { sessionID }
}
const { createThinkModeHook, clearThinkModeState } = await import("./index")
/**
* Helper to create a mock ThinkModeInput for testing
*/
function createMockInput(
providerID: string,
modelID: string,
promptText: string
): ThinkModeInput {
return {
sessionID,
model: { providerID, modelID },
}
}
function createHookOutput(promptText: string, variant?: string): ThinkModeHookOutput {
return {
message: variant ? { variant } : {},
parts: [{ type: "text", text: promptText }],
message: {
model: {
providerID,
modelID,
},
},
}
}
describe("createThinkModeHook", () => {
/**
* Type helper for accessing dynamically injected properties on message
*/
type MessageWithInjectedProps = Record<string, unknown>
describe("createThinkModeHook integration", () => {
const sessionID = "test-session-id"
beforeEach(() => {
clearThinkModeState(sessionID)
})
it("sets high variant and switches model when think keyword is present", async () => {
// given
const hook = createThinkModeHook()
const input = createHookInput({
sessionID,
providerID: "github-copilot",
modelID: "claude-opus-4-6",
describe("GitHub Copilot provider integration", () => {
describe("Claude models", () => {
it("should activate thinking mode for github-copilot Claude with think keyword", async () => {
// given a github-copilot Claude model and prompt with "think" keyword
const hook = createThinkModeHook()
const input = createMockInput(
"github-copilot",
"claude-opus-4-6",
"Please think deeply about this problem"
)
// when the chat.params hook is called
await hook["chat.params"](input, sessionID)
// then should upgrade to high variant and inject thinking config
const message = input.message as MessageWithInjectedProps
expect(input.message.model?.modelID).toBe("claude-opus-4-6-high")
expect(message.thinking).toBeDefined()
expect((message.thinking as Record<string, unknown>)?.type).toBe(
"enabled"
)
expect(
(message.thinking as Record<string, unknown>)?.budgetTokens
).toBe(64000)
})
it("should handle github-copilot Claude with dots in version", async () => {
// given a github-copilot Claude model with dot format (claude-opus-4.6)
const hook = createThinkModeHook()
const input = createMockInput(
"github-copilot",
"claude-opus-4.6",
"ultrathink mode"
)
// when the chat.params hook is called
await hook["chat.params"](input, sessionID)
// then should upgrade to high variant (hyphen format)
const message = input.message as MessageWithInjectedProps
expect(input.message.model?.modelID).toBe("claude-opus-4-6-high")
expect(message.thinking).toBeDefined()
})
it("should handle github-copilot Claude Sonnet", async () => {
// given a github-copilot Claude Sonnet model
const hook = createThinkModeHook()
const input = createMockInput(
"github-copilot",
"claude-sonnet-4-6",
"think about this"
)
// when the chat.params hook is called
await hook["chat.params"](input, sessionID)
// then should upgrade to high variant
const message = input.message as MessageWithInjectedProps
expect(input.message.model?.modelID).toBe("claude-sonnet-4-6-high")
expect(message.thinking).toBeDefined()
})
})
const output = createHookOutput("Please think deeply about this")
// when
await hook["chat.message"](input, output)
describe("Gemini models", () => {
it("should activate thinking mode for github-copilot Gemini Pro", async () => {
// given a github-copilot Gemini Pro model
const hook = createThinkModeHook()
const input = createMockInput(
"github-copilot",
"gemini-3-pro",
"think about this"
)
// then
expect(output.message.variant).toBe("high")
expect(output.message.model).toEqual({
providerID: "github-copilot",
modelID: "claude-opus-4-6-high",
// when the chat.params hook is called
await hook["chat.params"](input, sessionID)
// then should upgrade to high variant and inject google thinking config
const message = input.message as MessageWithInjectedProps
expect(input.message.model?.modelID).toBe("gemini-3-pro-high")
expect(message.providerOptions).toBeDefined()
const googleOptions = (
message.providerOptions as Record<string, unknown>
)?.google as Record<string, unknown>
expect(googleOptions?.thinkingConfig).toBeDefined()
})
it("should activate thinking mode for github-copilot Gemini Flash", async () => {
// given a github-copilot Gemini Flash model
const hook = createThinkModeHook()
const input = createMockInput(
"github-copilot",
"gemini-3-flash",
"ultrathink"
)
// when the chat.params hook is called
await hook["chat.params"](input, sessionID)
// then should upgrade to high variant
const message = input.message as MessageWithInjectedProps
expect(input.message.model?.modelID).toBe("gemini-3-flash-high")
expect(message.providerOptions).toBeDefined()
})
})
describe("GPT models", () => {
it("should activate thinking mode for github-copilot GPT-5.2", async () => {
// given a github-copilot GPT-5.2 model
const hook = createThinkModeHook()
const input = createMockInput(
"github-copilot",
"gpt-5.2",
"please think"
)
// when the chat.params hook is called
await hook["chat.params"](input, sessionID)
// then should upgrade to high variant and inject openai thinking config
const message = input.message as MessageWithInjectedProps
expect(input.message.model?.modelID).toBe("gpt-5-2-high")
expect(message.reasoning_effort).toBe("high")
})
it("should activate thinking mode for github-copilot GPT-5", async () => {
// given a github-copilot GPT-5 model
const hook = createThinkModeHook()
const input = createMockInput("github-copilot", "gpt-5", "think deeply")
// when the chat.params hook is called
await hook["chat.params"](input, sessionID)
// then should upgrade to high variant
const message = input.message as MessageWithInjectedProps
expect(input.message.model?.modelID).toBe("gpt-5-high")
expect(message.reasoning_effort).toBe("high")
})
})
describe("No think keyword", () => {
it("should NOT activate for github-copilot without think keyword", async () => {
// given a prompt without any think keyword
const hook = createThinkModeHook()
const input = createMockInput(
"github-copilot",
"claude-opus-4-6",
"Just do this task"
)
const originalModelID = input.message.model?.modelID
// when the chat.params hook is called
await hook["chat.params"](input, sessionID)
// then should NOT change model or inject config
const message = input.message as MessageWithInjectedProps
expect(input.message.model?.modelID).toBe(originalModelID)
expect(message.thinking).toBeUndefined()
})
})
})
it("supports dotted model IDs by switching to normalized high variant", async () => {
// given
const hook = createThinkModeHook()
const input = createHookInput({
sessionID,
providerID: "github-copilot",
modelID: "gpt-5.2",
describe("Backwards compatibility with direct providers", () => {
it("should still work for direct anthropic provider", async () => {
// given direct anthropic provider
const hook = createThinkModeHook()
const input = createMockInput(
"anthropic",
"claude-sonnet-4-6",
"think about this"
)
// when the chat.params hook is called
await hook["chat.params"](input, sessionID)
// then should work as before
const message = input.message as MessageWithInjectedProps
expect(input.message.model?.modelID).toBe("claude-sonnet-4-6-high")
expect(message.thinking).toBeDefined()
})
const output = createHookOutput("ultrathink about this")
// when
await hook["chat.message"](input, output)
it("should work for direct google-vertex-anthropic provider", async () => {
//#given direct google-vertex-anthropic provider
const hook = createThinkModeHook()
const input = createMockInput(
"google-vertex-anthropic",
"claude-opus-4-6",
"think deeply"
)
// then
expect(output.message.variant).toBe("high")
expect(output.message.model).toEqual({
providerID: "github-copilot",
modelID: "gpt-5-2-high",
//#when the chat.params hook is called
await hook["chat.params"](input, sessionID)
//#then should upgrade model and inject Claude thinking config
const message = input.message as MessageWithInjectedProps
expect(input.message.model?.modelID).toBe("claude-opus-4-6-high")
expect(message.thinking).toBeDefined()
expect((message.thinking as Record<string, unknown>)?.budgetTokens).toBe(
64000
)
})
it("should still work for direct google provider", async () => {
// given direct google provider
const hook = createThinkModeHook()
const input = createMockInput(
"google",
"gemini-3-pro",
"think about this"
)
// when the chat.params hook is called
await hook["chat.params"](input, sessionID)
// then should work as before
const message = input.message as MessageWithInjectedProps
expect(input.message.model?.modelID).toBe("gemini-3-pro-high")
expect(message.providerOptions).toBeDefined()
})
it("should still work for direct openai provider", async () => {
// given direct openai provider
const hook = createThinkModeHook()
const input = createMockInput("openai", "gpt-5", "think about this")
// when the chat.params hook is called
await hook["chat.params"](input, sessionID)
// then should work
const message = input.message as MessageWithInjectedProps
expect(input.message.model?.modelID).toBe("gpt-5-high")
expect(message.reasoning_effort).toBe("high")
})
it("should still work for amazon-bedrock provider", async () => {
// given amazon-bedrock provider
const hook = createThinkModeHook()
const input = createMockInput(
"amazon-bedrock",
"claude-sonnet-4-6",
"think"
)
// when the chat.params hook is called
await hook["chat.params"](input, sessionID)
// then should inject bedrock thinking config
const message = input.message as MessageWithInjectedProps
expect(input.message.model?.modelID).toBe("claude-sonnet-4-6-high")
expect(message.reasoningConfig).toBeDefined()
})
})
it("skips when message variant is already set", async () => {
// given
const hook = createThinkModeHook()
const input = createHookInput({
sessionID,
providerID: "github-copilot",
modelID: "claude-sonnet-4-6",
describe("Already-high variants", () => {
it("should NOT re-upgrade already-high variants", async () => {
// given an already-high variant model
const hook = createThinkModeHook()
const input = createMockInput(
"github-copilot",
"claude-opus-4-6-high",
"think deeply"
)
// when the chat.params hook is called
await hook["chat.params"](input, sessionID)
// then should NOT modify the model (already high)
const message = input.message as MessageWithInjectedProps
expect(input.message.model?.modelID).toBe("claude-opus-4-6-high")
// No additional thinking config should be injected
expect(message.thinking).toBeUndefined()
})
const output = createHookOutput("think through this", "max")
// when
await hook["chat.message"](input, output)
it("should NOT re-upgrade already-high GPT variants", async () => {
// given an already-high GPT variant
const hook = createThinkModeHook()
const input = createMockInput(
"github-copilot",
"gpt-5.2-high",
"ultrathink"
)
// then
expect(output.message.variant).toBe("max")
expect(output.message.model).toBeUndefined()
// when the chat.params hook is called
await hook["chat.params"](input, sessionID)
// then should NOT modify the model
const message = input.message as MessageWithInjectedProps
expect(input.message.model?.modelID).toBe("gpt-5.2-high")
expect(message.reasoning_effort).toBeUndefined()
})
})
it("does nothing when think keyword is absent", async () => {
// given
const hook = createThinkModeHook()
const input = createHookInput({
sessionID,
providerID: "google",
modelID: "gemini-3-pro",
describe("Unknown models", () => {
it("should not crash for unknown models via github-copilot", async () => {
// given an unknown model type
const hook = createThinkModeHook()
const input = createMockInput(
"github-copilot",
"llama-3-70b",
"think about this"
)
// when the chat.params hook is called
await hook["chat.params"](input, sessionID)
// then should not crash and model should remain unchanged
expect(input.message.model?.modelID).toBe("llama-3-70b")
})
const output = createHookOutput("Please solve this directly")
// when
await hook["chat.message"](input, output)
// then
expect(output.message.variant).toBeUndefined()
expect(output.message.model).toBeUndefined()
})
it("does not modify already-high models", async () => {
// given
const hook = createThinkModeHook()
const input = createHookInput({
sessionID,
providerID: "openai",
modelID: "gpt-5-high",
describe("Edge cases", () => {
it("should handle missing model gracefully", async () => {
// given input without a model
const hook = createThinkModeHook()
const input: ThinkModeInput = {
parts: [{ type: "text", text: "think about this" }],
message: {},
}
// when the chat.params hook is called
// then should not crash
await expect(
hook["chat.params"](input, sessionID)
).resolves.toBeUndefined()
})
const output = createHookOutput("think deeply")
// when
await hook["chat.message"](input, output)
it("should handle empty prompt gracefully", async () => {
// given empty prompt
const hook = createThinkModeHook()
const input = createMockInput("github-copilot", "claude-opus-4-6", "")
// then
expect(output.message.variant).toBeUndefined()
expect(output.message.model).toBeUndefined()
// when the chat.params hook is called
await hook["chat.params"](input, sessionID)
// then should not upgrade (no think keyword)
expect(input.message.model?.modelID).toBe("claude-opus-4-6")
})
})
it("handles missing input model without crashing", async () => {
// given
const hook = createThinkModeHook()
const input = createHookInput({ sessionID })
const output = createHookOutput("think about this")
describe("Agent-level thinking configuration respect", () => {
it("should omit Z.ai GLM disabled thinking config", async () => {
//#given a Z.ai GLM model with think prompt
const hook = createThinkModeHook()
const input = createMockInput(
"zai-coding-plan",
"glm-5",
"ultrathink mode"
)
// when
await expect(hook["chat.message"](input, output)).resolves.toBeUndefined()
//#when think mode resolves Z.ai thinking configuration
await hook["chat.params"](input, sessionID)
// then
expect(output.message.variant).toBeUndefined()
expect(output.message.model).toBeUndefined()
//#then thinking config should be omitted from request
const message = input.message as MessageWithInjectedProps
expect(input.message.model?.modelID).toBe("glm-5")
expect(message.thinking).toBeUndefined()
expect(message.providerOptions).toBeUndefined()
})
it("should NOT inject thinking config when agent has thinking disabled", async () => {
// given agent with thinking explicitly disabled
const hook = createThinkModeHook()
const input: ThinkModeInput = {
parts: [{ type: "text", text: "ultrathink deeply" }],
message: {
model: { providerID: "google", modelID: "gemini-3-pro" },
thinking: { type: "disabled" },
} as ThinkModeInput["message"],
}
// when the chat.params hook is called
await hook["chat.params"](input, sessionID)
// then should NOT override agent's thinking disabled setting
const message = input.message as MessageWithInjectedProps
expect((message.thinking as { type: string }).type).toBe("disabled")
expect(message.providerOptions).toBeUndefined()
})
it("should NOT inject thinking config when agent has custom providerOptions", async () => {
// given agent with custom providerOptions
const hook = createThinkModeHook()
const input: ThinkModeInput = {
parts: [{ type: "text", text: "ultrathink" }],
message: {
model: { providerID: "google", modelID: "gemini-3-flash" },
providerOptions: {
google: { thinkingConfig: { thinkingBudget: 0 } },
},
} as ThinkModeInput["message"],
}
// when the chat.params hook is called
await hook["chat.params"](input, sessionID)
// then should NOT override agent's providerOptions
const message = input.message as MessageWithInjectedProps
const providerOpts = message.providerOptions as Record<string, unknown>
expect((providerOpts.google as Record<string, unknown>).thinkingConfig).toEqual({
thinkingBudget: 0,
})
})
it("should still inject thinking config when agent has no thinking override", async () => {
// given agent without thinking override
const hook = createThinkModeHook()
const input = createMockInput("google", "gemini-3-pro", "ultrathink")
// when the chat.params hook is called
await hook["chat.params"](input, sessionID)
// then should inject thinking config as normal
const message = input.message as MessageWithInjectedProps
expect(message.providerOptions).toBeDefined()
})
})
})

View File

@@ -1,10 +1,128 @@
import { describe, expect, it } from "bun:test"
import {
getHighVariant,
getThinkingConfig,
isAlreadyHighVariant,
THINKING_CONFIGS,
} from "./switcher"
describe("think-mode switcher", () => {
describe("GitHub Copilot provider support", () => {
describe("Claude models via github-copilot", () => {
it("should resolve github-copilot Claude Opus to anthropic config", () => {
// given a github-copilot provider with Claude Opus model
const providerID = "github-copilot"
const modelID = "claude-opus-4-6"
// when getting thinking config
const config = getThinkingConfig(providerID, modelID)
// then should return anthropic thinking config
expect(config).not.toBeNull()
expect(config?.thinking).toBeDefined()
expect((config?.thinking as Record<string, unknown>)?.type).toBe(
"enabled"
)
expect((config?.thinking as Record<string, unknown>)?.budgetTokens).toBe(
64000
)
})
it("should resolve github-copilot Claude Sonnet to anthropic config", () => {
// given a github-copilot provider with Claude Sonnet model
const config = getThinkingConfig("github-copilot", "claude-sonnet-4-6")
// then should return anthropic thinking config
expect(config).not.toBeNull()
expect(config?.thinking).toBeDefined()
})
it("should handle Claude with dots in version number", () => {
// given a model ID with dots (claude-opus-4.6)
const config = getThinkingConfig("github-copilot", "claude-opus-4.6")
// then should still return anthropic thinking config
expect(config).not.toBeNull()
expect(config?.thinking).toBeDefined()
})
})
describe("Gemini models via github-copilot", () => {
it("should resolve github-copilot Gemini Pro to google config", () => {
// given a github-copilot provider with Gemini Pro model
const config = getThinkingConfig("github-copilot", "gemini-3-pro")
// then should return google thinking config
expect(config).not.toBeNull()
expect(config?.providerOptions).toBeDefined()
const googleOptions = (
config?.providerOptions as Record<string, unknown>
)?.google as Record<string, unknown>
expect(googleOptions?.thinkingConfig).toBeDefined()
})
it("should resolve github-copilot Gemini Flash to google config", () => {
// given a github-copilot provider with Gemini Flash model
const config = getThinkingConfig(
"github-copilot",
"gemini-3-flash"
)
// then should return google thinking config
expect(config).not.toBeNull()
expect(config?.providerOptions).toBeDefined()
})
})
describe("GPT models via github-copilot", () => {
it("should resolve github-copilot GPT-5.2 to openai config", () => {
// given a github-copilot provider with GPT-5.2 model
const config = getThinkingConfig("github-copilot", "gpt-5.2")
// then should return openai thinking config
expect(config).not.toBeNull()
expect(config?.reasoning_effort).toBe("high")
})
it("should resolve github-copilot GPT-5 to openai config", () => {
// given a github-copilot provider with GPT-5 model
const config = getThinkingConfig("github-copilot", "gpt-5")
// then should return openai thinking config
expect(config).not.toBeNull()
expect(config?.reasoning_effort).toBe("high")
})
it("should resolve github-copilot o1 to openai config", () => {
// given a github-copilot provider with o1 model
const config = getThinkingConfig("github-copilot", "o1-preview")
// then should return openai thinking config
expect(config).not.toBeNull()
expect(config?.reasoning_effort).toBe("high")
})
it("should resolve github-copilot o3 to openai config", () => {
// given a github-copilot provider with o3 model
const config = getThinkingConfig("github-copilot", "o3-mini")
// then should return openai thinking config
expect(config).not.toBeNull()
expect(config?.reasoning_effort).toBe("high")
})
})
describe("Unknown models via github-copilot", () => {
it("should return null for unknown model types", () => {
// given a github-copilot provider with unknown model
const config = getThinkingConfig("github-copilot", "llama-3-70b")
// then should return null (no matching provider)
expect(config).toBeNull()
})
})
})
describe("Model ID normalization", () => {
describe("getHighVariant with dots vs hyphens", () => {
it("should handle dots in Claude version numbers", () => {
@@ -99,6 +217,149 @@ describe("think-mode switcher", () => {
})
})
describe("getThinkingConfig", () => {
describe("Already high variants", () => {
it("should return null for already-high variants", () => {
// given already-high model variants
expect(
getThinkingConfig("anthropic", "claude-opus-4-6-high")
).toBeNull()
expect(getThinkingConfig("openai", "gpt-5-2-high")).toBeNull()
expect(getThinkingConfig("google", "gemini-3-pro-high")).toBeNull()
})
it("should return null for already-high variants via github-copilot", () => {
// given already-high model variants via github-copilot
expect(
getThinkingConfig("github-copilot", "claude-opus-4-6-high")
).toBeNull()
expect(getThinkingConfig("github-copilot", "gpt-5.2-high")).toBeNull()
})
})
describe("Non-thinking-capable models", () => {
it("should return null for non-thinking-capable models", () => {
// given models that don't support thinking mode
expect(getThinkingConfig("anthropic", "claude-2")).toBeNull()
expect(getThinkingConfig("openai", "gpt-4")).toBeNull()
expect(getThinkingConfig("google", "gemini-1")).toBeNull()
})
})
describe("Unknown providers", () => {
it("should return null for unknown providers", () => {
// given unknown provider IDs
expect(getThinkingConfig("unknown-provider", "some-model")).toBeNull()
expect(getThinkingConfig("azure", "gpt-5")).toBeNull()
})
})
})
describe("Direct provider configs (backwards compatibility)", () => {
it("should still work for direct anthropic provider", () => {
// given direct anthropic provider
const config = getThinkingConfig("anthropic", "claude-opus-4-6")
// then should return anthropic thinking config
expect(config).not.toBeNull()
expect(config?.thinking).toBeDefined()
expect((config?.thinking as Record<string, unknown>)?.type).toBe("enabled")
})
it("should work for direct google-vertex-anthropic provider", () => {
//#given direct google-vertex-anthropic provider
const config = getThinkingConfig(
"google-vertex-anthropic",
"claude-opus-4-6"
)
//#when thinking config is resolved
//#then it should return anthropic-style thinking config
expect(config).not.toBeNull()
expect(config?.thinking).toBeDefined()
expect((config?.thinking as Record<string, unknown>)?.type).toBe("enabled")
expect((config?.thinking as Record<string, unknown>)?.budgetTokens).toBe(
64000
)
})
it("should still work for direct google provider", () => {
// given direct google provider
const config = getThinkingConfig("google", "gemini-3-pro")
// then should return google thinking config
expect(config).not.toBeNull()
expect(config?.providerOptions).toBeDefined()
})
it("should still work for amazon-bedrock provider", () => {
// given amazon-bedrock provider with claude model
const config = getThinkingConfig("amazon-bedrock", "claude-sonnet-4-6")
// then should return bedrock thinking config
expect(config).not.toBeNull()
expect(config?.reasoningConfig).toBeDefined()
})
it("should still work for google-vertex provider", () => {
// given google-vertex provider
const config = getThinkingConfig("google-vertex", "gemini-3-pro")
// then should return google-vertex thinking config
expect(config).not.toBeNull()
expect(config?.providerOptions).toBeDefined()
const vertexOptions = (config?.providerOptions as Record<string, unknown>)?.[
"google-vertex"
] as Record<string, unknown>
expect(vertexOptions?.thinkingConfig).toBeDefined()
})
it("should work for direct openai provider", () => {
// given direct openai provider
const config = getThinkingConfig("openai", "gpt-5")
// then should return openai thinking config
expect(config).not.toBeNull()
expect(config?.reasoning_effort).toBe("high")
})
})
describe("THINKING_CONFIGS structure", () => {
it("should have correct structure for anthropic", () => {
const config = THINKING_CONFIGS.anthropic
expect(config.thinking).toBeDefined()
expect(config.maxTokens).toBe(128000)
})
it("should have correct structure for google-vertex-anthropic", () => {
//#given google-vertex-anthropic config entry
const config = THINKING_CONFIGS["google-vertex-anthropic"]
//#when structure is validated
//#then it should match anthropic style structure
expect(config.thinking).toBeDefined()
expect(config.maxTokens).toBe(128000)
})
it("should have correct structure for google", () => {
const config = THINKING_CONFIGS.google
expect(config.providerOptions).toBeDefined()
})
it("should have correct structure for openai", () => {
const config = THINKING_CONFIGS.openai
expect(config.reasoning_effort).toBe("high")
})
it("should have correct structure for amazon-bedrock", () => {
const config = THINKING_CONFIGS["amazon-bedrock"]
expect(config.reasoningConfig).toBeDefined()
expect(config.maxTokens).toBe(64000)
})
})
describe("Custom provider prefixes support", () => {
describe("getHighVariant with prefixes", () => {
it("should preserve vertex_ai/ prefix when getting high variant", () => {
@@ -165,6 +426,141 @@ describe("think-mode switcher", () => {
expect(isAlreadyHighVariant("vertex_ai/gpt-5.2-high")).toBe(true)
})
})
})
})
describe("getThinkingConfig with prefixes", () => {
it("should return null for custom providers (not in THINKING_CONFIGS)", () => {
// given custom provider with prefixed Claude model
const config = getThinkingConfig("dia-llm", "vertex_ai/claude-sonnet-4-6")
// then should return null (custom provider not in THINKING_CONFIGS)
expect(config).toBeNull()
})
it("should work with prefixed models on known providers", () => {
// given known provider (anthropic) with prefixed model
// This tests that the base model name is correctly extracted for capability check
const config = getThinkingConfig("anthropic", "custom-prefix/claude-opus-4-6")
// then should return thinking config (base model is capable)
expect(config).not.toBeNull()
expect(config?.thinking).toBeDefined()
})
it("should return null for prefixed models that are already high", () => {
// given prefixed already-high model
const config = getThinkingConfig("anthropic", "vertex_ai/claude-opus-4-6-high")
// then should return null
expect(config).toBeNull()
})
})
describe("Real-world custom provider scenario", () => {
it("should handle LLM proxy with vertex_ai prefix correctly", () => {
// given a custom LLM proxy provider using vertex_ai/ prefix
const providerID = "dia-llm"
const modelID = "vertex_ai/claude-sonnet-4-6"
// when getting high variant
const highVariant = getHighVariant(modelID)
// then should preserve the prefix
expect(highVariant).toBe("vertex_ai/claude-sonnet-4-6-high")
// #and when checking if already high
expect(isAlreadyHighVariant(modelID)).toBe(false)
expect(isAlreadyHighVariant(highVariant!)).toBe(true)
// #and when getting thinking config for custom provider
const config = getThinkingConfig(providerID, modelID)
// then should return null (custom provider, not anthropic)
// This prevents applying incompatible thinking configs to custom providers
expect(config).toBeNull()
})
it("should not break when switching to high variant in think mode", () => {
// given think mode switching vertex_ai/claude model to high variant
const original = "vertex_ai/claude-opus-4-6"
const high = getHighVariant(original)
// then the high variant should be valid
expect(high).toBe("vertex_ai/claude-opus-4-6-high")
// #and should be recognized as already high
expect(isAlreadyHighVariant(high!)).toBe(true)
// #and switching again should return null (already high)
expect(getHighVariant(high!)).toBeNull()
})
})
})
describe("Z.AI GLM-4.7 provider support", () => {
describe("getThinkingConfig for zai-coding-plan", () => {
it("should return thinking config for glm-5", () => {
//#given a Z.ai GLM model
const config = getThinkingConfig("zai-coding-plan", "glm-5")
//#when thinking config is resolved
//#then thinking type is "disabled"
expect(config).not.toBeNull()
expect(config?.providerOptions).toBeDefined()
const zaiOptions = (config?.providerOptions as Record<string, unknown>)?.[
"zai-coding-plan"
] as Record<string, unknown>
expect(zaiOptions?.extra_body).toBeDefined()
const extraBody = zaiOptions?.extra_body as Record<string, unknown>
expect(extraBody?.thinking).toBeDefined()
expect((extraBody?.thinking as Record<string, unknown>)?.type).toBe("disabled")
})
it("should return thinking config for glm-4.6v (multimodal)", () => {
// given zai-coding-plan provider with glm-4.6v model
const config = getThinkingConfig("zai-coding-plan", "glm-4.6v")
// then should return zai-coding-plan thinking config
expect(config).not.toBeNull()
expect(config?.providerOptions).toBeDefined()
})
it("should return null for non-GLM models on zai-coding-plan", () => {
// given zai-coding-plan provider with unknown model
const config = getThinkingConfig("zai-coding-plan", "some-other-model")
// then should return null
expect(config).toBeNull()
})
})
describe("HIGH_VARIANT_MAP for GLM", () => {
it("should NOT have high variant for glm-5", () => {
// given glm-5 model
const variant = getHighVariant("glm-5")
// then should return null (no high variant needed)
expect(variant).toBeNull()
})
it("should NOT have high variant for glm-4.6v", () => {
// given glm-4.6v model
const variant = getHighVariant("glm-4.6v")
// then should return null
expect(variant).toBeNull()
})
})
})
describe("THINKING_CONFIGS structure for zai-coding-plan", () => {
it("should have correct structure for zai-coding-plan", () => {
const config = THINKING_CONFIGS["zai-coding-plan"]
expect(config.providerOptions).toBeDefined()
const zaiOptions = (config.providerOptions as Record<string, unknown>)?.[
"zai-coding-plan"
] as Record<string, unknown>
expect(zaiOptions?.extra_body).toBeDefined()
})
})
})

View File

@@ -53,7 +53,35 @@ function normalizeModelID(modelID: string): string {
return modelID.replace(/\.(\d+)/g, "-$1")
}
/**
* Resolves proxy providers (like github-copilot) to their underlying provider.
* This allows GitHub Copilot to inherit thinking configurations from the actual
* model provider (Anthropic, Google, OpenAI).
*
* @example
* resolveProvider("github-copilot", "claude-opus-4-6") // "anthropic"
* resolveProvider("github-copilot", "gemini-3-pro") // "google"
* resolveProvider("github-copilot", "gpt-5.2") // "openai"
* resolveProvider("anthropic", "claude-opus-4-6") // "anthropic" (unchanged)
*/
function resolveProvider(providerID: string, modelID: string): string {
// GitHub Copilot is a proxy - infer actual provider from model name
if (providerID === "github-copilot") {
const modelLower = modelID.toLowerCase()
if (modelLower.includes("claude")) return "anthropic"
if (modelLower.includes("gemini")) return "google"
if (
modelLower.includes("gpt") ||
modelLower.includes("o1") ||
modelLower.includes("o3")
) {
return "openai"
}
}
// Direct providers or unknown - return as-is
return providerID
}
// Maps model IDs to their "high reasoning" variant (internal convention)
// For OpenAI models, this signals that reasoning_effort should be set to "high"
@@ -88,6 +116,71 @@ const HIGH_VARIANT_MAP: Record<string, string> = {
const ALREADY_HIGH: Set<string> = new Set(Object.values(HIGH_VARIANT_MAP))
export const THINKING_CONFIGS = {
anthropic: {
thinking: {
type: "enabled",
budgetTokens: 64000,
},
maxTokens: 128000,
},
"google-vertex-anthropic": {
thinking: {
type: "enabled",
budgetTokens: 64000,
},
maxTokens: 128000,
},
"amazon-bedrock": {
reasoningConfig: {
type: "enabled",
budgetTokens: 32000,
},
maxTokens: 64000,
},
google: {
providerOptions: {
google: {
thinkingConfig: {
thinkingLevel: "HIGH",
},
},
},
},
"google-vertex": {
providerOptions: {
"google-vertex": {
thinkingConfig: {
thinkingLevel: "HIGH",
},
},
},
},
openai: {
reasoning_effort: "high",
},
"zai-coding-plan": {
providerOptions: {
"zai-coding-plan": {
extra_body: {
thinking: {
type: "disabled",
},
},
},
},
},
} as const satisfies Record<string, Record<string, unknown>>
const THINKING_CAPABLE_MODELS = {
anthropic: ["claude-sonnet-4", "claude-opus-4", "claude-3"],
"google-vertex-anthropic": ["claude-sonnet-4", "claude-opus-4", "claude-3"],
"amazon-bedrock": ["claude", "anthropic"],
google: ["gemini-2", "gemini-3"],
"google-vertex": ["gemini-2", "gemini-3"],
openai: ["gpt-5", "o1", "o3"],
"zai-coding-plan": ["glm"],
} as const satisfies Record<string, readonly string[]>
export function getHighVariant(modelID: string): string | null {
const normalized = normalizeModelID(modelID)
@@ -114,4 +207,37 @@ export function isAlreadyHighVariant(modelID: string): boolean {
return ALREADY_HIGH.has(base) || base.endsWith("-high")
}
type ThinkingProvider = keyof typeof THINKING_CONFIGS
function isThinkingProvider(provider: string): provider is ThinkingProvider {
return provider in THINKING_CONFIGS
}
export function getThinkingConfig(
providerID: string,
modelID: string
): Record<string, unknown> | null {
const normalized = normalizeModelID(modelID)
const { base } = extractModelPrefix(normalized)
if (isAlreadyHighVariant(normalized)) {
return null
}
const resolvedProvider = resolveProvider(providerID, modelID)
if (!isThinkingProvider(resolvedProvider)) {
return null
}
const config = THINKING_CONFIGS[resolvedProvider]
const capablePatterns = THINKING_CAPABLE_MODELS[resolvedProvider]
// Check capability using base model name (without prefix)
const baseLower = base.toLowerCase()
const isCapable = capablePatterns.some((pattern) =>
baseLower.includes(pattern.toLowerCase())
)
return isCapable ? config : null
}

View File

@@ -1,16 +1,21 @@
export interface ThinkModeState {
requested: boolean
modelSwitched: boolean
variantSet: boolean
thinkingConfigInjected: boolean
providerID?: string
modelID?: string
}
interface ModelRef {
export interface ModelRef {
providerID: string
modelID: string
}
interface MessageWithModel {
export interface MessageWithModel {
model?: ModelRef
}
export interface ThinkModeInput {
parts: Array<{ type: string; text?: string }>
message: MessageWithModel
}

View File

@@ -1,6 +1,6 @@
# src/hooks/todo-continuation-enforcer/ — Boulder Continuation Mechanism
**Generated:** 2026-02-24
**Generated:** 2026-02-21
## OVERVIEW

View File

@@ -17,6 +17,6 @@ export const TOAST_DURATION_MS = 900
export const COUNTDOWN_GRACE_PERIOD_MS = 500
export const ABORT_WINDOW_MS = 3000
export const CONTINUATION_COOLDOWN_MS = 5_000
export const CONTINUATION_COOLDOWN_MS = 30_000
export const MAX_CONSECUTIVE_FAILURES = 5
export const FAILURE_RESET_WINDOW_MS = 5 * 60 * 1000

View File

@@ -1,6 +1,6 @@
# src/mcp/ — 3 Built-in Remote MCPs
**Generated:** 2026-02-24
**Generated:** 2026-02-21
## OVERVIEW

View File

@@ -1,6 +1,6 @@
# src/plugin-handlers/ — 6-Phase Config Loading Pipeline
**Generated:** 2026-02-24
**Generated:** 2026-02-21
## OVERVIEW

View File

@@ -2,7 +2,7 @@ import { describe, it, expect } from "bun:test"
import { remapAgentKeysToDisplayNames } from "./agent-key-remapper"
describe("remapAgentKeysToDisplayNames", () => {
it("remaps known agent keys to display names while preserving original keys", () => {
it("remaps known agent keys to display names", () => {
// given agents with lowercase keys
const agents = {
sisyphus: { prompt: "test", mode: "primary" },
@@ -12,11 +12,10 @@ describe("remapAgentKeysToDisplayNames", () => {
// when remapping
const result = remapAgentKeysToDisplayNames(agents)
// then known agents get display name keys and original keys remain accessible
// then known agents get display name keys
expect(result["Sisyphus (Ultraworker)"]).toBeDefined()
expect(result["oracle"]).toBeDefined()
expect(result["sisyphus"]).toBeDefined()
expect(result["Sisyphus (Ultraworker)"]).toBe(result["sisyphus"])
expect(result["sisyphus"]).toBeUndefined()
})
it("preserves unknown agent keys unchanged", () => {
@@ -32,7 +31,7 @@ describe("remapAgentKeysToDisplayNames", () => {
expect(result["custom-agent"]).toBeDefined()
})
it("remaps all core agents while preserving original keys", () => {
it("remaps all core agents", () => {
// given all core agents
const agents = {
sisyphus: {},
@@ -47,20 +46,15 @@ describe("remapAgentKeysToDisplayNames", () => {
// when remapping
const result = remapAgentKeysToDisplayNames(agents)
// then all get display name keys while original keys still work
expect(result["Sisyphus (Ultraworker)"]).toBeDefined()
expect(result["sisyphus"]).toBeDefined()
expect(result["Hephaestus (Deep Agent)"]).toBeDefined()
expect(result["hephaestus"]).toBeDefined()
expect(result["Prometheus (Plan Builder)"]).toBeDefined()
expect(result["prometheus"]).toBeDefined()
expect(result["Atlas (Plan Executor)"]).toBeDefined()
expect(result["atlas"]).toBeDefined()
expect(result["Metis (Plan Consultant)"]).toBeDefined()
expect(result["metis"]).toBeDefined()
expect(result["Momus (Plan Critic)"]).toBeDefined()
expect(result["momus"]).toBeDefined()
expect(result["Sisyphus-Junior"]).toBeDefined()
expect(result["sisyphus-junior"]).toBeDefined()
// then all get display name keys
expect(Object.keys(result)).toEqual([
"Sisyphus (Ultraworker)",
"Hephaestus (Deep Agent)",
"Prometheus (Plan Builder)",
"Atlas (Plan Executor)",
"Metis (Plan Consultant)",
"Momus (Plan Critic)",
"Sisyphus-Junior",
])
})
})

View File

@@ -9,7 +9,6 @@ export function remapAgentKeysToDisplayNames(
const displayName = AGENT_DISPLAY_NAMES[key]
if (displayName && displayName !== key) {
result[displayName] = value
result[key] = value
} else {
result[key] = value
}

View File

@@ -1,120 +0,0 @@
// Unit tests for createConfigHandler: verify that a user's `formatter`
// setting in the opencode config survives the config-loading pipeline
// (provider -> plugin components -> agents -> tools -> MCP -> commands),
// both when it is a detailed formatter object and when it is `false`.
import { afterEach, beforeEach, describe, expect, spyOn, test } from "bun:test"
import type { OhMyOpenCodeConfig } from "../config"
import { createConfigHandler } from "./config-handler"
import * as agentConfigHandler from "./agent-config-handler"
import * as commandConfigHandler from "./command-config-handler"
import * as mcpConfigHandler from "./mcp-config-handler"
import * as pluginComponentsLoader from "./plugin-components-loader"
import * as providerConfigHandler from "./provider-config-handler"
import * as shared from "../shared"
import * as toolConfigHandler from "./tool-config-handler"
// Module-level spy handles so afterEach can restore every mock installed in beforeEach.
let logSpy: ReturnType<typeof spyOn>
let loadPluginComponentsSpy: ReturnType<typeof spyOn>
let applyAgentConfigSpy: ReturnType<typeof spyOn>
let applyToolConfigSpy: ReturnType<typeof spyOn>
let applyMcpConfigSpy: ReturnType<typeof spyOn>
let applyCommandConfigSpy: ReturnType<typeof spyOn>
let applyProviderConfigSpy: ReturnType<typeof spyOn>
// Stub out every pipeline phase with a neutral/empty result so the handler
// runs end-to-end without real plugin loading or logging side effects.
beforeEach(() => {
  logSpy = spyOn(shared, "log").mockImplementation(() => {})
  loadPluginComponentsSpy = spyOn(
    pluginComponentsLoader,
    "loadPluginComponents",
  ).mockResolvedValue({
    // Empty component sets: no commands/skills/agents/MCP servers/hooks/plugins.
    commands: {},
    skills: {},
    agents: {},
    mcpServers: {},
    hooksConfigs: [],
    plugins: [],
    errors: [],
  })
  applyAgentConfigSpy = spyOn(agentConfigHandler, "applyAgentConfig").mockResolvedValue(
    {},
  )
  applyToolConfigSpy = spyOn(toolConfigHandler, "applyToolConfig").mockImplementation(
    () => {},
  )
  applyMcpConfigSpy = spyOn(mcpConfigHandler, "applyMcpConfig").mockResolvedValue()
  applyCommandConfigSpy = spyOn(
    commandConfigHandler,
    "applyCommandConfig",
  ).mockResolvedValue()
  applyProviderConfigSpy = spyOn(
    providerConfigHandler,
    "applyProviderConfig",
  ).mockImplementation(() => {})
})
// Restore all spies so other test files see the real implementations.
afterEach(() => {
  logSpy.mockRestore()
  loadPluginComponentsSpy.mockRestore()
  applyAgentConfigSpy.mockRestore()
  applyToolConfigSpy.mockRestore()
  applyMcpConfigSpy.mockRestore()
  applyCommandConfigSpy.mockRestore()
  applyProviderConfigSpy.mockRestore()
})
describe("createConfigHandler formatter pass-through", () => {
  test("preserves formatter object configured in opencode config", async () => {
    // given — a config whose formatter section carries nested per-tool settings
    const pluginConfig: OhMyOpenCodeConfig = {}
    const formatterConfig = {
      prettier: {
        command: ["prettier", "--write"],
        extensions: [".ts", ".tsx"],
        environment: {
          PRETTIERD_DEFAULT_CONFIG: ".prettierrc",
        },
      },
      eslint: {
        disabled: false,
        command: ["eslint", "--fix"],
        extensions: [".js", ".ts"],
      },
    }
    const config: Record<string, unknown> = {
      formatter: formatterConfig,
    }
    const handler = createConfigHandler({
      ctx: { directory: "/tmp" },
      pluginConfig,
      modelCacheState: {
        anthropicContext1MEnabled: false,
        modelContextLimitsCache: new Map(),
      },
    })
    // when — the handler mutates `config` in place
    await handler(config)
    // then — the formatter object is deep-equal to what the user supplied
    expect(config.formatter).toEqual(formatterConfig)
  })
  test("preserves formatter=false configured in opencode config", async () => {
    // given — formatter explicitly disabled (falsy, so naive `||`/truthiness
    // handling in the pipeline would clobber it; this guards against that)
    const pluginConfig: OhMyOpenCodeConfig = {}
    const config: Record<string, unknown> = {
      formatter: false,
    }
    const handler = createConfigHandler({
      ctx: { directory: "/tmp" },
      pluginConfig,
      modelCacheState: {
        anthropicContext1MEnabled: false,
        modelContextLimitsCache: new Map(),
      },
    })
    // when
    await handler(config)
    // then — `false` survives as the exact boolean, not dropped or defaulted
    expect(config.formatter).toBe(false)
  })
})

View File

@@ -20,8 +20,6 @@ export function createConfigHandler(deps: ConfigHandlerDeps) {
const { ctx, pluginConfig, modelCacheState } = deps;
return async (config: Record<string, unknown>) => {
const formatterConfig = config.formatter;
applyProviderConfig({ config, modelCacheState });
const pluginComponents = await loadPluginComponents({ pluginConfig });
@@ -37,8 +35,6 @@ export function createConfigHandler(deps: ConfigHandlerDeps) {
await applyMcpConfig({ config, pluginConfig, pluginComponents });
await applyCommandConfig({ config, pluginConfig, ctx, pluginComponents });
config.formatter = formatterConfig;
log("[config-handler] config handler applied", {
agentCount: Object.keys(agentResult).length,
commandCount: Object.keys((config.command as Record<string, unknown>) ?? {})

View File

@@ -1,10 +1,10 @@
# src/plugin/ — 8 OpenCode Hook Handlers + Hook Composition
**Generated:** 2026-02-24
**Generated:** 2026-02-21
## OVERVIEW
Core glue layer. 20 source files assembling the 8 OpenCode hook handlers and composing 46 hooks into the PluginInterface. Every handler file corresponds to one OpenCode hook type.
Core glue layer. 20 source files assembling the 8 OpenCode hook handlers and composing 44 hooks into the PluginInterface. Every handler file corresponds to one OpenCode hook type.
## HANDLER FILES
@@ -25,10 +25,10 @@ Core glue layer. 20 source files assembling the 8 OpenCode hook handlers and com
|------|------|-------|
| `create-session-hooks.ts` | Session | 21 |
| `create-tool-guard-hooks.ts` | Tool Guard | 10 |
| `create-session-hooks.ts` | Session | 23 |
| `create-tool-guard-hooks.ts` | Tool Guard | 10 |
| `create-transform-hooks.ts` | Transform | 4 |
| `create-continuation-hooks.ts` | Continuation | 7 |
| `create-skill-hooks.ts` | Skill | 2 |
| `create-core-hooks.ts` | Aggregator | Session + Guard + Transform = 37 |
| `create-core-hooks.ts` | Aggregator | Session + Guard + Transform = 35 |
## SUPPORT FILES

View File

@@ -19,7 +19,6 @@ function createMockHandlerArgs(overrides?: {
},
hooks: {
stopContinuationGuard: null,
backgroundNotificationHook: null,
keywordDetector: null,
claudeCodeHooks: null,
autoSlashCommand: null,
@@ -116,30 +115,4 @@ describe("createChatMessageHandler - TUI variant passthrough", () => {
//#then - gate should still be marked as applied
expect(args._appliedSessions).toContain("test-session")
})
test("injects queued background notifications through chat.message hook", async () => {
//#given
const args = createMockHandlerArgs()
args.hooks.backgroundNotificationHook = {
"chat.message": async (
_input: { sessionID: string },
output: ChatMessageHandlerOutput,
): Promise<void> => {
output.parts.push({
type: "text",
text: "<system-reminder>[BACKGROUND TASK COMPLETED]</system-reminder>",
})
},
}
const handler = createChatMessageHandler(args)
const input = createMockInput("hephaestus", { providerID: "openai", modelID: "gpt-5.3-codex" })
const output = createMockOutput()
//#when
await handler(input, output)
//#then
expect(output.parts).toHaveLength(1)
expect(output.parts[0].text).toContain("[BACKGROUND TASK COMPLETED]")
})
})

View File

@@ -97,10 +97,8 @@ export function createChatMessageHandler(args: {
setSessionModel(input.sessionID, input.model)
}
await hooks.stopContinuationGuard?.["chat.message"]?.(input)
await hooks.backgroundNotificationHook?.["chat.message"]?.(input, output)
await hooks.runtimeFallback?.["chat.message"]?.(input, output)
await hooks.keywordDetector?.["chat.message"]?.(input, output)
await hooks.thinkMode?.["chat.message"]?.(input, output)
await hooks.claudeCodeHooks?.["chat.message"]?.(input, output)
await hooks.autoSlashCommand?.["chat.message"]?.(input, output)
await hooks.noSisyphusGpt?.["chat.message"]?.(input, output)

View File

@@ -232,10 +232,7 @@ export function createSessionHooks(args: {
: null
const noHephaestusNonGpt = isHookEnabled("no-hephaestus-non-gpt")
? safeHook("no-hephaestus-non-gpt", () =>
createNoHephaestusNonGptHook(ctx, {
allowNonGptModel: pluginConfig.agents?.hephaestus?.allow_non_gpt_model,
}))
? safeHook("no-hephaestus-non-gpt", () => createNoHephaestusNonGptHook(ctx))
: null
const questionLabelTruncator = isHookEnabled("question-label-truncator")

View File

@@ -21,10 +21,11 @@ function tryUpdateMessageModel(
)
const result = stmt.run(targetModel.providerID, targetModel.modelID, messageId)
if (result.changes === 0) return false
if (variant) {
db.prepare(
`UPDATE message SET data = json_set(data, '$.variant', ?) WHERE id = ?`,
).run(variant, messageId)
`UPDATE message SET data = json_set(data, '$.variant', ?, '$.thinking', ?) WHERE id = ?`,
).run(variant, variant, messageId)
}
return true
}

View File

@@ -308,6 +308,7 @@ describe("applyUltraworkModelOverrideOnMessage", () => {
//#then
expect(output.message.model).toEqual({ providerID: "anthropic", modelID: "claude-opus-4-6" })
expect(output.message["variant"]).toBe("max")
expect(output.message["thinking"]).toBe("max")
expect(dbOverrideSpy).not.toHaveBeenCalled()
})
@@ -323,6 +324,7 @@ describe("applyUltraworkModelOverrideOnMessage", () => {
//#then
expect(output.message.model).toBeUndefined()
expect(output.message["variant"]).toBe("high")
expect(output.message["thinking"]).toBe("high")
expect(dbOverrideSpy).not.toHaveBeenCalled()
})

View File

@@ -117,6 +117,7 @@ export function applyUltraworkModelOverrideOnMessage(
if (!override.providerID || !override.modelID) {
if (override.variant) {
output.message["variant"] = override.variant
output.message["thinking"] = override.variant
}
return
}
@@ -133,6 +134,7 @@ export function applyUltraworkModelOverrideOnMessage(
output.message.model = targetModel
if (override.variant) {
output.message["variant"] = override.variant
output.message["thinking"] = override.variant
}
return
}

View File

@@ -1,6 +1,6 @@
# src/shared/ — 101 Utility Files in 13 Categories
**Generated:** 2026-02-24
**Generated:** 2026-02-21
## OVERVIEW

View File

@@ -0,0 +1,39 @@
// Tests for findBashPath() Windows behavior: the shell resolver should honor
// the COMSPEC environment variable and fall back to the stock cmd.exe path
// when COMSPEC is unset.
import { afterEach, beforeEach, describe, expect, test } from "bun:test"
import { findBashPath } from "./shell-path"
describe("shell-path", () => {
  // Snapshots of the real environment, taken before each test so the tests
  // can freely mutate process.platform and COMSPEC.
  let originalPlatform: NodeJS.Platform
  let originalComspec: string | undefined
  beforeEach(() => {
    originalPlatform = process.platform
    originalComspec = process.env.COMSPEC
  })
  afterEach(() => {
    // NOTE(review): restoring with a plain value descriptor assumes
    // process.platform remains configurable after earlier redefinitions, and
    // replaces any original accessor with a data property — confirm this is
    // acceptable for the test runner.
    Object.defineProperty(process, "platform", { value: originalPlatform })
    if (originalComspec !== undefined) {
      process.env.COMSPEC = originalComspec
      return
    }
    // COMSPEC was unset before the test; delete it rather than assigning
    // undefined (which would yield the string "undefined" in process.env).
    delete process.env.COMSPEC
  })
  test("#given Windows platform with COMSPEC #when findBashPath is called #then returns COMSPEC path", () => {
    Object.defineProperty(process, "platform", { value: "win32" })
    process.env.COMSPEC = "C:\\Windows\\System32\\cmd.exe"
    const result = findBashPath()
    // COMSPEC takes precedence when set.
    expect(result).toBe("C:\\Windows\\System32\\cmd.exe")
  })
  test("#given Windows platform without COMSPEC #when findBashPath is called #then returns default cmd path", () => {
    Object.defineProperty(process, "platform", { value: "win32" })
    delete process.env.COMSPEC
    const result = findBashPath()
    // Falls back to the conventional absolute cmd.exe location.
    expect(result).toBe("C:\\Windows\\System32\\cmd.exe")
  })
})

Some files were not shown because too many files have changed in this diff Show More