Compare commits

..

6 Commits

Author SHA1 Message Date
feelsodev
4c7b81986a fix: add google provider model transform across all resolution paths
transformModelForProvider only handled the github-copilot provider, leaving
google provider models untransformed. This caused ProviderModelNotFoundError
when google/gemini-3-flash was sent to the API (the correct ID is
gemini-3-flash-preview).

Changes:
- Add google provider to transformModelForProvider with idempotent regex
  negative lookahead to prevent double -preview suffix
- Fix category-default path in model-resolution-pipeline when
  availableModels is empty but connected provider exists
- Fix getFirstFallbackModel first-run path that constructed raw model IDs
  without transformation
- Fix github-copilot provider gemini transforms to also use idempotent
  regex (was vulnerable to double-transform)
- Extract transformModelForProvider to shared module (single source of
  truth, imported by cli and shared layers)
- Add 20 new test cases: unit tests for both providers, runtime
  integration tests for category-default and fallback-chain paths,
  double-transform prevention for both providers
2026-02-21 02:29:02 +09:00
feelsodev
fec75535ba refactor: move transformModelForProvider to shared for runtime access
Ultraworked with [Sisyphus](https://github.com/code-yeongyu/oh-my-opencode)

Co-authored-by: Sisyphus <clio-agent@sisyphuslabs.ai>
2026-02-21 02:29:02 +09:00
once
e5a0ab4034 fix: add google provider model transform for gemini-3-flash/pro preview suffix
transformModelForProvider only handled the github-copilot provider, leaving
google provider models untransformed. This caused ProviderModelNotFoundError
when google/gemini-3-flash was sent to the API (the correct ID is
gemini-3-flash-preview).

Add a google provider block with a -preview suffix guard to prevent double
transformation.
2026-02-21 02:29:02 +09:00
YeonGyu-Kim
31dc65e9ac Merge pull request #1981 from VespianRex/fix/fallback-sync-model-ui
Fix model fallback retries for main, background, and sync subagents + show runtime fallback model in task UI
2026-02-21 02:28:18 +09:00
VespianRex
bf51919a79 Address review feedback for fallback fixes 2026-02-20 17:46:12 +02:00
VespianRex
f5f1d1d4c2 Fix model fallback across main/background/sync agents 2026-02-20 17:45:53 +02:00
83 changed files with 2894 additions and 4595 deletions

View File

@@ -69,6 +69,7 @@
"directory-readme-injector", "directory-readme-injector",
"empty-task-response-detector", "empty-task-response-detector",
"think-mode", "think-mode",
"model-fallback",
"anthropic-context-window-limit-recovery", "anthropic-context-window-limit-recovery",
"preemptive-compaction", "preemptive-compaction",
"rules-injector", "rules-injector",
@@ -80,6 +81,7 @@
"non-interactive-env", "non-interactive-env",
"interactive-bash-session", "interactive-bash-session",
"thinking-block-validator", "thinking-block-validator",
"beast-mode-system",
"ralph-loop", "ralph-loop",
"category-skill-reminder", "category-skill-reminder",
"compaction-context-injector", "compaction-context-injector",
@@ -100,7 +102,6 @@
"task-resume-info", "task-resume-info",
"stop-continuation-guard", "stop-continuation-guard",
"tasks-todowrite-disabler", "tasks-todowrite-disabler",
"runtime-fallback",
"write-existing-file-guard", "write-existing-file-guard",
"anthropic-effort", "anthropic-effort",
"hashline-read-enhancer", "hashline-read-enhancer",
@@ -141,19 +142,6 @@
"model": { "model": {
"type": "string" "type": "string"
}, },
"fallback_models": {
"anyOf": [
{
"type": "string"
},
{
"type": "array",
"items": {
"type": "string"
}
}
]
},
"variant": { "variant": {
"type": "string" "type": "string"
}, },
@@ -339,19 +327,6 @@
"model": { "model": {
"type": "string" "type": "string"
}, },
"fallback_models": {
"anyOf": [
{
"type": "string"
},
{
"type": "array",
"items": {
"type": "string"
}
}
]
},
"variant": { "variant": {
"type": "string" "type": "string"
}, },
@@ -537,19 +512,6 @@
"model": { "model": {
"type": "string" "type": "string"
}, },
"fallback_models": {
"anyOf": [
{
"type": "string"
},
{
"type": "array",
"items": {
"type": "string"
}
}
]
},
"variant": { "variant": {
"type": "string" "type": "string"
}, },
@@ -735,19 +697,6 @@
"model": { "model": {
"type": "string" "type": "string"
}, },
"fallback_models": {
"anyOf": [
{
"type": "string"
},
{
"type": "array",
"items": {
"type": "string"
}
}
]
},
"variant": { "variant": {
"type": "string" "type": "string"
}, },
@@ -933,19 +882,6 @@
"model": { "model": {
"type": "string" "type": "string"
}, },
"fallback_models": {
"anyOf": [
{
"type": "string"
},
{
"type": "array",
"items": {
"type": "string"
}
}
]
},
"variant": { "variant": {
"type": "string" "type": "string"
}, },
@@ -1131,19 +1067,6 @@
"model": { "model": {
"type": "string" "type": "string"
}, },
"fallback_models": {
"anyOf": [
{
"type": "string"
},
{
"type": "array",
"items": {
"type": "string"
}
}
]
},
"variant": { "variant": {
"type": "string" "type": "string"
}, },
@@ -1329,19 +1252,6 @@
"model": { "model": {
"type": "string" "type": "string"
}, },
"fallback_models": {
"anyOf": [
{
"type": "string"
},
{
"type": "array",
"items": {
"type": "string"
}
}
]
},
"variant": { "variant": {
"type": "string" "type": "string"
}, },
@@ -1527,19 +1437,6 @@
"model": { "model": {
"type": "string" "type": "string"
}, },
"fallback_models": {
"anyOf": [
{
"type": "string"
},
{
"type": "array",
"items": {
"type": "string"
}
}
]
},
"variant": { "variant": {
"type": "string" "type": "string"
}, },
@@ -1725,19 +1622,6 @@
"model": { "model": {
"type": "string" "type": "string"
}, },
"fallback_models": {
"anyOf": [
{
"type": "string"
},
{
"type": "array",
"items": {
"type": "string"
}
}
]
},
"variant": { "variant": {
"type": "string" "type": "string"
}, },
@@ -1923,19 +1807,6 @@
"model": { "model": {
"type": "string" "type": "string"
}, },
"fallback_models": {
"anyOf": [
{
"type": "string"
},
{
"type": "array",
"items": {
"type": "string"
}
}
]
},
"variant": { "variant": {
"type": "string" "type": "string"
}, },
@@ -2121,19 +1992,6 @@
"model": { "model": {
"type": "string" "type": "string"
}, },
"fallback_models": {
"anyOf": [
{
"type": "string"
},
{
"type": "array",
"items": {
"type": "string"
}
}
]
},
"variant": { "variant": {
"type": "string" "type": "string"
}, },
@@ -2319,19 +2177,6 @@
"model": { "model": {
"type": "string" "type": "string"
}, },
"fallback_models": {
"anyOf": [
{
"type": "string"
},
{
"type": "array",
"items": {
"type": "string"
}
}
]
},
"variant": { "variant": {
"type": "string" "type": "string"
}, },
@@ -2517,19 +2362,6 @@
"model": { "model": {
"type": "string" "type": "string"
}, },
"fallback_models": {
"anyOf": [
{
"type": "string"
},
{
"type": "array",
"items": {
"type": "string"
}
}
]
},
"variant": { "variant": {
"type": "string" "type": "string"
}, },
@@ -2715,19 +2547,6 @@
"model": { "model": {
"type": "string" "type": "string"
}, },
"fallback_models": {
"anyOf": [
{
"type": "string"
},
{
"type": "array",
"items": {
"type": "string"
}
}
]
},
"variant": { "variant": {
"type": "string" "type": "string"
}, },
@@ -2921,19 +2740,6 @@
"model": { "model": {
"type": "string" "type": "string"
}, },
"fallback_models": {
"anyOf": [
{
"type": "string"
},
{
"type": "array",
"items": {
"type": "string"
}
}
]
},
"variant": { "variant": {
"type": "string" "type": "string"
}, },
@@ -3205,6 +3011,9 @@
}, },
"disable_omo_env": { "disable_omo_env": {
"type": "boolean" "type": "boolean"
},
"model_fallback_title": {
"type": "boolean"
} }
}, },
"additionalProperties": false "additionalProperties": false
@@ -3343,37 +3152,6 @@
], ],
"additionalProperties": false "additionalProperties": false
}, },
"runtime_fallback": {
"type": "object",
"properties": {
"enabled": {
"type": "boolean"
},
"retry_on_errors": {
"type": "array",
"items": {
"type": "number"
}
},
"max_fallback_attempts": {
"type": "number",
"minimum": 1,
"maximum": 20
},
"cooldown_seconds": {
"type": "number",
"minimum": 0
},
"timeout_seconds": {
"type": "number",
"minimum": 0
},
"notify_on_fallback": {
"type": "boolean"
}
},
"additionalProperties": false
},
"background_task": { "background_task": {
"type": "object", "type": "object",
"properties": { "properties": {

View File

@@ -163,13 +163,12 @@ Override built-in agent settings:
} }
``` ```
Each agent supports: `model`, `fallback_models`, `temperature`, `top_p`, `prompt`, `prompt_append`, `tools`, `disable`, `description`, `mode`, `color`, `permission`, `category`, `variant`, `maxTokens`, `thinking`, `reasoningEffort`, `textVerbosity`, `providerOptions`. Each agent supports: `model`, `temperature`, `top_p`, `prompt`, `prompt_append`, `tools`, `disable`, `description`, `mode`, `color`, `permission`, `category`, `variant`, `maxTokens`, `thinking`, `reasoningEffort`, `textVerbosity`, `providerOptions`.
### Additional Agent Options ### Additional Agent Options
| Option | Type | Description | | Option | Type | Description |
| ------------------- | -------------- | ----------------------------------------------------------------------------------------------- | | ------------------- | ------- | ----------------------------------------------------------------------------------------------- |
| `fallback_models` | string/array | Fallback models for runtime switching on API errors. Single string or array of model strings. |
| `category` | string | Category name to inherit model and other settings from category defaults | | `category` | string | Category name to inherit model and other settings from category defaults |
| `variant` | string | Model variant (e.g., `max`, `high`, `medium`, `low`, `xhigh`) | | `variant` | string | Model variant (e.g., `max`, `high`, `medium`, `low`, `xhigh`) |
| `maxTokens` | number | Maximum tokens for response. Passed directly to OpenCode SDK. | | `maxTokens` | number | Maximum tokens for response. Passed directly to OpenCode SDK. |
@@ -715,84 +714,6 @@ Configure concurrency limits for background agent tasks. This controls how many
- Allow more concurrent tasks for fast/cheap models (e.g., Gemini Flash) - Allow more concurrent tasks for fast/cheap models (e.g., Gemini Flash)
- Respect provider rate limits by setting provider-level caps - Respect provider rate limits by setting provider-level caps
## Runtime Fallback
Automatically switch to backup models when the primary model encounters retryable API errors (rate limits, overload, etc.) or provider key misconfiguration errors (for example, missing API key). This keeps conversations running without manual intervention.
```json
{
"runtime_fallback": {
"enabled": true,
"retry_on_errors": [400, 429, 503, 529],
"max_fallback_attempts": 3,
"cooldown_seconds": 60,
"timeout_seconds": 30,
"notify_on_fallback": true
}
}
```
| Option | Default | Description |
| ----------------------- | ---------------------- | --------------------------------------------------------------------------- |
| `enabled` | `true` | Enable runtime fallback |
| `retry_on_errors` | `[400, 429, 503, 529]` | HTTP status codes that trigger fallback (rate limit, service unavailable). Also supports certain classified provider errors (for example, missing API key) that do not expose HTTP status codes. |
| `max_fallback_attempts` | `3` | Maximum fallback attempts per session (1-20) |
| `cooldown_seconds` | `60` | Cooldown in seconds before retrying a failed model |
| `timeout_seconds` | `30` | Timeout in seconds for an in-flight fallback request before forcing the next fallback model. **⚠️ Set to `0` to disable auto-retry signal detection** (see below). |
| `notify_on_fallback` | `true` | Show toast notification when switching to a fallback model |
### timeout_seconds: Understanding the 0 Value
**⚠️ IMPORTANT**: Setting `timeout_seconds: 0` **disables auto-retry signal detection**. This is a critical behavior change:
| Setting | Behavior |
|---------|----------|
| `timeout_seconds: 30` (default) | ✅ **Full fallback coverage**: Error-based fallback (429, 503, etc.) + auto-retry signal detection (provider messages like "retrying in 8h") |
| `timeout_seconds: 0` | ⚠️ **Limited fallback**: Only error-based fallback works. Provider retry messages are **completely ignored**. Timeout-based escalation is **disabled**. |
**When `timeout_seconds: 0`:**
- ✅ HTTP errors (429, 503, 529) still trigger fallback
- ✅ Provider key errors (missing API key) still trigger fallback
- ❌ Provider retry messages ("retrying in Xh") are **ignored**
- ❌ Timeout-based escalation is **disabled**
- ❌ Hanging requests do **not** advance to the next fallback model
**Recommendation**: Use a non-zero value (e.g., `30` seconds) to enable full fallback coverage. Only set to `0` if you explicitly want to disable auto-retry signal detection.
### How It Works
1. When an API error matching `retry_on_errors` occurs (or a classified provider key error such as missing API key), the hook intercepts it
2. The next request automatically uses the next available model from `fallback_models`
3. Failed models enter a cooldown period before being retried
4. If `timeout_seconds > 0` and a fallback provider hangs, timeout advances to the next fallback model
5. Toast notification (optional) informs you of the model switch
### Configuring Fallback Models
Define `fallback_models` at the agent or category level:
```json
{
"agents": {
"sisyphus": {
"model": "anthropic/claude-opus-4-5",
"fallback_models": ["openai/gpt-5.2", "google/gemini-3-pro"]
}
},
"categories": {
"ultrabrain": {
"model": "openai/gpt-5.2-codex",
"fallback_models": ["anthropic/claude-opus-4-5", "google/gemini-3-pro"]
}
}
}
```
When the primary model fails:
1. First fallback: `openai/gpt-5.2`
2. Second fallback: `google/gemini-3-pro`
3. After `max_fallback_attempts`, returns to primary model
## Categories ## Categories
Categories enable domain-specific task delegation via the `task` tool. Each category applies runtime presets (model, temperature, prompt additions) when calling the `Sisyphus-Junior` agent. Categories enable domain-specific task delegation via the `task` tool. Each category applies runtime presets (model, temperature, prompt additions) when calling the `Sisyphus-Junior` agent.
@@ -909,75 +830,15 @@ Add your own categories or override built-in ones:
} }
``` ```
Each category supports: `model`, `fallback_models`, `temperature`, `top_p`, `maxTokens`, `thinking`, `reasoningEffort`, `textVerbosity`, `tools`, `prompt_append`, `variant`, `description`, `is_unstable_agent`. Each category supports: `model`, `temperature`, `top_p`, `maxTokens`, `thinking`, `reasoningEffort`, `textVerbosity`, `tools`, `prompt_append`, `variant`, `description`, `is_unstable_agent`.
### Additional Category Options ### Additional Category Options
| Option | Type | Default | Description | | Option | Type | Default | Description |
| ------------------- | ------------ | ------- | --------------------------------------------------------------------------------------------------- | | ------------------ | ------- | ------- | --------------------------------------------------------------------------------------------------- |
| `fallback_models` | string/array | - | Fallback models for runtime switching on API errors. Single string or array of model strings. | | `description` | string | - | Human-readable description of the category's purpose. Shown in task prompt. |
| `description` | string | - | Human-readable description of the category's purpose. Shown in delegate_task prompt. |
| `is_unstable_agent`| boolean | `false` | Mark agent as unstable - forces background mode for monitoring. Auto-enabled for gemini models. | | `is_unstable_agent`| boolean | `false` | Mark agent as unstable - forces background mode for monitoring. Auto-enabled for gemini models. |
## Runtime Fallback
Automatically switch to backup models when the primary model encounters retryable API errors (rate limits, overload, etc.) or provider key misconfiguration errors (for example, missing API key). This keeps conversations running without manual intervention.
```json
{
"runtime_fallback": {
"enabled": true,
"retry_on_errors": [429, 503, 529],
"max_fallback_attempts": 3,
"cooldown_seconds": 60,
"timeout_seconds": 30,
"notify_on_fallback": true
}
}
```
| Option | Default | Description |
| ----------------------- | ----------------- | --------------------------------------------------------------------------- |
| `enabled` | `true` | Enable runtime fallback |
| `retry_on_errors` | `[429, 503, 529]` | HTTP status codes that trigger fallback (rate limit, service unavailable). Also supports certain classified provider errors (for example, missing API key) that do not expose HTTP status codes. |
| `max_fallback_attempts` | `3` | Maximum fallback attempts per session (1-10) |
| `cooldown_seconds` | `60` | Cooldown in seconds before retrying a failed model |
| `timeout_seconds` | `30` | Timeout in seconds for an in-flight fallback request before forcing the next fallback model. Set to `0` to disable timeout-based fallback and provider quota retry signal detection. |
| `notify_on_fallback` | `true` | Show toast notification when switching to a fallback model |
### How It Works
1. When an API error matching `retry_on_errors` occurs (or a classified provider key error such as missing API key), the hook intercepts it
2. The next request automatically uses the next available model from `fallback_models`
3. Failed models enter a cooldown period before being retried
4. If a fallback provider hangs, timeout advances to the next fallback model
5. Toast notification (optional) informs you of the model switch
### Configuring Fallback Models
Define `fallback_models` at the agent or category level:
```json
{
"agents": {
"sisyphus": {
"model": "anthropic/claude-opus-4-5",
"fallback_models": ["openai/gpt-5.2", "google/gemini-3-pro"]
}
},
"categories": {
"ultrabrain": {
"model": "openai/gpt-5.2-codex",
"fallback_models": ["anthropic/claude-opus-4-5", "google/gemini-3-pro"]
}
}
}
```
When the primary model fails:
1. First fallback: `openai/gpt-5.2`
2. Second fallback: `google/gemini-3-pro`
3. After `max_fallback_attempts`, returns to primary model
## Model Resolution System ## Model Resolution System
At runtime, Oh My OpenCode uses a 3-step resolution process to determine which model to use for each agent and category. This happens dynamically based on your configuration and available models. At runtime, Oh My OpenCode uses a 3-step resolution process to determine which model to use for each agent and category. This happens dynamically based on your configuration and available models.
@@ -1112,7 +973,7 @@ Disable specific built-in hooks via `disabled_hooks` in `~/.config/opencode/oh-m
} }
``` ```
Available hooks: `todo-continuation-enforcer`, `context-window-monitor`, `session-recovery`, `session-notification`, `comment-checker`, `grep-output-truncator`, `tool-output-truncator`, `directory-agents-injector`, `directory-readme-injector`, `empty-task-response-detector`, `think-mode`, `anthropic-context-window-limit-recovery`, `rules-injector`, `background-notification`, `auto-update-checker`, `startup-toast`, `keyword-detector`, `agent-usage-reminder`, `non-interactive-env`, `interactive-bash-session`, `compaction-context-injector`, `thinking-block-validator`, `claude-code-hooks`, `ralph-loop`, `preemptive-compaction`, `auto-slash-command`, `sisyphus-junior-notepad`, `no-sisyphus-gpt`, `start-work`, `runtime-fallback` Available hooks: `todo-continuation-enforcer`, `context-window-monitor`, `session-recovery`, `session-notification`, `comment-checker`, `grep-output-truncator`, `tool-output-truncator`, `directory-agents-injector`, `directory-readme-injector`, `empty-task-response-detector`, `think-mode`, `anthropic-context-window-limit-recovery`, `rules-injector`, `background-notification`, `auto-update-checker`, `startup-toast`, `keyword-detector`, `agent-usage-reminder`, `non-interactive-env`, `interactive-bash-session`, `compaction-context-injector`, `thinking-block-validator`, `claude-code-hooks`, `ralph-loop`, `preemptive-compaction`, `auto-slash-command`, `sisyphus-junior-notepad`, `no-sisyphus-gpt`, `start-work`
**Note on `directory-agents-injector`**: This hook is **automatically disabled** when running on OpenCode 1.1.37+ because OpenCode now has native support for dynamically resolving AGENTS.md files from subdirectories (PR #10678). This prevents duplicate AGENTS.md injection. For older OpenCode versions, the hook remains active to provide the same functionality. **Note on `directory-agents-injector`**: This hook is **automatically disabled** when running on OpenCode 1.1.37+ because OpenCode now has native support for dynamically resolving AGENTS.md files from subdirectories (PR #10678). This prevents duplicate AGENTS.md injection. For older OpenCode versions, the hook remains active to provide the same functionality.

View File

@@ -352,7 +352,6 @@ Hooks intercept and modify behavior at key points in the agent lifecycle.
| **session-recovery** | Stop | Recovers from session errors - missing tool results, thinking block issues, empty messages. | | **session-recovery** | Stop | Recovers from session errors - missing tool results, thinking block issues, empty messages. |
| **anthropic-context-window-limit-recovery** | Stop | Handles Claude context window limits gracefully. | | **anthropic-context-window-limit-recovery** | Stop | Handles Claude context window limits gracefully. |
| **background-compaction** | Stop | Auto-compacts sessions hitting token limits. | | **background-compaction** | Stop | Auto-compacts sessions hitting token limits. |
| **runtime-fallback** | Event | Automatically switches to backup models on retryable API errors (e.g., 429, 503, 529), provider key misconfiguration errors (e.g., missing API key), and auto-retry signals (when `timeout_seconds > 0`). Configurable retry logic with per-model cooldown. See [Runtime Fallback Configuration](configurations.md#runtime-fallback) for details on `timeout_seconds` behavior. |
#### Truncation & Context Management #### Truncation & Context Management

View File

@@ -1,4 +1,5 @@
import { resolveModelPipeline } from "../../shared" import { resolveModelPipeline } from "../../shared"
import { transformModelForProvider } from "../../shared/provider-model-id-transform"
export function applyModelResolution(input: { export function applyModelResolution(input: {
uiSelectedModel?: string uiSelectedModel?: string
@@ -20,8 +21,10 @@ export function getFirstFallbackModel(requirement?: {
}) { }) {
const entry = requirement?.fallbackChain?.[0] const entry = requirement?.fallbackChain?.[0]
if (!entry || entry.providers.length === 0) return undefined if (!entry || entry.providers.length === 0) return undefined
const provider = entry.providers[0]
const transformedModel = transformModelForProvider(provider, entry.model)
return { return {
model: `${entry.providers[0]}/${entry.model}`, model: `${provider}/${transformedModel}`,
provenance: "provider-fallback" as const, provenance: "provider-fallback" as const,
variant: entry.variant, variant: entry.variant,
} }

View File

@@ -100,7 +100,6 @@ export type AgentName = BuiltinAgentName
export type AgentOverrideConfig = Partial<AgentConfig> & { export type AgentOverrideConfig = Partial<AgentConfig> & {
prompt_append?: string prompt_append?: string
variant?: string variant?: string
fallback_models?: string | string[]
} }
export type AgentOverrides = Partial<Record<OverridableAgentName, AgentOverrideConfig>> export type AgentOverrides = Partial<Record<OverridableAgentName, AgentOverrideConfig>>

View File

@@ -51,7 +51,7 @@ describe("createBuiltinAgents with model overrides", () => {
expect(agents.sisyphus.thinking).toBeUndefined() expect(agents.sisyphus.thinking).toBeUndefined()
}) })
test("Atlas uses uiSelectedModel", async () => { test("Atlas uses uiSelectedModel when provided", async () => {
// #given // #given
const fetchSpy = spyOn(shared, "fetchAvailableModels").mockResolvedValue( const fetchSpy = spyOn(shared, "fetchAvailableModels").mockResolvedValue(
new Set(["openai/gpt-5.2", "anthropic/claude-sonnet-4-6"]) new Set(["openai/gpt-5.2", "anthropic/claude-sonnet-4-6"])

View File

@@ -334,48 +334,48 @@ exports[`generateModelConfig single native provider uses Gemini models when only
"model": "opencode/minimax-m2.5-free", "model": "opencode/minimax-m2.5-free",
}, },
"metis": { "metis": {
"model": "google/gemini-3-pro", "model": "google/gemini-3-pro-preview",
"variant": "high", "variant": "high",
}, },
"momus": { "momus": {
"model": "google/gemini-3-pro", "model": "google/gemini-3-pro-preview",
"variant": "high", "variant": "high",
}, },
"multimodal-looker": { "multimodal-looker": {
"model": "google/gemini-3-flash", "model": "google/gemini-3-flash-preview",
}, },
"oracle": { "oracle": {
"model": "google/gemini-3-pro", "model": "google/gemini-3-pro-preview",
"variant": "high", "variant": "high",
}, },
"prometheus": { "prometheus": {
"model": "google/gemini-3-pro", "model": "google/gemini-3-pro-preview",
}, },
}, },
"categories": { "categories": {
"artistry": { "artistry": {
"model": "google/gemini-3-pro", "model": "google/gemini-3-pro-preview",
"variant": "high", "variant": "high",
}, },
"quick": { "quick": {
"model": "google/gemini-3-flash", "model": "google/gemini-3-flash-preview",
}, },
"ultrabrain": { "ultrabrain": {
"model": "google/gemini-3-pro", "model": "google/gemini-3-pro-preview",
"variant": "high", "variant": "high",
}, },
"unspecified-high": { "unspecified-high": {
"model": "google/gemini-3-flash", "model": "google/gemini-3-flash-preview",
}, },
"unspecified-low": { "unspecified-low": {
"model": "google/gemini-3-flash", "model": "google/gemini-3-flash-preview",
}, },
"visual-engineering": { "visual-engineering": {
"model": "google/gemini-3-pro", "model": "google/gemini-3-pro-preview",
"variant": "high", "variant": "high",
}, },
"writing": { "writing": {
"model": "google/gemini-3-flash", "model": "google/gemini-3-flash-preview",
}, },
}, },
} }
@@ -395,48 +395,48 @@ exports[`generateModelConfig single native provider uses Gemini models with isMa
"model": "opencode/minimax-m2.5-free", "model": "opencode/minimax-m2.5-free",
}, },
"metis": { "metis": {
"model": "google/gemini-3-pro", "model": "google/gemini-3-pro-preview",
"variant": "high", "variant": "high",
}, },
"momus": { "momus": {
"model": "google/gemini-3-pro", "model": "google/gemini-3-pro-preview",
"variant": "high", "variant": "high",
}, },
"multimodal-looker": { "multimodal-looker": {
"model": "google/gemini-3-flash", "model": "google/gemini-3-flash-preview",
}, },
"oracle": { "oracle": {
"model": "google/gemini-3-pro", "model": "google/gemini-3-pro-preview",
"variant": "high", "variant": "high",
}, },
"prometheus": { "prometheus": {
"model": "google/gemini-3-pro", "model": "google/gemini-3-pro-preview",
}, },
}, },
"categories": { "categories": {
"artistry": { "artistry": {
"model": "google/gemini-3-pro", "model": "google/gemini-3-pro-preview",
"variant": "high", "variant": "high",
}, },
"quick": { "quick": {
"model": "google/gemini-3-flash", "model": "google/gemini-3-flash-preview",
}, },
"ultrabrain": { "ultrabrain": {
"model": "google/gemini-3-pro", "model": "google/gemini-3-pro-preview",
"variant": "high", "variant": "high",
}, },
"unspecified-high": { "unspecified-high": {
"model": "google/gemini-3-pro", "model": "google/gemini-3-pro-preview",
}, },
"unspecified-low": { "unspecified-low": {
"model": "google/gemini-3-flash", "model": "google/gemini-3-flash-preview",
}, },
"visual-engineering": { "visual-engineering": {
"model": "google/gemini-3-pro", "model": "google/gemini-3-pro-preview",
"variant": "high", "variant": "high",
}, },
"writing": { "writing": {
"model": "google/gemini-3-flash", "model": "google/gemini-3-flash-preview",
}, },
}, },
} }
@@ -468,7 +468,7 @@ exports[`generateModelConfig all native providers uses preferred models from fal
"variant": "medium", "variant": "medium",
}, },
"multimodal-looker": { "multimodal-looker": {
"model": "google/gemini-3-flash", "model": "google/gemini-3-flash-preview",
}, },
"oracle": { "oracle": {
"model": "openai/gpt-5.2", "model": "openai/gpt-5.2",
@@ -485,7 +485,7 @@ exports[`generateModelConfig all native providers uses preferred models from fal
}, },
"categories": { "categories": {
"artistry": { "artistry": {
"model": "google/gemini-3-pro", "model": "google/gemini-3-pro-preview",
"variant": "high", "variant": "high",
}, },
"deep": { "deep": {
@@ -506,11 +506,11 @@ exports[`generateModelConfig all native providers uses preferred models from fal
"model": "anthropic/claude-sonnet-4-6", "model": "anthropic/claude-sonnet-4-6",
}, },
"visual-engineering": { "visual-engineering": {
"model": "google/gemini-3-pro", "model": "google/gemini-3-pro-preview",
"variant": "high", "variant": "high",
}, },
"writing": { "writing": {
"model": "google/gemini-3-flash", "model": "google/gemini-3-flash-preview",
}, },
}, },
} }
@@ -542,7 +542,7 @@ exports[`generateModelConfig all native providers uses preferred models with isM
"variant": "medium", "variant": "medium",
}, },
"multimodal-looker": { "multimodal-looker": {
"model": "google/gemini-3-flash", "model": "google/gemini-3-flash-preview",
}, },
"oracle": { "oracle": {
"model": "openai/gpt-5.2", "model": "openai/gpt-5.2",
@@ -559,7 +559,7 @@ exports[`generateModelConfig all native providers uses preferred models with isM
}, },
"categories": { "categories": {
"artistry": { "artistry": {
"model": "google/gemini-3-pro", "model": "google/gemini-3-pro-preview",
"variant": "high", "variant": "high",
}, },
"deep": { "deep": {
@@ -581,11 +581,11 @@ exports[`generateModelConfig all native providers uses preferred models with isM
"model": "anthropic/claude-sonnet-4-6", "model": "anthropic/claude-sonnet-4-6",
}, },
"visual-engineering": { "visual-engineering": {
"model": "google/gemini-3-pro", "model": "google/gemini-3-pro-preview",
"variant": "high", "variant": "high",
}, },
"writing": { "writing": {
"model": "google/gemini-3-flash", "model": "google/gemini-3-flash-preview",
}, },
}, },
} }
@@ -1230,10 +1230,10 @@ exports[`generateModelConfig mixed provider scenarios uses Gemini + Claude combi
"variant": "max", "variant": "max",
}, },
"multimodal-looker": { "multimodal-looker": {
"model": "google/gemini-3-flash", "model": "google/gemini-3-flash-preview",
}, },
"oracle": { "oracle": {
"model": "google/gemini-3-pro", "model": "google/gemini-3-pro-preview",
"variant": "high", "variant": "high",
}, },
"prometheus": { "prometheus": {
@@ -1247,14 +1247,14 @@ exports[`generateModelConfig mixed provider scenarios uses Gemini + Claude combi
}, },
"categories": { "categories": {
"artistry": { "artistry": {
"model": "google/gemini-3-pro", "model": "google/gemini-3-pro-preview",
"variant": "high", "variant": "high",
}, },
"quick": { "quick": {
"model": "anthropic/claude-haiku-4-5", "model": "anthropic/claude-haiku-4-5",
}, },
"ultrabrain": { "ultrabrain": {
"model": "google/gemini-3-pro", "model": "google/gemini-3-pro-preview",
"variant": "high", "variant": "high",
}, },
"unspecified-high": { "unspecified-high": {
@@ -1264,11 +1264,11 @@ exports[`generateModelConfig mixed provider scenarios uses Gemini + Claude combi
"model": "anthropic/claude-sonnet-4-6", "model": "anthropic/claude-sonnet-4-6",
}, },
"visual-engineering": { "visual-engineering": {
"model": "google/gemini-3-pro", "model": "google/gemini-3-pro-preview",
"variant": "high", "variant": "high",
}, },
"writing": { "writing": {
"model": "google/gemini-3-flash", "model": "google/gemini-3-flash-preview",
}, },
}, },
} }
@@ -1391,7 +1391,7 @@ exports[`generateModelConfig mixed provider scenarios uses all providers togethe
}, },
"categories": { "categories": {
"artistry": { "artistry": {
"model": "google/gemini-3-pro", "model": "google/gemini-3-pro-preview",
"variant": "high", "variant": "high",
}, },
"deep": { "deep": {
@@ -1412,11 +1412,11 @@ exports[`generateModelConfig mixed provider scenarios uses all providers togethe
"model": "anthropic/claude-sonnet-4-6", "model": "anthropic/claude-sonnet-4-6",
}, },
"visual-engineering": { "visual-engineering": {
"model": "google/gemini-3-pro", "model": "google/gemini-3-pro-preview",
"variant": "high", "variant": "high",
}, },
"writing": { "writing": {
"model": "google/gemini-3-flash", "model": "google/gemini-3-flash-preview",
}, },
}, },
} }
@@ -1465,7 +1465,7 @@ exports[`generateModelConfig mixed provider scenarios uses all providers with is
}, },
"categories": { "categories": {
"artistry": { "artistry": {
"model": "google/gemini-3-pro", "model": "google/gemini-3-pro-preview",
"variant": "high", "variant": "high",
}, },
"deep": { "deep": {
@@ -1487,11 +1487,11 @@ exports[`generateModelConfig mixed provider scenarios uses all providers with is
"model": "anthropic/claude-sonnet-4-6", "model": "anthropic/claude-sonnet-4-6",
}, },
"visual-engineering": { "visual-engineering": {
"model": "google/gemini-3-pro", "model": "google/gemini-3-pro-preview",
"variant": "high", "variant": "high",
}, },
"writing": { "writing": {
"model": "google/gemini-3-flash", "model": "google/gemini-3-flash-preview",
}, },
}, },
} }

View File

@@ -15,7 +15,7 @@ describe("model-resolution check", () => {
const sisyphus = info.agents.find((a) => a.name === "sisyphus") const sisyphus = info.agents.find((a) => a.name === "sisyphus")
expect(sisyphus).toBeDefined() expect(sisyphus).toBeDefined()
expect(sisyphus!.requirement.fallbackChain[0]?.model).toBe("claude-opus-4-6") expect(sisyphus!.requirement.fallbackChain[0]?.model).toBe("claude-opus-4-6")
expect(sisyphus!.requirement.fallbackChain[0]?.providers).toContain("anthropic") expect(sisyphus!.requirement.fallbackChain[0]?.providers).toContain("quotio")
}) })
it("returns category requirements with provider chains", async () => { it("returns category requirements with provider chains", async () => {
@@ -26,8 +26,8 @@ describe("model-resolution check", () => {
// then: Should have category entries // then: Should have category entries
const visual = info.categories.find((c) => c.name === "visual-engineering") const visual = info.categories.find((c) => c.name === "visual-engineering")
expect(visual).toBeDefined() expect(visual).toBeDefined()
expect(visual!.requirement.fallbackChain[0]?.model).toBe("gemini-3-pro") expect(visual!.requirement.fallbackChain[0]?.model).toBe("claude-opus-4-6-thinking")
expect(visual!.requirement.fallbackChain[0]?.providers).toContain("google") expect(visual!.requirement.fallbackChain[0]?.providers).toContain("quotio")
}) })
}) })
@@ -87,7 +87,7 @@ describe("model-resolution check", () => {
expect(sisyphus).toBeDefined() expect(sisyphus).toBeDefined()
expect(sisyphus!.userOverride).toBeUndefined() expect(sisyphus!.userOverride).toBeUndefined()
expect(sisyphus!.effectiveResolution).toContain("Provider fallback:") expect(sisyphus!.effectiveResolution).toContain("Provider fallback:")
expect(sisyphus!.effectiveResolution).toContain("anthropic") expect(sisyphus!.effectiveResolution).toContain("quotio")
}) })
it("captures user variant for agent when configured", async () => { it("captures user variant for agent when configured", async () => {

View File

@@ -1,8 +1,6 @@
import { import type { FallbackEntry } from "../shared/model-requirements"
AGENT_MODEL_REQUIREMENTS,
type FallbackEntry,
} from "../shared/model-requirements"
import type { ProviderAvailability } from "./model-fallback-types" import type { ProviderAvailability } from "./model-fallback-types"
import { CLI_AGENT_MODEL_REQUIREMENTS } from "./model-fallback-requirements"
import { isProviderAvailable } from "./provider-availability" import { isProviderAvailable } from "./provider-availability"
import { transformModelForProvider } from "./provider-model-id-transform" import { transformModelForProvider } from "./provider-model-id-transform"
@@ -25,7 +23,7 @@ export function resolveModelFromChain(
} }
export function getSisyphusFallbackChain(): FallbackEntry[] { export function getSisyphusFallbackChain(): FallbackEntry[] {
return AGENT_MODEL_REQUIREMENTS.sisyphus.fallbackChain return CLI_AGENT_MODEL_REQUIREMENTS.sisyphus.fallbackChain
} }
export function isAnyFallbackEntryAvailable( export function isAnyFallbackEntryAvailable(

View File

@@ -0,0 +1,153 @@
import type { ModelRequirement } from "../shared/model-requirements"
// NOTE: These requirements are used by the CLI config generator (`generateModelConfig`).
// They intentionally use "install-time" provider IDs (anthropic/openai/google/opencode/etc),
// not runtime providers like `quotio`/`nvidia`.
export const CLI_AGENT_MODEL_REQUIREMENTS: Record<string, ModelRequirement> = {
sisyphus: {
fallbackChain: [
{ providers: ["anthropic", "github-copilot", "opencode"], model: "claude-opus-4-6", variant: "max" },
{ providers: ["kimi-for-coding"], model: "k2p5" },
{ providers: ["opencode"], model: "kimi-k2.5-free" },
{ providers: ["zai-coding-plan"], model: "glm-4.7" },
{ providers: ["opencode"], model: "glm-4.7-free" },
],
requiresAnyModel: true,
},
hephaestus: {
fallbackChain: [
{ providers: ["openai", "github-copilot", "opencode"], model: "gpt-5.3-codex", variant: "medium" },
],
requiresProvider: ["openai", "github-copilot", "opencode"],
},
oracle: {
fallbackChain: [
{ providers: ["openai", "github-copilot", "opencode"], model: "gpt-5.2", variant: "high" },
{ providers: ["google", "github-copilot", "opencode"], model: "gemini-3-pro", variant: "high" },
{ providers: ["anthropic", "github-copilot", "opencode"], model: "claude-opus-4-6", variant: "max" },
],
},
librarian: {
fallbackChain: [
{ providers: ["zai-coding-plan"], model: "glm-4.7" },
{ providers: ["opencode"], model: "glm-4.7-free" },
{ providers: ["anthropic", "github-copilot", "opencode"], model: "claude-sonnet-4-5" },
],
},
explore: {
fallbackChain: [
{ providers: ["github-copilot"], model: "grok-code-fast-1" },
{ providers: ["anthropic", "opencode"], model: "claude-haiku-4-5" },
{ providers: ["opencode"], model: "gpt-5-nano" },
],
},
"multimodal-looker": {
fallbackChain: [
{ providers: ["google", "github-copilot", "opencode"], model: "gemini-3-flash" },
{ providers: ["openai", "github-copilot", "opencode"], model: "gpt-5.2" },
{ providers: ["zai-coding-plan"], model: "glm-4.6v" },
{ providers: ["kimi-for-coding"], model: "k2p5" },
{ providers: ["opencode"], model: "kimi-k2.5-free" },
{ providers: ["anthropic", "github-copilot", "opencode"], model: "claude-haiku-4-5" },
{ providers: ["opencode"], model: "gpt-5-nano" },
],
},
prometheus: {
fallbackChain: [
{ providers: ["anthropic", "github-copilot", "opencode"], model: "claude-opus-4-6", variant: "max" },
{ providers: ["kimi-for-coding"], model: "k2p5" },
{ providers: ["opencode"], model: "kimi-k2.5-free" },
{ providers: ["openai", "github-copilot", "opencode"], model: "gpt-5.2", variant: "high" },
{ providers: ["google", "github-copilot", "opencode"], model: "gemini-3-pro" },
],
},
metis: {
fallbackChain: [
{ providers: ["anthropic", "github-copilot", "opencode"], model: "claude-opus-4-6", variant: "max" },
{ providers: ["kimi-for-coding"], model: "k2p5" },
{ providers: ["opencode"], model: "kimi-k2.5-free" },
{ providers: ["openai", "github-copilot", "opencode"], model: "gpt-5.2", variant: "high" },
{ providers: ["google", "github-copilot", "opencode"], model: "gemini-3-pro", variant: "high" },
],
},
momus: {
fallbackChain: [
{ providers: ["openai", "github-copilot", "opencode"], model: "gpt-5.2", variant: "medium" },
{ providers: ["anthropic", "github-copilot", "opencode"], model: "claude-opus-4-6", variant: "max" },
{ providers: ["google", "github-copilot", "opencode"], model: "gemini-3-pro", variant: "high" },
],
},
atlas: {
fallbackChain: [
{ providers: ["kimi-for-coding"], model: "k2p5" },
{ providers: ["opencode"], model: "kimi-k2.5-free" },
{ providers: ["anthropic", "github-copilot", "opencode"], model: "claude-sonnet-4-5" },
{ providers: ["openai", "github-copilot", "opencode"], model: "gpt-5.2" },
{ providers: ["google", "github-copilot", "opencode"], model: "gemini-3-pro" },
],
},
}
export const CLI_CATEGORY_MODEL_REQUIREMENTS: Record<string, ModelRequirement> = {
"visual-engineering": {
fallbackChain: [
{ providers: ["google", "github-copilot", "opencode"], model: "gemini-3-pro", variant: "high" },
{ providers: ["zai-coding-plan"], model: "glm-5" },
{ providers: ["anthropic", "github-copilot", "opencode"], model: "claude-opus-4-6", variant: "max" },
{ providers: ["kimi-for-coding"], model: "k2p5" },
],
},
ultrabrain: {
fallbackChain: [
{ providers: ["openai", "github-copilot", "opencode"], model: "gpt-5.3-codex", variant: "xhigh" },
{ providers: ["google", "github-copilot", "opencode"], model: "gemini-3-pro", variant: "high" },
{ providers: ["anthropic", "github-copilot", "opencode"], model: "claude-opus-4-6", variant: "max" },
],
},
deep: {
fallbackChain: [
{ providers: ["openai", "github-copilot", "opencode"], model: "gpt-5.3-codex", variant: "medium" },
{ providers: ["anthropic", "github-copilot", "opencode"], model: "claude-opus-4-6", variant: "max" },
{ providers: ["google", "github-copilot", "opencode"], model: "gemini-3-pro", variant: "high" },
],
requiresModel: "gpt-5.3-codex",
},
artistry: {
fallbackChain: [
{ providers: ["google", "github-copilot", "opencode"], model: "gemini-3-pro", variant: "high" },
{ providers: ["anthropic", "github-copilot", "opencode"], model: "claude-opus-4-6", variant: "max" },
{ providers: ["openai", "github-copilot", "opencode"], model: "gpt-5.2" },
],
requiresModel: "gemini-3-pro",
},
quick: {
fallbackChain: [
{ providers: ["anthropic", "github-copilot", "opencode"], model: "claude-haiku-4-5" },
{ providers: ["google", "github-copilot", "opencode"], model: "gemini-3-flash" },
{ providers: ["opencode"], model: "gpt-5-nano" },
],
},
"unspecified-low": {
fallbackChain: [
{ providers: ["anthropic", "github-copilot", "opencode"], model: "claude-sonnet-4-5" },
{ providers: ["openai", "github-copilot", "opencode"], model: "gpt-5.3-codex", variant: "medium" },
{ providers: ["google", "github-copilot", "opencode"], model: "gemini-3-flash" },
],
},
"unspecified-high": {
fallbackChain: [
{ providers: ["anthropic", "github-copilot", "opencode"], model: "claude-opus-4-6", variant: "max" },
{ providers: ["openai", "github-copilot", "opencode"], model: "gpt-5.2", variant: "high" },
{ providers: ["google", "github-copilot", "opencode"], model: "gemini-3-pro" },
],
},
writing: {
fallbackChain: [
{ providers: ["kimi-for-coding"], model: "k2p5" },
{ providers: ["google", "github-copilot", "opencode"], model: "gemini-3-flash" },
{ providers: ["anthropic", "github-copilot", "opencode"], model: "claude-sonnet-4-5" },
],
},
}

View File

@@ -1,7 +1,7 @@
import { import {
AGENT_MODEL_REQUIREMENTS, CLI_AGENT_MODEL_REQUIREMENTS,
CATEGORY_MODEL_REQUIREMENTS, CLI_CATEGORY_MODEL_REQUIREMENTS,
} from "../shared/model-requirements" } from "./model-fallback-requirements"
import type { InstallConfig } from "./types" import type { InstallConfig } from "./types"
import type { AgentConfig, CategoryConfig, GeneratedOmoConfig } from "./model-fallback-types" import type { AgentConfig, CategoryConfig, GeneratedOmoConfig } from "./model-fallback-types"
@@ -16,9 +16,9 @@ import {
export type { GeneratedOmoConfig } from "./model-fallback-types" export type { GeneratedOmoConfig } from "./model-fallback-types"
const LIBRARIAN_MODEL = "opencode/minimax-m2.5-free" const ZAI_MODEL = "zai-coding-plan/glm-4.7"
const ULTIMATE_FALLBACK = "opencode/big-pickle" const ULTIMATE_FALLBACK = "opencode/glm-4.7-free"
const SCHEMA_URL = "https://raw.githubusercontent.com/code-yeongyu/oh-my-opencode/master/assets/oh-my-opencode.schema.json" const SCHEMA_URL = "https://raw.githubusercontent.com/code-yeongyu/oh-my-opencode/master/assets/oh-my-opencode.schema.json"
@@ -38,12 +38,12 @@ export function generateModelConfig(config: InstallConfig): GeneratedOmoConfig {
return { return {
$schema: SCHEMA_URL, $schema: SCHEMA_URL,
agents: Object.fromEntries( agents: Object.fromEntries(
Object.entries(AGENT_MODEL_REQUIREMENTS) Object.entries(CLI_AGENT_MODEL_REQUIREMENTS)
.filter(([role, req]) => !(role === "sisyphus" && req.requiresAnyModel)) .filter(([role, req]) => !(role === "sisyphus" && req.requiresAnyModel))
.map(([role]) => [role, { model: ULTIMATE_FALLBACK }]) .map(([role]) => [role, { model: ULTIMATE_FALLBACK }])
), ),
categories: Object.fromEntries( categories: Object.fromEntries(
Object.keys(CATEGORY_MODEL_REQUIREMENTS).map((cat) => [cat, { model: ULTIMATE_FALLBACK }]) Object.keys(CLI_CATEGORY_MODEL_REQUIREMENTS).map((cat) => [cat, { model: ULTIMATE_FALLBACK }])
), ),
} }
} }
@@ -51,9 +51,9 @@ export function generateModelConfig(config: InstallConfig): GeneratedOmoConfig {
const agents: Record<string, AgentConfig> = {} const agents: Record<string, AgentConfig> = {}
const categories: Record<string, CategoryConfig> = {} const categories: Record<string, CategoryConfig> = {}
for (const [role, req] of Object.entries(AGENT_MODEL_REQUIREMENTS)) { for (const [role, req] of Object.entries(CLI_AGENT_MODEL_REQUIREMENTS)) {
if (role === "librarian") { if (role === "librarian" && avail.zai) {
agents[role] = { model: LIBRARIAN_MODEL } agents[role] = { model: ZAI_MODEL }
continue continue
} }
@@ -75,7 +75,6 @@ export function generateModelConfig(config: InstallConfig): GeneratedOmoConfig {
if (req.requiresAnyModel && !isAnyFallbackEntryAvailable(fallbackChain, avail)) { if (req.requiresAnyModel && !isAnyFallbackEntryAvailable(fallbackChain, avail)) {
continue continue
} }
const resolved = resolveModelFromChain(fallbackChain, avail) const resolved = resolveModelFromChain(fallbackChain, avail)
if (resolved) { if (resolved) {
const variant = resolved.variant ?? req.variant const variant = resolved.variant ?? req.variant
@@ -100,11 +99,11 @@ export function generateModelConfig(config: InstallConfig): GeneratedOmoConfig {
} }
} }
for (const [cat, req] of Object.entries(CATEGORY_MODEL_REQUIREMENTS)) { for (const [cat, req] of Object.entries(CLI_CATEGORY_MODEL_REQUIREMENTS)) {
// Special case: unspecified-high downgrades to unspecified-low when not isMaxPlan // Special case: unspecified-high downgrades to unspecified-low when not isMaxPlan
const fallbackChain = const fallbackChain =
cat === "unspecified-high" && !avail.isMaxPlan cat === "unspecified-high" && !avail.isMaxPlan
? CATEGORY_MODEL_REQUIREMENTS["unspecified-low"].fallbackChain ? CLI_CATEGORY_MODEL_REQUIREMENTS["unspecified-low"].fallbackChain
: req.fallbackChain : req.fallbackChain
if (req.requiresModel && !isRequiredModelAvailable(req.requiresModel, req.fallbackChain, avail)) { if (req.requiresModel && !isRequiredModelAvailable(req.requiresModel, req.fallbackChain, avail)) {

View File

@@ -0,0 +1,191 @@
import { describe, expect, test } from "bun:test"
import { transformModelForProvider } from "./provider-model-id-transform"
describe("transformModelForProvider", () => {
describe("github-copilot provider", () => {
test("transforms claude-opus-4-6 to claude-opus-4.6", () => {
// #given github-copilot provider and claude-opus-4-6 model
const provider = "github-copilot"
const model = "claude-opus-4-6"
// #when transformModelForProvider is called
const result = transformModelForProvider(provider, model)
// #then should transform to claude-opus-4.6
expect(result).toBe("claude-opus-4.6")
})
test("transforms claude-sonnet-4-5 to claude-sonnet-4.5", () => {
// #given github-copilot provider and claude-sonnet-4-5 model
const provider = "github-copilot"
const model = "claude-sonnet-4-5"
// #when transformModelForProvider is called
const result = transformModelForProvider(provider, model)
// #then should transform to claude-sonnet-4.5
expect(result).toBe("claude-sonnet-4.5")
})
test("transforms claude-haiku-4-5 to claude-haiku-4.5", () => {
// #given github-copilot provider and claude-haiku-4-5 model
const provider = "github-copilot"
const model = "claude-haiku-4-5"
// #when transformModelForProvider is called
const result = transformModelForProvider(provider, model)
// #then should transform to claude-haiku-4.5
expect(result).toBe("claude-haiku-4.5")
})
test("transforms gemini-3-pro to gemini-3-pro-preview", () => {
// #given github-copilot provider and gemini-3-pro model
const provider = "github-copilot"
const model = "gemini-3-pro"
// #when transformModelForProvider is called
const result = transformModelForProvider(provider, model)
// #then should transform to gemini-3-pro-preview
expect(result).toBe("gemini-3-pro-preview")
})
test("transforms gemini-3-flash to gemini-3-flash-preview", () => {
// #given github-copilot provider and gemini-3-flash model
const provider = "github-copilot"
const model = "gemini-3-flash"
// #when transformModelForProvider is called
const result = transformModelForProvider(provider, model)
// #then should transform to gemini-3-flash-preview
expect(result).toBe("gemini-3-flash-preview")
})
test("prevents double transformation of gemini-3-pro-preview", () => {
// #given github-copilot provider and gemini-3-pro-preview model (already transformed)
const provider = "github-copilot"
const model = "gemini-3-pro-preview"
// #when transformModelForProvider is called
const result = transformModelForProvider(provider, model)
// #then should NOT become gemini-3-pro-preview-preview
expect(result).toBe("gemini-3-pro-preview")
})
test("prevents double transformation of gemini-3-flash-preview", () => {
// #given github-copilot provider and gemini-3-flash-preview model (already transformed)
const provider = "github-copilot"
const model = "gemini-3-flash-preview"
// #when transformModelForProvider is called
const result = transformModelForProvider(provider, model)
// #then should NOT become gemini-3-flash-preview-preview
expect(result).toBe("gemini-3-flash-preview")
})
})
describe("google provider", () => {
test("transforms gemini-3-flash to gemini-3-flash-preview", () => {
// #given google provider and gemini-3-flash model
const provider = "google"
const model = "gemini-3-flash"
// #when transformModelForProvider is called
const result = transformModelForProvider(provider, model)
// #then should transform to gemini-3-flash-preview
expect(result).toBe("gemini-3-flash-preview")
})
test("transforms gemini-3-pro to gemini-3-pro-preview", () => {
// #given google provider and gemini-3-pro model
const provider = "google"
const model = "gemini-3-pro"
// #when transformModelForProvider is called
const result = transformModelForProvider(provider, model)
// #then should transform to gemini-3-pro-preview
expect(result).toBe("gemini-3-pro-preview")
})
test("passes through other gemini models unchanged", () => {
// #given google provider and gemini-2.5-flash model
const provider = "google"
const model = "gemini-2.5-flash"
// #when transformModelForProvider is called
const result = transformModelForProvider(provider, model)
// #then should pass through unchanged
expect(result).toBe("gemini-2.5-flash")
})
test("prevents double transformation of gemini-3-flash-preview", () => {
// #given google provider and gemini-3-flash-preview model (already transformed)
const provider = "google"
const model = "gemini-3-flash-preview"
// #when transformModelForProvider is called
const result = transformModelForProvider(provider, model)
// #then should NOT become gemini-3-flash-preview-preview
expect(result).toBe("gemini-3-flash-preview")
})
test("prevents double transformation of gemini-3-pro-preview", () => {
// #given google provider and gemini-3-pro-preview model (already transformed)
const provider = "google"
const model = "gemini-3-pro-preview"
// #when transformModelForProvider is called
const result = transformModelForProvider(provider, model)
// #then should NOT become gemini-3-pro-preview-preview
expect(result).toBe("gemini-3-pro-preview")
})
test("does not transform claude models for google provider", () => {
// #given google provider and claude-opus-4-6 model
const provider = "google"
const model = "claude-opus-4-6"
// #when transformModelForProvider is called
const result = transformModelForProvider(provider, model)
// #then should pass through unchanged (google doesn't use claude)
expect(result).toBe("claude-opus-4-6")
})
})
describe("unknown provider", () => {
test("passes model through unchanged for unknown provider", () => {
// #given unknown provider and any model
const provider = "unknown-provider"
const model = "some-model"
// #when transformModelForProvider is called
const result = transformModelForProvider(provider, model)
// #then should pass through unchanged
expect(result).toBe("some-model")
})
test("passes gemini-3-flash through unchanged for unknown provider", () => {
// #given unknown provider and gemini-3-flash model
const provider = "unknown-provider"
const model = "gemini-3-flash"
// #when transformModelForProvider is called
const result = transformModelForProvider(provider, model)
// #then should pass through unchanged (no transformation for unknown provider)
expect(result).toBe("gemini-3-flash")
})
})
})

View File

@@ -1,12 +1 @@
export function transformModelForProvider(provider: string, model: string): string { export { transformModelForProvider } from "../shared/provider-model-id-transform"
if (provider === "github-copilot") {
return model
.replace("claude-opus-4-6", "claude-opus-4.6")
.replace("claude-sonnet-4-6", "claude-sonnet-4.6")
.replace("claude-haiku-4-5", "claude-haiku-4.5")
.replace("claude-sonnet-4", "claude-sonnet-4")
.replace("gemini-3-pro", "gemini-3-pro-preview")
.replace("gemini-3-flash", "gemini-3-flash-preview")
}
return model
}

View File

@@ -11,8 +11,6 @@ export {
RalphLoopConfigSchema, RalphLoopConfigSchema,
TmuxConfigSchema, TmuxConfigSchema,
TmuxLayoutSchema, TmuxLayoutSchema,
RuntimeFallbackConfigSchema,
FallbackModelsSchema,
} from "./schema" } from "./schema"
export type { export type {
@@ -31,6 +29,4 @@ export type {
TmuxLayout, TmuxLayout,
SisyphusConfig, SisyphusConfig,
SisyphusTasksConfig, SisyphusTasksConfig,
RuntimeFallbackConfig,
FallbackModels,
} from "./schema" } from "./schema"

View File

@@ -9,13 +9,11 @@ export * from "./schema/comment-checker"
export * from "./schema/commands" export * from "./schema/commands"
export * from "./schema/dynamic-context-pruning" export * from "./schema/dynamic-context-pruning"
export * from "./schema/experimental" export * from "./schema/experimental"
export * from "./schema/fallback-models"
export * from "./schema/git-master" export * from "./schema/git-master"
export * from "./schema/hooks" export * from "./schema/hooks"
export * from "./schema/notification" export * from "./schema/notification"
export * from "./schema/oh-my-opencode-config" export * from "./schema/oh-my-opencode-config"
export * from "./schema/ralph-loop" export * from "./schema/ralph-loop"
export * from "./schema/runtime-fallback"
export * from "./schema/skills" export * from "./schema/skills"
export * from "./schema/sisyphus" export * from "./schema/sisyphus"
export * from "./schema/sisyphus-agent" export * from "./schema/sisyphus-agent"

View File

@@ -1,11 +1,9 @@
import { z } from "zod" import { z } from "zod"
import { FallbackModelsSchema } from "./fallback-models"
import { AgentPermissionSchema } from "./internal/permission" import { AgentPermissionSchema } from "./internal/permission"
export const AgentOverrideConfigSchema = z.object({ export const AgentOverrideConfigSchema = z.object({
/** @deprecated Use `category` instead. Model is inherited from category defaults. */ /** @deprecated Use `category` instead. Model is inherited from category defaults. */
model: z.string().optional(), model: z.string().optional(),
fallback_models: FallbackModelsSchema.optional(),
variant: z.string().optional(), variant: z.string().optional(),
/** Category name to inherit model and other settings from CategoryConfig */ /** Category name to inherit model and other settings from CategoryConfig */
category: z.string().optional(), category: z.string().optional(),

View File

@@ -1,11 +1,9 @@
import { z } from "zod" import { z } from "zod"
import { FallbackModelsSchema } from "./fallback-models"
export const CategoryConfigSchema = z.object({ export const CategoryConfigSchema = z.object({
/** Human-readable description of the category's purpose. Shown in task prompt. */ /** Human-readable description of the category's purpose. Shown in task prompt. */
description: z.string().optional(), description: z.string().optional(),
model: z.string().optional(), model: z.string().optional(),
fallback_models: FallbackModelsSchema.optional(),
variant: z.string().optional(), variant: z.string().optional(),
temperature: z.number().min(0).max(2).optional(), temperature: z.number().min(0).max(2).optional(),
top_p: z.number().min(0).max(1).optional(), top_p: z.number().min(0).max(1).optional(),

View File

@@ -17,6 +17,10 @@ export const ExperimentalConfigSchema = z.object({
safe_hook_creation: z.boolean().optional(), safe_hook_creation: z.boolean().optional(),
/** Disable auto-injected <omo-env> context in prompts (experimental) */ /** Disable auto-injected <omo-env> context in prompts (experimental) */
disable_omo_env: z.boolean().optional(), disable_omo_env: z.boolean().optional(),
/** Enable hashline_edit tool for improved file editing with hash-based line anchors */
hashline_edit: z.boolean().optional(),
/** Append fallback model info to session title when a runtime fallback occurs (default: false) */
model_fallback_title: z.boolean().optional(),
}) })
export type ExperimentalConfig = z.infer<typeof ExperimentalConfigSchema> export type ExperimentalConfig = z.infer<typeof ExperimentalConfigSchema>

View File

@@ -1,5 +0,0 @@
import { z } from "zod"
export const FallbackModelsSchema = z.union([z.string(), z.array(z.string())])
export type FallbackModels = z.infer<typeof FallbackModelsSchema>

View File

@@ -13,6 +13,7 @@ export const HookNameSchema = z.enum([
"directory-readme-injector", "directory-readme-injector",
"empty-task-response-detector", "empty-task-response-detector",
"think-mode", "think-mode",
"model-fallback",
"anthropic-context-window-limit-recovery", "anthropic-context-window-limit-recovery",
"preemptive-compaction", "preemptive-compaction",
"rules-injector", "rules-injector",
@@ -25,6 +26,7 @@ export const HookNameSchema = z.enum([
"interactive-bash-session", "interactive-bash-session",
"thinking-block-validator", "thinking-block-validator",
"beast-mode-system",
"ralph-loop", "ralph-loop",
"category-skill-reminder", "category-skill-reminder",
@@ -46,7 +48,6 @@ export const HookNameSchema = z.enum([
"task-resume-info", "task-resume-info",
"stop-continuation-guard", "stop-continuation-guard",
"tasks-todowrite-disabler", "tasks-todowrite-disabler",
"runtime-fallback",
"write-existing-file-guard", "write-existing-file-guard",
"anthropic-effort", "anthropic-effort",
"hashline-read-enhancer", "hashline-read-enhancer",

View File

@@ -14,7 +14,6 @@ import { GitMasterConfigSchema } from "./git-master"
import { HookNameSchema } from "./hooks" import { HookNameSchema } from "./hooks"
import { NotificationConfigSchema } from "./notification" import { NotificationConfigSchema } from "./notification"
import { RalphLoopConfigSchema } from "./ralph-loop" import { RalphLoopConfigSchema } from "./ralph-loop"
import { RuntimeFallbackConfigSchema } from "./runtime-fallback"
import { SkillsConfigSchema } from "./skills" import { SkillsConfigSchema } from "./skills"
import { SisyphusConfigSchema } from "./sisyphus" import { SisyphusConfigSchema } from "./sisyphus"
import { SisyphusAgentConfigSchema } from "./sisyphus-agent" import { SisyphusAgentConfigSchema } from "./sisyphus-agent"
@@ -45,7 +44,6 @@ export const OhMyOpenCodeConfigSchema = z.object({
auto_update: z.boolean().optional(), auto_update: z.boolean().optional(),
skills: SkillsConfigSchema.optional(), skills: SkillsConfigSchema.optional(),
ralph_loop: RalphLoopConfigSchema.optional(), ralph_loop: RalphLoopConfigSchema.optional(),
runtime_fallback: RuntimeFallbackConfigSchema.optional(),
background_task: BackgroundTaskConfigSchema.optional(), background_task: BackgroundTaskConfigSchema.optional(),
notification: NotificationConfigSchema.optional(), notification: NotificationConfigSchema.optional(),
babysitting: BabysittingConfigSchema.optional(), babysitting: BabysittingConfigSchema.optional(),

View File

@@ -1,18 +0,0 @@
import { z } from "zod"
export const RuntimeFallbackConfigSchema = z.object({
/** Enable runtime fallback (default: true) */
enabled: z.boolean().optional(),
/** HTTP status codes that trigger fallback (default: [400, 429, 503, 529]) */
retry_on_errors: z.array(z.number()).optional(),
/** Maximum fallback attempts per session (default: 3) */
max_fallback_attempts: z.number().min(1).max(20).optional(),
/** Cooldown in seconds before retrying a failed model (default: 60) */
cooldown_seconds: z.number().min(0).optional(),
/** Session-level timeout in seconds to advance fallback when provider hangs (default: 30). Set to 0 to disable auto-retry signal detection (only error-based fallback remains active). */
timeout_seconds: z.number().min(0).optional(),
/** Show toast notification when switching to fallback model (default: true) */
notify_on_fallback: z.boolean().optional(),
})
export type RuntimeFallbackConfig = z.infer<typeof RuntimeFallbackConfigSchema>

View File

@@ -2920,6 +2920,39 @@ describe("BackgroundManager.handleEvent - session.deleted cascade", () => {
}) })
describe("BackgroundManager.handleEvent - session.error", () => { describe("BackgroundManager.handleEvent - session.error", () => {
const defaultRetryFallbackChain = [
{ providers: ["quotio"], model: "claude-opus-4-6", variant: "max" },
{ providers: ["quotio"], model: "gpt-5.3-codex", variant: "high" },
]
const stubProcessKey = (manager: BackgroundManager) => {
;(manager as unknown as { processKey: (key: string) => Promise<void> }).processKey = async () => {}
}
const createRetryTask = (manager: BackgroundManager, input: {
id: string
sessionID: string
description: string
concurrencyKey?: string
fallbackChain?: typeof defaultRetryFallbackChain
}) => {
const task = createMockTask({
id: input.id,
sessionID: input.sessionID,
parentSessionID: "parent-session",
parentMessageID: "msg-retry",
description: input.description,
agent: "sisyphus",
status: "running",
concurrencyKey: input.concurrencyKey,
model: { providerID: "quotio", modelID: "claude-opus-4-6-thinking" },
fallbackChain: input.fallbackChain ?? defaultRetryFallbackChain,
attemptCount: 0,
})
getTaskMap(manager).set(task.id, task)
return task
}
test("sets task to error, releases concurrency, and cleans up", async () => { test("sets task to error, releases concurrency, and cleans up", async () => {
//#given //#given
const manager = createBackgroundManager() const manager = createBackgroundManager()
@@ -3046,6 +3079,135 @@ describe("BackgroundManager.handleEvent - session.error", () => {
manager.shutdown() manager.shutdown()
}) })
// Full session.error retry path: the held concurrency slot must be released,
// the task requeued on the first fallback entry, and the current provider
// ("quotio") preferred when selecting from the entry's provider list.
test("retry path releases current concurrency slot and prefers current provider in fallback entry", async () => {
  //#given
  const manager = createBackgroundManager()
  const concurrencyManager = getConcurrencyManager(manager)
  const concurrencyKey = "quotio/claude-opus-4-6-thinking"
  await concurrencyManager.acquire(concurrencyKey)
  stubProcessKey(manager)
  const sessionID = "ses_error_retry"
  const task = createRetryTask(manager, {
    id: "task-session-error-retry",
    sessionID,
    description: "task that should retry",
    concurrencyKey,
    fallbackChain: [
      { providers: ["quotio"], model: "claude-opus-4-6", variant: "max" },
      { providers: ["quotio"], model: "claude-opus-4-5" },
    ],
  })
  //#when
  // The message embeds a provider-error payload that the classifier treats as retryable.
  manager.handleEvent({
    type: "session.error",
    properties: {
      sessionID,
      error: {
        name: "UnknownError",
        data: {
          message:
            "Bad Gateway: {\"error\":{\"message\":\"unknown provider for model claude-opus-4-6-thinking\"}}",
        },
      },
    },
  })
  //#then
  // Requeued on the first fallback entry with its variant preserved.
  expect(task.status).toBe("pending")
  expect(task.attemptCount).toBe(1)
  expect(task.model).toEqual({
    providerID: "quotio",
    modelID: "claude-opus-4-6",
    variant: "max",
  })
  // The slot must be freed so the requeued task can re-acquire under its new key.
  expect(task.concurrencyKey).toBeUndefined()
  expect(concurrencyManager.getCount(concurrencyKey)).toBe(0)
  manager.shutdown()
})
// A session.status event with status.type === "retry" must drive the same
// fallback path as session.error.
test("retry path triggers on session.status retry events", async () => {
  //#given
  const manager = createBackgroundManager()
  stubProcessKey(manager)
  const sessionID = "ses_status_retry"
  const task = createRetryTask(manager, {
    id: "task-status-retry",
    sessionID,
    description: "task that should retry on status",
  })
  //#when
  manager.handleEvent({
    type: "session.status",
    properties: {
      sessionID,
      status: {
        type: "retry",
        message: "Provider is overloaded",
      },
    },
  })
  //#then
  expect(task.status).toBe("pending")
  expect(task.attemptCount).toBe(1)
  // Expected model is the first entry of defaultRetryFallbackChain (defined
  // elsewhere in this file) — presumably quotio/claude-opus-4-6 with the
  // "max" variant, matching the explicit chain in the session.error test.
  expect(task.model).toEqual({
    providerID: "quotio",
    modelID: "claude-opus-4-6",
    variant: "max",
  })
  manager.shutdown()
})
// An assistant message carrying an error in message.updated must also drive
// the fallback path (third trigger alongside session.error and session.status).
test("retry path triggers on message.updated assistant error events", async () => {
  //#given
  const manager = createBackgroundManager()
  stubProcessKey(manager)
  const sessionID = "ses_message_updated_retry"
  const task = createRetryTask(manager, {
    id: "task-message-updated-retry",
    sessionID,
    description: "task that should retry on message.updated",
  })
  //#when
  manager.handleEvent({
    type: "message.updated",
    properties: {
      info: {
        id: "msg_errored",
        sessionID,
        role: "assistant",
        error: {
          name: "UnknownError",
          data: {
            message:
              "Bad Gateway: {\"error\":{\"message\":\"unknown provider for model claude-opus-4-6-thinking\"}}",
          },
        },
      },
    },
  })
  //#then
  expect(task.status).toBe("pending")
  expect(task.attemptCount).toBe(1)
  expect(task.model).toEqual({
    providerID: "quotio",
    modelID: "claude-opus-4-6",
    variant: "max",
  })
  manager.shutdown()
})
}) })
describe("BackgroundManager queue processing - error tasks are skipped", () => { describe("BackgroundManager queue processing - error tasks are skipped", () => {

View File

@@ -5,6 +5,7 @@ import type {
LaunchInput, LaunchInput,
ResumeInput, ResumeInput,
} from "./types" } from "./types"
import type { FallbackEntry } from "../../shared/model-requirements"
import { TaskHistory } from "./task-history" import { TaskHistory } from "./task-history"
import { import {
log, log,
@@ -12,14 +13,21 @@ import {
normalizePromptTools, normalizePromptTools,
normalizeSDKResponse, normalizeSDKResponse,
promptWithModelSuggestionRetry, promptWithModelSuggestionRetry,
readConnectedProvidersCache,
readProviderModelsCache,
resolveInheritedPromptTools, resolveInheritedPromptTools,
createInternalAgentTextPart, createInternalAgentTextPart,
} from "../../shared" } from "../../shared"
import { setSessionTools } from "../../shared/session-tools-store" import { setSessionTools } from "../../shared/session-tools-store"
import { SessionCategoryRegistry } from "../../shared/session-category-registry"
import { ConcurrencyManager } from "./concurrency" import { ConcurrencyManager } from "./concurrency"
import type { BackgroundTaskConfig, TmuxConfig } from "../../config/schema" import type { BackgroundTaskConfig, TmuxConfig } from "../../config/schema"
import { isInsideTmux } from "../../shared/tmux" import { isInsideTmux } from "../../shared/tmux"
import {
shouldRetryError,
getNextFallback,
hasMoreFallbacks,
selectFallbackProvider,
} from "../../shared/model-error-classifier"
import { import {
DEFAULT_MESSAGE_STALENESS_TIMEOUT_MS, DEFAULT_MESSAGE_STALENESS_TIMEOUT_MS,
DEFAULT_STALE_TIMEOUT_MS, DEFAULT_STALE_TIMEOUT_MS,
@@ -157,6 +165,8 @@ export class BackgroundManager {
parentAgent: input.parentAgent, parentAgent: input.parentAgent,
parentTools: input.parentTools, parentTools: input.parentTools,
model: input.model, model: input.model,
fallbackChain: input.fallbackChain,
attemptCount: 0,
category: input.category, category: input.category,
} }
@@ -678,6 +688,27 @@ export class BackgroundManager {
handleEvent(event: Event): void { handleEvent(event: Event): void {
const props = event.properties const props = event.properties
if (event.type === "message.updated") {
const info = props?.info
if (!info || typeof info !== "object") return
const sessionID = (info as Record<string, unknown>)["sessionID"]
const role = (info as Record<string, unknown>)["role"]
if (typeof sessionID !== "string" || role !== "assistant") return
const task = this.findBySession(sessionID)
if (!task || task.status !== "running") return
const assistantError = (info as Record<string, unknown>)["error"]
if (!assistantError) return
const errorInfo = {
name: this.extractErrorName(assistantError),
message: this.extractErrorMessage(assistantError),
}
this.tryFallbackRetry(task, errorInfo, "message.updated")
}
if (event.type === "message.part.updated" || event.type === "message.part.delta") { if (event.type === "message.part.updated" || event.type === "message.part.delta") {
if (!props || typeof props !== "object" || !("sessionID" in props)) return if (!props || typeof props !== "object" || !("sessionID" in props)) return
const partInfo = props as unknown as MessagePartInfo const partInfo = props as unknown as MessagePartInfo
@@ -774,10 +805,29 @@ export class BackgroundManager {
const task = this.findBySession(sessionID) const task = this.findBySession(sessionID)
if (!task || task.status !== "running") return if (!task || task.status !== "running") return
const errorObj = props?.error as { name?: string; message?: string } | undefined
const errorName = errorObj?.name
const errorMessage = props ? this.getSessionErrorMessage(props) : undefined const errorMessage = props ? this.getSessionErrorMessage(props) : undefined
const errorInfo = { name: errorName, message: errorMessage }
if (this.tryFallbackRetry(task, errorInfo, "session.error")) return
// Original error handling (no retry)
const errorMsg = errorMessage ?? "Session error"
const canRetry =
shouldRetryError(errorInfo) &&
!!task.fallbackChain &&
hasMoreFallbacks(task.fallbackChain, task.attemptCount ?? 0)
log("[background-agent] Session error - no retry:", {
taskId: task.id,
errorName,
errorMessage: errorMsg?.slice(0, 100),
hasFallbackChain: !!task.fallbackChain,
canRetry,
})
task.status = "error" task.status = "error"
task.error = errorMessage ?? "Session error" task.error = errorMsg
task.completedAt = new Date() task.completedAt = new Date()
this.taskHistory.record(task.parentSessionID, { id: task.id, sessionID: task.sessionID, agent: task.agent, description: task.description, status: "error", category: task.category, startedAt: task.startedAt, completedAt: task.completedAt }) this.taskHistory.record(task.parentSessionID, { id: task.id, sessionID: task.sessionID, agent: task.agent, description: task.description, status: "error", category: task.category, startedAt: task.startedAt, completedAt: task.completedAt })
@@ -860,8 +910,130 @@ export class BackgroundManager {
subagentSessions.delete(task.sessionID) subagentSessions.delete(task.sessionID)
} }
} }
SessionCategoryRegistry.remove(sessionID)
} }
if (event.type === "session.status") {
const sessionID = props?.sessionID as string | undefined
const status = props?.status as { type?: string; message?: string } | undefined
if (!sessionID || status?.type !== "retry") return
const task = this.findBySession(sessionID)
if (!task || task.status !== "running") return
const errorMessage = typeof status.message === "string" ? status.message : undefined
const errorInfo = { name: "SessionRetry", message: errorMessage }
this.tryFallbackRetry(task, errorInfo, "session.status")
}
}
/**
 * Attempts to requeue a failed task on the next reachable entry of its
 * fallback chain.
 *
 * @param task - Background task whose session reported an error.
 * @param errorInfo - Error name/message extracted from the triggering event.
 * @param source - Label of the event path that invoked the retry (logging only).
 * @returns true when the task was requeued on a fallback model; false when no
 *   retry applies (non-retryable error, absent/exhausted chain, or no
 *   reachable entry).
 */
private tryFallbackRetry(
  task: BackgroundTask,
  errorInfo: { name?: string; message?: string },
  source: string,
): boolean {
  const fallbackChain = task.fallbackChain
  // Retry only for classifier-approved errors while unconsumed entries remain;
  // attemptCount indexes the next chain entry to try.
  const canRetry =
    shouldRetryError(errorInfo) &&
    fallbackChain &&
    fallbackChain.length > 0 &&
    hasMoreFallbacks(fallbackChain, task.attemptCount ?? 0)
  if (!canRetry) return false
  const attemptCount = task.attemptCount ?? 0
  const providerModelsCache = readProviderModelsCache()
  const connectedProviders = providerModelsCache?.connected ?? readConnectedProvidersCache()
  const connectedSet = connectedProviders ? new Set(connectedProviders) : null
  const isReachable = (entry: FallbackEntry): boolean => {
    // No connectivity info at all: optimistically treat every entry as reachable.
    if (!connectedSet) return true
    // Gate only on provider connectivity. Provider model lists can be stale/incomplete,
    // especially after users manually add models to opencode.json.
    return entry.providers.some((p) => connectedSet.has(p))
  }
  // Walk forward through the chain, consuming (and logging) unreachable
  // entries so they are not re-evaluated on subsequent failures.
  let selectedAttemptCount = attemptCount
  let nextFallback: FallbackEntry | undefined
  while (fallbackChain && selectedAttemptCount < fallbackChain.length) {
    const candidate = getNextFallback(fallbackChain, selectedAttemptCount)
    if (!candidate) break
    selectedAttemptCount++
    if (!isReachable(candidate)) {
      log("[background-agent] Skipping unreachable fallback:", {
        taskId: task.id,
        source,
        model: candidate.model,
        providers: candidate.providers,
      })
      continue
    }
    nextFallback = candidate
    break
  }
  if (!nextFallback) return false
  const providerID = selectFallbackProvider(
    nextFallback.providers,
    task.model?.providerID,
  )
  log("[background-agent] Retryable error, attempting fallback:", {
    taskId: task.id,
    source,
    errorName: errorInfo.name,
    errorMessage: errorInfo.message?.slice(0, 100),
    attemptCount: selectedAttemptCount,
    nextModel: `${providerID}/${nextFallback.model}`,
  })
  // Free the current concurrency slot; the requeued attempt re-acquires under
  // its new model key.
  if (task.concurrencyKey) {
    this.concurrencyManager.release(task.concurrencyKey)
    task.concurrencyKey = undefined
  }
  // Abort the errored session (best-effort) and drop its subagent registration.
  if (task.sessionID) {
    this.client.session.abort({ path: { id: task.sessionID } }).catch(() => {})
    subagentSessions.delete(task.sessionID)
  }
  // Cancel any pending idle-deferral timer for the old attempt.
  const idleTimer = this.idleDeferralTimers.get(task.id)
  if (idleTimer) {
    clearTimeout(idleTimer)
    this.idleDeferralTimers.delete(task.id)
  }
  // Reset the task to a fresh pending state pointing at the fallback model.
  task.attemptCount = selectedAttemptCount
  task.model = {
    providerID,
    modelID: nextFallback.model,
    variant: nextFallback.variant,
  }
  task.status = "pending"
  task.sessionID = undefined
  task.startedAt = undefined
  task.queuedAt = new Date()
  task.error = undefined
  // Requeue under the new provider/model key and kick the queue processor.
  const key = task.model ? `${task.model.providerID}/${task.model.modelID}` : task.agent
  const queue = this.queuesByKey.get(key) ?? []
  const retryInput: LaunchInput = {
    description: task.description,
    prompt: task.prompt,
    agent: task.agent,
    parentSessionID: task.parentSessionID,
    parentMessageID: task.parentMessageID,
    parentModel: task.parentModel,
    parentAgent: task.parentAgent,
    parentTools: task.parentTools,
    model: task.model,
    fallbackChain: task.fallbackChain,
    category: task.category,
  }
  queue.push({ task, input: retryInput })
  this.queuesByKey.set(key, queue)
  this.processKey(key)
  return true
} }
markForNotification(task: BackgroundTask): void { markForNotification(task: BackgroundTask): void {
@@ -1024,8 +1196,6 @@ export class BackgroundManager {
this.client.session.abort({ this.client.session.abort({
path: { id: task.sessionID }, path: { id: task.sessionID },
}).catch(() => {}) }).catch(() => {})
SessionCategoryRegistry.remove(task.sessionID)
} }
if (options?.skipNotification) { if (options?.skipNotification) {
@@ -1173,8 +1343,6 @@ export class BackgroundManager {
this.client.session.abort({ this.client.session.abort({
path: { id: task.sessionID }, path: { id: task.sessionID },
}).catch(() => {}) }).catch(() => {})
SessionCategoryRegistry.remove(task.sessionID)
} }
try { try {
@@ -1279,10 +1447,13 @@ Use \`background_output(task_id="${task.id}")\` to retrieve this result when rea
if (isCompactionAgent(info?.agent)) { if (isCompactionAgent(info?.agent)) {
continue continue
} }
if (info?.agent || info?.model || (info?.modelID && info?.providerID)) { const normalizedTools = this.isRecord(info?.tools)
agent = info.agent ?? task.parentAgent ? normalizePromptTools(info.tools as Record<string, boolean | "allow" | "deny" | "ask">)
model = info.model ?? (info.providerID && info.modelID ? { providerID: info.providerID, modelID: info.modelID } : undefined) : undefined
tools = normalizePromptTools(info.tools) ?? tools if (info?.agent || info?.model || (info?.modelID && info?.providerID) || normalizedTools) {
agent = info?.agent ?? task.parentAgent
model = info?.model ?? (info?.providerID && info?.modelID ? { providerID: info.providerID, modelID: info.modelID } : undefined)
tools = normalizedTools ?? tools
break break
} }
} }
@@ -1302,7 +1473,7 @@ Use \`background_output(task_id="${task.id}")\` to retrieve this result when rea
tools = normalizePromptTools(currentMessage?.tools) ?? tools tools = normalizePromptTools(currentMessage?.tools) ?? tools
} }
tools = resolveInheritedPromptTools(task.parentSessionID, tools) const resolvedTools = resolveInheritedPromptTools(task.parentSessionID, tools)
log("[background-agent] notifyParentSession context:", { log("[background-agent] notifyParentSession context:", {
taskId: task.id, taskId: task.id,
@@ -1317,7 +1488,7 @@ Use \`background_output(task_id="${task.id}")\` to retrieve this result when rea
noReply: !allComplete, noReply: !allComplete,
...(agent !== undefined ? { agent } : {}), ...(agent !== undefined ? { agent } : {}),
...(model !== undefined ? { model } : {}), ...(model !== undefined ? { model } : {}),
...(tools ? { tools } : {}), ...(resolvedTools ? { tools: resolvedTools } : {}),
parts: [createInternalAgentTextPart(notification)], parts: [createInternalAgentTextPart(notification)],
}, },
}) })
@@ -1400,6 +1571,46 @@ Use \`background_output(task_id="${task.id}")\` to retrieve this result when rea
return "" return ""
} }
/** Pulls an error name from a record's "name" field, or from an Error instance. */
private extractErrorName(error: unknown): string | undefined {
  if (this.isRecord(error)) {
    const name = error["name"]
    if (typeof name === "string") return name
  }
  return error instanceof Error ? error.name : undefined
}
/**
 * Best-effort extraction of a message from heterogeneous error shapes:
 * plain strings, Error instances, and records with nested payloads
 * (data, error, data.error, cause). Falls back to serializing the value.
 */
private extractErrorMessage(error: unknown): string | undefined {
  if (!error) return undefined
  if (typeof error === "string") return error
  if (error instanceof Error) return error.message
  if (this.isRecord(error)) {
    const dataRaw = error["data"]
    const nestedDataError = this.isRecord(dataRaw)
      ? (dataRaw as Record<string, unknown>)["error"]
      : undefined
    // Check the record itself first, then the common nested holders,
    // accepting the first non-empty string or string "message" field.
    const candidates: unknown[] = [error, dataRaw, error["error"], nestedDataError, error["cause"]]
    for (const candidate of candidates) {
      if (typeof candidate === "string" && candidate.length > 0) return candidate
      if (this.isRecord(candidate)) {
        const message = candidate["message"]
        if (typeof message === "string" && message.length > 0) return message
      }
    }
  }
  // Last resort: serialize; String() covers values JSON.stringify rejects.
  try {
    return JSON.stringify(error)
  } catch {
    return String(error)
  }
}
private isRecord(value: unknown): value is Record<string, unknown> { private isRecord(value: unknown): value is Record<string, unknown> {
return typeof value === "object" && value !== null return typeof value === "object" && value !== null
} }
@@ -1477,7 +1688,6 @@ Use \`background_output(task_id="${task.id}")\` to retrieve this result when rea
this.tasks.delete(taskId) this.tasks.delete(taskId)
if (task.sessionID) { if (task.sessionID) {
subagentSessions.delete(task.sessionID) subagentSessions.delete(task.sessionID)
SessionCategoryRegistry.remove(task.sessionID)
} }
} }
} }
@@ -1617,6 +1827,16 @@ Use \`background_output(task_id="${task.id}")\` to retrieve this result when rea
// Progress is already tracked via handleEvent(message.part.updated), // Progress is already tracked via handleEvent(message.part.updated),
// so we skip the expensive session.messages() fetch here. // so we skip the expensive session.messages() fetch here.
// Completion will be detected when session transitions to idle. // Completion will be detected when session transitions to idle.
if (sessionStatus?.type === "retry") {
const retryMessage = typeof (sessionStatus as { message?: string }).message === "string"
? (sessionStatus as { message?: string }).message
: undefined
const errorInfo = { name: "SessionRetry", message: retryMessage }
if (this.tryFallbackRetry(task, errorInfo, "polling:session.status")) {
continue
}
}
log("[background-agent] Session still running, relying on event-based progress:", { log("[background-agent] Session still running, relying on event-based progress:", {
taskId: task.id, taskId: task.id,
sessionID, sessionID,

View File

@@ -1,3 +1,5 @@
import type { FallbackEntry } from "../../shared/model-requirements"
export type BackgroundTaskStatus = export type BackgroundTaskStatus =
| "pending" | "pending"
| "running" | "running"
@@ -31,6 +33,10 @@ export interface BackgroundTask {
progress?: TaskProgress progress?: TaskProgress
parentModel?: { providerID: string; modelID: string } parentModel?: { providerID: string; modelID: string }
model?: { providerID: string; modelID: string; variant?: string } model?: { providerID: string; modelID: string; variant?: string }
/** Fallback chain for runtime retry on model errors */
fallbackChain?: FallbackEntry[]
/** Number of fallback retry attempts made */
attemptCount?: number
/** Active concurrency slot key */ /** Active concurrency slot key */
concurrencyKey?: string concurrencyKey?: string
/** Persistent key for re-acquiring concurrency on resume */ /** Persistent key for re-acquiring concurrency on resume */
@@ -60,6 +66,8 @@ export interface LaunchInput {
parentAgent?: string parentAgent?: string
parentTools?: Record<string, boolean> parentTools?: Record<string, boolean>
model?: { providerID: string; modelID: string; variant?: string } model?: { providerID: string; modelID: string; variant?: string }
/** Fallback chain for runtime retry on model errors */
fallbackChain?: FallbackEntry[]
isUnstableAgent?: boolean isUnstableAgent?: boolean
skills?: string[] skills?: string[]
skillContent?: string skillContent?: string

View File

@@ -1,4 +1,5 @@
export const subagentSessions = new Set<string>() export const subagentSessions = new Set<string>()
export const syncSubagentSessions = new Set<string>()
let _mainSessionID: string | undefined let _mainSessionID: string | undefined
@@ -14,6 +15,7 @@ export function getMainSessionID(): string | undefined {
export function _resetForTesting(): void { export function _resetForTesting(): void {
_mainSessionID = undefined _mainSessionID = undefined
subagentSessions.clear() subagentSessions.clear()
syncSubagentSessions.clear()
sessionAgentMap.clear() sessionAgentMap.clear()
} }

View File

@@ -25,13 +25,13 @@ export function discoverAllSkillsBlocking(dirs: string[], scopes: SkillScope[]):
const { port1, port2 } = new MessageChannel() const { port1, port2 } = new MessageChannel()
const worker = new Worker(new URL("./discover-worker.ts", import.meta.url), { const worker = new Worker(new URL("./discover-worker.ts", import.meta.url), {
workerData: { signal } // workerData is structured-cloned; pass the SharedArrayBuffer and recreate the view in the worker.
workerData: { signalBuffer: signal.buffer },
}) })
worker.postMessage({ port: port2 }, [port2])
const input: WorkerInput = { dirs, scopes } const input: WorkerInput = { dirs, scopes }
port1.postMessage(input) // Avoid a race where the worker hasn't attached listeners to the MessagePort yet.
worker.postMessage({ port: port2, input }, [port2])
const waitResult = Atomics.wait(signal, 0, 0, TIMEOUT_MS) const waitResult = Atomics.wait(signal, 0, 0, TIMEOUT_MS)

View File

@@ -18,20 +18,19 @@ interface WorkerOutputError {
error: { message: string; stack?: string } error: { message: string; stack?: string }
} }
const { signal } = workerData as { signal: Int32Array } const { signalBuffer } = workerData as { signalBuffer: SharedArrayBuffer }
const signal = new Int32Array(signalBuffer)
if (!parentPort) { if (!parentPort) {
throw new Error("Worker must be run with parentPort") throw new Error("Worker must be run with parentPort")
} }
parentPort.once("message", (data: { port: MessagePort }) => { parentPort.once("message", (data: { port: MessagePort; input: WorkerInput }) => {
const { port } = data const { port, input } = data
port.on("message", async (input: WorkerInput) => { void (async () => {
try { try {
const results = await Promise.all( const results = await Promise.all(input.dirs.map((dir) => discoverSkillsInDirAsync(dir)))
input.dirs.map(dir => discoverSkillsInDirAsync(dir))
)
const skills = results.flat() const skills = results.flat()
@@ -53,5 +52,5 @@ parentPort.once("message", (data: { port: MessagePort }) => {
Atomics.store(signal, 0, 1) Atomics.store(signal, 0, 1)
Atomics.notify(signal, 0) Atomics.notify(signal, 0)
} }
}) })()
}) })

View File

@@ -217,6 +217,27 @@ describe("TaskToastManager", () => {
expect(call.body.message).toContain("(inherited from parent)") expect(call.body.message).toContain("(inherited from parent)")
}) })
// "runtime-fallback" is the ModelFallbackInfo type for mid-run model swaps;
// it must surface in the toast like the other fallback kinds.
test("should display warning when model is runtime fallback", () => {
  // given - runtime-fallback indicates a model swap mid-run
  const task = {
    id: "task_runtime",
    description: "Task with runtime fallback model",
    agent: "explore",
    isBackground: false,
    modelInfo: { model: "quotio/oswe-vscode-prime", type: "runtime-fallback" as const },
  }
  // when - addTask is called
  toastManager.addTask(task)
  // then - toast should show fallback warning with the runtime-fallback suffix
  expect(mockClient.tui.showToast).toHaveBeenCalled()
  const call = mockClient.tui.showToast.mock.calls[0][0]
  expect(call.body.message).toContain("[FALLBACK]")
  expect(call.body.message).toContain("quotio/oswe-vscode-prime")
  expect(call.body.message).toContain("(runtime fallback)")
})
test("should not display model info when user-defined", () => { test("should not display model info when user-defined", () => {
// given - a task with user-defined model // given - a task with user-defined model
const task = { const task = {
@@ -257,4 +278,32 @@ describe("TaskToastManager", () => {
expect(call.body.message).not.toContain("[FALLBACK] Model:") expect(call.body.message).not.toContain("[FALLBACK] Model:")
}) })
}) })
// Covers the session-keyed update path: when a runtime fallback is applied,
// the toast for the owning session must be re-shown with the new model info.
describe("updateTaskModelBySession", () => {
  test("updates task model info and shows fallback toast", () => {
    // given - task without model info
    const task = {
      id: "task_update",
      sessionID: "ses_update_1",
      description: "Task that will fallback",
      agent: "explore",
      isBackground: false,
    }
    toastManager.addTask(task)
    // Clear the toast recorded by addTask so the assertion below only sees
    // the toast emitted by the update.
    mockClient.tui.showToast.mockClear()
    // when - runtime fallback applied by session
    toastManager.updateTaskModelBySession("ses_update_1", {
      model: "nvidia/stepfun-ai/step-3.5-flash",
      type: "runtime-fallback",
    })
    // then - new toast shows fallback model
    expect(mockClient.tui.showToast).toHaveBeenCalled()
    const call = mockClient.tui.showToast.mock.calls[0][0]
    expect(call.body.message).toContain("[FALLBACK]")
    expect(call.body.message).toContain("nvidia/stepfun-ai/step-3.5-flash")
    expect(call.body.message).toContain("(runtime fallback)")
  })
})
}) })

View File

@@ -20,6 +20,7 @@ export class TaskToastManager {
addTask(task: { addTask(task: {
id: string id: string
sessionID?: string
description: string description: string
agent: string agent: string
isBackground: boolean isBackground: boolean
@@ -30,6 +31,7 @@ export class TaskToastManager {
}): void { }): void {
const trackedTask: TrackedTask = { const trackedTask: TrackedTask = {
id: task.id, id: task.id,
sessionID: task.sessionID,
description: task.description, description: task.description,
agent: task.agent, agent: task.agent,
status: task.status ?? "running", status: task.status ?? "running",
@@ -54,6 +56,18 @@ export class TaskToastManager {
} }
} }
/**
* Update model info for a task by session ID
*/
updateTaskModelBySession(sessionID: string, modelInfo: ModelFallbackInfo): void {
if (!sessionID) return
const task = Array.from(this.tasks.values()).find((t) => t.sessionID === sessionID)
if (!task) return
if (task.modelInfo?.model === modelInfo.model && task.modelInfo?.type === modelInfo.type) return
task.modelInfo = modelInfo
this.showTaskListToast(task)
}
/** /**
* Remove completed/error task * Remove completed/error task
*/ */
@@ -110,14 +124,17 @@ export class TaskToastManager {
const lines: string[] = [] const lines: string[] = []
const isFallback = newTask.modelInfo && ( const isFallback = newTask.modelInfo && (
newTask.modelInfo.type === "inherited" || newTask.modelInfo.type === "system-default" newTask.modelInfo.type === "inherited" ||
newTask.modelInfo.type === "system-default" ||
newTask.modelInfo.type === "runtime-fallback"
) )
if (isFallback) { if (isFallback) {
const suffixMap: Record<"inherited" | "system-default", string> = { const suffixMap: Record<"inherited" | "system-default" | "runtime-fallback", string> = {
inherited: " (inherited from parent)", inherited: " (inherited from parent)",
"system-default": " (system default fallback)", "system-default": " (system default fallback)",
"runtime-fallback": " (runtime fallback)",
} }
const suffix = suffixMap[newTask.modelInfo!.type as "inherited" | "system-default"] const suffix = suffixMap[newTask.modelInfo!.type as "inherited" | "system-default" | "runtime-fallback"]
lines.push(`[FALLBACK] Model: ${newTask.modelInfo!.model}${suffix}`) lines.push(`[FALLBACK] Model: ${newTask.modelInfo!.model}${suffix}`)
lines.push("") lines.push("")
} }

View File

@@ -4,12 +4,13 @@ export type TaskStatus = "running" | "queued" | "completed" | "error"
export interface ModelFallbackInfo { export interface ModelFallbackInfo {
model: string model: string
type: "user-defined" | "inherited" | "category-default" | "system-default" type: "user-defined" | "inherited" | "category-default" | "system-default" | "runtime-fallback"
source?: ModelSource source?: ModelSource
} }
export interface TrackedTask { export interface TrackedTask {
id: string id: string
sessionID?: string
description: string description: string
agent: string agent: string
status: TaskStatus status: TaskStatus

View File

@@ -9,45 +9,6 @@
## HOOK TIERS ## HOOK TIERS
### Tier 1: Session Hooks (22) — `create-session-hooks.ts` ### Tier 1: Session Hooks (22) — `create-session-hooks.ts`
## STRUCTURE
```
hooks/
├── atlas/ # Main orchestration (757 lines)
├── anthropic-context-window-limit-recovery/ # Auto-summarize
├── todo-continuation-enforcer.ts # Force TODO completion
├── ralph-loop/ # Self-referential dev loop
├── claude-code-hooks/ # settings.json compat layer - see AGENTS.md
├── comment-checker/ # Prevents AI slop
├── auto-slash-command/ # Detects /command patterns
├── rules-injector/ # Conditional rules
├── directory-agents-injector/ # Auto-injects AGENTS.md
├── directory-readme-injector/ # Auto-injects README.md
├── edit-error-recovery/ # Recovers from failures
├── thinking-block-validator/ # Ensures valid <thinking>
├── context-window-monitor.ts # Reminds of headroom
├── session-recovery/ # Auto-recovers from crashes
├── think-mode/ # Dynamic thinking budget
├── keyword-detector/ # ultrawork/search/analyze modes
├── background-notification/ # OS notification
├── prometheus-md-only/ # Planner read-only mode
├── agent-usage-reminder/ # Specialized agent hints
├── auto-update-checker/ # Plugin update check
├── tool-output-truncator.ts # Prevents context bloat
├── compaction-context-injector/ # Injects context on compaction
├── delegate-task-retry/ # Retries failed delegations
├── interactive-bash-session/ # Tmux session management
├── non-interactive-env/ # Non-TTY environment handling
├── start-work/ # Sisyphus work session starter
├── task-resume-info/ # Resume info for cancelled tasks
├── question-label-truncator/ # Auto-truncates question labels
├── category-skill-reminder/ # Reminds of category skills
├── empty-task-response-detector.ts # Detects empty responses
├── sisyphus-junior-notepad/ # Sisyphus Junior notepad
├── stop-continuation-guard/ # Guards stop continuation
├── subagent-question-blocker/ # Blocks subagent questions
├── runtime-fallback/ # Auto-switch models on API errors
└── index.ts # Hook aggregation + registration
```
| Hook | Event | Purpose | | Hook | Event | Purpose |
|------|-------|---------| |------|-------|---------|

View File

@@ -0,0 +1,54 @@
import { describe, expect, test } from "bun:test"
import { clearSessionModel, setSessionModel } from "../../shared/session-model-state"
import { createBeastModeSystemHook, BEAST_MODE_SYSTEM_PROMPT } from "./hook"
// Exercises the system-prompt transform: injection is keyed off the session's
// registered model and guarded against duplicate insertion.
describe("beast-mode-system hook", () => {
  test("injects beast mode prompt for copilot gpt-4.1", async () => {
    //#given
    const sessionID = "ses_beast"
    setSessionModel(sessionID, { providerID: "github-copilot", modelID: "gpt-4.1" })
    const hook = createBeastModeSystemHook()
    const output = { system: [] as string[] }
    //#when
    await hook["experimental.chat.system.transform"]?.({ sessionID }, output)
    //#then
    // Prompt is prepended (index 0), not appended.
    expect(output.system[0]).toContain("Beast Mode")
    expect(output.system[0]).toContain(BEAST_MODE_SYSTEM_PROMPT.trim().slice(0, 20))
    clearSessionModel(sessionID)
  })
  test("does not inject for other models", async () => {
    //#given
    const sessionID = "ses_no_beast"
    setSessionModel(sessionID, { providerID: "quotio", modelID: "gpt-5.3-codex" })
    const hook = createBeastModeSystemHook()
    const output = { system: [] as string[] }
    //#when
    await hook["experimental.chat.system.transform"]?.({ sessionID }, output)
    //#then
    expect(output.system.length).toBe(0)
    clearSessionModel(sessionID)
  })
  test("avoids duplicate insertion", async () => {
    //#given
    // The prompt already present should suppress a second insertion.
    const sessionID = "ses_dupe"
    setSessionModel(sessionID, { providerID: "github-copilot", modelID: "gpt-4.1" })
    const hook = createBeastModeSystemHook()
    const output = { system: [BEAST_MODE_SYSTEM_PROMPT] }
    //#when
    await hook["experimental.chat.system.transform"]?.({ sessionID }, output)
    //#then
    expect(output.system.length).toBe(1)
    clearSessionModel(sessionID)
  })
})

View File

@@ -0,0 +1,31 @@
import { getSessionModel } from "../../shared/session-model-state"
/**
 * System prompt prepended for GitHub Copilot GPT-4.1 sessions ("Beast Mode").
 * NOTE: the literal "Beast Mode" heading doubles as the duplicate-insertion
 * marker the transform hook checks for — do not rename it independently.
 */
export const BEAST_MODE_SYSTEM_PROMPT = `Beast Mode (Copilot GPT-4.1)
You are an autonomous coding agent. Execute the task end-to-end.
- Make a brief plan, then act.
- Prefer concrete edits and verification over speculation.
- Run relevant tests when feasible.
- Do not ask the user to perform actions you can do yourself.
- If blocked, state exactly what is needed to proceed.
- Keep responses concise and actionable.`
/** True only for the GitHub Copilot GPT-4.1 model, which receives the Beast Mode prompt. */
function isBeastModeModel(model: { providerID: string; modelID: string } | undefined): boolean {
  if (!model) return false
  const { providerID, modelID } = model
  return providerID === "github-copilot" && modelID === "gpt-4.1"
}
export function createBeastModeSystemHook() {
return {
"experimental.chat.system.transform": async (
input: { sessionID: string },
output: { system: string[] },
): Promise<void> => {
const model = getSessionModel(input.sessionID)
if (!isBeastModeModel(model)) return
if (output.system.some((entry) => entry.includes("Beast Mode"))) return
output.system.unshift(BEAST_MODE_SYSTEM_PROMPT)
},
}
}

View File

@@ -0,0 +1 @@
export { createBeastModeSystemHook, BEAST_MODE_SYSTEM_PROMPT } from "./hook"

View File

@@ -14,6 +14,7 @@ export { createEmptyTaskResponseDetectorHook } from "./empty-task-response-detec
export { createAnthropicContextWindowLimitRecoveryHook, type AnthropicContextWindowLimitRecoveryOptions } from "./anthropic-context-window-limit-recovery"; export { createAnthropicContextWindowLimitRecoveryHook, type AnthropicContextWindowLimitRecoveryOptions } from "./anthropic-context-window-limit-recovery";
export { createThinkModeHook } from "./think-mode"; export { createThinkModeHook } from "./think-mode";
export { createModelFallbackHook, setPendingModelFallback, clearPendingModelFallback, type ModelFallbackState } from "./model-fallback/hook";
export { createClaudeCodeHooksHook } from "./claude-code-hooks"; export { createClaudeCodeHooksHook } from "./claude-code-hooks";
export { createRulesInjectorHook } from "./rules-injector"; export { createRulesInjectorHook } from "./rules-injector";
export { createBackgroundNotificationHook } from "./background-notification" export { createBackgroundNotificationHook } from "./background-notification"
@@ -31,7 +32,6 @@ export { createNoSisyphusGptHook } from "./no-sisyphus-gpt";
export { createNoHephaestusNonGptHook } from "./no-hephaestus-non-gpt"; export { createNoHephaestusNonGptHook } from "./no-hephaestus-non-gpt";
export { createAutoSlashCommandHook } from "./auto-slash-command"; export { createAutoSlashCommandHook } from "./auto-slash-command";
export { createEditErrorRecoveryHook } from "./edit-error-recovery"; export { createEditErrorRecoveryHook } from "./edit-error-recovery";
export { createJsonErrorRecoveryHook } from "./json-error-recovery";
export { createPrometheusMdOnlyHook } from "./prometheus-md-only"; export { createPrometheusMdOnlyHook } from "./prometheus-md-only";
export { createSisyphusJuniorNotepadHook } from "./sisyphus-junior-notepad"; export { createSisyphusJuniorNotepadHook } from "./sisyphus-junior-notepad";
export { createTaskResumeInfoHook } from "./task-resume-info"; export { createTaskResumeInfoHook } from "./task-resume-info";
@@ -45,7 +45,6 @@ export { createCompactionTodoPreserverHook } from "./compaction-todo-preserver";
export { createUnstableAgentBabysitterHook } from "./unstable-agent-babysitter"; export { createUnstableAgentBabysitterHook } from "./unstable-agent-babysitter";
export { createPreemptiveCompactionHook } from "./preemptive-compaction"; export { createPreemptiveCompactionHook } from "./preemptive-compaction";
export { createTasksTodowriteDisablerHook } from "./tasks-todowrite-disabler"; export { createTasksTodowriteDisablerHook } from "./tasks-todowrite-disabler";
export { createRuntimeFallbackHook, type RuntimeFallbackHook, type RuntimeFallbackOptions } from "./runtime-fallback";
export { createWriteExistingFileGuardHook } from "./write-existing-file-guard"; export { createWriteExistingFileGuardHook } from "./write-existing-file-guard";
export { createHashlineReadEnhancerHook } from "./hashline-read-enhancer"; export { createHashlineReadEnhancerHook } from "./hashline-read-enhancer";
export { createHashlineEditDiffEnhancerHook } from "./hashline-edit-diff-enhancer"; export { createBeastModeSystemHook, BEAST_MODE_SYSTEM_PROMPT } from "./beast-mode-system";

View File

@@ -0,0 +1,141 @@
import { beforeEach, describe, expect, test } from "bun:test"
import {
clearPendingModelFallback,
createModelFallbackHook,
setPendingModelFallback,
} from "./hook"
// Unit tests for the model-fallback chat.message hook: applying a pending
// fallback, progressing through the chain on repeated errors, and toasting.
describe("model fallback hook", () => {
  beforeEach(() => {
    // Reset shared module state for the session ID reused by the first two tests.
    clearPendingModelFallback("ses_model_fallback_main")
  })
  test("applies pending fallback on chat.message by overriding model", async () => {
    //#given
    // Structurally typed cast so the test can reach the "chat.message" handler.
    const hook = createModelFallbackHook() as unknown as {
      "chat.message"?: (
        input: { sessionID: string },
        output: { message: Record<string, unknown>; parts: Array<{ type: string; text?: string }> },
      ) => Promise<void>
    }
    const set = setPendingModelFallback(
      "ses_model_fallback_main",
      "Sisyphus (Ultraworker)",
      "quotio",
      "claude-opus-4-6-thinking",
    )
    expect(set).toBe(true)
    const output = {
      message: {
        model: { providerID: "quotio", modelID: "claude-opus-4-6-thinking" },
        variant: "max",
      },
      parts: [{ type: "text", text: "continue" }],
    }
    //#when
    await hook["chat.message"]?.(
      { sessionID: "ses_model_fallback_main" },
      output,
    )
    //#then
    expect(output.message["model"]).toEqual({
      providerID: "quotio",
      modelID: "claude-opus-4-6",
    })
  })
  test("preserves fallback progression across repeated session.error retries", async () => {
    //#given
    const hook = createModelFallbackHook() as unknown as {
      "chat.message"?: (
        input: { sessionID: string },
        output: { message: Record<string, unknown>; parts: Array<{ type: string; text?: string }> },
      ) => Promise<void>
    }
    const sessionID = "ses_model_fallback_main"
    expect(
      setPendingModelFallback(sessionID, "Sisyphus (Ultraworker)", "quotio", "claude-opus-4-6-thinking"),
    ).toBe(true)
    const firstOutput = {
      message: {
        model: { providerID: "quotio", modelID: "claude-opus-4-6-thinking" },
        variant: "max",
      },
      parts: [{ type: "text", text: "continue" }],
    }
    //#when - first retry is applied
    await hook["chat.message"]?.({ sessionID }, firstOutput)
    //#then
    expect(firstOutput.message["model"]).toEqual({
      providerID: "quotio",
      modelID: "claude-opus-4-6",
    })
    //#when - second error re-arms fallback and should advance to next entry
    expect(
      setPendingModelFallback(sessionID, "Sisyphus (Ultraworker)", "quotio", "claude-opus-4-6"),
    ).toBe(true)
    const secondOutput = {
      message: {
        model: { providerID: "quotio", modelID: "claude-opus-4-6" },
      },
      parts: [{ type: "text", text: "continue" }],
    }
    await hook["chat.message"]?.({ sessionID }, secondOutput)
    //#then - chain should progress to entry[1], not repeat entry[0]
    expect(secondOutput.message["model"]).toEqual({
      providerID: "quotio",
      modelID: "gpt-5.3-codex",
    })
    expect(secondOutput.message["variant"]).toBe("high")
  })
  test("shows toast when fallback is applied", async () => {
    //#given
    const toastCalls: Array<{ title: string; message: string }> = []
    const hook = createModelFallbackHook({
      toast: async ({ title, message }) => {
        toastCalls.push({ title, message })
      },
    }) as unknown as {
      "chat.message"?: (
        input: { sessionID: string },
        output: { message: Record<string, unknown>; parts: Array<{ type: string; text?: string }> },
      ) => Promise<void>
    }
    // NOTE(review): this test uses its own session ID, which beforeEach does
    // not clear — confirm cross-run isolation is provided elsewhere.
    const set = setPendingModelFallback(
      "ses_model_fallback_toast",
      "Sisyphus (Ultraworker)",
      "quotio",
      "claude-opus-4-6-thinking",
    )
    expect(set).toBe(true)
    const output = {
      message: {
        model: { providerID: "quotio", modelID: "claude-opus-4-6-thinking" },
        variant: "max",
      },
      parts: [{ type: "text", text: "continue" }],
    }
    //#when
    await hook["chat.message"]?.({ sessionID: "ses_model_fallback_toast" }, output)
    //#then
    expect(toastCalls.length).toBe(1)
    expect(toastCalls[0]?.title).toBe("Model fallback")
  })
})

View File

@@ -0,0 +1,246 @@
import type { FallbackEntry } from "../../shared/model-requirements"
import { getAgentConfigKey } from "../../shared/agent-display-names"
import { AGENT_MODEL_REQUIREMENTS } from "../../shared/model-requirements"
import { readConnectedProvidersCache, readProviderModelsCache } from "../../shared/connected-providers-cache"
import { selectFallbackProvider } from "../../shared/model-error-classifier"
import { log } from "../../shared/logger"
import { getTaskToastManager } from "../../features/task-toast-manager"
import type { ChatMessageInput, ChatMessageHandlerOutput } from "../../plugin/chat-message"
// Toast presenter injected by the host; receives title/message plus optional
// severity variant and display duration.
type FallbackToast = (input: {
  title: string
  message: string
  variant?: "info" | "success" | "warning" | "error"
  duration?: number
}) => void | Promise<void>
// Invoked after a fallback model has been written into the outgoing message.
type FallbackCallback = (input: {
  sessionID: string
  providerID: string
  modelID: string
  variant?: string
}) => void | Promise<void>
// Per-session fallback bookkeeping.
export type ModelFallbackState = {
  providerID: string // provider of the model most recently reported as failing
  modelID: string // model most recently reported as failing
  fallbackChain: FallbackEntry[] // ordered candidates to try next
  attemptCount: number // index into fallbackChain of the next candidate
  pending: boolean // true when the next chat.message should apply a fallback
}
/**
 * Map of sessionID -> pending model fallback state
 * When a model error occurs, we store the fallback info here.
 * The next chat.message call will use this to switch to the fallback model.
 */
const pendingModelFallbacks = new Map<string, ModelFallbackState>()
// Deduplicates toast notifications per session (keyed by provider/model/variant).
const lastToastKey = new Map<string, string>()
// Session-scoped fallback chains that take precedence over agent-level defaults.
const sessionFallbackChains = new Map<string, FallbackEntry[]>()
/**
 * Registers (or clears) a per-session fallback chain override.
 * An empty or missing chain removes any existing override so the
 * agent-level default chain applies again.
 */
export function setSessionFallbackChain(sessionID: string, fallbackChain: FallbackEntry[] | undefined): void {
  if (!sessionID) return
  if (fallbackChain && fallbackChain.length > 0) {
    sessionFallbackChains.set(sessionID, fallbackChain)
  } else {
    sessionFallbackChains.delete(sessionID)
  }
}
/** Drops the per-session fallback chain override, if any. */
export function clearSessionFallbackChain(sessionID: string): void {
  sessionFallbackChains.delete(sessionID)
}
/**
 * Sets a pending model fallback for a session.
 * Called when a model error is detected in session.error handler.
 *
 * @param sessionID session whose next turn should switch models
 * @param agentName display name of the agent (mapped to a config key)
 * @param currentProviderID provider of the model that just failed
 * @param currentModelID model that just failed
 * @returns true when a fallback attempt is armed for the next turn; false
 *          when no chain exists or the chain is already exhausted.
 */
export function setPendingModelFallback(
  sessionID: string,
  agentName: string,
  currentProviderID: string,
  currentModelID: string,
): boolean {
  const agentKey = getAgentConfigKey(agentName)
  const requirements = AGENT_MODEL_REQUIREMENTS[agentKey]
  // A session-scoped chain (runtime override) wins over the agent default.
  const sessionFallback = sessionFallbackChains.get(sessionID)
  const fallbackChain = sessionFallback && sessionFallback.length > 0
    ? sessionFallback
    : requirements?.fallbackChain
  if (!fallbackChain || fallbackChain.length === 0) {
    log("[model-fallback] No fallback chain for agent: " + agentName + " (key: " + agentKey + ")")
    return false
  }
  const existing = pendingModelFallbacks.get(sessionID)
  if (existing) {
    // Preserve progression across repeated session.error retries in same session.
    // BUG FIX: check exhaustion BEFORE re-arming. The previous code set
    // pending=true and then returned false on an exhausted chain, leaving
    // hasPendingModelFallback() inconsistent with the return value.
    if (existing.attemptCount >= existing.fallbackChain.length) {
      log("[model-fallback] Fallback chain exhausted for session: " + sessionID)
      return false
    }
    // We only mark the next turn as pending fallback application.
    existing.providerID = currentProviderID
    existing.modelID = currentModelID
    existing.pending = true
    log("[model-fallback] Re-armed pending fallback for session: " + sessionID)
    return true
  }
  const state: ModelFallbackState = {
    providerID: currentProviderID,
    modelID: currentModelID,
    fallbackChain,
    attemptCount: 0,
    pending: true,
  }
  pendingModelFallbacks.set(sessionID, state)
  log("[model-fallback] Set pending fallback for session: " + sessionID + ", agent: " + agentName)
  return true
}
/**
 * Gets the next fallback model for a session.
 * Each call consumes chain entries: attemptCount advances past every entry
 * inspected, and pending is cleared once a reachable entry is returned.
 * Returns null when nothing is pending or the chain is exhausted (in which
 * case the session's state is dropped entirely).
 */
export function getNextFallback(
  sessionID: string,
): { providerID: string; modelID: string; variant?: string } | null {
  const state = pendingModelFallbacks.get(sessionID)
  if (!state || !state.pending) return null
  const providerModelsCache = readProviderModelsCache()
  const connectedProviders = providerModelsCache?.connected ?? readConnectedProvidersCache()
  const connectedSet = connectedProviders ? new Set(connectedProviders) : null
  // Gate only on provider connectivity. Provider model lists can be stale/incomplete,
  // especially after users manually add models to opencode.json.
  const isReachable = (entry: FallbackEntry): boolean =>
    connectedSet === null || entry.providers.some((p) => connectedSet.has(p))
  const chain = state.fallbackChain
  while (state.attemptCount < chain.length) {
    const attemptCount = state.attemptCount
    state.attemptCount += 1
    const candidate = chain[attemptCount]
    if (!isReachable(candidate)) {
      log("[model-fallback] Skipping unreachable fallback for session: " + sessionID + ", attempt: " + attemptCount + ", model: " + candidate.model)
      continue
    }
    const providerID = selectFallbackProvider(candidate.providers, state.providerID)
    state.pending = false
    log("[model-fallback] Using fallback for session: " + sessionID + ", attempt: " + attemptCount + ", model: " + candidate.model)
    return { providerID, modelID: candidate.model, variant: candidate.variant }
  }
  log("[model-fallback] No more fallbacks for session: " + sessionID)
  pendingModelFallbacks.delete(sessionID)
  return null
}
/**
 * Clears the pending fallback for a session.
 * Called after fallback is successfully applied.
 */
export function clearPendingModelFallback(sessionID: string): void {
  pendingModelFallbacks.delete(sessionID)
  lastToastKey.delete(sessionID)
}
/** Checks if there's a pending fallback for a session. */
export function hasPendingModelFallback(sessionID: string): boolean {
  return pendingModelFallbacks.get(sessionID)?.pending === true
}
/** Gets the current fallback state for a session (for debugging). */
export function getFallbackState(sessionID: string): ModelFallbackState | undefined {
  return pendingModelFallbacks.get(sessionID)
}
/**
 * Creates a chat.message hook that applies model fallbacks when pending.
 *
 * When getNextFallback() yields a candidate, the outgoing message's model
 * (and variant) are overwritten in place; the optional toast and onApplied
 * callbacks and the task toast manager are then notified, in that order.
 */
export function createModelFallbackHook(args?: { toast?: FallbackToast; onApplied?: FallbackCallback }) {
  const toast = args?.toast
  const onApplied = args?.onApplied
  return {
    "chat.message": async (
      input: ChatMessageInput,
      output: ChatMessageHandlerOutput,
    ): Promise<void> => {
      const { sessionID } = input
      if (!sessionID) return
      // Consumes the pending attempt (advances the chain); null when nothing armed.
      const fallback = getNextFallback(sessionID)
      if (!fallback) return
      output.message["model"] = {
        providerID: fallback.providerID,
        modelID: fallback.modelID,
      }
      // The fallback entry's variant fully replaces the original; an absent
      // variant clears whatever was set before.
      if (fallback.variant !== undefined) {
        output.message["variant"] = fallback.variant
      } else {
        delete output.message["variant"]
      }
      if (toast) {
        // Deduplicate: only toast when the applied provider/model/variant changed.
        const key = `${sessionID}:${fallback.providerID}/${fallback.modelID}:${fallback.variant ?? ""}`
        if (lastToastKey.get(sessionID) !== key) {
          lastToastKey.set(sessionID, key)
          const variantLabel = fallback.variant ? ` (${fallback.variant})` : ""
          await Promise.resolve(
            toast({
              title: "Model fallback",
              message: `Using ${fallback.providerID}/${fallback.modelID}${variantLabel}`,
              variant: "warning",
              duration: 5000,
            }),
          )
        }
      }
      if (onApplied) {
        await Promise.resolve(
          onApplied({
            sessionID,
            providerID: fallback.providerID,
            modelID: fallback.modelID,
            variant: fallback.variant,
          }),
        )
      }
      // Also surface the runtime fallback in the task UI, when a manager exists.
      const toastManager = getTaskToastManager()
      if (toastManager) {
        const variantLabel = fallback.variant ? ` (${fallback.variant})` : ""
        toastManager.updateTaskModelBySession(sessionID, {
          model: `${fallback.providerID}/${fallback.modelID}${variantLabel}`,
          type: "runtime-fallback",
        })
      }
      log("[model-fallback] Applied fallback model: " + JSON.stringify(fallback))
    },
  }
}

View File

@@ -1,54 +0,0 @@
import { getSessionAgent } from "../../features/claude-code-session-state"
/** Known agent identifiers, in declaration order. */
export const AGENT_NAMES = [
  "sisyphus",
  "oracle",
  "librarian",
  "explore",
  "prometheus",
  "atlas",
  "metis",
  "momus",
  "hephaestus",
  "sisyphus-junior",
  "build",
  "plan",
  "multimodal-looker",
]
/**
 * Case-insensitive word-boundary pattern matching any known agent name.
 * Longer names come first in the alternation so e.g. "sisyphus-junior"
 * wins over its prefix "sisyphus".
 *
 * BUG FIX: sort a copy. The previous code called AGENT_NAMES.sort(...)
 * in place, silently reordering the exported constant as a side effect
 * of building this regex.
 */
export const agentPattern = new RegExp(
  `\\b(${[...AGENT_NAMES]
    .sort((a, b) => b.length - a.length)
    .map((a) => a.replace(/-/g, "\\-"))
    .join("|")})\\b`,
  "i",
)
/**
 * Extracts an agent name embedded in a session ID, if any.
 * Returns the lower-cased agent name or undefined when none matches.
 */
export function detectAgentFromSession(sessionID: string): string | undefined {
  const match = sessionID.match(agentPattern)
  if (match) {
    return match[1].toLowerCase()
  }
  return undefined
}
/**
 * Normalizes a free-form agent string to a canonical AGENT_NAMES entry:
 * lower-cases and trims, accepts exact names, otherwise falls back to a
 * pattern match inside the string. Returns undefined when unrecognizable.
 */
export function normalizeAgentName(agent: string | undefined): string | undefined {
  if (!agent) return undefined
  const normalized = agent.toLowerCase().trim()
  if (AGENT_NAMES.includes(normalized)) {
    return normalized
  }
  const match = normalized.match(agentPattern)
  if (match) {
    return match[1].toLowerCase()
  }
  return undefined
}
/**
 * Resolves the agent for a session, preferring the event-supplied name,
 * then the recorded session agent, then a pattern match on the session ID.
 */
export function resolveAgentForSession(sessionID: string, eventAgent?: string): string | undefined {
  const fromEvent = normalizeAgentName(eventAgent)
  if (fromEvent) return fromEvent
  const fromState = normalizeAgentName(getSessionAgent(sessionID))
  if (fromState) return fromState
  return detectAgentFromSession(sessionID)
}

View File

@@ -1,213 +0,0 @@
import type { HookDeps } from "./types"
import { HOOK_NAME } from "./constants"
import { log } from "../../shared/logger"
import { normalizeAgentName, resolveAgentForSession } from "./agent-resolver"
import { getSessionAgent } from "../../features/claude-code-session-state"
import { getFallbackModelsForSession } from "./fallback-models"
import { prepareFallback } from "./fallback-state"
import { SessionCategoryRegistry } from "../../shared/session-category-registry"
const SESSION_TTL_MS = 30 * 60 * 1000
/**
 * Builds the helper closures shared by the runtime-fallback hook's event and
 * chat-message handlers: aborting in-flight requests, arming/clearing the
 * per-session fallback watchdog, auto-retrying the last user message on a
 * fallback model, resolving the agent for a session, and GC of stale state.
 */
export function createAutoRetryHelpers(deps: HookDeps) {
  const { ctx, config, options, sessionStates, sessionLastAccess, sessionRetryInFlight, sessionAwaitingFallbackResult, sessionFallbackTimeouts, pluginConfig } = deps
  // Best-effort abort of whatever request the session currently has in flight.
  const abortSessionRequest = async (sessionID: string, source: string): Promise<void> => {
    try {
      await ctx.client.session.abort({ path: { id: sessionID } })
      log(`[${HOOK_NAME}] Aborted in-flight session request (${source})`, { sessionID })
    } catch (error) {
      log(`[${HOOK_NAME}] Failed to abort in-flight session request (${source})`, {
        sessionID,
        error: String(error),
      })
    }
  }
  // Disarms the per-session watchdog timer, if one is set.
  const clearSessionFallbackTimeout = (sessionID: string) => {
    const timer = sessionFallbackTimeouts.get(sessionID)
    if (timer) {
      clearTimeout(timer)
      sessionFallbackTimeouts.delete(sessionID)
    }
  }
  // Arms a watchdog: if the session does not settle within the configured
  // window, abort the request and retry on the next fallback model.
  const scheduleSessionFallbackTimeout = (sessionID: string, resolvedAgent?: string) => {
    clearSessionFallbackTimeout(sessionID)
    const timeoutMs = options?.session_timeout_ms ?? config.timeout_seconds * 1000
    if (timeoutMs <= 0) return // non-positive timeout disables the watchdog
    const timer = setTimeout(async () => {
      sessionFallbackTimeouts.delete(sessionID)
      const state = sessionStates.get(sessionID)
      if (!state) return
      if (sessionRetryInFlight.has(sessionID)) {
        log(`[${HOOK_NAME}] Overriding in-flight retry due to session timeout`, { sessionID })
      }
      await abortSessionRequest(sessionID, "session.timeout")
      sessionRetryInFlight.delete(sessionID)
      if (state.pendingFallbackModel) {
        state.pendingFallbackModel = undefined
      }
      const fallbackModels = getFallbackModelsForSession(sessionID, resolvedAgent, pluginConfig)
      if (fallbackModels.length === 0) return
      log(`[${HOOK_NAME}] Session fallback timeout reached`, {
        sessionID,
        timeoutSeconds: config.timeout_seconds,
        currentModel: state.currentModel,
      })
      const result = prepareFallback(sessionID, state, fallbackModels, config)
      if (result.success && result.newModel) {
        await autoRetryWithFallback(sessionID, result.newModel, resolvedAgent, "session.timeout")
      }
    }, timeoutMs)
    sessionFallbackTimeouts.set(sessionID, timer)
  }
  // Re-sends the session's last user message on `newModel` ("provider/model").
  const autoRetryWithFallback = async (
    sessionID: string,
    newModel: string,
    resolvedAgent: string | undefined,
    source: string,
  ): Promise<void> => {
    if (sessionRetryInFlight.has(sessionID)) {
      log(`[${HOOK_NAME}] Retry already in flight, skipping (${source})`, { sessionID })
      return
    }
    const modelParts = newModel.split("/")
    if (modelParts.length < 2) {
      log(`[${HOOK_NAME}] Invalid model format (missing provider prefix): ${newModel}`)
      return
    }
    // Model IDs may themselves contain "/" — keep everything after the provider.
    const fallbackModelObj = {
      providerID: modelParts[0],
      modelID: modelParts.slice(1).join("/"),
    }
    sessionRetryInFlight.add(sessionID)
    try {
      const messagesResp = await ctx.client.session.messages({
        path: { id: sessionID },
        query: { directory: ctx.directory },
      })
      const msgs = (messagesResp as {
        data?: Array<{
          info?: Record<string, unknown>
          parts?: Array<{ type?: string; text?: string }>
        }>
      }).data
      const lastUserMsg = msgs?.filter((m) => m.info?.role === "user").pop()
      // Parts may live on the message or nested inside info, depending on the
      // response shape — accept either.
      const lastUserPartsRaw =
        lastUserMsg?.parts ??
        (lastUserMsg?.info?.parts as Array<{ type?: string; text?: string }> | undefined)
      if (lastUserPartsRaw && lastUserPartsRaw.length > 0) {
        log(`[${HOOK_NAME}] Auto-retrying with fallback model (${source})`, {
          sessionID,
          model: newModel,
        })
        // Only non-empty text parts are re-sent.
        const retryParts = lastUserPartsRaw
          .filter((p) => p.type === "text" && typeof p.text === "string" && p.text.length > 0)
          .map((p) => ({ type: "text" as const, text: p.text! }))
        if (retryParts.length > 0) {
          const retryAgent = resolvedAgent ?? getSessionAgent(sessionID)
          // Mark the session as awaiting a fallback result and re-arm the
          // watchdog BEFORE issuing the prompt, so a hang is still caught.
          sessionAwaitingFallbackResult.add(sessionID)
          scheduleSessionFallbackTimeout(sessionID, retryAgent)
          await ctx.client.session.promptAsync({
            path: { id: sessionID },
            body: {
              ...(retryAgent ? { agent: retryAgent } : {}),
              model: fallbackModelObj,
              parts: retryParts,
            },
            query: { directory: ctx.directory },
          })
        }
      } else {
        log(`[${HOOK_NAME}] No user message found for auto-retry (${source})`, { sessionID })
      }
    } catch (retryError) {
      log(`[${HOOK_NAME}] Auto-retry failed (${source})`, { sessionID, error: String(retryError) })
    } finally {
      const state = sessionStates.get(sessionID)
      if (state?.pendingFallbackModel === newModel) {
        state.pendingFallbackModel = undefined
      }
      sessionRetryInFlight.delete(sessionID)
    }
  }
  // Resolves the agent name, falling back to scanning the session's message
  // history (newest first) when local resolution fails.
  const resolveAgentForSessionFromContext = async (
    sessionID: string,
    eventAgent?: string,
  ): Promise<string | undefined> => {
    const resolved = resolveAgentForSession(sessionID, eventAgent)
    if (resolved) return resolved
    try {
      const messagesResp = await ctx.client.session.messages({
        path: { id: sessionID },
        query: { directory: ctx.directory },
      })
      const msgs = (messagesResp as { data?: Array<{ info?: Record<string, unknown> }> }).data
      if (!msgs || msgs.length === 0) return undefined
      for (let i = msgs.length - 1; i >= 0; i--) {
        const info = msgs[i]?.info
        const infoAgent = typeof info?.agent === "string" ? info.agent : undefined
        const normalized = normalizeAgentName(infoAgent)
        if (normalized) {
          return normalized
        }
      }
    } catch {
      return undefined
    }
    return undefined
  }
  // Evicts per-session bookkeeping not touched within SESSION_TTL_MS.
  const cleanupStaleSessions = () => {
    const now = Date.now()
    let cleanedCount = 0
    for (const [sessionID, lastAccess] of sessionLastAccess.entries()) {
      if (now - lastAccess > SESSION_TTL_MS) {
        sessionStates.delete(sessionID)
        sessionLastAccess.delete(sessionID)
        sessionRetryInFlight.delete(sessionID)
        sessionAwaitingFallbackResult.delete(sessionID)
        clearSessionFallbackTimeout(sessionID)
        SessionCategoryRegistry.remove(sessionID)
        cleanedCount++
      }
    }
    if (cleanedCount > 0) {
      log(`[${HOOK_NAME}] Cleaned up ${cleanedCount} stale session states`)
    }
  }
  return {
    abortSessionRequest,
    clearSessionFallbackTimeout,
    scheduleSessionFallbackTimeout,
    autoRetryWithFallback,
    resolveAgentForSessionFromContext,
    cleanupStaleSessions,
  }
}
export type AutoRetryHelpers = ReturnType<typeof createAutoRetryHelpers>

View File

@@ -1,62 +0,0 @@
import type { HookDeps } from "./types"
import { HOOK_NAME } from "./constants"
import { log } from "../../shared/logger"
import { createFallbackState } from "./fallback-state"
/**
 * Creates the chat.message handler for the runtime-fallback hook.
 * Detects manual model switches (resetting fallback state) and, while a
 * fallback model is active, rewrites the outgoing message's model in place.
 */
export function createChatMessageHandler(deps: HookDeps) {
  const { config, sessionStates, sessionLastAccess } = deps
  return async (
    input: { sessionID: string; agent?: string; model?: { providerID: string; modelID: string } },
    output: { message: { model?: { providerID: string; modelID: string } }; parts?: Array<{ type: string; text?: string }> }
  ) => {
    if (!config.enabled) return
    const { sessionID } = input
    const tracked = sessionStates.get(sessionID)
    if (!tracked) return
    sessionLastAccess.set(sessionID, Date.now())
    const requestedModel = input.model
      ? `${input.model.providerID}/${input.model.modelID}`
      : undefined
    if (requestedModel && requestedModel !== tracked.currentModel) {
      // Our own pending fallback arriving back — consume the marker, no reset.
      if (tracked.pendingFallbackModel && tracked.pendingFallbackModel === requestedModel) {
        tracked.pendingFallbackModel = undefined
        return
      }
      // A genuine user-driven model switch: start fresh fallback tracking.
      log(`[${HOOK_NAME}] Detected manual model change, resetting fallback state`, {
        sessionID,
        from: tracked.currentModel,
        to: requestedModel,
      })
      sessionStates.set(sessionID, createFallbackState(requestedModel))
      return
    }
    // Nothing to override while still on the original model.
    if (tracked.currentModel === tracked.originalModel) return
    const activeModel = tracked.currentModel
    log(`[${HOOK_NAME}] Applying fallback model override`, {
      sessionID,
      from: input.model,
      to: activeModel,
    })
    if (!output.message || !activeModel) return
    const segments = activeModel.split("/")
    if (segments.length < 2) return
    output.message.model = {
      providerID: segments[0],
      modelID: segments.slice(1).join("/"),
    }
  }
}

View File

@@ -1,44 +0,0 @@
/**
* Runtime Fallback Hook - Constants
*
* Default values and configuration constants for the runtime fallback feature.
*/
import type { RuntimeFallbackConfig } from "../../config"
/**
 * Default configuration values for runtime fallback
 */
export const DEFAULT_CONFIG: Required<RuntimeFallbackConfig> = {
  enabled: true,
  // HTTP statuses treated as retryable (also matched inside error text).
  retry_on_errors: [400, 429, 503, 529],
  max_fallback_attempts: 3,
  cooldown_seconds: 60,
  // Watchdog window before a stuck session is aborted and retried on a fallback.
  timeout_seconds: 30,
  notify_on_fallback: true,
}
/**
 * Error patterns that indicate rate limiting or temporary failures
 * These are checked in addition to HTTP status codes
 */
export const RETRYABLE_ERROR_PATTERNS = [
  /rate.?limit/i,
  /too.?many.?requests/i,
  /quota.?exceeded/i,
  /usage\s+limit\s+has\s+been\s+reached/i,
  /service.?unavailable/i,
  /overloaded/i,
  /temporarily.?unavailable/i,
  /try.?again/i,
  /credit.*balance.*too.*low/i,
  /insufficient.?(?:credits?|funds?|balance)/i,
  // Bare status codes that sometimes appear only in message text.
  /(?:^|\s)429(?:\s|$)/,
  /(?:^|\s)503(?:\s|$)/,
  /(?:^|\s)529(?:\s|$)/,
]
/**
 * Hook name for identification and logging
 */
export const HOOK_NAME = "runtime-fallback"

View File

@@ -1,169 +0,0 @@
import { DEFAULT_CONFIG, RETRYABLE_ERROR_PATTERNS } from "./constants"
/**
 * Extracts a lower-cased, human-readable message from an arbitrary error value.
 * Checks `data`, `error`, the value itself, then `data.error` for a non-empty
 * string `message`; falls back to the lower-cased JSON of the whole value.
 */
export function getErrorMessage(error: unknown): string {
  if (!error) return ""
  if (typeof error === "string") return error.toLowerCase()
  const root = error as Record<string, unknown>
  const candidates: unknown[] = [
    root.data,
    root.error,
    root,
    (root.data as Record<string, unknown>)?.error,
  ]
  for (const candidate of candidates) {
    if (!candidate || typeof candidate !== "object") continue
    const message = (candidate as Record<string, unknown>).message
    if (typeof message === "string" && message.length > 0) {
      return message.toLowerCase()
    }
  }
  try {
    return JSON.stringify(error).toLowerCase()
  } catch {
    // Circular or otherwise unserializable error objects.
    return ""
  }
}
/**
 * Pulls an HTTP status code out of an error: first from `statusCode`/`status`/
 * `data.statusCode` fields, then by scanning the error message for one of the
 * candidate codes (defaults to DEFAULT_CONFIG.retry_on_errors).
 */
export function extractStatusCode(error: unknown, retryOnErrors?: number[]): number | undefined {
  if (!error) return undefined
  const source = error as Record<string, unknown>
  const direct =
    source.statusCode ??
    source.status ??
    (source.data as Record<string, unknown>)?.statusCode
  if (typeof direct === "number") {
    return direct
  }
  const codes = retryOnErrors ?? DEFAULT_CONFIG.retry_on_errors
  const matcher = new RegExp(`\\b(${codes.join("|")})\\b`)
  const found = getErrorMessage(error).match(matcher)
  return found ? parseInt(found[1], 10) : undefined
}
/**
 * Finds an error "name": on the value itself, on a nested `error` object, or
 * on `data.error` — returning the first non-empty string encountered.
 */
export function extractErrorName(error: unknown): string | undefined {
  if (!error || typeof error !== "object") return undefined
  const root = error as Record<string, unknown>
  const nested = root.error as Record<string, unknown> | undefined
  const viaData = (root.data as Record<string, unknown> | undefined)?.error as
    | Record<string, unknown>
    | undefined
  for (const candidate of [root.name, nested?.name, viaData?.name]) {
    if (typeof candidate === "string" && candidate.length > 0) {
      return candidate
    }
  }
  return undefined
}
/**
 * Buckets an error into a coarse type used by the retry logic:
 * "missing_api_key", "invalid_api_key", "model_not_found", or undefined.
 */
export function classifyErrorType(error: unknown): string | undefined {
  const message = getErrorMessage(error)
  const loweredName = extractErrorName(error)?.toLowerCase()
  const missingKey =
    loweredName?.includes("loadapi") ||
    (/api.?key.?is.?missing/i.test(message) && /environment variable/i.test(message))
  if (missingKey) {
    return "missing_api_key"
  }
  if (/api.?key/i.test(message) && /must be a string/i.test(message)) {
    return "invalid_api_key"
  }
  if (loweredName?.includes("unknownerror") && /model\s+not\s+found/i.test(message)) {
    return "model_not_found"
  }
  return undefined
}
/** Raw status text indicating the provider is already auto-retrying. */
export interface AutoRetrySignal {
  signal: string
}
/**
 * ALL of these must match the combined status text for it to count as a
 * provider-side auto-retry: an explicit "retrying in ..." plus a rate/quota hint.
 */
export const AUTO_RETRY_PATTERNS: Array<(combined: string) => boolean> = [
  (combined) => /retrying\s+in/i.test(combined),
  (combined) =>
    /(?:too\s+many\s+requests|quota\s*exceeded|usage\s+limit|rate\s+limit|limit\s+reached)/i.test(combined),
]
/**
 * Joins the string fields of a status payload (status, summary, message,
 * details) and returns them as a signal when every AUTO_RETRY_PATTERN matches.
 */
export function extractAutoRetrySignal(info: Record<string, unknown> | undefined): AutoRetrySignal | undefined {
  if (!info) return undefined
  const pieces: string[] = []
  for (const field of [info.status, info.summary, info.message, info.details]) {
    if (typeof field === "string") pieces.push(field)
  }
  const combined = pieces.join("\n")
  if (!combined) return undefined
  return AUTO_RETRY_PATTERNS.every((test) => test(combined)) ? { signal: combined } : undefined
}
/**
 * Scans message parts for `type: "error"` entries.
 * Returns hasError plus the newline-joined error texts when any string
 * texts exist (errorMessage is undefined otherwise).
 */
export function containsErrorContent(
  parts: Array<{ type?: string; text?: string }> | undefined
): { hasError: boolean; errorMessage?: string } {
  const errorParts = (parts ?? []).filter((p) => p.type === "error")
  if (errorParts.length === 0) {
    return { hasError: false }
  }
  const texts = errorParts
    .map((p) => p.text)
    .filter((text): text is string => typeof text === "string")
  return { hasError: true, errorMessage: texts.length > 0 ? texts.join("\n") : undefined }
}
/**
 * True when an error should trigger the fallback chain: missing-key or
 * model-not-found classifications, a configured retryable HTTP status, or a
 * message matching any known retryable text pattern.
 */
export function isRetryableError(error: unknown, retryOnErrors: number[]): boolean {
  const errorType = classifyErrorType(error)
  if (errorType === "missing_api_key" || errorType === "model_not_found") {
    return true
  }
  const statusCode = extractStatusCode(error, retryOnErrors)
  if (statusCode && retryOnErrors.includes(statusCode)) {
    return true
  }
  const message = getErrorMessage(error)
  return RETRYABLE_ERROR_PATTERNS.some((pattern) => pattern.test(message))
}

View File

@@ -1,187 +0,0 @@
import type { HookDeps } from "./types"
import type { AutoRetryHelpers } from "./auto-retry"
import { HOOK_NAME } from "./constants"
import { log } from "../../shared/logger"
import { extractStatusCode, extractErrorName, classifyErrorType, isRetryableError } from "./error-classifier"
import { createFallbackState, prepareFallback } from "./fallback-state"
import { getFallbackModelsForSession } from "./fallback-models"
import { SessionCategoryRegistry } from "../../shared/session-category-registry"
/**
 * Builds the event dispatcher for the runtime-fallback hook. It maintains
 * per-session fallback state across the session lifecycle and, on a
 * retryable session.error, prepares a fallback model and auto-retries the
 * session's last user message.
 */
export function createEventHandler(deps: HookDeps, helpers: AutoRetryHelpers) {
  const { config, pluginConfig, sessionStates, sessionLastAccess, sessionRetryInFlight, sessionAwaitingFallbackResult, sessionFallbackTimeouts } = deps
  // Seed fallback state from the model the session was created with.
  const handleSessionCreated = (props: Record<string, unknown> | undefined) => {
    const sessionInfo = props?.info as { id?: string; model?: string } | undefined
    const sessionID = sessionInfo?.id
    const model = sessionInfo?.model
    if (sessionID && model) {
      log(`[${HOOK_NAME}] Session created with model`, { sessionID, model })
      sessionStates.set(sessionID, createFallbackState(model))
      sessionLastAccess.set(sessionID, Date.now())
    }
  }
  // Drop every piece of per-session bookkeeping.
  const handleSessionDeleted = (props: Record<string, unknown> | undefined) => {
    const sessionInfo = props?.info as { id?: string } | undefined
    const sessionID = sessionInfo?.id
    if (sessionID) {
      log(`[${HOOK_NAME}] Cleaning up session state`, { sessionID })
      sessionStates.delete(sessionID)
      sessionLastAccess.delete(sessionID)
      sessionRetryInFlight.delete(sessionID)
      sessionAwaitingFallbackResult.delete(sessionID)
      helpers.clearSessionFallbackTimeout(sessionID)
      SessionCategoryRegistry.remove(sessionID)
    }
  }
  // Stop event: abort any in-flight retry and clear retry-related state.
  const handleSessionStop = async (props: Record<string, unknown> | undefined) => {
    const sessionID = props?.sessionID as string | undefined
    if (!sessionID) return
    helpers.clearSessionFallbackTimeout(sessionID)
    if (sessionRetryInFlight.has(sessionID)) {
      await helpers.abortSessionRequest(sessionID, "session.stop")
    }
    sessionRetryInFlight.delete(sessionID)
    sessionAwaitingFallbackResult.delete(sessionID)
    const state = sessionStates.get(sessionID)
    if (state?.pendingFallbackModel) {
      state.pendingFallbackModel = undefined
    }
    log(`[${HOOK_NAME}] Cleared fallback retry state on session.stop`, { sessionID })
  }
  // Idle event: disarm the watchdog — unless we are still waiting on the
  // result of a fallback retry, in which case the timeout stays armed.
  const handleSessionIdle = (props: Record<string, unknown> | undefined) => {
    const sessionID = props?.sessionID as string | undefined
    if (!sessionID) return
    if (sessionAwaitingFallbackResult.has(sessionID)) {
      log(`[${HOOK_NAME}] session.idle while awaiting fallback result; keeping timeout armed`, { sessionID })
      return
    }
    const hadTimeout = sessionFallbackTimeouts.has(sessionID)
    helpers.clearSessionFallbackTimeout(sessionID)
    sessionRetryInFlight.delete(sessionID)
    const state = sessionStates.get(sessionID)
    if (state?.pendingFallbackModel) {
      state.pendingFallbackModel = undefined
    }
    if (hadTimeout) {
      log(`[${HOOK_NAME}] Cleared fallback timeout after session completion`, { sessionID })
    }
  }
  // Core path: classify the error, (re)build session state when missing, then
  // prepare the next fallback model, toast, and auto-retry.
  const handleSessionError = async (props: Record<string, unknown> | undefined) => {
    const sessionID = props?.sessionID as string | undefined
    const error = props?.error
    const agent = props?.agent as string | undefined
    if (!sessionID) {
      log(`[${HOOK_NAME}] session.error without sessionID, skipping`)
      return
    }
    const resolvedAgent = await helpers.resolveAgentForSessionFromContext(sessionID, agent)
    sessionAwaitingFallbackResult.delete(sessionID)
    helpers.clearSessionFallbackTimeout(sessionID)
    log(`[${HOOK_NAME}] session.error received`, {
      sessionID,
      agent,
      resolvedAgent,
      statusCode: extractStatusCode(error, config.retry_on_errors),
      errorName: extractErrorName(error),
      errorType: classifyErrorType(error),
    })
    if (!isRetryableError(error, config.retry_on_errors)) {
      log(`[${HOOK_NAME}] Error not retryable, skipping fallback`, {
        sessionID,
        retryable: false,
        statusCode: extractStatusCode(error, config.retry_on_errors),
        errorName: extractErrorName(error),
        errorType: classifyErrorType(error),
      })
      return
    }
    let state = sessionStates.get(sessionID)
    const fallbackModels = getFallbackModelsForSession(sessionID, resolvedAgent, pluginConfig)
    if (fallbackModels.length === 0) {
      log(`[${HOOK_NAME}] No fallback models configured`, { sessionID, agent })
      return
    }
    if (!state) {
      // No tracked state yet: derive the current model from the event, else
      // fall back to the agent's configured model.
      const currentModel = props?.model as string | undefined
      if (currentModel) {
        state = createFallbackState(currentModel)
        sessionStates.set(sessionID, state)
        sessionLastAccess.set(sessionID, Date.now())
      } else {
        const detectedAgent = resolvedAgent
        const agentConfig = detectedAgent
          ? pluginConfig?.agents?.[detectedAgent as keyof typeof pluginConfig.agents]
          : undefined
        const agentModel = agentConfig?.model as string | undefined
        if (agentModel) {
          log(`[${HOOK_NAME}] Derived model from agent config`, { sessionID, agent: detectedAgent, model: agentModel })
          state = createFallbackState(agentModel)
          sessionStates.set(sessionID, state)
          sessionLastAccess.set(sessionID, Date.now())
        } else {
          log(`[${HOOK_NAME}] No model info available, cannot fallback`, { sessionID })
          return
        }
      }
    } else {
      sessionLastAccess.set(sessionID, Date.now())
    }
    const result = prepareFallback(sessionID, state, fallbackModels, config)
    if (result.success && config.notify_on_fallback) {
      // Best-effort toast; failures are swallowed.
      await deps.ctx.client.tui
        .showToast({
          body: {
            title: "Model Fallback",
            message: `Switching to ${result.newModel?.split("/").pop() || result.newModel} for next request`,
            variant: "warning",
            duration: 5000,
          },
        })
        .catch(() => {})
    }
    if (result.success && result.newModel) {
      await helpers.autoRetryWithFallback(sessionID, result.newModel, resolvedAgent, "session.error")
    }
    if (!result.success) {
      log(`[${HOOK_NAME}] Fallback preparation failed`, { sessionID, error: result.error })
    }
  }
  // Dispatch on event type; no-op when the feature is disabled.
  return async ({ event }: { event: { type: string; properties?: unknown } }) => {
    if (!config.enabled) return
    const props = event.properties as Record<string, unknown> | undefined
    if (event.type === "session.created") { handleSessionCreated(props); return }
    if (event.type === "session.deleted") { handleSessionDeleted(props); return }
    if (event.type === "session.stop") { await handleSessionStop(props); return }
    if (event.type === "session.idle") { handleSessionIdle(props); return }
    if (event.type === "session.error") { await handleSessionError(props); return }
  }
}

View File

@@ -1,69 +0,0 @@
import type { OhMyOpenCodeConfig } from "../../config"
import { AGENT_NAMES, agentPattern } from "./agent-resolver"
import { HOOK_NAME } from "./constants"
import { log } from "../../shared/logger"
import { SessionCategoryRegistry } from "../../shared/session-category-registry"
import { normalizeFallbackModels } from "../../shared/model-resolver"
/**
 * Resolves the fallback model list for a session.
 *
 * Priority order: the category registered for this session, the explicitly
 * supplied agent, an agent detected from the session ID, the "sisyphus"
 * agent, and finally the first agent in AGENT_NAMES that has any fallback
 * configuration. Returns an empty list when nothing is configured.
 */
export function getFallbackModelsForSession(
  sessionID: string,
  agent: string | undefined,
  pluginConfig: OhMyOpenCodeConfig | undefined
): string[] {
  if (!pluginConfig) return []
  // 1. A category registered for this specific session wins outright.
  const registeredCategory = SessionCategoryRegistry.get(sessionID)
  if (registeredCategory && pluginConfig.categories?.[registeredCategory]) {
    const categoryEntry = pluginConfig.categories[registeredCategory]
    if (categoryEntry?.fallback_models) {
      return normalizeFallbackModels(categoryEntry.fallback_models) ?? []
    }
  }
  // Looks up an agent's own fallback_models first, then its linked category's.
  const lookupAgentFallback = (agentName: string): string[] | undefined => {
    const entry = pluginConfig.agents?.[agentName as keyof typeof pluginConfig.agents]
    if (!entry) return undefined
    if (entry?.fallback_models) {
      return normalizeFallbackModels(entry.fallback_models)
    }
    const linkedCategory = entry?.category
    if (linkedCategory && pluginConfig.categories?.[linkedCategory]) {
      const linked = pluginConfig.categories[linkedCategory]
      if (linked?.fallback_models) {
        return normalizeFallbackModels(linked.fallback_models)
      }
    }
    return undefined
  }
  // 2. Explicitly supplied agent.
  if (agent) {
    const explicit = lookupAgentFallback(agent)
    if (explicit) return explicit
  }
  // 3. Agent name embedded in the session ID.
  const sessionAgentMatch = sessionID.match(agentPattern)
  if (sessionAgentMatch) {
    const detected = lookupAgentFallback(sessionAgentMatch[1].toLowerCase())
    if (detected) return detected
  }
  // 4. Default to the sisyphus agent's configuration.
  const sisyphusModels = lookupAgentFallback("sisyphus")
  if (sisyphusModels) {
    log(`[${HOOK_NAME}] Using sisyphus fallback models (no agent detected)`, { sessionID })
    return sisyphusModels
  }
  // 5. Last resort: first agent with any fallback configuration.
  for (const agentName of AGENT_NAMES) {
    const models = lookupAgentFallback(agentName)
    if (models) {
      log(`[${HOOK_NAME}] Using ${agentName} fallback models (no agent detected)`, { sessionID })
      return models
    }
  }
  return []
}

View File

@@ -1,74 +0,0 @@
import type { FallbackState, FallbackResult } from "./types"
import { HOOK_NAME } from "./constants"
import { log } from "../../shared/logger"
import type { RuntimeFallbackConfig } from "../../config"
/**
 * Builds the initial per-session fallback bookkeeping for a model.
 * The chain starts before the first fallback entry (index -1), with no
 * recorded failures, no attempts, and nothing pending.
 */
export function createFallbackState(originalModel: string): FallbackState {
  const failedModels = new Map<string, number>()
  return {
    originalModel,
    currentModel: originalModel,
    fallbackIndex: -1,
    failedModels,
    attemptCount: 0,
    pendingFallbackModel: undefined,
  }
}
/**
 * Returns true when `model` failed for this session within the last
 * `cooldownSeconds`, i.e. it should not be retried yet.
 */
export function isModelInCooldown(model: string, state: FallbackState, cooldownSeconds: number): boolean {
  const lastFailure = state.failedModels.get(model)
  if (lastFailure === undefined) return false
  const elapsedMs = Date.now() - lastFailure
  return elapsedMs < cooldownSeconds * 1000
}
/**
 * Walks the fallback list forward from the position after the current chain
 * index and returns the first candidate that is not in cooldown, or
 * undefined when every remaining candidate is cooling down or exhausted.
 */
export function findNextAvailableFallback(
  state: FallbackState,
  fallbackModels: string[],
  cooldownSeconds: number
): string | undefined {
  let index = state.fallbackIndex + 1
  while (index < fallbackModels.length) {
    const candidate = fallbackModels[index]
    if (isModelInCooldown(candidate, state, cooldownSeconds)) {
      log(`[${HOOK_NAME}] Skipping fallback model in cooldown`, { model: candidate, index })
    } else {
      return candidate
    }
    index++
  }
  return undefined
}
/**
 * Attempts to advance the session's fallback chain by one model.
 *
 * Fails when the attempt budget is exhausted or every remaining candidate is
 * in cooldown. On success, mutates `state`: records the outgoing model as
 * failed (timestamped for cooldown), bumps the attempt counter, and marks the
 * chosen model as both current and pending.
 */
export function prepareFallback(
  sessionID: string,
  state: FallbackState,
  fallbackModels: string[],
  config: Required<RuntimeFallbackConfig>
): FallbackResult {
  if (state.attemptCount >= config.max_fallback_attempts) {
    log(`[${HOOK_NAME}] Max fallback attempts reached`, { sessionID, attempts: state.attemptCount })
    return { success: false, error: "Max fallback attempts reached", maxAttemptsReached: true }
  }
  const nextModel = findNextAvailableFallback(state, fallbackModels, config.cooldown_seconds)
  if (!nextModel) {
    log(`[${HOOK_NAME}] No available fallback models`, { sessionID })
    return { success: false, error: "No available fallback models (all in cooldown or exhausted)" }
  }
  log(`[${HOOK_NAME}] Preparing fallback`, {
    sessionID,
    from: state.currentModel,
    to: nextModel,
    attempt: state.attemptCount + 1,
  })
  // Record when the model we are leaving failed (drives cooldown), then
  // advance the chain pointer to the chosen candidate.
  const failedModel = state.currentModel
  const now = Date.now()
  state.fallbackIndex = fallbackModels.indexOf(nextModel)
  state.failedModels.set(failedModel, now)
  state.attemptCount++
  state.currentModel = nextModel
  state.pendingFallbackModel = nextModel
  return { success: true, newModel: nextModel }
}

View File

@@ -1,67 +0,0 @@
import type { PluginInput } from "@opencode-ai/plugin"
import type { HookDeps, RuntimeFallbackHook, RuntimeFallbackOptions } from "./types"
import { DEFAULT_CONFIG, HOOK_NAME } from "./constants"
import { log } from "../../shared/logger"
import { loadPluginConfig } from "../../plugin-config"
import { createAutoRetryHelpers } from "./auto-retry"
import { createEventHandler } from "./event-handler"
import { createMessageUpdateHandler } from "./message-update-handler"
import { createChatMessageHandler } from "./chat-message-handler"
/**
 * Wires up the runtime model-fallback hook: merges user config with
 * defaults, loads the plugin config when not injected, builds the shared
 * per-session state containers, and composes the event / chat.message
 * handlers into the hook surface.
 */
export function createRuntimeFallbackHook(
  ctx: PluginInput,
  options?: RuntimeFallbackOptions
): RuntimeFallbackHook {
  // Field-by-field merge so a partial user config keeps remaining defaults.
  const config = {
    enabled: options?.config?.enabled ?? DEFAULT_CONFIG.enabled,
    retry_on_errors: options?.config?.retry_on_errors ?? DEFAULT_CONFIG.retry_on_errors,
    max_fallback_attempts: options?.config?.max_fallback_attempts ?? DEFAULT_CONFIG.max_fallback_attempts,
    cooldown_seconds: options?.config?.cooldown_seconds ?? DEFAULT_CONFIG.cooldown_seconds,
    timeout_seconds: options?.config?.timeout_seconds ?? DEFAULT_CONFIG.timeout_seconds,
    notify_on_fallback: options?.config?.notify_on_fallback ?? DEFAULT_CONFIG.notify_on_fallback,
  }
  let pluginConfig = options?.pluginConfig
  if (!pluginConfig) {
    try {
      pluginConfig = loadPluginConfig(ctx.directory, ctx)
    } catch {
      // The hook still runs without plugin config; fallback model lookup
      // will simply find nothing.
      log(`[${HOOK_NAME}] Plugin config not available`)
    }
  }
  const deps: HookDeps = {
    ctx,
    config,
    options,
    pluginConfig,
    sessionStates: new Map(),
    sessionLastAccess: new Map(),
    sessionRetryInFlight: new Set(),
    sessionAwaitingFallbackResult: new Set(),
    sessionFallbackTimeouts: new Map(),
  }
  const helpers = createAutoRetryHelpers(deps)
  const baseEventHandler = createEventHandler(deps, helpers)
  const messageUpdateHandler = createMessageUpdateHandler(deps, helpers)
  const chatMessageHandler = createChatMessageHandler(deps)
  // Periodic GC of stale session state; unref() so the interval never keeps
  // the host process alive on its own.
  const cleanupInterval = setInterval(helpers.cleanupStaleSessions, 5 * 60 * 1000)
  cleanupInterval.unref()
  const eventHandler = async ({ event }: { event: { type: string; properties?: unknown } }) => {
    // message.updated gets its dedicated handler (and honors the enabled
    // flag here); everything else flows through the base lifecycle handler.
    if (event.type === "message.updated") {
      if (!config.enabled) return
      const props = event.properties as Record<string, unknown> | undefined
      await messageUpdateHandler(props)
      return
    }
    await baseEventHandler({ event })
  }
  return {
    event: eventHandler,
    "chat.message": chatMessageHandler,
  } as RuntimeFallbackHook
}

File diff suppressed because it is too large Load Diff

View File

@@ -1,2 +0,0 @@
export { createRuntimeFallbackHook } from "./hook"
export type { RuntimeFallbackHook, RuntimeFallbackOptions } from "./types"

View File

@@ -1,216 +0,0 @@
import type { HookDeps } from "./types"
import type { AutoRetryHelpers } from "./auto-retry"
import { HOOK_NAME } from "./constants"
import { log } from "../../shared/logger"
import { extractStatusCode, extractErrorName, classifyErrorType, isRetryableError, extractAutoRetrySignal, containsErrorContent } from "./error-classifier"
import { createFallbackState, prepareFallback } from "./fallback-state"
import { getFallbackModelsForSession } from "./fallback-models"
/**
 * Builds a checker that inspects a session's message history and reports
 * whether the latest assistant message carries a visible, non-error final
 * text response. Used to decide when a pending fallback timeout may be
 * cleared. Any API failure is treated as "no visible response" (best-effort
 * probe; the timeout simply stays armed).
 *
 * @param extractAutoRetrySignalFn - injected so tests can stub the
 *   auto-retry-signal detector; a body that looks like a provider retry
 *   signal does not count as a real final answer.
 */
export function hasVisibleAssistantResponse(extractAutoRetrySignalFn: typeof extractAutoRetrySignal) {
  return async (
    ctx: HookDeps["ctx"],
    sessionID: string,
    _info: Record<string, unknown> | undefined,
  ): Promise<boolean> => {
    try {
      const messagesResp = await ctx.client.session.messages({
        path: { id: sessionID },
        query: { directory: ctx.directory },
      })
      const msgs = (messagesResp as {
        data?: Array<{
          info?: Record<string, unknown>
          parts?: Array<{ type?: string; text?: string }>
        }>
      }).data
      if (!msgs || msgs.length === 0) return false
      const lastAssistant = [...msgs].reverse().find((m) => m.info?.role === "assistant")
      if (!lastAssistant) return false
      if (lastAssistant.info?.error) return false
      // Parts may live either on the message itself or inside its info blob.
      const parts = lastAssistant.parts ??
        (lastAssistant.info?.parts as Array<{ type?: string; text?: string }> | undefined)
      // flatMap narrows `text` to string within a single callback, which
      // removes the non-null assertion the previous filter/map chain needed.
      const textFromParts = (parts ?? [])
        .flatMap((p) => (p.type === "text" && typeof p.text === "string" ? [p.text.trim()] : []))
        .filter((text) => text.length > 0)
        .join("\n")
      if (!textFromParts) return false
      if (extractAutoRetrySignalFn({ message: textFromParts })) return false
      return true
    } catch {
      return false
    }
  }
}
/**
 * Builds the `message.updated` handler for the runtime-fallback hook.
 *
 * Three signals are treated as errors worth falling back on: an explicit
 * error on the message info, a provider auto-retry signal embedded in the
 * message (only when the timeout feature is on), and error-looking content
 * inside the message parts. Successful assistant messages clear any pending
 * fallback bookkeeping instead.
 */
export function createMessageUpdateHandler(deps: HookDeps, helpers: AutoRetryHelpers) {
  const { ctx, config, pluginConfig, sessionStates, sessionLastAccess, sessionRetryInFlight, sessionAwaitingFallbackResult } = deps
  const checkVisibleResponse = hasVisibleAssistantResponse(extractAutoRetrySignal)
  return async (props: Record<string, unknown> | undefined) => {
    const info = props?.info as Record<string, unknown> | undefined
    const sessionID = info?.sessionID as string | undefined
    const retrySignalResult = extractAutoRetrySignal(info)
    const retrySignal = retrySignalResult?.signal
    const timeoutEnabled = config.timeout_seconds > 0
    const parts = props?.parts as Array<{ type?: string; text?: string }> | undefined
    const errorContentResult = containsErrorContent(parts)
    // Synthesize an error object from whichever signal fired first:
    // explicit info.error > provider auto-retry signal > error-like content.
    const error = info?.error ??
      (retrySignal && timeoutEnabled ? { name: "ProviderRateLimitError", message: retrySignal } : undefined) ??
      (errorContentResult.hasError ? { name: "MessageContentError", message: errorContentResult.errorMessage || "Message contains error content" } : undefined)
    const role = info?.role as string | undefined
    const model = info?.model as string | undefined
    if (sessionID && role === "assistant" && !error) {
      // Healthy assistant update: only interesting while we are waiting to
      // confirm a fallback actually produced a visible response.
      if (!sessionAwaitingFallbackResult.has(sessionID)) {
        return
      }
      const hasVisible = await checkVisibleResponse(ctx, sessionID, info)
      if (!hasVisible) {
        log(`[${HOOK_NAME}] Assistant update observed without visible final response; keeping fallback timeout`, {
          sessionID,
          model,
        })
        return
      }
      // Visible final answer arrived: the fallback succeeded, clear state.
      sessionAwaitingFallbackResult.delete(sessionID)
      helpers.clearSessionFallbackTimeout(sessionID)
      const state = sessionStates.get(sessionID)
      if (state?.pendingFallbackModel) {
        state.pendingFallbackModel = undefined
      }
      log(`[${HOOK_NAME}] Assistant response observed; cleared fallback timeout`, { sessionID, model })
      return
    }
    if (sessionID && role === "assistant" && error) {
      sessionAwaitingFallbackResult.delete(sessionID)
      // An in-flight retry suppresses a second fallback, unless a provider
      // auto-retry signal arrives — that overrides and aborts the request.
      if (sessionRetryInFlight.has(sessionID) && !retrySignal) {
        log(`[${HOOK_NAME}] message.updated fallback skipped (retry in flight)`, { sessionID })
        return
      }
      if (retrySignal && sessionRetryInFlight.has(sessionID) && timeoutEnabled) {
        log(`[${HOOK_NAME}] Overriding in-flight retry due to provider auto-retry signal`, {
          sessionID,
          model,
        })
        await helpers.abortSessionRequest(sessionID, "message.updated.retry-signal")
        sessionRetryInFlight.delete(sessionID)
      }
      if (retrySignal && timeoutEnabled) {
        log(`[${HOOK_NAME}] Detected provider auto-retry signal`, { sessionID, model })
      }
      if (!retrySignal) {
        helpers.clearSessionFallbackTimeout(sessionID)
      }
      log(`[${HOOK_NAME}] message.updated with assistant error`, {
        sessionID,
        model,
        statusCode: extractStatusCode(error, config.retry_on_errors),
        errorName: extractErrorName(error),
        errorType: classifyErrorType(error),
      })
      if (!isRetryableError(error, config.retry_on_errors)) {
        log(`[${HOOK_NAME}] message.updated error not retryable, skipping fallback`, {
          sessionID,
          statusCode: extractStatusCode(error, config.retry_on_errors),
          errorName: extractErrorName(error),
          errorType: classifyErrorType(error),
        })
        return
      }
      let state = sessionStates.get(sessionID)
      const agent = info?.agent as string | undefined
      const resolvedAgent = await helpers.resolveAgentForSessionFromContext(sessionID, agent)
      const fallbackModels = getFallbackModelsForSession(sessionID, resolvedAgent, pluginConfig)
      if (fallbackModels.length === 0) {
        return
      }
      if (!state) {
        // First failure: seed fallback state from the message's model, or
        // fall back to the agent's configured model when absent.
        let initialModel = model
        if (!initialModel) {
          const detectedAgent = resolvedAgent
          const agentConfig = detectedAgent
            ? pluginConfig?.agents?.[detectedAgent as keyof typeof pluginConfig.agents]
            : undefined
          const agentModel = agentConfig?.model as string | undefined
          if (agentModel) {
            log(`[${HOOK_NAME}] Derived model from agent config for message.updated`, {
              sessionID,
              agent: detectedAgent,
              model: agentModel,
            })
            initialModel = agentModel
          }
        }
        if (!initialModel) {
          log(`[${HOOK_NAME}] message.updated missing model info, cannot fallback`, {
            sessionID,
            errorName: extractErrorName(error),
            errorType: classifyErrorType(error),
          })
          return
        }
        state = createFallbackState(initialModel)
        sessionStates.set(sessionID, state)
        sessionLastAccess.set(sessionID, Date.now())
      } else {
        sessionLastAccess.set(sessionID, Date.now())
        if (state.pendingFallbackModel) {
          // A fallback is already queued. A provider auto-retry signal means
          // that attempt is being retried server-side, so re-arm; otherwise
          // don't stack another fallback on top.
          if (retrySignal && timeoutEnabled) {
            log(`[${HOOK_NAME}] Clearing pending fallback due to provider auto-retry signal`, {
              sessionID,
              pendingFallbackModel: state.pendingFallbackModel,
            })
            state.pendingFallbackModel = undefined
          } else {
            log(`[${HOOK_NAME}] message.updated fallback skipped (pending fallback in progress)`, {
              sessionID,
              pendingFallbackModel: state.pendingFallbackModel,
            })
            return
          }
        }
      }
      const result = prepareFallback(sessionID, state, fallbackModels, config)
      if (result.success && config.notify_on_fallback) {
        // Toast is best-effort; TUI failures are deliberately swallowed.
        await deps.ctx.client.tui
          .showToast({
            body: {
              title: "Model Fallback",
              message: `Switching to ${result.newModel?.split("/").pop() || result.newModel} for next request`,
              variant: "warning",
              duration: 5000,
            },
          })
          .catch(() => {})
      }
      if (result.success && result.newModel) {
        await helpers.autoRetryWithFallback(sessionID, result.newModel, resolvedAgent, "message.updated")
      }
    }
  }
}

View File

@@ -1,41 +0,0 @@
import type { PluginInput } from "@opencode-ai/plugin"
import type { RuntimeFallbackConfig, OhMyOpenCodeConfig } from "../../config"
/** Per-session bookkeeping for the runtime model-fallback chain. */
export interface FallbackState {
  // Model the session originally started with.
  originalModel: string
  // Model currently in use; updated each time a fallback is applied.
  currentModel: string
  // Index into the fallback list of the current model; -1 before any fallback.
  fallbackIndex: number
  // model ID -> epoch-ms timestamp of its last failure (drives cooldown).
  failedModels: Map<string, number>
  // Number of fallback attempts performed so far (capped by config).
  attemptCount: number
  // Model queued to be applied on the next outgoing request, if any.
  pendingFallbackModel?: string
}
/** Outcome of attempting to advance the fallback chain. */
export interface FallbackResult {
  success: boolean
  // Model chosen for the retry; set only when success is true.
  newModel?: string
  // Human-readable failure reason; set only when success is false.
  error?: string
  // True when the failure was specifically the attempt budget running out.
  maxAttemptsReached?: boolean
}
/** Construction-time options for the runtime-fallback hook. */
export interface RuntimeFallbackOptions {
  config?: RuntimeFallbackConfig
  pluginConfig?: OhMyOpenCodeConfig
  session_timeout_ms?: number
}
/** Public surface of the hook: an event handler plus an optional chat.message interceptor. */
export interface RuntimeFallbackHook {
  event: (input: { event: { type: string; properties?: unknown } }) => Promise<void>
  "chat.message"?: (input: { sessionID: string; agent?: string; model?: { providerID: string; modelID: string } }, output: { message: { model?: { providerID: string; modelID: string } }; parts?: Array<{ type: string; text?: string }> }) => Promise<void>
}
/** Shared dependencies and mutable per-session state passed to every sub-handler. */
export interface HookDeps {
  ctx: PluginInput
  // Fully-defaulted hook configuration (all fields resolved).
  config: Required<RuntimeFallbackConfig>
  options: RuntimeFallbackOptions | undefined
  pluginConfig: OhMyOpenCodeConfig | undefined
  // sessionID -> fallback chain state.
  sessionStates: Map<string, FallbackState>
  // sessionID -> epoch-ms of last activity (used for stale-session GC).
  sessionLastAccess: Map<string, number>
  // Sessions with an auto-retry currently in flight.
  sessionRetryInFlight: Set<string>
  // Sessions waiting for confirmation that a fallback produced a response.
  sessionAwaitingFallbackResult: Set<string>
  // sessionID -> armed fallback-timeout handle.
  sessionFallbackTimeouts: Map<string, ReturnType<typeof setTimeout>>
}

View File

@@ -5,6 +5,7 @@ import { createChatParamsHandler } from "./plugin/chat-params"
import { createChatHeadersHandler } from "./plugin/chat-headers" import { createChatHeadersHandler } from "./plugin/chat-headers"
import { createChatMessageHandler } from "./plugin/chat-message" import { createChatMessageHandler } from "./plugin/chat-message"
import { createMessagesTransformHandler } from "./plugin/messages-transform" import { createMessagesTransformHandler } from "./plugin/messages-transform"
import { createSystemTransformHandler } from "./plugin/system-transform"
import { createEventHandler } from "./plugin/event" import { createEventHandler } from "./plugin/event"
import { createToolExecuteAfterHandler } from "./plugin/tool-execute-after" import { createToolExecuteAfterHandler } from "./plugin/tool-execute-after"
import { createToolExecuteBeforeHandler } from "./plugin/tool-execute-before" import { createToolExecuteBeforeHandler } from "./plugin/tool-execute-before"
@@ -49,6 +50,10 @@ export function createPluginInterface(args: {
hooks, hooks,
}), }),
"experimental.chat.system.transform": createSystemTransformHandler({
hooks,
}),
config: managers.configHandler, config: managers.configHandler,
event: createEventHandler({ event: createEventHandler({

View File

@@ -2,6 +2,7 @@ import type { OhMyOpenCodeConfig } from "../config"
import type { PluginContext } from "./types" import type { PluginContext } from "./types"
import { hasConnectedProvidersCache } from "../shared" import { hasConnectedProvidersCache } from "../shared"
import { setSessionModel } from "../shared/session-model-state"
import { setSessionAgent } from "../features/claude-code-session-state" import { setSessionAgent } from "../features/claude-code-session-state"
import { applyUltraworkModelOverrideOnMessage } from "./ultrawork-model-override" import { applyUltraworkModelOverrideOnMessage } from "./ultrawork-model-override"
@@ -13,7 +14,12 @@ type FirstMessageVariantGate = {
} }
type ChatMessagePart = { type: string; text?: string; [key: string]: unknown } type ChatMessagePart = { type: string; text?: string; [key: string]: unknown }
type ChatMessageHandlerOutput = { message: Record<string, unknown>; parts: ChatMessagePart[] } export type ChatMessageHandlerOutput = { message: Record<string, unknown>; parts: ChatMessagePart[] }
export type ChatMessageInput = {
sessionID: string
agent?: string
model?: { providerID: string; modelID: string }
}
type StartWorkHookOutput = { parts: Array<{ type: string; text?: string }> } type StartWorkHookOutput = { parts: Array<{ type: string; text?: string }> }
function isStartWorkHookOutput(value: unknown): value is StartWorkHookOutput { function isStartWorkHookOutput(value: unknown): value is StartWorkHookOutput {
@@ -34,13 +40,13 @@ export function createChatMessageHandler(args: {
firstMessageVariantGate: FirstMessageVariantGate firstMessageVariantGate: FirstMessageVariantGate
hooks: CreatedHooks hooks: CreatedHooks
}): ( }): (
input: { sessionID: string; agent?: string; model?: { providerID: string; modelID: string } }, input: ChatMessageInput,
output: ChatMessageHandlerOutput output: ChatMessageHandlerOutput
) => Promise<void> { ) => Promise<void> {
const { ctx, pluginConfig, firstMessageVariantGate, hooks } = args const { ctx, pluginConfig, firstMessageVariantGate, hooks } = args
return async ( return async (
input: { sessionID: string; agent?: string; model?: { providerID: string; modelID: string } }, input: ChatMessageInput,
output: ChatMessageHandlerOutput output: ChatMessageHandlerOutput
): Promise<void> => { ): Promise<void> => {
if (input.agent) { if (input.agent) {
@@ -53,8 +59,23 @@ export function createChatMessageHandler(args: {
firstMessageVariantGate.markApplied(input.sessionID) firstMessageVariantGate.markApplied(input.sessionID)
} }
await hooks.modelFallback?.["chat.message"]?.(input, output)
const modelOverride = output.message["model"]
if (
modelOverride &&
typeof modelOverride === "object" &&
"providerID" in modelOverride &&
"modelID" in modelOverride
) {
const providerID = (modelOverride as { providerID?: string }).providerID
const modelID = (modelOverride as { modelID?: string }).modelID
if (typeof providerID === "string" && typeof modelID === "string") {
setSessionModel(input.sessionID, { providerID, modelID })
}
} else if (input.model) {
setSessionModel(input.sessionID, input.model)
}
await hooks.stopContinuationGuard?.["chat.message"]?.(input) await hooks.stopContinuationGuard?.["chat.message"]?.(input)
await hooks.runtimeFallback?.["chat.message"]?.(input, output)
await hooks.keywordDetector?.["chat.message"]?.(input, output) await hooks.keywordDetector?.["chat.message"]?.(input, output)
await hooks.claudeCodeHooks?.["chat.message"]?.(input, output) await hooks.claudeCodeHooks?.["chat.message"]?.(input, output)
await hooks.autoSlashCommand?.["chat.message"]?.(input, output) await hooks.autoSlashCommand?.["chat.message"]?.(input, output)

View File

@@ -0,0 +1,38 @@
import { describe, expect, test } from "bun:test"
import { createChatParamsHandler } from "./chat-params"
describe("createChatParamsHandler", () => {
  // Regression test: the agent field may arrive as an object ({ name })
  // rather than a plain string; the handler must normalize it before
  // invoking the registered chat.params hooks.
  test("normalizes object-style agent payload and runs chat.params hooks", async () => {
    //#given
    let called = false
    const handler = createChatParamsHandler({
      anthropicEffort: {
        "chat.params": async (input) => {
          // Proves the object-style agent was normalized to { name: string }.
          called = input.agent.name === "sisyphus"
        },
      },
    })
    const input = {
      sessionID: "ses_chat_params",
      agent: { name: "sisyphus" },
      model: { providerID: "opencode", modelID: "claude-opus-4-6" },
      provider: { id: "opencode" },
      message: {},
    }
    const output = {
      temperature: 0.1,
      topP: 1,
      topK: 1,
      options: {},
    }
    //#when
    await handler(input, output)
    //#then
    expect(called).toBe(true)
  })
})

View File

@@ -1,4 +1,4 @@
type ChatParamsInput = { export type ChatParamsInput = {
sessionID: string sessionID: string
agent: { name?: string } agent: { name?: string }
model: { providerID: string; modelID: string } model: { providerID: string; modelID: string }
@@ -6,7 +6,7 @@ type ChatParamsInput = {
message: { variant?: string } message: { variant?: string }
} }
type ChatParamsOutput = { export type ChatParamsOutput = {
temperature?: number temperature?: number
topP?: number topP?: number
topK?: number topK?: number
@@ -27,11 +27,21 @@ function buildChatParamsInput(raw: unknown): ChatParamsInput | null {
const message = raw.message const message = raw.message
if (typeof sessionID !== "string") return null if (typeof sessionID !== "string") return null
if (typeof agent !== "string") return null
if (!isRecord(model)) return null if (!isRecord(model)) return null
if (!isRecord(provider)) return null if (!isRecord(provider)) return null
if (!isRecord(message)) return null if (!isRecord(message)) return null
let agentName: string | undefined
if (typeof agent === "string") {
agentName = agent
} else if (isRecord(agent)) {
const name = agent.name
if (typeof name === "string") {
agentName = name
}
}
if (!agentName) return null
const providerID = model.providerID const providerID = model.providerID
const modelID = model.modelID const modelID = model.modelID
const providerId = provider.id const providerId = provider.id
@@ -43,7 +53,7 @@ function buildChatParamsInput(raw: unknown): ChatParamsInput | null {
return { return {
sessionID, sessionID,
agent: { name: agent }, agent: { name: agentName },
model: { providerID, modelID }, model: { providerID, modelID },
provider: { id: providerId }, provider: { id: providerId },
message: typeof variant === "string" ? { variant } : {}, message: typeof variant === "string" ? { variant } : {},

View File

@@ -0,0 +1,343 @@
import { afterEach, describe, expect, test } from "bun:test"
import { createEventHandler } from "./event"
import { createChatMessageHandler } from "./chat-message"
import { _resetForTesting, setMainSession } from "../features/claude-code-session-state"
import { createModelFallbackHook, clearPendingModelFallback } from "../hooks/model-fallback/hook"
describe("createEventHandler - model fallback", () => {
  // Builds an event handler with a stubbed client that records which
  // sessions were aborted and re-prompted, so tests can assert the retry
  // side effects without a real server.
  const createHandler = (args?: { hooks?: any }) => {
    const abortCalls: string[] = []
    const promptCalls: string[] = []
    const handler = createEventHandler({
      ctx: {
        directory: "/tmp",
        client: {
          session: {
            abort: async ({ path }: { path: { id: string } }) => {
              abortCalls.push(path.id)
              return {}
            },
            prompt: async ({ path }: { path: { id: string } }) => {
              promptCalls.push(path.id)
              return {}
            },
          },
        },
      } as any,
      pluginConfig: {} as any,
      firstMessageVariantGate: {
        markSessionCreated: () => {},
        clear: () => {},
      },
      managers: {
        tmuxSessionManager: {
          onSessionCreated: async () => {},
          onSessionDeleted: async () => {},
        },
        skillMcpManager: {
          disconnectSession: async () => {},
        },
      } as any,
      hooks: args?.hooks ?? ({} as any),
    })
    return { handler, abortCalls, promptCalls }
  }
  afterEach(() => {
    // Reset module-level session state so tests stay independent.
    _resetForTesting()
  })
  test("triggers retry prompt for assistant message.updated APIError payloads (headless resume)", async () => {
    //#given
    const sessionID = "ses_message_updated_fallback"
    const { handler, abortCalls, promptCalls } = createHandler()
    //#when
    await handler({
      event: {
        type: "message.updated",
        properties: {
          info: {
            id: "msg_err_1",
            sessionID,
            role: "assistant",
            time: { created: 1, completed: 2 },
            error: {
              name: "APIError",
              data: {
                message:
                  "Bad Gateway: {\"error\":{\"message\":\"unknown provider for model claude-opus-4-6-thinking\"}}",
                isRetryable: true,
              },
            },
            parentID: "msg_user_1",
            modelID: "claude-opus-4-6-thinking",
            providerID: "quotio",
            mode: "Sisyphus (Ultraworker)",
            agent: "Sisyphus (Ultraworker)",
            path: { cwd: "/tmp", root: "/tmp" },
            cost: 0,
            tokens: { input: 0, output: 0, reasoning: 0, cache: { read: 0, write: 0 } },
          },
        },
      },
    })
    //#then
    expect(abortCalls).toEqual([sessionID])
    expect(promptCalls).toEqual([sessionID])
  })
  test("triggers retry prompt for nested model error payloads", async () => {
    //#given
    const sessionID = "ses_main_fallback_nested"
    setMainSession(sessionID)
    const { handler, abortCalls, promptCalls } = createHandler()
    //#when — error message buried at data.error.message must still be found
    await handler({
      event: {
        type: "session.error",
        properties: {
          sessionID,
          error: {
            name: "UnknownError",
            data: {
              error: {
                message:
                  "Bad Gateway: {\"error\":{\"message\":\"unknown provider for model claude-opus-4-6-thinking\"}}",
              },
            },
          },
        },
      },
    })
    //#then
    expect(abortCalls).toEqual([sessionID])
    expect(promptCalls).toEqual([sessionID])
  })
  test("triggers retry prompt on session.status retry events and applies fallback", async () => {
    //#given
    const sessionID = "ses_status_retry_fallback"
    setMainSession(sessionID)
    clearPendingModelFallback(sessionID)
    const modelFallback = createModelFallbackHook()
    const { handler, abortCalls, promptCalls } = createHandler({ hooks: { modelFallback } })
    const chatMessageHandler = createChatMessageHandler({
      ctx: {
        client: {
          tui: {
            showToast: async () => ({}),
          },
        },
      } as any,
      pluginConfig: {} as any,
      firstMessageVariantGate: {
        shouldOverride: () => false,
        markApplied: () => {},
      },
      hooks: {
        modelFallback,
        stopContinuationGuard: null,
        keywordDetector: null,
        claudeCodeHooks: null,
        autoSlashCommand: null,
        startWork: null,
        ralphLoop: null,
      } as any,
    })
    // Seed the session's model/agent context via a user message first.
    await handler({
      event: {
        type: "message.updated",
        properties: {
          info: {
            id: "msg_user_status_1",
            sessionID,
            role: "user",
            time: { created: 1 },
            content: [],
            modelID: "claude-opus-4-6-thinking",
            providerID: "quotio",
            agent: "Sisyphus (Ultraworker)",
            path: { cwd: "/tmp", root: "/tmp" },
          },
        },
      },
    })
    //#when
    await handler({
      event: {
        type: "session.status",
        properties: {
          sessionID,
          status: {
            type: "retry",
            attempt: 1,
            message:
              "Bad Gateway: {\"error\":{\"message\":\"unknown provider for model claude-opus-4-6-thinking\"}}",
            next: 1234,
          },
        },
      },
    })
    const output = { message: {}, parts: [] as Array<{ type: string; text?: string }> }
    await chatMessageHandler(
      {
        sessionID,
        agent: "sisyphus",
        model: { providerID: "quotio", modelID: "claude-opus-4-6-thinking" },
      },
      output,
    )
    //#then — fallback model and variant are written onto the outgoing message
    expect(abortCalls).toEqual([sessionID])
    expect(promptCalls).toEqual([sessionID])
    expect(output.message["model"]).toEqual({
      providerID: "quotio",
      modelID: "claude-opus-4-6",
    })
    expect(output.message["variant"]).toBe("max")
  })
  test("advances main-session fallback chain across repeated session.error retries end-to-end", async () => {
    //#given
    const abortCalls: string[] = []
    const promptCalls: string[] = []
    const toastCalls: string[] = []
    const sessionID = "ses_main_fallback_chain"
    setMainSession(sessionID)
    clearPendingModelFallback(sessionID)
    const modelFallback = createModelFallbackHook()
    const eventHandler = createEventHandler({
      ctx: {
        directory: "/tmp",
        client: {
          session: {
            abort: async ({ path }: { path: { id: string } }) => {
              abortCalls.push(path.id)
              return {}
            },
            prompt: async ({ path }: { path: { id: string } }) => {
              promptCalls.push(path.id)
              return {}
            },
          },
        },
      } as any,
      pluginConfig: {} as any,
      firstMessageVariantGate: {
        markSessionCreated: () => {},
        clear: () => {},
      },
      managers: {
        tmuxSessionManager: {
          onSessionCreated: async () => {},
          onSessionDeleted: async () => {},
        },
        skillMcpManager: {
          disconnectSession: async () => {},
        },
      } as any,
      hooks: {
        modelFallback,
      } as any,
    })
    const chatMessageHandler = createChatMessageHandler({
      ctx: {
        client: {
          tui: {
            showToast: async ({ body }: { body: { title?: string } }) => {
              if (body?.title) toastCalls.push(body.title)
              return {}
            },
          },
        },
      } as any,
      pluginConfig: {} as any,
      firstMessageVariantGate: {
        shouldOverride: () => false,
        markApplied: () => {},
      },
      hooks: {
        modelFallback,
        stopContinuationGuard: null,
        keywordDetector: null,
        claudeCodeHooks: null,
        autoSlashCommand: null,
        startWork: null,
        ralphLoop: null,
      } as any,
    })
    // One full error -> retry -> outgoing-message cycle; returns the output
    // so the applied fallback model can be inspected.
    const triggerRetryCycle = async () => {
      await eventHandler({
        event: {
          type: "session.error",
          properties: {
            sessionID,
            providerID: "quotio",
            modelID: "claude-opus-4-6-thinking",
            error: {
              name: "UnknownError",
              data: {
                error: {
                  message:
                    "Bad Gateway: {\"error\":{\"message\":\"unknown provider for model claude-opus-4-6-thinking\"}}",
                },
              },
            },
          },
        },
      })
      const output = { message: {}, parts: [] as Array<{ type: string; text?: string }> }
      await chatMessageHandler(
        {
          sessionID,
          agent: "sisyphus",
          model: { providerID: "quotio", modelID: "claude-opus-4-6-thinking" },
        },
        output,
      )
      return output
    }
    //#when - first retry cycle
    const first = await triggerRetryCycle()
    //#then - first fallback entry applied (prefers current provider when available)
    expect(first.message["model"]).toEqual({
      providerID: "quotio",
      modelID: "claude-opus-4-6",
    })
    expect(first.message["variant"]).toBe("max")
    //#when - second retry cycle
    const second = await triggerRetryCycle()
    //#then - second fallback entry applied (chain advanced)
    expect(second.message["model"]).toEqual({
      providerID: "quotio",
      modelID: "gpt-5.3-codex",
    })
    expect(second.message["variant"]).toBe("high")
    expect(abortCalls).toEqual([sessionID, sessionID])
    expect(promptCalls).toEqual([sessionID, sessionID])
    expect(toastCalls.length).toBeGreaterThanOrEqual(0)
  })
})

View File

@@ -4,11 +4,17 @@ import type { PluginContext } from "./types"
import { import {
clearSessionAgent, clearSessionAgent,
getMainSessionID, getMainSessionID,
getSessionAgent,
subagentSessions,
syncSubagentSessions,
setMainSession, setMainSession,
updateSessionAgent, updateSessionAgent,
} from "../features/claude-code-session-state" } from "../features/claude-code-session-state"
import { resetMessageCursor } from "../shared" import { resetMessageCursor } from "../shared"
import { lspManager } from "../tools" import { lspManager } from "../tools"
import { shouldRetryError } from "../shared/model-error-classifier"
import { clearPendingModelFallback, clearSessionFallbackChain, setPendingModelFallback } from "../hooks/model-fallback/hook"
import { clearSessionModel, setSessionModel } from "../shared/session-model-state"
import type { CreatedHooks } from "../create-hooks" import type { CreatedHooks } from "../create-hooks"
import type { Managers } from "../create-managers" import type { Managers } from "../create-managers"
@@ -20,9 +26,73 @@ type FirstMessageVariantGate = {
clear: (sessionID: string) => void clear: (sessionID: string) => void
} }
type EventInput = Parameters< function isRecord(value: unknown): value is Record<string, unknown> {
NonNullable<NonNullable<CreatedHooks["writeExistingFileGuard"]>["event"]> return typeof value === "object" && value !== null
>[0] }
function normalizeFallbackModelID(modelID: string): string {
return modelID
.replace(/-thinking$/i, "")
.replace(/-max$/i, "")
.replace(/-high$/i, "")
}
function extractErrorName(error: unknown): string | undefined {
if (isRecord(error) && typeof error.name === "string") return error.name
if (error instanceof Error) return error.name
return undefined
}
function extractErrorMessage(error: unknown): string {
if (!error) return ""
if (typeof error === "string") return error
if (error instanceof Error) return error.message
if (isRecord(error)) {
const candidates: unknown[] = [
error,
error.data,
error.error,
isRecord(error.data) ? error.data.error : undefined,
error.cause,
]
for (const candidate of candidates) {
if (isRecord(candidate) && typeof candidate.message === "string" && candidate.message.length > 0) {
return candidate.message
}
}
}
try {
return JSON.stringify(error)
} catch {
return String(error)
}
}
function extractProviderModelFromErrorMessage(
message: string,
): { providerID?: string; modelID?: string } {
const lower = message.toLowerCase()
const providerModel = lower.match(/model\s+not\s+found:\s*([a-z0-9_-]+)\s*\/\s*([a-z0-9._-]+)/i)
if (providerModel) {
return {
providerID: providerModel[1],
modelID: providerModel[2],
}
}
const modelOnly = lower.match(/unknown\s+provider\s+for\s+model\s+([a-z0-9._-]+)/i)
if (modelOnly) {
return {
modelID: modelOnly[1],
}
}
return {}
}
export function createEventHandler(args: { export function createEventHandler(args: {
ctx: PluginContext ctx: PluginContext
@@ -30,10 +100,15 @@ export function createEventHandler(args: {
firstMessageVariantGate: FirstMessageVariantGate firstMessageVariantGate: FirstMessageVariantGate
managers: Managers managers: Managers
hooks: CreatedHooks hooks: CreatedHooks
}): (input: EventInput) => Promise<void> { }): (input: { event: { type: string; properties?: Record<string, unknown> } }) => Promise<void> {
const { ctx, firstMessageVariantGate, managers, hooks } = args const { ctx, firstMessageVariantGate, managers, hooks } = args
const dispatchToHooks = async (input: EventInput): Promise<void> => { // Avoid triggering multiple abort+continue cycles for the same failing assistant message.
const lastHandledModelErrorMessageID = new Map<string, string>()
const lastHandledRetryStatusKey = new Map<string, string>()
const lastKnownModelBySession = new Map<string, { providerID: string; modelID: string }>()
const dispatchToHooks = async (input: { event: { type: string; properties?: Record<string, unknown> } }): Promise<void> => {
await Promise.resolve(hooks.autoUpdateChecker?.event?.(input)) await Promise.resolve(hooks.autoUpdateChecker?.event?.(input))
await Promise.resolve(hooks.claudeCodeHooks?.event?.(input)) await Promise.resolve(hooks.claudeCodeHooks?.event?.(input))
await Promise.resolve(hooks.backgroundNotificationHook?.event?.(input)) await Promise.resolve(hooks.backgroundNotificationHook?.event?.(input))
@@ -46,10 +121,9 @@ export function createEventHandler(args: {
await Promise.resolve(hooks.rulesInjector?.event?.(input)) await Promise.resolve(hooks.rulesInjector?.event?.(input))
await Promise.resolve(hooks.thinkMode?.event?.(input)) await Promise.resolve(hooks.thinkMode?.event?.(input))
await Promise.resolve(hooks.anthropicContextWindowLimitRecovery?.event?.(input)) await Promise.resolve(hooks.anthropicContextWindowLimitRecovery?.event?.(input))
await Promise.resolve(hooks.runtimeFallback?.event?.(input))
await Promise.resolve(hooks.agentUsageReminder?.event?.(input)) await Promise.resolve(hooks.agentUsageReminder?.event?.(input))
await Promise.resolve(hooks.categorySkillReminder?.event?.(input)) await Promise.resolve(hooks.categorySkillReminder?.event?.(input))
await Promise.resolve(hooks.interactiveBashSession?.event?.(input as EventInput)) await Promise.resolve(hooks.interactiveBashSession?.event?.(input))
await Promise.resolve(hooks.ralphLoop?.event?.(input)) await Promise.resolve(hooks.ralphLoop?.event?.(input))
await Promise.resolve(hooks.stopContinuationGuard?.event?.(input)) await Promise.resolve(hooks.stopContinuationGuard?.event?.(input))
await Promise.resolve(hooks.compactionTodoPreserver?.event?.(input)) await Promise.resolve(hooks.compactionTodoPreserver?.event?.(input))
@@ -61,6 +135,15 @@ export function createEventHandler(args: {
const recentRealIdles = new Map<string, number>() const recentRealIdles = new Map<string, number>()
const DEDUP_WINDOW_MS = 500 const DEDUP_WINDOW_MS = 500
const shouldAutoRetrySession = (sessionID: string): boolean => {
if (syncSubagentSessions.has(sessionID)) return true
const mainSessionID = getMainSessionID()
if (mainSessionID) return sessionID === mainSessionID
// Headless runs (or resumed sessions) may not emit session.created, so mainSessionID can be unset.
// In that case, treat any non-subagent session as the "main" interactive session.
return !subagentSessions.has(sessionID)
}
return async (input): Promise<void> => { return async (input): Promise<void> => {
pruneRecentSyntheticIdles({ pruneRecentSyntheticIdles({
recentSyntheticIdles, recentSyntheticIdles,
@@ -92,7 +175,7 @@ export function createEventHandler(args: {
return return
} }
recentSyntheticIdles.set(sessionID, Date.now()) recentSyntheticIdles.set(sessionID, Date.now())
await dispatchToHooks(syntheticIdle as EventInput) await dispatchToHooks(syntheticIdle)
} }
const { event } = input const { event } = input
@@ -127,8 +210,15 @@ export function createEventHandler(args: {
if (sessionInfo?.id) { if (sessionInfo?.id) {
clearSessionAgent(sessionInfo.id) clearSessionAgent(sessionInfo.id)
lastHandledModelErrorMessageID.delete(sessionInfo.id)
lastHandledRetryStatusKey.delete(sessionInfo.id)
lastKnownModelBySession.delete(sessionInfo.id)
clearPendingModelFallback(sessionInfo.id)
clearSessionFallbackChain(sessionInfo.id)
resetMessageCursor(sessionInfo.id) resetMessageCursor(sessionInfo.id)
firstMessageVariantGate.clear(sessionInfo.id) firstMessageVariantGate.clear(sessionInfo.id)
clearSessionModel(sessionInfo.id)
syncSubagentSessions.delete(sessionInfo.id)
await managers.skillMcpManager.disconnectSession(sessionInfo.id) await managers.skillMcpManager.disconnectSession(sessionInfo.id)
await lspManager.cleanupTempDirectoryClients() await lspManager.cleanupTempDirectoryClients()
await managers.tmuxSessionManager.onSessionDeleted({ await managers.tmuxSessionManager.onSessionDeleted({
@@ -142,15 +232,141 @@ export function createEventHandler(args: {
const sessionID = info?.sessionID as string | undefined const sessionID = info?.sessionID as string | undefined
const agent = info?.agent as string | undefined const agent = info?.agent as string | undefined
const role = info?.role as string | undefined const role = info?.role as string | undefined
if (sessionID && agent && role === "user") { if (sessionID && role === "user") {
if (agent) {
updateSessionAgent(sessionID, agent) updateSessionAgent(sessionID, agent)
} }
const providerID = info?.providerID as string | undefined
const modelID = info?.modelID as string | undefined
if (providerID && modelID) {
lastKnownModelBySession.set(sessionID, { providerID, modelID })
setSessionModel(sessionID, { providerID, modelID })
}
}
// Model fallback: in practice, API/model failures often surface as assistant message errors.
// session.error events are not guaranteed for all providers, so we also observe message.updated.
if (sessionID && role === "assistant") {
const assistantMessageID = info?.id as string | undefined
const assistantError = info?.error
if (assistantMessageID && assistantError) {
const lastHandled = lastHandledModelErrorMessageID.get(sessionID)
if (lastHandled === assistantMessageID) {
return
}
const errorName = extractErrorName(assistantError)
const errorMessage = extractErrorMessage(assistantError)
const errorInfo = { name: errorName, message: errorMessage }
if (shouldRetryError(errorInfo)) {
// Prefer the agent/model/provider from the assistant message payload.
let agentName = agent ?? getSessionAgent(sessionID)
if (!agentName && sessionID === getMainSessionID()) {
if (errorMessage.includes("claude-opus") || errorMessage.includes("opus")) {
agentName = "sisyphus"
} else if (errorMessage.includes("gpt-5")) {
agentName = "hephaestus"
} else {
agentName = "sisyphus"
}
}
if (agentName) {
const currentProvider = (info?.providerID as string | undefined) ?? "opencode"
const rawModel = (info?.modelID as string | undefined) ?? "claude-opus-4-6"
const currentModel = normalizeFallbackModelID(rawModel)
const setFallback = setPendingModelFallback(
sessionID,
agentName,
currentProvider,
currentModel,
)
if (setFallback && shouldAutoRetrySession(sessionID) && !hooks.stopContinuationGuard?.isStopped(sessionID)) {
lastHandledModelErrorMessageID.set(sessionID, assistantMessageID)
await ctx.client.session.abort({ path: { id: sessionID } }).catch(() => {})
await ctx.client.session
.prompt({
path: { id: sessionID },
body: { parts: [{ type: "text", text: "continue" }] },
query: { directory: ctx.directory },
})
.catch(() => {})
}
}
}
}
}
}
if (event.type === "session.status") {
const sessionID = props?.sessionID as string | undefined
const status = props?.status as
| { type?: string; attempt?: number; message?: string; next?: number }
| undefined
if (sessionID && status?.type === "retry") {
const retryMessage = typeof status.message === "string" ? status.message : ""
const retryKey = `${status.attempt ?? "?"}:${status.next ?? "?"}:${retryMessage}`
if (lastHandledRetryStatusKey.get(sessionID) === retryKey) {
return
}
lastHandledRetryStatusKey.set(sessionID, retryKey)
const errorInfo = { name: undefined, message: retryMessage }
if (shouldRetryError(errorInfo)) {
let agentName = getSessionAgent(sessionID)
if (!agentName && sessionID === getMainSessionID()) {
if (retryMessage.includes("claude-opus") || retryMessage.includes("opus")) {
agentName = "sisyphus"
} else if (retryMessage.includes("gpt-5")) {
agentName = "hephaestus"
} else {
agentName = "sisyphus"
}
}
if (agentName) {
const parsed = extractProviderModelFromErrorMessage(retryMessage)
const lastKnown = lastKnownModelBySession.get(sessionID)
const currentProvider = parsed.providerID ?? lastKnown?.providerID ?? "opencode"
let currentModel = parsed.modelID ?? lastKnown?.modelID ?? "claude-opus-4-6"
currentModel = normalizeFallbackModelID(currentModel)
const setFallback = setPendingModelFallback(
sessionID,
agentName,
currentProvider,
currentModel,
)
if (setFallback && shouldAutoRetrySession(sessionID) && !hooks.stopContinuationGuard?.isStopped(sessionID)) {
await ctx.client.session.abort({ path: { id: sessionID } }).catch(() => {})
await ctx.client.session
.prompt({
path: { id: sessionID },
body: { parts: [{ type: "text", text: "continue" }] },
query: { directory: ctx.directory },
})
.catch(() => {})
}
}
}
}
} }
if (event.type === "session.error") { if (event.type === "session.error") {
const sessionID = props?.sessionID as string | undefined const sessionID = props?.sessionID as string | undefined
const error = props?.error const error = props?.error
const errorName = extractErrorName(error)
const errorMessage = extractErrorMessage(error)
const errorInfo = { name: errorName, message: errorMessage }
// First, try session recovery for internal errors (thinking blocks, tool results, etc.)
if (hooks.sessionRecovery?.isRecoverableError(error)) { if (hooks.sessionRecovery?.isRecoverableError(error)) {
const messageInfo = { const messageInfo = {
id: props?.messageID as string | undefined, id: props?.messageID as string | undefined,
@@ -175,6 +391,52 @@ export function createEventHandler(args: {
.catch(() => {}) .catch(() => {})
} }
} }
// Second, try model fallback for model errors (rate limit, quota, provider issues, etc.)
else if (sessionID && shouldRetryError(errorInfo)) {
// Get the current agent for this session, or default to "sisyphus" for main sessions
let agentName = getSessionAgent(sessionID)
// For main sessions, if no agent is set, try to infer from the error or default to sisyphus
if (!agentName && sessionID === getMainSessionID()) {
// Try to infer agent from model in error message
if (errorMessage.includes("claude-opus") || errorMessage.includes("opus")) {
agentName = "sisyphus"
} else if (errorMessage.includes("gpt-5")) {
agentName = "hephaestus"
} else {
// Default to sisyphus for main session errors
agentName = "sisyphus"
}
}
if (agentName) {
const parsed = extractProviderModelFromErrorMessage(errorMessage)
const currentProvider = props?.providerID as string || parsed.providerID || "opencode"
let currentModel = props?.modelID as string || parsed.modelID || "claude-opus-4-6"
currentModel = normalizeFallbackModelID(currentModel)
// Try to set pending model fallback
const setFallback = setPendingModelFallback(
sessionID,
agentName,
currentProvider,
currentModel,
)
if (setFallback && shouldAutoRetrySession(sessionID) && !hooks.stopContinuationGuard?.isStopped(sessionID)) {
// Abort the current session and prompt with "continue" to trigger the fallback
await ctx.client.session.abort({ path: { id: sessionID } }).catch(() => {})
await ctx.client.session
.prompt({
path: { id: sessionID },
body: { parts: [{ type: "text", text: "continue" }] },
query: { directory: ctx.directory },
})
.catch(() => {})
}
}
}
} }
} }
} }

View File

@@ -7,6 +7,7 @@ import {
createSessionRecoveryHook, createSessionRecoveryHook,
createSessionNotification, createSessionNotification,
createThinkModeHook, createThinkModeHook,
createModelFallbackHook,
createAnthropicContextWindowLimitRecoveryHook, createAnthropicContextWindowLimitRecoveryHook,
createAutoUpdateCheckerHook, createAutoUpdateCheckerHook,
createAgentUsageReminderHook, createAgentUsageReminderHook,
@@ -24,13 +25,13 @@ import {
createNoHephaestusNonGptHook, createNoHephaestusNonGptHook,
createQuestionLabelTruncatorHook, createQuestionLabelTruncatorHook,
createPreemptiveCompactionHook, createPreemptiveCompactionHook,
createRuntimeFallbackHook,
} from "../../hooks" } from "../../hooks"
import { createAnthropicEffortHook } from "../../hooks/anthropic-effort" import { createAnthropicEffortHook } from "../../hooks/anthropic-effort"
import { import {
detectExternalNotificationPlugin, detectExternalNotificationPlugin,
getNotificationConflictWarning, getNotificationConflictWarning,
log, log,
normalizeSDKResponse,
} from "../../shared" } from "../../shared"
import { safeCreateHook } from "../../shared/safe-create-hook" import { safeCreateHook } from "../../shared/safe-create-hook"
import { sessionExists } from "../../tools" import { sessionExists } from "../../tools"
@@ -41,6 +42,7 @@ export type SessionHooks = {
sessionRecovery: ReturnType<typeof createSessionRecoveryHook> | null sessionRecovery: ReturnType<typeof createSessionRecoveryHook> | null
sessionNotification: ReturnType<typeof createSessionNotification> | null sessionNotification: ReturnType<typeof createSessionNotification> | null
thinkMode: ReturnType<typeof createThinkModeHook> | null thinkMode: ReturnType<typeof createThinkModeHook> | null
modelFallback: ReturnType<typeof createModelFallbackHook> | null
anthropicContextWindowLimitRecovery: ReturnType<typeof createAnthropicContextWindowLimitRecoveryHook> | null anthropicContextWindowLimitRecovery: ReturnType<typeof createAnthropicContextWindowLimitRecoveryHook> | null
autoUpdateChecker: ReturnType<typeof createAutoUpdateCheckerHook> | null autoUpdateChecker: ReturnType<typeof createAutoUpdateCheckerHook> | null
agentUsageReminder: ReturnType<typeof createAgentUsageReminderHook> | null agentUsageReminder: ReturnType<typeof createAgentUsageReminderHook> | null
@@ -58,7 +60,6 @@ export type SessionHooks = {
questionLabelTruncator: ReturnType<typeof createQuestionLabelTruncatorHook> questionLabelTruncator: ReturnType<typeof createQuestionLabelTruncatorHook>
taskResumeInfo: ReturnType<typeof createTaskResumeInfoHook> taskResumeInfo: ReturnType<typeof createTaskResumeInfoHook>
anthropicEffort: ReturnType<typeof createAnthropicEffortHook> | null anthropicEffort: ReturnType<typeof createAnthropicEffortHook> | null
runtimeFallback: ReturnType<typeof createRuntimeFallbackHook> | null
} }
export function createSessionHooks(args: { export function createSessionHooks(args: {
@@ -104,6 +105,73 @@ export function createSessionHooks(args: {
? safeHook("think-mode", () => createThinkModeHook()) ? safeHook("think-mode", () => createThinkModeHook())
: null : null
const enableFallbackTitle = pluginConfig.experimental?.model_fallback_title ?? false
const fallbackTitleMaxEntries = 200
const fallbackTitleState = new Map<string, { baseTitle?: string; lastKey?: string }>()
const updateFallbackTitle = async (input: {
sessionID: string
providerID: string
modelID: string
variant?: string
}) => {
if (!enableFallbackTitle) return
const key = `${input.providerID}/${input.modelID}${input.variant ? `:${input.variant}` : ""}`
const existing = fallbackTitleState.get(input.sessionID) ?? {}
if (existing.lastKey === key) return
if (!existing.baseTitle) {
const sessionResp = await ctx.client.session.get({ path: { id: input.sessionID } }).catch(() => null)
const sessionInfo = sessionResp
? normalizeSDKResponse(sessionResp, null as { title?: string } | null, { preferResponseOnMissingData: true })
: null
const rawTitle = sessionInfo?.title
if (typeof rawTitle === "string" && rawTitle.length > 0) {
existing.baseTitle = rawTitle.replace(/\s*\[fallback:[^\]]+\]$/i, "").trim()
} else {
existing.baseTitle = "Session"
}
}
const variantLabel = input.variant ? ` ${input.variant}` : ""
const newTitle = `${existing.baseTitle} [fallback: ${input.providerID}/${input.modelID}${variantLabel}]`
await ctx.client.session
.update({
path: { id: input.sessionID },
body: { title: newTitle },
query: { directory: ctx.directory },
})
.catch(() => {})
existing.lastKey = key
fallbackTitleState.set(input.sessionID, existing)
if (fallbackTitleState.size > fallbackTitleMaxEntries) {
const oldestKey = fallbackTitleState.keys().next().value
if (oldestKey) fallbackTitleState.delete(oldestKey)
}
}
// Model fallback hook (configurable via disabled_hooks)
// This handles automatic model switching when model errors occur
const modelFallback = isHookEnabled("model-fallback")
? safeHook("model-fallback", () =>
createModelFallbackHook({
toast: async ({ title, message, variant, duration }) => {
await ctx.client.tui
.showToast({
body: {
title,
message,
variant: variant ?? "warning",
duration: duration ?? 5000,
},
})
.catch(() => {})
},
onApplied: enableFallbackTitle ? updateFallbackTitle : undefined,
}))
: null
const anthropicContextWindowLimitRecovery = isHookEnabled("anthropic-context-window-limit-recovery") const anthropicContextWindowLimitRecovery = isHookEnabled("anthropic-context-window-limit-recovery")
? safeHook("anthropic-context-window-limit-recovery", () => ? safeHook("anthropic-context-window-limit-recovery", () =>
createAnthropicContextWindowLimitRecoveryHook(ctx, { experimental: pluginConfig.experimental })) createAnthropicContextWindowLimitRecoveryHook(ctx, { experimental: pluginConfig.experimental }))
@@ -177,19 +245,13 @@ export function createSessionHooks(args: {
? safeHook("anthropic-effort", () => createAnthropicEffortHook()) ? safeHook("anthropic-effort", () => createAnthropicEffortHook())
: null : null
const runtimeFallback = isHookEnabled("runtime-fallback")
? safeHook("runtime-fallback", () =>
createRuntimeFallbackHook(ctx, {
config: pluginConfig.runtime_fallback,
pluginConfig,
}))
: null
return { return {
contextWindowMonitor, contextWindowMonitor,
preemptiveCompaction, preemptiveCompaction,
sessionRecovery, sessionRecovery,
sessionNotification, sessionNotification,
thinkMode, thinkMode,
modelFallback,
anthropicContextWindowLimitRecovery, anthropicContextWindowLimitRecovery,
autoUpdateChecker, autoUpdateChecker,
agentUsageReminder, agentUsageReminder,
@@ -207,6 +269,5 @@ export function createSessionHooks(args: {
questionLabelTruncator, questionLabelTruncator,
taskResumeInfo, taskResumeInfo,
anthropicEffort, anthropicEffort,
runtimeFallback,
} }
} }

View File

@@ -5,6 +5,7 @@ import {
createClaudeCodeHooksHook, createClaudeCodeHooksHook,
createKeywordDetectorHook, createKeywordDetectorHook,
createThinkingBlockValidatorHook, createThinkingBlockValidatorHook,
createBeastModeSystemHook,
} from "../../hooks" } from "../../hooks"
import { import {
contextCollector, contextCollector,
@@ -17,6 +18,7 @@ export type TransformHooks = {
keywordDetector: ReturnType<typeof createKeywordDetectorHook> | null keywordDetector: ReturnType<typeof createKeywordDetectorHook> | null
contextInjectorMessagesTransform: ReturnType<typeof createContextInjectorMessagesTransformHook> contextInjectorMessagesTransform: ReturnType<typeof createContextInjectorMessagesTransformHook>
thinkingBlockValidator: ReturnType<typeof createThinkingBlockValidatorHook> | null thinkingBlockValidator: ReturnType<typeof createThinkingBlockValidatorHook> | null
beastModeSystem: ReturnType<typeof createBeastModeSystemHook> | null
} }
export function createTransformHooks(args: { export function createTransformHooks(args: {
@@ -56,10 +58,19 @@ export function createTransformHooks(args: {
) )
: null : null
const beastModeSystem = isHookEnabled("beast-mode-system")
? safeCreateHook(
"beast-mode-system",
() => createBeastModeSystemHook(),
{ enabled: safeHookEnabled },
)
: null
return { return {
claudeCodeHooks, claudeCodeHooks,
keywordDetector, keywordDetector,
contextInjectorMessagesTransform, contextInjectorMessagesTransform,
thinkingBlockValidator, thinkingBlockValidator,
beastModeSystem,
} }
} }

View File

@@ -0,0 +1,12 @@
import type { CreatedHooks } from "../create-hooks"
/**
 * Builds the `experimental.chat.system.transform` dispatcher.
 *
 * The returned handler forwards the (input, output) pair to the beast-mode
 * system hook when that hook is present; with no hook configured it is a
 * no-op that still resolves.
 */
export function createSystemTransformHandler(args: {
  hooks: CreatedHooks
}): (input: { sessionID: string }, output: { system: string[] }) => Promise<void> {
  const { hooks } = args
  return async (input, output): Promise<void> => {
    const transform = hooks.beastModeSystem?.["experimental.chat.system.transform"]
    if (transform) {
      await transform(input, output)
    }
  }
}

View File

@@ -75,6 +75,14 @@ function findVariantInChain(
return entry.variant return entry.variant
} }
} }
// Some providers expose identical model IDs (e.g. OpenAI models via different providers).
// If we didn't find an exact provider+model match, fall back to model-only matching.
for (const entry of fallbackChain) {
if (entry.model === currentModel.modelID) {
return entry.variant
}
}
return undefined return undefined
} }

View File

@@ -34,7 +34,6 @@ export * from "./system-directive"
export * from "./agent-tool-restrictions" export * from "./agent-tool-restrictions"
export * from "./model-requirements" export * from "./model-requirements"
export * from "./model-resolver" export * from "./model-resolver"
export { normalizeFallbackModels } from "./model-resolver"
export { resolveModelPipeline } from "./model-resolution-pipeline" export { resolveModelPipeline } from "./model-resolution-pipeline"
export type { export type {
ModelResolutionRequest, ModelResolutionRequest,
@@ -59,4 +58,3 @@ export * from "./normalize-sdk-response"
export * from "./session-directory-resolver" export * from "./session-directory-resolver"
export * from "./prompt-tools" export * from "./prompt-tools"
export * from "./internal-initiator-marker" export * from "./internal-initiator-marker"
export { SessionCategoryRegistry } from "./session-category-registry"

View File

@@ -0,0 +1,76 @@
import { describe, expect, test, beforeEach, afterEach, spyOn } from "bun:test"
import { mkdirSync, rmSync, writeFileSync, existsSync } from "node:fs"
import { join } from "node:path"
import * as dataPath from "./data-path"
import { shouldRetryError, selectFallbackProvider } from "./model-error-classifier"
const TEST_CACHE_DIR = join(import.meta.dir, "__test-cache__")

describe("model-error-classifier", () => {
  let cacheDirSpy: ReturnType<typeof spyOn>

  // Remove the scratch cache directory (tolerates it not existing).
  const wipeCacheDir = () => {
    if (existsSync(TEST_CACHE_DIR)) {
      rmSync(TEST_CACHE_DIR, { recursive: true })
    }
  }

  // Write a connected-providers cache file for the classifier to read.
  const writeConnectedCache = (connected: string[]) => {
    writeFileSync(
      join(TEST_CACHE_DIR, "connected-providers.json"),
      JSON.stringify({ connected, updatedAt: new Date().toISOString() }, null, 2),
    )
  }

  beforeEach(() => {
    // Redirect the cache-dir lookup into an isolated scratch directory.
    cacheDirSpy = spyOn(dataPath, "getOmoOpenCodeCacheDir").mockReturnValue(TEST_CACHE_DIR)
    wipeCacheDir()
    mkdirSync(TEST_CACHE_DIR, { recursive: true })
  })

  afterEach(() => {
    cacheDirSpy.mockRestore()
    wipeCacheDir()
  })

  test("treats overloaded retry messages as retryable", () => {
    //#given - an error whose message matches a retryable pattern
    const overloaded = { message: "Provider is overloaded" }
    //#when - classified
    const retryable = shouldRetryError(overloaded)
    //#then - classifier reports it retryable
    expect(retryable).toBe(true)
  })

  test("selectFallbackProvider prefers first connected provider in preference order", () => {
    //#given - both providers are marked connected in the cache
    writeConnectedCache(["quotio", "nvidia"])
    //#when - selecting from the preference order
    const chosen = selectFallbackProvider(["quotio", "nvidia"], "nvidia")
    //#then - first preference wins over the preferred hint
    expect(chosen).toBe("quotio")
  })

  test("selectFallbackProvider falls back to next connected provider when first is disconnected", () => {
    //#given - only the second preference is connected
    writeConnectedCache(["nvidia"])
    //#when - selecting from the preference order
    const chosen = selectFallbackProvider(["quotio", "nvidia"])
    //#then - the first *connected* provider is chosen
    expect(chosen).toBe("nvidia")
  })

  test("selectFallbackProvider uses provider preference order when cache is missing", () => {
    //#given - no cache file on disk
    //#when - selecting from the preference order
    const chosen = selectFallbackProvider(["quotio", "nvidia"], "nvidia")
    //#then - falls back to the first listed provider
    expect(chosen).toBe("quotio")
  })
})

View File

@@ -0,0 +1,135 @@
import type { FallbackEntry } from "./model-requirements"
import { readConnectedProvidersCache } from "./connected-providers-cache"
/**
 * Error names that indicate a retryable model error (deadstop).
 * These errors completely halt the action loop and should trigger fallback retry.
 */
const DEADSTOP_ERROR_NAMES: ReadonlySet<string> = new Set([
  "ProviderModelNotFoundError",
  "RateLimitError",
  "QuotaExceededError",
  "InsufficientCreditsError",
  "ModelUnavailableError",
  "ProviderConnectionError",
  "AuthenticationError",
])

/**
 * Error names that should NOT trigger retry.
 * These errors are typically user-induced or fixable without switching models.
 */
const USER_FIXABLE_ERROR_NAMES: ReadonlySet<string> = new Set([
  "MessageAbortedError",
  "PermissionDeniedError",
  "ContextLengthError",
  "TimeoutError",
  "ValidationError",
  "SyntaxError",
  "UserError",
])

/**
 * Lowercase substrings that mark an error message as retryable even when the
 * error carries no recognized name.
 * NOTE(review): "timeout" is a retryable fragment here while TimeoutError is
 * explicitly non-retryable by name — confirm this asymmetry is intended.
 */
const RETRYABLE_MESSAGE_FRAGMENTS: readonly string[] = [
  "rate_limit",
  "rate limit",
  "quota",
  "not found",
  "unavailable",
  "insufficient",
  "too many requests",
  "over limit",
  "overloaded",
  "bad gateway",
  "unknown provider",
  "provider not found",
  "connection error",
  "network error",
  "timeout",
  "service unavailable",
  "internal_server_error",
  "503",
  "502",
  "504",
]

export interface ErrorInfo {
  name?: string
  message?: string
}

/**
 * Determines if an error is a retryable model error.
 *
 * A recognized non-retryable name short-circuits to false; a recognized
 * retryable name short-circuits to true; otherwise the (lowercased) message
 * is scanned for retryable fragments.
 */
export function isRetryableModelError(error: ErrorInfo): boolean {
  const { name, message } = error
  if (name) {
    // Explicit non-retryable classification takes precedence.
    if (USER_FIXABLE_ERROR_NAMES.has(name)) return false
    if (DEADSTOP_ERROR_NAMES.has(name)) return true
  }
  // Unknown (or absent) name: fall back to message-pattern matching.
  const haystack = (message ?? "").toLowerCase()
  return RETRYABLE_MESSAGE_FRAGMENTS.some((fragment) => haystack.includes(fragment))
}

/**
 * Determines if an error should trigger a fallback retry.
 * Returns true for deadstop errors that completely halt the action loop.
 */
export function shouldRetryError(error: ErrorInfo): boolean {
  return isRetryableModelError(error)
}
/**
 * Looks up the fallback entry for a given retry attempt.
 *
 * @param fallbackChain - Ordered fallback entries; attempt 0 maps to the first entry.
 * @param attemptCount - Zero-based count of fallbacks already attempted.
 * @returns The entry to try next, or undefined once the chain is exhausted.
 */
export function getNextFallback(
  fallbackChain: FallbackEntry[],
  attemptCount: number,
): FallbackEntry | undefined {
  return attemptCount < fallbackChain.length ? fallbackChain[attemptCount] : undefined
}
/**
 * Reports whether any fallback entries remain after the given attempt count.
 *
 * @param fallbackChain - Ordered fallback entries.
 * @param attemptCount - Zero-based count of fallbacks already attempted.
 */
export function hasMoreFallbacks(
  fallbackChain: FallbackEntry[],
  attemptCount: number,
): boolean {
  return fallbackChain.length - attemptCount > 0
}
/**
* Selects the best provider for a fallback entry.
* Priority:
* 1) First connected provider in the entry's provider preference order
* 2) First provider listed in the fallback entry (when cache is missing)
*/
export function selectFallbackProvider(
providers: string[],
preferredProviderID?: string,
): string {
const connectedProviders = readConnectedProvidersCache()
if (connectedProviders) {
const connectedSet = new Set(connectedProviders)
for (const provider of providers) {
if (connectedSet.has(provider)) {
return provider
}
}
}
return providers[0] || preferredProviderID || "quotio"
}

View File

@@ -6,493 +6,158 @@ import {
type ModelRequirement, type ModelRequirement,
} from "./model-requirements" } from "./model-requirements"
function flattenChains(): FallbackEntry[] {
return [
...Object.values(AGENT_MODEL_REQUIREMENTS).flatMap((r) => r.fallbackChain),
...Object.values(CATEGORY_MODEL_REQUIREMENTS).flatMap((r) => r.fallbackChain),
]
}
function assertNoExcludedModels(entry: FallbackEntry): void {
// User exclusions.
expect(entry.model).not.toBe("grok-code-fast-1")
if (entry.providers.includes("quotio")) {
expect(entry.model).not.toBe("tstars2.0")
expect(entry.model).not.toMatch(/^kiro-/i)
expect(entry.model).not.toMatch(/^tab_/i)
}
// Remove codex-mini models per request.
expect(entry.model).not.toMatch(/codex-mini/i)
}
function assertNoOpencodeProvider(entry: FallbackEntry): void {
expect(entry.providers).not.toContain("opencode")
}
function assertNoProviderPrefixForNonNamespacedProviders(entry: FallbackEntry): void {
// For these providers, model IDs should not be written as "provider/model".
const nonNamespaced = ["quotio", "openai", "github-copilot", "minimax", "minimax-coding-plan"]
for (const provider of entry.providers) {
if (!nonNamespaced.includes(provider)) continue
expect(entry.model.startsWith(`${provider}/`)).toBe(false)
}
}
describe("AGENT_MODEL_REQUIREMENTS", () => { describe("AGENT_MODEL_REQUIREMENTS", () => {
test("oracle has valid fallbackChain with gpt-5.2 as primary", () => { test("defines all 10 builtin agents", () => {
// given - oracle agent requirement expect(Object.keys(AGENT_MODEL_REQUIREMENTS).sort()).toEqual([
const oracle = AGENT_MODEL_REQUIREMENTS["oracle"] "atlas",
// when - accessing oracle requirement
// then - fallbackChain exists with gpt-5.2 as first entry
expect(oracle).toBeDefined()
expect(oracle.fallbackChain).toBeArray()
expect(oracle.fallbackChain.length).toBeGreaterThan(0)
const primary = oracle.fallbackChain[0]
expect(primary.providers).toContain("openai")
expect(primary.model).toBe("gpt-5.2")
expect(primary.variant).toBe("high")
})
test("sisyphus has claude-opus-4-6 as primary and requiresAnyModel", () => {
// #given - the sisyphus agent requirement from AGENT_MODEL_REQUIREMENTS
const sisyphus = AGENT_MODEL_REQUIREMENTS["sisyphus"]
// #when - reading its fallback chain and flags
// #then - chain has exactly 5 entries: claude-opus-4-6 (variant "max", via
// anthropic/github-copilot/opencode) first and opencode/big-pickle last;
// the requiresAnyModel flag is set to true
expect(sisyphus).toBeDefined()
expect(sisyphus.fallbackChain).toBeArray()
expect(sisyphus.fallbackChain).toHaveLength(5)
expect(sisyphus.requiresAnyModel).toBe(true)
const primary = sisyphus.fallbackChain[0]
expect(primary.providers).toEqual(["anthropic", "github-copilot", "opencode"])
expect(primary.model).toBe("claude-opus-4-6")
expect(primary.variant).toBe("max")
const last = sisyphus.fallbackChain[4]
expect(last.providers[0]).toBe("opencode")
expect(last.model).toBe("big-pickle")
})
test("librarian has valid fallbackChain with gemini-3-flash as primary", () => {
  // #given - the librarian agent requirement
  const requirement = AGENT_MODEL_REQUIREMENTS["librarian"]
  // #when / #then - a non-empty fallback chain exists and google/gemini-3-flash leads it
  expect(requirement).toBeDefined()
  expect(requirement.fallbackChain).toBeArray()
  expect(requirement.fallbackChain.length).toBeGreaterThan(0)
  const [firstEntry] = requirement.fallbackChain
  expect(firstEntry.providers[0]).toBe("google")
  expect(firstEntry.model).toBe("gemini-3-flash")
})
test("explore has valid fallbackChain with grok-code-fast-1 as primary", () => {
// given - explore agent requirement
const explore = AGENT_MODEL_REQUIREMENTS["explore"]
// when - accessing explore requirement
// then - 4-entry fallbackChain in exact order:
// grok-code-fast-1 → minimax-m2.5-free → claude-haiku-4-5 → gpt-5-nano
expect(explore).toBeDefined()
expect(explore.fallbackChain).toBeArray()
expect(explore.fallbackChain).toHaveLength(4)
const primary = explore.fallbackChain[0]
expect(primary.providers).toContain("github-copilot")
expect(primary.model).toBe("grok-code-fast-1")
const secondary = explore.fallbackChain[1]
expect(secondary.providers).toContain("opencode")
expect(secondary.model).toBe("minimax-m2.5-free")
const tertiary = explore.fallbackChain[2]
expect(tertiary.providers).toContain("anthropic")
expect(tertiary.model).toBe("claude-haiku-4-5")
const quaternary = explore.fallbackChain[3]
expect(quaternary.providers).toContain("opencode")
expect(quaternary.model).toBe("gpt-5-nano")
})
test("multimodal-looker has valid fallbackChain with k2p5 as primary", () => {
// given - multimodal-looker agent requirement
const multimodalLooker = AGENT_MODEL_REQUIREMENTS["multimodal-looker"]
// when - accessing multimodal-looker requirement
// then - non-empty fallbackChain with kimi-for-coding/k2p5 as the first entry
expect(multimodalLooker).toBeDefined()
expect(multimodalLooker.fallbackChain).toBeArray()
expect(multimodalLooker.fallbackChain.length).toBeGreaterThan(0)
const primary = multimodalLooker.fallbackChain[0]
expect(primary.providers[0]).toBe("kimi-for-coding")
expect(primary.model).toBe("k2p5")
})
test("prometheus has claude-opus-4-6 as primary", () => {
// #given - prometheus agent requirement
const prometheus = AGENT_MODEL_REQUIREMENTS["prometheus"]
// #when - accessing Prometheus requirement
// #then - chain has at least 2 entries; claude-opus-4-6 (variant "max") via
// anthropic/github-copilot/opencode leads it
expect(prometheus).toBeDefined()
expect(prometheus.fallbackChain).toBeArray()
expect(prometheus.fallbackChain.length).toBeGreaterThan(1)
const primary = prometheus.fallbackChain[0]
expect(primary.model).toBe("claude-opus-4-6")
expect(primary.providers).toEqual(["anthropic", "github-copilot", "opencode"])
expect(primary.variant).toBe("max")
})
test("metis has claude-opus-4-6 as primary", () => {
// #given - metis agent requirement
const metis = AGENT_MODEL_REQUIREMENTS["metis"]
// #when - accessing Metis requirement
// #then - chain has at least 2 entries; claude-opus-4-6 (variant "max") via
// anthropic/github-copilot/opencode leads it (same primary as prometheus)
expect(metis).toBeDefined()
expect(metis.fallbackChain).toBeArray()
expect(metis.fallbackChain.length).toBeGreaterThan(1)
const primary = metis.fallbackChain[0]
expect(primary.model).toBe("claude-opus-4-6")
expect(primary.providers).toEqual(["anthropic", "github-copilot", "opencode"])
expect(primary.variant).toBe("max")
})
test("momus has valid fallbackChain with gpt-5.2 as primary", () => {
// given - momus agent requirement
const momus = AGENT_MODEL_REQUIREMENTS["momus"]
// when - accessing Momus requirement
// then - non-empty fallbackChain; openai/gpt-5.2 with variant "medium" is first
expect(momus).toBeDefined()
expect(momus.fallbackChain).toBeArray()
expect(momus.fallbackChain.length).toBeGreaterThan(0)
const primary = momus.fallbackChain[0]
expect(primary.model).toBe("gpt-5.2")
expect(primary.variant).toBe("medium")
expect(primary.providers[0]).toBe("openai")
})
test("atlas has valid fallbackChain with k2p5 as primary (kimi-for-coding prioritized)", () => {
// given - atlas agent requirement
const atlas = AGENT_MODEL_REQUIREMENTS["atlas"]
// when - accessing Atlas requirement
// then - non-empty fallbackChain; kimi-for-coding/k2p5 is the first entry
expect(atlas).toBeDefined()
expect(atlas.fallbackChain).toBeArray()
expect(atlas.fallbackChain.length).toBeGreaterThan(0)
const primary = atlas.fallbackChain[0]
expect(primary.model).toBe("k2p5")
expect(primary.providers[0]).toBe("kimi-for-coding")
})
test("hephaestus requires openai/github-copilot/opencode provider", () => {
// #given - hephaestus agent requirement
const hephaestus = AGENT_MODEL_REQUIREMENTS["hephaestus"]
// #when - accessing hephaestus requirement
// #then - activation is gated on requiresProvider (openai/github-copilot/opencode);
// requiresModel is deliberately unset for this agent
expect(hephaestus).toBeDefined()
expect(hephaestus.requiresProvider).toEqual(["openai", "github-copilot", "opencode"])
expect(hephaestus.requiresModel).toBeUndefined()
})
test("all 10 builtin agents have valid fallbackChain arrays", () => {
// #given - list of 10 agent names
const expectedAgents = [
"sisyphus",
"hephaestus",
"oracle",
"librarian",
"explore", "explore",
"multimodal-looker", "hephaestus",
"prometheus", "librarian",
"metis", "metis",
"momus", "momus",
"atlas", "multimodal-looker",
] "oracle",
"prometheus",
"sisyphus",
])
})
// when - checking AGENT_MODEL_REQUIREMENTS test("sisyphus: 2nd fallback is quotio gpt-5.3-codex (high)", () => {
const definedAgents = Object.keys(AGENT_MODEL_REQUIREMENTS) const sisyphus = AGENT_MODEL_REQUIREMENTS["sisyphus"]
expect(sisyphus.requiresAnyModel).toBe(true)
expect(sisyphus.fallbackChain.length).toBeGreaterThan(2)
// #then - all agents present with valid fallbackChain expect(sisyphus.fallbackChain[0]).toEqual({
expect(definedAgents).toHaveLength(10) providers: ["quotio"],
for (const agent of expectedAgents) { model: "claude-opus-4-6",
const requirement = AGENT_MODEL_REQUIREMENTS[agent] variant: "max",
expect(requirement).toBeDefined() })
expect(requirement.fallbackChain).toBeArray()
expect(requirement.fallbackChain.length).toBeGreaterThan(0)
for (const entry of requirement.fallbackChain) { expect(sisyphus.fallbackChain[1]).toEqual({
expect(entry.providers).toBeArray() providers: ["quotio"],
expect(entry.providers.length).toBeGreaterThan(0) model: "gpt-5.3-codex",
expect(typeof entry.model).toBe("string") variant: "high",
expect(entry.model.length).toBeGreaterThan(0) })
} })
}
test("explore: uses speed chain, includes rome, and gpt-5-mini is copilot-first", () => {
const explore = AGENT_MODEL_REQUIREMENTS["explore"]
expect(explore.fallbackChain.length).toBeGreaterThan(4)
expect(explore.fallbackChain[0].model).toBe("claude-haiku-4-5")
expect(explore.fallbackChain.some((e) => e.model === "iflow-rome-30ba3b")).toBe(true)
const gptMini = explore.fallbackChain.find((e) => e.model === "gpt-5-mini")
expect(gptMini).toBeDefined()
expect(gptMini!.providers[0]).toBe("github-copilot")
expect(gptMini!.variant).toBe("high")
})
test("multimodal-looker: prefers gemini image model first", () => {
const multimodal = AGENT_MODEL_REQUIREMENTS["multimodal-looker"]
expect(multimodal.fallbackChain[0]).toEqual({
providers: ["quotio"],
model: "gemini-3-pro-image",
})
})
test("includes NVIDIA NIM additions in at least one agent chain", () => {
const all = Object.values(AGENT_MODEL_REQUIREMENTS).flatMap((r) => r.fallbackChain)
expect(all.some((e) => e.providers.includes("nvidia") && e.model === "qwen/qwen3.5-397b-a17b")).toBe(true)
expect(all.some((e) => e.providers.includes("nvidia") && e.model === "stepfun-ai/step-3.5-flash")).toBe(true)
expect(all.some((e) => e.providers.includes("nvidia") && e.model === "bytedance/seed-oss-36b-instruct")).toBe(true)
}) })
}) })
describe("CATEGORY_MODEL_REQUIREMENTS", () => { describe("CATEGORY_MODEL_REQUIREMENTS", () => {
test("ultrabrain has valid fallbackChain with gpt-5.3-codex as primary", () => { test("defines all 8 categories", () => {
// given - ultrabrain category requirement expect(Object.keys(CATEGORY_MODEL_REQUIREMENTS).sort()).toEqual([
const ultrabrain = CATEGORY_MODEL_REQUIREMENTS["ultrabrain"]
// when - accessing ultrabrain requirement
// then - fallbackChain exists with gpt-5.3-codex as first entry
expect(ultrabrain).toBeDefined()
expect(ultrabrain.fallbackChain).toBeArray()
expect(ultrabrain.fallbackChain.length).toBeGreaterThan(0)
const primary = ultrabrain.fallbackChain[0]
expect(primary.variant).toBe("xhigh")
expect(primary.model).toBe("gpt-5.3-codex")
expect(primary.providers[0]).toBe("openai")
})
test("deep has valid fallbackChain with gpt-5.3-codex as primary", () => {
// given - deep category requirement
const deep = CATEGORY_MODEL_REQUIREMENTS["deep"]
// when - accessing deep requirement
// then - non-empty fallbackChain; openai/gpt-5.3-codex with variant "medium" is first
expect(deep).toBeDefined()
expect(deep.fallbackChain).toBeArray()
expect(deep.fallbackChain.length).toBeGreaterThan(0)
const primary = deep.fallbackChain[0]
expect(primary.variant).toBe("medium")
expect(primary.model).toBe("gpt-5.3-codex")
expect(primary.providers[0]).toBe("openai")
})
test("visual-engineering has valid fallbackChain with gemini-3-pro high as primary", () => {
  // #given - the visual-engineering category requirement
  const requirement = CATEGORY_MODEL_REQUIREMENTS["visual-engineering"]
  // #when / #then - chain is exactly: gemini-3-pro(high) → glm-5 → claude-opus-4-6(max) → k2p5
  expect(requirement).toBeDefined()
  expect(requirement.fallbackChain).toBeArray()
  expect(requirement.fallbackChain).toHaveLength(4)
  const [first, second, third, fourth] = requirement.fallbackChain
  expect(first.providers[0]).toBe("google")
  expect(first.model).toBe("gemini-3-pro")
  expect(first.variant).toBe("high")
  expect(second.providers[0]).toBe("zai-coding-plan")
  expect(second.model).toBe("glm-5")
  expect(third.model).toBe("claude-opus-4-6")
  expect(third.variant).toBe("max")
  expect(fourth.providers[0]).toBe("kimi-for-coding")
  expect(fourth.model).toBe("k2p5")
})
test("quick has valid fallbackChain with claude-haiku-4-5 as primary", () => {
// given - quick category requirement
const quick = CATEGORY_MODEL_REQUIREMENTS["quick"]
// when - accessing quick requirement
// then - non-empty fallbackChain; anthropic/claude-haiku-4-5 is the first entry
expect(quick).toBeDefined()
expect(quick.fallbackChain).toBeArray()
expect(quick.fallbackChain.length).toBeGreaterThan(0)
const primary = quick.fallbackChain[0]
expect(primary.model).toBe("claude-haiku-4-5")
expect(primary.providers[0]).toBe("anthropic")
})
test("unspecified-low has valid fallbackChain with claude-sonnet-4-6 as primary", () => {
// given - unspecified-low category requirement
const unspecifiedLow = CATEGORY_MODEL_REQUIREMENTS["unspecified-low"]
// when - accessing unspecified-low requirement
// then - non-empty fallbackChain; anthropic/claude-sonnet-4-6 is the first entry
expect(unspecifiedLow).toBeDefined()
expect(unspecifiedLow.fallbackChain).toBeArray()
expect(unspecifiedLow.fallbackChain.length).toBeGreaterThan(0)
const primary = unspecifiedLow.fallbackChain[0]
expect(primary.model).toBe("claude-sonnet-4-6")
expect(primary.providers[0]).toBe("anthropic")
})
test("unspecified-high has claude-opus-4-6 as primary", () => {
// #given - unspecified-high category requirement
const unspecifiedHigh = CATEGORY_MODEL_REQUIREMENTS["unspecified-high"]
// #when - accessing unspecified-high requirement
// #then - chain has at least 2 entries; claude-opus-4-6 (variant "max") via
// anthropic/github-copilot/opencode leads it
expect(unspecifiedHigh).toBeDefined()
expect(unspecifiedHigh.fallbackChain).toBeArray()
expect(unspecifiedHigh.fallbackChain.length).toBeGreaterThan(1)
const primary = unspecifiedHigh.fallbackChain[0]
expect(primary.model).toBe("claude-opus-4-6")
expect(primary.variant).toBe("max")
expect(primary.providers).toEqual(["anthropic", "github-copilot", "opencode"])
})
test("artistry has valid fallbackChain with gemini-3-pro as primary", () => {
// given - artistry category requirement
const artistry = CATEGORY_MODEL_REQUIREMENTS["artistry"]
// when - accessing artistry requirement
// then - non-empty fallbackChain; google/gemini-3-pro with variant "high" is first
expect(artistry).toBeDefined()
expect(artistry.fallbackChain).toBeArray()
expect(artistry.fallbackChain.length).toBeGreaterThan(0)
const primary = artistry.fallbackChain[0]
expect(primary.model).toBe("gemini-3-pro")
expect(primary.variant).toBe("high")
expect(primary.providers[0]).toBe("google")
})
test("writing has valid fallbackChain with k2p5 as primary (kimi-for-coding)", () => {
  // given - writing category requirement
  const writing = CATEGORY_MODEL_REQUIREMENTS["writing"]
  // when - accessing writing requirement
  // then - fallbackChain: k2p5 → gemini-3-flash → claude-sonnet-4-6
  expect(writing).toBeDefined()
  expect(writing.fallbackChain).toBeArray()
  expect(writing.fallbackChain).toHaveLength(3)
  const primary = writing.fallbackChain[0]
  expect(primary.model).toBe("k2p5")
  expect(primary.providers[0]).toBe("kimi-for-coding")
  const second = writing.fallbackChain[1]
  expect(second.model).toBe("gemini-3-flash")
  expect(second.providers[0]).toBe("google")
  // fix: the comment above promises claude-sonnet-4-6 as the 3rd entry and the
  // length is pinned to 3, but the 3rd entry itself was never asserted
  const third = writing.fallbackChain[2]
  expect(third.model).toBe("claude-sonnet-4-6")
  expect(third.providers[0]).toBe("anthropic")
})
test("all 8 categories have valid fallbackChain arrays", () => {
// given - list of 8 category names
const expectedCategories = [
"visual-engineering",
"ultrabrain",
"deep",
"artistry", "artistry",
"deep",
"quick", "quick",
"unspecified-low", "ultrabrain",
"unspecified-high", "unspecified-high",
"unspecified-low",
"visual-engineering",
"writing", "writing",
] ])
})
// when - checking CATEGORY_MODEL_REQUIREMENTS test("deep requires gpt-5.3-codex", () => {
const definedCategories = Object.keys(CATEGORY_MODEL_REQUIREMENTS) expect(CATEGORY_MODEL_REQUIREMENTS["deep"].requiresModel).toBe("gpt-5.3-codex")
})
// then - all categories present with valid fallbackChain test("quick uses the speed chain (haiku primary)", () => {
expect(definedCategories).toHaveLength(8) expect(CATEGORY_MODEL_REQUIREMENTS["quick"].fallbackChain[0].model).toBe("claude-haiku-4-5")
for (const category of expectedCategories) { })
const requirement = CATEGORY_MODEL_REQUIREMENTS[category]
expect(requirement).toBeDefined()
expect(requirement.fallbackChain).toBeArray()
expect(requirement.fallbackChain.length).toBeGreaterThan(0)
for (const entry of requirement.fallbackChain) { test("ultrabrain starts with gpt-5.3-codex (high)", () => {
expect(entry.providers).toBeArray() const ultrabrain = CATEGORY_MODEL_REQUIREMENTS["ultrabrain"]
expect(ultrabrain.fallbackChain[0]).toEqual({
providers: ["quotio"],
model: "gpt-5.3-codex",
variant: "high",
})
})
})
describe("ModelRequirements invariants", () => {
test("all entries have non-empty providers and a non-empty model", () => {
for (const entry of flattenChains()) {
expect(entry.providers.length).toBeGreaterThan(0) expect(entry.providers.length).toBeGreaterThan(0)
expect(typeof entry.model).toBe("string") expect(typeof entry.model).toBe("string")
expect(entry.model.length).toBeGreaterThan(0) expect(entry.model.length).toBeGreaterThan(0)
} }
})
test("no entry uses opencode provider and no excluded models are present", () => {
for (const entry of flattenChains()) {
assertNoOpencodeProvider(entry)
assertNoExcludedModels(entry)
assertNoProviderPrefixForNonNamespacedProviders(entry)
} }
}) })
}) })
describe("FallbackEntry type", () => { describe("Type sanity", () => {
test("FallbackEntry structure is correct", () => { test("FallbackEntry.variant is optional", () => {
// given - a valid FallbackEntry object const entry: FallbackEntry = { providers: ["quotio"], model: "claude-haiku-4-5" }
const entry: FallbackEntry = {
providers: ["anthropic", "github-copilot", "opencode"],
model: "claude-opus-4-6",
variant: "high",
}
// when - accessing properties
// then - all properties are accessible
expect(entry.providers).toEqual(["anthropic", "github-copilot", "opencode"])
expect(entry.model).toBe("claude-opus-4-6")
expect(entry.variant).toBe("high")
})
test("FallbackEntry variant is optional", () => {
// given - a FallbackEntry without variant
const entry: FallbackEntry = {
providers: ["opencode", "anthropic"],
model: "big-pickle",
}
// when - accessing variant
// then - variant is undefined
expect(entry.variant).toBeUndefined() expect(entry.variant).toBeUndefined()
}) })
})
describe("ModelRequirement type", () => { test("ModelRequirement.variant is optional", () => {
test("ModelRequirement structure with fallbackChain is correct", () => { const req: ModelRequirement = { fallbackChain: [{ providers: ["quotio"], model: "claude-haiku-4-5" }] }
// given - a valid ModelRequirement object expect(req.variant).toBeUndefined()
const requirement: ModelRequirement = {
fallbackChain: [
{ providers: ["anthropic", "github-copilot"], model: "claude-opus-4-6", variant: "max" },
{ providers: ["openai", "github-copilot"], model: "gpt-5.2", variant: "high" },
],
}
// when - accessing properties
// then - fallbackChain is accessible with correct structure
expect(requirement.fallbackChain).toBeArray()
expect(requirement.fallbackChain).toHaveLength(2)
expect(requirement.fallbackChain[0].model).toBe("claude-opus-4-6")
expect(requirement.fallbackChain[1].model).toBe("gpt-5.2")
})
test("ModelRequirement variant is optional", () => {
// given - a ModelRequirement constructed without a top-level variant field
const requirement: ModelRequirement = {
fallbackChain: [{ providers: ["opencode"], model: "big-pickle" }],
}
// when - reading the omitted field
// then - variant is undefined (the type compiles without it)
expect(requirement.variant).toBeUndefined()
})
test("no model in fallbackChain has provider prefix", () => {
  // #given - every fallback entry across all agent and category requirements
  const requirements = [...Object.values(AGENT_MODEL_REQUIREMENTS), ...Object.values(CATEGORY_MODEL_REQUIREMENTS)]
  const entries = requirements.flatMap((req) => req.fallbackChain)
  // #when / #then - model IDs are bare names, never the "provider/model" form
  for (const entry of entries) {
    expect(entry.model).not.toContain("/")
  }
})
test("all fallbackChain entries have non-empty providers array", () => {
// given - all agent and category requirements combined
const allRequirements = [
...Object.values(AGENT_MODEL_REQUIREMENTS),
...Object.values(CATEGORY_MODEL_REQUIREMENTS),
]
// when - walking every entry of every fallbackChain
// then - each entry carries a providers array with at least one provider
for (const req of allRequirements) {
for (const entry of req.fallbackChain) {
expect(entry.providers).toBeArray()
expect(entry.providers.length).toBeGreaterThan(0)
}
}
})
})
describe("requiresModel field in categories", () => {
test("deep category has requiresModel set to gpt-5.3-codex", () => {
// given - the deep category requirement
const deep = CATEGORY_MODEL_REQUIREMENTS["deep"]
// when / #then - requiresModel is pinned to gpt-5.3-codex
expect(deep.requiresModel).toBe("gpt-5.3-codex")
})
test("artistry category has requiresModel set to gemini-3-pro", () => {
// given
const artistry = CATEGORY_MODEL_REQUIREMENTS["artistry"]
// when / #then
expect(artistry.requiresModel).toBe("gemini-3-pro")
}) })
}) })

View File

@@ -12,147 +12,203 @@ export type ModelRequirement = {
requiresProvider?: string[] // If set, only activates when any of these providers is connected requiresProvider?: string[] // If set, only activates when any of these providers is connected
} }
/**
 * Shorthand builder for a FallbackEntry.
 * A lone provider string is promoted to a one-element array; the `variant`
 * key is only present on the result when a variant was actually supplied.
 */
function fb(providers: string[] | string, model: string, variant?: string): FallbackEntry {
  const providerList = typeof providers === "string" ? [providers] : providers
  const base = { providers: providerList, model }
  return variant === undefined ? base : { ...base, variant }
}
/**
 * Drop duplicate fallback entries while preserving first-seen order.
 * Two entries are duplicates when both model and variant match
 * (a missing variant is treated as the empty string).
 */
function dedupeChain(chain: FallbackEntry[]): FallbackEntry[] {
  // Map keeps insertion order, so the first occurrence of each key wins.
  const byKey = new Map<string, FallbackEntry>()
  for (const entry of chain) {
    const key = `${entry.model}:${entry.variant ?? ""}`
    if (!byKey.has(key)) byKey.set(key, entry)
  }
  return [...byKey.values()]
}
// Provider preference rules:
// - Never use the paid `opencode` provider as an automatic fallback.
// - Prefer `quotio` when the same model exists across multiple providers.
// - Prefer `github-copilot` first for `gpt-5-mini` (unlimited), fall back to `quotio`.
// Note: user requested "Quotio-first" and to avoid the OpenCode provider; we keep runtime fallbacks on
// `quotio` + `nvidia` (+ `github-copilot` for unlimited GPT mini) unless explicitly requested otherwise.
const P_GPT: string[] = ["quotio"] // provider list for GPT-family models: quotio only
const P_GPT_MINI: string[] = ["github-copilot", "quotio"] // gpt-5-mini: github-copilot first (unlimited), quotio as fallback
// Benchmark-driven ordering (user-provided table + NVIDIA NIM docs), tuned per-agent for quality vs speed.
const SPEED_CHAIN: FallbackEntry[] = [
fb("quotio", "claude-haiku-4-5"), fb("quotio", "oswe-vscode-prime"),
fb(P_GPT_MINI, "gpt-5-mini", "high"), fb(P_GPT_MINI, "gpt-4.1"),
fb("nvidia", "nvidia/nemotron-3-nano-30b-a3b"), fb("quotio", "iflow-rome-30ba3b"),
fb("minimax-coding-plan", "MiniMax-M2.5"), fb("nvidia", "bytedance/seed-oss-36b-instruct"),
fb("quotio", "claude-sonnet-4-5"),
]
// Quality-first coding chain, benchmark-ordered: strongest reasoning/coding
// models first (opus-thinking, step-3.5-flash, qwen3.5, glm-5, deepseek, kimi,
// minimax), spread across quotio / nvidia / minimax providers — no opencode.
const QUALITY_CODING_CHAIN: FallbackEntry[] = [
fb("quotio", "claude-opus-4-6-thinking"),
fb("nvidia", "stepfun-ai/step-3.5-flash"),
fb("nvidia", "qwen/qwen3.5-397b-a17b"),
fb("quotio", "glm-5"),
fb("nvidia", "z-ai/glm5"),
fb("quotio", "deepseek-v3.2-reasoner"),
fb("quotio", "deepseek-r1"),
fb("nvidia", "deepseek-ai/deepseek-r1"),
fb("quotio", "qwen3-235b-a22b-thinking-2507"),
fb("nvidia", "qwen/qwen3-next-80b-a3b-thinking"),
fb("nvidia", "qwen/qwen3-coder-480b-a35b-instruct"),
fb("nvidia", "bytedance/seed-oss-36b-instruct"),
fb("quotio", "kimi-k2-thinking"),
fb("quotio", "kimi-k2.5"),
fb("nvidia", "moonshotai/kimi-k2.5"),
fb("minimax-coding-plan", "MiniMax-M2.5"),
fb("minimax-coding-plan", "MiniMax-M2.5-highspeed"),
fb("minimax", "MiniMax-M2.5"),
fb("quotio", "minimax-m2.5"),
fb("quotio", "claude-sonnet-4-5-thinking"),
]
export const AGENT_MODEL_REQUIREMENTS: Record<string, ModelRequirement> = { export const AGENT_MODEL_REQUIREMENTS: Record<string, ModelRequirement> = {
sisyphus: { sisyphus: {
fallbackChain: [ fallbackChain: [
{ providers: ["anthropic", "github-copilot", "opencode"], model: "claude-opus-4-6", variant: "max" }, // 1st fallback: switch away from Opus Thinking to the non-thinking model (often more available).
{ providers: ["kimi-for-coding"], model: "k2p5" }, fb("quotio", "claude-opus-4-6", "max"),
{ providers: ["opencode"], model: "kimi-k2.5-free" }, // 2nd fallback: user-requested.
{ providers: ["zai-coding-plan", "opencode"], model: "glm-5" }, fb("quotio", "gpt-5.3-codex", "high"),
{ providers: ["opencode"], model: "big-pickle" }, ...QUALITY_CODING_CHAIN,
...SPEED_CHAIN,
], ],
requiresAnyModel: true, requiresAnyModel: true,
}, },
hephaestus: { hephaestus: {
fallbackChain: [ fallbackChain: [
{ providers: ["openai", "github-copilot", "opencode"], model: "gpt-5.3-codex", variant: "medium" }, fb("quotio", "gpt-5.3-codex", "high"),
...QUALITY_CODING_CHAIN,
], ],
requiresProvider: ["openai", "github-copilot", "opencode"], requiresAnyModel: true,
}, },
oracle: { oracle: {
fallbackChain: [ fallbackChain: dedupeChain([
{ providers: ["openai", "github-copilot", "opencode"], model: "gpt-5.2", variant: "high" }, fb("quotio", "gpt-5.3-codex", "high"),
{ providers: ["google", "github-copilot", "opencode"], model: "gemini-3-pro", variant: "high" }, fb("quotio", "claude-opus-4-6-thinking"),
{ providers: ["anthropic", "github-copilot", "opencode"], model: "claude-opus-4-6", variant: "max" }, fb("quotio", "claude-sonnet-4-5-thinking"),
], ...QUALITY_CODING_CHAIN,
]),
}, },
librarian: { librarian: {
fallbackChain: [ fallbackChain: [
{ providers: ["google", "github-copilot", "opencode"], model: "gemini-3-flash" }, fb("quotio", "claude-sonnet-4-5"),
{ providers: ["opencode"], model: "minimax-m2.5-free" }, ...SPEED_CHAIN,
{ providers: ["opencode"], model: "big-pickle" }, ...QUALITY_CODING_CHAIN,
], ],
}, },
explore: { explore: {
fallbackChain: [ fallbackChain: SPEED_CHAIN,
{ providers: ["github-copilot"], model: "grok-code-fast-1" },
{ providers: ["opencode"], model: "minimax-m2.5-free" },
{ providers: ["anthropic", "opencode"], model: "claude-haiku-4-5" },
{ providers: ["opencode"], model: "gpt-5-nano" },
],
}, },
"multimodal-looker": { "multimodal-looker": {
fallbackChain: [ fallbackChain: [
{ providers: ["kimi-for-coding"], model: "k2p5" }, fb("quotio", "gemini-3-pro-image"),
{ providers: ["opencode"], model: "kimi-k2.5-free" }, fb("quotio", "gemini-3-pro-high"),
{ providers: ["google", "github-copilot", "opencode"], model: "gemini-3-flash" }, fb("quotio", "gemini-3-flash"),
{ providers: ["openai", "github-copilot", "opencode"], model: "gpt-5.2" }, fb("quotio", "kimi-k2.5"),
{ providers: ["zai-coding-plan"], model: "glm-4.6v" }, fb("quotio", "claude-opus-4-6-thinking"),
fb("quotio", "claude-sonnet-4-5-thinking"),
fb("quotio", "claude-haiku-4-5"),
], ],
}, },
prometheus: { prometheus: {
fallbackChain: [ fallbackChain: dedupeChain([
{ providers: ["anthropic", "github-copilot", "opencode"], model: "claude-opus-4-6", variant: "max" }, fb("quotio", "claude-opus-4-6-thinking"),
{ providers: ["openai", "github-copilot", "opencode"], model: "gpt-5.2", variant: "high" }, fb("quotio", "gpt-5.3-codex", "high"),
{ providers: ["kimi-for-coding"], model: "k2p5" }, fb("quotio", "claude-sonnet-4-5-thinking"),
{ providers: ["opencode"], model: "kimi-k2.5-free" }, ...QUALITY_CODING_CHAIN,
{ providers: ["google", "github-copilot", "opencode"], model: "gemini-3-pro" }, ]),
],
}, },
metis: { metis: {
fallbackChain: [ fallbackChain: dedupeChain([
{ providers: ["anthropic", "github-copilot", "opencode"], model: "claude-opus-4-6", variant: "max" }, fb("quotio", "claude-opus-4-6-thinking"),
{ providers: ["kimi-for-coding"], model: "k2p5" }, fb("quotio", "gpt-5.3-codex", "high"),
{ providers: ["opencode"], model: "kimi-k2.5-free" }, fb("quotio", "claude-sonnet-4-5-thinking"),
{ providers: ["openai", "github-copilot", "opencode"], model: "gpt-5.2", variant: "high" }, ...QUALITY_CODING_CHAIN,
{ providers: ["google", "github-copilot", "opencode"], model: "gemini-3-pro", variant: "high" }, ]),
],
}, },
momus: { momus: {
fallbackChain: [ fallbackChain: dedupeChain([
{ providers: ["openai", "github-copilot", "opencode"], model: "gpt-5.2", variant: "medium" }, fb("quotio", "gpt-5.3-codex", "high"),
{ providers: ["anthropic", "github-copilot", "opencode"], model: "claude-opus-4-6", variant: "max" }, fb("quotio", "claude-opus-4-6-thinking"),
{ providers: ["google", "github-copilot", "opencode"], model: "gemini-3-pro", variant: "high" }, ...QUALITY_CODING_CHAIN,
], ]),
}, },
atlas: { atlas: {
fallbackChain: [ fallbackChain: dedupeChain([
{ providers: ["kimi-for-coding"], model: "k2p5" }, fb("quotio", "claude-sonnet-4-5-thinking"),
{ providers: ["opencode"], model: "kimi-k2.5-free" }, fb("quotio", "claude-opus-4-6-thinking"),
{ providers: ["anthropic", "github-copilot", "opencode"], model: "claude-sonnet-4-6" }, fb("quotio", "gpt-5.3-codex", "medium"),
{ providers: ["openai", "github-copilot", "opencode"], model: "gpt-5.2" }, ...QUALITY_CODING_CHAIN,
], ]),
}, },
} }
export const CATEGORY_MODEL_REQUIREMENTS: Record<string, ModelRequirement> = { export const CATEGORY_MODEL_REQUIREMENTS: Record<string, ModelRequirement> = {
"visual-engineering": { "visual-engineering": {
fallbackChain: [ fallbackChain: [
{ providers: ["google", "github-copilot", "opencode"], model: "gemini-3-pro", variant: "high" }, fb("quotio", "claude-opus-4-6-thinking"),
{ providers: ["zai-coding-plan", "opencode"], model: "glm-5" }, fb("quotio", "gemini-3-pro-image"),
{ providers: ["anthropic", "github-copilot", "opencode"], model: "claude-opus-4-6", variant: "max" }, fb("quotio", "kimi-k2-thinking"),
{ providers: ["kimi-for-coding"], model: "k2p5" }, fb("quotio", "kimi-k2.5"),
fb("quotio", "claude-sonnet-4-5-thinking"),
fb("quotio", "gpt-5.3-codex", "medium"),
], ],
}, },
ultrabrain: { ultrabrain: {
fallbackChain: [ fallbackChain: dedupeChain([
{ providers: ["openai", "github-copilot", "opencode"], model: "gpt-5.3-codex", variant: "xhigh" }, fb("quotio", "gpt-5.3-codex", "high"),
{ providers: ["google", "github-copilot", "opencode"], model: "gemini-3-pro", variant: "high" }, ...QUALITY_CODING_CHAIN,
{ providers: ["anthropic", "github-copilot", "opencode"], model: "claude-opus-4-6", variant: "max" }, ]),
],
}, },
deep: { deep: {
fallbackChain: [ fallbackChain: [
{ providers: ["openai", "github-copilot", "opencode"], model: "gpt-5.3-codex", variant: "medium" }, fb("quotio", "gpt-5.3-codex", "medium"),
{ providers: ["anthropic", "github-copilot", "opencode"], model: "claude-opus-4-6", variant: "max" }, fb("quotio", "claude-opus-4-6-thinking"),
{ providers: ["google", "github-copilot", "opencode"], model: "gemini-3-pro", variant: "high" }, fb("quotio", "claude-sonnet-4-5-thinking"),
...QUALITY_CODING_CHAIN,
], ],
requiresModel: "gpt-5.3-codex", requiresModel: "gpt-5.3-codex",
}, },
artistry: { artistry: {
fallbackChain: [ fallbackChain: [
{ providers: ["google", "github-copilot", "opencode"], model: "gemini-3-pro", variant: "high" }, fb("quotio", "claude-opus-4-6-thinking"),
{ providers: ["anthropic", "github-copilot", "opencode"], model: "claude-opus-4-6", variant: "max" }, fb("quotio", "claude-sonnet-4-5-thinking"),
{ providers: ["openai", "github-copilot", "opencode"], model: "gpt-5.2" }, fb("quotio", "claude-sonnet-4-5"),
], ],
requiresModel: "gemini-3-pro", requiresModel: "claude-opus-4-6",
}, },
quick: { quick: {
fallbackChain: [ fallbackChain: SPEED_CHAIN,
{ providers: ["anthropic", "github-copilot", "opencode"], model: "claude-haiku-4-5" },
{ providers: ["google", "github-copilot", "opencode"], model: "gemini-3-flash" },
{ providers: ["opencode"], model: "gpt-5-nano" },
],
}, },
"unspecified-low": { "unspecified-low": {
fallbackChain: [ fallbackChain: SPEED_CHAIN,
{ providers: ["anthropic", "github-copilot", "opencode"], model: "claude-sonnet-4-6" },
{ providers: ["openai", "github-copilot", "opencode"], model: "gpt-5.3-codex", variant: "medium" },
{ providers: ["google", "github-copilot", "opencode"], model: "gemini-3-flash" },
],
}, },
"unspecified-high": { "unspecified-high": {
fallbackChain: [ fallbackChain: dedupeChain([
{ providers: ["anthropic", "github-copilot", "opencode"], model: "claude-opus-4-6", variant: "max" }, fb("quotio", "claude-opus-4-6-thinking"),
{ providers: ["openai", "github-copilot", "opencode"], model: "gpt-5.2", variant: "high" }, fb("quotio", "gpt-5.3-codex", "high"),
{ providers: ["google", "github-copilot", "opencode"], model: "gemini-3-pro" }, ...QUALITY_CODING_CHAIN,
], ]),
}, },
writing: { writing: {
fallbackChain: [ fallbackChain: [
{ providers: ["kimi-for-coding"], model: "k2p5" }, fb("quotio", "claude-sonnet-4-5"),
{ providers: ["google", "github-copilot", "opencode"], model: "gemini-3-flash" }, fb("quotio", "glm-5"),
{ providers: ["anthropic", "github-copilot", "opencode"], model: "claude-sonnet-4-6" }, fb("quotio", "kimi-k2.5"),
fb("quotio", "claude-haiku-4-5"),
fb("quotio", "gemini-3-flash"),
], ],
}, },
} }

View File

@@ -2,12 +2,12 @@ import { log } from "./logger"
import * as connectedProvidersCache from "./connected-providers-cache" import * as connectedProvidersCache from "./connected-providers-cache"
import { fuzzyMatchModel } from "./model-availability" import { fuzzyMatchModel } from "./model-availability"
import type { FallbackEntry } from "./model-requirements" import type { FallbackEntry } from "./model-requirements"
import { transformModelForProvider } from "./provider-model-id-transform"
export type ModelResolutionRequest = { export type ModelResolutionRequest = {
intent?: { intent?: {
uiSelectedModel?: string uiSelectedModel?: string
userModel?: string userModel?: string
userFallbackModels?: string[]
categoryDefaultModel?: string categoryDefaultModel?: string
} }
constraints: { constraints: {
@@ -86,10 +86,13 @@ export function resolveModelPipeline(
if (parts.length >= 2) { if (parts.length >= 2) {
const provider = parts[0] const provider = parts[0]
if (connectedProviders.includes(provider)) { if (connectedProviders.includes(provider)) {
const modelName = parts.slice(1).join("/")
const transformedModel = `${provider}/${transformModelForProvider(provider, modelName)}`
log("Model resolved via category default (connected provider)", { log("Model resolved via category default (connected provider)", {
model: normalizedCategoryDefault, model: transformedModel,
original: normalizedCategoryDefault,
}) })
return { model: normalizedCategoryDefault, provenance: "category-default", attempted } return { model: transformedModel, provenance: "category-default", attempted }
} }
} }
} }
@@ -98,42 +101,6 @@ export function resolveModelPipeline(
}) })
} }
//#when - user configured fallback_models, try them before hardcoded fallback chain
const userFallbackModels = intent?.userFallbackModels
if (userFallbackModels && userFallbackModels.length > 0) {
if (availableModels.size === 0) {
const connectedProviders = constraints.connectedProviders ?? connectedProvidersCache.readConnectedProvidersCache()
const connectedSet = connectedProviders ? new Set(connectedProviders) : null
if (connectedSet !== null) {
for (const model of userFallbackModels) {
attempted.push(model)
const parts = model.split("/")
if (parts.length >= 2) {
const provider = parts[0]
if (connectedSet.has(provider)) {
log("Model resolved via user fallback_models (connected provider)", { model })
return { model, provenance: "provider-fallback", attempted }
}
}
}
log("No connected provider found in user fallback_models, falling through to hardcoded chain")
}
} else {
for (const model of userFallbackModels) {
attempted.push(model)
const parts = model.split("/")
const providerHint = parts.length >= 2 ? [parts[0]] : undefined
const match = fuzzyMatchModel(model, availableModels, providerHint)
if (match) {
log("Model resolved via user fallback_models (availability confirmed)", { model: model, match })
return { model: match, provenance: "provider-fallback", attempted }
}
}
log("No available model found in user fallback_models, falling through to hardcoded chain")
}
}
if (fallbackChain && fallbackChain.length > 0) { if (fallbackChain && fallbackChain.length > 0) {
if (availableModels.size === 0) { if (availableModels.size === 0) {
const connectedProviders = constraints.connectedProviders ?? connectedProvidersCache.readConnectedProvidersCache() const connectedProviders = constraints.connectedProviders ?? connectedProvidersCache.readConnectedProvidersCache()
@@ -145,10 +112,11 @@ export function resolveModelPipeline(
for (const entry of fallbackChain) { for (const entry of fallbackChain) {
for (const provider of entry.providers) { for (const provider of entry.providers) {
if (connectedSet.has(provider)) { if (connectedSet.has(provider)) {
const model = `${provider}/${entry.model}` const transformedModelId = transformModelForProvider(provider, entry.model)
const model = `${provider}/${transformedModelId}`
log("Model resolved via fallback chain (connected provider)", { log("Model resolved via fallback chain (connected provider)", {
provider, provider,
model: entry.model, model: transformedModelId,
variant: entry.variant, variant: entry.variant,
}) })
return { return {

View File

@@ -543,7 +543,8 @@ describe("resolveModelWithFallback", () => {
const result = resolveModelWithFallback(input) const result = resolveModelWithFallback(input)
// then - should use github-copilot (second provider) since google not connected // then - should use github-copilot (second provider) since google not connected
expect(result!.model).toBe("github-copilot/gemini-3-pro") // model name is transformed to preview variant for github-copilot provider
expect(result!.model).toBe("github-copilot/gemini-3-pro-preview")
expect(result!.source).toBe("provider-fallback") expect(result!.source).toBe("provider-fallback")
cacheSpy.mockRestore() cacheSpy.mockRestore()
}) })
@@ -795,8 +796,82 @@ describe("resolveModelWithFallback", () => {
// when // when
const result = resolveModelWithFallback(input) const result = resolveModelWithFallback(input)
// then - should use categoryDefaultModel since google is connected // then - should use transformed categoryDefaultModel since google is connected
expect(result!.model).toBe("google/gemini-3-pro") expect(result!.model).toBe("google/gemini-3-pro-preview")
expect(result!.source).toBe("category-default")
cacheSpy.mockRestore()
})
test("transforms gemini-3-flash in categoryDefaultModel for google connected provider", () => {
// given - google connected, category default uses gemini-3-flash
const cacheSpy = spyOn(connectedProvidersCache, "readConnectedProvidersCache").mockReturnValue(["google"])
const input: ExtendedModelResolutionInput = {
categoryDefaultModel: "google/gemini-3-flash",
availableModels: new Set(),
systemDefaultModel: "anthropic/claude-sonnet-4-5",
}
// when
const result = resolveModelWithFallback(input)
// then - gemini-3-flash should be transformed to gemini-3-flash-preview
expect(result!.model).toBe("google/gemini-3-flash-preview")
expect(result!.source).toBe("category-default")
cacheSpy.mockRestore()
})
test("does not double-transform categoryDefaultModel already containing -preview", () => {
// given - category default already has -preview suffix
const cacheSpy = spyOn(connectedProvidersCache, "readConnectedProvidersCache").mockReturnValue(["google"])
const input: ExtendedModelResolutionInput = {
categoryDefaultModel: "google/gemini-3-pro-preview",
availableModels: new Set(),
systemDefaultModel: "anthropic/claude-sonnet-4-5",
}
// when
const result = resolveModelWithFallback(input)
// then - should NOT become gemini-3-pro-preview-preview
expect(result!.model).toBe("google/gemini-3-pro-preview")
expect(result!.source).toBe("category-default")
cacheSpy.mockRestore()
})
test("transforms gemini-3-pro in fallback chain for google connected provider", () => {
// given - google connected, fallback chain has gemini-3-pro
const cacheSpy = spyOn(connectedProvidersCache, "readConnectedProvidersCache").mockReturnValue(["google"])
const input: ExtendedModelResolutionInput = {
fallbackChain: [
{ providers: ["google", "github-copilot"], model: "gemini-3-pro" },
],
availableModels: new Set(),
systemDefaultModel: "anthropic/claude-sonnet-4-5",
}
// when
const result = resolveModelWithFallback(input)
// then - should transform to preview variant for google provider
expect(result!.model).toBe("google/gemini-3-pro-preview")
expect(result!.source).toBe("provider-fallback")
cacheSpy.mockRestore()
})
test("passes through non-gemini-3 models for google connected provider", () => {
// given - google connected, category default uses gemini-2.5-flash (no transform needed)
const cacheSpy = spyOn(connectedProvidersCache, "readConnectedProvidersCache").mockReturnValue(["google"])
const input: ExtendedModelResolutionInput = {
categoryDefaultModel: "google/gemini-2.5-flash",
availableModels: new Set(),
systemDefaultModel: "anthropic/claude-sonnet-4-5",
}
// when
const result = resolveModelWithFallback(input)
// then - should pass through unchanged
expect(result!.model).toBe("google/gemini-2.5-flash")
expect(result!.source).toBe("category-default") expect(result!.source).toBe("category-default")
cacheSpy.mockRestore() cacheSpy.mockRestore()
}) })

View File

@@ -22,7 +22,6 @@ export type ModelResolutionResult = {
export type ExtendedModelResolutionInput = { export type ExtendedModelResolutionInput = {
uiSelectedModel?: string uiSelectedModel?: string
userModel?: string userModel?: string
userFallbackModels?: string[]
categoryDefaultModel?: string categoryDefaultModel?: string
fallbackChain?: FallbackEntry[] fallbackChain?: FallbackEntry[]
availableModels: Set<string> availableModels: Set<string>
@@ -45,9 +44,9 @@ export function resolveModel(input: ModelResolutionInput): string | undefined {
export function resolveModelWithFallback( export function resolveModelWithFallback(
input: ExtendedModelResolutionInput, input: ExtendedModelResolutionInput,
): ModelResolutionResult | undefined { ): ModelResolutionResult | undefined {
const { uiSelectedModel, userModel, userFallbackModels, categoryDefaultModel, fallbackChain, availableModels, systemDefaultModel } = input const { uiSelectedModel, userModel, categoryDefaultModel, fallbackChain, availableModels, systemDefaultModel } = input
const resolved = resolveModelPipeline({ const resolved = resolveModelPipeline({
intent: { uiSelectedModel, userModel, userFallbackModels, categoryDefaultModel }, intent: { uiSelectedModel, userModel, categoryDefaultModel },
constraints: { availableModels }, constraints: { availableModels },
policy: { fallbackChain, systemDefaultModel }, policy: { fallbackChain, systemDefaultModel },
}) })
@@ -62,13 +61,3 @@ export function resolveModelWithFallback(
variant: resolved.variant, variant: resolved.variant,
} }
} }
/**
 * Coerces the fallback_models config value (a single string or a list)
 * into a uniform string[] shape.
 *
 * Falsy input (undefined / empty string) yields undefined so callers can
 * treat "not configured" uniformly. Kept in one place so the
 * normalization rule is never duplicated.
 */
export function normalizeFallbackModels(models: string | string[] | undefined): string[] | undefined {
  if (!models) return undefined
  return Array.isArray(models) ? models : [models]
}

View File

@@ -0,0 +1,18 @@
/**
 * Rewrites a bare model ID into the exact ID a given provider's API expects.
 *
 * Some providers publish models under slightly different IDs than the
 * canonical names used in fallback chains: github-copilot uses dotted
 * claude version numbers, and gemini-3 models are only served under a
 * `-preview` suffix. The gemini replacements use a negative lookahead so
 * an already-transformed ID passes through unchanged — the function is
 * idempotent and safe to call on every resolution path without
 * double-appending `-preview`.
 *
 * Fix: dropped the former `.replace("claude-sonnet-4", "claude-sonnet-4")`
 * line — replacing a string with itself is a no-op and was dead code.
 *
 * @param provider - provider segment of a `provider/model` ID
 * @param model - bare model ID (no provider prefix)
 * @returns the provider-specific model ID; unknown providers pass through unchanged
 */
export function transformModelForProvider(provider: string, model: string): string {
  if (provider === "github-copilot") {
    return model
      .replace("claude-opus-4-6", "claude-opus-4.6")
      .replace("claude-sonnet-4-6", "claude-sonnet-4.6")
      .replace("claude-sonnet-4-5", "claude-sonnet-4.5")
      .replace("claude-haiku-4-5", "claude-haiku-4.5")
      // gemini-3 models are only exposed with a -preview suffix; the
      // negative lookahead keeps repeated calls from double-suffixing.
      .replace(/gemini-3-pro(?!-)/g, "gemini-3-pro-preview")
      .replace(/gemini-3-flash(?!-)/g, "gemini-3-flash-preview")
  }
  if (provider === "google") {
    return model
      .replace(/gemini-3-pro(?!-)/g, "gemini-3-pro-preview")
      .replace(/gemini-3-flash(?!-)/g, "gemini-3-flash-preview")
  }
  return model
}

View File

@@ -1,53 +0,0 @@
/**
 * Session Category Registry
 *
 * Tracks which category each session was assigned, keyed by session ID.
 * The runtime-fallback hook consults this mapping to look up
 * category-specific fallback_models.
 */

// Backing store: sessionID -> category name (module-global, in-memory only).
const categoryBySession = new Map<string, string>()

export const SessionCategoryRegistry = {
  /** Associate a session with its category (overwrites any previous value). */
  register(sessionID: string, category: string): void {
    categoryBySession.set(sessionID, category)
  },

  /** Look up the category for a session, or undefined if none was registered. */
  get(sessionID: string): string | undefined {
    return categoryBySession.get(sessionID)
  },

  /** Drop a session's entry (cleanup when the session ends). */
  remove(sessionID: string): void {
    categoryBySession.delete(sessionID)
  },

  /** Whether a category has been registered for this session. */
  has(sessionID: string): boolean {
    return categoryBySession.has(sessionID)
  },

  /** Number of tracked sessions (debugging aid). */
  size(): number {
    return categoryBySession.size
  },

  /** Remove every entry — use with caution, mainly for testing. */
  clear(): void {
    categoryBySession.clear()
  },
}

View File

@@ -0,0 +1,30 @@
// Unit tests for the in-memory session -> model registry (session-model-state).
// NOTE(review): the registry state is module-global, so each test uses a
// distinct sessionID to avoid cross-test interference.
import { describe, expect, test } from "bun:test"
import { clearSessionModel, getSessionModel, setSessionModel } from "./session-model-state"
describe("session-model-state", () => {
test("stores and retrieves a session model", () => {
//#given
const sessionID = "ses_test"
//#when
setSessionModel(sessionID, { providerID: "github-copilot", modelID: "gpt-4.1" })
//#then - round-trip returns a structurally equal SessionModel
expect(getSessionModel(sessionID)).toEqual({
providerID: "github-copilot",
modelID: "gpt-4.1",
})
})
test("clears a session model", () => {
//#given
const sessionID = "ses_clear"
setSessionModel(sessionID, { providerID: "quotio", modelID: "gpt-5.3-codex" })
//#when
clearSessionModel(sessionID)
//#then - lookup after clear yields undefined
expect(getSessionModel(sessionID)).toBeUndefined()
})
})

View File

@@ -0,0 +1,15 @@
/** Provider/model pair currently assigned to a session. */
export type SessionModel = { providerID: string; modelID: string }

// Module-global registry: sessionID -> its active model (in-memory only).
const modelBySession = new Map<string, SessionModel>()

/** Record (or overwrite) the model assigned to a session. */
export function setSessionModel(sessionID: string, model: SessionModel): void {
  modelBySession.set(sessionID, model)
}

/** Fetch the model assigned to a session, or undefined if none is set. */
export function getSessionModel(sessionID: string): SessionModel | undefined {
  return modelBySession.get(sessionID)
}

/** Forget a session's model (cleanup when the session ends). */
export function clearSessionModel(sessionID: string): void {
  modelBySession.delete(sessionID)
}

View File

@@ -1,6 +1,6 @@
import type { CallOmoAgentArgs } from "./types" import type { CallOmoAgentArgs } from "./types"
import type { PluginInput } from "@opencode-ai/plugin" import type { PluginInput } from "@opencode-ai/plugin"
import { subagentSessions } from "../../features/claude-code-session-state" import { subagentSessions, syncSubagentSessions } from "../../features/claude-code-session-state"
import { log } from "../../shared" import { log } from "../../shared"
export async function createOrGetSession( export async function createOrGetSession(
@@ -64,6 +64,7 @@ Original error: ${createResult.error}`)
const sessionID = createResult.data.id const sessionID = createResult.data.id
log(`[call_omo_agent] Created session: ${sessionID}`) log(`[call_omo_agent] Created session: ${sessionID}`)
subagentSessions.add(sessionID) subagentSessions.add(sessionID)
syncSubagentSessions.add(sessionID)
return { sessionID, isNew: true } return { sessionID, isNew: true }
} }
} }

View File

@@ -1,7 +1,7 @@
import type { PluginInput } from "@opencode-ai/plugin" import type { PluginInput } from "@opencode-ai/plugin"
import { log } from "../../shared" import { log } from "../../shared"
import { resolveSessionDirectory } from "../../shared" import { resolveSessionDirectory } from "../../shared"
import { subagentSessions } from "../../features/claude-code-session-state" import { subagentSessions, syncSubagentSessions } from "../../features/claude-code-session-state"
import type { CallOmoAgentArgs } from "./types" import type { CallOmoAgentArgs } from "./types"
import type { ToolContextWithMetadata } from "./tool-context-with-metadata" import type { ToolContextWithMetadata } from "./tool-context-with-metadata"
@@ -69,5 +69,6 @@ Original error: ${createResult.error}`,
const sessionID = createResult.data.id const sessionID = createResult.data.id
log(`[call_omo_agent] Created session: ${sessionID}`) log(`[call_omo_agent] Created session: ${sessionID}`)
subagentSessions.add(sessionID) subagentSessions.add(sessionID)
syncSubagentSessions.add(sessionID)
return { ok: true, sessionID } return { ok: true, sessionID }
} }

View File

@@ -1,10 +1,10 @@
import type { DelegateTaskArgs, ToolContextWithMetadata } from "./types" import type { DelegateTaskArgs, ToolContextWithMetadata } from "./types"
import type { ExecutorContext, ParentContext } from "./executor-types" import type { ExecutorContext, ParentContext } from "./executor-types"
import type { FallbackEntry } from "../../shared/model-requirements"
import { getTimingConfig } from "./timing" import { getTimingConfig } from "./timing"
import { storeToolMetadata } from "../../features/tool-metadata-store" import { storeToolMetadata } from "../../features/tool-metadata-store"
import { formatDetailedError } from "./error-formatting" import { formatDetailedError } from "./error-formatting"
import { getSessionTools } from "../../shared/session-tools-store" import { getSessionTools } from "../../shared/session-tools-store"
import { SessionCategoryRegistry } from "../../shared/session-category-registry"
export async function executeBackgroundTask( export async function executeBackgroundTask(
args: DelegateTaskArgs, args: DelegateTaskArgs,
@@ -13,7 +13,8 @@ export async function executeBackgroundTask(
parentContext: ParentContext, parentContext: ParentContext,
agentToUse: string, agentToUse: string,
categoryModel: { providerID: string; modelID: string; variant?: string } | undefined, categoryModel: { providerID: string; modelID: string; variant?: string } | undefined,
systemContent: string | undefined systemContent: string | undefined,
fallbackChain?: FallbackEntry[],
): Promise<string> { ): Promise<string> {
const { manager } = executorCtx const { manager } = executorCtx
@@ -28,6 +29,7 @@ export async function executeBackgroundTask(
parentAgent: parentContext.agent, parentAgent: parentContext.agent,
parentTools: getSessionTools(parentContext.sessionID), parentTools: getSessionTools(parentContext.sessionID),
model: categoryModel, model: categoryModel,
fallbackChain,
skills: args.load_skills.length > 0 ? args.load_skills : undefined, skills: args.load_skills.length > 0 ? args.load_skills : undefined,
skillContent: systemContent, skillContent: systemContent,
category: args.category, category: args.category,
@@ -49,10 +51,6 @@ export async function executeBackgroundTask(
sessionId = updated?.sessionID sessionId = updated?.sessionID
} }
if (args.category && sessionId) {
SessionCategoryRegistry.register(sessionId, args.category)
}
const unstableMeta = { const unstableMeta = {
title: args.description, title: args.description,
metadata: { metadata: {

View File

@@ -1,6 +1,7 @@
import type { ModelFallbackInfo } from "../../features/task-toast-manager/types" import type { ModelFallbackInfo } from "../../features/task-toast-manager/types"
import type { DelegateTaskArgs } from "./types" import type { DelegateTaskArgs } from "./types"
import type { ExecutorContext } from "./executor-types" import type { ExecutorContext } from "./executor-types"
import type { FallbackEntry } from "../../shared/model-requirements"
import { mergeCategories } from "../../shared/merge-categories" import { mergeCategories } from "../../shared/merge-categories"
import { SISYPHUS_JUNIOR_AGENT } from "./sisyphus-junior-agent" import { SISYPHUS_JUNIOR_AGENT } from "./sisyphus-junior-agent"
import { resolveCategoryConfig } from "./categories" import { resolveCategoryConfig } from "./categories"
@@ -16,6 +17,7 @@ export interface CategoryResolutionResult {
modelInfo: ModelFallbackInfo | undefined modelInfo: ModelFallbackInfo | undefined
actualModel: string | undefined actualModel: string | undefined
isUnstableAgent: boolean isUnstableAgent: boolean
fallbackChain?: FallbackEntry[] // For runtime retry on model errors
error?: string error?: string
} }
@@ -177,5 +179,6 @@ Available categories: ${categoryNames.join(", ")}`,
modelInfo, modelInfo,
actualModel, actualModel,
isUnstableAgent, isUnstableAgent,
fallbackChain: requirement?.fallbackChain,
} }
} }

View File

@@ -1,5 +1,6 @@
import type { FallbackEntry } from "../../shared/model-requirements" import type { FallbackEntry } from "../../shared/model-requirements"
import { fuzzyMatchModel } from "../../shared/model-availability" import { fuzzyMatchModel } from "../../shared/model-availability"
import { transformModelForProvider } from "../../shared/provider-model-id-transform"
function normalizeModel(model?: string): string | undefined { function normalizeModel(model?: string): string | undefined {
const trimmed = model?.trim() const trimmed = model?.trim()
@@ -38,7 +39,8 @@ export function resolveModelForDelegateTask(input: {
const first = fallbackChain[0] const first = fallbackChain[0]
const provider = first?.providers?.[0] const provider = first?.providers?.[0]
if (provider) { if (provider) {
return { model: `${provider}/${first.model}`, variant: first.variant } const transformedModelId = transformModelForProvider(provider, first.model)
return { model: `${provider}/${transformedModelId}`, variant: first.variant }
} }
} else { } else {
for (const entry of fallbackChain) { for (const entry of fallbackChain) {

View File

@@ -8,6 +8,7 @@ import { getAgentDisplayName, getAgentConfigKey } from "../../shared/agent-displ
import { normalizeSDKResponse } from "../../shared" import { normalizeSDKResponse } from "../../shared"
import { log } from "../../shared/logger" import { log } from "../../shared/logger"
import { getAvailableModelsForDelegateTask } from "./available-models" import { getAvailableModelsForDelegateTask } from "./available-models"
import type { FallbackEntry } from "../../shared/model-requirements"
import { resolveModelForDelegateTask } from "./model-selection" import { resolveModelForDelegateTask } from "./model-selection"
export async function resolveSubagentExecution( export async function resolveSubagentExecution(
@@ -15,7 +16,7 @@ export async function resolveSubagentExecution(
executorCtx: ExecutorContext, executorCtx: ExecutorContext,
parentAgent: string | undefined, parentAgent: string | undefined,
categoryExamples: string categoryExamples: string
): Promise<{ agentToUse: string; categoryModel: { providerID: string; modelID: string; variant?: string } | undefined; error?: string }> { ): Promise<{ agentToUse: string; categoryModel: { providerID: string; modelID: string; variant?: string } | undefined; fallbackChain?: FallbackEntry[]; error?: string }> {
const { client, agentOverrides } = executorCtx const { client, agentOverrides } = executorCtx
if (!args.subagent_type?.trim()) { if (!args.subagent_type?.trim()) {
@@ -46,6 +47,7 @@ Create the work plan directly - that's your job as the planning agent.`,
let agentToUse = agentName let agentToUse = agentName
let categoryModel: { providerID: string; modelID: string; variant?: string } | undefined let categoryModel: { providerID: string; modelID: string; variant?: string } | undefined
let fallbackChain: FallbackEntry[] | undefined = undefined
try { try {
const agentsResult = await client.app.agents() const agentsResult = await client.app.agents()
@@ -92,6 +94,7 @@ Create the work plan directly - that's your job as the planning agent.`,
const agentOverride = agentOverrides?.[agentConfigKey as keyof typeof agentOverrides] const agentOverride = agentOverrides?.[agentConfigKey as keyof typeof agentOverrides]
?? (agentOverrides ? Object.entries(agentOverrides).find(([key]) => key.toLowerCase() === agentConfigKey)?.[1] : undefined) ?? (agentOverrides ? Object.entries(agentOverrides).find(([key]) => key.toLowerCase() === agentConfigKey)?.[1] : undefined)
const agentRequirement = AGENT_MODEL_REQUIREMENTS[agentConfigKey] const agentRequirement = AGENT_MODEL_REQUIREMENTS[agentConfigKey]
fallbackChain = agentRequirement?.fallbackChain
if (agentOverride?.model || agentRequirement || matchedAgent.model) { if (agentOverride?.model || agentRequirement || matchedAgent.model) {
const availableModels = await getAvailableModelsForDelegateTask(client) const availableModels = await getAvailableModelsForDelegateTask(client)
@@ -135,5 +138,5 @@ Create the work plan directly - that's your job as the planning agent.`,
} }
} }
return { agentToUse, categoryModel } return { agentToUse, categoryModel, fallbackChain }
} }

View File

@@ -100,7 +100,7 @@ describe("executeSyncTask - cleanup on error paths", () => {
//#when - executeSyncTask with fetchSyncResult failing //#when - executeSyncTask with fetchSyncResult failing
const result = await executeSyncTask(args, mockCtx, mockExecutorCtx, { const result = await executeSyncTask(args, mockCtx, mockExecutorCtx, {
sessionID: "parent-session", sessionID: "parent-session",
}, "test-agent", undefined, undefined, undefined, deps) }, "test-agent", undefined, undefined, undefined, undefined, deps)
//#then - should return error and cleanup resources //#then - should return error and cleanup resources
expect(result).toBe("Fetch failed") expect(result).toBe("Fetch failed")
@@ -150,7 +150,7 @@ describe("executeSyncTask - cleanup on error paths", () => {
//#when - executeSyncTask with pollSyncSession failing //#when - executeSyncTask with pollSyncSession failing
const result = await executeSyncTask(args, mockCtx, mockExecutorCtx, { const result = await executeSyncTask(args, mockCtx, mockExecutorCtx, {
sessionID: "parent-session", sessionID: "parent-session",
}, "test-agent", undefined, undefined, undefined, deps) }, "test-agent", undefined, undefined, undefined, undefined, deps)
//#then - should return error and cleanup resources //#then - should return error and cleanup resources
expect(result).toBe("Poll error") expect(result).toBe("Poll error")
@@ -200,7 +200,7 @@ describe("executeSyncTask - cleanup on error paths", () => {
//#when - executeSyncTask completes successfully //#when - executeSyncTask completes successfully
const result = await executeSyncTask(args, mockCtx, mockExecutorCtx, { const result = await executeSyncTask(args, mockCtx, mockExecutorCtx, {
sessionID: "parent-session", sessionID: "parent-session",
}, "test-agent", undefined, undefined, undefined, deps) }, "test-agent", undefined, undefined, undefined, undefined, deps)
//#then - should complete and cleanup resources //#then - should complete and cleanup resources
expect(result).toContain("Task completed") expect(result).toContain("Task completed")

View File

@@ -3,12 +3,12 @@ import type { DelegateTaskArgs, ToolContextWithMetadata } from "./types"
import type { ExecutorContext, ParentContext } from "./executor-types" import type { ExecutorContext, ParentContext } from "./executor-types"
import { getTaskToastManager } from "../../features/task-toast-manager" import { getTaskToastManager } from "../../features/task-toast-manager"
import { storeToolMetadata } from "../../features/tool-metadata-store" import { storeToolMetadata } from "../../features/tool-metadata-store"
import { subagentSessions } from "../../features/claude-code-session-state" import { subagentSessions, syncSubagentSessions, setSessionAgent } from "../../features/claude-code-session-state"
import { log } from "../../shared/logger" import { log } from "../../shared/logger"
import { SessionCategoryRegistry } from "../../shared/session-category-registry"
import { formatDuration } from "./time-formatter" import { formatDuration } from "./time-formatter"
import { formatDetailedError } from "./error-formatting" import { formatDetailedError } from "./error-formatting"
import { syncTaskDeps, type SyncTaskDeps } from "./sync-task-deps" import { syncTaskDeps, type SyncTaskDeps } from "./sync-task-deps"
import { setSessionFallbackChain, clearSessionFallbackChain } from "../../hooks/model-fallback/hook"
export async function executeSyncTask( export async function executeSyncTask(
args: DelegateTaskArgs, args: DelegateTaskArgs,
@@ -19,6 +19,7 @@ export async function executeSyncTask(
categoryModel: { providerID: string; modelID: string; variant?: string } | undefined, categoryModel: { providerID: string; modelID: string; variant?: string } | undefined,
systemContent: string | undefined, systemContent: string | undefined,
modelInfo?: ModelFallbackInfo, modelInfo?: ModelFallbackInfo,
fallbackChain?: import("../../shared/model-requirements").FallbackEntry[],
deps: SyncTaskDeps = syncTaskDeps deps: SyncTaskDeps = syncTaskDeps
): Promise<string> { ): Promise<string> {
const { client, directory, onSyncSessionCreated } = executorCtx const { client, directory, onSyncSessionCreated } = executorCtx
@@ -41,10 +42,9 @@ export async function executeSyncTask(
const sessionID = createSessionResult.sessionID const sessionID = createSessionResult.sessionID
syncSessionID = sessionID syncSessionID = sessionID
subagentSessions.add(sessionID) subagentSessions.add(sessionID)
syncSubagentSessions.add(sessionID)
if (args.category) { setSessionAgent(sessionID, agentToUse)
SessionCategoryRegistry.register(sessionID, args.category) setSessionFallbackChain(sessionID, fallbackChain)
}
if (onSyncSessionCreated) { if (onSyncSessionCreated) {
log("[task] Invoking onSyncSessionCreated callback", { sessionID, parentID: parentContext.sessionID }) log("[task] Invoking onSyncSessionCreated callback", { sessionID, parentID: parentContext.sessionID })
@@ -64,6 +64,7 @@ export async function executeSyncTask(
if (toastManager) { if (toastManager) {
toastManager.addTask({ toastManager.addTask({
id: taskId, id: taskId,
sessionID,
description: args.description, description: args.description,
agent: agentToUse, agent: agentToUse,
isBackground: false, isBackground: false,
@@ -150,7 +151,8 @@ session_id: ${sessionID}
} finally { } finally {
if (syncSessionID) { if (syncSessionID) {
subagentSessions.delete(syncSessionID) subagentSessions.delete(syncSessionID)
SessionCategoryRegistry.remove(syncSessionID) syncSubagentSessions.delete(syncSessionID)
clearSessionFallbackChain(syncSessionID)
} }
} }
} }

View File

@@ -1044,7 +1044,7 @@ describe("sisyphus-task", () => {
modelID: "claude-opus-4-6", modelID: "claude-opus-4-6",
variant: "max", variant: "max",
}) })
}, { timeout: 20000 }) })
test("DEFAULT_CATEGORIES variant passes to sync session.prompt WITHOUT userCategories", async () => { test("DEFAULT_CATEGORIES variant passes to sync session.prompt WITHOUT userCategories", async () => {
// given - NO userCategories, testing DEFAULT_CATEGORIES for sync mode // given - NO userCategories, testing DEFAULT_CATEGORIES for sync mode
@@ -2624,17 +2624,15 @@ describe("sisyphus-task", () => {
toolContext toolContext
) )
// then - agent-browser skill should be resolved // then - agent-browser skill should be resolved (not in notFound)
expect(promptBody).toBeDefined() expect(promptBody).toBeDefined()
expect(promptBody.system).toBeDefined() expect(promptBody.system).toBeDefined()
expect(promptBody.system).toContain("<Category_Context>") expect(promptBody.system).toContain("agent-browser")
expect(String(promptBody.system).startsWith("<Category_Context>")).toBe(false)
}, { timeout: 20000 }) }, { timeout: 20000 })
test("should resolve agent-browser skill even when browserProvider is not set", async () => { test("should NOT resolve agent-browser skill when browserProvider is not set", async () => {
// given - delegate_task without browserProvider // given - task without browserProvider (defaults to playwright)
const { createDelegateTask } = require("./tools") const { createDelegateTask } = require("./tools")
let promptBody: any
const mockManager = { launch: async () => ({}) } const mockManager = { launch: async () => ({}) }
const mockClient = { const mockClient = {
@@ -2643,10 +2641,8 @@ describe("sisyphus-task", () => {
session: { session: {
get: async () => ({ data: { directory: "/project" } }), get: async () => ({ data: { directory: "/project" } }),
create: async () => ({ data: { id: "ses_no_browser_provider" } }), create: async () => ({ data: { id: "ses_no_browser_provider" } }),
prompt: async (input: any) => { prompt: async () => ({ data: {} }),
promptBody = input.body promptAsync: async () => ({ data: {} }),
return { data: {} }
},
messages: async () => ({ messages: async () => ({
data: [{ info: { role: "assistant" }, parts: [{ type: "text", text: "Done" }] }] data: [{ info: { role: "assistant" }, parts: [{ type: "text", text: "Done" }] }]
}), }),
@@ -2679,7 +2675,7 @@ describe("sisyphus-task", () => {
toolContext toolContext
) )
// then - agent-browser skill should NOT resolve without browserProvider // then - should return skill not found error
expect(result).toContain("Skills not found") expect(result).toContain("Skills not found")
expect(result).toContain("agent-browser") expect(result).toContain("agent-browser")
}) })

View File

@@ -164,6 +164,7 @@ Prompts MUST be in English.`
let modelInfo: import("../../features/task-toast-manager/types").ModelFallbackInfo | undefined let modelInfo: import("../../features/task-toast-manager/types").ModelFallbackInfo | undefined
let actualModel: string | undefined let actualModel: string | undefined
let isUnstableAgent = false let isUnstableAgent = false
let fallbackChain: import("../../shared/model-requirements").FallbackEntry[] | undefined
if (args.category) { if (args.category) {
const resolution = await resolveCategoryExecution(args, options, inheritedModel, systemDefaultModel) const resolution = await resolveCategoryExecution(args, options, inheritedModel, systemDefaultModel)
@@ -176,6 +177,7 @@ Prompts MUST be in English.`
modelInfo = resolution.modelInfo modelInfo = resolution.modelInfo
actualModel = resolution.actualModel actualModel = resolution.actualModel
isUnstableAgent = resolution.isUnstableAgent isUnstableAgent = resolution.isUnstableAgent
fallbackChain = resolution.fallbackChain
const isRunInBackgroundExplicitlyFalse = args.run_in_background === false || args.run_in_background === "false" as unknown as boolean const isRunInBackgroundExplicitlyFalse = args.run_in_background === false || args.run_in_background === "false" as unknown as boolean
@@ -206,6 +208,7 @@ Prompts MUST be in English.`
} }
agentToUse = resolution.agentToUse agentToUse = resolution.agentToUse
categoryModel = resolution.categoryModel categoryModel = resolution.categoryModel
fallbackChain = resolution.fallbackChain
} }
const systemContent = buildSystemContent({ const systemContent = buildSystemContent({
@@ -217,10 +220,10 @@ Prompts MUST be in English.`
}) })
if (runInBackground) { if (runInBackground) {
return executeBackgroundTask(args, ctx, options, parentContext, agentToUse, categoryModel, systemContent) return executeBackgroundTask(args, ctx, options, parentContext, agentToUse, categoryModel, systemContent, fallbackChain)
} }
return executeSyncTask(args, ctx, options, parentContext, agentToUse, categoryModel, systemContent, modelInfo) return executeSyncTask(args, ctx, options, parentContext, agentToUse, categoryModel, systemContent, modelInfo, fallbackChain)
}, },
}) })
} }