diff --git a/src/features/background-agent/manager.ts b/src/features/background-agent/manager.ts
index ab564eba9..21e18c9a9 100644
--- a/src/features/background-agent/manager.ts
+++ b/src/features/background-agent/manager.ts
@@ -5,7 +5,7 @@ import type {
LaunchInput,
ResumeInput,
} from "./types"
-import { log, getAgentToolRestrictions } from "../../shared"
+import { log, getAgentToolRestrictions, promptWithModelSuggestionRetry } from "../../shared"
import { ConcurrencyManager } from "./concurrency"
import type { BackgroundTaskConfig, TmuxConfig } from "../../config/schema"
import { isInsideTmux } from "../../shared/tmux"
@@ -307,7 +307,7 @@ export class BackgroundManager {
: undefined
const launchVariant = input.model?.variant
- this.client.session.prompt({
+ promptWithModelSuggestionRetry(this.client, {
path: { id: sessionID },
body: {
agent: input.agent,
diff --git a/src/features/builtin-commands/commands.ts b/src/features/builtin-commands/commands.ts
index 78a033973..eec67a9af 100644
--- a/src/features/builtin-commands/commands.ts
+++ b/src/features/builtin-commands/commands.ts
@@ -55,6 +55,7 @@ ${REFACTOR_TEMPLATE}
},
"start-work": {
description: "(builtin) Start Sisyphus work session from Prometheus plan",
+ agent: "atlas",
template: `
${START_WORK_TEMPLATE}
diff --git a/src/shared/index.ts b/src/shared/index.ts
index d9105ec4d..cd67ddc55 100644
--- a/src/shared/index.ts
+++ b/src/shared/index.ts
@@ -32,3 +32,4 @@ export * from "./connected-providers-cache"
export * from "./case-insensitive"
export * from "./session-utils"
export * from "./tmux"
+export * from "./model-suggestion-retry"
diff --git a/src/shared/model-resolver.ts b/src/shared/model-resolver.ts
index 9026a9c4e..41ca66511 100644
--- a/src/shared/model-resolver.ts
+++ b/src/shared/model-resolver.ts
@@ -60,7 +60,7 @@ export function resolveModelWithFallback(
return { model: normalizedUserModel, source: "override" }
}
- // Step 3: Provider fallback chain (with availability check)
+ // Step 3: Provider fallback chain (exact match → fuzzy match → next provider)
if (fallbackChain && fallbackChain.length > 0) {
if (availableModels.size === 0) {
const connectedProviders = readConnectedProvidersCache()
@@ -73,7 +73,7 @@ export function resolveModelWithFallback(
for (const provider of entry.providers) {
if (connectedSet.has(provider)) {
const model = `${provider}/${entry.model}`
- log("Model resolved via fallback chain (no model cache, using connected provider)", {
+ log("Model resolved via fallback chain (connected provider)", {
provider,
model: entry.model,
variant: entry.variant,
@@ -84,19 +84,19 @@ export function resolveModelWithFallback(
}
log("No connected provider found in fallback chain, falling through to system default")
}
- }
-
- for (const entry of fallbackChain) {
- for (const provider of entry.providers) {
- const fullModel = `${provider}/${entry.model}`
- const match = fuzzyMatchModel(fullModel, availableModels, [provider])
- if (match) {
- log("Model resolved via fallback chain (availability confirmed)", { provider, model: entry.model, match, variant: entry.variant })
- return { model: match, source: "provider-fallback", variant: entry.variant }
+ } else {
+ for (const entry of fallbackChain) {
+ for (const provider of entry.providers) {
+ const fullModel = `${provider}/${entry.model}`
+ const match = fuzzyMatchModel(fullModel, availableModels, [provider])
+ if (match) {
+ log("Model resolved via fallback chain (availability confirmed)", { provider, model: entry.model, match, variant: entry.variant })
+ return { model: match, source: "provider-fallback", variant: entry.variant }
+ }
}
}
+ log("No available model found in fallback chain, falling through to system default")
}
- log("No available model found in fallback chain, falling through to system default")
}
// Step 4: System default (if provided)
diff --git a/src/shared/model-suggestion-retry.test.ts b/src/shared/model-suggestion-retry.test.ts
new file mode 100644
index 000000000..d04c1c701
--- /dev/null
+++ b/src/shared/model-suggestion-retry.test.ts
@@ -0,0 +1,401 @@
+import { describe, it, expect, mock } from "bun:test"
+import { parseModelSuggestion, promptWithModelSuggestionRetry } from "./model-suggestion-retry"
+
+describe("parseModelSuggestion", () => {
+ describe("structured NamedError format", () => {
+ it("should extract suggestion from ProviderModelNotFoundError", () => {
+ //#given a structured NamedError with suggestions
+ const error = {
+ name: "ProviderModelNotFoundError",
+ data: {
+ providerID: "anthropic",
+ modelID: "claude-sonet-4",
+ suggestions: ["claude-sonnet-4", "claude-sonnet-4-5"],
+ },
+ }
+
+ //#when parsing the error
+ const result = parseModelSuggestion(error)
+
+ //#then should return the first suggestion
+ expect(result).toEqual({
+ providerID: "anthropic",
+ modelID: "claude-sonet-4",
+ suggestion: "claude-sonnet-4",
+ })
+ })
+
+ it("should return null when suggestions array is empty", () => {
+ //#given a NamedError with empty suggestions
+ const error = {
+ name: "ProviderModelNotFoundError",
+ data: {
+ providerID: "anthropic",
+ modelID: "claude-sonet-4",
+ suggestions: [],
+ },
+ }
+
+ //#when parsing the error
+ const result = parseModelSuggestion(error)
+
+ //#then should return null
+ expect(result).toBeNull()
+ })
+
+ it("should return null when suggestions field is missing", () => {
+ //#given a NamedError without suggestions
+ const error = {
+ name: "ProviderModelNotFoundError",
+ data: {
+ providerID: "anthropic",
+ modelID: "claude-sonet-4",
+ },
+ }
+
+ //#when parsing the error
+ const result = parseModelSuggestion(error)
+
+ //#then should return null
+ expect(result).toBeNull()
+ })
+ })
+
+ describe("nested error format", () => {
+ it("should extract suggestion from nested data.error", () => {
+ //#given an error with nested NamedError in data field
+ const error = {
+ data: {
+ name: "ProviderModelNotFoundError",
+ data: {
+ providerID: "openai",
+ modelID: "gpt-5",
+ suggestions: ["gpt-5.2"],
+ },
+ },
+ }
+
+ //#when parsing the error
+ const result = parseModelSuggestion(error)
+
+ //#then should extract from nested structure
+ expect(result).toEqual({
+ providerID: "openai",
+ modelID: "gpt-5",
+ suggestion: "gpt-5.2",
+ })
+ })
+
+ it("should extract suggestion from nested error field", () => {
+ //#given an error with nested NamedError in error field
+ const error = {
+ error: {
+ name: "ProviderModelNotFoundError",
+ data: {
+ providerID: "google",
+ modelID: "gemini-3-flsh",
+ suggestions: ["gemini-3-flash"],
+ },
+ },
+ }
+
+ //#when parsing the error
+ const result = parseModelSuggestion(error)
+
+ //#then should extract from nested error field
+ expect(result).toEqual({
+ providerID: "google",
+ modelID: "gemini-3-flsh",
+ suggestion: "gemini-3-flash",
+ })
+ })
+ })
+
+ describe("string message format", () => {
+ it("should parse suggestion from error message string", () => {
+ //#given an Error with model-not-found message and suggestion
+ const error = new Error(
+ "Model not found: anthropic/claude-sonet-4. Did you mean: claude-sonnet-4, claude-sonnet-4-5?"
+ )
+
+ //#when parsing the error
+ const result = parseModelSuggestion(error)
+
+ //#then should extract from message string
+ expect(result).toEqual({
+ providerID: "anthropic",
+ modelID: "claude-sonet-4",
+ suggestion: "claude-sonnet-4",
+ })
+ })
+
+ it("should parse from plain string error", () => {
+ //#given a plain string error message
+ const error =
+ "Model not found: openai/gtp-5. Did you mean: gpt-5?"
+
+ //#when parsing the error
+ const result = parseModelSuggestion(error)
+
+ //#then should extract from string
+ expect(result).toEqual({
+ providerID: "openai",
+ modelID: "gtp-5",
+ suggestion: "gpt-5",
+ })
+ })
+
+ it("should parse from object with message property", () => {
+ //#given an object with message property
+ const error = {
+ message: "Model not found: google/gemini-3-flsh. Did you mean: gemini-3-flash?",
+ }
+
+ //#when parsing the error
+ const result = parseModelSuggestion(error)
+
+ //#then should extract from message property
+ expect(result).toEqual({
+ providerID: "google",
+ modelID: "gemini-3-flsh",
+ suggestion: "gemini-3-flash",
+ })
+ })
+
+ it("should return null when message has no suggestion", () => {
+ //#given an error without Did you mean
+ const error = new Error("Model not found: anthropic/nonexistent.")
+
+ //#when parsing the error
+ const result = parseModelSuggestion(error)
+
+ //#then should return null
+ expect(result).toBeNull()
+ })
+ })
+
+ describe("edge cases", () => {
+ it("should return null for null error", () => {
+ //#given null
+ //#when parsing
+ const result = parseModelSuggestion(null)
+ //#then should return null
+ expect(result).toBeNull()
+ })
+
+ it("should return null for undefined error", () => {
+ //#given undefined
+ //#when parsing
+ const result = parseModelSuggestion(undefined)
+ //#then should return null
+ expect(result).toBeNull()
+ })
+
+ it("should return null for unrelated error", () => {
+ //#given an unrelated error
+ const error = new Error("Connection timeout")
+ //#when parsing
+ const result = parseModelSuggestion(error)
+ //#then should return null
+ expect(result).toBeNull()
+ })
+
+ it("should return null for empty object", () => {
+ //#given empty object
+ //#when parsing
+ const result = parseModelSuggestion({})
+ //#then should return null
+ expect(result).toBeNull()
+ })
+ })
+})
+
+describe("promptWithModelSuggestionRetry", () => {
+ it("should succeed on first try without retry", async () => {
+ //#given a client where prompt succeeds
+ const promptMock = mock(() => Promise.resolve())
+ const client = { session: { prompt: promptMock } }
+
+ //#when calling promptWithModelSuggestionRetry
+ await promptWithModelSuggestionRetry(client as any, {
+ path: { id: "session-1" },
+ body: {
+ parts: [{ type: "text", text: "hello" }],
+ model: { providerID: "anthropic", modelID: "claude-sonnet-4" },
+ },
+ })
+
+ //#then should call prompt exactly once
+ expect(promptMock).toHaveBeenCalledTimes(1)
+ })
+
+ it("should retry with suggestion on model-not-found error", async () => {
+ //#given a client that fails first with model-not-found, then succeeds
+ const promptMock = mock()
+ .mockRejectedValueOnce({
+ name: "ProviderModelNotFoundError",
+ data: {
+ providerID: "anthropic",
+ modelID: "claude-sonet-4",
+ suggestions: ["claude-sonnet-4"],
+ },
+ })
+ .mockResolvedValueOnce(undefined)
+ const client = { session: { prompt: promptMock } }
+
+ //#when calling promptWithModelSuggestionRetry
+ await promptWithModelSuggestionRetry(client as any, {
+ path: { id: "session-1" },
+ body: {
+ agent: "explore",
+ parts: [{ type: "text", text: "hello" }],
+ model: { providerID: "anthropic", modelID: "claude-sonet-4" },
+ },
+ })
+
+ //#then should call prompt twice - first with original, then with suggestion
+ expect(promptMock).toHaveBeenCalledTimes(2)
+ const retryCall = promptMock.mock.calls[1][0]
+ expect(retryCall.body.model).toEqual({
+ providerID: "anthropic",
+ modelID: "claude-sonnet-4",
+ })
+ })
+
+ it("should throw original error when no suggestion available", async () => {
+ //#given a client that fails with a non-model-not-found error
+ const originalError = new Error("Connection refused")
+ const promptMock = mock().mockRejectedValueOnce(originalError)
+ const client = { session: { prompt: promptMock } }
+
+ //#when calling promptWithModelSuggestionRetry
+ //#then should throw the original error
+ await expect(
+ promptWithModelSuggestionRetry(client as any, {
+ path: { id: "session-1" },
+ body: {
+ parts: [{ type: "text", text: "hello" }],
+ model: { providerID: "anthropic", modelID: "claude-sonnet-4" },
+ },
+ })
+ ).rejects.toThrow("Connection refused")
+
+ expect(promptMock).toHaveBeenCalledTimes(1)
+ })
+
+ it("should throw original error when retry also fails", async () => {
+ //#given a client that fails with model-not-found, retry also fails
+ const modelNotFoundError = {
+ name: "ProviderModelNotFoundError",
+ data: {
+ providerID: "anthropic",
+ modelID: "claude-sonet-4",
+ suggestions: ["claude-sonnet-4"],
+ },
+ }
+ const retryError = new Error("Still not found")
+ const promptMock = mock()
+ .mockRejectedValueOnce(modelNotFoundError)
+ .mockRejectedValueOnce(retryError)
+ const client = { session: { prompt: promptMock } }
+
+ //#when calling promptWithModelSuggestionRetry
+ //#then should throw the retry error (not the original)
+ await expect(
+ promptWithModelSuggestionRetry(client as any, {
+ path: { id: "session-1" },
+ body: {
+ parts: [{ type: "text", text: "hello" }],
+ model: { providerID: "anthropic", modelID: "claude-sonet-4" },
+ },
+ })
+ ).rejects.toThrow("Still not found")
+
+ expect(promptMock).toHaveBeenCalledTimes(2)
+ })
+
+ it("should preserve other body fields during retry", async () => {
+ //#given a client that fails first with model-not-found
+ const promptMock = mock()
+ .mockRejectedValueOnce({
+ name: "ProviderModelNotFoundError",
+ data: {
+ providerID: "anthropic",
+ modelID: "claude-sonet-4",
+ suggestions: ["claude-sonnet-4"],
+ },
+ })
+ .mockResolvedValueOnce(undefined)
+ const client = { session: { prompt: promptMock } }
+
+ //#when calling with additional body fields
+ await promptWithModelSuggestionRetry(client as any, {
+ path: { id: "session-1" },
+ body: {
+ agent: "explore",
+ system: "You are a helpful agent",
+ tools: { task: false },
+ parts: [{ type: "text", text: "hello" }],
+ model: { providerID: "anthropic", modelID: "claude-sonet-4" },
+ variant: "max",
+ },
+ })
+
+ //#then retry call should preserve all fields except corrected model
+ const retryCall = promptMock.mock.calls[1][0]
+ expect(retryCall.body.agent).toBe("explore")
+ expect(retryCall.body.system).toBe("You are a helpful agent")
+ expect(retryCall.body.tools).toEqual({ task: false })
+ expect(retryCall.body.variant).toBe("max")
+ expect(retryCall.body.model).toEqual({
+ providerID: "anthropic",
+ modelID: "claude-sonnet-4",
+ })
+ })
+
+ it("should handle string error message with suggestion", async () => {
+ //#given a client that fails with a string error containing suggestion
+ const promptMock = mock()
+ .mockRejectedValueOnce(
+ new Error("Model not found: anthropic/claude-sonet-4. Did you mean: claude-sonnet-4?")
+ )
+ .mockResolvedValueOnce(undefined)
+ const client = { session: { prompt: promptMock } }
+
+ //#when calling promptWithModelSuggestionRetry
+ await promptWithModelSuggestionRetry(client as any, {
+ path: { id: "session-1" },
+ body: {
+ parts: [{ type: "text", text: "hello" }],
+ model: { providerID: "anthropic", modelID: "claude-sonet-4" },
+ },
+ })
+
+ //#then should retry with suggested model
+ expect(promptMock).toHaveBeenCalledTimes(2)
+ const retryCall = promptMock.mock.calls[1][0]
+ expect(retryCall.body.model.modelID).toBe("claude-sonnet-4")
+ })
+
+ it("should not retry when no model in original request", async () => {
+ //#given a client that fails with model-not-found but original has no model param
+ const modelNotFoundError = new Error(
+ "Model not found: anthropic/claude-sonet-4. Did you mean: claude-sonnet-4?"
+ )
+ const promptMock = mock().mockRejectedValueOnce(modelNotFoundError)
+ const client = { session: { prompt: promptMock } }
+
+ //#when calling without model in body
+ //#then should throw without retrying
+ await expect(
+ promptWithModelSuggestionRetry(client as any, {
+ path: { id: "session-1" },
+ body: {
+ parts: [{ type: "text", text: "hello" }],
+ },
+ })
+ ).rejects.toThrow()
+
+ expect(promptMock).toHaveBeenCalledTimes(1)
+ })
+})
diff --git a/src/shared/model-suggestion-retry.ts b/src/shared/model-suggestion-retry.ts
new file mode 100644
index 000000000..4b2e6486c
--- /dev/null
+++ b/src/shared/model-suggestion-retry.ts
@@ -0,0 +1,111 @@
+import type { createOpencodeClient } from "@opencode-ai/sdk"
+import { log } from "./logger"
+
+type Client = ReturnType<typeof createOpencodeClient>
+
+export interface ModelSuggestionInfo {
+ providerID: string
+ modelID: string
+ suggestion: string
+}
+
+function extractMessage(error: unknown): string {
+ if (typeof error === "string") return error
+ if (error instanceof Error) return error.message
+ if (typeof error === "object" && error !== null) {
+ const obj = error as Record<string, unknown>
+ if (typeof obj.message === "string") return obj.message
+ try {
+ return JSON.stringify(error)
+ } catch {
+ return ""
+ }
+ }
+ return String(error)
+}
+
+export function parseModelSuggestion(error: unknown): ModelSuggestionInfo | null {
+ if (!error) return null
+
+ if (typeof error === "object") {
+ const errObj = error as Record<string, unknown>
+
+ if (errObj.name === "ProviderModelNotFoundError" && typeof errObj.data === "object" && errObj.data !== null) {
+ const data = errObj.data as Record<string, unknown>
+ const suggestions = data.suggestions
+ if (Array.isArray(suggestions) && suggestions.length > 0 && typeof suggestions[0] === "string") {
+ return {
+ providerID: String(data.providerID ?? ""),
+ modelID: String(data.modelID ?? ""),
+ suggestion: suggestions[0],
+ }
+ }
+ return null
+ }
+
+ for (const key of ["data", "error", "cause"] as const) {
+ const nested = errObj[key]
+ if (nested && typeof nested === "object") {
+ const result = parseModelSuggestion(nested)
+ if (result) return result
+ }
+ }
+ }
+
+ const message = extractMessage(error)
+ if (!message) return null
+
+ const modelMatch = message.match(/model not found:\s*([^/\s]+)\s*\/\s*([^.\s]+)/i)
+ const suggestionMatch = message.match(/did you mean:\s*([^,?]+)/i)
+
+ if (modelMatch && suggestionMatch) {
+ return {
+ providerID: modelMatch[1].trim(),
+ modelID: modelMatch[2].trim(),
+ suggestion: suggestionMatch[1].trim(),
+ }
+ }
+
+ return null
+}
+
+interface PromptBody {
+ model?: { providerID: string; modelID: string }
+ [key: string]: unknown
+}
+
+interface PromptArgs {
+ path: { id: string }
+ body: PromptBody
+ [key: string]: unknown
+}
+
+export async function promptWithModelSuggestionRetry(
+ client: Client,
+ args: PromptArgs,
+): Promise<void> {
+ try {
+ await client.session.prompt(args as Parameters<Client["session"]["prompt"]>[0])
+ } catch (error) {
+ const suggestion = parseModelSuggestion(error)
+ if (!suggestion || !args.body.model) {
+ throw error
+ }
+
+ log("[model-suggestion-retry] Model not found, retrying with suggestion", {
+ original: `${suggestion.providerID}/${suggestion.modelID}`,
+ suggested: suggestion.suggestion,
+ })
+
+ await client.session.prompt({
+ ...args,
+ body: {
+ ...args.body,
+ model: {
+ providerID: suggestion.providerID,
+ modelID: suggestion.suggestion,
+ },
+ },
+ } as Parameters<Client["session"]["prompt"]>[0])
+ }
+}
diff --git a/src/tools/delegate-task/tools.ts b/src/tools/delegate-task/tools.ts
index 6d152daa7..93ccc7041 100644
--- a/src/tools/delegate-task/tools.ts
+++ b/src/tools/delegate-task/tools.ts
@@ -12,7 +12,7 @@ import { discoverSkills } from "../../features/opencode-skill-loader"
import { getTaskToastManager } from "../../features/task-toast-manager"
import type { ModelFallbackInfo } from "../../features/task-toast-manager/types"
import { subagentSessions, getSessionAgent } from "../../features/claude-code-session-state"
-import { log, getAgentToolRestrictions, resolveModel, getOpenCodeConfigPaths, findByNameCaseInsensitive, equalsIgnoreCase } from "../../shared"
+import { log, getAgentToolRestrictions, resolveModel, getOpenCodeConfigPaths, findByNameCaseInsensitive, equalsIgnoreCase, promptWithModelSuggestionRetry } from "../../shared"
import { fetchAvailableModels } from "../../shared/model-availability"
import { readConnectedProvidersCache } from "../../shared/connected-providers-cache"
import { resolveModelWithFallback } from "../../shared/model-resolver"
@@ -819,12 +819,6 @@ Create the work plan directly - that's your job as the planning agent.`
// If we can't fetch agents, proceed anyway - the session.prompt will fail with a clearer error
}
- // When using subagent_type directly, inherit parent model so agents don't default
- // to their hardcoded models (like grok-code) which may not be available
- if (parentModel) {
- categoryModel = parentModel
- modelInfo = { model: `${parentModel.providerID}/${parentModel.modelID}`, type: "inherited" }
- }
}
const systemContent = buildSystemContent({ skillContent, categoryPromptAppend, agentName: agentToUse })
@@ -953,7 +947,7 @@ To continue this session: session_id="${task.sessionID}"`
try {
const allowDelegateTask = isPlanAgent(agentToUse)
- await client.session.prompt({
+ await promptWithModelSuggestionRetry(client, {
path: { id: sessionID },
body: {
agent: agentToUse,