fix(review): align model compatibility and prompt param helpers

This commit is contained in:
Ravi Tharuma
2026-03-25 09:28:59 +01:00
parent 1f5d7702ff
commit f86b8b3336
5 changed files with 65 additions and 94 deletions

View File

@@ -15,36 +15,8 @@ import {
resolveInheritedPromptTools,
createInternalAgentTextPart,
} from "../../shared"
import { applySessionPromptParams } from "../../shared/session-prompt-params-helpers"
import { setSessionTools } from "../../shared/session-tools-store"
import { setSessionPromptParams } from "../../shared/session-prompt-params-state"
/** Prompt-parameter fields a launching caller may specify for a model. */
type PromptParamsModel = {
  reasoningEffort?: string
  thinking?: { type: "enabled" | "disabled"; budgetTokens?: number }
  maxTokens?: number
  temperature?: number
  top_p?: number
}

/**
 * Records a model's prompt parameters for a session so later prompt calls can
 * pick them up via the session prompt-params store.
 *
 * Accepts `undefined` so call sites don't each need their own `if (model)`
 * guard; does nothing when no parameter is actually set, so the store is not
 * polluted with empty entries.
 *
 * @param sessionID - Session the parameters apply to.
 * @param model - Caller-supplied model settings; may be absent.
 */
function applySessionPromptParams(sessionID: string, model: PromptParamsModel | undefined): void {
  if (!model) return
  // Nested provider options — include only the keys the caller actually set.
  // reasoningEffort/thinking use truthiness on purpose: empty string / null
  // are treated the same as "not provided".
  const promptOptions: Record<string, unknown> = {
    ...(model.reasoningEffort ? { reasoningEffort: model.reasoningEffort } : {}),
    ...(model.thinking ? { thinking: model.thinking } : {}),
    ...(model.maxTokens !== undefined ? { maxTokens: model.maxTokens } : {}),
  }
  const hasOptions = Object.keys(promptOptions).length > 0
  // Skip the store write entirely when the model carries no prompt params.
  if (model.temperature === undefined && model.top_p === undefined && !hasOptions) {
    return
  }
  setSessionPromptParams(sessionID, {
    ...(model.temperature !== undefined ? { temperature: model.temperature } : {}),
    ...(model.top_p !== undefined ? { topP: model.top_p } : {}),
    ...(hasOptions ? { options: promptOptions } : {}),
  })
}
import { SessionCategoryRegistry } from "../../shared/session-category-registry"
import { ConcurrencyManager } from "./concurrency"
import type { BackgroundTaskConfig, TmuxConfig } from "../../config/schema"

View File

@@ -2,7 +2,7 @@ import type { BackgroundTask, LaunchInput, ResumeInput } from "./types"
import type { OpencodeClient, OnSubagentSessionCreated, QueueItem } from "./constants"
import { TMUX_CALLBACK_DELAY_MS } from "./constants"
import { log, getAgentToolRestrictions, promptWithModelSuggestionRetry, createInternalAgentTextPart } from "../../shared"
import { setSessionPromptParams } from "../../shared/session-prompt-params-state"
import { applySessionPromptParams } from "../../shared/session-prompt-params-helpers"
import { subagentSessions } from "../claude-code-session-state"
import { getTaskToastManager } from "../task-toast-manager"
import { isInsideTmux } from "../../shared/tmux"
@@ -136,25 +136,7 @@ export async function startTask(
: undefined
const launchVariant = input.model?.variant
if (input.model) {
const promptOptions: Record<string, unknown> = {
...(input.model.reasoningEffort ? { reasoningEffort: input.model.reasoningEffort } : {}),
...(input.model.thinking ? { thinking: input.model.thinking } : {}),
...(input.model.maxTokens !== undefined ? { maxTokens: input.model.maxTokens } : {}),
}
if (
input.model.temperature !== undefined ||
input.model.top_p !== undefined ||
Object.keys(promptOptions).length > 0
) {
setSessionPromptParams(sessionID, {
...(input.model.temperature !== undefined ? { temperature: input.model.temperature } : {}),
...(input.model.top_p !== undefined ? { topP: input.model.top_p } : {}),
...(Object.keys(promptOptions).length > 0 ? { options: promptOptions } : {}),
})
}
}
applySessionPromptParams(sessionID, input.model)
promptWithModelSuggestionRetry(client, {
path: { id: sessionID },
@@ -244,25 +226,7 @@ export async function resumeTask(
: undefined
const resumeVariant = task.model?.variant
if (task.model) {
const promptOptions: Record<string, unknown> = {
...(task.model.reasoningEffort ? { reasoningEffort: task.model.reasoningEffort } : {}),
...(task.model.thinking ? { thinking: task.model.thinking } : {}),
...(task.model.maxTokens !== undefined ? { maxTokens: task.model.maxTokens } : {}),
}
if (
task.model.temperature !== undefined ||
task.model.top_p !== undefined ||
Object.keys(promptOptions).length > 0
) {
setSessionPromptParams(task.sessionID, {
...(task.model.temperature !== undefined ? { temperature: task.model.temperature } : {}),
...(task.model.top_p !== undefined ? { topP: task.model.top_p } : {}),
...(Object.keys(promptOptions).length > 0 ? { options: promptOptions } : {}),
})
}
}
applySessionPromptParams(task.sessionID, task.model)
client.session.promptAsync({
path: { id: task.sessionID },

View File

@@ -156,7 +156,9 @@ export function createChatParamsHandler(args: {
providerID: normalizedInput.model.providerID,
modelID: normalizedInput.model.modelID,
desired: {
variant: normalizedInput.message.variant,
variant: typeof normalizedInput.message.variant === "string"
? normalizedInput.message.variant
: undefined,
reasoningEffort: typeof output.options.reasoningEffort === "string"
? output.options.reasoningEffort
: undefined,

View File

@@ -210,19 +210,19 @@ describe("resolveCompatibleModelSettings", () => {
})
})
test("downgrades unsupported GPT reasoningEffort to nearest lower level", () => {
test("drops reasoningEffort for standard GPT models (gpt-4.1)", () => {
const result = resolveCompatibleModelSettings({
providerID: "openai",
modelID: "gpt-4.1",
desired: { reasoningEffort: "xhigh" },
desired: { reasoningEffort: "high" },
})
expect(result.reasoningEffort).toBe("high")
expect(result.reasoningEffort).toBeUndefined()
expect(result.changes).toEqual([
{
field: "reasoningEffort",
from: "xhigh",
to: "high",
from: "high",
to: undefined,
reason: "unsupported-by-model-family",
},
])
@@ -459,6 +459,57 @@ describe("resolveCompatibleModelSettings", () => {
})
})
// Reasoning effort downgrade within families that support it
test("o-series downgrades xhigh reasoningEffort to high", () => {
  // "xhigh" sits above the o-series ceiling, so the resolver must step it
  // down to "high" and record that substitution in the change list.
  const expectedChange = {
    field: "reasoningEffort",
    from: "xhigh",
    to: "high",
    reason: "unsupported-by-model-family",
  }
  const result = resolveCompatibleModelSettings({
    providerID: "openai",
    modelID: "o3-mini",
    desired: { reasoningEffort: "xhigh" },
  })
  expect(result.reasoningEffort).toBe("high")
  expect(result.changes).toEqual([expectedChange])
})
test("GPT-5 keeps xhigh but would downgrade a hypothetical beyond-max level", () => {
  // "xhigh" is the top of the GPT-5 reasoningEffort ladder, so requesting it
  // must pass through unchanged with no compatibility adjustments recorded.
  const result = resolveCompatibleModelSettings({
    providerID: "openai",
    modelID: "gpt-5.4",
    desired: { reasoningEffort: "xhigh" },
  })
  expect(result.changes).toEqual([])
  expect(result.reasoningEffort).toBe("xhigh")
})
test("o-series downgrades unsupported variant to high", () => {
  // "max" is not a valid o-series variant; the resolver substitutes "high"
  // and reports the change with the family-compatibility reason.
  const result = resolveCompatibleModelSettings({
    providerID: "openai",
    modelID: "o3-mini",
    desired: { variant: "max" },
  })
  expect(result.variant).toBe("high")
  expect(result.changes).toEqual([
    { field: "variant", from: "max", to: "high", reason: "unsupported-by-model-family" },
  ])
})
// Passthrough: undefined desired values produce no changes
test("no-op when desired settings are empty", () => {
const result = resolveCompatibleModelSettings({

View File

@@ -7,8 +7,8 @@ import {
} from "../../shared/model-suggestion-retry"
import { formatDetailedError } from "./error-formatting"
import { getAgentToolRestrictions } from "../../shared/agent-tool-restrictions"
import { applySessionPromptParams } from "../../shared/session-prompt-params-helpers"
import { setSessionTools } from "../../shared/session-tools-store"
import { setSessionPromptParams } from "../../shared/session-prompt-params-state"
import { createInternalAgentTextPart } from "../../shared/internal-initiator-marker"
type SendSyncPromptDeps = {
@@ -54,25 +54,7 @@ export async function sendSyncPrompt(
}
setSessionTools(input.sessionID, tools)
if (input.categoryModel) {
const promptOptions: Record<string, unknown> = {
...(input.categoryModel.reasoningEffort ? { reasoningEffort: input.categoryModel.reasoningEffort } : {}),
...(input.categoryModel.thinking ? { thinking: input.categoryModel.thinking } : {}),
...(input.categoryModel.maxTokens !== undefined ? { maxTokens: input.categoryModel.maxTokens } : {}),
}
if (
input.categoryModel.temperature !== undefined ||
input.categoryModel.top_p !== undefined ||
Object.keys(promptOptions).length > 0
) {
setSessionPromptParams(input.sessionID, {
...(input.categoryModel.temperature !== undefined ? { temperature: input.categoryModel.temperature } : {}),
...(input.categoryModel.top_p !== undefined ? { topP: input.categoryModel.top_p } : {}),
...(Object.keys(promptOptions).length > 0 ? { options: promptOptions } : {}),
})
}
}
applySessionPromptParams(input.sessionID, input.categoryModel)
const promptArgs = {
path: { id: input.sessionID },