Merge pull request #2674 from RaviTharuma/fix/dedup-delegated-model-config

refactor: deduplicate DelegatedModelConfig into shared module
This commit is contained in:
YeonGyu-Kim
2026-03-25 21:43:31 +09:00
committed by GitHub
11 changed files with 338 additions and 390 deletions

View File

@@ -15,36 +15,8 @@ import {
resolveInheritedPromptTools,
createInternalAgentTextPart,
} from "../../shared"
import { applySessionPromptParams } from "../../shared/session-prompt-params-helpers"
import { setSessionTools } from "../../shared/session-tools-store"
import { setSessionPromptParams } from "../../shared/session-prompt-params-state"
type PromptParamsModel = {
  reasoningEffort?: string
  thinking?: { type: "enabled" | "disabled"; budgetTokens?: number }
  maxTokens?: number
  temperature?: number
  top_p?: number
}

/**
 * Persist a delegated model's prompt parameters for a session.
 *
 * Collects the nested prompt-option bag (reasoningEffort / thinking /
 * maxTokens) and, when any parameter is actually present, records it together
 * with the top-level sampling params (temperature, top_p → topP) via
 * setSessionPromptParams. A model with nothing set is a no-op.
 */
function applySessionPromptParams(sessionID: string, model: PromptParamsModel): void {
  const promptOptions: Record<string, unknown> = {}
  // Truthiness check (not !== undefined) mirrors the original behavior:
  // an empty-string reasoningEffort is deliberately skipped.
  if (model.reasoningEffort) promptOptions.reasoningEffort = model.reasoningEffort
  if (model.thinking) promptOptions.thinking = model.thinking
  if (model.maxTokens !== undefined) promptOptions.maxTokens = model.maxTokens

  const hasNestedOptions = Object.keys(promptOptions).length > 0
  const hasSamplingParams = model.temperature !== undefined || model.top_p !== undefined
  if (!hasNestedOptions && !hasSamplingParams) return

  const params: Record<string, unknown> = {}
  if (model.temperature !== undefined) params.temperature = model.temperature
  // Note the snake_case → camelCase rename on the way into the store.
  if (model.top_p !== undefined) params.topP = model.top_p
  if (hasNestedOptions) params.options = promptOptions
  setSessionPromptParams(sessionID, params)
}
import { SessionCategoryRegistry } from "../../shared/session-category-registry"
import { ConcurrencyManager } from "./concurrency"
import type { BackgroundTaskConfig, TmuxConfig } from "../../config/schema"

View File

@@ -2,7 +2,7 @@ import type { BackgroundTask, LaunchInput, ResumeInput } from "./types"
import type { OpencodeClient, OnSubagentSessionCreated, QueueItem } from "./constants"
import { TMUX_CALLBACK_DELAY_MS } from "./constants"
import { log, getAgentToolRestrictions, promptWithModelSuggestionRetry, createInternalAgentTextPart } from "../../shared"
import { setSessionPromptParams } from "../../shared/session-prompt-params-state"
import { applySessionPromptParams } from "../../shared/session-prompt-params-helpers"
import { subagentSessions } from "../claude-code-session-state"
import { getTaskToastManager } from "../task-toast-manager"
import { isInsideTmux } from "../../shared/tmux"
@@ -136,25 +136,7 @@ export async function startTask(
: undefined
const launchVariant = input.model?.variant
if (input.model) {
const promptOptions: Record<string, unknown> = {
...(input.model.reasoningEffort ? { reasoningEffort: input.model.reasoningEffort } : {}),
...(input.model.thinking ? { thinking: input.model.thinking } : {}),
...(input.model.maxTokens !== undefined ? { maxTokens: input.model.maxTokens } : {}),
}
if (
input.model.temperature !== undefined ||
input.model.top_p !== undefined ||
Object.keys(promptOptions).length > 0
) {
setSessionPromptParams(sessionID, {
...(input.model.temperature !== undefined ? { temperature: input.model.temperature } : {}),
...(input.model.top_p !== undefined ? { topP: input.model.top_p } : {}),
...(Object.keys(promptOptions).length > 0 ? { options: promptOptions } : {}),
})
}
}
applySessionPromptParams(sessionID, input.model)
promptWithModelSuggestionRetry(client, {
path: { id: sessionID },
@@ -244,25 +226,7 @@ export async function resumeTask(
: undefined
const resumeVariant = task.model?.variant
if (task.model) {
const promptOptions: Record<string, unknown> = {
...(task.model.reasoningEffort ? { reasoningEffort: task.model.reasoningEffort } : {}),
...(task.model.thinking ? { thinking: task.model.thinking } : {}),
...(task.model.maxTokens !== undefined ? { maxTokens: task.model.maxTokens } : {}),
}
if (
task.model.temperature !== undefined ||
task.model.top_p !== undefined ||
Object.keys(promptOptions).length > 0
) {
setSessionPromptParams(task.sessionID, {
...(task.model.temperature !== undefined ? { temperature: task.model.temperature } : {}),
...(task.model.top_p !== undefined ? { topP: task.model.top_p } : {}),
...(Object.keys(promptOptions).length > 0 ? { options: promptOptions } : {}),
})
}
}
applySessionPromptParams(task.sessionID, task.model)
client.session.promptAsync({
path: { id: task.sessionID },

View File

@@ -1,4 +1,5 @@
import type { FallbackEntry } from "../../shared/model-requirements"
import type { DelegatedModelConfig } from "../../shared/model-resolution-types"
import type { SessionPermissionRule } from "../../shared/question-denied-session-permission"
export type BackgroundTaskStatus =
@@ -25,17 +26,6 @@ export interface TaskProgress {
lastMessageAt?: Date
}
// Model configuration handed to a delegated/background-task session.
// NOTE(review): this local copy duplicates the shape in
// shared/model-resolution-types — keep the two in sync (or import the shared one).
type DelegatedModelConfig = {
  // Provider identifier (e.g. "openai", "anthropic").
  providerID: string
  // Model identifier within that provider.
  modelID: string
  // Optional variant level (e.g. "low" | "medium" | "high"); free-form string here.
  variant?: string
  // Optional reasoning-effort level; validity depends on the model family.
  reasoningEffort?: string
  // Sampling temperature, forwarded verbatim when defined.
  temperature?: number
  // Nucleus-sampling probability; snake_case presumably to match the upstream
  // payload shape — mapped to camelCase `topP` when stored. TODO confirm.
  top_p?: number
  // Maximum output tokens for the session.
  maxTokens?: number
  // Extended-thinking configuration with an optional token budget —
  // assumed Anthropic-style; confirm against provider docs.
  thinking?: { type: "enabled" | "disabled"; budgetTokens?: number }
}
export interface BackgroundTask {
id: string
sessionID?: string

View File

@@ -156,7 +156,9 @@ export function createChatParamsHandler(args: {
providerID: normalizedInput.model.providerID,
modelID: normalizedInput.model.modelID,
desired: {
variant: normalizedInput.message.variant,
variant: typeof normalizedInput.message.variant === "string"
? normalizedInput.message.variant
: undefined,
reasoningEffort: typeof output.options.reasoningEffort === "string"
? output.options.reasoningEffort
: undefined,

View File

@@ -57,13 +57,11 @@ export function parseFallbackModelObjectEntry(
contextProviderID: string | undefined,
defaultProviderID = "opencode",
): FallbackEntry | undefined {
// Reuse the string-based parser for provider/model/variant extraction.
const base = parseFallbackModelEntry(obj.model, contextProviderID, defaultProviderID)
if (!base) return undefined
return {
...base,
// Explicit object variant overrides any inline variant in the model string.
variant: obj.variant ?? base.variant,
reasoningEffort: obj.reasoningEffort,
temperature: obj.temperature,

View File

@@ -1,5 +1,16 @@
import type { FallbackEntry } from "./model-requirements"
/**
 * Shared model configuration for delegated (subagent / background-task)
 * sessions. Single source of truth consolidating the previously duplicated
 * per-feature copies of this shape.
 */
export interface DelegatedModelConfig {
  /** Provider identifier (e.g. "openai", "anthropic"). */
  providerID: string
  /** Model identifier within that provider. */
  modelID: string
  /** Optional variant level (e.g. "low" | "medium" | "high"); free-form string. */
  variant?: string
  /** Optional reasoning-effort level; validity depends on the model family. */
  reasoningEffort?: string
  /** Sampling temperature, forwarded verbatim when defined. */
  temperature?: number
  /**
   * Nucleus-sampling probability; snake_case presumably to match the upstream
   * payload shape — mapped to camelCase `topP` when stored. TODO confirm.
   */
  top_p?: number
  /** Maximum output tokens for the session. */
  maxTokens?: number
  /**
   * Extended-thinking configuration with an optional token budget —
   * assumed Anthropic-style; confirm against provider docs.
   */
  thinking?: { type: "enabled" | "disabled"; budgetTokens?: number }
}
export type ModelResolutionRequest = {
intent?: {
uiSelectedModel?: string

View File

@@ -90,29 +90,6 @@ describe("resolveCompatibleModelSettings", () => {
})
})
test("downgrades gpt-5 reasoningEffort max to xhigh", () => {
// given
const input = {
providerID: "openai",
modelID: "gpt-5.4",
desired: { reasoningEffort: "max" },
}
// when
const result = resolveCompatibleModelSettings(input)
// then
expect(result.reasoningEffort).toBe("xhigh")
expect(result.changes).toEqual([
{
field: "reasoningEffort",
from: "max",
to: "xhigh",
reason: "unsupported-by-model-family",
},
])
})
test("keeps supported OpenAI reasoning-family effort for o-series models", () => {
const result = resolveCompatibleModelSettings({
providerID: "openai",
@@ -127,75 +104,6 @@ describe("resolveCompatibleModelSettings", () => {
})
})
test("downgrades openai reasoning-family effort xhigh to high", () => {
// given
const input = {
providerID: "openai",
modelID: "o3-mini",
desired: { reasoningEffort: "xhigh" },
}
// when
const result = resolveCompatibleModelSettings(input)
// then
expect(result.reasoningEffort).toBe("high")
expect(result.changes).toEqual([
{
field: "reasoningEffort",
from: "xhigh",
to: "high",
reason: "unsupported-by-model-family",
},
])
})
test("drops reasoningEffort for gpt-5 mini models", () => {
// given
const input = {
providerID: "openai",
modelID: "gpt-5.4-mini",
desired: { reasoningEffort: "high" },
}
// when
const result = resolveCompatibleModelSettings(input)
// then
expect(result.reasoningEffort).toBeUndefined()
expect(result.changes).toEqual([
{
field: "reasoningEffort",
from: "high",
to: undefined,
reason: "unsupported-by-model-family",
},
])
})
test("treats non-openai o-series models as unknown", () => {
// given
const input = {
providerID: "ollama",
modelID: "o3",
desired: { reasoningEffort: "high" },
}
// when
const result = resolveCompatibleModelSettings(input)
// then
expect(result.reasoningEffort).toBeUndefined()
expect(result.changes).toEqual([
{
field: "reasoningEffort",
from: "high",
to: undefined,
reason: "unknown-model-family",
},
])
})
test("does not record case-only normalization as a compatibility downgrade", () => {
const result = resolveCompatibleModelSettings({
providerID: "openai",
@@ -210,19 +118,19 @@ describe("resolveCompatibleModelSettings", () => {
})
})
test("downgrades unsupported GPT reasoningEffort to nearest lower level", () => {
test("drops reasoningEffort for standard GPT models (gpt-4.1)", () => {
const result = resolveCompatibleModelSettings({
providerID: "openai",
modelID: "gpt-4.1",
desired: { reasoningEffort: "xhigh" },
desired: { reasoningEffort: "high" },
})
expect(result.reasoningEffort).toBe("high")
expect(result.reasoningEffort).toBeUndefined()
expect(result.changes).toEqual([
{
field: "reasoningEffort",
from: "xhigh",
to: "high",
from: "high",
to: undefined,
reason: "unsupported-by-model-family",
},
])
@@ -299,4 +207,229 @@ describe("resolveCompatibleModelSettings", () => {
],
})
})
// Provider-agnostic detection: model ID is the source of truth, not provider ID
test("detects Claude via any provider (provider-agnostic)", () => {
for (const providerID of ["anthropic", "aws-bedrock", "bedrock", "amazon-bedrock", "opencode", "my-custom-proxy", "google-vertex-anthropic"]) {
const result = resolveCompatibleModelSettings({
providerID,
modelID: "claude-sonnet-4-6",
desired: { variant: "max" },
})
expect(result.variant).toBe("high")
expect(result.changes[0]?.reason).toBe("unsupported-by-model-family")
}
})
test("detects Claude 3 Opus via any provider", () => {
const result = resolveCompatibleModelSettings({
providerID: "some-unknown-proxy",
modelID: "claude-3-opus-20240229",
desired: { variant: "max" },
})
expect(result.variant).toBe("max")
expect(result.changes).toEqual([])
})
test("detects OpenAI reasoning models without requiring openai provider", () => {
const result = resolveCompatibleModelSettings({
providerID: "azure-openai",
modelID: "o3-mini",
desired: { reasoningEffort: "high" },
})
expect(result.reasoningEffort).toBe("high")
expect(result.changes).toEqual([])
})
// -----------------------------------------------------------------------
// Registry coverage — every model family from FAMILY_CAPABILITIES
// -----------------------------------------------------------------------
describe("model family registry coverage", () => {
const familyCases: Array<{
name: string
modelID: string
expectedVariants: string[]
hasReasoningEffort: boolean
}> = [
{ name: "Gemini", modelID: "gemini-3.1-pro", expectedVariants: ["low", "medium", "high"], hasReasoningEffort: false },
{ name: "Kimi (kimi)", modelID: "kimi-k2.5", expectedVariants: ["low", "medium", "high"], hasReasoningEffort: false },
{ name: "Kimi (k2)", modelID: "k2-v2", expectedVariants: ["low", "medium", "high"], hasReasoningEffort: false },
{ name: "GLM", modelID: "glm-5", expectedVariants: ["low", "medium", "high"], hasReasoningEffort: false },
{ name: "Minimax", modelID: "minimax-m2.5", expectedVariants: ["low", "medium", "high"], hasReasoningEffort: false },
{ name: "DeepSeek", modelID: "deepseek-r2", expectedVariants: ["low", "medium", "high"], hasReasoningEffort: false },
{ name: "Mistral", modelID: "mistral-large-next", expectedVariants: ["low", "medium", "high"], hasReasoningEffort: false },
{ name: "Codestral → Mistral", modelID: "codestral-2506", expectedVariants: ["low", "medium", "high"], hasReasoningEffort: false },
{ name: "Llama", modelID: "llama-4-maverick", expectedVariants: ["low", "medium", "high"], hasReasoningEffort: false },
]
for (const { name, modelID, expectedVariants, hasReasoningEffort } of familyCases) {
test(`${name} (${modelID}): keeps supported variant`, () => {
const highest = expectedVariants[expectedVariants.length - 1]
const result = resolveCompatibleModelSettings({
providerID: "any-provider",
modelID,
desired: { variant: highest },
})
expect(result.variant).toBe(highest)
expect(result.changes).toEqual([])
})
test(`${name} (${modelID}): downgrades unsupported variant`, () => {
const result = resolveCompatibleModelSettings({
providerID: "any-provider",
modelID,
desired: { variant: "max" },
})
const highest = expectedVariants[expectedVariants.length - 1]
expect(result.variant).toBe(highest)
expect(result.changes[0]?.reason).toBe("unsupported-by-model-family")
})
test(`${name} (${modelID}): ${hasReasoningEffort ? "keeps" : "drops"} reasoningEffort`, () => {
const result = resolveCompatibleModelSettings({
providerID: "any-provider",
modelID,
desired: { reasoningEffort: "high" },
})
if (hasReasoningEffort) {
expect(result.reasoningEffort).toBe("high")
expect(result.changes).toEqual([])
} else {
expect(result.reasoningEffort).toBeUndefined()
expect(result.changes[0]?.reason).toBe("unsupported-by-model-family")
}
})
}
})
// GPT-5 specific: supports xhigh variant and xhigh reasoningEffort
test("GPT-5 keeps xhigh variant and reasoningEffort", () => {
const result = resolveCompatibleModelSettings({
providerID: "openai",
modelID: "gpt-5.4",
desired: { variant: "xhigh", reasoningEffort: "xhigh" },
})
expect(result).toEqual({
variant: "xhigh",
reasoningEffort: "xhigh",
changes: [],
})
})
// Reasoning effort: "none" and "minimal" are valid per Vercel AI SDK
test("GPT-5 keeps none reasoningEffort", () => {
const result = resolveCompatibleModelSettings({
providerID: "openai",
modelID: "gpt-5.4",
desired: { reasoningEffort: "none" },
})
expect(result).toEqual({
variant: undefined,
reasoningEffort: "none",
changes: [],
})
})
test("GPT-5 keeps minimal reasoningEffort", () => {
const result = resolveCompatibleModelSettings({
providerID: "openai",
modelID: "gpt-5.4",
desired: { reasoningEffort: "minimal" },
})
expect(result).toEqual({
variant: undefined,
reasoningEffort: "minimal",
changes: [],
})
})
test("o-series keeps none reasoningEffort", () => {
const result = resolveCompatibleModelSettings({
providerID: "openai",
modelID: "o3-mini",
desired: { reasoningEffort: "none" },
})
expect(result).toEqual({
variant: undefined,
reasoningEffort: "none",
changes: [],
})
})
// Reasoning effort downgrade within families that support it
test("o-series downgrades xhigh reasoningEffort to high", () => {
const result = resolveCompatibleModelSettings({
providerID: "openai",
modelID: "o3-mini",
desired: { reasoningEffort: "xhigh" },
})
expect(result.reasoningEffort).toBe("high")
expect(result.changes).toEqual([
{
field: "reasoningEffort",
from: "xhigh",
to: "high",
reason: "unsupported-by-model-family",
},
])
})
test("GPT-5 keeps xhigh but would downgrade a hypothetical beyond-max level", () => {
// GPT-5 supports up to "xhigh" — verify the ladder works by requesting
// a value that IS in the ladder but NOT in the family's allowed list.
// Since "xhigh" is the max for GPT-5 reasoningEffort, we verify it stays.
const result = resolveCompatibleModelSettings({
providerID: "openai",
modelID: "gpt-5.4",
desired: { reasoningEffort: "xhigh" },
})
expect(result.reasoningEffort).toBe("xhigh")
expect(result.changes).toEqual([])
})
test("o-series downgrades unsupported variant to high", () => {
const result = resolveCompatibleModelSettings({
providerID: "openai",
modelID: "o3-mini",
desired: { variant: "max" },
})
expect(result.variant).toBe("high")
expect(result.changes).toEqual([
{
field: "variant",
from: "max",
to: "high",
reason: "unsupported-by-model-family",
},
])
})
// Passthrough: undefined desired values produce no changes
test("no-op when desired settings are empty", () => {
const result = resolveCompatibleModelSettings({
providerID: "anthropic",
modelID: "claude-opus-4-6",
desired: {},
})
expect(result).toEqual({
variant: undefined,
reasoningEffort: undefined,
changes: [],
})
})
})

View File

@@ -31,54 +31,65 @@ export type ModelSettingsCompatibilityResult = {
changes: ModelSettingsCompatibilityChange[]
}
type ModelFamily = "claude-opus" | "claude-non-opus" | "openai-reasoning" | "gpt-5" | "gpt-5-mini" | "gpt-legacy" | "unknown"
// ---------------------------------------------------------------------------
// Unified model family registry — detection rules + capabilities in ONE row.
// New model family = one entry. Zero code changes anywhere else.
// Order matters: more-specific patterns first (claude-opus before claude).
// ---------------------------------------------------------------------------
// One row per model family: how to detect it and which settings it supports.
type FamilyDefinition = {
  /** Substring(s) in normalised model ID that identify this family (OR) */
  includes?: string[]
  /** Regex when substring matching isn't enough */
  pattern?: RegExp
  /** Supported variant levels (ordered low -> max) */
  variants: string[]
  /** Supported reasoning-effort levels. Omit = not supported. */
  reasoningEffort?: string[]
}
// Rows are checked in order; for each row the regex `pattern` is tried first,
// then the `includes` substrings. More-specific families must precede broader
// ones ("claude-opus" before the generic "claude" row; "gpt-5" before the
// bare "gpt" legacy row).
// NOTE(review): bare substrings match loosely — e.g. any model ID containing
// "k2" classifies as Kimi and anything containing "gpt" that isn't gpt-5
// falls into gpt-legacy. Confirm this breadth is intended for custom proxies.
const MODEL_FAMILY_REGISTRY: ReadonlyArray<readonly [string, FamilyDefinition]> = [
  ["claude-opus", { pattern: /claude(?:-\d+(?:-\d+)*)?-opus/, variants: ["low", "medium", "high", "max"] }],
  ["claude-non-opus", { includes: ["claude"], variants: ["low", "medium", "high"] }],
  ["openai-reasoning", { pattern: /^o\d(?:$|-)/, variants: ["low", "medium", "high"], reasoningEffort: ["none", "minimal", "low", "medium", "high"] }],
  ["gpt-5", { includes: ["gpt-5"], variants: ["low", "medium", "high", "xhigh", "max"], reasoningEffort: ["none", "minimal", "low", "medium", "high", "xhigh"] }],
  ["gpt-legacy", { includes: ["gpt"], variants: ["low", "medium", "high"] }],
  ["gemini", { includes: ["gemini"], variants: ["low", "medium", "high"] }],
  ["kimi", { includes: ["kimi", "k2"], variants: ["low", "medium", "high"] }],
  ["glm", { includes: ["glm"], variants: ["low", "medium", "high"] }],
  ["minimax", { includes: ["minimax"], variants: ["low", "medium", "high"] }],
  ["deepseek", { includes: ["deepseek"], variants: ["low", "medium", "high"] }],
  ["mistral", { includes: ["mistral", "codestral"], variants: ["low", "medium", "high"] }],
  ["llama", { includes: ["llama"], variants: ["low", "medium", "high"] }],
]
const VARIANT_LADDER = ["low", "medium", "high", "xhigh", "max"]
const REASONING_LADDER = ["none", "minimal", "low", "medium", "high", "xhigh", "max"]
const REASONING_LADDER = ["none", "minimal", "low", "medium", "high", "xhigh"]
function detectModelFamily(providerID: string, modelID: string): ModelFamily {
const provider = providerID.toLowerCase()
// ---------------------------------------------------------------------------
// Model family detection — single pass over the registry
// ---------------------------------------------------------------------------
function detectFamily(_providerID: string, modelID: string): FamilyDefinition | undefined {
const model = normalizeModelID(modelID).toLowerCase()
const isClaudeProvider = [
"anthropic",
"google-vertex-anthropic",
"aws-bedrock-anthropic",
].includes(provider)
|| (["github-copilot", "opencode", "aws-bedrock", "bedrock"].includes(provider) && model.includes("claude"))
if (isClaudeProvider) {
return /claude(?:-\d+(?:-\d+)*)?-opus/.test(model) ? "claude-opus" : "claude-non-opus"
for (const [, def] of MODEL_FAMILY_REGISTRY) {
if (def.pattern?.test(model)) return def
if (def.includes?.some((s) => model.includes(s))) return def
}
const isOpenAiReasoningFamily = provider === "openai" && (/^o\d(?:$|-)/.test(model) || model.includes("reasoning"))
if (isOpenAiReasoningFamily) {
return "openai-reasoning"
}
if (/gpt-5.*-mini/.test(model)) {
return "gpt-5-mini"
}
if (model.includes("gpt-5")) {
return "gpt-5"
}
if (model.includes("gpt") || (provider === "openai" && /^o\d(?:$|-)/.test(model))) {
return "gpt-legacy"
}
return "unknown"
return undefined
}
// ---------------------------------------------------------------------------
// Generic resolution — one function for both fields
// ---------------------------------------------------------------------------
function downgradeWithinLadder(value: string, allowed: string[], ladder: string[]): string | undefined {
const requestedIndex = ladder.indexOf(value)
if (requestedIndex === -1) return undefined
for (let index = requestedIndex; index >= 0; index -= 1) {
const candidate = ladder[index]
if (allowed.includes(candidate)) {
return candidate
if (allowed.includes(ladder[index])) {
return ladder[index]
}
}
@@ -89,172 +100,76 @@ function normalizeCapabilitiesVariants(capabilities: VariantCapabilities | undef
if (!capabilities?.variants || capabilities.variants.length === 0) {
return undefined
}
return capabilities.variants.map((variant) => variant.toLowerCase())
return capabilities.variants.map((v) => v.toLowerCase())
}
function resolveVariant(
modelFamily: ModelFamily,
variant: string,
capabilities?: VariantCapabilities,
): { value?: string; reason?: ModelSettingsCompatibilityChange["reason"] } {
const normalized = variant.toLowerCase()
const metadataVariants = normalizeCapabilitiesVariants(capabilities)
type FieldResolution = { value?: string; reason?: ModelSettingsCompatibilityChange["reason"] }
if (metadataVariants) {
if (metadataVariants.includes(normalized)) {
return { value: normalized }
}
function resolveField(
normalized: string,
familyCaps: string[] | undefined,
ladder: string[],
familyKnown: boolean,
metadataOverride?: string[],
): FieldResolution {
// Priority 1: runtime metadata from provider
if (metadataOverride) {
if (metadataOverride.includes(normalized)) return { value: normalized }
return {
value: downgradeWithinLadder(normalized, metadataVariants, VARIANT_LADDER),
value: downgradeWithinLadder(normalized, metadataOverride, ladder),
reason: "unsupported-by-model-metadata",
}
}
if (modelFamily === "claude-opus") {
const allowed = ["low", "medium", "high", "max"]
if (allowed.includes(normalized)) {
return { value: normalized }
}
// Priority 2: family heuristic from registry
if (familyCaps) {
if (familyCaps.includes(normalized)) return { value: normalized }
return {
value: downgradeWithinLadder(normalized, allowed, VARIANT_LADDER),
value: downgradeWithinLadder(normalized, familyCaps, ladder),
reason: "unsupported-by-model-family",
}
}
if (modelFamily === "claude-non-opus") {
const allowed = ["low", "medium", "high"]
if (allowed.includes(normalized)) {
return { value: normalized }
}
return {
value: downgradeWithinLadder(normalized, allowed, VARIANT_LADDER),
reason: "unsupported-by-model-family",
}
}
if (modelFamily === "gpt-5" || modelFamily === "gpt-5-mini") {
const allowed = ["low", "medium", "high", "xhigh", "max"]
if (allowed.includes(normalized)) {
return { value: normalized }
}
return {
value: downgradeWithinLadder(normalized, allowed, VARIANT_LADDER),
reason: "unsupported-by-model-family",
}
}
if (modelFamily === "openai-reasoning") {
const allowed = ["low", "medium", "high"]
if (allowed.includes(normalized)) {
return { value: normalized }
}
return {
value: downgradeWithinLadder(normalized, allowed, VARIANT_LADDER),
reason: "unsupported-by-model-family",
}
}
if (modelFamily === "gpt-legacy") {
const allowed = ["low", "medium", "high"]
if (allowed.includes(normalized)) {
return { value: normalized }
}
return {
value: downgradeWithinLadder(normalized, allowed, VARIANT_LADDER),
reason: "unsupported-by-model-family",
}
}
return { value: undefined, reason: "unknown-model-family" }
}
function resolveReasoningEffort(modelFamily: ModelFamily, reasoningEffort: string): { value?: string; reason?: ModelSettingsCompatibilityChange["reason"] } {
const normalized = reasoningEffort.toLowerCase()
if (modelFamily === "gpt-5") {
const allowed = ["none", "minimal", "low", "medium", "high", "xhigh"]
if (allowed.includes(normalized)) {
return { value: normalized }
}
return {
value: downgradeWithinLadder(normalized, allowed, REASONING_LADDER),
reason: "unsupported-by-model-family",
}
}
if (modelFamily === "gpt-5-mini") {
return { value: undefined, reason: "unsupported-by-model-family" }
}
if (modelFamily === "openai-reasoning") {
const allowed = ["none", "minimal", "low", "medium", "high"]
if (allowed.includes(normalized)) {
return { value: normalized }
}
return {
value: downgradeWithinLadder(normalized, allowed, REASONING_LADDER),
reason: "unsupported-by-model-family",
}
}
if (modelFamily === "gpt-legacy") {
const allowed = ["none", "minimal", "low", "medium", "high"]
if (allowed.includes(normalized)) {
return { value: normalized }
}
return {
value: downgradeWithinLadder(normalized, allowed, REASONING_LADDER),
reason: "unsupported-by-model-family",
}
}
if (modelFamily === "claude-opus" || modelFamily === "claude-non-opus") {
// Known family but field not in registry (e.g. Claude + reasoningEffort)
if (familyKnown) {
return { value: undefined, reason: "unsupported-by-model-family" }
}
// Unknown family — drop the value
return { value: undefined, reason: "unknown-model-family" }
}
// ---------------------------------------------------------------------------
// Public API
// ---------------------------------------------------------------------------
export function resolveCompatibleModelSettings(
input: ModelSettingsCompatibilityInput,
): ModelSettingsCompatibilityResult {
const modelFamily = detectModelFamily(input.providerID, input.modelID)
const family = detectFamily(input.providerID, input.modelID)
const familyKnown = family !== undefined
const changes: ModelSettingsCompatibilityChange[] = []
const metadataVariants = normalizeCapabilitiesVariants(input.capabilities)
let variant = input.desired.variant
if (variant !== undefined) {
const normalizedVariant = variant.toLowerCase()
const resolved = resolveVariant(modelFamily, normalizedVariant, input.capabilities)
if (resolved.value !== normalizedVariant && resolved.reason) {
changes.push({
field: "variant",
from: variant,
to: resolved.value,
reason: resolved.reason,
})
const normalized = variant.toLowerCase()
const resolved = resolveField(normalized, family?.variants, VARIANT_LADDER, familyKnown, metadataVariants)
if (resolved.value !== normalized && resolved.reason) {
changes.push({ field: "variant", from: variant, to: resolved.value, reason: resolved.reason })
}
variant = resolved.value
}
let reasoningEffort = input.desired.reasoningEffort
if (reasoningEffort !== undefined) {
const normalizedReasoningEffort = reasoningEffort.toLowerCase()
const resolved = resolveReasoningEffort(modelFamily, normalizedReasoningEffort)
if (resolved.value !== normalizedReasoningEffort && resolved.reason) {
changes.push({
field: "reasoningEffort",
from: reasoningEffort,
to: resolved.value,
reason: resolved.reason,
})
const normalized = reasoningEffort.toLowerCase()
const resolved = resolveField(normalized, family?.reasoningEffort, REASONING_LADDER, familyKnown)
if (resolved.value !== normalized && resolved.reason) {
changes.push({ field: "reasoningEffort", from: reasoningEffort, to: resolved.value, reason: resolved.reason })
}
reasoningEffort = resolved.value
}
return {
variant,
reasoningEffort,
changes,
}
return { variant, reasoningEffort, changes }
}

View File

@@ -7,8 +7,8 @@ import {
} from "../../shared/model-suggestion-retry"
import { formatDetailedError } from "./error-formatting"
import { getAgentToolRestrictions } from "../../shared/agent-tool-restrictions"
import { applySessionPromptParams } from "../../shared/session-prompt-params-helpers"
import { setSessionTools } from "../../shared/session-tools-store"
import { setSessionPromptParams } from "../../shared/session-prompt-params-state"
import { createInternalAgentTextPart } from "../../shared/internal-initiator-marker"
type SendSyncPromptDeps = {
@@ -54,25 +54,7 @@ export async function sendSyncPrompt(
}
setSessionTools(input.sessionID, tools)
if (input.categoryModel) {
const promptOptions: Record<string, unknown> = {
...(input.categoryModel.reasoningEffort ? { reasoningEffort: input.categoryModel.reasoningEffort } : {}),
...(input.categoryModel.thinking ? { thinking: input.categoryModel.thinking } : {}),
...(input.categoryModel.maxTokens !== undefined ? { maxTokens: input.categoryModel.maxTokens } : {}),
}
if (
input.categoryModel.temperature !== undefined ||
input.categoryModel.top_p !== undefined ||
Object.keys(promptOptions).length > 0
) {
setSessionPromptParams(input.sessionID, {
...(input.categoryModel.temperature !== undefined ? { temperature: input.categoryModel.temperature } : {}),
...(input.categoryModel.top_p !== undefined ? { topP: input.categoryModel.top_p } : {}),
...(Object.keys(promptOptions).length > 0 ? { options: promptOptions } : {}),
})
}
}
applySessionPromptParams(input.sessionID, input.categoryModel)
const promptArgs = {
path: { id: input.sessionID },

View File

@@ -1,5 +1,5 @@
import { tool, type ToolDefinition } from "@opencode-ai/plugin"
import type { DelegateTaskArgs, ToolContextWithMetadata, DelegateTaskToolOptions } from "./types"
import type { DelegateTaskArgs, DelegatedModelConfig, ToolContextWithMetadata, DelegateTaskToolOptions } from "./types"
import { CATEGORY_DESCRIPTIONS } from "./constants"
import { SISYPHUS_JUNIOR_AGENT } from "./sisyphus-junior-agent"
import { mergeCategories } from "../../shared/merge-categories"
@@ -178,18 +178,7 @@ export function createDelegateTask(options: DelegateTaskToolOptions): ToolDefini
: undefined
let agentToUse: string
let categoryModel:
| {
providerID: string
modelID: string
variant?: string
reasoningEffort?: string
temperature?: number
top_p?: number
maxTokens?: number
thinking?: { type: "enabled" | "disabled"; budgetTokens?: number }
}
| undefined
let categoryModel: DelegatedModelConfig | undefined
let categoryPromptAppend: string | undefined
let modelInfo: import("../../features/task-toast-manager/types").ModelFallbackInfo | undefined
let actualModel: string | undefined

View File

@@ -71,16 +71,8 @@ export interface DelegateTaskToolOptions {
syncPollTimeoutMs?: number
}
export interface DelegatedModelConfig {
providerID: string
modelID: string
variant?: string
reasoningEffort?: string
temperature?: number
top_p?: number
maxTokens?: number
thinking?: { type: "enabled" | "disabled"; budgetTokens?: number }
}
import type { DelegatedModelConfig } from "../../shared/model-resolution-types"
export type { DelegatedModelConfig }
export interface BuildSystemContentInput {
skillContent?: string