refactor(think-mode): migrate hook from chat.params to chat.message and remove thinking config injection

Drop provider-specific thinking config injection (THINKING_CONFIGS, getThinkingConfig,
resolveProvider) and instead rely on the provider to handle thinking based on the variant field.
Hook now fires on chat.message using model from input rather than from the message object.

🤖 Generated with the assistance of [OhMyOpenCode](https://github.com/code-yeongyu/oh-my-opencode)
This commit is contained in:
YeonGyu-Kim
2026-02-26 12:00:45 +09:00
parent 0c59d2dbe7
commit b7b6721796
6 changed files with 141 additions and 1023 deletions

View File

@@ -1,6 +1,6 @@
import { detectThinkKeyword, extractPromptText } from "./detector"
import { getHighVariant, getThinkingConfig, isAlreadyHighVariant } from "./switcher"
import type { ThinkModeInput, ThinkModeState } from "./types"
import { getHighVariant, isAlreadyHighVariant } from "./switcher"
import type { ThinkModeState } from "./types"
import { log } from "../../shared"
const thinkModeState = new Map<string, ThinkModeState>()
@@ -10,53 +10,24 @@ export function clearThinkModeState(sessionID: string): void {
}
export function createThinkModeHook() {
function isDisabledThinkingConfig(config: Record<string, unknown>): boolean {
const thinkingConfig = config.thinking
if (
typeof thinkingConfig === "object" &&
thinkingConfig !== null &&
"type" in thinkingConfig &&
(thinkingConfig as { type?: string }).type === "disabled"
) {
return true
}
const providerOptions = config.providerOptions
if (typeof providerOptions !== "object" || providerOptions === null) {
return false
}
return Object.values(providerOptions as Record<string, unknown>).some(
(providerConfig) => {
if (typeof providerConfig !== "object" || providerConfig === null) {
return false
}
const providerConfigMap = providerConfig as Record<string, unknown>
const extraBody = providerConfigMap.extra_body
if (typeof extraBody !== "object" || extraBody === null) {
return false
}
const extraBodyMap = extraBody as Record<string, unknown>
const extraThinking = extraBodyMap.thinking
return (
typeof extraThinking === "object" &&
extraThinking !== null &&
(extraThinking as { type?: string }).type === "disabled"
)
}
)
}
return {
"chat.params": async (output: ThinkModeInput, sessionID: string): Promise<void> => {
"chat.message": async (
input: {
sessionID: string
model?: { providerID: string; modelID: string }
},
output: {
message: Record<string, unknown>
parts: Array<{ type: string; text?: string; [key: string]: unknown }>
}
): Promise<void> => {
const promptText = extractPromptText(output.parts)
const sessionID = input.sessionID
const state: ThinkModeState = {
requested: false,
modelSwitched: false,
thinkingConfigInjected: false,
variantSet: false,
}
if (!detectThinkKeyword(promptText)) {
@@ -66,7 +37,12 @@ export function createThinkModeHook() {
state.requested = true
const currentModel = output.message.model
if (typeof output.message.variant === "string") {
thinkModeState.set(sessionID, state)
return
}
const currentModel = input.model
if (!currentModel) {
thinkModeState.set(sessionID, state)
return
@@ -81,14 +57,15 @@ export function createThinkModeHook() {
}
const highVariant = getHighVariant(currentModel.modelID)
const thinkingConfig = getThinkingConfig(currentModel.providerID, currentModel.modelID)
if (highVariant) {
output.message.model = {
providerID: currentModel.providerID,
modelID: highVariant,
}
output.message.variant = "high"
state.modelSwitched = true
state.variantSet = true
log("Think mode: model switched to high variant", {
sessionID,
from: currentModel.modelID,
@@ -96,42 +73,6 @@ export function createThinkModeHook() {
})
}
if (thinkingConfig) {
const messageData = output.message as Record<string, unknown>
const agentThinking = messageData.thinking as { type?: string } | undefined
const agentProviderOptions = messageData.providerOptions
const agentDisabledThinking = agentThinking?.type === "disabled"
const agentHasCustomProviderOptions = Boolean(agentProviderOptions)
if (agentDisabledThinking) {
log("Think mode: skipping - agent has thinking disabled", {
sessionID,
provider: currentModel.providerID,
})
} else if (agentHasCustomProviderOptions) {
log("Think mode: skipping - agent has custom providerOptions", {
sessionID,
provider: currentModel.providerID,
})
} else if (
!isDisabledThinkingConfig(thinkingConfig as Record<string, unknown>)
) {
Object.assign(output.message, thinkingConfig)
state.thinkingConfigInjected = true
log("Think mode: thinking config injected", {
sessionID,
provider: currentModel.providerID,
config: thinkingConfig,
})
} else {
log("Think mode: skipping disabled thinking config", {
sessionID,
provider: currentModel.providerID,
})
}
}
thinkModeState.set(sessionID, state)
},

View File

@@ -1,452 +1,155 @@
import { describe, expect, it, beforeEach } from "bun:test"
import type { ThinkModeInput } from "./types"
import { beforeEach, describe, expect, it } from "bun:test"
const { createThinkModeHook, clearThinkModeState } = await import("./index")
const { clearThinkModeState, createThinkModeHook } = await import("./index")
type ThinkModeHookInput = {
sessionID: string
model?: { providerID: string; modelID: string }
}
type ThinkModeHookOutput = {
message: Record<string, unknown>
parts: Array<{ type: string; text?: string; [key: string]: unknown }>
}
function createHookInput(args: {
sessionID?: string
providerID?: string
modelID?: string
}): ThinkModeHookInput {
const { sessionID = "test-session-id", providerID, modelID } = args
if (!providerID || !modelID) {
return { sessionID }
}
/**
* Helper to create a mock ThinkModeInput for testing
*/
function createMockInput(
providerID: string,
modelID: string,
promptText: string
): ThinkModeInput {
return {
parts: [{ type: "text", text: promptText }],
message: {
model: {
providerID,
modelID,
},
},
sessionID,
model: { providerID, modelID },
}
}
/**
* Type helper for accessing dynamically injected properties on message
*/
type MessageWithInjectedProps = Record<string, unknown>
function createHookOutput(promptText: string, variant?: string): ThinkModeHookOutput {
return {
message: variant ? { variant } : {},
parts: [{ type: "text", text: promptText }],
}
}
describe("createThinkModeHook integration", () => {
describe("createThinkModeHook", () => {
const sessionID = "test-session-id"
beforeEach(() => {
clearThinkModeState(sessionID)
})
describe("GitHub Copilot provider integration", () => {
describe("Claude models", () => {
it("should activate thinking mode for github-copilot Claude with think keyword", async () => {
// given a github-copilot Claude model and prompt with "think" keyword
const hook = createThinkModeHook()
const input = createMockInput(
"github-copilot",
"claude-opus-4-6",
"Please think deeply about this problem"
)
// when the chat.params hook is called
await hook["chat.params"](input, sessionID)
// then should upgrade to high variant and inject thinking config
const message = input.message as MessageWithInjectedProps
expect(input.message.model?.modelID).toBe("claude-opus-4-6-high")
expect(message.thinking).toBeDefined()
expect((message.thinking as Record<string, unknown>)?.type).toBe(
"enabled"
)
expect(
(message.thinking as Record<string, unknown>)?.budgetTokens
).toBe(64000)
})
it("should handle github-copilot Claude with dots in version", async () => {
// given a github-copilot Claude model with dot format (claude-opus-4.6)
const hook = createThinkModeHook()
const input = createMockInput(
"github-copilot",
"claude-opus-4.6",
"ultrathink mode"
)
// when the chat.params hook is called
await hook["chat.params"](input, sessionID)
// then should upgrade to high variant (hyphen format)
const message = input.message as MessageWithInjectedProps
expect(input.message.model?.modelID).toBe("claude-opus-4-6-high")
expect(message.thinking).toBeDefined()
})
it("should handle github-copilot Claude Sonnet", async () => {
// given a github-copilot Claude Sonnet model
const hook = createThinkModeHook()
const input = createMockInput(
"github-copilot",
"claude-sonnet-4-6",
"think about this"
)
// when the chat.params hook is called
await hook["chat.params"](input, sessionID)
// then should upgrade to high variant
const message = input.message as MessageWithInjectedProps
expect(input.message.model?.modelID).toBe("claude-sonnet-4-6-high")
expect(message.thinking).toBeDefined()
})
it("sets high variant and switches model when think keyword is present", async () => {
// given
const hook = createThinkModeHook()
const input = createHookInput({
sessionID,
providerID: "github-copilot",
modelID: "claude-opus-4-6",
})
const output = createHookOutput("Please think deeply about this")
describe("Gemini models", () => {
it("should activate thinking mode for github-copilot Gemini Pro", async () => {
// given a github-copilot Gemini Pro model
const hook = createThinkModeHook()
const input = createMockInput(
"github-copilot",
"gemini-3-pro",
"think about this"
)
// when
await hook["chat.message"](input, output)
// when the chat.params hook is called
await hook["chat.params"](input, sessionID)
// then should upgrade to high variant and inject google thinking config
const message = input.message as MessageWithInjectedProps
expect(input.message.model?.modelID).toBe("gemini-3-pro-high")
expect(message.providerOptions).toBeDefined()
const googleOptions = (
message.providerOptions as Record<string, unknown>
)?.google as Record<string, unknown>
expect(googleOptions?.thinkingConfig).toBeDefined()
})
it("should activate thinking mode for github-copilot Gemini Flash", async () => {
// given a github-copilot Gemini Flash model
const hook = createThinkModeHook()
const input = createMockInput(
"github-copilot",
"gemini-3-flash",
"ultrathink"
)
// when the chat.params hook is called
await hook["chat.params"](input, sessionID)
// then should upgrade to high variant
const message = input.message as MessageWithInjectedProps
expect(input.message.model?.modelID).toBe("gemini-3-flash-high")
expect(message.providerOptions).toBeDefined()
})
})
describe("GPT models", () => {
it("should activate thinking mode for github-copilot GPT-5.2", async () => {
// given a github-copilot GPT-5.2 model
const hook = createThinkModeHook()
const input = createMockInput(
"github-copilot",
"gpt-5.2",
"please think"
)
// when the chat.params hook is called
await hook["chat.params"](input, sessionID)
// then should upgrade to high variant and inject openai thinking config
const message = input.message as MessageWithInjectedProps
expect(input.message.model?.modelID).toBe("gpt-5-2-high")
expect(message.reasoning_effort).toBe("high")
})
it("should activate thinking mode for github-copilot GPT-5", async () => {
// given a github-copilot GPT-5 model
const hook = createThinkModeHook()
const input = createMockInput("github-copilot", "gpt-5", "think deeply")
// when the chat.params hook is called
await hook["chat.params"](input, sessionID)
// then should upgrade to high variant
const message = input.message as MessageWithInjectedProps
expect(input.message.model?.modelID).toBe("gpt-5-high")
expect(message.reasoning_effort).toBe("high")
})
})
describe("No think keyword", () => {
it("should NOT activate for github-copilot without think keyword", async () => {
// given a prompt without any think keyword
const hook = createThinkModeHook()
const input = createMockInput(
"github-copilot",
"claude-opus-4-6",
"Just do this task"
)
const originalModelID = input.message.model?.modelID
// when the chat.params hook is called
await hook["chat.params"](input, sessionID)
// then should NOT change model or inject config
const message = input.message as MessageWithInjectedProps
expect(input.message.model?.modelID).toBe(originalModelID)
expect(message.thinking).toBeUndefined()
})
// then
expect(output.message.variant).toBe("high")
expect(output.message.model).toEqual({
providerID: "github-copilot",
modelID: "claude-opus-4-6-high",
})
})
describe("Backwards compatibility with direct providers", () => {
it("should still work for direct anthropic provider", async () => {
// given direct anthropic provider
const hook = createThinkModeHook()
const input = createMockInput(
"anthropic",
"claude-sonnet-4-6",
"think about this"
)
// when the chat.params hook is called
await hook["chat.params"](input, sessionID)
// then should work as before
const message = input.message as MessageWithInjectedProps
expect(input.message.model?.modelID).toBe("claude-sonnet-4-6-high")
expect(message.thinking).toBeDefined()
it("supports dotted model IDs by switching to normalized high variant", async () => {
// given
const hook = createThinkModeHook()
const input = createHookInput({
sessionID,
providerID: "github-copilot",
modelID: "gpt-5.2",
})
const output = createHookOutput("ultrathink about this")
it("should work for direct google-vertex-anthropic provider", async () => {
//#given direct google-vertex-anthropic provider
const hook = createThinkModeHook()
const input = createMockInput(
"google-vertex-anthropic",
"claude-opus-4-6",
"think deeply"
)
// when
await hook["chat.message"](input, output)
//#when the chat.params hook is called
await hook["chat.params"](input, sessionID)
//#then should upgrade model and inject Claude thinking config
const message = input.message as MessageWithInjectedProps
expect(input.message.model?.modelID).toBe("claude-opus-4-6-high")
expect(message.thinking).toBeDefined()
expect((message.thinking as Record<string, unknown>)?.budgetTokens).toBe(
64000
)
})
it("should still work for direct google provider", async () => {
// given direct google provider
const hook = createThinkModeHook()
const input = createMockInput(
"google",
"gemini-3-pro",
"think about this"
)
// when the chat.params hook is called
await hook["chat.params"](input, sessionID)
// then should work as before
const message = input.message as MessageWithInjectedProps
expect(input.message.model?.modelID).toBe("gemini-3-pro-high")
expect(message.providerOptions).toBeDefined()
})
it("should still work for direct openai provider", async () => {
// given direct openai provider
const hook = createThinkModeHook()
const input = createMockInput("openai", "gpt-5", "think about this")
// when the chat.params hook is called
await hook["chat.params"](input, sessionID)
// then should work
const message = input.message as MessageWithInjectedProps
expect(input.message.model?.modelID).toBe("gpt-5-high")
expect(message.reasoning_effort).toBe("high")
})
it("should still work for amazon-bedrock provider", async () => {
// given amazon-bedrock provider
const hook = createThinkModeHook()
const input = createMockInput(
"amazon-bedrock",
"claude-sonnet-4-6",
"think"
)
// when the chat.params hook is called
await hook["chat.params"](input, sessionID)
// then should inject bedrock thinking config
const message = input.message as MessageWithInjectedProps
expect(input.message.model?.modelID).toBe("claude-sonnet-4-6-high")
expect(message.reasoningConfig).toBeDefined()
// then
expect(output.message.variant).toBe("high")
expect(output.message.model).toEqual({
providerID: "github-copilot",
modelID: "gpt-5-2-high",
})
})
describe("Already-high variants", () => {
it("should NOT re-upgrade already-high variants", async () => {
// given an already-high variant model
const hook = createThinkModeHook()
const input = createMockInput(
"github-copilot",
"claude-opus-4-6-high",
"think deeply"
)
// when the chat.params hook is called
await hook["chat.params"](input, sessionID)
// then should NOT modify the model (already high)
const message = input.message as MessageWithInjectedProps
expect(input.message.model?.modelID).toBe("claude-opus-4-6-high")
// No additional thinking config should be injected
expect(message.thinking).toBeUndefined()
it("skips when message variant is already set", async () => {
// given
const hook = createThinkModeHook()
const input = createHookInput({
sessionID,
providerID: "github-copilot",
modelID: "claude-sonnet-4-6",
})
const output = createHookOutput("think through this", "max")
it("should NOT re-upgrade already-high GPT variants", async () => {
// given an already-high GPT variant
const hook = createThinkModeHook()
const input = createMockInput(
"github-copilot",
"gpt-5.2-high",
"ultrathink"
)
// when
await hook["chat.message"](input, output)
// when the chat.params hook is called
await hook["chat.params"](input, sessionID)
// then should NOT modify the model
const message = input.message as MessageWithInjectedProps
expect(input.message.model?.modelID).toBe("gpt-5.2-high")
expect(message.reasoning_effort).toBeUndefined()
})
// then
expect(output.message.variant).toBe("max")
expect(output.message.model).toBeUndefined()
})
describe("Unknown models", () => {
it("should not crash for unknown models via github-copilot", async () => {
// given an unknown model type
const hook = createThinkModeHook()
const input = createMockInput(
"github-copilot",
"llama-3-70b",
"think about this"
)
// when the chat.params hook is called
await hook["chat.params"](input, sessionID)
// then should not crash and model should remain unchanged
expect(input.message.model?.modelID).toBe("llama-3-70b")
it("does nothing when think keyword is absent", async () => {
// given
const hook = createThinkModeHook()
const input = createHookInput({
sessionID,
providerID: "google",
modelID: "gemini-3-pro",
})
const output = createHookOutput("Please solve this directly")
// when
await hook["chat.message"](input, output)
// then
expect(output.message.variant).toBeUndefined()
expect(output.message.model).toBeUndefined()
})
describe("Edge cases", () => {
it("should handle missing model gracefully", async () => {
// given input without a model
const hook = createThinkModeHook()
const input: ThinkModeInput = {
parts: [{ type: "text", text: "think about this" }],
message: {},
}
// when the chat.params hook is called
// then should not crash
await expect(
hook["chat.params"](input, sessionID)
).resolves.toBeUndefined()
it("does not modify already-high models", async () => {
// given
const hook = createThinkModeHook()
const input = createHookInput({
sessionID,
providerID: "openai",
modelID: "gpt-5-high",
})
const output = createHookOutput("think deeply")
it("should handle empty prompt gracefully", async () => {
// given empty prompt
const hook = createThinkModeHook()
const input = createMockInput("github-copilot", "claude-opus-4-6", "")
// when
await hook["chat.message"](input, output)
// when the chat.params hook is called
await hook["chat.params"](input, sessionID)
// then should not upgrade (no think keyword)
expect(input.message.model?.modelID).toBe("claude-opus-4-6")
})
// then
expect(output.message.variant).toBeUndefined()
expect(output.message.model).toBeUndefined()
})
describe("Agent-level thinking configuration respect", () => {
it("should omit Z.ai GLM disabled thinking config", async () => {
//#given a Z.ai GLM model with think prompt
const hook = createThinkModeHook()
const input = createMockInput(
"zai-coding-plan",
"glm-5",
"ultrathink mode"
)
it("handles missing input model without crashing", async () => {
// given
const hook = createThinkModeHook()
const input = createHookInput({ sessionID })
const output = createHookOutput("think about this")
//#when think mode resolves Z.ai thinking configuration
await hook["chat.params"](input, sessionID)
// when
await expect(hook["chat.message"](input, output)).resolves.toBeUndefined()
//#then thinking config should be omitted from request
const message = input.message as MessageWithInjectedProps
expect(input.message.model?.modelID).toBe("glm-5")
expect(message.thinking).toBeUndefined()
expect(message.providerOptions).toBeUndefined()
})
it("should NOT inject thinking config when agent has thinking disabled", async () => {
// given agent with thinking explicitly disabled
const hook = createThinkModeHook()
const input: ThinkModeInput = {
parts: [{ type: "text", text: "ultrathink deeply" }],
message: {
model: { providerID: "google", modelID: "gemini-3-pro" },
thinking: { type: "disabled" },
} as ThinkModeInput["message"],
}
// when the chat.params hook is called
await hook["chat.params"](input, sessionID)
// then should NOT override agent's thinking disabled setting
const message = input.message as MessageWithInjectedProps
expect((message.thinking as { type: string }).type).toBe("disabled")
expect(message.providerOptions).toBeUndefined()
})
it("should NOT inject thinking config when agent has custom providerOptions", async () => {
// given agent with custom providerOptions
const hook = createThinkModeHook()
const input: ThinkModeInput = {
parts: [{ type: "text", text: "ultrathink" }],
message: {
model: { providerID: "google", modelID: "gemini-3-flash" },
providerOptions: {
google: { thinkingConfig: { thinkingBudget: 0 } },
},
} as ThinkModeInput["message"],
}
// when the chat.params hook is called
await hook["chat.params"](input, sessionID)
// then should NOT override agent's providerOptions
const message = input.message as MessageWithInjectedProps
const providerOpts = message.providerOptions as Record<string, unknown>
expect((providerOpts.google as Record<string, unknown>).thinkingConfig).toEqual({
thinkingBudget: 0,
})
})
it("should still inject thinking config when agent has no thinking override", async () => {
// given agent without thinking override
const hook = createThinkModeHook()
const input = createMockInput("google", "gemini-3-pro", "ultrathink")
// when the chat.params hook is called
await hook["chat.params"](input, sessionID)
// then should inject thinking config as normal
const message = input.message as MessageWithInjectedProps
expect(message.providerOptions).toBeDefined()
})
// then
expect(output.message.variant).toBeUndefined()
expect(output.message.model).toBeUndefined()
})
})

View File

@@ -1,128 +1,10 @@
import { describe, expect, it } from "bun:test"
import {
getHighVariant,
getThinkingConfig,
isAlreadyHighVariant,
THINKING_CONFIGS,
} from "./switcher"
describe("think-mode switcher", () => {
describe("GitHub Copilot provider support", () => {
describe("Claude models via github-copilot", () => {
it("should resolve github-copilot Claude Opus to anthropic config", () => {
// given a github-copilot provider with Claude Opus model
const providerID = "github-copilot"
const modelID = "claude-opus-4-6"
// when getting thinking config
const config = getThinkingConfig(providerID, modelID)
// then should return anthropic thinking config
expect(config).not.toBeNull()
expect(config?.thinking).toBeDefined()
expect((config?.thinking as Record<string, unknown>)?.type).toBe(
"enabled"
)
expect((config?.thinking as Record<string, unknown>)?.budgetTokens).toBe(
64000
)
})
it("should resolve github-copilot Claude Sonnet to anthropic config", () => {
// given a github-copilot provider with Claude Sonnet model
const config = getThinkingConfig("github-copilot", "claude-sonnet-4-6")
// then should return anthropic thinking config
expect(config).not.toBeNull()
expect(config?.thinking).toBeDefined()
})
it("should handle Claude with dots in version number", () => {
// given a model ID with dots (claude-opus-4.6)
const config = getThinkingConfig("github-copilot", "claude-opus-4.6")
// then should still return anthropic thinking config
expect(config).not.toBeNull()
expect(config?.thinking).toBeDefined()
})
})
describe("Gemini models via github-copilot", () => {
it("should resolve github-copilot Gemini Pro to google config", () => {
// given a github-copilot provider with Gemini Pro model
const config = getThinkingConfig("github-copilot", "gemini-3-pro")
// then should return google thinking config
expect(config).not.toBeNull()
expect(config?.providerOptions).toBeDefined()
const googleOptions = (
config?.providerOptions as Record<string, unknown>
)?.google as Record<string, unknown>
expect(googleOptions?.thinkingConfig).toBeDefined()
})
it("should resolve github-copilot Gemini Flash to google config", () => {
// given a github-copilot provider with Gemini Flash model
const config = getThinkingConfig(
"github-copilot",
"gemini-3-flash"
)
// then should return google thinking config
expect(config).not.toBeNull()
expect(config?.providerOptions).toBeDefined()
})
})
describe("GPT models via github-copilot", () => {
it("should resolve github-copilot GPT-5.2 to openai config", () => {
// given a github-copilot provider with GPT-5.2 model
const config = getThinkingConfig("github-copilot", "gpt-5.2")
// then should return openai thinking config
expect(config).not.toBeNull()
expect(config?.reasoning_effort).toBe("high")
})
it("should resolve github-copilot GPT-5 to openai config", () => {
// given a github-copilot provider with GPT-5 model
const config = getThinkingConfig("github-copilot", "gpt-5")
// then should return openai thinking config
expect(config).not.toBeNull()
expect(config?.reasoning_effort).toBe("high")
})
it("should resolve github-copilot o1 to openai config", () => {
// given a github-copilot provider with o1 model
const config = getThinkingConfig("github-copilot", "o1-preview")
// then should return openai thinking config
expect(config).not.toBeNull()
expect(config?.reasoning_effort).toBe("high")
})
it("should resolve github-copilot o3 to openai config", () => {
// given a github-copilot provider with o3 model
const config = getThinkingConfig("github-copilot", "o3-mini")
// then should return openai thinking config
expect(config).not.toBeNull()
expect(config?.reasoning_effort).toBe("high")
})
})
describe("Unknown models via github-copilot", () => {
it("should return null for unknown model types", () => {
// given a github-copilot provider with unknown model
const config = getThinkingConfig("github-copilot", "llama-3-70b")
// then should return null (no matching provider)
expect(config).toBeNull()
})
})
})
describe("Model ID normalization", () => {
describe("getHighVariant with dots vs hyphens", () => {
it("should handle dots in Claude version numbers", () => {
@@ -217,149 +99,6 @@ describe("think-mode switcher", () => {
})
})
describe("getThinkingConfig", () => {
describe("Already high variants", () => {
it("should return null for already-high variants", () => {
// given already-high model variants
expect(
getThinkingConfig("anthropic", "claude-opus-4-6-high")
).toBeNull()
expect(getThinkingConfig("openai", "gpt-5-2-high")).toBeNull()
expect(getThinkingConfig("google", "gemini-3-pro-high")).toBeNull()
})
it("should return null for already-high variants via github-copilot", () => {
// given already-high model variants via github-copilot
expect(
getThinkingConfig("github-copilot", "claude-opus-4-6-high")
).toBeNull()
expect(getThinkingConfig("github-copilot", "gpt-5.2-high")).toBeNull()
})
})
describe("Non-thinking-capable models", () => {
it("should return null for non-thinking-capable models", () => {
// given models that don't support thinking mode
expect(getThinkingConfig("anthropic", "claude-2")).toBeNull()
expect(getThinkingConfig("openai", "gpt-4")).toBeNull()
expect(getThinkingConfig("google", "gemini-1")).toBeNull()
})
})
describe("Unknown providers", () => {
it("should return null for unknown providers", () => {
// given unknown provider IDs
expect(getThinkingConfig("unknown-provider", "some-model")).toBeNull()
expect(getThinkingConfig("azure", "gpt-5")).toBeNull()
})
})
})
describe("Direct provider configs (backwards compatibility)", () => {
it("should still work for direct anthropic provider", () => {
// given direct anthropic provider
const config = getThinkingConfig("anthropic", "claude-opus-4-6")
// then should return anthropic thinking config
expect(config).not.toBeNull()
expect(config?.thinking).toBeDefined()
expect((config?.thinking as Record<string, unknown>)?.type).toBe("enabled")
})
it("should work for direct google-vertex-anthropic provider", () => {
//#given direct google-vertex-anthropic provider
const config = getThinkingConfig(
"google-vertex-anthropic",
"claude-opus-4-6"
)
//#when thinking config is resolved
//#then it should return anthropic-style thinking config
expect(config).not.toBeNull()
expect(config?.thinking).toBeDefined()
expect((config?.thinking as Record<string, unknown>)?.type).toBe("enabled")
expect((config?.thinking as Record<string, unknown>)?.budgetTokens).toBe(
64000
)
})
it("should still work for direct google provider", () => {
// given direct google provider
const config = getThinkingConfig("google", "gemini-3-pro")
// then should return google thinking config
expect(config).not.toBeNull()
expect(config?.providerOptions).toBeDefined()
})
it("should still work for amazon-bedrock provider", () => {
// given amazon-bedrock provider with claude model
const config = getThinkingConfig("amazon-bedrock", "claude-sonnet-4-6")
// then should return bedrock thinking config
expect(config).not.toBeNull()
expect(config?.reasoningConfig).toBeDefined()
})
it("should still work for google-vertex provider", () => {
// given google-vertex provider
const config = getThinkingConfig("google-vertex", "gemini-3-pro")
// then should return google-vertex thinking config
expect(config).not.toBeNull()
expect(config?.providerOptions).toBeDefined()
const vertexOptions = (config?.providerOptions as Record<string, unknown>)?.[
"google-vertex"
] as Record<string, unknown>
expect(vertexOptions?.thinkingConfig).toBeDefined()
})
it("should work for direct openai provider", () => {
// given direct openai provider
const config = getThinkingConfig("openai", "gpt-5")
// then should return openai thinking config
expect(config).not.toBeNull()
expect(config?.reasoning_effort).toBe("high")
})
})
describe("THINKING_CONFIGS structure", () => {
it("should have correct structure for anthropic", () => {
const config = THINKING_CONFIGS.anthropic
expect(config.thinking).toBeDefined()
expect(config.maxTokens).toBe(128000)
})
it("should have correct structure for google-vertex-anthropic", () => {
//#given google-vertex-anthropic config entry
const config = THINKING_CONFIGS["google-vertex-anthropic"]
//#when structure is validated
//#then it should match anthropic style structure
expect(config.thinking).toBeDefined()
expect(config.maxTokens).toBe(128000)
})
it("should have correct structure for google", () => {
const config = THINKING_CONFIGS.google
expect(config.providerOptions).toBeDefined()
})
it("should have correct structure for openai", () => {
const config = THINKING_CONFIGS.openai
expect(config.reasoning_effort).toBe("high")
})
it("should have correct structure for amazon-bedrock", () => {
const config = THINKING_CONFIGS["amazon-bedrock"]
expect(config.reasoningConfig).toBeDefined()
expect(config.maxTokens).toBe(64000)
})
})
describe("Custom provider prefixes support", () => {
describe("getHighVariant with prefixes", () => {
it("should preserve vertex_ai/ prefix when getting high variant", () => {
@@ -426,141 +165,6 @@ describe("think-mode switcher", () => {
expect(isAlreadyHighVariant("vertex_ai/gpt-5.2-high")).toBe(true)
})
})
describe("getThinkingConfig with prefixes", () => {
it("should return null for custom providers (not in THINKING_CONFIGS)", () => {
// given custom provider with prefixed Claude model
const config = getThinkingConfig("dia-llm", "vertex_ai/claude-sonnet-4-6")
// then should return null (custom provider not in THINKING_CONFIGS)
expect(config).toBeNull()
})
it("should work with prefixed models on known providers", () => {
// given known provider (anthropic) with prefixed model
// This tests that the base model name is correctly extracted for capability check
const config = getThinkingConfig("anthropic", "custom-prefix/claude-opus-4-6")
// then should return thinking config (base model is capable)
expect(config).not.toBeNull()
expect(config?.thinking).toBeDefined()
})
it("should return null for prefixed models that are already high", () => {
// given prefixed already-high model
const config = getThinkingConfig("anthropic", "vertex_ai/claude-opus-4-6-high")
// then should return null
expect(config).toBeNull()
})
})
describe("Real-world custom provider scenario", () => {
it("should handle LLM proxy with vertex_ai prefix correctly", () => {
// given a custom LLM proxy provider using vertex_ai/ prefix
const providerID = "dia-llm"
const modelID = "vertex_ai/claude-sonnet-4-6"
// when getting high variant
const highVariant = getHighVariant(modelID)
// then should preserve the prefix
expect(highVariant).toBe("vertex_ai/claude-sonnet-4-6-high")
// #and when checking if already high
expect(isAlreadyHighVariant(modelID)).toBe(false)
expect(isAlreadyHighVariant(highVariant!)).toBe(true)
// #and when getting thinking config for custom provider
const config = getThinkingConfig(providerID, modelID)
// then should return null (custom provider, not anthropic)
// This prevents applying incompatible thinking configs to custom providers
expect(config).toBeNull()
})
it("should not break when switching to high variant in think mode", () => {
// given think mode switching vertex_ai/claude model to high variant
const original = "vertex_ai/claude-opus-4-6"
const high = getHighVariant(original)
// then the high variant should be valid
expect(high).toBe("vertex_ai/claude-opus-4-6-high")
// #and should be recognized as already high
expect(isAlreadyHighVariant(high!)).toBe(true)
// #and switching again should return null (already high)
expect(getHighVariant(high!)).toBeNull()
})
})
})
describe("Z.AI GLM-4.7 provider support", () => {
describe("getThinkingConfig for zai-coding-plan", () => {
it("should return thinking config for glm-5", () => {
//#given a Z.ai GLM model
const config = getThinkingConfig("zai-coding-plan", "glm-5")
//#when thinking config is resolved
//#then thinking type is "disabled"
expect(config).not.toBeNull()
expect(config?.providerOptions).toBeDefined()
const zaiOptions = (config?.providerOptions as Record<string, unknown>)?.[
"zai-coding-plan"
] as Record<string, unknown>
expect(zaiOptions?.extra_body).toBeDefined()
const extraBody = zaiOptions?.extra_body as Record<string, unknown>
expect(extraBody?.thinking).toBeDefined()
expect((extraBody?.thinking as Record<string, unknown>)?.type).toBe("disabled")
})
it("should return thinking config for glm-4.6v (multimodal)", () => {
// given zai-coding-plan provider with glm-4.6v model
const config = getThinkingConfig("zai-coding-plan", "glm-4.6v")
// then should return zai-coding-plan thinking config
expect(config).not.toBeNull()
expect(config?.providerOptions).toBeDefined()
})
it("should return null for non-GLM models on zai-coding-plan", () => {
// given zai-coding-plan provider with unknown model
const config = getThinkingConfig("zai-coding-plan", "some-other-model")
// then should return null
expect(config).toBeNull()
})
})
describe("HIGH_VARIANT_MAP for GLM", () => {
it("should NOT have high variant for glm-5", () => {
// given glm-5 model
const variant = getHighVariant("glm-5")
// then should return null (no high variant needed)
expect(variant).toBeNull()
})
it("should NOT have high variant for glm-4.6v", () => {
// given glm-4.6v model
const variant = getHighVariant("glm-4.6v")
// then should return null
expect(variant).toBeNull()
})
})
})
describe("THINKING_CONFIGS structure for zai-coding-plan", () => {
it("should have correct structure for zai-coding-plan", () => {
const config = THINKING_CONFIGS["zai-coding-plan"]
expect(config.providerOptions).toBeDefined()
const zaiOptions = (config.providerOptions as Record<string, unknown>)?.[
"zai-coding-plan"
] as Record<string, unknown>
expect(zaiOptions?.extra_body).toBeDefined()
})
})
})
})

View File

@@ -53,35 +53,7 @@ function normalizeModelID(modelID: string): string {
return modelID.replace(/\.(\d+)/g, "-$1")
}
/**
 * Maps proxy providers to the provider that actually serves the model.
 *
 * github-copilot fronts models from several vendors, so the real provider is
 * inferred from the model name; any other provider ID is returned unchanged.
 *
 * @example
 * resolveProvider("github-copilot", "claude-opus-4-6") // "anthropic"
 * resolveProvider("github-copilot", "gemini-3-pro")    // "google"
 * resolveProvider("github-copilot", "gpt-5.2")         // "openai"
 * resolveProvider("anthropic", "claude-opus-4-6")      // "anthropic"
 */
function resolveProvider(providerID: string, modelID: string): string {
  // Only the Copilot proxy needs resolution; everything else is direct.
  if (providerID !== "github-copilot") {
    return providerID
  }
  const name = modelID.toLowerCase()
  if (name.includes("claude")) {
    return "anthropic"
  }
  if (name.includes("gemini")) {
    return "google"
  }
  const openaiMarkers = ["gpt", "o1", "o3"]
  if (openaiMarkers.some((marker) => name.includes(marker))) {
    return "openai"
  }
  // Unrecognized Copilot model — fall back to the proxy ID itself.
  return providerID
}
// Maps model IDs to their "high reasoning" variant (internal convention)
// For OpenAI models, this signals that reasoning_effort should be set to "high"
@@ -116,71 +88,6 @@ const HIGH_VARIANT_MAP: Record<string, string> = {
// Model IDs that are themselves the "high" variant (the values of HIGH_VARIANT_MAP).
const ALREADY_HIGH: Set<string> = new Set(Object.values(HIGH_VARIANT_MAP))
// Per-provider option payloads used to enable "thinking"/extended reasoning.
// Keys are provider IDs; each value mirrors that provider's native API shape.
export const THINKING_CONFIGS = {
  // Anthropic extended thinking: budgeted thinking tokens within maxTokens.
  anthropic: {
    thinking: {
      type: "enabled",
      budgetTokens: 64000,
    },
    maxTokens: 128000,
  },
  // Anthropic models hosted on Google Vertex — same option shape as above.
  "google-vertex-anthropic": {
    thinking: {
      type: "enabled",
      budgetTokens: 64000,
    },
    maxTokens: 128000,
  },
  // Bedrock uses `reasoningConfig` with smaller budget and output ceilings.
  "amazon-bedrock": {
    reasoningConfig: {
      type: "enabled",
      budgetTokens: 32000,
    },
    maxTokens: 64000,
  },
  // Gemini: request the HIGH thinking level via provider options.
  google: {
    providerOptions: {
      google: {
        thinkingConfig: {
          thinkingLevel: "HIGH",
        },
      },
    },
  },
  // Gemini on Vertex — identical config keyed under the vertex provider ID.
  "google-vertex": {
    providerOptions: {
      "google-vertex": {
        thinkingConfig: {
          thinkingLevel: "HIGH",
        },
      },
    },
  },
  // OpenAI reasoning models: an effort knob instead of a token budget.
  openai: {
    reasoning_effort: "high",
  },
  // Z.AI GLM: thinking is explicitly DISABLED via extra_body — this is the
  // intended config for GLM models on zai-coding-plan, not an omission.
  "zai-coding-plan": {
    providerOptions: {
      "zai-coding-plan": {
        extra_body: {
          thinking: {
            type: "disabled",
          },
        },
      },
    },
  },
} as const satisfies Record<string, Record<string, unknown>>
// Substring patterns (matched case-insensitively against the base model name,
// prefix stripped) that mark a model as thinking-capable for each provider.
const THINKING_CAPABLE_MODELS = {
  anthropic: ["claude-sonnet-4", "claude-opus-4", "claude-3"],
  "google-vertex-anthropic": ["claude-sonnet-4", "claude-opus-4", "claude-3"],
  "amazon-bedrock": ["claude", "anthropic"],
  google: ["gemini-2", "gemini-3"],
  "google-vertex": ["gemini-2", "gemini-3"],
  openai: ["gpt-5", "o1", "o3"],
  "zai-coding-plan": ["glm"],
} as const satisfies Record<string, readonly string[]>
export function getHighVariant(modelID: string): string | null {
const normalized = normalizeModelID(modelID)
@@ -207,37 +114,4 @@ export function isAlreadyHighVariant(modelID: string): boolean {
return ALREADY_HIGH.has(base) || base.endsWith("-high")
}
// Union of provider IDs that have a thinking configuration defined.
type ThinkingProvider = keyof typeof THINKING_CONFIGS
// Type guard: narrows an arbitrary provider string to ThinkingProvider.
function isThinkingProvider(provider: string): provider is ThinkingProvider {
  return provider in THINKING_CONFIGS
}
export function getThinkingConfig(
providerID: string,
modelID: string
): Record<string, unknown> | null {
const normalized = normalizeModelID(modelID)
const { base } = extractModelPrefix(normalized)
if (isAlreadyHighVariant(normalized)) {
return null
}
const resolvedProvider = resolveProvider(providerID, modelID)
if (!isThinkingProvider(resolvedProvider)) {
return null
}
const config = THINKING_CONFIGS[resolvedProvider]
const capablePatterns = THINKING_CAPABLE_MODELS[resolvedProvider]
// Check capability using base model name (without prefix)
const baseLower = base.toLowerCase()
const isCapable = capablePatterns.some((pattern) =>
baseLower.includes(pattern.toLowerCase())
)
return isCapable ? config : null
}

View File

@@ -1,21 +1,16 @@
// Per-session think-mode state (keyed by session ID in the hook's Map).
export interface ThinkModeState {
  // NOTE(review): flags appear to track progress of the think-mode flow
  // (keyword requested, model switched, config/variant applied) — confirm
  // exact semantics against the hook implementation.
  requested: boolean
  modelSwitched: boolean
  thinkingConfigInjected: boolean
  variantSet: boolean
  // Presumably the provider/model captured when the switch happened — verify.
  providerID?: string
  modelID?: string
}
export interface ModelRef {
interface ModelRef {
providerID: string
modelID: string
}
export interface MessageWithModel {
interface MessageWithModel {
model?: ModelRef
}
// Input delivered to the think-mode hook: the raw prompt parts plus the
// message, which may carry a provider/model reference.
export interface ThinkModeInput {
  parts: Array<{ type: string; text?: string }>
  message: MessageWithModel
}

View File

@@ -100,6 +100,7 @@ export function createChatMessageHandler(args: {
await hooks.backgroundNotificationHook?.["chat.message"]?.(input, output)
await hooks.runtimeFallback?.["chat.message"]?.(input, output)
await hooks.keywordDetector?.["chat.message"]?.(input, output)
await hooks.thinkMode?.["chat.message"]?.(input, output)
await hooks.claudeCodeHooks?.["chat.message"]?.(input, output)
await hooks.autoSlashCommand?.["chat.message"]?.(input, output)
await hooks.noSisyphusGpt?.["chat.message"]?.(input, output)