Merge branch 'dev' into fix/1671-tmux-layout

This commit is contained in:
YeonGyu-Kim
2026-02-17 01:58:59 +09:00
committed by GitHub
14 changed files with 346 additions and 51 deletions

View File

@@ -7,7 +7,7 @@ function normalizeModelID(modelID: string): string {
}
// Returns true when the given provider/model combination is served by a
// Claude (Anthropic-family) backend.
//
// @param providerID - the provider key (e.g. "anthropic", "github-copilot")
// @param modelID - the model identifier, used only for proxy providers
// @returns true when the target model is a Claude model
function isClaudeProvider(providerID: string, modelID: string): boolean {
  // Direct Anthropic-family providers, including Claude hosted on Vertex AI.
  // (The older two-entry check was redundant: its members are a subset of
  // this list, so a single membership test suffices.)
  if (["anthropic", "google-vertex-anthropic", "opencode"].includes(providerID)) return true
  // GitHub Copilot proxies multiple vendors; detect Claude by model name.
  if (providerID === "github-copilot" && modelID.toLowerCase().includes("claude")) return true
  return false
}

View File

@@ -88,6 +88,21 @@ describe("createAnthropicEffortHook", () => {
expect(output.options.effort).toBe("max")
})
// Verifies that google-vertex-anthropic is treated as an Anthropic-family
// provider for effort injection, mirroring the plain "anthropic" case.
it("should inject effort max for google-vertex-anthropic provider", async () => {
  //#given google-vertex-anthropic provider with claude-opus-4-6
  const hook = createAnthropicEffortHook()
  const { input, output } = createMockParams({
    providerID: "google-vertex-anthropic",
    modelID: "claude-opus-4-6",
  })
  //#when chat.params hook is called
  await hook["chat.params"](input, output)
  //#then effort should be injected
  expect(output.options.effort).toBe("max")
})
it("should handle normalized model ID with dots (opus-4.6)", async () => {
//#given model ID with dots instead of hyphens
const hook = createAnthropicEffortHook()

View File

@@ -113,6 +113,42 @@ describe("context-window-monitor", () => {
expect(ctx.client.session.messages).not.toHaveBeenCalled()
})
// Verifies the context-window monitor recognizes google-vertex-anthropic
// usage events and appends a reminder once usage exceeds the threshold.
it("should append context reminder for google-vertex-anthropic provider", async () => {
  //#given cached usage for google-vertex-anthropic above threshold
  // (150k input + 10k cache-read puts the session over the reminder limit)
  const hook = createContextWindowMonitorHook(ctx as never)
  const sessionID = "ses_vertex_anthropic_high_usage"
  await hook.event({
    event: {
      type: "message.updated",
      properties: {
        info: {
          role: "assistant",
          sessionID,
          providerID: "google-vertex-anthropic",
          finish: true,
          tokens: {
            input: 150000,
            output: 1000,
            reasoning: 0,
            cache: { read: 10000, write: 0 },
          },
        },
      },
    },
  })
  //#when tool.execute.after runs
  const output = { title: "", output: "original", metadata: null }
  await hook["tool.execute.after"](
    { tool: "bash", sessionID, callID: "call_1" },
    output
  )
  //#then context reminder should be appended
  expect(output.output).toContain("context remaining")
})
// #given session is deleted
// #when session.deleted event fires
// #then cached data should be cleaned up

View File

@@ -27,6 +27,10 @@ interface CachedTokenState {
tokens: TokenInfo
}
// True for providers that speak the Anthropic API surface:
// first-party Anthropic, or Claude hosted on Google Vertex AI.
function isAnthropicProvider(providerID: string): boolean {
  return ["anthropic", "google-vertex-anthropic"].includes(providerID)
}
export function createContextWindowMonitorHook(_ctx: PluginInput) {
const remindedSessions = new Set<string>()
const tokenCache = new Map<string, CachedTokenState>()
@@ -42,7 +46,7 @@ export function createContextWindowMonitorHook(_ctx: PluginInput) {
const cached = tokenCache.get(sessionID)
if (!cached) return
if (cached.providerID !== "anthropic") return
if (!isAnthropicProvider(cached.providerID)) return
const lastTokens = cached.tokens
const totalInputTokens = (lastTokens?.input ?? 0) + (lastTokens?.cache?.read ?? 0)

View File

@@ -123,6 +123,43 @@ describe("preemptive-compaction", () => {
expect(ctx.client.session.summarize).toHaveBeenCalled()
})
// Verifies preemptive compaction fires for google-vertex-anthropic just
// like for the plain "anthropic" provider when usage crosses the limit.
it("should trigger compaction for google-vertex-anthropic provider", async () => {
  //#given google-vertex-anthropic usage above threshold
  // (170k input + 10k cache-read exceeds the Anthropic compaction limit)
  const hook = createPreemptiveCompactionHook(ctx as never)
  const sessionID = "ses_vertex_anthropic_high"
  await hook.event({
    event: {
      type: "message.updated",
      properties: {
        info: {
          role: "assistant",
          sessionID,
          providerID: "google-vertex-anthropic",
          modelID: "claude-sonnet-4-5",
          finish: true,
          tokens: {
            input: 170000,
            output: 1000,
            reasoning: 0,
            cache: { read: 10000, write: 0 },
          },
        },
      },
    },
  })
  //#when tool.execute.after runs
  const output = { title: "", output: "test", metadata: null }
  await hook["tool.execute.after"](
    { tool: "bash", sessionID, callID: "call_1" },
    output
  )
  //#then summarize should be triggered
  expect(ctx.client.session.summarize).toHaveBeenCalled()
})
// #given session deleted
// #then cache should be cleaned up
it("should clean up cache on session.deleted", async () => {

View File

@@ -23,6 +23,10 @@ interface CachedCompactionState {
tokens: TokenInfo
}
// Providers whose sessions use Anthropic-style context limits:
// Anthropic itself, plus Claude models served through Google Vertex AI.
function isAnthropicProvider(providerID: string): boolean {
  switch (providerID) {
    case "anthropic":
    case "google-vertex-anthropic":
      return true
    default:
      return false
  }
}
type PluginInput = {
client: {
session: {
@@ -55,7 +59,7 @@ export function createPreemptiveCompactionHook(ctx: PluginInput) {
if (!cached) return
const actualLimit =
cached.providerID === "anthropic"
isAnthropicProvider(cached.providerID)
? ANTHROPIC_ACTUAL_LIMIT
: DEFAULT_ACTUAL_LIMIT

View File

@@ -214,6 +214,27 @@ describe("createThinkModeHook integration", () => {
expect(message.thinking).toBeDefined()
})
// Verifies think-mode handles google-vertex-anthropic like a direct
// Anthropic provider: model upgrade plus Claude-style thinking config.
it("should work for direct google-vertex-anthropic provider", async () => {
  //#given direct google-vertex-anthropic provider
  const hook = createThinkModeHook()
  const input = createMockInput(
    "google-vertex-anthropic",
    "claude-opus-4-6",
    "think deeply"
  )
  //#when the chat.params hook is called
  await hook["chat.params"](input, sessionID)
  //#then should upgrade model and inject Claude thinking config
  const message = input.message as MessageWithInjectedProps
  expect(input.message.model?.modelID).toBe("claude-opus-4-6-high")
  expect(message.thinking).toBeDefined()
  expect((message.thinking as Record<string, unknown>)?.budgetTokens).toBe(
    64000
  )
})
it("should still work for direct google provider", async () => {
// given direct google provider
const hook = createThinkModeHook()

View File

@@ -266,6 +266,24 @@ describe("think-mode switcher", () => {
expect((config?.thinking as Record<string, unknown>)?.type).toBe("enabled")
})
// Verifies getThinkingConfig resolves an Anthropic-style config (enabled
// thinking with a 64k token budget) for the google-vertex-anthropic provider.
it("should work for direct google-vertex-anthropic provider", () => {
  //#given direct google-vertex-anthropic provider
  const config = getThinkingConfig(
    "google-vertex-anthropic",
    "claude-opus-4-6"
  )
  //#when thinking config is resolved
  //#then it should return anthropic-style thinking config
  expect(config).not.toBeNull()
  expect(config?.thinking).toBeDefined()
  expect((config?.thinking as Record<string, unknown>)?.type).toBe("enabled")
  expect((config?.thinking as Record<string, unknown>)?.budgetTokens).toBe(
    64000
  )
})
it("should still work for direct google provider", () => {
// given direct google provider
const config = getThinkingConfig("google", "gemini-3-pro")
@@ -314,6 +332,17 @@ describe("think-mode switcher", () => {
expect(config.maxTokens).toBe(128000)
})
// Structural check: the google-vertex-anthropic entry in THINKING_CONFIGS
// uses the Anthropic shape ("thinking" key) rather than Google's shape.
it("should have correct structure for google-vertex-anthropic", () => {
  //#given google-vertex-anthropic config entry
  const config = THINKING_CONFIGS["google-vertex-anthropic"]
  //#when structure is validated
  //#then it should match anthropic style structure
  expect(config.thinking).toBeDefined()
  expect(config.maxTokens).toBe(128000)
})
it("should have correct structure for google", () => {
const config = THINKING_CONFIGS.google
expect(config.providerOptions).toBeDefined()

View File

@@ -121,6 +121,13 @@ export const THINKING_CONFIGS = {
},
maxTokens: 128000,
},
"google-vertex-anthropic": {
thinking: {
type: "enabled",
budgetTokens: 64000,
},
maxTokens: 128000,
},
"amazon-bedrock": {
reasoningConfig: {
type: "enabled",
@@ -164,6 +171,7 @@ export const THINKING_CONFIGS = {
const THINKING_CAPABLE_MODELS = {
anthropic: ["claude-sonnet-4", "claude-opus-4", "claude-3"],
"google-vertex-anthropic": ["claude-sonnet-4", "claude-opus-4", "claude-3"],
"amazon-bedrock": ["claude", "anthropic"],
google: ["gemini-2", "gemini-3"],
"google-vertex": ["gemini-2", "gemini-3"],

View File

@@ -233,6 +233,27 @@ describe("fuzzyMatchModel", () => {
expect(result).toBe("anthropic/claude-opus-4-6")
})
// given github-copilot serves claude versions with dot notation (4.6)
// when the fallback chain requests the hyphenated form (4-6)
// then both forms normalize to the same key and the copilot model matches
it("should match github-copilot claude-opus-4-6 to claude-opus-4.6", () => {
  const available = new Set([
    "github-copilot/claude-opus-4.6",
    "opencode/glm-4.7-free",
  ])
  const result = fuzzyMatchModel("claude-opus-4-6", available, ["github-copilot"])
  expect(result).toBe("github-copilot/claude-opus-4.6")
})
// given claude models can evolve to newer version numbers (e.g. 5.1)
// when matching across dot and hyphen version separators
// then normalization is generic — no hardcoded version list required
it("should normalize claude version separators for future versions", () => {
  const available = new Set(["github-copilot/claude-sonnet-5.1"])
  const result = fuzzyMatchModel("claude-sonnet-5-1", available, ["github-copilot"])
  expect(result).toBe("github-copilot/claude-sonnet-5.1")
})
// given available models from multiple providers
// when providers filter is specified
// then only search models from specified providers

View File

@@ -28,8 +28,7 @@ import { normalizeSDKResponse } from "./normalize-sdk-response"
// Lower-cases a model name and canonicalizes Claude version separators so
// "claude-opus-4-6" and "claude-opus-4.6" both normalize to "claude-opus-4.6".
//
// @param name - raw model identifier as reported by a provider
// @returns lowercase name with Claude major[.-]minor rewritten as major.minor
function normalizeModelName(name: string): string {
  // A single generic major[.-]minor rewrite subsumes the older hardcoded
  // 4-5/4.5 rules and covers future Claude versions without code changes.
  return name
    .toLowerCase()
    .replace(/claude-(opus|sonnet|haiku)-(\d+)[.-](\d+)/g, "claude-$1-$2.$3")
}
export function fuzzyMatchModel(

View File

@@ -3,8 +3,7 @@ import { log } from "./logger"
// Lower-cases a model name and folds Claude "major-minor" / "major.minor"
// version separators to the dotted form, so hyphen and dot spellings of the
// same Claude model compare equal.
//
// @param name - raw model identifier as reported by a provider
// @returns lowercase name with Claude versions canonicalized to major.minor
function normalizeModelName(name: string): string {
  // One generic rewrite replaces the previous hardcoded 4-5/4.5 rules and
  // stays correct for future Claude version numbers.
  return name
    .toLowerCase()
    .replace(/claude-(opus|sonnet|haiku)-(\d+)[.-](\d+)/g, "claude-$1-$2.$3")
}
export function fuzzyMatchModel(
@@ -82,4 +81,3 @@ export function fuzzyMatchModel(
log("[fuzzyMatchModel] shortest match", { result })
return result
}

View File

@@ -1,12 +1,17 @@
const { describe, test, expect, mock } = require("bun:test")
const {
describe: bunDescribe,
test: bunTest,
expect: bunExpect,
mock: bunMock,
} = require("bun:test")
describe("sendSyncPrompt", () => {
test("passes question=false via tools parameter", async () => {
bunDescribe("sendSyncPrompt", () => {
bunTest("passes question=false via tools parameter", async () => {
//#given
const { sendSyncPrompt } = require("./sync-prompt-sender")
let promptArgs: any
const promptAsync = mock(async (input: any) => {
const promptAsync = bunMock(async (input: any) => {
promptArgs = input
return { data: {} }
})
@@ -33,19 +38,19 @@ describe("sendSyncPrompt", () => {
}
//#when
await sendSyncPrompt(mockClient as any, input)
await sendSyncPrompt(mockClient, input)
//#then
expect(promptAsync).toHaveBeenCalled()
expect(promptArgs.body.tools.question).toBe(false)
bunExpect(promptAsync).toHaveBeenCalled()
bunExpect(promptArgs.body.tools.question).toBe(false)
})
test("applies agent tool restrictions for explore agent", async () => {
bunTest("applies agent tool restrictions for explore agent", async () => {
//#given
const { sendSyncPrompt } = require("./sync-prompt-sender")
let promptArgs: any
const promptAsync = mock(async (input: any) => {
const promptAsync = bunMock(async (input: any) => {
promptArgs = input
return { data: {} }
})
@@ -73,19 +78,19 @@ describe("sendSyncPrompt", () => {
}
//#when
await sendSyncPrompt(mockClient as any, input)
await sendSyncPrompt(mockClient, input)
//#then
expect(promptAsync).toHaveBeenCalled()
expect(promptArgs.body.tools.call_omo_agent).toBe(false)
bunExpect(promptAsync).toHaveBeenCalled()
bunExpect(promptArgs.body.tools.call_omo_agent).toBe(false)
})
test("applies agent tool restrictions for librarian agent", async () => {
bunTest("applies agent tool restrictions for librarian agent", async () => {
//#given
const { sendSyncPrompt } = require("./sync-prompt-sender")
let promptArgs: any
const promptAsync = mock(async (input: any) => {
const promptAsync = bunMock(async (input: any) => {
promptArgs = input
return { data: {} }
})
@@ -113,19 +118,19 @@ describe("sendSyncPrompt", () => {
}
//#when
await sendSyncPrompt(mockClient as any, input)
await sendSyncPrompt(mockClient, input)
//#then
expect(promptAsync).toHaveBeenCalled()
expect(promptArgs.body.tools.call_omo_agent).toBe(false)
bunExpect(promptAsync).toHaveBeenCalled()
bunExpect(promptArgs.body.tools.call_omo_agent).toBe(false)
})
test("does not restrict call_omo_agent for sisyphus agent", async () => {
bunTest("does not restrict call_omo_agent for sisyphus agent", async () => {
//#given
const { sendSyncPrompt } = require("./sync-prompt-sender")
let promptArgs: any
const promptAsync = mock(async (input: any) => {
const promptAsync = bunMock(async (input: any) => {
promptArgs = input
return { data: {} }
})
@@ -153,10 +158,90 @@ describe("sendSyncPrompt", () => {
}
//#when
await sendSyncPrompt(mockClient as any, input)
await sendSyncPrompt(mockClient, input)
//#then
expect(promptAsync).toHaveBeenCalled()
expect(promptArgs.body.tools.call_omo_agent).toBe(true)
bunExpect(promptAsync).toHaveBeenCalled()
bunExpect(promptArgs.body.tools.call_omo_agent).toBe(true)
})
bunTest("retries with promptSync for oracle when promptAsync fails with unexpected EOF", async () => {
//#given
const { sendSyncPrompt } = require("./sync-prompt-sender")
const promptWithModelSuggestionRetry = bunMock(async () => {
throw new Error("JSON Parse error: Unexpected EOF")
})
const promptSyncWithModelSuggestionRetry = bunMock(async () => {})
const input = {
sessionID: "test-session",
agentToUse: "oracle",
args: {
description: "test task",
prompt: "test prompt",
run_in_background: false,
load_skills: [],
},
systemContent: undefined,
categoryModel: undefined,
toastManager: null,
taskId: undefined,
}
//#when
const result = await sendSyncPrompt(
{ session: { promptAsync: bunMock(async () => ({ data: {} })) } },
input,
{
promptWithModelSuggestionRetry,
promptSyncWithModelSuggestionRetry,
},
)
//#then
bunExpect(result).toBeNull()
bunExpect(promptWithModelSuggestionRetry).toHaveBeenCalledTimes(1)
bunExpect(promptSyncWithModelSuggestionRetry).toHaveBeenCalledTimes(1)
})
bunTest("does not retry with promptSync for non-oracle on unexpected EOF", async () => {
//#given
const { sendSyncPrompt } = require("./sync-prompt-sender")
const promptWithModelSuggestionRetry = bunMock(async () => {
throw new Error("JSON Parse error: Unexpected EOF")
})
const promptSyncWithModelSuggestionRetry = bunMock(async () => {})
const input = {
sessionID: "test-session",
agentToUse: "metis",
args: {
description: "test task",
prompt: "test prompt",
run_in_background: false,
load_skills: [],
},
systemContent: undefined,
categoryModel: undefined,
toastManager: null,
taskId: undefined,
}
//#when
const result = await sendSyncPrompt(
{ session: { promptAsync: bunMock(async () => ({ data: {} })) } },
input,
{
promptWithModelSuggestionRetry,
promptSyncWithModelSuggestionRetry,
},
)
//#then
bunExpect(result).toContain("JSON Parse error: Unexpected EOF")
bunExpect(promptWithModelSuggestionRetry).toHaveBeenCalledTimes(1)
bunExpect(promptSyncWithModelSuggestionRetry).toHaveBeenCalledTimes(0)
})
})

View File

@@ -1,10 +1,33 @@
import type { DelegateTaskArgs, OpencodeClient } from "./types"
import { isPlanFamily } from "./constants"
import { promptWithModelSuggestionRetry } from "../../shared/model-suggestion-retry"
import {
promptSyncWithModelSuggestionRetry,
promptWithModelSuggestionRetry,
} from "../../shared/model-suggestion-retry"
import { formatDetailedError } from "./error-formatting"
import { getAgentToolRestrictions } from "../../shared/agent-tool-restrictions"
import { setSessionTools } from "../../shared/session-tools-store"
// Injectable prompt-sending functions. Tests pass an override object so the
// retry behavior can be observed/stubbed without patching module state.
type SendSyncPromptDeps = {
  promptWithModelSuggestionRetry: typeof promptWithModelSuggestionRetry
  promptSyncWithModelSuggestionRetry: typeof promptSyncWithModelSuggestionRetry
}
// Default dependency set: the real implementations imported above.
const sendSyncPromptDeps: SendSyncPromptDeps = {
  promptWithModelSuggestionRetry,
  promptSyncWithModelSuggestionRetry,
}
// Case-insensitive check for the "oracle" agent, which receives special
// sync-retry handling when the async prompt path fails.
function isOracleAgent(agentToUse: string): boolean {
  return /^oracle$/i.test(agentToUse)
}
// Detects the truncated-response failure mode: either an explicit
// "unexpected eof" or a JSON parse error in the thrown value's message.
function isUnexpectedEofError(error: unknown): boolean {
  let message: string
  if (error instanceof Error) {
    message = error.message
  } else {
    message = String(error)
  }
  const lowered = message.toLowerCase()
  if (lowered.includes("unexpected eof")) return true
  return lowered.includes("json parse error")
}
export async function sendSyncPrompt(
client: OpencodeClient,
input: {
@@ -15,29 +38,44 @@ export async function sendSyncPrompt(
categoryModel: { providerID: string; modelID: string; variant?: string } | undefined
toastManager: { removeTask: (id: string) => void } | null | undefined
taskId: string | undefined
}
},
deps: SendSyncPromptDeps = sendSyncPromptDeps
): Promise<string | null> {
const allowTask = isPlanFamily(input.agentToUse)
const tools = {
task: allowTask,
call_omo_agent: true,
question: false,
...getAgentToolRestrictions(input.agentToUse),
}
setSessionTools(input.sessionID, tools)
const promptArgs = {
path: { id: input.sessionID },
body: {
agent: input.agentToUse,
system: input.systemContent,
tools,
parts: [{ type: "text", text: input.args.prompt }],
...(input.categoryModel
? { model: { providerID: input.categoryModel.providerID, modelID: input.categoryModel.modelID } }
: {}),
...(input.categoryModel?.variant ? { variant: input.categoryModel.variant } : {}),
},
}
try {
const allowTask = isPlanFamily(input.agentToUse)
const tools = {
task: allowTask,
call_omo_agent: true,
question: false,
...getAgentToolRestrictions(input.agentToUse),
}
setSessionTools(input.sessionID, tools)
await promptWithModelSuggestionRetry(client, {
path: { id: input.sessionID },
body: {
agent: input.agentToUse,
system: input.systemContent,
tools,
parts: [{ type: "text", text: input.args.prompt }],
...(input.categoryModel ? { model: { providerID: input.categoryModel.providerID, modelID: input.categoryModel.modelID } } : {}),
...(input.categoryModel?.variant ? { variant: input.categoryModel.variant } : {}),
},
})
await deps.promptWithModelSuggestionRetry(client, promptArgs)
} catch (promptError) {
if (isOracleAgent(input.agentToUse) && isUnexpectedEofError(promptError)) {
try {
await deps.promptSyncWithModelSuggestionRetry(client, promptArgs)
return null
} catch (oracleRetryError) {
promptError = oracleRetryError
}
}
if (input.toastManager && input.taskId !== undefined) {
input.toastManager.removeTask(input.taskId)
}