refactor(models): upgrade zai-coding-plan default from glm-4.7 to glm-5

This commit is contained in:
YeonGyu-Kim
2026-02-19 14:29:05 +09:00
parent 86a1bfa493
commit c8eb0dbae3
14 changed files with 50 additions and 50 deletions

View File

@@ -18,7 +18,7 @@ describe("createBuiltinAgents with model overrides", () => {
"anthropic/claude-opus-4-6",
"kimi-for-coding/k2p5",
"opencode/kimi-k2.5-free",
"zai-coding-plan/glm-4.7",
"zai-coding-plan/glm-5",
"opencode/big-pickle",
])
)
@@ -259,7 +259,7 @@ describe("createBuiltinAgents with model overrides", () => {
"anthropic/claude-opus-4-6",
"kimi-for-coding/k2p5",
"opencode/kimi-k2.5-free",
"zai-coding-plan/glm-4.7",
"zai-coding-plan/glm-5",
"opencode/big-pickle",
"openai/gpt-5.2",
])
@@ -505,7 +505,7 @@ describe("createBuiltinAgents without systemDefaultModel", () => {
"anthropic/claude-opus-4-6",
"kimi-for-coding/k2p5",
"opencode/kimi-k2.5-free",
"zai-coding-plan/glm-4.7",
"zai-coding-plan/glm-5",
"opencode/big-pickle",
])
)

View File

@@ -900,7 +900,7 @@ exports[`generateModelConfig fallback providers uses ZAI model for librarian whe
"model": "opencode/gpt-5-nano",
},
"librarian": {
"model": "zai-coding-plan/glm-4.7",
"model": "zai-coding-plan/glm-5",
},
"metis": {
"model": "opencode/big-pickle",
@@ -918,7 +918,7 @@ exports[`generateModelConfig fallback providers uses ZAI model for librarian whe
"model": "opencode/big-pickle",
},
"sisyphus": {
"model": "zai-coding-plan/glm-4.7",
"model": "zai-coding-plan/glm-5",
},
},
"categories": {
@@ -955,7 +955,7 @@ exports[`generateModelConfig fallback providers uses ZAI model for librarian wit
"model": "opencode/gpt-5-nano",
},
"librarian": {
"model": "zai-coding-plan/glm-4.7",
"model": "zai-coding-plan/glm-5",
},
"metis": {
"model": "opencode/big-pickle",
@@ -973,7 +973,7 @@ exports[`generateModelConfig fallback providers uses ZAI model for librarian wit
"model": "opencode/big-pickle",
},
"sisyphus": {
"model": "zai-coding-plan/glm-4.7",
"model": "zai-coding-plan/glm-5",
},
},
"categories": {
@@ -1158,7 +1158,7 @@ exports[`generateModelConfig mixed provider scenarios uses Claude + ZAI combinat
"model": "anthropic/claude-haiku-4-5",
},
"librarian": {
"model": "zai-coding-plan/glm-4.7",
"model": "zai-coding-plan/glm-5",
},
"metis": {
"model": "anthropic/claude-opus-4-6",
@@ -1289,7 +1289,7 @@ exports[`generateModelConfig mixed provider scenarios uses all fallback provider
"variant": "medium",
},
"librarian": {
"model": "zai-coding-plan/glm-4.7",
"model": "zai-coding-plan/glm-5",
},
"metis": {
"model": "github-copilot/claude-opus-4.6",
@@ -1363,7 +1363,7 @@ exports[`generateModelConfig mixed provider scenarios uses all providers togethe
"variant": "medium",
},
"librarian": {
"model": "zai-coding-plan/glm-4.7",
"model": "zai-coding-plan/glm-5",
},
"metis": {
"model": "anthropic/claude-opus-4-6",
@@ -1437,7 +1437,7 @@ exports[`generateModelConfig mixed provider scenarios uses all providers with is
"variant": "medium",
},
"librarian": {
"model": "zai-coding-plan/glm-4.7",
"model": "zai-coding-plan/glm-5",
},
"metis": {
"model": "anthropic/claude-opus-4-6",

View File

@@ -44,7 +44,7 @@ Model Providers (Priority: Native > Copilot > OpenCode Zen > Z.ai > Kimi):
Gemini Native google/ models (Gemini 3 Pro, Flash)
Copilot github-copilot/ models (fallback)
OpenCode Zen opencode/ models (opencode/claude-opus-4-6, etc.)
Z.ai zai-coding-plan/glm-4.7 (Librarian priority)
Z.ai zai-coding-plan/glm-5 (Librarian priority)
Kimi kimi-for-coding/k2p5 (Sisyphus/Prometheus fallback)
`)
.action(async (options) => {

View File

@@ -281,7 +281,7 @@ describe("generateOmoConfig - model fallback system", () => {
expect((result.agents as Record<string, { model: string }>).sisyphus).toBeUndefined()
})
test("uses zai-coding-plan/glm-4.7 for librarian when Z.ai available", () => {
test("uses zai-coding-plan/glm-5 for librarian when Z.ai available", () => {
// #given user has Z.ai and Claude max20
const config: InstallConfig = {
hasClaude: true,
@@ -297,8 +297,8 @@ describe("generateOmoConfig - model fallback system", () => {
// #when generating config
const result = generateOmoConfig(config)
// #then librarian should use zai-coding-plan/glm-4.7
expect((result.agents as Record<string, { model: string }>).librarian.model).toBe("zai-coding-plan/glm-4.7")
// #then librarian should use zai-coding-plan/glm-5
expect((result.agents as Record<string, { model: string }>).librarian.model).toBe("zai-coding-plan/glm-5")
// #then Sisyphus uses Claude (OR logic)
expect((result.agents as Record<string, { model: string }>).sisyphus.model).toBe("anthropic/claude-opus-4-6")
})

View File

@@ -491,7 +491,7 @@ describe("generateModelConfig", () => {
const result = generateModelConfig(config)
// #then librarian should use ZAI_MODEL
expect(result.agents?.librarian?.model).toBe("zai-coding-plan/glm-4.7")
expect(result.agents?.librarian?.model).toBe("zai-coding-plan/glm-5")
})
test("librarian uses claude-sonnet when ZAI not available but Claude is", () => {

View File

@@ -16,7 +16,7 @@ import {
export type { GeneratedOmoConfig } from "./model-fallback-types"
const ZAI_MODEL = "zai-coding-plan/glm-4.7"
const ZAI_MODEL = "zai-coding-plan/glm-5"
const ULTIMATE_FALLBACK = "opencode/big-pickle"
const SCHEMA_URL = "https://raw.githubusercontent.com/code-yeongyu/oh-my-opencode/master/assets/oh-my-opencode.schema.json"

View File

@@ -378,7 +378,7 @@ describe("createThinkModeHook integration", () => {
const hook = createThinkModeHook()
const input = createMockInput(
"zai-coding-plan",
"glm-4.7",
"glm-5",
"ultrathink mode"
)
@@ -387,7 +387,7 @@ describe("createThinkModeHook integration", () => {
//#then thinking config should be omitted from request
const message = input.message as MessageWithInjectedProps
expect(input.message.model?.modelID).toBe("glm-4.7")
expect(input.message.model?.modelID).toBe("glm-5")
expect(message.thinking).toBeUndefined()
expect(message.providerOptions).toBeUndefined()
})

View File

@@ -498,9 +498,9 @@ describe("think-mode switcher", () => {
describe("Z.AI GLM-4.7 provider support", () => {
describe("Z.AI GLM-5 provider support", () => {
describe("getThinkingConfig for zai-coding-plan", () => {
it("should return thinking config for glm-4.7", () => {
it("should return thinking config for glm-5", () => {
//#given a Z.ai GLM model
const config = getThinkingConfig("zai-coding-plan", "glm-4.7")
const config = getThinkingConfig("zai-coding-plan", "glm-5")
//#when thinking config is resolved
@@ -535,9 +535,9 @@ describe("think-mode switcher", () => {
})
describe("HIGH_VARIANT_MAP for GLM", () => {
it("should NOT have high variant for glm-4.7", () => {
// given glm-4.7 model
const variant = getHighVariant("glm-4.7")
it("should NOT have high variant for glm-5", () => {
// given glm-5 model
const variant = getHighVariant("glm-5")
// then should return null (no high variant needed)
expect(variant).toBeNull()

View File

@@ -163,7 +163,7 @@ describe("resolveVariantForModel", () => {
test("returns variant for zai-coding-plan provider without variant", () => {
// given
const config = {} as OhMyOpenCodeConfig
const model = { providerID: "zai-coding-plan", modelID: "glm-4.7" }
const model = { providerID: "zai-coding-plan", modelID: "glm-5" }
// when
const variant = resolveVariantForModel(config, "sisyphus", model)

View File

@@ -327,12 +327,12 @@ describe("fuzzyMatchModel", () => {
expect(result).toBe("anthropic/claude-opus-4-6")
})
// given available models with similar model IDs (e.g., glm-4.7 and big-pickle)
// given available models with similar model IDs (e.g., glm-5 and big-pickle)
// when searching for the longer variant (big-pickle)
// then return exact model ID match, not the shorter one
it("should prefer exact model ID match over shorter substring match", () => {
const available = new Set([
"zai-coding-plan/glm-4.7",
"zai-coding-plan/glm-5",
"zai-coding-plan/big-pickle",
])
const result = fuzzyMatchModel("big-pickle", available)
@@ -344,11 +344,11 @@ describe("fuzzyMatchModel", () => {
// then return the shorter match (existing behavior preserved)
it("should still prefer shorter match when searching for shorter variant", () => {
const available = new Set([
"zai-coding-plan/glm-4.7",
"zai-coding-plan/glm-5",
"zai-coding-plan/big-pickle",
])
const result = fuzzyMatchModel("glm-4.7", available)
expect(result).toBe("zai-coding-plan/glm-4.7")
const result = fuzzyMatchModel("glm-5", available)
expect(result).toBe("zai-coding-plan/glm-5")
})
// given same model ID from multiple providers

View File

@@ -81,7 +81,7 @@ export function fuzzyMatchModel(
}
// Priority 2: Exact model ID match (part after provider/)
// This ensures "big-pickle" matches "zai-coding-plan/big-pickle" over "zai-coding-plan/glm-4.7"
// This ensures "big-pickle" matches "zai-coding-plan/big-pickle" over "zai-coding-plan/glm-5"
// Use filter + shortest to handle multi-provider cases (e.g., openai/gpt-5.2 + opencode/gpt-5.2)
const exactModelIdMatches = matches.filter((model) => {
const modelId = model.split("/").slice(1).join("/")

View File

@@ -44,19 +44,19 @@ describe("AGENT_MODEL_REQUIREMENTS", () => {
expect(last.model).toBe("big-pickle")
})
test("librarian has valid fallbackChain with glm-4.7 as primary", () => {
test("librarian has valid fallbackChain with glm-5 as primary", () => {
// given - librarian agent requirement
const librarian = AGENT_MODEL_REQUIREMENTS["librarian"]
// when - accessing librarian requirement
// then - fallbackChain exists with glm-4.7 as first entry
// then - fallbackChain exists with glm-5 as first entry
expect(librarian).toBeDefined()
expect(librarian.fallbackChain).toBeArray()
expect(librarian.fallbackChain.length).toBeGreaterThan(0)
const primary = librarian.fallbackChain[0]
expect(primary.providers[0]).toBe("zai-coding-plan")
expect(primary.model).toBe("glm-4.7")
expect(primary.model).toBe("glm-5")
})
test("explore has valid fallbackChain with grok-code-fast-1 as primary", () => {

View File

@@ -18,7 +18,7 @@ export const AGENT_MODEL_REQUIREMENTS: Record<string, ModelRequirement> = {
{ providers: ["anthropic", "github-copilot", "opencode"], model: "claude-opus-4-6", variant: "max" },
{ providers: ["kimi-for-coding"], model: "k2p5" },
{ providers: ["opencode"], model: "kimi-k2.5-free" },
{ providers: ["zai-coding-plan"], model: "glm-4.7" },
{ providers: ["zai-coding-plan"], model: "glm-5" },
{ providers: ["opencode"], model: "big-pickle" },
],
requiresAnyModel: true,
@@ -38,7 +38,7 @@ export const AGENT_MODEL_REQUIREMENTS: Record<string, ModelRequirement> = {
},
librarian: {
fallbackChain: [
{ providers: ["zai-coding-plan"], model: "glm-4.7" },
{ providers: ["zai-coding-plan"], model: "glm-5" },
{ providers: ["opencode"], model: "big-pickle" },
{ providers: ["anthropic", "github-copilot", "opencode"], model: "claude-sonnet-4-6" },
],

View File

@@ -390,36 +390,36 @@ describe("resolveModelWithFallback", () => {
})
test("cross-provider fuzzy match when preferred provider unavailable (librarian scenario)", () => {
// given - glm-4.7 is defined for zai-coding-plan, but only opencode has it
// given - glm-5 is defined for zai-coding-plan, but only opencode has it
const input: ExtendedModelResolutionInput = {
fallbackChain: [
{ providers: ["zai-coding-plan"], model: "glm-4.7" },
{ providers: ["zai-coding-plan"], model: "glm-5" },
{ providers: ["anthropic"], model: "claude-sonnet-4-6" },
],
availableModels: new Set(["opencode/glm-4.7", "anthropic/claude-sonnet-4-6"]),
availableModels: new Set(["opencode/glm-5", "anthropic/claude-sonnet-4-6"]),
systemDefaultModel: "google/gemini-3-pro",
}
// when
const result = resolveModelWithFallback(input)
// then - should find glm-4.7 from opencode via cross-provider fuzzy match
expect(result!.model).toBe("opencode/glm-4.7")
// then - should find glm-5 from opencode via cross-provider fuzzy match
expect(result!.model).toBe("opencode/glm-5")
expect(result!.source).toBe("provider-fallback")
expect(logSpy).toHaveBeenCalledWith("Model resolved via fallback chain (cross-provider fuzzy match)", {
model: "glm-4.7",
match: "opencode/glm-4.7",
model: "glm-5",
match: "opencode/glm-5",
variant: undefined,
})
})
test("prefers specified provider over cross-provider match", () => {
// given - both zai-coding-plan and opencode have glm-4.7
// given - both zai-coding-plan and opencode have glm-5
const input: ExtendedModelResolutionInput = {
fallbackChain: [
{ providers: ["zai-coding-plan"], model: "glm-4.7" },
{ providers: ["zai-coding-plan"], model: "glm-5" },
],
availableModels: new Set(["zai-coding-plan/glm-4.7", "opencode/glm-4.7"]),
availableModels: new Set(["zai-coding-plan/glm-5", "opencode/glm-5"]),
systemDefaultModel: "google/gemini-3-pro",
}
@@ -427,7 +427,7 @@ describe("resolveModelWithFallback", () => {
const result = resolveModelWithFallback(input)
// then - should prefer zai-coding-plan (specified provider) over opencode
expect(result!.model).toBe("zai-coding-plan/glm-4.7")
expect(result!.model).toBe("zai-coding-plan/glm-5")
expect(result!.source).toBe("provider-fallback")
})
@@ -435,9 +435,9 @@ describe("resolveModelWithFallback", () => {
// given - entry has variant, model found via cross-provider
const input: ExtendedModelResolutionInput = {
fallbackChain: [
{ providers: ["zai-coding-plan"], model: "glm-4.7", variant: "high" },
{ providers: ["zai-coding-plan"], model: "glm-5", variant: "high" },
],
availableModels: new Set(["opencode/glm-4.7"]),
availableModels: new Set(["opencode/glm-5"]),
systemDefaultModel: "google/gemini-3-pro",
}
@@ -445,7 +445,7 @@ describe("resolveModelWithFallback", () => {
const result = resolveModelWithFallback(input)
// then - variant should be preserved
expect(result!.model).toBe("opencode/glm-4.7")
expect(result!.model).toBe("opencode/glm-5")
expect(result!.variant).toBe("high")
})