refactor(shared): deduplicate model resolution utility functions

Extract normalizeModel() (3 identical copies) and normalizeModelID()
(2 identical copies) into canonical src/shared/model-normalization.ts.
Delete model-name-matcher.ts, now a duplicate of model-availability.ts. Update all consumers.
This commit is contained in:
IYODA Atsushi
2026-03-02 16:38:22 +09:00
parent a666612354
commit 4b366926d4
11 changed files with 140 additions and 120 deletions

View File

@@ -1,11 +1,7 @@
import { log } from "../../shared"
import { log, normalizeModelID } from "../../shared"
const OPUS_4_6_PATTERN = /claude-opus-4[-.]6/i
// Canonicalizes version separators: a "." that precedes a digit becomes "-"
// (e.g. "claude-opus-4.6" -> "claude-opus-4-6"); all other dots are kept.
function normalizeModelID(modelID: string): string {
  return modelID.replace(/\.(?=\d)/g, "-")
}
function isClaudeProvider(providerID: string, modelID: string): boolean {
if (["anthropic", "google-vertex-anthropic", "opencode"].includes(providerID)) return true
if (providerID === "github-copilot" && modelID.toLowerCase().includes("claude")) return true

View File

@@ -16,6 +16,8 @@
* inconsistencies defensively while maintaining backwards compatibility.
*/
import { normalizeModelID } from "../../shared"
/**
* Extracts provider-specific prefix from model ID (if present).
* Custom providers may use prefixes for routing (e.g., vertex_ai/, openai/).
@@ -36,24 +38,6 @@ function extractModelPrefix(modelID: string): { prefix: string; base: string } {
}
}
/**
 * Rewrites dotted version segments to hyphenated form so model-ID lookups
 * succeed regardless of which convention the provider uses. GitHub Copilot
 * may report "claude-opus-4.6" while our maps key on "claude-opus-4-6".
 *
 * @example
 * normalizeModelID("claude-opus-4.6") // "claude-opus-4-6"
 * normalizeModelID("gemini-3.5-pro") // "gemini-3-5-pro"
 * normalizeModelID("gpt-5.2") // "gpt-5-2"
 * normalizeModelID("vertex_ai/claude-opus-4.6") // "vertex_ai/claude-opus-4-6"
 */
function normalizeModelID(modelID: string): string {
  // A dot counts as a version separator only when a digit follows it.
  return modelID.replace(/\.(\d)/g, "-$1")
}
// Maps model IDs to their "high reasoning" variant (internal convention)
// For OpenAI models, this signals that reasoning_effort should be set to "high"

View File

@@ -33,7 +33,7 @@ resolveModel(input)
4. System default: Ultimate fallback
```
Key files: `model-resolver.ts` (entry), `model-resolution-pipeline.ts` (orchestration), `model-requirements.ts` (fallback chains), `model-name-matcher.ts` (fuzzy matching).
Key files: `model-resolver.ts` (entry), `model-resolution-pipeline.ts` (orchestration), `model-requirements.ts` (fallback chains), `model-availability.ts` (fuzzy matching).
## MIGRATION SYSTEM

View File

@@ -1,6 +1,6 @@
import { readConnectedProvidersCache } from "./connected-providers-cache"
import { log } from "./logger"
import { fuzzyMatchModel } from "./model-name-matcher"
import { fuzzyMatchModel } from "./model-availability"
type FallbackEntry = { providers: string[]; model: string }

View File

@@ -34,6 +34,7 @@ export * from "./system-directive"
export * from "./agent-tool-restrictions"
export * from "./model-requirements"
export * from "./model-resolver"
export { normalizeModel, normalizeModelID } from "./model-normalization"
export { normalizeFallbackModels } from "./model-resolver"
export { resolveModelPipeline } from "./model-resolution-pipeline"
export type {

View File

@@ -1,83 +0,0 @@
import { log } from "./logger"
// Lowercases a model name and canonicalizes Claude-family version separators
// to dotted form ("claude-opus-4-6" -> "claude-opus-4.6") so hyphen- and
// dot-style IDs compare equal during fuzzy matching.
function normalizeModelName(name: string): string {
  const lowered = name.toLowerCase()
  return lowered.replace(
    /claude-(opus|sonnet|haiku)-(\d+)[.-](\d+)/g,
    (_match, family, major, minor) => `claude-${family}-${major}.${minor}`,
  )
}
/**
 * Finds the best fuzzy match for `target` among `available` model IDs
 * (optionally restricted to `providers`). Preference order: exact
 * normalized match, then exact match on the provider-stripped model ID
 * (shortest wins), then the shortest normalized substring match.
 * Returns null when nothing matches.
 */
export function fuzzyMatchModel(
  target: string,
  available: Set<string>,
  providers?: string[],
): string | null {
  log("[fuzzyMatchModel] called", { target, availableCount: available.size, providers })
  if (available.size === 0) {
    log("[fuzzyMatchModel] empty available set")
    return null
  }
  const targetNormalized = normalizeModelName(target)
  // Optionally restrict the pool to the requested providers.
  let candidates = [...available]
  if (providers !== undefined && providers.length > 0) {
    const allowed = new Set(providers)
    candidates = candidates.filter((model) => allowed.has(model.split("/")[0]))
    log("[fuzzyMatchModel] filtered by providers", {
      candidateCount: candidates.length,
      candidates: candidates.slice(0, 10),
    })
  }
  if (candidates.length === 0) {
    log("[fuzzyMatchModel] no candidates after filter")
    return null
  }
  const matches = candidates.filter((model) =>
    normalizeModelName(model).includes(targetNormalized),
  )
  log("[fuzzyMatchModel] substring matches", {
    targetNormalized,
    matchCount: matches.length,
    matches,
  })
  if (matches.length === 0) {
    return null
  }
  // 1) A whole-string exact match wins outright (first in iteration order).
  for (const model of matches) {
    if (normalizeModelName(model) === targetNormalized) {
      log("[fuzzyMatchModel] exact match found", { exactMatch: model })
      return model
    }
  }
  // Ties resolve to the earlier element, matching strict `<` comparison.
  const shortestOf = (models: string[]): string =>
    models.reduce((best, next) => (next.length < best.length ? next : best))
  // 2) Exact match on the model ID with its provider prefix stripped.
  const exactModelIdMatches = matches.filter((model) => {
    const modelId = model.split("/").slice(1).join("/")
    return normalizeModelName(modelId) === targetNormalized
  })
  if (exactModelIdMatches.length > 0) {
    const result = shortestOf(exactModelIdMatches)
    log("[fuzzyMatchModel] exact model ID match found", {
      result,
      candidateCount: exactModelIdMatches.length,
    })
    return result
  }
  // 3) Fall back to the shortest substring match.
  const result = shortestOf(matches)
  log("[fuzzyMatchModel] shortest match", { result })
  return result
}

View File

@@ -0,0 +1,123 @@
// Unit tests for the shared model-normalization helpers.
// Written in the project's given/when/then (BDD) naming convention for bun:test.
import { describe, expect, test } from "bun:test"
import { normalizeModel, normalizeModelID } from "./model-normalization"
// normalizeModel: trims input and collapses undefined/empty/blank to undefined.
describe("normalizeModel", () => {
  describe("#given undefined input", () => {
    test("#when normalizeModel is called with undefined #then returns undefined", () => {
      // given
      const input = undefined
      // when
      const result = normalizeModel(input)
      // then
      expect(result).toBeUndefined()
    })
  })
  describe("#given empty string", () => {
    test("#when normalizeModel is called with empty string #then returns undefined", () => {
      // given
      const input = ""
      // when
      const result = normalizeModel(input)
      // then
      expect(result).toBeUndefined()
    })
  })
  describe("#given whitespace-only string", () => {
    test("#when normalizeModel is called with whitespace-only string #then returns undefined", () => {
      // given
      const input = " "
      // when
      const result = normalizeModel(input)
      // then
      expect(result).toBeUndefined()
    })
  })
  describe("#given valid model string", () => {
    test("#when normalizeModel is called with valid model string #then returns same string", () => {
      // given
      const input = "claude-3-opus"
      // when
      const result = normalizeModel(input)
      // then
      expect(result).toBe("claude-3-opus")
    })
  })
  describe("#given string with leading and trailing spaces", () => {
    test("#when normalizeModel is called with spaces #then returns trimmed string", () => {
      // given
      const input = " claude-3-opus "
      // when
      const result = normalizeModel(input)
      // then
      expect(result).toBe("claude-3-opus")
    })
  })
  // NOTE(review): duplicates the "#given whitespace-only string" case above
  // (only the amount of whitespace differs); consider removing one.
  describe("#given string with only spaces", () => {
    test("#when normalizeModel is called with only spaces #then returns undefined", () => {
      // given
      const input = " "
      // when
      const result = normalizeModel(input)
      // then
      expect(result).toBeUndefined()
    })
  })
})
// normalizeModelID: rewrites dotted version numbers to hyphenated form.
describe("normalizeModelID", () => {
  describe("#given model with dots in version numbers", () => {
    test("#when normalizeModelID is called with claude-3.5-sonnet #then returns claude-3-5-sonnet", () => {
      // given
      const input = "claude-3.5-sonnet"
      // when
      const result = normalizeModelID(input)
      // then
      expect(result).toBe("claude-3-5-sonnet")
    })
  })
  describe("#given model without dots", () => {
    test("#when normalizeModelID is called with claude-opus #then returns unchanged", () => {
      // given
      const input = "claude-opus"
      // when
      const result = normalizeModelID(input)
      // then
      expect(result).toBe("claude-opus")
    })
  })
  describe("#given model with multiple dot-numbers", () => {
    test("#when normalizeModelID is called with model.1.2 #then returns model-1-2", () => {
      // given
      const input = "model.1.2"
      // when
      const result = normalizeModelID(input)
      // then
      expect(result).toBe("model-1-2")
    })
  })
})

View File

@@ -0,0 +1,8 @@
/**
 * Trims a model string and collapses undefined, empty, or whitespace-only
 * values to undefined so callers can treat "no model" uniformly.
 */
export function normalizeModel(model?: string): string | undefined {
  if (model === undefined) return undefined
  const trimmed = model.trim()
  return trimmed.length > 0 ? trimmed : undefined
}
/**
 * Normalizes dotted version numbers in a model ID to hyphenated form
 * ("claude-opus-4.6" -> "claude-opus-4-6") so lookups work regardless of
 * which separator convention the provider uses.
 */
export function normalizeModelID(modelID: string): string {
  return modelID.replace(/\.(\d+)/g, (_match, digits) => `-${digits}`)
}

View File

@@ -3,6 +3,7 @@ import * as connectedProvidersCache from "./connected-providers-cache"
import { fuzzyMatchModel } from "./model-availability"
import type { FallbackEntry } from "./model-requirements"
import { transformModelForProvider } from "./provider-model-id-transform"
import { normalizeModel } from "./model-normalization"
export type ModelResolutionRequest = {
intent?: {
@@ -35,10 +36,6 @@ export type ModelResolutionResult = {
reason?: string
}
// Collapse undefined, empty, and whitespace-only model strings to undefined.
function normalizeModel(model?: string): string | undefined {
  if (typeof model !== "string") return undefined
  const cleaned = model.trim()
  return cleaned === "" ? undefined : cleaned
}
export function resolveModelPipeline(
request: ModelResolutionRequest,

View File

@@ -1,4 +1,5 @@
import type { FallbackEntry } from "./model-requirements"
import { normalizeModel } from "./model-normalization"
import { resolveModelPipeline } from "./model-resolution-pipeline"
export type ModelResolutionInput = {
@@ -29,10 +30,6 @@ export type ExtendedModelResolutionInput = {
systemDefaultModel?: string
}
// Normalize a possibly-missing model string: trim it, and treat a blank
// or absent value as undefined.
function normalizeModel(model?: string): string | undefined {
  const trimmed = (model ?? "").trim()
  return trimmed.length > 0 ? trimmed : undefined
}
export function resolveModel(input: ModelResolutionInput): string | undefined {
return (

View File

@@ -1,11 +1,8 @@
import type { FallbackEntry } from "../../shared/model-requirements"
import { normalizeModel } from "../../shared/model-normalization"
import { fuzzyMatchModel } from "../../shared/model-availability"
import { transformModelForProvider } from "../../shared/provider-model-id-transform"
// Returns the trimmed model string, or undefined when no usable value
// was provided (missing, empty, or whitespace-only).
function normalizeModel(model?: string): string | undefined {
  if (model == null) return undefined
  const value = model.trim()
  return value || undefined
}
export function resolveModelForDelegateTask(input: {
userModel?: string