feat: check provider connectivity instead of specific model for hephaestus availability
Hephaestus now appears when any of its providers (openai, github-copilot, opencode) is connected, rather than requiring the exact gpt-5.2-codex model. This allows users with newer codex models (e.g., gpt-5.3-codex) to use Hephaestus without manual config overrides.

- Add requiresProvider field to ModelRequirement type
- Add isAnyProviderConnected() helper in model-availability
- Update hephaestus config from requiresModel to requiresProvider
- Update cli model-fallback to handle requiresProvider checks
This commit is contained in:
@@ -240,13 +240,13 @@ describe("createBuiltinAgents without systemDefaultModel", () => {
|
||||
})
|
||||
})
|
||||
|
||||
describe("createBuiltinAgents with requiresModel gating", () => {
|
||||
test("hephaestus is not created when gpt-5.2-codex is unavailable and provider not connected", async () => {
|
||||
// #given
|
||||
describe("createBuiltinAgents with requiresProvider gating (hephaestus)", () => {
|
||||
test("hephaestus is not created when no required provider is connected", async () => {
|
||||
// #given - only anthropic models available, not in hephaestus requiresProvider
|
||||
const fetchSpy = spyOn(shared, "fetchAvailableModels").mockResolvedValue(
|
||||
new Set(["anthropic/claude-opus-4-5"])
|
||||
)
|
||||
const cacheSpy = spyOn(connectedProvidersCache, "readConnectedProvidersCache").mockReturnValue([])
|
||||
const cacheSpy = spyOn(connectedProvidersCache, "readConnectedProvidersCache").mockReturnValue(["anthropic"])
|
||||
|
||||
try {
|
||||
// #when
|
||||
@@ -260,8 +260,8 @@ describe("createBuiltinAgents with requiresModel gating", () => {
|
||||
}
|
||||
})
|
||||
|
||||
test("hephaestus is created when gpt-5.2-codex is available", async () => {
|
||||
// #given
|
||||
test("hephaestus is created when openai provider is connected", async () => {
|
||||
// #given - openai provider has models available
|
||||
const fetchSpy = spyOn(shared, "fetchAvailableModels").mockResolvedValue(
|
||||
new Set(["openai/gpt-5.2-codex"])
|
||||
)
|
||||
@@ -277,6 +277,40 @@ describe("createBuiltinAgents with requiresModel gating", () => {
|
||||
}
|
||||
})
|
||||
|
||||
test("hephaestus is created when github-copilot provider is connected", async () => {
  // #given - github-copilot provider has models available
  // (provider connectivity is inferred from the "provider/" prefix of model ids)
  const fetchSpy = spyOn(shared, "fetchAvailableModels").mockResolvedValue(
    new Set(["github-copilot/gpt-5.2-codex"])
  )

  try {
    // #when
    const agents = await createBuiltinAgents([], {}, undefined, TEST_DEFAULT_MODEL, undefined, undefined, [], {})

    // #then
    expect(agents.hephaestus).toBeDefined()
  } finally {
    // restore in finally so a failing assertion cannot leak the spy into other tests
    fetchSpy.mockRestore()
  }
})
|
||||
|
||||
test("hephaestus is created when opencode provider is connected", async () => {
  // #given - opencode provider has models available
  // (provider connectivity is inferred from the "provider/" prefix of model ids)
  const fetchSpy = spyOn(shared, "fetchAvailableModels").mockResolvedValue(
    new Set(["opencode/gpt-5.2-codex"])
  )

  try {
    // #when
    const agents = await createBuiltinAgents([], {}, undefined, TEST_DEFAULT_MODEL, undefined, undefined, [], {})

    // #then
    expect(agents.hephaestus).toBeDefined()
  } finally {
    // restore in finally so a failing assertion cannot leak the spy into other tests
    fetchSpy.mockRestore()
  }
})
|
||||
|
||||
test("hephaestus is created on first run when no availableModels or cache exist", async () => {
|
||||
// #given
|
||||
const cacheSpy = spyOn(connectedProvidersCache, "readConnectedProvidersCache").mockReturnValue(null)
|
||||
@@ -295,7 +329,7 @@ describe("createBuiltinAgents with requiresModel gating", () => {
|
||||
}
|
||||
})
|
||||
|
||||
test("hephaestus is created when explicit config provided even if model unavailable", async () => {
|
||||
test("hephaestus is created when explicit config provided even if provider unavailable", async () => {
|
||||
// #given
|
||||
const fetchSpy = spyOn(shared, "fetchAvailableModels").mockResolvedValue(
|
||||
new Set(["anthropic/claude-opus-4-5"])
|
||||
|
||||
@@ -11,7 +11,7 @@ import { createAtlasAgent, atlasPromptMetadata } from "./atlas"
|
||||
import { createMomusAgent, momusPromptMetadata } from "./momus"
|
||||
import { createHephaestusAgent } from "./hephaestus"
|
||||
import type { AvailableAgent, AvailableCategory, AvailableSkill } from "./dynamic-agent-prompt-builder"
|
||||
import { deepMerge, fetchAvailableModels, resolveModelPipeline, AGENT_MODEL_REQUIREMENTS, readConnectedProvidersCache, isModelAvailable, isAnyFallbackModelAvailable, migrateAgentConfig } from "../shared"
|
||||
import { deepMerge, fetchAvailableModels, resolveModelPipeline, AGENT_MODEL_REQUIREMENTS, readConnectedProvidersCache, isModelAvailable, isAnyFallbackModelAvailable, isAnyProviderConnected, migrateAgentConfig } from "../shared"
|
||||
import { DEFAULT_CATEGORIES, CATEGORY_DESCRIPTIONS } from "../tools/delegate-task/constants"
|
||||
import { resolveMultipleSkills } from "../features/opencode-skill-loader/skill-content"
|
||||
import { createBuiltinSkills } from "../features/builtin-skills"
|
||||
@@ -394,13 +394,13 @@ export async function createBuiltinAgents(
|
||||
const hephaestusRequirement = AGENT_MODEL_REQUIREMENTS["hephaestus"]
|
||||
const hasHephaestusExplicitConfig = hephaestusOverride !== undefined
|
||||
|
||||
const hasRequiredModel =
|
||||
!hephaestusRequirement?.requiresModel ||
|
||||
const hasRequiredProvider =
|
||||
!hephaestusRequirement?.requiresProvider ||
|
||||
hasHephaestusExplicitConfig ||
|
||||
isFirstRunNoCache ||
|
||||
isAnyFallbackModelAvailable(hephaestusRequirement.fallbackChain, availableModels)
|
||||
isAnyProviderConnected(hephaestusRequirement.requiresProvider, availableModels)
|
||||
|
||||
if (hasRequiredModel) {
|
||||
if (hasRequiredProvider) {
|
||||
let hephaestusResolution = applyModelResolution({
|
||||
userModel: hephaestusOverride?.model,
|
||||
requirement: hephaestusRequirement,
|
||||
|
||||
@@ -409,7 +409,7 @@ describe("generateModelConfig", () => {
|
||||
})
|
||||
|
||||
describe("Hephaestus agent special cases", () => {
|
||||
test("Hephaestus is created when OpenAI is available (has gpt-5.2-codex)", () => {
|
||||
test("Hephaestus is created when OpenAI is available (openai provider connected)", () => {
|
||||
// #given
|
||||
const config = createConfig({ hasOpenAI: true })
|
||||
|
||||
@@ -421,7 +421,7 @@ describe("generateModelConfig", () => {
|
||||
expect(result.agents?.hephaestus?.variant).toBe("medium")
|
||||
})
|
||||
|
||||
test("Hephaestus is created when Copilot is available (has gpt-5.2-codex)", () => {
|
||||
test("Hephaestus is created when Copilot is available (github-copilot provider connected)", () => {
|
||||
// #given
|
||||
const config = createConfig({ hasCopilot: true })
|
||||
|
||||
@@ -433,7 +433,7 @@ describe("generateModelConfig", () => {
|
||||
expect(result.agents?.hephaestus?.variant).toBe("medium")
|
||||
})
|
||||
|
||||
test("Hephaestus is created when OpenCode Zen is available (has gpt-5.2-codex)", () => {
|
||||
test("Hephaestus is created when OpenCode Zen is available (opencode provider connected)", () => {
|
||||
// #given
|
||||
const config = createConfig({ hasOpencodeZen: true })
|
||||
|
||||
@@ -445,7 +445,7 @@ describe("generateModelConfig", () => {
|
||||
expect(result.agents?.hephaestus?.variant).toBe("medium")
|
||||
})
|
||||
|
||||
test("Hephaestus is omitted when only Claude is available (no gpt-5.2-codex)", () => {
|
||||
test("Hephaestus is omitted when only Claude is available (no required provider connected)", () => {
|
||||
// #given
|
||||
const config = createConfig({ hasClaude: true })
|
||||
|
||||
@@ -456,7 +456,7 @@ describe("generateModelConfig", () => {
|
||||
expect(result.agents?.hephaestus).toBeUndefined()
|
||||
})
|
||||
|
||||
test("Hephaestus is omitted when only Gemini is available (no gpt-5.2-codex)", () => {
|
||||
test("Hephaestus is omitted when only Gemini is available (no required provider connected)", () => {
|
||||
// #given
|
||||
const config = createConfig({ hasGemini: true })
|
||||
|
||||
@@ -467,7 +467,7 @@ describe("generateModelConfig", () => {
|
||||
expect(result.agents?.hephaestus).toBeUndefined()
|
||||
})
|
||||
|
||||
test("Hephaestus is omitted when only ZAI is available (no gpt-5.2-codex)", () => {
|
||||
test("Hephaestus is omitted when only ZAI is available (no required provider connected)", () => {
|
||||
// #given
|
||||
const config = createConfig({ hasZaiCodingPlan: true })
|
||||
|
||||
|
||||
@@ -122,6 +122,13 @@ function isRequiredModelAvailable(
|
||||
return matchingEntry.providers.some((provider) => isProviderAvailable(provider, avail))
|
||||
}
|
||||
|
||||
function isRequiredProviderAvailable(
|
||||
requiredProviders: string[],
|
||||
avail: ProviderAvailability
|
||||
): boolean {
|
||||
return requiredProviders.some((provider) => isProviderAvailable(provider, avail))
|
||||
}
|
||||
|
||||
export function generateModelConfig(config: InstallConfig): GeneratedOmoConfig {
|
||||
const avail = toProviderAvailability(config)
|
||||
const hasAnyProvider =
|
||||
@@ -185,6 +192,9 @@ export function generateModelConfig(config: InstallConfig): GeneratedOmoConfig {
|
||||
if (req.requiresModel && !isRequiredModelAvailable(req.requiresModel, req.fallbackChain, avail)) {
|
||||
continue
|
||||
}
|
||||
if (req.requiresProvider && !isRequiredProviderAvailable(req.requiresProvider, avail)) {
|
||||
continue
|
||||
}
|
||||
|
||||
const resolved = resolveModelFromChain(req.fallbackChain, avail)
|
||||
if (resolved) {
|
||||
@@ -205,6 +215,9 @@ export function generateModelConfig(config: InstallConfig): GeneratedOmoConfig {
|
||||
if (req.requiresModel && !isRequiredModelAvailable(req.requiresModel, req.fallbackChain, avail)) {
|
||||
continue
|
||||
}
|
||||
if (req.requiresProvider && !isRequiredProviderAvailable(req.requiresProvider, avail)) {
|
||||
continue
|
||||
}
|
||||
|
||||
const resolved = resolveModelFromChain(fallbackChain, avail)
|
||||
if (resolved) {
|
||||
|
||||
@@ -317,6 +317,35 @@ export function isAnyFallbackModelAvailable(
|
||||
return false
|
||||
}
|
||||
|
||||
export function isAnyProviderConnected(
|
||||
providers: string[],
|
||||
availableModels: Set<string>,
|
||||
): boolean {
|
||||
if (availableModels.size > 0) {
|
||||
const providerSet = new Set(providers)
|
||||
for (const model of availableModels) {
|
||||
const [provider] = model.split("/")
|
||||
if (providerSet.has(provider)) {
|
||||
log("[isAnyProviderConnected] found model from required provider", { provider, model })
|
||||
return true
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
const connectedProviders = readConnectedProvidersCache()
|
||||
if (connectedProviders) {
|
||||
const connectedSet = new Set(connectedProviders)
|
||||
for (const provider of providers) {
|
||||
if (connectedSet.has(provider)) {
|
||||
log("[isAnyProviderConnected] provider connected via cache", { provider })
|
||||
return true
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
return false
|
||||
}
|
||||
|
||||
// Intentional no-op: presumably retained for API/test compatibility after an
// in-memory model cache was removed — confirm callers before deleting.
export function __resetModelCache(): void {}
|
||||
|
||||
export function isModelCacheAvailable(): boolean {
|
||||
|
||||
@@ -176,14 +176,15 @@ describe("AGENT_MODEL_REQUIREMENTS", () => {
|
||||
expect(primary.providers[0]).toBe("kimi-for-coding")
|
||||
})
|
||||
|
||||
test("hephaestus requires gpt-5.2-codex", () => {
|
||||
test("hephaestus requires openai/github-copilot/opencode provider", () => {
|
||||
// #given - hephaestus agent requirement
|
||||
const hephaestus = AGENT_MODEL_REQUIREMENTS["hephaestus"]
|
||||
|
||||
// #when - accessing hephaestus requirement
|
||||
// #then - requiresModel is set to gpt-5.2-codex
|
||||
// #then - requiresProvider is set to openai, github-copilot, opencode (not requiresModel)
|
||||
expect(hephaestus).toBeDefined()
|
||||
expect(hephaestus.requiresModel).toBe("gpt-5.2-codex")
|
||||
expect(hephaestus.requiresProvider).toEqual(["openai", "github-copilot", "opencode"])
|
||||
expect(hephaestus.requiresModel).toBeUndefined()
|
||||
})
|
||||
|
||||
test("all 10 builtin agents have valid fallbackChain arrays", () => {
|
||||
|
||||
@@ -9,6 +9,7 @@ export type ModelRequirement = {
|
||||
variant?: string // Default variant (used when entry doesn't specify one)
|
||||
requiresModel?: string // If set, only activates when this model is available (fuzzy match)
|
||||
requiresAnyModel?: boolean // If true, requires at least ONE model in fallbackChain to be available (or empty availability treated as unavailable)
|
||||
requiresProvider?: string[] // If set, only activates when any of these providers is connected
|
||||
}
|
||||
|
||||
export const AGENT_MODEL_REQUIREMENTS: Record<string, ModelRequirement> = {
|
||||
@@ -27,7 +28,7 @@ export const AGENT_MODEL_REQUIREMENTS: Record<string, ModelRequirement> = {
|
||||
fallbackChain: [
|
||||
{ providers: ["openai", "github-copilot", "opencode"], model: "gpt-5.2-codex", variant: "medium" },
|
||||
],
|
||||
requiresModel: "gpt-5.2-codex",
|
||||
requiresProvider: ["openai", "github-copilot", "opencode"],
|
||||
},
|
||||
oracle: {
|
||||
fallbackChain: [
|
||||
|
||||
Reference in New Issue
Block a user