Compare commits

..

1 Commits

Author SHA1 Message Date
YeonGyu-Kim
ef8f22caba fix(boulder-state): treat plans without checkboxes as incomplete (fixes #2648)
GPT/Gemini Prometheus plans sometimes lack markdown checkboxes.
Previously getPlanProgress() returned isComplete=true for 0/0,
causing /start-work to skip Atlas execution.

Now total=0 correctly returns isComplete=false so start-work
detects the invalid plan format.

🤖 Generated with the assistance of [OhMyOpenCode](https://github.com/code-yeongyu/oh-my-opencode)

Co-Authored-By: Claude Opus 4.6 <noreply@anthropic.com>
2026-03-18 12:06:24 +09:00
44 changed files with 572 additions and 1073 deletions

View File

@@ -136,36 +136,7 @@ fi
---
## Phase 3: Spawn Subagents (Individual Tool Calls)
**CRITICAL: Create tasks ONE BY ONE using individual `task_create` tool calls. NEVER batch or script.**
For each item, execute these steps sequentially:
### Step 3.1: Create Task Record
```typescript
task_create(
subject="Triage: #{number} {title}",
description="GitHub {issue|PR} triage analysis - {type}",
metadata={"type": "{ISSUE_QUESTION|ISSUE_BUG|ISSUE_FEATURE|ISSUE_OTHER|PR_BUGFIX|PR_OTHER}", "number": {number}}
)
```
### Step 3.2: Spawn Analysis Subagent (Background)
```typescript
task(
category="quick",
run_in_background=true,
load_skills=[],
prompt=SUBAGENT_PROMPT
)
```
**ABSOLUTE RULES for Subagents:**
- **ONLY ANALYZE** - Never take action on GitHub (no comments, merges, closes)
- **READ-ONLY** - Use tools only for reading code/GitHub data
- **WRITE REPORT ONLY** - Output goes to `{REPORT_DIR}/{issue|pr}-{number}.md` via Write tool
- **EVIDENCE REQUIRED** - Every claim must have GitHub permalink as proof
## Phase 3: Spawn Subagents
```
For each item:
@@ -199,7 +170,6 @@ ABSOLUTE RULES (violating ANY = critical failure):
- Your ONLY writable output: {REPORT_DIR}/{issue|pr}-{number}.md via the Write tool
```
---
### ISSUE_QUESTION

View File

@@ -3716,10 +3716,15 @@
"minimum": 10,
"maximum": 9007199254740991
},
"consecutiveThreshold": {
"windowSize": {
"type": "integer",
"minimum": 5,
"maximum": 9007199254740991
},
"repetitionThresholdPercent": {
"type": "number",
"exclusiveMinimum": 0,
"maximum": 100
}
},
"additionalProperties": false

View File

@@ -1,32 +1,20 @@
import { afterAll, beforeAll, describe, expect, mock, test } from "bun:test"
import { afterAll, beforeAll, describe, expect, test } from "bun:test"
import { mkdirSync, rmSync, writeFileSync } from "node:fs"
import * as os from "node:os"
import { tmpdir } from "node:os"
import { homedir, tmpdir } from "node:os"
import { join } from "node:path"
const originalHomedir = os.homedir.bind(os)
let mockedHomeDir = ""
let moduleImportCounter = 0
let resolvePromptAppend: typeof import("./resolve-file-uri").resolvePromptAppend
mock.module("node:os", () => ({
...os,
homedir: () => mockedHomeDir || originalHomedir(),
}))
import { resolvePromptAppend } from "./resolve-file-uri"
describe("resolvePromptAppend", () => {
const fixtureRoot = join(tmpdir(), `resolve-file-uri-${Date.now()}`)
const configDir = join(fixtureRoot, "config")
const homeFixtureRoot = join(fixtureRoot, "home")
const homeFixtureDir = join(homeFixtureRoot, "fixture-home")
const homeFixtureDir = join(homedir(), `.resolve-file-uri-home-${Date.now()}`)
const absoluteFilePath = join(fixtureRoot, "absolute.txt")
const relativeFilePath = join(configDir, "relative.txt")
const spacedFilePath = join(fixtureRoot, "with space.txt")
const homeFilePath = join(homeFixtureDir, "home.txt")
beforeAll(async () => {
mockedHomeDir = homeFixtureRoot
beforeAll(() => {
mkdirSync(fixtureRoot, { recursive: true })
mkdirSync(configDir, { recursive: true })
mkdirSync(homeFixtureDir, { recursive: true })
@@ -35,14 +23,11 @@ describe("resolvePromptAppend", () => {
writeFileSync(relativeFilePath, "relative-content", "utf8")
writeFileSync(spacedFilePath, "encoded-content", "utf8")
writeFileSync(homeFilePath, "home-content", "utf8")
moduleImportCounter += 1
;({ resolvePromptAppend } = await import(`./resolve-file-uri?test=${moduleImportCounter}`))
})
afterAll(() => {
rmSync(fixtureRoot, { recursive: true, force: true })
mock.restore()
rmSync(homeFixtureDir, { recursive: true, force: true })
})
test("returns non-file URI strings unchanged", () => {
@@ -80,7 +65,7 @@ describe("resolvePromptAppend", () => {
test("resolves home directory URI path", () => {
//#given
const input = "file://~/fixture-home/home.txt"
const input = `file://~/${homeFixtureDir.split("/").pop()}/home.txt`
//#when
const resolved = resolvePromptAppend(input)

View File

@@ -45,26 +45,26 @@ export function writePaddedText(
return { output: text, atLineStart: text.endsWith("\n") }
}
const parts: string[] = []
let output = ""
let lineStart = atLineStart
for (let i = 0; i < text.length; i++) {
const ch = text[i]
if (lineStart) {
parts.push(" ")
output += " "
lineStart = false
}
if (ch === "\n") {
parts.push(" \n")
output += " \n"
lineStart = true
continue
}
parts.push(ch)
output += ch
}
return { output: parts.join(""), atLineStart: lineStart }
return { output, atLineStart: lineStart }
}
function colorizeWithProfileColor(text: string, hexColor?: string): string {

View File

@@ -8,24 +8,27 @@ describe("BackgroundTaskConfigSchema.circuitBreaker", () => {
const result = BackgroundTaskConfigSchema.parse({
circuitBreaker: {
maxToolCalls: 150,
consecutiveThreshold: 10,
windowSize: 10,
repetitionThresholdPercent: 70,
},
})
expect(result.circuitBreaker).toEqual({
maxToolCalls: 150,
consecutiveThreshold: 10,
windowSize: 10,
repetitionThresholdPercent: 70,
})
})
})
describe("#given consecutiveThreshold below minimum", () => {
describe("#given windowSize below minimum", () => {
test("#when parsed #then throws ZodError", () => {
let thrownError: unknown
try {
BackgroundTaskConfigSchema.parse({
circuitBreaker: {
consecutiveThreshold: 4,
windowSize: 4,
},
})
} catch (error) {
@@ -36,14 +39,14 @@ describe("BackgroundTaskConfigSchema.circuitBreaker", () => {
})
})
describe("#given consecutiveThreshold is zero", () => {
describe("#given repetitionThresholdPercent is zero", () => {
test("#when parsed #then throws ZodError", () => {
let thrownError: unknown
try {
BackgroundTaskConfigSchema.parse({
circuitBreaker: {
consecutiveThreshold: 0,
repetitionThresholdPercent: 0,
},
})
} catch (error) {

View File

@@ -3,7 +3,8 @@ import { z } from "zod"
const CircuitBreakerConfigSchema = z.object({
enabled: z.boolean().optional(),
maxToolCalls: z.number().int().min(10).optional(),
consecutiveThreshold: z.number().int().min(5).optional(),
windowSize: z.number().int().min(5).optional(),
repetitionThresholdPercent: z.number().gt(0).max(100).optional(),
})
export const BackgroundTaskConfigSchema = z.object({

View File

@@ -7,7 +7,8 @@ export const MIN_STABILITY_TIME_MS = 10 * 1000
export const DEFAULT_STALE_TIMEOUT_MS = 1_200_000
export const DEFAULT_MESSAGE_STALENESS_TIMEOUT_MS = 1_800_000
export const DEFAULT_MAX_TOOL_CALLS = 200
export const DEFAULT_CIRCUIT_BREAKER_CONSECUTIVE_THRESHOLD = 20
export const DEFAULT_CIRCUIT_BREAKER_WINDOW_SIZE = 20
export const DEFAULT_CIRCUIT_BREAKER_REPETITION_THRESHOLD_PERCENT = 80
export const DEFAULT_CIRCUIT_BREAKER_ENABLED = true
export const MIN_RUNTIME_BEFORE_STALE_MS = 30_000
export const MIN_IDLE_TIME_MS = 5000

View File

@@ -37,14 +37,16 @@ describe("loop-detector", () => {
maxToolCalls: 200,
circuitBreaker: {
maxToolCalls: 120,
consecutiveThreshold: 7,
windowSize: 10,
repetitionThresholdPercent: 70,
},
})
expect(result).toEqual({
enabled: true,
maxToolCalls: 120,
consecutiveThreshold: 7,
windowSize: 10,
repetitionThresholdPercent: 70,
})
})
})
@@ -54,7 +56,8 @@ describe("loop-detector", () => {
const result = resolveCircuitBreakerSettings({
circuitBreaker: {
maxToolCalls: 100,
consecutiveThreshold: 5,
windowSize: 5,
repetitionThresholdPercent: 60,
},
})
@@ -68,7 +71,8 @@ describe("loop-detector", () => {
circuitBreaker: {
enabled: false,
maxToolCalls: 100,
consecutiveThreshold: 5,
windowSize: 5,
repetitionThresholdPercent: 60,
},
})
@@ -82,7 +86,8 @@ describe("loop-detector", () => {
circuitBreaker: {
enabled: true,
maxToolCalls: 100,
consecutiveThreshold: 5,
windowSize: 5,
repetitionThresholdPercent: 60,
},
})
@@ -146,52 +151,55 @@ describe("loop-detector", () => {
})
})
describe("#given the same tool is called consecutively", () => {
describe("#given the same tool dominates the recent window", () => {
test("#when evaluated #then it triggers", () => {
const window = buildWindow(Array.from({ length: 20 }, () => "read"))
const window = buildWindow([
"read",
"read",
"read",
"edit",
"read",
"read",
"read",
"read",
"grep",
"read",
], {
circuitBreaker: {
windowSize: 10,
repetitionThresholdPercent: 80,
},
})
const result = detectRepetitiveToolUse(window)
expect(result).toEqual({
triggered: true,
toolName: "read",
repeatedCount: 20,
repeatedCount: 8,
sampleSize: 10,
thresholdPercent: 80,
})
})
})
describe("#given consecutive calls are interrupted by different tool", () => {
test("#when evaluated #then it does not trigger", () => {
const window = buildWindow([
...Array.from({ length: 19 }, () => "read"),
"edit",
"read",
])
describe("#given the window is not full yet", () => {
test("#when the current sample crosses the threshold #then it still triggers", () => {
const window = buildWindow(["read", "read", "edit", "read", "read", "read", "read", "read"], {
circuitBreaker: {
windowSize: 10,
repetitionThresholdPercent: 80,
},
})
const result = detectRepetitiveToolUse(window)
expect(result).toEqual({ triggered: false })
})
})
describe("#given threshold boundary", () => {
test("#when below threshold #then it does not trigger", () => {
const belowThresholdWindow = buildWindow(Array.from({ length: 19 }, () => "read"))
const result = detectRepetitiveToolUse(belowThresholdWindow)
expect(result).toEqual({ triggered: false })
})
test("#when equal to threshold #then it triggers", () => {
const atThresholdWindow = buildWindow(Array.from({ length: 20 }, () => "read"))
const result = detectRepetitiveToolUse(atThresholdWindow)
expect(result).toEqual({
triggered: true,
toolName: "read",
repeatedCount: 20,
repeatedCount: 7,
sampleSize: 8,
thresholdPercent: 80,
})
})
})
@@ -202,7 +210,9 @@ describe("loop-detector", () => {
tool: "read",
input: { filePath: `/src/file-${i}.ts` },
}))
const window = buildWindowWithInputs(calls)
const window = buildWindowWithInputs(calls, {
circuitBreaker: { windowSize: 20, repetitionThresholdPercent: 80 },
})
const result = detectRepetitiveToolUse(window)
expect(result.triggered).toBe(false)
})
@@ -210,30 +220,38 @@ describe("loop-detector", () => {
describe("#given same tool with identical file inputs", () => {
test("#when evaluated #then it triggers with bare tool name", () => {
const calls = Array.from({ length: 20 }, () => ({
tool: "read",
input: { filePath: "/src/same.ts" },
}))
const window = buildWindowWithInputs(calls)
const result = detectRepetitiveToolUse(window)
expect(result).toEqual({
triggered: true,
toolName: "read",
repeatedCount: 20,
const calls = [
...Array.from({ length: 16 }, () => ({ tool: "read", input: { filePath: "/src/same.ts" } })),
{ tool: "grep", input: { pattern: "foo" } },
{ tool: "edit", input: { filePath: "/src/other.ts" } },
{ tool: "bash", input: { command: "ls" } },
{ tool: "glob", input: { pattern: "**/*.ts" } },
]
const window = buildWindowWithInputs(calls, {
circuitBreaker: { windowSize: 20, repetitionThresholdPercent: 80 },
})
const result = detectRepetitiveToolUse(window)
expect(result.triggered).toBe(true)
expect(result.toolName).toBe("read")
expect(result.repeatedCount).toBe(16)
})
})
describe("#given tool calls with no input", () => {
test("#when evaluated #then it triggers", () => {
const calls = Array.from({ length: 20 }, () => ({ tool: "read" }))
const window = buildWindowWithInputs(calls)
const result = detectRepetitiveToolUse(window)
expect(result).toEqual({
triggered: true,
toolName: "read",
repeatedCount: 20,
test("#when the same tool dominates #then falls back to name-only detection", () => {
const calls = [
...Array.from({ length: 16 }, () => ({ tool: "read" })),
{ tool: "grep" },
{ tool: "edit" },
{ tool: "bash" },
{ tool: "glob" },
]
const window = buildWindowWithInputs(calls, {
circuitBreaker: { windowSize: 20, repetitionThresholdPercent: 80 },
})
const result = detectRepetitiveToolUse(window)
expect(result.triggered).toBe(true)
expect(result.toolName).toBe("read")
})
})
})

View File

@@ -1,7 +1,8 @@
import type { BackgroundTaskConfig } from "../../config/schema"
import {
DEFAULT_CIRCUIT_BREAKER_ENABLED,
DEFAULT_CIRCUIT_BREAKER_CONSECUTIVE_THRESHOLD,
DEFAULT_CIRCUIT_BREAKER_REPETITION_THRESHOLD_PERCENT,
DEFAULT_CIRCUIT_BREAKER_WINDOW_SIZE,
DEFAULT_MAX_TOOL_CALLS,
} from "./constants"
import type { ToolCallWindow } from "./types"
@@ -9,13 +10,16 @@ import type { ToolCallWindow } from "./types"
export interface CircuitBreakerSettings {
enabled: boolean
maxToolCalls: number
consecutiveThreshold: number
windowSize: number
repetitionThresholdPercent: number
}
export interface ToolLoopDetectionResult {
triggered: boolean
toolName?: string
repeatedCount?: number
sampleSize?: number
thresholdPercent?: number
}
export function resolveCircuitBreakerSettings(
@@ -25,8 +29,10 @@ export function resolveCircuitBreakerSettings(
enabled: config?.circuitBreaker?.enabled ?? DEFAULT_CIRCUIT_BREAKER_ENABLED,
maxToolCalls:
config?.circuitBreaker?.maxToolCalls ?? config?.maxToolCalls ?? DEFAULT_MAX_TOOL_CALLS,
consecutiveThreshold:
config?.circuitBreaker?.consecutiveThreshold ?? DEFAULT_CIRCUIT_BREAKER_CONSECUTIVE_THRESHOLD,
windowSize: config?.circuitBreaker?.windowSize ?? DEFAULT_CIRCUIT_BREAKER_WINDOW_SIZE,
repetitionThresholdPercent:
config?.circuitBreaker?.repetitionThresholdPercent ??
DEFAULT_CIRCUIT_BREAKER_REPETITION_THRESHOLD_PERCENT,
}
}
@@ -36,20 +42,14 @@ export function recordToolCall(
settings: CircuitBreakerSettings,
toolInput?: Record<string, unknown> | null
): ToolCallWindow {
const previous = window?.toolSignatures ?? []
const signature = createToolCallSignature(toolName, toolInput)
if (window && window.lastSignature === signature) {
return {
lastSignature: signature,
consecutiveCount: window.consecutiveCount + 1,
threshold: settings.consecutiveThreshold,
}
}
const toolSignatures = [...previous, signature].slice(-settings.windowSize)
return {
lastSignature: signature,
consecutiveCount: 1,
threshold: settings.consecutiveThreshold,
toolSignatures,
windowSize: settings.windowSize,
thresholdPercent: settings.repetitionThresholdPercent,
}
}
@@ -82,13 +82,46 @@ export function createToolCallSignature(
export function detectRepetitiveToolUse(
window: ToolCallWindow | undefined
): ToolLoopDetectionResult {
if (!window || window.consecutiveCount < window.threshold) {
if (!window || window.toolSignatures.length === 0) {
return { triggered: false }
}
const counts = new Map<string, number>()
for (const signature of window.toolSignatures) {
counts.set(signature, (counts.get(signature) ?? 0) + 1)
}
let repeatedTool: string | undefined
let repeatedCount = 0
for (const [toolName, count] of counts.entries()) {
if (count > repeatedCount) {
repeatedTool = toolName
repeatedCount = count
}
}
const sampleSize = window.toolSignatures.length
const minimumSampleSize = Math.min(
window.windowSize,
Math.ceil((window.windowSize * window.thresholdPercent) / 100)
)
if (sampleSize < minimumSampleSize) {
return { triggered: false }
}
const thresholdCount = Math.ceil((sampleSize * window.thresholdPercent) / 100)
if (!repeatedTool || repeatedCount < thresholdCount) {
return { triggered: false }
}
return {
triggered: true,
toolName: window.lastSignature.split("::")[0],
repeatedCount: window.consecutiveCount,
toolName: repeatedTool.split("::")[0],
repeatedCount,
sampleSize,
thresholdPercent: window.thresholdPercent,
}
}

View File

@@ -38,11 +38,12 @@ async function flushAsyncWork() {
}
describe("BackgroundManager circuit breaker", () => {
describe("#given the same tool is called consecutively", () => {
test("#when consecutive tool events arrive #then the task is cancelled", async () => {
describe("#given the same tool dominates the recent window", () => {
test("#when tool events arrive #then the task is cancelled early", async () => {
const manager = createManager({
circuitBreaker: {
consecutiveThreshold: 20,
windowSize: 20,
repetitionThresholdPercent: 80,
},
})
const task: BackgroundTask = {
@@ -62,17 +63,38 @@ describe("BackgroundManager circuit breaker", () => {
}
getTaskMap(manager).set(task.id, task)
for (let i = 0; i < 20; i++) {
for (const toolName of [
"read",
"read",
"grep",
"read",
"edit",
"read",
"read",
"bash",
"read",
"read",
"read",
"glob",
"read",
"read",
"read",
"read",
"read",
"read",
"read",
"read",
]) {
manager.handleEvent({
type: "message.part.updated",
properties: { sessionID: task.sessionID, type: "tool", tool: "read" },
properties: { sessionID: task.sessionID, type: "tool", tool: toolName },
})
}
await flushAsyncWork()
expect(task.status).toBe("cancelled")
expect(task.error).toContain("read 20 consecutive times")
expect(task.error).toContain("repeatedly called read 16/20 times")
})
})
@@ -80,7 +102,8 @@ describe("BackgroundManager circuit breaker", () => {
test("#when the window fills #then the task keeps running", async () => {
const manager = createManager({
circuitBreaker: {
consecutiveThreshold: 10,
windowSize: 10,
repetitionThresholdPercent: 80,
},
})
const task: BackgroundTask = {
@@ -130,7 +153,8 @@ describe("BackgroundManager circuit breaker", () => {
const manager = createManager({
maxToolCalls: 3,
circuitBreaker: {
consecutiveThreshold: 95,
windowSize: 10,
repetitionThresholdPercent: 95,
},
})
const task: BackgroundTask = {
@@ -169,7 +193,8 @@ describe("BackgroundManager circuit breaker", () => {
const manager = createManager({
maxToolCalls: 2,
circuitBreaker: {
consecutiveThreshold: 5,
windowSize: 5,
repetitionThresholdPercent: 80,
},
})
const task: BackgroundTask = {
@@ -208,7 +233,7 @@ describe("BackgroundManager circuit breaker", () => {
expect(task.status).toBe("running")
expect(task.progress?.toolCalls).toBe(1)
expect(task.progress?.countedToolPartIDs).toEqual(new Set(["tool-1"]))
expect(task.progress?.countedToolPartIDs).toEqual(["tool-1"])
})
})
@@ -216,7 +241,8 @@ describe("BackgroundManager circuit breaker", () => {
test("#when tool events arrive with state.input #then task keeps running", async () => {
const manager = createManager({
circuitBreaker: {
consecutiveThreshold: 20,
windowSize: 20,
repetitionThresholdPercent: 80,
},
})
const task: BackgroundTask = {
@@ -261,7 +287,8 @@ describe("BackgroundManager circuit breaker", () => {
test("#when tool events arrive with state.input #then task is cancelled with bare tool name in error", async () => {
const manager = createManager({
circuitBreaker: {
consecutiveThreshold: 20,
windowSize: 20,
repetitionThresholdPercent: 80,
},
})
const task: BackgroundTask = {
@@ -298,7 +325,7 @@ describe("BackgroundManager circuit breaker", () => {
await flushAsyncWork()
expect(task.status).toBe("cancelled")
expect(task.error).toContain("read 20 consecutive times")
expect(task.error).toContain("repeatedly called read")
expect(task.error).not.toContain("::")
})
})
@@ -308,7 +335,8 @@ describe("BackgroundManager circuit breaker", () => {
const manager = createManager({
circuitBreaker: {
enabled: false,
consecutiveThreshold: 20,
windowSize: 20,
repetitionThresholdPercent: 80,
},
})
const task: BackgroundTask = {
@@ -351,7 +379,8 @@ describe("BackgroundManager circuit breaker", () => {
maxToolCalls: 3,
circuitBreaker: {
enabled: false,
consecutiveThreshold: 95,
windowSize: 10,
repetitionThresholdPercent: 95,
},
})
const task: BackgroundTask = {

View File

@@ -153,42 +153,4 @@ describe("BackgroundManager pollRunningTasks", () => {
expect(task.status).toBe("running")
})
})
describe("#given a running task whose session has terminal non-idle status", () => {
test('#when session status is "interrupted" #then completes the task', async () => {
//#given
const manager = createManagerWithClient({
status: async () => ({ data: { "ses-interrupted": { type: "interrupted" } } }),
})
const task = createRunningTask("ses-interrupted")
injectTask(manager, task)
//#when
const poll = (manager as unknown as { pollRunningTasks: () => Promise<void> }).pollRunningTasks
await poll.call(manager)
manager.shutdown()
//#then
expect(task.status).toBe("completed")
expect(task.completedAt).toBeDefined()
})
test('#when session status is an unknown type #then completes the task', async () => {
//#given
const manager = createManagerWithClient({
status: async () => ({ data: { "ses-unknown": { type: "some-weird-status" } } }),
})
const task = createRunningTask("ses-unknown")
injectTask(manager, task)
//#when
const poll = (manager as unknown as { pollRunningTasks: () => Promise<void> }).pollRunningTasks
await poll.call(manager)
manager.shutdown()
//#then
expect(task.status).toBe("completed")
expect(task.completedAt).toBeDefined()
})
})
})

View File

@@ -52,12 +52,10 @@ import { join } from "node:path"
import { pruneStaleTasksAndNotifications } from "./task-poller"
import { checkAndInterruptStaleTasks } from "./task-poller"
import { removeTaskToastTracking } from "./remove-task-toast-tracking"
import { isActiveSessionStatus, isTerminalSessionStatus } from "./session-status-classifier"
import {
detectRepetitiveToolUse,
recordToolCall,
resolveCircuitBreakerSettings,
type CircuitBreakerSettings,
} from "./loop-detector"
import {
createSubagentDepthLimitError,
@@ -153,7 +151,6 @@ export class BackgroundManager {
private preStartDescendantReservations: Set<string>
private enableParentSessionNotifications: boolean
readonly taskHistory = new TaskHistory()
private cachedCircuitBreakerSettings?: CircuitBreakerSettings
constructor(
ctx: PluginInput,
@@ -903,24 +900,23 @@ export class BackgroundManager {
task.progress.lastUpdate = new Date()
if (partInfo?.type === "tool" || partInfo?.tool) {
const countedToolPartIDs = task.progress.countedToolPartIDs ?? new Set<string>()
const countedToolPartIDs = task.progress.countedToolPartIDs ?? []
const shouldCountToolCall =
!partInfo.id ||
partInfo.state?.status !== "running" ||
!countedToolPartIDs.has(partInfo.id)
!countedToolPartIDs.includes(partInfo.id)
if (!shouldCountToolCall) {
return
}
if (partInfo.id && partInfo.state?.status === "running") {
countedToolPartIDs.add(partInfo.id)
task.progress.countedToolPartIDs = countedToolPartIDs
task.progress.countedToolPartIDs = [...countedToolPartIDs, partInfo.id]
}
task.progress.toolCalls += 1
task.progress.lastTool = partInfo.tool
const circuitBreaker = this.cachedCircuitBreakerSettings ?? (this.cachedCircuitBreakerSettings = resolveCircuitBreakerSettings(this.config))
const circuitBreaker = resolveCircuitBreakerSettings(this.config)
if (partInfo.tool) {
task.progress.toolCallWindow = recordToolCall(
task.progress.toolCallWindow,
@@ -932,16 +928,18 @@ export class BackgroundManager {
if (circuitBreaker.enabled) {
const loopDetection = detectRepetitiveToolUse(task.progress.toolCallWindow)
if (loopDetection.triggered) {
log("[background-agent] Circuit breaker: consecutive tool usage detected", {
log("[background-agent] Circuit breaker: repetitive tool usage detected", {
taskId: task.id,
agent: task.agent,
sessionID,
toolName: loopDetection.toolName,
repeatedCount: loopDetection.repeatedCount,
sampleSize: loopDetection.sampleSize,
thresholdPercent: loopDetection.thresholdPercent,
})
void this.cancelTask(task.id, {
source: "circuit-breaker",
reason: `Subagent called ${loopDetection.toolName} ${loopDetection.repeatedCount} consecutive times (threshold: ${circuitBreaker.consecutiveThreshold}). This usually indicates an infinite loop. The task was automatically cancelled to prevent excessive token usage.`,
reason: `Subagent repeatedly called ${loopDetection.toolName} ${loopDetection.repeatedCount}/${loopDetection.sampleSize} times in the recent tool-call window (${loopDetection.thresholdPercent}% threshold). This usually indicates an infinite loop. The task was automatically cancelled to prevent excessive token usage.`,
})
return
}
@@ -1784,9 +1782,11 @@ Use \`background_output(task_id="${task.id}")\` to retrieve this result when rea
}
}
// Only skip completion when session status is actively running.
// Unknown or terminal statuses (like "interrupted") fall through to completion.
if (sessionStatus && isActiveSessionStatus(sessionStatus.type)) {
// Match sync-session-poller pattern: only skip completion check when
// status EXISTS and is not idle (i.e., session is actively running).
// When sessionStatus is undefined, the session has completed and dropped
// from the status response — fall through to completion detection.
if (sessionStatus && sessionStatus.type !== "idle") {
log("[background-agent] Session still running, relying on event-based progress:", {
taskId: task.id,
sessionID,
@@ -1796,24 +1796,6 @@ Use \`background_output(task_id="${task.id}")\` to retrieve this result when rea
continue
}
// Explicit terminal non-idle status (e.g., "interrupted") — complete immediately,
// skipping output validation (session will never produce more output).
// Unknown statuses fall through to the idle/gone path with output validation.
if (sessionStatus && isTerminalSessionStatus(sessionStatus.type)) {
await this.tryCompleteTask(task, `polling (terminal session status: ${sessionStatus.type})`)
continue
}
// Unknown non-idle status — not active, not terminal, not idle.
// Fall through to idle/gone completion path with output validation.
if (sessionStatus && sessionStatus.type !== "idle") {
log("[background-agent] Unknown session status, treating as potentially idle:", {
taskId: task.id,
sessionID,
sessionStatus: sessionStatus.type,
})
}
// Session is idle or no longer in status response (completed/disappeared)
const completionSource = sessionStatus?.type === "idle"
? "polling (idle status)"

View File

@@ -1,66 +0,0 @@
import { describe, test, expect, mock } from "bun:test"
import { isActiveSessionStatus, isTerminalSessionStatus } from "./session-status-classifier"
const mockLog = mock()
mock.module("../../shared", () => ({ log: mockLog }))
describe("isActiveSessionStatus", () => {
describe("#given a known active session status", () => {
test('#when type is "busy" #then returns true', () => {
expect(isActiveSessionStatus("busy")).toBe(true)
})
test('#when type is "retry" #then returns true', () => {
expect(isActiveSessionStatus("retry")).toBe(true)
})
test('#when type is "running" #then returns true', () => {
expect(isActiveSessionStatus("running")).toBe(true)
})
})
describe("#given a known terminal session status", () => {
test('#when type is "idle" #then returns false', () => {
expect(isActiveSessionStatus("idle")).toBe(false)
})
test('#when type is "interrupted" #then returns false and does not log', () => {
mockLog.mockClear()
expect(isActiveSessionStatus("interrupted")).toBe(false)
expect(mockLog).not.toHaveBeenCalled()
})
})
describe("#given an unknown session status", () => {
test('#when type is an arbitrary unknown string #then returns false and logs warning', () => {
mockLog.mockClear()
expect(isActiveSessionStatus("some-unknown-status")).toBe(false)
expect(mockLog).toHaveBeenCalledWith(
"[background-agent] Unknown session status type encountered:",
"some-unknown-status",
)
})
test('#when type is empty string #then returns false', () => {
expect(isActiveSessionStatus("")).toBe(false)
})
})
})
describe("isTerminalSessionStatus", () => {
test('#when type is "interrupted" #then returns true', () => {
expect(isTerminalSessionStatus("interrupted")).toBe(true)
})
test('#when type is "idle" #then returns false (idle is handled separately)', () => {
expect(isTerminalSessionStatus("idle")).toBe(false)
})
test('#when type is "busy" #then returns false', () => {
expect(isTerminalSessionStatus("busy")).toBe(false)
})
test('#when type is an unknown string #then returns false', () => {
expect(isTerminalSessionStatus("some-unknown")).toBe(false)
})
})

View File

@@ -1,20 +0,0 @@
import { log } from "../../shared"
const ACTIVE_SESSION_STATUSES = new Set(["busy", "retry", "running"])
const KNOWN_TERMINAL_STATUSES = new Set(["idle", "interrupted"])
export function isActiveSessionStatus(type: string): boolean {
if (ACTIVE_SESSION_STATUSES.has(type)) {
return true
}
if (!KNOWN_TERMINAL_STATUSES.has(type)) {
log("[background-agent] Unknown session status type encountered:", type)
}
return false
}
export function isTerminalSessionStatus(type: string): boolean {
return KNOWN_TERMINAL_STATUSES.has(type) && type !== "idle"
}

View File

@@ -417,56 +417,6 @@ describe("checkAndInterruptStaleTasks", () => {
expect(task.status).toBe("cancelled")
expect(onTaskInterrupted).toHaveBeenCalledWith(task)
})
it('should NOT protect task when session has terminal non-idle status like "interrupted"', async () => {
//#given — lastUpdate is 5min old, session is "interrupted" (terminal, not active)
const task = createRunningTask({
startedAt: new Date(Date.now() - 300_000),
progress: {
toolCalls: 2,
lastUpdate: new Date(Date.now() - 300_000),
},
})
//#when — session status is "interrupted" (terminal)
await checkAndInterruptStaleTasks({
tasks: [task],
client: mockClient as never,
config: { staleTimeoutMs: 180_000 },
concurrencyManager: mockConcurrencyManager as never,
notifyParentSession: mockNotify,
sessionStatuses: { "ses-1": { type: "interrupted" } },
})
//#then — terminal statuses should not protect from stale timeout
expect(task.status).toBe("cancelled")
expect(task.error).toContain("Stale timeout")
})
it('should NOT protect task when session has unknown status type', async () => {
//#given — lastUpdate is 5min old, session has an unknown status
const task = createRunningTask({
startedAt: new Date(Date.now() - 300_000),
progress: {
toolCalls: 2,
lastUpdate: new Date(Date.now() - 300_000),
},
})
//#when — session has unknown status type
await checkAndInterruptStaleTasks({
tasks: [task],
client: mockClient as never,
config: { staleTimeoutMs: 180_000 },
concurrencyManager: mockConcurrencyManager as never,
notifyParentSession: mockNotify,
sessionStatuses: { "ses-1": { type: "some-weird-status" } },
})
//#then — unknown statuses should not protect from stale timeout
expect(task.status).toBe("cancelled")
expect(task.error).toContain("Stale timeout")
})
})
describe("pruneStaleTasksAndNotifications", () => {

View File

@@ -14,7 +14,6 @@ import {
} from "./constants"
import { removeTaskToastTracking } from "./remove-task-toast-tracking"
import { isActiveSessionStatus } from "./session-status-classifier"
const TERMINAL_TASK_STATUSES = new Set<BackgroundTask["status"]>([
"completed",
"error",
@@ -121,7 +120,7 @@ export async function checkAndInterruptStaleTasks(args: {
if (!startedAt || !sessionID) continue
const sessionStatus = sessionStatuses?.[sessionID]?.type
const sessionIsRunning = sessionStatus !== undefined && isActiveSessionStatus(sessionStatus)
const sessionIsRunning = sessionStatus !== undefined && sessionStatus !== "idle"
const runtime = now - startedAt.getTime()
if (!task.progress?.lastUpdate) {

View File

@@ -10,16 +10,16 @@ export type BackgroundTaskStatus =
| "interrupt"
export interface ToolCallWindow {
lastSignature: string
consecutiveCount: number
threshold: number
toolSignatures: string[]
windowSize: number
thresholdPercent: number
}
export interface TaskProgress {
toolCalls: number
lastTool?: string
toolCallWindow?: ToolCallWindow
countedToolPartIDs?: Set<string>
countedToolPartIDs?: string[]
lastUpdate: Date
lastMessage?: string
lastMessageAt?: Date

View File

@@ -351,7 +351,7 @@ describe("boulder-state", () => {
expect(progress.isComplete).toBe(true)
})
test("should return isComplete true for empty plan", () => {
test("should return isComplete false for plan with content but no checkboxes", () => {
// given - plan with no checkboxes
const planPath = join(TEST_DIR, "empty-plan.md")
writeFileSync(planPath, "# Plan\nNo tasks here")
@@ -361,7 +361,7 @@ describe("boulder-state", () => {
// then
expect(progress.total).toBe(0)
expect(progress.isComplete).toBe(true)
expect(progress.isComplete).toBe(false)
})
test("should handle non-existent file", () => {

View File

@@ -133,7 +133,7 @@ export function getPlanProgress(planPath: string): PlanProgress {
return {
total,
completed,
isComplete: total === 0 || completed === total,
isComplete: total > 0 && completed === total,
}
} catch {
return { total: 0, completed: 0, isComplete: true }

View File

@@ -70,7 +70,7 @@ function isTokenLimitError(text: string): boolean {
return false
}
const lower = text.toLowerCase()
return TOKEN_LIMIT_KEYWORDS.some((kw) => lower.includes(kw))
return TOKEN_LIMIT_KEYWORDS.some((kw) => lower.includes(kw.toLowerCase()))
}
export function parseAnthropicTokenLimitError(err: unknown): ParsedTokenLimitError | null {

View File

@@ -18,9 +18,9 @@ function getLastAgentFromMessageDir(messageDir: string): string | null {
const files = readdirSync(messageDir)
.filter((fileName) => fileName.endsWith(".json"))
.sort()
.reverse()
for (let i = files.length - 1; i >= 0; i--) {
const fileName = files[i]
for (const fileName of files) {
try {
const content = readFileSync(join(messageDir, fileName), "utf-8")
const parsed = JSON.parse(content) as { agent?: unknown }

View File

@@ -44,6 +44,12 @@ export interface ExecutorOptions {
agent?: string
}
function filterDiscoveredCommandsByScope(
commands: DiscoveredCommandInfo[],
scope: DiscoveredCommandInfo["scope"],
): DiscoveredCommandInfo[] {
return commands.filter(command => command.scope === scope)
}
async function discoverAllCommands(options?: ExecutorOptions): Promise<CommandInfo[]> {
const discoveredCommands = discoverCommandsSync(process.cwd(), {
@@ -54,18 +60,14 @@ async function discoverAllCommands(options?: ExecutorOptions): Promise<CommandIn
const skills = options?.skills ?? await discoverAllSkills()
const skillCommands = skills.map(skillToCommandInfo)
const scopeOrder: DiscoveredCommandInfo["scope"][] = ["project", "user", "opencode-project", "opencode", "builtin", "plugin"]
const grouped = new Map<string, DiscoveredCommandInfo[]>()
for (const cmd of discoveredCommands) {
const list = grouped.get(cmd.scope) ?? []
list.push(cmd)
grouped.set(cmd.scope, list)
}
const orderedCommands = scopeOrder.flatMap((scope) => grouped.get(scope) ?? [])
return [
...skillCommands,
...orderedCommands,
...filterDiscoveredCommandsByScope(discoveredCommands, "project"),
...filterDiscoveredCommandsByScope(discoveredCommands, "user"),
...filterDiscoveredCommandsByScope(discoveredCommands, "opencode-project"),
...filterDiscoveredCommandsByScope(discoveredCommands, "opencode"),
...filterDiscoveredCommandsByScope(discoveredCommands, "builtin"),
...filterDiscoveredCommandsByScope(discoveredCommands, "plugin"),
]
}

View File

@@ -79,6 +79,8 @@ export function createToolExecuteAfterHandler(ctx: PluginInput, config: PluginCo
return
}
const claudeConfig = await loadClaudeHooksConfig()
const extendedConfig = await loadPluginExtendedConfig()
const cachedInput = getToolInput(input.sessionID, input.tool, input.callID) || {}
@@ -94,9 +96,6 @@ export function createToolExecuteAfterHandler(ctx: PluginInput, config: PluginCo
return
}
const claudeConfig = await loadClaudeHooksConfig()
const extendedConfig = await loadPluginExtendedConfig()
const postClient: PostToolUseClient = {
session: {
messages: (opts) => ctx.client.session.messages(opts),

View File

@@ -43,6 +43,8 @@ export function createToolExecuteBeforeHandler(ctx: PluginInput, config: PluginC
log("todowrite: parsed todos string to array", { sessionID: input.sessionID })
}
const claudeConfig = await loadClaudeHooksConfig()
const extendedConfig = await loadPluginExtendedConfig()
appendTranscriptEntry(input.sessionID, {
type: "tool_use",
@@ -57,9 +59,6 @@ export function createToolExecuteBeforeHandler(ctx: PluginInput, config: PluginC
return
}
const claudeConfig = await loadClaudeHooksConfig()
const extendedConfig = await loadPluginExtendedConfig()
const preCtx: PreToolUseContext = {
sessionId: input.sessionID,
toolName: input.tool,

View File

@@ -3,18 +3,6 @@ import type { CommentCheckerConfig } from "../../config/schema"
import z from "zod"
const ApplyPatchMetadataSchema = z.object({
files: z.array(
z.object({
filePath: z.string(),
movePath: z.string().optional(),
before: z.string(),
after: z.string(),
type: z.string().optional(),
}),
),
})
import {
initializeCommentCheckerCli,
getCommentCheckerCliPathPromise,
@@ -116,6 +104,17 @@ export function createCommentCheckerHooks(config?: CommentCheckerConfig) {
return
}
const ApplyPatchMetadataSchema = z.object({
files: z.array(
z.object({
filePath: z.string(),
movePath: z.string().optional(),
before: z.string(),
after: z.string(),
type: z.string().optional(),
}),
),
})
if (toolLower === "apply_patch") {
const parsed = ApplyPatchMetadataSchema.safeParse(output.metadata)

View File

@@ -23,10 +23,6 @@ export async function handleDetectedCompletion(
const { sessionID, state, loopState, directory, apiTimeoutMs } = input
if (state.ultrawork && !state.verification_pending) {
if (state.verification_session_id) {
ctx.client.session.abort({ path: { id: state.verification_session_id } }).catch(() => {})
}
const verificationState = loopState.markVerificationPending(sessionID)
if (!verificationState) {
log(`[${HOOK_NAME}] Failed to transition ultrawork loop to verification`, {

View File

@@ -10,7 +10,6 @@ describe("ulw-loop verification", () => {
const testDir = join(tmpdir(), `ulw-loop-verification-${Date.now()}`)
let promptCalls: Array<{ sessionID: string; text: string }>
let toastCalls: Array<{ title: string; message: string; variant: string }>
let abortCalls: Array<{ id: string }>
let parentTranscriptPath: string
let oracleTranscriptPath: string
@@ -26,10 +25,6 @@ describe("ulw-loop verification", () => {
return {}
},
messages: async () => ({ data: [] }),
abort: async (opts: { path: { id: string } }) => {
abortCalls.push({ id: opts.path.id })
return {}
},
},
tui: {
showToast: async (opts: { body: { title: string; message: string; variant: string } }) => {
@@ -45,7 +40,6 @@ describe("ulw-loop verification", () => {
beforeEach(() => {
promptCalls = []
toastCalls = []
abortCalls = []
parentTranscriptPath = join(testDir, "transcript-parent.jsonl")
oracleTranscriptPath = join(testDir, "transcript-oracle.jsonl")
@@ -391,96 +385,4 @@ describe("ulw-loop verification", () => {
expect(promptCalls).toHaveLength(2)
expect(promptCalls[1]?.text).toContain("Verification failed")
})
test("#given oracle verification fails #when loop restarts #then old oracle session is aborted", async () => {
const sessionMessages: Record<string, unknown[]> = {
"session-123": [{}, {}, {}],
}
const hook = createRalphLoopHook({
...createMockPluginInput(),
client: {
...createMockPluginInput().client,
session: {
...createMockPluginInput().client.session,
messages: async (opts: { path: { id: string } }) => ({
data: sessionMessages[opts.path.id] ?? [],
}),
},
},
} as Parameters<typeof createRalphLoopHook>[0], {
getTranscriptPath: (sessionID) => sessionID === "ses-oracle" ? oracleTranscriptPath : parentTranscriptPath,
})
hook.startLoop("session-123", "Build API", { ultrawork: true })
writeFileSync(
parentTranscriptPath,
`${JSON.stringify({ type: "tool_result", timestamp: new Date().toISOString(), tool_output: { output: "done <promise>DONE</promise>" } })}\n`,
)
await hook.event({ event: { type: "session.idle", properties: { sessionID: "session-123" } } })
writeState(testDir, {
...hook.getState()!,
verification_session_id: "ses-oracle",
})
writeFileSync(
oracleTranscriptPath,
`${JSON.stringify({ type: "tool_result", timestamp: new Date().toISOString(), tool_output: { output: "verification failed: missing tests" } })}\n`,
)
await hook.event({ event: { type: "session.idle", properties: { sessionID: "ses-oracle" } } })
expect(abortCalls).toHaveLength(1)
expect(abortCalls[0].id).toBe("ses-oracle")
})
test("#given ulw loop re-enters verification #when DONE detected again after failed verification #then previous verification session is aborted", async () => {
const sessionMessages: Record<string, unknown[]> = {
"session-123": [{}, {}, {}],
}
const hook = createRalphLoopHook({
...createMockPluginInput(),
client: {
...createMockPluginInput().client,
session: {
...createMockPluginInput().client.session,
messages: async (opts: { path: { id: string } }) => ({
data: sessionMessages[opts.path.id] ?? [],
}),
},
},
} as Parameters<typeof createRalphLoopHook>[0], {
getTranscriptPath: (sessionID) => sessionID === "ses-oracle" ? oracleTranscriptPath : parentTranscriptPath,
})
hook.startLoop("session-123", "Build API", { ultrawork: true })
writeFileSync(
parentTranscriptPath,
`${JSON.stringify({ type: "tool_result", timestamp: new Date().toISOString(), tool_output: { output: "done <promise>DONE</promise>" } })}\n`,
)
await hook.event({ event: { type: "session.idle", properties: { sessionID: "session-123" } } })
writeState(testDir, {
...hook.getState()!,
verification_session_id: "ses-oracle",
})
writeFileSync(
oracleTranscriptPath,
`${JSON.stringify({ type: "tool_result", timestamp: new Date().toISOString(), tool_output: { output: "failed" } })}\n`,
)
await hook.event({ event: { type: "session.idle", properties: { sessionID: "ses-oracle" } } })
abortCalls.length = 0
writeFileSync(
parentTranscriptPath,
`${JSON.stringify({ type: "tool_result", timestamp: new Date().toISOString(), tool_output: { output: "fixed it <promise>DONE</promise>" } })}\n`,
)
writeState(testDir, {
...hook.getState()!,
verification_session_id: "ses-oracle-old",
})
await hook.event({ event: { type: "session.idle", properties: { sessionID: "session-123" } } })
expect(abortCalls).toHaveLength(1)
expect(abortCalls[0].id).toBe("ses-oracle-old")
})
})

View File

@@ -68,10 +68,6 @@ export async function handleFailedVerification(
return false
}
if (state.verification_session_id) {
ctx.client.session.abort({ path: { id: state.verification_session_id } }).catch(() => {})
}
const resumedState = loopState.restartAfterFailedVerification(
parentSessionID,
messageCountAtStart,

View File

@@ -28,8 +28,6 @@ export function getErrorMessage(error: unknown): string {
}
}
const DEFAULT_RETRY_PATTERN = new RegExp(`\\b(${DEFAULT_CONFIG.retry_on_errors.join("|")})\\b`)
export function extractStatusCode(error: unknown, retryOnErrors?: number[]): number | undefined {
if (!error) return undefined
@@ -47,9 +45,8 @@ export function extractStatusCode(error: unknown, retryOnErrors?: number[]): num
return statusCode
}
const pattern = retryOnErrors
? new RegExp(`\\b(${retryOnErrors.join("|")})\\b`)
: DEFAULT_RETRY_PATTERN
const codes = retryOnErrors ?? DEFAULT_CONFIG.retry_on_errors
const pattern = new RegExp(`\\b(${codes.join("|")})\\b`)
const message = getErrorMessage(error)
const statusMatch = message.match(pattern)
if (statusMatch) {

View File

@@ -32,10 +32,8 @@ const MULTILINGUAL_KEYWORDS = [
"fikir", "berfikir",
]
const COMBINED_THINK_PATTERN = new RegExp(
`\\b(?:ultrathink|think)\\b|${MULTILINGUAL_KEYWORDS.join("|")}`,
"i"
)
const MULTILINGUAL_PATTERNS = MULTILINGUAL_KEYWORDS.map((kw) => new RegExp(kw, "i"))
const THINK_PATTERNS = [...ENGLISH_PATTERNS, ...MULTILINGUAL_PATTERNS]
const CODE_BLOCK_PATTERN = /```[\s\S]*?```/g
const INLINE_CODE_PATTERN = /`[^`]+`/g
@@ -46,7 +44,7 @@ function removeCodeBlocks(text: string): string {
export function detectThinkKeyword(text: string): boolean {
const textWithoutCode = removeCodeBlocks(text)
return COMBINED_THINK_PATTERN.test(textWithoutCode)
return THINK_PATTERNS.some((pattern) => pattern.test(textWithoutCode))
}
export function extractPromptText(

View File

@@ -97,7 +97,6 @@ export async function handleSessionIdle(args: {
}
if (!todos || todos.length === 0) {
sessionStateStore.resetContinuationProgress(sessionID)
sessionStateStore.resetContinuationProgress(sessionID)
log(`[${HOOK_NAME}] No todos`, { sessionID })
return
@@ -105,7 +104,6 @@ export async function handleSessionIdle(args: {
const incompleteCount = getIncompleteCount(todos)
if (incompleteCount === 0) {
sessionStateStore.resetContinuationProgress(sessionID)
sessionStateStore.resetContinuationProgress(sessionID)
log(`[${HOOK_NAME}] All todos complete`, { sessionID, total: todos.length })
return
@@ -126,14 +124,22 @@ export async function handleSessionIdle(args: {
}
if (state.consecutiveFailures >= MAX_CONSECUTIVE_FAILURES) {
log(`[${HOOK_NAME}] Skipped: max consecutive failures reached`, { sessionID, consecutiveFailures: state.consecutiveFailures })
log(`[${HOOK_NAME}] Skipped: max consecutive failures reached`, {
sessionID,
consecutiveFailures: state.consecutiveFailures,
maxConsecutiveFailures: MAX_CONSECUTIVE_FAILURES,
})
return
}
const effectiveCooldown =
CONTINUATION_COOLDOWN_MS * Math.pow(2, Math.min(state.consecutiveFailures, 5))
if (state.lastInjectedAt && Date.now() - state.lastInjectedAt < effectiveCooldown) {
log(`[${HOOK_NAME}] Skipped: cooldown active`, { sessionID, effectiveCooldown, consecutiveFailures: state.consecutiveFailures })
log(`[${HOOK_NAME}] Skipped: cooldown active`, {
sessionID,
effectiveCooldown,
consecutiveFailures: state.consecutiveFailures,
})
return
}

View File

@@ -411,7 +411,10 @@ describe("createWriteExistingFileGuardHook", () => {
try {
symlinkSync(targetFile, symlinkPath)
} catch (error) {
// Symlinks not supported in this environment — skip
console.warn(
"Skipping symlink test: symlinks are not supported or cannot be created in this environment.",
error
)
return
}

View File

@@ -32,7 +32,10 @@ export function createPluginInterface(args: {
return {
tool: tools,
"chat.params": createChatParamsHandler({ anthropicEffort: hooks.anthropicEffort }),
"chat.params": async (input: unknown, output: unknown) => {
const handler = createChatParamsHandler({ anthropicEffort: hooks.anthropicEffort })
await handler(input, output)
},
"chat.headers": createChatHeadersHandler({ ctx }),

View File

@@ -1,30 +1,45 @@
/// <reference types="bun-types" />
import { beforeEach, afterEach, describe, expect, test } from "bun:test"
import { beforeAll, beforeEach, afterEach, describe, expect, mock, test } from "bun:test"
import { existsSync, mkdirSync, mkdtempSync, readFileSync, rmSync, writeFileSync } from "node:fs"
import { tmpdir } from "node:os"
import { join } from "node:path"
import {
createConnectedProvidersCacheStore,
} from "./connected-providers-cache"
import * as dataPath from "./data-path"
let fakeUserCacheRoot = ""
let testCacheDir = ""
let testCacheStore: ReturnType<typeof createConnectedProvidersCacheStore>
let moduleImportCounter = 0
const getOmoOpenCodeCacheDirMock = mock(() => testCacheDir)
let updateConnectedProvidersCache: typeof import("./connected-providers-cache").updateConnectedProvidersCache
let readProviderModelsCache: typeof import("./connected-providers-cache").readProviderModelsCache
async function prepareConnectedProvidersCacheTestModule(): Promise<void> {
testCacheDir = mkdtempSync(join(tmpdir(), "connected-providers-cache-test-"))
getOmoOpenCodeCacheDirMock.mockClear()
mock.module("./data-path", () => ({
getOmoOpenCodeCacheDir: getOmoOpenCodeCacheDirMock,
}))
moduleImportCounter += 1
;({ updateConnectedProvidersCache, readProviderModelsCache } = await import(`./connected-providers-cache?test=${moduleImportCounter}`))
}
describe("updateConnectedProvidersCache", () => {
beforeEach(() => {
fakeUserCacheRoot = mkdtempSync(join(tmpdir(), "connected-providers-user-cache-"))
testCacheDir = join(fakeUserCacheRoot, "oh-my-opencode")
testCacheStore = createConnectedProvidersCacheStore(() => testCacheDir)
beforeAll(() => {
mock.restore()
})
beforeEach(async () => {
mock.restore()
await prepareConnectedProvidersCacheTestModule()
})
afterEach(() => {
if (existsSync(fakeUserCacheRoot)) {
rmSync(fakeUserCacheRoot, { recursive: true, force: true })
mock.restore()
if (existsSync(testCacheDir)) {
rmSync(testCacheDir, { recursive: true, force: true })
}
fakeUserCacheRoot = ""
testCacheDir = ""
})
@@ -61,10 +76,10 @@ describe("updateConnectedProvidersCache", () => {
}
//#when
await testCacheStore.updateConnectedProvidersCache(mockClient)
await updateConnectedProvidersCache(mockClient)
//#then
const cache = testCacheStore.readProviderModelsCache()
const cache = readProviderModelsCache()
expect(cache).not.toBeNull()
expect(cache!.connected).toEqual(["openai", "anthropic"])
expect(cache!.models).toEqual({
@@ -94,10 +109,10 @@ describe("updateConnectedProvidersCache", () => {
}
//#when
await testCacheStore.updateConnectedProvidersCache(mockClient)
await updateConnectedProvidersCache(mockClient)
//#then
const cache = testCacheStore.readProviderModelsCache()
const cache = readProviderModelsCache()
expect(cache).not.toBeNull()
expect(cache!.models).toEqual({})
})
@@ -115,10 +130,10 @@ describe("updateConnectedProvidersCache", () => {
}
//#when
await testCacheStore.updateConnectedProvidersCache(mockClient)
await updateConnectedProvidersCache(mockClient)
//#then
const cache = testCacheStore.readProviderModelsCache()
const cache = readProviderModelsCache()
expect(cache).not.toBeNull()
expect(cache!.models).toEqual({})
})
@@ -128,44 +143,25 @@ describe("updateConnectedProvidersCache", () => {
const mockClient = {}
//#when
await testCacheStore.updateConnectedProvidersCache(mockClient)
await updateConnectedProvidersCache(mockClient)
//#then
const cache = testCacheStore.readProviderModelsCache()
const cache = readProviderModelsCache()
expect(cache).toBeNull()
})
test("does not remove unrelated files in the cache directory", async () => {
test("does not remove the user's real cache directory during test setup", async () => {
//#given
const realCacheDir = join(fakeUserCacheRoot, "oh-my-opencode")
const realCacheDir = join(dataPath.getCacheDir(), "oh-my-opencode")
const sentinelPath = join(realCacheDir, "connected-providers-cache.test-sentinel.json")
mkdirSync(realCacheDir, { recursive: true })
writeFileSync(sentinelPath, JSON.stringify({ keep: true }))
const mockClient = {
provider: {
list: async () => ({
data: {
connected: ["openai"],
all: [
{
id: "openai",
models: {
"gpt-5.4": { id: "gpt-5.4" },
},
},
],
},
}),
},
}
try {
//#when
await testCacheStore.updateConnectedProvidersCache(mockClient)
await prepareConnectedProvidersCacheTestModule()
//#then
expect(testCacheStore.readConnectedProvidersCache()).toEqual(["openai"])
expect(existsSync(sentinelPath)).toBe(true)
expect(readFileSync(sentinelPath, "utf-8")).toBe(JSON.stringify({ keep: true }))
} finally {

View File

@@ -25,190 +25,172 @@ interface ProviderModelsCache {
updatedAt: string
}
export function createConnectedProvidersCacheStore(
getCacheDir: () => string = dataPath.getOmoOpenCodeCacheDir
) {
function getCacheFilePath(filename: string): string {
return join(getCacheDir(), filename)
}
function getCacheFilePath(filename: string): string {
return join(dataPath.getOmoOpenCodeCacheDir(), filename)
}
let memConnected: string[] | null | undefined
let memProviderModels: ProviderModelsCache | null | undefined
function ensureCacheDir(): void {
const cacheDir = getCacheDir()
if (!existsSync(cacheDir)) {
mkdirSync(cacheDir, { recursive: true })
}
}
function readConnectedProvidersCache(): string[] | null {
if (memConnected !== undefined) return memConnected
const cacheFile = getCacheFilePath(CONNECTED_PROVIDERS_CACHE_FILE)
if (!existsSync(cacheFile)) {
log("[connected-providers-cache] Cache file not found", { cacheFile })
memConnected = null
return null
}
try {
const content = readFileSync(cacheFile, "utf-8")
const data = JSON.parse(content) as ConnectedProvidersCache
log("[connected-providers-cache] Read cache", { count: data.connected.length, updatedAt: data.updatedAt })
memConnected = data.connected
return data.connected
} catch (err) {
log("[connected-providers-cache] Error reading cache", { error: String(err) })
memConnected = null
return null
}
}
function hasConnectedProvidersCache(): boolean {
const cacheFile = getCacheFilePath(CONNECTED_PROVIDERS_CACHE_FILE)
return existsSync(cacheFile)
}
function writeConnectedProvidersCache(connected: string[]): void {
ensureCacheDir()
const cacheFile = getCacheFilePath(CONNECTED_PROVIDERS_CACHE_FILE)
const data: ConnectedProvidersCache = {
connected,
updatedAt: new Date().toISOString(),
}
try {
writeFileSync(cacheFile, JSON.stringify(data, null, 2))
memConnected = connected
log("[connected-providers-cache] Cache written", { count: connected.length })
} catch (err) {
log("[connected-providers-cache] Error writing cache", { error: String(err) })
}
}
function readProviderModelsCache(): ProviderModelsCache | null {
if (memProviderModels !== undefined) return memProviderModels
const cacheFile = getCacheFilePath(PROVIDER_MODELS_CACHE_FILE)
if (!existsSync(cacheFile)) {
log("[connected-providers-cache] Provider-models cache file not found", { cacheFile })
memProviderModels = null
return null
}
try {
const content = readFileSync(cacheFile, "utf-8")
const data = JSON.parse(content) as ProviderModelsCache
log("[connected-providers-cache] Read provider-models cache", {
providerCount: Object.keys(data.models).length,
updatedAt: data.updatedAt,
})
memProviderModels = data
return data
} catch (err) {
log("[connected-providers-cache] Error reading provider-models cache", { error: String(err) })
memProviderModels = null
return null
}
}
function hasProviderModelsCache(): boolean {
const cacheFile = getCacheFilePath(PROVIDER_MODELS_CACHE_FILE)
return existsSync(cacheFile)
}
function writeProviderModelsCache(data: { models: Record<string, string[]>; connected: string[] }): void {
ensureCacheDir()
const cacheFile = getCacheFilePath(PROVIDER_MODELS_CACHE_FILE)
const cacheData: ProviderModelsCache = {
...data,
updatedAt: new Date().toISOString(),
}
try {
writeFileSync(cacheFile, JSON.stringify(cacheData, null, 2))
memProviderModels = cacheData
log("[connected-providers-cache] Provider-models cache written", {
providerCount: Object.keys(data.models).length,
})
} catch (err) {
log("[connected-providers-cache] Error writing provider-models cache", { error: String(err) })
}
}
async function updateConnectedProvidersCache(client: {
provider?: {
list?: () => Promise<{
data?: {
connected?: string[]
all?: Array<{ id: string; models?: Record<string, unknown> }>
}
}>
}
}): Promise<void> {
if (!client?.provider?.list) {
log("[connected-providers-cache] client.provider.list not available")
return
}
try {
const result = await client.provider.list()
const connected = result.data?.connected ?? []
log("[connected-providers-cache] Fetched connected providers", {
count: connected.length,
providers: connected,
})
writeConnectedProvidersCache(connected)
const modelsByProvider: Record<string, string[]> = {}
const allProviders = result.data?.all ?? []
for (const provider of allProviders) {
if (provider.models) {
const modelIds = Object.keys(provider.models)
if (modelIds.length > 0) {
modelsByProvider[provider.id] = modelIds
}
}
}
log("[connected-providers-cache] Extracted models from provider list", {
providerCount: Object.keys(modelsByProvider).length,
totalModels: Object.values(modelsByProvider).reduce((sum, ids) => sum + ids.length, 0),
})
writeProviderModelsCache({
models: modelsByProvider,
connected,
})
} catch (err) {
log("[connected-providers-cache] Error updating cache", { error: String(err) })
}
}
return {
readConnectedProvidersCache,
hasConnectedProvidersCache,
readProviderModelsCache,
hasProviderModelsCache,
writeProviderModelsCache,
updateConnectedProvidersCache,
function ensureCacheDir(): void {
const cacheDir = dataPath.getOmoOpenCodeCacheDir()
if (!existsSync(cacheDir)) {
mkdirSync(cacheDir, { recursive: true })
}
}
const defaultConnectedProvidersCacheStore = createConnectedProvidersCacheStore(
() => dataPath.getOmoOpenCodeCacheDir()
)
/**
* Read the connected providers cache.
* Returns the list of connected provider IDs, or null if cache doesn't exist.
*/
export function readConnectedProvidersCache(): string[] | null {
const cacheFile = getCacheFilePath(CONNECTED_PROVIDERS_CACHE_FILE)
export const {
readConnectedProvidersCache,
hasConnectedProvidersCache,
readProviderModelsCache,
hasProviderModelsCache,
writeProviderModelsCache,
updateConnectedProvidersCache,
} = defaultConnectedProvidersCacheStore
if (!existsSync(cacheFile)) {
log("[connected-providers-cache] Cache file not found", { cacheFile })
return null
}
try {
const content = readFileSync(cacheFile, "utf-8")
const data = JSON.parse(content) as ConnectedProvidersCache
log("[connected-providers-cache] Read cache", { count: data.connected.length, updatedAt: data.updatedAt })
return data.connected
} catch (err) {
log("[connected-providers-cache] Error reading cache", { error: String(err) })
return null
}
}
/**
* Check if connected providers cache exists.
*/
export function hasConnectedProvidersCache(): boolean {
const cacheFile = getCacheFilePath(CONNECTED_PROVIDERS_CACHE_FILE)
return existsSync(cacheFile)
}
/**
* Write the connected providers cache.
*/
function writeConnectedProvidersCache(connected: string[]): void {
ensureCacheDir()
const cacheFile = getCacheFilePath(CONNECTED_PROVIDERS_CACHE_FILE)
const data: ConnectedProvidersCache = {
connected,
updatedAt: new Date().toISOString(),
}
try {
writeFileSync(cacheFile, JSON.stringify(data, null, 2))
log("[connected-providers-cache] Cache written", { count: connected.length })
} catch (err) {
log("[connected-providers-cache] Error writing cache", { error: String(err) })
}
}
/**
* Read the provider-models cache.
* Returns the cache data, or null if cache doesn't exist.
*/
export function readProviderModelsCache(): ProviderModelsCache | null {
const cacheFile = getCacheFilePath(PROVIDER_MODELS_CACHE_FILE)
if (!existsSync(cacheFile)) {
log("[connected-providers-cache] Provider-models cache file not found", { cacheFile })
return null
}
try {
const content = readFileSync(cacheFile, "utf-8")
const data = JSON.parse(content) as ProviderModelsCache
log("[connected-providers-cache] Read provider-models cache", {
providerCount: Object.keys(data.models).length,
updatedAt: data.updatedAt
})
return data
} catch (err) {
log("[connected-providers-cache] Error reading provider-models cache", { error: String(err) })
return null
}
}
/**
* Check if provider-models cache exists.
*/
export function hasProviderModelsCache(): boolean {
const cacheFile = getCacheFilePath(PROVIDER_MODELS_CACHE_FILE)
return existsSync(cacheFile)
}
/**
* Write the provider-models cache.
*/
export function writeProviderModelsCache(data: { models: Record<string, string[]>; connected: string[] }): void {
ensureCacheDir()
const cacheFile = getCacheFilePath(PROVIDER_MODELS_CACHE_FILE)
const cacheData: ProviderModelsCache = {
...data,
updatedAt: new Date().toISOString(),
}
try {
writeFileSync(cacheFile, JSON.stringify(cacheData, null, 2))
log("[connected-providers-cache] Provider-models cache written", {
providerCount: Object.keys(data.models).length
})
} catch (err) {
log("[connected-providers-cache] Error writing provider-models cache", { error: String(err) })
}
}
/**
* Update the connected providers cache by fetching from the client.
* Also updates the provider-models cache with model lists per provider.
*/
export async function updateConnectedProvidersCache(client: {
provider?: {
list?: () => Promise<{
data?: {
connected?: string[]
all?: Array<{ id: string; models?: Record<string, unknown> }>
}
}>
}
}): Promise<void> {
if (!client?.provider?.list) {
log("[connected-providers-cache] client.provider.list not available")
return
}
try {
const result = await client.provider.list()
const connected = result.data?.connected ?? []
log("[connected-providers-cache] Fetched connected providers", { count: connected.length, providers: connected })
writeConnectedProvidersCache(connected)
const modelsByProvider: Record<string, string[]> = {}
const allProviders = result.data?.all ?? []
for (const provider of allProviders) {
if (provider.models) {
const modelIds = Object.keys(provider.models)
if (modelIds.length > 0) {
modelsByProvider[provider.id] = modelIds
}
}
}
log("[connected-providers-cache] Extracted models from provider list", {
providerCount: Object.keys(modelsByProvider).length,
totalModels: Object.values(modelsByProvider).reduce((sum, ids) => sum + ids.length, 0),
})
writeProviderModelsCache({
models: modelsByProvider,
connected,
})
} catch (err) {
log("[connected-providers-cache] Error updating cache", { error: String(err) })
}
}

View File

@@ -74,7 +74,7 @@ export async function resolveFileReferencesInText(
let resolved = text
for (const [pattern, replacement] of replacements.entries()) {
resolved = resolved.replaceAll(pattern, replacement)
resolved = resolved.split(pattern).join(replacement)
}
if (findFileReferences(resolved).length > 0 && depth + 1 < maxDepth) {

View File

@@ -1,42 +1,16 @@
// Shared logging utility for the plugin
import * as fs from "fs"
import * as os from "os"
import * as path from "path"
const logFile = path.join(os.tmpdir(), "oh-my-opencode.log")
let buffer: string[] = []
let flushTimer: ReturnType<typeof setTimeout> | null = null
const FLUSH_INTERVAL_MS = 500
const BUFFER_SIZE_LIMIT = 50
function flush(): void {
if (buffer.length === 0) return
const data = buffer.join("")
buffer = []
try {
fs.appendFileSync(logFile, data)
} catch {
}
}
function scheduleFlush(): void {
if (flushTimer) return
flushTimer = setTimeout(() => {
flushTimer = null
flush()
}, FLUSH_INTERVAL_MS)
}
export function log(message: string, data?: unknown): void {
try {
const timestamp = new Date().toISOString()
const logEntry = `[${timestamp}] ${message} ${data ? JSON.stringify(data) : ""}\n`
buffer.push(logEntry)
if (buffer.length >= BUFFER_SIZE_LIMIT) {
flush()
} else {
scheduleFlush()
}
fs.appendFileSync(logFile, logEntry)
} catch {
}
}

View File

@@ -9,8 +9,6 @@ function escapeRegexExceptAsterisk(str: string): string {
return str.replace(/[.+?^${}()|[\]\\]/g, "\\$&")
}
const regexCache = new Map<string, RegExp>()
export function matchesToolMatcher(toolName: string, matcher: string): boolean {
if (!matcher) {
return true
@@ -19,12 +17,8 @@ export function matchesToolMatcher(toolName: string, matcher: string): boolean {
return patterns.some((p) => {
if (p.includes("*")) {
// First escape regex special chars (except *), then convert * to .*
let regex = regexCache.get(p)
if (!regex) {
const escaped = escapeRegexExceptAsterisk(p)
regex = new RegExp(`^${escaped.replace(/\*/g, ".*")}$`, "i")
regexCache.set(p, regex)
}
const escaped = escapeRegexExceptAsterisk(p)
const regex = new RegExp(`^${escaped.replace(/\*/g, ".*")}$`, "i")
return regex.test(toolName)
}
return p.toLowerCase() === toolName.toLowerCase()

View File

@@ -1,4 +1,4 @@
import { afterEach, beforeEach, describe, expect, it, spyOn } from "bun:test"
import { describe, it, expect, beforeAll, afterAll } from "bun:test"
import {
isPortAvailable,
findAvailablePort,
@@ -6,283 +6,96 @@ import {
DEFAULT_SERVER_PORT,
} from "./port-utils"
const HOSTNAME = "127.0.0.1"
const REAL_PORT_SEARCH_WINDOW = 200
function supportsRealSocketBinding(): boolean {
try {
const server = Bun.serve({
port: 0,
hostname: HOSTNAME,
fetch: () => new Response("probe"),
})
server.stop(true)
return true
} catch {
return false
}
}
const canBindRealSockets = supportsRealSocketBinding()
describe("port-utils", () => {
if (canBindRealSockets) {
function startRealBlocker(port: number = 0) {
return Bun.serve({
describe("isPortAvailable", () => {
it("#given unused port #when checking availability #then returns true", async () => {
const port = 59999
const result = await isPortAvailable(port)
expect(result).toBe(true)
})
it("#given port in use #when checking availability #then returns false", async () => {
const port = 59998
const blocker = Bun.serve({
port,
hostname: HOSTNAME,
hostname: "127.0.0.1",
fetch: () => new Response("blocked"),
})
}
async function findContiguousAvailableStart(length: number): Promise<number> {
const probe = startRealBlocker()
const seedPort = probe.port
probe.stop(true)
for (let candidate = seedPort; candidate < seedPort + REAL_PORT_SEARCH_WINDOW; candidate++) {
const checks = await Promise.all(
Array.from({ length }, async (_, offset) => isPortAvailable(candidate + offset, HOSTNAME))
)
if (checks.every(Boolean)) {
return candidate
}
try {
const result = await isPortAvailable(port)
expect(result).toBe(false)
} finally {
blocker.stop(true)
}
throw new Error(`Could not find ${length} contiguous available ports`)
}
describe("with real sockets", () => {
describe("isPortAvailable", () => {
it("#given unused port #when checking availability #then returns true", async () => {
const blocker = startRealBlocker()
const port = blocker.port
blocker.stop(true)
const result = await isPortAvailable(port)
expect(result).toBe(true)
})
it("#given port in use #when checking availability #then returns false", async () => {
const blocker = startRealBlocker()
const port = blocker.port
try {
const result = await isPortAvailable(port)
expect(result).toBe(false)
} finally {
blocker.stop(true)
}
})
})
describe("findAvailablePort", () => {
it("#given start port available #when finding port #then returns start port", async () => {
const startPort = await findContiguousAvailableStart(1)
const result = await findAvailablePort(startPort)
expect(result).toBe(startPort)
})
it("#given start port blocked #when finding port #then returns next available", async () => {
const startPort = await findContiguousAvailableStart(2)
const blocker = startRealBlocker(startPort)
try {
const result = await findAvailablePort(startPort)
expect(result).toBe(startPort + 1)
} finally {
blocker.stop(true)
}
})
it("#given multiple ports blocked #when finding port #then skips all blocked", async () => {
const startPort = await findContiguousAvailableStart(4)
const blockers = [
startRealBlocker(startPort),
startRealBlocker(startPort + 1),
startRealBlocker(startPort + 2),
]
try {
const result = await findAvailablePort(startPort)
expect(result).toBe(startPort + 3)
} finally {
blockers.forEach((blocker) => blocker.stop(true))
}
})
})
describe("getAvailableServerPort", () => {
it("#given preferred port available #when getting port #then returns preferred with wasAutoSelected=false", async () => {
const preferredPort = await findContiguousAvailableStart(1)
const result = await getAvailableServerPort(preferredPort)
expect(result.port).toBe(preferredPort)
expect(result.wasAutoSelected).toBe(false)
})
it("#given preferred port blocked #when getting port #then returns alternative with wasAutoSelected=true", async () => {
const preferredPort = await findContiguousAvailableStart(2)
const blocker = startRealBlocker(preferredPort)
try {
const result = await getAvailableServerPort(preferredPort)
expect(result.port).toBe(preferredPort + 1)
expect(result.wasAutoSelected).toBe(true)
} finally {
blocker.stop(true)
}
})
})
})
} else {
const blockedSockets = new Set<string>()
let serveSpy: ReturnType<typeof spyOn>
})
function getSocketKey(port: number, hostname: string): string {
return `${hostname}:${port}`
}
beforeEach(() => {
blockedSockets.clear()
serveSpy = spyOn(Bun, "serve").mockImplementation(({ port, hostname }) => {
if (typeof port !== "number") {
throw new Error("Test expected numeric port")
}
const resolvedHostname = typeof hostname === "string" ? hostname : HOSTNAME
const socketKey = getSocketKey(port, resolvedHostname)
if (blockedSockets.has(socketKey)) {
const error = new Error(`Failed to start server. Is port ${port} in use?`) as Error & {
code?: string
syscall?: string
errno?: number
address?: string
port?: number
}
error.code = "EADDRINUSE"
error.syscall = "listen"
error.errno = 0
error.address = resolvedHostname
error.port = port
throw error
}
blockedSockets.add(socketKey)
return {
stop: (_force?: boolean) => {
blockedSockets.delete(socketKey)
},
} as { stop: (force?: boolean) => void }
})
describe("findAvailablePort", () => {
it("#given start port available #when finding port #then returns start port", async () => {
const startPort = 59997
const result = await findAvailablePort(startPort)
expect(result).toBe(startPort)
})
afterEach(() => {
expect(blockedSockets.size).toBe(0)
serveSpy.mockRestore()
blockedSockets.clear()
it("#given start port blocked #when finding port #then returns next available", async () => {
const startPort = 59996
const blocker = Bun.serve({
port: startPort,
hostname: "127.0.0.1",
fetch: () => new Response("blocked"),
})
try {
const result = await findAvailablePort(startPort)
expect(result).toBe(startPort + 1)
} finally {
blocker.stop(true)
}
})
describe("with mocked sockets fallback", () => {
describe("isPortAvailable", () => {
it("#given unused port #when checking availability #then returns true", async () => {
const port = 59999
it("#given multiple ports blocked #when finding port #then skips all blocked", async () => {
const startPort = 59993
const blockers = [
Bun.serve({ port: startPort, hostname: "127.0.0.1", fetch: () => new Response() }),
Bun.serve({ port: startPort + 1, hostname: "127.0.0.1", fetch: () => new Response() }),
Bun.serve({ port: startPort + 2, hostname: "127.0.0.1", fetch: () => new Response() }),
]
const result = await isPortAvailable(port)
expect(result).toBe(true)
expect(blockedSockets.size).toBe(0)
})
it("#given port in use #when checking availability #then returns false", async () => {
const port = 59998
const blocker = Bun.serve({
port,
hostname: HOSTNAME,
fetch: () => new Response("blocked"),
})
try {
const result = await isPortAvailable(port)
expect(result).toBe(false)
} finally {
blocker.stop(true)
}
})
it("#given custom hostname #when checking availability #then passes hostname through to Bun.serve", async () => {
const hostname = "192.0.2.10"
await isPortAvailable(59995, hostname)
expect(serveSpy.mock.calls[0]?.[0]?.hostname).toBe(hostname)
})
})
describe("findAvailablePort", () => {
it("#given start port available #when finding port #then returns start port", async () => {
const startPort = 59997
const result = await findAvailablePort(startPort)
expect(result).toBe(startPort)
})
it("#given start port blocked #when finding port #then returns next available", async () => {
const startPort = 59996
const blocker = Bun.serve({
port: startPort,
hostname: HOSTNAME,
fetch: () => new Response("blocked"),
})
try {
const result = await findAvailablePort(startPort)
expect(result).toBe(startPort + 1)
} finally {
blocker.stop(true)
}
})
it("#given multiple ports blocked #when finding port #then skips all blocked", async () => {
const startPort = 59993
const blockers = [
Bun.serve({ port: startPort, hostname: HOSTNAME, fetch: () => new Response() }),
Bun.serve({ port: startPort + 1, hostname: HOSTNAME, fetch: () => new Response() }),
Bun.serve({ port: startPort + 2, hostname: HOSTNAME, fetch: () => new Response() }),
]
try {
const result = await findAvailablePort(startPort)
expect(result).toBe(startPort + 3)
} finally {
blockers.forEach((blocker) => blocker.stop(true))
}
})
})
describe("getAvailableServerPort", () => {
it("#given preferred port available #when getting port #then returns preferred with wasAutoSelected=false", async () => {
const preferredPort = 59990
const result = await getAvailableServerPort(preferredPort)
expect(result.port).toBe(preferredPort)
expect(result.wasAutoSelected).toBe(false)
})
it("#given preferred port blocked #when getting port #then returns alternative with wasAutoSelected=true", async () => {
const preferredPort = 59989
const blocker = Bun.serve({
port: preferredPort,
hostname: HOSTNAME,
fetch: () => new Response("blocked"),
})
try {
const result = await getAvailableServerPort(preferredPort)
expect(result.port).toBe(preferredPort + 1)
expect(result.wasAutoSelected).toBe(true)
} finally {
blocker.stop(true)
}
})
})
try {
const result = await findAvailablePort(startPort)
expect(result).toBe(startPort + 3)
} finally {
blockers.forEach((b) => b.stop(true))
}
})
}
})
describe("getAvailableServerPort", () => {
it("#given preferred port available #when getting port #then returns preferred with wasAutoSelected=false", async () => {
const preferredPort = 59990
const result = await getAvailableServerPort(preferredPort)
expect(result.port).toBe(preferredPort)
expect(result.wasAutoSelected).toBe(false)
})
it("#given preferred port blocked #when getting port #then returns alternative with wasAutoSelected=true", async () => {
const preferredPort = 59989
const blocker = Bun.serve({
port: preferredPort,
hostname: "127.0.0.1",
fetch: () => new Response("blocked"),
})
try {
const result = await getAvailableServerPort(preferredPort)
expect(result.port).toBeGreaterThan(preferredPort)
expect(result.wasAutoSelected).toBe(true)
} finally {
blocker.stop(true)
}
})
})
describe("DEFAULT_SERVER_PORT", () => {
it("#given constant #when accessed #then returns 4096", () => {

View File

@@ -11,14 +11,6 @@ import {
} from "./edit-operation-primitives"
import { validateLineRefs } from "./validation"
function arraysEqual(a: string[], b: string[]): boolean {
if (a.length !== b.length) return false
for (let i = 0; i < a.length; i++) {
if (a[i] !== b[i]) return false
}
return true
}
export interface HashlineApplyReport {
content: string
noopEdits: number
@@ -59,7 +51,7 @@ export function applyHashlineEditsWithReport(content: string, edits: HashlineEdi
const next = edit.end
? applyReplaceLines(lines, edit.pos, edit.end, edit.lines, { skipValidation: true })
: applySetLine(lines, edit.pos, edit.lines, { skipValidation: true })
if (arraysEqual(next, lines)) {
if (next.join("\n") === lines.join("\n")) {
noopEdits += 1
break
}
@@ -70,7 +62,7 @@ export function applyHashlineEditsWithReport(content: string, edits: HashlineEdi
const next = edit.pos
? applyInsertAfter(lines, edit.pos, edit.lines, { skipValidation: true })
: applyAppend(lines, edit.lines)
if (arraysEqual(next, lines)) {
if (next.join("\n") === lines.join("\n")) {
noopEdits += 1
break
}
@@ -81,7 +73,7 @@ export function applyHashlineEditsWithReport(content: string, edits: HashlineEdi
const next = edit.pos
? applyInsertBefore(lines, edit.pos, edit.lines, { skipValidation: true })
: applyPrepend(lines, edit.lines)
if (arraysEqual(next, lines)) {
if (next.join("\n") === lines.join("\n")) {
noopEdits += 1
break
}

View File

@@ -86,17 +86,15 @@ export async function* streamHashLinesFromUtf8(
pending += text
const chunksToYield: string[] = []
let lastIdx = 0
while (true) {
const idx = pending.indexOf("\n", lastIdx)
const idx = pending.indexOf("\n")
if (idx === -1) break
const line = pending.slice(lastIdx, idx)
lastIdx = idx + 1
const line = pending.slice(0, idx)
pending = pending.slice(idx + 1)
endedWithNewline = true
chunksToYield.push(...pushLine(line))
}
pending = pending.slice(lastIdx)
if (pending.length > 0) endedWithNewline = false
return chunksToYield
}

View File

@@ -4,7 +4,7 @@ export function generateHashlineDiff(oldContent: string, newContent: string, fil
const oldLines = oldContent.split("\n")
const newLines = newContent.split("\n")
const parts: string[] = [`--- ${filePath}\n+++ ${filePath}\n`]
let diff = `--- ${filePath}\n+++ ${filePath}\n`
const maxLines = Math.max(oldLines.length, newLines.length)
for (let i = 0; i < maxLines; i += 1) {
@@ -14,18 +14,18 @@ export function generateHashlineDiff(oldContent: string, newContent: string, fil
const hash = computeLineHash(lineNum, newLine)
if (i >= oldLines.length) {
parts.push(`+ ${lineNum}#${hash}|${newLine}\n`)
diff += `+ ${lineNum}#${hash}|${newLine}\n`
continue
}
if (i >= newLines.length) {
parts.push(`- ${lineNum}# |${oldLine}\n`)
diff += `- ${lineNum}# |${oldLine}\n`
continue
}
if (oldLine !== newLine) {
parts.push(`- ${lineNum}# |${oldLine}\n`)
parts.push(`+ ${lineNum}#${hash}|${newLine}\n`)
diff += `- ${lineNum}# |${oldLine}\n`
diff += `+ ${lineNum}#${hash}|${newLine}\n`
}
}
return parts.join("")
return diff
}

View File

@@ -21,7 +21,7 @@ describe("isServerInstalled", () => {
try {
rmSync(tempDir, { recursive: true, force: true })
} catch (e) {
// cleanup failed — ignored
console.error(`Failed to clean up temp dir: ${e}`)
}
if (process.platform === "win32") {

View File

@@ -45,8 +45,6 @@ Returns summary format: id, subject, status, owner, blockedBy (not full descript
}
}
const taskMap = new Map(allTasks.map((t) => [t.id, t]))
// Filter out completed and deleted tasks
const activeTasks = allTasks.filter(
(task) => task.status !== "completed" && task.status !== "deleted"
@@ -56,7 +54,7 @@ Returns summary format: id, subject, status, owner, blockedBy (not full descript
const summaries: TaskSummary[] = activeTasks.map((task) => {
// Filter blockedBy to only include unresolved (non-completed) blockers
const unresolvedBlockers = task.blockedBy.filter((blockerId) => {
const blockerTask = taskMap.get(blockerId)
const blockerTask = allTasks.find((t) => t.id === blockerId)
// Include if blocker doesn't exist (missing) or if it's not completed
return !blockerTask || blockerTask.status !== "completed"
})