feat(gpt-permission-continuation): add context-aware continuation prompts

- Add buildContextualContinuationPrompt to include assistant message context
- Move extractPermissionPhrase to detector module for better separation
- Block continuation injection in subagent sessions
- Update handler to use contextual prompts with last response context
- Add tests for subagent session blocking and contextual prompts
- Update todo coordination test to verify new prompt format

🤖 Generated with the assistance of OhMyOpenCode
This commit is contained in:
YeonGyu-Kim
2026-03-18 17:52:32 +09:00
parent ce8957e1e1
commit 6b66f69433
5 changed files with 96 additions and 21 deletions

View File

@@ -21,3 +21,12 @@ export function detectStallPattern(
return patterns.some((pattern) => trailingSegment.startsWith(pattern.toLowerCase()))
}
/**
 * Extracts the trailing sentence of an assistant reply, normalized to
 * lowercase, so callers can match it against known permission phrases.
 * Returns null when the text yields no trailing segment.
 */
export function extractPermissionPhrase(text: string): string | null {
  // Only the tail of the message matters; permission requests appear at the end.
  const recentText = text.slice(-800)
  const nonEmptyLines = recentText
    .split("\n")
    .map((line) => line.trim())
    .filter((line) => line.length > 0)
  // Flatten the last few lines into one whitespace-normalized "hot zone".
  const hotZone = nonEmptyLines.slice(-3).join(" ").trim().replace(/\s+/g, " ")
  // Split into sentences (lookbehind keeps the terminator) and take the last one.
  const sentences = hotZone.split(/(?<=[.!?])\s+/)
  const lastSentence = sentences.at(-1)?.trim().toLowerCase() ?? ""
  return lastSentence.length > 0 ? lastSentence : null
}

View File

@@ -1,8 +1,9 @@
/// <reference path="../../../bun-test.d.ts" />
import { createOpencodeClient } from "@opencode-ai/sdk"
import { describe, expect, it as test } from "bun:test"
import { afterEach, describe, expect, it as test } from "bun:test"
import { subagentSessions, _resetForTesting } from "../../features/claude-code-session-state"
import { createGptPermissionContinuationHook } from "."
type SessionMessage = {
@@ -109,7 +110,18 @@ function createUserMessage(id: string, text: string): SessionMessage {
}
}
/** Asserts the number of injected prompts and that each begins with "continue". */
function expectContinuationPrompts(promptCalls: string[], count: number): void {
  expect(promptCalls).toHaveLength(count)
  promptCalls.forEach((prompt) => {
    expect(prompt.startsWith("continue")).toBe(true)
  })
}
describe("gpt-permission-continuation", () => {
afterEach(() => {
_resetForTesting()
})
test("injects continue when the last GPT assistant reply asks for permission", async () => {
// given
const { ctx, promptCalls } = createMockPluginInput([
@@ -124,7 +136,7 @@ describe("gpt-permission-continuation", () => {
await hook.handler({ event: { type: "session.idle", properties: { sessionID: "ses-1" } } })
// then
expect(promptCalls).toEqual(["continue"])
expectContinuationPrompts(promptCalls, 1)
})
test("does not inject when the last assistant model is not GPT", async () => {
@@ -216,7 +228,7 @@ describe("gpt-permission-continuation", () => {
await hook.handler({ event: { type: "session.idle", properties: { sessionID: "ses-1" } } })
// then
expect(promptCalls).toEqual(["continue"])
expectContinuationPrompts(promptCalls, 1)
})
describe("#given repeated GPT permission tails in the same session", () => {
@@ -243,7 +255,7 @@ describe("gpt-permission-continuation", () => {
await hook.handler({ event: { type: "session.idle", properties: { sessionID: "ses-1" } } })
// then
expect(promptCalls).toEqual(["continue", "continue", "continue"])
expectContinuationPrompts(promptCalls, 3)
})
})
@@ -276,7 +288,7 @@ describe("gpt-permission-continuation", () => {
await hook.handler({ event: { type: "session.idle", properties: { sessionID: "ses-1" } } })
// then
expect(promptCalls).toEqual(["continue", "continue", "continue", "continue", "continue"])
expectContinuationPrompts(promptCalls, 5)
})
})
@@ -297,7 +309,7 @@ describe("gpt-permission-continuation", () => {
await hook.handler({ event: { type: "session.idle", properties: { sessionID: "ses-1" } } })
// then
expect(promptCalls).toEqual(["continue"])
expectContinuationPrompts(promptCalls, 1)
})
})
@@ -327,8 +339,46 @@ describe("gpt-permission-continuation", () => {
await hook.handler({ event: { type: "session.idle", properties: { sessionID: "ses-1" } } })
// then
expect(promptCalls).toEqual(["continue", "continue", "continue", "continue"])
expectContinuationPrompts(promptCalls, 4)
})
})
})
test("does not inject when the session is a subagent session", async () => {
  // given: the session is registered as a subagent before the hook runs
  subagentSessions.add("ses-subagent")
  const { ctx, promptCalls } = createMockPluginInput([
    {
      info: { id: "msg-1", role: "assistant", modelID: "gpt-5.4" },
      parts: [{ type: "text", text: "If you want, I can continue with the fix." }],
    },
  ])
  const hook = createGptPermissionContinuationHook(ctx)
  // when: the subagent session goes idle
  await hook.handler({ event: { type: "session.idle", properties: { sessionID: "ses-subagent" } } })
  // then: no continuation prompt is injected
  expect(promptCalls).toEqual([])
})
test("includes assistant text context in the continuation prompt", async () => {
  // given: a GPT reply whose tail asks for permission to continue
  const assistantText = "I finished the analysis. If you want, I can apply the changes next."
  const { ctx, promptCalls } = createMockPluginInput([
    {
      info: { id: "msg-1", role: "assistant", modelID: "gpt-5.4" },
      parts: [{ type: "text", text: assistantText }],
    },
  ])
  const hook = createGptPermissionContinuationHook(ctx)
  // when: the session goes idle
  await hook.handler({ event: { type: "session.idle", properties: { sessionID: "ses-1" } } })
  // then: exactly one continuation prompt, carrying the assistant's trailing context
  expect(promptCalls).toHaveLength(1)
  const [prompt] = promptCalls
  expect(prompt.startsWith("continue")).toBe(true)
  expect(prompt).toContain("If you want, I can apply the changes next.")
})
})

View File

@@ -1,5 +1,6 @@
import type { PluginInput } from "@opencode-ai/plugin"
import { subagentSessions } from "../../features/claude-code-session-state"
import { normalizeSDKResponse } from "../../shared"
import { log } from "../../shared/logger"
@@ -14,7 +15,8 @@ import {
HOOK_NAME,
MAX_CONSECUTIVE_AUTO_CONTINUES,
} from "./constants"
import { detectStallPattern } from "./detector"
import { detectStallPattern, extractPermissionPhrase } from "./detector"
import { buildContextualContinuationPrompt } from "./prompt-builder"
import type { SessionStateStore } from "./session-state"
type SessionState = ReturnType<SessionStateStore["getState"]>
@@ -22,11 +24,13 @@ type SessionState = ReturnType<SessionStateStore["getState"]>
async function promptContinuation(
ctx: PluginInput,
sessionID: string,
assistantText: string,
): Promise<void> {
const prompt = buildContextualContinuationPrompt(assistantText)
const payload = {
path: { id: sessionID },
body: {
parts: [{ type: "text" as const, text: CONTINUATION_PROMPT }],
parts: [{ type: "text" as const, text: prompt }],
},
query: { directory: ctx.directory },
}
@@ -53,16 +57,8 @@ function getLastUserMessageBefore(
}
function isAutoContinuationUserMessage(message: SessionMessage): boolean {
return extractAssistantText(message).trim().toLowerCase() === CONTINUATION_PROMPT
}
function extractPermissionPhrase(text: string): string | null {
const tail = text.slice(-800)
const lines = tail.split("\n").map((line) => line.trim()).filter(Boolean)
const hotZone = lines.slice(-3).join(" ")
const sentenceParts = hotZone.trim().replace(/\s+/g, " ").split(/(?<=[.!?])\s+/)
const trailingSegment = sentenceParts[sentenceParts.length - 1]?.trim().toLowerCase() ?? ""
return trailingSegment || null
const text = extractAssistantText(message).trim().toLowerCase()
return text === CONTINUATION_PROMPT || text.startsWith(`${CONTINUATION_PROMPT}\n`)
}
function resetAutoContinuationState(state: SessionState): void {
@@ -94,6 +90,10 @@ export function createGptPermissionContinuationHandler(args: {
const sessionID = properties?.sessionID as string | undefined
if (!sessionID) return
if (subagentSessions.has(sessionID)) {
log(`[${HOOK_NAME}] Skipped: session is a subagent`, { sessionID })
return
}
if (isContinuationStopped?.(sessionID)) {
log(`[${HOOK_NAME}] Skipped: continuation stopped for session`, { sessionID })
return
@@ -181,7 +181,7 @@ export function createGptPermissionContinuationHandler(args: {
}
state.inFlight = true
await promptContinuation(ctx, sessionID)
await promptContinuation(ctx, sessionID, assistantText)
state.lastHandledMessageID = messageID
state.consecutiveAutoContinueCount += 1
state.awaitingAutoContinuationResponse = true

View File

@@ -0,0 +1,14 @@
import { CONTINUATION_PROMPT } from "./constants"
// How many trailing non-empty lines of the assistant's reply to echo back.
const CONTEXT_LINE_COUNT = 5

/**
 * Builds the continuation prompt, appending the tail of the assistant's
 * previous response so the model sees where it left off. Falls back to the
 * bare prompt when the text contains no non-empty lines.
 */
export function buildContextualContinuationPrompt(assistantText: string): string {
  const trimmedLines: string[] = []
  for (const rawLine of assistantText.split("\n")) {
    const line = rawLine.trim()
    if (line) trimmedLines.push(line)
  }
  const tail = trimmedLines.slice(-CONTEXT_LINE_COUNT)
  if (tail.length === 0) {
    return CONTINUATION_PROMPT
  }
  return `${CONTINUATION_PROMPT}\n\n[Your last response ended with:]\n${tail.join("\n")}`
}

View File

@@ -56,7 +56,9 @@ describe("gpt-permission-continuation coordination", () => {
})
// then
expect(promptCalls).toEqual(["continue"])
expect(promptCalls).toHaveLength(1)
expect(promptCalls[0].startsWith("continue")).toBe(true)
expect(promptCalls[0]).toContain("If you want, I can implement the fix next.")
expect(toastCalls).toEqual([])
})
})