Compare commits
7 Commits
v3.8.1
...
fix/ripgre
| Author | SHA1 | Date | |
|---|---|---|---|
|
|
594c97d36b | ||
|
|
74008a8ceb | ||
|
|
62300791b2 | ||
|
|
ac81e1d7cd | ||
|
|
9390f98f01 | ||
|
|
e6868e9112 | ||
|
|
5d1d87cc10 |
@@ -7,9 +7,11 @@ import {
|
||||
DEFAULT_MAX_DEPTH,
|
||||
DEFAULT_MAX_OUTPUT_BYTES,
|
||||
RG_FILES_FLAGS,
|
||||
DEFAULT_RG_THREADS,
|
||||
} from "./constants"
|
||||
import type { GlobOptions, GlobResult, FileMatch } from "./types"
|
||||
import { stat } from "node:fs/promises"
|
||||
import { rgSemaphore } from "../shared/semaphore"
|
||||
|
||||
export interface ResolvedCli {
|
||||
path: string
|
||||
@@ -19,6 +21,7 @@ export interface ResolvedCli {
|
||||
function buildRgArgs(options: GlobOptions): string[] {
|
||||
const args: string[] = [
|
||||
...RG_FILES_FLAGS,
|
||||
`--threads=${Math.min(options.threads ?? DEFAULT_RG_THREADS, DEFAULT_RG_THREADS)}`,
|
||||
`--max-depth=${Math.min(options.maxDepth ?? DEFAULT_MAX_DEPTH, DEFAULT_MAX_DEPTH)}`,
|
||||
]
|
||||
|
||||
@@ -91,6 +94,18 @@ export { buildRgArgs, buildFindArgs, buildPowerShellCommand }
|
||||
export async function runRgFiles(
|
||||
options: GlobOptions,
|
||||
resolvedCli?: ResolvedCli
|
||||
): Promise<GlobResult> {
|
||||
await rgSemaphore.acquire()
|
||||
try {
|
||||
return await runRgFilesInternal(options, resolvedCli)
|
||||
} finally {
|
||||
rgSemaphore.release()
|
||||
}
|
||||
}
|
||||
|
||||
async function runRgFilesInternal(
|
||||
options: GlobOptions,
|
||||
resolvedCli?: ResolvedCli
|
||||
): Promise<GlobResult> {
|
||||
const cli = resolvedCli ?? resolveGrepCli()
|
||||
const timeout = Math.min(options.timeout ?? DEFAULT_TIMEOUT_MS, DEFAULT_TIMEOUT_MS)
|
||||
|
||||
@@ -1,4 +1,4 @@
|
||||
export { resolveGrepCli, resolveGrepCliWithAutoInstall, type GrepBackend } from "../grep/constants"
|
||||
export { resolveGrepCli, resolveGrepCliWithAutoInstall, type GrepBackend, DEFAULT_RG_THREADS } from "../grep/constants"
|
||||
|
||||
export const DEFAULT_TIMEOUT_MS = 60_000
|
||||
export const DEFAULT_LIMIT = 100
|
||||
|
||||
@@ -19,4 +19,5 @@ export interface GlobOptions {
|
||||
maxDepth?: number
|
||||
timeout?: number
|
||||
limit?: number
|
||||
threads?: number // limit rg thread count
|
||||
}
|
||||
|
||||
@@ -8,14 +8,17 @@ import {
|
||||
DEFAULT_MAX_COLUMNS,
|
||||
DEFAULT_TIMEOUT_MS,
|
||||
DEFAULT_MAX_OUTPUT_BYTES,
|
||||
DEFAULT_RG_THREADS,
|
||||
RG_SAFETY_FLAGS,
|
||||
GREP_SAFETY_FLAGS,
|
||||
} from "./constants"
|
||||
import type { GrepOptions, GrepMatch, GrepResult, CountResult } from "./types"
|
||||
import { rgSemaphore } from "../shared/semaphore"
|
||||
|
||||
function buildRgArgs(options: GrepOptions): string[] {
|
||||
const args: string[] = [
|
||||
...RG_SAFETY_FLAGS,
|
||||
`--threads=${Math.min(options.threads ?? DEFAULT_RG_THREADS, DEFAULT_RG_THREADS)}`,
|
||||
`--max-depth=${Math.min(options.maxDepth ?? DEFAULT_MAX_DEPTH, DEFAULT_MAX_DEPTH)}`,
|
||||
`--max-filesize=${options.maxFilesize ?? DEFAULT_MAX_FILESIZE}`,
|
||||
`--max-count=${Math.min(options.maxCount ?? DEFAULT_MAX_COUNT, DEFAULT_MAX_COUNT)}`,
|
||||
@@ -51,6 +54,12 @@ function buildRgArgs(options: GrepOptions): string[] {
|
||||
}
|
||||
}
|
||||
|
||||
if (options.outputMode === "files_with_matches") {
|
||||
args.push("--files-with-matches")
|
||||
} else if (options.outputMode === "count") {
|
||||
args.push("--count")
|
||||
}
|
||||
|
||||
return args
|
||||
}
|
||||
|
||||
@@ -86,7 +95,7 @@ function buildArgs(options: GrepOptions, backend: GrepBackend): string[] {
|
||||
return backend === "rg" ? buildRgArgs(options) : buildGrepArgs(options)
|
||||
}
|
||||
|
||||
function parseOutput(output: string): GrepMatch[] {
|
||||
function parseOutput(output: string, filesOnly = false): GrepMatch[] {
|
||||
if (!output.trim()) return []
|
||||
|
||||
const matches: GrepMatch[] = []
|
||||
@@ -95,6 +104,16 @@ function parseOutput(output: string): GrepMatch[] {
|
||||
for (const line of lines) {
|
||||
if (!line.trim()) continue
|
||||
|
||||
if (filesOnly) {
|
||||
// --files-with-matches outputs only file paths, one per line
|
||||
matches.push({
|
||||
file: line.trim(),
|
||||
line: 0,
|
||||
text: "",
|
||||
})
|
||||
continue
|
||||
}
|
||||
|
||||
const match = line.match(/^(.+?):(\d+):(.*)$/)
|
||||
if (match) {
|
||||
matches.push({
|
||||
@@ -130,6 +149,15 @@ function parseCountOutput(output: string): CountResult[] {
|
||||
}
|
||||
|
||||
export async function runRg(options: GrepOptions): Promise<GrepResult> {
|
||||
await rgSemaphore.acquire()
|
||||
try {
|
||||
return await runRgInternal(options)
|
||||
} finally {
|
||||
rgSemaphore.release()
|
||||
}
|
||||
}
|
||||
|
||||
async function runRgInternal(options: GrepOptions): Promise<GrepResult> {
|
||||
const cli = resolveGrepCli()
|
||||
const args = buildArgs(options, cli.backend)
|
||||
const timeout = Math.min(options.timeout ?? DEFAULT_TIMEOUT_MS, DEFAULT_TIMEOUT_MS)
|
||||
@@ -173,14 +201,17 @@ export async function runRg(options: GrepOptions): Promise<GrepResult> {
|
||||
}
|
||||
}
|
||||
|
||||
const matches = parseOutput(outputToProcess)
|
||||
const filesSearched = new Set(matches.map((m) => m.file)).size
|
||||
const matches = parseOutput(outputToProcess, options.outputMode === "files_with_matches")
|
||||
const limited = options.headLimit && options.headLimit > 0
|
||||
? matches.slice(0, options.headLimit)
|
||||
: matches
|
||||
const filesSearched = new Set(limited.map((m) => m.file)).size
|
||||
|
||||
return {
|
||||
matches,
|
||||
totalMatches: matches.length,
|
||||
matches: limited,
|
||||
totalMatches: limited.length,
|
||||
filesSearched,
|
||||
truncated,
|
||||
truncated: truncated || (options.headLimit ? matches.length > options.headLimit : false),
|
||||
}
|
||||
} catch (e) {
|
||||
return {
|
||||
@@ -194,6 +225,15 @@ export async function runRg(options: GrepOptions): Promise<GrepResult> {
|
||||
}
|
||||
|
||||
export async function runRgCount(options: Omit<GrepOptions, "context">): Promise<CountResult[]> {
|
||||
await rgSemaphore.acquire()
|
||||
try {
|
||||
return await runRgCountInternal(options)
|
||||
} finally {
|
||||
rgSemaphore.release()
|
||||
}
|
||||
}
|
||||
|
||||
async function runRgCountInternal(options: Omit<GrepOptions, "context">): Promise<CountResult[]> {
|
||||
const cli = resolveGrepCli()
|
||||
const args = buildArgs({ ...options, context: 0 }, cli.backend)
|
||||
|
||||
|
||||
@@ -113,8 +113,9 @@ export const DEFAULT_MAX_FILESIZE = "10M"
|
||||
export const DEFAULT_MAX_COUNT = 500
|
||||
export const DEFAULT_MAX_COLUMNS = 1000
|
||||
export const DEFAULT_CONTEXT = 2
|
||||
export const DEFAULT_TIMEOUT_MS = 300_000
|
||||
export const DEFAULT_MAX_OUTPUT_BYTES = 10 * 1024 * 1024
|
||||
export const DEFAULT_TIMEOUT_MS = 60_000
|
||||
export const DEFAULT_MAX_OUTPUT_BYTES = 256 * 1024
|
||||
export const DEFAULT_RG_THREADS = 4
|
||||
|
||||
export const RG_SAFETY_FLAGS = [
|
||||
"--no-follow",
|
||||
|
||||
@@ -1,6 +1,6 @@
|
||||
import { existsSync, readdirSync } from "node:fs"
|
||||
import { join } from "node:path"
|
||||
import { extractZip as extractZipBase } from "../../shared"
|
||||
import { extractZip as extractZipBase } from "../../shared/zip-extractor"
|
||||
import {
|
||||
cleanupArchive,
|
||||
downloadArchive,
|
||||
|
||||
@@ -1,16 +1,16 @@
|
||||
import type { PluginInput } from "@opencode-ai/plugin"
|
||||
import { tool, type ToolDefinition } from "@opencode-ai/plugin/tool"
|
||||
import { runRg } from "./cli"
|
||||
import { formatGrepResult } from "./result-formatter"
|
||||
import { runRg, runRgCount } from "./cli"
|
||||
import { formatGrepResult, formatCountResult } from "./result-formatter"
|
||||
|
||||
export function createGrepTools(ctx: PluginInput): Record<string, ToolDefinition> {
|
||||
const grep: ToolDefinition = tool({
|
||||
description:
|
||||
"Fast content search tool with safety limits (60s timeout, 10MB output). " +
|
||||
"Fast content search tool with safety limits (60s timeout, 256KB output). " +
|
||||
"Searches file contents using regular expressions. " +
|
||||
"Supports full regex syntax (eg. \"log.*Error\", \"function\\s+\\w+\", etc.). " +
|
||||
"Filter files by pattern with the include parameter (eg. \"*.js\", \"*.{ts,tsx}\"). " +
|
||||
"Returns file paths with matches sorted by modification time.",
|
||||
"Output modes: \"content\" shows matching lines, \"files_with_matches\" shows only file paths (default), \"count\" shows match counts per file.",
|
||||
args: {
|
||||
pattern: tool.schema.string().describe("The regex pattern to search for in file contents"),
|
||||
include: tool.schema
|
||||
@@ -21,18 +21,42 @@ export function createGrepTools(ctx: PluginInput): Record<string, ToolDefinition
|
||||
.string()
|
||||
.optional()
|
||||
.describe("The directory to search in. Defaults to the current working directory."),
|
||||
output_mode: tool.schema
|
||||
.enum(["content", "files_with_matches", "count"])
|
||||
.optional()
|
||||
.describe(
|
||||
"Output mode: \"content\" shows matching lines, \"files_with_matches\" shows only file paths (default), \"count\" shows match counts per file."
|
||||
),
|
||||
head_limit: tool.schema
|
||||
.number()
|
||||
.optional()
|
||||
.describe("Limit output to first N entries. 0 or omitted means no limit."),
|
||||
},
|
||||
execute: async (args) => {
|
||||
try {
|
||||
const globs = args.include ? [args.include] : undefined
|
||||
const searchPath = args.path ?? ctx.directory
|
||||
const paths = [searchPath]
|
||||
const outputMode = args.output_mode ?? "files_with_matches"
|
||||
const headLimit = args.head_limit ?? 0
|
||||
|
||||
if (outputMode === "count") {
|
||||
const results = await runRgCount({
|
||||
pattern: args.pattern,
|
||||
paths,
|
||||
globs,
|
||||
})
|
||||
const limited = headLimit > 0 ? results.slice(0, headLimit) : results
|
||||
return formatCountResult(limited)
|
||||
}
|
||||
|
||||
const result = await runRg({
|
||||
pattern: args.pattern,
|
||||
paths,
|
||||
globs,
|
||||
context: 0,
|
||||
outputMode,
|
||||
headLimit,
|
||||
})
|
||||
|
||||
return formatGrepResult(result)
|
||||
|
||||
@@ -31,6 +31,9 @@ export interface GrepOptions {
|
||||
noIgnore?: boolean
|
||||
fileType?: string[]
|
||||
timeout?: number
|
||||
threads?: number
|
||||
outputMode?: "content" | "files_with_matches" | "count"
|
||||
headLimit?: number
|
||||
}
|
||||
|
||||
export interface CountResult {
|
||||
|
||||
175
src/tools/hashline-edit/autocorrect-replacement-lines.ts
Normal file
175
src/tools/hashline-edit/autocorrect-replacement-lines.ts
Normal file
@@ -0,0 +1,175 @@
|
||||
function normalizeTokens(text: string): string {
|
||||
return text.replace(/\s+/g, "")
|
||||
}
|
||||
|
||||
function stripAllWhitespace(text: string): string {
|
||||
return normalizeTokens(text)
|
||||
}
|
||||
|
||||
export function stripTrailingContinuationTokens(text: string): string {
|
||||
return text.replace(/(?:&&|\|\||\?\?|\?|:|=|,|\+|-|\*|\/|\.|\()\s*$/u, "")
|
||||
}
|
||||
|
||||
export function stripMergeOperatorChars(text: string): string {
|
||||
return text.replace(/[|&?]/g, "")
|
||||
}
|
||||
|
||||
function leadingWhitespace(text: string): string {
|
||||
const match = text.match(/^\s*/)
|
||||
return match ? match[0] : ""
|
||||
}
|
||||
|
||||
export function restoreOldWrappedLines(originalLines: string[], replacementLines: string[]): string[] {
|
||||
if (originalLines.length === 0 || replacementLines.length < 2) return replacementLines
|
||||
|
||||
const canonicalToOriginal = new Map<string, { line: string; count: number }>()
|
||||
for (const line of originalLines) {
|
||||
const canonical = stripAllWhitespace(line)
|
||||
const existing = canonicalToOriginal.get(canonical)
|
||||
if (existing) {
|
||||
existing.count += 1
|
||||
} else {
|
||||
canonicalToOriginal.set(canonical, { line, count: 1 })
|
||||
}
|
||||
}
|
||||
|
||||
const candidates: { start: number; len: number; replacement: string; canonical: string }[] = []
|
||||
for (let start = 0; start < replacementLines.length; start += 1) {
|
||||
for (let len = 2; len <= 10 && start + len <= replacementLines.length; len += 1) {
|
||||
const canonicalSpan = stripAllWhitespace(replacementLines.slice(start, start + len).join(""))
|
||||
const original = canonicalToOriginal.get(canonicalSpan)
|
||||
if (original && original.count === 1 && canonicalSpan.length >= 6) {
|
||||
candidates.push({ start, len, replacement: original.line, canonical: canonicalSpan })
|
||||
}
|
||||
}
|
||||
}
|
||||
if (candidates.length === 0) return replacementLines
|
||||
|
||||
const canonicalCounts = new Map<string, number>()
|
||||
for (const candidate of candidates) {
|
||||
canonicalCounts.set(candidate.canonical, (canonicalCounts.get(candidate.canonical) ?? 0) + 1)
|
||||
}
|
||||
|
||||
const uniqueCandidates = candidates.filter((candidate) => (canonicalCounts.get(candidate.canonical) ?? 0) === 1)
|
||||
if (uniqueCandidates.length === 0) return replacementLines
|
||||
|
||||
uniqueCandidates.sort((a, b) => b.start - a.start)
|
||||
const correctedLines = [...replacementLines]
|
||||
for (const candidate of uniqueCandidates) {
|
||||
correctedLines.splice(candidate.start, candidate.len, candidate.replacement)
|
||||
}
|
||||
return correctedLines
|
||||
}
|
||||
|
||||
export function maybeExpandSingleLineMerge(
|
||||
originalLines: string[],
|
||||
replacementLines: string[]
|
||||
): string[] {
|
||||
if (replacementLines.length !== 1 || originalLines.length <= 1) {
|
||||
return replacementLines
|
||||
}
|
||||
|
||||
const merged = replacementLines[0]
|
||||
const parts = originalLines.map((line) => line.trim()).filter((line) => line.length > 0)
|
||||
if (parts.length !== originalLines.length) return replacementLines
|
||||
|
||||
const indices: number[] = []
|
||||
let offset = 0
|
||||
let orderedMatch = true
|
||||
for (const part of parts) {
|
||||
let idx = merged.indexOf(part, offset)
|
||||
let matchedLen = part.length
|
||||
if (idx === -1) {
|
||||
const stripped = stripTrailingContinuationTokens(part)
|
||||
if (stripped !== part) {
|
||||
idx = merged.indexOf(stripped, offset)
|
||||
if (idx !== -1) matchedLen = stripped.length
|
||||
}
|
||||
}
|
||||
if (idx === -1) {
|
||||
const segment = merged.slice(offset)
|
||||
const segmentStripped = stripMergeOperatorChars(segment)
|
||||
const partStripped = stripMergeOperatorChars(part)
|
||||
const fuzzyIdx = segmentStripped.indexOf(partStripped)
|
||||
if (fuzzyIdx !== -1) {
|
||||
let strippedPos = 0
|
||||
let originalPos = 0
|
||||
while (strippedPos < fuzzyIdx && originalPos < segment.length) {
|
||||
if (!/[|&?]/.test(segment[originalPos])) strippedPos += 1
|
||||
originalPos += 1
|
||||
}
|
||||
idx = offset + originalPos
|
||||
matchedLen = part.length
|
||||
}
|
||||
}
|
||||
if (idx === -1) {
|
||||
orderedMatch = false
|
||||
break
|
||||
}
|
||||
indices.push(idx)
|
||||
offset = idx + matchedLen
|
||||
}
|
||||
|
||||
const expanded: string[] = []
|
||||
if (orderedMatch) {
|
||||
for (let i = 0; i < indices.length; i += 1) {
|
||||
const start = indices[i]
|
||||
const end = i + 1 < indices.length ? indices[i + 1] : merged.length
|
||||
const candidate = merged.slice(start, end).trim()
|
||||
if (candidate.length === 0) {
|
||||
orderedMatch = false
|
||||
break
|
||||
}
|
||||
expanded.push(candidate)
|
||||
}
|
||||
}
|
||||
|
||||
if (orderedMatch && expanded.length === originalLines.length) {
|
||||
return expanded
|
||||
}
|
||||
|
||||
const semicolonSplit = merged
|
||||
.split(/;\s+/)
|
||||
.map((line, idx, arr) => {
|
||||
if (idx < arr.length - 1 && !line.endsWith(";")) {
|
||||
return `${line};`
|
||||
}
|
||||
return line
|
||||
})
|
||||
.map((line) => line.trim())
|
||||
.filter((line) => line.length > 0)
|
||||
|
||||
if (semicolonSplit.length === originalLines.length) {
|
||||
return semicolonSplit
|
||||
}
|
||||
|
||||
return replacementLines
|
||||
}
|
||||
|
||||
export function restoreIndentForPairedReplacement(
|
||||
originalLines: string[],
|
||||
replacementLines: string[]
|
||||
): string[] {
|
||||
if (originalLines.length !== replacementLines.length) {
|
||||
return replacementLines
|
||||
}
|
||||
|
||||
return replacementLines.map((line, idx) => {
|
||||
if (line.length === 0) return line
|
||||
if (leadingWhitespace(line).length > 0) return line
|
||||
const indent = leadingWhitespace(originalLines[idx])
|
||||
if (indent.length === 0) return line
|
||||
return `${indent}${line}`
|
||||
})
|
||||
}
|
||||
|
||||
export function autocorrectReplacementLines(
|
||||
originalLines: string[],
|
||||
replacementLines: string[]
|
||||
): string[] {
|
||||
let next = replacementLines
|
||||
next = maybeExpandSingleLineMerge(originalLines, next)
|
||||
next = restoreOldWrappedLines(originalLines, next)
|
||||
next = restoreIndentForPairedReplacement(originalLines, next)
|
||||
return next
|
||||
}
|
||||
47
src/tools/hashline-edit/edit-deduplication.ts
Normal file
47
src/tools/hashline-edit/edit-deduplication.ts
Normal file
@@ -0,0 +1,47 @@
|
||||
import type { HashlineEdit } from "./types"
|
||||
import { toNewLines } from "./edit-text-normalization"
|
||||
|
||||
function normalizeEditPayload(payload: string | string[]): string {
|
||||
return toNewLines(payload).join("\n")
|
||||
}
|
||||
|
||||
function buildDedupeKey(edit: HashlineEdit): string {
|
||||
switch (edit.type) {
|
||||
case "set_line":
|
||||
return `set_line|${edit.line}|${normalizeEditPayload(edit.text)}`
|
||||
case "replace_lines":
|
||||
return `replace_lines|${edit.start_line}|${edit.end_line}|${normalizeEditPayload(edit.text)}`
|
||||
case "insert_after":
|
||||
return `insert_after|${edit.line}|${normalizeEditPayload(edit.text)}`
|
||||
case "insert_before":
|
||||
return `insert_before|${edit.line}|${normalizeEditPayload(edit.text)}`
|
||||
case "insert_between":
|
||||
return `insert_between|${edit.after_line}|${edit.before_line}|${normalizeEditPayload(edit.text)}`
|
||||
case "replace":
|
||||
return `replace|${edit.old_text}|${normalizeEditPayload(edit.new_text)}`
|
||||
case "append":
|
||||
return `append|${normalizeEditPayload(edit.text)}`
|
||||
case "prepend":
|
||||
return `prepend|${normalizeEditPayload(edit.text)}`
|
||||
default:
|
||||
return JSON.stringify(edit)
|
||||
}
|
||||
}
|
||||
|
||||
export function dedupeEdits(edits: HashlineEdit[]): { edits: HashlineEdit[]; deduplicatedEdits: number } {
|
||||
const seen = new Set<string>()
|
||||
const deduped: HashlineEdit[] = []
|
||||
let deduplicatedEdits = 0
|
||||
|
||||
for (const edit of edits) {
|
||||
const key = buildDedupeKey(edit)
|
||||
if (seen.has(key)) {
|
||||
deduplicatedEdits += 1
|
||||
continue
|
||||
}
|
||||
seen.add(key)
|
||||
deduped.push(edit)
|
||||
}
|
||||
|
||||
return { edits: deduped, deduplicatedEdits }
|
||||
}
|
||||
160
src/tools/hashline-edit/edit-operation-primitives.ts
Normal file
160
src/tools/hashline-edit/edit-operation-primitives.ts
Normal file
@@ -0,0 +1,160 @@
|
||||
import { autocorrectReplacementLines } from "./autocorrect-replacement-lines"
|
||||
import {
|
||||
restoreLeadingIndent,
|
||||
stripInsertAnchorEcho,
|
||||
stripInsertBeforeEcho,
|
||||
stripInsertBoundaryEcho,
|
||||
stripRangeBoundaryEcho,
|
||||
toNewLines,
|
||||
} from "./edit-text-normalization"
|
||||
import { parseLineRef, validateLineRef } from "./validation"
|
||||
|
||||
interface EditApplyOptions {
|
||||
skipValidation?: boolean
|
||||
}
|
||||
|
||||
function shouldValidate(options?: EditApplyOptions): boolean {
|
||||
return options?.skipValidation !== true
|
||||
}
|
||||
|
||||
export function applySetLine(
|
||||
lines: string[],
|
||||
anchor: string,
|
||||
newText: string | string[],
|
||||
options?: EditApplyOptions
|
||||
): string[] {
|
||||
if (shouldValidate(options)) validateLineRef(lines, anchor)
|
||||
const { line } = parseLineRef(anchor)
|
||||
const result = [...lines]
|
||||
const originalLine = lines[line - 1] ?? ""
|
||||
const corrected = autocorrectReplacementLines([originalLine], toNewLines(newText))
|
||||
const replacement = corrected.map((entry, idx) => {
|
||||
if (idx !== 0) return entry
|
||||
return restoreLeadingIndent(originalLine, entry)
|
||||
})
|
||||
result.splice(line - 1, 1, ...replacement)
|
||||
return result
|
||||
}
|
||||
|
||||
export function applyReplaceLines(
|
||||
lines: string[],
|
||||
startAnchor: string,
|
||||
endAnchor: string,
|
||||
newText: string | string[],
|
||||
options?: EditApplyOptions
|
||||
): string[] {
|
||||
if (shouldValidate(options)) {
|
||||
validateLineRef(lines, startAnchor)
|
||||
validateLineRef(lines, endAnchor)
|
||||
}
|
||||
|
||||
const { line: startLine } = parseLineRef(startAnchor)
|
||||
const { line: endLine } = parseLineRef(endAnchor)
|
||||
|
||||
if (startLine > endLine) {
|
||||
throw new Error(
|
||||
`Invalid range: start line ${startLine} cannot be greater than end line ${endLine}`
|
||||
)
|
||||
}
|
||||
|
||||
const result = [...lines]
|
||||
const originalRange = lines.slice(startLine - 1, endLine)
|
||||
const stripped = stripRangeBoundaryEcho(lines, startLine, endLine, toNewLines(newText))
|
||||
const corrected = autocorrectReplacementLines(originalRange, stripped)
|
||||
const restored = corrected.map((entry, idx) => {
|
||||
if (idx !== 0) return entry
|
||||
return restoreLeadingIndent(lines[startLine - 1], entry)
|
||||
})
|
||||
result.splice(startLine - 1, endLine - startLine + 1, ...restored)
|
||||
return result
|
||||
}
|
||||
|
||||
export function applyInsertAfter(
|
||||
lines: string[],
|
||||
anchor: string,
|
||||
text: string | string[],
|
||||
options?: EditApplyOptions
|
||||
): string[] {
|
||||
if (shouldValidate(options)) validateLineRef(lines, anchor)
|
||||
const { line } = parseLineRef(anchor)
|
||||
const result = [...lines]
|
||||
const newLines = stripInsertAnchorEcho(lines[line - 1], toNewLines(text))
|
||||
if (newLines.length === 0) {
|
||||
throw new Error(`insert_after requires non-empty text for ${anchor}`)
|
||||
}
|
||||
result.splice(line, 0, ...newLines)
|
||||
return result
|
||||
}
|
||||
|
||||
export function applyInsertBefore(
|
||||
lines: string[],
|
||||
anchor: string,
|
||||
text: string | string[],
|
||||
options?: EditApplyOptions
|
||||
): string[] {
|
||||
if (shouldValidate(options)) validateLineRef(lines, anchor)
|
||||
const { line } = parseLineRef(anchor)
|
||||
const result = [...lines]
|
||||
const newLines = stripInsertBeforeEcho(lines[line - 1], toNewLines(text))
|
||||
if (newLines.length === 0) {
|
||||
throw new Error(`insert_before requires non-empty text for ${anchor}`)
|
||||
}
|
||||
result.splice(line - 1, 0, ...newLines)
|
||||
return result
|
||||
}
|
||||
|
||||
export function applyInsertBetween(
|
||||
lines: string[],
|
||||
afterAnchor: string,
|
||||
beforeAnchor: string,
|
||||
text: string | string[],
|
||||
options?: EditApplyOptions
|
||||
): string[] {
|
||||
if (shouldValidate(options)) {
|
||||
validateLineRef(lines, afterAnchor)
|
||||
validateLineRef(lines, beforeAnchor)
|
||||
}
|
||||
const { line: afterLine } = parseLineRef(afterAnchor)
|
||||
const { line: beforeLine } = parseLineRef(beforeAnchor)
|
||||
if (beforeLine <= afterLine) {
|
||||
throw new Error(`insert_between requires after_line (${afterLine}) < before_line (${beforeLine})`)
|
||||
}
|
||||
|
||||
const result = [...lines]
|
||||
const newLines = stripInsertBoundaryEcho(lines[afterLine - 1], lines[beforeLine - 1], toNewLines(text))
|
||||
if (newLines.length === 0) {
|
||||
throw new Error(`insert_between requires non-empty text for ${afterAnchor}..${beforeAnchor}`)
|
||||
}
|
||||
result.splice(beforeLine - 1, 0, ...newLines)
|
||||
return result
|
||||
}
|
||||
|
||||
export function applyAppend(lines: string[], text: string | string[]): string[] {
|
||||
const normalized = toNewLines(text)
|
||||
if (normalized.length === 0) {
|
||||
throw new Error("append requires non-empty text")
|
||||
}
|
||||
if (lines.length === 1 && lines[0] === "") {
|
||||
return [...normalized]
|
||||
}
|
||||
return [...lines, ...normalized]
|
||||
}
|
||||
|
||||
export function applyPrepend(lines: string[], text: string | string[]): string[] {
|
||||
const normalized = toNewLines(text)
|
||||
if (normalized.length === 0) {
|
||||
throw new Error("prepend requires non-empty text")
|
||||
}
|
||||
if (lines.length === 1 && lines[0] === "") {
|
||||
return [...normalized]
|
||||
}
|
||||
return [...normalized, ...lines]
|
||||
}
|
||||
|
||||
export function applyReplace(content: string, oldText: string, newText: string | string[]): string {
|
||||
if (!content.includes(oldText)) {
|
||||
throw new Error(`Text not found: "${oldText}"`)
|
||||
}
|
||||
const replacement = Array.isArray(newText) ? newText.join("\n") : newText
|
||||
return content.replaceAll(oldText, replacement)
|
||||
}
|
||||
@@ -1,5 +1,6 @@
|
||||
import { describe, expect, it } from "bun:test"
|
||||
import { applyHashlineEdits, applyInsertAfter, applyReplace, applyReplaceLines, applySetLine } from "./edit-operations"
|
||||
import { applyAppend, applyPrepend } from "./edit-operation-primitives"
|
||||
import { computeLineHash } from "./hash-computation"
|
||||
import type { HashlineEdit } from "./types"
|
||||
|
||||
@@ -246,6 +247,124 @@ describe("hashline edit operations", () => {
|
||||
const result = applyReplaceLines(lines, anchorFor(lines, 2), anchorFor(lines, 3), ["return 3", "return 4"])
|
||||
|
||||
//#then
|
||||
expect(result).toEqual(["if (x) {", " return 3", "return 4", "}"])
|
||||
expect(result).toEqual(["if (x) {", " return 3", " return 4", "}"])
|
||||
})
|
||||
|
||||
it("collapses wrapped replacement span back to unique original single line", () => {
|
||||
//#given
|
||||
const lines = [
|
||||
"const request = buildRequest({ method: \"GET\", retries: 3 })",
|
||||
"const done = true",
|
||||
]
|
||||
|
||||
//#when
|
||||
const result = applyReplaceLines(
|
||||
lines,
|
||||
anchorFor(lines, 1),
|
||||
anchorFor(lines, 1),
|
||||
["const request = buildRequest({", "method: \"GET\", retries: 3 })"]
|
||||
)
|
||||
|
||||
//#then
|
||||
expect(result).toEqual([
|
||||
"const request = buildRequest({ method: \"GET\", retries: 3 })",
|
||||
"const done = true",
|
||||
])
|
||||
})
|
||||
|
||||
it("keeps wrapped replacement when canonical match is not unique in original lines", () => {
|
||||
//#given
|
||||
const lines = ["const query = a + b", "const query = a+b", "const done = true"]
|
||||
|
||||
//#when
|
||||
const result = applyReplaceLines(lines, anchorFor(lines, 1), anchorFor(lines, 2), ["const query = a +", "b"])
|
||||
|
||||
//#then
|
||||
expect(result).toEqual(["const query = a +", "b", "const done = true"])
|
||||
})
|
||||
|
||||
it("keeps wrapped replacement when same canonical candidate appears multiple times", () => {
|
||||
//#given
|
||||
const lines = ["const expression = alpha + beta + gamma", "const done = true"]
|
||||
|
||||
//#when
|
||||
const result = applyReplaceLines(lines, anchorFor(lines, 1), anchorFor(lines, 1), [
|
||||
"const expression = alpha +",
|
||||
"beta + gamma",
|
||||
"const expression = alpha +",
|
||||
"beta + gamma",
|
||||
])
|
||||
|
||||
//#then
|
||||
expect(result).toEqual([
|
||||
"const expression = alpha +",
|
||||
"beta + gamma",
|
||||
"const expression = alpha +",
|
||||
"beta + gamma",
|
||||
"const done = true",
|
||||
])
|
||||
})
|
||||
|
||||
it("keeps wrapped replacement when canonical match is shorter than threshold", () => {
|
||||
//#given
|
||||
const lines = ["a + b", "const done = true"]
|
||||
|
||||
//#when
|
||||
const result = applyReplaceLines(lines, anchorFor(lines, 1), anchorFor(lines, 1), ["a +", "b"])
|
||||
|
||||
//#then
|
||||
expect(result).toEqual(["a +", "b", "const done = true"])
|
||||
})
|
||||
|
||||
it("applies append and prepend operations", () => {
|
||||
//#given
|
||||
const content = "line 1\nline 2"
|
||||
|
||||
//#when
|
||||
const result = applyHashlineEdits(content, [
|
||||
{ type: "append", text: ["line 3"] },
|
||||
{ type: "prepend", text: ["line 0"] },
|
||||
])
|
||||
|
||||
//#then
|
||||
expect(result).toEqual("line 0\nline 1\nline 2\nline 3")
|
||||
})
|
||||
|
||||
it("appends to empty file without extra blank line", () => {
|
||||
//#given
|
||||
const lines = [""]
|
||||
|
||||
//#when
|
||||
const result = applyAppend(lines, ["line1"])
|
||||
|
||||
//#then
|
||||
expect(result).toEqual(["line1"])
|
||||
})
|
||||
|
||||
it("prepends to empty file without extra blank line", () => {
|
||||
//#given
|
||||
const lines = [""]
|
||||
|
||||
//#when
|
||||
const result = applyPrepend(lines, ["line1"])
|
||||
|
||||
//#then
|
||||
expect(result).toEqual(["line1"])
|
||||
})
|
||||
|
||||
it("autocorrects single-line merged replacement into original line count", () => {
|
||||
//#given
|
||||
const lines = ["const a = 1;", "const b = 2;"]
|
||||
|
||||
//#when
|
||||
const result = applyReplaceLines(
|
||||
lines,
|
||||
anchorFor(lines, 1),
|
||||
anchorFor(lines, 2),
|
||||
"const a = 10; const b = 20;"
|
||||
)
|
||||
|
||||
//#then
|
||||
expect(result).toEqual(["const a = 10;", "const b = 20;"])
|
||||
})
|
||||
})
|
||||
|
||||
@@ -1,13 +1,17 @@
|
||||
import { parseLineRef, validateLineRef, validateLineRefs } from "./validation"
|
||||
import { dedupeEdits } from "./edit-deduplication"
|
||||
import { collectLineRefs, getEditLineNumber } from "./edit-ordering"
|
||||
import type { HashlineEdit } from "./types"
|
||||
import {
|
||||
restoreLeadingIndent,
|
||||
stripInsertAnchorEcho,
|
||||
stripInsertBeforeEcho,
|
||||
stripInsertBoundaryEcho,
|
||||
stripRangeBoundaryEcho,
|
||||
toNewLines,
|
||||
} from "./edit-text-normalization"
|
||||
applyAppend,
|
||||
applyInsertAfter,
|
||||
applyInsertBefore,
|
||||
applyInsertBetween,
|
||||
applyPrepend,
|
||||
applyReplace,
|
||||
applyReplaceLines,
|
||||
applySetLine,
|
||||
} from "./edit-operation-primitives"
|
||||
import { validateLineRefs } from "./validation"
|
||||
|
||||
export interface HashlineApplyReport {
|
||||
content: string
|
||||
@@ -15,158 +19,6 @@ export interface HashlineApplyReport {
|
||||
deduplicatedEdits: number
|
||||
}
|
||||
|
||||
export function applySetLine(lines: string[], anchor: string, newText: string | string[]): string[] {
|
||||
validateLineRef(lines, anchor)
|
||||
const { line } = parseLineRef(anchor)
|
||||
const result = [...lines]
|
||||
const replacement = toNewLines(newText).map((entry, idx) => {
|
||||
if (idx !== 0) return entry
|
||||
return restoreLeadingIndent(lines[line - 1], entry)
|
||||
})
|
||||
result.splice(line - 1, 1, ...replacement)
|
||||
return result
|
||||
}
|
||||
|
||||
export function applyReplaceLines(
|
||||
lines: string[],
|
||||
startAnchor: string,
|
||||
endAnchor: string,
|
||||
newText: string | string[]
|
||||
): string[] {
|
||||
validateLineRef(lines, startAnchor)
|
||||
validateLineRef(lines, endAnchor)
|
||||
|
||||
const { line: startLine } = parseLineRef(startAnchor)
|
||||
const { line: endLine } = parseLineRef(endAnchor)
|
||||
|
||||
if (startLine > endLine) {
|
||||
throw new Error(
|
||||
`Invalid range: start line ${startLine} cannot be greater than end line ${endLine}`
|
||||
)
|
||||
}
|
||||
|
||||
const result = [...lines]
|
||||
const stripped = stripRangeBoundaryEcho(lines, startLine, endLine, toNewLines(newText))
|
||||
const restored = stripped.map((entry, idx) => {
|
||||
if (idx !== 0) return entry
|
||||
return restoreLeadingIndent(lines[startLine - 1], entry)
|
||||
})
|
||||
result.splice(startLine - 1, endLine - startLine + 1, ...restored)
|
||||
return result
|
||||
}
|
||||
|
||||
export function applyInsertAfter(lines: string[], anchor: string, text: string | string[]): string[] {
|
||||
validateLineRef(lines, anchor)
|
||||
const { line } = parseLineRef(anchor)
|
||||
const result = [...lines]
|
||||
const newLines = stripInsertAnchorEcho(lines[line - 1], toNewLines(text))
|
||||
if (newLines.length === 0) {
|
||||
throw new Error(`insert_after requires non-empty text for ${anchor}`)
|
||||
}
|
||||
result.splice(line, 0, ...newLines)
|
||||
return result
|
||||
}
|
||||
|
||||
export function applyInsertBefore(lines: string[], anchor: string, text: string | string[]): string[] {
|
||||
validateLineRef(lines, anchor)
|
||||
const { line } = parseLineRef(anchor)
|
||||
const result = [...lines]
|
||||
const newLines = stripInsertBeforeEcho(lines[line - 1], toNewLines(text))
|
||||
if (newLines.length === 0) {
|
||||
throw new Error(`insert_before requires non-empty text for ${anchor}`)
|
||||
}
|
||||
result.splice(line - 1, 0, ...newLines)
|
||||
return result
|
||||
}
|
||||
|
||||
export function applyInsertBetween(
|
||||
lines: string[],
|
||||
afterAnchor: string,
|
||||
beforeAnchor: string,
|
||||
text: string | string[]
|
||||
): string[] {
|
||||
validateLineRef(lines, afterAnchor)
|
||||
validateLineRef(lines, beforeAnchor)
|
||||
const { line: afterLine } = parseLineRef(afterAnchor)
|
||||
const { line: beforeLine } = parseLineRef(beforeAnchor)
|
||||
if (beforeLine <= afterLine) {
|
||||
throw new Error(`insert_between requires after_line (${afterLine}) < before_line (${beforeLine})`)
|
||||
}
|
||||
|
||||
const result = [...lines]
|
||||
const newLines = stripInsertBoundaryEcho(lines[afterLine - 1], lines[beforeLine - 1], toNewLines(text))
|
||||
if (newLines.length === 0) {
|
||||
throw new Error(`insert_between requires non-empty text for ${afterAnchor}..${beforeAnchor}`)
|
||||
}
|
||||
result.splice(beforeLine - 1, 0, ...newLines)
|
||||
return result
|
||||
}
|
||||
|
||||
export function applyReplace(content: string, oldText: string, newText: string | string[]): string {
|
||||
if (!content.includes(oldText)) {
|
||||
throw new Error(`Text not found: "${oldText}"`)
|
||||
}
|
||||
const replacement = Array.isArray(newText) ? newText.join("\n") : newText
|
||||
return content.replaceAll(oldText, replacement)
|
||||
}
|
||||
|
||||
function getEditLineNumber(edit: HashlineEdit): number {
|
||||
switch (edit.type) {
|
||||
case "set_line":
|
||||
return parseLineRef(edit.line).line
|
||||
case "replace_lines":
|
||||
return parseLineRef(edit.end_line).line
|
||||
case "insert_after":
|
||||
return parseLineRef(edit.line).line
|
||||
case "insert_before":
|
||||
return parseLineRef(edit.line).line
|
||||
case "insert_between":
|
||||
return parseLineRef(edit.before_line).line
|
||||
case "replace":
|
||||
return Number.NEGATIVE_INFINITY
|
||||
default:
|
||||
return Number.POSITIVE_INFINITY
|
||||
}
|
||||
}
|
||||
|
||||
function normalizeEditPayload(payload: string | string[]): string {
|
||||
return toNewLines(payload).join("\n")
|
||||
}
|
||||
|
||||
function dedupeEdits(edits: HashlineEdit[]): { edits: HashlineEdit[]; deduplicatedEdits: number } {
|
||||
const seen = new Set<string>()
|
||||
const deduped: HashlineEdit[] = []
|
||||
let deduplicatedEdits = 0
|
||||
|
||||
for (const edit of edits) {
|
||||
const key = (() => {
|
||||
switch (edit.type) {
|
||||
case "set_line":
|
||||
return `set_line|${edit.line}|${normalizeEditPayload(edit.text)}`
|
||||
case "replace_lines":
|
||||
return `replace_lines|${edit.start_line}|${edit.end_line}|${normalizeEditPayload(edit.text)}`
|
||||
case "insert_after":
|
||||
return `insert_after|${edit.line}|${normalizeEditPayload(edit.text)}`
|
||||
case "insert_before":
|
||||
return `insert_before|${edit.line}|${normalizeEditPayload(edit.text)}`
|
||||
case "insert_between":
|
||||
return `insert_between|${edit.after_line}|${edit.before_line}|${normalizeEditPayload(edit.text)}`
|
||||
case "replace":
|
||||
return `replace|${edit.old_text}|${normalizeEditPayload(edit.new_text)}`
|
||||
}
|
||||
})()
|
||||
|
||||
if (seen.has(key)) {
|
||||
deduplicatedEdits += 1
|
||||
continue
|
||||
}
|
||||
seen.add(key)
|
||||
deduped.push(edit)
|
||||
}
|
||||
|
||||
return { edits: deduped, deduplicatedEdits }
|
||||
}
|
||||
|
||||
export function applyHashlineEditsWithReport(content: string, edits: HashlineEdit[]): HashlineApplyReport {
|
||||
if (edits.length === 0) {
|
||||
return {
|
||||
@@ -182,40 +34,23 @@ export function applyHashlineEditsWithReport(content: string, edits: HashlineEdi
|
||||
let noopEdits = 0
|
||||
|
||||
let result = content
|
||||
let lines = result.split("\n")
|
||||
let lines = result.length === 0 ? [] : result.split("\n")
|
||||
|
||||
const refs = sortedEdits.flatMap((edit) => {
|
||||
switch (edit.type) {
|
||||
case "set_line":
|
||||
return [edit.line]
|
||||
case "replace_lines":
|
||||
return [edit.start_line, edit.end_line]
|
||||
case "insert_after":
|
||||
return [edit.line]
|
||||
case "insert_before":
|
||||
return [edit.line]
|
||||
case "insert_between":
|
||||
return [edit.after_line, edit.before_line]
|
||||
case "replace":
|
||||
return []
|
||||
default:
|
||||
return []
|
||||
}
|
||||
})
|
||||
const refs = collectLineRefs(sortedEdits)
|
||||
validateLineRefs(lines, refs)
|
||||
|
||||
for (const edit of sortedEdits) {
|
||||
switch (edit.type) {
|
||||
case "set_line": {
|
||||
lines = applySetLine(lines, edit.line, edit.text)
|
||||
lines = applySetLine(lines, edit.line, edit.text, { skipValidation: true })
|
||||
break
|
||||
}
|
||||
case "replace_lines": {
|
||||
lines = applyReplaceLines(lines, edit.start_line, edit.end_line, edit.text)
|
||||
lines = applyReplaceLines(lines, edit.start_line, edit.end_line, edit.text, { skipValidation: true })
|
||||
break
|
||||
}
|
||||
case "insert_after": {
|
||||
const next = applyInsertAfter(lines, edit.line, edit.text)
|
||||
const next = applyInsertAfter(lines, edit.line, edit.text, { skipValidation: true })
|
||||
if (next.join("\n") === lines.join("\n")) {
|
||||
noopEdits += 1
|
||||
break
|
||||
@@ -224,7 +59,7 @@ export function applyHashlineEditsWithReport(content: string, edits: HashlineEdi
|
||||
break
|
||||
}
|
||||
case "insert_before": {
|
||||
const next = applyInsertBefore(lines, edit.line, edit.text)
|
||||
const next = applyInsertBefore(lines, edit.line, edit.text, { skipValidation: true })
|
||||
if (next.join("\n") === lines.join("\n")) {
|
||||
noopEdits += 1
|
||||
break
|
||||
@@ -233,7 +68,25 @@ export function applyHashlineEditsWithReport(content: string, edits: HashlineEdi
|
||||
break
|
||||
}
|
||||
case "insert_between": {
|
||||
const next = applyInsertBetween(lines, edit.after_line, edit.before_line, edit.text)
|
||||
const next = applyInsertBetween(lines, edit.after_line, edit.before_line, edit.text, { skipValidation: true })
|
||||
if (next.join("\n") === lines.join("\n")) {
|
||||
noopEdits += 1
|
||||
break
|
||||
}
|
||||
lines = next
|
||||
break
|
||||
}
|
||||
case "append": {
|
||||
const next = applyAppend(lines, edit.text)
|
||||
if (next.join("\n") === lines.join("\n")) {
|
||||
noopEdits += 1
|
||||
break
|
||||
}
|
||||
lines = next
|
||||
break
|
||||
}
|
||||
case "prepend": {
|
||||
const next = applyPrepend(lines, edit.text)
|
||||
if (next.join("\n") === lines.join("\n")) {
|
||||
noopEdits += 1
|
||||
break
|
||||
@@ -243,11 +96,7 @@ export function applyHashlineEditsWithReport(content: string, edits: HashlineEdi
|
||||
}
|
||||
case "replace": {
|
||||
result = lines.join("\n")
|
||||
if (!result.includes(edit.old_text)) {
|
||||
throw new Error(`Text not found: "${edit.old_text}"`)
|
||||
}
|
||||
const replacement = Array.isArray(edit.new_text) ? edit.new_text.join("\n") : edit.new_text
|
||||
const replaced = result.replaceAll(edit.old_text, replacement)
|
||||
const replaced = applyReplace(result, edit.old_text, edit.new_text)
|
||||
if (replaced === result) {
|
||||
noopEdits += 1
|
||||
break
|
||||
@@ -269,3 +118,12 @@ export function applyHashlineEditsWithReport(content: string, edits: HashlineEdi
|
||||
export function applyHashlineEdits(content: string, edits: HashlineEdit[]): string {
|
||||
return applyHashlineEditsWithReport(content, edits).content
|
||||
}
|
||||
|
||||
export {
|
||||
applySetLine,
|
||||
applyReplaceLines,
|
||||
applyInsertAfter,
|
||||
applyInsertBefore,
|
||||
applyInsertBetween,
|
||||
applyReplace,
|
||||
} from "./edit-operation-primitives"
|
||||
|
||||
48
src/tools/hashline-edit/edit-ordering.ts
Normal file
48
src/tools/hashline-edit/edit-ordering.ts
Normal file
@@ -0,0 +1,48 @@
|
||||
import { parseLineRef } from "./validation"
|
||||
import type { HashlineEdit } from "./types"
|
||||
|
||||
export function getEditLineNumber(edit: HashlineEdit): number {
|
||||
switch (edit.type) {
|
||||
case "set_line":
|
||||
return parseLineRef(edit.line).line
|
||||
case "replace_lines":
|
||||
return parseLineRef(edit.end_line).line
|
||||
case "insert_after":
|
||||
return parseLineRef(edit.line).line
|
||||
case "insert_before":
|
||||
return parseLineRef(edit.line).line
|
||||
case "insert_between":
|
||||
return parseLineRef(edit.before_line).line
|
||||
case "append":
|
||||
return Number.NEGATIVE_INFINITY
|
||||
case "prepend":
|
||||
return Number.NEGATIVE_INFINITY
|
||||
case "replace":
|
||||
return Number.NEGATIVE_INFINITY
|
||||
default:
|
||||
return Number.POSITIVE_INFINITY
|
||||
}
|
||||
}
|
||||
|
||||
export function collectLineRefs(edits: HashlineEdit[]): string[] {
|
||||
return edits.flatMap((edit) => {
|
||||
switch (edit.type) {
|
||||
case "set_line":
|
||||
return [edit.line]
|
||||
case "replace_lines":
|
||||
return [edit.start_line, edit.end_line]
|
||||
case "insert_after":
|
||||
return [edit.line]
|
||||
case "insert_before":
|
||||
return [edit.line]
|
||||
case "insert_between":
|
||||
return [edit.after_line, edit.before_line]
|
||||
case "append":
|
||||
case "prepend":
|
||||
case "replace":
|
||||
return []
|
||||
default:
|
||||
return []
|
||||
}
|
||||
})
|
||||
}
|
||||
44
src/tools/hashline-edit/file-text-canonicalization.ts
Normal file
44
src/tools/hashline-edit/file-text-canonicalization.ts
Normal file
@@ -0,0 +1,44 @@
|
||||
export interface FileTextEnvelope {
|
||||
content: string
|
||||
hadBom: boolean
|
||||
lineEnding: "\n" | "\r\n"
|
||||
}
|
||||
|
||||
function detectLineEnding(content: string): "\n" | "\r\n" {
|
||||
const crlfIndex = content.indexOf("\r\n")
|
||||
const lfIndex = content.indexOf("\n")
|
||||
if (lfIndex === -1) return "\n"
|
||||
if (crlfIndex === -1) return "\n"
|
||||
return crlfIndex < lfIndex ? "\r\n" : "\n"
|
||||
}
|
||||
|
||||
function stripBom(content: string): { content: string; hadBom: boolean } {
|
||||
if (!content.startsWith("\uFEFF")) {
|
||||
return { content, hadBom: false }
|
||||
}
|
||||
return { content: content.slice(1), hadBom: true }
|
||||
}
|
||||
|
||||
function normalizeToLf(content: string): string {
|
||||
return content.replace(/\r\n/g, "\n").replace(/\r/g, "\n")
|
||||
}
|
||||
|
||||
function restoreLineEndings(content: string, lineEnding: "\n" | "\r\n"): string {
|
||||
if (lineEnding === "\n") return content
|
||||
return content.replace(/\n/g, "\r\n")
|
||||
}
|
||||
|
||||
export function canonicalizeFileText(content: string): FileTextEnvelope {
|
||||
const stripped = stripBom(content)
|
||||
return {
|
||||
content: normalizeToLf(stripped.content),
|
||||
hadBom: stripped.hadBom,
|
||||
lineEnding: detectLineEnding(stripped.content),
|
||||
}
|
||||
}
|
||||
|
||||
export function restoreFileText(content: string, envelope: FileTextEnvelope): string {
|
||||
const withLineEnding = restoreLineEndings(content, envelope.lineEnding)
|
||||
if (!envelope.hadBom) return withLineEnding
|
||||
return `\uFEFF${withLineEnding}`
|
||||
}
|
||||
31
src/tools/hashline-edit/hashline-edit-diff.ts
Normal file
31
src/tools/hashline-edit/hashline-edit-diff.ts
Normal file
@@ -0,0 +1,31 @@
|
||||
import { computeLineHash } from "./hash-computation"
|
||||
|
||||
export function generateHashlineDiff(oldContent: string, newContent: string, filePath: string): string {
|
||||
const oldLines = oldContent.split("\n")
|
||||
const newLines = newContent.split("\n")
|
||||
|
||||
let diff = `--- ${filePath}\n+++ ${filePath}\n`
|
||||
const maxLines = Math.max(oldLines.length, newLines.length)
|
||||
|
||||
for (let i = 0; i < maxLines; i += 1) {
|
||||
const oldLine = oldLines[i] ?? ""
|
||||
const newLine = newLines[i] ?? ""
|
||||
const lineNum = i + 1
|
||||
const hash = computeLineHash(lineNum, newLine)
|
||||
|
||||
if (i >= oldLines.length) {
|
||||
diff += `+ ${lineNum}#${hash}:${newLine}\n`
|
||||
continue
|
||||
}
|
||||
if (i >= newLines.length) {
|
||||
diff += `- ${lineNum}# :${oldLine}\n`
|
||||
continue
|
||||
}
|
||||
if (oldLine !== newLine) {
|
||||
diff += `- ${lineNum}# :${oldLine}\n`
|
||||
diff += `+ ${lineNum}#${hash}:${newLine}\n`
|
||||
}
|
||||
}
|
||||
|
||||
return diff
|
||||
}
|
||||
146
src/tools/hashline-edit/hashline-edit-executor.ts
Normal file
146
src/tools/hashline-edit/hashline-edit-executor.ts
Normal file
@@ -0,0 +1,146 @@
|
||||
import type { ToolContext } from "@opencode-ai/plugin/tool"
|
||||
import { storeToolMetadata } from "../../features/tool-metadata-store"
|
||||
import { applyHashlineEditsWithReport } from "./edit-operations"
|
||||
import { countLineDiffs, generateUnifiedDiff, toHashlineContent } from "./diff-utils"
|
||||
import { canonicalizeFileText, restoreFileText } from "./file-text-canonicalization"
|
||||
import { generateHashlineDiff } from "./hashline-edit-diff"
|
||||
import type { HashlineEdit } from "./types"
|
||||
|
||||
interface HashlineEditArgs {
|
||||
filePath: string
|
||||
edits: HashlineEdit[]
|
||||
delete?: boolean
|
||||
rename?: string
|
||||
}
|
||||
|
||||
type ToolContextWithCallID = ToolContext & {
|
||||
callID?: string
|
||||
callId?: string
|
||||
call_id?: string
|
||||
}
|
||||
|
||||
type ToolContextWithMetadata = ToolContextWithCallID & {
|
||||
metadata?: (value: unknown) => void
|
||||
}
|
||||
|
||||
function resolveToolCallID(ctx: ToolContextWithCallID): string | undefined {
|
||||
if (typeof ctx.callID === "string" && ctx.callID.trim() !== "") return ctx.callID
|
||||
if (typeof ctx.callId === "string" && ctx.callId.trim() !== "") return ctx.callId
|
||||
if (typeof ctx.call_id === "string" && ctx.call_id.trim() !== "") return ctx.call_id
|
||||
return undefined
|
||||
}
|
||||
|
||||
function canCreateFromMissingFile(edits: HashlineEdit[]): boolean {
|
||||
if (edits.length === 0) return false
|
||||
return edits.every((edit) => edit.type === "append" || edit.type === "prepend")
|
||||
}
|
||||
|
||||
function buildSuccessMeta(
|
||||
effectivePath: string,
|
||||
beforeContent: string,
|
||||
afterContent: string,
|
||||
noopEdits: number,
|
||||
deduplicatedEdits: number
|
||||
) {
|
||||
const unifiedDiff = generateUnifiedDiff(beforeContent, afterContent, effectivePath)
|
||||
const { additions, deletions } = countLineDiffs(beforeContent, afterContent)
|
||||
|
||||
return {
|
||||
title: effectivePath,
|
||||
metadata: {
|
||||
filePath: effectivePath,
|
||||
path: effectivePath,
|
||||
file: effectivePath,
|
||||
diff: unifiedDiff,
|
||||
noopEdits,
|
||||
deduplicatedEdits,
|
||||
filediff: {
|
||||
file: effectivePath,
|
||||
path: effectivePath,
|
||||
filePath: effectivePath,
|
||||
before: beforeContent,
|
||||
after: afterContent,
|
||||
additions,
|
||||
deletions,
|
||||
},
|
||||
},
|
||||
}
|
||||
}
|
||||
|
||||
export async function executeHashlineEditTool(args: HashlineEditArgs, context: ToolContext): Promise<string> {
|
||||
try {
|
||||
const metadataContext = context as ToolContextWithMetadata
|
||||
const filePath = args.filePath
|
||||
const { edits, delete: deleteMode, rename } = args
|
||||
|
||||
if (deleteMode && rename) {
|
||||
return "Error: delete and rename cannot be used together"
|
||||
}
|
||||
if (!deleteMode && (!edits || !Array.isArray(edits) || edits.length === 0)) {
|
||||
return "Error: edits parameter must be a non-empty array"
|
||||
}
|
||||
if (deleteMode && edits.length > 0) {
|
||||
return "Error: delete mode requires edits to be an empty array"
|
||||
}
|
||||
|
||||
const file = Bun.file(filePath)
|
||||
const exists = await file.exists()
|
||||
if (!exists && !deleteMode && !canCreateFromMissingFile(edits)) {
|
||||
return `Error: File not found: ${filePath}`
|
||||
}
|
||||
|
||||
if (deleteMode) {
|
||||
if (!exists) return `Error: File not found: ${filePath}`
|
||||
await Bun.file(filePath).delete()
|
||||
return `Successfully deleted ${filePath}`
|
||||
}
|
||||
|
||||
const rawOldContent = exists ? Buffer.from(await file.arrayBuffer()).toString("utf8") : ""
|
||||
const oldEnvelope = canonicalizeFileText(rawOldContent)
|
||||
|
||||
const applyResult = applyHashlineEditsWithReport(oldEnvelope.content, edits)
|
||||
const canonicalNewContent = applyResult.content
|
||||
const writeContent = restoreFileText(canonicalNewContent, oldEnvelope)
|
||||
|
||||
await Bun.write(filePath, writeContent)
|
||||
|
||||
if (rename && rename !== filePath) {
|
||||
await Bun.write(rename, writeContent)
|
||||
await Bun.file(filePath).delete()
|
||||
}
|
||||
|
||||
const effectivePath = rename && rename !== filePath ? rename : filePath
|
||||
const diff = generateHashlineDiff(oldEnvelope.content, canonicalNewContent, effectivePath)
|
||||
const newHashlined = toHashlineContent(canonicalNewContent)
|
||||
const meta = buildSuccessMeta(
|
||||
effectivePath,
|
||||
oldEnvelope.content,
|
||||
canonicalNewContent,
|
||||
applyResult.noopEdits,
|
||||
applyResult.deduplicatedEdits
|
||||
)
|
||||
|
||||
if (typeof metadataContext.metadata === "function") {
|
||||
metadataContext.metadata(meta)
|
||||
}
|
||||
|
||||
const callID = resolveToolCallID(metadataContext)
|
||||
if (callID) {
|
||||
storeToolMetadata(context.sessionID, callID, meta)
|
||||
}
|
||||
|
||||
return `Successfully applied ${edits.length} edit(s) to ${effectivePath}
|
||||
No-op edits: ${applyResult.noopEdits}, deduplicated edits: ${applyResult.deduplicatedEdits}
|
||||
|
||||
${diff}
|
||||
|
||||
Updated file (LINE#ID:content):
|
||||
${newHashlined}`
|
||||
} catch (error) {
|
||||
const message = error instanceof Error ? error.message : String(error)
|
||||
if (message.toLowerCase().includes("hash")) {
|
||||
return `Error: hash mismatch - ${message}\nTip: reuse LINE#ID entries from the latest read/edit output, or batch related edits in one call.`
|
||||
}
|
||||
return `Error: ${message}`
|
||||
}
|
||||
}
|
||||
@@ -7,7 +7,17 @@ export {
|
||||
} from "./hash-computation"
|
||||
export { parseLineRef, validateLineRef } from "./validation"
|
||||
export type { LineRef } from "./validation"
|
||||
export type { SetLine, ReplaceLines, InsertAfter, InsertBefore, InsertBetween, Replace, HashlineEdit } from "./types"
|
||||
export type {
|
||||
SetLine,
|
||||
ReplaceLines,
|
||||
InsertAfter,
|
||||
InsertBefore,
|
||||
InsertBetween,
|
||||
Replace,
|
||||
Append,
|
||||
Prepend,
|
||||
HashlineEdit,
|
||||
} from "./types"
|
||||
export { NIBBLE_STR, HASHLINE_DICT, HASHLINE_REF_PATTERN, HASHLINE_OUTPUT_PATTERN } from "./constants"
|
||||
export {
|
||||
applyHashlineEdits,
|
||||
|
||||
@@ -1,34 +1,70 @@
|
||||
export const HASHLINE_EDIT_DESCRIPTION = `Edit files using LINE#ID format for precise, safe modifications.
|
||||
|
||||
WORKFLOW:
|
||||
1. Read the file and copy exact LINE#ID anchors.
|
||||
2. Submit one edit call with all related operations for that file.
|
||||
3. If more edits are needed after success, use the latest anchors from read/edit output.
|
||||
4. Use anchors as "LINE#ID" only (never include trailing ":content").
|
||||
1. Read target file/range and copy exact LINE#ID tags.
|
||||
2. Pick the smallest operation per logical mutation site.
|
||||
3. Submit one edit call per file with all related operations.
|
||||
4. If same file needs another call, re-read first.
|
||||
5. Use anchors as "LINE#ID" only (never include trailing ":content").
|
||||
|
||||
VALIDATION:
|
||||
- Payload shape: { "filePath": string, "edits": [...], "delete"?: boolean, "rename"?: string }
|
||||
- Each edit must be one of: set_line, replace_lines, insert_after, insert_before, insert_between, replace
|
||||
- text/new_text must contain plain replacement text only (no LINE#ID prefixes, no diff + markers)
|
||||
Payload shape: { "filePath": string, "edits": [...], "delete"?: boolean, "rename"?: string }
|
||||
Each edit must be one of: set_line, replace_lines, insert_after, insert_before, insert_between, replace, append, prepend
|
||||
text/new_text must contain plain replacement text only (no LINE#ID prefixes, no diff + markers)
|
||||
CRITICAL: all operations validate against the same pre-edit file snapshot and apply bottom-up. Refs/tags are interpreted against the last-read version of the file.
|
||||
|
||||
LINE#ID FORMAT (CRITICAL - READ CAREFULLY):
|
||||
Each line reference must be in "LINE#ID" format where:
|
||||
- LINE: 1-based line number
|
||||
- ID: Two CID letters from the set ZPMQVRWSNKTXJBYH
|
||||
|
||||
OPERATION TYPES:
|
||||
1. set_line
|
||||
2. replace_lines
|
||||
3. insert_after
|
||||
4. insert_before
|
||||
5. insert_between
|
||||
6. replace
|
||||
LINE#ID FORMAT (CRITICAL):
|
||||
Each line reference must be in "LINE#ID" format where:
|
||||
LINE: 1-based line number
|
||||
ID: Two CID letters from the set ZPMQVRWSNKTXJBYH
|
||||
|
||||
FILE MODES:
|
||||
- delete=true deletes file and requires edits=[] with no rename
|
||||
- rename moves final content to a new path and removes old path
|
||||
delete=true deletes file and requires edits=[] with no rename
|
||||
rename moves final content to a new path and removes old path
|
||||
|
||||
CONTENT FORMAT:
|
||||
- text/new_text can be a string (single line) or string[] (multi-line, preferred).
|
||||
- If you pass a multi-line string, it is split by real newline characters.
|
||||
- Literal "\\n" is preserved as text.`
|
||||
text/new_text can be a string (single line) or string[] (multi-line, preferred).
|
||||
If you pass a multi-line string, it is split by real newline characters.
|
||||
Literal "\\n" is preserved as text.
|
||||
|
||||
FILE CREATION:
|
||||
append: adds content at EOF. If file does not exist, creates it.
|
||||
prepend: adds content at BOF. If file does not exist, creates it.
|
||||
CRITICAL: append/prepend are the only operations that work without an existing file.
|
||||
|
||||
OPERATION CHOICE:
|
||||
One line wrong -> set_line
|
||||
Adjacent block rewrite or swap/move -> replace_lines (prefer one range op over many single-line ops)
|
||||
Both boundaries known -> insert_between (ALWAYS prefer over insert_after/insert_before)
|
||||
One boundary known -> insert_after or insert_before
|
||||
New file or EOF/BOF addition -> append or prepend
|
||||
No LINE#ID available -> replace (last resort)
|
||||
|
||||
RULES (CRITICAL):
|
||||
1. Minimize scope: one logical mutation site per operation.
|
||||
2. Preserve formatting: keep indentation, punctuation, line breaks, trailing commas, brace style.
|
||||
3. Prefer insertion over neighbor rewrites: anchor to structural boundaries (}, ], },), not interior property lines.
|
||||
4. No no-ops: replacement content must differ from current content.
|
||||
5. Touch only requested code: avoid incidental edits.
|
||||
6. Use exact current tokens: NEVER rewrite approximately.
|
||||
7. For swaps/moves: prefer one range operation over multiple single-line operations.
|
||||
8. Output tool calls only; no prose or commentary between them.
|
||||
|
||||
TAG CHOICE (ALWAYS):
|
||||
- Copy tags exactly from read output or >>> mismatch output.
|
||||
- NEVER guess tags.
|
||||
- Prefer insert_between over insert_after/insert_before when both boundaries are known.
|
||||
- Anchor to structural lines (function/class/brace), NEVER blank lines.
|
||||
- Anti-pattern warning: blank/whitespace anchors are fragile.
|
||||
- Re-read after each successful edit call before issuing another on the same file.
|
||||
|
||||
AUTOCORRECT (built-in - you do NOT need to handle these):
|
||||
Merged lines are auto-expanded back to original line count.
|
||||
Indentation is auto-restored from original lines.
|
||||
BOM and CRLF line endings are preserved automatically.
|
||||
Hashline prefixes and diff markers in text are auto-stripped.
|
||||
|
||||
RECOVERY (when >>> mismatch error appears):
|
||||
Copy the updated LINE#ID tags shown in the error output directly.
|
||||
Re-read only if the needed tags are missing from the error snippet.
|
||||
ALWAYS batch all edits for one file in a single call.`
|
||||
|
||||
@@ -2,6 +2,7 @@ import { describe, it, expect, beforeEach, afterEach, mock } from "bun:test"
|
||||
import type { ToolContext } from "@opencode-ai/plugin/tool"
|
||||
import { createHashlineEditTool } from "./tools"
|
||||
import { computeLineHash } from "./hash-computation"
|
||||
import { canonicalizeFileText } from "./file-text-canonicalization"
|
||||
import * as fs from "node:fs"
|
||||
import * as os from "node:os"
|
||||
import * as path from "node:path"
|
||||
@@ -216,4 +217,72 @@ describe("createHashlineEditTool", () => {
|
||||
expect(fs.existsSync(filePath)).toBe(false)
|
||||
expect(result).toContain("Successfully deleted")
|
||||
})
|
||||
|
||||
it("creates missing file with append and prepend", async () => {
|
||||
//#given
|
||||
const filePath = path.join(tempDir, "created.txt")
|
||||
|
||||
//#when
|
||||
const result = await tool.execute(
|
||||
{
|
||||
filePath,
|
||||
edits: [
|
||||
{ type: "append", text: ["line2"] },
|
||||
{ type: "prepend", text: ["line1"] },
|
||||
],
|
||||
},
|
||||
createMockContext(),
|
||||
)
|
||||
|
||||
//#then
|
||||
expect(fs.existsSync(filePath)).toBe(true)
|
||||
expect(fs.readFileSync(filePath, "utf-8")).toBe("line1\nline2")
|
||||
expect(result).toContain("Successfully applied 2 edit(s)")
|
||||
})
|
||||
|
||||
it("preserves BOM and CRLF through hashline_edit", async () => {
|
||||
//#given
|
||||
const filePath = path.join(tempDir, "crlf-bom.txt")
|
||||
const bomCrLf = "\uFEFFline1\r\nline2\r\n"
|
||||
fs.writeFileSync(filePath, bomCrLf)
|
||||
const line2Hash = computeLineHash(2, "line2")
|
||||
|
||||
//#when
|
||||
await tool.execute(
|
||||
{
|
||||
filePath,
|
||||
edits: [{ type: "set_line", line: `2#${line2Hash}`, text: "line2-updated" }],
|
||||
},
|
||||
createMockContext(),
|
||||
)
|
||||
|
||||
//#then
|
||||
const bytes = fs.readFileSync(filePath)
|
||||
expect(bytes[0]).toBe(0xef)
|
||||
expect(bytes[1]).toBe(0xbb)
|
||||
expect(bytes[2]).toBe(0xbf)
|
||||
expect(bytes.toString("utf-8")).toBe("\uFEFFline1\r\nline2-updated\r\n")
|
||||
})
|
||||
|
||||
it("detects LF as line ending when LF appears before CRLF", () => {
|
||||
//#given
|
||||
const content = "line1\nline2\r\nline3"
|
||||
|
||||
//#when
|
||||
const envelope = canonicalizeFileText(content)
|
||||
|
||||
//#then
|
||||
expect(envelope.lineEnding).toBe("\n")
|
||||
})
|
||||
|
||||
it("detects CRLF as line ending when CRLF appears before LF", () => {
|
||||
//#given
|
||||
const content = "line1\r\nline2\nline3"
|
||||
|
||||
//#when
|
||||
const envelope = canonicalizeFileText(content)
|
||||
|
||||
//#then
|
||||
expect(envelope.lineEnding).toBe("\r\n")
|
||||
})
|
||||
})
|
||||
|
||||
@@ -1,9 +1,6 @@
|
||||
import { tool, type ToolContext, type ToolDefinition } from "@opencode-ai/plugin/tool"
|
||||
import { storeToolMetadata } from "../../features/tool-metadata-store"
|
||||
import type { HashlineEdit } from "./types"
|
||||
import { applyHashlineEditsWithReport } from "./edit-operations"
|
||||
import { computeLineHash } from "./hash-computation"
|
||||
import { toHashlineContent, generateUnifiedDiff, countLineDiffs } from "./diff-utils"
|
||||
import { executeHashlineEditTool } from "./hashline-edit-executor"
|
||||
import { HASHLINE_EDIT_DESCRIPTION } from "./tool-description"
|
||||
|
||||
interface HashlineEditArgs {
|
||||
@@ -13,49 +10,6 @@ interface HashlineEditArgs {
|
||||
rename?: string
|
||||
}
|
||||
|
||||
type ToolContextWithCallID = ToolContext & {
|
||||
callID?: string
|
||||
callId?: string
|
||||
call_id?: string
|
||||
}
|
||||
|
||||
type ToolContextWithMetadata = ToolContextWithCallID & {
|
||||
metadata?: (value: unknown) => void
|
||||
}
|
||||
|
||||
// Resolve the tool-call identifier from whichever field casing the host
// runtime provides. Hosts disagree on the spelling (callID / callId /
// call_id), so each is checked in priority order; blank or whitespace-only
// values are treated as absent and fall through to the next candidate.
function resolveToolCallID(ctx: ToolContextWithCallID): string | undefined {
  if (typeof ctx.callID === "string" && ctx.callID.trim() !== "") return ctx.callID
  if (typeof ctx.callId === "string" && ctx.callId.trim() !== "") return ctx.callId
  if (typeof ctx.call_id === "string" && ctx.call_id.trim() !== "") return ctx.call_id
  return undefined
}
|
||||
|
||||
function generateDiff(oldContent: string, newContent: string, filePath: string): string {
|
||||
const oldLines = oldContent.split("\n")
|
||||
const newLines = newContent.split("\n")
|
||||
|
||||
let diff = `--- ${filePath}\n+++ ${filePath}\n`
|
||||
|
||||
const maxLines = Math.max(oldLines.length, newLines.length)
|
||||
for (let i = 0; i < maxLines; i++) {
|
||||
const oldLine = oldLines[i] ?? ""
|
||||
const newLine = newLines[i] ?? ""
|
||||
const lineNum = i + 1
|
||||
const hash = computeLineHash(lineNum, newLine)
|
||||
|
||||
if (i >= oldLines.length) {
|
||||
diff += `+ ${lineNum}#${hash}:${newLine}\n`
|
||||
} else if (i >= newLines.length) {
|
||||
diff += `- ${lineNum}# :${oldLine}\n`
|
||||
} else if (oldLine !== newLine) {
|
||||
diff += `- ${lineNum}# :${oldLine}\n`
|
||||
diff += `+ ${lineNum}#${hash}:${newLine}\n`
|
||||
}
|
||||
}
|
||||
|
||||
return diff
|
||||
}
|
||||
|
||||
export function createHashlineEditTool(): ToolDefinition {
|
||||
return tool({
|
||||
description: HASHLINE_EDIT_DESCRIPTION,
|
||||
@@ -110,101 +64,22 @@ export function createHashlineEditTool(): ToolDefinition {
|
||||
.union([tool.schema.string(), tool.schema.array(tool.schema.string())])
|
||||
.describe("Replacement text (string or string[] for multiline)"),
|
||||
}),
|
||||
tool.schema.object({
|
||||
type: tool.schema.literal("append"),
|
||||
text: tool.schema
|
||||
.union([tool.schema.string(), tool.schema.array(tool.schema.string())])
|
||||
.describe("Content to append at EOF; also creates missing file"),
|
||||
}),
|
||||
tool.schema.object({
|
||||
type: tool.schema.literal("prepend"),
|
||||
text: tool.schema
|
||||
.union([tool.schema.string(), tool.schema.array(tool.schema.string())])
|
||||
.describe("Content to prepend at BOF; also creates missing file"),
|
||||
}),
|
||||
])
|
||||
)
|
||||
.describe("Array of edit operations to apply (empty when delete=true)"),
|
||||
},
|
||||
/**
 * Hashline edit executor: applies an array of edit operations to a file,
 * or deletes/renames the file as a whole. Always resolves to a
 * human-readable string — failures are reported as "Error: ..." strings
 * rather than thrown, so the tool layer never sees an exception.
 */
execute: async (args: HashlineEditArgs, context: ToolContext) => {
  try {
    const metadataContext = context as ToolContextWithMetadata
    const filePath = args.filePath
    const { edits, delete: deleteMode, rename } = args

    // delete and rename are mutually exclusive whole-file operations.
    if (deleteMode && rename) {
      return "Error: delete and rename cannot be used together"
    }

    // Normal edit mode requires at least one edit operation.
    if (!deleteMode && (!edits || !Array.isArray(edits) || edits.length === 0)) {
      return "Error: edits parameter must be a non-empty array"
    }
    // FIX: guard with Array.isArray before reading .length — the validation
    // above is skipped in delete mode, so `edits` may be undefined here and
    // `edits.length` would throw (surfacing as a confusing generic error).
    if (deleteMode && Array.isArray(edits) && edits.length > 0) {
      return "Error: delete mode requires edits to be an empty array"
    }

    const file = Bun.file(filePath)
    const exists = await file.exists()
    if (!exists) {
      return `Error: File not found: ${filePath}`
    }

    if (deleteMode) {
      await Bun.file(filePath).delete()
      return `Successfully deleted ${filePath}`
    }

    // Apply the edits in memory, then persist the result.
    const oldContent = await file.text()
    const applyResult = applyHashlineEditsWithReport(oldContent, edits)
    const newContent = applyResult.content

    await Bun.write(filePath, newContent)

    // Rename: write the edited content at the target path, then remove the
    // original. No-op when the target equals the source path.
    if (rename && rename !== filePath) {
      await Bun.write(rename, newContent)
      await Bun.file(filePath).delete()
    }

    const effectivePath = rename && rename !== filePath ? rename : filePath

    const diff = generateDiff(oldContent, newContent, effectivePath)
    const newHashlined = toHashlineContent(newContent)

    const unifiedDiff = generateUnifiedDiff(oldContent, newContent, effectivePath)
    const { additions, deletions } = countLineDiffs(oldContent, newContent)

    // Metadata payload for UI consumers. The path is deliberately duplicated
    // under filePath/path/file — presumably different consumers read
    // different keys; TODO(review) confirm which aliases are still needed.
    const meta = {
      title: effectivePath,
      metadata: {
        filePath: effectivePath,
        path: effectivePath,
        file: effectivePath,
        diff: unifiedDiff,
        noopEdits: applyResult.noopEdits,
        deduplicatedEdits: applyResult.deduplicatedEdits,
        filediff: {
          file: effectivePath,
          path: effectivePath,
          filePath: effectivePath,
          before: oldContent,
          after: newContent,
          additions,
          deletions,
        },
      },
    }

    // Report metadata through the context callback when available...
    if (typeof metadataContext.metadata === "function") {
      metadataContext.metadata(meta)
    }

    // ...and persist it against the tool call when an ID can be resolved.
    const callID = resolveToolCallID(metadataContext)
    if (callID) {
      storeToolMetadata(context.sessionID, callID, meta)
    }

    return `Successfully applied ${edits.length} edit(s) to ${effectivePath}
No-op edits: ${applyResult.noopEdits}, deduplicated edits: ${applyResult.deduplicatedEdits}

${diff}

Updated file (LINE#ID:content):
${newHashlined}`
  } catch (error) {
    const message = error instanceof Error ? error.message : String(error)
    // Hash mismatches get an actionable recovery tip appended.
    if (message.toLowerCase().includes("hash")) {
      return `Error: hash mismatch - ${message}\nTip: reuse LINE#ID entries from the latest read/edit output, or batch related edits in one call.`
    }
    return `Error: ${message}`
  }
},
|
||||
execute: async (args: HashlineEditArgs, context: ToolContext) => executeHashlineEditTool(args, context),
|
||||
})
|
||||
}
|
||||
|
||||
@@ -36,4 +36,22 @@ export interface Replace {
|
||||
new_text: string | string[]
|
||||
}
|
||||
|
||||
export type HashlineEdit = SetLine | ReplaceLines | InsertAfter | InsertBefore | InsertBetween | Replace
|
||||
export interface Append {
|
||||
type: "append"
|
||||
text: string | string[]
|
||||
}
|
||||
|
||||
export interface Prepend {
|
||||
type: "prepend"
|
||||
text: string | string[]
|
||||
}
|
||||
|
||||
export type HashlineEdit =
|
||||
| SetLine
|
||||
| ReplaceLines
|
||||
| InsertAfter
|
||||
| InsertBefore
|
||||
| InsertBetween
|
||||
| Replace
|
||||
| Append
|
||||
| Prepend
|
||||
|
||||
32
src/tools/shared/semaphore.ts
Normal file
32
src/tools/shared/semaphore.ts
Normal file
@@ -0,0 +1,32 @@
|
||||
/**
|
||||
* Simple counting semaphore to limit concurrent process execution.
|
||||
* Used to prevent multiple ripgrep processes from saturating CPU.
|
||||
*/
|
||||
export class Semaphore {
|
||||
private queue: (() => void)[] = []
|
||||
private running = 0
|
||||
|
||||
constructor(private readonly max: number) {}
|
||||
|
||||
async acquire(): Promise<void> {
|
||||
if (this.running < this.max) {
|
||||
this.running++
|
||||
return
|
||||
}
|
||||
return new Promise<void>((resolve) => {
|
||||
this.queue.push(() => {
|
||||
this.running++
|
||||
resolve()
|
||||
})
|
||||
})
|
||||
}
|
||||
|
||||
release(): void {
|
||||
this.running--
|
||||
const next = this.queue.shift()
|
||||
if (next) next()
|
||||
}
|
||||
}
|
||||
|
||||
/** Global semaphore limiting concurrent ripgrep processes to 2 */
|
||||
export const rgSemaphore = new Semaphore(2)
|
||||
Reference in New Issue
Block a user