Enhance CLI tool with usage rendering and indented output formatting. Update user prompt for improved clarity and adjust verbosity settings. Modify file listing to return JSON strings for consistent output.
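
For orientation, the new printIndented helper in cli.js below prefixes every line of its stringified arguments with a fixed number of spaces. A minimal standalone sketch of that behavior (the sample call and its arguments are invented, not part of the commit):

// Sketch only: mirrors the indentation logic introduced in cli.js below.
function printIndented(indentNum, ...args) {
  const indent = ' '.repeat(indentNum);
  const output = args
    .map(arg => (typeof arg === 'string' ? arg : JSON.stringify(arg, null, 2)))
    .join(' ');
  console.log(output.split('\n').map(line => indent + line).join('\n'));
}

printIndented(4, 'Result:', { files: ['index.html'] });
// Every output line gets a four-space prefix:
//     Result: {
//       "files": [
//         "index.html"
//       ]
//     }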

sebseb7
2025-08-11 19:51:06 +02:00
parent f21aa45065
commit 31b8b5c719
3 changed files with 101 additions and 35 deletions

cli.js

@@ -5,6 +5,29 @@ import OpenAI from 'openai';
import { promises as fs } from "node:fs";
import { fileURLToPath } from "node:url";
import path from "node:path";
import { resourceUsage } from 'node:process';
function renderUsage(usage) {
const inputTokens = usage.input_tokens - usage.input_tokens_details.cached_tokens;
const cacheTokens = usage.input_tokens_details.cached_tokens;
const outputToken = usage.output_tokens;
console.log('renderUsage', inputTokens, cacheTokens, outputToken);
}
function printIndented(indentNum, ...args) {
const indent = ' '.repeat(indentNum);
const output = args.map(arg => {
if (typeof arg === 'string') return arg;
try {
return JSON.stringify(arg, null, 2);
} catch {
return String(arg);
}
}).join(' ');
// Indent every line
console.log(output.split('\n').map(line => indent + line).join('\n'));
}
const __dirname = path.dirname(fileURLToPath(import.meta.url));
@@ -25,12 +48,29 @@ async function loadTools() {
streamOnce(new OpenAI({ apiKey: process.env.OPENAI_API_KEY }), 'Zeig mir die Dateiein in / und lege index.html an mit dummydaten, kurz');
let counter = 0;
async function streamOnce(openai, userText) {
const toolsByFile = await loadTools();
let previousResponseId;
let input = [
{ "role": "developer", "content": [ {"type": "input_text","text": '' }] },
{ "role": "developer", "content": [ {"type": "input_text","text": `You are an interactive CLI AI assistant. Follow the user's instructions.
If a tool is available and relevant, plan to use it.
Be explicit when information is undefined.
Do not silently fall back: surface errors.
Prefer concise answers.
Developer rules:
- Null tells the truth. If data is missing/undefined, say so; do not invent values.
- In development, never hide errors; include warnings if using fallbacks.
Behavior:
- Answer succinctly.
- Ask for clarification when the user input is ambiguous.
- Output plain text suitable for a terminal.
` }] },
{"role": "user", "content": [ { "type": "input_text", "text": userText } ]},
]
@@ -40,21 +80,23 @@ async function streamOnce(openai, userText) {
const call = {
model: 'gpt-5-mini',
input: input,
text: { format: { type: 'text' }, verbosity: 'medium' },
text: { format: { type: 'text' }, verbosity: 'low' },
reasoning: { effort: 'low', summary: 'detailed' },
tools: Object.values(toolsByFile).map(t => t.def),
store: true,
}
if(previousResponseId) call.previous_response_id = previousResponseId;
console.log("------NEW OPENAI CALL--------------"
,"\n",counter++,"\n",'----INPUT-----------------'
,"\n",call.input.map(i => JSON.stringify(i)),"\n",
'--------CALL-------------',"\n");
const stream = await openai.responses.stream(call);
stream.on('response.created', (event) => {
if(!previousResponseId){
previousResponseId = event.response.id;
}
previousResponseId = event.response.id;
});
stream.on('response.reasoning_summary_text.delta', (event) => {
process.stdout.write(event.delta);
////process.stdout.write(event.delta);
});
stream.on('response.reasoning_summary_text.done', () => {
process.stdout.write('\n');
@@ -62,7 +104,7 @@ async function streamOnce(openai, userText) {
});
stream.on('response.output_text.delta', (event) => {
process.stdout.write(event.delta);
////process.stdout.write(event.delta);
});
@@ -72,7 +114,7 @@ async function streamOnce(openai, userText) {
}
});
stream.on('response.function_call_arguments.delta', (event) => {
process.stdout.write(event.delta);
////process.stdout.write(event.delta);
});
const functionCalls = [];
@@ -93,7 +135,10 @@ async function streamOnce(openai, userText) {
});
stream.on('response.completed', async (event) => {
//log usage & print messages to user
printIndented(10, 'response.completed',
renderUsage(event.response.usage),
"Result:",event.response.output.filter(i => i.type === 'message').map(i => i.content[0].text)
);
});
await Array.fromAsync(stream);
@@ -107,7 +152,7 @@ async function streamOnce(openai, userText) {
call_id: call.id,
output: JSON.stringify(result),
})
console.log('function call result:', call,result);
printIndented(10,'function call result:',result);
} catch (err) {
console.error('Error in function call:', call.name, err);
}
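
The renderUsage helper above splits the Responses API usage object into freshly billed input tokens, cached input tokens, and output tokens. A worked sketch with invented numbers (only the field names come from the diff above):

// Illustrative usage object; the token counts are made up.
const usage = {
  input_tokens: 1200,
  input_tokens_details: { cached_tokens: 800 },
  output_tokens: 150,
};
const freshInput = usage.input_tokens - usage.input_tokens_details.cached_tokens; // 400
const cachedInput = usage.input_tokens_details.cached_tokens;                     // 800
const outputTokens = usage.output_tokens;                                         // 150
console.log('renderUsage', freshInput, cachedInput, outputTokens); // renderUsage 400 800 150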

View File

@@ -160,7 +160,7 @@ export async function run(args) {
if (!includeHidden && fileName.startsWith(".")) {
return { cwd, files: [] };
}
return { cwd, files: [[fileName, "f", stat.size]] };
return { cwd, files: JSON.stringify([[fileName, 'f', stat.size]]) };
}
// Handle non-directory case
@@ -173,7 +173,7 @@ export async function run(args) {
const files = await listEntriesRecursive(resolvedBase, chrootResolved, depth === -1 ? Infinity : depth, includeHidden);
return {
cwd,
files: files.sort((a, b) => a[0].localeCompare(b[0])), // Sort for consistent output
files: JSON.stringify(files.sort((a, b) => a[0].localeCompare(b[0]))), // Sort for consistent output
};
} catch (err) {
return { err: `Failed to list files: ${err?.message || String(err)}` };
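
The JSON-string change means callers of this file-listing tool now receive files as a string to parse rather than a nested array. A sketch with an invented path and entry:

// Before this commit the tool returned a nested array, e.g.
//   { cwd: '/srv/app', files: [['index.html', 'f', 123]] }
// After this commit files is a JSON string (cwd, name, and size here are made up):
const result = { cwd: '/srv/app', files: JSON.stringify([['index.html', 'f', 123]]) };
console.log(result.files);             // [["index.html","f",123]]
console.log(JSON.parse(result.files)); // [ [ 'index.html', 'f', 123 ] ]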

View File

@@ -428,6 +428,38 @@ function find_context(lines, context, start, eof) {
return [start, 0];
}
// Special handling for full-file replacement patterns
// If context is large and starts with deletion lines, try to match at beginning
if (context.length > 3) {
// Try exact match at start
let [new_index, fuzz] = find_context_core(lines, context, 0);
if (new_index !== -1) {
return [new_index, fuzz];
}
// Try fuzzy match at start (allowing for whitespace differences)
let match = true;
let local_fuzz = 0;
const compare_length = Math.min(context.length, lines.length);
for (let j = 0; j < compare_length; j++) {
if (j < lines.length && j < context.length) {
if (lines[j] !== context[j]) {
if (lines[j].trim() === context[j].trim()) {
local_fuzz += 10;
} else if (lines[j].replace(/\s+$/, "") === context[j].replace(/\s+$/, "")) {
local_fuzz += 1;
} else {
// Allow some mismatch for full-file replacements
local_fuzz += 100;
}
}
}
}
if (local_fuzz < context.length * 50) { // Allow up to 50 fuzz per line
return [0, local_fuzz];
}
}
if (eof) {
let [new_index, fuzz] = find_context_core(lines, context, Math.max(0, lines.length - context.length));
if (new_index !== -1) {
@@ -480,11 +512,6 @@ function peek_next_section(lines, index) {
}
s = s.substring(1);
// Handle the case where we're at the beginning and have content
if (index === orig_index + 1 && old.length === 0 && (del_lines.length > 0 || ins_lines.length > 0)) {
// This is the first content line, start collecting
}
if (mode === "keep" && last_mode !== mode && (ins_lines.length > 0 || del_lines.length > 0)) {
const chunk_orig_index = old.length - del_lines.length;
chunks.push(
@@ -622,12 +649,12 @@ function text_to_patch(text, orig, chroot = null) {
}
// Debug logging
console.log("Lines count:", lines.length);
console.log("First line:", JSON.stringify(lines[0]));
//console.log("Lines count:", lines.length);
//console.log("First line:", JSON.stringify(lines[0]));
if (lines.length > 0) {
console.log("Last line:", JSON.stringify(lines[lines.length - 1]));
console.log("First line normalized:", JSON.stringify(Parser._norm(lines[0])));
console.log("Last line normalized:", JSON.stringify(Parser._norm(lines[lines.length - 1])));
//console.log("Last line:", JSON.stringify(lines[lines.length - 1]));
//console.log("First line normalized:", JSON.stringify(Parser._norm(lines[0])));
//console.log("Last line normalized:", JSON.stringify(Parser._norm(lines[lines.length - 1])));
}
if (
@@ -719,13 +746,12 @@ function process_patch(text, open_fn, write_fn, remove_fn, chroot = null) {
if (!text.startsWith("*** Begin Patch")) {
throw new DiffError("Patch text must start with *** Begin Patch");
}
// Load update/delete targets and also attempt to load add targets
// so existing files are detected during parsing
const paths = [
...identify_files_needed(text, chroot),
...identify_files_added(text, chroot),
];
const orig = load_files(paths, open_fn);
// Load ONLY update/delete targets - do NOT load add targets
// because add targets are expected to not exist yet
const updateDeletePaths = identify_files_needed(text, chroot);
const orig = load_files(updateDeletePaths, open_fn);
const [patch, _fuzz] = text_to_patch(text, orig, chroot);
const commit = patch_to_commit(patch, orig, chroot);
apply_commit(commit, write_fn, remove_fn, chroot);
@@ -776,8 +802,6 @@ export default {
};
export async function run(args) {
console.log('patch_files:', args);
try {
const result = process_patch(
args.patch,
@@ -788,9 +812,6 @@ export async function run(args) {
);
return result;
} catch (error) {
if (error instanceof DiffError) {
throw new Error(`Patch error: ${error.message}`);
}
throw error;
return `Patch error: ${error.message}`
}
}
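
With the reworked catch block, run() now reports patch failures as a returned string instead of throwing. A sketch of how a caller might tell the two outcomes apart (the calling code is illustrative, not part of the commit):

// Hypothetical caller; only the 'Patch error:' prefix comes from the diff above.
const result = await run({ patch: 'not a patch' });
if (typeof result === 'string' && result.startsWith('Patch error:')) {
  console.error(result); // e.g. "Patch error: Patch text must start with *** Begin Patch"
} else {
  console.log('patch applied:', result);
}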