Enhance CLI tool with usage rendering and indented output formatting. Update user prompt for improved clarity and adjust verbosity settings. Modify file listing to return JSON strings for consistent output.
This commit is contained in:
65
cli.js
65
cli.js
@@ -5,6 +5,29 @@ import OpenAI from 'openai';
|
||||
import { promises as fs } from "node:fs";
|
||||
import { fileURLToPath } from "node:url";
|
||||
import path from "node:path";
|
||||
import { resourceUsage } from 'node:process';
|
||||
|
||||
|
||||
/**
 * Summarize token usage from an OpenAI Responses API `usage` object.
 *
 * Splits prompt tokens into fresh vs. cache-hit tokens, logs the
 * breakdown, and also returns it as a single line so callers (e.g. the
 * `response.completed` handler) can embed the summary in their own
 * output instead of printing `undefined`.
 *
 * @param {object} usage - `response.usage` from the API; expected to carry
 *   `input_tokens`, `output_tokens` and `input_tokens_details.cached_tokens`.
 * @returns {string} e.g. "input: 9, cached: 3, output: 40".
 */
function renderUsage(usage) {
  // Tolerate missing fields so a malformed usage payload cannot crash
  // the stream handler (original threw when input_tokens_details was absent).
  const cachedTokens = usage?.input_tokens_details?.cached_tokens ?? 0;
  const inputTokens = (usage?.input_tokens ?? 0) - cachedTokens;
  const outputTokens = usage?.output_tokens ?? 0;
  console.log('renderUsage', inputTokens, cachedTokens, outputTokens);
  return `input: ${inputTokens}, cached: ${cachedTokens}, output: ${outputTokens}`;
}
|
||||
|
||||
/**
 * Log values with every output line prefixed by `indentNum` spaces.
 *
 * Strings pass through untouched; other values are pretty-printed as
 * 2-space JSON. Values where JSON.stringify throws (circular refs,
 * BigInt) or yields no string at all (undefined, functions, symbols)
 * fall back to String() so they no longer silently vanish from the
 * joined output.
 *
 * @param {number} indentNum - number of leading spaces per output line.
 * @param {...*} args - values to render, joined with single spaces.
 * @returns {string} the indented text that was logged, for reuse/testing.
 */
function printIndented(indentNum, ...args) {
  const indent = ' '.repeat(indentNum);
  const rendered = args.map((arg) => {
    if (typeof arg === 'string') return arg;
    try {
      // JSON.stringify returns the value undefined (not a string) for
      // undefined/functions/symbols; Array.join would then render an
      // empty string, dropping the argument — fall back to String().
      return JSON.stringify(arg, null, 2) ?? String(arg);
    } catch {
      return String(arg);
    }
  }).join(' ');
  // Indent every line, including lines inside pretty-printed JSON.
  const output = rendered.split('\n').map((line) => indent + line).join('\n');
  console.log(output);
  return output;
}
|
||||
|
||||
const __dirname = path.dirname(fileURLToPath(import.meta.url));
|
||||
|
||||
@@ -25,12 +48,29 @@ async function loadTools() {
|
||||
|
||||
streamOnce(new OpenAI({ apiKey: process.env.OPENAI_API_KEY }), 'Zeig mir die Dateiein in / und lege index.html an mit dummydaten, kurz');
|
||||
|
||||
|
||||
let counter = 0;
|
||||
async function streamOnce(openai, userText) {
|
||||
const toolsByFile = await loadTools();
|
||||
let previousResponseId;
|
||||
|
||||
let input = [
|
||||
{ "role": "developer", "content": [ {"type": "input_text","text": '' }] },
|
||||
{ "role": "developer", "content": [ {"type": "input_text","text": `You are an interactive CLI AI assistant. Follow the user's instructions.
|
||||
If a tool is available and relevant, plan to use it.
|
||||
Be explicit when information is undefined.
|
||||
Do not silently fall back: surface errors.
|
||||
|
||||
Prefer concise answers.
|
||||
|
||||
Developer rules:
|
||||
- Null tells the truth. If data is missing/undefined, say so; do not invent values.
|
||||
- In development, never hide errors; include warnings if using fallbacks.
|
||||
|
||||
Behavior:
|
||||
- Answer succinctly.
|
||||
- Ask for clarification when the user input is ambiguous.
|
||||
- Output plain text suitable for a terminal.
|
||||
` }] },
|
||||
{"role": "user", "content": [ { "type": "input_text", "text": userText } ]},
|
||||
]
|
||||
|
||||
@@ -40,21 +80,23 @@ async function streamOnce(openai, userText) {
|
||||
const call = {
|
||||
model: 'gpt-5-mini',
|
||||
input: input,
|
||||
text: { format: { type: 'text' }, verbosity: 'medium' },
|
||||
text: { format: { type: 'text' }, verbosity: 'low' },
|
||||
reasoning: { effort: 'low', summary: 'detailed' },
|
||||
tools: Object.values(toolsByFile).map(t => t.def),
|
||||
store: true,
|
||||
}
|
||||
if(previousResponseId) call.previous_response_id = previousResponseId;
|
||||
|
||||
console.log("------NEW OPENAI CALL--------------"
|
||||
,"\n",counter++,"\n",'----INPUT-----------------'
|
||||
,"\n",call.input.map(i => JSON.stringify(i)),"\n",
|
||||
'--------CALL-------------',"\n");
|
||||
const stream = await openai.responses.stream(call);
|
||||
stream.on('response.created', (event) => {
|
||||
if(!previousResponseId){
|
||||
previousResponseId = event.response.id;
|
||||
}
|
||||
previousResponseId = event.response.id;
|
||||
});
|
||||
stream.on('response.reasoning_summary_text.delta', (event) => {
|
||||
process.stdout.write(event.delta);
|
||||
////process.stdout.write(event.delta);
|
||||
});
|
||||
stream.on('response.reasoning_summary_text.done', () => {
|
||||
process.stdout.write('\n');
|
||||
@@ -62,7 +104,7 @@ async function streamOnce(openai, userText) {
|
||||
});
|
||||
|
||||
stream.on('response.output_text.delta', (event) => {
|
||||
process.stdout.write(event.delta);
|
||||
////process.stdout.write(event.delta);
|
||||
});
|
||||
|
||||
|
||||
@@ -72,7 +114,7 @@ async function streamOnce(openai, userText) {
|
||||
}
|
||||
});
|
||||
stream.on('response.function_call_arguments.delta', (event) => {
|
||||
process.stdout.write(event.delta);
|
||||
////process.stdout.write(event.delta);
|
||||
});
|
||||
|
||||
const functionCalls = [];
|
||||
@@ -93,7 +135,10 @@ async function streamOnce(openai, userText) {
|
||||
});
|
||||
|
||||
stream.on('response.completed', async (event) => {
|
||||
//log usage & print messages to user
|
||||
printIndented(10, 'response.completed',
|
||||
renderUsage(event.response.usage),
|
||||
"Result:",event.response.output.filter(i => i.type === 'message').map(i => i.content[0].text)
|
||||
);
|
||||
});
|
||||
|
||||
await Array.fromAsync(stream);
|
||||
@@ -107,7 +152,7 @@ async function streamOnce(openai, userText) {
|
||||
call_id: call.id,
|
||||
output: JSON.stringify(result),
|
||||
})
|
||||
console.log('function call result:', call,result);
|
||||
printIndented(10,'function call result:',result);
|
||||
} catch (err) {
|
||||
console.error('Error in function call:', call.name, err);
|
||||
}
|
||||
|
||||
Reference in New Issue
Block a user