Integrate terminal input handling into CLI tool using terminal-kit. Implement global key handler for CTRL-C and prompt user for input before initiating the LLM loop. Remove outdated test scripts for improved codebase clarity.

sebseb7
2025-08-12 00:16:03 +02:00
parent 5090d2669b
commit 697cf74cc3
10 changed files with 414 additions and 1366 deletions
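
The terminal-kit pattern this commit wires into cli.js can be exercised on its own. A minimal standalone sketch (assuming terminal-kit ^3.x and an ESM entry point; the file name sketch.mjs is hypothetical, not part of this commit):

// sketch.mjs -- grabInput() puts the terminal into raw mode, so CTRL-C stops
// raising SIGINT and instead arrives as a 'CTRL_C' key event that must be
// handled explicitly, which is what cli.js does below.
import terminalKit from 'terminal-kit';
const term = terminalKit.terminal;

term.grabInput({ mouse: false }); // leave the mouse wheel to the terminal's own scrollback

term.on('key', (name) => {
	if (name === 'CTRL_C') {
		term.grabInput(false);  // restore the terminal to its normal mode
		term.processExit(0);    // terminal-kit's cleanup-aware exit
	}
});

term.cyan('Enter your request: ');
// Called without a callback, inputField() returns an object exposing a promise.
const input = await term.inputField().promise;
term('\nGot: %s\n', input);
term.processExit(0);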

cli.js (196 changed lines)

@@ -1,6 +1,7 @@
#!/usr/bin/env node
import 'dotenv/config';
import OpenAI from 'openai';
import terminalKit from 'terminal-kit';
//npm install tiktoken
//csk-8jftdte6r6vf8fdvp9xkyek5t3jnc6jfhh93d3ewfcwxxvh9
@@ -8,8 +9,6 @@ import OpenAI from 'openai';
import { promises as fs } from "node:fs";
import { fileURLToPath } from "node:url";
import path from "node:path";
import { resourceUsage } from 'node:process';
function renderUsage(usage) {
const inputTokens = usage.input_tokens - usage.input_tokens_details.cached_tokens;
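
renderUsage() nets the cached portion out of the raw input count: in the Responses API usage object, input_tokens includes the tokens reported under input_tokens_details.cached_tokens. A hypothetical payload and the resulting arithmetic (field names per the API, numbers invented):

// Hypothetical usage payload as delivered on response.completed:
const usage = {
	input_tokens: 1200,
	input_tokens_details: { cached_tokens: 1024 },
	output_tokens: 300,
	output_tokens_details: { reasoning_tokens: 128 },
};
// renderUsage() would report 1200 - 1024 = 176 uncached input tokens,
// alongside the 1024 cached tokens (typically billed at a discounted rate).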
@@ -34,6 +33,24 @@ function printIndented(indentNum, ...args) {
const __dirname = path.dirname(fileURLToPath(import.meta.url));
const term = terminalKit.terminal;
// Global key handler so CTRL-C works everywhere (input fields, loops, etc.).
// Disable mouse tracking so terminal mouse wheel keeps controlling scrollback.
term.grabInput({ mouse: false });
term.on('key', (name) => {
if (name === 'CTRL_C') {
term.grabInput(false);
term.processExit(0);
}
});
async function askUserForInput() {
term.cyan("Enter your request: ");
const input = await term.inputField({ mouse: false }).promise;
return input;
}
async function loadTools() {
const toolsDir = path.join(__dirname, "tools");
const dirents = await fs.readdir(toolsDir, { withFileTypes: true });
@@ -49,100 +66,103 @@ async function loadTools() {
return Object.fromEntries(toolEntries);
}
while(true){
	let counter = 0;

	// Block for user input before kicking off the LLM loop
	const userText = await askUserForInput();
	await streamOnce(new OpenAI({ apiKey: process.env.OPENAI_API_KEY }), userText || '');

	async function streamOnce(openai, userText) {
		const toolsByFile = await loadTools();
		let previousResponseId;

		let input = [
			{"role": "developer", "content": [ {"type": "input_text","text": `You are an interactive CLI AI assistant. Follow the user's instructions.` }] },
			{"role": "user", "content": [ {"type": "input_text","text": userText } ]},
		]

		while(input.length > 0){
			const call = {
				model: 'gpt-5-mini',
				input: input,
				text: { format: { type: 'text' }, verbosity: 'high' },
				reasoning: { effort: 'medium', summary: 'detailed' },
				tools: Object.values(toolsByFile).map(t => t.def),
				store: true,
			}
			if(previousResponseId) call.previous_response_id = previousResponseId;

			console.log("\n\n\n\n\n------NEW OPENAI CALL-"+input.length+"-------------"
				,"\n",counter++,"\n",'----INPUT-----------------'
				,"\n",call.input.map(i => JSON.stringify(i)),"\n",
				'--------CALL-------------',"\n");

			const stream = await openai.responses.stream(call);

			stream.on('response.created', (event) => {
				previousResponseId = event.response.id;
			});
			stream.on('response.reasoning_summary_text.delta', (event) => {
				process.stdout.write('o')
			});
			stream.on('response.reasoning_summary_text.done', () => {
				process.stdout.write('\n');
				//clear on next delta
			});
			stream.on('response.output_text.delta', (event) => {
				process.stdout.write('.')
			});
			stream.on('response.output_item.added', (event) => {
				if(event.item && event.item.type === 'function_call'){
					//console.log('function call:', event.item);
				}
			});
			stream.on('response.function_call_arguments.delta', (event) => {
				process.stdout.write('x');
			});

			const functionCalls = [];
			stream.on('response.output_item.done', async (event) => {
				if(event.item && event.item.type === 'function_call'){
					const id = event.item.call_id;
					const name = event.item.name;
					let args = {};
					try {
						args = JSON.parse(event.item.arguments);
					} catch (e){
						console.error('Error parsing arguments:', e, event.item.arguments);
					}
					console.log(' function call:', id, name);
					functionCalls.push({ id, name, args, promise: toolsByFile[name].run(args) });
				}
			});
			stream.on('response.completed', async (event) => {
				printIndented(10,renderUsage(event.response.usage));
				if (event.response.output.filter(i => i.type === 'message').length > 0) printIndented(10, "Textresult:",event.response.output.filter(i => i.type === 'message').map(i => i.content[0].text));
			});

			await Array.fromAsync(stream);

			input=[];
			for (const call of functionCalls) {
				//try {
					const result = await call.promise;
					input.push({
						type: "function_call_output",
						call_id: call.id,
						output: JSON.stringify(result),
					})
					printIndented(10,'function call result:',result);
				//} catch (err) {
				//	console.error('Error in function call:', call.name, err);
				//}
			}
		}
		//console.log('OPENAI STREAM FINISHED');
	}
}
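
The diff calls toolsByFile[name].run(args) and collects t.def from each module in tools/, but the tool modules themselves are not part of this diff. A hypothetical tools/read_file.js illustrating the export shape loadTools() appears to expect (file name, fields, and error convention are guesses, not taken from this commit):

// tools/read_file.js -- hypothetical example; the real tool modules are not shown here.
// loadTools() appears to expect each module to export `def` (a Responses API
// function-tool definition, passed through to the call's `tools` array) and
// `run` (the local implementation awaited when the model emits a function_call).
import { promises as fs } from 'node:fs';

export const def = {
	type: 'function',
	name: 'read_file',
	description: 'Read a UTF-8 text file and return its contents.',
	parameters: {
		type: 'object',
		properties: {
			path: { type: 'string', description: 'Path of the file to read' },
		},
		required: ['path'],
	},
};

export async function run({ path }) {
	try {
		return { ok: true, content: await fs.readFile(path, 'utf8') };
	} catch (err) {
		// Return the failure as data so it serializes cleanly into function_call_output.
		return { ok: false, error: String(err) };
	}
}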