@@ -6,32 +6,52 @@ import { createOpenAiClient } from './app';

export type Argv = Record<string, unknown>;

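// Streams a single chat completion for `theContent`, echoing the assistant's
// reply to stdout as it arrives, and appends the exchange to `memory`.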
const receiveData = (
  aiClient: PlatformEventEmitter,
  theContent: string,
  argv: Record<string, unknown>,
  memory: OpenAi.MessageObject[],
) => new Promise<void>((r2, rj) => {
  let completionTokens: number;
  const normalizedChatMessage = OpenAi.normalizeChatMessage(theContent);
  const model = (
    argv.model as OpenAi.ChatCompletionModel
    ?? OpenAi.ChatCompletionModel.GPT_3_5_TURBO
  );
  let assistantMessage: Partial<OpenAi.MessageObject>;
  aiClient.on<OpenAi.ChatCompletionChunkDataEvent>('data', (d) => {
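    // The first chunk of a streamed completion carries the assistant's role;
    // later chunks carry content fragments. Content chunks are counted as a
    // proxy for completion tokens.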
    if (d.choices?.[0]?.delta?.role) {
      assistantMessage.role = d.choices[0].delta.role;
      assistantMessage.content = '';
      process.stdout.write(`${d.choices[0].delta.role}:\n`);
    }
    if (d.choices?.[0]?.delta?.content) {
      completionTokens += 1;
      assistantMessage.content += d.choices[0].delta.content;
      process.stdout.write(d.choices[0].delta.content);
    }
  });

  aiClient.on('end', () => {
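    // Persist both sides of the finished exchange so later turns can send
    // them back as context.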
    normalizedChatMessage.forEach((m) => {
      memory.push(m);
    });
    memory.push(assistantMessage as OpenAi.MessageObject);
    process.stdout.write('\n\n');
    const { length: promptTokens } = OpenAi.getPromptTokens(
      normalizedChatMessage,
      model,
    );
    process.stdout.write(`info:\n${promptTokens} prompt tokens\n${completionTokens} completion tokens`);
    if (argv.memory) {
      process.stdout.write(`\n${memory.length} memory items`);
    }
    process.stdout.write('\n\n');
    // TODO count tokens
    r2();
  });

@@ -43,9 +63,24 @@ const receiveData = (aiClient: PlatformEventEmitter, theContent: string, argv: R

  });

  completionTokens = 0;
  assistantMessage = {};
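  // `argv.memory` is either `true` (bare flag: send the whole history) or a
  // number N (send only the last N remembered messages).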
  const effectiveMemory = (
    argv.memory === true
      ? memory
      : memory.slice(-(argv.memory as number))
  );
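  // Remembered context is sent first, followed by the freshly normalized
  // user message(s).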
  aiClient.createChatCompletion({
    model,
    messages: (
      argv.memory
        ? [
          ...effectiveMemory,
          ...normalizedChatMessage,
        ]
        : [
          ...normalizedChatMessage,
        ]
    ),
    temperature: argv.temperature as number,
    maxTokens: argv.maxTokens as number,
    topP: argv.topP as number,

@@ -55,6 +90,7 @@ const receiveData = (aiClient: PlatformEventEmitter, theContent: string, argv: R

const main = (argv: Argv) => new Promise<number>(async (resolve) => {
  let done = false;
  let resolveResult = 0;
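  // Conversation history shared across every iteration of the prompt loop.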
  const memory = [] as OpenAi.MessageObject[];

  process.stdout.write('Welcome to mio-ai CLI!\n\n');

@@ -85,14 +121,14 @@ const main = (argv: Argv) => new Promise<number>(async (resolve) => {

        break;
      }
      process.stdout.write('\n');
      await receiveData(aiClient, content, argv, memory);
    } catch {
      resolveResult = -1;
      done = true;
    }
  } while (!done);

  process.stdout.write('\ninfo:\nUser exited loop\n\n');
  resolve(resolveResult);
});
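
// Usage sketch (hypothetical invocation; the real flag spellings depend on
// how `argv` is parsed before reaching main):
//
//   mio-ai --model gpt-3.5-turbo --memory 6 --temperature 0.7
//
// A numeric --memory keeps only the six most recent remembered messages per
// request, while a bare --memory sends the entire history.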