|
@@ -6,32 +6,52 @@ import { createOpenAiClient } from './app';

 export type Argv = Record<string, unknown>;

-const receiveData = (aiClient: PlatformEventEmitter, theContent: string, argv: Record<string, unknown>) => new Promise<void>((r2, rj) => {
+const receiveData = (
+  aiClient: PlatformEventEmitter,
+  theContent: string,
+  argv: Record<string, unknown>,
+  memory: OpenAi.MessageObject[],
+) => new Promise<void>((r2, rj) => {
   let completionTokens: number;
+  const normalizedChatMessage = OpenAi.normalizeChatMessage(theContent);

   const model = (
     argv.model as OpenAi.ChatCompletionModel
     ?? OpenAi.ChatCompletionModel.GPT_3_5_TURBO
   );
+  let assistantMessage: Partial<OpenAi.MessageObject>;

   aiClient.on<OpenAi.ChatCompletionChunkDataEvent>('data', (d) => {
     if (d.choices?.[0]?.delta?.role) {
+      assistantMessage.role = d.choices[0].delta.role;
+      assistantMessage.content = '';
       process.stdout.write(`${d.choices[0].delta.role}:\n`);
     }

     if (d.choices?.[0]?.delta?.content) {
       completionTokens += 1;
+      assistantMessage.content += d.choices[0].delta.content;
       process.stdout.write(d.choices[0].delta.content);
     }
   });

   aiClient.on('end', () => {
+    normalizedChatMessage.forEach((m) => {
+      memory.push(m);
+    });
+    memory.push(assistantMessage as OpenAi.MessageObject);
     process.stdout.write('\n\n');
     const { length: promptTokens } = OpenAi.getPromptTokens(
-      OpenAi.normalizeChatMessage(theContent),
+      normalizedChatMessage,
       model,
     );
-    process.stdout.write(`info:\n${promptTokens} prompt tokens\n${completionTokens} completion tokens\n\n`);
+    process.stdout.write(`info:\n${promptTokens} prompt tokens\n${completionTokens} completion tokens`);
+    if (argv.memory) {
+      process.stdout.write(`\n${memory.length} memory items`);
+    }
+    process.stdout.write('\n\n');
     // TODO count tokens
     r2();
   });
@@ -43,9 +63,24 @@ const receiveData = (aiClient: PlatformEventEmitter, theContent: string, argv: R
   });

   completionTokens = 0;
+  assistantMessage = {};
+  const effectiveMemory = (
+    argv.memory === true
+      ? memory
+      : memory.slice(-(argv.memory as number))
+  );
   aiClient.createChatCompletion({
     model,
-    messages: theContent,
+    messages: (
+      argv.memory
+        ? [
+          ...effectiveMemory,
+          ...normalizedChatMessage,
+        ]
+        : [
+          ...normalizedChatMessage,
+        ]
+    ),
     temperature: argv.temperature as number,
     maxTokens: argv.maxTokens as number,
     topP: argv.topP as number,
@@ -55,6 +90,7 @@ const receiveData = (aiClient: PlatformEventEmitter, theContent: string, argv: R

 const main = (argv: Argv) => new Promise<number>(async (resolve) => {
   let done = false;
   let resolveResult = 0;
+  const memory = [] as OpenAi.MessageObject[];

   process.stdout.write('Welcome to mio-ai CLI!\n\n');
@@ -85,14 +121,14 @@ const main = (argv: Argv) => new Promise<number>(async (resolve) => {
         break;
       }

       process.stdout.write('\n');
-      await receiveData(aiClient, content, argv);
+      await receiveData(aiClient, content, argv, memory);
     } catch {
       resolveResult = -1;
       done = true;
     }
   } while (!done);

-  process.stdout.write('Bye!\n\n');
+  process.stdout.write('\ninfo:\nUser exited loop\n\n');
   resolve(resolveResult);
 });
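Note on the memory-window semantics in the second hunk, as a minimal standalone TypeScript sketch (not part of the patch; Message, windowMemory, and history are illustrative names). Per the patch's argv.memory === true check, passing the option as a bare flag (which common argv parsers coerce to true) sends the entire accumulated history, while a numeric value keeps only the last N stored messages, mirroring memory.slice(-(argv.memory as number)):

type Message = { role: string; content: string };

// true => send the whole history; N => send only the last N items.
const windowMemory = (history: Message[], flag: true | number): Message[] => (
  flag === true ? history : history.slice(-flag)
);

const history: Message[] = [
  { role: 'user', content: 'q1' },
  { role: 'assistant', content: 'a1' },
  { role: 'user', content: 'q2' },
];

console.log(windowMemory(history, true).map((m) => m.content)); // ['q1', 'a1', 'q2']
console.log(windowMemory(history, 2).map((m) => m.content));    // ['a1', 'q2']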