import yargs from 'yargs';
import { hideBin } from 'yargs/helpers';
import { OpenAi } from '@modal-sh/mio-ai';
import { TextPrompt, isCancel } from '@clack/core';
import { createOpenAiClient } from './app';

export type Argv = Record<string, unknown>;
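/**
 * Streams a single chat completion for `theContent`, printing the prompt and
 * completion token counts plus the assistant's reply to stdout, and appending
 * both the user message and the assistant reply to `memory`.
 */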
const receiveData = (
  aiClient: OpenAi.PlatformEventEmitter,
  theContent: string,
  argv: Argv,
  memory: OpenAi.MessageObject[],
) => new Promise<void>((resolve, reject) => {
  let completionTokens: number;

  const normalizedChatMessage = OpenAi.normalizeChatMessage(theContent);
  const model = (
    argv.model as OpenAi.ChatCompletionModel
    ?? OpenAi.ChatCompletionModel.GPT_3_5_TURBO
  );
  const { length: promptTokens } = OpenAi.getPromptTokens(
    normalizedChatMessage,
    model,
  );

  process.stdout.write(`(${promptTokens} prompt tokens)`);
  if (argv.memory) {
    process.stdout.write(`\n${memory.length} memory items`);
  }
  process.stdout.write('\n\n');
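  // Accumulate the streamed assistant reply as chunk ("delta") events arrive.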
  let assistantMessage: Partial<OpenAi.MessageObject>;
  aiClient.on<OpenAi.ChatCompletionChunkDataEvent>('data', (d) => {
    if (d.choices?.[0]?.delta?.role) {
      assistantMessage.role = d.choices[0].delta.role;
      assistantMessage.content = '';
      process.stdout.write(`${d.choices[0].delta.role}:\n`);
    }

    if (d.choices?.[0]?.delta?.content) {
      completionTokens += 1;
      assistantMessage.content += d.choices[0].delta.content;
      process.stdout.write(d.choices[0].delta.content);
    }
  });

  aiClient.on('end', () => {
    normalizedChatMessage.forEach((m) => {
      memory.push(m);
    });
    memory.push(assistantMessage as OpenAi.MessageObject);
    process.stdout.write(`\n(${completionTokens} completion tokens)`);
    if (argv.memory) {
      process.stdout.write(`\n${memory.length} memory items`);
    }
    process.stdout.write('\n\n');
    resolve();
  });

  aiClient.on('error', (error: Error) => {
    process.stderr.write(error.message);
    process.stderr.write('\n');
    reject(error);
  });
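  // Reset per-request state, then kick off the streaming request;
  // prior memory is included only when --memory is set.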
  completionTokens = 0;
  assistantMessage = {};
  const effectiveMemory = (
    argv.memory === true
      ? memory
      : memory.slice(-(argv.memory as number))
  );
  aiClient.createChatCompletion({
    model,
    messages: (
      argv.memory
        ? [
          ...effectiveMemory,
          ...normalizedChatMessage,
        ]
        : [
          ...normalizedChatMessage,
        ]
    ),
    temperature: argv.temperature as number,
    maxTokens: argv.maxTokens as number,
    topP: argv.topP as number,
  });
});
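/**
 * Interactive loop: prompt for a user message, stream the reply, and repeat
 * until an empty message, a cancelled prompt (Ctrl+C), or an error ends the session.
 */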
const main = async (argv: Argv) => {
  let resolveResult = 0;
  const memory = [] as OpenAi.MessageObject[];

  process.stdout.write('Welcome to mio-ai CLI!\n\n');

  process.stdout.write('This is a simple example of how to use mio-ai.\n');
  process.stdout.write('You can send messages to the OpenAI API.\n');
  process.stdout.write('You can also send an empty message to exit.\n');
  process.stdout.write('Alternatively, you can press Ctrl+C at any time to exit.\n\n');

  let done = false;
  do {
    process.stdout.write('--------\n\n');
    const aiClient = createOpenAiClient({
      apiKey: process.env.OPENAI_API_KEY as string,
      organizationId: process.env.OPENAI_ORGANIZATION_ID as string,
      apiVersion: OpenAi.ApiVersion.V1,
    });

    try {
      const textPrompt = new TextPrompt({
        render(): string | void {
          return `${OpenAi.MessageRole.USER}:\n${this.valueWithCursor ?? ''}`;
        },
      });
      // eslint-disable-next-line no-await-in-loop
      const content = await textPrompt.prompt();
      if (isCancel(content)) {
        break;
      }

      done = content.trim().length < 1;
      if (!done) {
        // eslint-disable-next-line no-await-in-loop
        await receiveData(aiClient, content, argv, memory);
      }
    } catch (errRaw) {
      const err = errRaw as Error;
      process.stderr.write(`${err.message}\n\n`);
      resolveResult = -1;
      done = true;
    }
  } while (!done);

  if (resolveResult === 0) {
    process.stdout.write('(User exited loop)\n\n');
  }

  return resolveResult;
};
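// Parse CLI flags, run the loop, and mirror main()'s result in the process exit code.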
main(
  yargs(hideBin(process.argv))
    .options({
      t: {
        alias: 'temperature',
        type: 'number',
        description: 'Temperature argument.',
      },
      p: {
        alias: 'topP',
        type: 'number',
        description: '"top_p" argument.',
      },
      x: {
        alias: 'maxTokens',
        type: 'number',
        description: 'Maximum tokens ChatGPT will use.',
      },
      m: {
        alias: 'memory',
        description: 'Whether ChatGPT will use memory. Supply a numeric value to keep only the last X memory items (includes messages from all roles).',
      },
    })
    .argv as unknown as Argv,
)
  .then((result) => {
    process.exit(result);
  })
  .catch((errRaw) => {
    const error = errRaw as Error;
    process.stderr.write(error.message);
    process.stderr.write('\n');
    process.exit(-1);
  });