Many-in-one AI client.

import yargs from 'yargs';
import { hideBin } from 'yargs/helpers';
import { OpenAi } from '@modal-sh/mio-ai';
import { TextPrompt, isCancel } from '@clack/core';
import { createOpenAiClient } from './app';

export type Argv = Record<string, unknown>;

// Streams a single chat completion for `theContent` and records the exchange in `memory`.
const receiveData = (
  aiClient: OpenAi.PlatformEventEmitter,
  theContent: string,
  argv: Record<string, unknown>,
  memory: OpenAi.MessageObject[],
) => new Promise<void>((resolve, reject) => {
  let completionTokens: number;
  const normalizedChatMessage = OpenAi.normalizeChatMessage(theContent);
  const model = (
    argv.model as OpenAi.ChatCompletionModel
    ?? OpenAi.ChatCompletionModel.GPT_3_5_TURBO
  );
  const { length: promptTokens } = OpenAi.getPromptTokens(
    normalizedChatMessage,
    model,
  );
  process.stdout.write(`(${promptTokens} prompt tokens)`);
  if (argv.memory) {
    process.stdout.write(`\n${memory.length} memory items`);
  }
  process.stdout.write('\n\n');

  let assistantMessage: Partial<OpenAi.MessageObject>;

  // Stream handler: print the role header once, then echo content deltas as they arrive.
  aiClient.on<OpenAi.ChatCompletionChunkDataEvent>('data', (d) => {
    if (d.choices?.[0]?.delta?.role) {
      assistantMessage.role = d.choices[0].delta.role;
      assistantMessage.content = '';
      process.stdout.write(`${d.choices[0].delta.role}:\n`);
    }
    if (d.choices?.[0]?.delta?.content) {
      completionTokens += 1;
      assistantMessage.content += d.choices[0].delta.content;
      process.stdout.write(d.choices[0].delta.content);
    }
  });

  // Once the stream ends, persist both the prompt and the assistant reply in memory.
  aiClient.on('end', () => {
    normalizedChatMessage.forEach((m) => {
      memory.push(m);
    });
    memory.push(assistantMessage as OpenAi.MessageObject);
    process.stdout.write(`\n(${completionTokens} completion tokens)`);
    if (argv.memory) {
      process.stdout.write(`\n${memory.length} memory items`);
    }
    process.stdout.write('\n\n');
    resolve();
  });

  aiClient.on('error', (error: Error) => {
    process.stderr.write(error.message);
    process.stderr.write('\n');
    reject(error);
  });

  completionTokens = 0;
  assistantMessage = {};

  // A bare `--memory` replays the full history; a numeric value replays only the last N items.
  const effectiveMemory = (
    argv.memory === true
      ? memory
      : memory.slice(-(argv.memory as number))
  );

  aiClient.createChatCompletion({
    model,
    messages: (
      argv.memory
        ? [
          ...effectiveMemory,
          ...normalizedChatMessage,
        ]
        : [
          ...normalizedChatMessage,
        ]
    ),
    temperature: argv.temperature as number,
    maxTokens: argv.maxTokens as number,
    topP: argv.topP as number,
  });
});
const main = async (argv: Argv) => {
  let resolveResult = 0;
  const memory = [] as OpenAi.MessageObject[];

  process.stdout.write('Welcome to mio-ai CLI!\n\n');
  process.stdout.write('This is a simple example of how to use mio-ai.\n');
  process.stdout.write('You can send messages to the OpenAI API.\n');
  process.stdout.write('You can also send an empty message to exit.\n');
  process.stdout.write('Alternatively, you can press Ctrl+C anytime to exit.\n\n');

  let done = false;
  do {
    process.stdout.write('--------\n\n');
    // Credentials come from the environment; a fresh client is created for each prompt.
    const aiClient = createOpenAiClient({
      apiKey: process.env.OPENAI_API_KEY as string,
      organizationId: process.env.OPENAI_ORGANIZATION_ID as string,
      apiVersion: OpenAi.ApiVersion.V1,
    });

    try {
      const textPrompt = new TextPrompt({
        render(): string | void {
          return `${OpenAi.MessageRole.USER}:\n${this.valueWithCursor ?? ''}`;
        },
      });
      // eslint-disable-next-line no-await-in-loop
      const content = await textPrompt.prompt();
      if (isCancel(content)) {
        break;
      }
      // An empty message ends the loop; anything else is sent to the API.
      done = content.trim().length < 1;
      if (!done) {
        // eslint-disable-next-line no-await-in-loop
        await receiveData(aiClient, content, argv, memory);
      }
    } catch (errRaw) {
      const err = errRaw as Error;
      process.stderr.write(`${err.message}\n\n`);
      resolveResult = -1;
      done = true;
    }
  } while (!done);

  if (resolveResult === 0) {
    process.stdout.write('(User exited loop)\n\n');
  }

  return resolveResult;
};
main(
  yargs(hideBin(process.argv))
    .options({
      t: {
        alias: 'temperature',
        type: 'number',
        description: 'Temperature argument.',
      },
      p: {
        alias: 'topP',
        type: 'number',
        description: '"top_p" argument.',
      },
      x: {
        alias: 'maxTokens',
        type: 'number',
        description: 'Maximum number of tokens ChatGPT will use.',
      },
      m: {
        alias: 'memory',
        description: 'Whether ChatGPT will use memory. Supply a numeric value to replay only the last X memory items (messages from all roles count).',
      },
    })
    .argv as unknown as Argv,
)
  .then((result) => {
    process.exit(result);
  })
  .catch((errRaw) => {
    const error = errRaw as Error;
    process.stderr.write(error.message);
    process.stderr.write('\n');
    process.exit(-1);
  });
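
The `-m`/`--memory` flag above doubles as a window size: passed bare it replays the whole conversation history, while a numeric value replays only the last N messages across all roles. A minimal standalone sketch of that windowing logic follows; the `Message` type and `windowMemory` helper are illustrative names and not mio-ai exports.

// Sketch only: mirrors the `effectiveMemory` selection in receiveData above.
type Message = { role: string; content: string };

const windowMemory = (
  memory: Message[],
  flag: boolean | number | undefined,
): Message[] => {
  if (!flag) return [];              // no --memory: send only the new prompt
  if (flag === true) return memory;  // bare --memory: full history
  return memory.slice(-flag);        // --memory 4: last four messages
};

// Example: with `--memory 2`, only the two most recent items are replayed.
const history: Message[] = [
  { role: 'user', content: 'Hi' },
  { role: 'assistant', content: 'Hello!' },
  { role: 'user', content: 'Tell me a joke' },
];
console.log(windowMemory(history, 2)); // keeps the last two messages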