Many-in-one AI client.
You cannot select more than 25 topics. Topics must start with a letter or number, can include dashes ('-'), and can be up to 35 characters long.

96 lines
2.5 KiB

  1. import {
  2. FinishableChoiceBase,
  3. ConsumeStream,
  4. DataEventId,
  5. DoFetch,
  6. normalizeChatMessage,
  7. PlatformError,
  8. PlatformResponse,
  9. UsageMetadata,
  10. } from '../common';
  11. import { Message, MessageObject } from '../message';
  12. import { ChatCompletionModel } from '../models';
  13. export interface CreateChatCompletionParams {
  14. messages: Message | Message[];
  15. model: ChatCompletionModel;
  16. temperature?: number;
  17. topP?: number;
  18. n?: number;
  19. stop?: string | string[];
  20. maxTokens?: number;
  21. presencePenalty?: number;
  22. frequencyPenalty?: number;
  23. logitBias?: Record<string, number>;
  24. user?: string;
  25. }
  26. export interface ChatCompletionChunkChoice extends FinishableChoiceBase {
  27. delta: Partial<MessageObject>;
  28. }
  29. export interface ChatCompletionChoice extends FinishableChoiceBase {
  30. message: Partial<Message>;
  31. }
  32. export enum DataEventObjectType {
  33. CHAT_COMPLETION_CHUNK = 'chat.completion.chunk',
  34. CHAT_COMPLETION = 'chat.completion',
  35. }
  36. export interface CreateChatCompletionDataEvent<
  37. C extends Partial<FinishableChoiceBase>
  38. > extends PlatformResponse {
  39. id: DataEventId;
  40. object: DataEventObjectType;
  41. model: ChatCompletionModel;
  42. choices: C[];
  43. }
  44. export interface ChatCompletion
  45. extends CreateChatCompletionDataEvent<Partial<ChatCompletionChoice>>, UsageMetadata {}
  46. export type ChatCompletionChunkDataEvent = CreateChatCompletionDataEvent<ChatCompletionChunkChoice>;
  47. export function createChatCompletion(
  48. this: NodeJS.EventEmitter,
  49. doFetch: DoFetch,
  50. consumeStream: ConsumeStream,
  51. params: CreateChatCompletionParams,
  52. ) {
  53. doFetch('POST', '/chat/completions', {
  54. messages: normalizeChatMessage(params.messages),
  55. model: params.model ?? ChatCompletionModel.GPT_3_5_TURBO,
  56. temperature: params.temperature ?? 1,
  57. top_p: params.topP ?? 1,
  58. n: params.n ?? 1,
  59. stop: params.stop ?? null,
  60. stream: true,
  61. max_tokens: params.maxTokens,
  62. presence_penalty: params.presencePenalty ?? 0,
  63. frequency_penalty: params.frequencyPenalty ?? 0,
  64. logit_bias: params.logitBias ?? {},
  65. user: params.user,
  66. })
  67. .then(async (response) => {
  68. if (!response.ok) {
  69. this.emit('error', new PlatformError(
  70. // eslint-disable-next-line @typescript-eslint/restrict-template-expressions
  71. `Create chat completion returned with status: ${response.status}`,
  72. response,
  73. ));
  74. this.emit('end');
  75. return;
  76. }
  77. await consumeStream(response);
  78. this.emit('end');
  79. })
  80. .catch((err) => {
  81. this.emit('error', err as Error);
  82. this.emit('end');
  83. });
  84. return this;
  85. }