Browse Source

Initial commit

Add files from pridepack. Only text completion, chat completion, edit,
and image generation features have been implemented.
master
TheoryOfNekomata 1 year ago
commit
afba7df8d0
20 changed files with 4668 additions and 0 deletions
  1. +1
    -0
      .env.example
  2. +19
    -0
      .eslintrc
  3. +108
    -0
      .gitignore
  4. +7
    -0
      LICENSE
  5. +53
    -0
      package.json
  6. +3
    -0
      pridepack.json
  7. +24
    -0
      src/index.ts
  8. +74
    -0
      src/platforms/openai/common.ts
  9. +18
    -0
      src/platforms/openai/events.ts
  10. +95
    -0
      src/platforms/openai/features/chat-completion.ts
  11. +66
    -0
      src/platforms/openai/features/edit.ts
  12. +70
    -0
      src/platforms/openai/features/image.ts
  13. +93
    -0
      src/platforms/openai/features/text-completion.ts
  14. +114
    -0
      src/platforms/openai/index.ts
  15. +12
    -0
      src/platforms/openai/message.ts
  16. +48
    -0
      src/platforms/openai/models.ts
  17. +214
    -0
      test/index.test.ts
  18. +21
    -0
      tsconfig.eslint.json
  19. +21
    -0
      tsconfig.json
  20. +3607
    -0
      yarn.lock

+ 1
- 0
.env.example View File

@@ -0,0 +1 @@
OPENAI_API_KEY=

+ 19
- 0
.eslintrc View File

@@ -0,0 +1,19 @@
{
"root": true,
"rules": {
"@typescript-eslint/no-unsafe-assignment": "off",
"@typescript-eslint/no-unsafe-member-access": "off",
"@typescript-eslint/no-unsafe-call": "off",
"@typescript-eslint/no-unsafe-return": "off"
},
"extends": [
"lxsmnsyc/typescript"
],
"ignorePatterns": [
"dist",
"node_modules"
],
"parserOptions": {
"project": "./tsconfig.eslint.json"
}
}

+ 108
- 0
.gitignore View File

@@ -0,0 +1,108 @@
# Logs
logs
*.log
npm-debug.log*
yarn-debug.log*
yarn-error.log*
lerna-debug.log*

# Diagnostic reports (https://nodejs.org/api/report.html)
report.[0-9]*.[0-9]*.[0-9]*.[0-9]*.json

# Runtime data
pids
*.pid
*.seed
*.pid.lock

# Directory for instrumented libs generated by jscoverage/JSCover
lib-cov

# Coverage directory used by tools like istanbul
coverage
*.lcov

# nyc test coverage
.nyc_output

# Grunt intermediate storage (https://gruntjs.com/creating-plugins#storing-task-files)
.grunt

# Bower dependency directory (https://bower.io/)
bower_components

# node-waf configuration
.lock-wscript

# Compiled binary addons (https://nodejs.org/api/addons.html)
build/Release

# Dependency directories
node_modules/
jspm_packages/

# TypeScript v1 declaration files
typings/

# TypeScript cache
*.tsbuildinfo

# Optional npm cache directory
.npm

# Optional eslint cache
.eslintcache

# Microbundle cache
.rpt2_cache/
.rts2_cache_cjs/
.rts2_cache_es/
.rts2_cache_umd/

# Optional REPL history
.node_repl_history

# Output of 'npm pack'
*.tgz

# Yarn Integrity file
.yarn-integrity

# dotenv environment variables file
.env
.env.production
.env.development

# parcel-bundler cache (https://parceljs.org/)
.cache

# Next.js build output
.next

# Nuxt.js build / generate output
.nuxt
dist

# Gatsby files
.cache/
# Comment in the public line if your project uses Gatsby and *not* Next.js
# https://nextjs.org/blog/next-9-1#public-directory-support
# public

# vuepress build output
.vuepress/dist

# Serverless directories
.serverless/

# FuseBox cache
.fusebox/

# DynamoDB Local files
.dynamodb/

# TernJS port file
.tern-port

.npmrc
.idea/

+ 7
- 0
LICENSE View File

@@ -0,0 +1,7 @@
MIT License Copyright (c) 2023 TheoryOfNekomata <allan.crisostomo@outlook.com>

Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions:

The above copyright notice and this permission notice (including the next paragraph) shall be included in all copies or substantial portions of the Software.

THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.

+ 53
- 0
package.json View File

@@ -0,0 +1,53 @@
{
"name": "openai-utils",
"version": "0.0.0",
"files": [
"dist",
"src"
],
"engines": {
"node": ">=12"
},
"license": "MIT",
"keywords": [
"pridepack"
],
"devDependencies": {
"@types/node": "^18.14.1",
"dotenv": "^16.0.3",
"eslint": "^8.35.0",
"eslint-config-lxsmnsyc": "^0.5.0",
"pridepack": "2.4.4",
"tslib": "^2.5.0",
"typescript": "^4.9.5",
"vitest": "^0.28.1"
},
"scripts": {
"prepublishOnly": "pridepack clean && pridepack build",
"build": "pridepack build",
"type-check": "pridepack check",
"lint": "pridepack lint",
"clean": "pridepack clean",
"watch": "pridepack watch",
"start": "pridepack start",
"dev": "pridepack dev",
"test": "vitest"
},
"private": false,
"description": "Custom wrapper for OpenAI API.",
"repository": {
"url": "https://code.modal.sh/modal-soft/openai-utils",
"type": "git"
},
"homepage": "https://code.modal.sh/modal-soft/openai-utils",
"bugs": {
"url": "https://code.modal.sh/modal-soft/openai-utils/issues"
},
"author": "TheoryOfNekomata <allan.crisostomo@outlook.com>",
"publishConfig": {
"access": "public"
},
"dependencies": {
"fetch-ponyfill": "^7.1.0"
}
}

+ 3
- 0
pridepack.json View File

@@ -0,0 +1,3 @@
{
"target": "es2018"
}

+ 24
- 0
src/index.ts View File

@@ -0,0 +1,24 @@
import * as OpenAiImpl from './platforms/openai';

// Registry of every platform module this package can drive. Each entry must
// expose a PLATFORM_ID discriminator and a PlatformEventEmitterImpl class.
export const SUPPORTED_PLATFORMS = { OpenAi: OpenAiImpl } as const;
// Re-exported aliases so consumers don't need to import platform modules directly.
export type PlatformConfig = OpenAiImpl.PlatformConfig;
export type PlatformEventEmitter = OpenAiImpl.PlatformEventEmitter;

export * as OpenAi from './platforms/openai';

/**
 * Instantiates an event-emitting AI client for the requested platform.
 *
 * Looks the platform up in SUPPORTED_PLATFORMS by its PLATFORM_ID and
 * constructs that module's client with the supplied configuration.
 *
 * @throws Error when `configParams.platform` matches no registered module.
 */
export const createAiClient = (configParams: PlatformConfig): PlatformEventEmitter => {
  const available = Object.values(SUPPORTED_PLATFORMS);
  const selected = available.find((candidate) => candidate.PLATFORM_ID === configParams.platform);

  if (typeof selected === 'undefined') {
    const supportedPlatforms = available.map((candidate) => candidate.PLATFORM_ID).join(', ');
    throw new Error(`Unsupported platform: ${configParams.platform}. Supported platforms are: ${supportedPlatforms}`);
  }

  return new selected.PlatformEventEmitterImpl(configParams.platformConfiguration);
};

+ 74
- 0
src/platforms/openai/common.ts View File

@@ -0,0 +1,74 @@
import { Message, MessageRole } from './message';

/** Reason the model stopped generating, as reported by the API. */
export enum FinishReason {
  /** Natural stop or a configured stop sequence was reached. */
  STOP = 'stop',
  /** Output was truncated by the max token limit. */
  LENGTH = 'length',
}

/** Common shape of a single choice in an API response. */
export interface ChoiceBase {
  index: number;
}

/** Choice that may carry a finish reason (null while still streaming). */
export interface FinishableChoiceBase extends ChoiceBase {
  finish_reason: FinishReason | null;
}

/** Identifier the platform assigns to a data event. */
export type DataEventId = string;

/** Unix timestamp (seconds) as returned by the platform. */
export type Timestamp = number;

/** Token accounting attached to non-streaming responses. */
export interface Usage {
  prompt_tokens: number;
  completion_tokens: number;
  total_tokens: number;
}

/** Mixin for responses that include token usage. */
export interface UsageMetadata {
  usage: Usage;
}

/** Fields shared by all platform responses. */
export interface PlatformResponse {
  created: Timestamp;
}

/** Performs an authenticated HTTP request against the platform API. */
export type DoFetch = (
  method: string,
  path: string,
  body: Record<string, unknown>
) => Promise<Response>;

/** Reads a streaming response to completion, emitting parsed chunks as events. */
export type ConsumeStream = (
  response: Response,
) => Promise<void>;

/**
 * Error raised when the platform responds with a non-OK HTTP status.
 * Keeps the raw Response so callers can inspect status and body.
 */
export class PlatformError extends Error {
  readonly response: Response;

  constructor(message: string, response: Response) {
    super(message);
    this.name = 'OpenAi.PlatformError';
    this.response = response;
  }
}

/**
 * Normalizes the flexible `messages` input into the array of message objects
 * the chat API expects. Plain strings become user messages.
 *
 * @param messageRaw - A string, a message object, or an array mixing both.
 * @returns An array of role/content message objects.
 */
export const normalizeChatMessage = (messageRaw: Message | Message[]) => {
  // A bare string is shorthand for a single user message.
  if (typeof messageRaw === 'string') {
    return [
      {
        role: MessageRole.USER,
        content: messageRaw,
      },
    ];
  }

  if (Array.isArray(messageRaw)) {
    return messageRaw.map((message) => {
      if (typeof message === 'string') {
        return {
          role: MessageRole.USER,
          content: message,
        };
      }
      return message;
    });
  }

  // BUG FIX: a single message object was previously returned bare, producing a
  // non-array `messages` field in the /chat/completions request body. The API
  // requires an array, so wrap it.
  return [messageRaw];
};

+ 18
- 0
src/platforms/openai/events.ts View File

@@ -0,0 +1,18 @@
import { CreateChatCompletionParams } from './features/chat-completion';
import { CreateImageParams } from './features/image';
import { CreateTextCompletionParams } from './features/text-completion';
import { CreateEditParams } from './features/edit';

/** Callback invoked with each 'data' event payload. */
export type DataEventCallback<D> = (data: D) => void;

/** Callback invoked when a request fails. */
export type ErrorEventCallback = (event: Error) => void;

/**
 * Event-based client surface for the OpenAI platform. Feature methods kick
 * off a request; results arrive via 'data' events, failures via 'error',
 * and every request finishes with a single 'end' event.
 */
export interface PlatformEventEmitter extends NodeJS.EventEmitter {
  createChatCompletion(params: CreateChatCompletionParams): void;
  createImage(params: CreateImageParams): void;
  createCompletion(params: CreateTextCompletionParams): void;
  createEdit(params: CreateEditParams): void;
  on<D>(event: 'data', callback: DataEventCallback<D>): this;
  on(event: 'end', callback: () => void): this;
  on(event: 'error', callback: ErrorEventCallback): this;
}

+ 95
- 0
src/platforms/openai/features/chat-completion.ts View File

@@ -0,0 +1,95 @@
import {
FinishableChoiceBase,
ConsumeStream,
DataEventId,
DoFetch,
normalizeChatMessage,
PlatformError,
PlatformResponse,
UsageMetadata,
} from '../common';
import { Message, MessageObject } from '../message';
import { ChatCompletionModel } from '../models';

/**
 * Parameters for createChatCompletion. Uses camelCase; values are mapped to
 * the API's snake_case fields when the request body is built.
 */
export interface CreateChatCompletionParams {
  messages: Message | Message[];
  model: ChatCompletionModel;
  temperature?: number;
  topP?: number;
  n?: number;
  stop?: string | string[];
  maxTokens?: number;
  presencePenalty?: number;
  frequencyPenalty?: number;
  logitBias?: Record<string, number>;
  user?: string;
}

/** Streaming chunk choice: carries an incremental message delta. */
export interface ChatCompletionChunkChoice extends FinishableChoiceBase {
  delta: Partial<MessageObject>;
}

/** Fully accumulated (non-chunk) chat completion choice. */
export interface ChatCompletionChoice extends FinishableChoiceBase {
  message: Partial<Message>;
}

/** Values of the `object` discriminator on chat completion events. */
export enum DataEventObjectType {
  CHAT_COMPLETION_CHUNK = 'chat.completion.chunk',
  CHAT_COMPLETION = 'chat.completion',
}

/** Envelope common to chat completion data events. */
export interface CreateChatCompletionDataEvent<
  C extends Partial<FinishableChoiceBase>
> extends PlatformResponse {
  id: DataEventId;
  object: DataEventObjectType;
  model: ChatCompletionModel;
  choices: C[];
}

/** Complete chat completion response, including token usage. */
export interface ChatCompletion
  extends CreateChatCompletionDataEvent<Partial<ChatCompletionChoice>>, UsageMetadata {}

/** Single streamed chunk of a chat completion. */
export type ChatCompletionChunkDataEvent = CreateChatCompletionDataEvent<ChatCompletionChunkChoice>;

/**
 * Issues a streaming chat completion request against `/chat/completions`.
 *
 * Emits parsed chunks as 'data' events (via consumeStream), 'error' on a
 * non-OK status or network failure, and always a final 'end' event.
 * Must be invoked with an EventEmitter bound as `this`.
 *
 * @returns The bound emitter, for chaining.
 */
export function createChatCompletion(
  this: NodeJS.EventEmitter,
  doFetch: DoFetch,
  consumeStream: ConsumeStream,
  params: CreateChatCompletionParams,
) {
  // Map camelCase params onto the API's snake_case body; streaming is forced on.
  const requestBody = {
    messages: normalizeChatMessage(params.messages),
    model: params.model ?? ChatCompletionModel.GPT_3_5_TURBO,
    temperature: params.temperature ?? 1,
    top_p: params.topP ?? 1,
    n: params.n ?? 1,
    stop: params.stop ?? null,
    stream: true,
    max_tokens: params.maxTokens,
    presence_penalty: params.presencePenalty ?? 0,
    frequency_penalty: params.frequencyPenalty ?? 0,
    logit_bias: params.logitBias ?? {},
    user: params.user,
  };

  doFetch('POST', '/chat/completions', requestBody)
    .then(async (response) => {
      if (response.ok) {
        await consumeStream(response);
        this.emit('end');
        return;
      }

      this.emit('error', new PlatformError(
        // eslint-disable-next-line @typescript-eslint/restrict-template-expressions
        `Create chat completion returned with status: ${response.status}`,
        response,
      ));
      this.emit('end');
    })
    .catch((caught) => {
      this.emit('error', caught as Error);
      this.emit('end');
    });

  return this;
}

+ 66
- 0
src/platforms/openai/features/edit.ts View File

@@ -0,0 +1,66 @@
import {
ChoiceBase,
DoFetch,
PlatformError,
PlatformResponse,
UsageMetadata,
} from '../common';
import { EditModel } from '../models';

/** Value of the `object` discriminator on edit responses. */
export enum DataEventObjectType {
  EDIT = 'edit',
}

/** Parameters for createEdit. camelCase; mapped to snake_case for the API. */
export interface CreateEditParams {
  model: EditModel;
  input?: string;
  instruction: string;
  n?: number;
  temperature?: number;
  topP?: number;
}

/** A single edited-text alternative. */
export interface EditChoice extends ChoiceBase {
  text: string;
}

/** Payload emitted as one 'data' event for an edit request (not streamed). */
export interface CreateEditDataEvent extends PlatformResponse, UsageMetadata {
  object: DataEventObjectType;
  choices: EditChoice[];
}

/**
 * Issues a (non-streaming) edit request against `/edits`.
 *
 * Emits the parsed JSON body as a single 'data' event, 'error' on a non-OK
 * status or network failure, and always a final 'end' event. Must be invoked
 * with an EventEmitter bound as `this`.
 *
 * @returns The bound emitter, for chaining.
 */
export function createEdit(
  this: NodeJS.EventEmitter,
  doFetch: DoFetch,
  params: CreateEditParams,
) {
  const requestBody = {
    model: params.model ?? EditModel.TEXT_DAVINCI_EDIT_001,
    input: params.input ?? '',
    instruction: params.instruction,
    n: params.n ?? 1,
    temperature: params.temperature ?? 1,
    top_p: params.topP ?? 1,
  };

  doFetch('POST', '/edits', requestBody)
    .then(async (response) => {
      if (response.ok) {
        const responseData = await response.json() as Record<string, unknown>;
        this.emit('data', responseData);
        this.emit('end');
        return;
      }

      this.emit('error', new PlatformError(
        // eslint-disable-next-line @typescript-eslint/restrict-template-expressions
        `Request from platform returned with status: ${response.status}`,
        response,
      ));
      this.emit('end');
    })
    .catch((caught) => {
      this.emit('error', caught as Error);
      this.emit('end');
    });

  return this;
}

+ 70
- 0
src/platforms/openai/features/image.ts View File

@@ -0,0 +1,70 @@
import {
DoFetch,
PlatformError,
PlatformResponse,
} from '../common';

/** Supported output dimensions for image generation. */
export enum CreateImageSize {
  SQUARE_256 = '256x256',
  SQUARE_512 = '512x512',
  SQUARE_1024 = '1024x1024',
}

/** Wire formats the images endpoint can return. */
export enum CreateImageResponseFormat {
  URL = 'url',
  BASE64_JSON = 'b64_json',
}

/** Parameters for createImage. */
export interface CreateImageParams {
  prompt: string;
  n?: number;
  size?: CreateImageSize;
  user?: string;
}

/** Raw API item when response_format is b64_json. */
export interface CreateImageData {
  b64_json: string;
}

/** Payload emitted as the 'data' event: base64 items decoded into Buffers. */
export interface CreateImageDataEvent extends PlatformResponse {
  data: Buffer[];
}

/**
 * Issues an image generation request against `/images/generations`.
 *
 * Always requests base64 JSON so results can be decoded into Buffers before
 * being emitted as a single 'data' event; emits 'error' on failure and always
 * a final 'end'. Must be invoked with an EventEmitter bound as `this`.
 *
 * @returns The bound emitter, for chaining.
 */
export function createImage(
  this: NodeJS.EventEmitter,
  doFetch: DoFetch,
  params: CreateImageParams,
) {
  const requestBody = {
    prompt: params.prompt,
    n: params.n ?? 1,
    size: params.size ?? CreateImageSize.SQUARE_1024,
    user: params.user,
    response_format: CreateImageResponseFormat.BASE64_JSON,
  };

  doFetch('POST', '/images/generations', requestBody)
    .then(async (response) => {
      if (!response.ok) {
        this.emit('error', new PlatformError(
          // eslint-disable-next-line @typescript-eslint/restrict-template-expressions
          `Request from platform returned with status: ${response.status}`,
          response,
        ));
        this.emit('end');
        return;
      }

      const payload = await response.json() as Record<string, unknown>;
      const items = payload.data as CreateImageData[];
      const decoded = items.map((item) => Buffer.from(item.b64_json, 'base64'));

      this.emit('data', {
        ...payload,
        data: decoded,
      });
      this.emit('end');
    })
    .catch((caught) => {
      this.emit('error', caught as Error);
      this.emit('end');
    });

  return this;
}

+ 93
- 0
src/platforms/openai/features/text-completion.ts View File

@@ -0,0 +1,93 @@
import { TextCompletionModel } from '../models';
import {
ConsumeStream,
DataEventId,
DoFetch,
FinishableChoiceBase,
PlatformError,
PlatformResponse,
UsageMetadata,
} from '../common';

/** Value of the `object` discriminator on text completion events. */
export enum DataEventObjectType {
  TEXT_COMPLETION = 'text_completion',
}

/** Parameters for createCompletion. camelCase; mapped to snake_case for the API. */
export interface CreateTextCompletionParams {
  model: TextCompletionModel;
  prompt: string;
  temperature?: number;
  topP?: number;
  n?: number;
  stop?: string | string[];
  maxTokens?: number;
  logprobs?: number;
  echo?: boolean;
  bestOf?: number;
  logitBias?: Record<string, number>;
  user?: string;
  presencePenalty?: number;
  frequencyPenalty?: number;
}

/** Single text completion choice. */
export interface TextCompletionChoice extends FinishableChoiceBase {
  text: string;
  logprobs?: number;
}

/** Envelope common to text completion data events. */
export interface CreateTextCompletionDataEvent<
  C extends Partial<FinishableChoiceBase>
> extends PlatformResponse {
  id: DataEventId;
  object: DataEventObjectType;
  model: TextCompletionModel;
  choices: C[];
}

/** Complete text completion response, including token usage. */
export interface TextCompletion
  extends CreateTextCompletionDataEvent<Partial<TextCompletionChoice>>, UsageMetadata {}

/** Single streamed chunk of a text completion. */
export type TextCompletionChunkDataEvent = CreateTextCompletionDataEvent<TextCompletionChoice>;

/**
 * Issues a streaming text completion request against `/completions`.
 *
 * Emits parsed chunks as 'data' events (via consumeStream), 'error' on a
 * non-OK status or network failure, and always a final 'end' event. Must be
 * invoked with an EventEmitter bound as `this`.
 *
 * @returns The bound emitter, for chaining.
 */
export function createTextCompletion(
  this: NodeJS.EventEmitter,
  doFetch: DoFetch,
  consumeStream: ConsumeStream,
  params: CreateTextCompletionParams,
) {
  // Map camelCase params onto the API's snake_case body; streaming is forced on.
  // Fields left undefined are dropped by JSON.stringify, so the API defaults apply.
  doFetch('POST', '/completions', {
    model: params.model ?? TextCompletionModel.TEXT_DAVINCI_003,
    prompt: params.prompt,
    temperature: params.temperature ?? 1,
    top_p: params.topP ?? 1,
    n: params.n ?? 1,
    stop: params.stop ?? null,
    stream: true,
    max_tokens: params.maxTokens,
    logprobs: params.logprobs,
    echo: params.echo,
    best_of: params.bestOf,
    logit_bias: params.logitBias,
    user: params.user,
    presence_penalty: params.presencePenalty,
    frequency_penalty: params.frequencyPenalty,
  })
    .then(async (response) => {
      if (!response.ok) {
        this.emit('error', new PlatformError(
          // eslint-disable-next-line @typescript-eslint/restrict-template-expressions
          `Create text completion returned with status: ${response.status}`,
          response,
        ));
        this.emit('end');
        return;
      }

      await consumeStream(response);
      this.emit('end');
    })
    .catch((err) => {
      this.emit('error', err as Error);
      this.emit('end');
    });

  // Consistency fix: return the bound emitter like the sibling feature
  // functions (createChatCompletion, createEdit, createImage) already do.
  return this;
}

+ 114
- 0
src/platforms/openai/index.ts View File

@@ -0,0 +1,114 @@
import fetchPonyfill from 'fetch-ponyfill';
import { EventEmitter } from 'events';
import { PassThrough } from 'stream';
import { PlatformEventEmitter } from './events';
import { createTextCompletion, TextCompletion } from './features/text-completion';
import { createImage } from './features/image';
import { createChatCompletion, ChatCompletion } from './features/chat-completion';
import { createEdit } from './features/edit';

export * from './message';
export * from './models';
export { PlatformEventEmitter, ChatCompletion, TextCompletion };
export {
ChatCompletionChunkDataEvent,
DataEventObjectType as ChatCompletionDataEventObjectType,
} from './features/chat-completion';
export {
TextCompletionChunkDataEvent,
DataEventObjectType as TextCompletionDataEventObjectType,
} from './features/text-completion';
export {
CreateEditDataEvent,
DataEventObjectType as EditDataEventObjectType,
} from './features/edit';
export { CreateImageDataEvent, CreateImageSize } from './features/image';
export * from './common';

/** Supported API version path segments (prefixed onto every endpoint path). */
export enum ApiVersion {
  V1 = 'v1',
}

/** Discriminator identifying this platform module in SUPPORTED_PLATFORMS. */
export const PLATFORM_ID = 'openai' as const;

/** Top-level configuration consumed by createAiClient. */
export interface PlatformConfig {
  platform: typeof PLATFORM_ID;
  platformConfiguration: Configuration;
}

/** OpenAI-specific connection settings. */
export interface Configuration {
  /** Optional; sent as the OpenAI-Organization header when present. */
  organizationId?: string;
  apiVersion: ApiVersion;
  apiKey: string;
  /** Defaults to https://api.openai.com when omitted. */
  baseUrl?: string;
}

/**
 * EventEmitter-backed OpenAI client. Wires each feature function to a shared
 * authenticated fetch helper and, for streaming endpoints, a shared
 * server-sent-events consumer.
 */
export class PlatformEventEmitterImpl extends EventEmitter implements PlatformEventEmitter {
  readonly createCompletion: PlatformEventEmitter['createCompletion'];

  readonly createImage: PlatformEventEmitter['createImage'];

  readonly createChatCompletion: PlatformEventEmitter['createChatCompletion'];

  readonly createEdit: PlatformEventEmitter['createEdit'];

  constructor(configParams: Configuration) {
    super();
    const headers: Record<string, string> = {
      Authorization: `Bearer ${configParams.apiKey}`,
    };

    // Organization header is only sent when explicitly configured.
    if (configParams.organizationId) {
      headers['OpenAI-Organization'] = configParams.organizationId;
    }

    const { fetch: fetchInstance } = fetchPonyfill();
    // Shared request helper: JSON-encodes the body, joins the API version and
    // endpoint path onto the base URL, and announces the outgoing request via
    // a 'start' event before dispatching it.
    const doFetch = (method: string, path: string, body: Record<string, unknown>) => {
      const theFetchParams = {
        method,
        headers: {
          ...headers,
          'Content-Type': 'application/json',
        },
        body: JSON.stringify(body),
      };

      const url = new URL(
        `/${configParams.apiVersion}${path}`,
        configParams.baseUrl ?? 'https://api.openai.com',
      ).toString();

      this.emit('start', {
        ...theFetchParams,
        url,
      });

      return fetchInstance(url, theFetchParams);
    };

    // Reads a server-sent-events stream, emitting a 'data' event for every
    // parsed `data:` payload and skipping the terminal `[DONE]` sentinel.
    // NOTE(review): this assumes each network chunk contains only whole
    // `data: ...` lines; a JSON payload split across a chunk boundary would
    // fail to parse — confirm against the transport's framing.
    const consumeStream = async (response: Response) => {
      // eslint-disable-next-line no-restricted-syntax
      for await (const chunk of response.body as unknown as PassThrough) {
        const chunkStringMaybeMultiple = chunk.toString();
        const chunkStrings = chunkStringMaybeMultiple
          .split('\n')
          .filter((chunkString: string) => chunkString.length > 0);
        chunkStrings.forEach((chunkString: string) => {
          // Everything after the "data: " prefix; lines without it are skipped.
          const dataRaw = chunkString.split('data: ').at(1);
          if (!dataRaw) {
            return;
          }
          if (dataRaw === '[DONE]') {
            return;
          }
          const data = JSON.parse(dataRaw);
          this.emit('data', data);
        });
      }
    };
    // Bind the emitter and shared helpers into the public feature methods.
    this.createImage = createImage.bind(this, doFetch);
    this.createCompletion = createTextCompletion.bind(this, doFetch, consumeStream);
    this.createChatCompletion = createChatCompletion.bind(this, doFetch, consumeStream);
    this.createEdit = createEdit.bind(this, doFetch);
  }
}

+ 12
- 0
src/platforms/openai/message.ts View File

@@ -0,0 +1,12 @@
/** Conversation roles recognized by the chat API. */
export enum MessageRole {
  SYSTEM = 'system',
  USER = 'user',
  ASSISTANT = 'assistant',
}

/** Fully-specified chat message. */
export interface MessageObject {
  role: MessageRole;
  content: string;
}

/** A message: either a full object or a plain string (normalized to a user message). */
export type Message = string | MessageObject;

+ 48
- 0
src/platforms/openai/models.ts View File

@@ -0,0 +1,48 @@
// Model identifiers grouped by the endpoint that accepts them.
// see https://platform.openai.com/docs/models/model-endpoint-compatibility

/** Models accepted by /chat/completions. */
export enum ChatCompletionModel {
  GPT_4 = 'gpt-4',
  GPT_4_0314 = 'gpt-4-0314',
  GPT_4_32K = 'gpt-4-32k',
  GPT_4_32K_0314 = 'gpt-4-32k-0314',
  GPT_3_5_TURBO = 'gpt-3.5-turbo',
  GPT_3_5_TURBO_0301 = 'gpt-3.5-turbo-0301',
}

/** Models accepted by /completions. */
export enum TextCompletionModel {
  TEXT_DAVINCI_003 = 'text-davinci-003',
  TEXT_DAVINCI_002 = 'text-davinci-002',
  TEXT_CURIE_001 = 'text-curie-001',
  TEXT_BABBAGE_001 = 'text-babbage-001',
  TEXT_ADA_001 = 'text-ada-001',
}

/** Models accepted by /edits. */
export enum EditModel {
  TEXT_DAVINCI_EDIT_001 = 'text-davinci-edit-001',
  CODE_DAVINCI_EDIT_001 = 'code-davinci-edit-001',
}

/** Models accepted by /audio/transcriptions (feature not yet implemented here). */
export enum AudioTranscriptionModel {
  WHISPER_1 = 'whisper-1',
}

/** Models accepted by /audio/translations (feature not yet implemented here). */
export enum AudioTranslationModel {
  WHISPER_1 = 'whisper-1',
}

/** Base models accepted by /fine-tunes (feature not yet implemented here). */
export enum FineTuneModel {
  DAVINCI = 'davinci',
  CURIE = 'curie',
  BABBAGE = 'babbage',
  ADA = 'ada',
}

/** Models accepted by /embeddings (feature not yet implemented here). */
export enum EmbeddingModel {
  TEXT_EMBEDDING_ADA_002 = 'text-embedding-ada-002',
  TEXT_SEARCH_ADA_DOC_001 = 'text-search-ada-doc-001',
}

/** Models accepted by /moderations (feature not yet implemented here). */
export enum ModerationModel {
  TEXT_MODERATION_STABLE = 'text-moderation-stable',
  TEXT_MODERATION_LATEST = 'text-moderation-latest',
}

+ 214
- 0
test/index.test.ts View File

@@ -0,0 +1,214 @@
import { config } from 'dotenv';
import {
beforeAll,
beforeEach,
describe,
expect,
it,
} from 'vitest';
import {
createAiClient,
PlatformEventEmitter,
OpenAi,
} from '../src';

describe('ai-utils', () => {
  beforeAll(() => {
    // Load OPENAI_API_KEY / OPENAI_ORGANIZATION_ID from .env.
    config();
  });

  describe('OpenAI', () => {
    let aiClient: PlatformEventEmitter;

    // Fresh client per test so event listeners don't leak between cases.
    beforeEach(() => {
      aiClient = createAiClient({
        platform: OpenAi.PLATFORM_ID,
        platformConfiguration: {
          apiKey: process.env.OPENAI_API_KEY as string,
          organizationId: process.env.OPENAI_ORGANIZATION_ID as string,
          apiVersion: OpenAi.ApiVersion.V1,
        },
      });
    });

    // Skipped by default: calls the live OpenAI API with the configured key.
    describe.skip('createChatCompletion', () => {
      let result: Partial<OpenAi.ChatCompletion> | undefined;

      beforeEach(() => {
        result = undefined;

        // Accumulate streamed chunks into one ChatCompletion-shaped result.
        aiClient.on<OpenAi.ChatCompletionChunkDataEvent>('data', (d) => {
          d.choices.forEach((c) => {
            // First chunk seeds the envelope fields.
            if (!result) {
              result = {
                id: d.id,
                object: OpenAi.ChatCompletionDataEventObjectType.CHAT_COMPLETION,
                created: d.created,
                model: d.model,
              };
            }

            if (!Array.isArray(result?.choices)) {
              result.choices = [];
            }

            // Choices are indexed by the API-provided choice index (n may be > 1).
            if (!result.choices[c.index]) {
              result.choices[c.index] = {
                message: { content: '' },
                index: c.index,
                finish_reason: c.finish_reason,
              };
            }

            // Deltas carry either a role (first chunk) or a content fragment.
            if (result.choices[c.index].message) {
              if (c.delta.role) {
                (result.choices[c.index].message as Record<string, unknown>).role = c.delta.role;
              }

              if (c.delta.content) {
                (result.choices[c.index].message as Record<string, unknown>)
                  .content += c.delta.content;
              }
            }

            if (c.finish_reason) {
              result.choices[c.index].finish_reason = c.finish_reason;
            }
          });
        });
      });

      it('works', () => new Promise<void>((resolve, reject) => {
        // Assertions run once the stream signals 'end'.
        aiClient.on('end', () => {
          expect(result).toHaveProperty('id', expect.any(String));
          expect(result).toHaveProperty('object', OpenAi.ChatCompletionDataEventObjectType.CHAT_COMPLETION);
          expect(result).toHaveProperty('model', expect.any(String));
          expect(result).toHaveProperty('created', expect.any(Number));
          expect(result).toHaveProperty('choices', expect.any(Array));
          resolve();
        });

        aiClient.on('error', (error: Error) => {
          reject(error);
        });

        aiClient.createChatCompletion({
          messages: 'Count from 1 to 20 in increments of a random number from 1 to 10.',
          model: OpenAi.ChatCompletionModel.GPT_3_5_TURBO,
          n: 2,
        });
      }), { timeout: 10000 });
    });

    // Skipped by default: calls the live OpenAI API with the configured key.
    describe.skip('createImage', () => {
      it('works', () => new Promise<void>((resolve, reject) => {
        aiClient.on<OpenAi.CreateImageDataEvent>('data', (r) => {
          expect(r).toHaveProperty('created', expect.any(Number));
          expect(r).toHaveProperty('data', expect.any(Array));
          // Base64 payloads should already be decoded into Buffers.
          expect(r.data.every((d) => d instanceof Buffer)).toBe(true);
        });

        aiClient.on('end', () => {
          resolve();
        });

        aiClient.on('error', (error: Error) => {
          reject(error);
        });

        aiClient.createImage({
          prompt: 'A photo of a cat',
          size: OpenAi.CreateImageSize.SQUARE_256,
        });
      }), { timeout: 10000 });
    });

    // Skipped by default: calls the live OpenAI API with the configured key.
    describe.skip('createCompletion', () => {
      let result: Partial<OpenAi.TextCompletion> | undefined;

      beforeEach(() => {
        result = undefined;

        // Accumulate streamed chunks into one TextCompletion-shaped result.
        aiClient.on<OpenAi.TextCompletionChunkDataEvent>('data', (d) => {
          d.choices.forEach((c) => {
            if (!result) {
              result = {
                id: d.id,
                object: OpenAi.TextCompletionDataEventObjectType.TEXT_COMPLETION,
                created: d.created,
                model: d.model,
              };
            }

            if (!Array.isArray(result?.choices)) {
              result.choices = [];
            }

            if (!result.choices[c.index]) {
              result.choices[c.index] = {
                text: '',
                index: c.index,
                finish_reason: c.finish_reason,
                logprobs: c.logprobs, // TODO dunno how to use this?
              };
            }

            if (c.text) {
              result.choices[c.index].text += c.text;
            }

            if (c.finish_reason) {
              result.choices[c.index].finish_reason = c.finish_reason;
            }
          });
        });
      });

      it('works', () => new Promise<void>((resolve, reject) => {
        aiClient.on('end', () => {
          expect(result).toHaveProperty('id', expect.any(String));
          expect(result).toHaveProperty('object', OpenAi.TextCompletionDataEventObjectType.TEXT_COMPLETION);
          expect(result).toHaveProperty('model', expect.any(String));
          expect(result).toHaveProperty('created', expect.any(Number));
          expect(result).toHaveProperty('choices', expect.any(Array));
          resolve();
        });

        aiClient.on('error', (error: Error) => {
          reject(error);
        });

        aiClient.createCompletion({
          prompt: 'Say this is a test',
          model: OpenAi.TextCompletionModel.TEXT_DAVINCI_003,
          maxTokens: 7,
          temperature: 0,
        });
      }), { timeout: 10000 });
    });

    // Skipped by default: calls the live OpenAI API with the configured key.
    describe.skip('createEdit', () => {
      it('works', () => new Promise<void>((resolve, reject) => {
        aiClient.on<OpenAi.CreateEditDataEvent>('data', (r) => {
          expect(r).toHaveProperty('object', OpenAi.EditDataEventObjectType.EDIT);
          expect(r).toHaveProperty('created', expect.any(Number));
          expect(r).toHaveProperty('choices', expect.any(Array));
        });

        aiClient.on('end', () => {
          resolve();
        });

        aiClient.on('error', (error: Error) => {
          reject(error);
        });

        aiClient.createEdit({
          model: OpenAi.EditModel.TEXT_DAVINCI_EDIT_001,
          // The misspelling of "week" is deliberate: it is the input the
          // edit instruction below is expected to fix.
          input: 'What day of the wek is it?',
          instruction: 'Fix the spelling mistakes',
        });
      }), { timeout: 10000 });
    });
  });
});

+ 21
- 0
tsconfig.eslint.json View File

@@ -0,0 +1,21 @@
{
"exclude": ["node_modules"],
"include": ["src", "types", "test"],
"compilerOptions": {
"module": "ESNext",
"lib": ["ESNext"],
"importHelpers": true,
"declaration": true,
"sourceMap": true,
"rootDir": "./",
"strict": true,
"noUnusedLocals": true,
"noUnusedParameters": true,
"noImplicitReturns": true,
"noFallthroughCasesInSwitch": true,
"moduleResolution": "node",
"jsx": "react",
"esModuleInterop": true,
"target": "es2018"
}
}

+ 21
- 0
tsconfig.json View File

@@ -0,0 +1,21 @@
{
"exclude": ["node_modules"],
"include": ["src", "types"],
"compilerOptions": {
"module": "ESNext",
"lib": ["ESNext", "DOM"],
"importHelpers": true,
"declaration": true,
"sourceMap": true,
"rootDir": "./src",
"strict": true,
"noUnusedLocals": true,
"noUnusedParameters": true,
"noImplicitReturns": true,
"noFallthroughCasesInSwitch": true,
"moduleResolution": "node",
"jsx": "react",
"esModuleInterop": true,
"target": "es2018"
}
}

+ 3607
- 0
yarn.lock
File diff suppressed because it is too large
View File


Loading…
Cancel
Save