@@ -11,6 +11,8 @@ import {
   PlatformEventEmitter,
   OpenAi,
 } from '../../../src';
+import { getPromptTokens } from '../../../src/platforms/openai/usage';
+import { ChatCompletionModel } from '../../../src/platforms/openai';
 
 describe('OpenAI', () => {
   beforeAll(() => {
@@ -34,17 +36,30 @@ describe('OpenAI', () => {
   describe('createChatCompletion', () => {
     let result: Partial<OpenAi.ChatCompletion> | undefined;
+    let prompt: string;
 
     beforeEach(() => {
       result = undefined;
 
       aiClient.on<OpenAi.ChatCompletionChunkDataEvent>('data', (d) => {
         d.choices.forEach((c) => {
           if (!result) {
+            const promptTokens = getPromptTokens(
+              prompt,
+              ChatCompletionModel.GPT_3_5_TURBO,
+            )
+              .length;
+
             result = {
               id: d.id,
               object: OpenAi.ChatCompletionDataEventObjectType.CHAT_COMPLETION,
               created: d.created,
               model: d.model,
+              usage: {
+                prompt_tokens: promptTokens,
+                completion_tokens: 0,
+                total_tokens: promptTokens,
+              },
             };
           }
@@ -68,6 +83,11 @@ describe('OpenAI', () => {
             if (c.delta.content) {
               (result.choices[c.index].message as Record<string, unknown>)
                 .content += c.delta.content;
+
+              result.usage!.completion_tokens += 1;
+              result.usage!.total_tokens = (
+                result.usage!.prompt_tokens + result.usage!.completion_tokens
+              );
             }
           }
@@ -92,8 +112,9 @@ describe('OpenAI', () => {
         reject(error);
       });
 
+      prompt = 'Count from 1 to 20 in increments of a random number from 1 to 10.';
       aiClient.createChatCompletion({
-        messages: 'Count from 1 to 20 in increments of a random number from 1 to 10.',
+        messages: prompt,
         model: OpenAi.ChatCompletionModel.GPT_3_5_TURBO,
         n: 2,
      });
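
The usage bookkeeping this diff adds can be read in isolation: the prompt side comes from getPromptTokens(prompt, model).length, and the completion side is approximated as one token per streamed content delta, with total_tokens as their sum. Below is a minimal standalone sketch under those same assumptions; the buildUsage helper and its contentDeltas parameter are illustrative only, not part of the codebase, and it assumes getPromptTokens returns an array of tokens, as the .length call in the diff implies.

import { getPromptTokens } from '../../../src/platforms/openai/usage';
import { ChatCompletionModel } from '../../../src/platforms/openai';

// Illustrative only: mirrors the test's accounting, not a library API.
function buildUsage(prompt: string, contentDeltas: string[]) {
  // Prompt tokens are counted with the same helper the diff imports.
  const promptTokens = getPromptTokens(prompt, ChatCompletionModel.GPT_3_5_TURBO).length;
  // The test treats each streamed content delta as one completion token.
  const completionTokens = contentDeltas.length;
  return {
    prompt_tokens: promptTokens,
    completion_tokens: completionTokens,
    total_tokens: promptTokens + completionTokens,
  };
}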
|