- import { describe, it, expect } from 'vitest';
- import {
- ChatCompletionModel,
- getPromptTokens, getTokens,
- MessageRole,
- Usage,
- } from '../../../src/platforms/openai';
-
- describe('OpenAI', () => {
- describe('usage', () => {
- describe('gpt-3.5-turbo', () => {
- it('calculates prompt token count for a single message', () => {
- const request = {
- model: ChatCompletionModel.GPT_3_5_TURBO,
- messages: [
- {
- role: MessageRole.USER,
- content: 'Say this is a test.',
- },
- ],
- };
-
- const promptTokens = getPromptTokens(
- request.messages,
- request.model,
- );
-
- expect(promptTokens).toHaveLength(14);
- });
-
- it('calculates prompt token count for multiple messages', () => {
- const request = {
- model: ChatCompletionModel.GPT_3_5_TURBO,
- messages: [
- {
- role: MessageRole.SYSTEM,
- content: 'You are a helpful assistant',
- },
- {
- role: MessageRole.USER,
- content: 'Say this is a test.',
- },
- ],
- };
-
- const promptTokens = getPromptTokens(
- request.messages,
- request.model,
- );
-
- expect(promptTokens).toHaveLength(24);
- });
-
- it('calculates all usage for a single message', () => {
- const request = {
- model: ChatCompletionModel.GPT_3_5_TURBO,
- messages: [
- {
- role: MessageRole.USER,
- content: 'Say this is a test.',
- },
- ],
- };
-
- const response = {
- choices: [
- {
- message: {
- role: MessageRole.ASSISTANT,
- content: 'This is a test.',
- },
- },
- ],
- };
-
- const promptTokensLength = getPromptTokens(
- request.messages,
- request.model,
- )
- .length;
- const completionTokensLength = getTokens(
- response.choices[0].message.content,
- request.model,
- )
- .length;
- const usage: Usage = {
- prompt_tokens: promptTokensLength,
- completion_tokens: completionTokensLength,
- total_tokens: promptTokensLength + completionTokensLength,
- };
-
- expect(usage).toEqual({
- prompt_tokens: 14,
- completion_tokens: 5,
- total_tokens: 19,
- });
- });
-
- it('calculates all usage for multiple messages', () => {
- const request = {
- model: ChatCompletionModel.GPT_3_5_TURBO,
- messages: [
- {
- role: MessageRole.SYSTEM,
- content: 'You are a helpful assistant',
- },
- {
- role: MessageRole.USER,
- content: 'Say this is a test.',
- },
- ],
- };
-
- const response = {
- choices: [
- {
- message: {
- role: MessageRole.ASSISTANT,
- content: 'This is a test.',
- },
- },
- ],
- };
-
- const promptTokensLength = getPromptTokens(
- request.messages,
- request.model,
- )
- .length;
- const completionTokensLength = getTokens(
- response.choices[0].message.content,
- request.model,
- )
- .length;
- const usage: Usage = {
- prompt_tokens: promptTokensLength,
- completion_tokens: completionTokensLength,
- total_tokens: promptTokensLength + completionTokensLength,
- };
-
- expect(usage).toEqual({
- prompt_tokens: 24,
- completion_tokens: 5,
- total_tokens: 29,
- });
- });
- });
-
- describe('gpt-4', () => {
- it('calculates prompt token count for a single message', () => {
- const request = {
- model: ChatCompletionModel.GPT_4,
- messages: [
- {
- role: MessageRole.USER,
- content: 'Say this is a test.',
- },
- ],
- };
-
- const promptTokens = getPromptTokens(
- request.messages,
- request.model,
- );
-
- expect(promptTokens).toHaveLength(13);
- });
-
- it('calculates prompt token count for multiple messages', () => {
- const request = {
- model: ChatCompletionModel.GPT_4,
- messages: [
- {
- role: MessageRole.SYSTEM,
- content: 'You are a helpful assistant',
- },
- {
- role: MessageRole.USER,
- content: 'Say this is a test.',
- },
- ],
- };
-
- const promptTokens = getPromptTokens(
- request.messages,
- request.model,
- );
-
- expect(promptTokens).toHaveLength(22);
- });
- });
- });
- });