import { describe, it, expect } from 'vitest';
import {
  ChatCompletionModel,
  getPromptTokens,
  getTokens,
  MessageRole,
  Usage,
} from '../../../src/platforms/openai';

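// These tests pin down the token accounting for OpenAI chat requests.
// The expected counts are fixtures for these exact strings and include
// the chat-format wrapper tokens added around each message, which is
// why they exceed the raw token count of the message content alone.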
describe('OpenAI', () => {
  describe('usage', () => {
    describe('gpt-3.5-turbo', () => {
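      // 'Say this is a test.' alone tokenizes to fewer than 14 tokens;
      // the difference is the per-message overhead for gpt-3.5-turbo.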
      it('calculates prompt token count for a single message', () => {
        const request = {
          model: ChatCompletionModel.GPT_3_5_TURBO,
          messages: [
            {
              role: MessageRole.USER,
              content: 'Say this is a test.',
            },
          ],
        };

        const promptTokens = getPromptTokens(
          request.messages,
          request.model,
        );

        expect(promptTokens).toHaveLength(14);
      });

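      // Adding a system message raises the count by its content tokens
      // plus another per-message overhead (24 here vs. 14 above).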
      it('calculates prompt token count for multiple messages', () => {
        const request = {
          model: ChatCompletionModel.GPT_3_5_TURBO,
          messages: [
            {
              role: MessageRole.SYSTEM,
              content: 'You are a helpful assistant',
            },
            {
              role: MessageRole.USER,
              content: 'Say this is a test.',
            },
          ],
        };

        const promptTokens = getPromptTokens(
          request.messages,
          request.model,
        );

        expect(promptTokens).toHaveLength(24);
      });

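      // Assembles a Usage object the way a caller might after receiving
      // a completion: prompt and completion tokens are counted locally
      // and summed, mirroring the shape of the API's own usage field.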
      it('calculates all usage for a single message', () => {
        const request = {
          model: ChatCompletionModel.GPT_3_5_TURBO,
          messages: [
            {
              role: MessageRole.USER,
              content: 'Say this is a test.',
            },
          ],
        };

        const response = {
          choices: [
            {
              message: {
                role: MessageRole.ASSISTANT,
                content: 'This is a test.',
              },
            },
          ],
        };

        const promptTokensLength = getPromptTokens(
          request.messages,
          request.model,
        ).length;
        const completionTokensLength = getTokens(
          response.choices[0].message.content,
          request.model,
        ).length;
        const usage: Usage = {
          prompt_tokens: promptTokensLength,
          completion_tokens: completionTokensLength,
          total_tokens: promptTokensLength + completionTokensLength,
        };

        expect(usage).toEqual({
          prompt_tokens: 14,
          completion_tokens: 5,
          total_tokens: 19,
        });
      });

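      // Same as above with a system message added: only prompt_tokens
      // (and therefore total_tokens) changes; the completion count is
      // unchanged.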
      it('calculates all usage for multiple messages', () => {
        const request = {
          model: ChatCompletionModel.GPT_3_5_TURBO,
          messages: [
            {
              role: MessageRole.SYSTEM,
              content: 'You are a helpful assistant',
            },
            {
              role: MessageRole.USER,
              content: 'Say this is a test.',
            },
          ],
        };

        const response = {
          choices: [
            {
              message: {
                role: MessageRole.ASSISTANT,
                content: 'This is a test.',
              },
            },
          ],
        };

        const promptTokensLength = getPromptTokens(
          request.messages,
          request.model,
        ).length;
        const completionTokensLength = getTokens(
          response.choices[0].message.content,
          request.model,
        ).length;
        const usage: Usage = {
          prompt_tokens: promptTokensLength,
          completion_tokens: completionTokensLength,
          total_tokens: promptTokensLength + completionTokensLength,
        };

        expect(usage).toEqual({
          prompt_tokens: 24,
          completion_tokens: 5,
          total_tokens: 29,
        });
      });
    });

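    // gpt-4 applies a smaller per-message overhead, so the identical
    // requests yield lower counts here (13 vs. 14, 22 vs. 24).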
    describe('gpt-4', () => {
      it('calculates prompt token count for a single message', () => {
        const request = {
          model: ChatCompletionModel.GPT_4,
          messages: [
            {
              role: MessageRole.USER,
              content: 'Say this is a test.',
            },
          ],
        };

        const promptTokens = getPromptTokens(
          request.messages,
          request.model,
        );

        expect(promptTokens).toHaveLength(13);
      });

      it('calculates prompt token count for multiple messages', () => {
        const request = {
          model: ChatCompletionModel.GPT_4,
          messages: [
            {
              role: MessageRole.SYSTEM,
              content: 'You are a helpful assistant',
            },
            {
              role: MessageRole.USER,
              content: 'Say this is a test.',
            },
          ],
        };

        const promptTokens = getPromptTokens(
          request.messages,
          request.model,
        );

        expect(promptTokens).toHaveLength(22);
      });
    });
  });
});