Many-in-one AI client.
You cannot select more than 25 topics. Topics must start with a letter or number, can include dashes ('-'), and can be up to 35 characters long.

194 lines
4.8 KiB

  1. import { describe, it, expect } from 'vitest';
  2. import {
  3. ChatCompletionModel,
  4. getPromptTokens, getTokens,
  5. MessageRole,
  6. CompletionUsage,
  7. } from '../../../src/platforms/openai';
  8. describe('OpenAI', () => {
  9. describe('usage', () => {
  10. describe('gpt-3.5-turbo', () => {
  11. it('calculates prompt token count for a single message', () => {
  12. const request = {
  13. model: ChatCompletionModel.GPT_3_5_TURBO,
  14. messages: [
  15. {
  16. role: MessageRole.USER,
  17. content: 'Say this is a test.',
  18. },
  19. ],
  20. };
  21. const promptTokens = getPromptTokens(
  22. request.messages,
  23. request.model,
  24. );
  25. expect(promptTokens).toHaveLength(14);
  26. });
  27. it('calculates prompt token count for multiple messages', () => {
  28. const request = {
  29. model: ChatCompletionModel.GPT_3_5_TURBO,
  30. messages: [
  31. {
  32. role: MessageRole.SYSTEM,
  33. content: 'You are a helpful assistant',
  34. },
  35. {
  36. role: MessageRole.USER,
  37. content: 'Say this is a test.',
  38. },
  39. ],
  40. };
  41. const promptTokens = getPromptTokens(
  42. request.messages,
  43. request.model,
  44. );
  45. expect(promptTokens).toHaveLength(24);
  46. });
  47. it('calculates all usage for a single message', () => {
  48. const request = {
  49. model: ChatCompletionModel.GPT_3_5_TURBO,
  50. messages: [
  51. {
  52. role: MessageRole.USER,
  53. content: 'Say this is a test.',
  54. },
  55. ],
  56. };
  57. const response = {
  58. choices: [
  59. {
  60. message: {
  61. role: MessageRole.ASSISTANT,
  62. content: 'This is a test.',
  63. },
  64. },
  65. ],
  66. };
  67. const promptTokensLength = getPromptTokens(
  68. request.messages,
  69. request.model,
  70. )
  71. .length;
  72. const completionTokensLength = getTokens(
  73. response.choices[0].message.content,
  74. request.model,
  75. )
  76. .length;
  77. const usage: CompletionUsage = {
  78. prompt_tokens: promptTokensLength,
  79. completion_tokens: completionTokensLength,
  80. total_tokens: promptTokensLength + completionTokensLength,
  81. };
  82. expect(usage).toEqual({
  83. prompt_tokens: 14,
  84. completion_tokens: 5,
  85. total_tokens: 19,
  86. });
  87. });
  88. it('calculates all usage for multiple messages', () => {
  89. const request = {
  90. model: ChatCompletionModel.GPT_3_5_TURBO,
  91. messages: [
  92. {
  93. role: MessageRole.SYSTEM,
  94. content: 'You are a helpful assistant',
  95. },
  96. {
  97. role: MessageRole.USER,
  98. content: 'Say this is a test.',
  99. },
  100. ],
  101. };
  102. const response = {
  103. choices: [
  104. {
  105. message: {
  106. role: MessageRole.ASSISTANT,
  107. content: 'This is a test.',
  108. },
  109. },
  110. ],
  111. };
  112. const promptTokensLength = getPromptTokens(
  113. request.messages,
  114. request.model,
  115. )
  116. .length;
  117. const completionTokensLength = getTokens(
  118. response.choices[0].message.content,
  119. request.model,
  120. )
  121. .length;
  122. const usage: CompletionUsage = {
  123. prompt_tokens: promptTokensLength,
  124. completion_tokens: completionTokensLength,
  125. total_tokens: promptTokensLength + completionTokensLength,
  126. };
  127. expect(usage).toEqual({
  128. prompt_tokens: 24,
  129. completion_tokens: 5,
  130. total_tokens: 29,
  131. });
  132. });
  133. });
  134. describe('gpt-4', () => {
  135. it('calculates prompt token count for a single message', () => {
  136. const request = {
  137. model: ChatCompletionModel.GPT_4,
  138. messages: [
  139. {
  140. role: MessageRole.USER,
  141. content: 'Say this is a test.',
  142. },
  143. ],
  144. };
  145. const promptTokens = getPromptTokens(
  146. request.messages,
  147. request.model,
  148. );
  149. expect(promptTokens).toHaveLength(13);
  150. });
  151. it('calculates prompt token count for multiple messages', () => {
  152. const request = {
  153. model: ChatCompletionModel.GPT_4,
  154. messages: [
  155. {
  156. role: MessageRole.SYSTEM,
  157. content: 'You are a helpful assistant',
  158. },
  159. {
  160. role: MessageRole.USER,
  161. content: 'Say this is a test.',
  162. },
  163. ],
  164. };
  165. const promptTokens = getPromptTokens(
  166. request.messages,
  167. request.model,
  168. );
  169. expect(promptTokens).toHaveLength(22);
  170. });
  171. });
  172. });
  173. });