
Improve example CLI, move tiktoken wasm processing

The example CLI now has an improved token-counting UI.

The tiktoken wasm processing has been moved to the main package instead.
Branch: master
Commit: 140a6d03f3
8 changed files with 38 additions and 29 deletions
1. examples/cli/pridepack.config.js (+0, -12)
2. examples/cli/src/app.ts (+1, -1)
3. examples/cli/src/index.ts (+14, -11)
4. package.json (+1, -0)
5. pridepack.config.js (+15, -0)
6. pridepack.json (+0, -3)
7. src/index.ts (+2, -2)
8. yarn.lock (+5, -0)

examples/cli/pridepack.config.js (+0, -12)

@@ -1,15 +1,3 @@
-const { wasmLoader } = require('esbuild-plugin-wasm');
 module.exports = {
   target: 'esnext',
-  plugins: ({ isCJS }) => {
-    if (isCJS) {
-      return [
-        wasmLoader({
-          mode: 'deferred',
-        }),
-      ];
-    }
-
-    return [];
-  },
 }
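What survives in examples/cli/pridepack.config.js, per the hunk's context lines, is only the bare target setting:

module.exports = {
  target: 'esnext',
}

The deleted wasmLoader block reappears verbatim in the new root-level pridepack.config.js below; that relocation is the "move tiktoken wasm processing" half of this commit.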

examples/cli/src/app.ts (+1, -1)

@@ -7,7 +7,7 @@ export interface CreateOpenAiClientParams {
 }
 
 export const createOpenAiClient = (params: CreateOpenAiClientParams) => (
-  createAiClient({
+  createAiClient<OpenAi.PlatformEventEmitter>({
     platform: OpenAi.PLATFORM_ID,
     platformConfiguration: {
       apiKey: params.apiKey,
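The explicit type argument is what makes the example CLI's event handlers type-safe: createOpenAiClient now returns OpenAi.PlatformEventEmitter rather than the broad cross-platform union. A minimal sketch of the effect at a call site, using only identifiers that appear in this diff (the apiKey value is a placeholder):

import { OpenAi } from '@modal-sh/mio-ai';
import { createOpenAiClient } from './app';

const aiClient = createOpenAiClient({ apiKey: 'sk-placeholder' });

// The narrowed return type lets chunk events be consumed with full typing,
// exactly as examples/cli/src/index.ts does below:
aiClient.on<OpenAi.ChatCompletionChunkDataEvent>('data', (d) => {
  // d is typed as OpenAi.ChatCompletionChunkDataEvent here; no cast needed.
});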


examples/cli/src/index.ts (+14, -11)

@@ -1,13 +1,13 @@
 import yargs from 'yargs';
 import { hideBin } from 'yargs/helpers';
-import { OpenAi, PlatformEventEmitter } from '@modal-sh/mio-ai';
+import { OpenAi } from '@modal-sh/mio-ai';
 import { TextPrompt, isCancel } from '@clack/core';
 import { createOpenAiClient } from './app';
 
 export type Argv = Record<string, unknown>;
 
 const receiveData = (
-  aiClient: PlatformEventEmitter,
+  aiClient: OpenAi.PlatformEventEmitter,
   theContent: string,
   argv: Record<string, unknown>,
   memory: OpenAi.MessageObject[],
@@ -15,11 +15,20 @@ const receiveData = (
   let completionTokens: number;
 
   const normalizedChatMessage = OpenAi.normalizeChatMessage(theContent);
-
   const model = (
     argv.model as OpenAi.ChatCompletionModel
     ?? OpenAi.ChatCompletionModel.GPT_3_5_TURBO
   );
+  const { length: promptTokens } = OpenAi.getPromptTokens(
+    normalizedChatMessage,
+    model,
+  );
+
+  process.stdout.write(`(${promptTokens} prompt tokens)`);
+  if (argv.memory) {
+    process.stdout.write(`\n${memory.length} memory items`);
+  }
+  process.stdout.write('\n\n');
 
   let assistantMessage: Partial<OpenAi.MessageObject>;
   aiClient.on<OpenAi.ChatCompletionChunkDataEvent>('data', (d) => {
@@ -41,12 +50,7 @@ const receiveData = (
     memory.push(m);
   });
   memory.push(assistantMessage as OpenAi.MessageObject);
-  process.stdout.write('\n\n');
-  const { length: promptTokens } = OpenAi.getPromptTokens(
-    normalizedChatMessage,
-    model,
-  );
-  process.stdout.write(`info:\n${promptTokens} prompt tokens\n${completionTokens} completion tokens`);
+  process.stdout.write(`\n(${completionTokens} completion tokens)`);
   if (argv.memory) {
     process.stdout.write(`\n${memory.length} memory items`);
   }
@@ -120,7 +124,6 @@ const main = async (argv: Argv) => {
         done = true;
         break;
       }
-      process.stdout.write('\n');
       // eslint-disable-next-line no-await-in-loop
       await receiveData(aiClient, content, argv, memory);
     } catch {
@@ -129,7 +132,7 @@ const main = async (argv: Argv) => {
     }
   } while (!done);
 
-  process.stdout.write('\ninfo:\nUser exited loop\n\n');
+  process.stdout.write('(User exited loop)\n\n');
   return resolveResult;
 };
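Net effect of the index.ts changes: the prompt-token count (computed locally, presumably via the tiktoken wasm build this commit relocates) is printed before the request is sent, and the completion-token count after the stream finishes, instead of a single trailing info block. A condensed sketch of the new flow, using identifiers from this diff:

import { OpenAi } from '@modal-sh/mio-ai';

const normalizedChatMessage = OpenAi.normalizeChatMessage('Hello there!');
const model = OpenAi.ChatCompletionModel.GPT_3_5_TURBO;

// Printed up front, before any network round trip:
const { length: promptTokens } = OpenAi.getPromptTokens(normalizedChatMessage, model);
process.stdout.write(`(${promptTokens} prompt tokens)\n\n`);

let completionTokens = 0;
// ...stream the response, updating completionTokens as chunks arrive...

// Printed once the stream completes:
process.stdout.write(`\n(${completionTokens} completion tokens)\n`);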



package.json (+1, -0)

@@ -15,6 +15,7 @@
   "devDependencies": {
     "@types/node": "^18.14.1",
     "dotenv": "^16.0.3",
+    "esbuild-plugin-wasm": "^1.0.0",
     "eslint": "^8.35.0",
     "eslint-config-lxsmnsyc": "^0.5.0",
     "pridepack": "2.4.4",


pridepack.config.js (+15, -0)

@@ -0,0 +1,15 @@
+const { wasmLoader } = require('esbuild-plugin-wasm');
+module.exports = {
+  target: 'esnext',
+  plugins: ({ isCJS }) => {
+    if (isCJS) {
+      return [
+        wasmLoader({
+          mode: 'deferred',
+        }),
+      ];
+    }
+
+    return [];
+  },
+}
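This is the same plugin block that was deleted from examples/cli, so .wasm imports (tiktoken's) are now bundled when building the main package itself. The plugins field is a hook evaluated per output format, and only the CommonJS build gets the wasm loader. Exercising the hook directly shows the branching; a sketch under the assumption that pridepack passes an options object with an isCJS flag, as the config's own signature suggests:

const config = require('./pridepack.config.js');

console.log(config.plugins({ isCJS: true }).length);  // 1: the deferred-mode wasmLoader
console.log(config.plugins({ isCJS: false }).length); // 0: the ESM build gets no plugins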

pridepack.json (+0, -3)

@@ -1,3 +0,0 @@
-{
-  "target": "es2018"
-}

src/index.ts (+2, -2)

@@ -18,7 +18,7 @@ export type PlatformEventEmitter = (
   | ElevenLabsImpl.PlatformEventEmitter
 );
 
-export const createAiClient = (configParams: PlatformConfig): PlatformEventEmitter => {
+export const createAiClient = <T extends PlatformEventEmitter = PlatformEventEmitter> (configParams: PlatformConfig): T => {
   const {
     platform,
     platformConfiguration,
@@ -32,5 +32,5 @@ export const createAiClient = (configParams: PlatformConfig): PlatformEventEmitt
     throw new Error(`Unsupported platform: ${platform}. Supported platforms are: ${supportedPlatforms}`);
   }
 
-  return new platformModule.PlatformEventEmitterImpl(platformConfiguration);
+  return new platformModule.PlatformEventEmitterImpl(platformConfiguration) as T;
 };
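The generic defaults to the cross-platform PlatformEventEmitter union, so existing call sites compile unchanged, while platform-aware callers can pin the narrower type (as examples/cli/src/app.ts does above). A minimal sketch, reusing identifiers from this diff with a placeholder apiKey:

// T defaults to the PlatformEventEmitter union:
const generic = createAiClient({
  platform: OpenAi.PLATFORM_ID,
  platformConfiguration: { apiKey: 'sk-placeholder' },
});

// Pinning T narrows the returned emitter to the OpenAI-specific type:
const typed = createAiClient<OpenAi.PlatformEventEmitter>({
  platform: OpenAi.PLATFORM_ID,
  platformConfiguration: { apiKey: 'sk-placeholder' },
});

Note that the as-T cast inside createAiClient is not checked at runtime; correctness depends on the caller pairing platform with the matching emitter type.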

yarn.lock (+5, -0)

@@ -1295,6 +1295,11 @@ es-to-primitive@^1.2.1:
     is-date-object "^1.0.1"
     is-symbol "^1.0.2"
 
+esbuild-plugin-wasm@^1.0.0:
+  version "1.0.0"
+  resolved "https://registry.yarnpkg.com/esbuild-plugin-wasm/-/esbuild-plugin-wasm-1.0.0.tgz#4ca95ac4ae553331d2b0099223f8713a49b6cff2"
+  integrity sha512-iXIf3hwfqorExG66/eNr3U8JakIZuge70nMNQtinvxbzdljQ/RjvwaBiGPqF/DvuIumUApbe3zj2kqHLVyc7uQ==
+
 esbuild@^0.17.4, esbuild@^0.17.5:
   version "0.17.16"
   resolved "https://registry.yarnpkg.com/esbuild/-/esbuild-0.17.16.tgz#5efec24a8ff29e0c157359f27e1b5532a728b720"

