@@ -42,11 +42,12 @@ const receiveData = (
   });

   memory.push(assistantMessage as OpenAi.MessageObject);
   process.stdout.write('\n\n');
-  const { length: promptTokens } = OpenAi.getPromptTokens(
-    normalizedChatMessage,
-    model,
-  );
-  process.stdout.write(`info:\n${promptTokens} prompt tokens\n${completionTokens} completion tokens`);
+  // const { length: promptTokens } = OpenAi.getPromptTokens(
+  //   normalizedChatMessage,
+  //   model,
+  // );
+  // process.stdout.write(`info:\n${promptTokens} prompt tokens\n${completionTokens} completion tokens`);
+  process.stdout.write(`info:\n${completionTokens} completion tokens`);
   if (argv.memory) {
     process.stdout.write(`\n${memory.length} memory items`);
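
The hunk above stops reporting prompt tokens: the OpenAi.getPromptTokens call and the combined token summary are commented out, and only the completion-token count (plus the memory-item count when memory is enabled) is printed. For orientation, a minimal stand-in for getPromptTokens follows; it is a hypothetical sketch using a naive whitespace tokenizer, not the project's real, presumably model-aware implementation. The only contract the commented-out code relies on is that the result is an array, so destructuring its length yields the token count:

    // Hypothetical sketch only: the real OpenAi.getPromptTokens presumably
    // uses a model-specific tokenizer (model is unused in this naive version).
    // It just shows the contract the diff relies on: an array whose length
    // is the token count.
    const getPromptTokens = (normalizedChatMessage: string, model: string): string[] =>
      normalizedChatMessage.split(/\s+/).filter(Boolean);

    // Usage mirroring the commented-out line:
    // const { length: promptTokens } = getPromptTokens(normalizedChatMessage, model);
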
@@ -132,7 +132,31 @@ const main = (argv: Argv) => new Promise<number>(async (resolve) => {
   resolve(resolveResult);
 });

-main(yargs(hideBin(process.argv)).argv as unknown as Argv)
+main(
+  yargs(hideBin(process.argv))
+    .options({
+      t: {
+        alias: 'temperature',
+        type: 'number',
+        description: 'Temperature argument.',
+      },
+      p: {
+        alias: 'topP',
+        type: 'number',
+        description: '"top_p" argument.',
+      },
+      x: {
+        alias: 'maxTokens',
+        type: 'number',
+        description: 'Maximum tokens ChatGPT will use.',
+      },
+      m: {
+        alias: 'memory',
+        description: 'Whether ChatGPT will use memory. Supply a numeric value to get only the last X memory items (includes messages from all roles).',
+      },
+    })
+    .argv as unknown as Argv,
+)
   .then((result) => {
     process.exit(result);
   })
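
The expanded main call wires four flags through yargs before casting to Argv. A minimal sketch of the Argv shape those options imply is below; this interface is assumed from the option definitions above, while the real one is declared elsewhere in this codebase:

    // Assumed shape, inferred from the yargs .options({...}) block above.
    interface Argv {
      temperature?: number;        // -t / --temperature
      topP?: number;               // -p / --topP
      maxTokens?: number;          // -x / --maxTokens
      memory?: boolean | number;   // -m / --memory: bare flag, or last-X count
    }

Since m is declared without a type, yargs infers one from the input: a bare -m parses as true, while -m 10 parses as the number 10, matching the "last X memory items" behavior the description promises. An invocation such as node cli.js -t 0.7 -m 10 (the script name here is a placeholder) would then yield argv.temperature === 0.7 and argv.memory === 10.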