Skip to content

Commit b105846

Browse files
committed
fix maximum token count
1 parent 8b4169a commit b105846

File tree

1 file changed

+4
-4
lines changed

1 file changed

+4
-4
lines changed

service/src/chatgpt/index.ts

Lines changed: 4 additions & 4 deletions
Original file line number | Diff line number | Diff line change
@@ -58,14 +58,14 @@ export async function initApi(key: KeyConfig, chatModel: string, maxContextCount
   // Set the token limits based on the model's type. This is because different models have different token limits.
   // The token limit includes the token count from both the message array sent and the model response.
   // 'gpt-35-turbo' has a limit of 4096 tokens, 'gpt-4' and 'gpt-4-32k' have limits of 8192 and 32768 tokens respectively.
-  // Check if the model type is GPT-4-turbo
-  if (model.toLowerCase().includes('gpt-4o') || model.toLowerCase().includes('gpt-4-turbo') || model.toLowerCase().includes('1106-preview') || model.toLowerCase().includes('0125-preview')) {
-    // If it's a 'gpt-4o'/'gpt-4-turbo'/'1106-preview'/'0125-preview' model, set the maxModelTokens to 128000
+  // Check if the model type is GPT-4-turbo or newer
+  if (model.toLowerCase().includes('gpt-4o') || model.toLowerCase().includes('gpt-4-turbo') || model.toLowerCase().includes('-preview')) {
+    // If it's a 'gpt-4o'/'gpt-4-turbo'/'xxxx-preview' model, set the maxModelTokens to 128000
     options.maxModelTokens = 128000
     options.maxResponseTokens = 32768
   }
   // Check if the model type includes '16k'
-  if (model.toLowerCase().includes('16k')) {
+  else if (model.toLowerCase().includes('16k')) {
     // If it's a '16k' model, set the maxModelTokens to 16384 and maxResponseTokens to 4096
     options.maxModelTokens = 16384
     options.maxResponseTokens = 4096

0 commit comments

Comments
 (0)