fix: maxCompletionTokens Implementation for All Providers (#1938)

* Update LLM providers and constants

- Updated constants in app/lib/.server/llm/constants.ts
- Modified stream-text functionality in app/lib/.server/llm/stream-text.ts
- Updated Anthropic provider in app/lib/modules/llm/providers/anthropic.ts
- Modified GitHub provider in app/lib/modules/llm/providers/github.ts
- Updated Google provider in app/lib/modules/llm/providers/google.ts
- Modified OpenAI provider in app/lib/modules/llm/providers/openai.ts
- Updated LLM types in app/lib/modules/llm/types.ts (a sketch of the updated type follows this list)
- Modified API route in app/routes/api.llmcall.ts
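
For context, the types.ts change presumably adds an optional per-model completion cap to the model descriptor. A minimal sketch of that shape; only maxCompletionTokens is introduced by this commit, and the surrounding fields are inferred from the return object in the OpenAI diff shown further down, not confirmed against the actual file:

    // Sketch of the updated model descriptor in app/lib/modules/llm/types.ts.
    // Only maxCompletionTokens is new in this commit; the other fields are
    // inferred from the OpenAI diff below.
    export interface ModelInfo {
      name: string;
      label: string;
      provider: string;
      maxTokenAllowed: number; // context-window cap
      maxCompletionTokens?: number; // new: per-model completion (output) limit
    }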

* Fix maxCompletionTokens Implementation for All Providers

- Cohere: Added maxCompletionTokens: 4000 to all 10 static models (an example entry follows this list)
- DeepSeek: Added maxCompletionTokens: 8192 to all 3 static models
- Groq: Added maxCompletionTokens: 8192 to both static models
- Mistral: Added maxCompletionTokens: 8192 to all 9 static models
- Together: Added maxCompletionTokens: 8192 to both static models

- Groq: Fixed getDynamicModels to include maxCompletionTokens: 8192
- Together: Fixed getDynamicModels to include maxCompletionTokens: 8192
- OpenAI: Fixed getDynamicModels with proper logic for reasoning models (o1: 16384, o1-mini: 8192) and standard models
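
To illustrate the static-model edits above, a hypothetical Cohere entry after this commit might look as follows. The model name, label, and maxTokenAllowed value are placeholders; only the maxCompletionTokens value of 4000 comes from the commit message:

    // Hypothetical static model entry for the Cohere provider.
    const staticModels: ModelInfo[] = [
      {
        name: 'command-r-plus', // placeholder model id
        label: 'Command R+', // placeholder label
        provider: 'Cohere',
        maxTokenAllowed: 128000, // placeholder context cap
        maxCompletionTokens: 4000, // value this commit assigns to all 10 Cohere static models
      },
    ];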
Author: Stijnus
Date: 2025-08-29 23:13:58 +02:00
Committed by: GitHub
Parent: 38c13494c2
Commit: ff8b0d7af1

6 changed files with 169 additions and 23 deletions

app/lib/modules/llm/providers/openai.ts

@@ -79,11 +79,26 @@ export default class OpenAIProvider extends BaseProvider {
        contextWindow = 16385; // GPT-3.5-turbo has 16k context
      }

      // Determine completion token limits based on model type
      let maxCompletionTokens = 16384; // default for most models

      if (m.id?.startsWith('o1-preview') || m.id?.startsWith('o1-mini') || m.id?.startsWith('o1')) {
        // Reasoning models have specific completion limits
        maxCompletionTokens = m.id?.includes('mini') ? 8192 : 16384;
      } else if (m.id?.includes('gpt-4o')) {
        maxCompletionTokens = 16384;
      } else if (m.id?.includes('gpt-4')) {
        maxCompletionTokens = 8192;
      } else if (m.id?.includes('gpt-3.5-turbo')) {
        maxCompletionTokens = 4096;
      }

      return {
        name: m.id,
        label: `${m.id} (${Math.floor(contextWindow / 1000)}k context)`,
        provider: this.name,
        maxTokenAllowed: Math.min(contextWindow, 128000), // Cap at 128k for safety
        maxCompletionTokens,
      };
    });
  }
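
Downstream, the point of carrying maxCompletionTokens on each model is that the streaming call can clamp its output budget per model instead of relying on one global constant. A minimal sketch of that idea, assuming an AI SDK version whose streamText accepts a maxTokens option; the resolveMaxTokens helper and the DEFAULT_MAX_TOKENS fallback are illustrative assumptions, not code from this commit:

    import { streamText, type LanguageModel } from 'ai';

    const DEFAULT_MAX_TOKENS = 8192; // assumed fallback, not from this commit

    // Prefer the model's own completion cap; fall back to the default so a
    // provider is never asked for more output tokens than it supports.
    function resolveMaxTokens(info: { maxCompletionTokens?: number }): number {
      return info.maxCompletionTokens ?? DEFAULT_MAX_TOKENS;
    }

    async function streamWithCap(model: LanguageModel, info: { maxCompletionTokens?: number }) {
      return streamText({
        model,
        prompt: 'Hello',
        maxTokens: resolveMaxTokens(info),
      });
    }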