fix: maxCompletionTokens Implementation for All Providers (#1938)

* Update LLM providers and constants

- Updated constants in app/lib/.server/llm/constants.ts
- Modified stream-text functionality in app/lib/.server/llm/stream-text.ts
- Updated Anthropic provider in app/lib/modules/llm/providers/anthropic.ts
- Modified GitHub provider in app/lib/modules/llm/providers/github.ts
- Updated Google provider in app/lib/modules/llm/providers/google.ts
- Modified OpenAI provider in app/lib/modules/llm/providers/openai.ts
- Updated LLM types in app/lib/modules/llm/types.ts
- Modified API route in app/routes/api.llmcall.ts

* Fix maxCompletionTokens Implementation for All Providers

- Cohere: Added maxCompletionTokens: 4000 to all 10 static models
- DeepSeek: Added maxCompletionTokens: 8192 to all 3 static models
- Groq: Added maxCompletionTokens: 8192 to both static models
- Mistral: Added maxCompletionTokens: 8192 to all 9 static models
- Together: Added maxCompletionTokens: 8192 to both static models

- Groq: Fixed getDynamicModels to include maxCompletionTokens: 8192
- Together: Fixed getDynamicModels to include maxCompletionTokens: 8192
- OpenAI: Fixed getDynamicModels with proper logic for reasoning models (o1: 16384, o1-mini: 8192) and standard models
This commit is contained in:
Stijnus
2025-08-29 23:13:58 +02:00
committed by GitHub
parent 38c13494c2
commit ff8b0d7af1
6 changed files with 169 additions and 23 deletions

View File

@@ -13,9 +13,27 @@ export default class DeepseekProvider extends BaseProvider {
};
// Static catalog of Deepseek models exposed by this provider.
// Each entry carries both limits used by stream-text:
//   - maxTokenAllowed: context budget for the request (8000)
//   - maxCompletionTokens: cap on generated output per request
//     (8192 per this commit's provider-wide rollout — TODO confirm against Deepseek API limits)
// NOTE(review): the diff rendering had dropped its +/- markers, leaving the
// pre-change compact entries duplicated above the expanded ones; the stale
// duplicates (deepseek-coder / deepseek-chat / deepseek-reasoner without
// maxCompletionTokens) are removed here so each model appears exactly once.
staticModels: ModelInfo[] = [
  {
    name: 'deepseek-coder',
    label: 'Deepseek-Coder',
    provider: 'Deepseek',
    maxTokenAllowed: 8000,
    maxCompletionTokens: 8192,
  },
  {
    name: 'deepseek-chat',
    label: 'Deepseek-Chat',
    provider: 'Deepseek',
    maxTokenAllowed: 8000,
    maxCompletionTokens: 8192,
  },
  {
    name: 'deepseek-reasoner',
    label: 'Deepseek-Reasoner',
    provider: 'Deepseek',
    maxTokenAllowed: 8000,
    maxCompletionTokens: 8192,
  },
];
getModelInstance(options: {