bolt-diy/app/lib/modules/llm/providers/groq.ts
Stijnus ff8b0d7af1 fix: maxCompletionTokens Implementation for All Providers (#1938)
* Update LLM providers and constants

- Updated constants in app/lib/.server/llm/constants.ts
- Modified stream-text functionality in app/lib/.server/llm/stream-text.ts
- Updated Anthropic provider in app/lib/modules/llm/providers/anthropic.ts
- Modified GitHub provider in app/lib/modules/llm/providers/github.ts
- Updated Google provider in app/lib/modules/llm/providers/google.ts
- Modified OpenAI provider in app/lib/modules/llm/providers/openai.ts
- Updated LLM types in app/lib/modules/llm/types.ts
- Modified API route in app/routes/api.llmcall.ts

* Fix maxCompletionTokens Implementation for All Providers

- Cohere: Added maxCompletionTokens: 4000 to all 10 static models
- DeepSeek: Added maxCompletionTokens: 8192 to all 3 static models
- Groq: Added maxCompletionTokens: 8192 to both static models
- Mistral: Added maxCompletionTokens: 8192 to all 9 static models
- Together: Added maxCompletionTokens: 8192 to both static models

- Groq: Fixed getDynamicModels to include maxCompletionTokens: 8192
- Together: Fixed getDynamicModels to include maxCompletionTokens: 8192
- OpenAI: Fixed getDynamicModels with proper logic for reasoning models (o1: 16384, o1-mini: 8192) and standard models (see the sketch after this commit message)
2025-08-29 23:13:58 +02:00
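The reasoning-model branching described for OpenAI in the last bullet can be sketched as follows. This is a minimal illustration of the token limits the commit message states; the helper name `completionTokensFor` and the 8192 default for standard models are assumptions for illustration, not code from the repository.

// Hypothetical helper illustrating the commit's stated limits; not the repo's actual code.
function completionTokensFor(modelId: string): number {
  // Check 'o1-mini' before 'o1', since 'o1-mini' also matches the 'o1' prefix.
  if (modelId.startsWith('o1-mini')) {
    return 8192; // reasoning model, per the commit message
  }
  if (modelId.startsWith('o1')) {
    return 16384; // reasoning model, per the commit message
  }
  return 8192; // assumed default for standard models
}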

104 lines
2.8 KiB
TypeScript

import { BaseProvider } from '~/lib/modules/llm/base-provider';
import type { ModelInfo } from '~/lib/modules/llm/types';
import type { IProviderSetting } from '~/types/model';
import type { LanguageModelV1 } from 'ai';
import { createOpenAI } from '@ai-sdk/openai';

export default class GroqProvider extends BaseProvider {
  name = 'Groq';
  getApiKeyLink = 'https://console.groq.com/keys';

  config = {
    apiTokenKey: 'GROQ_API_KEY',
  };

  staticModels: ModelInfo[] = [
    /*
     * Essential fallback models - only the most stable/reliable ones
     * Llama 3.1 8B: 128k context, fast and efficient
     */
    {
      name: 'llama-3.1-8b-instant',
      label: 'Llama 3.1 8B',
      provider: 'Groq',
      maxTokenAllowed: 128000,
      maxCompletionTokens: 8192,
    },

    // Llama 3.3 70B: 128k context, most capable model
    {
      name: 'llama-3.3-70b-versatile',
      label: 'Llama 3.3 70B',
      provider: 'Groq',
      maxTokenAllowed: 128000,
      maxCompletionTokens: 8192,
    },
  ];

  async getDynamicModels(
    apiKeys?: Record<string, string>,
    settings?: IProviderSetting,
    serverEnv?: Record<string, string>,
  ): Promise<ModelInfo[]> {
    const { apiKey } = this.getProviderBaseUrlAndKey({
      apiKeys,
      providerSettings: settings,
      serverEnv: serverEnv as any,
      defaultBaseUrlKey: '',
      defaultApiTokenKey: 'GROQ_API_KEY',
    });

    if (!apiKey) {
      throw new Error(`Missing API key configuration for ${this.name} provider`);
    }

    const response = await fetch(`https://api.groq.com/openai/v1/models`, {
      headers: {
        Authorization: `Bearer ${apiKey}`,
      },
    });

    const res = (await response.json()) as any;

    // Keep only active models with a usable context window.
    const data = res.data.filter(
      (model: any) => model.object === 'model' && model.active && model.context_window > 8000,
    );

    return data.map((m: any) => ({
      name: m.id,
      label: `${m.id} - context ${m.context_window ? Math.floor(m.context_window / 1000) + 'k' : 'N/A'} [ by ${m.owned_by}]`,
      provider: this.name,
      // Cap the advertised context at 16k; completions are capped at 8k tokens.
      maxTokenAllowed: Math.min(m.context_window || 8192, 16384),
      maxCompletionTokens: 8192,
    }));
  }

  getModelInstance(options: {
    model: string;
    serverEnv: Env;
    apiKeys?: Record<string, string>;
    providerSettings?: Record<string, IProviderSetting>;
  }): LanguageModelV1 {
    const { model, serverEnv, apiKeys, providerSettings } = options;

    const { apiKey } = this.getProviderBaseUrlAndKey({
      apiKeys,
      providerSettings: providerSettings?.[this.name],
      serverEnv: serverEnv as any,
      defaultBaseUrlKey: '',
      defaultApiTokenKey: 'GROQ_API_KEY',
    });

    if (!apiKey) {
      throw new Error(`Missing API key for ${this.name} provider`);
    }

    // Groq exposes an OpenAI-compatible endpoint, so the OpenAI SDK client is reused.
    const openai = createOpenAI({
      baseURL: 'https://api.groq.com/openai/v1',
      apiKey,
    });

    return openai(model);
  }
}
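A minimal usage sketch of the provider above. It assumes the apiKeys record passed to getProviderBaseUrlAndKey is keyed by provider name ('Groq') and that Env can stand in for the app's server bindings; both are assumptions about the surrounding bolt-diy wiring, not confirmed by this file.

// Hypothetical usage; the apiKeys keying convention ('Groq') is an assumption.
async function listGroqModels(groqApiKey: string) {
  const provider = new GroqProvider();

  // Fetch the live model list from Groq's OpenAI-compatible /models endpoint.
  const models = await provider.getDynamicModels({ Groq: groqApiKey });
  console.log(models.map((m) => m.name));

  // Instantiate one of the static fallback models.
  const model = provider.getModelInstance({
    model: 'llama-3.3-70b-versatile',
    serverEnv: {} as Env, // Env comes from the app's server bindings in practice
    apiKeys: { Groq: groqApiKey },
  });

  return model;
}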