Files
bolt-diy/app/lib/modules/llm/providers/open-router.ts
Stijnus b5d9055851 🔧 Fix Token Limits & Invalid JSON Response Errors (#1934)
ISSUES FIXED:
-  Invalid JSON response errors during streaming
-  Incorrect token limits causing API rejections
-  Outdated hardcoded model configurations
-  Poor error messages for API failures

SOLUTIONS IMPLEMENTED:

🎯 ACCURATE TOKEN LIMITS & CONTEXT SIZES
- OpenAI GPT-4o: 128k context (was 8k)
- OpenAI GPT-3.5-turbo: 16k context (was 8k)
- Anthropic Claude 3.5 Sonnet: 200k context (was 8k)
- Anthropic Claude 3 Haiku: 200k context (was 8k)
- Google Gemini 1.5 Pro: 2M context (was 8k)
- Google Gemini 1.5 Flash: 1M context (was 8k)
- Groq Llama models: 128k context (was 8k)
- Together models: Updated with accurate limits

🚀 DYNAMIC MODEL FETCHING ENHANCED
- Smart context detection from provider APIs
- Automatic fallback to known limits when API unavailable
- Safety caps to prevent token overflow (100k max)
- Intelligent model filtering and deduplication

🛡️ IMPROVED ERROR HANDLING
- Specific error messages for Invalid JSON responses
- Token limit exceeded warnings with solutions
- API key validation with clear guidance
- Rate limiting detection and user guidance
- Network timeout handling

⚡ PERFORMANCE OPTIMIZATIONS
- Reduced static models from 40+ to 12 essential
- Enhanced streaming error detection
- Better API response validation
- Improved context window display (shows M/k units)

🔧 TECHNICAL IMPROVEMENTS
- Dynamic model context detection from APIs
- Enhanced streaming reliability
- Better token limit enforcement
- Comprehensive error categorization
- Smart model validation before API calls

IMPACT:
✅ Eliminates Invalid JSON response errors
✅ Prevents token limit API rejections
✅ Provides accurate model capabilities
✅ Improves user experience with clear errors
✅ Enables full utilization of modern LLM context windows
2025-08-29 20:53:57 +02:00

115 lines
3.3 KiB
TypeScript

import { BaseProvider } from '~/lib/modules/llm/base-provider';
import type { ModelInfo } from '~/lib/modules/llm/types';
import type { IProviderSetting } from '~/types/model';
import type { LanguageModelV1 } from 'ai';
import { createOpenRouter } from '@openrouter/ai-sdk-provider';
// Shape of a single model entry returned by the OpenRouter /api/v1/models endpoint.
interface OpenRouterModel {
// Human-readable model name, e.g. "Anthropic: Claude 3.5 Sonnet".
name: string;
// Model identifier used when making chat requests, e.g. "anthropic/claude-3.5-sonnet".
id: string;
// Context window size in tokens as reported by the API.
context_length: number;
// Per-token prices; multiplied by 1,000,000 below to display $/1M tokens.
// NOTE(review): assumes the API serves these as numbers — verify; some APIs
// return pricing fields as strings, which would rely on implicit coercion.
pricing: {
prompt: number;
completion: number;
};
}
// Top-level envelope of the OpenRouter /api/v1/models response: models live under `data`.
interface OpenRouterModelsResponse {
data: OpenRouterModel[];
}
/**
 * LLM provider backed by OpenRouter (https://openrouter.ai).
 *
 * Supplies a small set of stable static fallback models, fetches the full
 * live model catalog dynamically, and constructs chat model instances via
 * the @openrouter/ai-sdk-provider package.
 */
export default class OpenRouterProvider extends BaseProvider {
name = 'OpenRouter';
getApiKeyLink = 'https://openrouter.ai/settings/keys';
config = {
apiTokenKey: 'OPEN_ROUTER_API_KEY',
};
staticModels: ModelInfo[] = [
/*
 * Essential fallback models - only the most stable/reliable ones.
 * Used when the dynamic fetch below fails or is unavailable.
 * Claude 3.5 Sonnet via OpenRouter: 200k context
 */
{
name: 'anthropic/claude-3.5-sonnet',
label: 'Claude 3.5 Sonnet',
provider: 'OpenRouter',
maxTokenAllowed: 200000,
},
// GPT-4o via OpenRouter: 128k context
{
name: 'openai/gpt-4o',
label: 'GPT-4o',
provider: 'OpenRouter',
maxTokenAllowed: 128000,
},
];
/**
 * Fetches the live model catalog from the OpenRouter API.
 *
 * Models are sorted by name; each label includes $/1M-token pricing and a
 * human-readable context size (k/M units). Returns an empty array on any
 * failure so callers fall back to {@link staticModels}.
 */
async getDynamicModels(
_apiKeys?: Record<string, string>,
_settings?: IProviderSetting,
_serverEnv: Record<string, string> = {},
): Promise<ModelInfo[]> {
try {
const response = await fetch('https://openrouter.ai/api/v1/models', {
headers: {
'Content-Type': 'application/json',
},
});

/*
 * Guard against non-2xx responses (auth failures, rate limiting,
 * outages) BEFORE parsing. Without this, an HTML or plain-text error
 * body reaches response.json() and surfaces as a confusing
 * "invalid JSON" error instead of the real HTTP failure.
 */
if (!response.ok) {
throw new Error(`OpenRouter models API request failed: ${response.status} ${response.statusText}`);
}

const data = (await response.json()) as OpenRouterModelsResponse;
return data.data
.sort((a, b) => a.name.localeCompare(b.name))
.map((m) => {
// Get accurate context window from OpenRouter API; fall back to a
// conservative 32k when the field is missing or zero.
const contextWindow = m.context_length || 32000;

// Cap at reasonable limits to prevent issues (OpenRouter has some very large models)
const maxAllowed = 1000000; // 1M tokens max for safety
const finalContext = Math.min(contextWindow, maxAllowed);

return {
name: m.id,
label: `${m.name} - in:$${(m.pricing.prompt * 1_000_000).toFixed(2)} out:$${(m.pricing.completion * 1_000_000).toFixed(2)} - context ${finalContext >= 1000000 ? Math.floor(finalContext / 1000000) + 'M' : Math.floor(finalContext / 1000) + 'k'}`,
provider: this.name,
maxTokenAllowed: finalContext,
};
});
} catch (error) {
// Best-effort: log and return [] so the UI falls back to staticModels
// rather than crashing the provider list.
console.error('Error getting OpenRouter models:', error);
return [];
}
}
/**
 * Builds a LanguageModelV1 chat instance for the given model id.
 *
 * Resolves the API key from (in priority order) user-supplied keys,
 * provider settings, or server environment; throws if none is found.
 *
 * @throws Error when no OpenRouter API key is configured.
 */
getModelInstance(options: {
model: string;
serverEnv: Env;
apiKeys?: Record<string, string>;
providerSettings?: Record<string, IProviderSetting>;
}): LanguageModelV1 {
const { model, serverEnv, apiKeys, providerSettings } = options;
const { apiKey } = this.getProviderBaseUrlAndKey({
apiKeys,
providerSettings: providerSettings?.[this.name],
serverEnv: serverEnv as any,
defaultBaseUrlKey: '',
defaultApiTokenKey: 'OPEN_ROUTER_API_KEY',
});
if (!apiKey) {
throw new Error(`Missing API key for ${this.name} provider`);
}
const openRouter = createOpenRouter({
apiKey,
});
const instance = openRouter.chat(model) as LanguageModelV1;
return instance;
}
}