feat: add Moonshot AI (Kimi) provider and update xAI Grok models (#1953)

- Add comprehensive Moonshot AI provider with 11 models including:
  * Legacy moonshot-v1 series (8k, 32k, 128k context)
  * Latest Kimi K2 models (K2 Preview, Turbo, Thinking)
  * Vision-enabled models for multimodal capabilities
  * Auto-selecting model variants

- Update xAI provider with latest Grok models:
  * Add Grok 4 (256K context) and Grok 4 (07-09) variant
  * Add Grok 3 Mini Beta and Mini Fast Beta variants
  * Update context limits to match actual model capabilities
  * Remove outdated grok-beta and grok-2-1212 models

- Add MOONSHOT_API_KEY to environment configuration
- Register Moonshot provider in service status monitoring
- Full OpenAI-compatible API integration via api.moonshot.ai
- Fix TypeScript errors in GitHub provider

🤖 Generated with [Claude Code](https://claude.ai/code)

Co-authored-by: Claude <noreply@anthropic.com>
This commit is contained in:
Stijnus
2025-08-31 18:54:14 +02:00
committed by GitHub
parent 56f5d6f68c
commit df242a7935
18 changed files with 810 additions and 192 deletions

View File

@@ -8,7 +8,7 @@ import { motion } from 'framer-motion';
import { classNames } from '~/utils/classNames';
import { toast } from 'react-toastify';
import { providerBaseUrlEnvKeys } from '~/utils/constants';
import { SiAmazon, SiGoogle, SiHuggingface, SiPerplexity, SiOpenai } from 'react-icons/si';
import { SiAmazon, SiGoogle, SiGithub, SiHuggingface, SiPerplexity, SiOpenai } from 'react-icons/si';
import { BsRobot, BsCloud } from 'react-icons/bs';
import { TbBrain, TbCloudComputing } from 'react-icons/tb';
import { BiCodeBlock, BiChip } from 'react-icons/bi';
@@ -21,6 +21,7 @@ type ProviderName =
| 'Anthropic'
| 'Cohere'
| 'Deepseek'
| 'Github'
| 'Google'
| 'Groq'
| 'HuggingFace'
@@ -38,6 +39,7 @@ const PROVIDER_ICONS: Record<ProviderName, IconType> = {
Anthropic: FaBrain,
Cohere: BiChip,
Deepseek: BiCodeBlock,
Github: SiGithub,
Google: SiGoogle,
Groq: BsCloud,
HuggingFace: SiHuggingface,
@@ -53,6 +55,7 @@ const PROVIDER_ICONS: Record<ProviderName, IconType> = {
// Update PROVIDER_DESCRIPTIONS to use the same type
const PROVIDER_DESCRIPTIONS: Partial<Record<ProviderName, string>> = {
Anthropic: 'Access Claude and other Anthropic models',
Github: 'Use OpenAI models hosted through GitHub infrastructure',
OpenAI: 'Use GPT-4, GPT-3.5, and other OpenAI models',
};

View File

@@ -13,6 +13,7 @@ import { OpenRouterStatusChecker } from './providers/openrouter';
import { PerplexityStatusChecker } from './providers/perplexity';
import { TogetherStatusChecker } from './providers/together';
import { XAIStatusChecker } from './providers/xai';
import { MoonshotStatusChecker } from './providers/moonshot';
export class ProviderStatusCheckerFactory {
private static _providerConfigs: Record<ProviderName, ProviderConfig> = {
@@ -82,6 +83,12 @@ export class ProviderStatusCheckerFactory {
headers: {},
testModel: 'mistralai/Mixtral-8x7B-Instruct-v0.1',
},
Moonshot: {
statusUrl: 'https://status.moonshot.ai/',
apiUrl: 'https://api.moonshot.ai/v1/models',
headers: {},
testModel: 'moonshot-v1-8k',
},
XAI: {
statusUrl: 'https://status.x.ai/',
apiUrl: 'https://api.x.ai/v1/models',
@@ -120,6 +127,8 @@ export class ProviderStatusCheckerFactory {
return new PerplexityStatusChecker(config);
case 'Together':
return new TogetherStatusChecker(config);
case 'Moonshot':
return new MoonshotStatusChecker(config);
case 'XAI':
return new XAIStatusChecker(config);
default:

View File

@@ -0,0 +1,37 @@
import { BaseProviderChecker } from '~/components/@settings/tabs/providers/service-status/base-provider';
import type { StatusCheckResult } from '~/components/@settings/tabs/providers/service-status/types';
export class MoonshotStatusChecker extends BaseProviderChecker {
async checkStatus(): Promise<StatusCheckResult> {
try {
// Check Moonshot API endpoint
const apiEndpoint = 'https://api.moonshot.ai/v1/models';
const apiStatus = await this.checkEndpoint(apiEndpoint);
// Check their main website
const websiteStatus = await this.checkEndpoint('https://www.moonshot.ai');
let status: StatusCheckResult['status'] = 'operational';
let message = 'All systems operational';
if (apiStatus !== 'reachable' || websiteStatus !== 'reachable') {
status = apiStatus !== 'reachable' ? 'down' : 'degraded';
message = apiStatus !== 'reachable' ? 'API appears to be down' : 'Service may be experiencing issues';
}
return {
status,
message,
incidents: [], // No public incident tracking available yet
};
} catch (error) {
console.error('Error checking Moonshot status:', error);
return {
status: 'degraded',
message: 'Unable to determine service status',
incidents: ['Note: Limited status information available'],
};
}
}
}

View File

@@ -9,6 +9,7 @@ export type ProviderName =
| 'HuggingFace'
| 'Hyperbolic'
| 'Mistral'
| 'Moonshot'
| 'OpenRouter'
| 'Perplexity'
| 'Together'

View File

@@ -1,9 +1,84 @@
import type { ProviderInfo } from '~/types/model';
import { useEffect, useState, useRef } from 'react';
import { useEffect, useState, useRef, useMemo, useCallback } from 'react';
import type { KeyboardEvent } from 'react';
import type { ModelInfo } from '~/lib/modules/llm/types';
import { classNames } from '~/utils/classNames';
// Fuzzy search utilities
const levenshteinDistance = (str1: string, str2: string): number => {
const matrix = [];
for (let i = 0; i <= str2.length; i++) {
matrix[i] = [i];
}
for (let j = 0; j <= str1.length; j++) {
matrix[0][j] = j;
}
for (let i = 1; i <= str2.length; i++) {
for (let j = 1; j <= str1.length; j++) {
if (str2.charAt(i - 1) === str1.charAt(j - 1)) {
matrix[i][j] = matrix[i - 1][j - 1];
} else {
matrix[i][j] = Math.min(matrix[i - 1][j - 1] + 1, matrix[i][j - 1] + 1, matrix[i - 1][j] + 1);
}
}
}
return matrix[str2.length][str1.length];
};
const fuzzyMatch = (query: string, text: string): { score: number; matches: boolean } => {
if (!query) {
return { score: 0, matches: true };
}
if (!text) {
return { score: 0, matches: false };
}
const queryLower = query.toLowerCase();
const textLower = text.toLowerCase();
// Exact substring match gets highest score
if (textLower.includes(queryLower)) {
return { score: 100 - (textLower.indexOf(queryLower) / textLower.length) * 20, matches: true };
}
// Fuzzy match with reasonable threshold
const distance = levenshteinDistance(queryLower, textLower);
const maxLen = Math.max(queryLower.length, textLower.length);
const similarity = 1 - distance / maxLen;
return {
score: similarity > 0.6 ? similarity * 80 : 0,
matches: similarity > 0.6,
};
};
const highlightText = (text: string, query: string): string => {
if (!query) {
return text;
}
const regex = new RegExp(`(${query.replace(/[.*+?^${}()|[\]\\]/g, '\\$&')})`, 'gi');
return text.replace(regex, '<mark class="bg-yellow-200 dark:bg-yellow-800 text-current">$1</mark>');
};
const formatContextSize = (tokens: number): string => {
if (tokens >= 1000000) {
return `${(tokens / 1000000).toFixed(1)}M`;
}
if (tokens >= 1000) {
return `${(tokens / 1000).toFixed(0)}K`;
}
return tokens.toString();
};
interface ModelSelectorProps {
model?: string;
setModel?: (model: string) => void;
@@ -40,12 +115,14 @@ export const ModelSelector = ({
modelLoading,
}: ModelSelectorProps) => {
const [modelSearchQuery, setModelSearchQuery] = useState('');
const [debouncedModelSearchQuery, setDebouncedModelSearchQuery] = useState('');
const [isModelDropdownOpen, setIsModelDropdownOpen] = useState(false);
const [focusedModelIndex, setFocusedModelIndex] = useState(-1);
const modelSearchInputRef = useRef<HTMLInputElement>(null);
const modelOptionsRef = useRef<(HTMLDivElement | null)[]>([]);
const modelDropdownRef = useRef<HTMLDivElement>(null);
const [providerSearchQuery, setProviderSearchQuery] = useState('');
const [debouncedProviderSearchQuery, setDebouncedProviderSearchQuery] = useState('');
const [isProviderDropdownOpen, setIsProviderDropdownOpen] = useState(false);
const [focusedProviderIndex, setFocusedProviderIndex] = useState(-1);
const providerSearchInputRef = useRef<HTMLInputElement>(null);
@@ -53,6 +130,23 @@ export const ModelSelector = ({
const providerDropdownRef = useRef<HTMLDivElement>(null);
const [showFreeModelsOnly, setShowFreeModelsOnly] = useState(false);
// Debounce search queries
useEffect(() => {
const timer = setTimeout(() => {
setDebouncedModelSearchQuery(modelSearchQuery);
}, 150);
return () => clearTimeout(timer);
}, [modelSearchQuery]);
useEffect(() => {
const timer = setTimeout(() => {
setDebouncedProviderSearchQuery(providerSearchQuery);
}, 150);
return () => clearTimeout(timer);
}, [providerSearchQuery]);
useEffect(() => {
const handleClickOutside = (event: MouseEvent) => {
if (modelDropdownRef.current && !modelDropdownRef.current.contains(event.target as Node)) {
@@ -71,24 +165,64 @@ export const ModelSelector = ({
return () => document.removeEventListener('mousedown', handleClickOutside);
}, []);
const filteredModels = [...modelList]
.filter((e) => e.provider === provider?.name && e.name)
.filter((model) => {
// Apply free models filter
if (showFreeModelsOnly && !isModelLikelyFree(model, provider?.name)) {
return false;
}
const filteredModels = useMemo(() => {
const baseModels = [...modelList].filter((e) => e.provider === provider?.name && e.name);
// Apply search filter
return (
model.label.toLowerCase().includes(modelSearchQuery.toLowerCase()) ||
model.name.toLowerCase().includes(modelSearchQuery.toLowerCase())
);
});
return baseModels
.filter((model) => {
// Apply free models filter
if (showFreeModelsOnly && !isModelLikelyFree(model, provider?.name)) {
return false;
}
const filteredProviders = providerList.filter((p) =>
p.name.toLowerCase().includes(providerSearchQuery.toLowerCase()),
);
return true;
})
.map((model) => {
// Calculate search scores for fuzzy matching
const labelMatch = fuzzyMatch(debouncedModelSearchQuery, model.label);
const nameMatch = fuzzyMatch(debouncedModelSearchQuery, model.name);
const contextMatch = fuzzyMatch(debouncedModelSearchQuery, formatContextSize(model.maxTokenAllowed));
const bestScore = Math.max(labelMatch.score, nameMatch.score, contextMatch.score);
const matches = labelMatch.matches || nameMatch.matches || contextMatch.matches || !debouncedModelSearchQuery; // Show all if no query
return {
...model,
searchScore: bestScore,
searchMatches: matches,
highlightedLabel: highlightText(model.label, debouncedModelSearchQuery),
highlightedName: highlightText(model.name, debouncedModelSearchQuery),
};
})
.filter((model) => model.searchMatches)
.sort((a, b) => {
// Sort by search score (highest first), then by label
if (debouncedModelSearchQuery) {
return b.searchScore - a.searchScore;
}
return a.label.localeCompare(b.label);
});
}, [modelList, provider?.name, showFreeModelsOnly, debouncedModelSearchQuery]);
const filteredProviders = useMemo(() => {
if (!debouncedProviderSearchQuery) {
return providerList;
}
return providerList
.map((provider) => {
const match = fuzzyMatch(debouncedProviderSearchQuery, provider.name);
return {
...provider,
searchScore: match.score,
searchMatches: match.matches,
highlightedName: highlightText(provider.name, debouncedProviderSearchQuery),
};
})
.filter((provider) => provider.searchMatches)
.sort((a, b) => b.searchScore - a.searchScore);
}, [providerList, debouncedProviderSearchQuery]);
// Reset free models filter when provider changes
useEffect(() => {
@@ -97,11 +231,30 @@ export const ModelSelector = ({
useEffect(() => {
setFocusedModelIndex(-1);
}, [modelSearchQuery, isModelDropdownOpen, showFreeModelsOnly]);
}, [debouncedModelSearchQuery, isModelDropdownOpen, showFreeModelsOnly]);
useEffect(() => {
setFocusedProviderIndex(-1);
}, [providerSearchQuery, isProviderDropdownOpen]);
}, [debouncedProviderSearchQuery, isProviderDropdownOpen]);
// Clear search functions
const clearModelSearch = useCallback(() => {
setModelSearchQuery('');
setDebouncedModelSearchQuery('');
if (modelSearchInputRef.current) {
modelSearchInputRef.current.focus();
}
}, []);
const clearProviderSearch = useCallback(() => {
setProviderSearchQuery('');
setDebouncedProviderSearchQuery('');
if (providerSearchInputRef.current) {
providerSearchInputRef.current.focus();
}
}, []);
useEffect(() => {
if (isModelDropdownOpen && modelSearchInputRef.current) {
@@ -137,6 +290,7 @@ export const ModelSelector = ({
setModel?.(selectedModel.name);
setIsModelDropdownOpen(false);
setModelSearchQuery('');
setDebouncedModelSearchQuery('');
}
break;
@@ -144,12 +298,20 @@ export const ModelSelector = ({
e.preventDefault();
setIsModelDropdownOpen(false);
setModelSearchQuery('');
setDebouncedModelSearchQuery('');
break;
case 'Tab':
if (!e.shiftKey && focusedModelIndex === filteredModels.length - 1) {
setIsModelDropdownOpen(false);
}
break;
case 'k':
if (e.ctrlKey || e.metaKey) {
e.preventDefault();
clearModelSearch();
}
break;
}
};
@@ -186,6 +348,7 @@ export const ModelSelector = ({
setIsProviderDropdownOpen(false);
setProviderSearchQuery('');
setDebouncedProviderSearchQuery('');
}
break;
@@ -193,12 +356,20 @@ export const ModelSelector = ({
e.preventDefault();
setIsProviderDropdownOpen(false);
setProviderSearchQuery('');
setDebouncedProviderSearchQuery('');
break;
case 'Tab':
if (!e.shiftKey && focusedProviderIndex === filteredProviders.length - 1) {
setIsProviderDropdownOpen(false);
}
break;
case 'k':
if (e.ctrlKey || e.metaKey) {
e.preventDefault();
clearProviderSearch();
}
break;
}
};
@@ -292,9 +463,9 @@ export const ModelSelector = ({
type="text"
value={providerSearchQuery}
onChange={(e) => setProviderSearchQuery(e.target.value)}
placeholder="Search providers..."
placeholder="Search providers... (⌘K to clear)"
className={classNames(
'w-full pl-2 py-1.5 rounded-md text-sm',
'w-full pl-8 pr-8 py-1.5 rounded-md text-sm',
'bg-bolt-elements-background-depth-2 border border-bolt-elements-borderColor',
'text-bolt-elements-textPrimary placeholder:text-bolt-elements-textTertiary',
'focus:outline-none focus:ring-2 focus:ring-bolt-elements-focus',
@@ -307,6 +478,19 @@ export const ModelSelector = ({
<div className="absolute left-2.5 top-1/2 -translate-y-1/2">
<span className="i-ph:magnifying-glass text-bolt-elements-textTertiary" />
</div>
{providerSearchQuery && (
<button
type="button"
onClick={(e) => {
e.stopPropagation();
clearProviderSearch();
}}
className="absolute right-2.5 top-1/2 -translate-y-1/2 p-0.5 rounded hover:bg-bolt-elements-background-depth-3 transition-colors"
aria-label="Clear search"
>
<span className="i-ph:x text-bolt-elements-textTertiary text-xs" />
</button>
)}
</div>
</div>
@@ -327,7 +511,18 @@ export const ModelSelector = ({
)}
>
{filteredProviders.length === 0 ? (
<div className="px-3 py-2 text-sm text-bolt-elements-textTertiary">No providers found</div>
<div className="px-3 py-3 text-sm">
<div className="text-bolt-elements-textTertiary mb-1">
{debouncedProviderSearchQuery
? `No providers match "${debouncedProviderSearchQuery}"`
: 'No providers found'}
</div>
{debouncedProviderSearchQuery && (
<div className="text-xs text-bolt-elements-textTertiary">
Try searching for provider names like "OpenAI", "Anthropic", or "Google"
</div>
)}
</div>
) : (
filteredProviders.map((providerOption, index) => (
<div
@@ -360,10 +555,15 @@ export const ModelSelector = ({
setIsProviderDropdownOpen(false);
setProviderSearchQuery('');
setDebouncedProviderSearchQuery('');
}}
tabIndex={focusedProviderIndex === index ? 0 : -1}
>
{providerOption.name}
<div
dangerouslySetInnerHTML={{
__html: (providerOption as any).highlightedName || providerOption.name,
}}
/>
</div>
))
)}
@@ -441,6 +641,14 @@ export const ModelSelector = ({
</div>
)}
{/* Search Result Count */}
{debouncedModelSearchQuery && filteredModels.length > 0 && (
<div className="text-xs text-bolt-elements-textTertiary px-1">
{filteredModels.length} model{filteredModels.length !== 1 ? 's' : ''} found
{filteredModels.length > 5 && ' (showing best matches)'}
</div>
)}
{/* Search Input */}
<div className="relative">
<input
@@ -448,9 +656,9 @@ export const ModelSelector = ({
type="text"
value={modelSearchQuery}
onChange={(e) => setModelSearchQuery(e.target.value)}
placeholder="Search models..."
placeholder="Search models... (⌘K to clear)"
className={classNames(
'w-full pl-2 py-1.5 rounded-md text-sm',
'w-full pl-8 pr-8 py-1.5 rounded-md text-sm',
'bg-bolt-elements-background-depth-2 border border-bolt-elements-borderColor',
'text-bolt-elements-textPrimary placeholder:text-bolt-elements-textTertiary',
'focus:outline-none focus:ring-2 focus:ring-bolt-elements-focus',
@@ -463,6 +671,19 @@ export const ModelSelector = ({
<div className="absolute left-2.5 top-1/2 -translate-y-1/2">
<span className="i-ph:magnifying-glass text-bolt-elements-textTertiary" />
</div>
{modelSearchQuery && (
<button
type="button"
onClick={(e) => {
e.stopPropagation();
clearModelSearch();
}}
className="absolute right-2.5 top-1/2 -translate-y-1/2 p-0.5 rounded hover:bg-bolt-elements-background-depth-3 transition-colors"
aria-label="Clear search"
>
<span className="i-ph:x text-bolt-elements-textTertiary text-xs" />
</button>
)}
</div>
</div>
@@ -483,16 +704,37 @@ export const ModelSelector = ({
)}
>
{modelLoading === 'all' || modelLoading === provider?.name ? (
<div className="px-3 py-2 text-sm text-bolt-elements-textTertiary">Loading...</div>
<div className="px-3 py-3 text-sm">
<div className="flex items-center gap-2 text-bolt-elements-textTertiary">
<span className="i-ph:spinner animate-spin" />
Loading models...
</div>
</div>
) : filteredModels.length === 0 ? (
<div className="px-3 py-2 text-sm text-bolt-elements-textTertiary">
{showFreeModelsOnly ? 'No free models found' : 'No models found'}
<div className="px-3 py-3 text-sm">
<div className="text-bolt-elements-textTertiary mb-1">
{debouncedModelSearchQuery
? `No models match "${debouncedModelSearchQuery}"${showFreeModelsOnly ? ' (free only)' : ''}`
: showFreeModelsOnly
? 'No free models available'
: 'No models available'}
</div>
{debouncedModelSearchQuery && (
<div className="text-xs text-bolt-elements-textTertiary">
Try searching for model names, context sizes (e.g., "128k", "1M"), or capabilities
</div>
)}
{showFreeModelsOnly && !debouncedModelSearchQuery && (
<div className="text-xs text-bolt-elements-textTertiary">
Try disabling the "Free models only" filter to see all available models
</div>
)}
</div>
) : (
filteredModels.map((modelOption, index) => (
<div
ref={(el) => (modelOptionsRef.current[index] = el)}
key={index} // Consider using modelOption.name if unique
key={modelOption.name}
role="option"
aria-selected={model === modelOption.name}
className={classNames(
@@ -510,14 +752,38 @@ export const ModelSelector = ({
setModel?.(modelOption.name);
setIsModelDropdownOpen(false);
setModelSearchQuery('');
setDebouncedModelSearchQuery('');
}}
tabIndex={focusedModelIndex === index ? 0 : -1}
>
<div className="flex items-center justify-between">
<span>{modelOption.label}</span>
{isModelLikelyFree(modelOption, provider?.name) && (
<span className="i-ph:gift text-xs text-purple-400 ml-2" title="Free model" />
)}
<div className="flex-1 min-w-0">
<div className="truncate">
<span
dangerouslySetInnerHTML={{
__html: (modelOption as any).highlightedLabel || modelOption.label,
}}
/>
</div>
<div className="flex items-center gap-2 mt-0.5">
<span className="text-xs text-bolt-elements-textTertiary">
{formatContextSize(modelOption.maxTokenAllowed)} tokens
</span>
{debouncedModelSearchQuery && (modelOption as any).searchScore > 70 && (
<span className="text-xs text-green-500 font-medium">
{(modelOption as any).searchScore.toFixed(0)}% match
</span>
)}
</div>
</div>
<div className="flex items-center gap-1 ml-2">
{isModelLikelyFree(modelOption, provider?.name) && (
<span className="i-ph:gift text-xs text-purple-400" title="Free model" />
)}
{model === modelOption.name && (
<span className="i-ph:check text-xs text-green-500" title="Selected" />
)}
</div>
</div>
</div>
))

View File

@@ -1,18 +1,19 @@
/*
* Maximum tokens for response generation (conservative default for older models)
* Modern models can handle much higher limits - specific limits are set per model
* Maximum tokens for response generation (updated for modern model capabilities)
* This serves as a fallback when model-specific limits are unavailable
* Modern models like Claude 3.5, GPT-4o, and Gemini Pro support 128k+ tokens
*/
export const MAX_TOKENS = 32000;
export const MAX_TOKENS = 128000;
/*
* Provider-specific default completion token limits
* Used as fallbacks when model doesn't specify maxCompletionTokens
*/
export const PROVIDER_COMPLETION_LIMITS: Record<string, number> = {
OpenAI: 16384,
Github: 16384, // GitHub Models use OpenAI-compatible limits
Anthropic: 128000,
Google: 32768,
OpenAI: 4096, // Standard GPT models (o1 models have much higher limits)
Github: 4096, // GitHub Models use OpenAI-compatible limits
Anthropic: 64000, // Conservative limit for Claude 4 models (Opus: 32k, Sonnet: 64k)
Google: 8192, // Gemini 1.5 Pro/Flash standard limit
Cohere: 4000,
DeepSeek: 8192,
Groq: 8192,

View File

@@ -142,11 +142,11 @@ export async function streamText(props: {
const dynamicMaxTokens = modelDetails ? getCompletionTokenLimit(modelDetails) : Math.min(MAX_TOKENS, 16384);
// Additional safety cap - should not be needed with proper completion limits, but kept for safety
const safeMaxTokens = Math.min(dynamicMaxTokens, 128000);
// Use model-specific limits directly - no artificial cap needed
const safeMaxTokens = dynamicMaxTokens;
logger.info(
`Max tokens for model ${modelDetails.name} is ${safeMaxTokens} (capped from ${dynamicMaxTokens}) based on model limits`,
`Token limits for model ${modelDetails.name}: maxTokens=${safeMaxTokens}, maxTokenAllowed=${modelDetails.maxTokenAllowed}, maxCompletionTokens=${modelDetails.maxCompletionTokens}`,
);
let systemPrompt =
@@ -221,11 +221,18 @@ export async function streamText(props: {
logger.info(`Sending llm call to ${provider.name} with model ${modelDetails.name}`);
// DEBUG: Log reasoning model detection
// Log reasoning model detection and token parameters
const isReasoning = isReasoningModel(modelDetails.name);
logger.info(`DEBUG STREAM: Model "${modelDetails.name}" detected as reasoning model: ${isReasoning}`);
logger.info(
`Model "${modelDetails.name}" is reasoning model: ${isReasoning}, using ${isReasoning ? 'maxCompletionTokens' : 'maxTokens'}: ${safeMaxTokens}`,
);
// console.log(systemPrompt, processedMessages);
// Validate token limits before API call
if (safeMaxTokens > (modelDetails.maxTokenAllowed || 128000)) {
logger.warn(
`Token limit warning: requesting ${safeMaxTokens} tokens but model supports max ${modelDetails.maxTokenAllowed || 128000}`,
);
}
// Use maxCompletionTokens for reasoning models (o1, GPT-5), maxTokens for traditional models
const tokenParams = isReasoning ? { maxCompletionTokens: safeMaxTokens } : { maxTokens: safeMaxTokens };

View File

@@ -33,6 +33,15 @@ export default class AnthropicProvider extends BaseProvider {
maxTokenAllowed: 200000,
maxCompletionTokens: 128000,
},
// Claude Opus 4: 200k context, 32k output limit (latest flagship model)
{
name: 'claude-opus-4-20250514',
label: 'Claude 4 Opus',
provider: 'Anthropic',
maxTokenAllowed: 200000,
maxCompletionTokens: 32000,
},
];
async getDynamicModels(
@@ -81,12 +90,23 @@ export default class AnthropicProvider extends BaseProvider {
contextWindow = 200000; // Claude 3 Sonnet has 200k context
}
// Determine completion token limits based on specific model
let maxCompletionTokens = 128000; // default for older Claude 3 models
if (m.id?.includes('claude-opus-4')) {
maxCompletionTokens = 32000; // Claude 4 Opus: 32K output limit
} else if (m.id?.includes('claude-sonnet-4')) {
maxCompletionTokens = 64000; // Claude 4 Sonnet: 64K output limit
} else if (m.id?.includes('claude-4')) {
maxCompletionTokens = 32000; // Other Claude 4 models: conservative 32K limit
}
return {
name: m.id,
label: `${m.display_name} (${Math.floor(contextWindow / 1000)}k context)`,
provider: this.name,
maxTokenAllowed: contextWindow,
maxCompletionTokens: 128000, // Claude models support up to 128k completion tokens
maxCompletionTokens,
};
});
}

View File

@@ -12,35 +12,114 @@ export default class GithubProvider extends BaseProvider {
apiTokenKey: 'GITHUB_API_KEY',
};
// find more in https://github.com/marketplace?type=models
/*
* GitHub Models - Available models through GitHub's native API
* Updated for the new GitHub Models API at https://models.github.ai
* Model IDs use the format: publisher/model-name
*/
staticModels: ModelInfo[] = [
{ name: 'gpt-4o', label: 'GPT-4o', provider: 'Github', maxTokenAllowed: 128000, maxCompletionTokens: 16384 },
{ name: 'o1', label: 'o1-preview', provider: 'Github', maxTokenAllowed: 100000, maxCompletionTokens: 16384 },
{ name: 'o1-mini', label: 'o1-mini', provider: 'Github', maxTokenAllowed: 65536, maxCompletionTokens: 8192 },
{ name: 'openai/gpt-4o', label: 'GPT-4o', provider: 'Github', maxTokenAllowed: 131072, maxCompletionTokens: 4096 },
{
name: 'gpt-4o-mini',
name: 'openai/gpt-4o-mini',
label: 'GPT-4o Mini',
provider: 'Github',
maxTokenAllowed: 128000,
maxCompletionTokens: 16384,
maxTokenAllowed: 131072,
maxCompletionTokens: 4096,
},
{
name: 'gpt-4-turbo',
label: 'GPT-4 Turbo',
name: 'openai/o1-preview',
label: 'o1-preview',
provider: 'Github',
maxTokenAllowed: 128000,
maxCompletionTokens: 8192,
maxCompletionTokens: 32000,
},
{ name: 'gpt-4', label: 'GPT-4', provider: 'Github', maxTokenAllowed: 8192, maxCompletionTokens: 8192 },
{
name: 'gpt-3.5-turbo',
label: 'GPT-3.5 Turbo',
name: 'openai/o1-mini',
label: 'o1-mini',
provider: 'Github',
maxTokenAllowed: 16385,
maxTokenAllowed: 128000,
maxCompletionTokens: 65000,
},
{ name: 'openai/o1', label: 'o1', provider: 'Github', maxTokenAllowed: 200000, maxCompletionTokens: 100000 },
{
name: 'openai/gpt-4.1',
label: 'GPT-4.1',
provider: 'Github',
maxTokenAllowed: 1048576,
maxCompletionTokens: 32768,
},
{
name: 'openai/gpt-4.1-mini',
label: 'GPT-4.1-mini',
provider: 'Github',
maxTokenAllowed: 1048576,
maxCompletionTokens: 32768,
},
{
name: 'deepseek/deepseek-r1',
label: 'DeepSeek-R1',
provider: 'Github',
maxTokenAllowed: 128000,
maxCompletionTokens: 4096,
},
];
async getDynamicModels(
apiKeys?: Record<string, string>,
settings?: IProviderSetting,
serverEnv?: Record<string, string>,
): Promise<ModelInfo[]> {
const { apiKey } = this.getProviderBaseUrlAndKey({
apiKeys,
providerSettings: settings,
serverEnv: serverEnv as any,
defaultBaseUrlKey: '',
defaultApiTokenKey: 'GITHUB_API_KEY',
});
if (!apiKey) {
console.log('GitHub: No API key found. Make sure GITHUB_API_KEY is set in your .env.local file');
// Return static models if no API key is available
return this.staticModels;
}
console.log('GitHub: API key found, attempting to fetch dynamic models...');
try {
// Try to fetch dynamic models from GitHub API
const response = await fetch('https://models.github.ai/v1/models', {
headers: {
Authorization: `Bearer ${apiKey}`,
},
});
if (response.ok) {
const data = (await response.json()) as { data?: any[] };
console.log('GitHub: Successfully fetched models from API');
if (data.data && Array.isArray(data.data)) {
return data.data.map((model: any) => ({
name: model.id,
label: model.name || model.id.split('/').pop() || model.id,
provider: 'Github',
maxTokenAllowed: model.limits?.max_input_tokens || 128000,
maxCompletionTokens: model.limits?.max_output_tokens || 16384,
}));
}
} else {
console.warn('GitHub: API request failed with status:', response.status, response.statusText);
}
} catch (error) {
console.warn('GitHub: Failed to fetch models, using static models:', error);
}
// Fallback to static models
console.log('GitHub: Using static models as fallback');
return this.staticModels;
}
getModelInstance(options: {
model: string;
serverEnv: Env;
@@ -49,6 +128,8 @@ export default class GithubProvider extends BaseProvider {
}): LanguageModelV1 {
const { model, serverEnv, apiKeys, providerSettings } = options;
console.log(`GitHub: Creating model instance for ${model}`);
const { apiKey } = this.getProviderBaseUrlAndKey({
apiKeys,
providerSettings: providerSettings?.[this.name],
@@ -58,14 +139,19 @@ export default class GithubProvider extends BaseProvider {
});
if (!apiKey) {
console.error('GitHub: No API key found');
throw new Error(`Missing API key for ${this.name} provider`);
}
console.log(`GitHub: Using API key (first 8 chars): ${apiKey.substring(0, 8)}...`);
const openai = createOpenAI({
baseURL: 'https://models.inference.ai.azure.com',
baseURL: 'https://models.github.ai/inference',
apiKey,
});
console.log(`GitHub: Created OpenAI client, requesting model: ${model}`);
return openai(model);
}
}

View File

@@ -15,23 +15,23 @@ export default class GoogleProvider extends BaseProvider {
staticModels: ModelInfo[] = [
/*
* Essential fallback models - only the most reliable/stable ones
* Gemini 1.5 Pro: 2M context, excellent for complex reasoning and large codebases
* Gemini 1.5 Pro: 2M context, 8K output limit (verified from API docs)
*/
{
name: 'gemini-1.5-pro',
label: 'Gemini 1.5 Pro',
provider: 'Google',
maxTokenAllowed: 2000000,
maxCompletionTokens: 32768,
maxCompletionTokens: 8192,
},
// Gemini 1.5 Flash: 1M context, fast and cost-effective
// Gemini 1.5 Flash: 1M context, 8K output limit, fast and cost-effective
{
name: 'gemini-1.5-flash',
label: 'Gemini 1.5 Flash',
provider: 'Google',
maxTokenAllowed: 1000000,
maxCompletionTokens: 32768,
maxCompletionTokens: 8192,
},
];
@@ -102,10 +102,10 @@ export default class GoogleProvider extends BaseProvider {
const finalContext = Math.min(contextWindow, maxAllowed);
// Get completion token limit from Google API
let completionTokens = 32768; // default fallback
let completionTokens = 8192; // default fallback (Gemini 1.5 standard limit)
if (m.outputTokenLimit && m.outputTokenLimit > 0) {
completionTokens = Math.min(m.outputTokenLimit, 128000); // Cap at reasonable limit
completionTokens = Math.min(m.outputTokenLimit, 128000); // Use API value, cap at reasonable limit
}
return {

View File

@@ -0,0 +1,71 @@
import { BaseProvider } from '~/lib/modules/llm/base-provider';
import type { ModelInfo } from '~/lib/modules/llm/types';
import type { IProviderSetting } from '~/types/model';
import type { LanguageModelV1 } from 'ai';
import { createOpenAI } from '@ai-sdk/openai';
export default class MoonshotProvider extends BaseProvider {
name = 'Moonshot';
getApiKeyLink = 'https://platform.moonshot.ai/console/api-keys';
config = {
apiTokenKey: 'MOONSHOT_API_KEY',
};
staticModels: ModelInfo[] = [
{ name: 'moonshot-v1-8k', label: 'Moonshot v1 8K', provider: 'Moonshot', maxTokenAllowed: 8000 },
{ name: 'moonshot-v1-32k', label: 'Moonshot v1 32K', provider: 'Moonshot', maxTokenAllowed: 32000 },
{ name: 'moonshot-v1-128k', label: 'Moonshot v1 128K', provider: 'Moonshot', maxTokenAllowed: 128000 },
{ name: 'moonshot-v1-auto', label: 'Moonshot v1 Auto', provider: 'Moonshot', maxTokenAllowed: 128000 },
{
name: 'moonshot-v1-8k-vision-preview',
label: 'Moonshot v1 8K Vision',
provider: 'Moonshot',
maxTokenAllowed: 8000,
},
{
name: 'moonshot-v1-32k-vision-preview',
label: 'Moonshot v1 32K Vision',
provider: 'Moonshot',
maxTokenAllowed: 32000,
},
{
name: 'moonshot-v1-128k-vision-preview',
label: 'Moonshot v1 128K Vision',
provider: 'Moonshot',
maxTokenAllowed: 128000,
},
{ name: 'kimi-latest', label: 'Kimi Latest', provider: 'Moonshot', maxTokenAllowed: 128000 },
{ name: 'kimi-k2-0711-preview', label: 'Kimi K2 Preview', provider: 'Moonshot', maxTokenAllowed: 128000 },
{ name: 'kimi-k2-turbo-preview', label: 'Kimi K2 Turbo', provider: 'Moonshot', maxTokenAllowed: 128000 },
{ name: 'kimi-thinking-preview', label: 'Kimi Thinking', provider: 'Moonshot', maxTokenAllowed: 128000 },
];
getModelInstance(options: {
model: string;
serverEnv: Env;
apiKeys?: Record<string, string>;
providerSettings?: Record<string, IProviderSetting>;
}): LanguageModelV1 {
const { model, serverEnv, apiKeys, providerSettings } = options;
const { apiKey } = this.getProviderBaseUrlAndKey({
apiKeys,
providerSettings: providerSettings?.[this.name],
serverEnv: serverEnv as any,
defaultBaseUrlKey: '',
defaultApiTokenKey: 'MOONSHOT_API_KEY',
});
if (!apiKey) {
throw new Error(`Missing API key for ${this.name} provider`);
}
const openai = createOpenAI({
baseURL: 'https://api.moonshot.ai/v1',
apiKey,
});
return openai(model);
}
}

View File

@@ -15,9 +15,18 @@ export default class OpenAIProvider extends BaseProvider {
staticModels: ModelInfo[] = [
/*
* Essential fallback models - only the most stable/reliable ones
* GPT-4o: 128k context, high performance, recommended for most tasks
* GPT-4o: 128k context, 4k standard output (64k with long output mode)
*/
{ name: 'gpt-4o', label: 'GPT-4o', provider: 'OpenAI', maxTokenAllowed: 128000, maxCompletionTokens: 16384 },
{ name: 'gpt-4o', label: 'GPT-4o', provider: 'OpenAI', maxTokenAllowed: 128000, maxCompletionTokens: 4096 },
// GPT-4o Mini: 128k context, cost-effective alternative
{
name: 'gpt-4o-mini',
label: 'GPT-4o Mini',
provider: 'OpenAI',
maxTokenAllowed: 128000,
maxCompletionTokens: 4096,
},
// GPT-3.5-turbo: 16k context, fast and cost-effective
{
@@ -27,6 +36,18 @@ export default class OpenAIProvider extends BaseProvider {
maxTokenAllowed: 16000,
maxCompletionTokens: 4096,
},
// o1-preview: 128k context, 32k output limit (reasoning model)
{
name: 'o1-preview',
label: 'o1-preview',
provider: 'OpenAI',
maxTokenAllowed: 128000,
maxCompletionTokens: 32000,
},
// o1-mini: 128k context, 65k output limit (reasoning model)
{ name: 'o1-mini', label: 'o1-mini', provider: 'OpenAI', maxTokenAllowed: 128000, maxCompletionTokens: 65000 },
];
async getDynamicModels(
@@ -79,18 +100,23 @@ export default class OpenAIProvider extends BaseProvider {
contextWindow = 16385; // GPT-3.5-turbo has 16k context
}
// Determine completion token limits based on model type
let maxCompletionTokens = 16384; // default for most models
// Determine completion token limits based on model type (accurate 2025 limits)
let maxCompletionTokens = 4096; // default for most models
if (m.id?.startsWith('o1-preview') || m.id?.startsWith('o1-mini') || m.id?.startsWith('o1')) {
// Reasoning models have specific completion limits
maxCompletionTokens = m.id?.includes('mini') ? 8192 : 16384;
if (m.id?.startsWith('o1-preview')) {
maxCompletionTokens = 32000; // o1-preview: 32K output limit
} else if (m.id?.startsWith('o1-mini')) {
maxCompletionTokens = 65000; // o1-mini: 65K output limit
} else if (m.id?.startsWith('o1')) {
maxCompletionTokens = 32000; // Other o1 models: 32K limit
} else if (m.id?.includes('o3') || m.id?.includes('o4')) {
maxCompletionTokens = 100000; // o3/o4 models: 100K output limit
} else if (m.id?.includes('gpt-4o')) {
maxCompletionTokens = 16384;
maxCompletionTokens = 4096; // GPT-4o standard: 4K (64K with long output mode)
} else if (m.id?.includes('gpt-4')) {
maxCompletionTokens = 8192;
maxCompletionTokens = 8192; // Standard GPT-4: 8K output limit
} else if (m.id?.includes('gpt-3.5-turbo')) {
maxCompletionTokens = 4096;
maxCompletionTokens = 4096; // GPT-3.5-turbo: 4K output limit
}
return {

View File

@@ -14,20 +14,20 @@ export default class PerplexityProvider extends BaseProvider {
staticModels: ModelInfo[] = [
{
name: 'llama-3.1-sonar-small-128k-online',
label: 'Sonar Small Online',
name: 'sonar',
label: 'Sonar',
provider: 'Perplexity',
maxTokenAllowed: 8192,
},
{
name: 'llama-3.1-sonar-large-128k-online',
label: 'Sonar Large Online',
name: 'sonar-pro',
label: 'Sonar Pro',
provider: 'Perplexity',
maxTokenAllowed: 8192,
},
{
name: 'llama-3.1-sonar-huge-128k-online',
label: 'Sonar Huge Online',
name: 'sonar-reasoning-pro',
label: 'Sonar Reasoning Pro',
provider: 'Perplexity',
maxTokenAllowed: 8192,
},

View File

@@ -13,9 +13,11 @@ export default class XAIProvider extends BaseProvider {
};
staticModels: ModelInfo[] = [
{ name: 'grok-3-beta', label: 'xAI Grok 3 Beta', provider: 'xAI', maxTokenAllowed: 8000 },
{ name: 'grok-beta', label: 'xAI Grok Beta', provider: 'xAI', maxTokenAllowed: 8000 },
{ name: 'grok-2-1212', label: 'xAI Grok2 1212', provider: 'xAI', maxTokenAllowed: 8000 },
{ name: 'grok-4', label: 'xAI Grok 4', provider: 'xAI', maxTokenAllowed: 256000 },
{ name: 'grok-4-07-09', label: 'xAI Grok 4 (07-09)', provider: 'xAI', maxTokenAllowed: 256000 },
{ name: 'grok-3-beta', label: 'xAI Grok 3 Beta', provider: 'xAI', maxTokenAllowed: 131000 },
{ name: 'grok-3-mini-beta', label: 'xAI Grok 3 Mini Beta', provider: 'xAI', maxTokenAllowed: 131000 },
{ name: 'grok-3-mini-fast-beta', label: 'xAI Grok 3 Mini Fast Beta', provider: 'xAI', maxTokenAllowed: 131000 },
];
getModelInstance(options: {

View File

@@ -16,6 +16,7 @@ import XAIProvider from './providers/xai';
import HyperbolicProvider from './providers/hyperbolic';
import AmazonBedrockProvider from './providers/amazon-bedrock';
import GithubProvider from './providers/github';
import MoonshotProvider from './providers/moonshot';
export {
AnthropicProvider,
@@ -26,6 +27,7 @@ export {
HuggingFaceProvider,
HyperbolicProvider,
MistralProvider,
MoonshotProvider,
OllamaProvider,
OpenAIProvider,
OpenRouterProvider,

View File

@@ -41,6 +41,29 @@ function getCompletionTokenLimit(modelDetails: ModelInfo): number {
return Math.min(MAX_TOKENS, 16384);
}
function validateTokenLimits(modelDetails: ModelInfo, requestedTokens: number): { valid: boolean; error?: string } {
const modelMaxTokens = modelDetails.maxTokenAllowed || 128000;
const maxCompletionTokens = getCompletionTokenLimit(modelDetails);
// Check against model's context window
if (requestedTokens > modelMaxTokens) {
return {
valid: false,
error: `Requested tokens (${requestedTokens}) exceed model's context window (${modelMaxTokens}). Please reduce your request size.`,
};
}
// Check against completion token limits
if (requestedTokens > maxCompletionTokens) {
return {
valid: false,
error: `Requested tokens (${requestedTokens}) exceed model's completion limit (${maxCompletionTokens}). Consider using a model with higher token limits.`,
};
}
return { valid: true };
}
async function llmCallAction({ context, request }: ActionFunctionArgs) {
const { system, message, model, provider, streamOutput } = await request.json<{
system: string;
@@ -104,6 +127,23 @@ async function llmCallAction({ context, request }: ActionFunctionArgs) {
});
}
// Handle token limit errors with helpful messages
if (
error instanceof Error &&
(error.message?.includes('max_tokens') ||
error.message?.includes('token') ||
error.message?.includes('exceeds') ||
error.message?.includes('maximum'))
) {
throw new Response(
`Token limit error: ${error.message}. Try reducing your request size or using a model with higher token limits.`,
{
status: 400,
statusText: 'Token Limit Exceeded',
},
);
}
throw new Response(null, {
status: 500,
statusText: 'Internal Server Error',
@@ -120,6 +160,16 @@ async function llmCallAction({ context, request }: ActionFunctionArgs) {
const dynamicMaxTokens = modelDetails ? getCompletionTokenLimit(modelDetails) : Math.min(MAX_TOKENS, 16384);
// Validate token limits before making API request
const validation = validateTokenLimits(modelDetails, dynamicMaxTokens);
if (!validation.valid) {
throw new Response(validation.error, {
status: 400,
statusText: 'Token Limit Exceeded',
});
}
const providerInfo = PROVIDER_LIST.find((p) => p.name === provider.name);
if (!providerInfo) {
@@ -215,6 +265,29 @@ async function llmCallAction({ context, request }: ActionFunctionArgs) {
);
}
// Handle token limit errors with helpful messages
if (
error instanceof Error &&
(error.message?.includes('max_tokens') ||
error.message?.includes('token') ||
error.message?.includes('exceeds') ||
error.message?.includes('maximum'))
) {
return new Response(
JSON.stringify({
...errorResponse,
message: `Token limit error: ${error.message}. Try reducing your request size or using a model with higher token limits.`,
statusCode: 400,
isRetryable: false,
}),
{
status: 400,
headers: { 'Content-Type': 'application/json' },
statusText: 'Token Limit Exceeded',
},
);
}
return new Response(JSON.stringify(errorResponse), {
status: errorResponse.statusCode,
headers: { 'Content-Type': 'application/json' },