Merge branch 'main' into terminal-error-detection
@@ -1 +0,0 @@
{ "commit": "b25b8b98f9c3c2b009802c2608e3e6a5a1f81569" }
@@ -125,6 +125,9 @@ export const BaseChat = React.forwardRef<HTMLDivElement, BaseChatProps>(

useEffect(() => {
// Load API keys from cookies on component mount

let parsedApiKeys: Record<string, string> | undefined = {};

try {
const storedApiKeys = Cookies.get('apiKeys');

@@ -133,6 +136,7 @@ export const BaseChat = React.forwardRef<HTMLDivElement, BaseChatProps>(

if (typeof parsedKeys === 'object' && parsedKeys !== null) {
setApiKeys(parsedKeys);
parsedApiKeys = parsedKeys;
}
}
} catch (error) {
@@ -161,7 +165,8 @@ export const BaseChat = React.forwardRef<HTMLDivElement, BaseChatProps>(
Cookies.remove('providers');
}

initializeModelList(providerSettings).then((modelList) => {
initializeModelList({ apiKeys: parsedApiKeys, providerSettings }).then((modelList) => {
console.log('Model List: ', modelList);
setModelList(modelList);
});

@@ -381,7 +386,7 @@ export const BaseChat = React.forwardRef<HTMLDivElement, BaseChatProps>(
modelList={modelList}
provider={provider}
setProvider={setProvider}
providerList={providerList || PROVIDER_LIST}
providerList={providerList || (PROVIDER_LIST as ProviderInfo[])}
apiKeys={apiKeys}
/>
{(providerList || []).length > 0 && provider && (

@@ -21,6 +21,7 @@ import { debounce } from '~/utils/debounce';
import { useSettings } from '~/lib/hooks/useSettings';
import type { ProviderInfo } from '~/types/model';
import { useSearchParams } from '@remix-run/react';
import { createSampler } from '~/utils/sampler';

const toastAnimation = cssTransition({
enter: 'animated fadeInRight',
@@ -77,6 +78,24 @@ export function Chat() {
);
}

const processSampledMessages = createSampler(
(options: {
messages: Message[];
initialMessages: Message[];
isLoading: boolean;
parseMessages: (messages: Message[], isLoading: boolean) => void;
storeMessageHistory: (messages: Message[]) => Promise<void>;
}) => {
const { messages, initialMessages, isLoading, parseMessages, storeMessageHistory } = options;
parseMessages(messages, isLoading);

if (messages.length > initialMessages.length) {
storeMessageHistory(messages).catch((error) => toast.error(error.message));
}
},
50,
);

interface ChatProps {
initialMessages: Message[];
storeMessageHistory: (messages: Message[]) => Promise<void>;
@@ -104,7 +123,7 @@ export const ChatImpl = memo(
});
const [provider, setProvider] = useState(() => {
const savedProvider = Cookies.get('selectedProvider');
return PROVIDER_LIST.find((p) => p.name === savedProvider) || DEFAULT_PROVIDER;
return (PROVIDER_LIST.find((p) => p.name === savedProvider) || DEFAULT_PROVIDER) as ProviderInfo;
});

const { showChat } = useStore(chatStore);
@@ -170,11 +189,13 @@ export const ChatImpl = memo(
}, []);

useEffect(() => {
parseMessages(messages, isLoading);

if (messages.length > initialMessages.length) {
storeMessageHistory(messages).catch((error) => toast.error(error.message));
}
processSampledMessages({
messages,
initialMessages,
isLoading,
parseMessages,
storeMessageHistory,
});
}, [messages, isLoading, parseMessages]);

const scrollTextArea = () => {

@@ -1,6 +1,6 @@
import type { ProviderInfo } from '~/types/model';
import type { ModelInfo } from '~/utils/types';
import { useEffect } from 'react';
import type { ModelInfo } from '~/lib/modules/llm/types';

interface ModelSelectorProps {
model?: string;

@@ -63,7 +63,7 @@ export const SettingsWindow = ({ open, onClose }: SettingsProps) => {
variants={dialogBackdropVariants}
/>
</RadixDialog.Overlay>
<RadixDialog.Content asChild>
<RadixDialog.Content aria-describedby={undefined} asChild>
<motion.div
className="fixed top-[50%] left-[50%] z-max h-[85vh] w-[90vw] max-w-[900px] translate-x-[-50%] translate-y-[-50%] border border-bolt-elements-borderColor rounded-lg shadow-lg focus:outline-none overflow-hidden"
initial="closed"
@@ -1,7 +1,7 @@
import React, { useCallback, useEffect, useState } from 'react';
import { useSettings } from '~/lib/hooks/useSettings';
import commit from '~/commit.json';
import { toast } from 'react-toastify';
import { providerBaseUrlEnvKeys } from '~/utils/constants';

interface ProviderStatus {
name: string;
@@ -43,11 +43,16 @@ interface CommitData {
version?: string;
}

const connitJson: CommitData = commit;
const connitJson: CommitData = {
commit: __COMMIT_HASH,
version: __APP_VERSION,
};

const LOCAL_PROVIDERS = ['Ollama', 'LMStudio', 'OpenAILike'];

const versionHash = connitJson.commit;
const versionTag = connitJson.version;

const GITHUB_URLS = {
original: 'https://api.github.com/repos/stackblitz-labs/bolt.diy/commits/main',
fork: 'https://api.github.com/repos/Stijnus/bolt.new-any-llm/commits/main',
@@ -236,7 +241,7 @@ const checkProviderStatus = async (url: string | null, providerName: string): Pr
}

// Try different endpoints based on provider
const checkUrls = [`${url}/api/health`, `${url}/v1/models`];
const checkUrls = [`${url}/api/health`, url.endsWith('v1') ? `${url}/models` : `${url}/v1/models`];
console.log(`[Debug] Checking additional endpoints:`, checkUrls);

const results = await Promise.all(
@@ -321,14 +326,16 @@ export default function DebugTab() {
.filter(([, provider]) => LOCAL_PROVIDERS.includes(provider.name))
.map(async ([, provider]) => {
const envVarName =
provider.name.toLowerCase() === 'ollama'
? 'OLLAMA_API_BASE_URL'
: provider.name.toLowerCase() === 'lmstudio'
? 'LMSTUDIO_API_BASE_URL'
: `REACT_APP_${provider.name.toUpperCase()}_URL`;
providerBaseUrlEnvKeys[provider.name].baseUrlKey || `REACT_APP_${provider.name.toUpperCase()}_URL`;

// Access environment variables through import.meta.env
const url = import.meta.env[envVarName] || provider.settings.baseUrl || null; // Ensure baseUrl is used
let settingsUrl = provider.settings.baseUrl;

if (settingsUrl && settingsUrl.trim().length === 0) {
settingsUrl = undefined;
}

const url = settingsUrl || import.meta.env[envVarName] || null; // Ensure baseUrl is used
console.log(`[Debug] Using URL for ${provider.name}:`, url, `(from ${envVarName})`);

const status = await checkProviderStatus(url, provider.name);
@@ -521,7 +528,7 @@ export default function DebugTab() {
<div className="mt-3 pt-3 border-t border-bolt-elements-surface-hover">
<p className="text-xs text-bolt-elements-textSecondary">Version</p>
<p className="text-sm font-medium text-bolt-elements-textPrimary font-mono">
{versionHash.slice(0, 7)}
{connitJson.commit.slice(0, 7)}
<span className="ml-2 text-xs text-bolt-elements-textSecondary">
(v{versionTag || '0.0.1'}) - {isLatestBranch ? 'nightly' : 'stable'}
</span>
@@ -7,6 +7,7 @@ import { logStore } from '~/lib/stores/logs';

// Import a default fallback icon
import DefaultIcon from '/icons/Default.svg'; // Adjust the path as necessary
import { providerBaseUrlEnvKeys } from '~/utils/constants';

export default function ProvidersTab() {
const { providers, updateProviderSettings, isLocalModel } = useSettings();
@@ -33,9 +34,87 @@ export default function ProvidersTab() {

newFilteredProviders.sort((a, b) => a.name.localeCompare(b.name));

setFilteredProviders(newFilteredProviders);
// Split providers into regular and URL-configurable
const regular = newFilteredProviders.filter((p) => !URL_CONFIGURABLE_PROVIDERS.includes(p.name));
const urlConfigurable = newFilteredProviders.filter((p) => URL_CONFIGURABLE_PROVIDERS.includes(p.name));

setFilteredProviders([...regular, ...urlConfigurable]);
}, [providers, searchTerm, isLocalModel]);

const renderProviderCard = (provider: IProviderConfig) => {
const envBaseUrlKey = providerBaseUrlEnvKeys[provider.name].baseUrlKey;
const envBaseUrl = envBaseUrlKey ? import.meta.env[envBaseUrlKey] : undefined;
const isUrlConfigurable = URL_CONFIGURABLE_PROVIDERS.includes(provider.name);

return (
<div
key={provider.name}
className="flex flex-col provider-item hover:bg-bolt-elements-bg-depth-3 p-4 rounded-lg border border-bolt-elements-borderColor"
>
<div className="flex items-center justify-between mb-2">
<div className="flex items-center gap-2">
<img
src={`/icons/${provider.name}.svg`}
onError={(e) => {
e.currentTarget.src = DefaultIcon;
}}
alt={`${provider.name} icon`}
className="w-6 h-6 dark:invert"
/>
<span className="text-bolt-elements-textPrimary">{provider.name}</span>
</div>
<Switch
className="ml-auto"
checked={provider.settings.enabled}
onCheckedChange={(enabled) => {
updateProviderSettings(provider.name, { ...provider.settings, enabled });

if (enabled) {
logStore.logProvider(`Provider ${provider.name} enabled`, { provider: provider.name });
} else {
logStore.logProvider(`Provider ${provider.name} disabled`, { provider: provider.name });
}
}}
/>
</div>
{isUrlConfigurable && provider.settings.enabled && (
<div className="mt-2">
{envBaseUrl && (
<label className="block text-xs text-bolt-elements-textSecondary text-green-300 mb-2">
Set On (.env) : {envBaseUrl}
</label>
)}
<label className="block text-sm text-bolt-elements-textSecondary mb-2">
{envBaseUrl ? 'Override Base Url' : 'Base URL '}:{' '}
</label>
<input
type="text"
value={provider.settings.baseUrl || ''}
onChange={(e) => {
let newBaseUrl: string | undefined = e.target.value;

if (newBaseUrl && newBaseUrl.trim().length === 0) {
newBaseUrl = undefined;
}

updateProviderSettings(provider.name, { ...provider.settings, baseUrl: newBaseUrl });
logStore.logProvider(`Base URL updated for ${provider.name}`, {
provider: provider.name,
baseUrl: newBaseUrl,
});
}}
placeholder={`Enter ${provider.name} base URL`}
className="w-full bg-white dark:bg-bolt-elements-background-depth-4 relative px-2 py-1.5 rounded-md focus:outline-none placeholder-bolt-elements-textTertiary text-bolt-elements-textPrimary dark:text-bolt-elements-textPrimary border border-bolt-elements-borderColor"
/>
</div>
)}
</div>
);
};

const regularProviders = filteredProviders.filter((p) => !URL_CONFIGURABLE_PROVIDERS.includes(p.name));
const urlConfigurableProviders = filteredProviders.filter((p) => URL_CONFIGURABLE_PROVIDERS.includes(p.name));

return (
<div className="p-4">
<div className="flex mb-4">
@@ -47,60 +126,21 @@ export default function ProvidersTab() {
className="w-full bg-white dark:bg-bolt-elements-background-depth-4 relative px-2 py-1.5 rounded-md focus:outline-none placeholder-bolt-elements-textTertiary text-bolt-elements-textPrimary dark:text-bolt-elements-textPrimary border border-bolt-elements-borderColor"
/>
</div>
{filteredProviders.map((provider) => (
<div
key={provider.name}
className="flex flex-col mb-2 provider-item hover:bg-bolt-elements-bg-depth-3 p-4 rounded-lg border border-bolt-elements-borderColor "
>
<div className="flex items-center justify-between mb-2">
<div className="flex items-center gap-2">
<img
src={`/icons/${provider.name}.svg`} // Attempt to load the specific icon
onError={(e) => {
// Fallback to default icon on error
e.currentTarget.src = DefaultIcon;
}}
alt={`${provider.name} icon`}
className="w-6 h-6 dark:invert"
/>
<span className="text-bolt-elements-textPrimary">{provider.name}</span>
</div>
<Switch
className="ml-auto"
checked={provider.settings.enabled}
onCheckedChange={(enabled) => {
updateProviderSettings(provider.name, { ...provider.settings, enabled });

if (enabled) {
logStore.logProvider(`Provider ${provider.name} enabled`, { provider: provider.name });
} else {
logStore.logProvider(`Provider ${provider.name} disabled`, { provider: provider.name });
}
}}
/>
</div>
{/* Base URL input for configurable providers */}
{URL_CONFIGURABLE_PROVIDERS.includes(provider.name) && provider.settings.enabled && (
<div className="mt-2">
<label className="block text-sm text-bolt-elements-textSecondary mb-1">Base URL:</label>
<input
type="text"
value={provider.settings.baseUrl || ''}
onChange={(e) => {
const newBaseUrl = e.target.value;
updateProviderSettings(provider.name, { ...provider.settings, baseUrl: newBaseUrl });
logStore.logProvider(`Base URL updated for ${provider.name}`, {
provider: provider.name,
baseUrl: newBaseUrl,
});
}}
placeholder={`Enter ${provider.name} base URL`}
className="w-full bg-white dark:bg-bolt-elements-background-depth-4 relative px-2 py-1.5 rounded-md focus:outline-none placeholder-bolt-elements-textTertiary text-bolt-elements-textPrimary dark:text-bolt-elements-textPrimary border border-bolt-elements-borderColor"
/>
</div>
)}
{/* Regular Providers Grid */}
<div className="grid grid-cols-2 gap-4 mb-8">{regularProviders.map(renderProviderCard)}</div>

{/* URL Configurable Providers Section */}
{urlConfigurableProviders.length > 0 && (
<div className="mt-8">
<h3 className="text-lg font-semibold mb-2 text-bolt-elements-textPrimary">Experimental Providers</h3>
<p className="text-sm text-bolt-elements-textSecondary mb-4">
These providers are experimental and allow you to run AI models locally or connect to your own
infrastructure. They require additional setup but offer more flexibility.
</p>
<div className="space-y-4">{urlConfigurableProviders.map(renderProviderCard)}</div>
</div>
))}
)}
</div>
);
}
@@ -14,7 +14,7 @@ export default async function handleRequest(
remixContext: EntryContext,
_loadContext: AppLoadContext,
) {
await initializeModelList();
await initializeModelList({});

const readable = await renderToReadableStream(<RemixServer context={remixContext} url={request.url} />, {
signal: request.signal,
@@ -1,73 +0,0 @@
|
||||
/*
|
||||
* @ts-nocheck
|
||||
* Preventing TS checks with files presented in the video for a better presentation.
|
||||
*/
|
||||
import { env } from 'node:process';
|
||||
|
||||
export function getAPIKey(cloudflareEnv: Env, provider: string, userApiKeys?: Record<string, string>) {
|
||||
/**
|
||||
* The `cloudflareEnv` is only used when deployed or when previewing locally.
|
||||
* In development the environment variables are available through `env`.
|
||||
*/
|
||||
|
||||
// First check user-provided API keys
|
||||
if (userApiKeys?.[provider]) {
|
||||
return userApiKeys[provider];
|
||||
}
|
||||
|
||||
// Fall back to environment variables
|
||||
switch (provider) {
|
||||
case 'Anthropic':
|
||||
return env.ANTHROPIC_API_KEY || cloudflareEnv.ANTHROPIC_API_KEY;
|
||||
case 'OpenAI':
|
||||
return env.OPENAI_API_KEY || cloudflareEnv.OPENAI_API_KEY;
|
||||
case 'Google':
|
||||
return env.GOOGLE_GENERATIVE_AI_API_KEY || cloudflareEnv.GOOGLE_GENERATIVE_AI_API_KEY;
|
||||
case 'Groq':
|
||||
return env.GROQ_API_KEY || cloudflareEnv.GROQ_API_KEY;
|
||||
case 'HuggingFace':
|
||||
return env.HuggingFace_API_KEY || cloudflareEnv.HuggingFace_API_KEY;
|
||||
case 'OpenRouter':
|
||||
return env.OPEN_ROUTER_API_KEY || cloudflareEnv.OPEN_ROUTER_API_KEY;
|
||||
case 'Deepseek':
|
||||
return env.DEEPSEEK_API_KEY || cloudflareEnv.DEEPSEEK_API_KEY;
|
||||
case 'Mistral':
|
||||
return env.MISTRAL_API_KEY || cloudflareEnv.MISTRAL_API_KEY;
|
||||
case 'OpenAILike':
|
||||
return env.OPENAI_LIKE_API_KEY || cloudflareEnv.OPENAI_LIKE_API_KEY;
|
||||
case 'Together':
|
||||
return env.TOGETHER_API_KEY || cloudflareEnv.TOGETHER_API_KEY;
|
||||
case 'xAI':
|
||||
return env.XAI_API_KEY || cloudflareEnv.XAI_API_KEY;
|
||||
case 'Perplexity':
|
||||
return env.PERPLEXITY_API_KEY || cloudflareEnv.PERPLEXITY_API_KEY;
|
||||
case 'Cohere':
|
||||
return env.COHERE_API_KEY;
|
||||
case 'AzureOpenAI':
|
||||
return env.AZURE_OPENAI_API_KEY;
|
||||
default:
|
||||
return '';
|
||||
}
|
||||
}
|
||||
|
||||
export function getBaseURL(cloudflareEnv: Env, provider: string) {
|
||||
switch (provider) {
|
||||
case 'Together':
|
||||
return env.TOGETHER_API_BASE_URL || cloudflareEnv.TOGETHER_API_BASE_URL || 'https://api.together.xyz/v1';
|
||||
case 'OpenAILike':
|
||||
return env.OPENAI_LIKE_API_BASE_URL || cloudflareEnv.OPENAI_LIKE_API_BASE_URL;
|
||||
case 'LMStudio':
|
||||
return env.LMSTUDIO_API_BASE_URL || cloudflareEnv.LMSTUDIO_API_BASE_URL || 'http://localhost:1234';
|
||||
case 'Ollama': {
|
||||
let baseUrl = env.OLLAMA_API_BASE_URL || cloudflareEnv.OLLAMA_API_BASE_URL || 'http://localhost:11434';
|
||||
|
||||
if (env.RUNNING_IN_DOCKER === 'true') {
|
||||
baseUrl = baseUrl.replace('localhost', 'host.docker.internal');
|
||||
}
|
||||
|
||||
return baseUrl;
|
||||
}
|
||||
default:
|
||||
return '';
|
||||
}
|
||||
}
|
||||
@@ -1,187 +0,0 @@
|
||||
/*
|
||||
* @ts-nocheck
|
||||
* Preventing TS checks with files presented in the video for a better presentation.
|
||||
*/
|
||||
import { getAPIKey, getBaseURL } from '~/lib/.server/llm/api-key';
|
||||
import { createAnthropic } from '@ai-sdk/anthropic';
|
||||
import { createOpenAI } from '@ai-sdk/openai';
|
||||
import { createGoogleGenerativeAI } from '@ai-sdk/google';
|
||||
import { ollama } from 'ollama-ai-provider';
|
||||
import { createOpenRouter } from '@openrouter/ai-sdk-provider';
|
||||
import { createMistral } from '@ai-sdk/mistral';
|
||||
import { createCohere } from '@ai-sdk/cohere';
|
||||
import type { LanguageModelV1 } from 'ai';
|
||||
import type { IProviderSetting } from '~/types/model';
|
||||
|
||||
export const DEFAULT_NUM_CTX = process.env.DEFAULT_NUM_CTX ? parseInt(process.env.DEFAULT_NUM_CTX, 10) : 32768;
|
||||
|
||||
type OptionalApiKey = string | undefined;
|
||||
|
||||
export function getAnthropicModel(apiKey: OptionalApiKey, model: string) {
|
||||
const anthropic = createAnthropic({
|
||||
apiKey,
|
||||
});
|
||||
|
||||
return anthropic(model);
|
||||
}
|
||||
export function getOpenAILikeModel(baseURL: string, apiKey: OptionalApiKey, model: string) {
|
||||
const openai = createOpenAI({
|
||||
baseURL,
|
||||
apiKey,
|
||||
});
|
||||
|
||||
return openai(model);
|
||||
}
|
||||
|
||||
export function getCohereAIModel(apiKey: OptionalApiKey, model: string) {
|
||||
const cohere = createCohere({
|
||||
apiKey,
|
||||
});
|
||||
|
||||
return cohere(model);
|
||||
}
|
||||
|
||||
export function getOpenAIModel(apiKey: OptionalApiKey, model: string) {
|
||||
const openai = createOpenAI({
|
||||
apiKey,
|
||||
});
|
||||
|
||||
return openai(model);
|
||||
}
|
||||
|
||||
export function getMistralModel(apiKey: OptionalApiKey, model: string) {
|
||||
const mistral = createMistral({
|
||||
apiKey,
|
||||
});
|
||||
|
||||
return mistral(model);
|
||||
}
|
||||
|
||||
export function getGoogleModel(apiKey: OptionalApiKey, model: string) {
|
||||
const google = createGoogleGenerativeAI({
|
||||
apiKey,
|
||||
});
|
||||
|
||||
return google(model);
|
||||
}
|
||||
|
||||
export function getGroqModel(apiKey: OptionalApiKey, model: string) {
|
||||
const openai = createOpenAI({
|
||||
baseURL: 'https://api.groq.com/openai/v1',
|
||||
apiKey,
|
||||
});
|
||||
|
||||
return openai(model);
|
||||
}
|
||||
|
||||
export function getHuggingFaceModel(apiKey: OptionalApiKey, model: string) {
|
||||
const openai = createOpenAI({
|
||||
baseURL: 'https://api-inference.huggingface.co/v1/',
|
||||
apiKey,
|
||||
});
|
||||
|
||||
return openai(model);
|
||||
}
|
||||
|
||||
export function getOllamaModel(baseURL: string, model: string) {
|
||||
const ollamaInstance = ollama(model, {
|
||||
numCtx: DEFAULT_NUM_CTX,
|
||||
}) as LanguageModelV1 & { config: any };
|
||||
|
||||
ollamaInstance.config.baseURL = `${baseURL}/api`;
|
||||
|
||||
return ollamaInstance;
|
||||
}
|
||||
|
||||
export function getDeepseekModel(apiKey: OptionalApiKey, model: string) {
|
||||
const openai = createOpenAI({
|
||||
baseURL: 'https://api.deepseek.com/beta',
|
||||
apiKey,
|
||||
});
|
||||
|
||||
return openai(model);
|
||||
}
|
||||
|
||||
export function getOpenRouterModel(apiKey: OptionalApiKey, model: string) {
|
||||
const openRouter = createOpenRouter({
|
||||
apiKey,
|
||||
});
|
||||
|
||||
return openRouter.chat(model);
|
||||
}
|
||||
|
||||
export function getLMStudioModel(baseURL: string, model: string) {
|
||||
const lmstudio = createOpenAI({
|
||||
baseUrl: `${baseURL}/v1`,
|
||||
apiKey: '',
|
||||
});
|
||||
|
||||
return lmstudio(model);
|
||||
}
|
||||
|
||||
export function getXAIModel(apiKey: OptionalApiKey, model: string) {
|
||||
const openai = createOpenAI({
|
||||
baseURL: 'https://api.x.ai/v1',
|
||||
apiKey,
|
||||
});
|
||||
|
||||
return openai(model);
|
||||
}
|
||||
|
||||
export function getPerplexityModel(apiKey: OptionalApiKey, model: string) {
|
||||
const perplexity = createOpenAI({
|
||||
baseURL: 'https://api.perplexity.ai/',
|
||||
apiKey,
|
||||
});
|
||||
|
||||
return perplexity(model);
|
||||
}
|
||||
|
||||
export function getModel(
|
||||
provider: string,
|
||||
model: string,
|
||||
env: Env,
|
||||
apiKeys?: Record<string, string>,
|
||||
providerSettings?: Record<string, IProviderSetting>,
|
||||
) {
|
||||
/*
|
||||
* let apiKey; // Declare first
|
||||
* let baseURL;
|
||||
*/
|
||||
|
||||
const apiKey = getAPIKey(env, provider, apiKeys); // Then assign
|
||||
const baseURL = providerSettings?.[provider].baseUrl || getBaseURL(env, provider);
|
||||
|
||||
switch (provider) {
|
||||
case 'Anthropic':
|
||||
return getAnthropicModel(apiKey, model);
|
||||
case 'OpenAI':
|
||||
return getOpenAIModel(apiKey, model);
|
||||
case 'Groq':
|
||||
return getGroqModel(apiKey, model);
|
||||
case 'HuggingFace':
|
||||
return getHuggingFaceModel(apiKey, model);
|
||||
case 'OpenRouter':
|
||||
return getOpenRouterModel(apiKey, model);
|
||||
case 'Google':
|
||||
return getGoogleModel(apiKey, model);
|
||||
case 'OpenAILike':
|
||||
return getOpenAILikeModel(baseURL, apiKey, model);
|
||||
case 'Together':
|
||||
return getOpenAILikeModel(baseURL, apiKey, model);
|
||||
case 'Deepseek':
|
||||
return getDeepseekModel(apiKey, model);
|
||||
case 'Mistral':
|
||||
return getMistralModel(apiKey, model);
|
||||
case 'LMStudio':
|
||||
return getLMStudioModel(baseURL, model);
|
||||
case 'xAI':
|
||||
return getXAIModel(apiKey, model);
|
||||
case 'Cohere':
|
||||
return getCohereAIModel(apiKey, model);
|
||||
case 'Perplexity':
|
||||
return getPerplexityModel(apiKey, model);
|
||||
default:
|
||||
return getOllamaModel(baseURL, model);
|
||||
}
|
||||
}
|
||||
@@ -1,5 +1,4 @@
import { convertToCoreMessages, streamText as _streamText } from 'ai';
import { getModel } from '~/lib/.server/llm/model';
import { MAX_TOKENS } from './constants';
import { getSystemPrompt } from '~/lib/common/prompts/prompts';
import {
@@ -8,6 +7,7 @@ import {
getModelList,
MODEL_REGEX,
MODIFICATIONS_TAG_NAME,
PROVIDER_LIST,
PROVIDER_REGEX,
WORK_DIR,
} from '~/utils/constants';
@@ -151,10 +151,13 @@ export async function streamText(props: {
providerSettings?: Record<string, IProviderSetting>;
promptId?: string;
}) {
const { messages, env, options, apiKeys, files, providerSettings, promptId } = props;
const { messages, env: serverEnv, options, apiKeys, files, providerSettings, promptId } = props;

// console.log({serverEnv});

let currentModel = DEFAULT_MODEL;
let currentProvider = DEFAULT_PROVIDER.name;
const MODEL_LIST = await getModelList(apiKeys || {}, providerSettings);
const MODEL_LIST = await getModelList({ apiKeys, providerSettings, serverEnv: serverEnv as any });
const processedMessages = messages.map((message) => {
if (message.role === 'user') {
const { model, provider, content } = extractPropertiesFromMessage(message);
@@ -181,6 +184,8 @@ export async function streamText(props: {

const dynamicMaxTokens = modelDetails && modelDetails.maxTokenAllowed ? modelDetails.maxTokenAllowed : MAX_TOKENS;

const provider = PROVIDER_LIST.find((p) => p.name === currentProvider) || DEFAULT_PROVIDER;

let systemPrompt =
PromptLibrary.getPropmtFromLibrary(promptId || 'default', {
cwd: WORK_DIR,
@@ -196,7 +201,12 @@ export async function streamText(props: {
}

return _streamText({
model: getModel(currentProvider, currentModel, env, apiKeys, providerSettings) as any,
model: provider.getModelInstance({
model: currentModel,
serverEnv,
apiKeys,
providerSettings,
}),
system: systemPrompt,
maxTokens: dynamicMaxTokens,
messages: convertToCoreMessages(processedMessages as any),

@@ -12,14 +12,16 @@ import { useCallback, useEffect, useState } from 'react';
import Cookies from 'js-cookie';
import type { IProviderSetting, ProviderInfo } from '~/types/model';
import { logStore } from '~/lib/stores/logs'; // assuming logStore is imported from this location
import commit from '~/commit.json';

interface CommitData {
commit: string;
version?: string;
}

const commitJson: CommitData = commit;
const versionData: CommitData = {
commit: __COMMIT_HASH,
version: __APP_VERSION,
};

export function useSettings() {
const providers = useStore(providersStore);
@@ -34,7 +36,7 @@ export function useSettings() {
const checkIsStableVersion = async () => {
try {
const stableResponse = await fetch(
`https://raw.githubusercontent.com/stackblitz-labs/bolt.diy/refs/tags/v${commitJson.version}/app/commit.json`,
`https://raw.githubusercontent.com/stackblitz-labs/bolt.diy/refs/tags/v${versionData.version}/app/commit.json`,
);

if (!stableResponse.ok) {
@@ -44,7 +46,7 @@ export function useSettings() {

const stableData = (await stableResponse.json()) as CommitData;

return commit.commit === stableData.commit;
return versionData.commit === stableData.commit;
} catch (error) {
console.warn('Error checking stable version:', error);
return false;
@@ -105,16 +107,16 @@ export function useSettings() {
let checkCommit = Cookies.get('commitHash');

if (checkCommit === undefined) {
checkCommit = commit.commit;
checkCommit = versionData.commit;
}

if (savedLatestBranch === undefined || checkCommit !== commit.commit) {
if (savedLatestBranch === undefined || checkCommit !== versionData.commit) {
// If setting hasn't been set by user, check version
checkIsStableVersion().then((isStable) => {
const shouldUseLatest = !isStable;
latestBranchStore.set(shouldUseLatest);
Cookies.set('isLatestBranch', String(shouldUseLatest));
Cookies.set('commitHash', String(commit.commit));
Cookies.set('commitHash', String(versionData.commit));
});
} else {
latestBranchStore.set(savedLatestBranch === 'true');
app/lib/modules/llm/base-provider.ts (new file, +72 lines)
@@ -0,0 +1,72 @@
import type { LanguageModelV1 } from 'ai';
import type { ProviderInfo, ProviderConfig, ModelInfo } from './types';
import type { IProviderSetting } from '~/types/model';
import { createOpenAI } from '@ai-sdk/openai';
import { LLMManager } from './manager';

export abstract class BaseProvider implements ProviderInfo {
abstract name: string;
abstract staticModels: ModelInfo[];
abstract config: ProviderConfig;

getApiKeyLink?: string;
labelForGetApiKey?: string;
icon?: string;

getProviderBaseUrlAndKey(options: {
apiKeys?: Record<string, string>;
providerSettings?: IProviderSetting;
serverEnv?: Record<string, string>;
defaultBaseUrlKey: string;
defaultApiTokenKey: string;
}) {
const { apiKeys, providerSettings, serverEnv, defaultBaseUrlKey, defaultApiTokenKey } = options;
let settingsBaseUrl = providerSettings?.baseUrl;
const manager = LLMManager.getInstance();

if (settingsBaseUrl && settingsBaseUrl.length == 0) {
settingsBaseUrl = undefined;
}

const baseUrlKey = this.config.baseUrlKey || defaultBaseUrlKey;
let baseUrl = settingsBaseUrl || serverEnv?.[baseUrlKey] || process?.env?.[baseUrlKey] || manager.env?.[baseUrlKey];

if (baseUrl && baseUrl.endsWith('/')) {
baseUrl = baseUrl.slice(0, -1);
}

const apiTokenKey = this.config.apiTokenKey || defaultApiTokenKey;
const apiKey =
apiKeys?.[this.name] || serverEnv?.[apiTokenKey] || process?.env?.[apiTokenKey] || manager.env?.[baseUrlKey];

return {
baseUrl,
apiKey,
};
}

// Declare the optional getDynamicModels method
getDynamicModels?(
apiKeys?: Record<string, string>,
settings?: IProviderSetting,
serverEnv?: Record<string, string>,
): Promise<ModelInfo[]>;

abstract getModelInstance(options: {
model: string;
serverEnv: Env;
apiKeys?: Record<string, string>;
providerSettings?: Record<string, IProviderSetting>;
}): LanguageModelV1;
}

type OptionalApiKey = string | undefined;

export function getOpenAILikeModel(baseURL: string, apiKey: OptionalApiKey, model: string) {
const openai = createOpenAI({
baseURL,
apiKey,
});

return openai(model);
}
app/lib/modules/llm/manager.ts (new file, +116 lines)
@@ -0,0 +1,116 @@
import type { IProviderSetting } from '~/types/model';
import { BaseProvider } from './base-provider';
import type { ModelInfo, ProviderInfo } from './types';
import * as providers from './registry';

export class LLMManager {
private static _instance: LLMManager;
private _providers: Map<string, BaseProvider> = new Map();
private _modelList: ModelInfo[] = [];
private readonly _env: any = {};

private constructor(_env: Record<string, string>) {
this._registerProvidersFromDirectory();
this._env = _env;
}

static getInstance(env: Record<string, string> = {}): LLMManager {
if (!LLMManager._instance) {
LLMManager._instance = new LLMManager(env);
}

return LLMManager._instance;
}
get env() {
return this._env;
}

private async _registerProvidersFromDirectory() {
try {
/*
* Dynamically import all files from the providers directory
* const providerModules = import.meta.glob('./providers/*.ts', { eager: true });
*/

// Look for exported classes that extend BaseProvider
for (const exportedItem of Object.values(providers)) {
if (typeof exportedItem === 'function' && exportedItem.prototype instanceof BaseProvider) {
const provider = new exportedItem();

try {
this.registerProvider(provider);
} catch (error: any) {
console.log('Failed To Register Provider: ', provider.name, 'error:', error.message);
}
}
}
} catch (error) {
console.error('Error registering providers:', error);
}
}

registerProvider(provider: BaseProvider) {
if (this._providers.has(provider.name)) {
console.warn(`Provider ${provider.name} is already registered. Skipping.`);
return;
}

console.log('Registering Provider: ', provider.name);
this._providers.set(provider.name, provider);
this._modelList = [...this._modelList, ...provider.staticModels];
}

getProvider(name: string): BaseProvider | undefined {
return this._providers.get(name);
}

getAllProviders(): BaseProvider[] {
return Array.from(this._providers.values());
}

getModelList(): ModelInfo[] {
return this._modelList;
}

async updateModelList(options: {
apiKeys?: Record<string, string>;
providerSettings?: Record<string, IProviderSetting>;
serverEnv?: Record<string, string>;
}): Promise<ModelInfo[]> {
const { apiKeys, providerSettings, serverEnv } = options;

// Get dynamic models from all providers that support them
const dynamicModels = await Promise.all(
Array.from(this._providers.values())
.filter(
(provider): provider is BaseProvider & Required<Pick<ProviderInfo, 'getDynamicModels'>> =>
!!provider.getDynamicModels,
)
.map((provider) =>
provider.getDynamicModels(apiKeys, providerSettings?.[provider.name], serverEnv).catch((err) => {
console.error(`Error getting dynamic models ${provider.name} :`, err);
return [];
}),
),
);

// Combine static and dynamic models
const modelList = [
...dynamicModels.flat(),
...Array.from(this._providers.values()).flatMap((p) => p.staticModels || []),
];
this._modelList = modelList;

return modelList;
}

getDefaultProvider(): BaseProvider {
const firstProvider = this._providers.values().next().value;

if (!firstProvider) {
throw new Error('No providers registered');
}

return firstProvider;
}
}
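A minimal sketch of how the LLMManager added above might be driven, using only the methods introduced in this diff; the API key values, env record, and Ollama settings shown here are illustrative placeholders, not part of the commit:

import { LLMManager } from '~/lib/modules/llm/manager';

// Hypothetical caller; keys and env values are placeholders.
const manager = LLMManager.getInstance({ ANTHROPIC_API_KEY: 'sk-example' });

// Static models are available immediately; dynamic ones (Ollama, LMStudio, OpenRouter)
// are merged in once updateModelList resolves.
const models = await manager.updateModelList({
  apiKeys: { Anthropic: 'sk-example' },
  providerSettings: { Ollama: { enabled: true, baseUrl: 'http://localhost:11434' } },
  serverEnv: {},
});

const provider = manager.getProvider('Anthropic') ?? manager.getDefaultProvider();
const instance = provider.getModelInstance({
  model: 'claude-3-5-sonnet-latest',
  serverEnv: {} as any, // typed as Env in the abstract signature
  apiKeys: { Anthropic: 'sk-example' },
});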
app/lib/modules/llm/providers/anthropic.ts (new file, +58 lines)
@@ -0,0 +1,58 @@
|
||||
import { BaseProvider } from '~/lib/modules/llm/base-provider';
|
||||
import type { ModelInfo } from '~/lib/modules/llm/types';
|
||||
import type { LanguageModelV1 } from 'ai';
|
||||
import type { IProviderSetting } from '~/types/model';
|
||||
import { createAnthropic } from '@ai-sdk/anthropic';
|
||||
|
||||
export default class AnthropicProvider extends BaseProvider {
|
||||
name = 'Anthropic';
|
||||
getApiKeyLink = 'https://console.anthropic.com/settings/keys';
|
||||
|
||||
config = {
|
||||
apiTokenKey: 'ANTHROPIC_API_KEY',
|
||||
};
|
||||
|
||||
staticModels: ModelInfo[] = [
|
||||
{
|
||||
name: 'claude-3-5-sonnet-latest',
|
||||
label: 'Claude 3.5 Sonnet (new)',
|
||||
provider: 'Anthropic',
|
||||
maxTokenAllowed: 8000,
|
||||
},
|
||||
{
|
||||
name: 'claude-3-5-sonnet-20240620',
|
||||
label: 'Claude 3.5 Sonnet (old)',
|
||||
provider: 'Anthropic',
|
||||
maxTokenAllowed: 8000,
|
||||
},
|
||||
{
|
||||
name: 'claude-3-5-haiku-latest',
|
||||
label: 'Claude 3.5 Haiku (new)',
|
||||
provider: 'Anthropic',
|
||||
maxTokenAllowed: 8000,
|
||||
},
|
||||
{ name: 'claude-3-opus-latest', label: 'Claude 3 Opus', provider: 'Anthropic', maxTokenAllowed: 8000 },
|
||||
{ name: 'claude-3-sonnet-20240229', label: 'Claude 3 Sonnet', provider: 'Anthropic', maxTokenAllowed: 8000 },
|
||||
{ name: 'claude-3-haiku-20240307', label: 'Claude 3 Haiku', provider: 'Anthropic', maxTokenAllowed: 8000 },
|
||||
];
|
||||
getModelInstance: (options: {
|
||||
model: string;
|
||||
serverEnv: Env;
|
||||
apiKeys?: Record<string, string>;
|
||||
providerSettings?: Record<string, IProviderSetting>;
|
||||
}) => LanguageModelV1 = (options) => {
|
||||
const { apiKeys, providerSettings, serverEnv, model } = options;
|
||||
const { apiKey } = this.getProviderBaseUrlAndKey({
|
||||
apiKeys,
|
||||
providerSettings,
|
||||
serverEnv: serverEnv as any,
|
||||
defaultBaseUrlKey: '',
|
||||
defaultApiTokenKey: 'ANTHROPIC_API_KEY',
|
||||
});
|
||||
const anthropic = createAnthropic({
|
||||
apiKey,
|
||||
});
|
||||
|
||||
return anthropic(model);
|
||||
};
|
||||
}
|
||||
app/lib/modules/llm/providers/cohere.ts (new file, +54 lines)
@@ -0,0 +1,54 @@
|
||||
import { BaseProvider } from '~/lib/modules/llm/base-provider';
|
||||
import type { ModelInfo } from '~/lib/modules/llm/types';
|
||||
import type { IProviderSetting } from '~/types/model';
|
||||
import type { LanguageModelV1 } from 'ai';
|
||||
import { createCohere } from '@ai-sdk/cohere';
|
||||
|
||||
export default class CohereProvider extends BaseProvider {
|
||||
name = 'Cohere';
|
||||
getApiKeyLink = 'https://dashboard.cohere.com/api-keys';
|
||||
|
||||
config = {
|
||||
apiTokenKey: 'COHERE_API_KEY',
|
||||
};
|
||||
|
||||
staticModels: ModelInfo[] = [
|
||||
{ name: 'command-r-plus-08-2024', label: 'Command R plus Latest', provider: 'Cohere', maxTokenAllowed: 4096 },
|
||||
{ name: 'command-r-08-2024', label: 'Command R Latest', provider: 'Cohere', maxTokenAllowed: 4096 },
|
||||
{ name: 'command-r-plus', label: 'Command R plus', provider: 'Cohere', maxTokenAllowed: 4096 },
|
||||
{ name: 'command-r', label: 'Command R', provider: 'Cohere', maxTokenAllowed: 4096 },
|
||||
{ name: 'command', label: 'Command', provider: 'Cohere', maxTokenAllowed: 4096 },
|
||||
{ name: 'command-nightly', label: 'Command Nightly', provider: 'Cohere', maxTokenAllowed: 4096 },
|
||||
{ name: 'command-light', label: 'Command Light', provider: 'Cohere', maxTokenAllowed: 4096 },
|
||||
{ name: 'command-light-nightly', label: 'Command Light Nightly', provider: 'Cohere', maxTokenAllowed: 4096 },
|
||||
{ name: 'c4ai-aya-expanse-8b', label: 'c4AI Aya Expanse 8b', provider: 'Cohere', maxTokenAllowed: 4096 },
|
||||
{ name: 'c4ai-aya-expanse-32b', label: 'c4AI Aya Expanse 32b', provider: 'Cohere', maxTokenAllowed: 4096 },
|
||||
];
|
||||
|
||||
getModelInstance(options: {
|
||||
model: string;
|
||||
serverEnv: Env;
|
||||
apiKeys?: Record<string, string>;
|
||||
providerSettings?: Record<string, IProviderSetting>;
|
||||
}): LanguageModelV1 {
|
||||
const { model, serverEnv, apiKeys, providerSettings } = options;
|
||||
|
||||
const { apiKey } = this.getProviderBaseUrlAndKey({
|
||||
apiKeys,
|
||||
providerSettings: providerSettings?.[this.name],
|
||||
serverEnv: serverEnv as any,
|
||||
defaultBaseUrlKey: '',
|
||||
defaultApiTokenKey: 'COHERE_API_KEY',
|
||||
});
|
||||
|
||||
if (!apiKey) {
|
||||
throw new Error(`Missing API key for ${this.name} provider`);
|
||||
}
|
||||
|
||||
const cohere = createCohere({
|
||||
apiKey,
|
||||
});
|
||||
|
||||
return cohere(model);
|
||||
}
|
||||
}
|
||||
app/lib/modules/llm/providers/deepseek.ts (new file, +47 lines)
@@ -0,0 +1,47 @@
|
||||
import { BaseProvider } from '~/lib/modules/llm/base-provider';
|
||||
import type { ModelInfo } from '~/lib/modules/llm/types';
|
||||
import type { IProviderSetting } from '~/types/model';
|
||||
import type { LanguageModelV1 } from 'ai';
|
||||
import { createOpenAI } from '@ai-sdk/openai';
|
||||
|
||||
export default class DeepseekProvider extends BaseProvider {
|
||||
name = 'Deepseek';
|
||||
getApiKeyLink = 'https://platform.deepseek.com/apiKeys';
|
||||
|
||||
config = {
|
||||
apiTokenKey: 'DEEPSEEK_API_KEY',
|
||||
};
|
||||
|
||||
staticModels: ModelInfo[] = [
|
||||
{ name: 'deepseek-coder', label: 'Deepseek-Coder', provider: 'Deepseek', maxTokenAllowed: 8000 },
|
||||
{ name: 'deepseek-chat', label: 'Deepseek-Chat', provider: 'Deepseek', maxTokenAllowed: 8000 },
|
||||
];
|
||||
|
||||
getModelInstance(options: {
|
||||
model: string;
|
||||
serverEnv: Env;
|
||||
apiKeys?: Record<string, string>;
|
||||
providerSettings?: Record<string, IProviderSetting>;
|
||||
}): LanguageModelV1 {
|
||||
const { model, serverEnv, apiKeys, providerSettings } = options;
|
||||
|
||||
const { apiKey } = this.getProviderBaseUrlAndKey({
|
||||
apiKeys,
|
||||
providerSettings: providerSettings?.[this.name],
|
||||
serverEnv: serverEnv as any,
|
||||
defaultBaseUrlKey: '',
|
||||
defaultApiTokenKey: 'DEEPSEEK_API_KEY',
|
||||
});
|
||||
|
||||
if (!apiKey) {
|
||||
throw new Error(`Missing API key for ${this.name} provider`);
|
||||
}
|
||||
|
||||
const openai = createOpenAI({
|
||||
baseURL: 'https://api.deepseek.com/beta',
|
||||
apiKey,
|
||||
});
|
||||
|
||||
return openai(model);
|
||||
}
|
||||
}
|
||||
app/lib/modules/llm/providers/google.ts (new file, +51 lines)
@@ -0,0 +1,51 @@
|
||||
import { BaseProvider } from '~/lib/modules/llm/base-provider';
|
||||
import type { ModelInfo } from '~/lib/modules/llm/types';
|
||||
import type { IProviderSetting } from '~/types/model';
|
||||
import type { LanguageModelV1 } from 'ai';
|
||||
import { createGoogleGenerativeAI } from '@ai-sdk/google';
|
||||
|
||||
export default class GoogleProvider extends BaseProvider {
|
||||
name = 'Google';
|
||||
getApiKeyLink = 'https://aistudio.google.com/app/apikey';
|
||||
|
||||
config = {
|
||||
apiTokenKey: 'GOOGLE_GENERATIVE_AI_API_KEY',
|
||||
};
|
||||
|
||||
staticModels: ModelInfo[] = [
|
||||
{ name: 'gemini-1.5-flash-latest', label: 'Gemini 1.5 Flash', provider: 'Google', maxTokenAllowed: 8192 },
|
||||
{ name: 'gemini-2.0-flash-exp', label: 'Gemini 2.0 Flash', provider: 'Google', maxTokenAllowed: 8192 },
|
||||
{ name: 'gemini-1.5-flash-002', label: 'Gemini 1.5 Flash-002', provider: 'Google', maxTokenAllowed: 8192 },
|
||||
{ name: 'gemini-1.5-flash-8b', label: 'Gemini 1.5 Flash-8b', provider: 'Google', maxTokenAllowed: 8192 },
|
||||
{ name: 'gemini-1.5-pro-latest', label: 'Gemini 1.5 Pro', provider: 'Google', maxTokenAllowed: 8192 },
|
||||
{ name: 'gemini-1.5-pro-002', label: 'Gemini 1.5 Pro-002', provider: 'Google', maxTokenAllowed: 8192 },
|
||||
{ name: 'gemini-exp-1206', label: 'Gemini exp-1206', provider: 'Google', maxTokenAllowed: 8192 },
|
||||
];
|
||||
|
||||
getModelInstance(options: {
|
||||
model: string;
|
||||
serverEnv: any;
|
||||
apiKeys?: Record<string, string>;
|
||||
providerSettings?: Record<string, IProviderSetting>;
|
||||
}): LanguageModelV1 {
|
||||
const { model, serverEnv, apiKeys, providerSettings } = options;
|
||||
|
||||
const { apiKey } = this.getProviderBaseUrlAndKey({
|
||||
apiKeys,
|
||||
providerSettings: providerSettings?.[this.name],
|
||||
serverEnv: serverEnv as any,
|
||||
defaultBaseUrlKey: '',
|
||||
defaultApiTokenKey: 'GOOGLE_GENERATIVE_AI_API_KEY',
|
||||
});
|
||||
|
||||
if (!apiKey) {
|
||||
throw new Error(`Missing API key for ${this.name} provider`);
|
||||
}
|
||||
|
||||
const google = createGoogleGenerativeAI({
|
||||
apiKey,
|
||||
});
|
||||
|
||||
return google(model);
|
||||
}
|
||||
}
|
||||
app/lib/modules/llm/providers/groq.ts (new file, +51 lines)
@@ -0,0 +1,51 @@
|
||||
import { BaseProvider } from '~/lib/modules/llm/base-provider';
|
||||
import type { ModelInfo } from '~/lib/modules/llm/types';
|
||||
import type { IProviderSetting } from '~/types/model';
|
||||
import type { LanguageModelV1 } from 'ai';
|
||||
import { createOpenAI } from '@ai-sdk/openai';
|
||||
|
||||
export default class GroqProvider extends BaseProvider {
|
||||
name = 'Groq';
|
||||
getApiKeyLink = 'https://console.groq.com/keys';
|
||||
|
||||
config = {
|
||||
apiTokenKey: 'GROQ_API_KEY',
|
||||
};
|
||||
|
||||
staticModels: ModelInfo[] = [
|
||||
{ name: 'llama-3.1-8b-instant', label: 'Llama 3.1 8b (Groq)', provider: 'Groq', maxTokenAllowed: 8000 },
|
||||
{ name: 'llama-3.2-11b-vision-preview', label: 'Llama 3.2 11b (Groq)', provider: 'Groq', maxTokenAllowed: 8000 },
|
||||
{ name: 'llama-3.2-90b-vision-preview', label: 'Llama 3.2 90b (Groq)', provider: 'Groq', maxTokenAllowed: 8000 },
|
||||
{ name: 'llama-3.2-3b-preview', label: 'Llama 3.2 3b (Groq)', provider: 'Groq', maxTokenAllowed: 8000 },
|
||||
{ name: 'llama-3.2-1b-preview', label: 'Llama 3.2 1b (Groq)', provider: 'Groq', maxTokenAllowed: 8000 },
|
||||
{ name: 'llama-3.3-70b-versatile', label: 'Llama 3.3 70b (Groq)', provider: 'Groq', maxTokenAllowed: 8000 },
|
||||
];
|
||||
|
||||
getModelInstance(options: {
|
||||
model: string;
|
||||
serverEnv: Env;
|
||||
apiKeys?: Record<string, string>;
|
||||
providerSettings?: Record<string, IProviderSetting>;
|
||||
}): LanguageModelV1 {
|
||||
const { model, serverEnv, apiKeys, providerSettings } = options;
|
||||
|
||||
const { apiKey } = this.getProviderBaseUrlAndKey({
|
||||
apiKeys,
|
||||
providerSettings: providerSettings?.[this.name],
|
||||
serverEnv: serverEnv as any,
|
||||
defaultBaseUrlKey: '',
|
||||
defaultApiTokenKey: 'GROQ_API_KEY',
|
||||
});
|
||||
|
||||
if (!apiKey) {
|
||||
throw new Error(`Missing API key for ${this.name} provider`);
|
||||
}
|
||||
|
||||
const openai = createOpenAI({
|
||||
baseURL: 'https://api.groq.com/openai/v1',
|
||||
apiKey,
|
||||
});
|
||||
|
||||
return openai(model);
|
||||
}
|
||||
}
|
||||
app/lib/modules/llm/providers/huggingface.ts (new file, +69 lines)
@@ -0,0 +1,69 @@
|
||||
import { BaseProvider } from '~/lib/modules/llm/base-provider';
|
||||
import type { ModelInfo } from '~/lib/modules/llm/types';
|
||||
import type { IProviderSetting } from '~/types/model';
|
||||
import type { LanguageModelV1 } from 'ai';
|
||||
import { createOpenAI } from '@ai-sdk/openai';
|
||||
|
||||
export default class HuggingFaceProvider extends BaseProvider {
|
||||
name = 'HuggingFace';
|
||||
getApiKeyLink = 'https://huggingface.co/settings/tokens';
|
||||
|
||||
config = {
|
||||
apiTokenKey: 'HuggingFace_API_KEY',
|
||||
};
|
||||
|
||||
staticModels: ModelInfo[] = [
|
||||
{
|
||||
name: 'Qwen/Qwen2.5-Coder-32B-Instruct',
|
||||
label: 'Qwen2.5-Coder-32B-Instruct (HuggingFace)',
|
||||
provider: 'HuggingFace',
|
||||
maxTokenAllowed: 8000,
|
||||
},
|
||||
{
|
||||
name: '01-ai/Yi-1.5-34B-Chat',
|
||||
label: 'Yi-1.5-34B-Chat (HuggingFace)',
|
||||
provider: 'HuggingFace',
|
||||
maxTokenAllowed: 8000,
|
||||
},
|
||||
{
|
||||
name: 'meta-llama/Llama-3.1-70B-Instruct',
|
||||
label: 'Llama-3.1-70B-Instruct (HuggingFace)',
|
||||
provider: 'HuggingFace',
|
||||
maxTokenAllowed: 8000,
|
||||
},
|
||||
{
|
||||
name: 'meta-llama/Llama-3.1-405B',
|
||||
label: 'Llama-3.1-405B (HuggingFace)',
|
||||
provider: 'HuggingFace',
|
||||
maxTokenAllowed: 8000,
|
||||
},
|
||||
];
|
||||
|
||||
getModelInstance(options: {
|
||||
model: string;
|
||||
serverEnv: Env;
|
||||
apiKeys?: Record<string, string>;
|
||||
providerSettings?: Record<string, IProviderSetting>;
|
||||
}): LanguageModelV1 {
|
||||
const { model, serverEnv, apiKeys, providerSettings } = options;
|
||||
|
||||
const { apiKey } = this.getProviderBaseUrlAndKey({
|
||||
apiKeys,
|
||||
providerSettings: providerSettings?.[this.name],
|
||||
serverEnv: serverEnv as any,
|
||||
defaultBaseUrlKey: '',
|
||||
defaultApiTokenKey: 'HuggingFace_API_KEY',
|
||||
});
|
||||
|
||||
if (!apiKey) {
|
||||
throw new Error(`Missing API key for ${this.name} provider`);
|
||||
}
|
||||
|
||||
const openai = createOpenAI({
|
||||
baseURL: 'https://api-inference.huggingface.co/v1/',
|
||||
apiKey,
|
||||
});
|
||||
|
||||
return openai(model);
|
||||
}
|
||||
}
|
||||
app/lib/modules/llm/providers/lmstudio.ts (new file, +73 lines)
@@ -0,0 +1,73 @@
|
||||
import { BaseProvider } from '~/lib/modules/llm/base-provider';
|
||||
import type { ModelInfo } from '~/lib/modules/llm/types';
|
||||
import type { IProviderSetting } from '~/types/model';
|
||||
import { createOpenAI } from '@ai-sdk/openai';
|
||||
import type { LanguageModelV1 } from 'ai';
|
||||
|
||||
export default class LMStudioProvider extends BaseProvider {
|
||||
name = 'LMStudio';
|
||||
getApiKeyLink = 'https://lmstudio.ai/';
|
||||
labelForGetApiKey = 'Get LMStudio';
|
||||
icon = 'i-ph:cloud-arrow-down';
|
||||
|
||||
config = {
|
||||
baseUrlKey: 'LMSTUDIO_API_BASE_URL',
|
||||
};
|
||||
|
||||
staticModels: ModelInfo[] = [];
|
||||
|
||||
async getDynamicModels(
|
||||
apiKeys?: Record<string, string>,
|
||||
settings?: IProviderSetting,
|
||||
serverEnv: Record<string, string> = {},
|
||||
): Promise<ModelInfo[]> {
|
||||
try {
|
||||
const { baseUrl } = this.getProviderBaseUrlAndKey({
|
||||
apiKeys,
|
||||
providerSettings: settings,
|
||||
serverEnv,
|
||||
defaultBaseUrlKey: 'LMSTUDIO_API_BASE_URL',
|
||||
defaultApiTokenKey: '',
|
||||
});
|
||||
|
||||
if (!baseUrl) {
|
||||
return [];
|
||||
}
|
||||
|
||||
const response = await fetch(`${baseUrl}/v1/models`);
|
||||
const data = (await response.json()) as { data: Array<{ id: string }> };
|
||||
|
||||
return data.data.map((model) => ({
|
||||
name: model.id,
|
||||
label: model.id,
|
||||
provider: this.name,
|
||||
maxTokenAllowed: 8000,
|
||||
}));
|
||||
} catch (error: any) {
|
||||
console.log('Error getting LMStudio models:', error.message);
|
||||
|
||||
return [];
|
||||
}
|
||||
}
|
||||
getModelInstance: (options: {
|
||||
model: string;
|
||||
serverEnv: Env;
|
||||
apiKeys?: Record<string, string>;
|
||||
providerSettings?: Record<string, IProviderSetting>;
|
||||
}) => LanguageModelV1 = (options) => {
|
||||
const { apiKeys, providerSettings, serverEnv, model } = options;
|
||||
const { baseUrl } = this.getProviderBaseUrlAndKey({
|
||||
apiKeys,
|
||||
providerSettings,
|
||||
serverEnv: serverEnv as any,
|
||||
defaultBaseUrlKey: 'OLLAMA_API_BASE_URL',
|
||||
defaultApiTokenKey: '',
|
||||
});
|
||||
const lmstudio = createOpenAI({
|
||||
baseUrl: `${baseUrl}/v1`,
|
||||
apiKey: '',
|
||||
});
|
||||
|
||||
return lmstudio(model);
|
||||
};
|
||||
}
|
||||
app/lib/modules/llm/providers/mistral.ts (new file, +53 lines)
@@ -0,0 +1,53 @@
|
||||
import { BaseProvider } from '~/lib/modules/llm/base-provider';
|
||||
import type { ModelInfo } from '~/lib/modules/llm/types';
|
||||
import type { IProviderSetting } from '~/types/model';
|
||||
import type { LanguageModelV1 } from 'ai';
|
||||
import { createMistral } from '@ai-sdk/mistral';
|
||||
|
||||
export default class MistralProvider extends BaseProvider {
|
||||
name = 'Mistral';
|
||||
getApiKeyLink = 'https://console.mistral.ai/api-keys/';
|
||||
|
||||
config = {
|
||||
apiTokenKey: 'MISTRAL_API_KEY',
|
||||
};
|
||||
|
||||
staticModels: ModelInfo[] = [
|
||||
{ name: 'open-mistral-7b', label: 'Mistral 7B', provider: 'Mistral', maxTokenAllowed: 8000 },
|
||||
{ name: 'open-mixtral-8x7b', label: 'Mistral 8x7B', provider: 'Mistral', maxTokenAllowed: 8000 },
|
||||
{ name: 'open-mixtral-8x22b', label: 'Mistral 8x22B', provider: 'Mistral', maxTokenAllowed: 8000 },
|
||||
{ name: 'open-codestral-mamba', label: 'Codestral Mamba', provider: 'Mistral', maxTokenAllowed: 8000 },
|
||||
{ name: 'open-mistral-nemo', label: 'Mistral Nemo', provider: 'Mistral', maxTokenAllowed: 8000 },
|
||||
{ name: 'ministral-8b-latest', label: 'Mistral 8B', provider: 'Mistral', maxTokenAllowed: 8000 },
|
||||
{ name: 'mistral-small-latest', label: 'Mistral Small', provider: 'Mistral', maxTokenAllowed: 8000 },
|
||||
{ name: 'codestral-latest', label: 'Codestral', provider: 'Mistral', maxTokenAllowed: 8000 },
|
||||
{ name: 'mistral-large-latest', label: 'Mistral Large Latest', provider: 'Mistral', maxTokenAllowed: 8000 },
|
||||
];
|
||||
|
||||
getModelInstance(options: {
|
||||
model: string;
|
||||
serverEnv: Env;
|
||||
apiKeys?: Record<string, string>;
|
||||
providerSettings?: Record<string, IProviderSetting>;
|
||||
}): LanguageModelV1 {
|
||||
const { model, serverEnv, apiKeys, providerSettings } = options;
|
||||
|
||||
const { apiKey } = this.getProviderBaseUrlAndKey({
|
||||
apiKeys,
|
||||
providerSettings: providerSettings?.[this.name],
|
||||
serverEnv: serverEnv as any,
|
||||
defaultBaseUrlKey: '',
|
||||
defaultApiTokenKey: 'MISTRAL_API_KEY',
|
||||
});
|
||||
|
||||
if (!apiKey) {
|
||||
throw new Error(`Missing API key for ${this.name} provider`);
|
||||
}
|
||||
|
||||
const mistral = createMistral({
|
||||
apiKey,
|
||||
});
|
||||
|
||||
return mistral(model);
|
||||
}
|
||||
}
|
||||
app/lib/modules/llm/providers/ollama.ts (new file, +99 lines)
@@ -0,0 +1,99 @@
|
||||
import { BaseProvider } from '~/lib/modules/llm/base-provider';
|
||||
import type { ModelInfo } from '~/lib/modules/llm/types';
|
||||
import type { IProviderSetting } from '~/types/model';
|
||||
import type { LanguageModelV1 } from 'ai';
|
||||
import { ollama } from 'ollama-ai-provider';
|
||||
|
||||
interface OllamaModelDetails {
|
||||
parent_model: string;
|
||||
format: string;
|
||||
family: string;
|
||||
families: string[];
|
||||
parameter_size: string;
|
||||
quantization_level: string;
|
||||
}
|
||||
|
||||
export interface OllamaModel {
|
||||
name: string;
|
||||
model: string;
|
||||
modified_at: string;
|
||||
size: number;
|
||||
digest: string;
|
||||
details: OllamaModelDetails;
|
||||
}
|
||||
|
||||
export interface OllamaApiResponse {
|
||||
models: OllamaModel[];
|
||||
}
|
||||
|
||||
export const DEFAULT_NUM_CTX = process?.env?.DEFAULT_NUM_CTX ? parseInt(process.env.DEFAULT_NUM_CTX, 10) : 32768;
|
||||
|
||||
export default class OllamaProvider extends BaseProvider {
|
||||
name = 'Ollama';
|
||||
getApiKeyLink = 'https://ollama.com/download';
|
||||
labelForGetApiKey = 'Download Ollama';
|
||||
icon = 'i-ph:cloud-arrow-down';
|
||||
|
||||
config = {
|
||||
baseUrlKey: 'OLLAMA_API_BASE_URL',
|
||||
};
|
||||
|
||||
staticModels: ModelInfo[] = [];
|
||||
|
||||
async getDynamicModels(
|
||||
apiKeys?: Record<string, string>,
|
||||
settings?: IProviderSetting,
|
||||
serverEnv: Record<string, string> = {},
|
||||
): Promise<ModelInfo[]> {
|
||||
try {
|
||||
const { baseUrl } = this.getProviderBaseUrlAndKey({
|
||||
apiKeys,
|
||||
providerSettings: settings,
|
||||
serverEnv,
|
||||
defaultBaseUrlKey: 'OLLAMA_API_BASE_URL',
|
||||
defaultApiTokenKey: '',
|
||||
});
|
||||
|
||||
if (!baseUrl) {
|
||||
return [];
|
||||
}
|
||||
|
||||
const response = await fetch(`${baseUrl}/api/tags`);
|
||||
const data = (await response.json()) as OllamaApiResponse;
|
||||
|
||||
// console.log({ ollamamodels: data.models });
|
||||
|
||||
return data.models.map((model: OllamaModel) => ({
|
||||
name: model.name,
|
||||
label: `${model.name} (${model.details.parameter_size})`,
|
||||
provider: this.name,
|
||||
maxTokenAllowed: 8000,
|
||||
}));
|
||||
} catch (e) {
|
||||
console.error('Failed to get Ollama models:', e);
|
||||
return [];
|
||||
}
|
||||
}
|
||||
getModelInstance: (options: {
|
||||
model: string;
|
||||
serverEnv: Env;
|
||||
apiKeys?: Record<string, string>;
|
||||
providerSettings?: Record<string, IProviderSetting>;
|
||||
}) => LanguageModelV1 = (options) => {
|
||||
const { apiKeys, providerSettings, serverEnv, model } = options;
|
||||
const { baseUrl } = this.getProviderBaseUrlAndKey({
|
||||
apiKeys,
|
||||
providerSettings,
|
||||
serverEnv: serverEnv as any,
|
||||
defaultBaseUrlKey: 'OLLAMA_API_BASE_URL',
|
||||
defaultApiTokenKey: '',
|
||||
});
|
||||
const ollamaInstance = ollama(model, {
|
||||
numCtx: DEFAULT_NUM_CTX,
|
||||
}) as LanguageModelV1 & { config: any };
|
||||
|
||||
ollamaInstance.config.baseURL = `${baseUrl}/api`;
|
||||
|
||||
return ollamaInstance;
|
||||
};
|
||||
}
|
||||
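A short sketch of how the Ollama provider above can be driven: dynamic models come from GET `${baseUrl}/api/tags`, and getModelInstance pins numCtx to DEFAULT_NUM_CTX and rewrites the instance baseURL to `${baseUrl}/api`. The base URL value and the fallback model tag are assumptions, not part of this diff.

import OllamaProvider from '~/lib/modules/llm/providers/ollama';

async function pickLocalOllamaModel() {
  const ollamaProvider = new OllamaProvider();

  // Dynamic models are fetched from ${baseUrl}/api/tags, as implemented above.
  const models = await ollamaProvider.getDynamicModels(undefined, undefined, {
    OLLAMA_API_BASE_URL: 'http://localhost:11434', // assumption: Ollama's usual local address
  });

  return ollamaProvider.getModelInstance({
    model: models[0]?.name ?? 'llama3.1', // 'llama3.1' is a placeholder tag
    serverEnv: { OLLAMA_API_BASE_URL: 'http://localhost:11434' } as unknown as Env,
  });
}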
app/lib/modules/llm/providers/open-router.ts (new file, 132 lines)
@@ -0,0 +1,132 @@
|
||||
import { BaseProvider } from '~/lib/modules/llm/base-provider';
|
||||
import type { ModelInfo } from '~/lib/modules/llm/types';
|
||||
import type { IProviderSetting } from '~/types/model';
|
||||
import type { LanguageModelV1 } from 'ai';
|
||||
import { createOpenRouter } from '@openrouter/ai-sdk-provider';
|
||||
|
||||
interface OpenRouterModel {
|
||||
name: string;
|
||||
id: string;
|
||||
context_length: number;
|
||||
pricing: {
|
||||
prompt: number;
|
||||
completion: number;
|
||||
};
|
||||
}
|
||||
|
||||
interface OpenRouterModelsResponse {
|
||||
data: OpenRouterModel[];
|
||||
}
|
||||
|
||||
export default class OpenRouterProvider extends BaseProvider {
|
||||
name = 'OpenRouter';
|
||||
getApiKeyLink = 'https://openrouter.ai/settings/keys';
|
||||
|
||||
config = {
|
||||
apiTokenKey: 'OPEN_ROUTER_API_KEY',
|
||||
};
|
||||
|
||||
staticModels: ModelInfo[] = [
|
||||
{ name: 'gpt-4o', label: 'GPT-4o', provider: 'OpenAI', maxTokenAllowed: 8000 },
|
||||
{
|
||||
name: 'anthropic/claude-3.5-sonnet',
|
||||
label: 'Anthropic: Claude 3.5 Sonnet (OpenRouter)',
|
||||
provider: 'OpenRouter',
|
||||
maxTokenAllowed: 8000,
|
||||
},
|
||||
{
|
||||
name: 'anthropic/claude-3-haiku',
|
||||
label: 'Anthropic: Claude 3 Haiku (OpenRouter)',
|
||||
provider: 'OpenRouter',
|
||||
maxTokenAllowed: 8000,
|
||||
},
|
||||
{
|
||||
name: 'deepseek/deepseek-coder',
|
||||
label: 'Deepseek-Coder V2 236B (OpenRouter)',
|
||||
provider: 'OpenRouter',
|
||||
maxTokenAllowed: 8000,
|
||||
},
|
||||
{
|
||||
name: 'google/gemini-flash-1.5',
|
||||
label: 'Google Gemini Flash 1.5 (OpenRouter)',
|
||||
provider: 'OpenRouter',
|
||||
maxTokenAllowed: 8000,
|
||||
},
|
||||
{
|
||||
name: 'google/gemini-pro-1.5',
|
||||
label: 'Google Gemini Pro 1.5 (OpenRouter)',
|
||||
provider: 'OpenRouter',
|
||||
maxTokenAllowed: 8000,
|
||||
},
|
||||
{ name: 'x-ai/grok-beta', label: 'xAI Grok Beta (OpenRouter)', provider: 'OpenRouter', maxTokenAllowed: 8000 },
|
||||
{
|
||||
name: 'mistralai/mistral-nemo',
|
||||
label: 'OpenRouter Mistral Nemo (OpenRouter)',
|
||||
provider: 'OpenRouter',
|
||||
maxTokenAllowed: 8000,
|
||||
},
|
||||
{
|
||||
name: 'qwen/qwen-110b-chat',
|
||||
label: 'OpenRouter Qwen 110b Chat (OpenRouter)',
|
||||
provider: 'OpenRouter',
|
||||
maxTokenAllowed: 8000,
|
||||
},
|
||||
{ name: 'cohere/command', label: 'Cohere Command (OpenRouter)', provider: 'OpenRouter', maxTokenAllowed: 4096 },
|
||||
];
|
||||
|
||||
async getDynamicModels(
|
||||
_apiKeys?: Record<string, string>,
|
||||
_settings?: IProviderSetting,
|
||||
_serverEnv: Record<string, string> = {},
|
||||
): Promise<ModelInfo[]> {
|
||||
try {
|
||||
const response = await fetch('https://openrouter.ai/api/v1/models', {
|
||||
headers: {
|
||||
'Content-Type': 'application/json',
|
||||
},
|
||||
});
|
||||
|
||||
const data = (await response.json()) as OpenRouterModelsResponse;
|
||||
|
||||
return data.data
|
||||
.sort((a, b) => a.name.localeCompare(b.name))
|
||||
.map((m) => ({
|
||||
name: m.id,
|
||||
label: `${m.name} - in:$${(m.pricing.prompt * 1_000_000).toFixed(2)} out:$${(m.pricing.completion * 1_000_000).toFixed(2)} - context ${Math.floor(m.context_length / 1000)}k`,
|
||||
provider: this.name,
|
||||
maxTokenAllowed: 8000,
|
||||
}));
|
||||
} catch (error) {
|
||||
console.error('Error getting OpenRouter models:', error);
|
||||
return [];
|
||||
}
|
||||
}
|
||||
|
||||
getModelInstance(options: {
|
||||
model: string;
|
||||
serverEnv: Env;
|
||||
apiKeys?: Record<string, string>;
|
||||
providerSettings?: Record<string, IProviderSetting>;
|
||||
}): LanguageModelV1 {
|
||||
const { model, serverEnv, apiKeys, providerSettings } = options;
|
||||
|
||||
const { apiKey } = this.getProviderBaseUrlAndKey({
|
||||
apiKeys,
|
||||
providerSettings: providerSettings?.[this.name],
|
||||
serverEnv: serverEnv as any,
|
||||
defaultBaseUrlKey: '',
|
||||
defaultApiTokenKey: 'OPEN_ROUTER_API_KEY',
|
||||
});
|
||||
|
||||
if (!apiKey) {
|
||||
throw new Error(`Missing API key for ${this.name} provider`);
|
||||
}
|
||||
|
||||
const openRouter = createOpenRouter({
|
||||
apiKey,
|
||||
});
|
||||
const instance = openRouter.chat(model) as LanguageModelV1;
|
||||
|
||||
return instance;
|
||||
}
|
||||
}
|
||||
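A note on the label math in getDynamicModels above: assuming OpenRouter reports per-token prices, multiplying by 1,000,000 turns them into a $/1M-token figure for the dropdown label. A small helper equivalent to that formatting:

function formatOpenRouterLabel(m: {
  name: string;
  context_length: number;
  pricing: { prompt: number; completion: number };
}): string {
  // per-token price * 1,000,000 = price per million tokens
  const inPrice = (m.pricing.prompt * 1_000_000).toFixed(2);
  const outPrice = (m.pricing.completion * 1_000_000).toFixed(2);
  return `${m.name} - in:$${inPrice} out:$${outPrice} - context ${Math.floor(m.context_length / 1000)}k`;
}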
app/lib/modules/llm/providers/openai-like.ts (new file, 77 lines)
@@ -0,0 +1,77 @@
|
||||
import { BaseProvider, getOpenAILikeModel } from '~/lib/modules/llm/base-provider';
|
||||
import type { ModelInfo } from '~/lib/modules/llm/types';
|
||||
import type { IProviderSetting } from '~/types/model';
|
||||
import type { LanguageModelV1 } from 'ai';
|
||||
|
||||
export default class OpenAILikeProvider extends BaseProvider {
|
||||
name = 'OpenAILike';
|
||||
getApiKeyLink = undefined;
|
||||
|
||||
config = {
|
||||
baseUrlKey: 'OPENAI_LIKE_API_BASE_URL',
|
||||
apiTokenKey: 'OPENAI_LIKE_API_KEY',
|
||||
};
|
||||
|
||||
staticModels: ModelInfo[] = [];
|
||||
|
||||
async getDynamicModels(
|
||||
apiKeys?: Record<string, string>,
|
||||
settings?: IProviderSetting,
|
||||
serverEnv: Record<string, string> = {},
|
||||
): Promise<ModelInfo[]> {
|
||||
try {
|
||||
const { baseUrl, apiKey } = this.getProviderBaseUrlAndKey({
|
||||
apiKeys,
|
||||
providerSettings: settings,
|
||||
serverEnv,
|
||||
defaultBaseUrlKey: 'OPENAI_LIKE_API_BASE_URL',
|
||||
defaultApiTokenKey: 'OPENAI_LIKE_API_KEY',
|
||||
});
|
||||
|
||||
if (!baseUrl || !apiKey) {
|
||||
return [];
|
||||
}
|
||||
|
||||
const response = await fetch(`${baseUrl}/models`, {
|
||||
headers: {
|
||||
Authorization: `Bearer ${apiKey}`,
|
||||
},
|
||||
});
|
||||
|
||||
const res = (await response.json()) as any;
|
||||
|
||||
return res.data.map((model: any) => ({
|
||||
name: model.id,
|
||||
label: model.id,
|
||||
provider: this.name,
|
||||
maxTokenAllowed: 8000,
|
||||
}));
|
||||
} catch (error) {
|
||||
console.error('Error getting OpenAILike models:', error);
|
||||
return [];
|
||||
}
|
||||
}
|
||||
|
||||
getModelInstance(options: {
|
||||
model: string;
|
||||
serverEnv: Env;
|
||||
apiKeys?: Record<string, string>;
|
||||
providerSettings?: Record<string, IProviderSetting>;
|
||||
}): LanguageModelV1 {
|
||||
const { model, serverEnv, apiKeys, providerSettings } = options;
|
||||
|
||||
const { baseUrl, apiKey } = this.getProviderBaseUrlAndKey({
|
||||
apiKeys,
|
||||
providerSettings: providerSettings?.[this.name],
|
||||
serverEnv: serverEnv as any,
|
||||
defaultBaseUrlKey: 'OPENAI_LIKE_API_BASE_URL',
|
||||
defaultApiTokenKey: 'OPENAI_LIKE_API_KEY',
|
||||
});
|
||||
|
||||
if (!baseUrl || !apiKey) {
|
||||
throw new Error(`Missing configuration for ${this.name} provider`);
|
||||
}
|
||||
|
||||
return getOpenAILikeModel(baseUrl, apiKey, model);
|
||||
}
|
||||
}
|
||||
app/lib/modules/llm/providers/openai.ts (new file, 48 lines)
@@ -0,0 +1,48 @@
import { BaseProvider } from '~/lib/modules/llm/base-provider';
import type { ModelInfo } from '~/lib/modules/llm/types';
import type { IProviderSetting } from '~/types/model';
import type { LanguageModelV1 } from 'ai';
import { createOpenAI } from '@ai-sdk/openai';

export default class OpenAIProvider extends BaseProvider {
  name = 'OpenAI';
  getApiKeyLink = 'https://platform.openai.com/api-keys';

  config = {
    apiTokenKey: 'OPENAI_API_KEY',
  };

  staticModels: ModelInfo[] = [
    { name: 'gpt-4o-mini', label: 'GPT-4o Mini', provider: 'OpenAI', maxTokenAllowed: 8000 },
    { name: 'gpt-4-turbo', label: 'GPT-4 Turbo', provider: 'OpenAI', maxTokenAllowed: 8000 },
    { name: 'gpt-4', label: 'GPT-4', provider: 'OpenAI', maxTokenAllowed: 8000 },
    { name: 'gpt-3.5-turbo', label: 'GPT-3.5 Turbo', provider: 'OpenAI', maxTokenAllowed: 8000 },
  ];

  getModelInstance(options: {
    model: string;
    serverEnv: Env;
    apiKeys?: Record<string, string>;
    providerSettings?: Record<string, IProviderSetting>;
  }): LanguageModelV1 {
    const { model, serverEnv, apiKeys, providerSettings } = options;

    const { apiKey } = this.getProviderBaseUrlAndKey({
      apiKeys,
      providerSettings: providerSettings?.[this.name],
      serverEnv: serverEnv as any,
      defaultBaseUrlKey: '',
      defaultApiTokenKey: 'OPENAI_API_KEY',
    });

    if (!apiKey) {
      throw new Error(`Missing API key for ${this.name} provider`);
    }

    const openai = createOpenAI({
      apiKey,
    });

    return openai(model);
  }
}
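The helper getProviderBaseUrlAndKey is defined in base-provider.ts, which is not part of this hunk; judging only from the arguments passed above, it presumably prefers the user-entered key for the provider and falls back to the named server-side variable. A rough sketch of that assumed lookup, with all names illustrative:

function resolveApiKey(
  providerName: string,
  tokenEnvKey: string,
  apiKeys?: Record<string, string>,
  serverEnv?: Record<string, string>,
): string | undefined {
  // assumed precedence: cookie-supplied key first, then the named env variable
  return apiKeys?.[providerName] ?? serverEnv?.[tokenEnvKey];
}

// e.g. resolveApiKey('OpenAI', 'OPENAI_API_KEY', parsedCookieKeys, env)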
app/lib/modules/llm/providers/perplexity.ts (new file, 63 lines)
@@ -0,0 +1,63 @@
import { BaseProvider } from '~/lib/modules/llm/base-provider';
import type { ModelInfo } from '~/lib/modules/llm/types';
import type { IProviderSetting } from '~/types/model';
import type { LanguageModelV1 } from 'ai';
import { createOpenAI } from '@ai-sdk/openai';

export default class PerplexityProvider extends BaseProvider {
  name = 'Perplexity';
  getApiKeyLink = 'https://www.perplexity.ai/settings/api';

  config = {
    apiTokenKey: 'PERPLEXITY_API_KEY',
  };

  staticModels: ModelInfo[] = [
    {
      name: 'llama-3.1-sonar-small-128k-online',
      label: 'Sonar Small Online',
      provider: 'Perplexity',
      maxTokenAllowed: 8192,
    },
    {
      name: 'llama-3.1-sonar-large-128k-online',
      label: 'Sonar Large Online',
      provider: 'Perplexity',
      maxTokenAllowed: 8192,
    },
    {
      name: 'llama-3.1-sonar-huge-128k-online',
      label: 'Sonar Huge Online',
      provider: 'Perplexity',
      maxTokenAllowed: 8192,
    },
  ];

  getModelInstance(options: {
    model: string;
    serverEnv: Env;
    apiKeys?: Record<string, string>;
    providerSettings?: Record<string, IProviderSetting>;
  }): LanguageModelV1 {
    const { model, serverEnv, apiKeys, providerSettings } = options;

    const { apiKey } = this.getProviderBaseUrlAndKey({
      apiKeys,
      providerSettings: providerSettings?.[this.name],
      serverEnv: serverEnv as any,
      defaultBaseUrlKey: '',
      defaultApiTokenKey: 'PERPLEXITY_API_KEY',
    });

    if (!apiKey) {
      throw new Error(`Missing API key for ${this.name} provider`);
    }

    const perplexity = createOpenAI({
      baseURL: 'https://api.perplexity.ai/',
      apiKey,
    });

    return perplexity(model);
  }
}
app/lib/modules/llm/providers/together.ts (new file, 100 lines)
@@ -0,0 +1,100 @@
|
||||
import { BaseProvider, getOpenAILikeModel } from '~/lib/modules/llm/base-provider';
|
||||
import type { ModelInfo } from '~/lib/modules/llm/types';
|
||||
import type { IProviderSetting } from '~/types/model';
|
||||
import type { LanguageModelV1 } from 'ai';
|
||||
|
||||
export default class TogetherProvider extends BaseProvider {
|
||||
name = 'Together';
|
||||
getApiKeyLink = 'https://api.together.xyz/settings/api-keys';
|
||||
|
||||
config = {
|
||||
baseUrlKey: 'TOGETHER_API_BASE_URL',
|
||||
apiTokenKey: 'TOGETHER_API_KEY',
|
||||
};
|
||||
|
||||
staticModels: ModelInfo[] = [
|
||||
{
|
||||
name: 'Qwen/Qwen2.5-Coder-32B-Instruct',
|
||||
label: 'Qwen/Qwen2.5-Coder-32B-Instruct',
|
||||
provider: 'Together',
|
||||
maxTokenAllowed: 8000,
|
||||
},
|
||||
{
|
||||
name: 'meta-llama/Llama-3.2-90B-Vision-Instruct-Turbo',
|
||||
label: 'meta-llama/Llama-3.2-90B-Vision-Instruct-Turbo',
|
||||
provider: 'Together',
|
||||
maxTokenAllowed: 8000,
|
||||
},
|
||||
{
|
||||
name: 'mistralai/Mixtral-8x7B-Instruct-v0.1',
|
||||
label: 'Mixtral 8x7B Instruct',
|
||||
provider: 'Together',
|
||||
maxTokenAllowed: 8192,
|
||||
},
|
||||
];
|
||||
|
||||
async getDynamicModels(
|
||||
apiKeys?: Record<string, string>,
|
||||
settings?: IProviderSetting,
|
||||
serverEnv: Record<string, string> = {},
|
||||
): Promise<ModelInfo[]> {
|
||||
try {
|
||||
const { baseUrl: fetchBaseUrl, apiKey } = this.getProviderBaseUrlAndKey({
|
||||
apiKeys,
|
||||
providerSettings: settings,
|
||||
serverEnv,
|
||||
defaultBaseUrlKey: 'TOGETHER_API_BASE_URL',
|
||||
defaultApiTokenKey: 'TOGETHER_API_KEY',
|
||||
});
|
||||
const baseUrl = fetchBaseUrl || 'https://api.together.xyz/v1';
|
||||
|
||||
if (!baseUrl || !apiKey) {
|
||||
return [];
|
||||
}
|
||||
|
||||
// console.log({ baseUrl, apiKey });
|
||||
|
||||
const response = await fetch(`${baseUrl}/models`, {
|
||||
headers: {
|
||||
Authorization: `Bearer ${apiKey}`,
|
||||
},
|
||||
});
|
||||
|
||||
const res = (await response.json()) as any;
|
||||
const data = (res || []).filter((model: any) => model.type === 'chat');
|
||||
|
||||
return data.map((m: any) => ({
|
||||
name: m.id,
|
||||
label: `${m.display_name} - in:$${m.pricing.input.toFixed(2)} out:$${m.pricing.output.toFixed(2)} - context ${Math.floor(m.context_length / 1000)}k`,
|
||||
provider: this.name,
|
||||
maxTokenAllowed: 8000,
|
||||
}));
|
||||
} catch (error: any) {
|
||||
console.error('Error getting Together models:', error.message);
|
||||
return [];
|
||||
}
|
||||
}
|
||||
|
||||
getModelInstance(options: {
|
||||
model: string;
|
||||
serverEnv: Env;
|
||||
apiKeys?: Record<string, string>;
|
||||
providerSettings?: Record<string, IProviderSetting>;
|
||||
}): LanguageModelV1 {
|
||||
const { model, serverEnv, apiKeys, providerSettings } = options;
|
||||
|
||||
const { baseUrl, apiKey } = this.getProviderBaseUrlAndKey({
|
||||
apiKeys,
|
||||
providerSettings: providerSettings?.[this.name],
|
||||
serverEnv: serverEnv as any,
|
||||
defaultBaseUrlKey: 'TOGETHER_API_BASE_URL',
|
||||
defaultApiTokenKey: 'TOGETHER_API_KEY',
|
||||
});
|
||||
|
||||
if (!baseUrl || !apiKey) {
|
||||
throw new Error(`Missing configuration for ${this.name} provider`);
|
||||
}
|
||||
|
||||
return getOpenAILikeModel(baseUrl, apiKey, model);
|
||||
}
|
||||
}
|
||||
app/lib/modules/llm/providers/xai.ts (new file, 47 lines)
@@ -0,0 +1,47 @@
import { BaseProvider } from '~/lib/modules/llm/base-provider';
import type { ModelInfo } from '~/lib/modules/llm/types';
import type { IProviderSetting } from '~/types/model';
import type { LanguageModelV1 } from 'ai';
import { createOpenAI } from '@ai-sdk/openai';

export default class XAIProvider extends BaseProvider {
  name = 'xAI';
  getApiKeyLink = 'https://docs.x.ai/docs/quickstart#creating-an-api-key';

  config = {
    apiTokenKey: 'XAI_API_KEY',
  };

  staticModels: ModelInfo[] = [
    { name: 'grok-beta', label: 'xAI Grok Beta', provider: 'xAI', maxTokenAllowed: 8000 },
    { name: 'grok-2-1212', label: 'xAI Grok2 1212', provider: 'xAI', maxTokenAllowed: 8000 },
  ];

  getModelInstance(options: {
    model: string;
    serverEnv: Env;
    apiKeys?: Record<string, string>;
    providerSettings?: Record<string, IProviderSetting>;
  }): LanguageModelV1 {
    const { model, serverEnv, apiKeys, providerSettings } = options;

    const { apiKey } = this.getProviderBaseUrlAndKey({
      apiKeys,
      providerSettings: providerSettings?.[this.name],
      serverEnv: serverEnv as any,
      defaultBaseUrlKey: '',
      defaultApiTokenKey: 'XAI_API_KEY',
    });

    if (!apiKey) {
      throw new Error(`Missing API key for ${this.name} provider`);
    }

    const openai = createOpenAI({
      baseURL: 'https://api.x.ai/v1',
      apiKey,
    });

    return openai(model);
  }
}
app/lib/modules/llm/registry.ts (new file, 33 lines)
@@ -0,0 +1,33 @@
import AnthropicProvider from './providers/anthropic';
import CohereProvider from './providers/cohere';
import DeepseekProvider from './providers/deepseek';
import GoogleProvider from './providers/google';
import GroqProvider from './providers/groq';
import HuggingFaceProvider from './providers/huggingface';
import LMStudioProvider from './providers/lmstudio';
import MistralProvider from './providers/mistral';
import OllamaProvider from './providers/ollama';
import OpenRouterProvider from './providers/open-router';
import OpenAILikeProvider from './providers/openai-like';
import OpenAIProvider from './providers/openai';
import PerplexityProvider from './providers/perplexity';
import TogetherProvider from './providers/together';
import XAIProvider from './providers/xai';

export {
  AnthropicProvider,
  CohereProvider,
  DeepseekProvider,
  GoogleProvider,
  GroqProvider,
  HuggingFaceProvider,
  MistralProvider,
  OllamaProvider,
  OpenAIProvider,
  OpenRouterProvider,
  OpenAILikeProvider,
  PerplexityProvider,
  XAIProvider,
  TogetherProvider,
  LMStudioProvider,
};
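The registry only re-exports the provider classes; manager.ts, which consumes it, is not included in this hunk. A sketch of how a manager could presumably discover and instantiate them, with all names beyond the registry itself illustrative:

import * as registry from '~/lib/modules/llm/registry';
import type { BaseProvider } from '~/lib/modules/llm/base-provider';

// Assumed pattern: iterate the registry's exported classes and index instances by name.
const providers: BaseProvider[] = Object.values(registry).map((ProviderClass) => new ProviderClass());
const providersByName = new Map(providers.map((p) => [p.name, p]));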
app/lib/modules/llm/types.ts (new file, 32 lines)
@@ -0,0 +1,32 @@
import type { LanguageModelV1 } from 'ai';
import type { IProviderSetting } from '~/types/model';

export interface ModelInfo {
  name: string;
  label: string;
  provider: string;
  maxTokenAllowed: number;
}

export interface ProviderInfo {
  name: string;
  staticModels: ModelInfo[];
  getDynamicModels?: (
    apiKeys?: Record<string, string>,
    settings?: IProviderSetting,
    serverEnv?: Record<string, string>,
  ) => Promise<ModelInfo[]>;
  getModelInstance: (options: {
    model: string;
    serverEnv: Env;
    apiKeys?: Record<string, string>;
    providerSettings?: Record<string, IProviderSetting>;
  }) => LanguageModelV1;
  getApiKeyLink?: string;
  labelForGetApiKey?: string;
  icon?: string;
}
export interface ProviderConfig {
  baseUrlKey?: string;
  apiTokenKey?: string;
}
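A minimal sketch of a provider written against the ModelInfo shape above, following the same pattern as the providers added in this commit; the provider name, model name, and env key are placeholders, not part of the diff.

import { BaseProvider } from '~/lib/modules/llm/base-provider';
import { createOpenAI } from '@ai-sdk/openai';
import type { ModelInfo } from '~/lib/modules/llm/types';
import type { IProviderSetting } from '~/types/model';
import type { LanguageModelV1 } from 'ai';

export default class ExampleProvider extends BaseProvider {
  name = 'Example'; // placeholder provider name
  config = { apiTokenKey: 'EXAMPLE_API_KEY' }; // placeholder env key

  staticModels: ModelInfo[] = [
    { name: 'example-model', label: 'Example Model', provider: 'Example', maxTokenAllowed: 8000 },
  ];

  getModelInstance(options: {
    model: string;
    serverEnv: Env;
    apiKeys?: Record<string, string>;
    providerSettings?: Record<string, IProviderSetting>;
  }): LanguageModelV1 {
    const { model, serverEnv, apiKeys, providerSettings } = options;

    const { apiKey } = this.getProviderBaseUrlAndKey({
      apiKeys,
      providerSettings: providerSettings?.[this.name],
      serverEnv: serverEnv as any,
      defaultBaseUrlKey: '',
      defaultApiTokenKey: 'EXAMPLE_API_KEY',
    });

    if (!apiKey) {
      throw new Error(`Missing API key for ${this.name} provider`);
    }

    return createOpenAI({ apiKey })(model);
  }
}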
@@ -1,9 +1,14 @@
import type { ModelInfo } from '~/utils/types';
import type { ModelInfo } from '~/lib/modules/llm/types';

export type ProviderInfo = {
  staticModels: ModelInfo[];
  name: string;
  getDynamicModels?: (apiKeys?: Record<string, string>, providerSettings?: IProviderSetting) => Promise<ModelInfo[]>;
  getDynamicModels?: (
    providerName: string,
    apiKeys?: Record<string, string>,
    providerSettings?: IProviderSetting,
    serverEnv?: Record<string, string>,
  ) => Promise<ModelInfo[]>;
  getApiKeyLink?: string;
  labelForGetApiKey?: string;
  icon?: string;

@@ -1,8 +1,7 @@
import Cookies from 'js-cookie';
import type { ModelInfo, OllamaApiResponse, OllamaModel } from './types';
import type { ProviderInfo, IProviderSetting } from '~/types/model';
import { createScopedLogger } from './logger';
import { logStore } from '~/lib/stores/logs';
import type { IProviderSetting } from '~/types/model';

import { LLMManager } from '~/lib/modules/llm/manager';
import type { ModelInfo } from '~/lib/modules/llm/types';

export const WORK_DIR_NAME = 'project';
export const WORK_DIR = `/home/${WORK_DIR_NAME}`;
@@ -12,533 +11,351 @@ export const PROVIDER_REGEX = /\[Provider: (.*?)\]\n\n/;
|
||||
export const DEFAULT_MODEL = 'claude-3-5-sonnet-latest';
|
||||
export const PROMPT_COOKIE_KEY = 'cachedPrompt';
|
||||
|
||||
const logger = createScopedLogger('Constants');
|
||||
const llmManager = LLMManager.getInstance(import.meta.env);
|
||||
|
||||
const PROVIDER_LIST: ProviderInfo[] = [
|
||||
{
|
||||
name: 'Anthropic',
|
||||
staticModels: [
|
||||
{
|
||||
name: 'claude-3-5-sonnet-latest',
|
||||
label: 'Claude 3.5 Sonnet (new)',
|
||||
provider: 'Anthropic',
|
||||
maxTokenAllowed: 8000,
|
||||
},
|
||||
{
|
||||
name: 'claude-3-5-sonnet-20240620',
|
||||
label: 'Claude 3.5 Sonnet (old)',
|
||||
provider: 'Anthropic',
|
||||
maxTokenAllowed: 8000,
|
||||
},
|
||||
{
|
||||
name: 'claude-3-5-haiku-latest',
|
||||
label: 'Claude 3.5 Haiku (new)',
|
||||
provider: 'Anthropic',
|
||||
maxTokenAllowed: 8000,
|
||||
},
|
||||
{ name: 'claude-3-opus-latest', label: 'Claude 3 Opus', provider: 'Anthropic', maxTokenAllowed: 8000 },
|
||||
{ name: 'claude-3-sonnet-20240229', label: 'Claude 3 Sonnet', provider: 'Anthropic', maxTokenAllowed: 8000 },
|
||||
{ name: 'claude-3-haiku-20240307', label: 'Claude 3 Haiku', provider: 'Anthropic', maxTokenAllowed: 8000 },
|
||||
],
|
||||
getApiKeyLink: 'https://console.anthropic.com/settings/keys',
|
||||
},
|
||||
{
|
||||
name: 'Ollama',
|
||||
staticModels: [],
|
||||
getDynamicModels: getOllamaModels,
|
||||
getApiKeyLink: 'https://ollama.com/download',
|
||||
labelForGetApiKey: 'Download Ollama',
|
||||
icon: 'i-ph:cloud-arrow-down',
|
||||
},
|
||||
{
|
||||
name: 'OpenAILike',
|
||||
staticModels: [],
|
||||
getDynamicModels: getOpenAILikeModels,
|
||||
},
|
||||
{
|
||||
name: 'Cohere',
|
||||
staticModels: [
|
||||
{ name: 'command-r-plus-08-2024', label: 'Command R plus Latest', provider: 'Cohere', maxTokenAllowed: 4096 },
|
||||
{ name: 'command-r-08-2024', label: 'Command R Latest', provider: 'Cohere', maxTokenAllowed: 4096 },
|
||||
{ name: 'command-r-plus', label: 'Command R plus', provider: 'Cohere', maxTokenAllowed: 4096 },
|
||||
{ name: 'command-r', label: 'Command R', provider: 'Cohere', maxTokenAllowed: 4096 },
|
||||
{ name: 'command', label: 'Command', provider: 'Cohere', maxTokenAllowed: 4096 },
|
||||
{ name: 'command-nightly', label: 'Command Nightly', provider: 'Cohere', maxTokenAllowed: 4096 },
|
||||
{ name: 'command-light', label: 'Command Light', provider: 'Cohere', maxTokenAllowed: 4096 },
|
||||
{ name: 'command-light-nightly', label: 'Command Light Nightly', provider: 'Cohere', maxTokenAllowed: 4096 },
|
||||
{ name: 'c4ai-aya-expanse-8b', label: 'c4AI Aya Expanse 8b', provider: 'Cohere', maxTokenAllowed: 4096 },
|
||||
{ name: 'c4ai-aya-expanse-32b', label: 'c4AI Aya Expanse 32b', provider: 'Cohere', maxTokenAllowed: 4096 },
|
||||
],
|
||||
getApiKeyLink: 'https://dashboard.cohere.com/api-keys',
|
||||
},
|
||||
{
|
||||
name: 'OpenRouter',
|
||||
staticModels: [
|
||||
{ name: 'gpt-4o', label: 'GPT-4o', provider: 'OpenAI', maxTokenAllowed: 8000 },
|
||||
{
|
||||
name: 'anthropic/claude-3.5-sonnet',
|
||||
label: 'Anthropic: Claude 3.5 Sonnet (OpenRouter)',
|
||||
provider: 'OpenRouter',
|
||||
maxTokenAllowed: 8000,
|
||||
},
|
||||
{
|
||||
name: 'anthropic/claude-3-haiku',
|
||||
label: 'Anthropic: Claude 3 Haiku (OpenRouter)',
|
||||
provider: 'OpenRouter',
|
||||
maxTokenAllowed: 8000,
|
||||
},
|
||||
{
|
||||
name: 'deepseek/deepseek-coder',
|
||||
label: 'Deepseek-Coder V2 236B (OpenRouter)',
|
||||
provider: 'OpenRouter',
|
||||
maxTokenAllowed: 8000,
|
||||
},
|
||||
{
|
||||
name: 'google/gemini-flash-1.5',
|
||||
label: 'Google Gemini Flash 1.5 (OpenRouter)',
|
||||
provider: 'OpenRouter',
|
||||
maxTokenAllowed: 8000,
|
||||
},
|
||||
{
|
||||
name: 'google/gemini-pro-1.5',
|
||||
label: 'Google Gemini Pro 1.5 (OpenRouter)',
|
||||
provider: 'OpenRouter',
|
||||
maxTokenAllowed: 8000,
|
||||
},
|
||||
{ name: 'x-ai/grok-beta', label: 'xAI Grok Beta (OpenRouter)', provider: 'OpenRouter', maxTokenAllowed: 8000 },
|
||||
{
|
||||
name: 'mistralai/mistral-nemo',
|
||||
label: 'OpenRouter Mistral Nemo (OpenRouter)',
|
||||
provider: 'OpenRouter',
|
||||
maxTokenAllowed: 8000,
|
||||
},
|
||||
{
|
||||
name: 'qwen/qwen-110b-chat',
|
||||
label: 'OpenRouter Qwen 110b Chat (OpenRouter)',
|
||||
provider: 'OpenRouter',
|
||||
maxTokenAllowed: 8000,
|
||||
},
|
||||
{ name: 'cohere/command', label: 'Cohere Command (OpenRouter)', provider: 'OpenRouter', maxTokenAllowed: 4096 },
|
||||
],
|
||||
getDynamicModels: getOpenRouterModels,
|
||||
getApiKeyLink: 'https://openrouter.ai/settings/keys',
|
||||
},
|
||||
{
|
||||
name: 'Google',
|
||||
staticModels: [
|
||||
{ name: 'gemini-1.5-flash-latest', label: 'Gemini 1.5 Flash', provider: 'Google', maxTokenAllowed: 8192 },
|
||||
{ name: 'gemini-2.0-flash-exp', label: 'Gemini 2.0 Flash', provider: 'Google', maxTokenAllowed: 8192 },
|
||||
{ name: 'gemini-1.5-flash-002', label: 'Gemini 1.5 Flash-002', provider: 'Google', maxTokenAllowed: 8192 },
|
||||
{ name: 'gemini-1.5-flash-8b', label: 'Gemini 1.5 Flash-8b', provider: 'Google', maxTokenAllowed: 8192 },
|
||||
{ name: 'gemini-1.5-pro-latest', label: 'Gemini 1.5 Pro', provider: 'Google', maxTokenAllowed: 8192 },
|
||||
{ name: 'gemini-1.5-pro-002', label: 'Gemini 1.5 Pro-002', provider: 'Google', maxTokenAllowed: 8192 },
|
||||
{ name: 'gemini-exp-1206', label: 'Gemini exp-1206', provider: 'Google', maxTokenAllowed: 8192 },
|
||||
],
|
||||
getApiKeyLink: 'https://aistudio.google.com/app/apikey',
|
||||
},
|
||||
{
|
||||
name: 'Groq',
|
||||
staticModels: [
|
||||
{ name: 'llama-3.1-8b-instant', label: 'Llama 3.1 8b (Groq)', provider: 'Groq', maxTokenAllowed: 8000 },
|
||||
{ name: 'llama-3.2-11b-vision-preview', label: 'Llama 3.2 11b (Groq)', provider: 'Groq', maxTokenAllowed: 8000 },
|
||||
{ name: 'llama-3.2-90b-vision-preview', label: 'Llama 3.2 90b (Groq)', provider: 'Groq', maxTokenAllowed: 8000 },
|
||||
{ name: 'llama-3.2-3b-preview', label: 'Llama 3.2 3b (Groq)', provider: 'Groq', maxTokenAllowed: 8000 },
|
||||
{ name: 'llama-3.2-1b-preview', label: 'Llama 3.2 1b (Groq)', provider: 'Groq', maxTokenAllowed: 8000 },
|
||||
{ name: 'llama-3.3-70b-versatile', label: 'Llama 3.3 70b (Groq)', provider: 'Groq', maxTokenAllowed: 8000 },
|
||||
],
|
||||
getApiKeyLink: 'https://console.groq.com/keys',
|
||||
},
|
||||
{
|
||||
name: 'HuggingFace',
|
||||
staticModels: [
|
||||
{
|
||||
name: 'Qwen/Qwen2.5-Coder-32B-Instruct',
|
||||
label: 'Qwen2.5-Coder-32B-Instruct (HuggingFace)',
|
||||
provider: 'HuggingFace',
|
||||
maxTokenAllowed: 8000,
|
||||
},
|
||||
{
|
||||
name: '01-ai/Yi-1.5-34B-Chat',
|
||||
label: 'Yi-1.5-34B-Chat (HuggingFace)',
|
||||
provider: 'HuggingFace',
|
||||
maxTokenAllowed: 8000,
|
||||
},
|
||||
{
|
||||
name: 'codellama/CodeLlama-34b-Instruct-hf',
|
||||
label: 'CodeLlama-34b-Instruct (HuggingFace)',
|
||||
provider: 'HuggingFace',
|
||||
maxTokenAllowed: 8000,
|
||||
},
|
||||
{
|
||||
name: 'NousResearch/Hermes-3-Llama-3.1-8B',
|
||||
label: 'Hermes-3-Llama-3.1-8B (HuggingFace)',
|
||||
provider: 'HuggingFace',
|
||||
maxTokenAllowed: 8000,
|
||||
},
|
||||
{
|
||||
name: 'Qwen/Qwen2.5-Coder-32B-Instruct',
|
||||
label: 'Qwen2.5-Coder-32B-Instruct (HuggingFace)',
|
||||
provider: 'HuggingFace',
|
||||
maxTokenAllowed: 8000,
|
||||
},
|
||||
{
|
||||
name: 'Qwen/Qwen2.5-72B-Instruct',
|
||||
label: 'Qwen2.5-72B-Instruct (HuggingFace)',
|
||||
provider: 'HuggingFace',
|
||||
maxTokenAllowed: 8000,
|
||||
},
|
||||
{
|
||||
name: 'meta-llama/Llama-3.1-70B-Instruct',
|
||||
label: 'Llama-3.1-70B-Instruct (HuggingFace)',
|
||||
provider: 'HuggingFace',
|
||||
maxTokenAllowed: 8000,
|
||||
},
|
||||
{
|
||||
name: 'meta-llama/Llama-3.1-405B',
|
||||
label: 'Llama-3.1-405B (HuggingFace)',
|
||||
provider: 'HuggingFace',
|
||||
maxTokenAllowed: 8000,
|
||||
},
|
||||
{
|
||||
name: '01-ai/Yi-1.5-34B-Chat',
|
||||
label: 'Yi-1.5-34B-Chat (HuggingFace)',
|
||||
provider: 'HuggingFace',
|
||||
maxTokenAllowed: 8000,
|
||||
},
|
||||
{
|
||||
name: 'codellama/CodeLlama-34b-Instruct-hf',
|
||||
label: 'CodeLlama-34b-Instruct (HuggingFace)',
|
||||
provider: 'HuggingFace',
|
||||
maxTokenAllowed: 8000,
|
||||
},
|
||||
{
|
||||
name: 'NousResearch/Hermes-3-Llama-3.1-8B',
|
||||
label: 'Hermes-3-Llama-3.1-8B (HuggingFace)',
|
||||
provider: 'HuggingFace',
|
||||
maxTokenAllowed: 8000,
|
||||
},
|
||||
],
|
||||
getApiKeyLink: 'https://huggingface.co/settings/tokens',
|
||||
},
|
||||
export const PROVIDER_LIST = llmManager.getAllProviders();
|
||||
export const DEFAULT_PROVIDER = llmManager.getDefaultProvider();
|
||||
|
||||
{
|
||||
name: 'OpenAI',
|
||||
staticModels: [
|
||||
{ name: 'gpt-4o-mini', label: 'GPT-4o Mini', provider: 'OpenAI', maxTokenAllowed: 8000 },
|
||||
{ name: 'gpt-4-turbo', label: 'GPT-4 Turbo', provider: 'OpenAI', maxTokenAllowed: 8000 },
|
||||
{ name: 'gpt-4', label: 'GPT-4', provider: 'OpenAI', maxTokenAllowed: 8000 },
|
||||
{ name: 'gpt-3.5-turbo', label: 'GPT-3.5 Turbo', provider: 'OpenAI', maxTokenAllowed: 8000 },
|
||||
],
|
||||
getApiKeyLink: 'https://platform.openai.com/api-keys',
|
||||
},
|
||||
{
|
||||
name: 'xAI',
|
||||
staticModels: [{ name: 'grok-beta', label: 'xAI Grok Beta', provider: 'xAI', maxTokenAllowed: 8000 }],
|
||||
getApiKeyLink: 'https://docs.x.ai/docs/quickstart#creating-an-api-key',
|
||||
},
|
||||
{
|
||||
name: 'Deepseek',
|
||||
staticModels: [
|
||||
{ name: 'deepseek-coder', label: 'Deepseek-Coder', provider: 'Deepseek', maxTokenAllowed: 8000 },
|
||||
{ name: 'deepseek-chat', label: 'Deepseek-Chat', provider: 'Deepseek', maxTokenAllowed: 8000 },
|
||||
],
|
||||
getApiKeyLink: 'https://platform.deepseek.com/apiKeys',
|
||||
},
|
||||
{
|
||||
name: 'Mistral',
|
||||
staticModels: [
|
||||
{ name: 'open-mistral-7b', label: 'Mistral 7B', provider: 'Mistral', maxTokenAllowed: 8000 },
|
||||
{ name: 'open-mixtral-8x7b', label: 'Mistral 8x7B', provider: 'Mistral', maxTokenAllowed: 8000 },
|
||||
{ name: 'open-mixtral-8x22b', label: 'Mistral 8x22B', provider: 'Mistral', maxTokenAllowed: 8000 },
|
||||
{ name: 'open-codestral-mamba', label: 'Codestral Mamba', provider: 'Mistral', maxTokenAllowed: 8000 },
|
||||
{ name: 'open-mistral-nemo', label: 'Mistral Nemo', provider: 'Mistral', maxTokenAllowed: 8000 },
|
||||
{ name: 'ministral-8b-latest', label: 'Mistral 8B', provider: 'Mistral', maxTokenAllowed: 8000 },
|
||||
{ name: 'mistral-small-latest', label: 'Mistral Small', provider: 'Mistral', maxTokenAllowed: 8000 },
|
||||
{ name: 'codestral-latest', label: 'Codestral', provider: 'Mistral', maxTokenAllowed: 8000 },
|
||||
{ name: 'mistral-large-latest', label: 'Mistral Large Latest', provider: 'Mistral', maxTokenAllowed: 8000 },
|
||||
],
|
||||
getApiKeyLink: 'https://console.mistral.ai/api-keys/',
|
||||
},
|
||||
{
|
||||
name: 'LMStudio',
|
||||
staticModels: [],
|
||||
getDynamicModels: getLMStudioModels,
|
||||
getApiKeyLink: 'https://lmstudio.ai/',
|
||||
labelForGetApiKey: 'Get LMStudio',
|
||||
icon: 'i-ph:cloud-arrow-down',
|
||||
},
|
||||
{
|
||||
name: 'Together',
|
||||
getDynamicModels: getTogetherModels,
|
||||
staticModels: [
|
||||
{
|
||||
name: 'Qwen/Qwen2.5-Coder-32B-Instruct',
|
||||
label: 'Qwen/Qwen2.5-Coder-32B-Instruct',
|
||||
provider: 'Together',
|
||||
maxTokenAllowed: 8000,
|
||||
},
|
||||
{
|
||||
name: 'meta-llama/Llama-3.2-90B-Vision-Instruct-Turbo',
|
||||
label: 'meta-llama/Llama-3.2-90B-Vision-Instruct-Turbo',
|
||||
provider: 'Together',
|
||||
maxTokenAllowed: 8000,
|
||||
},
|
||||
let MODEL_LIST = llmManager.getModelList();
|
||||
|
||||
{
|
||||
name: 'mistralai/Mixtral-8x7B-Instruct-v0.1',
|
||||
label: 'Mixtral 8x7B Instruct',
|
||||
provider: 'Together',
|
||||
maxTokenAllowed: 8192,
|
||||
},
|
||||
],
|
||||
getApiKeyLink: 'https://api.together.xyz/settings/api-keys',
|
||||
},
|
||||
{
|
||||
name: 'Perplexity',
|
||||
staticModels: [
|
||||
{
|
||||
name: 'llama-3.1-sonar-small-128k-online',
|
||||
label: 'Sonar Small Online',
|
||||
provider: 'Perplexity',
|
||||
maxTokenAllowed: 8192,
|
||||
},
|
||||
{
|
||||
name: 'llama-3.1-sonar-large-128k-online',
|
||||
label: 'Sonar Large Online',
|
||||
provider: 'Perplexity',
|
||||
maxTokenAllowed: 8192,
|
||||
},
|
||||
{
|
||||
name: 'llama-3.1-sonar-huge-128k-online',
|
||||
label: 'Sonar Huge Online',
|
||||
provider: 'Perplexity',
|
||||
maxTokenAllowed: 8192,
|
||||
},
|
||||
],
|
||||
getApiKeyLink: 'https://www.perplexity.ai/settings/api',
|
||||
},
|
||||
];
|
||||
/*
|
||||
*const PROVIDER_LIST_OLD: ProviderInfo[] = [
|
||||
* {
|
||||
* name: 'Anthropic',
|
||||
* staticModels: [
|
||||
* {
|
||||
* name: 'claude-3-5-sonnet-latest',
|
||||
* label: 'Claude 3.5 Sonnet (new)',
|
||||
* provider: 'Anthropic',
|
||||
* maxTokenAllowed: 8000,
|
||||
* },
|
||||
* {
|
||||
* name: 'claude-3-5-sonnet-20240620',
|
||||
* label: 'Claude 3.5 Sonnet (old)',
|
||||
* provider: 'Anthropic',
|
||||
* maxTokenAllowed: 8000,
|
||||
* },
|
||||
* {
|
||||
* name: 'claude-3-5-haiku-latest',
|
||||
* label: 'Claude 3.5 Haiku (new)',
|
||||
* provider: 'Anthropic',
|
||||
* maxTokenAllowed: 8000,
|
||||
* },
|
||||
* { name: 'claude-3-opus-latest', label: 'Claude 3 Opus', provider: 'Anthropic', maxTokenAllowed: 8000 },
|
||||
* { name: 'claude-3-sonnet-20240229', label: 'Claude 3 Sonnet', provider: 'Anthropic', maxTokenAllowed: 8000 },
|
||||
* { name: 'claude-3-haiku-20240307', label: 'Claude 3 Haiku', provider: 'Anthropic', maxTokenAllowed: 8000 },
|
||||
* ],
|
||||
* getApiKeyLink: 'https://console.anthropic.com/settings/keys',
|
||||
* },
|
||||
* {
|
||||
* name: 'Ollama',
|
||||
* staticModels: [],
|
||||
* getDynamicModels: getOllamaModels,
|
||||
* getApiKeyLink: 'https://ollama.com/download',
|
||||
* labelForGetApiKey: 'Download Ollama',
|
||||
* icon: 'i-ph:cloud-arrow-down',
|
||||
* },
|
||||
* {
|
||||
* name: 'OpenAILike',
|
||||
* staticModels: [],
|
||||
* getDynamicModels: getOpenAILikeModels,
|
||||
* },
|
||||
* {
|
||||
* name: 'Cohere',
|
||||
* staticModels: [
|
||||
* { name: 'command-r-plus-08-2024', label: 'Command R plus Latest', provider: 'Cohere', maxTokenAllowed: 4096 },
|
||||
* { name: 'command-r-08-2024', label: 'Command R Latest', provider: 'Cohere', maxTokenAllowed: 4096 },
|
||||
* { name: 'command-r-plus', label: 'Command R plus', provider: 'Cohere', maxTokenAllowed: 4096 },
|
||||
* { name: 'command-r', label: 'Command R', provider: 'Cohere', maxTokenAllowed: 4096 },
|
||||
* { name: 'command', label: 'Command', provider: 'Cohere', maxTokenAllowed: 4096 },
|
||||
* { name: 'command-nightly', label: 'Command Nightly', provider: 'Cohere', maxTokenAllowed: 4096 },
|
||||
* { name: 'command-light', label: 'Command Light', provider: 'Cohere', maxTokenAllowed: 4096 },
|
||||
* { name: 'command-light-nightly', label: 'Command Light Nightly', provider: 'Cohere', maxTokenAllowed: 4096 },
|
||||
* { name: 'c4ai-aya-expanse-8b', label: 'c4AI Aya Expanse 8b', provider: 'Cohere', maxTokenAllowed: 4096 },
|
||||
* { name: 'c4ai-aya-expanse-32b', label: 'c4AI Aya Expanse 32b', provider: 'Cohere', maxTokenAllowed: 4096 },
|
||||
* ],
|
||||
* getApiKeyLink: 'https://dashboard.cohere.com/api-keys',
|
||||
* },
|
||||
* {
|
||||
* name: 'OpenRouter',
|
||||
* staticModels: [
|
||||
* { name: 'gpt-4o', label: 'GPT-4o', provider: 'OpenAI', maxTokenAllowed: 8000 },
|
||||
* {
|
||||
* name: 'anthropic/claude-3.5-sonnet',
|
||||
* label: 'Anthropic: Claude 3.5 Sonnet (OpenRouter)',
|
||||
* provider: 'OpenRouter',
|
||||
* maxTokenAllowed: 8000,
|
||||
* },
|
||||
* {
|
||||
* name: 'anthropic/claude-3-haiku',
|
||||
* label: 'Anthropic: Claude 3 Haiku (OpenRouter)',
|
||||
* provider: 'OpenRouter',
|
||||
* maxTokenAllowed: 8000,
|
||||
* },
|
||||
* {
|
||||
* name: 'deepseek/deepseek-coder',
|
||||
* label: 'Deepseek-Coder V2 236B (OpenRouter)',
|
||||
* provider: 'OpenRouter',
|
||||
* maxTokenAllowed: 8000,
|
||||
* },
|
||||
* {
|
||||
* name: 'google/gemini-flash-1.5',
|
||||
* label: 'Google Gemini Flash 1.5 (OpenRouter)',
|
||||
* provider: 'OpenRouter',
|
||||
* maxTokenAllowed: 8000,
|
||||
* },
|
||||
* {
|
||||
* name: 'google/gemini-pro-1.5',
|
||||
* label: 'Google Gemini Pro 1.5 (OpenRouter)',
|
||||
* provider: 'OpenRouter',
|
||||
* maxTokenAllowed: 8000,
|
||||
* },
|
||||
* { name: 'x-ai/grok-beta', label: 'xAI Grok Beta (OpenRouter)', provider: 'OpenRouter', maxTokenAllowed: 8000 },
|
||||
* {
|
||||
* name: 'mistralai/mistral-nemo',
|
||||
* label: 'OpenRouter Mistral Nemo (OpenRouter)',
|
||||
* provider: 'OpenRouter',
|
||||
* maxTokenAllowed: 8000,
|
||||
* },
|
||||
* {
|
||||
* name: 'qwen/qwen-110b-chat',
|
||||
* label: 'OpenRouter Qwen 110b Chat (OpenRouter)',
|
||||
* provider: 'OpenRouter',
|
||||
* maxTokenAllowed: 8000,
|
||||
* },
|
||||
* { name: 'cohere/command', label: 'Cohere Command (OpenRouter)', provider: 'OpenRouter', maxTokenAllowed: 4096 },
|
||||
* ],
|
||||
* getDynamicModels: getOpenRouterModels,
|
||||
* getApiKeyLink: 'https://openrouter.ai/settings/keys',
|
||||
* },
|
||||
* {
|
||||
* name: 'Google',
|
||||
* staticModels: [
|
||||
* { name: 'gemini-1.5-flash-latest', label: 'Gemini 1.5 Flash', provider: 'Google', maxTokenAllowed: 8192 },
|
||||
* { name: 'gemini-2.0-flash-exp', label: 'Gemini 2.0 Flash', provider: 'Google', maxTokenAllowed: 8192 },
|
||||
* { name: 'gemini-1.5-flash-002', label: 'Gemini 1.5 Flash-002', provider: 'Google', maxTokenAllowed: 8192 },
|
||||
* { name: 'gemini-1.5-flash-8b', label: 'Gemini 1.5 Flash-8b', provider: 'Google', maxTokenAllowed: 8192 },
|
||||
* { name: 'gemini-1.5-pro-latest', label: 'Gemini 1.5 Pro', provider: 'Google', maxTokenAllowed: 8192 },
|
||||
* { name: 'gemini-1.5-pro-002', label: 'Gemini 1.5 Pro-002', provider: 'Google', maxTokenAllowed: 8192 },
|
||||
* { name: 'gemini-exp-1206', label: 'Gemini exp-1206', provider: 'Google', maxTokenAllowed: 8192 },
|
||||
* ],
|
||||
* getApiKeyLink: 'https://aistudio.google.com/app/apikey',
|
||||
* },
|
||||
* {
|
||||
* name: 'Groq',
|
||||
* staticModels: [
|
||||
* { name: 'llama-3.1-8b-instant', label: 'Llama 3.1 8b (Groq)', provider: 'Groq', maxTokenAllowed: 8000 },
|
||||
* { name: 'llama-3.2-11b-vision-preview', label: 'Llama 3.2 11b (Groq)', provider: 'Groq', maxTokenAllowed: 8000 },
|
||||
* { name: 'llama-3.2-90b-vision-preview', label: 'Llama 3.2 90b (Groq)', provider: 'Groq', maxTokenAllowed: 8000 },
|
||||
* { name: 'llama-3.2-3b-preview', label: 'Llama 3.2 3b (Groq)', provider: 'Groq', maxTokenAllowed: 8000 },
|
||||
* { name: 'llama-3.2-1b-preview', label: 'Llama 3.2 1b (Groq)', provider: 'Groq', maxTokenAllowed: 8000 },
|
||||
* { name: 'llama-3.3-70b-versatile', label: 'Llama 3.3 70b (Groq)', provider: 'Groq', maxTokenAllowed: 8000 },
|
||||
* ],
|
||||
* getApiKeyLink: 'https://console.groq.com/keys',
|
||||
* },
|
||||
* {
|
||||
* name: 'HuggingFace',
|
||||
* staticModels: [
|
||||
* {
|
||||
* name: 'Qwen/Qwen2.5-Coder-32B-Instruct',
|
||||
* label: 'Qwen2.5-Coder-32B-Instruct (HuggingFace)',
|
||||
* provider: 'HuggingFace',
|
||||
* maxTokenAllowed: 8000,
|
||||
* },
|
||||
* {
|
||||
* name: '01-ai/Yi-1.5-34B-Chat',
|
||||
* label: 'Yi-1.5-34B-Chat (HuggingFace)',
|
||||
* provider: 'HuggingFace',
|
||||
* maxTokenAllowed: 8000,
|
||||
* },
|
||||
* {
|
||||
* name: 'codellama/CodeLlama-34b-Instruct-hf',
|
||||
* label: 'CodeLlama-34b-Instruct (HuggingFace)',
|
||||
* provider: 'HuggingFace',
|
||||
* maxTokenAllowed: 8000,
|
||||
* },
|
||||
* {
|
||||
* name: 'NousResearch/Hermes-3-Llama-3.1-8B',
|
||||
* label: 'Hermes-3-Llama-3.1-8B (HuggingFace)',
|
||||
* provider: 'HuggingFace',
|
||||
* maxTokenAllowed: 8000,
|
||||
* },
|
||||
* {
|
||||
* name: 'Qwen/Qwen2.5-Coder-32B-Instruct',
|
||||
* label: 'Qwen2.5-Coder-32B-Instruct (HuggingFace)',
|
||||
* provider: 'HuggingFace',
|
||||
* maxTokenAllowed: 8000,
|
||||
* },
|
||||
* {
|
||||
* name: 'Qwen/Qwen2.5-72B-Instruct',
|
||||
* label: 'Qwen2.5-72B-Instruct (HuggingFace)',
|
||||
* provider: 'HuggingFace',
|
||||
* maxTokenAllowed: 8000,
|
||||
* },
|
||||
* {
|
||||
* name: 'meta-llama/Llama-3.1-70B-Instruct',
|
||||
* label: 'Llama-3.1-70B-Instruct (HuggingFace)',
|
||||
* provider: 'HuggingFace',
|
||||
* maxTokenAllowed: 8000,
|
||||
* },
|
||||
* {
|
||||
* name: 'meta-llama/Llama-3.1-405B',
|
||||
* label: 'Llama-3.1-405B (HuggingFace)',
|
||||
* provider: 'HuggingFace',
|
||||
* maxTokenAllowed: 8000,
|
||||
* },
|
||||
* {
|
||||
* name: '01-ai/Yi-1.5-34B-Chat',
|
||||
* label: 'Yi-1.5-34B-Chat (HuggingFace)',
|
||||
* provider: 'HuggingFace',
|
||||
* maxTokenAllowed: 8000,
|
||||
* },
|
||||
* {
|
||||
* name: 'codellama/CodeLlama-34b-Instruct-hf',
|
||||
* label: 'CodeLlama-34b-Instruct (HuggingFace)',
|
||||
* provider: 'HuggingFace',
|
||||
* maxTokenAllowed: 8000,
|
||||
* },
|
||||
* {
|
||||
* name: 'NousResearch/Hermes-3-Llama-3.1-8B',
|
||||
* label: 'Hermes-3-Llama-3.1-8B (HuggingFace)',
|
||||
* provider: 'HuggingFace',
|
||||
* maxTokenAllowed: 8000,
|
||||
* },
|
||||
* ],
|
||||
* getApiKeyLink: 'https://huggingface.co/settings/tokens',
|
||||
* },
|
||||
* {
|
||||
* name: 'OpenAI',
|
||||
* staticModels: [
|
||||
* { name: 'gpt-4o-mini', label: 'GPT-4o Mini', provider: 'OpenAI', maxTokenAllowed: 8000 },
|
||||
* { name: 'gpt-4-turbo', label: 'GPT-4 Turbo', provider: 'OpenAI', maxTokenAllowed: 8000 },
|
||||
* { name: 'gpt-4', label: 'GPT-4', provider: 'OpenAI', maxTokenAllowed: 8000 },
|
||||
* { name: 'gpt-3.5-turbo', label: 'GPT-3.5 Turbo', provider: 'OpenAI', maxTokenAllowed: 8000 },
|
||||
* ],
|
||||
* getApiKeyLink: 'https://platform.openai.com/api-keys',
|
||||
* },
|
||||
* {
|
||||
* name: 'xAI',
|
||||
* staticModels: [{ name: 'grok-beta', label: 'xAI Grok Beta', provider: 'xAI', maxTokenAllowed: 8000 }],
|
||||
* getApiKeyLink: 'https://docs.x.ai/docs/quickstart#creating-an-api-key',
|
||||
* },
|
||||
* {
|
||||
* name: 'Deepseek',
|
||||
* staticModels: [
|
||||
* { name: 'deepseek-coder', label: 'Deepseek-Coder', provider: 'Deepseek', maxTokenAllowed: 8000 },
|
||||
* { name: 'deepseek-chat', label: 'Deepseek-Chat', provider: 'Deepseek', maxTokenAllowed: 8000 },
|
||||
* ],
|
||||
* getApiKeyLink: 'https://platform.deepseek.com/apiKeys',
|
||||
* },
|
||||
* {
|
||||
* name: 'Mistral',
|
||||
* staticModels: [
|
||||
* { name: 'open-mistral-7b', label: 'Mistral 7B', provider: 'Mistral', maxTokenAllowed: 8000 },
|
||||
* { name: 'open-mixtral-8x7b', label: 'Mistral 8x7B', provider: 'Mistral', maxTokenAllowed: 8000 },
|
||||
* { name: 'open-mixtral-8x22b', label: 'Mistral 8x22B', provider: 'Mistral', maxTokenAllowed: 8000 },
|
||||
* { name: 'open-codestral-mamba', label: 'Codestral Mamba', provider: 'Mistral', maxTokenAllowed: 8000 },
|
||||
* { name: 'open-mistral-nemo', label: 'Mistral Nemo', provider: 'Mistral', maxTokenAllowed: 8000 },
|
||||
* { name: 'ministral-8b-latest', label: 'Mistral 8B', provider: 'Mistral', maxTokenAllowed: 8000 },
|
||||
* { name: 'mistral-small-latest', label: 'Mistral Small', provider: 'Mistral', maxTokenAllowed: 8000 },
|
||||
* { name: 'codestral-latest', label: 'Codestral', provider: 'Mistral', maxTokenAllowed: 8000 },
|
||||
* { name: 'mistral-large-latest', label: 'Mistral Large Latest', provider: 'Mistral', maxTokenAllowed: 8000 },
|
||||
* ],
|
||||
* getApiKeyLink: 'https://console.mistral.ai/api-keys/',
|
||||
* },
|
||||
* {
|
||||
* name: 'LMStudio',
|
||||
* staticModels: [],
|
||||
* getDynamicModels: getLMStudioModels,
|
||||
* getApiKeyLink: 'https://lmstudio.ai/',
|
||||
* labelForGetApiKey: 'Get LMStudio',
|
||||
* icon: 'i-ph:cloud-arrow-down',
|
||||
* },
|
||||
* {
|
||||
* name: 'Together',
|
||||
* getDynamicModels: getTogetherModels,
|
||||
* staticModels: [
|
||||
* {
|
||||
* name: 'Qwen/Qwen2.5-Coder-32B-Instruct',
|
||||
* label: 'Qwen/Qwen2.5-Coder-32B-Instruct',
|
||||
* provider: 'Together',
|
||||
* maxTokenAllowed: 8000,
|
||||
* },
|
||||
* {
|
||||
* name: 'meta-llama/Llama-3.2-90B-Vision-Instruct-Turbo',
|
||||
* label: 'meta-llama/Llama-3.2-90B-Vision-Instruct-Turbo',
|
||||
* provider: 'Together',
|
||||
* maxTokenAllowed: 8000,
|
||||
* },
|
||||
*
|
||||
* {
|
||||
* name: 'mistralai/Mixtral-8x7B-Instruct-v0.1',
|
||||
* label: 'Mixtral 8x7B Instruct',
|
||||
* provider: 'Together',
|
||||
* maxTokenAllowed: 8192,
|
||||
* },
|
||||
* ],
|
||||
* getApiKeyLink: 'https://api.together.xyz/settings/api-keys',
|
||||
* },
|
||||
* {
|
||||
* name: 'Perplexity',
|
||||
* staticModels: [
|
||||
* {
|
||||
* name: 'llama-3.1-sonar-small-128k-online',
|
||||
* label: 'Sonar Small Online',
|
||||
* provider: 'Perplexity',
|
||||
* maxTokenAllowed: 8192,
|
||||
* },
|
||||
* {
|
||||
* name: 'llama-3.1-sonar-large-128k-online',
|
||||
* label: 'Sonar Large Online',
|
||||
* provider: 'Perplexity',
|
||||
* maxTokenAllowed: 8192,
|
||||
* },
|
||||
* {
|
||||
* name: 'llama-3.1-sonar-huge-128k-online',
|
||||
* label: 'Sonar Huge Online',
|
||||
* provider: 'Perplexity',
|
||||
* maxTokenAllowed: 8192,
|
||||
* },
|
||||
* ],
|
||||
* getApiKeyLink: 'https://www.perplexity.ai/settings/api',
|
||||
* },
|
||||
*];
|
||||
*/
|
||||
|
||||
export const DEFAULT_PROVIDER = PROVIDER_LIST[0];
|
||||
const providerBaseUrlEnvKeys: Record<string, { baseUrlKey?: string; apiTokenKey?: string }> = {};
|
||||
PROVIDER_LIST.forEach((provider) => {
|
||||
providerBaseUrlEnvKeys[provider.name] = {
|
||||
baseUrlKey: provider.config.baseUrlKey,
|
||||
apiTokenKey: provider.config.apiTokenKey,
|
||||
};
|
||||
});
|
||||
|
||||
const staticModels: ModelInfo[] = PROVIDER_LIST.map((p) => p.staticModels).flat();
|
||||
|
||||
export let MODEL_LIST: ModelInfo[] = [...staticModels];
|
||||
|
||||
export async function getModelList(
|
||||
apiKeys: Record<string, string>,
|
||||
providerSettings?: Record<string, IProviderSetting>,
|
||||
) {
|
||||
MODEL_LIST = [
|
||||
...(
|
||||
await Promise.all(
|
||||
PROVIDER_LIST.filter(
|
||||
(p): p is ProviderInfo & { getDynamicModels: () => Promise<ModelInfo[]> } => !!p.getDynamicModels,
|
||||
).map((p) => p.getDynamicModels(apiKeys, providerSettings?.[p.name])),
|
||||
)
|
||||
).flat(),
|
||||
...staticModels,
|
||||
];
|
||||
return MODEL_LIST;
|
||||
// Export the getModelList function using the manager
|
||||
export async function getModelList(options: {
|
||||
apiKeys?: Record<string, string>;
|
||||
providerSettings?: Record<string, IProviderSetting>;
|
||||
serverEnv?: Record<string, string>;
|
||||
}) {
|
||||
return await llmManager.updateModelList(options);
|
||||
}
|
||||
|
||||
async function getTogetherModels(apiKeys?: Record<string, string>, settings?: IProviderSetting): Promise<ModelInfo[]> {
|
||||
try {
|
||||
const baseUrl = settings?.baseUrl || import.meta.env.TOGETHER_API_BASE_URL || '';
|
||||
const provider = 'Together';
|
||||
async function initializeModelList(options: {
|
||||
env?: Record<string, string>;
|
||||
providerSettings?: Record<string, IProviderSetting>;
|
||||
apiKeys?: Record<string, string>;
|
||||
}): Promise<ModelInfo[]> {
|
||||
const { providerSettings, apiKeys, env } = options;
|
||||
const list = await getModelList({
|
||||
apiKeys,
|
||||
providerSettings,
|
||||
serverEnv: env,
|
||||
});
|
||||
MODEL_LIST = list || MODEL_LIST;
|
||||
|
||||
if (!baseUrl) {
|
||||
return [];
|
||||
}
|
||||
|
||||
let apiKey = import.meta.env.OPENAI_LIKE_API_KEY ?? '';
|
||||
|
||||
if (apiKeys && apiKeys[provider]) {
|
||||
apiKey = apiKeys[provider];
|
||||
}
|
||||
|
||||
if (!apiKey) {
|
||||
return [];
|
||||
}
|
||||
|
||||
const response = await fetch(`${baseUrl}/models`, {
|
||||
headers: {
|
||||
Authorization: `Bearer ${apiKey}`,
|
||||
},
|
||||
});
|
||||
const res = (await response.json()) as any;
|
||||
const data: any[] = (res || []).filter((model: any) => model.type == 'chat');
|
||||
|
||||
return data.map((m: any) => ({
|
||||
name: m.id,
|
||||
label: `${m.display_name} - in:$${m.pricing.input.toFixed(
|
||||
2,
|
||||
)} out:$${m.pricing.output.toFixed(2)} - context ${Math.floor(m.context_length / 1000)}k`,
|
||||
provider,
|
||||
maxTokenAllowed: 8000,
|
||||
}));
|
||||
} catch (e) {
|
||||
console.error('Error getting OpenAILike models:', e);
|
||||
return [];
|
||||
}
|
||||
return list;
|
||||
}
|
||||
|
||||
const getOllamaBaseUrl = (settings?: IProviderSetting) => {
|
||||
const defaultBaseUrl = settings?.baseUrl || import.meta.env.OLLAMA_API_BASE_URL || 'http://localhost:11434';
|
||||
|
||||
// Check if we're in the browser
|
||||
if (typeof window !== 'undefined') {
|
||||
// Frontend always uses localhost
|
||||
return defaultBaseUrl;
|
||||
}
|
||||
|
||||
// Backend: Check if we're running in Docker
|
||||
const isDocker = process.env.RUNNING_IN_DOCKER === 'true';
|
||||
|
||||
return isDocker ? defaultBaseUrl.replace('localhost', 'host.docker.internal') : defaultBaseUrl;
|
||||
};
|
||||
|
||||
async function getOllamaModels(apiKeys?: Record<string, string>, settings?: IProviderSetting): Promise<ModelInfo[]> {
|
||||
try {
|
||||
const baseUrl = getOllamaBaseUrl(settings);
|
||||
const response = await fetch(`${baseUrl}/api/tags`);
|
||||
const data = (await response.json()) as OllamaApiResponse;
|
||||
|
||||
return data.models.map((model: OllamaModel) => ({
|
||||
name: model.name,
|
||||
label: `${model.name} (${model.details.parameter_size})`,
|
||||
provider: 'Ollama',
|
||||
maxTokenAllowed: 8000,
|
||||
}));
|
||||
} catch (e: any) {
|
||||
logStore.logError('Failed to get Ollama models', e, { baseUrl: settings?.baseUrl });
|
||||
logger.warn('Failed to get Ollama models: ', e.message || '');
|
||||
|
||||
return [];
|
||||
}
|
||||
}
|
||||
|
||||
async function getOpenAILikeModels(
|
||||
apiKeys?: Record<string, string>,
|
||||
settings?: IProviderSetting,
|
||||
): Promise<ModelInfo[]> {
|
||||
try {
|
||||
const baseUrl = settings?.baseUrl || import.meta.env.OPENAI_LIKE_API_BASE_URL || '';
|
||||
|
||||
if (!baseUrl) {
|
||||
return [];
|
||||
}
|
||||
|
||||
let apiKey = '';
|
||||
|
||||
if (apiKeys && apiKeys.OpenAILike) {
|
||||
apiKey = apiKeys.OpenAILike;
|
||||
}
|
||||
|
||||
const response = await fetch(`${baseUrl}/models`, {
|
||||
headers: {
|
||||
Authorization: `Bearer ${apiKey}`,
|
||||
},
|
||||
});
|
||||
const res = (await response.json()) as any;
|
||||
|
||||
return res.data.map((model: any) => ({
|
||||
name: model.id,
|
||||
label: model.id,
|
||||
provider: 'OpenAILike',
|
||||
}));
|
||||
} catch (e) {
|
||||
console.error('Error getting OpenAILike models:', e);
|
||||
return [];
|
||||
}
|
||||
}
|
||||
|
||||
type OpenRouterModelsResponse = {
|
||||
data: {
|
||||
name: string;
|
||||
id: string;
|
||||
context_length: number;
|
||||
pricing: {
|
||||
prompt: number;
|
||||
completion: number;
|
||||
};
|
||||
}[];
|
||||
};
|
||||
|
||||
async function getOpenRouterModels(): Promise<ModelInfo[]> {
|
||||
const data: OpenRouterModelsResponse = await (
|
||||
await fetch('https://openrouter.ai/api/v1/models', {
|
||||
headers: {
|
||||
'Content-Type': 'application/json',
|
||||
},
|
||||
})
|
||||
).json();
|
||||
|
||||
return data.data
|
||||
.sort((a, b) => a.name.localeCompare(b.name))
|
||||
.map((m) => ({
|
||||
name: m.id,
|
||||
label: `${m.name} - in:$${(m.pricing.prompt * 1_000_000).toFixed(
|
||||
2,
|
||||
)} out:$${(m.pricing.completion * 1_000_000).toFixed(2)} - context ${Math.floor(m.context_length / 1000)}k`,
|
||||
provider: 'OpenRouter',
|
||||
maxTokenAllowed: 8000,
|
||||
}));
|
||||
}
|
||||
|
||||
async function getLMStudioModels(_apiKeys?: Record<string, string>, settings?: IProviderSetting): Promise<ModelInfo[]> {
|
||||
try {
|
||||
const baseUrl = settings?.baseUrl || import.meta.env.LMSTUDIO_API_BASE_URL || 'http://localhost:1234';
|
||||
const response = await fetch(`${baseUrl}/v1/models`);
|
||||
const data = (await response.json()) as any;
|
||||
|
||||
return data.data.map((model: any) => ({
|
||||
name: model.id,
|
||||
label: model.id,
|
||||
provider: 'LMStudio',
|
||||
}));
|
||||
} catch (e: any) {
|
||||
logStore.logError('Failed to get LMStudio models', e, { baseUrl: settings?.baseUrl });
|
||||
return [];
|
||||
}
|
||||
}
|
||||
|
||||
async function initializeModelList(providerSettings?: Record<string, IProviderSetting>): Promise<ModelInfo[]> {
|
||||
let apiKeys: Record<string, string> = {};
|
||||
|
||||
try {
|
||||
const storedApiKeys = Cookies.get('apiKeys');
|
||||
|
||||
if (storedApiKeys) {
|
||||
const parsedKeys = JSON.parse(storedApiKeys);
|
||||
|
||||
if (typeof parsedKeys === 'object' && parsedKeys !== null) {
|
||||
apiKeys = parsedKeys;
|
||||
}
|
||||
}
|
||||
} catch (error: any) {
|
||||
logStore.logError('Failed to fetch API keys from cookies', error);
|
||||
logger.warn(`Failed to fetch apikeys from cookies: ${error?.message}`);
|
||||
}
|
||||
MODEL_LIST = [
|
||||
...(
|
||||
await Promise.all(
|
||||
PROVIDER_LIST.filter(
|
||||
(p): p is ProviderInfo & { getDynamicModels: () => Promise<ModelInfo[]> } => !!p.getDynamicModels,
|
||||
).map((p) => p.getDynamicModels(apiKeys, providerSettings?.[p.name])),
|
||||
)
|
||||
).flat(),
|
||||
...staticModels,
|
||||
];
|
||||
|
||||
return MODEL_LIST;
|
||||
}
|
||||
|
||||
export {
|
||||
getOllamaModels,
|
||||
getOpenAILikeModels,
|
||||
getLMStudioModels,
|
||||
initializeModelList,
|
||||
getOpenRouterModels,
|
||||
PROVIDER_LIST,
|
||||
};
|
||||
// initializeModelList({})
|
||||
export { initializeModelList, providerBaseUrlEnvKeys, MODEL_LIST };
|
||||
|
||||
@@ -111,6 +111,7 @@ export class BoltShell {
     * this.#shellInputStream?.write('\x03');
     */
    this.terminal.input('\x03');
    await this.waitTillOscCode('prompt');

    if (state && state.executionPrms) {
      await state.executionPrms;

@@ -19,10 +19,3 @@ export interface OllamaModel {
export interface OllamaApiResponse {
  models: OllamaModel[];
}

export interface ModelInfo {
  name: string;
  label: string;
  provider: string;
  maxTokenAllowed: number;
}

app/vite-env.d.ts (new file, vendored, 2 lines)
@@ -0,0 +1,2 @@
declare const __COMMIT_HASH: string;
declare const __APP_VERSION: string;