// LocalProvidersTab — settings tab for configuring local AI providers
// (Ollama, LMStudio, OpenAILike): per-provider enable toggles, base-URL
// editing, health monitoring via useLocalModelHealth, and management of
// locally installed Ollama models (pull/update with streamed progress,
// delete) plus a read-only list of LM Studio models.
//
// NOTE(review): this file appears to have been mangled in transit — it is
// collapsed onto a few very long lines, and everything inside angle
// brackets has been stripped: JSX element tags in the render section and
// generic type arguments (e.g. `useState('dashboard')` below was
// presumably `useState<ViewMode>('dashboard')`; the `ViewMode` alias is
// otherwise unused). Line comments that were originally on their own line
// now swallow the code following them on the same physical line. Restore
// this file from version control rather than editing it in this state;
// the annotations below document the logic that is still legible.
import React, { useEffect, useState, useCallback, useMemo } from 'react'; import { Switch } from '~/components/ui/Switch'; import { Card, CardContent, CardHeader } from '~/components/ui/Card'; import { Button } from '~/components/ui/Button'; import { useSettings } from '~/lib/hooks/useSettings'; import { LOCAL_PROVIDERS } from '~/lib/stores/settings'; import type { IProviderConfig } from '~/types/model'; import { logStore } from '~/lib/stores/logs'; import { providerBaseUrlEnvKeys } from '~/utils/constants'; import { useToast } from '~/components/ui/use-toast'; import { useLocalModelHealth } from '~/lib/hooks/useLocalModelHealth'; import ErrorBoundary from './ErrorBoundary'; import { ModelCardSkeleton } from './LoadingSkeleton'; import SetupGuide from './SetupGuide'; import StatusDashboard from './StatusDashboard'; import ProviderCard from './ProviderCard'; import ModelCard from './ModelCard'; import { OLLAMA_API_URL } from './types'; import type { OllamaModel, LMStudioModel } from './types'; import { Cpu, Server, BookOpen, Activity, PackageOpen, Monitor, Loader2, RotateCw, ExternalLink } from 'lucide-react'; // Type definitions type ViewMode = 'dashboard' | 'guide' | 'status'; export default function LocalProvidersTab() { const { providers, updateProviderSettings } = useSettings(); const [viewMode, setViewMode] = useState('dashboard'); const [editingProvider, setEditingProvider] = useState(null); const [ollamaModels, setOllamaModels] = useState([]); const [lmStudioModels, setLMStudioModels] = useState([]); const [isLoadingModels, setIsLoadingModels] = useState(false); const [isLoadingLMStudioModels, setIsLoadingLMStudioModels] = useState(false); const { toast } = useToast(); const { startMonitoring, stopMonitoring } = useLocalModelHealth(); // Memoized filtered providers to prevent unnecessary re-renders const filteredProviders = useMemo(() => { return Object.entries(providers || {}) .filter(([key]) => [...LOCAL_PROVIDERS, 'OpenAILike'].includes(key)) .map(([key,
// (continued) each matching provider entry is normalized: the base URL is
// resolved from saved settings first, then a provider-specific env var
// (via providerBaseUrlEnvKeys), then a hard-coded localhost default
// (Ollama :11434, LMStudio :1234); the list is then custom-sorted
// Ollama -> LMStudio -> OpenAILike.
// NOTE(review): the sort below computes `order[...] || 3`, and Ollama's
// rank is 0, which is falsy — `||` demotes Ollama to the catch-all rank 3,
// defeating the "Ollama first" intent. `?? 3` was almost certainly meant;
// confirm and fix once the file is restored.
value]) => { const provider = value as IProviderConfig; const envKey = providerBaseUrlEnvKeys[key]?.baseUrlKey; const envUrl = envKey ? (import.meta.env[envKey] as string | undefined) : undefined; // Set default base URLs for local providers let defaultBaseUrl = provider.settings.baseUrl || envUrl; if (!defaultBaseUrl) { if (key === 'Ollama') { defaultBaseUrl = 'http://127.0.0.1:11434'; } else if (key === 'LMStudio') { defaultBaseUrl = 'http://127.0.0.1:1234'; } } return { name: key, settings: { ...provider.settings, baseUrl: defaultBaseUrl, }, staticModels: provider.staticModels || [], getDynamicModels: provider.getDynamicModels, getApiKeyLink: provider.getApiKeyLink, labelForGetApiKey: provider.labelForGetApiKey, icon: provider.icon, } as IProviderConfig; }) .sort((a, b) => { // Custom sort: Ollama first, then LMStudio, then OpenAILike const order = { Ollama: 0, LMStudio: 1, OpenAILike: 2 }; return (order[a.name as keyof typeof order] || 3) - (order[b.name as keyof typeof order] || 3); }); }, [providers]); const categoryEnabled = useMemo(() => { return filteredProviders.length > 0 && filteredProviders.every((p) => p.settings.enabled); }, [filteredProviders]); // Start/stop health monitoring for enabled providers useEffect(() => { filteredProviders.forEach((provider) => { const baseUrl = provider.settings.baseUrl; if (provider.settings.enabled && baseUrl) { console.log(`[LocalProvidersTab] Starting monitoring for ${provider.name} at ${baseUrl}`); startMonitoring(provider.name as 'Ollama' | 'LMStudio' | 'OpenAILike', baseUrl); } else if (!provider.settings.enabled && baseUrl) { console.log(`[LocalProvidersTab] Stopping monitoring for ${provider.name} at ${baseUrl}`); stopMonitoring(provider.name as 'Ollama' | 'LMStudio' | 'OpenAILike', baseUrl); } }); }, [filteredProviders, startMonitoring, stopMonitoring]); // Fetch Ollama models when enabled useEffect(() => { const ollamaProvider = filteredProviders.find((p) => p.name === 'Ollama'); if
// (continued) the two effects here re-fetch model lists whenever the
// corresponding provider is enabled; the fetch helpers are declared below
// as const arrow functions but are only invoked from effect callbacks,
// which run after render, so the temporal-dead-zone is not hit.
// NOTE(review): fetchOllamaModels always targets the OLLAMA_API_URL
// constant and ignores the Ollama provider's resolved settings.baseUrl,
// while the LM Studio fetch does use settings.baseUrl — confirm whether
// that asymmetry is intentional (a custom Ollama base URL would be
// silently ignored here and in the pull/delete handlers below).
// Fetch errors are swallowed with a console.error and, for LM Studio,
// the model list is reset to [].
(ollamaProvider?.settings.enabled) { fetchOllamaModels(); } }, [filteredProviders]); // Fetch LM Studio models when enabled useEffect(() => { const lmStudioProvider = filteredProviders.find((p) => p.name === 'LMStudio'); if (lmStudioProvider?.settings.enabled && lmStudioProvider.settings.baseUrl) { fetchLMStudioModels(lmStudioProvider.settings.baseUrl); } }, [filteredProviders]); const fetchOllamaModels = async () => { try { setIsLoadingModels(true); const response = await fetch(`${OLLAMA_API_URL}/api/tags`); if (!response.ok) { throw new Error('Failed to fetch models'); } const data = (await response.json()) as { models: OllamaModel[] }; setOllamaModels( data.models.map((model) => ({ ...model, status: 'idle' as const, })), ); } catch { console.error('Error fetching Ollama models'); } finally { setIsLoadingModels(false); } }; const fetchLMStudioModels = async (baseUrl: string) => { try { setIsLoadingLMStudioModels(true); const response = await fetch(`${baseUrl}/v1/models`); if (!response.ok) { throw new Error('Failed to fetch LM Studio models'); } const data = (await response.json()) as { data: LMStudioModel[] }; setLMStudioModels(data.data || []); } catch { console.error('Error fetching LM Studio models'); setLMStudioModels([]); } finally { setIsLoadingLMStudioModels(false); } }; const handleToggleCategory = useCallback( async (enabled: boolean) => { filteredProviders.forEach((provider) => { updateProviderSettings(provider.name, { ...provider.settings, enabled }); }); toast(enabled ? 'All local providers enabled' : 'All local providers disabled'); }, [filteredProviders, updateProviderSettings, toast], ); const handleToggleProvider = useCallback( (provider: IProviderConfig, enabled: boolean) => { updateProviderSettings(provider.name, { ...provider.settings, enabled, }); logStore.logProvider(`Provider ${provider.name} ${enabled ? 'enabled' : 'disabled'}`, { provider: provider.name, }); toast(`${provider.name} ${enabled ?
// (continued) remaining handlers: base-URL editing (toast on save), and
// handleUpdateOllamaModel, which POSTs {name} to /api/pull and reads the
// streamed response body line-by-line — each line is a JSON progress
// object; when it carries status/completed/total the per-model `progress`
// state is updated, malformed lines are deliberately ignored, and on
// completion the model is marked 'updated' with progress cleared.
'enabled' : 'disabled'}`); }, [updateProviderSettings, toast], ); const handleUpdateBaseUrl = useCallback( (provider: IProviderConfig, newBaseUrl: string) => { updateProviderSettings(provider.name, { ...provider.settings, baseUrl: newBaseUrl, }); toast(`${provider.name} base URL updated`); }, [updateProviderSettings, toast], ); const handleUpdateOllamaModel = async (modelName: string) => { try { setOllamaModels((prev) => prev.map((m) => (m.name === modelName ? { ...m, status: 'updating' } : m))); const response = await fetch(`${OLLAMA_API_URL}/api/pull`, { method: 'POST', headers: { 'Content-Type': 'application/json' }, body: JSON.stringify({ name: modelName }), }); if (!response.ok) { throw new Error(`Failed to update ${modelName}`); } // Handle streaming response const reader = response.body?.getReader(); if (!reader) { throw new Error('No response reader available'); } while (true) { const { done, value } = await reader.read(); if (done) { break; } const text = new TextDecoder().decode(value); const lines = text.split('\n').filter(Boolean); for (const line of lines) { try { const data = JSON.parse(line); if (data.status && data.completed && data.total) { setOllamaModels((current) => current.map((m) => m.name === modelName ? { ...m, progress: { current: data.completed, total: data.total, status: data.status, }, } : m, ), ); } } catch { // Ignore parsing errors } } } setOllamaModels((prev) => prev.map((m) => (m.name === modelName ? { ...m, status: 'updated', progress: undefined } : m)), ); toast(`Successfully updated ${modelName}`); } catch { setOllamaModels((prev) => prev.map((m) => (m.name === modelName ?
// (continued) failure path of the pull marks the model 'error' and clears
// progress; handleDeleteOllamaModel asks for window.confirm, then issues
// DELETE /api/delete and prunes the model from local state. After the
// handlers, early returns render the SetupGuide / StatusDashboard views
// when viewMode is 'guide' / 'status' (their JSX tags are among the
// stripped markup noted at the top of the file).
{ ...m, status: 'error', progress: undefined } : m)), ); toast(`Failed to update ${modelName}`, { type: 'error' }); } }; const handleDeleteOllamaModel = async (modelName: string) => { if (!window.confirm(`Are you sure you want to delete ${modelName}?`)) { return; } try { const response = await fetch(`${OLLAMA_API_URL}/api/delete`, { method: 'DELETE', headers: { 'Content-Type': 'application/json' }, body: JSON.stringify({ name: modelName }), }); if (!response.ok) { throw new Error(`Failed to delete ${modelName}`); } setOllamaModels((current) => current.filter((m) => m.name !== modelName)); toast(`Deleted ${modelName}`); } catch { toast(`Failed to delete ${modelName}`, { type: 'error' }); } }; // Render different views based on viewMode if (viewMode === 'guide') { return ( setViewMode('dashboard')} /> ); } if (viewMode === 'status') { return ( setViewMode('dashboard')} /> ); } return (
// NOTE(review): from here to the end of the component the JSX is missing
// its element tags — only attributes, children text, and {/* */} comments
// survived the mangling. The tree below cannot compile as-is and should
// be restored from version control, not reconstructed by guesswork; the
// surviving fragments indicate: a header with an "Enable All" Switch
// driven by categoryEnabled/handleToggleCategory, one ProviderCard per
// filteredProviders entry, an Ollama "Installed Models" section (skeletons
// while loading, empty state linking to ollama.com/library, else one
// ModelCard per model), an LM Studio "Available Models" section rendering
// id/object/owned_by/created per model, and a final empty state when no
// local providers are configured.
{/* Header */}

Local AI Providers

Configure and manage your local AI models

Enable All
{/* Provider Cards */}
{filteredProviders.map((provider) => (
handleToggleProvider(provider, enabled)} onUpdateBaseUrl={(url) => handleUpdateBaseUrl(provider, url)} isEditing={editingProvider === provider.name} onStartEditing={() => setEditingProvider(provider.name)} onStopEditing={() => setEditingProvider(null)} /> {/* Ollama Models Section */} {provider.name === 'Ollama' && provider.settings.enabled && (

Installed Models

{isLoadingModels ? (
{Array.from({ length: 3 }).map((_, i) => ( ))}
) : ollamaModels.length === 0 ? (

No Models Installed

Visit{' '} ollama.com/library {' '} to browse available models

) : (
{ollamaModels.map((model) => ( handleUpdateOllamaModel(model.name)} onDelete={() => handleDeleteOllamaModel(model.name)} /> ))}
)}
)} {/* LM Studio Models Section */} {provider.name === 'LMStudio' && provider.settings.enabled && (

Available Models

{isLoadingLMStudioModels ? (
{Array.from({ length: 3 }).map((_, i) => ( ))}
) : lmStudioModels.length === 0 ? (

No Models Available

Make sure LM Studio is running with the local server started and CORS enabled.

) : (
{lmStudioModels.map((model) => (

{model.id}

Available
{model.object}
Owned by: {model.owned_by}
{model.created && (
Created: {new Date(model.created * 1000).toLocaleDateString()}
)}
))}
)}
)}
))}
{filteredProviders.length === 0 && (

No Local Providers Available

Local providers will appear here when they're configured in the system.

)}
); }