refactor: adopt a modular approach for LLM providers (#832)

* refactor: restructure each LLM provider as a self-contained module

* updated package and lock file

* added the Grok model back

* updated registry system
Anirban Kar
2024-12-21 11:45:17 +05:30
committed by GitHub
parent 63abf52000
commit 7295352a98
30 changed files with 1621 additions and 961 deletions
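For context, the "registry system" referenced above is not part of the hunks shown here. A minimal sketch of how such a registry could collect these default-exported provider classes — the LLMManager name, the import path, and its methods are assumptions, not the committed implementation:

import { BaseProvider } from '~/lib/modules/llm/base-provider';
import AnthropicProvider from '~/lib/modules/llm/providers/anthropic'; // hypothetical path

export class LLMManager {
  private providers = new Map<string, BaseProvider>();

  registerProvider(provider: BaseProvider) {
    // provider.name is the lookup key; last registration wins.
    this.providers.set(provider.name, provider);
  }

  getProvider(name: string) {
    return this.providers.get(name);
  }

  getAllProviders() {
    return [...this.providers.values()];
  }
}

// Hypothetical wiring: register each provider module once at startup.
export const llmManager = new LLMManager();
llmManager.registerProvider(new AnthropicProvider());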

@@ -0,0 +1,58 @@
import { BaseProvider } from '~/lib/modules/llm/base-provider';
import type { ModelInfo } from '~/lib/modules/llm/types';
import type { LanguageModelV1 } from 'ai';
import type { IProviderSetting } from '~/types/model';
import { createAnthropic } from '@ai-sdk/anthropic';
export default class AnthropicProvider extends BaseProvider {
name = 'Anthropic';
getApiKeyLink = 'https://console.anthropic.com/settings/keys';
config = {
apiTokenKey: 'ANTHROPIC_API_KEY',
};
staticModels: ModelInfo[] = [
{
name: 'claude-3-5-sonnet-latest',
label: 'Claude 3.5 Sonnet (new)',
provider: 'Anthropic',
maxTokenAllowed: 8000,
},
{
name: 'claude-3-5-sonnet-20240620',
label: 'Claude 3.5 Sonnet (old)',
provider: 'Anthropic',
maxTokenAllowed: 8000,
},
{
name: 'claude-3-5-haiku-latest',
label: 'Claude 3.5 Haiku (new)',
provider: 'Anthropic',
maxTokenAllowed: 8000,
},
{ name: 'claude-3-opus-latest', label: 'Claude 3 Opus', provider: 'Anthropic', maxTokenAllowed: 8000 },
{ name: 'claude-3-sonnet-20240229', label: 'Claude 3 Sonnet', provider: 'Anthropic', maxTokenAllowed: 8000 },
{ name: 'claude-3-haiku-20240307', label: 'Claude 3 Haiku', provider: 'Anthropic', maxTokenAllowed: 8000 },
];
getModelInstance: (options: {
model: string;
serverEnv: Env;
apiKeys?: Record<string, string>;
providerSettings?: Record<string, IProviderSetting>;
}) => LanguageModelV1 = (options) => {
const { apiKeys, providerSettings, serverEnv, model } = options;
const { apiKey } = this.getProviderBaseUrlAndKey({
apiKeys,
providerSettings: providerSettings?.[this.name],
serverEnv: serverEnv as any,
defaultBaseUrlKey: '',
defaultApiTokenKey: 'ANTHROPIC_API_KEY',
});
if (!apiKey) {
throw new Error(`Missing API key for ${this.name} provider`);
}
const anthropic = createAnthropic({
apiKey,
});
return anthropic(model);
};
}
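Every provider funnels credential lookup through BaseProvider.getProviderBaseUrlAndKey, which this diff does not include. A sketch of the resolution order it plausibly implements — the per-provider key from the client-supplied apiKeys record falling back to the server environment; the exact precedence and field names are assumptions:

import type { IProviderSetting } from '~/types/model';

abstract class BaseProviderSketch {
  abstract name: string;

  getProviderBaseUrlAndKey(options: {
    apiKeys?: Record<string, string>;
    providerSettings?: IProviderSetting;
    serverEnv?: Record<string, string>;
    defaultBaseUrlKey: string;
    defaultApiTokenKey: string;
  }): { baseUrl?: string; apiKey?: string } {
    const { apiKeys, providerSettings, serverEnv, defaultBaseUrlKey, defaultApiTokenKey } = options;

    // Base URL: an explicit per-provider setting beats the environment default.
    const baseUrl = providerSettings?.baseUrl || serverEnv?.[defaultBaseUrlKey];

    // API key: the client-supplied key for this provider beats the environment default.
    const apiKey = apiKeys?.[this.name] || serverEnv?.[defaultApiTokenKey];

    return { baseUrl, apiKey };
  }
}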

@@ -0,0 +1,54 @@
import { BaseProvider } from '~/lib/modules/llm/base-provider';
import type { ModelInfo } from '~/lib/modules/llm/types';
import type { IProviderSetting } from '~/types/model';
import type { LanguageModelV1 } from 'ai';
import { createCohere } from '@ai-sdk/cohere';
export default class CohereProvider extends BaseProvider {
name = 'Cohere';
getApiKeyLink = 'https://dashboard.cohere.com/api-keys';
config = {
apiTokenKey: 'COHERE_API_KEY',
};
staticModels: ModelInfo[] = [
{ name: 'command-r-plus-08-2024', label: 'Command R plus Latest', provider: 'Cohere', maxTokenAllowed: 4096 },
{ name: 'command-r-08-2024', label: 'Command R Latest', provider: 'Cohere', maxTokenAllowed: 4096 },
{ name: 'command-r-plus', label: 'Command R plus', provider: 'Cohere', maxTokenAllowed: 4096 },
{ name: 'command-r', label: 'Command R', provider: 'Cohere', maxTokenAllowed: 4096 },
{ name: 'command', label: 'Command', provider: 'Cohere', maxTokenAllowed: 4096 },
{ name: 'command-nightly', label: 'Command Nightly', provider: 'Cohere', maxTokenAllowed: 4096 },
{ name: 'command-light', label: 'Command Light', provider: 'Cohere', maxTokenAllowed: 4096 },
{ name: 'command-light-nightly', label: 'Command Light Nightly', provider: 'Cohere', maxTokenAllowed: 4096 },
{ name: 'c4ai-aya-expanse-8b', label: 'C4AI Aya Expanse 8B', provider: 'Cohere', maxTokenAllowed: 4096 },
{ name: 'c4ai-aya-expanse-32b', label: 'C4AI Aya Expanse 32B', provider: 'Cohere', maxTokenAllowed: 4096 },
];
getModelInstance(options: {
model: string;
serverEnv: Env;
apiKeys?: Record<string, string>;
providerSettings?: Record<string, IProviderSetting>;
}): LanguageModelV1 {
const { model, serverEnv, apiKeys, providerSettings } = options;
const { apiKey } = this.getProviderBaseUrlAndKey({
apiKeys,
providerSettings: providerSettings?.[this.name],
serverEnv: serverEnv as any,
defaultBaseUrlKey: '',
defaultApiTokenKey: 'COHERE_API_KEY',
});
if (!apiKey) {
throw new Error(`Missing API key for ${this.name} provider`);
}
const cohere = createCohere({
apiKey,
});
return cohere(model);
}
}

@@ -0,0 +1,47 @@
import { BaseProvider } from '~/lib/modules/llm/base-provider';
import type { ModelInfo } from '~/lib/modules/llm/types';
import type { IProviderSetting } from '~/types/model';
import type { LanguageModelV1 } from 'ai';
import { createOpenAI } from '@ai-sdk/openai';
export default class DeepseekProvider extends BaseProvider {
name = 'Deepseek';
getApiKeyLink = 'https://platform.deepseek.com/apiKeys';
config = {
apiTokenKey: 'DEEPSEEK_API_KEY',
};
staticModels: ModelInfo[] = [
{ name: 'deepseek-coder', label: 'Deepseek-Coder', provider: 'Deepseek', maxTokenAllowed: 8000 },
{ name: 'deepseek-chat', label: 'Deepseek-Chat', provider: 'Deepseek', maxTokenAllowed: 8000 },
];
getModelInstance(options: {
model: string;
serverEnv: Env;
apiKeys?: Record<string, string>;
providerSettings?: Record<string, IProviderSetting>;
}): LanguageModelV1 {
const { model, serverEnv, apiKeys, providerSettings } = options;
const { apiKey } = this.getProviderBaseUrlAndKey({
apiKeys,
providerSettings: providerSettings?.[this.name],
serverEnv: serverEnv as any,
defaultBaseUrlKey: '',
defaultApiTokenKey: 'DEEPSEEK_API_KEY',
});
if (!apiKey) {
throw new Error(`Missing API key for ${this.name} provider`);
}
const openai = createOpenAI({
baseURL: 'https://api.deepseek.com/beta',
apiKey,
});
return openai(model);
}
}

@@ -0,0 +1,51 @@
import { BaseProvider } from '~/lib/modules/llm/base-provider';
import type { ModelInfo } from '~/lib/modules/llm/types';
import type { IProviderSetting } from '~/types/model';
import type { LanguageModelV1 } from 'ai';
import { createGoogleGenerativeAI } from '@ai-sdk/google';
export default class GoogleProvider extends BaseProvider {
name = 'Google';
getApiKeyLink = 'https://aistudio.google.com/app/apikey';
config = {
apiTokenKey: 'GOOGLE_GENERATIVE_AI_API_KEY',
};
staticModels: ModelInfo[] = [
{ name: 'gemini-1.5-flash-latest', label: 'Gemini 1.5 Flash', provider: 'Google', maxTokenAllowed: 8192 },
{ name: 'gemini-2.0-flash-exp', label: 'Gemini 2.0 Flash', provider: 'Google', maxTokenAllowed: 8192 },
{ name: 'gemini-1.5-flash-002', label: 'Gemini 1.5 Flash-002', provider: 'Google', maxTokenAllowed: 8192 },
{ name: 'gemini-1.5-flash-8b', label: 'Gemini 1.5 Flash-8b', provider: 'Google', maxTokenAllowed: 8192 },
{ name: 'gemini-1.5-pro-latest', label: 'Gemini 1.5 Pro', provider: 'Google', maxTokenAllowed: 8192 },
{ name: 'gemini-1.5-pro-002', label: 'Gemini 1.5 Pro-002', provider: 'Google', maxTokenAllowed: 8192 },
{ name: 'gemini-exp-1206', label: 'Gemini exp-1206', provider: 'Google', maxTokenAllowed: 8192 },
];
getModelInstance(options: {
model: string;
serverEnv: Env;
apiKeys?: Record<string, string>;
providerSettings?: Record<string, IProviderSetting>;
}): LanguageModelV1 {
const { model, serverEnv, apiKeys, providerSettings } = options;
const { apiKey } = this.getProviderBaseUrlAndKey({
apiKeys,
providerSettings: providerSettings?.[this.name],
serverEnv: serverEnv as any,
defaultBaseUrlKey: '',
defaultApiTokenKey: 'GOOGLE_GENERATIVE_AI_API_KEY',
});
if (!apiKey) {
throw new Error(`Missing API key for ${this.name} provider`);
}
const google = createGoogleGenerativeAI({
apiKey,
});
return google(model);
}
}

@@ -0,0 +1,51 @@
import { BaseProvider } from '~/lib/modules/llm/base-provider';
import type { ModelInfo } from '~/lib/modules/llm/types';
import type { IProviderSetting } from '~/types/model';
import type { LanguageModelV1 } from 'ai';
import { createOpenAI } from '@ai-sdk/openai';
export default class GroqProvider extends BaseProvider {
name = 'Groq';
getApiKeyLink = 'https://console.groq.com/keys';
config = {
apiTokenKey: 'GROQ_API_KEY',
};
staticModels: ModelInfo[] = [
{ name: 'llama-3.1-8b-instant', label: 'Llama 3.1 8b (Groq)', provider: 'Groq', maxTokenAllowed: 8000 },
{ name: 'llama-3.2-11b-vision-preview', label: 'Llama 3.2 11b (Groq)', provider: 'Groq', maxTokenAllowed: 8000 },
{ name: 'llama-3.2-90b-vision-preview', label: 'Llama 3.2 90b (Groq)', provider: 'Groq', maxTokenAllowed: 8000 },
{ name: 'llama-3.2-3b-preview', label: 'Llama 3.2 3b (Groq)', provider: 'Groq', maxTokenAllowed: 8000 },
{ name: 'llama-3.2-1b-preview', label: 'Llama 3.2 1b (Groq)', provider: 'Groq', maxTokenAllowed: 8000 },
{ name: 'llama-3.3-70b-versatile', label: 'Llama 3.3 70b (Groq)', provider: 'Groq', maxTokenAllowed: 8000 },
];
getModelInstance(options: {
model: string;
serverEnv: Env;
apiKeys?: Record<string, string>;
providerSettings?: Record<string, IProviderSetting>;
}): LanguageModelV1 {
const { model, serverEnv, apiKeys, providerSettings } = options;
const { apiKey } = this.getProviderBaseUrlAndKey({
apiKeys,
providerSettings: providerSettings?.[this.name],
serverEnv: serverEnv as any,
defaultBaseUrlKey: '',
defaultApiTokenKey: 'GROQ_API_KEY',
});
if (!apiKey) {
throw new Error(`Missing API key for ${this.name} provider`);
}
const openai = createOpenAI({
baseURL: 'https://api.groq.com/openai/v1',
apiKey,
});
return openai(model);
}
}

@@ -0,0 +1,69 @@
import { BaseProvider } from '~/lib/modules/llm/base-provider';
import type { ModelInfo } from '~/lib/modules/llm/types';
import type { IProviderSetting } from '~/types/model';
import type { LanguageModelV1 } from 'ai';
import { createOpenAI } from '@ai-sdk/openai';
export default class HuggingFaceProvider extends BaseProvider {
name = 'HuggingFace';
getApiKeyLink = 'https://huggingface.co/settings/tokens';
config = {
apiTokenKey: 'HuggingFace_API_KEY',
};
staticModels: ModelInfo[] = [
{
name: 'Qwen/Qwen2.5-Coder-32B-Instruct',
label: 'Qwen2.5-Coder-32B-Instruct (HuggingFace)',
provider: 'HuggingFace',
maxTokenAllowed: 8000,
},
{
name: '01-ai/Yi-1.5-34B-Chat',
label: 'Yi-1.5-34B-Chat (HuggingFace)',
provider: 'HuggingFace',
maxTokenAllowed: 8000,
},
{
name: 'meta-llama/Llama-3.1-70B-Instruct',
label: 'Llama-3.1-70B-Instruct (HuggingFace)',
provider: 'HuggingFace',
maxTokenAllowed: 8000,
},
{
name: 'meta-llama/Llama-3.1-405B',
label: 'Llama-3.1-405B (HuggingFace)',
provider: 'HuggingFace',
maxTokenAllowed: 8000,
},
];
getModelInstance(options: {
model: string;
serverEnv: Env;
apiKeys?: Record<string, string>;
providerSettings?: Record<string, IProviderSetting>;
}): LanguageModelV1 {
const { model, serverEnv, apiKeys, providerSettings } = options;
const { apiKey } = this.getProviderBaseUrlAndKey({
apiKeys,
providerSettings: providerSettings?.[this.name],
serverEnv: serverEnv as any,
defaultBaseUrlKey: '',
defaultApiTokenKey: 'HuggingFace_API_KEY',
});
if (!apiKey) {
throw new Error(`Missing API key for ${this.name} provider`);
}
const openai = createOpenAI({
baseURL: 'https://api-inference.huggingface.co/v1/',
apiKey,
});
return openai(model);
}
}

@@ -0,0 +1,73 @@
import { BaseProvider } from '~/lib/modules/llm/base-provider';
import type { ModelInfo } from '~/lib/modules/llm/types';
import type { IProviderSetting } from '~/types/model';
import { createOpenAI } from '@ai-sdk/openai';
import type { LanguageModelV1 } from 'ai';
export default class LMStudioProvider extends BaseProvider {
name = 'LMStudio';
getApiKeyLink = 'https://lmstudio.ai/';
labelForGetApiKey = 'Get LMStudio';
icon = 'i-ph:cloud-arrow-down';
config = {
baseUrlKey: 'LMSTUDIO_API_BASE_URL',
};
staticModels: ModelInfo[] = [];
async getDynamicModels(
apiKeys?: Record<string, string>,
settings?: IProviderSetting,
serverEnv: Record<string, string> = {},
): Promise<ModelInfo[]> {
try {
const { baseUrl } = this.getProviderBaseUrlAndKey({
apiKeys,
providerSettings: settings,
serverEnv,
defaultBaseUrlKey: 'LMSTUDIO_API_BASE_URL',
defaultApiTokenKey: '',
});
if (!baseUrl) {
return [];
}
const response = await fetch(`${baseUrl}/v1/models`);
const data = (await response.json()) as { data: Array<{ id: string }> };
return data.data.map((model) => ({
name: model.id,
label: model.id,
provider: this.name,
maxTokenAllowed: 8000,
}));
} catch (error: any) {
console.error('Error getting LMStudio models:', error.message);
return [];
}
}
getModelInstance: (options: {
model: string;
serverEnv: Env;
apiKeys?: Record<string, string>;
providerSettings?: Record<string, IProviderSetting>;
}) => LanguageModelV1 = (options) => {
const { apiKeys, providerSettings, serverEnv, model } = options;
const { baseUrl } = this.getProviderBaseUrlAndKey({
apiKeys,
providerSettings: providerSettings?.[this.name],
serverEnv: serverEnv as any,
defaultBaseUrlKey: 'LMSTUDIO_API_BASE_URL',
defaultApiTokenKey: '',
});
const lmstudio = createOpenAI({
baseURL: `${baseUrl}/v1`,
// LM Studio's local server does not require an API key.
apiKey: '',
});
return lmstudio(model);
};
}

@@ -0,0 +1,53 @@
import { BaseProvider } from '~/lib/modules/llm/base-provider';
import type { ModelInfo } from '~/lib/modules/llm/types';
import type { IProviderSetting } from '~/types/model';
import type { LanguageModelV1 } from 'ai';
import { createMistral } from '@ai-sdk/mistral';
export default class MistralProvider extends BaseProvider {
name = 'Mistral';
getApiKeyLink = 'https://console.mistral.ai/api-keys/';
config = {
apiTokenKey: 'MISTRAL_API_KEY',
};
staticModels: ModelInfo[] = [
{ name: 'open-mistral-7b', label: 'Mistral 7B', provider: 'Mistral', maxTokenAllowed: 8000 },
{ name: 'open-mixtral-8x7b', label: 'Mixtral 8x7B', provider: 'Mistral', maxTokenAllowed: 8000 },
{ name: 'open-mixtral-8x22b', label: 'Mixtral 8x22B', provider: 'Mistral', maxTokenAllowed: 8000 },
{ name: 'open-codestral-mamba', label: 'Codestral Mamba', provider: 'Mistral', maxTokenAllowed: 8000 },
{ name: 'open-mistral-nemo', label: 'Mistral Nemo', provider: 'Mistral', maxTokenAllowed: 8000 },
{ name: 'ministral-8b-latest', label: 'Ministral 8B', provider: 'Mistral', maxTokenAllowed: 8000 },
{ name: 'mistral-small-latest', label: 'Mistral Small', provider: 'Mistral', maxTokenAllowed: 8000 },
{ name: 'codestral-latest', label: 'Codestral', provider: 'Mistral', maxTokenAllowed: 8000 },
{ name: 'mistral-large-latest', label: 'Mistral Large Latest', provider: 'Mistral', maxTokenAllowed: 8000 },
];
getModelInstance(options: {
model: string;
serverEnv: Env;
apiKeys?: Record<string, string>;
providerSettings?: Record<string, IProviderSetting>;
}): LanguageModelV1 {
const { model, serverEnv, apiKeys, providerSettings } = options;
const { apiKey } = this.getProviderBaseUrlAndKey({
apiKeys,
providerSettings: providerSettings?.[this.name],
serverEnv: serverEnv as any,
defaultBaseUrlKey: '',
defaultApiTokenKey: 'MISTRAL_API_KEY',
});
if (!apiKey) {
throw new Error(`Missing API key for ${this.name} provider`);
}
const mistral = createMistral({
apiKey,
});
return mistral(model);
}
}

@@ -0,0 +1,99 @@
import { BaseProvider } from '~/lib/modules/llm/base-provider';
import type { ModelInfo } from '~/lib/modules/llm/types';
import type { IProviderSetting } from '~/types/model';
import type { LanguageModelV1 } from 'ai';
import { ollama } from 'ollama-ai-provider';
interface OllamaModelDetails {
parent_model: string;
format: string;
family: string;
families: string[];
parameter_size: string;
quantization_level: string;
}
export interface OllamaModel {
name: string;
model: string;
modified_at: string;
size: number;
digest: string;
details: OllamaModelDetails;
}
export interface OllamaApiResponse {
models: OllamaModel[];
}
export const DEFAULT_NUM_CTX = process?.env?.DEFAULT_NUM_CTX ? parseInt(process.env.DEFAULT_NUM_CTX, 10) : 32768;
export default class OllamaProvider extends BaseProvider {
name = 'Ollama';
getApiKeyLink = 'https://ollama.com/download';
labelForGetApiKey = 'Download Ollama';
icon = 'i-ph:cloud-arrow-down';
config = {
baseUrlKey: 'OLLAMA_API_BASE_URL',
};
staticModels: ModelInfo[] = [];
async getDynamicModels(
apiKeys?: Record<string, string>,
settings?: IProviderSetting,
serverEnv: Record<string, string> = {},
): Promise<ModelInfo[]> {
try {
const { baseUrl } = this.getProviderBaseUrlAndKey({
apiKeys,
providerSettings: settings,
serverEnv,
defaultBaseUrlKey: 'OLLAMA_API_BASE_URL',
defaultApiTokenKey: '',
});
if (!baseUrl) {
return [];
}
const response = await fetch(`${baseUrl}/api/tags`);
const data = (await response.json()) as OllamaApiResponse;
return data.models.map((model: OllamaModel) => ({
name: model.name,
label: `${model.name} (${model.details.parameter_size})`,
provider: this.name,
maxTokenAllowed: 8000,
}));
} catch (e) {
console.error('Failed to get Ollama models:', e);
return [];
}
}
getModelInstance: (options: {
model: string;
serverEnv: Env;
apiKeys?: Record<string, string>;
providerSettings?: Record<string, IProviderSetting>;
}) => LanguageModelV1 = (options) => {
const { apiKeys, providerSettings, serverEnv, model } = options;
const { baseUrl } = this.getProviderBaseUrlAndKey({
apiKeys,
providerSettings: providerSettings?.[this.name],
serverEnv: serverEnv as any,
defaultBaseUrlKey: 'OLLAMA_API_BASE_URL',
defaultApiTokenKey: '',
});
const ollamaInstance = ollama(model, {
numCtx: DEFAULT_NUM_CTX,
}) as LanguageModelV1 & { config: any };
// The ollama-ai-provider client does not take a base URL per call,
// so point the created instance at the configured server afterwards.
ollamaInstance.config.baseURL = `${baseUrl}/api`;
return ollamaInstance;
};
}

@@ -0,0 +1,132 @@
import { BaseProvider } from '~/lib/modules/llm/base-provider';
import type { ModelInfo } from '~/lib/modules/llm/types';
import type { IProviderSetting } from '~/types/model';
import type { LanguageModelV1 } from 'ai';
import { createOpenRouter } from '@openrouter/ai-sdk-provider';
interface OpenRouterModel {
name: string;
id: string;
context_length: number;
pricing: {
prompt: number;
completion: number;
};
}
interface OpenRouterModelsResponse {
data: OpenRouterModel[];
}
export default class OpenRouterProvider extends BaseProvider {
name = 'OpenRouter';
getApiKeyLink = 'https://openrouter.ai/settings/keys';
config = {
apiTokenKey: 'OPEN_ROUTER_API_KEY',
};
staticModels: ModelInfo[] = [
{ name: 'openai/gpt-4o', label: 'OpenAI GPT-4o (OpenRouter)', provider: 'OpenRouter', maxTokenAllowed: 8000 },
{
name: 'anthropic/claude-3.5-sonnet',
label: 'Anthropic: Claude 3.5 Sonnet (OpenRouter)',
provider: 'OpenRouter',
maxTokenAllowed: 8000,
},
{
name: 'anthropic/claude-3-haiku',
label: 'Anthropic: Claude 3 Haiku (OpenRouter)',
provider: 'OpenRouter',
maxTokenAllowed: 8000,
},
{
name: 'deepseek/deepseek-coder',
label: 'Deepseek-Coder V2 236B (OpenRouter)',
provider: 'OpenRouter',
maxTokenAllowed: 8000,
},
{
name: 'google/gemini-flash-1.5',
label: 'Google Gemini Flash 1.5 (OpenRouter)',
provider: 'OpenRouter',
maxTokenAllowed: 8000,
},
{
name: 'google/gemini-pro-1.5',
label: 'Google Gemini Pro 1.5 (OpenRouter)',
provider: 'OpenRouter',
maxTokenAllowed: 8000,
},
{ name: 'x-ai/grok-beta', label: 'xAI Grok Beta (OpenRouter)', provider: 'OpenRouter', maxTokenAllowed: 8000 },
{
name: 'mistralai/mistral-nemo',
label: 'OpenRouter Mistral Nemo (OpenRouter)',
provider: 'OpenRouter',
maxTokenAllowed: 8000,
},
{
name: 'qwen/qwen-110b-chat',
label: 'OpenRouter Qwen 110b Chat (OpenRouter)',
provider: 'OpenRouter',
maxTokenAllowed: 8000,
},
{ name: 'cohere/command', label: 'Cohere Command (OpenRouter)', provider: 'OpenRouter', maxTokenAllowed: 4096 },
];
async getDynamicModels(
_apiKeys?: Record<string, string>,
_settings?: IProviderSetting,
_serverEnv: Record<string, string> = {},
): Promise<ModelInfo[]> {
try {
const response = await fetch('https://openrouter.ai/api/v1/models', {
headers: {
'Content-Type': 'application/json',
},
});
const data = (await response.json()) as OpenRouterModelsResponse;
return data.data
.sort((a, b) => a.name.localeCompare(b.name))
.map((m) => ({
name: m.id,
label: `${m.name} - in:$${(m.pricing.prompt * 1_000_000).toFixed(2)} out:$${(m.pricing.completion * 1_000_000).toFixed(2)} - context ${Math.floor(m.context_length / 1000)}k`,
provider: this.name,
maxTokenAllowed: 8000,
}));
} catch (error) {
console.error('Error getting OpenRouter models:', error);
return [];
}
}
getModelInstance(options: {
model: string;
serverEnv: Env;
apiKeys?: Record<string, string>;
providerSettings?: Record<string, IProviderSetting>;
}): LanguageModelV1 {
const { model, serverEnv, apiKeys, providerSettings } = options;
const { apiKey } = this.getProviderBaseUrlAndKey({
apiKeys,
providerSettings: providerSettings?.[this.name],
serverEnv: serverEnv as any,
defaultBaseUrlKey: '',
defaultApiTokenKey: 'OPEN_ROUTER_API_KEY',
});
if (!apiKey) {
throw new Error(`Missing API key for ${this.name} provider`);
}
const openRouter = createOpenRouter({
apiKey,
});
return openRouter.chat(model) as LanguageModelV1;
}
}

@@ -0,0 +1,77 @@
import { BaseProvider, getOpenAILikeModel } from '~/lib/modules/llm/base-provider';
import type { ModelInfo } from '~/lib/modules/llm/types';
import type { IProviderSetting } from '~/types/model';
import type { LanguageModelV1 } from 'ai';
export default class OpenAILikeProvider extends BaseProvider {
name = 'OpenAILike';
getApiKeyLink = undefined;
config = {
baseUrlKey: 'OPENAI_LIKE_API_BASE_URL',
apiTokenKey: 'OPENAI_LIKE_API_KEY',
};
staticModels: ModelInfo[] = [];
async getDynamicModels(
apiKeys?: Record<string, string>,
settings?: IProviderSetting,
serverEnv: Record<string, string> = {},
): Promise<ModelInfo[]> {
try {
const { baseUrl, apiKey } = this.getProviderBaseUrlAndKey({
apiKeys,
providerSettings: settings,
serverEnv,
defaultBaseUrlKey: 'OPENAI_LIKE_API_BASE_URL',
defaultApiTokenKey: 'OPENAI_LIKE_API_KEY',
});
if (!baseUrl || !apiKey) {
return [];
}
const response = await fetch(`${baseUrl}/models`, {
headers: {
Authorization: `Bearer ${apiKey}`,
},
});
const res = (await response.json()) as any;
return res.data.map((model: any) => ({
name: model.id,
label: model.id,
provider: this.name,
maxTokenAllowed: 8000,
}));
} catch (error) {
console.error('Error getting OpenAILike models:', error);
return [];
}
}
getModelInstance(options: {
model: string;
serverEnv: Env;
apiKeys?: Record<string, string>;
providerSettings?: Record<string, IProviderSetting>;
}): LanguageModelV1 {
const { model, serverEnv, apiKeys, providerSettings } = options;
const { baseUrl, apiKey } = this.getProviderBaseUrlAndKey({
apiKeys,
providerSettings: providerSettings?.[this.name],
serverEnv: serverEnv as any,
defaultBaseUrlKey: 'OPENAI_LIKE_API_BASE_URL',
defaultApiTokenKey: 'OPENAI_LIKE_API_KEY',
});
if (!baseUrl || !apiKey) {
throw new Error(`Missing configuration for ${this.name} provider`);
}
return getOpenAILikeModel(baseUrl, apiKey, model);
}
}
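getOpenAILikeModel is imported from base-provider but its body is not in this diff; given how it is called here and in the Together provider below, it presumably wraps createOpenAI with a caller-supplied base URL, roughly:

import { createOpenAI } from '@ai-sdk/openai';
import type { LanguageModelV1 } from 'ai';

// Assumed shape of the shared helper used by the OpenAILike and Together providers.
export function getOpenAILikeModel(baseURL: string, apiKey: string, model: string): LanguageModelV1 {
  const openai = createOpenAI({ baseURL, apiKey });
  return openai(model);
}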

@@ -0,0 +1,48 @@
import { BaseProvider } from '~/lib/modules/llm/base-provider';
import type { ModelInfo } from '~/lib/modules/llm/types';
import type { IProviderSetting } from '~/types/model';
import type { LanguageModelV1 } from 'ai';
import { createOpenAI } from '@ai-sdk/openai';
export default class OpenAIProvider extends BaseProvider {
name = 'OpenAI';
getApiKeyLink = 'https://platform.openai.com/api-keys';
config = {
apiTokenKey: 'OPENAI_API_KEY',
};
staticModels: ModelInfo[] = [
{ name: 'gpt-4o-mini', label: 'GPT-4o Mini', provider: 'OpenAI', maxTokenAllowed: 8000 },
{ name: 'gpt-4-turbo', label: 'GPT-4 Turbo', provider: 'OpenAI', maxTokenAllowed: 8000 },
{ name: 'gpt-4', label: 'GPT-4', provider: 'OpenAI', maxTokenAllowed: 8000 },
{ name: 'gpt-3.5-turbo', label: 'GPT-3.5 Turbo', provider: 'OpenAI', maxTokenAllowed: 8000 },
];
getModelInstance(options: {
model: string;
serverEnv: Env;
apiKeys?: Record<string, string>;
providerSettings?: Record<string, IProviderSetting>;
}): LanguageModelV1 {
const { model, serverEnv, apiKeys, providerSettings } = options;
const { apiKey } = this.getProviderBaseUrlAndKey({
apiKeys,
providerSettings: providerSettings?.[this.name],
serverEnv: serverEnv as any,
defaultBaseUrlKey: '',
defaultApiTokenKey: 'OPENAI_API_KEY',
});
if (!apiKey) {
throw new Error(`Missing API key for ${this.name} provider`);
}
const openai = createOpenAI({
apiKey,
});
return openai(model);
}
}

@@ -0,0 +1,63 @@
import { BaseProvider } from '~/lib/modules/llm/base-provider';
import type { ModelInfo } from '~/lib/modules/llm/types';
import type { IProviderSetting } from '~/types/model';
import type { LanguageModelV1 } from 'ai';
import { createOpenAI } from '@ai-sdk/openai';
export default class PerplexityProvider extends BaseProvider {
name = 'Perplexity';
getApiKeyLink = 'https://www.perplexity.ai/settings/api';
config = {
apiTokenKey: 'PERPLEXITY_API_KEY',
};
staticModels: ModelInfo[] = [
{
name: 'llama-3.1-sonar-small-128k-online',
label: 'Sonar Small Online',
provider: 'Perplexity',
maxTokenAllowed: 8192,
},
{
name: 'llama-3.1-sonar-large-128k-online',
label: 'Sonar Large Online',
provider: 'Perplexity',
maxTokenAllowed: 8192,
},
{
name: 'llama-3.1-sonar-huge-128k-online',
label: 'Sonar Huge Online',
provider: 'Perplexity',
maxTokenAllowed: 8192,
},
];
getModelInstance(options: {
model: string;
serverEnv: Env;
apiKeys?: Record<string, string>;
providerSettings?: Record<string, IProviderSetting>;
}): LanguageModelV1 {
const { model, serverEnv, apiKeys, providerSettings } = options;
const { apiKey } = this.getProviderBaseUrlAndKey({
apiKeys,
providerSettings: providerSettings?.[this.name],
serverEnv: serverEnv as any,
defaultBaseUrlKey: '',
defaultApiTokenKey: 'PERPLEXITY_API_KEY',
});
if (!apiKey) {
throw new Error(`Missing API key for ${this.name} provider`);
}
const perplexity = createOpenAI({
baseURL: 'https://api.perplexity.ai/',
apiKey,
});
return perplexity(model);
}
}

@@ -0,0 +1,100 @@
import { BaseProvider, getOpenAILikeModel } from '~/lib/modules/llm/base-provider';
import type { ModelInfo } from '~/lib/modules/llm/types';
import type { IProviderSetting } from '~/types/model';
import type { LanguageModelV1 } from 'ai';
export default class TogetherProvider extends BaseProvider {
name = 'Together';
getApiKeyLink = 'https://api.together.xyz/settings/api-keys';
config = {
baseUrlKey: 'TOGETHER_API_BASE_URL',
apiTokenKey: 'TOGETHER_API_KEY',
};
staticModels: ModelInfo[] = [
{
name: 'Qwen/Qwen2.5-Coder-32B-Instruct',
label: 'Qwen/Qwen2.5-Coder-32B-Instruct',
provider: 'Together',
maxTokenAllowed: 8000,
},
{
name: 'meta-llama/Llama-3.2-90B-Vision-Instruct-Turbo',
label: 'meta-llama/Llama-3.2-90B-Vision-Instruct-Turbo',
provider: 'Together',
maxTokenAllowed: 8000,
},
{
name: 'mistralai/Mixtral-8x7B-Instruct-v0.1',
label: 'Mixtral 8x7B Instruct',
provider: 'Together',
maxTokenAllowed: 8192,
},
];
async getDynamicModels(
apiKeys?: Record<string, string>,
settings?: IProviderSetting,
serverEnv: Record<string, string> = {},
): Promise<ModelInfo[]> {
try {
const { baseUrl: fetchBaseUrl, apiKey } = this.getProviderBaseUrlAndKey({
apiKeys,
providerSettings: settings,
serverEnv,
defaultBaseUrlKey: 'TOGETHER_API_BASE_URL',
defaultApiTokenKey: 'TOGETHER_API_KEY',
});
const baseUrl = fetchBaseUrl || 'https://api.together.xyz/v1';
if (!apiKey) {
return [];
}
const response = await fetch(`${baseUrl}/models`, {
headers: {
Authorization: `Bearer ${apiKey}`,
},
});
const res = (await response.json()) as any;
const data = (res || []).filter((model: any) => model.type === 'chat');
return data.map((m: any) => ({
name: m.id,
label: `${m.display_name} - in:$${m.pricing.input.toFixed(2)} out:$${m.pricing.output.toFixed(2)} - context ${Math.floor(m.context_length / 1000)}k`,
provider: this.name,
maxTokenAllowed: 8000,
}));
} catch (error: any) {
console.error('Error getting Together models:', error.message);
return [];
}
}
getModelInstance(options: {
model: string;
serverEnv: Env;
apiKeys?: Record<string, string>;
providerSettings?: Record<string, IProviderSetting>;
}): LanguageModelV1 {
const { model, serverEnv, apiKeys, providerSettings } = options;
const { baseUrl, apiKey } = this.getProviderBaseUrlAndKey({
apiKeys,
providerSettings: providerSettings?.[this.name],
serverEnv: serverEnv as any,
defaultBaseUrlKey: 'TOGETHER_API_BASE_URL',
defaultApiTokenKey: 'TOGETHER_API_KEY',
});
if (!baseUrl || !apiKey) {
throw new Error(`Missing configuration for ${this.name} provider`);
}
return getOpenAILikeModel(baseUrl, apiKey, model);
}
}

@@ -0,0 +1,47 @@
import { BaseProvider } from '~/lib/modules/llm/base-provider';
import type { ModelInfo } from '~/lib/modules/llm/types';
import type { IProviderSetting } from '~/types/model';
import type { LanguageModelV1 } from 'ai';
import { createOpenAI } from '@ai-sdk/openai';
export default class XAIProvider extends BaseProvider {
name = 'xAI';
getApiKeyLink = 'https://docs.x.ai/docs/quickstart#creating-an-api-key';
config = {
apiTokenKey: 'XAI_API_KEY',
};
staticModels: ModelInfo[] = [
{ name: 'grok-beta', label: 'xAI Grok Beta', provider: 'xAI', maxTokenAllowed: 8000 },
{ name: 'grok-2-1212', label: 'xAI Grok2 1212', provider: 'xAI', maxTokenAllowed: 8000 },
];
getModelInstance(options: {
model: string;
serverEnv: Env;
apiKeys?: Record<string, string>;
providerSettings?: Record<string, IProviderSetting>;
}): LanguageModelV1 {
const { model, serverEnv, apiKeys, providerSettings } = options;
const { apiKey } = this.getProviderBaseUrlAndKey({
apiKeys,
providerSettings: providerSettings?.[this.name],
serverEnv: serverEnv as any,
defaultBaseUrlKey: '',
defaultApiTokenKey: 'XAI_API_KEY',
});
if (!apiKey) {
throw new Error(`Missing API key for ${this.name} provider`);
}
const openai = createOpenAI({
baseURL: 'https://api.x.ai/v1',
apiKey,
});
return openai(model);
}
}
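Taken together, a caller constructs a provider module and asks it for a LanguageModelV1. A hypothetical call site — the import path, the apiKeys plumbing, and the streamText wiring are illustrative assumptions, not code from this commit:

import { streamText } from 'ai';
import OpenAIProvider from '~/lib/modules/llm/providers/openai'; // hypothetical path

const provider = new OpenAIProvider();
const model = provider.getModelInstance({
  model: 'gpt-4o-mini',
  serverEnv: process.env as any,
  // Assumes getProviderBaseUrlAndKey looks keys up by provider name.
  apiKeys: { OpenAI: process.env.OPENAI_API_KEY ?? '' },
});

const result = await streamText({
  model,
  prompt: 'Hello from a modular provider!',
});

for await (const chunk of result.textStream) {
  process.stdout.write(chunk);
}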