fix: updated logger and model caching minor bugfix #release (#895)
* fix: updated logger and model caching
* fixed usage token stream issue
* minor changes
* updated starter template change to fix the app title
* starter template bugfix
* fixed hydration errors and raw logs
* removed raw log
* made auto-select template false by default
* cleaner logs, and updated the logic to call dynamicModels only if the model is not found in the static models
* updated starter template instructions
* improved browser console logging for Firefox
* fixed provider icons
@@ -8,6 +8,10 @@ export abstract class BaseProvider implements ProviderInfo {
   abstract name: string;
   abstract staticModels: ModelInfo[];
   abstract config: ProviderConfig;
+  cachedDynamicModels?: {
+    cacheId: string;
+    models: ModelInfo[];
+  };

   getApiKeyLink?: string;
   labelForGetApiKey?: string;
@@ -49,6 +53,54 @@ export abstract class BaseProvider implements ProviderInfo {
       apiKey,
     };
   }
+  getModelsFromCache(options: {
+    apiKeys?: Record<string, string>;
+    providerSettings?: Record<string, IProviderSetting>;
+    serverEnv?: Record<string, string>;
+  }): ModelInfo[] | null {
+    if (!this.cachedDynamicModels) {
+      // console.log('no dynamic models',this.name);
+      return null;
+    }
+
+    const cacheKey = this.cachedDynamicModels.cacheId;
+    const generatedCacheKey = this.getDynamicModelsCacheKey(options);
+
+    if (cacheKey !== generatedCacheKey) {
+      // console.log('cache key mismatch',this.name,cacheKey,generatedCacheKey);
+      this.cachedDynamicModels = undefined;
+      return null;
+    }
+
+    return this.cachedDynamicModels.models;
+  }
+  getDynamicModelsCacheKey(options: {
+    apiKeys?: Record<string, string>;
+    providerSettings?: Record<string, IProviderSetting>;
+    serverEnv?: Record<string, string>;
+  }) {
+    return JSON.stringify({
+      apiKeys: options.apiKeys?.[this.name],
+      providerSettings: options.providerSettings?.[this.name],
+      serverEnv: options.serverEnv,
+    });
+  }
+  storeDynamicModels(
+    options: {
+      apiKeys?: Record<string, string>;
+      providerSettings?: Record<string, IProviderSetting>;
+      serverEnv?: Record<string, string>;
+    },
+    models: ModelInfo[],
+  ) {
+    const cacheId = this.getDynamicModelsCacheKey(options);
+
+    // console.log('caching dynamic models',this.name,cacheId);
+    this.cachedDynamicModels = {
+      cacheId,
+      models,
+    };
+  }

   // Declare the optional getDynamicModels method
   getDynamicModels?(
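Note: the three methods above form a small invalidate-on-change cache. The cache key is the JSON serialization of exactly the inputs that can alter a provider's model list (its API key entry, its provider settings, and the server env), so changing any of them produces a key mismatch and drops the cache. A minimal standalone sketch of the same scheme, with simplified stand-in types rather than the repo's ModelInfo/IProviderSetting:

type Model = { name: string; label: string; provider: string; maxTokenAllowed: number };

interface CacheOptions {
  apiKeys?: Record<string, string>;
  serverEnv?: Record<string, string>;
}

class CachedProvider {
  name = 'Example';
  private cached?: { cacheId: string; models: Model[] };

  // Key = JSON of every input that affects the model list; a changed API key
  // or env produces a different string and therefore a cache miss.
  private cacheKey(options: CacheOptions): string {
    return JSON.stringify({
      apiKey: options.apiKeys?.[this.name],
      serverEnv: options.serverEnv,
    });
  }

  getModelsFromCache(options: CacheOptions): Model[] | null {
    if (!this.cached) {
      return null;
    }

    if (this.cached.cacheId !== this.cacheKey(options)) {
      this.cached = undefined; // stale entry: drop it so the caller refetches
      return null;
    }

    return this.cached.models;
  }

  storeDynamicModels(options: CacheOptions, models: Model[]): void {
    this.cached = { cacheId: this.cacheKey(options), models };
  }
}

// Second lookup with identical options is a hit; a changed key is a miss.
const p = new CachedProvider();
const opts: CacheOptions = { apiKeys: { Example: 'sk-test' } };
p.storeDynamicModels(opts, [{ name: 'm', label: 'm', provider: 'Example', maxTokenAllowed: 8000 }]);
console.log(p.getModelsFromCache(opts)?.length); // 1
console.log(p.getModelsFromCache({ apiKeys: { Example: 'sk-other' } })); // null

Since JSON.stringify is property-order sensitive, this only works because the key object is always built with the same shape, as it is in the diff.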
@@ -2,7 +2,9 @@ import type { IProviderSetting } from '~/types/model';
 import { BaseProvider } from './base-provider';
 import type { ModelInfo, ProviderInfo } from './types';
 import * as providers from './registry';
+import { createScopedLogger } from '~/utils/logger';

+const logger = createScopedLogger('LLMManager');
 export class LLMManager {
   private static _instance: LLMManager;
   private _providers: Map<string, BaseProvider> = new Map();
@@ -40,22 +42,22 @@ export class LLMManager {
           try {
             this.registerProvider(provider);
           } catch (error: any) {
-            console.log('Failed To Register Provider: ', provider.name, 'error:', error.message);
+            logger.warn('Failed To Register Provider: ', provider.name, 'error:', error.message);
           }
         }
       }
     } catch (error) {
-      console.error('Error registering providers:', error);
+      logger.error('Error registering providers:', error);
     }
   }

   registerProvider(provider: BaseProvider) {
     if (this._providers.has(provider.name)) {
-      console.warn(`Provider ${provider.name} is already registered. Skipping.`);
+      logger.warn(`Provider ${provider.name} is already registered. Skipping.`);
       return;
     }

-    console.log('Registering Provider: ', provider.name);
+    logger.info('Registering Provider: ', provider.name);
     this._providers.set(provider.name, provider);
     this._modelList = [...this._modelList, ...provider.staticModels];
   }
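Note: the diff relies on the repo's ~/utils/logger; a scoped logger of this shape can be as small as a prefix wrapper around console. The following is a hypothetical sketch, not the actual implementation:

type LogFn = (...args: unknown[]) => void;

interface ScopedLogger {
  info: LogFn;
  warn: LogFn;
  error: LogFn;
}

// Every call is prefixed with its scope, so manager/provider logs stay filterable.
function createScopedLogger(scope: string): ScopedLogger {
  const prefix = `[${scope}]`;

  return {
    info: (...args) => console.info(prefix, ...args),
    warn: (...args) => console.warn(prefix, ...args),
    error: (...args) => console.error(prefix, ...args),
  };
}

// Usage mirrors the diff above:
const logger = createScopedLogger('LLMManager');
logger.info('Registering Provider: ', 'OpenAI'); // [LLMManager] Registering Provider:  OpenAI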
@@ -93,12 +95,28 @@ export class LLMManager {
           (provider): provider is BaseProvider & Required<Pick<ProviderInfo, 'getDynamicModels'>> =>
             !!provider.getDynamicModels,
         )
-        .map((provider) =>
-          provider.getDynamicModels(apiKeys, providerSettings?.[provider.name], serverEnv).catch((err) => {
-            console.error(`Error getting dynamic models ${provider.name} :`, err);
-            return [];
-          }),
-        ),
+        .map(async (provider) => {
+          const cachedModels = provider.getModelsFromCache(options);
+
+          if (cachedModels) {
+            return cachedModels;
+          }
+
+          const dynamicModels = await provider
+            .getDynamicModels(apiKeys, providerSettings?.[provider.name], serverEnv)
+            .then((models) => {
+              logger.info(`Caching ${models.length} dynamic models for ${provider.name}`);
+              provider.storeDynamicModels(options, models);
+
+              return models;
+            })
+            .catch((err) => {
+              logger.error(`Error getting dynamic models ${provider.name} :`, err);
+              return [];
+            });
+
+          return dynamicModels;
+        }),
     );

     // Combine static and dynamic models
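Note: this reworked map is where "call dynamicModels only if not found" happens: each provider consults its cache, fetches only on a miss, stores the result, and catches its own failure so one bad provider cannot reject the whole Promise.all. The same pattern in isolation, with generic shapes standing in for the provider classes above:

type Model = { name: string; provider: string };

interface DynamicProvider {
  name: string;
  getCached(): Model[] | null;
  store(models: Model[]): void;
  fetchModels(): Promise<Model[]>;
}

async function collectDynamicModels(providers: DynamicProvider[]): Promise<Model[]> {
  const lists = await Promise.all(
    providers.map(async (provider) => {
      const cached = provider.getCached();

      if (cached) {
        return cached; // cache hit: no network call at all
      }

      try {
        const models = await provider.fetchModels();
        provider.store(models); // warm the cache for the next request

        return models;
      } catch (err) {
        console.error(`Error getting dynamic models ${provider.name}:`, err);
        return []; // isolate the failure; Promise.all still resolves
      }
    }),
  );

  return lists.flat();
}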
@@ -110,6 +128,68 @@ export class LLMManager {

     return modelList;
   }
+  getStaticModelList() {
+    return [...this._providers.values()].flatMap((p) => p.staticModels || []);
+  }
+  async getModelListFromProvider(
+    providerArg: BaseProvider,
+    options: {
+      apiKeys?: Record<string, string>;
+      providerSettings?: Record<string, IProviderSetting>;
+      serverEnv?: Record<string, string>;
+    },
+  ): Promise<ModelInfo[]> {
+    const provider = this._providers.get(providerArg.name);
+
+    if (!provider) {
+      throw new Error(`Provider ${providerArg.name} not found`);
+    }
+
+    const staticModels = provider.staticModels || [];
+
+    if (!provider.getDynamicModels) {
+      return staticModels;
+    }
+
+    const { apiKeys, providerSettings, serverEnv } = options;
+
+    const cachedModels = provider.getModelsFromCache({
+      apiKeys,
+      providerSettings,
+      serverEnv,
+    });
+
+    if (cachedModels) {
+      logger.info(`Found ${cachedModels.length} cached models for ${provider.name}`);
+      return [...cachedModels, ...staticModels];
+    }
+
+    logger.info(`Getting dynamic models for ${provider.name}`);
+
+    const dynamicModels = await provider
+      .getDynamicModels?.(apiKeys, providerSettings?.[provider.name], serverEnv)
+      .then((models) => {
+        logger.info(`Got ${models.length} dynamic models for ${provider.name}`);
+        provider.storeDynamicModels(options, models);
+
+        return models;
+      })
+      .catch((err) => {
+        logger.error(`Error getting dynamic models ${provider.name} :`, err);
+        return [];
+      });
+
+    return [...dynamicModels, ...staticModels];
+  }
+  getStaticModelListFromProvider(providerArg: BaseProvider) {
+    const provider = this._providers.get(providerArg.name);
+
+    if (!provider) {
+      throw new Error(`Provider ${providerArg.name} not found`);
+    }
+
+    return [...(provider.staticModels || [])];
+  }

   getDefaultProvider(): BaseProvider {
     const firstProvider = this._providers.values().next().value;
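Note: getModelListFromProvider gives callers a single-provider path with the same precedence: cached dynamic models first, else a fresh fetch (stored on success), always merged ahead of the static list. A self-contained sketch of just that precedence, using a hypothetical helper rather than the class above:

// Hypothetical helper, not repo code: the lookup order implemented above.
type NamedModel = { name: string };

async function modelsForProvider(
  cached: NamedModel[] | null,
  fetchDynamic: (() => Promise<NamedModel[]>) | undefined,
  staticModels: NamedModel[],
): Promise<NamedModel[]> {
  if (!fetchDynamic) {
    return staticModels; // provider has no dynamic support: static only
  }

  if (cached) {
    return [...cached, ...staticModels]; // cache hit: skip the fetch entirely
  }

  const dynamic = await fetchDynamic().catch(() => [] as NamedModel[]);

  return [...dynamic, ...staticModels]; // dynamic models lead the merged list
}

// Example: cache miss with a working fetcher.
modelsForProvider(null, async () => [{ name: 'dyn' }], [{ name: 'static' }]).then((m) =>
  console.log(m.map((x) => x.name)), // ['dyn', 'static']
);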
@@ -25,6 +25,30 @@ export default class HuggingFaceProvider extends BaseProvider {
       provider: 'HuggingFace',
       maxTokenAllowed: 8000,
     },
+    {
+      name: 'codellama/CodeLlama-34b-Instruct-hf',
+      label: 'CodeLlama-34b-Instruct (HuggingFace)',
+      provider: 'HuggingFace',
+      maxTokenAllowed: 8000,
+    },
+    {
+      name: 'NousResearch/Hermes-3-Llama-3.1-8B',
+      label: 'Hermes-3-Llama-3.1-8B (HuggingFace)',
+      provider: 'HuggingFace',
+      maxTokenAllowed: 8000,
+    },
+    {
+      name: 'Qwen/Qwen2.5-Coder-32B-Instruct',
+      label: 'Qwen2.5-Coder-32B-Instruct (HuggingFace)',
+      provider: 'HuggingFace',
+      maxTokenAllowed: 8000,
+    },
+    {
+      name: 'Qwen/Qwen2.5-72B-Instruct',
+      label: 'Qwen2.5-72B-Instruct (HuggingFace)',
+      provider: 'HuggingFace',
+      maxTokenAllowed: 8000,
+    },
     {
       name: 'meta-llama/Llama-3.1-70B-Instruct',
       label: 'Llama-3.1-70B-Instruct (HuggingFace)',
@@ -37,6 +61,24 @@ export default class HuggingFaceProvider extends BaseProvider {
       provider: 'HuggingFace',
       maxTokenAllowed: 8000,
     },
+    {
+      name: '01-ai/Yi-1.5-34B-Chat',
+      label: 'Yi-1.5-34B-Chat (HuggingFace)',
+      provider: 'HuggingFace',
+      maxTokenAllowed: 8000,
+    },
+    {
+      name: 'codellama/CodeLlama-34b-Instruct-hf',
+      label: 'CodeLlama-34b-Instruct (HuggingFace)',
+      provider: 'HuggingFace',
+      maxTokenAllowed: 8000,
+    },
+    {
+      name: 'NousResearch/Hermes-3-Llama-3.1-8B',
+      label: 'Hermes-3-Llama-3.1-8B (HuggingFace)',
+      provider: 'HuggingFace',
+      maxTokenAllowed: 8000,
+    },
   ];

   getModelInstance(options: {
@@ -50,40 +50,35 @@ export default class HyperbolicProvider extends BaseProvider {
     settings?: IProviderSetting,
     serverEnv: Record<string, string> = {},
   ): Promise<ModelInfo[]> {
-    try {
-      const { baseUrl: fetchBaseUrl, apiKey } = this.getProviderBaseUrlAndKey({
-        apiKeys,
-        providerSettings: settings,
-        serverEnv,
-        defaultBaseUrlKey: '',
-        defaultApiTokenKey: 'HYPERBOLIC_API_KEY',
-      });
-      const baseUrl = fetchBaseUrl || 'https://api.hyperbolic.xyz/v1';
-
-      if (!baseUrl || !apiKey) {
-        return [];
-      }
-
-      const response = await fetch(`${baseUrl}/models`, {
-        headers: {
-          Authorization: `Bearer ${apiKey}`,
-        },
-      });
-
-      const res = (await response.json()) as any;
-
-      const data = res.data.filter((model: any) => model.object === 'model' && model.supports_chat);
-
-      return data.map((m: any) => ({
-        name: m.id,
-        label: `${m.id} - context ${m.context_length ? Math.floor(m.context_length / 1000) + 'k' : 'N/A'}`,
-        provider: this.name,
-        maxTokenAllowed: m.context_length || 8000,
-      }));
-    } catch (error: any) {
-      console.error('Error getting Hyperbolic models:', error.message);
-      return [];
-    }
+    const { baseUrl: fetchBaseUrl, apiKey } = this.getProviderBaseUrlAndKey({
+      apiKeys,
+      providerSettings: settings,
+      serverEnv,
+      defaultBaseUrlKey: '',
+      defaultApiTokenKey: 'HYPERBOLIC_API_KEY',
+    });
+    const baseUrl = fetchBaseUrl || 'https://api.hyperbolic.xyz/v1';
+
+    if (!apiKey) {
+      throw `Missing Api Key configuration for ${this.name} provider`;
+    }
+
+    const response = await fetch(`${baseUrl}/models`, {
+      headers: {
+        Authorization: `Bearer ${apiKey}`,
+      },
+    });
+
+    const res = (await response.json()) as any;
+
+    const data = res.data.filter((model: any) => model.object === 'model' && model.supports_chat);
+
+    return data.map((m: any) => ({
+      name: m.id,
+      label: `${m.id} - context ${m.context_length ? Math.floor(m.context_length / 1000) + 'k' : 'N/A'}`,
+      provider: this.name,
+      maxTokenAllowed: m.context_length || 8000,
+    }));
   }

   getModelInstance(options: {
@@ -103,8 +98,7 @@ export default class HyperbolicProvider extends BaseProvider {
     });

     if (!apiKey) {
-      console.log(`Missing configuration for ${this.name} provider`);
-      throw new Error(`Missing configuration for ${this.name} provider`);
+      throw `Missing Api Key configuration for ${this.name} provider`;
     }

     const openai = createOpenAI({
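Note: dropping the provider-level try/catch here (and in the LM Studio, Ollama, OpenAILike, and Together hunks below) is deliberate. With getModelList now attaching a per-provider .catch, a throwing getDynamicModels, even one that throws a bare string as this code does, degrades to an empty list at the call site instead of being swallowed inside the provider. A minimal demonstration of that containment:

// Each fetcher's failure is contained by its own .catch, so Promise.all resolves.
async function safeLists(fetchers: Array<() => Promise<string[]>>): Promise<string[][]> {
  return Promise.all(fetchers.map((f) => f().catch(() => [])));
}

safeLists([
  async () => ['model-a'],
  async () => {
    throw `Missing Api Key configuration for Hyperbolic provider`; // bare string, like the diff
  },
]).then(console.log); // [['model-a'], []]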
@@ -22,33 +22,27 @@ export default class LMStudioProvider extends BaseProvider {
     settings?: IProviderSetting,
     serverEnv: Record<string, string> = {},
   ): Promise<ModelInfo[]> {
-    try {
-      const { baseUrl } = this.getProviderBaseUrlAndKey({
-        apiKeys,
-        providerSettings: settings,
-        serverEnv,
-        defaultBaseUrlKey: 'LMSTUDIO_API_BASE_URL',
-        defaultApiTokenKey: '',
-      });
-
-      if (!baseUrl) {
-        return [];
-      }
-
-      const response = await fetch(`${baseUrl}/v1/models`);
-      const data = (await response.json()) as { data: Array<{ id: string }> };
-
-      return data.data.map((model) => ({
-        name: model.id,
-        label: model.id,
-        provider: this.name,
-        maxTokenAllowed: 8000,
-      }));
-    } catch (error: any) {
-      console.log('Error getting LMStudio models:', error.message);
-      return [];
-    }
+    const { baseUrl } = this.getProviderBaseUrlAndKey({
+      apiKeys,
+      providerSettings: settings,
+      serverEnv,
+      defaultBaseUrlKey: 'LMSTUDIO_API_BASE_URL',
+      defaultApiTokenKey: '',
+    });
+
+    if (!baseUrl) {
+      return [];
+    }
+
+    const response = await fetch(`${baseUrl}/v1/models`);
+    const data = (await response.json()) as { data: Array<{ id: string }> };
+
+    return data.data.map((model) => ({
+      name: model.id,
+      label: model.id,
+      provider: this.name,
+      maxTokenAllowed: 8000,
+    }));
   }

   getModelInstance: (options: {
     model: string;
@@ -45,34 +45,29 @@ export default class OllamaProvider extends BaseProvider {
     settings?: IProviderSetting,
     serverEnv: Record<string, string> = {},
   ): Promise<ModelInfo[]> {
-    try {
-      const { baseUrl } = this.getProviderBaseUrlAndKey({
-        apiKeys,
-        providerSettings: settings,
-        serverEnv,
-        defaultBaseUrlKey: 'OLLAMA_API_BASE_URL',
-        defaultApiTokenKey: '',
-      });
-
-      if (!baseUrl) {
-        return [];
-      }
-
-      const response = await fetch(`${baseUrl}/api/tags`);
-      const data = (await response.json()) as OllamaApiResponse;
-
-      // console.log({ ollamamodels: data.models });
-
-      return data.models.map((model: OllamaModel) => ({
-        name: model.name,
-        label: `${model.name} (${model.details.parameter_size})`,
-        provider: this.name,
-        maxTokenAllowed: 8000,
-      }));
-    } catch (e) {
-      console.error('Failed to get Ollama models:', e);
-      return [];
-    }
+    const { baseUrl } = this.getProviderBaseUrlAndKey({
+      apiKeys,
+      providerSettings: settings,
+      serverEnv,
+      defaultBaseUrlKey: 'OLLAMA_API_BASE_URL',
+      defaultApiTokenKey: '',
+    });
+
+    if (!baseUrl) {
+      return [];
+    }
+
+    const response = await fetch(`${baseUrl}/api/tags`);
+    const data = (await response.json()) as OllamaApiResponse;
+
+    // console.log({ ollamamodels: data.models });
+
+    return data.models.map((model: OllamaModel) => ({
+      name: model.name,
+      label: `${model.name} (${model.details.parameter_size})`,
+      provider: this.name,
+      maxTokenAllowed: 8000,
+    }));
   }

   getModelInstance: (options: {
     model: string;
@@ -27,7 +27,6 @@ export default class OpenRouterProvider extends BaseProvider {
   };

   staticModels: ModelInfo[] = [
-    { name: 'gpt-4o', label: 'GPT-4o', provider: 'OpenAI', maxTokenAllowed: 8000 },
     {
       name: 'anthropic/claude-3.5-sonnet',
       label: 'Anthropic: Claude 3.5 Sonnet (OpenRouter)',
@@ -19,37 +19,32 @@ export default class OpenAILikeProvider extends BaseProvider {
     settings?: IProviderSetting,
     serverEnv: Record<string, string> = {},
   ): Promise<ModelInfo[]> {
-    try {
-      const { baseUrl, apiKey } = this.getProviderBaseUrlAndKey({
-        apiKeys,
-        providerSettings: settings,
-        serverEnv,
-        defaultBaseUrlKey: 'OPENAI_LIKE_API_BASE_URL',
-        defaultApiTokenKey: 'OPENAI_LIKE_API_KEY',
-      });
-
-      if (!baseUrl || !apiKey) {
-        return [];
-      }
-
-      const response = await fetch(`${baseUrl}/models`, {
-        headers: {
-          Authorization: `Bearer ${apiKey}`,
-        },
-      });
-
-      const res = (await response.json()) as any;
-
-      return res.data.map((model: any) => ({
-        name: model.id,
-        label: model.id,
-        provider: this.name,
-        maxTokenAllowed: 8000,
-      }));
-    } catch (error) {
-      console.error('Error getting OpenAILike models:', error);
-      return [];
-    }
+    const { baseUrl, apiKey } = this.getProviderBaseUrlAndKey({
+      apiKeys,
+      providerSettings: settings,
+      serverEnv,
+      defaultBaseUrlKey: 'OPENAI_LIKE_API_BASE_URL',
+      defaultApiTokenKey: 'OPENAI_LIKE_API_KEY',
+    });
+
+    if (!baseUrl || !apiKey) {
+      return [];
+    }
+
+    const response = await fetch(`${baseUrl}/models`, {
+      headers: {
+        Authorization: `Bearer ${apiKey}`,
+      },
+    });
+
+    const res = (await response.json()) as any;
+
+    return res.data.map((model: any) => ({
+      name: model.id,
+      label: model.id,
+      provider: this.name,
+      maxTokenAllowed: 8000,
+    }));
   }

   getModelInstance(options: {
@@ -13,6 +13,7 @@ export default class OpenAIProvider extends BaseProvider {
   };

   staticModels: ModelInfo[] = [
+    { name: 'gpt-4o', label: 'GPT-4o', provider: 'OpenAI', maxTokenAllowed: 8000 },
     { name: 'gpt-4o-mini', label: 'GPT-4o Mini', provider: 'OpenAI', maxTokenAllowed: 8000 },
     { name: 'gpt-4-turbo', label: 'GPT-4 Turbo', provider: 'OpenAI', maxTokenAllowed: 8000 },
     { name: 'gpt-4', label: 'GPT-4', provider: 'OpenAI', maxTokenAllowed: 8000 },
@@ -38,41 +38,36 @@ export default class TogetherProvider extends BaseProvider {
     settings?: IProviderSetting,
     serverEnv: Record<string, string> = {},
   ): Promise<ModelInfo[]> {
-    try {
-      const { baseUrl: fetchBaseUrl, apiKey } = this.getProviderBaseUrlAndKey({
-        apiKeys,
-        providerSettings: settings,
-        serverEnv,
-        defaultBaseUrlKey: 'TOGETHER_API_BASE_URL',
-        defaultApiTokenKey: 'TOGETHER_API_KEY',
-      });
-      const baseUrl = fetchBaseUrl || 'https://api.together.xyz/v1';
-
-      if (!baseUrl || !apiKey) {
-        return [];
-      }
-
-      // console.log({ baseUrl, apiKey });
-
-      const response = await fetch(`${baseUrl}/models`, {
-        headers: {
-          Authorization: `Bearer ${apiKey}`,
-        },
-      });
-
-      const res = (await response.json()) as any;
-      const data = (res || []).filter((model: any) => model.type === 'chat');
-
-      return data.map((m: any) => ({
-        name: m.id,
-        label: `${m.display_name} - in:$${m.pricing.input.toFixed(2)} out:$${m.pricing.output.toFixed(2)} - context ${Math.floor(m.context_length / 1000)}k`,
-        provider: this.name,
-        maxTokenAllowed: 8000,
-      }));
-    } catch (error: any) {
-      console.error('Error getting Together models:', error.message);
-      return [];
-    }
+    const { baseUrl: fetchBaseUrl, apiKey } = this.getProviderBaseUrlAndKey({
+      apiKeys,
+      providerSettings: settings,
+      serverEnv,
+      defaultBaseUrlKey: 'TOGETHER_API_BASE_URL',
+      defaultApiTokenKey: 'TOGETHER_API_KEY',
+    });
+    const baseUrl = fetchBaseUrl || 'https://api.together.xyz/v1';
+
+    if (!baseUrl || !apiKey) {
+      return [];
+    }
+
+    // console.log({ baseUrl, apiKey });
+
+    const response = await fetch(`${baseUrl}/models`, {
+      headers: {
+        Authorization: `Bearer ${apiKey}`,
+      },
+    });
+
+    const res = (await response.json()) as any;
+    const data = (res || []).filter((model: any) => model.type === 'chat');
+
+    return data.map((m: any) => ({
+      name: m.id,
+      label: `${m.display_name} - in:$${m.pricing.input.toFixed(2)} out:$${m.pricing.output.toFixed(2)} - context ${Math.floor(m.context_length / 1000)}k`,
+      provider: this.name,
+      maxTokenAllowed: 8000,
+    }));
   }

   getModelInstance(options: {