fix: updated logger and model caching minor bugfix #release (#895)

* fix: updated logger and model caching

* usage token stream issue fix

* minor changes

* updated starter template change to fix the app title

* starter template bugfix

* fixed hydration errors and raw logs

* removed raw log

* made auto select template false by default

* cleaner logs and updated logic to call dynamicModels only if a model is not found in the static models (see the sketch after this list)

* updated starter template instructions

* browser console log improved for firefox

* fixed provider icons
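
One item above changes the lookup order so that dynamicModels is only called when a model is not already present in the static list. The sketch below illustrates that order; it is not code from this commit, and the Provider shape and the resolveModel helper are assumptions for illustration, loosely based on the ModelInfo fields visible in the diff further down.

// Sketch of the lookup order described in the commit message: check the
// provider's statically declared models first, and only call the
// network-backed getDynamicModels() when the requested model is missing.
interface ModelInfo {
  name: string;
  label: string;
  provider: string;
  maxTokenAllowed: number;
}

interface Provider {
  name: string;
  staticModels: ModelInfo[];
  getDynamicModels(): Promise<ModelInfo[]>;
}

// Hypothetical helper; the real caching/lookup code lives elsewhere in the repo.
async function resolveModel(provider: Provider, modelName: string): Promise<ModelInfo | undefined> {
  const staticMatch = provider.staticModels.find((m) => m.name === modelName);

  if (staticMatch) {
    return staticMatch; // found in the static models, skip the dynamic fetch
  }

  const dynamicModels = await provider.getDynamicModels();

  return dynamicModels.find((m) => m.name === modelName);
}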
Anirban Kar
2024-12-31 22:47:32 +05:30
committed by GitHub
parent 389eedcac4
commit 6494f5ac2e
23 changed files with 478 additions and 567 deletions

@@ -22,33 +22,27 @@ export default class LMStudioProvider extends BaseProvider {
     settings?: IProviderSetting,
     serverEnv: Record<string, string> = {},
   ): Promise<ModelInfo[]> {
-    try {
-      const { baseUrl } = this.getProviderBaseUrlAndKey({
-        apiKeys,
-        providerSettings: settings,
-        serverEnv,
-        defaultBaseUrlKey: 'LMSTUDIO_API_BASE_URL',
-        defaultApiTokenKey: '',
-      });
-
-      if (!baseUrl) {
-        return [];
-      }
-
-      const response = await fetch(`${baseUrl}/v1/models`);
-      const data = (await response.json()) as { data: Array<{ id: string }> };
-
-      return data.data.map((model) => ({
-        name: model.id,
-        label: model.id,
-        provider: this.name,
-        maxTokenAllowed: 8000,
-      }));
-    } catch (error: any) {
-      console.log('Error getting LMStudio models:', error.message);
-      return [];
-    }
+    const { baseUrl } = this.getProviderBaseUrlAndKey({
+      apiKeys,
+      providerSettings: settings,
+      serverEnv,
+      defaultBaseUrlKey: 'LMSTUDIO_API_BASE_URL',
+      defaultApiTokenKey: '',
+    });
+
+    if (!baseUrl) {
+      return [];
+    }
+
+    const response = await fetch(`${baseUrl}/v1/models`);
+    const data = (await response.json()) as { data: Array<{ id: string }> };
+
+    return data.data.map((model) => ({
+      name: model.id,
+      label: model.id,
+      provider: this.name,
+      maxTokenAllowed: 8000,
+    }));
   }
 
   getModelInstance: (options: {
     model: string;
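
This hunk drops the provider-level try/catch, so a failed request to the LM Studio server now propagates out of getDynamicModels instead of being logged and swallowed there. The sketch below shows how a caller might guard against that; the listLMStudioModels wrapper is a hypothetical example, not code from this commit.

// Illustrative caller-side guard: after this change the provider no longer
// catches fetch/JSON errors itself, so the consumer decides how to degrade.
async function listLMStudioModels(provider: {
  getDynamicModels(): Promise<Array<{ name: string; label: string }>>;
}): Promise<Array<{ name: string; label: string }>> {
  try {
    return await provider.getDynamicModels();
  } catch (error) {
    // e.g. LM Studio is not running locally or the base URL is unreachable
    console.error('Could not fetch LM Studio models:', error);
    return [];
  }
}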