fix: updated logger and model caching, minor bugfixes #release (#895)

* fix: updated logger and model caching

* usage token stream issue fix

* minor changes

* updated starter template change to fix the app title

* starter template bugfix

* fixed hydration errors and raw logs

* removed raw log

* made auto select template false by default

* cleaner logs; dynamic models are now fetched only when a model is not found in the static model list

* updated starter template instructions

* browser console logging improved for Firefox

* provider icons fix
Author: Anirban Kar
Date: 2024-12-31 22:47:32 +05:30
Committed by: GitHub
Parent: 389eedcac4
Commit: 6494f5ac2e
23 changed files with 478 additions and 567 deletions

@@ -168,6 +168,7 @@ export const BaseChat = React.forwardRef<HTMLDivElement, BaseChatProps>(
   }, []);

   useEffect(() => {
+    if (typeof window !== 'undefined') {
       const providerSettings = getProviderSettings();
       let parsedApiKeys: Record<string, string> | undefined = {};
@@ -183,7 +184,7 @@ export const BaseChat = React.forwardRef<HTMLDivElement, BaseChatProps>(
       setIsModelLoading('all');
       initializeModelList({ apiKeys: parsedApiKeys, providerSettings })
         .then((modelList) => {
-          console.log('Model List: ', modelList);
+          // console.log('Model List: ', modelList);
           setModelList(modelList);
         })
         .catch((error) => {
@@ -192,6 +193,7 @@ export const BaseChat = React.forwardRef<HTMLDivElement, BaseChatProps>(
         .finally(() => {
           setIsModelLoading(undefined);
         });
+    }
   }, [providerList]);

   const onApiKeysChange = async (providerName: string, apiKey: string) => {
@@ -401,6 +403,8 @@ export const BaseChat = React.forwardRef<HTMLDivElement, BaseChatProps>(
                 <rect className={classNames(styles.PromptShine)} x="48" y="24" width="70" height="1"></rect>
               </svg>
               <div>
+                <ClientOnly>
+                  {() => (
                     <div className={isModelSettingsCollapsed ? 'hidden' : ''}>
                       <ModelSelector
                         key={provider?.name + ':' + modelList.length}
@@ -423,6 +427,8 @@ export const BaseChat = React.forwardRef<HTMLDivElement, BaseChatProps>(
                       />
                     )}
                   </div>
+                )}
+              </ClientOnly>
               </div>
               <FilePreview
                 files={uploadedFiles}

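This hunk is the hydration fix: model-list initialization only runs where `window` exists, and the model selector only renders on the client via `ClientOnly`, so server and client markup cannot diverge. A minimal sketch of how such a guard component is typically implemented (the project likely uses an existing `ClientOnly` such as the one from remix-utils; this standalone version is illustrative only):

```tsx
import { useEffect, useState, type ReactNode } from 'react';

export function ClientOnly({ children }: { children: () => ReactNode }) {
  const [mounted, setMounted] = useState(false);

  useEffect(() => setMounted(true), []);

  // Server render and first client render both produce null, so the hydrated
  // markup always matches; the real children appear only after mount.
  return mounted ? <>{children()}</> : null;
}
```
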
@@ -168,7 +168,8 @@ export const ChatImpl = memo(
   });

   useEffect(() => {
     const prompt = searchParams.get('prompt');
-    console.log(prompt, searchParams, model, provider);
+
+    // console.log(prompt, searchParams, model, provider);

     if (prompt) {
       setSearchParams({});
@@ -289,14 +290,14 @@ export const ChatImpl = memo(
       // reload();

-      const template = await selectStarterTemplate({
+      const { template, title } = await selectStarterTemplate({
         message: messageInput,
         model,
         provider,
       });

       if (template !== 'blank') {
-        const temResp = await getTemplates(template);
+        const temResp = await getTemplates(template, title);

         if (temResp) {
           const { assistantMessage, userMessage } = temResp;

@@ -6,9 +6,10 @@ import type { IProviderConfig } from '~/types/model';
 import { logStore } from '~/lib/stores/logs';

 // Import a default fallback icon
-import DefaultIcon from '/icons/Default.svg'; // Adjust the path as necessary
 import { providerBaseUrlEnvKeys } from '~/utils/constants';

+const DefaultIcon = '/icons/Default.svg'; // Adjust the path as necessary
+
 export default function ProvidersTab() {
   const { providers, updateProviderSettings, isLocalModel } = useSettings();
   const [filteredProviders, setFilteredProviders] = useState<IProviderConfig[]>([]);

@@ -5,7 +5,6 @@ import { renderToReadableStream } from 'react-dom/server';
 import { renderHeadToString } from 'remix-island';
 import { Head } from './root';
 import { themeStore } from '~/lib/stores/theme';
-import { initializeModelList } from '~/utils/constants';

 export default async function handleRequest(
   request: Request,
@@ -14,7 +13,7 @@ export default async function handleRequest(
   remixContext: EntryContext,
   _loadContext: AppLoadContext,
 ) {
-  await initializeModelList({});
+  // await initializeModelList({});

   const readable = await renderToReadableStream(<RemixServer context={remixContext} url={request.url} />, {
     signal: request.signal,

@@ -4,7 +4,6 @@ import { getSystemPrompt } from '~/lib/common/prompts/prompts';
 import {
   DEFAULT_MODEL,
   DEFAULT_PROVIDER,
-  getModelList,
   MODEL_REGEX,
   MODIFICATIONS_TAG_NAME,
   PROVIDER_LIST,
@@ -15,6 +14,8 @@ import ignore from 'ignore';
 import type { IProviderSetting } from '~/types/model';
 import { PromptLibrary } from '~/lib/common/prompt-library';
 import { allowedHTMLElements } from '~/utils/markdown';
+import { LLMManager } from '~/lib/modules/llm/manager';
+import { createScopedLogger } from '~/utils/logger';

 interface ToolResult<Name extends string, Args, Result> {
   toolCallId: string;
@@ -142,6 +143,8 @@ function extractPropertiesFromMessage(message: Message): { model: string; provid
   return { model, provider, content: cleanedContent };
 }

+const logger = createScopedLogger('stream-text');
+
 export async function streamText(props: {
   messages: Messages;
   env: Env;
@@ -158,15 +161,10 @@ export async function streamText(props: {
   let currentModel = DEFAULT_MODEL;
   let currentProvider = DEFAULT_PROVIDER.name;

-  const MODEL_LIST = await getModelList({ apiKeys, providerSettings, serverEnv: serverEnv as any });
-
   const processedMessages = messages.map((message) => {
     if (message.role === 'user') {
       const { model, provider, content } = extractPropertiesFromMessage(message);

-      if (MODEL_LIST.find((m) => m.name === model)) {
         currentModel = model;
-      }

       currentProvider = provider;

       return { ...message, content };
@@ -183,12 +181,37 @@ export async function streamText(props: {
     return message;
   });

-  const modelDetails = MODEL_LIST.find((m) => m.name === currentModel);
+  const provider = PROVIDER_LIST.find((p) => p.name === currentProvider) || DEFAULT_PROVIDER;
+  const staticModels = LLMManager.getInstance().getStaticModelListFromProvider(provider);
+  let modelDetails = staticModels.find((m) => m.name === currentModel);
+
+  if (!modelDetails) {
+    const modelsList = [
+      ...(provider.staticModels || []),
+      ...(await LLMManager.getInstance().getModelListFromProvider(provider, {
+        apiKeys,
+        providerSettings,
+        serverEnv: serverEnv as any,
+      })),
+    ];
+
+    if (!modelsList.length) {
+      throw new Error(`No models found for provider ${provider.name}`);
+    }
+
+    modelDetails = modelsList.find((m) => m.name === currentModel);
+
+    if (!modelDetails) {
+      // Fallback to first model
+      logger.warn(
+        `MODEL [${currentModel}] not found in provider [${provider.name}]. Falling back to first model. ${modelsList[0].name}`,
+      );
+      modelDetails = modelsList[0];
+    }
+  }

   const dynamicMaxTokens = modelDetails && modelDetails.maxTokenAllowed ? modelDetails.maxTokenAllowed : MAX_TOKENS;

-  const provider = PROVIDER_LIST.find((p) => p.name === currentProvider) || DEFAULT_PROVIDER;
-
   let systemPrompt =
     PromptLibrary.getPropmtFromLibrary(promptId || 'default', {
       cwd: WORK_DIR,
@@ -201,6 +224,8 @@ export async function streamText(props: {
     systemPrompt = `${systemPrompt}\n\n ${codeContext}`;
   }

+  logger.info(`Sending llm call to ${provider.name} with model ${modelDetails.name}`);
+
   return _streamText({
     model: provider.getModelInstance({
       model: currentModel,

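The net effect of this hunk is a cheaper, more forgiving model lookup, matching the "call dynamic models only if not found in static models" bullet above. A condensed sketch of the new resolution order (simplified from the diff; the real code also merges `provider.staticModels` into the fetched list and throws when no models are found):

```ts
async function resolveModel(manager: LLMManager, provider: BaseProvider, name: string) {
  // 1. Cheap path first: static models require no network call.
  const staticHit = manager.getStaticModelListFromProvider(provider).find((m) => m.name === name);

  if (staticHit) {
    return staticHit;
  }

  // 2. Only on a miss do we pay for dynamic models (served from cache when possible).
  const all = await manager.getModelListFromProvider(provider, { /* apiKeys, providerSettings, serverEnv */ });

  // 3. Last resort: fall back to the provider's first model instead of failing the request.
  return all.find((m) => m.name === name) ?? all[0];
}
```
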
@@ -8,6 +8,10 @@ export abstract class BaseProvider implements ProviderInfo {
   abstract name: string;
   abstract staticModels: ModelInfo[];
   abstract config: ProviderConfig;
+  cachedDynamicModels?: {
+    cacheId: string;
+    models: ModelInfo[];
+  };

   getApiKeyLink?: string;
   labelForGetApiKey?: string;
@@ -49,6 +53,54 @@ export abstract class BaseProvider implements ProviderInfo {
       apiKey,
     };
   }

+  getModelsFromCache(options: {
+    apiKeys?: Record<string, string>;
+    providerSettings?: Record<string, IProviderSetting>;
+    serverEnv?: Record<string, string>;
+  }): ModelInfo[] | null {
+    if (!this.cachedDynamicModels) {
+      // console.log('no dynamic models', this.name);
+      return null;
+    }
+
+    const cacheKey = this.cachedDynamicModels.cacheId;
+    const generatedCacheKey = this.getDynamicModelsCacheKey(options);
+
+    if (cacheKey !== generatedCacheKey) {
+      // console.log('cache key mismatch', this.name, cacheKey, generatedCacheKey);
+      this.cachedDynamicModels = undefined;
+      return null;
+    }
+
+    return this.cachedDynamicModels.models;
+  }
+
+  getDynamicModelsCacheKey(options: {
+    apiKeys?: Record<string, string>;
+    providerSettings?: Record<string, IProviderSetting>;
+    serverEnv?: Record<string, string>;
+  }) {
+    return JSON.stringify({
+      apiKeys: options.apiKeys?.[this.name],
+      providerSettings: options.providerSettings?.[this.name],
+      serverEnv: options.serverEnv,
+    });
+  }
+
+  storeDynamicModels(
+    options: {
+      apiKeys?: Record<string, string>;
+      providerSettings?: Record<string, IProviderSetting>;
+      serverEnv?: Record<string, string>;
+    },
+    models: ModelInfo[],
+  ) {
+    const cacheId = this.getDynamicModelsCacheKey(options);
+
+    // console.log('caching dynamic models', this.name, cacheId);
+    this.cachedDynamicModels = {
+      cacheId,
+      models,
+    };
+  }
+
   // Declare the optional getDynamicModels method
   getDynamicModels?(

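The cache key is simply a JSON snapshot of everything that can change a provider's dynamic model list: its API key, its settings, and the server env. A short illustrative walkthrough (`provider` stands in for any registered `BaseProvider` instance, `models` for a fetched `ModelInfo[]`, and the provider/key names are made up):

```ts
const opts = { apiKeys: { MyProvider: 'key-1' } };

provider.storeDynamicModels(opts, models);
// cacheId === '{"apiKeys":"key-1"}' (undefined fields are dropped by JSON.stringify)

provider.getModelsFromCache(opts);
// -> models (keys match, cache hit)

provider.getModelsFromCache({ apiKeys: { MyProvider: 'key-2' } });
// -> null (key changed, the stale entry is discarded so the next fetch repopulates it)
```
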
@@ -2,7 +2,9 @@ import type { IProviderSetting } from '~/types/model';
 import { BaseProvider } from './base-provider';
 import type { ModelInfo, ProviderInfo } from './types';
 import * as providers from './registry';
+import { createScopedLogger } from '~/utils/logger';

+const logger = createScopedLogger('LLMManager');
 export class LLMManager {
   private static _instance: LLMManager;
   private _providers: Map<string, BaseProvider> = new Map();
@@ -40,22 +42,22 @@ export class LLMManager {
         try {
           this.registerProvider(provider);
         } catch (error: any) {
-          console.log('Failed To Register Provider: ', provider.name, 'error:', error.message);
+          logger.warn('Failed To Register Provider: ', provider.name, 'error:', error.message);
         }
       }
     }
   } catch (error) {
-    console.error('Error registering providers:', error);
+    logger.error('Error registering providers:', error);
   }
 }

 registerProvider(provider: BaseProvider) {
   if (this._providers.has(provider.name)) {
-    console.warn(`Provider ${provider.name} is already registered. Skipping.`);
+    logger.warn(`Provider ${provider.name} is already registered. Skipping.`);
     return;
   }

-  console.log('Registering Provider: ', provider.name);
+  logger.info('Registering Provider: ', provider.name);
   this._providers.set(provider.name, provider);
   this._modelList = [...this._modelList, ...provider.staticModels];
 }
@@ -93,12 +95,28 @@ export class LLMManager {
         (provider): provider is BaseProvider & Required<Pick<ProviderInfo, 'getDynamicModels'>> =>
           !!provider.getDynamicModels,
       )
-      .map((provider) =>
-        provider.getDynamicModels(apiKeys, providerSettings?.[provider.name], serverEnv).catch((err) => {
-          console.error(`Error getting dynamic models ${provider.name} :`, err);
-          return [];
-        }),
-      ),
+      .map(async (provider) => {
+        const cachedModels = provider.getModelsFromCache(options);
+
+        if (cachedModels) {
+          return cachedModels;
+        }
+
+        const dynamicModels = await provider
+          .getDynamicModels(apiKeys, providerSettings?.[provider.name], serverEnv)
+          .then((models) => {
+            logger.info(`Caching ${models.length} dynamic models for ${provider.name}`);
+            provider.storeDynamicModels(options, models);
+
+            return models;
+          })
+          .catch((err) => {
+            logger.error(`Error getting dynamic models ${provider.name} :`, err);
+            return [];
+          });
+
+        return dynamicModels;
+      }),
   );

   // Combine static and dynamic models
@@ -110,6 +128,68 @@ export class LLMManager {
     return modelList;
   }

+  getStaticModelList() {
+    return [...this._providers.values()].flatMap((p) => p.staticModels || []);
+  }
+
+  async getModelListFromProvider(
+    providerArg: BaseProvider,
+    options: {
+      apiKeys?: Record<string, string>;
+      providerSettings?: Record<string, IProviderSetting>;
+      serverEnv?: Record<string, string>;
+    },
+  ): Promise<ModelInfo[]> {
+    const provider = this._providers.get(providerArg.name);
+
+    if (!provider) {
+      throw new Error(`Provider ${providerArg.name} not found`);
+    }
+
+    const staticModels = provider.staticModels || [];
+
+    if (!provider.getDynamicModels) {
+      return staticModels;
+    }
+
+    const { apiKeys, providerSettings, serverEnv } = options;
+
+    const cachedModels = provider.getModelsFromCache({
+      apiKeys,
+      providerSettings,
+      serverEnv,
+    });
+
+    if (cachedModels) {
+      logger.info(`Found ${cachedModels.length} cached models for ${provider.name}`);
+      return [...cachedModels, ...staticModels];
+    }
+
+    logger.info(`Getting dynamic models for ${provider.name}`);
+
+    const dynamicModels = await provider
+      .getDynamicModels?.(apiKeys, providerSettings?.[provider.name], serverEnv)
+      .then((models) => {
+        logger.info(`Got ${models.length} dynamic models for ${provider.name}`);
+        provider.storeDynamicModels(options, models);
+
+        return models;
+      })
+      .catch((err) => {
+        logger.error(`Error getting dynamic models ${provider.name} :`, err);
+        return [];
+      });
+
+    return [...dynamicModels, ...staticModels];
+  }
+
+  getStaticModelListFromProvider(providerArg: BaseProvider) {
+    const provider = this._providers.get(providerArg.name);
+
+    if (!provider) {
+      throw new Error(`Provider ${providerArg.name} not found`);
+    }
+
+    return [...(provider.staticModels || [])];
+  }
+
   getDefaultProvider(): BaseProvider {
     const firstProvider = this._providers.values().next().value;

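A sketch of a call site for the new per-provider lookup (`someProvider` stands in for any registered `BaseProvider`; the option values are placeholders, not taken from the diff):

```ts
const manager = LLMManager.getInstance();

const models = await manager.getModelListFromProvider(someProvider, {
  apiKeys,          // per-provider API keys, if any
  providerSettings, // user overrides such as a custom base URL
  serverEnv,        // server-side environment variables
});
// The first call fetches and caches dynamic models; later calls with the same
// keys/settings/env are served from the cache and merged with staticModels.
```
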
@@ -25,6 +25,30 @@ export default class HuggingFaceProvider extends BaseProvider {
       provider: 'HuggingFace',
       maxTokenAllowed: 8000,
     },
+    {
+      name: 'codellama/CodeLlama-34b-Instruct-hf',
+      label: 'CodeLlama-34b-Instruct (HuggingFace)',
+      provider: 'HuggingFace',
+      maxTokenAllowed: 8000,
+    },
+    {
+      name: 'NousResearch/Hermes-3-Llama-3.1-8B',
+      label: 'Hermes-3-Llama-3.1-8B (HuggingFace)',
+      provider: 'HuggingFace',
+      maxTokenAllowed: 8000,
+    },
+    {
+      name: 'Qwen/Qwen2.5-Coder-32B-Instruct',
+      label: 'Qwen2.5-Coder-32B-Instruct (HuggingFace)',
+      provider: 'HuggingFace',
+      maxTokenAllowed: 8000,
+    },
+    {
+      name: 'Qwen/Qwen2.5-72B-Instruct',
+      label: 'Qwen2.5-72B-Instruct (HuggingFace)',
+      provider: 'HuggingFace',
+      maxTokenAllowed: 8000,
+    },
     {
       name: 'meta-llama/Llama-3.1-70B-Instruct',
       label: 'Llama-3.1-70B-Instruct (HuggingFace)',
@@ -37,6 +61,24 @@ export default class HuggingFaceProvider extends BaseProvider {
       provider: 'HuggingFace',
       maxTokenAllowed: 8000,
     },
+    {
+      name: '01-ai/Yi-1.5-34B-Chat',
+      label: 'Yi-1.5-34B-Chat (HuggingFace)',
+      provider: 'HuggingFace',
+      maxTokenAllowed: 8000,
+    },
+    {
+      name: 'codellama/CodeLlama-34b-Instruct-hf',
+      label: 'CodeLlama-34b-Instruct (HuggingFace)',
+      provider: 'HuggingFace',
+      maxTokenAllowed: 8000,
+    },
+    {
+      name: 'NousResearch/Hermes-3-Llama-3.1-8B',
+      label: 'Hermes-3-Llama-3.1-8B (HuggingFace)',
+      provider: 'HuggingFace',
+      maxTokenAllowed: 8000,
+    },
   ];

   getModelInstance(options: {

@@ -50,7 +50,6 @@ export default class HyperbolicProvider extends BaseProvider {
     settings?: IProviderSetting,
     serverEnv: Record<string, string> = {},
   ): Promise<ModelInfo[]> {
-    try {
       const { baseUrl: fetchBaseUrl, apiKey } = this.getProviderBaseUrlAndKey({
         apiKeys,
         providerSettings: settings,
@@ -60,8 +59,8 @@ export default class HyperbolicProvider extends BaseProvider {
       });
       const baseUrl = fetchBaseUrl || 'https://api.hyperbolic.xyz/v1';

-      if (!baseUrl || !apiKey) {
-        return [];
+      if (!apiKey) {
+        throw `Missing Api Key configuration for ${this.name} provider`;
       }

       const response = await fetch(`${baseUrl}/models`, {
@@ -80,10 +79,6 @@ export default class HyperbolicProvider extends BaseProvider {
         provider: this.name,
         maxTokenAllowed: m.context_length || 8000,
       }));
-    } catch (error: any) {
-      console.error('Error getting Hyperbolic models:', error.message);
-      return [];
-    }
   }

   getModelInstance(options: {
@@ -103,8 +98,7 @@ export default class HyperbolicProvider extends BaseProvider {
     });

     if (!apiKey) {
-      console.log(`Missing configuration for ${this.name} provider`);
-      throw new Error(`Missing configuration for ${this.name} provider`);
+      throw `Missing Api Key configuration for ${this.name} provider`;
     }

     const openai = createOpenAI({

@@ -22,7 +22,6 @@ export default class LMStudioProvider extends BaseProvider {
     settings?: IProviderSetting,
     serverEnv: Record<string, string> = {},
   ): Promise<ModelInfo[]> {
-    try {
       const { baseUrl } = this.getProviderBaseUrlAndKey({
         apiKeys,
         providerSettings: settings,
@@ -44,11 +43,6 @@ export default class LMStudioProvider extends BaseProvider {
         provider: this.name,
         maxTokenAllowed: 8000,
       }));
-    } catch (error: any) {
-      console.log('Error getting LMStudio models:', error.message);
-      return [];
-    }
   }

   getModelInstance: (options: {
     model: string;

@@ -45,7 +45,6 @@ export default class OllamaProvider extends BaseProvider {
     settings?: IProviderSetting,
     serverEnv: Record<string, string> = {},
   ): Promise<ModelInfo[]> {
-    try {
       const { baseUrl } = this.getProviderBaseUrlAndKey({
         apiKeys,
         providerSettings: settings,
@@ -69,10 +68,6 @@ export default class OllamaProvider extends BaseProvider {
         provider: this.name,
         maxTokenAllowed: 8000,
       }));
-    } catch (e) {
-      console.error('Failed to get Ollama models:', e);
-      return [];
-    }
   }

   getModelInstance: (options: {
     model: string;

@@ -27,7 +27,6 @@ export default class OpenRouterProvider extends BaseProvider {
   };

   staticModels: ModelInfo[] = [
-    { name: 'gpt-4o', label: 'GPT-4o', provider: 'OpenAI', maxTokenAllowed: 8000 },
     {
       name: 'anthropic/claude-3.5-sonnet',
       label: 'Anthropic: Claude 3.5 Sonnet (OpenRouter)',

@@ -19,7 +19,6 @@ export default class OpenAILikeProvider extends BaseProvider {
     settings?: IProviderSetting,
     serverEnv: Record<string, string> = {},
   ): Promise<ModelInfo[]> {
-    try {
       const { baseUrl, apiKey } = this.getProviderBaseUrlAndKey({
         apiKeys,
         providerSettings: settings,
@@ -46,10 +45,6 @@ export default class OpenAILikeProvider extends BaseProvider {
         provider: this.name,
         maxTokenAllowed: 8000,
       }));
-    } catch (error) {
-      console.error('Error getting OpenAILike models:', error);
-      return [];
-    }
   }

   getModelInstance(options: {

@@ -13,6 +13,7 @@ export default class OpenAIProvider extends BaseProvider {
   };

   staticModels: ModelInfo[] = [
+    { name: 'gpt-4o', label: 'GPT-4o', provider: 'OpenAI', maxTokenAllowed: 8000 },
     { name: 'gpt-4o-mini', label: 'GPT-4o Mini', provider: 'OpenAI', maxTokenAllowed: 8000 },
     { name: 'gpt-4-turbo', label: 'GPT-4 Turbo', provider: 'OpenAI', maxTokenAllowed: 8000 },
     { name: 'gpt-4', label: 'GPT-4', provider: 'OpenAI', maxTokenAllowed: 8000 },

@@ -38,7 +38,6 @@ export default class TogetherProvider extends BaseProvider {
     settings?: IProviderSetting,
     serverEnv: Record<string, string> = {},
   ): Promise<ModelInfo[]> {
-    try {
       const { baseUrl: fetchBaseUrl, apiKey } = this.getProviderBaseUrlAndKey({
         apiKeys,
         providerSettings: settings,
@@ -69,10 +68,6 @@ export default class TogetherProvider extends BaseProvider {
         provider: this.name,
         maxTokenAllowed: 8000,
       }));
-    } catch (error: any) {
-      console.error('Error getting Together models:', error.message);
-      return [];
-    }
   }

   getModelInstance(options: {

@@ -55,7 +55,8 @@ interface MessageState {
 function cleanoutMarkdownSyntax(content: string) {
   const codeBlockRegex = /^\s*```\w*\n([\s\S]*?)\n\s*```\s*$/;
   const match = content.match(codeBlockRegex);
-  console.log('matching', !!match, content);
+
+  // console.log('matching', !!match, content);

   if (match) {
     return match[1]; // Remove common leading 4-space indent

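For reference, the function above unwraps a string that is a single fenced code block. A quick worked example (inputs invented, and assuming the function returns its input unchanged when no fence matches, which the diff does not show):

```ts
cleanoutMarkdownSyntax('```js\nconsole.log(1);\n```'); // -> 'console.log(1);'
cleanoutMarkdownSyntax('plain text');                  // -> 'plain text'
```
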
@@ -54,5 +54,5 @@ export const promptStore = atom<string>('default');
 export const latestBranchStore = atom(false);

-export const autoSelectStarterTemplate = atom(true);
+export const autoSelectStarterTemplate = atom(false);

 export const enableContextOptimizationStore = atom(false);

@@ -5,11 +5,14 @@ import { CONTINUE_PROMPT } from '~/lib/common/prompts/prompts';
 import { streamText, type Messages, type StreamingOptions } from '~/lib/.server/llm/stream-text';
 import SwitchableStream from '~/lib/.server/llm/switchable-stream';
 import type { IProviderSetting } from '~/types/model';
+import { createScopedLogger } from '~/utils/logger';

 export async function action(args: ActionFunctionArgs) {
   return chatAction(args);
 }

+const logger = createScopedLogger('api.chat');
+
 function parseCookies(cookieHeader: string): Record<string, string> {
   const cookies: Record<string, string> = {};
@@ -54,7 +57,7 @@ async function chatAction({ context, request }: ActionFunctionArgs) {
   const options: StreamingOptions = {
     toolChoice: 'none',
     onFinish: async ({ text: content, finishReason, usage }) => {
-      console.log('usage', usage);
+      logger.debug('usage', JSON.stringify(usage));

       if (usage) {
         cumulativeUsage.completionTokens += usage.completionTokens || 0;
@@ -63,9 +66,8 @@ async function chatAction({ context, request }: ActionFunctionArgs) {
       }

       if (finishReason !== 'length') {
-        return stream
-          .switchSource(
-            createDataStream({
+        const encoder = new TextEncoder();
+        const usageStream = createDataStream({
           async execute(dataStream) {
             dataStream.writeMessageAnnotation({
               type: 'usage',
@@ -77,9 +79,20 @@ async function chatAction({ context, request }: ActionFunctionArgs) {
             });
           },
           onError: (error: any) => `Custom error: ${error.message}`,
-            }),
-          )
-          .then(() => stream.close());
+        }).pipeThrough(
+          new TransformStream({
+            transform: (chunk, controller) => {
+              // Convert the string stream to a byte stream
+              const str = typeof chunk === 'string' ? chunk : JSON.stringify(chunk);
+              controller.enqueue(encoder.encode(str));
+            },
+          }),
+        );
+
+        await stream.switchSource(usageStream);
+        await new Promise((resolve) => setTimeout(resolve, 0));
+        stream.close();
+
+        return;
       }

       if (stream.switches >= MAX_RESPONSE_SEGMENTS) {
@@ -88,7 +101,7 @@ async function chatAction({ context, request }: ActionFunctionArgs) {
       const switchesLeft = MAX_RESPONSE_SEGMENTS - stream.switches;

-      console.log(`Reached max token limit (${MAX_TOKENS}): Continuing message (${switchesLeft} switches left)`);
+      logger.info(`Reached max token limit (${MAX_TOKENS}): Continuing message (${switchesLeft} switches left)`);

       messages.push({ role: 'assistant', content });
       messages.push({ role: 'user', content: CONTINUE_PROMPT });
@@ -104,7 +117,9 @@ async function chatAction({ context, request }: ActionFunctionArgs) {
         contextOptimization,
       });

-      return stream.switchSource(result.toDataStream());
+      stream.switchSource(result.toDataStream());
+
+      return;
     },
   };
@@ -128,7 +143,7 @@ async function chatAction({ context, request }: ActionFunctionArgs) {
     },
   });
 } catch (error: any) {
-  console.error(error);
+  logger.error(error);

   if (error.message?.includes('API key')) {
     throw new Response('Invalid or missing API key', {

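The usage-token-stream fix above boils down to an encoding mismatch: `createDataStream` emits strings, while the switched response body consumes bytes. A standalone sketch of that bridge (`stringStream` stands in for any `ReadableStream<string>`):

```ts
const encoder = new TextEncoder();

// Pipe a stream of strings through a TransformStream that emits Uint8Array
// chunks, which a Response body (or SwitchableStream source) can consume.
const byteStream = stringStream.pipeThrough(
  new TransformStream<string, Uint8Array>({
    transform(chunk, controller) {
      controller.enqueue(encoder.encode(chunk));
    },
  }),
);
```
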
@@ -19,312 +19,6 @@ export const DEFAULT_PROVIDER = llmManager.getDefaultProvider();
let MODEL_LIST = llmManager.getModelList();
/*
*const PROVIDER_LIST_OLD: ProviderInfo[] = [
* {
* name: 'Anthropic',
* staticModels: [
* {
* name: 'claude-3-5-sonnet-latest',
* label: 'Claude 3.5 Sonnet (new)',
* provider: 'Anthropic',
* maxTokenAllowed: 8000,
* },
* {
* name: 'claude-3-5-sonnet-20240620',
* label: 'Claude 3.5 Sonnet (old)',
* provider: 'Anthropic',
* maxTokenAllowed: 8000,
* },
* {
* name: 'claude-3-5-haiku-latest',
* label: 'Claude 3.5 Haiku (new)',
* provider: 'Anthropic',
* maxTokenAllowed: 8000,
* },
* { name: 'claude-3-opus-latest', label: 'Claude 3 Opus', provider: 'Anthropic', maxTokenAllowed: 8000 },
* { name: 'claude-3-sonnet-20240229', label: 'Claude 3 Sonnet', provider: 'Anthropic', maxTokenAllowed: 8000 },
* { name: 'claude-3-haiku-20240307', label: 'Claude 3 Haiku', provider: 'Anthropic', maxTokenAllowed: 8000 },
* ],
* getApiKeyLink: 'https://console.anthropic.com/settings/keys',
* },
* {
* name: 'Ollama',
* staticModels: [],
* getDynamicModels: getOllamaModels,
* getApiKeyLink: 'https://ollama.com/download',
* labelForGetApiKey: 'Download Ollama',
* icon: 'i-ph:cloud-arrow-down',
* },
* {
* name: 'OpenAILike',
* staticModels: [],
* getDynamicModels: getOpenAILikeModels,
* },
* {
* name: 'Cohere',
* staticModels: [
* { name: 'command-r-plus-08-2024', label: 'Command R plus Latest', provider: 'Cohere', maxTokenAllowed: 4096 },
* { name: 'command-r-08-2024', label: 'Command R Latest', provider: 'Cohere', maxTokenAllowed: 4096 },
* { name: 'command-r-plus', label: 'Command R plus', provider: 'Cohere', maxTokenAllowed: 4096 },
* { name: 'command-r', label: 'Command R', provider: 'Cohere', maxTokenAllowed: 4096 },
* { name: 'command', label: 'Command', provider: 'Cohere', maxTokenAllowed: 4096 },
* { name: 'command-nightly', label: 'Command Nightly', provider: 'Cohere', maxTokenAllowed: 4096 },
* { name: 'command-light', label: 'Command Light', provider: 'Cohere', maxTokenAllowed: 4096 },
* { name: 'command-light-nightly', label: 'Command Light Nightly', provider: 'Cohere', maxTokenAllowed: 4096 },
* { name: 'c4ai-aya-expanse-8b', label: 'c4AI Aya Expanse 8b', provider: 'Cohere', maxTokenAllowed: 4096 },
* { name: 'c4ai-aya-expanse-32b', label: 'c4AI Aya Expanse 32b', provider: 'Cohere', maxTokenAllowed: 4096 },
* ],
* getApiKeyLink: 'https://dashboard.cohere.com/api-keys',
* },
* {
* name: 'OpenRouter',
* staticModels: [
* { name: 'gpt-4o', label: 'GPT-4o', provider: 'OpenAI', maxTokenAllowed: 8000 },
* {
* name: 'anthropic/claude-3.5-sonnet',
* label: 'Anthropic: Claude 3.5 Sonnet (OpenRouter)',
* provider: 'OpenRouter',
* maxTokenAllowed: 8000,
* },
* {
* name: 'anthropic/claude-3-haiku',
* label: 'Anthropic: Claude 3 Haiku (OpenRouter)',
* provider: 'OpenRouter',
* maxTokenAllowed: 8000,
* },
* {
* name: 'deepseek/deepseek-coder',
* label: 'Deepseek-Coder V2 236B (OpenRouter)',
* provider: 'OpenRouter',
* maxTokenAllowed: 8000,
* },
* {
* name: 'google/gemini-flash-1.5',
* label: 'Google Gemini Flash 1.5 (OpenRouter)',
* provider: 'OpenRouter',
* maxTokenAllowed: 8000,
* },
* {
* name: 'google/gemini-pro-1.5',
* label: 'Google Gemini Pro 1.5 (OpenRouter)',
* provider: 'OpenRouter',
* maxTokenAllowed: 8000,
* },
* { name: 'x-ai/grok-beta', label: 'xAI Grok Beta (OpenRouter)', provider: 'OpenRouter', maxTokenAllowed: 8000 },
* {
* name: 'mistralai/mistral-nemo',
* label: 'OpenRouter Mistral Nemo (OpenRouter)',
* provider: 'OpenRouter',
* maxTokenAllowed: 8000,
* },
* {
* name: 'qwen/qwen-110b-chat',
* label: 'OpenRouter Qwen 110b Chat (OpenRouter)',
* provider: 'OpenRouter',
* maxTokenAllowed: 8000,
* },
* { name: 'cohere/command', label: 'Cohere Command (OpenRouter)', provider: 'OpenRouter', maxTokenAllowed: 4096 },
* ],
* getDynamicModels: getOpenRouterModels,
* getApiKeyLink: 'https://openrouter.ai/settings/keys',
* },
* {
* name: 'Google',
* staticModels: [
* { name: 'gemini-1.5-flash-latest', label: 'Gemini 1.5 Flash', provider: 'Google', maxTokenAllowed: 8192 },
* { name: 'gemini-2.0-flash-exp', label: 'Gemini 2.0 Flash', provider: 'Google', maxTokenAllowed: 8192 },
* { name: 'gemini-1.5-flash-002', label: 'Gemini 1.5 Flash-002', provider: 'Google', maxTokenAllowed: 8192 },
* { name: 'gemini-1.5-flash-8b', label: 'Gemini 1.5 Flash-8b', provider: 'Google', maxTokenAllowed: 8192 },
* { name: 'gemini-1.5-pro-latest', label: 'Gemini 1.5 Pro', provider: 'Google', maxTokenAllowed: 8192 },
* { name: 'gemini-1.5-pro-002', label: 'Gemini 1.5 Pro-002', provider: 'Google', maxTokenAllowed: 8192 },
* { name: 'gemini-exp-1206', label: 'Gemini exp-1206', provider: 'Google', maxTokenAllowed: 8192 },
* ],
* getApiKeyLink: 'https://aistudio.google.com/app/apikey',
* },
* {
* name: 'Groq',
* staticModels: [
* { name: 'llama-3.1-8b-instant', label: 'Llama 3.1 8b (Groq)', provider: 'Groq', maxTokenAllowed: 8000 },
* { name: 'llama-3.2-11b-vision-preview', label: 'Llama 3.2 11b (Groq)', provider: 'Groq', maxTokenAllowed: 8000 },
* { name: 'llama-3.2-90b-vision-preview', label: 'Llama 3.2 90b (Groq)', provider: 'Groq', maxTokenAllowed: 8000 },
* { name: 'llama-3.2-3b-preview', label: 'Llama 3.2 3b (Groq)', provider: 'Groq', maxTokenAllowed: 8000 },
* { name: 'llama-3.2-1b-preview', label: 'Llama 3.2 1b (Groq)', provider: 'Groq', maxTokenAllowed: 8000 },
* { name: 'llama-3.3-70b-versatile', label: 'Llama 3.3 70b (Groq)', provider: 'Groq', maxTokenAllowed: 8000 },
* ],
* getApiKeyLink: 'https://console.groq.com/keys',
* },
* {
* name: 'HuggingFace',
* staticModels: [
* {
* name: 'Qwen/Qwen2.5-Coder-32B-Instruct',
* label: 'Qwen2.5-Coder-32B-Instruct (HuggingFace)',
* provider: 'HuggingFace',
* maxTokenAllowed: 8000,
* },
* {
* name: '01-ai/Yi-1.5-34B-Chat',
* label: 'Yi-1.5-34B-Chat (HuggingFace)',
* provider: 'HuggingFace',
* maxTokenAllowed: 8000,
* },
* {
* name: 'codellama/CodeLlama-34b-Instruct-hf',
* label: 'CodeLlama-34b-Instruct (HuggingFace)',
* provider: 'HuggingFace',
* maxTokenAllowed: 8000,
* },
* {
* name: 'NousResearch/Hermes-3-Llama-3.1-8B',
* label: 'Hermes-3-Llama-3.1-8B (HuggingFace)',
* provider: 'HuggingFace',
* maxTokenAllowed: 8000,
* },
* {
* name: 'Qwen/Qwen2.5-Coder-32B-Instruct',
* label: 'Qwen2.5-Coder-32B-Instruct (HuggingFace)',
* provider: 'HuggingFace',
* maxTokenAllowed: 8000,
* },
* {
* name: 'Qwen/Qwen2.5-72B-Instruct',
* label: 'Qwen2.5-72B-Instruct (HuggingFace)',
* provider: 'HuggingFace',
* maxTokenAllowed: 8000,
* },
* {
* name: 'meta-llama/Llama-3.1-70B-Instruct',
* label: 'Llama-3.1-70B-Instruct (HuggingFace)',
* provider: 'HuggingFace',
* maxTokenAllowed: 8000,
* },
* {
* name: 'meta-llama/Llama-3.1-405B',
* label: 'Llama-3.1-405B (HuggingFace)',
* provider: 'HuggingFace',
* maxTokenAllowed: 8000,
* },
* {
* name: '01-ai/Yi-1.5-34B-Chat',
* label: 'Yi-1.5-34B-Chat (HuggingFace)',
* provider: 'HuggingFace',
* maxTokenAllowed: 8000,
* },
* {
* name: 'codellama/CodeLlama-34b-Instruct-hf',
* label: 'CodeLlama-34b-Instruct (HuggingFace)',
* provider: 'HuggingFace',
* maxTokenAllowed: 8000,
* },
* {
* name: 'NousResearch/Hermes-3-Llama-3.1-8B',
* label: 'Hermes-3-Llama-3.1-8B (HuggingFace)',
* provider: 'HuggingFace',
* maxTokenAllowed: 8000,
* },
* ],
* getApiKeyLink: 'https://huggingface.co/settings/tokens',
* },
* {
* name: 'OpenAI',
* staticModels: [
* { name: 'gpt-4o-mini', label: 'GPT-4o Mini', provider: 'OpenAI', maxTokenAllowed: 8000 },
* { name: 'gpt-4-turbo', label: 'GPT-4 Turbo', provider: 'OpenAI', maxTokenAllowed: 8000 },
* { name: 'gpt-4', label: 'GPT-4', provider: 'OpenAI', maxTokenAllowed: 8000 },
* { name: 'gpt-3.5-turbo', label: 'GPT-3.5 Turbo', provider: 'OpenAI', maxTokenAllowed: 8000 },
* ],
* getApiKeyLink: 'https://platform.openai.com/api-keys',
* },
* {
* name: 'xAI',
* staticModels: [{ name: 'grok-beta', label: 'xAI Grok Beta', provider: 'xAI', maxTokenAllowed: 8000 }],
* getApiKeyLink: 'https://docs.x.ai/docs/quickstart#creating-an-api-key',
* },
* {
* name: 'Deepseek',
* staticModels: [
* { name: 'deepseek-coder', label: 'Deepseek-Coder', provider: 'Deepseek', maxTokenAllowed: 8000 },
* { name: 'deepseek-chat', label: 'Deepseek-Chat', provider: 'Deepseek', maxTokenAllowed: 8000 },
* ],
* getApiKeyLink: 'https://platform.deepseek.com/apiKeys',
* },
* {
* name: 'Mistral',
* staticModels: [
* { name: 'open-mistral-7b', label: 'Mistral 7B', provider: 'Mistral', maxTokenAllowed: 8000 },
* { name: 'open-mixtral-8x7b', label: 'Mistral 8x7B', provider: 'Mistral', maxTokenAllowed: 8000 },
* { name: 'open-mixtral-8x22b', label: 'Mistral 8x22B', provider: 'Mistral', maxTokenAllowed: 8000 },
* { name: 'open-codestral-mamba', label: 'Codestral Mamba', provider: 'Mistral', maxTokenAllowed: 8000 },
* { name: 'open-mistral-nemo', label: 'Mistral Nemo', provider: 'Mistral', maxTokenAllowed: 8000 },
* { name: 'ministral-8b-latest', label: 'Mistral 8B', provider: 'Mistral', maxTokenAllowed: 8000 },
* { name: 'mistral-small-latest', label: 'Mistral Small', provider: 'Mistral', maxTokenAllowed: 8000 },
* { name: 'codestral-latest', label: 'Codestral', provider: 'Mistral', maxTokenAllowed: 8000 },
* { name: 'mistral-large-latest', label: 'Mistral Large Latest', provider: 'Mistral', maxTokenAllowed: 8000 },
* ],
* getApiKeyLink: 'https://console.mistral.ai/api-keys/',
* },
* {
* name: 'LMStudio',
* staticModels: [],
* getDynamicModels: getLMStudioModels,
* getApiKeyLink: 'https://lmstudio.ai/',
* labelForGetApiKey: 'Get LMStudio',
* icon: 'i-ph:cloud-arrow-down',
* },
* {
* name: 'Together',
* getDynamicModels: getTogetherModels,
* staticModels: [
* {
* name: 'Qwen/Qwen2.5-Coder-32B-Instruct',
* label: 'Qwen/Qwen2.5-Coder-32B-Instruct',
* provider: 'Together',
* maxTokenAllowed: 8000,
* },
* {
* name: 'meta-llama/Llama-3.2-90B-Vision-Instruct-Turbo',
* label: 'meta-llama/Llama-3.2-90B-Vision-Instruct-Turbo',
* provider: 'Together',
* maxTokenAllowed: 8000,
* },
*
* {
* name: 'mistralai/Mixtral-8x7B-Instruct-v0.1',
* label: 'Mixtral 8x7B Instruct',
* provider: 'Together',
* maxTokenAllowed: 8192,
* },
* ],
* getApiKeyLink: 'https://api.together.xyz/settings/api-keys',
* },
* {
* name: 'Perplexity',
* staticModels: [
* {
* name: 'llama-3.1-sonar-small-128k-online',
* label: 'Sonar Small Online',
* provider: 'Perplexity',
* maxTokenAllowed: 8192,
* },
* {
* name: 'llama-3.1-sonar-large-128k-online',
* label: 'Sonar Large Online',
* provider: 'Perplexity',
* maxTokenAllowed: 8192,
* },
* {
* name: 'llama-3.1-sonar-huge-128k-online',
* label: 'Sonar Huge Online',
* provider: 'Perplexity',
* maxTokenAllowed: 8192,
* },
* ],
* getApiKeyLink: 'https://www.perplexity.ai/settings/api',
* },
*];
*/
const providerBaseUrlEnvKeys: Record<string, { baseUrlKey?: string; apiTokenKey?: string }> = {};
PROVIDER_LIST.forEach((provider) => {
  providerBaseUrlEnvKeys[provider.name] = {

@@ -1,4 +1,7 @@
 export type DebugLevel = 'trace' | 'debug' | 'info' | 'warn' | 'error';

+import { Chalk } from 'chalk';
+
+const chalk = new Chalk({ level: 3 });

 type LoggerFunction = (...messages: any[]) => void;
@@ -13,9 +16,6 @@ interface Logger {

 let currentLevel: DebugLevel = (import.meta.env.VITE_LOG_LEVEL ?? import.meta.env.DEV) ? 'debug' : 'info';

-const isWorker = 'HTMLRewriter' in globalThis;
-const supportsColor = !isWorker;
-
 export const logger: Logger = {
   trace: (...messages: any[]) => log('trace', undefined, messages),
   debug: (...messages: any[]) => log('debug', undefined, messages),
@@ -63,14 +63,8 @@ function log(level: DebugLevel, scope: string | undefined, messages: any[]) {
     return `${acc} ${current}`;
   }, '');

-  if (!supportsColor) {
-    console.log(`[${level.toUpperCase()}]`, allMessages);
-    return;
-  }

   const labelBackgroundColor = getColorForLevel(level);
-  const labelTextColor = level === 'warn' ? 'black' : 'white';
+  const labelTextColor = level === 'warn' ? '#000000' : '#FFFFFF';

   const labelStyles = getLabelStyles(labelBackgroundColor, labelTextColor);
   const scopeStyles = getLabelStyles('#77828D', 'white');
@@ -81,7 +75,21 @@ function log(level: DebugLevel, scope: string | undefined, messages: any[]) {
     styles.push('', scopeStyles);
   }

+  let labelText = formatText(` ${level.toUpperCase()} `, labelTextColor, labelBackgroundColor);
+
+  if (scope) {
+    labelText = `${labelText} ${formatText(` ${scope} `, '#FFFFFF', '77828D')}`;
+  }
+
+  if (typeof window !== 'undefined') {
     console.log(`%c${level.toUpperCase()}${scope ? `%c %c${scope}` : ''}`, ...styles, allMessages);
+  } else {
+    console.log(`${labelText}`, allMessages);
+  }
+}
+
+function formatText(text: string, color: string, bg: string) {
+  return chalk.bgHex(bg)(chalk.hex(color)(text));
 }

 function getLabelStyles(color: string, textColor: string) {
@@ -104,7 +112,7 @@ function getColorForLevel(level: DebugLevel): string {
       return '#EE4744';
     }
     default: {
-      return 'black';
+      return '#000000';
     }
   }
 }

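With this change the logger picks its output path at log time rather than at module load: in the browser (`window` defined) it keeps the `%c` CSS-styled console output, and everywhere else (workers, SSR) it renders the same badge with chalk ANSI colors, which is what "browser console logging improved for Firefox" refers to. Typical usage, with an illustrative scope name:

```ts
import { createScopedLogger } from '~/utils/logger';

const logger = createScopedLogger('my-module'); // scope label is illustrative

logger.info('starting up'); // browser: CSS-styled badge; server/worker: chalk ANSI badge
logger.warn('cache miss');  // warn labels render with black text per the change above
```
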
@@ -27,7 +27,7 @@ ${templates
 Response Format:

 <selection>
   <templateName>{selected template name}</templateName>
-  <reasoning>{brief explanation for the choice}</reasoning>
+  <title>{a proper title for the project}</title>
 </selection>

 Examples:
@@ -37,7 +37,7 @@ User: I need to build a todo app
 Response:
 <selection>
   <templateName>react-basic-starter</templateName>
-  <reasoning>Simple React setup perfect for building a todo application</reasoning>
+  <title>Simple React todo application</title>
 </selection>
 </example>
@@ -46,7 +46,7 @@ User: Write a script to generate numbers from 1 to 100
 Response:
 <selection>
   <templateName>blank</templateName>
-  <reasoning>This is a simple script that doesn't require any template setup</reasoning>
+  <title>script to generate numbers from 1 to 100</title>
 </selection>
 </example>
@@ -62,16 +62,17 @@ Important: Provide only the selection tags in your response, no additional text.
 const templates: Template[] = STARTER_TEMPLATES.filter((t) => !t.name.includes('shadcn'));

-const parseSelectedTemplate = (llmOutput: string): string | null => {
+const parseSelectedTemplate = (llmOutput: string): { template: string; title: string } | null => {
   try {
     // Extract content between <templateName> tags
     const templateNameMatch = llmOutput.match(/<templateName>(.*?)<\/templateName>/);
+    const titleMatch = llmOutput.match(/<title>(.*?)<\/title>/);

     if (!templateNameMatch) {
       return null;
     }

-    return templateNameMatch[1].trim();
+    return { template: templateNameMatch[1].trim(), title: titleMatch?.[1].trim() || 'Untitled Project' };
   } catch (error) {
     console.error('Error parsing template selection:', error);
     return null;
@@ -101,7 +102,10 @@ export const selectStarterTemplate = async (options: { message: string; model: s
   } else {
     console.log('No template selected, using blank template');

-    return 'blank';
+    return {
+      template: 'blank',
+      title: '',
+    };
   }
 };
@@ -181,7 +185,7 @@ const getGitHubRepoContent = async (
   }
 };

-export async function getTemplates(templateName: string) {
+export async function getTemplates(templateName: string, title?: string) {
   const template = STARTER_TEMPLATES.find((t) => t.name == templateName);

   if (!template) {
@@ -211,7 +215,7 @@ export async function getTemplates(templateName: string) {
   const filesToImport = {
     files: filteredFiles,
-    ignoreFile: filteredFiles,
+    ignoreFile: [] as typeof filteredFiles,
   };

   if (templateIgnoreFile) {
@@ -227,7 +231,7 @@ export async function getTemplates(templateName: string) {
   }

   const assistantMessage = `
-<boltArtifact id="imported-files" title="Importing Starter Files" type="bundled">
+<boltArtifact id="imported-files" title="${title || 'Importing Starter Files'}" type="bundled">
 ${filesToImport.files
   .map(
     (file) =>
@@ -278,10 +282,16 @@ Any attempt to modify these protected files will result in immediate termination

 If you need to make changes to functionality, create new files instead of modifying the protected ones listed above.

 ---
 `;
+  }

   userMessage += `
+---
+template import is done, and you can now use the imported files,
+edit only the files that need to be changed, and you can create new files as needed.
+DO NOT EDIT/WRITE ANY FILES THAT ALREADY EXIST IN THE PROJECT AND DO NOT NEED TO BE MODIFIED
+---
 Now that the Template is imported please continue with my original request
 `;
-  }

   return {
     assistantMessage,

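End to end, template selection now yields a `{ template, title }` pair that flows into `getTemplates` and becomes the artifact title, which is how this commit fixes the app title. A worked example of the parser contract (the LLM output below is invented; `parseSelectedTemplate` is module-private, shown here only to illustrate the shape):

```ts
const llmOutput = `
<selection>
  <templateName>react-basic-starter</templateName>
  <title>Simple React todo application</title>
</selection>`;

parseSelectedTemplate(llmOutput);
// -> { template: 'react-basic-starter', title: 'Simple React todo application' }

parseSelectedTemplate('no selection tags at all');
// -> null (templateName is required; a missing title falls back to 'Untitled Project')
```
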
@@ -74,6 +74,7 @@
     "@xterm/addon-web-links": "^0.11.0",
     "@xterm/xterm": "^5.5.0",
     "ai": "^4.0.13",
+    "chalk": "^5.4.1",
     "date-fns": "^3.6.0",
     "diff": "^5.2.0",
     "dotenv": "^16.4.7",

pnpm-lock.yaml (generated)
@@ -143,6 +143,9 @@ importers:
       ai:
         specifier: ^4.0.13
         version: 4.0.18(react@18.3.1)(zod@3.23.8)
+      chalk:
+        specifier: ^5.4.1
+        version: 5.4.1
       date-fns:
         specifier: ^3.6.0
         version: 3.6.0
@@ -2604,8 +2607,8 @@ packages:
     resolution: {integrity: sha512-oKnbhFyRIXpUuez8iBMmyEa4nbj4IOQyuhc/wy9kY7/WVPcwIO9VA668Pu8RkO7+0G76SLROeyw9CpQ061i4mA==}
     engines: {node: '>=10'}

-  chalk@5.3.0:
-    resolution: {integrity: sha512-dLitG79d+GV1Nb/VYcCDFivJeK1hiukt9QjRNVOsUtTy1rR1YJsmpGGTZ3qJos+uw7WmWF4wUwBd9jxjocFC2w==}
+  chalk@5.4.1:
+    resolution: {integrity: sha512-zgVZuo2WcZgfUEmsn6eO3kINexW8RAE4maiQ8QNs8CtpPCSyMiYsULR3HQYkm3w8FIA3SberyMJMSldGsW+U3w==}
     engines: {node: ^12.17.0 || ^14.13 || >=16.0.0}

   character-entities-html4@2.1.0:
@@ -8207,7 +8210,7 @@ snapshots:
       ansi-styles: 4.3.0
       supports-color: 7.2.0

-  chalk@5.3.0: {}
+  chalk@5.4.1: {}

   character-entities-html4@2.1.0: {}
@@ -9415,7 +9418,7 @@ snapshots:
   jsondiffpatch@0.6.0:
     dependencies:
       '@types/diff-match-patch': 1.0.36
-      chalk: 5.3.0
+      chalk: 5.4.1
       diff-match-patch: 1.0.5

   jsonfile@6.1.0: