Fix linting issues
@@ -52,7 +52,7 @@ export function getBaseURL(cloudflareEnv: Env, provider: string) {
       return env.OPENAI_LIKE_API_BASE_URL || cloudflareEnv.OPENAI_LIKE_API_BASE_URL;
     case 'LMStudio':
       return env.LMSTUDIO_API_BASE_URL || cloudflareEnv.LMSTUDIO_API_BASE_URL || 'http://localhost:1234';
-    case 'Ollama':
+    case 'Ollama': {
       let baseUrl = env.OLLAMA_API_BASE_URL || cloudflareEnv.OLLAMA_API_BASE_URL || 'http://localhost:11434';

       if (env.RUNNING_IN_DOCKER === 'true') {
@@ -60,6 +60,7 @@ export function getBaseURL(cloudflareEnv: Env, provider: string) {
       }

       return baseUrl;
+    }
     default:
       return '';
   }
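The `case 'Ollama': {` change fixes ESLint's `no-case-declarations` rule: a `let` or `const` written directly under a `case` label is scoped to the entire `switch`, so the rule requires a block around the case body. A minimal sketch of the pattern, assuming a Docker host rewrite like the one in the elided hunk lines (the env shape here is illustrative):

```ts
function resolveBaseUrl(provider: string, env: Record<string, string | undefined>): string {
  switch (provider) {
    case 'LMStudio':
      return env.LMSTUDIO_API_BASE_URL || 'http://localhost:1234';
    case 'Ollama': {
      // Without this block, `let baseUrl` would leak into the whole
      // switch statement and no-case-declarations would flag it.
      let baseUrl = env.OLLAMA_API_BASE_URL || 'http://localhost:11434';

      if (env.RUNNING_IN_DOCKER === 'true') {
        // Assumption: containers reach the host via host.docker.internal.
        baseUrl = baseUrl.replace('localhost', 'host.docker.internal');
      }

      return baseUrl;
    }
    default:
      return '';
  }
}
```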
@@ -11,14 +11,14 @@ import { createOpenRouter } from '@openrouter/ai-sdk-provider';
 import { createMistral } from '@ai-sdk/mistral';
 import { createCohere } from '@ai-sdk/cohere';

-export function getAnthropicModel(apiKey: string, model: string) {
+export function getAnthropicModel(apiKey: string | undefined, model: string) {
   const anthropic = createAnthropic({
     apiKey,
   });

   return anthropic(model);
 }
-export function getOpenAILikeModel(baseURL: string, apiKey: string, model: string) {
+export function getOpenAILikeModel(baseURL: string, apiKey: string | undefined, model: string) {
   const openai = createOpenAI({
     baseURL,
     apiKey,
@@ -27,7 +27,7 @@ export function getOpenAILikeModel(baseURL: string, apiKey: string, model: strin
   return openai(model);
 }

-export function getCohereAIModel(apiKey: string, model: string) {
+export function getCohereAIModel(apiKey: string | undefined, model: string) {
   const cohere = createCohere({
     apiKey,
   });
@@ -35,7 +35,7 @@ export function getCohereAIModel(apiKey: string, model: string) {
   return cohere(model);
 }

-export function getOpenAIModel(apiKey: string, model: string) {
+export function getOpenAIModel(apiKey: string | undefined, model: string) {
   const openai = createOpenAI({
     apiKey,
   });
@@ -43,7 +43,7 @@ export function getOpenAIModel(apiKey: string, model: string) {
   return openai(model);
 }

-export function getMistralModel(apiKey: string, model: string) {
+export function getMistralModel(apiKey: string | undefined, model: string) {
   const mistral = createMistral({
     apiKey,
   });
@@ -51,7 +51,7 @@ export function getMistralModel(apiKey: string, model: string) {
   return mistral(model);
 }

-export function getGoogleModel(apiKey: string, model: string) {
+export function getGoogleModel(apiKey: string | undefined, model: string) {
   const google = createGoogleGenerativeAI({
     apiKey,
   });
@@ -59,7 +59,7 @@ export function getGoogleModel(apiKey: string, model: string) {
   return google(model);
 }

-export function getGroqModel(apiKey: string, model: string) {
+export function getGroqModel(apiKey: string | undefined, model: string) {
   const openai = createOpenAI({
     baseURL: 'https://api.groq.com/openai/v1',
     apiKey,
@@ -68,7 +68,7 @@ export function getGroqModel(apiKey: string, model: string) {
   return openai(model);
 }

-export function getHuggingFaceModel(apiKey: string, model: string) {
+export function getHuggingFaceModel(apiKey: string | undefined, model: string) {
   const openai = createOpenAI({
     baseURL: 'https://api-inference.huggingface.co/v1/',
     apiKey,
@@ -78,16 +78,16 @@ export function getHuggingFaceModel(apiKey: string, model: string) {
 }

 export function getOllamaModel(baseURL: string, model: string) {
-  const Ollama = ollama(model, {
+  const ollamaInstance = ollama(model, {
     numCtx: 32768,
   });

-  Ollama.config.baseURL = `${baseURL}/api`;
+  ollamaInstance.config.baseURL = `${baseURL}/api`;

-  return Ollama;
+  return ollamaInstance;
 }

-export function getDeepseekModel(apiKey: string, model: string) {
+export function getDeepseekModel(apiKey: string | undefined, model: string) {
   const openai = createOpenAI({
     baseURL: 'https://api.deepseek.com/beta',
     apiKey,
@@ -96,7 +96,7 @@ export function getDeepseekModel(apiKey: string, model: string) {
   return openai(model);
 }

-export function getOpenRouterModel(apiKey: string, model: string) {
+export function getOpenRouterModel(apiKey: string | undefined, model: string) {
   const openRouter = createOpenRouter({
     apiKey,
   });
@@ -113,7 +113,7 @@ export function getLMStudioModel(baseURL: string, model: string) {
   return lmstudio(model);
 }

-export function getXAIModel(apiKey: string, model: string) {
+export function getXAIModel(apiKey: string | undefined, model: string) {
   const openai = createOpenAI({
     baseURL: 'https://api.x.ai/v1',
     apiKey,
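Every `apiKey` parameter above is widened from `string` to `string | undefined`. That matches where the values come from: under `strictNullChecks` an environment lookup is typed `string | undefined`, and the AI SDK `create*` factories treat `apiKey` as optional, failing at request time if it is absent. A sketch of what the widening buys at a call site (the names here are illustrative, not the app's real wiring):

```ts
declare const env: Record<string, string | undefined>;

// Before: `apiKey: string` forced callers into `env.KEY!` or a cast.
// After: the possibly-missing value flows through unchanged.
function getModel(apiKey: string | undefined, model: string) {
  return { apiKey, model }; // stand-in for createOpenAI({ apiKey })(model)
}

const m = getModel(env.OPENAI_API_KEY, 'gpt-4o-mini'); // compiles without `!`
```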
@@ -110,6 +110,7 @@ export function useChatHistory() {
         toast.success('Chat duplicated successfully');
       } catch (error) {
         toast.error('Failed to duplicate chat');
+        console.log(error);
       }
     },
   };
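Logging the caught error keeps the `catch` binding used (configurations of `@typescript-eslint/no-unused-vars` that check caught errors would otherwise flag it) and preserves the failure detail alongside the user-facing toast. A minimal sketch of the shape, with `console` standing in for the toast calls:

```ts
async function duplicateChat(copy: () => Promise<void>) {
  try {
    await copy();
    console.log('Chat duplicated successfully'); // toast.success in the real code
  } catch (error) {
    console.error('Failed to duplicate chat'); // toast.error in the real code
    console.log(error); // developer-facing detail; also uses the binding
  }
}
```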
@@ -1,11 +1,10 @@
-import { WebContainer, type WebContainerProcess } from '@webcontainer/api';
+import { WebContainer } from '@webcontainer/api';
 import { atom, map, type MapStore } from 'nanostores';
 import * as nodePath from 'node:path';
 import type { BoltAction } from '~/types/actions';
 import { createScopedLogger } from '~/utils/logger';
 import { unreachable } from '~/utils/unreachable';
 import type { ActionCallbackData } from './message-parser';
-import type { ITerminal } from '~/types/terminal';
 import type { BoltShell } from '~/utils/shell';

 const logger = createScopedLogger('ActionRunner');
@@ -94,9 +93,10 @@ export class ActionRunner {

     this.#updateAction(actionId, { ...action, ...data.action, executed: !isStreaming });

+    // eslint-disable-next-line consistent-return
     return (this.#currentExecutionPromise = this.#currentExecutionPromise
       .then(() => {
-        return this.#executeAction(actionId, isStreaming);
+        this.#executeAction(actionId, isStreaming);
       })
       .catch((error) => {
         console.error('Action failed:', error);
@@ -127,12 +127,11 @@ export class ActionRunner {

         /*
          * adding a delay to avoid any race condition between 2 start actions
-         * i am up for a better approch
+         * i am up for a better approach
          */
         await new Promise((resolve) => setTimeout(resolve, 2000));

         return;
-        break;
       }
     }

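Two rules drive the ActionRunner changes: `consistent-return`, suppressed with the inline disable because the method mixes a bare `return;` with `return <promise>;`, and `no-unreachable`, which is why the `break;` after `return;` goes away. A standalone reproduction of both, assuming nothing about the real class:

```ts
function schedule(skip: boolean, work: () => void): Promise<void> | undefined {
  if (skip) {
    return; // bare return: no value
  }

  // Mixing `return;` with `return <value>;` trips consistent-return,
  // hence the inline disable, mirroring the commit.
  // eslint-disable-next-line consistent-return
  return Promise.resolve().then(work);
}

function dispatch(kind: 'start' | 'other'): void {
  switch (kind) {
    case 'start':
      return;
    // a `break;` here, after the return, is dead code: no-unreachable
    default:
      return;
  }
}
```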
@@ -13,7 +13,6 @@ import JSZip from 'jszip';
 import { saveAs } from 'file-saver';
 import { Octokit, type RestEndpointMethodTypes } from '@octokit/rest';
 import * as nodePath from 'node:path';
-import type { WebContainerProcess } from '@webcontainer/api';
 import { extractRelativePath } from '~/utils/diff';

 export interface ArtifactState {
@@ -42,7 +41,6 @@ export class WorkbenchStore {
   unsavedFiles: WritableAtom<Set<string>> = import.meta.hot?.data.unsavedFiles ?? atom(new Set<string>());
   modifiedFiles = new Set<string>();
   artifactIdList: string[] = [];
-  #boltTerminal: { terminal: ITerminal; process: WebContainerProcess } | undefined;
   #globalExecutionQueue = Promise.resolve();
   constructor() {
     if (import.meta.hot) {
@@ -439,6 +437,8 @@ export class WorkbenchStore {
           });
           return { path: extractRelativePath(filePath), sha: blob.sha };
         }
+
+        return null;
       }),
     );

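The added `return null;` gives the async `map` callback an explicit value on every path: the `if` branch returns `{ path, sha }`, so the fall-through must return something too or `consistent-return` fires. A sketch of the pattern, assuming the caller filters the placeholders out (the blob shape mirrors the diff, the rest is illustrative):

```ts
type GitBlob = { path: string; sha: string };

async function collectBlobs(
  files: Array<{ path: string; content?: string }>,
  createSha: (content: string) => Promise<string>,
): Promise<GitBlob[]> {
  const results = await Promise.all(
    files.map(async (file) => {
      if (file.content !== undefined) {
        return { path: file.path, sha: await createSha(file.content) };
      }

      // Explicit value on the fall-through path keeps returns consistent.
      return null;
    }),
  );

  // Drop the null placeholders before use.
  return results.filter((blob): blob is GitBlob => blob !== null);
}
```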