Merge branch 'main' into main

Author: Chris Mahoney
Date: 2024-11-21 20:39:08 -06:00
Committed by: GitHub
35 changed files with 755 additions and 200 deletions

@@ -23,6 +23,8 @@ export function getAPIKey(cloudflareEnv: Env, provider: string, userApiKeys?: Re
       return env.GOOGLE_GENERATIVE_AI_API_KEY || cloudflareEnv.GOOGLE_GENERATIVE_AI_API_KEY;
     case 'Groq':
       return env.GROQ_API_KEY || cloudflareEnv.GROQ_API_KEY;
+    case 'HuggingFace':
+      return env.HuggingFace_API_KEY || cloudflareEnv.HuggingFace_API_KEY;
     case 'OpenRouter':
       return env.OPEN_ROUTER_API_KEY || cloudflareEnv.OPEN_ROUTER_API_KEY;
     case 'Deepseek':
@@ -33,6 +35,8 @@ export function getAPIKey(cloudflareEnv: Env, provider: string, userApiKeys?: Re
       return env.OPENAI_LIKE_API_KEY || cloudflareEnv.OPENAI_LIKE_API_KEY;
     case "xAI":
       return env.XAI_API_KEY || cloudflareEnv.XAI_API_KEY;
+    case "Cohere":
+      return env.COHERE_API_KEY;
     default:
       return "";
   }
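Note: every other provider branch above falls back from `env` to `cloudflareEnv`, while the new Cohere branch reads only `env.COHERE_API_KEY`. A minimal sketch of the shared fallback pattern, with the parameter shapes assumed from the diff (`resolveKey` is a hypothetical helper, not part of the commit):

// Hypothetical helper illustrating the env -> cloudflareEnv -> '' fallback used above.
function resolveKey(
  env: Record<string, string | undefined>,
  cloudflareEnv: Record<string, string | undefined>,
  name: string,
): string {
  return env[name] || cloudflareEnv[name] || '';
}

// e.g. resolveKey(process.env, cloudflareEnv, 'HuggingFace_API_KEY')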

@@ -7,6 +7,7 @@ import { createGoogleGenerativeAI } from '@ai-sdk/google';
 import { ollama } from 'ollama-ai-provider';
 import { createOpenRouter } from "@openrouter/ai-sdk-provider";
 import { createMistral } from '@ai-sdk/mistral';
+import { createCohere } from '@ai-sdk/cohere'
 
 export const DEFAULT_NUM_CTX = process.env.DEFAULT_NUM_CTX ?
   parseInt(process.env.DEFAULT_NUM_CTX, 10) :
@@ -27,6 +28,15 @@ export function getOpenAILikeModel(baseURL:string,apiKey: string, model: string)
   return openai(model);
 }
 
+export function getCohereAIModel(apiKey:string, model: string){
+  const cohere = createCohere({
+    apiKey,
+  });
+
+  return cohere(model);
+}
+
 export function getOpenAIModel(apiKey: string, model: string) {
   const openai = createOpenAI({
     apiKey,
@@ -60,6 +70,15 @@ export function getGroqModel(apiKey: string, model: string) {
   return openai(model);
 }
 
+export function getHuggingFaceModel(apiKey: string, model: string) {
+  const openai = createOpenAI({
+    baseURL: 'https://api-inference.huggingface.co/v1/',
+    apiKey,
+  });
+
+  return openai(model);
+}
+
 export function getOllamaModel(baseURL: string, model: string) {
   let Ollama = ollama(model, {
     numCtx: DEFAULT_NUM_CTX,
@@ -103,6 +122,8 @@ export function getXAIModel(apiKey: string, model: string) {
   return openai(model);
 }
 
 export function getModel(provider: string, model: string, env: Env, apiKeys?: Record<string, string>) {
   const apiKey = getAPIKey(env, provider, apiKeys);
   const baseURL = getBaseURL(env, provider);
@@ -114,6 +135,8 @@ export function getModel(provider: string, model: string, env: Env, apiKeys?: Re
       return getOpenAIModel(apiKey, model);
     case 'Groq':
       return getGroqModel(apiKey, model);
+    case 'HuggingFace':
+      return getHuggingFaceModel(apiKey, model);
     case 'OpenRouter':
       return getOpenRouterModel(apiKey, model);
     case 'Google':
@@ -128,6 +151,8 @@ export function getModel(provider: string, model: string, env: Env, apiKeys?: Re
       return getLMStudioModel(baseURL, model);
     case 'xAI':
       return getXAIModel(apiKey, model);
+    case 'Cohere':
+      return getCohereAIModel(apiKey, model);
     default:
       return getOllamaModel(baseURL, model);
   }
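Both additions follow the file's existing factory pattern: getCohereAIModel wraps the @ai-sdk/cohere client, and getHuggingFaceModel reuses the OpenAI-compatible client pointed at HuggingFace's inference endpoint, so any model served behind that API works unchanged. A rough usage sketch; the model names here are illustrative assumptions, not part of the commit:

// Assumes `env` is the Cloudflare Env binding and `apiKeys` the per-user key map from the diff.
const cohereModel = getModel('Cohere', 'command-r-plus', env, apiKeys);
const hfModel = getModel('HuggingFace', 'meta-llama/Llama-3.1-70B-Instruct', env, apiKeys);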

@@ -88,7 +88,7 @@ You are Bolt, an expert AI assistant and exceptional senior software developer w
 Example:
 
 <${MODIFICATIONS_TAG_NAME}>
-  <diff path="/home/project/src/main.js">
+  <diff path="${WORK_DIR}/src/main.js">
     @@ -2,7 +2,10 @@
       return a + b;
     }
@@ -103,7 +103,7 @@ You are Bolt, an expert AI assistant and exceptional senior software developer w
     +
     +console.log('The End');
   </diff>
-  <file path="/home/project/package.json">
+  <file path="${WORK_DIR}/package.json">
     // full file content here
   </file>
 </${MODIFICATIONS_TAG_NAME}>
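This change replaces the hardcoded /home/project paths in the system prompt's example with the WORK_DIR constant, so the example tracks the configured working directory instead of drifting if it changes. A small sketch of the interpolation, with WORK_DIR's value assumed for illustration:

// WORK_DIR is defined elsewhere in the repo; '/home/project' is an assumed value here.
const WORK_DIR = '/home/project';
const tag = `<diff path="${WORK_DIR}/src/main.js">`;
// -> <diff path="/home/project/src/main.js">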

@@ -41,10 +41,9 @@ function extractPropertiesFromMessage(message: Message): { model: string; provid
   return { model, provider, content: cleanedContent };
 }
 
 export function streamText(
-  messages: Messages, 
-  env: Env, 
+  messages: Messages,
+  env: Env,
   options?: StreamingOptions,
   apiKeys?: Record<string, string>
 ) {
@@ -64,13 +63,22 @@ export function streamText(
       return { ...message, content };
     }
 
-    return message; // No changes for non-user messages
+    return message;
   });
 
+  const modelDetails = MODEL_LIST.find((m) => m.name === currentModel);
+
+  const dynamicMaxTokens =
+    modelDetails && modelDetails.maxTokenAllowed
+      ? modelDetails.maxTokenAllowed
+      : MAX_TOKENS;
+
   return _streamText({
     model: getModel(currentProvider, currentModel, env, apiKeys),
     system: getSystemPrompt(),
-    maxTokens: MAX_TOKENS,
+    maxTokens: dynamicMaxTokens,
     messages: convertToCoreMessages(processedMessages),
     ...options,
   });
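The second hunk replaces the fixed MAX_TOKENS cap with a per-model limit: the current model is looked up in MODEL_LIST and its maxTokenAllowed is used when set, falling back to MAX_TOKENS otherwise. A self-contained sketch of that lookup; the list entries and constant value are assumptions for illustration, since MODEL_LIST and MAX_TOKENS live elsewhere in the repo:

// Shapes and values assumed for illustration only.
interface ModelInfo {
  name: string;
  maxTokenAllowed?: number;
}

const MODEL_LIST: ModelInfo[] = [{ name: 'command-r-plus', maxTokenAllowed: 4096 }];
const MAX_TOKENS = 8192;

function maxTokensFor(currentModel: string): number {
  const details = MODEL_LIST.find((m) => m.name === currentModel);
  // Mirrors the ternary in the diff: use the model's limit when present, else the default.
  return details && details.maxTokenAllowed ? details.maxTokenAllowed : MAX_TOKENS;
}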