diff --git a/app/components/sidebar/Menu.client.tsx b/app/components/sidebar/Menu.client.tsx
index f0e975e..7e9c8c5 100644
--- a/app/components/sidebar/Menu.client.tsx
+++ b/app/components/sidebar/Menu.client.tsx
@@ -4,7 +4,7 @@ import { toast } from 'react-toastify';
import { Dialog, DialogButton, DialogDescription, DialogRoot, DialogTitle } from '~/components/ui/Dialog';
import { ThemeSwitch } from '~/components/ui/ThemeSwitch';
import { ControlPanel } from '~/components/@settings/core/ControlPanel';
-import { SettingsButton } from '~/components/ui/SettingsButton';
+import { SettingsButton, HelpButton } from '~/components/ui/SettingsButton';
import { Button } from '~/components/ui/Button';
import { db, deleteById, getAll, chatId, type ChatHistoryItem, useChatHistory } from '~/lib/persistence';
import { cubicEasingFn } from '~/utils/easings';
@@ -525,7 +525,10 @@ export const Menu = () => {
-
+
+
+ window.open('https://stackblitz-labs.github.io/bolt.diy/', '_blank')} />
+
diff --git a/app/components/ui/SettingsButton.tsx b/app/components/ui/SettingsButton.tsx
index 0c2bde0..4438f87 100644
--- a/app/components/ui/SettingsButton.tsx
+++ b/app/components/ui/SettingsButton.tsx
@@ -16,3 +16,20 @@ export const SettingsButton = memo(({ onClick }: SettingsButtonProps) => {
/>
);
});
+
+interface HelpButtonProps {
+ onClick: () => void;
+}
+
+export const HelpButton = memo(({ onClick }: HelpButtonProps) => {
+ return (
+
+ );
+});
diff --git a/docs/docs/FAQ.md b/docs/docs/FAQ.md
index 54eeebb..a66832a 100644
--- a/docs/docs/FAQ.md
+++ b/docs/docs/FAQ.md
@@ -3,32 +3,182 @@
## Models and Setup
??? question "What are the best models for bolt.diy?"
-For the best experience with bolt.diy, we recommend using the following models:
+For the best experience with bolt.diy, we recommend using the following models from our 20+ supported providers:
- - **Claude 3.5 Sonnet (old)**: Best overall coder, providing excellent results across all use cases
- - **Gemini 2.0 Flash**: Exceptional speed while maintaining good performance
- - **GPT-4o**: Strong alternative to Claude 3.5 Sonnet with comparable capabilities
- - **DeepSeekCoder V3**: Best open source model (available through OpenRouter, DeepSeek API, or self-hosted)
- - **DeepSeekCoder V2 236b**: available through OpenRouter, DeepSeek API, or self-hosted
- - **Qwen 2.5 Coder 32b**: Best model for self-hosting with reasonable hardware requirements
+ **Top Recommended Models:**
+ - **Claude 3.5 Sonnet** (Anthropic): Best overall coder, excellent for complex applications
+ - **GPT-4o** (OpenAI): Strong alternative with great performance across all use cases
+ - **Claude 4 Opus** (Anthropic): Latest flagship model with enhanced capabilities
+ - **Gemini 2.0 Flash** (Google): Exceptional speed for rapid development
+ - **DeepSeekCoder V3** (DeepSeek): Best open-source model for coding tasks
- !!! warning
- Models with less than 7b parameters typically lack the capability to properly interact with bolt!
+ **Self-Hosting Options:**
+ - **DeepSeekCoder V2 236b**: Powerful self-hosted option
+ - **Qwen 2.5 Coder 32b**: Best for moderate hardware requirements
+ - **Ollama models**: Local inference with various model sizes
+
+ **Latest Specialized Models:**
+ - **Moonshot AI (Kimi)**: Kimi K2 models with advanced reasoning capabilities
+ - **xAI Grok 4**: Latest Grok model with 256K context window
+ - **Anthropic Claude 4 Opus**: Latest flagship model from Anthropic
+
+ !!! tip "Model Selection Tips"
+ - Use larger models (7B+ parameters) for complex applications
+ - Claude models excel at structured code generation
+ - GPT-4o provides excellent general-purpose coding assistance
+ - Gemini models offer the fastest response times
+
+??? question "How do I configure API keys for different providers?"
+You can configure API keys in two ways:
+
+ **Option 1: Environment Variables (Recommended for production)**
+ Create a `.env.local` file in your project root:
+ ```bash
+ ANTHROPIC_API_KEY=your_anthropic_key_here
+ OPENAI_API_KEY=your_openai_key_here
+ GOOGLE_GENERATIVE_AI_API_KEY=your_google_key_here
+ MOONSHOT_API_KEY=your_moonshot_key_here
+ XAI_API_KEY=your_xai_key_here
+ ```
+
+ **Option 2: In-App Configuration**
+ - Go to Settings → Providers
+ - Select a provider
+ - Click the pencil icon next to the provider
+ - Enter your API key directly in the interface
+
+ !!! note "Security Note"
+ Never commit API keys to version control. The `.env.local` file is already in `.gitignore`.
+
+??? question "How do I add a new LLM provider?"
+bolt.diy uses a modular provider architecture. To add a new provider:
+
+ 1. **Create a Provider Class** in `app/lib/modules/llm/providers/your-provider.ts`
+ 2. **Implement the BaseProvider interface** with your provider's specific logic
+ 3. **Register the provider** in `app/lib/modules/llm/registry.ts`
+ 4. **The system automatically detects** and registers your new provider
+
+ See the [Adding New LLMs](../#adding-new-llms) section for complete implementation details.
+
+??? question "How do I set up Moonshot AI (Kimi) provider?"
+Moonshot AI provides access to advanced Kimi models with excellent reasoning capabilities:
+
+ **Setup Steps:**
+ 1. Visit [Moonshot AI Platform](https://platform.moonshot.ai/console/api-keys)
+ 2. Create an account and generate an API key
+ 3. Add `MOONSHOT_API_KEY=your_key_here` to your `.env.local` file
+ 4. Or configure it directly in Settings → Providers → Moonshot
+
+ **Available Models:**
+ - **Kimi K2 Preview**: Latest Kimi model with 128K context
+ - **Kimi K2 Turbo**: Fast inference optimized version
+ - **Kimi Thinking**: Specialized for complex reasoning tasks
+ - **Moonshot v1 series**: Legacy models with vision capabilities
+
+ !!! tip "Moonshot AI Features"
+ - Excellent for Chinese language tasks
+ - Strong reasoning capabilities
+ - Vision-enabled models available
+ - Competitive pricing
+
+??? question "What are the latest xAI Grok models?"
+xAI has released several new Grok models with enhanced capabilities:
+
+ **Latest Models:**
+ - **Grok 4**: Most advanced model with 256K context window
+ - **Grok 4 (07-09)**: Specialized variant for specific tasks
+ - **Grok 3 Beta**: Previous generation with 131K context
+ - **Grok 3 Mini variants**: Optimized for speed and efficiency
+
+ **Setup:**
+ 1. Get your API key from [xAI Platform](https://docs.x.ai/docs/quickstart#creating-an-api-key)
+ 2. Add `XAI_API_KEY=your_key_here` to your `.env.local` file
+ 3. Models will be available in the provider selection
## Best Practices
-??? question "How do I get the best results with bolt.diy?" - **Be specific about your stack**:
- Mention the frameworks or libraries you want to use (e.g., Astro, Tailwind, ShadCN) in your initial prompt. This ensures that bolt.diy scaffolds the project according to your preferences.
+??? question "How do I access help and documentation?"
+bolt.diy provides multiple ways to access help and documentation:
- - **Use the enhance prompt icon**:
- Before sending your prompt, click the *enhance* icon to let the AI refine your prompt. You can edit the suggested improvements before submitting.
+ **Help Icon in Sidebar:**
+ - Look for the question mark (?) icon in the sidebar
+ - Click it to open the full documentation in a new tab
+ - Provides instant access to guides, troubleshooting, and FAQs
- - **Scaffold the basics first, then add features**:
- Ensure the foundational structure of your application is in place before introducing advanced functionality. This helps bolt.diy establish a solid base to build on.
+ **Documentation Resources:**
+ - **Main Documentation**: Complete setup and feature guides
+ - **FAQ Section**: Answers to common questions
+ - **Troubleshooting**: Solutions for common issues
+ - **Best Practices**: Tips for optimal usage
- - **Batch simple instructions**:
- Combine simple tasks into a single prompt to save time and reduce API credit consumption. For example:
- *"Change the color scheme, add mobile responsiveness, and restart the dev server."*
+ **Community Support:**
+ - **GitHub Issues**: Report bugs and request features
+ - **Community Forum**: [thinktank.ottomator.ai](https://thinktank.ottomator.ai)
+
+??? question "How do I get the best results with bolt.diy?"
+Follow these proven strategies for optimal results:
+
+ **Project Setup:**
+ - **Be specific about your stack**: Mention frameworks/libraries (Astro, Tailwind, ShadCN, Next.js) in your initial prompt
+ - **Choose appropriate templates**: Use our 15+ project templates for quick starts
+ - **Configure providers properly**: Set up your preferred LLM providers before starting
+
+ **Development Workflow:**
+ - **Use the enhance prompt icon**: Click the enhance icon to let AI refine your prompts before submitting
+ - **Scaffold basics first**: Build foundational structure before adding advanced features
+ - **Batch simple instructions**: Combine tasks like *"Change colors, add mobile responsiveness, restart dev server"*
+
+ **Advanced Features:**
+ - **Leverage MCP tools**: Use Model Context Protocol for enhanced AI capabilities
+ - **Connect databases**: Integrate Supabase for backend functionality
+ - **Use Git integration**: Version control your projects with GitHub
+ - **Deploy easily**: Use built-in Vercel, Netlify, or GitHub Pages deployment
+
+??? question "How do I use MCP (Model Context Protocol) tools?"
+MCP extends bolt.diy's AI capabilities with external tools:
+
+ **Setting up MCP:**
+ 1. Go to Settings → MCP tab
+ 2. Add MCP server configurations
+ 3. Configure server endpoints and authentication
+ 4. Enable/disable servers as needed
+
+ **Available MCP Capabilities:**
+ - Database connections and queries
+ - File system operations
+ - API integrations
+ - Custom business logic tools
+
+ The MCP integration allows the AI to interact with external services and data sources during conversations.
+
+??? question "How do I deploy my bolt.diy projects?"
+bolt.diy supports one-click deployment to multiple platforms:
+
+ **Supported Platforms:**
+ - **Vercel**: Go to Settings → Connections → Vercel, then deploy with one click
+ - **Netlify**: Connect your Netlify account and deploy instantly
+ - **GitHub Pages**: Push to GitHub and enable Pages in repository settings
+
+ **Deployment Features:**
+ - Automatic build configuration for popular frameworks
+ - Environment variable management
+ - Custom domain support
+ - Preview deployments for testing
+
+??? question "How do I use Git integration features?"
+bolt.diy provides comprehensive Git and GitHub integration:
+
+ **Basic Git Operations:**
+ - Import existing repositories by URL
+ - Create new repositories on GitHub
+ - Automatic commits for major changes
+ - Push/pull changes seamlessly
+
+ **Advanced Features:**
+ - Connect GitHub account in Settings → Connections
+ - Import from your connected repositories
+ - Version control with diff visualization
+ - Collaborative development support
## Project Information
@@ -44,44 +194,205 @@ bolt.diy began as a small showcase project on @ColeMedin's YouTube channel to ex
We're forming a team of maintainers to manage demand and streamline issue resolution. The maintainers are rockstars, and we're also exploring partnerships to help the project thrive.
+## New Features & Technologies
+
+??? question "What's new in bolt.diy?"
+Recent major additions to bolt.diy include:
+
+ **Advanced AI Capabilities:**
+ - **20+ LLM Providers**: Support for Anthropic, OpenAI, Google, DeepSeek, Cohere, and more
+ - **MCP Integration**: Model Context Protocol for enhanced AI tool calling
+ - **Dynamic Model Loading**: Automatic model discovery from provider APIs
+
+ **Development Tools:**
+ - **WebContainer**: Secure sandboxed development environment
+ - **Live Preview**: Real-time application previews without leaving the editor
+ - **Project Templates**: 15+ starter templates for popular frameworks
+
+ **Version Control & Collaboration:**
+ - **Git Integration**: Import/export projects with GitHub
+ - **Automatic Commits**: Smart version control for project changes
+ - **Diff Visualization**: See code changes clearly
+
+ **Backend & Database:**
+ - **Supabase Integration**: Built-in database and authentication
+ - **API Integration**: Connect to external services and databases
+
+ **Deployment & Production:**
+ - **One-Click Deployment**: Vercel, Netlify, and GitHub Pages support
+ - **Environment Management**: Production-ready configuration
+ - **Build Optimization**: Automatic configuration for popular frameworks
+
+??? question "How do I use the new project templates?"
+bolt.diy offers templates for popular frameworks and technologies:
+
+ **Getting Started:**
+ 1. Start a new project in bolt.diy
+ 2. Browse available templates in the starter selection
+ 3. Choose your preferred technology stack
+ 4. The AI will scaffold your project with best practices
+
+ **Available Templates:**
+ - **Frontend**: React, Vue, Angular, Svelte, SolidJS
+ - **Full-Stack**: Next.js, Astro, Qwik, Remix, Nuxt
+ - **Mobile**: Expo, React Native
+ - **Content**: Slidev presentations, Astro blogs
+ - **Vanilla**: Vite with TypeScript/JavaScript
+
+ Templates include pre-configured tooling, linting, and build processes.
+
+??? question "How does WebContainer work?"
+WebContainer provides a secure development environment:
+
+ **Features:**
+ - **Isolated Environment**: Secure sandbox for running code
+ - **Full Node.js Support**: Run npm, build tools, and dev servers
+ - **Live File System**: Direct manipulation of project files
+ - **Terminal Integration**: Execute commands with real-time output
+
+ **Supported Technologies:**
+ - All major JavaScript frameworks (React, Vue, Angular, etc.)
+ - Build tools (Vite, Webpack, Parcel)
+ - Package managers (npm, pnpm, yarn)
+
+??? question "How do I connect external databases?"
+Use Supabase for backend database functionality:
+
+ **Setup Process:**
+ 1. Create a Supabase project at supabase.com
+ 2. Get your project URL and API keys
+ 3. Configure the connection in your bolt.diy project
+ 4. Use Supabase tools to interact with your database
+
+ **Available Features:**
+ - Real-time subscriptions
+ - Built-in authentication
+ - Row Level Security (RLS)
+ - Automatic API generation
+ - Database migrations
+
## Model Comparisons
??? question "How do local LLMs compare to larger models like Claude 3.5 Sonnet for bolt.diy?"
-While local LLMs are improving rapidly, larger models like GPT-4o, Claude 3.5 Sonnet, and DeepSeek Coder V2 236b still offer the best results for complex applications. Our ongoing focus is to improve prompts, agents, and the platform to better support smaller local LLMs.
+While local LLMs are improving rapidly, larger models still offer the best results for complex applications. Here's the current landscape:
+
+ **Recommended for Production:**
+ - **Claude 4 Opus**: Latest flagship model with enhanced reasoning (200K context)
+ - **Claude 3.5 Sonnet**: Proven excellent performance across all tasks
+ - **GPT-4o**: Strong general-purpose coding with great reliability
+ - **xAI Grok 4**: Latest Grok with 256K context window
+
+ **Fast & Efficient:**
+ - **Gemini 2.0 Flash**: Exceptional speed for rapid development
+ - **Claude 3 Haiku**: Cost-effective for simpler tasks
+ - **xAI Grok 3 Mini Fast**: Optimized for speed and efficiency
+
+ **Advanced Reasoning:**
+ - **Moonshot AI Kimi K2**: Advanced reasoning with 128K context
+ - **Moonshot AI Kimi Thinking**: Specialized for complex reasoning tasks
+
+ **Open Source & Self-Hosting:**
+ - **DeepSeekCoder V3**: Best open-source model available
+ - **DeepSeekCoder V2 236b**: Powerful self-hosted option
+ - **Qwen 2.5 Coder 32b**: Good balance of performance and resource usage
+
+ **Local Models (Ollama):**
+ - Best for privacy and offline development
+ - Use 7B+ parameter models for reasonable performance
+ - Still experimental for complex, large-scale applications
+
+ !!! tip "Model Selection Guide"
+ - Use Claude/GPT-4o for complex applications
+ - Use Gemini for fast prototyping
+ - Use local models for privacy/offline development
+ - Always test with your specific use case
## Troubleshooting
??? error "There was an error processing this request"
-This generic error message means something went wrong. Check both:
+This generic error message means something went wrong. Check these locations:
- - The terminal (if you started the app with Docker or `pnpm`).
-
- - The developer console in your browser (press `F12` or right-click > *Inspect*, then go to the *Console* tab).
+ - **Terminal output**: If you started with Docker or `pnpm`
+ - **Browser developer console**: Press `F12` → Console tab
+ - **Server logs**: Check for any backend errors
+ - **Network tab**: Verify API calls are working
??? error "x-api-key header missing"
-This error is sometimes resolved by restarting the Docker container.
- If that doesn't work, try switching from Docker to `pnpm` or vice versa. We're actively investigating this issue.
+This authentication error can be resolved by:
+
+ - **Restarting the container**: `docker compose restart` (if using Docker)
+ - **Switching run methods**: Try `pnpm` if using Docker, or vice versa
+ - **Checking API keys**: Verify your API keys are properly configured
+ - **Clearing browser cache**: Sometimes cached authentication causes issues
??? error "Blank preview when running the app"
-A blank preview often occurs due to hallucinated bad code or incorrect commands.
- To troubleshoot:
+Blank previews usually indicate code generation issues:
- - Check the developer console for errors.
+ - **Check developer console** for JavaScript errors
+ - **Verify WebContainer is running** properly
+ - **Try refreshing** the preview pane
+ - **Check for hallucinated code** in the generated files
+ - **Restart the development server** if issues persist
- - Remember, previews are core functionality, so the app isn't broken! We're working on making these errors more transparent.
+??? error "MCP server connection failed"
+If you're having trouble with MCP integrations:
+
+ - **Verify server configuration** in Settings → MCP
+ - **Check server endpoints** and authentication credentials
+ - **Test server connectivity** outside of bolt.diy
+ - **Review MCP server logs** for specific error messages
+ - **Ensure server supports** the MCP protocol version
+
+??? error "Git integration not working"
+Common Git-related issues and solutions:
+
+ - **GitHub connection failed**: Verify your GitHub token has correct permissions
+ - **Repository not found**: Check repository URL and access permissions
+ - **Push/pull failed**: Ensure you have write access to the repository
+ - **Merge conflicts**: Resolve conflicts manually or use the diff viewer
+ - **Large files blocked**: Check GitHub's file size limits
+
+??? error "Deployment failed"
+Deployment issues can be resolved by:
+
+ - **Checking build logs** for specific error messages
+ - **Verifying environment variables** are set correctly
+ - **Testing locally** before deploying
+ - **Checking platform-specific requirements** (Node version, build commands)
+ - **Reviewing deployment configuration** in platform settings
??? error "Everything works, but the results are bad"
-Local LLMs like Qwen-2.5-Coder are powerful for small applications but still experimental for larger projects. For better results, consider using larger models like
+For suboptimal AI responses, try these solutions:
- - GPT-4o
- - Claude 3.5 Sonnet
- - DeepSeek Coder V2 236b
+ - **Switch to a more capable model**: Use Claude 3.5 Sonnet, GPT-4o, or Claude 4 Opus
+ - **Be more specific** in your prompts about requirements and technologies
+ - **Use the enhance prompt feature** to refine your requests
+ - **Break complex tasks** into smaller, focused prompts
+ - **Provide context** about your project structure and goals
+
+??? error "WebContainer preview not loading"
+If the live preview isn't working:
+
+ - **Check WebContainer status** in the terminal
+ - **Verify Node.js compatibility** with your project
+ - **Restart the development environment**
+ - **Clear browser cache** and reload
+ - **Check for conflicting ports** (default is 5173)
??? error "Received structured exception #0xc0000005: access violation"
-If you are getting this, you are probably on Windows. The fix is generally to update the [Visual C++ Redistributable](https://learn.microsoft.com/en-us/cpp/windows/latest-supported-vc-redist?view=msvc-170)
+**Windows-specific issue**: Update the [Visual C++ Redistributable](https://learn.microsoft.com/en-us/cpp/windows/latest-supported-vc-redist?view=msvc-170)
??? error "Miniflare or Wrangler errors in Windows"
-You will need to make sure you have the latest version of Visual Studio C++ installed (14.40.33816), more information here Github Issues
+**Windows development environment**: Install Visual Studio C++ (version 14.40.33816 or later). More details in [GitHub Issues](https://github.com/stackblitz-labs/bolt.diy/issues/19)
+
+??? error "Provider not showing up after adding it"
+If your custom LLM provider isn't appearing:
+
+ - **Restart the development server** to reload providers
+ - **Check the provider registry** in `app/lib/modules/llm/registry.ts`
+ - **Verify the provider class** extends `BaseProvider` correctly
+ - **Check browser console** for provider loading errors
+ - **Ensure proper TypeScript compilation** without errors
---
diff --git a/docs/docs/index.md b/docs/docs/index.md
index 66698af..d3ac768 100644
--- a/docs/docs/index.md
+++ b/docs/docs/index.md
@@ -1,6 +1,6 @@
# Welcome to bolt diy
-bolt.diy allows you to choose the LLM that you use for each prompt! Currently, you can use OpenAI, Anthropic, Ollama, OpenRouter, Gemini, LMStudio, Mistral, xAI, HuggingFace, DeepSeek, or Groq models - and it is easily extended to use any other model supported by the Vercel AI SDK! See the instructions below for running this locally and extending it to include more models.
+bolt.diy allows you to choose the LLM that you use for each prompt! Currently, you can use models from 20+ providers including OpenAI, Anthropic, Ollama, OpenRouter, Google/Gemini, LMStudio, Mistral, xAI, HuggingFace, DeepSeek, Groq, Cohere, Together AI, Perplexity AI, Hyperbolic, Moonshot AI (Kimi), Amazon Bedrock, GitHub Models, and more - with easy extensibility to add any other model supported by the Vercel AI SDK! See the instructions below for running this locally and extending it to include more models.
## Table of Contents
@@ -17,6 +17,12 @@ bolt.diy allows you to choose the LLM that you use for each prompt! Currently, y
- [Option 2: With Docker](#option-2-with-docker)
- [Update Your Local Version to the Latest](#update-your-local-version-to-the-latest)
- [Adding New LLMs](#adding-new-llms)
+- [MCP (Model Context Protocol) Integration](#mcp-model-context-protocol-integration)
+- [Git Integration and Version Control](#git-integration-and-version-control)
+- [Deployment Options](#deployment-options)
+- [Supabase Integration](#supabase-integration)
+- [WebContainer and Live Preview](#webcontainer-and-live-preview)
+- [Project Templates](#project-templates)
- [Available Scripts](#available-scripts)
- [Development](#development)
- [Tips and Tricks](#tips-and-tricks)
@@ -33,13 +39,22 @@ Also [this pinned post in our community](https://thinktank.ottomator.ai/t/videos
## Features
-- **AI-powered full-stack web development** directly in your browser.
-- **Support for multiple LLMs** with an extensible architecture to integrate additional models.
-- **Attach images to prompts** for better contextual understanding.
-- **Integrated terminal** to view output of LLM-run commands.
-- **Revert code to earlier versions** for easier debugging and quicker changes.
-- **Download projects as ZIP** for easy portability.
-- **Integration-ready Docker support** for a hassle-free setup.
+- **AI-powered full-stack web development** directly in your browser with live preview
+- **Support for 20+ LLM providers** with an extensible architecture to integrate additional models
+- **Attach images and files to prompts** for better contextual understanding
+- **Integrated terminal** with WebContainer sandbox for running commands and testing
+- **Version control with Git** - import/export projects, connect to GitHub repositories
+- **MCP (Model Context Protocol)** integration for enhanced AI capabilities and tool calling
+- **Database integration** with Supabase for backend development
+- **One-click deployments** to Vercel, Netlify, and GitHub Pages
+- **Project templates** for popular frameworks (React, Vue, Angular, Next.js, Astro, etc.)
+- **Real-time collaboration** and project sharing
+- **Code diff visualization** and version history
+- **Download projects as ZIP** or push directly to GitHub
+- **Docker support** for containerized development environments
+- **Electron app** for native desktop experience
+- **Theme customization** and accessibility features
+- **Help icon** in sidebar linking to comprehensive documentation
---
@@ -67,7 +82,8 @@ Alternatively, you can download the latest version of the project directly from
Clone the repository using Git:
```bash
-git clone -b stable https://github.com/stackblitz-labs/bolt.diy
+git clone https://github.com/stackblitz-labs/bolt.diy
+cd bolt.diy
```
---
@@ -216,26 +232,367 @@ This ensures that you're running the latest version of bolt.diy and can take adv
---
-## Adding New LLMs:
+## Adding New LLMs
-To make new LLMs available to use in this version of bolt.diy, head on over to `app/utils/constants.ts` and find the constant MODEL_LIST. Each element in this array is an object that has the model ID for the name (get this from the provider's API documentation), a label for the frontend model dropdown, and the provider.
+bolt.diy supports a modular architecture for adding new LLM providers and models. The system is designed to be easily extensible while maintaining consistency across all providers.
-By default, Anthropic, OpenAI, Groq, and Ollama are implemented as providers, but the YouTube video for this repo covers how to extend this to work with more providers if you wish!
+### Understanding the Provider Architecture
-When you add a new model to the MODEL_LIST array, it will immediately be available to use when you run the app locally or reload it. For Ollama models, make sure you have the model installed already before trying to use it here!
+Each LLM provider is implemented as a separate class that extends the `BaseProvider` class. The provider system includes:
+
+- **Static Models**: Pre-defined models that are always available
+- **Dynamic Models**: Models that can be loaded from the provider's API at runtime
+- **Configuration**: API key management and provider-specific settings
+
+### Adding a New Provider
+
+To add a new LLM provider, you need to create multiple files:
+
+#### 1. Create the Provider Class
+
+Create a new file in `app/lib/modules/llm/providers/your-provider.ts`:
+
+```typescript
+import { BaseProvider } from '~/lib/modules/llm/base-provider';
+import type { ModelInfo } from '~/lib/modules/llm/types';
+import type { LanguageModelV1 } from 'ai';
+import type { IProviderSetting } from '~/types/model';
+import { createYourProvider } from '@ai-sdk/your-provider';
+
+export default class YourProvider extends BaseProvider {
+ name = 'YourProvider';
+ getApiKeyLink = 'https://your-provider.com/api-keys';
+
+ config = {
+ apiTokenKey: 'YOUR_PROVIDER_API_KEY',
+ };
+
+ staticModels: ModelInfo[] = [
+ {
+ name: 'your-model-name',
+ label: 'Your Model Label',
+ provider: 'YourProvider',
+ maxTokenAllowed: 100000,
+ maxCompletionTokens: 4000,
+ },
+ ];
+
+ async getDynamicModels(
+ apiKeys?: Record<string, string>,
+ settings?: IProviderSetting,
+ serverEnv?: Record<string, string>,
+ ): Promise<ModelInfo[]> {
+ // Implement dynamic model loading if supported
+ return [];
+ }
+
+ getModelInstance(options: {
+ model: string;
+ serverEnv: Record<string, string>;
+ apiKeys?: Record<string, string>;
+ providerSettings?: Record<string, IProviderSetting>;
+ }): LanguageModelV1 {
+ const { apiKeys, model } = options;
+ const apiKey = apiKeys?.[this.config.apiTokenKey] || '';
+
+ return createYourProvider({
+ apiKey,
+ // other configuration options
+ })(model);
+ }
+}
+```
+
+#### 2. Register the Provider
+
+Add your provider to `app/lib/modules/llm/registry.ts`:
+
+```typescript
+import YourProvider from './providers/your-provider';
+
+// ... existing imports ...
+
+export {
+ // ... existing exports ...
+ YourProvider,
+};
+```
+
+#### 3. Update the Manager (if needed)
+
+The provider will be automatically registered by the `LLMManager` through the registry. The manager scans for all classes that extend `BaseProvider` and registers them automatically.
+
+### Adding Models to Existing Providers
+
+To add new models to an existing provider:
+
+1. **Edit the provider file** (e.g., `app/lib/modules/llm/providers/openai.ts`)
+2. **Add to the `staticModels` array**:
+
+```typescript
+staticModels: ModelInfo[] = [
+ // ... existing models ...
+ {
+ name: 'gpt-4o-mini-new',
+ label: 'GPT-4o Mini (New)',
+ provider: 'OpenAI',
+ maxTokenAllowed: 128000,
+ maxCompletionTokens: 16000,
+ },
+];
+```
+
+### Provider-Specific Configuration
+
+Each provider can have its own configuration options:
+
+- **API Key Environment Variables**: Define in the `config` object
+- **Base URL Support**: Add `baseUrlKey` for custom endpoints
+- **Provider Settings**: Custom settings in the UI
+- **Dynamic Model Loading**: Implement `getDynamicModels()` for API-based model discovery
+
+### Testing Your New Provider
+
+1. **Restart the development server** after making changes
+2. **Check the provider appears** in the Settings → Providers section
+3. **Configure API keys** in the provider settings
+4. **Test the models** in a chat session
+
+### Best Practices
+
+- **Follow the naming conventions** used by existing providers
+- **Include proper error handling** for API failures
+- **Add comprehensive documentation** for your provider
+- **Test with both static and dynamic models**
+- **Ensure proper API key validation**
+
+The modular architecture makes it easy to add new providers while maintaining consistency and reliability across the entire system.
+
+---
+
+## MCP (Model Context Protocol) Integration
+
+bolt.diy supports MCP (Model Context Protocol) servers to extend AI capabilities with external tools and services. MCP allows you to connect various tools and services that the AI can use during conversations.
+
+### Setting up MCP Servers
+
+1. Navigate to Settings → MCP tab
+2. Add MCP server configurations
+3. Configure server endpoints and authentication
+4. Enable/disable servers as needed
+
+MCP servers can provide:
+- Database connections and queries
+- File system operations
+- API integrations
+- Custom business logic tools
+- And much more...
+
+The MCP integration enhances the AI's ability to perform complex tasks by giving it access to external tools and data sources.
+
+---
+
+## Git Integration and Version Control
+
+bolt.diy provides comprehensive Git integration for version control, collaboration, and project management.
+
+### GitHub Integration
+
+1. **Connect your GitHub account** in Settings → Connections → GitHub
+2. **Import existing repositories** by URL or from your connected account
+3. **Push projects directly to GitHub** with automatic repository creation
+4. **Sync changes** between local development and remote repositories
+
+### Version Control Features
+
+- **Automatic commits** for major changes
+- **Diff visualization** to see code changes
+- **Branch management** and merge conflict resolution
+- **Revert to previous versions** for debugging
+- **Collaborative development** with team members
+
+### Export Options
+
+- **Download as ZIP** for easy sharing
+- **Push to GitHub** for version control and collaboration
+- **Import from GitHub** to continue working on existing projects
+
+---
+
+## Deployment Options
+
+bolt.diy provides one-click deployment to popular hosting platforms, making it easy to share your projects with the world.
+
+### Supported Platforms
+
+#### Vercel Deployment
+1. Connect your Vercel account in Settings → Connections → Vercel
+2. Click the deploy button in your project
+3. bolt.diy automatically builds and deploys your project
+4. Get a live URL instantly with Vercel's global CDN
+
+#### Netlify Deployment
+1. Connect your Netlify account in Settings → Connections → Netlify
+2. Deploy with a single click
+3. Automatic build configuration and optimization
+4. Preview deployments for every change
+
+#### GitHub Pages
+1. Connect your GitHub account
+2. Push your project to a GitHub repository
+3. Enable GitHub Pages in repository settings
+4. Automatic deployment from your repository
+
+### Deployment Features
+
+- **Automatic build configuration** for popular frameworks
+- **Environment variable management** for production
+- **Custom domain support** through platform settings
+- **Deployment previews** for testing changes
+- **Rollback capabilities** for quick issue resolution
+
+---
+
+## Supabase Integration
+
+bolt.diy integrates with Supabase to provide backend database functionality, authentication, and real-time features for your applications.
+
+### Setting up Supabase
+
+1. Create a Supabase project at [supabase.com](https://supabase.com)
+2. Get your project URL and API keys from the Supabase dashboard
+3. Configure the connection in your bolt.diy project
+4. Use the Supabase tools to interact with your database
+
+### Database Features
+
+- **Real-time subscriptions** for live data updates
+- **Authentication** with built-in user management
+- **Row Level Security (RLS)** policies for data protection
+- **Built-in API** for CRUD operations
+- **Database migrations** and schema management
+
+### Integration with AI Development
+
+The AI can help you:
+- **Design database schemas** for your applications
+- **Write SQL queries** and database functions
+- **Implement authentication flows**
+- **Create API endpoints** for your frontend
+- **Set up real-time features** for collaborative apps
+
+Supabase integration makes it easy to build full-stack applications with a robust backend infrastructure.
+
+---
+
+## WebContainer and Live Preview
+
+bolt.diy uses WebContainer technology to provide a secure, isolated development environment with live preview capabilities.
+
+### WebContainer Features
+
+- **Secure sandbox environment** - Run code in isolated containers
+- **Live preview** - See your changes instantly without leaving the editor
+- **Full Node.js environment** - Run npm scripts, build tools, and development servers
+- **File system access** - Direct manipulation of project files
+- **Terminal integration** - Execute commands and see real-time output
+
+### Development Workflow
+
+1. **Write code** in the integrated editor
+2. **Run development servers** directly in WebContainer
+3. **Preview your application** in real-time
+4. **Test functionality** with the integrated terminal
+5. **Debug issues** with live error reporting
+
+### Supported Technologies
+
+WebContainer supports all major JavaScript frameworks and tools:
+- React, Vue, Angular, Svelte
+- Next.js, Nuxt, Astro, Remix
+- Vite, Webpack, Parcel
+- Node.js, npm, pnpm, yarn
+- And many more...
+
+The WebContainer integration provides a seamless development experience without the need for local setup.
+
+---
+
+## Project Templates
+
+bolt.diy comes with a comprehensive collection of starter templates to help you quickly bootstrap your projects. Choose from popular frameworks and technologies:
+
+### Frontend Frameworks
+- **React + Vite** - Modern React setup with TypeScript
+- **Vue.js** - Progressive JavaScript framework
+- **Angular** - Enterprise-ready framework
+- **Svelte** - Compiler-based framework for fast apps
+- **SolidJS** - Reactive framework with fine-grained updates
+
+### Full-Stack Frameworks
+- **Next.js with shadcn/ui** - React framework with UI components
+- **Astro** - Static site generator for content-focused sites
+- **Qwik** - Resumable framework for instant loading
+- **Remix** - Full-stack React framework
+- **Nuxt** - Vue.js meta-framework
+
+### Mobile & Cross-Platform
+- **Expo App** - React Native with Expo
+- **React Native** - Cross-platform mobile development
+
+### Presentation & Content
+- **Slidev** - Developer-friendly presentations
+- **Astro Basic** - Lightweight static sites
+
+### Vanilla JavaScript
+- **Vanilla Vite** - Minimal JavaScript setup
+- **Vite TypeScript** - TypeScript without framework
+
+### Getting Started with Templates
+
+1. Start a new project in bolt.diy
+2. Browse available templates in the starter selection
+3. Select your preferred technology stack
+4. The AI will scaffold your project with best practices
+5. Begin development immediately with live preview
+
+All templates are pre-configured with modern tooling, linting, and build processes for immediate productivity.
---
## Available Scripts
-- `pnpm run dev`: Starts the development server.
-- `pnpm run build`: Builds the project.
-- `pnpm run start`: Runs the built application locally using Wrangler Pages. This script uses `bindings.sh` to set up necessary bindings so you don't have to duplicate environment variables.
-- `pnpm run preview`: Builds the project and then starts it locally, useful for testing the production build. Note, HTTP streaming currently doesn't work as expected with `wrangler pages dev`.
-- `pnpm test`: Runs the test suite using Vitest.
-- `pnpm run typecheck`: Runs TypeScript type checking.
-- `pnpm run typegen`: Generates TypeScript types using Wrangler.
-- `pnpm run deploy`: Builds the project and deploys it to Cloudflare Pages.
+### Development Scripts
+- `pnpm run dev`: Starts the development server with hot reloading
+- `pnpm run build`: Builds the project for production
+- `pnpm run start`: Runs the built application locally using Wrangler Pages (uses `bindings.sh` to set up the necessary bindings, so you don't have to duplicate environment variables)
+- `pnpm run preview`: Builds and starts locally for production testing (note: HTTP streaming currently doesn't work as expected with `wrangler pages dev`)
+- `pnpm test`: Runs the test suite using Vitest
+- `pnpm run test:watch`: Runs tests in watch mode
+- `pnpm run lint`: Runs ESLint with auto-fix
+- `pnpm run typecheck`: Runs TypeScript type checking
+- `pnpm run typegen`: Generates TypeScript types using Wrangler
+
+### Docker Scripts
+- `pnpm run dockerbuild`: Builds Docker image for development
+- `pnpm run dockerbuild:prod`: Builds Docker image for production
+- `pnpm run dockerrun`: Runs the Docker container
+- `docker compose --profile development up`: Runs with Docker Compose (development)
+
+### Electron Scripts
+- `pnpm electron:build:mac`: Builds for macOS
+- `pnpm electron:build:win`: Builds for Windows
+- `pnpm electron:build:linux`: Builds for Linux
+- `pnpm electron:build:dist`: Builds for all platforms (Mac, Windows, Linux)
+- `pnpm electron:build:unpack`: Creates unpacked build for testing
+
+### Deployment Scripts
+- `pnpm run deploy`: Builds and deploys to Cloudflare Pages
+- `npm run dockerbuild`: npm equivalent of `pnpm run dockerbuild`, for npm users
+
+### Utility Scripts
+- `pnpm run clean`: Cleans build artifacts
+- `pnpm run prepare`: Sets up Husky for git hooks
---
@@ -251,6 +608,23 @@ This will start the Remix Vite development server. You will need Google Chrome C
---
+## Getting Help & Resources
+
+### Help Icon in Sidebar
+bolt.diy includes a convenient help icon (?) in the sidebar that provides quick access to comprehensive documentation. Simply click the help icon to open the full documentation in a new tab.
+
+The documentation includes:
+- **Complete setup guides** for all supported providers
+- **Feature explanations** for advanced capabilities
+- **Troubleshooting guides** for common issues
+- **Best practices** for optimal usage
+- **FAQ section** with detailed answers
+
+### Community Support
+- **GitHub Issues**: Report bugs and request features
+- **Community Forum**: Join discussions at [thinktank.ottomator.ai](https://thinktank.ottomator.ai)
+- **Contributing Guide**: Learn how to contribute to the project
+
## Tips and Tricks
Here are some tips to get the most out of bolt.diy:
@@ -262,3 +636,5 @@ Here are some tips to get the most out of bolt.diy:
- **Scaffold the basics first, then add features**: Make sure the basic structure of your application is in place before diving into more advanced functionality. This helps Bolt understand the foundation of your project and ensures everything is wired up correctly before you build on top of it.
- **Batch simple instructions**: Save time by combining simple instructions into one message. For example, you can ask Bolt to change the color scheme, add mobile responsiveness, and restart the dev server, all in one go, saving you time and reducing API credit consumption significantly.
+
+- **Access documentation quickly**: Use the help icon (?) in the sidebar for instant access to guides, troubleshooting, and best practices.