Revert "fix: resolve chat conversation hanging and stream interruption issues (#1971)"
This reverts commit e68593f22d.
@@ -1,268 +0,0 @@
-/**
- * Stream Recovery Module
- * Handles stream failures and provides automatic recovery mechanisms
- * Fixes chat conversation hanging issues
- * Author: Keoma Wright
- */
-
-import { createScopedLogger } from '~/utils/logger';
-
-const logger = createScopedLogger('stream-recovery');
-
-export interface StreamRecoveryOptions {
-  maxRetries?: number;
-  retryDelay?: number;
-  timeout?: number;
-  onRetry?: (attempt: number) => void;
-  onTimeout?: () => void;
-  onError?: (error: any) => void;
-}
-
-export class StreamRecoveryManager {
-  private _retryCount = 0;
-  private _timeoutHandle: NodeJS.Timeout | null = null;
-  private _lastActivity: number = Date.now();
-  private _isActive = true;
-
-  constructor(private _options: StreamRecoveryOptions = {}) {
-    this._options = {
-      maxRetries: 3,
-      retryDelay: 1000,
-      timeout: 30000, // 30 seconds default timeout
-      ..._options,
-    };
-  }
-
-  /**
-   * Start monitoring the stream for inactivity
-   */
-  startMonitoring() {
-    this._resetTimeout();
-  }
-
-  /**
-   * Reset the timeout when activity is detected
-   */
-  recordActivity() {
-    this._lastActivity = Date.now();
-    this._resetTimeout();
-  }
-
-  /**
-   * Reset the timeout timer
-   */
-  private _resetTimeout() {
-    if (this._timeoutHandle) {
-      clearTimeout(this._timeoutHandle);
-    }
-
-    if (!this._isActive) {
-      return;
-    }
-
-    this._timeoutHandle = setTimeout(() => {
-      const inactiveTime = Date.now() - this._lastActivity;
-      logger.warn(`Stream timeout detected after ${inactiveTime}ms of inactivity`);
-
-      if (this._options.onTimeout) {
-        this._options.onTimeout();
-      }
-
-      this._handleTimeout();
-    }, this._options.timeout!);
-  }
-
-  /**
-   * Handle stream timeout
-   */
-  private _handleTimeout() {
-    logger.error('Stream timeout - attempting recovery');
-
-    // Signal that recovery is needed
-    this.attemptRecovery();
-  }
-
-  /**
-   * Attempt to recover from a stream failure
-   */
-  async attemptRecovery(): Promise<boolean> {
-    if (this._retryCount >= this._options.maxRetries!) {
-      logger.error(`Max retries (${this._options.maxRetries}) reached - cannot recover`);
-      return false;
-    }
-
-    this._retryCount++;
-    logger.info(`Attempting recovery (attempt ${this._retryCount}/${this._options.maxRetries})`);
-
-    if (this._options.onRetry) {
-      this._options.onRetry(this._retryCount);
-    }
-
-    // Wait before retrying
-    await new Promise((resolve) => setTimeout(resolve, this._options.retryDelay! * this._retryCount));
-
-    // Reset activity tracking
-    this.recordActivity();
-
-    return true;
-  }
-
-  /**
-   * Handle stream errors with recovery
-   */
-  async handleError(error: any): Promise<boolean> {
-    logger.error('Stream error detected:', error);
-
-    if (this._options.onError) {
-      this._options.onError(error);
-    }
-
-    // Check if error is recoverable
-    if (this._isRecoverableError(error)) {
-      return await this.attemptRecovery();
-    }
-
-    logger.error('Non-recoverable error - cannot continue');
-
-    return false;
-  }
-
-  /**
-   * Check if an error is recoverable
-   */
-  private _isRecoverableError(error: any): boolean {
-    const errorMessage = error?.message || error?.toString() || '';
-
-    // List of recoverable error patterns
-    const recoverablePatterns = [
-      'ECONNRESET',
-      'ETIMEDOUT',
-      'ENOTFOUND',
-      'socket hang up',
-      'network',
-      'timeout',
-      'abort',
-      'EPIPE',
-      '502',
-      '503',
-      '504',
-      'rate limit',
-    ];
-
-    return recoverablePatterns.some((pattern) => errorMessage.toLowerCase().includes(pattern.toLowerCase()));
-  }
-
-  /**
-   * Stop monitoring and cleanup
-   */
-  stop() {
-    this._isActive = false;
-
-    if (this._timeoutHandle) {
-      clearTimeout(this._timeoutHandle);
-      this._timeoutHandle = null;
-    }
-  }
-
-  /**
-   * Reset the recovery manager
-   */
-  reset() {
-    this._retryCount = 0;
-    this._lastActivity = Date.now();
-    this._isActive = true;
-    this._resetTimeout();
-  }
-}
-
-/**
- * Create a wrapped stream with recovery capabilities
- */
-export function createRecoverableStream<T>(
-  streamFactory: () => Promise<ReadableStream<T>>,
-  options?: StreamRecoveryOptions,
-): ReadableStream<T> {
-  const recovery = new StreamRecoveryManager(options);
-  let currentStream: ReadableStream<T> | null = null;
-  let reader: ReadableStreamDefaultReader<T> | null = null;
-
-  return new ReadableStream<T>({
-    async start(controller) {
-      recovery.startMonitoring();
-
-      try {
-        currentStream = await streamFactory();
-        reader = currentStream.getReader();
-      } catch (error) {
-        logger.error('Failed to create initial stream:', error);
-
-        const canRecover = await recovery.handleError(error);
-
-        if (canRecover) {
-          // Retry creating the stream
-          currentStream = await streamFactory();
-          reader = currentStream.getReader();
-        } else {
-          controller.error(error);
-          return;
-        }
-      }
-    },
-
-    async pull(controller) {
-      if (!reader) {
-        controller.error(new Error('No reader available'));
-        return;
-      }
-
-      try {
-        const { done, value } = await reader.read();
-
-        if (done) {
-          controller.close();
-          recovery.stop();
-
-          return;
-        }
-
-        // Record activity to reset timeout
-        recovery.recordActivity();
-        controller.enqueue(value);
-      } catch (error) {
-        logger.error('Error reading from stream:', error);
-
-        const canRecover = await recovery.handleError(error);
-
-        if (canRecover) {
-          // Try to recreate the stream
-          try {
-            if (reader) {
-              reader.releaseLock();
-            }
-
-            currentStream = await streamFactory();
-            reader = currentStream.getReader();
-
-            // Continue reading
-            await this.pull!(controller);
-          } catch (retryError) {
-            logger.error('Recovery failed:', retryError);
-            controller.error(retryError);
-            recovery.stop();
-          }
-        } else {
-          controller.error(error);
-          recovery.stop();
-        }
-      }
-    },

-    cancel() {
-      recovery.stop();
-
-      if (reader) {
-        reader.releaseLock();
-      }
-    },
-  });
-}
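For context on what is being removed: the wrapper above was designed to sit in front of any ReadableStream factory. A minimal consumption sketch follows; the /api/chat endpoint and the text decoding are illustrative assumptions, not part of this commit.

// Minimal sketch of consuming the reverted wrapper (endpoint is hypothetical).
const chatStream = createRecoverableStream<Uint8Array>(
  async () => {
    const response = await fetch('/api/chat', { method: 'POST' }); // hypothetical endpoint

    if (!response.body) {
      throw new Error('Response has no body');
    }

    return response.body;
  },
  {
    maxRetries: 3,
    retryDelay: 1000,
    timeout: 30000,
    onRetry: (attempt) => console.log(`retrying stream, attempt ${attempt}`),
  },
);

async function consume(stream: ReadableStream<Uint8Array>) {
  const reader = stream.getReader();
  const decoder = new TextDecoder();

  // Each successful read calls recordActivity() internally, resetting the 30s inactivity timer.
  for (let result = await reader.read(); !result.done; result = await reader.read()) {
    console.log(decoder.decode(result.value, { stream: true }));
  }
}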
@@ -11,65 +11,6 @@ import { createFilesContext, extractPropertiesFromMessage } from './utils';
 import { discussPrompt } from '~/lib/common/prompts/discuss-prompt';
 import type { DesignScheme } from '~/types/design-scheme';

-function getSmartAISystemPrompt(basePrompt: string): string {
-  const smartAIEnhancement = `
-## SmartAI Mode - Enhanced Conversational Coding Assistant
-
-You are operating in SmartAI mode, a premium Bolt.gives feature that provides detailed, educational feedback throughout the coding process.
-
-### Your Communication Style:
-- Be conversational and friendly, as if pair programming with a colleague
-- Explain your thought process clearly and educationally
-- Use natural language, not technical jargon unless necessary
-- Keep responses visible and engaging
-
-### What to Communicate:
-
-**When Starting Tasks:**
-✨ "I see you want [task description]. Let me [approach explanation]..."
-✨ Explain your understanding and planned approach
-✨ Share why you're choosing specific solutions
-
-**During Implementation:**
-📝 "Now I'm creating/updating [file] to [purpose]..."
-📝 Explain what each code section does
-📝 Share the patterns and best practices you're using
-📝 Discuss any trade-offs or alternatives considered
-
-**When Problem-Solving:**
-🔍 "I noticed [issue]. This is likely because [reasoning]..."
-🔍 Share your debugging thought process
-🔍 Explain how you're identifying and fixing issues
-🔍 Describe why your solution will work
-
-**After Completing Work:**
-✅ "I've successfully [what was done]. The key changes include..."
-✅ Summarize what was accomplished
-✅ Highlight important decisions made
-✅ Suggest potential improvements or next steps
-
-### Example Responses:
-
-Instead of silence:
-"I understand you need a contact form. Let me create a modern, accessible form with proper validation. I'll start by setting up the form structure with semantic HTML..."
-
-While coding:
-"I'm now adding email validation to ensure users enter valid email addresses. I'll use a regex pattern that covers most common email formats while keeping it user-friendly..."
-
-When debugging:
-"I see the button isn't aligning properly with the other elements. This looks like a flexbox issue. Let me adjust the container's display properties to fix the alignment..."
-
-### Remember:
-- Users chose SmartAI to learn from your process
-- Make every action visible and understandable
-- Be their coding companion, not just a silent worker
-- Keep the conversation flowing naturally
-
-${basePrompt}`;
-
-  return smartAIEnhancement;
-}
-
 export type Messages = Message[];

 export interface StreamingOptions extends Omit<Parameters<typeof _streamText>[0], 'model'> {
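The removed helper above is a plain prompt decorator: it prepends the SmartAI instructions and splices the original system prompt back in at the end via ${basePrompt}. A minimal sketch of the same composition pattern, with the enhancement text abbreviated for illustration:

// Sketch of the decorator pattern used by the removed helper (text abbreviated).
function withPreamble(preamble: string) {
  return (basePrompt: string): string => `${preamble}\n\n${basePrompt}`;
}

const smartAI = withPreamble('## SmartAI Mode\nNarrate your plan, your edits, and your debugging out loud.');

console.log(smartAI('You are a helpful coding assistant.')); // enhancement first, base prompt last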
@@ -141,19 +82,13 @@ export async function streamText(props: {
   } = props;
   let currentModel = DEFAULT_MODEL;
   let currentProvider = DEFAULT_PROVIDER.name;
-  let smartAIEnabled = false;
   let processedMessages = messages.map((message) => {
     const newMessage = { ...message };

     if (message.role === 'user') {
-      const { model, provider, content, smartAI } = extractPropertiesFromMessage(message);
+      const { model, provider, content } = extractPropertiesFromMessage(message);
       currentModel = model;
       currentProvider = provider;

-      if (smartAI !== undefined) {
-        smartAIEnabled = smartAI;
-      }
-
       newMessage.content = sanitizeText(content);
     } else if (message.role == 'assistant') {
       newMessage.content = sanitizeText(message.content);
@@ -207,39 +142,13 @@ export async function streamText(props: {

   const dynamicMaxTokens = modelDetails ? getCompletionTokenLimit(modelDetails) : Math.min(MAX_TOKENS, 16384);

-  // Additional safety cap - respect model-specific limits
-  let safeMaxTokens = dynamicMaxTokens;
-
-  // Apply model-specific caps for Anthropic models
-  if (modelDetails?.provider === 'Anthropic') {
-    if (modelDetails.name.includes('claude-sonnet-4') || modelDetails.name.includes('claude-opus-4')) {
-      safeMaxTokens = Math.min(dynamicMaxTokens, 64000);
-    } else if (modelDetails.name.includes('claude-3-7-sonnet')) {
-      safeMaxTokens = Math.min(dynamicMaxTokens, 64000);
-    } else if (modelDetails.name.includes('claude-3-5-sonnet')) {
-      safeMaxTokens = Math.min(dynamicMaxTokens, 8192);
-    } else {
-      safeMaxTokens = Math.min(dynamicMaxTokens, 4096);
-    }
-  } else {
-    // General safety cap for other providers
-    safeMaxTokens = Math.min(dynamicMaxTokens, 128000);
-  }
+  // Use model-specific limits directly - no artificial cap needed
+  const safeMaxTokens = dynamicMaxTokens;

   logger.info(
-    `Max tokens for model ${modelDetails.name} is ${safeMaxTokens} (capped from ${dynamicMaxTokens}) based on model limits`,
+    `Token limits for model ${modelDetails.name}: maxTokens=${safeMaxTokens}, maxTokenAllowed=${modelDetails.maxTokenAllowed}, maxCompletionTokens=${modelDetails.maxCompletionTokens}`,
   );

-  /*
-   * Check if SmartAI is enabled for supported models
-   * SmartAI is enabled if either:
-   * 1. The model itself has isSmartAIEnabled flag (for models with SmartAI in name)
-   * 2. The user explicitly enabled it via message flag
-   */
-  const isSmartAISupported =
-    modelDetails?.supportsSmartAI && (provider.name === 'Anthropic' || provider.name === 'OpenAI');
-  const useSmartAI = (modelDetails?.isSmartAIEnabled || smartAIEnabled) && isSmartAISupported;
-
   let systemPrompt =
     PromptLibrary.getPropmtFromLibrary(promptId || 'default', {
       cwd: WORK_DIR,
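The removed block clamps the requested completion tokens per Anthropic model family (64000 for Claude 4 and 3.7 Sonnet, 8192 for 3.5 Sonnet, 4096 otherwise) and applies a 128000 general cap for other providers. A standalone sketch of the same clamping, with limits copied from the removed lines:

// Standalone sketch of the removed clamping logic (limits copied from above).
function capMaxTokens(providerName: string | undefined, modelName: string, requested: number): number {
  if (providerName !== 'Anthropic') {
    return Math.min(requested, 128000); // general safety cap for other providers
  }

  if (modelName.includes('claude-sonnet-4') || modelName.includes('claude-opus-4')) {
    return Math.min(requested, 64000);
  }

  if (modelName.includes('claude-3-7-sonnet')) {
    return Math.min(requested, 64000);
  }

  if (modelName.includes('claude-3-5-sonnet')) {
    return Math.min(requested, 8192);
  }

  return Math.min(requested, 4096); // conservative default for other Anthropic models
}

console.log(capMaxTokens('Anthropic', 'claude-3-5-sonnet-20241022', 16384)); // 8192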
@@ -253,11 +162,6 @@ export async function streamText(props: {
     },
   }) ?? getSystemPrompt();

-  // Enhance system prompt for SmartAI if enabled and supported
-  if (useSmartAI) {
-    systemPrompt = getSmartAISystemPrompt(systemPrompt);
-  }
-
   if (chatMode === 'build' && contextFiles && contextOptimization) {
     const codeContext = createFilesContext(contextFiles, true);

@@ -317,11 +221,18 @@ export async function streamText(props: {

   logger.info(`Sending llm call to ${provider.name} with model ${modelDetails.name}`);

-  // DEBUG: Log reasoning model detection
+  // Log reasoning model detection and token parameters
   const isReasoning = isReasoningModel(modelDetails.name);
-  logger.info(`DEBUG STREAM: Model "${modelDetails.name}" detected as reasoning model: ${isReasoning}`);
+  logger.info(
+    `Model "${modelDetails.name}" is reasoning model: ${isReasoning}, using ${isReasoning ? 'maxCompletionTokens' : 'maxTokens'}: ${safeMaxTokens}`,
+  );

-  // console.log(systemPrompt, processedMessages);
+  // Validate token limits before API call
+  if (safeMaxTokens > (modelDetails.maxTokenAllowed || 128000)) {
+    logger.warn(
+      `Token limit warning: requesting ${safeMaxTokens} tokens but model supports max ${modelDetails.maxTokenAllowed || 128000}`,
+    );
+  }

   // Use maxCompletionTokens for reasoning models (o1, GPT-5), maxTokens for traditional models
   const tokenParams = isReasoning ? { maxCompletionTokens: safeMaxTokens } : { maxTokens: safeMaxTokens };

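Reasoning models reject maxTokens and expect maxCompletionTokens, which is why the call site builds the parameter object conditionally. A standalone sketch of that branch; the isReasoningModel heuristic below is a simplified stand-in, not the project's implementation:

// Simplified stand-in for the project's isReasoningModel heuristic (assumption: name-prefix check).
function isReasoningModel(modelName: string): boolean {
  return /^o1|^gpt-5/i.test(modelName);
}

function buildTokenParams(modelName: string, safeMaxTokens: number) {
  return isReasoningModel(modelName)
    ? { maxCompletionTokens: safeMaxTokens }
    : { maxTokens: safeMaxTokens };
}

console.log(buildTokenParams('o1-mini', 8192)); // { maxCompletionTokens: 8192 }
console.log(buildTokenParams('claude-3-5-sonnet', 8192)); // { maxTokens: 8192 }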
@@ -8,7 +8,6 @@ export function extractPropertiesFromMessage(message: Omit<Message, 'id'>): {
   model: string;
   provider: string;
   content: string;
-  smartAI?: boolean;
 } {
   const textContent = Array.isArray(message.content)
     ? message.content.find((item) => item.type === 'text')?.text || ''
@@ -17,10 +16,6 @@ export function extractPropertiesFromMessage(message: Omit<Message, 'id'>): {
   const modelMatch = textContent.match(MODEL_REGEX);
   const providerMatch = textContent.match(PROVIDER_REGEX);

-  // Check for SmartAI toggle in the message
-  const smartAIMatch = textContent.match(/\[SmartAI:(true|false)\]/);
-  const smartAI = smartAIMatch ? smartAIMatch[1] === 'true' : undefined;
-
   /*
    * Extract model
    * const modelMatch = message.content.match(MODEL_REGEX);
@@ -38,21 +33,15 @@ export function extractPropertiesFromMessage(message: Omit<Message, 'id'>): {
         if (item.type === 'text') {
           return {
             type: 'text',
-            text: item.text
-              ?.replace(MODEL_REGEX, '')
-              .replace(PROVIDER_REGEX, '')
-              .replace(/\[SmartAI:(true|false)\]/g, ''),
+            text: item.text?.replace(MODEL_REGEX, '').replace(PROVIDER_REGEX, ''),
           };
         }

         return item; // Preserve image_url and other types as is
       })
-    : textContent
-        .replace(MODEL_REGEX, '')
-        .replace(PROVIDER_REGEX, '')
-        .replace(/\[SmartAI:(true|false)\]/g, '');
+    : textContent.replace(MODEL_REGEX, '').replace(PROVIDER_REGEX, '');

-  return { model, provider, content: cleanedContent, smartAI };
+  return { model, provider, content: cleanedContent };
 }

 export function simplifyBoltActions(input: string): string {
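The reverted flag worked as an inline marker in the raw message text: [SmartAI:true] (or false) was parsed into an optional boolean and then stripped before the text reached the model. A standalone sketch of that parse-and-strip round trip, with the marker regex copied from the removed code and an illustrative sample message:

// Parse-and-strip round trip for the reverted [SmartAI:...] marker.
const SMART_AI_REGEX = /\[SmartAI:(true|false)\]/;

function extractSmartAIFlag(text: string): { smartAI?: boolean; cleaned: string } {
  const match = text.match(SMART_AI_REGEX);
  const smartAI = match ? match[1] === 'true' : undefined;
  const cleaned = text.replace(/\[SmartAI:(true|false)\]/g, '').trim();

  return { smartAI, cleaned };
}

console.log(extractSmartAIFlag('[SmartAI:true] Build me a landing page'));
// => { smartAI: true, cleaned: 'Build me a landing page' }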