import React from 'react';
import { ChatHistoryProps } from '@/lib/types';
import TypingIndicator from './TypingIndicator';
import { useScrollToBottom } from '@/lib/hooks';
import { AlertTriangle } from 'lucide-react';
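
// NOTE: ChatHistoryProps and useScrollToBottom come from project-local modules that
// are not shown in this file. A rough sketch of the shapes this component assumes
// (illustrative only, not the actual definitions):
//
//   interface ChatHistoryProps {
//     messages: { role: 'user' | 'assistant'; content: string }[];
//     isLoading: boolean;
//     currentModel?: string;
//   }
//
//   // Returns a ref for the scrollable container and scrolls it to the bottom
//   // whenever any value in the dependency array changes.
//   function useScrollToBottom(deps: unknown[]): React.RefObject<HTMLDivElement>;
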
const ChatHistory: React.FC<ChatHistoryProps> = ({
messages,
isLoading,
currentModel = 'openai'
}) => {
const scrollRef = useScrollToBottom([messages, isLoading]);
// Check if we're in fallback mode by looking at the model or fallback indicator in messages
const isFallbackMode = currentModel === 'qwen' ||
messages.some(message =>
message.role === 'assistant' &&
message.content.includes('fallback mode')
);
return (
<div
ref={scrollRef}
className="chat-container overflow-y-auto pb-4 px-2"
style={{ height: 'calc(100vh - 180px)' }}
>
{/* Fallback mode indicator */}
{isFallbackMode && (
<div className="bg-amber-50 border-l-4 border-amber-400 p-4 mb-4 rounded-md">
<div className="flex items-center">
<div className="flex-shrink-0">
<AlertTriangle className="h-5 w-5 text-amber-400" />
</div>
<div className="ml-3">
<p className="text-sm text-amber-700">
<strong>Qwen Fallback Mode Active:</strong> The OpenAI API is currently unavailable.
Responses are being generated by the Qwen model instead.
</p>
</div>
</div>
</div>
)}
{messages.map((message, index) => {
// Check if this is a fallback message directly from the content
const isMessageFallback = message.role === 'assistant' &&
(message.content.includes('fallback mode') ||
message.content.includes('Qwen model'));
// Determine if this message appears to be a fallback response
const isAssistantFallbackMessage = message.role === 'assistant' &&
(currentModel === 'qwen' || isMessageFallback);
// Clean up fallback message for display
let displayContent = isAssistantFallbackMessage
? message.content.replace(/\n\n\(Note: I'm currently operating in fallback mode.*\)$/, '')
: message.content;
// Remove any thinking process sections for Qwen responses
if (isAssistantFallbackMessage) {
// Remove <think> tags and their content
displayContent = displayContent.replace(/<think>[\s\S]*?<\/think>/g, '');
// Remove any other XML-like tags
displayContent = displayContent.replace(/<[^>]*>/g, '');
// Clean up any excessive whitespace
displayContent = displayContent.replace(/^\s+|\s+$/g, '');
displayContent = displayContent.replace(/\n{3,}/g, '\n\n');
}
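        // Illustrative example of the cleanup above (hypothetical Qwen output):
        //   "<think>working through the question...</think>\n\nHere is the answer.\n\n\n\nDone."
        //   becomes "Here is the answer.\n\nDone."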
return (
<div
key={index}
className={`flex items-start ${message.role === 'user' ? 'justify-end' : ''} mb-4`}
>
{message.role !== 'user' && (
<div className="flex-shrink-0 mr-3">
<div className={`h-8 w-8 rounded-full ${
isAssistantFallbackMessage ? 'bg-amber-500' : 'bg-primary'
} flex items-center justify-center text-white`}>
<svg xmlns="http://www.w3.org/2000/svg" className="h-5 w-5" viewBox="0 0 20 20" fill="currentColor">
<path d="M2 5a2 2 0 012-2h7a2 2 0 012 2v4a2 2 0 01-2 2H9l-3 3v-3H4a2 2 0 01-2-2V5z" />
<path d="M15 7v2a4 4 0 01-4 4H9.828l-1.766 1.767c.28.149.599.233.938.233h2l3 3v-3h2a2 2 0 002-2V9a2 2 0 00-2-2h-1z" />
</svg>
</div>
</div>
)}
<div
className={`${
message.role === 'user'
? 'bg-primary text-white'
: isAssistantFallbackMessage
? 'bg-amber-50 border border-amber-200 text-gray-800'
: 'bg-white text-gray-800'
} rounded-lg p-4 shadow-sm max-w-[85%]`}
>
<p className="whitespace-pre-wrap">{displayContent}</p>
{isAssistantFallbackMessage && isMessageFallback && (
<p className="mt-2 text-xs text-amber-600 italic">
(This response was generated using the Qwen fallback model)
</p>
)}
</div>
{message.role === 'user' && (
<div className="flex-shrink-0 ml-3">
<div className="h-8 w-8 rounded-full bg-gray-300 flex items-center justify-center text-gray-600">
<svg xmlns="http://www.w3.org/2000/svg" className="h-5 w-5" viewBox="0 0 20 20" fill="currentColor">
<path fillRule="evenodd" d="M10 9a3 3 0 100-6 3 3 0 000 6zm-7 9a7 7 0 1114 0H3z" clipRule="evenodd" />
</svg>
</div>
</div>
)}
</div>
);
})}
<TypingIndicator isVisible={isLoading} />
</div>
);
};
export default ChatHistory;
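
// Example usage from a hypothetical parent component (prop names are illustrative):
//   <ChatHistory messages={chatMessages} isLoading={isAwaitingReply} currentModel={activeModel} />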