import streamlit as st
import requests
import json
import time
from datetime import datetime
import os
import threading
import traceback
from typing import List, Dict, Any, Iterator

# Simplified imports: keep only the components that are needed
from langchain_core.callbacks import BaseCallbackHandler

# Configure the Streamlit page (must run before any other UI call).
st.set_page_config(
    page_title="本地Ollama聊天机器人",
    page_icon="🤖",
    layout="wide",
    initial_sidebar_state="expanded"
)

# Custom CSS: theme variables, header banner, chat bubbles, input styling,
# loading spinner and responsive tweaks, injected as raw HTML.
st.markdown("""
<style>
    @import url('https://fonts.googleapis.com/css2?family=Inter:wght@300;400;500;600;700&display=swap');
    
    :root {
        --primary-color: #4a6fa5;
        --secondary-color: #6c757d;
        --success-color: #28a745;
        --warning-color: #ffc107;
        --danger-color: #dc3545;
        --light-color: #f8f9fa;
        --dark-color: #343a40;
        --background-color: #f0f2f6;
        --chat-bg-color: #ffffff;
        --user-bg-color: #e3f2fd;
        --bot-bg-color: #f5f5f5;
    }
    
    body {
        font-family: 'Inter', sans-serif;
        background-color: var(--background-color);
    }
    
    .main-container {
        max-width: 900px;
        margin: 0 auto;
        padding: 1rem;
    }
    
    .header {
        text-align: center;
        padding: 1.5rem 0;
        margin-bottom: 2rem;
        background: linear-gradient(135deg, #667eea 0%, #764ba2 100%);
        border-radius: 12px;
        color: white;
        box-shadow: 0 8px 20px rgba(0,0,0,0.1);
    }
    
    .header h1 {
        font-size: 2.2rem;
        font-weight: 700;
        margin-bottom: 0.5rem;
    }
    
    .header p {
        font-size: 1rem;
        opacity: 0.95;
        margin-bottom: 0;
    }
    
    .api-key-container {
        background-color: var(--light-color);
        padding: 1.5rem;
        border-radius: 10px;
        margin-bottom: 2rem;
        border: 1px solid #e0e0e0;
    }
    
    .api-key-container h3 {
        margin-top: 0;
        color: var(--dark-color);
        font-size: 1.2rem;
        margin-bottom: 1rem;
    }
    
    .api-link {
        color: var(--primary-color);
        text-decoration: none;
        font-weight: 600;
        transition: color 0.3s ease;
    }
    
    .api-link:hover {
        color: #3a5b8f;
        text-decoration: underline;
    }
    
    .chat-container {
        background-color: var(--chat-bg-color);
        border-radius: 12px;
        box-shadow: 0 4px 15px rgba(0,0,0,0.05);
        overflow: hidden;
        border: 1px solid #e0e0e0;
    }
    
    .chat-header {
        background-color: #f8f9fa;
        padding: 1rem 1.5rem;
        border-bottom: 1px solid #e0e0e0;
        display: flex;
        justify-content: space-between;
        align-items: center;
    }
    
    .chat-header h3 {
        margin: 0;
        color: var(--dark-color);
        font-size: 1.1rem;
    }
    
    .clear-btn {
        background-color: transparent;
        color: var(--secondary-color);
        border: 1px solid var(--secondary-color);
        padding: 0.4rem 0.8rem;
        border-radius: 6px;
        cursor: pointer;
        font-size: 0.9rem;
        transition: all 0.3s ease;
    }
    
    .clear-btn:hover {
        background-color: var(--secondary-color);
        color: white;
    }
    
    .chat-history {
        padding: 1.5rem;
        max-height: 500px;
        overflow-y: auto;
        scroll-behavior: smooth;
    }
    
    .message {
        margin-bottom: 1rem;
        animation: fadeIn 0.3s ease-in-out;
    }
    
    @keyframes fadeIn {
        from {
            opacity: 0;
            transform: translateY(10px);
        }
        to {
            opacity: 1;
            transform: translateY(0);
        }
    }
    
    .user-message {
        display: flex;
        justify-content: flex-end;
    }
    
    .bot-message {
        display: flex;
        justify-content: flex-start;
    }
    
    .message-content {
        max-width: 75%;
        padding: 0.8rem 1.2rem;
        border-radius: 18px;
        box-shadow: 0 2px 8px rgba(0,0,0,0.05);
        word-wrap: break-word;
    }
    
    .user-message .message-content {
        background-color: var(--user-bg-color);
        border-bottom-right-radius: 4px;
    }
    
    .bot-message .message-content {
        background-color: var(--bot-bg-color);
        border-bottom-left-radius: 4px;
    }
    
    .message-time {
        font-size: 0.75rem;
        color: var(--secondary-color);
        margin-top: 0.25rem;
        text-align: right;
    }
    
    .bot-message .message-time {
        text-align: left;
    }
    
    .input-container {
        padding: 1.5rem;
        background-color: var(--chat-bg-color);
        border-top: 1px solid #e0e0e0;
    }
    
    .input-field {
        width: 100%;
    }
    
    .stTextArea > div > div {
        border-radius: 8px;
        border: 1px solid #ddd;
        transition: all 0.3s ease;
    }
    
    .stTextArea > div > div:focus-within {
        border-color: var(--primary-color);
        box-shadow: 0 0 0 3px rgba(74, 111, 165, 0.1);
    }
    
    .send-btn {
        background-color: var(--primary-color);
        color: white;
        border: none;
        padding: 0.75rem 2rem;
        font-size: 1rem;
        font-weight: 600;
        border-radius: 8px;
        cursor: pointer;
        transition: all 0.3s ease;
        margin-top: 0.5rem;
        width: 100%;
    }
    
    .send-btn:hover {
        background-color: #3a5b8f;
        transform: translateY(-1px);
        box-shadow: 0 4px 12px rgba(74, 111, 165, 0.3);
    }
    
    .send-btn:disabled {
        background-color: #cccccc;
        cursor: not-allowed;
        transform: none;
        box-shadow: none;
    }
    
    .loading-indicator {
        display: flex;
        align-items: center;
        justify-content: center;
        padding: 1rem;
    }
    
    .loading-spinner {
        border: 3px solid #f3f3f3;
        border-top: 3px solid var(--primary-color);
        border-radius: 50%;
        width: 20px;
        height: 20px;
        animation: spin 1s linear infinite;
        margin-right: 0.5rem;
    }
    
    @keyframes spin {
        0% { transform: rotate(0deg); }
        100% { transform: rotate(360deg); }
    }
    
    .error-message {
        background-color: rgba(220, 53, 69, 0.1);
        color: var(--danger-color);
        padding: 1rem;
        border-radius: 8px;
        margin: 1rem 0;
        border-left: 4px solid var(--danger-color);
    }
    
    .success-message {
        background-color: rgba(40, 167, 69, 0.1);
        color: var(--success-color);
        padding: 1rem;
        border-radius: 8px;
        margin: 1rem 0;
        border-left: 4px solid var(--success-color);
    }
    
    .tip-box {
        background-color: rgba(255, 193, 7, 0.1);
        padding: 1rem;
        border-radius: 8px;
        border-left: 4px solid var(--warning-color);
        margin: 1rem 0;
    }
    
    .tip-box h4 {
        color: #856404;
        margin-top: 0;
        margin-bottom: 0.5rem;
    }
    
    .tip-box p {
        margin: 0;
        color: #856404;
        font-size: 0.9rem;
    }
    
    @media (max-width: 768px) {
        .message-content {
            max-width: 85%;
        }
    }
</style>
""", unsafe_allow_html=True)

# Seed the conversation with a greeting the first time the app runs.
if "messages" not in st.session_state:
    st.session_state.messages = [{
        "role": "assistant",
        "content": "你好！我是你的本地智能助手，有什么我可以帮助你的吗？",
        "timestamp": datetime.now().strftime("%H:%M")
    }]

# Remaining per-session defaults, created only when missing.
for _state_key, _state_default in (
    ("error", None),
    ("loading", False),
    ("recommended_questions", []),
    ("selected_question", ""),
):
    if _state_key not in st.session_state:
        st.session_state[_state_key] = _state_default

# Ollama endpoint configuration constants
OLLAMA_URL = "http://localhost:11434/api/chat"
OLLAMA_MODEL = "qwen3:32b"

# Custom streaming handler class
class StreamHandler:
    """Accumulates streamed tokens and mirrors them into a display container."""

    def __init__(self, container, initial_text=""):
        self.container = container   # display target (e.g. a Streamlit placeholder); may be falsy
        self.text = initial_text     # text shown so far, seeded with initial_text
        self.full_text = ""          # only the tokens received (no seed text)
        self.error = None            # last rendering error message, if any

    def update_text(self, token: str) -> None:
        """Append one streamed token and refresh the container display."""
        try:
            self.text = self.text + token
            self.full_text = self.full_text + token
            if self.container:
                self.container.markdown(self.text)
        except Exception as e:
            self.error = str(e)

# Define all required functions first
def call_ollama_api_stream(ollama_messages: list, bot_message_id: str):
    """
    Call Ollama's chat API with streaming enabled.

    Streams the reply chunk by chunk into
    st.session_state.messages[int(bot_message_id)]["content"], periodically
    forcing a Streamlit rerun so the UI refreshes.

    Args:
        ollama_messages: chat history as Ollama-format {"role", "content"} dicts.
        bot_message_id: index (as a string) of the placeholder assistant
            message in st.session_state.messages that receives the reply.

    Returns:
        The full response text, or an error-message string on failure.
    """
    try:
        # Streaming bookkeeping
        full_response = ""
        chunks_received = False
        last_update_time = time.time()
        update_interval = 0.1  # throttle UI updates to limit redraws
        
        # Call the API directly via requests
        url = OLLAMA_URL
        headers = {
            "Content-Type": "application/json"
        }
        
        data = {
            "model": OLLAMA_MODEL,
            "messages": ollama_messages,
            "stream": True,
            "options": {
                "temperature": 0.7,
                "num_predict": 1024  # raise the generated-token budget
            }
        }
        
        # Streaming request with separate connect/read timeouts
        response = requests.post(
            url,
            headers=headers,
            json=data,
            stream=True,
            timeout=(30, 300)  # 30s connect timeout, 300s read timeout
        )
        
        # Raise on non-2xx status codes
        response.raise_for_status()
        
        # Additional wall-clock deadline for the whole stream
        start_time = time.time()
        timeout = 600  # 10-minute cap, enough for complex questions
        
        # Give the user immediate feedback that generation has started
        st.session_state.messages[int(bot_message_id)]["content"] = "正在生成回复..."
        
        # Consume the stream line by line (one JSON object per line)
        for chunk in response.iter_lines():
            # Abort if the overall deadline has passed
            if time.time() - start_time > timeout:
                st.session_state.messages[int(bot_message_id)]["content"] = "处理响应超时，模型生成内容可能较长，请尝试简化问题"
                st.rerun()
                # NOTE(review): st.rerun() restarts the script by raising, so
                # this return is likely unreachable — confirm against Streamlit docs.
                return "处理响应超时"
                
            if chunk:
                try:
                    # Decode the raw line
                    chunk_str = chunk.decode('utf-8', errors='replace')
                    
                    # Parse the JSON payload
                    chunk_data = json.loads(chunk_str)
                    
                    # Extract the text delta from this chunk
                    content_chunk = ""
                    
                    # /api/chat puts text under "message"; "response" is the
                    # /api/generate-style fallback
                    if 'message' in chunk_data:
                        message = chunk_data['message']
                        content_chunk = message.get('content', '') or message.get('thinking', '')
                    elif 'response' in chunk_data:
                        content_chunk = chunk_data.get('response', '')
                    
                    # Accumulate and (rate-limited) push the text to the UI
                    if content_chunk:
                        full_response += content_chunk
                        chunks_received = True
                        
                        # Update the message only after enough time or text has
                        # accumulated, to avoid excessive redraws
                        current_time = time.time()
                        if (current_time - last_update_time > update_interval or 
                            len(full_response) > 50):  # at least 50 chars or the interval elapsed
                            st.session_state.messages[int(bot_message_id)]["content"] = full_response
                            last_update_time = current_time
                            
                            # Periodically force a UI refresh
                            # NOTE(review): st.rerun() aborts this function
                            # mid-stream (it raises to restart the script), so
                            # the rest of the response is lost — verify intended.
                            if len(full_response) % 200 < len(content_chunk):
                                st.rerun()
                
                except json.JSONDecodeError:
                    # Silently skip unparseable lines and move on
                    continue
        
        # Final UI update with the complete response
        if chunks_received:
            st.session_state.messages[int(bot_message_id)]["content"] = full_response
            st.rerun()  # force one last UI refresh
        else:
            # No usable chunks arrived; surface an explicit hint
            st.session_state.messages[int(bot_message_id)]["content"] = "抱歉，未收到模型响应，请检查Ollama服务是否正常运行"
            st.rerun()
            return ""
        
        return full_response
    except requests.exceptions.ConnectionError:
        error_msg = "连接失败，请检查您的Ollama服务是否正常运行。"
        st.session_state.messages[int(bot_message_id)]["content"] = f"⚠️ {error_msg}\n\n请检查：\n1. Ollama服务是否已启动\n2. 是否运行在默认端口11434\n3. 防火墙是否允许访问localhost"
        st.rerun()
        return error_msg
    except requests.exceptions.Timeout:
        error_msg = "模型响应超时，请稍后重试。"
        st.session_state.messages[int(bot_message_id)]["content"] = f"⚠️ {error_msg}\n\n请检查：\n1. 本地模型是否已加载完成\n2. 系统资源是否充足\n3. 问题复杂度是否过高"
        st.rerun()
        return error_msg
    except requests.exceptions.HTTPError as e:
        # Map common HTTP status codes to actionable messages
        status_code = e.response.status_code if hasattr(e, 'response') and e.response else "未知"
        error_msg = f"HTTP错误 (状态码: {status_code})："
        
        if status_code == 404:
            error_msg += "找不到指定模型，请检查模型名称是否正确。"
            suggestion = f"请确认'{OLLAMA_MODEL}'模型已通过Ollama下载并可用。"
        elif status_code == 400:
            error_msg += "请求格式错误。"
            suggestion = "请稍后重试，可能是消息格式问题。"
        elif status_code == 500:
            error_msg += "服务器错误，Ollama服务可能遇到问题。"
            suggestion = "请重启Ollama服务并重试。"
        else:
            error_msg += str(e)
            suggestion = "请检查Ollama服务状态后重试。"
        
        st.session_state.messages[int(bot_message_id)]["content"] = f"⚠️ {error_msg}\n\n{suggestion}"
        st.rerun()
        return error_msg
    except Exception as e:
        # Catch-all: surface the error in the chat bubble
        error_msg = f"模型调用失败: {str(e)}"
        st.session_state.messages[int(bot_message_id)]["content"] = f"⚠️ 发生错误：{str(e)}\n\n请检查：\n1. Ollama服务是否正常运行\n2. {OLLAMA_MODEL}模型是否已正确安装\n3. 本地计算资源是否充足"
        st.rerun()
        return error_msg

def get_streaming_langchain_response(prompt: str, bot_message_id: int) -> str:
    """
    Build an Ollama-format message list from the conversation and stream a reply.

    Args:
        prompt: the user's current question.
        bot_message_id: index in st.session_state.messages of the empty
            assistant placeholder that will receive the streamed reply.

    Returns:
        The full model response text, or an error-message string on failure.
    """
    try:
        # System prompt always goes first.
        ollama_messages = [{
            "role": "system",
            "content": "你是一个友好的AI助手，请根据用户问题提供详细准确的回答。"
        }]

        # Conversation history: everything before the current user turn.
        # send_message appends the user message (index bot_message_id - 1) and
        # then the placeholder (index bot_message_id); `prompt` is appended
        # explicitly below, so both must be excluded here.  The original loop
        # only skipped the placeholder, which sent the latest user question to
        # the model twice.
        history_end = max(bot_message_id - 1, 0)
        for msg in st.session_state.messages[:history_end]:
            ollama_messages.append({
                "role": "user" if msg["role"] == "user" else "assistant",
                "content": msg["content"]
            })

        # Current user question.
        ollama_messages.append({
            "role": "user",
            "content": prompt
        })

        # Delegate to the streaming API call.
        return call_ollama_api_stream(ollama_messages, str(bot_message_id))
    except Exception as e:
        error_msg = f"模型调用失败: {str(e)}"
        st.session_state.messages[bot_message_id]["content"] = f"⚠️ 发生错误：{str(e)}\n\n请检查：\n1. Ollama服务是否正常运行\n2. {OLLAMA_MODEL}模型是否已正确安装\n3. 本地计算资源是否充足"
        st.rerun()
        return error_msg

def generate_recommended_questions(user_question: str, ai_response: str) -> List[str]:
    """
    Ask the model for three follow-up questions based on the last exchange.

    Falls back to a fixed set of default questions on any error or when the
    response cannot be parsed.

    Args:
        user_question: the user's last question.
        ai_response: the assistant's answer to that question.

    Returns:
        A list of at most three suggested follow-up questions.
    """
    try:
        # Prompt asking for exactly three plain questions, one per line.
        prompt = f"""
基于以下对话内容，生成3个相关的后续问题，用于帮助用户继续与AI助手交流。

用户问题: {user_question}

AI回答: {ai_response}

请直接输出3个问题，每个问题占一行，不要添加任何额外的格式或编号。
        """

        # Non-streaming call to the local Ollama chat API.
        headers = {
            "Content-Type": "application/json"
        }
        data = {
            "model": OLLAMA_MODEL,
            "messages": [
                {
                    "role": "user",
                    "content": prompt
                }
            ],
            "stream": False,
            "options": {
                "temperature": 0.7
            }
        }

        response = requests.post(OLLAMA_URL, headers=headers, json=data, timeout=30)
        response.raise_for_status()
        result = response.json()

        # Ollama's /api/chat (non-streaming) returns the text under
        # result["message"]["content"].  The original code looked for
        # result["output"]["text"] — a shape Ollama never returns — so parsing
        # always failed and only the default questions were ever shown.  The
        # old shape is kept as a fallback for safety.
        content = ""
        if isinstance(result.get("message"), dict):
            content = result["message"].get("content", "")
        elif "output" in result and "text" in result["output"]:
            content = result["output"]["text"]

        if not content:
            raise Exception("API返回格式错误")

        # One question per non-empty line.
        questions = [q.strip() for q in content.strip().split("\n") if q.strip()]

        # Pad with defaults when the model produced fewer than three.
        if len(questions) < 3:
            default_questions = [
                "关于这个话题，您还能提供更多细节吗？",
                "这个信息对我很有用，您能再举个例子吗？",
                "您能帮我总结一下关键点吗？"
            ]
            for q in default_questions:
                if q not in questions:
                    questions.append(q)
                if len(questions) >= 3:
                    break

        return questions[:3]  # at most three questions

    except Exception:
        # Any failure falls back to generic suggestions.
        return [
            "您还想了解什么相关信息？",
            "您对这个回答满意吗？",
            "还有其他我可以帮助您的吗？"
        ]

def send_message():
    """
    Handle a send action: validate input, append the user message, stream the
    assistant reply into a placeholder, then refresh recommended questions.

    Reads the question from st.session_state["user_input"]; communicates
    errors via st.session_state.error and progress via st.session_state.loading.
    """
    user_input = st.session_state.get("user_input", "").strip()
    if not user_input:
        st.session_state.error = "请输入消息内容"
        return

    # Clear any stale error from a previous attempt.
    st.session_state.error = None

    # Record the user's message in the conversation.
    st.session_state.messages.append({
        "role": "user",
        "content": user_input,
        "timestamp": datetime.now().strftime("%H:%M")
    })
    # NOTE: the widget-bound "user_input" key is deliberately not reset here;
    # the textarea keeps its content until Streamlit's next rerun.

    # Show the loading indicator while the reply streams in.
    st.session_state.loading = True

    try:
        # Append an empty assistant placeholder; its index is passed down so
        # the streaming code can update its content in place.
        bot_message_id = len(st.session_state.messages)
        st.session_state.messages.append({
            "role": "assistant",
            "content": "",  # filled in incrementally while streaming
            "timestamp": datetime.now().strftime("%H:%M")
        })

        # Stream the model's reply.
        full_response = get_streaming_langchain_response(user_input, bot_message_id)

        # Refresh the follow-up suggestions based on this exchange.
        st.session_state.recommended_questions = generate_recommended_questions(user_input, full_response)

    except Exception as e:
        st.session_state.error = f"API调用失败: {str(e)}"
        # Drop the placeholder if it never received any content.
        if st.session_state.messages and st.session_state.messages[-1]["role"] == "assistant" and not st.session_state.messages[-1]["content"]:
            st.session_state.messages.pop()
    finally:
        # Always clear the loading flag, even on failure.
        st.session_state.loading = False

# Main page container
st.markdown('<div class="main-container">', unsafe_allow_html=True)

# Page header banner
st.markdown("""
<div class="header">
    <h1>🤖 LangChain智能聊天机器人</h1>
    <p>基于本地Ollama模型的智能对话助手</p>
</div>
""", unsafe_allow_html=True)

# Sidebar: model information, utilities and Ollama service status.
with st.sidebar:
    st.markdown("""
    <style>
        .sidebar-header {
            text-align: center;
            padding: 1rem 0;
            margin-bottom: 1rem;
            background: linear-gradient(135deg, #667eea 0%, #764ba2 100%);
            border-radius: 8px;
            color: white;
        }
        
        .sidebar-header h3 {
            margin: 0;
            font-size: 1.1rem;
        }
        
        .sidebar-section {
            background-color: #f8f9fa;
            padding: 1rem;
            border-radius: 8px;
            margin-bottom: 1rem;
            border: 1px solid #e0e0e0;
        }
        
        .sidebar-section h4 {
            margin-top: 0;
            margin-bottom: 0.75rem;
            color: #343a40;
            font-size: 1rem;
        }
        
        .sidebar-tip {
            background-color: rgba(255, 193, 7, 0.1);
            padding: 0.75rem;
            border-radius: 6px;
            margin-top: 0.75rem;
            font-size: 0.9rem;
        }
        
        .sidebar-tip h5 {
            margin: 0 0 0.5rem 0;
            color: #856404;
            font-size: 0.9rem;
        }
        
        .sidebar-tip p {
            margin: 0;
            color: #856404;
            font-size: 0.85rem;
        }
        
        .sidebar-link {
            color: #4a6fa5;
            text-decoration: none;
            font-weight: 500;
        }
        
        .sidebar-link:hover {
            text-decoration: underline;
        }
    </style>
    """, unsafe_allow_html=True)
    
    # Sidebar title banner
    st.markdown("""
    <div class="sidebar-header">
        <h3>⚙️ 模型设置</h3>
    </div>
    """, unsafe_allow_html=True)
    
    # Model information section
    st.markdown('<div class="sidebar-section">', unsafe_allow_html=True)
    st.markdown('<h4>Ollama配置</h4>', unsafe_allow_html=True)
    
    # Show the configured model and endpoint
    st.info(f"当前使用模型: **{OLLAMA_MODEL}**\n服务地址: **{OLLAMA_URL}**")
    
    # Usage tip.  The model name/pull command are derived from OLLAMA_MODEL so
    # they always match the configured model (the original hard-coded
    # "wenwen32b", which is not the model this app uses).
    st.markdown(f"""
    <div class="sidebar-tip">
        <h5>💡 如何使用？</h5>
        <p>请确保Ollama服务已启动，并已下载{OLLAMA_MODEL}模型。</p>
        <p>使用命令下载模型: <code>ollama pull {OLLAMA_MODEL}</code></p>
    </div>
    """, unsafe_allow_html=True)
    st.markdown('</div>', unsafe_allow_html=True)
    
    # Extra utilities section
    st.markdown('<div class="sidebar-section">', unsafe_allow_html=True)
    st.markdown('<h4>🔧 功能选项</h4>', unsafe_allow_html=True)
    
    # Clear-chat button
    clear_chat_sidebar = st.button("清除聊天记录", use_container_width=True, key="sidebar_clear_chat")
    if clear_chat_sidebar:
        st.session_state.messages = []
        st.session_state.error = None
        st.sidebar.success("✅ 聊天记录已清除")
    
    # Ollama service status check — runs on every render.  (In the original
    # this whole section was accidentally indented inside the clear-button
    # branch, so the status only appeared right after clearing the chat.)
    st.markdown("<div style='margin-top: 1rem; font-size: 0.85rem; color: #6c757d;'>", unsafe_allow_html=True)
    try:
        response = requests.get("http://localhost:11434/api/tags", timeout=2)
        if response.status_code == 200:
            st.markdown("**✅ Ollama服务状态:** 正常运行")
        else:
            st.markdown("**❌ Ollama服务状态:** 无法访问")
    except Exception:  # requests errors all derive from Exception; no bare except
        st.markdown("**❌ Ollama服务状态:** 未启动或端口错误")
    st.markdown("</div>", unsafe_allow_html=True)
    st.markdown('</div>', unsafe_allow_html=True)
    
    # Sidebar footer (model name kept consistent with OLLAMA_MODEL)
    st.markdown(f"""
    <div style='margin-top: 2rem; text-align: center; font-size: 0.8rem; color: #6c757d;'>
        © 2024 本地Ollama聊天机器人<br>
        基于{OLLAMA_MODEL}模型
    </div>
    """, unsafe_allow_html=True)

# Chat interface container
st.markdown('<div class="chat-container">', unsafe_allow_html=True)

# Chat header bar
st.markdown("""
<div class="chat-header">
    <h3>对话历史</h3>
</div>
""", unsafe_allow_html=True)

# Top hint.  The model name is taken from OLLAMA_MODEL so the hint matches
# the configured model (the original hard-coded a different name).
st.info(f"💡 确保Ollama服务已启动且{OLLAMA_MODEL}模型已下载完成")

# Inline error banner, shown only when an error is pending
if st.session_state.error:
    st.markdown(f"""
    <div class="error-message">
        <strong>❌ 错误:</strong> {st.session_state.error}
    </div>
    """, unsafe_allow_html=True)

# Chat history area
st.markdown('<div class="chat-history">', unsafe_allow_html=True)

# Render each message as a chat bubble; both roles share one HTML template
# and differ only in their CSS class.
# NOTE(review): message content is interpolated unescaped into HTML under
# unsafe_allow_html=True — confirm inputs are trusted.
for entry in st.session_state.messages:
    bubble_class = "user-message" if entry["role"] == "user" else "bot-message"
    body = entry["content"]
    shown_at = entry.get("timestamp", datetime.now().strftime("%H:%M"))
    st.markdown(f"""
        <div class="message {bubble_class}">
            <div class="message-content">
                {body}
                <div class="message-time">{shown_at}</div>
            </div>
        </div>
        """, unsafe_allow_html=True)

# Spinner shown while a reply is being generated
if st.session_state.loading:
    st.markdown("""
    <div class="loading-indicator">
        <div class="loading-spinner"></div>
        <span>AI正在思考中...</span>
    </div>
    """, unsafe_allow_html=True)

st.markdown('</div>', unsafe_allow_html=True)  # close chat history area

# Recommended-questions area (hidden while a reply is streaming)
if st.session_state.recommended_questions and not st.session_state.loading:
    st.markdown("""
    <div style='padding: 0 1.5rem 0.5rem;'>
        <h4 style='margin-bottom: 0.5rem; color: #6c757d; font-size: 0.95rem;'>💡 推荐问题：</h4>
    </div>
    """, unsafe_allow_html=True)
    
    # One column for a single suggestion, two columns otherwise
    cols = st.columns(1) if len(st.session_state.recommended_questions) <= 1 else st.columns(2)
    for i, question in enumerate(st.session_state.recommended_questions):
        col_idx = i % len(cols)
        if cols[col_idx].button(question, use_container_width=True, key=f"recommended_{i}"):
            st.session_state.selected_question = question
            # NOTE(review): assigning to the widget-bound "user_input" key works
            # only because the text_area below has not been instantiated yet in
            # this run — confirm against the Streamlit version in use.
            st.session_state.user_input = question
            send_message()

# Input area
st.markdown('<div class="input-container">', unsafe_allow_html=True)

# User input textarea, bound to st.session_state["user_input"]
user_input = st.text_area(
    "",
    placeholder="请输入您的问题...",
    label_visibility="collapsed",
    key="user_input",
    height=100
)

# Send button
send_button = st.button("发送消息", use_container_width=True, type="primary", key="send_button")

# Trigger the send flow on click with non-empty input
# (send_message re-validates and reads the text from session state)
if send_button and user_input and user_input.strip():
    send_message()

# Usage instructions card.  The model name and pull command are derived from
# OLLAMA_MODEL so they always match the configured model (the original
# hard-coded "wenwen32b", which is not the model this app uses).
st.markdown(f"""
<div class="info-card" style="margin-top: 1.5rem; background-color: #f8f9fa; padding: 1rem; border-radius: 8px; border-left: 4px solid #4a6fa5;">
        <h4>📋 使用说明</h4>
        <ul>
            <li>1. 确保Ollama服务已启动：<code>ollama serve</code></li>
            <li>2. 确保{OLLAMA_MODEL}模型已下载：<code>ollama pull {OLLAMA_MODEL}</code></li>
            <li>3. 在输入框中输入您的问题</li>
            <li>4. 点击发送按钮获取AI回复</li>
            <li>5. 可以在左侧边栏清除聊天记录重新开始</li>
            <li>6. 系统将自动生成后续推荐问题</li>
        </ul>
    </div>
""", unsafe_allow_html=True)

# Page footer.  The model name comes from OLLAMA_MODEL for consistency
# (the original hard-coded a different name).
st.markdown(f"""
---
<div style='text-align: center; color: var(--secondary-color); font-size: 0.9rem; margin-top: 2rem;'>
    © 2024 本地Ollama聊天机器人 | 基于{OLLAMA_MODEL}模型
</div>
""", unsafe_allow_html=True)

st.markdown('</div>', unsafe_allow_html=True)  # close input container
st.markdown('</div>', unsafe_allow_html=True)  # close chat container
st.markdown('</div>', unsafe_allow_html=True)  # close main container