import streamlit as st
from langchain_core.prompts import ChatPromptTemplate
from langchain.tools.retriever import create_retriever_tool
from langchain.agents import AgentExecutor, create_tool_calling_agent
from langchain_ollama import ChatOllama
from langchain.schema import BaseRetriever, Document
from langchain.callbacks.base import BaseCallbackHandler
from typing import List, Any, Dict, Union
import os
from config.config import DB_CONFIG, OLLAMA_CONFIG
from langchain.memory import ConversationBufferWindowMemory
import time
import uuid
import threading
import random


# Process-wide cached NLP query service (Streamlit builds it once and reuses it).
@st.cache_resource
def get_qa_query():
    """Lazily construct and cache the NLP question-answer query service."""
    from config.config import DB_CONFIG
    from nlp_api.qa_query_service import QAQuery
    service = QAQuery(DB_CONFIG)
    return service

# 缓存 NLP 服务可用性检测结果，避免重复检测
@st.cache_data
def get_nlp_service_status():
    try:
        test_retriever = NLPHybridRetriever()
        return test_retriever.available
    except Exception:
        return False

# Tolerate duplicate OpenMP runtimes in one process — a common workaround for
# libiomp/libomp clashes (NOTE(review): presumably needed by an ML dependency; confirm which).
os.environ["KMP_DUPLICATE_LIB_OK"]="TRUE"

def stream_response_with_thinking(agent_executor, question, stream_container):
    """Stream the agent's answer into *stream_container*, then replay any <think> block.

    Returns:
        (clean_content, think_content): the answer with <think> sections removed,
        and the extracted reasoning text ("" when no tags are present).
    """
    import re
    from time import sleep

    # Phase 1: native LangChain streaming — accumulate chunks and render live.
    full_text = ""
    for piece in agent_executor.stream({"input": question}):
        full_text += piece.get("output", "")
        stream_container.markdown(full_text)

    # Phase 2: split the model's hidden reasoning from the visible answer.
    match = re.search(r'<think>(.*?)</think>', full_text, re.DOTALL)
    if match:
        thinking = match.group(1).strip()
        answer = re.sub(r'<think>.*?</think>', '', full_text, flags=re.DOTALL).strip()
    else:
        thinking = ""
        answer = full_text.strip()

    # Phase 3: typewriter-style replay of the reasoning with a blinking cursor.
    if thinking:
        stream_container.empty()
        shown = ""
        for ch in thinking:
            shown += ch
            stream_container.markdown(shown + "▋")
            # Pause longer on CJK punctuation to mimic natural reading rhythm.
            if ch in '，。！？；：':
                sleep(0.25)
            elif ch == ' ':
                sleep(0.08)
            else:
                sleep(0.04)
        stream_container.markdown(shown)
        sleep(0.2)

    return answer, thinking

# Hybrid retriever backed by the NLP + database QA service.
class NLPHybridRetriever(BaseRetriever):
    """Retriever that answers queries through the hybrid NLP/database QA service."""
    # Pydantic field declarations (BaseRetriever is a pydantic model).
    qa_query: Any = None
    available: bool = False

    def __init__(self):
        super().__init__()
        try:
            self.qa_query = get_qa_query()
            self.available = True
        except Exception as e:
            import traceback
            # Surface the failure in the UI; the retriever stays in a disabled state.
            st.error(f"初始化NLP检索器失败: {e}\n{traceback.format_exc()}")
            self.qa_query = None
            self.available = False

    def _get_relevant_documents(self, query: str) -> List[Document]:
        """Return up to five matching Documents, or a single placeholder/error Document."""
        if not (self.available and self.qa_query):
            return [Document(page_content="NLP检索器不可用", metadata={"error": True})]

        try:
            outcome = self.qa_query.get_best_answer_hybrid(query)

            if not (outcome['success'] and outcome['results']):
                # Nothing matched — hand back a friendly hint document.
                return [Document(
                    page_content="未找到相关内容，请尝试其他问题或关键词。",
                    metadata={'source': 'system', 'no_results': True}
                )]

            docs = []
            for hit in outcome['results'][:5]:  # keep the five best-ranked matches
                docs.append(Document(
                    page_content=f"问题: {hit['question']}\n\n答案: {hit['answer']}",
                    metadata={
                        'source': 'nlp_database',
                        'question': hit['question'],
                        'score': hit.get('combined_score', 0),
                        'nlp_matched': hit.get('nlp_matched', False),
                        'category_id': hit.get('category_id')
                    }
                ))
            return docs

        except Exception as e:
            return [Document(
                page_content=f"检索过程中出现错误: {str(e)}",
                metadata={'source': 'error', 'error': True}
            )]

# Cache one memory instance per server process so multi-turn context survives reruns.
@st.cache_resource
def get_conversation_memory():
    """Build (once) a windowed conversation memory seeded from the session history.

    Returns:
        ConversationBufferWindowMemory keeping the last k=5 turns; at creation
        time up to the 10 most recent session messages are copied in.
    """
    # Fix: these were re-imported inside the loop on every iteration — hoist once.
    from langchain.schema import AIMessage, HumanMessage

    llm = ChatOllama(
        base_url=OLLAMA_CONFIG["base_url"],
        model=OLLAMA_CONFIG["chat_model"]
    )
    # Independent memory object — deliberately NOT bound to st.session_state.chat_history,
    # so the two stores cannot double-track the same messages by reference.
    memory = ConversationBufferWindowMemory(llm=llm, k=5, return_messages=True)

    # Seed the memory with a copy of the most recent session messages.
    history = st.session_state.get('chat_history') or []
    for msg in history[-10:]:  # last 10 messages only
        if not isinstance(msg, dict):
            continue
        role = msg.get("role", "user")
        content = msg.get("content", "")
        if role == "user":
            memory.chat_memory.add_message(HumanMessage(content=content))
        elif role == "assistant":
            memory.chat_memory.add_message(AIMessage(content=content))
    return memory

@st.cache_resource
def get_chat_history():
    """Return the (cached) chat-history list.

    NOTE(review): st.cache_resource hands the SAME list object to every session
    and rerun, so mutations would be shared process-wide. This helper appears
    unused here — the app uses st.session_state.chat_history instead; confirm
    before relying on it.
    """
    return []

def get_conversational_chain(tools, ques, stream_container=None):
    """Build and run the conversational agent: LLM + retriever tool + prompt + memory.

    Args:
        tools: the single retriever tool to expose to the agent.
        ques: the user's question.
        stream_container: optional Streamlit placeholder; when provided, the
            answer is streamed into it via stream_response_with_thinking.

    Returns:
        (clean_content, think_content): answer text with any <think> block
        removed, plus the extracted reasoning text ("" when absent).
    """
    import re  # hoisted from the non-streaming branch

    llm = ChatOllama(
        base_url=OLLAMA_CONFIG["base_url"],
        model=OLLAMA_CONFIG["chat_model"]
    )
    # Shared windowed memory keeps multi-turn context across reruns.
    memory = get_conversation_memory()
    prompt = ChatPromptTemplate.from_messages([
        (
            "system",
            """你是水库调度领域的专业AI助手，具备以下能力：

【核心职责】
1. 基于知识库内容提供准确、专业的水库调度相关解答
2. 利用对话历史信息，理解上下文和代词指代
3. 当信息不足时，主动使用水利专业词汇重新组织查询

【回答策略】
- 优先使用知识库检索到的准确信息
- 结合对话历史，理解用户的连续提问意图
- 如果用户提到\"它\"、\"这个\"、\"那个\"等代词，请结合历史对话确定具体指代对象
- 当检索结果不够完整时，尝试使用相关的水利专业术语重新查询
- 如果多次查询仍无法找到答案，请明确告知无法作答

【专业要求】
- 回答要准确、简洁、专业
- 涉及数据时请提供具体数值和单位
- 必要时可以引用知识库中的原文内容
- 保持回答的逻辑性和连贯性

【对话记忆利用】
- 仔细阅读历史对话，理解用户的问题背景
- 识别用户可能的连续提问模式
- 当用户使用代词或简化表达时，结合历史信息给出准确回答""",
        ),
        ("user", "{history}"),
        ("human", "{input}"),
        ("placeholder", "{agent_scratchpad}"),
    ])
    # Fix: the old local name `tool` (singular) actually held a *list* of tools.
    tool_list = [tools]
    agent = create_tool_calling_agent(llm, tool_list, prompt)
    agent_executor = AgentExecutor(agent=agent, tools=tool_list, verbose=True, memory=memory)

    # Streaming path handles its own <think> extraction and replay.
    if stream_container:
        return stream_response_with_thinking(agent_executor, ques, stream_container)

    response = agent_executor.invoke({"input": ques})
    output = response['output']
    # Non-streaming path: split hidden reasoning from the visible answer.
    think_match = re.search(r'<think>(.*?)</think>', output, re.DOTALL)
    if think_match:
        think_content = think_match.group(1).strip()
        clean_content = re.sub(r'<think>.*?</think>', '', output, flags=re.DOTALL).strip()
    else:
        think_content = ""
        clean_content = output.strip()
    return clean_content, think_content

def check_nlp_service_available():
    """Return True when the cached NLP availability probe reports the service usable."""
    return get_nlp_service_status()

@st.cache_resource
def get_nlp_retriever():
    """Return the process-wide cached NLPHybridRetriever instance."""
    return NLPHybridRetriever()

def user_input(user_question, stream_container=None):
    """Answer *user_question* through the retrieval agent.

    Returns:
        (answer, think_content) on success, or None when the NLP service is
        unavailable or the query fails (errors are shown in the Streamlit UI).
    """
    if not check_nlp_service_available():
        st.error("❌ NLP查询服务不可用，请检查数据库连接和配置！")
        st.info("💡 请确保：1️⃣ 数据库服务正常 → 2️⃣ 配置文件正确 → 3️⃣ 重新启动应用")
        return None

    try:
        retriever_tool = create_retriever_tool(
            get_nlp_retriever(),
            "nlp_qa_retriever",
            "基于NLP和数据库混合查询的水库知识检索器"
        )
        return get_conversational_chain(retriever_tool, user_question, stream_container)
    except Exception as e:
        st.error(f"❌ 查询过程中出错: {str(e)}")
        st.info("请检查网络连接和服务状态")
        return None

def extract_think_content(content):
    """Split *content* into (text without <think> blocks, list of think-block bodies)."""
    import re

    pattern = re.compile(r'<think>(.*?)</think>', re.DOTALL)
    thoughts = pattern.findall(content)
    visible = pattern.sub('', content).strip()
    return visible, thoughts

def display_chat_message(role, content, timestamp=None, thinking_process=None):
    """Render one chat turn; assistant turns also get an expandable thinking panel.

    Args:
        role: "user" renders a user bubble; anything else renders an assistant bubble.
        content: message text; may embed <think>...</think> blocks.
        timestamp: optional HH:MM:SS string; defaults to the current local time.
        thinking_process: fallback reasoning payload (str or dict), shown only
            when *content* itself contains no <think> blocks.
    """
    timestamp = timestamp or time.strftime("%H:%M:%S")
    
    if role == "user":
        with st.chat_message("user"):
            st.markdown(f"**[{timestamp}]** {content}")
    else:
        with st.chat_message("assistant"):
            # Strip embedded <think> blocks out of the visible answer first.
            clean_content, think_contents = extract_think_content(content)
            st.markdown(f"**[{timestamp}]** {clean_content}")
            
            if think_contents:
                with st.expander("🤔 查看AI思考过程", expanded=False):
                    for i, think_content in enumerate(think_contents):
                        if len(think_contents) > 1:
                            st.text(f"思考过程 {i+1}:")
                        st.text(think_content.strip())
                        if i < len(think_contents) - 1:
                            st.divider()
            
            elif thinking_process:
                # Supports both plain-string and dict-shaped thinking payloads.
                with st.expander("🤔 查看AI推理过程", expanded=False):
                    if isinstance(thinking_process, dict):
                        st.text(f"问题: {thinking_process.get('question', '')}")
                        # Streaming-style step list, if present.
                        if thinking_process.get('thinking_steps'):
                            st.text("思考步骤:")
                            for i, step in enumerate(thinking_process['thinking_steps']):
                                st.text(f"  {i+1}. {step}")
                        # Otherwise fall back to classic agent intermediate steps.
                        elif thinking_process.get('intermediate_steps'):
                            st.text("推理步骤:")
                            for i, step in enumerate(thinking_process['intermediate_steps']):
                                st.text(f"  {i+1}. {step}")
                        else:
                            st.text("直接回答，无中间步骤")
                        # Surface any error recorded during processing.
                        if thinking_process.get('error'):
                            st.error(f"处理过程中出现错误: {thinking_process['error']}")
                        else:
                            st.success("✅ 推理完成")
                    else:
                        st.text(str(thinking_process))

def clear_chat_history():
    """Wipe the chat transcript and every cached resource so memory is rebuilt."""
    st.session_state.chat_history = []
    # Clearing all cache_resource entries forces get_conversation_memory to rebuild.
    st.cache_resource.clear()
    # Defensive: drop any stray internal cache key if present.
    st.session_state.pop('_cache_resource', None)

def main():
    """Entry point: build the Streamlit chat UI for the reservoir QA assistant."""
    st.set_page_config("🤖 LangChain 水库问答系统")
    st.header("🤖 LangChain 水库问答助手")
    
    # Per-session chat transcript (list of dicts, possibly message objects).
    if 'chat_history' not in st.session_state:
        st.session_state.chat_history = []
    
    col1, col2, col3 = st.columns([2, 1, 1])
    
    with col1:
        if check_nlp_service_available():
            st.success("✅ NLP查询服务：已就绪")
        else:
            st.error("❌ NLP查询服务：不可用")
    
    with col2:
        if st.button("🔄 刷新状态"):
            st.rerun()
    
    with col3:
        if st.button("🗑️ 清除对话"):
            clear_chat_history()
            st.rerun()
    
    chat_container = st.container()
    
    with chat_container:
        if st.session_state.chat_history:
            for message in st.session_state.chat_history:
                # Accept dicts as well as AIMessage/HumanMessage-style objects to avoid TypeError.
                if isinstance(message, dict):
                    role = message.get("role", "user")
                    content = message.get("content", "")
                    thinking_process = message.get("thinking_process")
                else:
                    role = getattr(message, "role", "user")
                    content = getattr(message, "content", str(message))
                    thinking_process = getattr(message, "thinking_process", None)
                if role == "user":
                    with st.chat_message("user"):
                        st.markdown(content)
                elif role == "assistant":
                    with st.chat_message("assistant"):
                        st.markdown(content)
                        if thinking_process:
                            with st.expander("AI思考过程", expanded=False):
                                st.markdown(thinking_process)
        else:
            st.info("💬 请输入您的问题，AI将为您提供专业解答！")
    
    service_available = check_nlp_service_available()
    user_question = st.chat_input(
        "请输入您的问题...",
        disabled=not service_available
    )
    
    if user_question and service_available:
        timestamp = time.strftime("%H:%M:%S")
        # Dedup guard: skip appending when one of the last two entries is the
        # same user question (protects against rerun-driven duplicates).
        if not (st.session_state.chat_history and any(
            (msg.get("role") if isinstance(msg, dict) else getattr(msg, "role", "user")) == "user" and 
            (msg.get("content") if isinstance(msg, dict) else getattr(msg, "content", "")) == user_question 
            for msg in st.session_state.chat_history[-2:])):
            st.session_state.chat_history.append({
                "id": str(uuid.uuid4()),
                "role": "user",
                "content": user_question,
                "timestamp": timestamp
            })
        
        # Temporary assistant bubble hosting the live streaming output.
        temp_ai_container = st.chat_message("assistant")
        with temp_ai_container:
            with st.expander("🤔 AI思考过程", expanded=True):
                stream_container = st.empty()
        
        # Run the query with streaming output into the placeholder.
        ai_response_result = user_input(user_question, stream_container)
        
        if ai_response_result:
            if isinstance(ai_response_result, tuple) and len(ai_response_result) == 2:
                ai_response, thinking_process = ai_response_result
            else:
                ai_response = ai_response_result
                thinking_process = None
            
            ai_timestamp = time.strftime("%H:%M:%S")
            
            # Drop the temporary streaming container.
            temp_ai_container.empty()
            
            # Persist the assistant turn in the transcript.
            st.session_state.chat_history.append({
                "role": "assistant",
                "content": ai_response,
                "timestamp": ai_timestamp,
                "thinking_process": thinking_process
            })
            
            # Rerun so the full transcript renders from history.
            st.rerun()
    
    elif user_question and not service_available:
        st.error("❌ NLP查询服务不可用，请检查配置！")

    # Sidebar: service status, cache controls, and help text.
    with st.sidebar:
        st.title("⚙️ 系统信息")
        
        st.subheader("🗑️ 缓存管理")
        col1, col2 = st.columns(2)
        
        with col1:
            if st.button("🧹 清除记忆", help="清除对话历史记忆"):
                clear_chat_history()
                st.success("✅ 对话记忆已清除")
                st.rerun()
        
        with col2:
            if st.button("🔄 重置缓存", help="清除所有缓存资源"):
                clear_chat_history()
                st.cache_data.clear()
                st.success("✅ 所有缓存已清除")
                st.rerun()
        
        st.divider()
        
        st.subheader("📊 服务状态")
        if check_nlp_service_available():
            st.success("✅ NLP服务：运行中")
            st.success("✅ 数据库：已连接")
        else:
            st.error("❌ NLP服务：离线")
            st.error("❌ 数据库：连接失败")
            
        st.info("🤖 模型：Ollama (本地)")
        
        st.divider()
        st.subheader("🔧 配置说明")
        st.markdown("""
        **数据库配置**
        - 确保数据库服务正常运行
        - 检查 `config/config.py` 配置
        - 验证数据库连接参数
        
        **模型配置**
        - 确保 Ollama 服务启动
        - 检查模型是否已下载
        """)
        
        st.divider()
        st.subheader("📖 使用说明")
        st.markdown("""
        1. 🔍 确认服务状态正常
        2. 💬 在主界面输入问题
        3. 🤖 等待AI分析回答
        4. 📋 查看检索结果
        
        **缓存管理**
        - 🧹 清除记忆：仅清除对话历史
        - 🔄 重置缓存：清除所有缓存资源
        """)
        
        st.divider()
        st.subheader("🔧 技术栈")
        st.markdown("""
        - **LangChain**: AI应用框架
        - **NLP查询**: 混合检索
        - **Ollama**: 本地大模型
        - **Streamlit**: Web界面
        - **数据库**: 结构化存储
        """)

# Script entry point — launch the Streamlit app when run directly.
if __name__ == "__main__":
    main()
