import inspect

import streamlit as st
import os
import yaml
from src.models.local_llm import OpenAILLM
from src.models.embeddings import LocalEmbeddings
from src.retriever.vector_store import VectorStoreManager
from src.retriever.document_loader import DocumentProcessor
from src.agent.rag_graph import RAGGraph

# Page configuration: browser tab title/icon and wide layout.
st.set_page_config(
    page_title="LangGraph RAG智能体",
    page_icon="🤖",
    layout="wide"
)


# 缓存模型加载
@st.cache_resource
def load_models():
    """加载模型和组件"""
    with st.spinner("正在加载模型..."):
        # 加载embedding模型
        embeddings = LocalEmbeddings()

        # 加载向量存储
        vector_store = VectorStoreManager(embeddings)

        # 加载大模型
        llm = OpenAILLM()

        # 构建RAG图
        rag_graph = RAGGraph(llm, vector_store)

        return embeddings, vector_store, llm, rag_graph


def _load_config():
    """Parse config.yaml and return it as a dict, or None if the file is absent."""
    if not os.path.exists("config.yaml"):
        return None
    with open("config.yaml", 'r', encoding='utf-8') as f:
        return yaml.safe_load(f)


def _is_streaming(answer):
    """Return True when *answer* should be streamed chunk by chunk.

    A generator, or any non-container iterable (not str/list/dict/tuple),
    is treated as a stream. Parentheses make the and/or precedence explicit.
    """
    return inspect.isgenerator(answer) or (
        hasattr(answer, '__iter__')
        and not isinstance(answer, (str, list, dict, tuple))
    )


def _render_sidebar():
    """Sidebar: document upload + indexing, and a dump of the current config."""
    with st.sidebar:
        st.header("📚 文档管理")

        uploaded_files = st.file_uploader(
            "上传文档",
            type=['pdf', 'txt'],
            accept_multiple_files=True
        )

        if uploaded_files and st.button("处理文档"):
            with st.spinner("正在处理文档..."):
                # Persist the uploads so DocumentProcessor can re-read them.
                os.makedirs("data/documents", exist_ok=True)
                for file in uploaded_files:
                    with open(f"data/documents/{file.name}", "wb") as f:
                        f.write(file.getbuffer())

                # Load and index the saved documents into the vector store.
                embeddings, vector_store, _, _ = load_models()
                doc_processor = DocumentProcessor()
                documents = doc_processor.load_documents("data/documents")
                vector_store.add_documents(documents)

                st.success(f"已处理 {len(documents)} 个文档片段")

        st.markdown("---")
        st.header("⚙️ 系统状态")

        config = _load_config()
        if config is not None:
            st.json(config)


def _render_answer(prompt):
    """Run the RAG graph for *prompt*, stream/render the answer, show sources."""
    message_placeholder = st.empty()
    full_response = ""
    # Initialize so a failed invoke() can't leave these names undefined
    # (previously that caused a follow-up NameError and a second error box).
    answer = None
    result = {}
    with st.spinner("思考中..."):
        try:
            # Cached after the first call, so this is cheap on reruns.
            _, _, _, rag_graph = load_models()
            result = rag_graph.invoke(prompt)
            answer = result.get("answer", "抱歉，无法生成回答。")
        except Exception as e:
            st.error(f"生成回答时出错: {str(e)}")

    if answer is None:
        # invoke() failed; the error is already on screen.
        return

    try:
        if _is_streaming(answer):
            try:
                # Stream chunks into the placeholder with a cursor marker.
                for chunk in answer:
                    if isinstance(chunk, dict) and 'data' in chunk:
                        full_response += chunk['data']
                    else:
                        full_response += str(chunk)
                    message_placeholder.markdown(full_response + "▌")
                message_placeholder.markdown(full_response)
            except Exception:
                # Best-effort fallback: show whatever str() gives us.
                full_response = str(answer)
                message_placeholder.markdown(full_response)
        else:
            full_response = str(answer)
            message_placeholder.markdown(full_response)

        st.session_state.messages.append({"role": "assistant", "content": full_response})

        # Show the retrieved source documents, with scores when available.
        if result.get("retrieved_documents"):
            with st.expander("📄 参考文档"):
                for i, doc in enumerate(result["retrieved_documents"]):
                    st.markdown(f"**文档 {i + 1}:**")
                    content = doc.page_content.replace('\n', ' ')
                    st.markdown(content)
                    if "retrieval_scores" in result and i < len(result["retrieval_scores"]):
                        st.markdown(f"*相关度: {result['retrieval_scores'][i]:.3f}*")
                    st.markdown("---")

    except Exception as e:
        st.error(f"生成回答时出错: {str(e)}")


def _render_chat():
    """Chat column: history, user input, and assistant responses."""
    st.header("💬 智能问答")

    # Initialize chat history once per session.
    if "messages" not in st.session_state:
        st.session_state.messages = []

    # Replay the conversation so far.
    for message in st.session_state.messages:
        with st.chat_message(message["role"]):
            st.markdown(message["content"])

    if prompt := st.chat_input("请输入您的问题..."):
        st.session_state.messages.append({"role": "user", "content": prompt})
        with st.chat_message("user"):
            st.markdown(prompt)
        with st.chat_message("assistant"):
            _render_answer(prompt)


def _render_info():
    """Info column: model/embedding/vector-store details and a clear-chat button."""
    st.header("📊 系统信息")

    config = _load_config()
    if config is not None:
        st.subheader("🧠 OpenAI模型")
        st.info(f"模型: {config['llm']['model_name']}")
        st.info(f"API端点: {config['llm']['base_url']}")

        # API key may come from config or the environment.
        api_key = config['llm'].get('api_key') or os.getenv('OPENAI_API_KEY')
        if api_key:
            st.success("✅ API密钥已配置")
        else:
            st.error("❌ 未配置API密钥")

        st.subheader("🔍 嵌入模型")
        st.info(f"模型: {config['embeddings']['model_name']}")

        st.subheader("💾 向量数据库")
        st.info(f"类型: {config['vector_store']['type']}")

    if st.button("🗑️ 清除会话"):
        st.session_state.messages = []
        st.rerun()


def main():
    """Render the app: title, document-management sidebar, chat and info columns."""
    st.title("🤖 LangGraph RAG智能体")
    st.markdown("基于本地大模型和向量数据库的智能问答系统")

    _render_sidebar()

    col1, col2 = st.columns([2, 1])
    with col1:
        _render_chat()
    with col2:
        _render_info()


# Script entry point: render the Streamlit app.
if __name__ == "__main__":
    main()