"""
儿童肺炎支原体肺炎诊疗咨询专家智能体
基于儿童肺炎支原体肺炎诊疗指南的AI咨询系统
"""

import os
import streamlit as st
from openai import OpenAI
from langchain_community.document_loaders import PyPDFLoader
from langchain.text_splitter import RecursiveCharacterTextSplitter
from langchain_community.vectorstores import Chroma
from langchain.memory import ConversationBufferMemory
from langchain_community.embeddings import SentenceTransformerEmbeddings
from dotenv import load_dotenv

load_dotenv()

class PediatricPneumoniaAgent:
    """Consultation agent for pediatric Mycoplasma pneumoniae pneumonia.

    Combines a Kimi (Moonshot, OpenAI-compatible) chat model, a local Chroma
    vector store built from the clinical-guideline PDF, and a conversation
    buffer memory to answer questions via retrieval-augmented generation.
    """

    def __init__(self):
        """Create the agent with all components unset.

        Call initialize_llm(), load_and_process_pdf() and
        setup_conversation_chain() before requesting answers.
        """
        self.client = None       # OpenAI-compatible client for the Kimi API
        self.vectorstore = None  # Chroma store over the guideline PDF chunks
        self.memory = None       # conversation history buffer
        self.embeddings = None   # local sentence-transformers embedding model

    def initialize_llm(self):
        """Initialize the Kimi chat client and the local embedding model.

        Reads KIMI_API_KEY (required) and KIMI_BASE_URL (optional, defaults
        to the public Moonshot endpoint) from the environment.

        Raises:
            ValueError: if the KIMI_API_KEY environment variable is not set.
        """
        api_key = os.getenv("KIMI_API_KEY")
        base_url = os.getenv("KIMI_BASE_URL", "https://api.moonshot.cn/v1")

        if not api_key:
            raise ValueError("请设置KIMI_API_KEY环境变量")

        # The Kimi API is OpenAI-compatible, so the plain OpenAI client works.
        self.client = OpenAI(
            api_key=api_key,
            base_url=base_url
        )

        # Local sentence-transformers embeddings: no external embedding API
        # (and no API key) is needed for indexing or retrieval.
        self.embeddings = SentenceTransformerEmbeddings(
            model_name="all-MiniLM-L6-v2"
        )

    def load_and_process_pdf(self, pdf_path):
        """Load a PDF, split it into chunks and build the Chroma vector store.

        Args:
            pdf_path: path to the guideline PDF file.

        Returns:
            True on success; False (after showing a Streamlit error) on failure.
        """
        try:
            loader = PyPDFLoader(pdf_path)
            documents = loader.load()

            # Split on paragraph/line breaks and Chinese punctuation.  The
            # trailing "" separator is the character-level fallback that keeps
            # every chunk within chunk_size even when no other separator
            # occurs; without it, oversized chunks can slip through.
            text_splitter = RecursiveCharacterTextSplitter(
                chunk_size=1000,
                chunk_overlap=200,
                separators=["\n\n", "\n", "。", "，", ""]
            )
            splits = text_splitter.split_documents(documents)

            # Embed the chunks with the already-initialized local model and
            # persist the index on disk for reuse.
            self.vectorstore = Chroma.from_documents(
                documents=splits,
                embedding=self.embeddings,
                persist_directory="./chroma_db"
            )

            return True
        except Exception as e:
            st.error(f"加载PDF文档失败: {str(e)}")
            return False

    def setup_conversation_chain(self):
        """Initialize the conversation memory that carries chat history."""
        self.memory = ConversationBufferMemory(
            memory_key="chat_history",
            return_messages=True,
            output_key="answer"
        )

    def get_response(self, question):
        """Answer a question using retrieval-augmented generation.

        Retrieves the most relevant guideline chunks, prepends the prior chat
        turns, calls the Kimi chat model, and records the exchange in memory.

        Args:
            question: the user's question text.

        Returns:
            The model's answer string, or a Chinese error message on failure.
        """
        # Guard both prerequisites: the vector store (PDF loaded) and the
        # client (initialize_llm called).  Checking only the vector store
        # would let a None client raise AttributeError below.
        if not self.vectorstore or not self.client:
            return "系统尚未初始化，请先上传PDF文档"

        try:
            # Retrieve the 3 guideline chunks most relevant to the question.
            retriever = self.vectorstore.as_retriever(search_kwargs={"k": 3})
            relevant_docs = retriever.get_relevant_documents(question)

            # Concatenate the retrieved chunks into one context string.
            context = "\n\n".join([doc.page_content for doc in relevant_docs])

            # Replay earlier turns so the model sees the conversation so far.
            chat_history = []
            if self.memory:
                chat_history = self.memory.load_memory_variables({}).get("chat_history", [])

            messages = [
                {"role": "system", "content": "你是一个专业的儿童肺炎支原体肺炎诊疗专家，请基于提供的医学指南内容准确回答患者的问题。"}
            ]

            # Map LangChain message types onto OpenAI chat roles.
            for msg in chat_history:
                if msg.type == "human":
                    messages.append({"role": "user", "content": msg.content})
                elif msg.type == "ai":
                    messages.append({"role": "assistant", "content": msg.content})

            # Ground the current question in the retrieved guideline text.
            user_message = f"基于以下医学指南内容回答问题：\n\n{context}\n\n问题：{question}"
            messages.append({"role": "user", "content": user_message})

            # Low temperature keeps answers factual and guideline-faithful.
            response = self.client.chat.completions.create(
                model="moonshot-v1-8k",
                messages=messages,
                temperature=0.3,
                max_tokens=1000
            )

            answer = response.choices[0].message.content

            # Persist the exchange so follow-up questions have context.
            if self.memory:
                self.memory.save_context(
                    {"question": question},
                    {"answer": answer}
                )

            return answer

        except Exception as e:
            return f"获取响应时出错: {str(e)}"

    def clear_memory(self):
        """Clear the stored conversation history, if any."""
        if self.memory:
            self.memory.clear()

def main():
    """Streamlit entry point: initialize the agent, load the guideline PDF,
    and render the consultation UI (sidebar, example questions, chat)."""
    st.set_page_config(
        page_title="儿童肺炎支原体肺炎诊疗咨询专家",
        page_icon="🏥",
        layout="wide"
    )

    st.title("🏥 儿童肺炎支原体肺炎诊疗咨询专家")
    st.markdown("基于《儿童肺炎支原体肺炎诊疗指南》的AI智能咨询系统")

    # One-time initialization per session: build the agent, load the local
    # guideline PDF into the vector store, and enable conversation memory.
    if 'agent' not in st.session_state:
        st.session_state.agent = PediatricPneumoniaAgent()
        try:
            st.session_state.agent.initialize_llm()
            st.success("✅ AI模型初始化成功")

            # Automatically load the bundled guideline document.
            pdf_path = "儿童肺炎支原体肺炎诊疗指南.pdf"
            if os.path.exists(pdf_path):
                with st.spinner("正在加载诊疗指南文档..."):
                    if st.session_state.agent.load_and_process_pdf(pdf_path):
                        st.session_state.agent.setup_conversation_chain()
                        st.success(f"✅ 已加载文档: {pdf_path}")
                    else:
                        st.error(f"❌ 文档加载失败: {pdf_path}")
            else:
                st.error(f"❌ 未找到文档: {pdf_path}")
                st.info("请确保'儿童肺炎支原体肺炎诊疗指南.pdf'文件位于当前目录")
                # Drop the half-initialized agent so the next rerun retries
                # (e.g. after the user places the missing PDF in the folder).
                st.session_state.pop('agent', None)
                return

        except Exception as e:
            st.error(f"❌ 模型初始化失败: {str(e)}")
            # Drop the failed agent so a rerun can retry initialization
            # instead of being stuck with a broken cached instance.
            st.session_state.pop('agent', None)
            return

    # Sidebar: memory reset button and system status readout.
    with st.sidebar:
        st.header("⚙️ 系统设置")

        if st.button("🗑️ 清除对话记忆"):
            st.session_state.agent.clear_memory()
            st.session_state.messages = []
            st.rerun()

        st.markdown("---")
        st.markdown("### 📊 系统信息")
        st.info("当前文档: 儿童肺炎支原体肺炎诊疗指南.pdf")
        st.info(f"向量数据库: {'已加载' if st.session_state.agent.vectorstore else '未加载'}")
        st.info(f"对话记忆: {'已启用' if st.session_state.agent.memory else '未启用'}")

    # Refuse to serve questions without a loaded knowledge base.
    if st.session_state.agent.vectorstore is None:
        st.error("❌ 知识库加载失败，无法提供咨询服务")
        st.info("请检查'儿童肺炎支原体肺炎诊疗指南.pdf'文件是否存在")
        return

    # Clickable example questions, laid out in two columns of four.
    st.markdown("### 💡 常见问题示例")
    example_questions = [
        "儿童肺炎支原体肺炎的典型症状有哪些？",
        "如何诊断儿童肺炎支原体肺炎？",
        "儿童肺炎支原体肺炎的治疗方案是什么？",
        "抗生素治疗的疗程应该是多久？",
        "有哪些并发症需要特别注意？",
        "如何判断病情严重程度？",
        "什么情况下需要住院治疗？",
        "康复期需要注意什么？"
    ]

    col1, col2 = st.columns(2)
    with col1:
        for i, question in enumerate(example_questions[:4]):
            if st.button(question, key=f"q{i}"):
                st.session_state.current_question = question

    with col2:
        for i, question in enumerate(example_questions[4:]):
            if st.button(question, key=f"q{i+4}"):
                st.session_state.current_question = question

    # Chat transcript section.
    st.markdown("---")
    st.markdown("### 💬 咨询对话")

    if "messages" not in st.session_state:
        st.session_state.messages = []

    # Re-render the accumulated transcript on every rerun.
    for message in st.session_state.messages:
        with st.chat_message(message["role"]):
            st.markdown(message["content"])

    def _answer(question):
        """Render one Q&A exchange and append it to the transcript.

        Shared by the example-button path and the free-form chat-input path,
        which previously duplicated this logic.
        """
        with st.chat_message("user"):
            st.markdown(question)

        with st.chat_message("assistant"):
            with st.spinner("思考中..."):
                response = st.session_state.agent.get_response(question)
                st.markdown(response)

        st.session_state.messages.append({"role": "user", "content": question})
        st.session_state.messages.append({"role": "assistant", "content": response})

    # An example-button click stashes its question in session state;
    # consume it exactly once here.
    if "current_question" in st.session_state:
        question = st.session_state.current_question
        del st.session_state.current_question
        _answer(question)

    # Free-form question typed into the chat box.
    if prompt := st.chat_input("请输入您的问题..."):
        _answer(prompt)

# Launch the app when executed directly (e.g. `streamlit run <this file>`).
if __name__ == "__main__":
    main()