"""PaperPal主Agent类"""

import os
from typing import Dict, Any, List

import streamlit as st
from langgraph.graph import StateGraph, END

from agent.nodes import PaperPalNodes
from agent.paper_tracer import PaperTracer
from agent.utils import PDFProcessor, ValidationUtils
from config import Config
from prompts.comparison_prompts import PAPER_COMPARISON_PROMPT

class PaperPalAgent:
    """PaperPal paper speed-reading agent.

    Drives a strictly linear LangGraph pipeline over an uploaded PDF:
    preprocess -> analysis -> summary -> mindmap -> formula ->
    recommendation analysis -> recommendation generation ->
    expert review -> citation trace -> integration.
    """

    def __init__(self, api_key: str):
        """Create the agent and compile its workflow.

        Args:
            api_key: GLM API key shared by all LLM-backed components.
        """
        self.api_key = api_key
        self.nodes = PaperPalNodes(api_key)
        self.pdf_processor = PDFProcessor()
        self.tracer = PaperTracer(api_key)
        self.workflow = None  # populated by _build_workflow()
        self._build_workflow()

    def _build_workflow(self):
        """Build and compile the LangGraph state machine."""
        workflow = StateGraph(dict)

        # One node per pipeline stage.
        workflow.add_node("preprocess", self.nodes.preprocess_node)
        workflow.add_node("analysis", self.nodes.analysis_node)
        workflow.add_node("summary", self.nodes.summary_node)
        workflow.add_node("mindmap", self.nodes.mindmap_node)
        workflow.add_node("formula", self.nodes.formula_node)
        workflow.add_node("recommendation_analysis", self.nodes.recommendation_analysis_node)
        workflow.add_node("recommendation_generation", self.nodes.recommendation_generation_node)
        workflow.add_node("expert_review", self.nodes.expert_review_node)
        workflow.add_node("trace", self.tracer.analyze_citation_impact_node)
        workflow.add_node("integration", self.nodes.integration_node)

        # Strictly sequential: each stage feeds the next, integration ends the run.
        workflow.set_entry_point("preprocess")
        workflow.add_edge("preprocess", "analysis")
        workflow.add_edge("analysis", "summary")
        workflow.add_edge("summary", "mindmap")
        workflow.add_edge("mindmap", "formula")
        workflow.add_edge("formula", "recommendation_analysis")
        workflow.add_edge("recommendation_analysis", "recommendation_generation")
        workflow.add_edge("recommendation_generation", "expert_review")
        workflow.add_edge("expert_review", "trace")
        workflow.add_edge("trace", "integration")
        workflow.add_edge("integration", END)

        self.workflow = workflow.compile()

    def process_pdf(self, pdf_file) -> Dict[str, Any]:
        """Run the full pipeline on an uploaded PDF.

        Args:
            pdf_file: Streamlit UploadedFile-like object (needs ``.name``
                and ``.getbuffer()``).

        Returns:
            The final workflow state dict, or ``{"error": ...}`` on failure.
        """
        if not ValidationUtils.validate_pdf_file(pdf_file):
            return {"error": "文件验证失败"}

        # Persist the upload so downstream extractors can read it from disk.
        # Ensure the upload directory exists before writing (the original
        # code crashed on a fresh deployment when it did not).
        os.makedirs(Config.UPLOAD_DIR, exist_ok=True)
        file_path = os.path.join(Config.UPLOAD_DIR, pdf_file.name)
        with open(file_path, "wb") as f:
            f.write(pdf_file.getbuffer())

        st.info("📄 正在提取PDF文本...")
        raw_data = self.pdf_processor.extract_text_hybrid(file_path)

        # extract_text_hybrid returns a structured dict; require non-empty text.
        if not raw_data or "text" not in raw_data or not raw_data["text"].strip():
            return {"error": "无法从PDF中提取文本内容"}

        initial_state = {
            "file_name": pdf_file.name,
            "file_path": file_path,
            "raw_text": raw_data,  # structured extraction result, not a bare string
            "text_length": len(raw_data["text"]),
        }

        st.success("✅ 文本提取完成！")

        try:
            st.info("🚀 开始分析处理...")
            return self.workflow.invoke(initial_state)
        except Exception as e:
            # Surface the failure in the UI and return an error state instead of
            # propagating, so the Streamlit app keeps running.
            st.error(f"处理过程中发生错误: {str(e)}")
            return {"error": f"处理失败: {str(e)}"}

    def get_processing_status(self, state: Dict[str, Any]) -> Dict[str, bool]:
        """Map each pipeline stage to its completion flag for UI display."""
        return {
            "预处理": state.get("preprocessing_complete", False),
            "内容分析": state.get("analysis_complete", False),
            "摘要生成": state.get("summary_complete", False),
            "思维导图": state.get("mindmap_complete", False),
            "公式解释": state.get("formula_complete", False),
            "论文推荐": state.get("recommendation_complete", False),
            "专家分析": state.get("expert_review_complete", False),
            "论文溯源": state.get("trace_complete", False),
            "结果整合": state.get("integration_complete", False)
        }

    def format_results(self, state: Dict[str, Any]) -> Dict[str, str]:
        """Collect the user-facing outputs, substituting failure messages
        for any result a stage did not produce."""
        return {
            "summary": state.get("summary", "摘要生成失败"),
            "mindmap": state.get("mindmap", "思维导图生成失败"),
            "formula_explanation": state.get("formula_explanation", "公式解释生成失败"),
            "recommendations": state.get("recommendations", "推荐获取失败"),
            "expert_feedback": state.get("expert_feedback", "专家评价生成失败"),
            "processing_report": state.get("processing_report", "处理报告生成失败"),
            "citation_analysis": state.get("citation_analysis", "溯源分析失败"),  # citation-trace result
            "citation_graph": state.get("citation_graph", "")
        }

    def get_paper_info(self, state: Dict[str, Any]) -> Dict[str, Any]:
        """Summarize basic paper metadata and completion status for display."""
        recommendation_analysis_complete = state.get("recommendation_analysis_complete", False)
        recommendation_generation_complete = state.get("recommendation_complete", False)

        # True when every core stage (including both recommendation phases)
        # reported completion.
        all_tasks_complete = (
                state.get("preprocessing_complete", False) and
                state.get("analysis_complete", False) and
                state.get("summary_complete", False) and
                state.get("mindmap_complete", False) and
                state.get("formula_complete", False) and
                state.get("integration_complete", False) and
                recommendation_analysis_complete and
                recommendation_generation_complete
        )

        return {
            "文件名": state.get("file_name", "未知"),
            "文本长度": f"{state.get('text_length', 0):,} 字符",
            "章节数量": len(state.get("sections", [])),
            "公式数量": len(state.get("formulas", [])),
            "推荐分析": "完成" if recommendation_analysis_complete else "未完成",
            "推荐生成": "完成" if recommendation_generation_complete else "未完成",
            # Bug fix: all_tasks_complete was computed but never used — the
            # status only read an "all_complete" flag that nothing in this
            # file sets. Honour either signal (legacy flag kept for
            # backward compatibility).
            "处理状态": "完成" if (state.get("all_complete", False) or all_tasks_complete) else "部分完成"
        }

    def compare_papers(self, paper_a: Dict[str, Any], paper_b: Dict[str, Any]) -> Dict[str, Any]:
        """Compare two processed papers by their summaries.

        Args:
            paper_a: Final state dict of the first paper.
            paper_b: Final state dict of the second paper.

        Returns:
            Dict with the GLM comparison text and both titles, or
            ``{"error": ...}`` when either summary is missing.
        """
        paper_a_content = paper_a.get("summary", "")
        paper_b_content = paper_b.get("summary", "")

        if not paper_a_content or not paper_b_content:
            return {"error": "无法获取论文内容"}

        # Truncate each side to keep the prompt within model limits.
        prompt = PAPER_COMPARISON_PROMPT.format(
            paper_a_content=paper_a_content[:3000],
            paper_b_content=paper_b_content[:3000]
        )

        comparison = self.nodes.call_glm(prompt, max_tokens=4000)

        return {
            "comparison": comparison,
            "paper_a_title": paper_a.get("file_name", "论文A"),
            "paper_b_title": paper_b.get("file_name", "论文B")
        }

class PaperPalChatbot:
    """Q&A chatbot grounded in a single paper's extracted content.

    Keeps an in-memory transcript of every question/answer pair asked
    during the session.
    """

    def __init__(self, api_key: str, paper_content: str):
        """Store the paper content and set up the GLM-backed node helper."""
        self.api_key = api_key
        self.paper_content = paper_content
        self.nodes = PaperPalNodes(api_key)
        self.chat_history = []  # list of {"question": ..., "answer": ...} dicts

    def ask_question(self, question: str) -> str:
        """Answer a user question using only the stored paper content.

        Args:
            question: The user's free-form question.

        Returns:
            The model's answer text.
        """
        qa_prompt = f"""
        基于以下论文内容回答用户问题：
        
        论文内容（摘要）：
        {self.paper_content}
        
        用户问题：{question}
        
        请基于论文内容准确回答，如果问题超出论文范围，请说明无法从论文中找到相关信息。
        回答要简洁明了，重点突出。
        """

        reply = self.nodes.call_glm(qa_prompt, max_tokens=1000)

        # Append this exchange to the session transcript.
        entry = {"question": question, "answer": reply}
        self.chat_history.append(entry)

        return reply

    def get_chat_history(self) -> List[Dict[str, str]]:
        """Return the live transcript list (not a copy)."""
        return self.chat_history
    
class PaperComparator:
    """Compares two papers' text with a single GLM call."""

    def __init__(self, api_key: str):
        """Keep the key and create the GLM-backed node helper."""
        self.api_key = api_key
        self.nodes = PaperPalNodes(api_key)

    def compare(self, paper_a_content: str, paper_b_content: str) -> str:
        """Return a GLM-generated comparison of the two texts.

        Each side is truncated to 3000 characters to keep the prompt
        within model limits.
        """
        truncated_a = paper_a_content[:3000]
        truncated_b = paper_b_content[:3000]
        comparison_prompt = PAPER_COMPARISON_PROMPT.format(
            paper_a_content=truncated_a,
            paper_b_content=truncated_b
        )
        return self.nodes.call_glm(comparison_prompt, max_tokens=4000)