#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
AIDA Framework 主程序 - LangChain Pipeline
提供完整的Stage1-Stage2-Stage3文档处理流水线
"""

import sys
import os
import json
import time
from pathlib import Path
from datetime import datetime
from typing import Dict, Any, Optional, List
from loguru import logger
import traceback

# 添加各阶段模块到路径
sys.path.append(str(Path(__file__).parent / "aida_framework"))
sys.path.append(str(Path(__file__).parent / "aida_framework" / "stage1"))
sys.path.append(str(Path(__file__).parent / "aida_framework" / "stage2"))
sys.path.append(str(Path(__file__).parent / "aida_framework" / "stage3"))

# 导入各阶段处理器
from aida_framework.stage1.stage1_processor import Stage1Processor
from aida_framework.stage2.stage2_processor import Stage2Processor
from aida_framework.stage3.processor import Stage3Processor
from aida_framework.stage3.config import APIConfig


class AIDAPipeline:
    """
    Complete AIDA framework processing pipeline.

    A Stage1 -> Stage2 -> Stage3 document-processing pipeline designed
    around LangChain ideas: each stage is a "node" that consumes the
    previous stage's output and records its outcome in the shared
    ``pipeline_state`` dict.
    """

    def __init__(self, api_key: Optional[str] = None, storage_base_path: str = "storage"):
        """
        Initialize the AIDA pipeline.

        Args:
            api_key: API key; if None, falls back to the
                SILICONFLOW_API_KEY environment variable.
            storage_base_path: Base path used by Stage1 for session storage.
        """
        # Configure logging: human-readable INFO to stdout plus a rotating
        # DEBUG file sink.
        logger.remove()
        logger.add(
            sys.stdout,
            format="<green>{time:YYYY-MM-DD HH:mm:ss}</green> | <level>{level: <8}</level> | <cyan>{name}</cyan>:<cyan>{function}</cyan>:<cyan>{line}</cyan> - <level>{message}</level>",
            level="INFO"
        )
        logger.add("aida_pipeline.log", rotation="10 MB", level="DEBUG")

        # SECURITY: a working API key used to be hardcoded here. Secrets must
        # never live in source control — read from the environment instead.
        self.api_key = api_key or os.environ.get("SILICONFLOW_API_KEY", "")
        self.storage_base_path = storage_base_path

        # Processors are created lazily in _initialize_processors().
        self.stage1_processor = None
        self.stage2_processor = None
        self.stage3_processor = None

        # Shared pipeline state, mutated by each stage node as it runs.
        self.pipeline_state = {
            "current_stage": None,
            "session_id": None,
            "stage1_result": None,
            "stage2_result": None,
            "stage3_result": None,
            "errors": [],
            "start_time": None,
            "end_time": None
        }

    def _initialize_processors(self) -> bool:
        """Instantiate the three stage processors.

        Returns:
            True on success; False if any processor failed to construct
            (the error is logged and appended to ``pipeline_state``).
        """
        try:
            # Stage1: document parsing / content extraction
            self.stage1_processor = Stage1Processor(storage_base_path=self.storage_base_path)

            # Stage2: embedding / indexing
            self.stage2_processor = Stage2Processor(api_key=self.api_key, model="BAAI/bge-m3")

            # Stage3: multi-agent review, backed by SiliconFlow-hosted models
            api_config = APIConfig(
                api_key=self.api_key,
                base_url="https://api.siliconflow.cn/v1",
                text_model="Qwen/Qwen2.5-7B-Instruct",
                vision_model="THUDM/GLM-4.1V-9B-Thinking"
            )
            self.stage3_processor = Stage3Processor(api_config)

            logger.info("✅ 所有处理器初始化完成")
            return True

        except Exception as e:
            error_msg = f"处理器初始化失败: {str(e)}"
            logger.error(error_msg)
            self.pipeline_state["errors"].append(error_msg)
            return False

    def stage1_node(self, document_path: str) -> Dict[str, Any]:
        """
        Stage1 node: document parsing and content extraction.

        Args:
            document_path: Path to the input document.

        Returns:
            The Stage1 result dict; always contains a ``success`` flag and,
            on failure, an ``error`` message.
        """
        logger.info("="*80)
        logger.info("🔄 Stage1: 文档解析和内容提取")
        logger.info("="*80)

        self.pipeline_state["current_stage"] = "stage1"

        try:
            # Fail fast if the document is missing.
            if not os.path.exists(document_path):
                raise FileNotFoundError(f"文档文件不存在: {document_path}")

            logger.info(f"📄 处理文档: {document_path}")

            # Run Stage1 and time it.
            stage1_start = time.time()
            stage1_result = self.stage1_processor.process_pdf(document_path, show_details=True)
            stage1_duration = time.time() - stage1_start

            # Record duration and result in the shared state.
            stage1_result["processing_duration"] = stage1_duration
            self.pipeline_state["stage1_result"] = stage1_result

            if stage1_result.get("success", False):
                # The session id produced here drives Stage3 and result paths.
                self.pipeline_state["session_id"] = stage1_result["session_id"]
                logger.info(f"✅ Stage1完成 (耗时: {stage1_duration:.2f}秒)")
                logger.info(f"📁 会话ID: {stage1_result['session_id']}")
                logger.info(f"📁 存储路径: {stage1_result['storage_path']}")

                # Log extraction statistics when available.
                if "content_stats" in stage1_result:
                    stats = stage1_result["content_stats"]
                    logger.info("📊 内容提取统计:")
                    logger.info(f"   文本段落: {stats.get('text_segments', 0)}")
                    logger.info(f"   图片数量: {stats.get('images', 0)}")
                    logger.info(f"   表格数量: {stats.get('tables', 0)}")

                return stage1_result
            else:
                error_msg = f"Stage1处理失败: {stage1_result.get('error', '未知错误')}"
                logger.error(error_msg)
                self.pipeline_state["errors"].append(error_msg)
                return stage1_result

        except Exception as e:
            error_msg = f"Stage1节点异常: {str(e)}"
            logger.error(error_msg)
            logger.error(traceback.format_exc())
            self.pipeline_state["errors"].append(error_msg)
            return {"success": False, "error": error_msg}

    def stage2_node(self, stage1_result: Dict[str, Any]) -> Dict[str, Any]:
        """
        Stage2 node: embedding and index construction.

        Args:
            stage1_result: The result dict produced by :meth:`stage1_node`.

        Returns:
            The Stage2 result dict (``success`` flag; ``error`` on failure).
        """
        logger.info("="*80)
        logger.info("🔄 Stage2: 向量化和索引构建")
        logger.info("="*80)

        self.pipeline_state["current_stage"] = "stage2"

        try:
            # Stage2 requires a successful Stage1 run.
            if not stage1_result.get("success", False):
                error_msg = "Stage1未成功完成，无法执行Stage2"
                logger.error(error_msg)
                self.pipeline_state["errors"].append(error_msg)
                return {"success": False, "error": error_msg}

            storage_path = stage1_result["storage_path"]
            logger.info(f"📁 处理会话路径: {storage_path}")

            # Run Stage2 and time it.
            stage2_start = time.time()
            stage2_result = self.stage2_processor.process_session(storage_path)
            stage2_duration = time.time() - stage2_start

            # Record duration and result in the shared state.
            stage2_result["processing_duration"] = stage2_duration
            self.pipeline_state["stage2_result"] = stage2_result

            if stage2_result.get("success", False):
                logger.info(f"✅ Stage2完成 (耗时: {stage2_duration:.2f}秒)")

                # Detailed embedding statistics, when provided.
                if "statistics" in stage2_result:
                    stats = stage2_result["statistics"]
                    logger.info("📊 向量化统计:")
                    logger.info(f"   总分组数: {stats.get('total_groups', 0)}")
                    logger.info(f"   处理分组数: {stats.get('processed_groups', 0)}")
                    logger.info(f"   总元素数: {stats.get('total_elements', 0)}")
                    logger.info(f"   成功向量化: {stats.get('successful_embeddings', 0)}")
                    logger.info(f"   失败向量化: {stats.get('failed_embeddings', 0)}")
                    logger.info(f"   成功率: {stats.get('embedding_success_rate', 0):.1f}%")

                    # Embedding API call statistics.
                    if "embedding_api_stats" in stats:
                        api_stats = stats["embedding_api_stats"]
                        logger.info(f"   API调用次数: {api_stats.get('total_calls', 0)}")
                        logger.info(f"   处理token数: {api_stats.get('total_tokens', 0)}")

                # Vector-store statistics.
                if "vector_store_stats" in stage2_result:
                    vector_stats = stage2_result["vector_store_stats"]
                    logger.info("📊 向量存储统计:")
                    logger.info(f"   文档块数: {vector_stats.get('total_chunks', 0)}")
                    logger.info(f"   向量维度: {vector_stats.get('vector_dimension', 0)}")
                    logger.info(f"   存储元素数: {vector_stats.get('total_elements', 0)}")

                return stage2_result
            else:
                error_msg = f"Stage2处理失败: {stage2_result.get('error', '未知错误')}"
                logger.error(error_msg)
                self.pipeline_state["errors"].append(error_msg)
                return stage2_result

        except Exception as e:
            error_msg = f"Stage2节点异常: {str(e)}"
            logger.error(error_msg)
            logger.error(traceback.format_exc())
            self.pipeline_state["errors"].append(error_msg)
            return {"success": False, "error": error_msg}

    def stage3_node(self, stage2_result: Dict[str, Any]) -> Dict[str, Any]:
        """
        Stage3 node: multi-agent review and analysis.

        Args:
            stage2_result: The result dict produced by :meth:`stage2_node`.

        Returns:
            The Stage3 result dict (``success`` flag; ``error`` on failure).
        """
        logger.info("="*80)
        logger.info("🔄 Stage3: 多代理审查和分析")
        logger.info("="*80)

        self.pipeline_state["current_stage"] = "stage3"

        try:
            # Stage3 requires a successful Stage2 run.
            if not stage2_result.get("success", False):
                error_msg = "Stage2未成功完成，无法执行Stage3"
                logger.error(error_msg)
                self.pipeline_state["errors"].append(error_msg)
                return {"success": False, "error": error_msg}

            # Session id was recorded by stage1_node.
            session_id = self.pipeline_state["session_id"]
            logger.info(f"📁 处理会话ID: {session_id}")

            # Run Stage3 and time it.
            stage3_start = time.time()
            stage3_result = self.stage3_processor.process(
                session_id=session_id,
                save_results=True
            )
            stage3_duration = time.time() - stage3_start

            # Record duration and result in the shared state.
            stage3_result["processing_duration"] = stage3_duration
            self.pipeline_state["stage3_result"] = stage3_result

            if stage3_result.get("success", False):
                logger.info(f"✅ Stage3完成 (耗时: {stage3_duration:.2f}秒)")

                # Per-agent review statistics.
                if "analysis_results" in stage3_result:
                    analysis = stage3_result["analysis_results"]
                    logger.info("📊 多代理审查统计:")
                    logger.info(f"   参与代理数: {len(analysis)}")

                    for agent_name, result in analysis.items():
                        if isinstance(result, dict) and "response" in result:
                            logger.info(f"   {agent_name}: 审查完成")
                        else:
                            logger.info(f"   {agent_name}: 审查失败")

                # Dedicated image/table analysis, when present.
                if "image_analysis" in stage3_result:
                    logger.info(f"🖼️  图片分析: {len(stage3_result['image_analysis'])}个图片")

                if "table_analysis" in stage3_result:
                    logger.info(f"📊 表格分析: {len(stage3_result['table_analysis'])}个表格")

                return stage3_result
            else:
                error_msg = f"Stage3处理失败: {stage3_result.get('error', '未知错误')}"
                logger.error(error_msg)
                self.pipeline_state["errors"].append(error_msg)
                return stage3_result

        except Exception as e:
            error_msg = f"Stage3节点异常: {str(e)}"
            logger.error(error_msg)
            logger.error(traceback.format_exc())
            self.pipeline_state["errors"].append(error_msg)
            return {"success": False, "error": error_msg}

    def run_pipeline(self, document_path: str) -> Dict[str, Any]:
        """
        Run the complete processing pipeline.

        Each stage runs only if the previous one succeeded; the first
        failure short-circuits the pipeline and produces a final result
        describing the failure.

        Args:
            document_path: Path to the input document.

        Returns:
            The complete pipeline result (see :meth:`_generate_final_result`).
        """
        logger.info("🚀 启动AIDA完整处理流水线")
        logger.info(f"📄 输入文档: {document_path}")

        # Record the start time for total-duration reporting.
        self.pipeline_state["start_time"] = datetime.now()

        try:
            # Build all processors up front.
            if not self._initialize_processors():
                return self._generate_final_result(success=False, error="处理器初始化失败")

            # Stage1: document parsing and content extraction
            stage1_result = self.stage1_node(document_path)
            if not stage1_result.get("success", False):
                return self._generate_final_result(success=False, error="Stage1处理失败")

            # Stage2: embedding and index construction
            stage2_result = self.stage2_node(stage1_result)
            if not stage2_result.get("success", False):
                return self._generate_final_result(success=False, error="Stage2处理失败")

            # Stage3: multi-agent review and analysis
            stage3_result = self.stage3_node(stage2_result)
            if not stage3_result.get("success", False):
                return self._generate_final_result(success=False, error="Stage3处理失败")

            # All stages succeeded.
            return self._generate_final_result(success=True)

        except Exception as e:
            error_msg = f"流水线执行异常: {str(e)}"
            logger.error(error_msg)
            logger.error(traceback.format_exc())
            return self._generate_final_result(success=False, error=error_msg)

    def _generate_final_result(self, success: bool, error: Optional[str] = None) -> Dict[str, Any]:
        """
        Build, persist, and summarize the final pipeline result.

        Args:
            success: Whether the pipeline completed successfully.
            error: Optional top-level error message to record.

        Returns:
            The final result dict, including ``pipeline_info`` and the raw
            per-stage results.
        """
        # Record the end time.
        self.pipeline_state["end_time"] = datetime.now()

        # Compute total wall-clock duration.
        if self.pipeline_state["start_time"]:
            total_duration = (self.pipeline_state["end_time"] - self.pipeline_state["start_time"]).total_seconds()
        else:
            total_duration = 0

        # Assemble the final result structure.
        final_result = {
            "pipeline_info": {
                "success": success,
                "session_id": self.pipeline_state["session_id"],
                "start_time": self.pipeline_state["start_time"].isoformat() if self.pipeline_state["start_time"] else None,
                "end_time": self.pipeline_state["end_time"].isoformat() if self.pipeline_state["end_time"] else None,
                "total_duration_seconds": total_duration,
                "current_stage": self.pipeline_state["current_stage"],
                "errors": self.pipeline_state["errors"]
            },
            "stage1_results": self.pipeline_state["stage1_result"],
            "stage2_results": self.pipeline_state["stage2_result"],
            "stage3_results": self.pipeline_state["stage3_result"]
        }

        # Attach the top-level error message, if any.
        if error:
            final_result["pipeline_info"]["error"] = error
            self.pipeline_state["errors"].append(error)

        # Persist the result, organized by date and session.
        # NOTE(review): session_id may still be None if Stage1 failed, which
        # yields a "..._None" directory name — confirm this is acceptable.
        now = datetime.now()
        date_str = now.strftime('%Y/%m/%d')
        session_time = now.strftime('%H%M%S')
        session_id = self.pipeline_state["session_id"]

        # Directory layout: results/YYYY/MM/DD/session_HHMMSS_sessionid/
        results_dir = os.path.join("results", date_str, f"session_{session_time}_{session_id}")
        os.makedirs(results_dir, exist_ok=True)

        # Timestamped JSON result file inside the session directory.
        timestamp = now.strftime('%Y%m%d_%H%M%S')
        results_filename = os.path.join(results_dir, f"aida_pipeline_results_{timestamp}.json")

        try:
            # default=str stringifies non-JSON-serializable values (e.g. datetimes).
            with open(results_filename, 'w', encoding='utf-8') as f:
                json.dump(final_result, f, ensure_ascii=False, indent=2, default=str)

            logger.info(f"📊 完整结果已保存到: {results_filename}")
            final_result["pipeline_info"]["results_file"] = results_filename
            final_result["pipeline_info"]["results_directory"] = results_dir

        except Exception as e:
            # Best effort: a failed save must not break result reporting.
            logger.error(f"保存结果文件失败: {str(e)}")

        # Also emit a human-readable Markdown report alongside the JSON.
        text_report_filename = self._generate_human_readable_report(final_result, timestamp, results_dir)
        if text_report_filename:
            final_result["pipeline_info"]["text_report_file"] = text_report_filename

        # Print a summary of the whole run.
        self._print_pipeline_summary(final_result)

        return final_result

    def _generate_human_readable_report(self, final_result: Dict[str, Any], timestamp: str, results_dir: str) -> Optional[str]:
        """
        Generate a human-readable Markdown report from the Stage3 output.

        Args:
            final_result: The complete pipeline result.
            timestamp: Timestamp string used in the report filename.
            results_dir: Session directory the report is written into.

        Returns:
            The path of the generated Markdown report, or None on failure.
        """
        try:
            import re

            # A report can only be produced from a successful Stage3 run.
            stage3_result = final_result.get("stage3_results")
            if not stage3_result or not stage3_result.get("success"):
                logger.warning("Stage3未成功完成，无法生成文本报告")
                return None

            # The formatted report may live in several places; try each.
            formatted_report = None

            # 1) directly on the Stage3 result
            if "formatted_report" in stage3_result:
                formatted_report = stage3_result["formatted_report"]

            # 2) on any agent entry inside analysis_results
            elif "analysis_results" in stage3_result:
                analysis_results = stage3_result["analysis_results"]
                if isinstance(analysis_results, dict):
                    for agent_name, agent_result in analysis_results.items():
                        if isinstance(agent_result, dict) and "formatted_report" in agent_result:
                            formatted_report = agent_result["formatted_report"]
                            break

            # 3) specifically on the summary_agent entry
            if not formatted_report and "analysis_results" in stage3_result:
                analysis_results = stage3_result["analysis_results"]
                if isinstance(analysis_results, dict) and "summary_agent" in analysis_results:
                    summary_result = analysis_results["summary_agent"]
                    if isinstance(summary_result, dict) and "formatted_report" in summary_result:
                        formatted_report = summary_result["formatted_report"]

            if not formatted_report:
                logger.warning("未找到formatted_report，无法生成Markdown报告")
                return None

            # Strip code-fence wrappers the LLM may have emitted.
            if isinstance(formatted_report, str):
                formatted_report = re.sub(r'^```json\s*', '', formatted_report)
                formatted_report = re.sub(r'```\s*$', '', formatted_report)
                formatted_report = re.sub(r'^```\w*\s*', '', formatted_report)
                formatted_report = formatted_report.strip()

            # Report is saved as Markdown inside the session directory.
            report_filename = os.path.join(results_dir, f"AIDA_智能文档分析报告_{timestamp}.md")

            # Ensure the file ends with a newline.
            if formatted_report and not formatted_report.endswith('\n'):
                formatted_report += '\n'

            # Heuristic repair: if the content looks like single-line Markdown,
            # re-insert newlines before headings, bullets, and bold spans.
            if '##' in formatted_report and '\n' not in formatted_report:
                formatted_report = re.sub(r'(##[^#])', r'\n\1', formatted_report)
                formatted_report = re.sub(r'(###[^#])', r'\n\1', formatted_report)
                formatted_report = re.sub(r'(- )', r'\n\1', formatted_report)
                formatted_report = re.sub(r'(\*\*[^*]+\*\*)', r'\n\1', formatted_report)
                # Collapse any runs of blank lines introduced above.
                formatted_report = re.sub(r'\n+', '\n', formatted_report)
                formatted_report = formatted_report.strip() + '\n'

            # Write the Markdown report.
            with open(report_filename, 'w', encoding='utf-8') as f:
                f.write(formatted_report)

            logger.info(f"📄 Markdown报告已生成: {report_filename}")
            return report_filename

        except Exception as e:
            logger.error(f"生成Markdown报告失败: {str(e)}")
            logger.error(traceback.format_exc())
            return None

    def _print_pipeline_summary(self, final_result: Dict[str, Any]) -> None:
        """Log a human-readable summary of the pipeline run."""
        logger.info("\n" + "="*80)
        logger.info("📊 AIDA流水线处理总结")
        logger.info("="*80)

        pipeline_info = final_result["pipeline_info"]

        if pipeline_info["success"]:
            logger.info("✅ 整体结果: 成功")
        else:
            logger.info("❌ 整体结果: 失败")

        logger.info(f"⏱️  总处理时间: {pipeline_info['total_duration_seconds']:.2f}秒")
        logger.info(f"📁 会话ID: {pipeline_info['session_id']}")

        # Stage1 summary
        stage1_result = final_result.get("stage1_results")
        if stage1_result and stage1_result.get("success"):
            logger.info("✅ Stage1 (文档解析): 成功")
            if "processing_duration" in stage1_result:
                logger.info(f"   耗时: {stage1_result['processing_duration']:.2f}秒")
        else:
            logger.info("❌ Stage1 (文档解析): 失败")

        # Stage2 summary
        stage2_result = final_result.get("stage2_results")
        if stage2_result and stage2_result.get("success"):
            logger.info("✅ Stage2 (向量化): 成功")
            if "processing_duration" in stage2_result:
                logger.info(f"   耗时: {stage2_result['processing_duration']:.2f}秒")
            if "statistics" in stage2_result:
                stats = stage2_result["statistics"]
                logger.info(f"   向量化成功率: {stats.get('embedding_success_rate', 0):.1f}%")
        else:
            logger.info("❌ Stage2 (向量化): 失败")

        # Stage3 summary
        stage3_result = final_result.get("stage3_results")
        if stage3_result and stage3_result.get("success"):
            logger.info("✅ Stage3 (多代理审查): 成功")
            if "processing_duration" in stage3_result:
                logger.info(f"   耗时: {stage3_result['processing_duration']:.2f}秒")
            if "analysis_results" in stage3_result:
                analysis = stage3_result["analysis_results"]
                logger.info(f"   参与代理数: {len(analysis)}")
        else:
            logger.info("❌ Stage3 (多代理审查): 失败")

        # Error summary
        if pipeline_info["errors"]:
            logger.info("⚠️  错误信息:")
            for error in pipeline_info["errors"]:
                logger.info(f"   - {error}")

        # Output file locations
        if "results_file" in pipeline_info:
            logger.info(f"📊 JSON结果文件: {pipeline_info['results_file']}")

        if "text_report_file" in pipeline_info:
            logger.info(f"📄 Markdown报告: {pipeline_info['text_report_file']}")

        logger.info("="*80)


def process_document(document_path: str, api_key: Optional[str] = None,
                     storage_base_path: str = "storage") -> Dict[str, Any]:
    """
    Convenience wrapper: run the full AIDA pipeline on one document.

    Args:
        document_path: Path to the document to process.
        api_key: Optional API key forwarded to the pipeline.
        storage_base_path: Base path for session storage (defaults to
            "storage", matching :class:`AIDAPipeline`).

    Returns:
        The complete pipeline result dict.
    """
    pipeline = AIDAPipeline(api_key=api_key, storage_base_path=storage_base_path)
    return pipeline.run_pipeline(document_path)


def main():
    """Command-line entry point.

    Parses arguments, validates the input document, runs the full
    pipeline, and prints the outcome. Exits with status 1 on any failure.
    """
    import argparse

    parser = argparse.ArgumentParser(description="AIDA Framework 完整处理流水线")
    parser.add_argument("document", help="要处理的文档路径")
    parser.add_argument("--api-key", help="API密钥")
    parser.add_argument("--storage", default="storage", help="存储基础路径")

    args = parser.parse_args()

    # Fail fast if the document is missing.
    if not os.path.exists(args.document):
        print(f"❌ 错误: 文档文件不存在: {args.document}")
        sys.exit(1)

    # Run the pipeline. Construct it directly so that --storage is actually
    # honored (it was previously parsed but silently ignored).
    print(f"🚀 开始处理文档: {args.document}")
    pipeline = AIDAPipeline(api_key=args.api_key, storage_base_path=args.storage)
    result = pipeline.run_pipeline(args.document)

    # Report the outcome.
    if result["pipeline_info"]["success"]:
        print("✅ 文档处理完成！")
        print(f"📊 JSON结果文件: {result['pipeline_info'].get('results_file', 'N/A')}")

        # Point the user at the Markdown report, if one was generated.
        text_report_file = result['pipeline_info'].get('text_report_file')
        if text_report_file:
            print(f"📄 Markdown报告: {text_report_file}")
            print("\n💡 提示: 可以直接打开Markdown报告文件查看分析结果")
        else:
            print("⚠️  未生成Markdown报告文件")
    else:
        print("❌ 文档处理失败！")
        print(f"错误: {result['pipeline_info'].get('error', '未知错误')}")
        sys.exit(1)


if __name__ == "__main__":
    # With CLI arguments present, defer to the argparse-based entry point;
    # otherwise fall back to processing a bundled sample document.
    if len(sys.argv) > 1:
        main()
    else:
        test_document = "beijing.pdf"
        if not os.path.exists(test_document):
            print("❌ 默认测试文档不存在，请提供文档路径")
            print("用法: python main.py <文档路径>")
            sys.exit(1)
        print(f"🧪 运行默认测试: {test_document}")
        result = process_document(test_document)