"""
第二阶段处理器 (Stage 2 Processor)
整合索引与上下文构建的完整流程
"""

import os
import json
import logging
import tempfile
from typing import Dict, List, Any, Optional, Tuple
from datetime import datetime

from .embedding_engine import EmbeddingEngine
from .vector_store import VectorStore

class Stage2Processor:
    """
    Stage-2 processor: index and retrieval-context construction.

    Reads the ``level3_groups`` JSON files produced by stage 1, merges each
    group's text elements into a single chunk, embeds the chunk via the
    EmbeddingEngine, and stores the vectors in a temporary VectorStore for
    later similarity retrieval.
    """
    
    def __init__(self, api_key: str, model: str = "BAAI/bge-m3"):
        """
        Initialize the stage-2 processor.
        
        Args:
            api_key: SiliconFlow API key used by the embedding engine.
            model: Name of the embedding model.
        """
        self.api_key = api_key
        self.model = model
        self.logger = logging.getLogger(__name__)
        
        # Initialize components. The embedding engine is created eagerly;
        # the vector store is created lazily by process_session() or
        # load_vector_store().
        self.embedding_engine = EmbeddingEngine(api_key, model)
        self.vector_store: Optional[VectorStore] = None
        
        # Processing statistics for the current run.
        # NOTE(review): counters are not reset at the start of
        # process_session(), so reusing one instance for several sessions
        # accumulates stats across runs — confirm whether that is intended.
        self.stats: Dict[str, Any] = {
            "start_time": None,
            "end_time": None,
            "total_groups": 0,
            "processed_groups": 0,
            "total_elements": 0,
            "successful_embeddings": 0,
            "failed_embeddings": 0
        }
        
        self.logger.info("第二阶段处理器初始化完成")
    
    def process_session(self, session_path: str, temp_storage_path: Optional[str] = None) -> Dict[str, Any]:
        """
        Vectorize all level-3 groups found under the given session path.
        
        Args:
            session_path: Session storage path (must contain text/level3_groups).
            temp_storage_path: Temporary storage path; a fresh temp directory
                is created automatically when None.
            
        Returns:
            Result dict. On a normal run: "success", "session_path",
            "temp_storage_path", "processing_time_seconds", "statistics",
            "vector_store_stats". On an exception: "success" (False),
            "error", "session_path", "statistics" only.
        """
        self.stats["start_time"] = datetime.now()
        self.logger.info(f"开始处理会话: {session_path}")
        
        try:
            # Validate the session layout before doing any work.
            if not os.path.exists(session_path):
                raise ValueError(f"会话路径不存在: {session_path}")
            
            level3_groups_path = os.path.join(session_path, "text", "level3_groups")
            if not os.path.exists(level3_groups_path):
                raise ValueError(f"level3_groups路径不存在: {level3_groups_path}")
            
            # Create temporary storage for the vector store when not supplied.
            if temp_storage_path is None:
                temp_storage_path = tempfile.mkdtemp(prefix="aida_stage2_")
            
            self.vector_store = VectorStore(temp_storage_path, auto_cleanup=True)
            
            # Collect all level3_groups JSON files (non-recursive).
            group_files = [f for f in os.listdir(level3_groups_path) if f.endswith('.json')]
            self.stats["total_groups"] = len(group_files)
            
            self.logger.info(f"找到 {len(group_files)} 个三级标题分组")
            
            # Process each group; per-group failures are counted but do not
            # abort the whole session.
            for group_file in group_files:
                group_path = os.path.join(level3_groups_path, group_file)
                success = self._process_group_file(group_path)
                
                if success:
                    self.stats["processed_groups"] += 1
                
                self.logger.info(f"处理进度: {self.stats['processed_groups']}/{self.stats['total_groups']}")
            
            # Persist the vector data to disk.
            if self.vector_store.save_to_disk():
                self.logger.info("向量数据保存成功")
            else:
                self.logger.error("向量数据保存失败")
            
            # Finish processing: record timing.
            self.stats["end_time"] = datetime.now()
            processing_time = (self.stats["end_time"] - self.stats["start_time"]).total_seconds()
            
            # Compute the embedding success rate.
            stats = self._get_processing_stats()
            embedding_success_rate = stats.get("embedding_success_rate", 0)
            
            # The run counts as successful only when more than 50% of the
            # attempted embeddings succeeded.
            is_success = embedding_success_rate > 50.0
            
            if not is_success:
                self.logger.error(f"Stage2处理失败：embedding成功率过低 ({embedding_success_rate:.1f}%)")
            
            # NOTE(review): this failure shape (low success rate) carries no
            # "error" key, unlike the except-path below — callers must use
            # result.get("error").
            result = {
                "success": is_success,
                "session_path": session_path,
                "temp_storage_path": temp_storage_path,
                "processing_time_seconds": processing_time,
                "statistics": stats,
                "vector_store_stats": self.vector_store.get_statistics() if self.vector_store else {}
            }
            
            if is_success:
                self.logger.info(f"第二阶段处理成功，耗时: {processing_time:.2f}秒，embedding成功率: {embedding_success_rate:.1f}%")
            else:
                self.logger.error(f"第二阶段处理失败，耗时: {processing_time:.2f}秒，embedding成功率: {embedding_success_rate:.1f}%")
            
            return result
            
        except Exception as e:
            self.logger.error(f"第二阶段处理失败: {e}")
            return {
                "success": False,
                "error": str(e),
                "session_path": session_path,
                "statistics": self._get_processing_stats()
            }
    
    def _process_group_file(self, group_file_path: str) -> bool:
        """
        Process a single group file: merge its text, embed it, store the chunk.
        
        Args:
            group_file_path: Path to the group JSON file.
            
        Returns:
            True on success (empty or content-less groups are skipped and
            still count as success); False when embedding or storage fails.
        """
        try:
            # Load the group data.
            with open(group_file_path, 'r', encoding='utf-8') as f:
                group_data = json.load(f)
            
            level3_number = group_data.get("level3_number", "未知")
            elements = group_data.get("elements", [])
            
            if not elements:
                self.logger.warning(f"分组 {level3_number} 没有元素，跳过")
                return True
            
            self.stats["total_elements"] += len(elements)
            
            # Merge all element contents into one large chunk of text.
            chunk_content = self._merge_elements_content(elements, level3_number)
            
            if not chunk_content.strip():
                self.logger.warning(f"分组 {level3_number} 内容为空，跳过")
                return True
            
            # Generate the embedding vector.
            vector = self.embedding_engine.embed_text(chunk_content)
            
            if vector is None:
                self.logger.error(f"分组 {level3_number} 向量化失败")
                self.stats["failed_embeddings"] += 1
                return False
            
            self.stats["successful_embeddings"] += 1
            
            # Build the chunk ID from the heading number plus a timestamp.
            # NOTE(review): timestamp resolution is one second — IDs could
            # collide if the same level3_number were processed twice within
            # the same second.
            chunk_id = f"chunk_{level3_number}_{datetime.now().strftime('%Y%m%d_%H%M%S')}"
            
            # Add the chunk and its vector to the vector store.
            success = self.vector_store.add_chunk(
                chunk_id=chunk_id,
                content=chunk_content,
                vector=vector,
                elements=elements,
                level3_number=level3_number
            )
            
            if success:
                self.logger.debug(f"分组 {level3_number} 处理成功，包含 {len(elements)} 个元素")
            else:
                self.logger.error(f"分组 {level3_number} 添加到向量存储失败")
            
            return success
            
        except Exception as e:
            self.logger.error(f"处理分组文件失败 {group_file_path}: {e}")
            return False
    
    def _merge_elements_content(self, elements: List[Dict[str, Any]], level3_number: str) -> str:
        """
        Merge element contents into one chunk (text only; images and tables
        are filtered out).
        
        Args:
            elements: Element dicts; this method reads their "category",
                "subcategory", "content" and "element_id" keys.
            level3_number: Level-3 heading number used as the chunk header.
            
        Returns:
            Merged chunk text.
        """
        content_parts = []
        
        # Lead with the level-3 heading as a section marker.
        content_parts.append(f"=== {level3_number} ===\n")
        
        # Keep only text-type elements; drop images and tables.
        titles = []
        texts = []
        
        for element in elements:
            element_type = element.get("category", "")
            subcategory = element.get("subcategory", "")
            content = element.get("content", "").strip()
            
            # Skip image and table content entirely.
            if element_type in ["image", "table"]:
                self.logger.debug(f"跳过{element_type}元素: {element.get('element_id', 'unknown')}")
                continue
            
            if not content:
                continue
            
            # Split text elements into titles vs. body text based on the
            # subcategory name containing "title".
            if element_type == "text":
                if "title" in subcategory.lower():
                    titles.append(content)
                else:
                    texts.append(content)
        
        # Emit titles first, then body text, each under a labeled header.
        if titles:
            content_parts.append("【标题】")
            content_parts.extend(titles)
            content_parts.append("")
        
        if texts:
            content_parts.append("【正文内容】")
            content_parts.extend(texts)
            content_parts.append("")
        
        # Append a metadata line (counts text elements only).
        text_elements_count = len(titles) + len(texts)
        content_parts.append(f"【元数据】包含 {text_elements_count} 个文本元素")
        
        return "\n".join(content_parts)
    
    def load_vector_store(self, temp_storage_path: str) -> bool:
        """
        Load an existing vector store from disk.
        
        Args:
            temp_storage_path: Path of the persisted vector store.
            
        Returns:
            True when data was loaded and contains at least one chunk.
        """
        try:
            if not os.path.exists(temp_storage_path):
                self.logger.error(f"向量存储路径不存在: {temp_storage_path}")
                return False
            
            # Create the store instance with auto_cleanup=False so loading
            # an existing store never deletes its data.
            self.vector_store = VectorStore(temp_storage_path, auto_cleanup=False)
            
            # Explicitly load the persisted data.
            if not self.vector_store.load_from_disk():
                self.logger.error("向量存储数据加载失败")
                return False
            
            # An empty store is treated as a failed load.
            if self.vector_store.get_chunk_count() > 0:
                self.logger.info(f"成功加载向量存储，包含 {self.vector_store.get_chunk_count()} 个文档块")
                return True
            else:
                self.logger.warning("向量存储加载成功但没有找到文档块")
                return False
                
        except Exception as e:
            self.logger.error(f"加载向量存储失败: {e}")
            return False

    def retrieve_content(self, query: str, top_k: int = 5) -> Dict[str, Any]:
        """
        Retrieve content relevant to a query, formatted for callers.
        
        Args:
            query: Query text.
            top_k: Maximum number of results to return.
            
        Returns:
            Dict with "success" and a "results" list; on success also
            "query" and "total_count", on failure an "error" message.
        """
        if not self.vector_store:
            self.logger.error("向量存储未初始化，无法进行检索")
            return {
                "success": False,
                "error": "向量存储未初始化",
                "results": []
            }

        try:
            # Run the raw similarity search.
            results = self.search_similar_content(query, top_k)
            
            # Flatten (chunk_data, score) pairs into plain dicts.
            formatted_results = []
            for chunk_data, similarity_score in results:
                formatted_results.append({
                    "content": chunk_data.get("content", ""),
                    "level3_number": chunk_data.get("level3_number", ""),
                    "similarity_score": similarity_score,
                    "chunk_id": chunk_data.get("chunk_id", ""),
                    "elements_count": len(chunk_data.get("elements", []))
                })
            
            self.logger.info(f"检索查询'{query}'成功，返回{len(formatted_results)}个结果")
            return {
                "success": True,
                "results": formatted_results,
                "query": query,
                "total_count": len(formatted_results)
            }
            
        except Exception as e:
            self.logger.error(f"检索内容失败: {e}")
            return {
                "success": False,
                "error": str(e),
                "results": []
            }
    
    def search_similar_content(self, query: str, top_k: int = 5) -> List[Tuple[Dict[str, Any], float]]:
        """
        Search the vector store for content similar to the query.
        
        Args:
            query: Query text.
            top_k: Maximum number of results.
            
        Returns:
            List of (chunk_data, similarity_score) pairs; empty list when the
            store is uninitialized or the query cannot be embedded.
        """
        if not self.vector_store:
            self.logger.error("向量存储未初始化")
            return []
        
        # Embed the query text.
        query_vector = self.embedding_engine.embed_text(query)
        if query_vector is None:
            self.logger.error("查询向量化失败")
            return []
        
        # Delegate the similarity search to the vector store.
        results = self.vector_store.search_similar(query_vector, top_k)
        
        self.logger.info(f"搜索查询: '{query}', 返回 {len(results)} 个结果")
        return results
    
    def get_chunk_by_level3(self, level3_number: str) -> Optional[Dict[str, Any]]:
        """
        Get the first document chunk for a level-3 heading number.
        
        Args:
            level3_number: Level-3 heading number.
            
        Returns:
            Chunk data dict, or None when the store is uninitialized or no
            chunk matches.
        """
        if not self.vector_store:
            return None
        
        chunks = self.vector_store.get_chunks_by_level3(level3_number)
        return chunks[0] if chunks else None
    
    def _get_processing_stats(self) -> Dict[str, Any]:
        """
        Build a snapshot of the processing statistics.
        
        Returns:
            Copy of self.stats extended with embedding-API stats and the
            embedding success rate as a percentage.
        """
        stats = self.stats.copy()
        
        # Attach embedding-API statistics.
        stats["embedding_api_stats"] = self.embedding_engine.get_stats()
        
        # Success rate over attempted embeddings only — skipped/empty groups
        # never reach the embedding step and are excluded from the denominator.
        total_processed = stats["successful_embeddings"] + stats["failed_embeddings"]
        if total_processed > 0:
            stats["embedding_success_rate"] = (
                stats["successful_embeddings"] / total_processed
            ) * 100
        else:
            stats["embedding_success_rate"] = 0
        
        return stats
    
    def cleanup(self):
        """Release resources: clean up the vector store and reset API stats."""
        if self.vector_store:
            self.vector_store.cleanup()
        self.embedding_engine.reset_stats()


if __name__ == "__main__":
    # CLI entry point for manual testing:
    #   python stage2_processor.py <session_path>
    import sys
    
    # Configure logging for standalone runs.
    logging.basicConfig(level=logging.INFO, format='%(asctime)s - %(levelname)s - %(message)s')
    
    if len(sys.argv) < 2:
        print("用法: python stage2_processor.py <session_path>")
        sys.exit(1)
    
    session_path = sys.argv[1]
    # SECURITY: never hard-code API secrets in source — read the key from the
    # environment instead. (The key previously embedded here is exposed in
    # version control and should be revoked.)
    api_key = os.environ.get("SILICONFLOW_API_KEY", "")
    if not api_key:
        print("错误: 请设置环境变量 SILICONFLOW_API_KEY")
        sys.exit(1)
    
    processor = Stage2Processor(api_key)
    result = processor.process_session(session_path)
    
    if result["success"]:
        print("✅ 第二阶段处理成功")
        print(f"临时存储路径: {result['temp_storage_path']}")
        print(f"处理时间: {result['processing_time_seconds']:.2f}秒")
        print(f"统计信息: {json.dumps(result['statistics'], ensure_ascii=False, indent=2)}")
    else:
        print("❌ 第二阶段处理失败")
        # Use .get(): the low-success-rate failure path of process_session
        # returns success=False WITHOUT an "error" key, so result['error']
        # would raise KeyError here.
        print(f"错误: {result.get('error', '处理失败，详见日志')}")