"""评估项提取相关节点"""

import asyncio
import sys
import traceback
from pocketflow import AsyncParallelBatchNode
from ..utils.call_llm import call_llm_async
from ..utils.extract_checkpoints_utils import create_extraction_prompt
from ..utils.merge_checkpoints_utils import group_checkpoints_by_module, ai_deduplication_async
from ..utils.utils import parse_llm_json_response

class ExtractCheckpointsNode(AsyncParallelBatchNode):
    """Process all text chunks in parallel, extracting checkpoints from each.

    Flow contract:
        prep_async  -> reads shared["text_chunks"], returns the chunk list.
        exec_async  -> one chunk dict in, one result dict out (runs concurrently).
        post_async  -> writes per-chunk results to shared["chunk_checkpoints"]
                       and a run summary to shared["extraction_summary"].
    """

    def __init__(self, max_concurrency=15, max_retries=3, **kwargs):
        """
        Args:
            max_concurrency: maximum number of LLM calls in flight at once.
            max_retries: attempts per chunk before giving up and returning an
                empty checkpoint list for that chunk.
        """
        super().__init__(**kwargs)
        # Semaphore bounds concurrent coroutines (not OS threads) hitting the LLM.
        self.semaphore = asyncio.Semaphore(max_concurrency)
        self.max_retries = max_retries

    async def prep_async(self, shared):
        """Return the list of text chunks to fan out over; [] if there are none."""
        text_chunks = shared.get("text_chunks", [])
        if not text_chunks:
            print("警告: 没有文本块可处理")
            return []

        print(f"准备并行处理 {len(text_chunks)} 个文本块")
        return text_chunks

    async def exec_async(self, chunk):
        """Extract checkpoints from a single text chunk via the LLM.

        Returns:
            dict with keys "chunk_id", "title", "checkpoints". On unrecoverable
            failure "checkpoints" is [] rather than raising, so one bad chunk
            never aborts the whole batch.
        """
        async with self.semaphore:  # cap concurrent LLM requests
            chunk_id = chunk["chunk_id"]
            chunk_text = chunk["text"]
            chunk_title = chunk.get("title", "无标题")

            # Skip chunks too short to contain meaningful content.
            if len(chunk_text.strip()) < 10:
                print(f"块 {chunk_id} (标题: {chunk_title}): 文本过短，跳过处理")
                return {"chunk_id": chunk_id, "title": chunk_title, "checkpoints": []}

            prompt = create_extraction_prompt(chunk_id, chunk_text)

            # Retry transient LLM/parse failures with a short fixed backoff.
            for retry in range(self.max_retries):
                try:
                    response = await call_llm_async(prompt)
                    checkpoints = parse_llm_json_response(response, expected_type=list, default_value=[])

                    if checkpoints:
                        print(f"块 {chunk_id} (标题: {chunk_title}): 提取到 {len(checkpoints)} 个评估项")
                    else:
                        print(f"块 {chunk_id} (标题: {chunk_title}): 未提取到评估项")
                        print(f"LLM完整响应: {response}")

                    return {"chunk_id": chunk_id, "title": chunk_title, "checkpoints": checkpoints}

                except Exception as e:
                    if retry < self.max_retries - 1:
                        print(f"处理块 {chunk_id} 时出错 (重试 {retry+1}/{self.max_retries}): {str(e)}")
                        await asyncio.sleep(1)  # brief pause before retrying
                    else:
                        print(f"处理块 {chunk_id} 失败，达到最大重试次数: {str(e)}")
                        # format_exc() captures the active exception's traceback;
                        # no need for the sys.exc_info() dance.
                        print("详细错误信息:")
                        print(traceback.format_exc())
                        return {"chunk_id": chunk_id, "title": chunk_title, "checkpoints": []}

            # Defensive: only reachable when max_retries <= 0.
            return {"chunk_id": chunk_id, "title": chunk_title, "checkpoints": []}

    async def post_async(self, shared, prep_res, exec_res_list):
        """Persist per-chunk results and an extraction summary into `shared`."""
        processed_chunks = len(exec_res_list)
        total_chunks = len(prep_res)
        if processed_chunks != total_chunks:
            print(f"警告: 只处理了 {processed_chunks}/{total_chunks} 个块")

        # setdefault avoids a KeyError when an upstream node did not
        # pre-create shared["chunk_checkpoints"].
        chunk_checkpoints = shared.setdefault("chunk_checkpoints", {})

        total_checkpoints = 0
        chunks_with_checkpoints = 0

        for result in exec_res_list:
            checkpoints = result["checkpoints"]
            chunk_checkpoints[result["chunk_id"]] = checkpoints

            if checkpoints:
                total_checkpoints += len(checkpoints)
                chunks_with_checkpoints += 1

        print(f"并行处理完成: {chunks_with_checkpoints}/{len(exec_res_list)} 个块提取到评估项，总计 {total_checkpoints} 个评估项")

        # Keep a per-chunk breakdown for downstream reporting/debugging.
        shared["extraction_summary"] = {
            "total_chunks": total_chunks,
            "processed_chunks": processed_chunks,
            "chunks_with_data": chunks_with_checkpoints,
            "total_checkpoints": total_checkpoints,
            "chunk_details": [
                {
                    "chunk_id": result["chunk_id"],
                    "title": result.get("title", "无标题"),
                    "checkpoints_count": len(result["checkpoints"])
                }
                for result in exec_res_list
            ]
        }

        return "default"

class MergeCheckpointsNode(AsyncParallelBatchNode):
    """Aggregate all extracted checkpoints and AI-deduplicate them per module.

    Flow contract:
        prep_async  -> flattens shared["chunk_checkpoints"] and groups by module.
        exec_async  -> deduplicates one module's checkpoints (runs concurrently).
        post_async  -> writes merged results to shared["checkpoints"] and stats
                       to shared["deduplication_summary"].
    """

    def __init__(self, max_concurrency=3, max_retries=3, **kwargs):
        """
        Args:
            max_concurrency: maximum number of dedup LLM calls in flight at once.
            max_retries: attempts per module before falling back to the
                original (un-deduplicated) checkpoint list.
        """
        super().__init__(**kwargs)
        # Semaphore bounds concurrent dedup coroutines.
        self.semaphore = asyncio.Semaphore(max_concurrency)
        self.max_retries = max_retries

    async def prep_async(self, shared):
        """Collect every chunk's checkpoints and return one work item per module."""
        chunk_checkpoints = shared.get("chunk_checkpoints", {})

        # Flatten all chunks' checkpoint lists into one.
        all_checkpoints = []
        for checkpoints in chunk_checkpoints.values():
            all_checkpoints.extend(checkpoints)

        if not all_checkpoints:
            print("没有评估项需要处理")
            return []

        print(f"开始按模块分组并行去重，原始评估项数量: {len(all_checkpoints)}")

        module_groups = group_checkpoints_by_module(all_checkpoints)

        # Reshape into the item-per-module list the batch node fans out over.
        module_items = [
            {"module": module, "checkpoints": checkpoints}
            for module, checkpoints in module_groups.items()
        ]

        print(f"按module分组完成，共 {len(module_items)} 个模块待处理")
        return module_items

    async def exec_async(self, module_item):
        """Deduplicate one module's checkpoints.

        Returns:
            dict with keys "module", "unique_checkpoints". On unrecoverable
            failure the original list is returned unchanged (best-effort)
            rather than raising.
        """
        async with self.semaphore:  # cap concurrent dedup requests
            module = module_item["module"]
            checkpoints = module_item["checkpoints"]

            # Nothing to deduplicate with zero or one item.
            if len(checkpoints) <= 1:
                return {"module": module, "unique_checkpoints": checkpoints}

            print(f"正在处理模块 '{module}': {len(checkpoints)} 个评估项")

            # Retry transient failures with a short fixed backoff.
            for retry in range(self.max_retries):
                try:
                    unique_checkpoints = await ai_deduplication_async(checkpoints)

                    print(f"模块 '{module}' 去重完成: {len(checkpoints)} -> {len(unique_checkpoints)} 个评估项")
                    return {"module": module, "unique_checkpoints": unique_checkpoints}

                except Exception as e:
                    if retry < self.max_retries - 1:
                        print(f"处理模块 '{module}' 时出错 (重试 {retry+1}/{self.max_retries}): {str(e)}")
                        await asyncio.sleep(1)  # brief pause before retrying
                    else:
                        print(f"处理模块 '{module}' 失败，达到最大重试次数: {str(e)}")
                        # Best-effort: keep the un-deduplicated list rather than lose data.
                        return {"module": module, "unique_checkpoints": checkpoints}

            # Defensive: only reachable when max_retries <= 0.
            return {"module": module, "unique_checkpoints": checkpoints}

    async def post_async(self, shared, prep_res, exec_res_list):
        """Combine per-module results into shared["checkpoints"] plus statistics."""
        # Build the original-count lookup once: O(1) per result instead of a
        # per-result linear scan, and .get() guards against a module that is
        # somehow absent from prep_res (the old [0] indexing would raise).
        original_counts = {item["module"]: len(item["checkpoints"]) for item in prep_res}

        all_unique_checkpoints = []
        total_original = 0
        total_unique = 0

        for result in exec_res_list:
            unique_checkpoints = result["unique_checkpoints"]
            all_unique_checkpoints.extend(unique_checkpoints)

            total_original += original_counts.get(result["module"], 0)
            total_unique += len(unique_checkpoints)

        print(f"所有模块去重完成: {total_original} -> {total_unique} 个评估项")

        # Merged, deduplicated result consumed by downstream nodes.
        shared["checkpoints"] = all_unique_checkpoints

        # Per-module breakdown for reporting/debugging.
        shared["deduplication_summary"] = {
            "total_original": total_original,
            "total_unique": total_unique,
            "modules_processed": len(exec_res_list),
            "module_details": [
                {
                    "module": result["module"],
                    "unique_count": len(result["unique_checkpoints"])
                }
                for result in exec_res_list
            ]
        }

        return "default"
