from typing import List, Dict, Any, Optional
from utils.logger import Logger
from utils.exceptions import ToolError
from apps.tool_call.api_client import APIClient
import json
import re

logger = Logger.get_logger(agent_name="task_reviewer")


class TaskReviewer:
    """Task review mechanism: summarizes sub-task results after an agent
    finishes its sub-tasks and filters out redundant information."""

    def __init__(self):
        """Initialize the task reviewer with an API client for LLM calls."""
        self.api_client = APIClient()
        logger.info("TaskReviewer initialized successfully")

    def analyze_sub_task_results(self, sub_task_results: List[Dict[str, Any]]) -> Dict[str, Any]:
        """Analyze sub-task results: collect statistics, extract key
        information points and detect redundant pairs among them.

        Args:
            sub_task_results: List of sub-task result dicts. Each entry is
                expected to carry "status" ("completed"/"failed"),
                "sub_task_id" and a "result" payload — TODO confirm schema
                against the producer of these dicts.

        Returns:
            Dict with totals, success/failure counts, the extracted
            (sub_task_id, key_point) tuples, redundant index pairs and the
            overall success rate.

        Raises:
            ToolError: If any step of the analysis fails.
        """
        try:
            # Basic statistics.
            total_tasks = len(sub_task_results)
            successful_tasks = [res for res in sub_task_results if res.get("status") == "completed"]
            failed_tasks = [res for res in sub_task_results if res.get("status") == "failed"]

            # Extract key information from every successful result.
            key_information = []
            for result in successful_tasks:
                result_content = self._extract_result_content(result)
                if result_content:
                    key_points = self._extract_key_points(result_content)
                    key_information.extend([(result.get("sub_task_id"), point) for point in key_points])

            # Detect pairs of near-duplicate information points.
            redundant_pairs = self._identify_redundancies(key_information)

            return {
                "total_tasks": total_tasks,
                "successful_tasks": len(successful_tasks),
                "failed_tasks": len(failed_tasks),
                "key_information": key_information,
                "redundant_pairs": redundant_pairs,
                # Guard against division by zero for an empty input list.
                "success_rate": len(successful_tasks) / total_tasks if total_tasks > 0 else 0
            }
        except Exception as e:
            logger.error(f"分析子任务结果失败: {str(e)}", exc_info=True)
            raise ToolError(
                message="子任务结果分析失败",
                context={"error": str(e)}
            ) from e

    def _extract_result_content(self, result: Dict[str, Any]) -> str:
        """Extract a text representation from a sub-task result.

        Handles plain-string payloads, dicts with well-known content fields,
        and otherwise falls back to a JSON (or ``str``) dump. Note: every
        path returns a string — the previous ``Optional[str]`` annotation
        was inaccurate and has been narrowed to ``str``.
        """
        result_data = result.get("result", {})

        # Handle the different result payload shapes.
        if isinstance(result_data, str):
            return result_data
        elif isinstance(result_data, dict):
            # Probe well-known content fields in priority order.
            content_fields = ["content", "answer", "response", "result", "text", "data"]
            for field in content_fields:
                if field in result_data and isinstance(result_data[field], str):
                    return result_data[field]
            # No recognized field: fall back to a JSON dump. The former bare
            # ``except:`` is narrowed to the exceptions json.dumps can raise
            # (TypeError for unserializable objects, ValueError for e.g.
            # circular references).
            try:
                return json.dumps(result_data, ensure_ascii=False)
            except (TypeError, ValueError):
                return str(result_data)
        return str(result_data)

    def _extract_key_points(self, text: str, max_points: int = 5) -> List[str]:
        """Extract key information points from text.

        Simple heuristic: split into sentences and keep the ones that carry
        concrete data (numbers, dates, domains/e-mails, locations). Could be
        replaced by a proper NLP model later.

        Args:
            text: Source text to scan.
            max_points: Maximum number of points to return (default 5,
                preserving the original hard-coded limit).
        """
        sentences = re.split(r'[。！？.!?]', text)
        key_points = []

        # Patterns signalling concrete information.
        signal_patterns = [
            r'\d+[年月日天时分秒]?',  # dates / times
            r'[\d]+[.，,][\d]+',      # decimals
            r'[A-Za-z]+\.[A-Za-z]+',  # domains / e-mail fragments
            r'[\u4e00-\u9fa5]{2,}市|[\u4e00-\u9fa5]{2,}省',  # locations
        ]

        # Drop sentences that are too short or too generic.
        for sentence in sentences:
            sentence = sentence.strip()
            if len(sentence) > 10:  # at least 10 characters
                if any(re.search(pattern, sentence) for pattern in signal_patterns):
                    key_points.append(sentence)

        # Fall back to all non-empty sentences when nothing matched.
        if not key_points:
            key_points = [s.strip() for s in sentences if s.strip()]

        return key_points[:max_points]

    def _identify_redundancies(self, key_information: List[tuple],
                               threshold: float = 0.6) -> List[tuple]:
        """Identify redundant information pairs.

        Args:
            key_information: List of (sub_task_id, info_text) tuples.
            threshold: Similarity above which a pair counts as redundant
                (default 0.6, preserving the original hard-coded value).

        Returns:
            List of (i, j, similarity) tuples indexing into
            ``key_information``. O(n^2) pairwise scan — acceptable for the
            small lists produced per task.
        """
        redundant_pairs = []
        n = len(key_information)

        for i in range(n):
            for j in range(i + 1, n):
                _, info1 = key_information[i]
                _, info2 = key_information[j]

                similarity = self._calculate_text_similarity(info1, info2)

                if similarity > threshold:
                    redundant_pairs.append((i, j, similarity))

        return redundant_pairs

    def _calculate_text_similarity(self, text1: str, text2: str) -> float:
        """Jaccard similarity over character bigrams (avoids word segmentation)."""
        def get_ngrams(text: str, n: int = 2) -> set:
            return set(text[i:i + n] for i in range(len(text) - n + 1))

        ngrams1 = get_ngrams(text1)
        ngrams2 = get_ngrams(text2)

        if not ngrams1 or not ngrams2:
            # Texts shorter than the n-gram size carry no signal.
            return 0.0

        intersection = len(ngrams1.intersection(ngrams2))
        union = len(ngrams1.union(ngrams2))
        return intersection / union

    async def generate_review_summary(self,
                                     parent_task_id: str,
                                     original_query: str,
                                     analysis_result: Dict[str, Any],
                                     sub_task_results: List[Dict[str, Any]]) -> str:
        """Generate a task review summary via the LLM.

        Args:
            parent_task_id: Parent task ID.
            original_query: The user's original query.
            analysis_result: Output of :meth:`analyze_sub_task_results`.
            sub_task_results: Raw sub-task results.

        Returns:
            The generated review summary text.

        Raises:
            ToolError: If summary generation fails.
        """
        try:
            # Build the prompt from the successful results (truncated to
            # 300 chars each to bound prompt size).
            successful_results = [res for res in sub_task_results if res.get("status") == "completed"]
            results_content = "\n".join([
                f"子任务{idx+1}（{res.get('sub_task_id', 'unknown')}）结果：{self._extract_result_content(res)[:300]}"
                for idx, res in enumerate(successful_results)
            ])

            # Redundancy hint for the prompt, only when pairs were found.
            redundancy_info = ""
            if analysis_result.get("redundant_pairs"):
                redundancy_info = f"\n冗余信息数量：{len(analysis_result['redundant_pairs'])} 对"

            prompt = f"""
            # 任务回顾分析
            
            ## 任务信息
            原始问题：{original_query}
            任务ID：{parent_task_id}
            子任务总数：{analysis_result['total_tasks']}
            成功任务数：{analysis_result['successful_tasks']}
            失败任务数：{analysis_result['failed_tasks']}
            成功率：{analysis_result['success_rate']:.2%}
            {redundancy_info}
            
            ## 子任务执行结果
            {results_content}
            
            ## 分析要求
            请基于以上信息，执行以下任务：
            1. 总结所有子任务的执行情况
            2. 识别并过滤掉冗余信息
            3. 提取最关键、最有价值的信息点
            4. 生成一个精炼、准确、完整的回答，直接回应用户的原始问题
            5. 去除重复内容，确保信息的唯一性和有效性
            
            要求回答简洁明了，重点突出，避免不必要的重复，总长度不超过500字。
            """

            # Call the LLM to produce the review summary.
            api_params = {
                "model": "gpt-3.5-turbo",
                "messages": [{"role": "user", "content": prompt}],
                "max_tokens": 1024,
                "temperature": 0.3  # low temperature for stable, focused output
            }

            # NOTE(review): invoked synchronously inside an async method —
            # confirm call_openai_api is not a coroutine function; if it is,
            # this call must be awaited.
            response = self.api_client.call_openai_api(api_params)
            summary = response["choices"][0]["message"]["content"]

            logger.info(f"任务回顾摘要生成完成: parent_task_id={parent_task_id}")
            return summary
        except Exception as e:
            logger.error(f"生成任务回顾摘要失败: {str(e)}", exc_info=True)
            raise ToolError(
                message="任务回顾摘要生成失败",
                context={"parent_task_id": parent_task_id, "error": str(e)}
            ) from e

    def filter_redundant_information(self, information_list: List[str],
                                     similarity_threshold: float = 0.7) -> List[str]:
        """Filter redundant entries from an information list.

        Greedy first-wins pass: an item is dropped when it is too similar
        to any item already kept.

        Args:
            information_list: Information strings to de-duplicate.
            similarity_threshold: Similarity above which an item counts as
                redundant (default 0.7, preserving the original value).

        Returns:
            The de-duplicated list, in original order.
        """
        filtered = []
        for info in information_list:
            # Redundant if too similar to anything already kept.
            is_redundant = False
            for kept in filtered:
                if self._calculate_text_similarity(info, kept) > similarity_threshold:
                    is_redundant = True
                    break

            if not is_redundant:
                filtered.append(info)

        return filtered

    def evaluate_result_quality(self, summary: str, original_query: str) -> Dict[str, Any]:
        """Evaluate the quality of a generated summary.

        Args:
            summary: The generated summary text.
            original_query: The user's original query.

        Returns:
            Dict with per-dimension scores, the weighted overall score and
            a human-readable quality level.
        """
        # Simple heuristic quality metrics.
        coverage_score = self._calculate_coverage(summary, original_query)
        conciseness_score = self._calculate_conciseness(summary)
        accuracy_score = self._estimate_accuracy(summary)

        # Weighted blend: coverage 40%, conciseness 30%, accuracy 30%.
        overall_score = (coverage_score * 0.4 + conciseness_score * 0.3 + accuracy_score * 0.3)

        return {
            "coverage_score": coverage_score,  # does the answer cover the question's main aspects
            "conciseness_score": conciseness_score,  # is it concise
            "accuracy_score": accuracy_score,  # plausibility-based accuracy estimate
            "overall_score": overall_score,  # weighted total
            "quality_level": self._get_quality_level(overall_score)
        }

    def _calculate_coverage(self, summary: str, query: str) -> float:
        """Fraction of query keywords that appear (case-insensitively) in the summary."""
        query_keywords = self._extract_keywords(query)
        if not query_keywords:
            return 0.5  # no keywords extractable: assume medium coverage

        covered = sum(1 for keyword in query_keywords if keyword.lower() in summary.lower())
        return covered / len(query_keywords)

    def _extract_keywords(self, text: str) -> List[str]:
        """Naive keyword extraction: regex tokenization plus a small stop-word filter."""
        # Minimal Chinese stop-word list; a real segmenter could replace this.
        stop_words = {"的", "了", "和", "是", "在", "有", "我", "你", "他", "她", "它", "这", "那", "个", "一", "不", "也", "就", "都", "而", "及", "与", "或", "但", "如果", "因为", "所以", "虽然", "但是"}

        # Tokens are runs of >=2 CJK chars or ASCII letter runs. Uses the
        # module-level `re` (the former redundant function-local import
        # was removed).
        words = re.findall(r'[\u4e00-\u9fa5]{2,}|[a-zA-Z]+', text)
        keywords = [word for word in words if word not in stop_words and len(word) > 1]

        return keywords[:10]  # cap at 10 keywords

    def _calculate_conciseness(self, text: str) -> float:
        """Conciseness score: a moderate length (100-300 chars) is ideal."""
        length = len(text)
        if 100 <= length <= 300:
            return 1.0
        elif length < 100:
            # Too short: scale down, but never below 0.5.
            return max(0.5, length / 100)
        else:
            # Too long: penalize the overshoot, floor at 0.5.
            return max(0.5, 1.0 - (length - 300) / 500)

    def _estimate_accuracy(self, text: str) -> float:
        """Rough accuracy estimate based on surface features of the text."""
        # Hedging vocabulary lowers confidence.
        uncertainty_words = ["可能", "也许", "大概", "应该", "似乎", "好像", "不确定", "不太清楚"]
        uncertainty_count = sum(1 for word in uncertainty_words if word in text)

        # Concrete numbers and explicit conclusions raise confidence.
        has_specific_data = bool(re.search(r'\d+', text))
        has_clear_conclusion = any(word in text for word in ["因此", "所以", "总之", "综上所述", "结论", "表明", "显示"])

        base_score = 0.7
        score = base_score

        # Each hedge word costs 0.05, capped at a 0.3 penalty.
        score -= min(0.3, uncertainty_count * 0.05)

        if has_specific_data:
            score += 0.15
        if has_clear_conclusion:
            score += 0.15

        # Clamp to [0.5, 1.0].
        return max(0.5, min(1.0, score))

    def _get_quality_level(self, score: float) -> str:
        """Map a numeric score to a human-readable quality level."""
        if score >= 0.85:
            return "优秀"
        elif score >= 0.7:
            return "良好"
        elif score >= 0.6:
            return "合格"
        else:
            return "需改进"


# Module-level singleton task reviewer instance.
# NOTE: constructed at import time — this instantiates APIClient() as a
# side effect of importing the module.
task_reviewer = TaskReviewer()


# Manual test entry point.
if __name__ == "__main__":
    import asyncio

    test_logger = Logger.update_context(task_id="reviewer_test", agent_name="task_reviewer")
    test_logger.info("开始测试任务回顾器")

    # Sample sub-task results covering both dict and plain-string payloads.
    sample_results = [
        {
            "sub_task_id": "task_001",
            "status": "completed",
            "result": {"content": "北京今天的天气是晴，气温25-32度，空气质量良好。"},
        },
        {
            "sub_task_id": "task_002",
            "status": "completed",
            "result": "计算结果：3的平方等于9。",
        },
        {
            "sub_task_id": "task_003",
            "status": "completed",
            "result": {"answer": "北京今日天气晴朗，温度范围25到32摄氏度，建议外出做好防晒措施。"},
        },
    ]

    query = "查询北京的天气，然后计算3的平方"

    async def run_summary_checks(analysis_result):
        # Generate the review summary, then score its quality.
        summary = await task_reviewer.generate_review_summary(
            parent_task_id="test_parent_001",
            original_query=query,
            analysis_result=analysis_result,
            sub_task_results=sample_results,
        )
        test_logger.info(f"回顾摘要: {summary}")

        quality = task_reviewer.evaluate_result_quality(summary, query)
        test_logger.info(f"质量评估: {quality}")

    try:
        # 1. Analyze the sub-task results.
        analysis = task_reviewer.analyze_sub_task_results(sample_results)
        test_logger.info(f"子任务分析结果: {analysis}")

        # 2 & 3. Generate the summary and evaluate its quality.
        asyncio.run(run_summary_checks(analysis))

        test_logger.success("任务回顾器测试完成")
    except Exception as e:
        test_logger.error(f"测试失败: {e}")