"""
模型评估模块
用于评估学生模型的性能，包括生成质量、与教师模型的对比和性能基准测试
"""

import torch
import json
import time
import psutil
import numpy as np
from typing import Dict, List, Any, Tuple
from pathlib import Path
from tqdm import tqdm
from sklearn.metrics import accuracy_score, precision_recall_fscore_support

class ModelEvaluator:
    """Evaluate a distilled student model.

    Covers three axes: generation quality, agreement with the teacher model,
    and inference performance (latency / memory / throughput). Results are
    accumulated in ``self.evaluation_results`` and written as JSON reports
    under ``output_dir``.
    """

    def __init__(self,
                 student_model: torch.nn.Module,
                 teacher_model: Any,
                 test_data: List[Dict[str, Any]],
                 output_dir: str = "训练示例/评估结果"):
        """
        Initialize the evaluator.

        Args:
            student_model: student model; must expose a ``generate`` method
            teacher_model: teacher model; must expose a ``generate`` method
            test_data: test samples; each is expected to carry the keys
                "input", "knowledge_points" and "difficulty"
            output_dir: report output directory (created if missing)
        """
        self.student = student_model
        self.teacher = teacher_model
        self.test_data = test_data
        self.output_dir = Path(output_dir)
        self.output_dir.mkdir(parents=True, exist_ok=True)

        # Accumulated results, filled in by evaluate_quality(),
        # compare_with_teacher() and benchmark_performance().
        self.evaluation_results = {
            "quality_metrics": {},
            "comparison_metrics": {},
            "performance_metrics": {}
        }

        # First few generated samples, kept for manual inspection.
        self.sample_outputs = []

        # Current training hyper-parameters read by get_optimization_params().
        # BUGFIX: these were previously never initialized, so calling
        # get_optimization_params() always raised AttributeError. Callers
        # should overwrite them with the real training configuration; the
        # defaults below are placeholders — TODO confirm against the trainer.
        self.current_epochs = 3
        self.current_lr = 5e-5
        self.current_batch_size = 16
        self.current_temp = 2.0

    def evaluate_quality(self, num_samples: int = 100) -> Dict[str, float]:
        """
        Evaluate the quality of generated questions.

        Args:
            num_samples: maximum number of samples to evaluate
        Returns:
            Dict with ``format_accuracy``, ``knowledge_coverage`` and
            ``difficulty_accuracy``, each a ratio in [0, 1].
        """
        print("开始评估生成质量...")

        quality_metrics = {
            "format_accuracy": 0.0,     # fraction with all required fields
            "knowledge_coverage": 0.0,  # fraction covering required knowledge points
            "difficulty_accuracy": 0.0, # fraction matching the target difficulty
        }

        samples = self.test_data[:num_samples]
        if not samples:
            # Nothing to evaluate: record and return all-zero metrics
            # instead of dividing by zero below.
            self.evaluation_results["quality_metrics"] = quality_metrics
            return quality_metrics

        for sample in tqdm(samples, desc="评估生成质量"):
            # Generate a question with the student model.
            student_output = self.student.generate(sample["input"])

            if self._check_format(student_output):
                quality_metrics["format_accuracy"] += 1

            if self._check_knowledge_coverage(student_output, sample["knowledge_points"]):
                quality_metrics["knowledge_coverage"] += 1

            if self._check_difficulty(student_output, sample["difficulty"]):
                quality_metrics["difficulty_accuracy"] += 1

            # Keep the first 10 examples for the sample-output report.
            if len(self.sample_outputs) < 10:
                self.sample_outputs.append({
                    "input": sample,
                    "output": student_output
                })

        # BUGFIX: average over the number of samples actually evaluated, not
        # the requested num_samples (test_data may be shorter than requested).
        for metric in quality_metrics:
            quality_metrics[metric] /= len(samples)

        self.evaluation_results["quality_metrics"] = quality_metrics
        return quality_metrics

    def compare_with_teacher(self, num_samples: int = 100) -> Dict[str, float]:
        """
        Compare the student's outputs against the teacher model's.

        Args:
            num_samples: maximum number of samples to compare
        Returns:
            Dict with ``content_similarity``, ``answer_accuracy`` and
            ``explanation_quality`` averages.
        """
        print("开始与教师模型对比...")

        comparison_metrics = {
            "content_similarity": 0.0,   # average text similarity
            "answer_accuracy": 0.0,      # fraction with matching answers
            "explanation_quality": 0.0,  # average explanation score
        }

        samples = self.test_data[:num_samples]
        if not samples:
            # Nothing to compare: avoid a division by zero below.
            self.evaluation_results["comparison_metrics"] = comparison_metrics
            return comparison_metrics

        for sample in tqdm(samples, desc="模型对比"):
            # Run both models on the same input.
            teacher_output = self.teacher.generate(sample["input"])
            student_output = self.student.generate(sample["input"])

            # Text similarity between the two outputs.
            similarity = self._calculate_similarity(teacher_output, student_output)
            comparison_metrics["content_similarity"] += similarity

            # Exact answer agreement.
            if self._check_answer_consistency(teacher_output, student_output):
                comparison_metrics["answer_accuracy"] += 1

            # Quality of the student's explanation relative to the teacher's.
            explanation_score = self._evaluate_explanation(student_output, teacher_output)
            comparison_metrics["explanation_quality"] += explanation_score

        # BUGFIX: average over the samples actually compared, not the
        # requested num_samples.
        for metric in comparison_metrics:
            comparison_metrics[metric] /= len(samples)

        self.evaluation_results["comparison_metrics"] = comparison_metrics
        return comparison_metrics

    def benchmark_performance(self, num_iterations: int = 100) -> Dict[str, float]:
        """
        Benchmark inference performance of the student model.

        Args:
            num_iterations: number of timed generate() calls
        Returns:
            Dict with ``average_latency`` (ms), ``memory_usage`` (MB) and
            ``throughput`` (items/second).
        Raises:
            ValueError: if ``test_data`` is empty or ``num_iterations <= 0``.
        """
        # Guard against an opaque IndexError / NaN result below.
        if not self.test_data:
            raise ValueError("test_data is empty; nothing to benchmark")
        if num_iterations <= 0:
            raise ValueError("num_iterations must be positive")

        print("开始性能基准测试...")

        performance_metrics = {
            "average_latency": 0.0,  # average latency (ms)
            "memory_usage": 0.0,     # resident memory (MB)
            "throughput": 0.0,       # items per second
        }

        latencies = []
        # Use the same sample for every iteration (hoisted out of the loop —
        # it is loop-invariant).
        sample = self.test_data[0]

        # perf_counter() is monotonic and high-resolution, unlike time.time(),
        # so it is the right clock for latency measurement.
        bench_start = time.perf_counter()
        for _ in tqdm(range(num_iterations), desc="性能测试"):
            start = time.perf_counter()
            _ = self.student.generate(sample["input"])
            latencies.append((time.perf_counter() - start) * 1000)  # ms

        total_time = time.perf_counter() - bench_start

        performance_metrics["average_latency"] = float(np.mean(latencies))
        performance_metrics["throughput"] = num_iterations / total_time
        # RSS of the current process, converted bytes -> MB.
        performance_metrics["memory_usage"] = psutil.Process().memory_info().rss / 1024 / 1024

        self.evaluation_results["performance_metrics"] = performance_metrics
        return performance_metrics

    def save_results(self) -> None:
        """Write the evaluation report, benchmark report and sample outputs
        as JSON files under ``output_dir``."""
        # Full evaluation report.
        report_path = self.output_dir / "evaluation_report.json"
        with open(report_path, "w", encoding="utf-8") as f:
            json.dump(self.evaluation_results, f, indent=2, ensure_ascii=False)

        # Performance-only report (duplicated for convenience).
        benchmark_path = self.output_dir / "benchmark_results.json"
        with open(benchmark_path, "w", encoding="utf-8") as f:
            json.dump(self.evaluation_results["performance_metrics"], f, indent=2, ensure_ascii=False)

        # Example generations collected during evaluate_quality().
        samples_path = self.output_dir / "sample_outputs.json"
        with open(samples_path, "w", encoding="utf-8") as f:
            json.dump(self.sample_outputs, f, indent=2, ensure_ascii=False)

    def _check_format(self, output: Dict[str, Any]) -> bool:
        """Return True if the output carries all required fields
        (question / answer / explanation / knowledge points)."""
        required_fields = ['题目', '答案', '解析', '知识点']
        return all(field in output for field in required_fields)

    def _check_knowledge_coverage(self, output: Dict[str, Any], required_points: List[str]) -> bool:
        """Return True if every required knowledge point appears in the
        output's semicolon-separated knowledge-point field."""
        # Assumes the field is a ';'-joined string — TODO confirm with the
        # generation format.
        output_points = output.get('知识点', '').split(';')
        return all(point in output_points for point in required_points)

    def _check_difficulty(self, output: Dict[str, Any], target_difficulty: int) -> bool:
        """Return True if the output matches the target difficulty."""
        return True  # TODO: implement a real difficulty check

    def _calculate_similarity(self, output1: Dict[str, Any], output2: Dict[str, Any]) -> float:
        """Return a [0, 1] similarity ratio between the question+answer text
        of the two outputs (difflib sequence matching)."""
        from difflib import SequenceMatcher

        text1 = output1.get('题目', '') + output1.get('答案', '')
        text2 = output2.get('题目', '') + output2.get('答案', '')

        return SequenceMatcher(None, text1, text2).ratio()

    def _check_answer_consistency(self, output1: Dict[str, Any], output2: Dict[str, Any]) -> bool:
        """Return True if the two outputs have exactly the same answer."""
        return output1.get('答案', '') == output2.get('答案', '')

    def _evaluate_explanation(self, student_output: Dict[str, Any], teacher_output: Dict[str, Any]) -> float:
        """Score the student's explanation against the teacher's."""
        return 0.8  # TODO: implement a real explanation-quality score

    def analyze_and_suggest(self) -> Dict[str, Any]:
        """
        Analyze the collected evaluation results and produce tuning advice.
        See the "Evaluation & Optimization" chapter of readMe.md for the
        detailed strategies.

        Returns:
            Dict of suggestion lists, also written to
            ``optimization_suggestions.json``.
        """
        suggestions = {
            "quality_suggestions": [],
            "performance_suggestions": [],
            "model_suggestions": []
        }

        # Quality metrics: format and difficulty thresholds.
        quality_metrics = self.evaluation_results.get("quality_metrics", {})
        if quality_metrics.get("format_accuracy", 1.0) < 0.95:
            suggestions["quality_suggestions"].append({
                "issue": "格式准确率低于95%",
                "suggestions": [
                    "检查模型输出的格式控制",
                    "在提示模板中强化格式要求",
                    "增加格式相关的训练样本"
                ]
            })

        if quality_metrics.get("difficulty_accuracy", 1.0) < 0.9:
            suggestions["quality_suggestions"].append({
                "issue": "难度控制不准确",
                "suggestions": [
                    "调整教师模型的temperature参数（当前值: 0.7）",
                    "在提示中更明确地指定难度要求",
                    "增加特定难度等级的训练样本",
                    "考虑在损失函数中加入难度控制的权重"
                ],
                "parameter_adjustments": {
                    "temperature": "降低至0.5以提高稳定性",
                    "difficulty_weight": "在损失函数中增加难度项权重"
                }
            })

        # Performance metrics: flag latency above 100 ms.
        performance_metrics = self.evaluation_results.get("performance_metrics", {})
        if performance_metrics.get("average_latency", 0) > 100:
            suggestions["performance_suggestions"].append({
                "issue": "推理延迟过高",
                "suggestions": [
                    "考虑进一步模型量化",
                    "减少模型层数",
                    "使用更激进的模型剪枝",
                    "优化推理引擎配置"
                ],
                "parameter_adjustments": {
                    "num_layers": "考虑从6层减少到4层",
                    "hidden_size": "考虑从512减少到384",
                    "quantization": "尝试int8量化"
                }
            })

        # Teacher-comparison metrics: flag answer accuracy below 90%.
        comparison_metrics = self.evaluation_results.get("comparison_metrics", {})
        if comparison_metrics.get("answer_accuracy", 1.0) < 0.9:
            suggestions["model_suggestions"].append({
                "issue": "答案准确率低于90%",
                "suggestions": [
                    "增加训练轮次",
                    "调整知识蒸馏温度",
                    "平衡软标签和硬标签的权重",
                    "增加训练数据量"
                ],
                "parameter_adjustments": {
                    "num_epochs": "增加50%的训练轮次",
                    "distillation_temperature": "提高到2.5-3.0",
                    "alpha": "调整软标签权重到0.7"
                }
            })

        # Persist the suggestions alongside the other reports.
        suggestion_path = self.output_dir / "optimization_suggestions.json"
        with open(suggestion_path, "w", encoding="utf-8") as f:
            json.dump(suggestions, f, indent=2, ensure_ascii=False)

        return suggestions

    def get_optimization_params(self) -> Dict[str, Any]:
        """
        Derive an adjusted parameter configuration from the evaluation
        results. See the "Evaluation & Optimization" chapter of readMe.md
        for the detailed strategies.

        Returns:
            Dict with ``training`` and ``model`` sections, also written to
            ``optimized_parameters.json``.
        """
        # Baseline configuration. The current_* attributes are initialized in
        # __init__ (previously they were missing, which crashed this method).
        # NOTE(review): assumes a GPT-2-style config exposing n_layer / n_embd /
        # n_head on the student model — confirm against the model class.
        optimized_params = {
            "training": {
                "num_epochs": self.current_epochs,
                "learning_rate": self.current_lr,
                "batch_size": self.current_batch_size,
                "distillation_temperature": self.current_temp
            },
            "model": {
                "num_layers": self.student.config.n_layer,
                "hidden_size": self.student.config.n_embd,
                "num_attention_heads": self.student.config.n_head
            }
        }

        quality_metrics = self.evaluation_results.get("quality_metrics", {})
        comparison_metrics = self.evaluation_results.get("comparison_metrics", {})

        # Low answer accuracy -> train longer with softer distillation.
        if comparison_metrics.get("answer_accuracy", 1.0) < 0.9:
            optimized_params["training"].update({
                "num_epochs": int(self.current_epochs * 1.5),            # +50% epochs
                "distillation_temperature": min(3.0, self.current_temp * 1.2),  # soften logits
                "alpha": 0.7  # weight soft labels more heavily
            })

        # Poor difficulty control -> grow model capacity (capped).
        if quality_metrics.get("difficulty_accuracy", 1.0) < 0.9:
            optimized_params["model"].update({
                "hidden_size": min(768, int(self.student.config.n_embd * 1.2)),
                "num_attention_heads": min(12, self.student.config.n_head + 2)
            })

        # Persist the proposed configuration.
        params_path = self.output_dir / "optimized_parameters.json"
        with open(params_path, "w", encoding="utf-8") as f:
            json.dump(optimized_params, f, indent=2, ensure_ascii=False)

        return optimized_params