import os
# Must be set BEFORE all other imports!
os.environ['KMP_DUPLICATE_LIB_OK'] = 'TRUE'  # work around duplicate-OpenMP-runtime crash (libiomp conflict)
os.environ['OMP_NUM_THREADS'] = '1'  # cap OpenMP threads to one
import yaml
from ultralytics import YOLO
from pathlib import Path
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
import json


class YOLOv8Evaluator:
    """Evaluate a trained YOLOv8 model and produce a full report.

    The pipeline runs a baseline validation pass, a per-confidence-threshold
    and per-class analysis, renders plots, and writes JSON/CSV/Markdown
    artifacts into the directory configured at ``paths.runs.eval``.
    """

    def __init__(self, config_path="configs/default.yaml"):
        """Load the YAML config, create the output directory, load the model.

        Args:
            config_path: Path to the project YAML configuration file.
        """
        with open(config_path, 'r', encoding='utf-8') as f:
            self.config = yaml.safe_load(f)

        # Output directory for every evaluation artifact.
        self.eval_output_dir = Path(self.config['paths']['runs']['eval'])
        self.eval_output_dir.mkdir(parents=True, exist_ok=True)

        # Select which checkpoint to evaluate.
        # NOTE(review): when `tuning.resume_from` is set we load
        # `paths.evaluate.model`, otherwise `tuning.path`. A previously
        # commented-out variant had the opposite mapping — confirm this
        # branch matches the intended config semantics.
        model_path = (self.config['paths']['evaluate']['model']
                      if self.config['tuning']['resume_from']
                      else self.config['tuning']['path'])
        self.model = YOLO(model_path)

        # Dataset YAML consumed by ultralytics, plus the ordered class names
        # used for per-class analysis and plot labels.
        self.dataset_yaml = self.config['paths']['processed']['dataset']
        self.class_names = self.config['dataset']['classes']['names']

    def evaluate_model(self):
        """Run the complete evaluation pipeline and persist all results."""
        print(f"🚀 开始评估模型: {self.config['training']['base']['name']}")

        # 1. Baseline validation pass.
        metrics = self._basic_evaluation()

        # 2. Threshold sweep + per-class analysis (mutates `metrics`).
        self._advanced_evaluation(metrics)

        # 3. Render plots.
        self._visualize_results(metrics)

        # 4. Persist JSON/CSV/Markdown artifacts.
        self._save_results(metrics)

        print(f"✅ 评估完成! 结果保存在: {self.eval_output_dir}")

    def _basic_evaluation(self):
        """Run a single validation pass and return the headline metrics.

        Returns:
            dict with mAP50, mAP50-95, precision, recall, f1_score,
            confusion_matrix (numpy array), and inference speed info.
        """
        print("🔍 进行基础评估...")

        # Validate on the validation split.
        results = self.model.val(
            data=self.dataset_yaml,
            batch=self.config['training']['base']['batch'],
            imgsz=self.config['training']['base']['imgsz'],
            conf=0.5,  # default confidence threshold
            iou=0.6,  # default IoU threshold
            split='val',
            name=f"eval_{self.config['training']['base']['name']}",
            save_json=True,
            save_hybrid=True,
            plots=True
        )

        # Extract the headline metrics from the results object.
        metrics = {
            'mAP50': results.box.map50,
            'mAP50-95': results.box.map,
            'precision': results.box.mp,
            'recall': results.box.mr,
            # Epsilon guards against division by zero when P and R are both 0.
            'f1_score': 2 * (results.box.mp * results.box.mr) / (results.box.mp + results.box.mr + 1e-16),
            'confusion_matrix': results.confusion_matrix.matrix,
            'speed': results.speed['inference'],  # inference time only (ms/img)
            'speed_details': results.speed  # full timing breakdown
        }

        # Print the baseline results.
        print("\n📊 基础评估结果:")
        print(f"- mAP50: {metrics['mAP50']:.4f}")
        print(f"- mAP50-95: {metrics['mAP50-95']:.4f}")
        print(f"- 精确度: {metrics['precision']:.4f}")
        print(f"- 召回率: {metrics['recall']:.4f}")
        print(f"- F1分数: {metrics['f1_score']:.4f}")
        print(f"- 推理速度: {metrics['speed']:.2f} ms/img")

        return metrics

    def _advanced_evaluation(self, metrics):
        """Sweep confidence thresholds and evaluate each class separately.

        Adds 'confidence_analysis' and 'class_analysis' DataFrames to
        the `metrics` dict in place.
        """
        print("🔬 进行高级评估...")

        # Performance at several confidence thresholds.
        conf_thresholds = [0.25, 0.5, 0.75]
        conf_results = []

        for conf in conf_thresholds:
            res = self.model.val(
                data=self.dataset_yaml,
                conf=conf,
                split='val',
                plots=False,
                verbose=False
            )
            conf_results.append({
                'confidence': conf,
                'mAP50': res.box.map50,
                'precision': res.box.mp,
                'recall': res.box.mr
            })

        metrics['confidence_analysis'] = pd.DataFrame(conf_results)

        # Per-class performance: one dedicated validation run per class.
        # NOTE(review): this re-runs validation once per class; per-class
        # stats may also be available from a single run — confirm before
        # optimizing, as thresholds/averaging can differ.
        class_results = []
        for i, class_name in enumerate(self.class_names):
            res = self.model.val(
                data=self.dataset_yaml,
                classes=[i],
                split='val',
                plots=False,
                verbose=False
            )
            class_results.append({
                'class': class_name,
                'precision': res.box.mp,
                'recall': res.box.mr,
                'mAP50': res.box.map50,
                'mAP50-95': res.box.map
            })

        metrics['class_analysis'] = pd.DataFrame(class_results)

        # Print the per-class analysis table.
        print("\n📈 类别性能分析:")
        print(metrics['class_analysis'].to_string(index=False))

    def _visualize_results(self, metrics):
        """Render and save the three evaluation charts as PNG files."""
        print("📊 生成可视化图表...")

        plt.style.use('default')
        sns.set_palette("husl")

        # 1. Precision/recall vs. confidence threshold.
        plt.figure(figsize=(10, 6))
        metrics['confidence_analysis'].plot(x='confidence', y=['precision', 'recall'],
                                            kind='line', marker='o', ax=plt.gca())
        plt.title('Precision-Recall vs Confidence Threshold')
        plt.ylabel('Score')
        plt.grid(True)
        plt.savefig(self.eval_output_dir / 'confidence_analysis.png')
        plt.close()

        # 2. Per-class performance bar chart.
        plt.figure(figsize=(12, 6))
        metrics['class_analysis'].plot(x='class', y=['mAP50', 'precision', 'recall'],
                                       kind='bar', ax=plt.gca())
        plt.title('Performance by Class')
        plt.ylabel('Score')
        plt.xticks(rotation=45)
        plt.grid(True, axis='y')
        plt.tight_layout()
        plt.savefig(self.eval_output_dir / 'class_performance.png')
        plt.close()

        # 3. Confusion matrix heatmap.
        plt.figure(figsize=(10, 8))
        sns.heatmap(metrics['confusion_matrix'],
                    annot=True, fmt='g',
                    xticklabels=self.class_names,
                    yticklabels=self.class_names)
        plt.title('Confusion Matrix')
        plt.xlabel('Predicted')
        plt.ylabel('Actual')
        plt.savefig(self.eval_output_dir / 'confusion_matrix.png')
        plt.close()

    def _save_results(self, metrics):
        """Persist metrics as JSON, analyses as CSV, and a Markdown report."""
        print("💾 保存评估结果...")

        # Save headline metrics as JSON. Explicit UTF-8 keeps output stable
        # across platforms; float() guards against numpy scalar values,
        # which the stdlib JSON encoder cannot serialize.
        with open(self.eval_output_dir / 'metrics.json', 'w', encoding='utf-8') as f:
            json.dump({
                'mAP50': float(metrics['mAP50']),
                'mAP50-95': float(metrics['mAP50-95']),
                'precision': float(metrics['precision']),
                'recall': float(metrics['recall']),
                'f1_score': float(metrics['f1_score']),
                'speed': float(metrics['speed'])
            }, f, indent=4)

        # Save the detailed analyses as CSV.
        metrics['confidence_analysis'].to_csv(self.eval_output_dir / 'confidence_analysis.csv', index=False)
        metrics['class_analysis'].to_csv(self.eval_output_dir / 'class_analysis.csv', index=False)

        # Write the Markdown report. The report contains non-ASCII text,
        # so UTF-8 must be forced rather than relying on the locale default.
        with open(self.eval_output_dir / 'report.md', 'w', encoding='utf-8') as f:
            f.write("# YOLOv8 模型评估报告\n\n")
            f.write("## 基本信息\n")
            f.write(f"- 模型名称: {self.config['training']['base']['name']}\n")
            f.write(f"- 数据集: {self.dataset_yaml}\n")
            f.write(f"- 类别: {', '.join(self.class_names)}\n\n")

            f.write("## 关键指标\n")
            f.write(f"- mAP@0.5: {metrics['mAP50']:.4f}\n")
            f.write(f"- mAP@0.5:0.95: {metrics['mAP50-95']:.4f}\n")
            f.write(f"- 精确度: {metrics['precision']:.4f}\n")
            f.write(f"- 召回率: {metrics['recall']:.4f}\n")
            f.write(f"- F1分数: {metrics['f1_score']:.4f}\n")
            f.write(f"- 推理速度: {metrics['speed']:.2f} ms/img\n\n")

            f.write("## 可视化图表\n")
            f.write("![Confidence Analysis](confidence_analysis.png)\n\n")
            f.write("![Class Performance](class_performance.png)\n\n")
            f.write("![Confusion Matrix](confusion_matrix.png)\n")


if __name__ == "__main__":
    # Entry point: build an evaluator with the default config and run
    # the full evaluation pipeline.
    YOLOv8Evaluator().evaluate_model()