"""
YOLOv8 模型评估脚本 (兼容Ultralytics 8.3.107+)
功能：
1. 加载训练好的模型
2. 在验证集上评估指标(mAP、精度、召回率等)
3. 生成混淆矩阵和PR曲线
4. 可视化检测样例
"""
import os
# Must be set before any other import pulls in an OpenMP-linked library!
os.environ['KMP_DUPLICATE_LIB_OK'] = 'TRUE'  # work around duplicate OpenMP runtime crash
os.environ['OMP_NUM_THREADS'] = '1'  # cap OpenMP threads to avoid oversubscription
# NOTE(review): removed a stray duplicate module docstring that used to sit
# here — the real module docstring is at the top of the file.
import yaml
from pathlib import Path
from ultralytics import YOLO
import logging
import torch

# Configure logging to both a file and the console.
# The log directory must exist first: logging.FileHandler raises
# FileNotFoundError on a fresh checkout where 'logs/' is missing.
os.makedirs('logs', exist_ok=True)
logging.basicConfig(
    level=logging.INFO,
    format='%(asctime)s - %(levelname)s - %(message)s',
    handlers=[logging.FileHandler('logs/eval.log'), logging.StreamHandler()]
)
logger = logging.getLogger(__name__)


class YOLOv8Evaluator:
    """Evaluate a trained YOLOv8 model on its validation split.

    Resolves all paths from a YAML config file, runs ``model.val`` to
    collect detection metrics (mAP, precision, recall), writes a
    plain-text report, and saves annotated prediction samples under
    ``runs/eval``.
    """

    def __init__(self, config_path="configs/default.yaml"):
        # Config must be loaded before paths can be resolved from it.
        self.config = self.load_config(config_path)
        self.setup_paths()
        # Fall back to CPU when no CUDA device is available.
        self.device = 'cuda' if torch.cuda.is_available() else 'cpu'

    def load_config(self, config_path):
        """Safely load and parse the YAML configuration file."""
        try:
            with open(config_path, 'r', encoding='utf-8') as f:
                return yaml.safe_load(f)
        except Exception as e:
            logger.error(f"配置文件加载失败: {str(e)}")
            raise

    def setup_paths(self):
        """Resolve and validate dataset, model, and output paths.

        Raises:
            FileNotFoundError: if the dataset YAML or checkpoint is missing.
        """
        try:
            base_dir = Path(__file__).parent.resolve()

            # Dataset YAML path, resolved relative to this script.
            self.dataset_yaml = str((base_dir / self.config["paths"]["processed"]["dataset"]).resolve())
            if not Path(self.dataset_yaml).exists():
                raise FileNotFoundError(f"数据集配置文件不存在: {self.dataset_yaml}")

            # Best checkpoint from the training run named in the config.
            self.model_path = str(
                (base_dir / "runs" / "train" / self.config['training']['base']['name'] / "weights" / "best.pt").resolve())
            if not Path(self.model_path).exists():
                raise FileNotFoundError(f"模型文件不存在: {self.model_path}")

            # Output directory for reports and visualizations.
            self.output_dir = base_dir / "runs" / "eval"
            self.output_dir.mkdir(parents=True, exist_ok=True)

        except Exception as e:
            logger.error(f"路径设置失败: {str(e)}")
            raise

    def evaluate_model(self):
        """Run validation, persist metrics, and save sample visualizations.

        Returns:
            The ultralytics metrics object produced by ``model.val``.
        """
        try:
            logger.info(f"加载模型: {self.model_path}")
            model = YOLO(self.model_path)

            # Evaluate on the validation split with plots and COCO-style JSON.
            logger.info("开始评估验证集...")
            metrics = model.val(
                data=self.dataset_yaml,
                split='val',
                device=self.device,
                plots=True,
                save_json=True,
                conf=0.5,
                iou=0.6
            )

            # Persist the numeric results to a text report.
            self.save_metrics(metrics)

            # Save a handful of annotated predictions for manual inspection.
            self.visualize_predictions(model)

            return metrics
        except Exception as e:
            logger.error(f"评估过程出错: {str(e)}")
            raise

    def save_metrics(self, metrics):
        """Write global and per-class metrics to ``evaluation_results.txt``."""
        result_file = self.output_dir / "evaluation_results.txt"

        try:
            with open(result_file, 'w', encoding='utf-8') as f:
                f.write("=== 模型评估结果 ===\n")
                f.write(f"模型路径: {self.model_path}\n")
                f.write(f"数据集: {self.dataset_yaml}\n\n")

                f.write("全局指标:\n")
                f.write(f"mAP50: {metrics.box.map50:.4f}\n")
                f.write(f"mAP50-95: {metrics.box.map:.4f}\n")
                f.write(f"平均精确率(mp): {metrics.box.mp:.4f}\n")
                f.write(f"平均召回率(mr): {metrics.box.mr:.4f}\n\n")

                # Per-class breakdown; class names come from the dataset YAML.
                try:
                    with open(self.dataset_yaml, 'r', encoding='utf-8') as yaml_file:
                        class_names = yaml.safe_load(yaml_file)['names']

                    # 'names' may be a list (index order) or a {id: name} dict
                    # depending on how the dataset YAML was written; normalize
                    # to a dict so .items() works in both cases.
                    if isinstance(class_names, list):
                        class_names = dict(enumerate(class_names))

                    f.write("各类别指标:\n")
                    for i, name in class_names.items():
                        f.write(f"{name}:\n")
                        f.write(f"  精确率: {metrics.box.p[i]:.4f}\n")
                        f.write(f"  召回率: {metrics.box.r[i]:.4f}\n")
                        f.write(f"  AP50: {metrics.box.ap50[i]:.4f}\n")
                        f.write(f"  AP50-95: {metrics.box.ap[i]:.4f}\n\n")
                except Exception as e:
                    # Best-effort: a missing 'names' entry must not lose the
                    # already-written global metrics.
                    logger.warning(f"无法读取类别名称: {str(e)}")

            logger.info(f"评估结果已保存到: {result_file}")
        except Exception as e:
            logger.error(f"保存评估结果失败: {str(e)}")
            raise

    def visualize_predictions(self, model, num_samples=8):
        """Save up to ``num_samples`` annotated predictions from the val set."""
        try:
            import cv2

            # Locate the validation image directory from the dataset YAML.
            with open(self.dataset_yaml, 'r', encoding='utf-8') as f:
                dataset_cfg = yaml.safe_load(f)

            val_dir = Path(dataset_cfg['path']) / dataset_cfg['val']
            # Only pick actual image files: the directory may also contain
            # label/cache artifacts that a bare glob('*.*') would match.
            image_exts = {'.jpg', '.jpeg', '.png', '.bmp', '.tif', '.tiff', '.webp'}
            val_images = [p for p in sorted(val_dir.glob('*.*'))
                          if p.suffix.lower() in image_exts][:num_samples]

            if not val_images:
                logger.warning(f"验证集无图像: {val_dir}")
                return

            vis_dir = self.output_dir / "visualizations"
            vis_dir.mkdir(exist_ok=True)

            logger.info(f"生成可视化样例...")
            for img_path in val_images:
                try:
                    results = model.predict(str(img_path), save=False)
                    res_plotted = results[0].plot()
                    output_path = str(vis_dir / img_path.name)
                    cv2.imwrite(output_path, res_plotted)
                except Exception as e:
                    # Skip the failing image; one bad file should not abort
                    # the whole visualization pass.
                    logger.warning(f"处理图像失败 {img_path}: {str(e)}")
                    continue

            logger.info(f"可视化结果保存到: {vis_dir}")
        except Exception as e:
            logger.error(f"可视化过程出错: {str(e)}")
            raise


if __name__ == "__main__":
    try:
        logger.info("=" * 50)
        logger.info("YOLOv8 模型评估开始")
        logger.info("=" * 50)

        evaluator = YOLOv8Evaluator()
        metrics = evaluator.evaluate_model()

        logger.info("\n评估指标:")
        logger.info(f"mAP50: {metrics.box.map50:.4f}")
        logger.info(f"mAP50-95: {metrics.box.map:.4f}")
        logger.info(f"平均精确率: {metrics.box.mp:.4f}")
        logger.info(f"平均召回率: {metrics.box.mr:.4f}")

        logger.info("=" * 50)
        logger.info("评估完成！结果保存在 runs/eval/ 目录")
        logger.info("=" * 50)
    except Exception as e:
        # logger.exception records the traceback in the log file; plain
        # logger.error at this top-level boundary would lose it.
        logger.exception(f"评估脚本执行失败: {str(e)}")
        # raise SystemExit rather than calling exit(): the latter is a
        # site-module convenience that is not guaranteed to exist (e.g.
        # when the interpreter runs with -S or the script is frozen).
        raise SystemExit(1)