import os
import shutil

import yaml
from ultralytics import YOLO
from pathlib import Path
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
import json
import numpy as np
from typing import Dict, Any, Union
import warnings
import torch

# 忽略特定警告
warnings.filterwarnings("ignore", category=UserWarning)


class NumpyEncoder(json.JSONEncoder):
    """Custom JSON encoder that converts NumPy types to native Python.

    Handles ``np.integer``, ``np.floating``, ``np.bool_`` and
    ``np.ndarray`` so that metric dictionaries containing NumPy values
    can be serialized with the standard :mod:`json` module.
    """

    def default(self, obj):
        """Return a JSON-serializable equivalent of *obj*.

        Falls back to the base class (which raises ``TypeError``) for
        anything that is not a recognized NumPy type.
        """
        if isinstance(obj, np.integer):
            return int(obj)
        if isinstance(obj, np.floating):
            return float(obj)
        # json cannot serialize np.bool_ natively; without this branch a
        # NumPy boolean in the metrics dict raises TypeError.
        if isinstance(obj, np.bool_):
            return bool(obj)
        if isinstance(obj, np.ndarray):
            return obj.tolist()
        return super().default(obj)


class YOLOv8Pipeline:
    """Evaluation and export pipeline for a trained YOLOv8 model.

    Responsibilities:
      * load a YAML config and prepare output directories,
      * run validation (overall, per-confidence-threshold, per-class),
      * render plots and write JSON / CSV / Markdown reports,
      * export the model for production deployment.
    """

    def __init__(self, config_path: str = "configs/default.yaml"):
        """
        Initialize the pipeline.

        Args:
            config_path: Path to the YAML configuration file.
        """
        self.config = self._load_config(config_path)
        self.model = None  # lazily created by load_model()
        self.class_names = self.config['dataset']['classes']['names']

        # Create all required output directories up front.
        self._create_dirs()

    def _load_config(self, config_path: str) -> Dict[str, Any]:
        """Load and parse the YAML configuration file."""
        with open(config_path, 'r', encoding='utf-8') as f:
            return yaml.safe_load(f)

    def _create_dirs(self) -> None:
        """Create every output directory referenced by the config."""
        Path(self.config['paths']['root_dir']).mkdir(parents=True, exist_ok=True)

        # NOTE(review): models_dir and export_dir both resolve to
        # paths.runs.export in the config — confirm this duplication is
        # intentional.
        self.models_dir = Path(self.config['paths']['runs']['export'])
        self.eval_dir = Path(self.config['paths']['runs']['eval'])
        self.export_dir = Path(self.config['paths']['runs']['export'])
        for directory in (self.models_dir, self.eval_dir, self.export_dir):
            directory.mkdir(parents=True, exist_ok=True)

    def load_model(self) -> None:
        """Load the trained model weights selected by the config."""
        # NOTE(review): when tuning.resume_from is truthy the
        # paths.evaluate.model checkpoint is used, otherwise tuning.path.
        # This mapping reads as inverted — confirm against the config
        # schema before changing it.
        model_path = (self.config['paths']['evaluate']['model']
                      if self.config['tuning']['resume_from']
                      else self.config['tuning']['path'])
        self.model = YOLO(model_path)
        print(f"✅ Model loaded from {model_path}")

    def evaluate(self) -> Dict[str, Any]:
        """
        Run the full evaluation workflow.

        Returns:
            Dictionary of evaluation metrics (also persisted to eval_dir).
        """
        if not self.model:
            self.load_model()

        print("🚀 Starting evaluation...")

        # Overall metrics on the validation split.
        metrics = self._basic_eval()

        # Per-confidence-threshold and per-class breakdowns.
        self._advanced_eval(metrics)

        # Plots: confidence sweep, class bars, confusion matrix.
        self._visualize(metrics)

        # JSON / CSV / Markdown outputs.
        self._save_results(metrics)

        print(f"✅ Evaluation saved to {self.eval_dir}")
        return metrics

    def _basic_eval(self) -> Dict[str, Any]:
        """Validate once and collect the headline metrics.

        Returns:
            Dict with mAP50, mAP50-95, precision, recall, F1, the raw
            confusion matrix, and per-stage timing in ms/image.
        """
        print("🔍 Running basic evaluation...")
        results = self.model.val(
            data=self.config['paths']['processed']['dataset'],
            batch=self.config['training']['base']['batch'],
            imgsz=self.config['training']['base']['imgsz'],
            conf=0.5,
            iou=0.6,
            split='val',
            name=f"eval_{self.config['training']['base']['name']}",
            save_json=True
        )

        precision = float(results.box.mp)
        recall = float(results.box.mr)
        metrics = {
            'mAP50': float(results.box.map50),
            'mAP50-95': float(results.box.map),
            'precision': precision,
            'recall': recall,
            # Harmonic mean of precision/recall; the epsilon guards
            # against division by zero when both are 0.
            'f1_score': float(2 * (precision * recall) / (precision + recall + 1e-16)),
            'confusion_matrix': results.confusion_matrix.matrix.tolist(),
            'speed': {
                'preprocess': float(results.speed['preprocess']),
                'inference': float(results.speed['inference']),
                'postprocess': float(results.speed['postprocess']),
                'total': float(sum(results.speed.values()))
            }
        }

        print("\n📊 Basic Metrics:")
        for k, v in metrics.items():
            if k != 'confusion_matrix':
                print(f"- {k}: {v:.4f}" if isinstance(v, float) else f"- {k}: {v}")

        return metrics

    def _advanced_eval(self, metrics: Dict[str, Any]) -> None:
        """Append confidence-threshold and per-class analyses to *metrics* in place."""
        print("\n🔬 Running advanced evaluation...")

        # Sweep confidence thresholds to expose the precision/recall trade-off.
        conf_results = []
        for conf in [0.25, 0.5, 0.75]:
            res = self.model.val(
                data=self.config['paths']['processed']['dataset'],
                conf=conf,
                split='val',
                plots=False,
                verbose=False
            )
            conf_results.append({
                'confidence': float(conf),
                'mAP50': float(res.box.map50),
                'precision': float(res.box.mp),
                'recall': float(res.box.mr)
            })
        metrics['confidence_analysis'] = conf_results

        # Validate each class in isolation (one extra val pass per class).
        class_results = []
        for i, name in enumerate(self.class_names):
            res = self.model.val(
                data=self.config['paths']['processed']['dataset'],
                classes=[i],
                split='val',
                plots=False,
                verbose=False
            )
            class_results.append({
                'class': name,
                'precision': float(res.box.mp),
                'recall': float(res.box.mr),
                'mAP50': float(res.box.map50),
                'mAP50-95': float(res.box.map)
            })
        metrics['class_analysis'] = class_results

        print("\n📈 Class Performance:")
        print(pd.DataFrame(metrics['class_analysis']).to_string(index=False))

    def _visualize(self, metrics: Dict[str, Any]) -> None:
        """Render and save all evaluation plots into eval_dir."""
        print("\n📊 Generating visualizations...")

        # 1. Confidence-threshold analysis.
        # Pass figsize directly to DataFrame.plot: pandas creates its own
        # figure when no axes are given, so a preceding plt.figure() call
        # would leak an empty figure and its figsize would be ignored.
        df_conf = pd.DataFrame(metrics['confidence_analysis'])
        df_conf.plot(x='confidence', y=['precision', 'recall'],
                     kind='line', marker='o', figsize=(10, 6))
        plt.title('Precision-Recall vs Confidence Threshold')
        plt.ylabel('Score')
        plt.grid(True)
        plt.savefig(self.eval_dir / 'confidence_analysis.png', bbox_inches='tight')
        plt.close()

        # 2. Per-class performance bars (same figure-leak fix as above).
        df_class = pd.DataFrame(metrics['class_analysis'])
        df_class.plot(x='class', y=['mAP50', 'precision', 'recall'],
                      kind='bar', figsize=(12, 6))
        plt.title('Class Performance Comparison')
        plt.ylabel('Score')
        plt.xticks(rotation=45)
        plt.grid(True, axis='y')
        plt.tight_layout()
        plt.savefig(self.eval_dir / 'class_performance.png')
        plt.close()

        # 3. Confusion-matrix heatmap (seaborn draws on the current axes,
        # so plt.figure() is correct here).
        plt.figure(figsize=(10, 8))
        sns.heatmap(np.array(metrics['confusion_matrix']),
                    annot=True, fmt='g',
                    xticklabels=self.class_names,
                    yticklabels=self.class_names)
        plt.title('Confusion Matrix')
        plt.xlabel('Predicted')
        plt.ylabel('Actual')
        plt.savefig(self.eval_dir / 'confusion_matrix.png', bbox_inches='tight')
        plt.close()

    def _save_results(self, metrics: Dict[str, Any]) -> None:
        """Persist metrics as JSON, CSV tables, and a Markdown report."""
        print("\n💾 Saving results...")

        # JSON metrics; NumpyEncoder handles any stray NumPy scalars/arrays.
        # Explicit UTF-8 avoids platform-default-encoding failures on
        # non-ASCII class names (e.g. on Windows).
        with open(self.eval_dir / 'metrics.json', 'w', encoding='utf-8') as f:
            json.dump(metrics, f, indent=4, cls=NumpyEncoder)

        # CSV tables for the two analyses.
        pd.DataFrame(metrics['confidence_analysis']).to_csv(
            self.eval_dir / 'confidence_analysis.csv', index=False)
        pd.DataFrame(metrics['class_analysis']).to_csv(
            self.eval_dir / 'class_analysis.csv', index=False)

        # Human-readable Markdown report.
        self._generate_report(metrics)

    def _generate_report(self, metrics: Dict[str, Any]) -> None:
        """Write a Markdown summary report to eval_dir/report.md."""
        report = [
            "# YOLOv8 Evaluation Report",
            "## Basic Metrics",
            f"- mAP50: {metrics['mAP50']:.4f}",
            f"- mAP50-95: {metrics['mAP50-95']:.4f}",
            f"- Precision: {metrics['precision']:.4f}",
            f"- Recall: {metrics['recall']:.4f}",
            f"- F1 Score: {metrics['f1_score']:.4f}",
            "\n## Speed Metrics",
            f"- Preprocess: {metrics['speed']['preprocess']:.2f} ms/img",
            f"- Inference: {metrics['speed']['inference']:.2f} ms/img",
            f"- Postprocess: {metrics['speed']['postprocess']:.2f} ms/img",
            f"- Total: {metrics['speed']['total']:.2f} ms/img",
            "\n## Class Performance",
            pd.DataFrame(metrics['class_analysis']).to_markdown(index=False),
            "\n## Confidence Analysis",
            pd.DataFrame(metrics['confidence_analysis']).to_markdown(index=False),
            "\n## Visualizations",
            "![Confidence Analysis](confidence_analysis.png)",
            "![Class Performance](class_performance.png)",
            "![Confusion Matrix](confusion_matrix.png)"
        ]

        # UTF-8 so non-ASCII class names survive on every platform.
        with open(self.eval_dir / 'report.md', 'w', encoding='utf-8') as f:
            f.write("\n".join(report))

    def export_model(self, format: str = 'onnx') -> str:
        """
        Export the model for production use and move it into export_dir.

        Args:
            format: Export format (e.g. 'onnx', 'torchscript').
                    (Parameter name kept for caller compatibility even
                    though it shadows the builtin.)
        Returns:
            Path to the exported model file as a string.
        Raises:
            FileNotFoundError: If the exported file cannot be located.
        """
        if not self.model:
            self.load_model()

        print(f"🛠️ Exporting model to {format.upper()}...")

        # Ultralytics' Model.export() returns the path of the file it
        # wrote; prefer that over guessing the filename.  Fall back to
        # the historical "best.<format>" next to the checkpoint when
        # nothing is returned.
        exported = self.model.export(
            format=format,
            imgsz=self.config['training']['base']['imgsz'],
            opset=12,
            simplify=True,
            dynamic=False,
            device='cpu'
        )
        if exported:
            source_file = Path(exported)
        else:
            original_dir = (Path(self.model.ckpt_path).parent
                            if hasattr(self.model, 'ckpt_path') else Path.cwd())
            source_file = original_dir / f'best.{format}'

        dest_file = self.export_dir / f'model.{format}'

        # Move the artifact into the export directory, replacing any
        # previous export of the same format.
        if source_file.exists():
            if dest_file.exists():
                os.remove(dest_file)
            shutil.move(str(source_file), str(dest_file))
            print(f"✅ Model successfully moved to: {dest_file}")
            return str(dest_file)
        # (Unreachable trailing code that referenced an undefined
        # `export_path` was removed here.)
        raise FileNotFoundError(f"Exported model not found at: {source_file}")


if __name__ == "__main__":
    try:
        pipeline = YOLOv8Pipeline()

        # Step 1: load the trained weights.
        pipeline.load_model()

        # Step 2: optionally run the full evaluation.
        # metrics = pipeline.evaluate()

        # Step 3: export the model for deployment.
        pipeline.export_model('onnx')

    except Exception as err:
        # Surface the failure, then re-raise so the exit code is non-zero.
        print(f"❌ Error: {str(err)}")
        raise