"""
传统方法评估脚本 - 与YOLO标注对比
计算IoU、精确率、召回率等指标
"""

import cv2
import numpy as np
from pathlib import Path
import json
from typing import List, Dict, Tuple
import matplotlib.pyplot as plt
from traditional_damage_detection import *


def read_yolo_labels(label_path, img_width, img_height):
    """Parse a YOLO-format label file into a list of annotation dicts.

    Each valid line has the form: ``class_id x_center y_center width height``
    with normalized coordinates. Lines with fewer than 5 fields (blanks,
    malformed rows) are skipped silently.

    NOTE(review): img_width / img_height are accepted for interface
    compatibility but are not used here — boxes stay normalized.

    Returns:
        list of {'class': int, 'bbox': [x_center, y_center, width, height]}.
        An empty list when the label file does not exist.
    """
    if not Path(label_path).exists():
        return []

    annotations = []
    with open(label_path, 'r') as f:
        for raw_line in f:
            fields = raw_line.strip().split()
            if len(fields) < 5:
                continue
            annotations.append({
                'class': int(fields[0]),
                'bbox': [float(v) for v in fields[1:5]],
            })

    return annotations


def calculate_iou(bbox1, bbox2):
    """计算两个边界框的IoU (YOLO格式: [x_center, y_center, width, height])"""
    # 转换为 [x1, y1, x2, y2] 格式
    x1_1 = bbox1[0] - bbox1[2] / 2
    y1_1 = bbox1[1] - bbox1[3] / 2
    x2_1 = bbox1[0] + bbox1[2] / 2
    y2_1 = bbox1[1] + bbox1[3] / 2

    x1_2 = bbox2[0] - bbox2[2] / 2
    y1_2 = bbox2[1] - bbox2[3] / 2
    x2_2 = bbox2[0] + bbox2[2] / 2
    y2_2 = bbox2[1] + bbox2[3] / 2

    # 计算交集
    x1_i = max(x1_1, x1_2)
    y1_i = max(y1_1, y1_2)
    x2_i = min(x2_1, x2_2)
    y2_i = min(y2_1, y2_2)

    if x2_i < x1_i or y2_i < y1_i:
        return 0.0

    intersection = (x2_i - x1_i) * (y2_i - y1_i)

    # 计算并集
    area1 = bbox1[2] * bbox1[3]
    area2 = bbox2[2] * bbox2[3]
    union = area1 + area2 - intersection

    return intersection / union if union > 0 else 0.0


def match_detections(ground_truth, predictions, iou_threshold=0.5):
    """Greedily pair ground-truth boxes with predicted boxes.

    Ground truths are processed in order; each one claims the not-yet-matched
    prediction with the highest IoU at or above ``iou_threshold``.

    Returns:
        (matches, matched_gt, matched_pred) — ``matches`` is a list of dicts
        holding the paired indices, their IoU and both classes; the two sets
        contain the ground-truth / prediction indices that were paired.
    """
    matches = []
    matched_gt, matched_pred = set(), set()

    for gi, gt in enumerate(ground_truth):
        top_iou, top_pj = 0, -1

        for pj, pred in enumerate(predictions):
            # Each prediction may be consumed by at most one ground truth.
            if pj in matched_pred:
                continue

            overlap = calculate_iou(gt['bbox'], pred['bbox'])

            if overlap >= iou_threshold and overlap > top_iou:
                top_iou, top_pj = overlap, pj

        if top_pj >= 0:
            matched_gt.add(gi)
            matched_pred.add(top_pj)
            matches.append({
                'gt_idx': gi,
                'pred_idx': top_pj,
                'iou': top_iou,
                'gt_class': gt['class'],
                'pred_class': predictions[top_pj]['class'],
            })

    return matches, matched_gt, matched_pred


def calculate_metrics(ground_truth, predictions, iou_threshold=0.5):
    """Compute detection metrics for one image.

    Matches predictions to ground truths (greedy, by IoU), then derives
    precision / recall / F1, classification accuracy over the matched pairs,
    mean IoU of the matches and a class confusion matrix.

    Args:
        ground_truth: list of {'class': int, 'bbox': [...]} annotations.
        predictions: list of {'class': int, 'bbox': [...]} detections.
        iou_threshold: minimum IoU for a prediction to count as a match.

    Returns:
        dict with precision/recall/f1_score/class_accuracy/avg_iou, the raw
        TP/FP/FN counts, the confusion matrix (nested lists, row = true
        class, column = predicted class) and the number of matches.
    """
    matches, matched_gt, matched_pred = match_detections(ground_truth, predictions, iou_threshold)

    # Counting: every match is a TP; unmatched predictions are FPs,
    # unmatched ground truths are FNs.
    tp = len(matches)
    fp = len(predictions) - len(matched_pred)
    fn = len(ground_truth) - len(matched_gt)

    precision = tp / (tp + fp) if (tp + fp) > 0 else 0
    recall = tp / (tp + fn) if (tp + fn) > 0 else 0
    f1_score = 2 * (precision * recall) / (precision + recall) if (precision + recall) > 0 else 0

    # Fraction of matched pairs whose predicted class equals the true class.
    correct_class = sum(1 for m in matches if m['gt_class'] == m['pred_class'])
    class_accuracy = correct_class / tp if tp > 0 else 0

    # Mean IoU over matched pairs only.
    avg_iou = np.mean([m['iou'] for m in matches]) if matches else 0

    # Confusion matrix: at least 3x3 (the expected damage classes), but grown
    # to fit larger class ids — the previous hard-coded 3x3 raised IndexError
    # whenever a class id >= 3 appeared in the labels or detections.
    num_classes = 3
    for m in matches:
        num_classes = max(num_classes, m['gt_class'] + 1, m['pred_class'] + 1)
    confusion_matrix = np.zeros((num_classes, num_classes), dtype=int)
    for m in matches:
        confusion_matrix[m['gt_class'], m['pred_class']] += 1

    return {
        'precision': precision,
        'recall': recall,
        'f1_score': f1_score,
        'class_accuracy': class_accuracy,
        'avg_iou': avg_iou,
        'true_positives': tp,
        'false_positives': fp,
        'false_negatives': fn,
        'confusion_matrix': confusion_matrix.tolist(),
        'num_matches': len(matches)
    }


def evaluate_single_image(image_path, label_path, detector, detector_name):
    """Run one detector on one image and score it against its YOLO labels.

    Returns:
        dict with the image name, detector name, computed metrics and the
        ground-truth / prediction counts — or None when the image file
        cannot be read.
    """
    # Image is loaded only to recover its pixel dimensions for label parsing.
    img = cv2.imread(str(image_path))
    if img is None:
        print(f"无法读取图像: {image_path}")
        return None

    height, width = img.shape[:2]

    gts = read_yolo_labels(label_path, width, height)
    preds = detector.detect(image_path)

    return {
        'image': Path(image_path).name,
        'detector': detector_name,
        'metrics': calculate_metrics(gts, preds),
        'num_gt': len(gts),
        'num_pred': len(preds),
    }


def evaluate_dataset(image_dir, label_dir, output_dir='evaluation_results'):
    """Evaluate every traditional detection method over an annotated dataset.

    For each image that has a matching YOLO label file, runs all six
    detectors, prints per-image and averaged metrics, writes the full
    results to '<output_dir>/evaluation_results.json' and saves comparison
    figures.

    Args:
        image_dir: directory containing .jpg/.jpeg/.png images.
        label_dir: directory of YOLO .txt files named after each image stem.
        output_dir: destination for the JSON results and plots (created,
            including parents, if missing).

    Returns:
        (summary, all_results) — averaged metrics keyed by detector name,
        and the per-image result lists keyed by detector name.
    """
    image_dir = Path(image_dir)
    label_dir = Path(label_dir)
    output_dir = Path(output_dir)
    # parents=True: a nested output path (e.g. 'runs/eval') previously raised
    # FileNotFoundError with plain mkdir(exist_ok=True).
    output_dir.mkdir(parents=True, exist_ok=True)

    # Collect images; sorted() makes the processing (and printed) order
    # deterministic across runs and filesystems. '.jpeg' was silently
    # skipped before and is now accepted as well.
    image_paths = sorted(
        list(image_dir.glob('*.jpg'))
        + list(image_dir.glob('*.jpeg'))
        + list(image_dir.glob('*.png'))
    )

    # One instance of each traditional method under comparison.
    detectors = {
        'EdgeContour': Method1_EdgeContourDetection(),
        'ColorSegmentation': Method2_ColorSegmentation(),
        'TextureAnalysis': Method3_TextureAnalysis(),
        'MorphologicalOps': Method4_MorphologicalOperations(),
        'Watershed': Method5_WatershedSegmentation(),
        'HybridMethod': Method6_HybridMethod()
    }

    # Per-detector lists of per-image results.
    all_results = {name: [] for name in detectors.keys()}

    print(f"\n开始评估 {len(image_paths)} 张图像...")
    print("=" * 80)

    for img_path in image_paths:
        # The label file shares the image stem, with a .txt extension.
        label_path = label_dir / (img_path.stem + '.txt')

        if not label_path.exists():
            print(f"警告: 未找到标注文件 {label_path}")
            continue

        print(f"\n处理: {img_path.name}")

        for detector_name, detector in detectors.items():
            result = evaluate_single_image(img_path, label_path, detector, detector_name)
            if result:
                all_results[detector_name].append(result)

                # Brief one-line summary for this detector on this image.
                m = result['metrics']
                print(f"  {detector_name:20s} - P: {m['precision']:.3f}, "
                      f"R: {m['recall']:.3f}, F1: {m['f1_score']:.3f}, "
                      f"IoU: {m['avg_iou']:.3f}")

    # Average the per-image metrics per detector.
    print("\n" + "=" * 80)
    print("平均指标汇总:")
    print("=" * 80)

    summary = {}
    for detector_name, results in all_results.items():
        if not results:
            continue

        avg_metrics = {
            'precision': np.mean([r['metrics']['precision'] for r in results]),
            'recall': np.mean([r['metrics']['recall'] for r in results]),
            'f1_score': np.mean([r['metrics']['f1_score'] for r in results]),
            'class_accuracy': np.mean([r['metrics']['class_accuracy'] for r in results]),
            'avg_iou': np.mean([r['metrics']['avg_iou'] for r in results]),
            'total_tp': sum([r['metrics']['true_positives'] for r in results]),
            'total_fp': sum([r['metrics']['false_positives'] for r in results]),
            'total_fn': sum([r['metrics']['false_negatives'] for r in results])
        }

        summary[detector_name] = avg_metrics

        print(f"\n{detector_name}:")
        print(f"  精确率 (Precision):    {avg_metrics['precision']:.4f}")
        print(f"  召回率 (Recall):       {avg_metrics['recall']:.4f}")
        print(f"  F1分数:                {avg_metrics['f1_score']:.4f}")
        print(f"  类别准确率:            {avg_metrics['class_accuracy']:.4f}")
        print(f"  平均IoU:               {avg_metrics['avg_iou']:.4f}")
        print(f"  总TP/FP/FN:            {avg_metrics['total_tp']}/{avg_metrics['total_fp']}/{avg_metrics['total_fn']}")

    # Persist the full results for offline analysis.
    # (np.mean returns np.float64, a float subclass, so json can encode it.)
    output_json = output_dir / 'evaluation_results.json'
    with open(output_json, 'w', encoding='utf-8') as f:
        json.dump({
            'summary': summary,
            'detailed_results': all_results
        }, f, indent=2, ensure_ascii=False)

    print(f"\n详细结果已保存至: {output_json}")

    # Bar-chart and radar-chart comparison figures.
    visualize_comparison(summary, output_dir)

    return summary, all_results


def visualize_comparison(summary, output_dir):
    """Render bar-chart and radar-chart comparisons of the averaged metrics.

    Saves 'methods_comparison.png' and 'radar_comparison.png' under
    ``output_dir``. Does nothing when ``summary`` is empty.
    """
    if not summary:
        return

    method_names = list(summary)
    metric_keys = ['precision', 'recall', 'f1_score', 'avg_iou']
    metric_labels = ['精确率', '召回率', 'F1分数', '平均IoU']

    # --- 2x2 grid of bar charts, one panel per metric ---
    fig, axes = plt.subplots(2, 2, figsize=(15, 12))
    fig.suptitle('传统方法性能对比', fontsize=16, fontweight='bold')

    for ax, key, label in zip(axes.flat, metric_keys, metric_labels):
        heights = [summary[name][key] for name in method_names]
        bars = ax.bar(range(len(method_names)), heights, color='steelblue', alpha=0.8)

        # Annotate each bar with its numeric value.
        for bar, value in zip(bars, heights):
            ax.text(bar.get_x() + bar.get_width() / 2., bar.get_height(),
                    f'{value:.3f}',
                    ha='center', va='bottom', fontsize=10)

        ax.set_xlabel('方法', fontsize=12)
        ax.set_ylabel(label, fontsize=12)
        ax.set_title(f'{label}对比', fontsize=13, fontweight='bold')
        ax.set_xticks(range(len(method_names)))
        ax.set_xticklabels(method_names, rotation=45, ha='right')
        ax.set_ylim(0, 1.0)
        ax.grid(axis='y', alpha=0.3)

    plt.tight_layout()
    bar_path = Path(output_dir) / 'methods_comparison.png'
    plt.savefig(bar_path, dpi=300, bbox_inches='tight')
    print(f"\n对比图已保存至: {bar_path}")
    plt.close()

    # --- radar chart overlaying all methods ---
    fig, ax = plt.subplots(figsize=(10, 10), subplot_kw=dict(projection='polar'))

    spokes = np.linspace(0, 2 * np.pi, len(metric_keys), endpoint=False).tolist()
    spokes += spokes[:1]  # repeat the first angle to close the polygon

    for name in method_names:
        values = [summary[name][key] for key in metric_keys]
        values += values[:1]  # close the polygon
        ax.plot(spokes, values, 'o-', linewidth=2, label=name)
        ax.fill(spokes, values, alpha=0.15)

    ax.set_xticks(spokes[:-1])
    ax.set_xticklabels(metric_labels, fontsize=11)
    ax.set_ylim(0, 1)
    ax.set_title('方法性能雷达图', fontsize=14, fontweight='bold', pad=20)
    ax.legend(loc='upper right', bbox_to_anchor=(1.3, 1.1))
    ax.grid(True)

    radar_path = Path(output_dir) / 'radar_comparison.png'
    plt.savefig(radar_path, dpi=300, bbox_inches='tight')
    print(f"雷达图已保存至: {radar_path}")
    plt.close()


def analyze_confusion_matrices(all_results, output_dir):
    """Aggregate per-image confusion matrices per detector and save heatmaps.

    Writes one 'confusion_matrix_<detector>.png' per detector that produced
    results. Rows are true classes, columns are predicted classes.
    """
    output_dir = Path(output_dir)

    class_names = ['轻微', '中度', '严重']
    n = len(class_names)

    for detector_name, results in all_results.items():
        if not results:
            continue

        # Sum the per-image 3x3 confusion matrices into one aggregate.
        total_cm = np.zeros((3, 3), dtype=int)
        for item in results:
            total_cm += np.array(item['metrics']['confusion_matrix'])

        # Heatmap of the aggregate matrix.
        fig, ax = plt.subplots(figsize=(8, 7))
        im = ax.imshow(total_cm, cmap='Blues')

        colorbar = ax.figure.colorbar(im, ax=ax)
        colorbar.ax.set_ylabel('数量', rotation=-90, va="bottom", fontsize=11)

        # Class names on both axes.
        ax.set_xticks(np.arange(n))
        ax.set_yticks(np.arange(n))
        ax.set_xticklabels(class_names)
        ax.set_yticklabels(class_names)

        ax.set_xlabel('预测类别', fontsize=12)
        ax.set_ylabel('真实类别', fontsize=12)
        ax.set_title(f'{detector_name} - 混淆矩阵', fontsize=13, fontweight='bold')

        # Print the count inside each cell.
        for row in range(n):
            for col in range(n):
                ax.text(col, row, total_cm[row, col],
                        ha="center", va="center", color="black", fontsize=12)

        plt.tight_layout()
        out_path = output_dir / f'confusion_matrix_{detector_name}.png'
        plt.savefig(out_path, dpi=300, bbox_inches='tight')
        plt.close()

        print(f"{detector_name} 混淆矩阵已保存至: {out_path}")


if __name__ == "__main__":
    # Entry point: run the full evaluation when the configured directories exist.
    print("=" * 80)
    print("传统方法评估系统")
    print("=" * 80)

    # Paths to configure before running (placeholders by default).
    IMAGE_DIR = "path/to/your/images"  # replace with your image directory
    LABEL_DIR = "path/to/your/labels"  # replace with your YOLO label directory
    OUTPUT_DIR = "evaluation_results"

    # Only proceed when both input directories actually exist.
    if Path(IMAGE_DIR).exists() and Path(LABEL_DIR).exists():
        # Full evaluation pass over the dataset.
        summary, all_results = evaluate_dataset(IMAGE_DIR, LABEL_DIR, OUTPUT_DIR)

        # Aggregate and plot per-detector confusion matrices.
        print("\n生成混淆矩阵...")
        analyze_confusion_matrices(all_results, OUTPUT_DIR)

        print("\n" + "=" * 80)
        print("评估完成!")
        print("=" * 80)

    else:
        # Guide the user when the placeholder paths were not replaced.
        print("\n请设置正确的图像和标注目录路径!")
        print(f"图像目录: {IMAGE_DIR}")
        print(f"标注目录: {LABEL_DIR}")
        print("\n使用方法示例:")
        print("  summary, results = evaluate_dataset('images/', 'labels/', 'output/')")