import os
import cv2
import numpy as np
import xml.etree.ElementTree as ET
import matplotlib.pyplot as plt
from matplotlib.patches import Rectangle
from ultralytics import YOLO
from pathlib import Path
import time
from tqdm import tqdm  # 用于显示进度条

# Configure matplotlib fonts so Chinese text in figures renders correctly
plt.rcParams['font.sans-serif'] = ['SimHei']  # SimHei ships with Windows; pick another CJK font on other OSes
plt.rcParams['axes.unicode_minus'] = False  # render the minus sign properly when a CJK font is active

# Evaluation configuration
PT_MODEL_PATH =r'H:\vscode\work\runs\detect\train_yolov8s10\weights\best.pt'
DATA_DIR = r'H:\vscode\work\labelingPhoto\labelingPhoto'  # folder holding images and their Pascal VOC XML files
IMAGE_EXT = ['.jpg', '.jpeg', '.png', '.bmp']  # accepted image file extensions
CONF_THRESHOLD = 0.25  # minimum confidence for a prediction to be kept
IOU_THRESHOLD = 0.5  # minimum IoU for a prediction to match a ground-truth box
POOR_DETECTION_THRESHOLD = 0.5  # images with FN/(TP+FN) above this ratio get flagged as poorly detected
BOX_LINEWIDTH = 1  # line width of drawn bounding boxes (smaller = thinner)

# Class names for the dataset (edit to match your own label set)
CLASSES = ['i2', 'i4', 'i5', 'il100', 'il60', 'il80', 'io', 'ip', 'p10', 'p11', 'p12', 'p19', 'p23', 'p26', 'p27', 'p3',
           'p5', 'p6',
           'pg', 'ph4', 'ph4.5', 'ph5', 'pl100', 'pl120', 'pl20', 'pl30', 'pl40', 'pl5', 'pl50', 'pl60', 'pl70', 'pl80',
           'pm20',
           'pm30', 'pm55', 'pn', 'pne', 'po', 'pr40', 'w13', 'w32', 'w55', 'w57', 'w59', 'wo',
           ]


# 1. Parse ground-truth boxes from a Pascal VOC XML annotation
def parse_xml_annotation(xml_path):
    """Read a Pascal VOC XML file and return (boxes, class_ids).

    boxes is an (N, 4) float array of [xmin, ymin, xmax, ymax] and class_ids
    holds the matching indices into CLASSES. Objects whose label is not in
    CLASSES are skipped. A missing or unparsable file yields two empty arrays.
    """
    if not os.path.exists(xml_path):
        return np.array([]), np.array([])

    boxes = []
    class_ids = []
    try:
        root = ET.parse(xml_path).getroot()

        for obj in root.findall('object'):
            label = obj.find('name').text
            if label not in CLASSES:
                # Ignore annotations outside the known label set
                continue

            bndbox = obj.find('bndbox')
            coords = [float(bndbox.find(tag).text) for tag in ('xmin', 'ymin', 'xmax', 'ymax')]

            boxes.append(coords)
            class_ids.append(CLASSES.index(label))

        return np.array(boxes), np.array(class_ids)
    except Exception as e:
        print(f"解析XML出错 [{xml_path}]: {str(e)}")
        return np.array([]), np.array([])


# 2. Run YOLOv8 inference on a single image
def predict_with_yolov8(model, image_path, conf_threshold=0.25):
    """Run the model on one image and unpack its detections.

    Returns (boxes, scores, class_ids, image): xyxy boxes, confidences,
    integer class ids, and the BGR image as read by OpenCV. On any failure
    (missing file, unreadable image, inference error) the three arrays are
    empty and image is None.
    """
    if not os.path.exists(image_path):
        return np.array([]), np.array([]), np.array([]), None

    try:
        image = cv2.imread(image_path)
        if image is None:
            raise ValueError(f"无法读取图像: {image_path}")

        # Keep only detections above the confidence threshold
        detections = model(image, conf=conf_threshold)[0].boxes

        return (
            detections.xyxy.cpu().numpy(),
            detections.conf.cpu().numpy(),
            detections.cls.cpu().numpy().astype(int),
            image,
        )
    except Exception as e:
        print(f"模型预测出错 [{image_path}]: {str(e)}")
        return np.array([]), np.array([]), np.array([]), None


# 3. Intersection over union of two axis-aligned boxes
def calculate_iou(box1, box2):
    """Return the IoU of two [x1, y1, x2, y2] boxes (0 when disjoint)."""
    ax1, ay1, ax2, ay2 = box1
    bx1, by1, bx2, by2 = box2

    # Overlap extents; negative means no overlap on that axis
    inter_w = min(ax2, bx2) - max(ax1, bx1)
    inter_h = min(ay2, by2) - max(ay1, by1)
    inter = max(0, inter_w) * max(0, inter_h)

    union = (ax2 - ax1) * (ay2 - ay1) + (bx2 - bx1) * (by2 - by1) - inter
    return inter / union if union > 0 else 0


# 4. Match predictions to ground-truth boxes
def match_predictions_to_ground_truth(pred_boxes, pred_scores, pred_class_ids, gt_boxes, gt_class_ids,
                                      iou_threshold=0.5):
    """Greedily match each prediction to at most one ground-truth box.

    A prediction claims the unmatched ground-truth box of the same class with
    the highest IoU, provided that IoU >= iou_threshold. Predictions are
    processed in descending confidence order (the standard VOC/COCO protocol);
    previously they were walked in array order, so a low-confidence box could
    steal a ground truth from a higher-confidence box that happened to come
    later, skewing TP/FP assignment and the AP computed from it.

    Returns one dict per prediction (in original prediction order) with keys
    'pred_idx', 'gt_idx' (-1 when unmatched), 'iou', 'pred_box', 'pred_class'
    and 'pred_score'. Returns [] when there are no ground-truth boxes.
    """
    if len(gt_boxes) == 0:
        return []

    matches = []
    gt_matched = [False] * len(gt_boxes)  # each GT box may be claimed at most once

    # Fix: rank predictions by confidence, highest first, before matching
    order = sorted(range(len(pred_boxes)), key=lambda k: pred_scores[k], reverse=True)

    for i in order:
        pred_box = pred_boxes[i]
        pred_class = pred_class_ids[i]
        best_iou = 0
        best_gt_idx = -1

        for j, (gt_box, gt_class) in enumerate(zip(gt_boxes, gt_class_ids)):
            if pred_class != gt_class or gt_matched[j]:
                continue  # skip other classes and already-claimed boxes

            iou = calculate_iou(pred_box, gt_box)
            if iou > best_iou and iou >= iou_threshold:
                best_iou = iou
                best_gt_idx = j

        matches.append({
            'pred_idx': i,
            'gt_idx': best_gt_idx,
            'iou': best_iou,
            'pred_box': pred_box,
            'pred_class': pred_class,
            'pred_score': pred_scores[i]
        })

        if best_gt_idx >= 0:
            gt_matched[best_gt_idx] = True  # mark the GT box as claimed

    # Restore original prediction order so callers see a stable layout
    matches.sort(key=lambda m: m['pred_idx'])
    return matches


# 5. Evaluate detection performance on a single image
def evaluate_single_image(model, image_path, xml_path, save_visualization=True):
    """Evaluate one image against its Pascal VOC annotation.

    Returns the tuple
        (tp, fp, fn, class_stats, visualized, image_name, num_gt, image_detections)
    where class_stats maps class name -> {'tp', 'fp', 'fn'} and
    image_detections lists {'class_id', 'confidence', 'is_tp'} per prediction
    (consumed later by the AP computation).
    """
    # Ground truth from the XML file
    gt_boxes, gt_class_ids = parse_xml_annotation(xml_path)
    num_gt = len(gt_boxes)  # number of annotated targets

    # Model predictions
    pred_boxes, pred_scores, pred_class_ids, image = predict_with_yolov8(
        model, image_path, CONF_THRESHOLD
    )

    if image is None:
        return 0, 0, 0, {}, False, os.path.basename(image_path), num_gt, []

    # Match predictions to ground truth by class and IoU
    matches = match_predictions_to_ground_truth(
        pred_boxes, pred_scores, pred_class_ids,
        gt_boxes, gt_class_ids, IOU_THRESHOLD
    )

    # Image-level counts. NOTE(review): when the image has no ground truth,
    # matches is empty, so predictions on such images never count as FP —
    # confirm whether that is intended for this dataset.
    tp = sum(1 for m in matches if m['gt_idx'] >= 0)
    fp = len(matches) - tp
    fn = num_gt - tp  # annotated targets the model missed

    # Per-class counts. Fix: iterate the union of ground-truth classes and
    # predicted classes — previously only classes present in the ground truth
    # were visited, so false positives for classes absent from this image's
    # annotation were silently dropped from the per-class statistics.
    class_stats = {}
    observed_classes = {int(c) for c in gt_class_ids} | {int(m['pred_class']) for m in matches}
    for class_id in observed_classes:
        class_name = CLASSES[class_id]
        num_class_gt = int(np.sum(gt_class_ids == class_id)) if num_gt > 0 else 0
        class_pred_matches = [m for m in matches if m['pred_class'] == class_id]

        class_tp = sum(1 for m in class_pred_matches if m['gt_idx'] >= 0)
        class_fp = len(class_pred_matches) - class_tp
        class_fn = num_class_gt - class_tp

        class_stats[class_name] = {
            'tp': class_tp,
            'fp': class_fp,
            'fn': class_fn
        }

    # Per-prediction TP/FP flags with confidences (used for AP later)
    image_detections = [
        {
            'class_id': m['pred_class'],
            'confidence': m['pred_score'],
            'is_tp': m['gt_idx'] >= 0
        }
        for m in matches
    ]

    # Optional side-by-side visualization
    if save_visualization and len(gt_boxes) > 0:
        try:
            visualize_comparison(image, pred_boxes, pred_scores, pred_class_ids,
                                 gt_boxes, gt_class_ids, matches, image_path)
            return tp, fp, fn, class_stats, True, os.path.basename(image_path), num_gt, image_detections
        except Exception as e:
            print(f"可视化出错 [{image_path}]: {str(e)}")

    return tp, fp, fn, class_stats, False, os.path.basename(image_path), num_gt, image_detections


# 6. Visualize predictions against ground truth (line width from BOX_LINEWIDTH)
def visualize_comparison(image, pred_boxes, pred_scores, pred_class_ids, gt_boxes, gt_class_ids, matches, image_path):
    """Draw ground-truth boxes (green) and predictions (blue=TP, red=FP) and
    save the figure as <image stem>_comparison.jpg in the working directory."""
    vis_image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)  # matplotlib expects RGB
    fig, ax = plt.subplots(figsize=(12, 8))
    ax.imshow(vis_image)

    # Ground-truth boxes in green
    for box, class_id in zip(gt_boxes, gt_class_ids):
        x1, y1, x2, y2 = box
        width = x2 - x1
        height = y2 - y1
        rect = Rectangle((x1, y1), width, height, linewidth=BOX_LINEWIDTH, edgecolor='g', facecolor='none')
        ax.add_patch(rect)
        ax.text(x1, y1 - 5, f"{CLASSES[class_id]} (GT)", color='g', fontsize=8)

    # Predicted boxes: blue when matched to a ground truth (TP), red otherwise (FP)
    for match in matches:
        x1, y1, x2, y2 = match['pred_box']
        width = x2 - x1
        height = y2 - y1

        if match['gt_idx'] >= 0:  # true positive
            color = 'b'
            label = f"{CLASSES[match['pred_class']]}: {match['pred_score']:.2f} (IoU: {match['iou']:.2f})"
        else:  # false positive
            color = 'r'
            label = f"{CLASSES[match['pred_class']]}: {match['pred_score']:.2f} (FP)"

        rect = Rectangle((x1, y1), width, height, linewidth=BOX_LINEWIDTH, edgecolor=color, facecolor='none')
        ax.add_patch(rect)
        ax.text(x1, y2 + 15, label, color=color, fontsize=8)

    ax.set_title('YOLOv8预测结果与真实标注对比')
    ax.set_axis_off()

    # Save one comparison image per input image
    output_path = Path(image_path).stem + "_comparison.jpg"
    plt.savefig(output_path, dpi=300, bbox_inches='tight')
    # Fix: close the figure. Without this, every evaluated image leaks an open
    # matplotlib figure, growing memory unboundedly during batch evaluation and
    # triggering the "More than 20 figures have been opened" warning.
    plt.close(fig)


# 7. Average precision via 11-point interpolation
def calculate_ap(recall, precision):
    """Compute AP with the PASCAL VOC 11-point interpolation method.

    For each recall level t in {0.0, 0.1, ..., 1.0} take the maximum precision
    among points whose recall is at least t (0 when none), then average the
    11 values. recall and precision must be the cumulative curves ordered by
    descending detection confidence.
    """
    interpolated = []
    for threshold in np.arange(0.0, 1.1, 0.1):
        eligible = precision[recall >= threshold]
        interpolated.append(eligible.max() if eligible.size else 0)
    return sum(p / 11.0 for p in interpolated)


# 8. Entry point: batch-evaluate every annotated image
def main():
    """Evaluate the YOLOv8 model over all image/XML pairs in DATA_DIR.

    Loads the model once, pairs each image with its Pascal VOC XML file by
    base name, accumulates TP/FP/FN overall and per class, computes per-class
    AP (11-point interpolation) and mAP, then prints the weakest classes and
    the images with the highest miss rate.
    """
    start_time = time.time()

    # Fail fast if the weights file is missing
    if not os.path.exists(PT_MODEL_PATH):
        raise FileNotFoundError(f"模型文件不存在: {PT_MODEL_PATH}")

    # Load the model a single time and reuse it for every image
    print("正在加载模型...")
    model = YOLO(PT_MODEL_PATH)

    # Scan the data folder for images and XML annotations (keyed by base name)
    print("正在扫描数据文件夹...")
    image_files = []
    xml_files = {}

    for file in os.listdir(DATA_DIR):
        file_path = os.path.join(DATA_DIR, file)
        file_base = os.path.splitext(file)[0]

        if any(file.lower().endswith(ext) for ext in IMAGE_EXT):
            image_files.append(file_path)
        elif file.lower().endswith('.xml'):
            xml_files[file_base] = file_path

    print(f"找到 {len(image_files)} 张图片和 {len(xml_files)} 个XML标注文件")

    # Keep only images that have a matching annotation file
    valid_image_files = []
    for img_file in image_files:
        img_base = os.path.splitext(os.path.basename(img_file))[0]
        if img_base in xml_files:
            valid_image_files.append(img_file)

    print(f"开始评估 {len(valid_image_files)} 组有效数据...")

    # Accumulators
    total_tp = 0
    total_fp = 0
    total_fn = 0
    class_total_stats = {cls: {'tp': 0, 'fp': 0, 'fn': 0} for cls in CLASSES}
    visualized_count = 0
    poor_detection_images = []  # images whose miss rate exceeds the threshold
    all_detections = []  # every prediction's (class, confidence, TP/FP) for AP

    # Evaluate each image (with a progress bar)
    for img_file in tqdm(valid_image_files, desc="评估进度"):
        img_base = os.path.splitext(os.path.basename(img_file))[0]
        xml_file = xml_files[img_base]

        tp, fp, fn, class_stats, visualized, img_name, num_gt, image_detections = evaluate_single_image(
            model, img_file, xml_file, save_visualization=True
        )

        total_tp += tp
        total_fp += fp
        total_fn += fn

        # Accumulate per-class counts
        for cls_name, stats in class_stats.items():
            class_total_stats[cls_name]['tp'] += stats['tp']
            class_total_stats[cls_name]['fp'] += stats['fp']
            class_total_stats[cls_name]['fn'] += stats['fn']

        if visualized:
            visualized_count += 1

        # Flag images whose miss rate (FN / num_gt) exceeds the threshold
        if num_gt > 0:
            fn_ratio = fn / num_gt
            if fn_ratio > POOR_DETECTION_THRESHOLD:
                poor_detection_images.append({
                    'image_name': img_name,
                    'num_gt': num_gt,
                    'num_fn': fn,
                    'fn_ratio': fn_ratio
                })

        # Collect every prediction for the AP computation below
        all_detections.extend(image_detections)

    # Overall precision / recall
    overall_precision = total_tp / (total_tp + total_fp) if (total_tp + total_fp) > 0 else 0
    overall_recall = total_tp / (total_tp + total_fn) if (total_tp + total_fn) > 0 else 0

    # Per-class precision / recall
    class_performance = {}
    for cls_name, stats in class_total_stats.items():
        tp = stats['tp']
        fp = stats['fp']
        fn = stats['fn']

        precision = tp / (tp + fp) if (tp + fp) > 0 else 0
        recall = tp / (tp + fn) if (tp + fn) > 0 else 0

        class_performance[cls_name] = {
            'precision': precision,
            'recall': recall,
            'tp': tp,
            'fp': fp,
            'fn': fn
        }

    # Per-class AP over confidence-ranked detections
    class_aps = {}
    for class_id in range(len(CLASSES)):
        class_name = CLASSES[class_id]
        class_detections = [d for d in all_detections if d['class_id'] == class_id]

        # Total ground-truth instances of this class across the dataset
        total_gt = class_total_stats[class_name]['tp'] + class_total_stats[class_name]['fn']
        if total_gt == 0:
            class_aps[class_name] = 0.0
            continue

        # Rank detections by confidence, highest first
        class_detections.sort(key=lambda x: x['confidence'], reverse=True)

        # TP/FP indicator arrays in rank order
        tp_array = np.array([d['is_tp'] for d in class_detections], dtype=bool)
        fp_array = ~tp_array

        # Cumulative counts give the precision/recall curve point by point
        cum_tp = np.cumsum(tp_array)
        cum_fp = np.cumsum(fp_array)

        precision = cum_tp / (cum_tp + cum_fp)
        recall = cum_tp / total_gt

        # No detections for a class that does have ground truth -> AP is 0
        if len(precision) == 0 or len(recall) == 0:
            class_aps[class_name] = 0.0
            continue

        ap = calculate_ap(recall, precision)
        class_aps[class_name] = ap

    # mAP over classes that have ground-truth instances.
    # Fix: the previous code averaged only APs > 0, silently excluding
    # annotated classes the model never detected and inflating the reported
    # mAP; a class with ground truth but zero AP must still count.
    evaluated_classes = [
        cls for cls in CLASSES
        if class_total_stats[cls]['tp'] + class_total_stats[cls]['fn'] > 0
    ]
    mAP = float(np.mean([class_aps[cls] for cls in evaluated_classes])) if evaluated_classes else 0.0

    # Report overall metrics
    print("\n===== 整体评估结果 =====")
    print(f"总正确检测 (TP): {total_tp}")
    print(f"总错误检测 (FP): {total_fp}")
    print(f"总未检测到 (FN): {total_fn}")
    print(f"整体精确率 (Precision): {overall_precision:.4f}")
    print(f"整体召回率 (Recall): {overall_recall:.4f}")
    print(f"mAP (平均精度均值): {mAP:.4f}")
    print(f"可视化图像数量: {visualized_count}")

    # Per-class ranking by AP, best first
    print("\n===== 类别性能排名（按AP降序）===")
    for cls_name, ap in sorted(class_aps.items(), key=lambda x: x[1], reverse=True):
        class_perf = class_performance[cls_name]
        print(f"{cls_name}: "
              f"AP={ap:.4f}, "
              f"Precision={class_perf['precision']:.4f}, "
              f"Recall={class_perf['recall']:.4f}, "
              f"TP={class_perf['tp']}, FP={class_perf['fp']}, FN={class_perf['fn']}")

    # Classes with ground truth but AP below 0.5
    poor_ap_classes = [
        (cls_name, ap) for cls_name, ap in class_aps.items()
        if ap < 0.5 and class_performance[cls_name]['tp'] + class_performance[cls_name]['fn'] > 0
    ]

    if poor_ap_classes:
        print("\n===== AP较低的类别（<0.5）===")
        for cls_name, ap in poor_ap_classes:
            class_perf = class_performance[cls_name]
            print(f"{cls_name}: AP={ap:.4f}, "
                  f"未检测率={(class_perf['fn'] / (class_perf['tp'] + class_perf['fn'])):.2%}")

    # Images with the highest miss rates, worst first
    if poor_detection_images:
        print("\n===== 检测效果差的图片（未检测率 > {:.1%}）===".format(POOR_DETECTION_THRESHOLD))
        poor_detection_images.sort(key=lambda x: x['fn_ratio'], reverse=True)
        for img_info in poor_detection_images:
            print(f"图片: {img_info['image_name']}, "
                  f"真实目标: {img_info['num_gt']}, "
                  f"未检测: {img_info['num_fn']}, "
                  f"未检测率: {img_info['fn_ratio']:.2%}")
    else:
        print("\n没有检测效果差的图片（未检测率均 ≤ {:.1%}）".format(POOR_DETECTION_THRESHOLD))

    end_time = time.time()
    print(f"\n评估完成，总耗时: {end_time - start_time:.2f}秒")


if __name__ == "__main__":
    main()