"""
!!!!这个后续改为用cv2的代码
这个版本实现的功能是：
首先从大图中识别出目标，然后在根据需要的几类目标
从大图中扣出来小图，将小图放入到另一个网络中去识别
最后将识别的结果进行聚类
关于解体是直接进行聚类，然后其他的局部损伤是把损伤部位提取出来然后在聚类
"""
from ultralytics import YOLO
import os
import argparse
import numpy as np
import cv2
from tqdm import tqdm
from sahi import AutoDetectionModel
from sahi.predict import get_sliced_prediction
from tools import get_color_from_class, chinese_pad, cluster_largest_region

# Total number of classes the primary detector was trained on.
CLASS_COUNT = 15

# Primary-detector class id -> English label (used in drawn box labels and
# in the per-image text report).
# NOTE(review): ids 1 and 2 share the same English name even though the
# Chinese names below distinguish head- vs tail-damaged launch cars —
# confirm whether id 2 should have its own English label.
class_id_to_enname = {
    0: "part_damaged_ship",
    1: "part_damaged_launchcar",
    2: "part_damaged_launchcar",
    3: "hard_damaged_launchcar",
    4: "turnover_launchcar",
    5: "part_damaged_plane",
    6: "hard_damaged_plane",
    7: "fuel_leaking_plane",
    8: "part_damaged_radarcar",
    9: "hard_damaged_radarcar",
    10: "turnover_radarcar",
    11: "ship",
    12: "launch_car",
    13: "radar_car",
    14: "plane"
}

# Primary-detector class id -> Chinese label (used for per-class counting
# in the summary line of the report).
class_id_to_cnname = {
    0: "局部损毁的船",
    1: "头部损毁的发射车",
    2: "尾部损毁的发射车",
    3: "解体的发射车",
    4: "倾覆的发射车",
    5: "局部损毁的飞机",
    6: "解体的飞机",
    7: "漏油的飞机",
    8: "局部损毁的雷达车",
    9: "解体的雷达车",
    10: "倾覆的雷达车",
    11: "船",
    12: "发射车",
    13: "雷达车",
    14: "飞机"
}

# Classes whose damage area is measured via the secondary region detector
# (partial damage / fuel leak): the damaged part is located first, then
# each sub-region is clustered for its area.
damage_analysis_classes = [0, 1, 2, 5, 7, 8]
# Disintegrated ("hard damaged") classes: the whole crop is clustered directly.
broken_classes = [3, 6, 9]
# Output directory for annotated images and per-image text reports.
output_yolo_dir = r"F:\yolo_2025_gitee\test_img\result"
os.makedirs(output_yolo_dir, exist_ok=True)

# CLI entry kept for future use: would let the input directory be passed on
# the command line instead of being hard-coded below.
# parser = argparse.ArgumentParser()
# parser.add_argument('--input_dir', type=str, default='./tmp', required=False, help='path of input images')
# args = parser.parse_args()
# input_path = args.input_dir
# Hard-coded input: either a single image file or a directory of images.
input_path = r'F:\yolo_2025_gitee\test_img\imgs'

# Load the primary detector (whole-image target detection, run through
# SAHI sliced inference in the driver below).
detection_model = AutoDetectionModel.from_pretrained(
    model_type='yolov11',
    model_path=r'F:\yolo_2025_gitee\test_img\weights\last_1.pt',
    confidence_threshold=0.45,
    device='cuda:0'
)

# Load the secondary model that localizes concrete damage regions inside a
# cropped target.
area_detection_model = YOLO(r'F:\yolo_2025_gitee\test_img\weights\best_2.pt')


# Minimum confidence for keeping a detection from either model.
_CONF_THRESHOLD = 0.5


def _draw_contours_global(img_draw, contours, offset_x, offset_y):
    """Shift crop-local contours into full-image coordinates and draw them in red.

    Parameters:
        img_draw: BGR image to draw on (modified in place).
        contours: list of OpenCV contours in crop-local coordinates.
        offset_x, offset_y: crop's top-left corner within the full image.
    """
    shifted = [contour + np.array([[offset_x, offset_y]]) for contour in contours]
    cv2.drawContours(img_draw, shifted, -1, (0, 0, 255), 2)


def _partial_damage_area(cropped_img, img_draw, x1, y1):
    """Run the secondary damage-region detector on one cropped target.

    Each confident sub-detection is clustered for its damaged area and its
    contour is drawn onto ``img_draw`` in full-image coordinates.

    Parameters:
        cropped_img: the target crop taken from the original image.
        img_draw: full annotated image (modified in place).
        x1, y1: crop's top-left corner within the full image.

    Returns:
        Summed damaged area over all confident sub-detections (0 if none).
    """
    total_area = 0
    area_results = area_detection_model.predict(source=cropped_img, save=False)
    boxes = area_results[0].boxes
    xyxy_small = boxes.xyxy.cpu().numpy()
    confs_small = boxes.conf.cpu().numpy()
    for box_small, conf_small in zip(xyxy_small, confs_small):
        if conf_small < _CONF_THRESHOLD:
            continue
        sx1, sy1, sx2, sy2 = map(int, box_small)
        per_area, contours = cluster_largest_region(cropped_img[sy1:sy2, sx1:sx2])
        _draw_contours_global(img_draw, contours, x1 + sx1, y1 + sy1)
        total_area += per_area
    return total_area


def _build_summary(class_counts, targets):
    """Format the per-image text report (runtime strings kept byte-identical).

    Parameters:
        class_counts: dict mapping Chinese class name -> count.
        targets: list of (idx, (cx, cy), en_name, area) tuples.

    Returns:
        The full report as a single string.
    """
    summary_parts = [f"{v}辆{name}" for name, v in class_counts.items()]
    summary = "图中共识别到" + "，".join(summary_parts) + "，具体目标信息如下：\n"
    columns = [('目标idx', 12), ('目标位置', 20), ('损毁类型', 28), ('损毁面积', 16)]
    summary += "".join(chinese_pad(title, width) for title, width in columns) + "\n"
    for tid, (cx, cy), name, area in targets:
        row = [
            chinese_pad(str(tid), 12),
            chinese_pad(f"({cx:>4},{cy:>4})", 20),
            chinese_pad(name, 28),
            chinese_pad(str(area), 16),
        ]
        summary += "".join(row) + "\n"
    return summary


def handle_per_img(img_path, results):
    """Post-process SAHI detections for one image.

    Draws boxes, labels and damage contours onto a copy of the image,
    estimates a damaged area per target, then writes the annotated image
    and a text summary into ``output_yolo_dir``.

    Parameters:
        img_path: path of the original image on disk.
        results: SAHI prediction result exposing ``object_prediction_list``.
    """
    img_name = os.path.basename(img_path)
    img_base = os.path.splitext(img_name)[0]

    img = cv2.imread(img_path)
    if img is None:
        # cv2.imread returns None for unreadable/non-image files (the
        # directory driver passes every regular file); skip instead of
        # crashing on img.copy().
        print(f"[WARN] cannot read image, skipped: {img_path}")
        return
    img_draw = img.copy()
    class_counts = {}
    targets = []

    for idx, result in enumerate(results.object_prediction_list, 1):
        cls_id = result.category.id
        conf_score = result.score.value
        if conf_score < _CONF_THRESHOLD:
            continue

        x1, y1, x2, y2 = map(int, result.bbox.to_xyxy())
        cx = (x1 + x2) // 2
        cy = (y1 + y2) // 2
        en_name = class_id_to_enname.get(cls_id, "unknown id")
        cn_name = class_id_to_cnname.get(cls_id, "未知类别")
        class_counts[cn_name] = class_counts.get(cn_name, 0) + 1

        area = 0
        cropped_img = img[y1:y2, x1:x2]
        if cls_id in damage_analysis_classes:
            # Partially damaged target: locate damage sub-regions with the
            # secondary model, then cluster each sub-region for its area.
            # BUG FIX: the previous inline loop rebound cls_id/conf_score,
            # so the box color and label below were drawn from the last
            # sub-detection instead of the outer target.
            area = _partial_damage_area(cropped_img, img_draw, x1, y1)
        elif cls_id in broken_classes:
            # Disintegrated target: cluster the whole crop directly.
            area, contours = cluster_largest_region(cropped_img)
            _draw_contours_global(img_draw, contours, x1, y1)

        targets.append((idx, (cx, cy), en_name, area))

        color = get_color_from_class(int(cls_id))
        cv2.rectangle(img_draw, (x1, y1), (x2, y2), color, 2)
        label = f"{en_name} {conf_score:.2f}  idx:{idx}"
        # Filled strip behind the text keeps the label readable on any background.
        cv2.rectangle(img_draw, (x1, y1 - 25), (x1 + len(label) * 12, y1), color, -1)
        cv2.putText(img_draw, label, (x1, y1 - 5), cv2.FONT_HERSHEY_SIMPLEX, 0.5,
                    (0, 0, 0), 1, cv2.LINE_AA)

    cv2.imwrite(os.path.join(output_yolo_dir, img_name), img_draw)

    out_txt_path = os.path.join(output_yolo_dir, f"{img_base}.txt")
    with open(out_txt_path, "w", encoding="utf-8") as f:
        f.write(_build_summary(class_counts, targets))


# Shared SAHI slicing configuration — previously duplicated verbatim in both
# branches, which risked the two call sites drifting apart.
_SLICE_KWARGS = {
    "slice_height": 1280,
    "slice_width": 1280,
    "overlap_height_ratio": 0.2,
    "overlap_width_ratio": 0.2,
}

if os.path.isfile(input_path):
    # Single image: run sliced prediction once (default verbosity, as before).
    result = get_sliced_prediction(input_path, detection_model, **_SLICE_KWARGS)
    handle_per_img(input_path, result)
elif os.path.isdir(input_path):
    # Directory: process every regular file; sub-directories are skipped.
    # Non-image files are rejected inside handle_per_img when cv2 fails to
    # decode them.
    for entry in tqdm(os.listdir(input_path)):
        img_path = os.path.join(input_path, entry)
        if not os.path.isfile(img_path):
            continue
        results = get_sliced_prediction(
            img_path,
            detection_model,
            verbose=False,
            **_SLICE_KWARGS,
        )
        handle_per_img(img_path, results)
else:
    raise ValueError('input_path error')
