"""
这个版本实现的功能是：
首先从大图中识别出目标，然后在根据需要的几类目标
从大图中扣出来小图，将小图放入到另一个网络中去识别
最后将识别的结果进行聚类
关于解体是直接进行聚类，然后其他的局部损伤是把损伤部位提取出来然后在聚类
"""

from sahi import AutoDetectionModel
from sahi.predict import get_sliced_prediction
import os
from PIL import Image, ImageDraw, ImageFont
import argparse
from tqdm import tqdm
from tools import *

# ----------------- Configuration -----------------
STROKE_WIDTH = 2                 # label-text stroke (outline) width in pixels
BOX_WIDTH = 3                    # detection bounding-box line width in pixels
CLASS_COUNT = 15                 # total number of classes the YOLO model predicts


# 自定义类别索引与名称
# English class names, indexed by detector class id (list position == id).
# NOTE(review): ids 1 and 2 share the same English name although the Chinese
# table distinguishes head- vs tail-damaged launch cars — confirm whether
# id 2 should actually be "tail_damaged_launchcar".
_en_names = [
    "part_damaged_ship",        # 0
    "part_damaged_launchcar",   # 1
    "part_damaged_launchcar",   # 2
    "hard_damaged_launchcar",   # 3
    "turnover_launchcar",       # 4
    "part_damaged_plane",       # 5
    "hard_damaged_plane",       # 6
    "fuel_leaking_plane",       # 7
    "part_damaged_radarcar",    # 8
    "hard_damaged_radarcar",    # 9
    "turnover_radarcar",        # 10
    "ship",                     # 11
    "launch_car",               # 12
    "radar_car",                # 13
    "plane",                    # 14
]
class_id_to_enname = dict(enumerate(_en_names))

# Chinese display names, indexed by detector class id (list position == id).
_cn_names = [
    "局部损毁的船",      # 0
    "头部损毁的发射车",  # 1
    "尾部损毁的发射车",  # 2
    "解体的发射车",      # 3
    "倾覆的发射车",      # 4
    "局部损毁的飞机",    # 5
    "解体的飞机",        # 6
    "漏油的飞机",        # 7
    "局部损毁的雷达车",  # 8
    "解体的雷达车",      # 9
    "倾覆的雷达车",      # 10
    "船",                # 11
    "发射车",            # 12
    "雷达车",            # 13
    "飞机",              # 14
]
class_id_to_cnname = dict(enumerate(_cn_names))

# Class IDs that need second-stage damaged-part detection.
damage_analysis_classes = [0, 1, 2, 5, 7, 8, ]
# Class IDs of broken-apart (disintegrated) targets.
broken_classes = [3, 6, 9]
font = None
# Font file is expected in the current working directory.
font_path = os.path.join(os.path.abspath("."), "Arial.ttf")
try:
    font = ImageFont.truetype(font_path, 24)
except Exception as e:
    # Fall back to PIL's built-in bitmap font when Arial.ttf is missing/unreadable.
    print(f"字体加载失败: {e}")
    font = ImageFont.load_default()

# Output directory for annotated images and text summaries.
output_yolo_dir = r"F:\yolo_2025_gitee\senmatic_description\result"
os.makedirs(output_yolo_dir, exist_ok=True)

# parser = argparse.ArgumentParser()
# parser.add_argument('--input_dir', type=str, default='./tmp', required=False, help='输入图片的路径')
# args = parser.parse_args()
#
# input_path = args.input_dir
# Input: a single image file or a directory of images.
# (CLI parsing above is kept for reference but currently disabled.)
input_path = r'F:\yolo_2025_gitee\senmatic_description\imgs'

# Stage-1 model: detects the 15 whole-target classes.
detection_model = AutoDetectionModel.from_pretrained(
    model_type='yolov11',
    model_path=r'F:\yolo_2025_gitee\senmatic_description\weights\best.pt',
    confidence_threshold=0.45,
    device='cuda:0'
)

# Stage-2 model: detects damaged regions inside a cropped target.
area_detection_model = AutoDetectionModel.from_pretrained(
    model_type='yolov11',
    model_path=r'F:\yolo_2025_gitee\senmatic_description\weights\last.pt',
    confidence_threshold=0.45,
    device='cuda:0'
)


def _draw_contours(draw, contours, offset_x, offset_y):
    """Draw each contour as a closed red polyline on the full-size image.

    :param draw: ImageDraw.Draw object bound to the full-size image
    :param contours: OpenCV-style contours (arrays of [[x, y]] points) in
            crop-local coordinates
    :param offset_x: x of the crop's top-left corner in the full image
    :param offset_y: y of the crop's top-left corner in the full image
    """
    for contour in contours:
        # Shift crop-local points into full-image coordinates.
        shifted = contour + np.array([[offset_x, offset_y]])
        # Convert [[x, y]] rows to [(x, y), ...] tuples for ImageDraw.
        points = [tuple(pt[0]) for pt in shifted]
        if len(points) > 1:
            # Repeat the first point to close the outline.
            draw.line(points + [points[0]], fill=(255, 0, 0), width=2)


def handle_per_img(img_path, results):
    """
    Process the SAHI prediction result of a single image.

    Pipeline:
    1. Stage-1 targets have already been detected (``results``).
    2. For classes needing damage analysis, crop each target out of the
       big image and run the dedicated damage-area model on the crop.
    3. Cluster the damaged regions (or, for broken-apart classes, the
       whole crop) to get damage boundaries and the total damage area.

    Writes an annotated image and a per-target text summary into
    ``output_yolo_dir``.

    :param img_path: path of the image being processed
    :param results: SAHI prediction result for that image
    :return: None
    """
    # Image file name (e.g. "image.png") and its stem without extension.
    img_name = os.path.basename(img_path)
    img_base = os.path.splitext(img_name)[0]

    # Load the original (full-size) image.
    img = Image.open(img_path).convert("RGB")
    img_array = np.array(img)
    draw = ImageDraw.Draw(img)

    # Per-class counters and per-target rows for the text summary.
    class_counts = {}
    targets = []

    # Iterate over every detected target in the image (idx starts at 1).
    for idx, result in enumerate(results.object_prediction_list, 1):

        box = result.bbox.to_xyxy()
        cls_id = result.category.id
        conf_score = result.score.value

        # Extra confidence gate on top of the model threshold (0.45).
        if conf_score < 0.5:
            continue
        x1, y1, x2, y2 = map(int, box)
        cx = (x1 + x2) // 2
        cy = (y1 + y2) // 2
        en_name = class_id_to_enname.get(cls_id, "unknown id")
        cn_name = class_id_to_cnname.get(cls_id, "未知类别")
        class_counts[cn_name] = class_counts.get(cn_name, 0) + 1

        if cls_id in damage_analysis_classes:
            # Stage 2: detect damaged parts inside the cropped target,
            # then cluster each part to get its boundary and area.
            cropped_img = img_array[y1:y2, x1:x2]
            cropped_pil_img = Image.fromarray(cropped_img)
            # BUGFIX: use the dedicated damage-area model; the original
            # code passed the stage-1 `detection_model` here, leaving
            # `area_detection_model` unused.
            area_results = get_sliced_prediction(
                image=cropped_pil_img,
                detection_model=area_detection_model,
                slice_height=1280,
                slice_width=1280,
                overlap_height_ratio=0.2,
                overlap_width_ratio=0.2,
                verbose=False
            )
            area = 0
            # Cluster every detected damaged region in the crop.
            # (Loop variable renamed so it no longer shadows `result`.)
            for area_pred in area_results.object_prediction_list:
                if area_pred.score.value < 0.5:
                    continue
                # Region coordinates are local to the crop.
                rx1, ry1, rx2, ry2 = map(int, area_pred.bbox.to_xyxy())
                region = cropped_img[ry1:ry2, rx1:rx2]
                per_area, contours = cluster_largest_region(region)
                # BUGFIX: accumulate every region's area inside the loop;
                # previously the addition happened once after the loop,
                # so only the last region was counted.
                area += per_area
                # Draw the region boundary in full-image coordinates.
                _draw_contours(draw, contours, x1 + rx1, y1 + ry1)
        elif cls_id in broken_classes:
            # Broken-apart targets: cluster the whole crop directly.
            cropped_img = img_array[y1:y2, x1:x2]
            area, contours = cluster_largest_region(cropped_img)
            _draw_contours(draw, contours, x1, y1)
        else:
            # Intact targets carry no damage area.
            area = 0

        targets.append((idx, (cx, cy), en_name, area))

        # Draw the bounding box.
        color = get_color_from_class(int(cls_id))
        draw.rectangle([x1, y1, x2, y2], outline=color, width=BOX_WIDTH)
        # Draw the label background.
        label = f"{en_name} {conf_score:.2f}  idx:{idx}"
        left, top, right, bottom = font.getbbox(label)
        text_width = right - left
        text_height = bottom - top
        draw.rectangle([x1, y1 - text_height - 5, x1 + text_width, y1], fill=color)
        # Draw the label text with colors contrasting the box color.
        b, g, r = color
        hex_color = f'#{r:02x}{g:02x}{b:02x}'
        text_color, stroke_color = get_contrast_colors(hex_color)
        draw.text(
            (x1, y1 - text_height - 5),
            label,
            fill=text_color,
            font=font,
            stroke_width=STROKE_WIDTH,
            stroke_fill=stroke_color
        )

    # Save the annotated image.
    out_img_path = os.path.join(output_yolo_dir, f"{img_name}")
    img.save(out_img_path)

    # Build the textual summary.
    summary_parts = [f"{v}辆{name}" for name, v in class_counts.items()]
    summary = "图中共识别到" + "，".join(summary_parts) + "，具体目标信息如下：\n"
    columns = [
        ('目标idx', 12),
        ('目标位置', 20),
        ('损毁类型', 28),
        ('损毁面积', 16)
    ]
    header = "".join(chinese_pad(col[0], col[1]) for col in columns)
    summary += header + "\n"
    for tid, (cx, cy), name, area in targets:
        row = [
            chinese_pad(str(tid), 12),
            chinese_pad(f"({cx:>4},{cy:>4})", 20),  # right-align coordinates
            chinese_pad(name, 28),
            chinese_pad(str(area), 16)
        ]
        summary += "".join(row) + "\n"

    # Write the summary to a UTF-8 txt file next to the annotated image.
    out_path = os.path.join(output_yolo_dir, f"{img_base}.txt")
    with open(out_path, "w", encoding="utf-8") as f:
        f.write(summary)


if os.path.isfile(input_path):
    # Single image: sliced (tiled) inference over the full image.
    result = get_sliced_prediction(
        input_path,
        detection_model,
        slice_height=1280,
        slice_width=1280,
        overlap_height_ratio=0.2,
        overlap_width_ratio=0.2
    )
    handle_per_img(input_path, result)

elif os.path.isdir(input_path):
    # Directory: run sliced inference on every regular file inside it.
    imgs = os.listdir(input_path)
    for img in tqdm(imgs):
        img_path = os.path.join(input_path, img)
        if os.path.isfile(img_path):
            results = get_sliced_prediction(
                img_path,
                detection_model,
                slice_height=1280,
                slice_width=1280,
                overlap_height_ratio=0.2,
                overlap_width_ratio=0.2,
                verbose=False
            )

            handle_per_img(img_path, results)


else:
    # BUGFIX: `raise 'input_path error'` raises a plain string, which is a
    # TypeError in Python 3 — raise a proper exception instead.
    raise FileNotFoundError(f"input_path error: {input_path!r} is neither a file nor a directory")
