# 步骤 1: 清理代码
# 删除不必要的打印语句以提高代码运行速度，可以使用日志记录来代替 print 语句，以更好地控制日志输出。

# 步骤 2: 导入必要的库和模块
# 这里导入所需的库和模块，包括 os、json、torch、box_iou 等。
from ultralytics import YOLO
import cv2
from PIL import Image
import torch
import torch.nn.functional as F
from torchvision import transforms
import numpy as np
import clip
import json
import os
import matplotlib.pyplot as plt
from datasets.hico_text_label import (
    hico_text_label,
    hico_text_label_no_object,
    hico_object2label_index,
    obj2name,
    hoi_index2action,
    action2index,
    hoi2otherhoi,
)
from extract_actions import *
from renew_one_p_o import renew_hoi_labels
from add_hoi_labels_1p1o import add_hoi_labels
import time

# Record the script start time for the elapsed-time report at the end.
start_time = time.time()

# Step 3: COCO object category ids that are valid in HICO-DET (80 classes).
# Expressed as inclusive (low, high) ranges; the gaps are COCO ids that the
# dataset does not use (12, 26, 29, 30, 45, 66, 68, 69, 71, 83).
_VALID_OBJ_ID_RANGES = (
    (1, 11),
    (13, 25),
    (27, 28),
    (31, 44),
    (46, 65),
    (67, 67),
    (70, 70),
    (72, 82),
    (84, 90),
)
valid_obj_ids = tuple(
    obj_id
    for low, high in _VALID_OBJ_ID_RANGES
    for obj_id in range(low, high + 1)
)

# Step 3 (cont.): configuration — threshold, dataset paths, output directory.
thre = 0.56  # similarity threshold; NOTE(review): appears unused in this script
json_path = (
    "/home/jxy/datasets/gen-vlkt/hico_20160224_det/annotations/trainval_hico.json"
)
base_dir = "/home/jxy/datasets/gen-vlkt/hico_20160224_det/images/train2015"
save_images_dir = "./save_image/"
save_hoi_txt_dir_base = "/home/jxy/program/hoi2/600HOI-600HOI_no_object-add_seg_only_instance-max_adapt-dynamic_threshold_add_weight_23-black_add_correlation_hoi/results/weakly0915save_hoi/"
work_name = "add_hoi_labels_mhmo"
save_hoi_txt_dir = os.path.join(save_hoi_txt_dir_base, work_name)
# Create the output directory if it does not exist. exist_ok=True avoids the
# race between the existence check and the creation that the original
# check-then-create pattern had.
os.makedirs(save_hoi_txt_dir, exist_ok=True)

print(save_hoi_txt_dir)

# PIL <-> tensor converters and the compute device.
pil2tensor = transforms.ToTensor()
tensor2pil = transforms.ToPILImage()
device = "cuda" if torch.cuda.is_available() else "cpu"

# Load the CLIP model (ViT-B/32) used for image/text similarity scoring.
CLIP_model, preprocess = clip.load("ViT-B/32", device=device)

with torch.no_grad():
    # Re-key the HOI text prompts from (verb, object) tuples to integers
    # 0..599 so they line up with row indices of the encoded text features.
    hoi_text_label = {}

    count_ = 0

    # `key` instead of `id` — the original shadowed the builtin `id`.
    for key in hico_text_label.keys():
        hoi_text_label[count_] = hico_text_label[key]
        count_ += 1

    # Tokenize all 600 HOI prompts and encode them with CLIP.
    text = torch.cat(
        [clip.tokenize(hoi_text_label[idx]) for idx in hoi_text_label.keys()]
    ).to(device)

    text_features = CLIP_model.encode_text(text)
    print(text_features.shape)  # prompts x embedding-dim

    # Re-key the "no interaction" prompts. NOTE: count_ continues from 600,
    # so these keys start at 600 (the original comment claiming 0..599 here
    # was wrong).
    hoi_text_label_no_object = {}

    for key in hico_text_label_no_object.keys():
        hoi_text_label_no_object[count_] = hico_text_label_no_object[key]
        count_ += 1

    # Tokenize and encode the no-interaction prompts as well.
    text_no_object = torch.cat(
        [
            clip.tokenize(hoi_text_label_no_object[idx])
            for idx in hoi_text_label_no_object.keys()
        ]
    ).to(device)

    text_features_no_object = CLIP_model.encode_text(text_no_object)
    print(text_features_no_object.shape)
# Step 5: counters and accumulators used to collect run statistics.
no_detect_human_num = 0  # images where no person was detected
only_one_human_num = 0  # images with exactly one person
no_have_num = 0  # images with no HOI
no_human_object_num = 0  # images with neither person nor object
count_one_person_one_object = 0  # one-person/one-object images
count_zero_person_zero_object = 0  # images with zero detections
save_hoi = []  # every processed annotation entry; written out at the end
list_one_person_one_object = []  # file names of one-person/one-object images
# Step 4: load the detection/segmentation models ONCE. The original
# re-created both YOLO models inside the per-image loop; hoisting this
# loop-invariant work avoids reloading the weights for every image.
with torch.no_grad():
    yolov8_model = YOLO(
        "/home/jxy/program/hoi2/600HOI-600HOI_no_object-add_seg_only_instance-max_adapt-dynamic_threshold_add_weight_23-black_add_correlation_hoi/yolov8x.pt"
    )
    yolov8_seg = YOLO(
        "/home/jxy/program/hoi2/600HOI-600HOI_no_object-add_seg_only_instance-max_adapt-dynamic_threshold_add_weight_23-black_add_correlation_hoi/yolov8x-seg.pt"
    )

# Open the original JSON annotation file and rebuild annotations per image.
with open(json_path, "r", encoding="utf8") as f:
    json_data = json.load(f)
    print(len(json_data))  # 37633 entries in trainval_hico.json
    count_temp = 0

    # Step 6: iterate over every image's annotation entry.
    for hoi in json_data:
        # Debug filter: only this single image is processed.
        if os.path.basename(hoi["file_name"]) != "HICO_train2015_00000938.jpg":
            continue
        print("开始处理图像，人工标注标签：")
        print(hoi)  # ground-truth annotation for this image

        # Verb labels annotated for this image,
        # e.g. ['race', 'ride', 'sit_on', 'straddle'].
        annotated_actions = extract_actions(hoi)

        # Step 7: drop the human-annotated boxes/HOIs; they are rebuilt below.
        hoi["annotations"] = []
        hoi["hoi_annotation"] = []

        # Step 8: load the image.
        im1 = Image.open(os.path.join(base_dir, hoi["file_name"]))
        im1_tensor = pil2tensor(im1)
        # PIL's Image.size is (width, height) — the original unpacked it as
        # `h, w`, which swapped the two. Neither variable is used later, so
        # correcting the order is safe.
        w, h = im1.size

        with torch.no_grad():
            # Detection (confidence >= 0.5) and instance segmentation.
            results = yolov8_model(im1, conf=0.5)
            seg_results = yolov8_seg(im1)

        for result in results:
            boxes_f1 = result.boxes.xyxy  # (N, 4) boxes, xyxy format
            boxes_f2 = result.boxes.xywh  # (N, 4) boxes, xywh format
            conf = result.boxes.conf  # (N,) confidence scores
            cls = result.boxes.cls  # (N,) class ids; 0 == person

            # Sort detections by class id so all persons come first.
            cls_sort_index = torch.argsort(cls)
            boxes_f1 = boxes_f1[cls_sort_index]
            boxes_f2 = boxes_f2[cls_sort_index]
            conf = conf[cls_sort_index]
            cls = cls[cls_sort_index]
            print(len(cls))
            print(cls)

            try:
                # Segmentation masks and their classes. Accessing `.masks`
                # raises AttributeError when the segmentation model found
                # nothing (the original used a bare except here).
                print(seg_results[0].masks.masks.shape)
                print(seg_results[0].boxes.cls)
                masks = seg_results[0].masks.masks
                masks_cls = seg_results[0].boxes.cls

            except AttributeError:
                # No segmentation result: fall back to one empty (H, W) mask
                # with a dummy class 0.
                masks = torch.zeros((1, im1_tensor.shape[1], im1_tensor.shape[2])).to(
                    device
                )
                masks_cls = torch.tensor([0]).to(device)
                print(masks.shape, masks_cls.shape)

            # Defensive: empty mask tensor (original noted this path is unused).
            if masks.shape[0] == 0:
                masks = torch.zeros((1, im1_tensor.shape[1], im1_tensor.shape[2])).to(
                    device
                )
                masks_cls = torch.tensor([0]).to(device)
            # YOLO can return masks at a different resolution than the image;
            # resize them to match.
            if masks.shape[1] != im1.size[1] or masks.shape[2] != im1.size[0]:
                print("resize", masks.shape, im1.size)
                masks = F.interpolate(
                    masks.unsqueeze(0), (im1.size[1], im1.size[0]), mode="nearest"
                ).squeeze(0)
                print(masks.shape)
                # The original wrote `assert a == b, c == d`, which made the
                # second comparison the assert *message* instead of a check;
                # both dimensions must actually be verified.
                assert (
                    masks.shape[1] == im1.size[1] and masks.shape[2] == im1.size[0]
                ), (masks.shape, im1.size)

            # Index of the first non-person detection after sorting; stays -1
            # when every detection is a person (or there are no detections).
            person_end_index = -1
            for c_i in range(cls.shape[0]):
                if cls[c_i].cpu().item() != 0:
                    person_end_index = c_i
                    break
            print("person_end_index", person_end_index)

            save_dir = "/home/jxy/program/hoi2/600HOI-600HOI_no_object-add_seg_only_instance-max_adapt-dynamic_threshold_add_weight_23-black_add_correlation_hoi/weaklyHOI/masks"
            person_num = 0
            object_num = 0
            # Case 1: exactly one person and exactly one object.
            if person_end_index == 1 and len(cls) == 2:
                count_one_person_one_object += 1
                max_possible_dict = {}
                for i in range(0, person_end_index):
                    box_f1_1 = boxes_f1[i]
                    cls_1 = cls[i]  # guaranteed to be the person class (0)
                    img_new_1 = pil2tensor(im1)
                    print(img_new_1.shape)
                    # Mask indices that belong to this class.
                    index_list_img1 = [
                        index_
                        for index_ in range(masks_cls.shape[0])
                        if masks_cls[index_] == cls_1
                    ]
                    print(index_list_img1)
                    for j in range(i + 1, boxes_f1.shape[0]):
                        box_f1_2 = boxes_f1[j]
                        cls_2 = cls[j]
                        img_new_2 = pil2tensor(im1)
                        print(img_new_2.shape)
                        index_list_img2 = [
                            index_
                            for index_ in range(masks_cls.shape[0])
                            if masks_cls[index_] == cls_2
                        ]
                        print(
                            "cls:",
                            cls_2.cpu().item(),
                            " ",
                            obj2name[cls_2.cpu().item()],
                        )
                        # Person box first, object box second; class ids are
                        # mapped through valid_obj_ids (COCO index -> HICO id).
                        hoi["annotations"] = [
                            {
                                "bbox": [
                                    int(box_f1_1[0].cpu().item()),
                                    int(box_f1_1[1].cpu().item()),
                                    int(box_f1_1[2].cpu().item()),
                                    int(box_f1_1[3].cpu().item()),
                                ],
                                "category_id": valid_obj_ids[int(cls_1.cpu().item())],
                            }
                        ]
                        hoi["annotations"].append(
                            {
                                "bbox": [
                                    int(box_f1_2[0].cpu().item()),
                                    int(box_f1_2[1].cpu().item()),
                                    int(box_f1_2[2].cpu().item()),
                                    int(box_f1_2[3].cpu().item()),
                                ],
                                "category_id": valid_obj_ids[int(cls_2.cpu().item())],
                            }
                        )
                # Derive HOI labels from the annotated verbs and persist.
                hoi = add_hoi_labels(annotated_actions, hoi)
                print(hoi)
                with open(
                    os.path.join(
                        save_hoi_txt_dir, hoi["file_name"].split(".")[0] + ".txt"
                    ),
                    "w+",
                ) as f:
                    f.write(str(hoi))
                save_hoi.append(hoi)
                list_one_person_one_object.append(hoi["file_name"])

            # Case 2: multiple persons and/or multiple objects.
            elif len(cls) >= 3:
                res_plotted = seg_results[0].plot()
                cv2.imwrite(os.path.join(save_dir, "result_seg.png"), res_plotted)
                print("多个人和多个物体")
                person_num += 1
                # Fix: both of these were referenced below without ever being
                # initialized on this path, which raised NameError at runtime.
                max_possible_dict = {}
                save_boxes_index = []  # box indices already added to hoi["annotations"]
                for i in range(0, person_end_index):
                    box_f1_1 = boxes_f1[i]
                    cls_1 = cls[i]  # guaranteed to be the person class (0)
                    img_new_1 = pil2tensor(im1)
                    # Mask indices that belong to this class.
                    index_list_img1 = [
                        index_
                        for index_ in range(masks_cls.shape[0])
                        if masks_cls[index_] == cls_1
                    ]
                    print(index_list_img1)

                    # Merge this class's masks; single-channel (grayscale)
                    # images do not need the channel repeat.
                    if img_new_1.shape[0] == 1:
                        after_delete_mask = torch.sum(
                            masks[index_list_img1], dim=0, keepdim=True
                        )
                    else:
                        after_delete_mask = torch.sum(
                            masks[index_list_img1], dim=0, keepdim=True
                        ).repeat(3, 1, 1)

                    # Black out everything outside the person masks.
                    img_new_1[after_delete_mask <= 0] = 0.0
                    img_new_1 = tensor2pil(img_new_1)

                    img_new_1.save(os.path.join(save_dir, "box1_" + str(i) + ".jpg"))

                    # Crop the person out by its detection box.
                    img_new_1 = img_new_1.crop(
                        (
                            int(box_f1_1[0].cpu().item()),
                            int(box_f1_1[1].cpu().item()),
                            int(box_f1_1[2].cpu().item()),
                            int(box_f1_1[3].cpu().item()),
                        )
                    )

                    img_new_1.save(os.path.join(save_dir, "af_box_" + str(i) + ".jpg"))

                    for j in range(i + 1, boxes_f1.shape[0]):
                        object_num += 1
                        box_f1_2 = boxes_f1[j]
                        cls_2 = cls[j]
                        blank = Image.new("RGB", im1.size, (0, 0, 0))
                        img_new_2 = pil2tensor(im1)
                        print(img_new_2.shape)
                        index_list_img2 = [
                            index_
                            for index_ in range(masks_cls.shape[0])
                            if masks_cls[index_] == cls_2
                        ]
                        # Keep only this object's mask content.
                        if img_new_2.shape[0] == 1:
                            after_delete_mask = torch.sum(
                                masks[index_list_img2], dim=0, keepdim=True
                            )
                        else:
                            after_delete_mask = torch.sum(
                                masks[index_list_img2], dim=0, keepdim=True
                            ).repeat(3, 1, 1)
                        img_new_2[after_delete_mask <= 0] = 0.0
                        img_new_2 = tensor2pil(img_new_2)
                        img_new_2.save(
                            os.path.join(save_dir, "box2_" + str(i) + ".jpg")
                        )

                        img_new_2 = img_new_2.crop(
                            (
                                int(box_f1_2[0].cpu().item()),
                                int(box_f1_2[1].cpu().item()),
                                int(box_f1_2[2].cpu().item()),
                                int(box_f1_2[3].cpu().item()),
                            )
                        )
                        img_new_2.save(
                            os.path.join(save_dir, "af_box2_" + str(i) + ".jpg")
                        )

                        # Paste the cropped person and object back onto a black
                        # canvas at their original positions; compute_repeat
                        # counts overlapping pastes so overlaps can be averaged.
                        blank_tensor = pil2tensor(blank)
                        img_new_1_tensor = pil2tensor(img_new_1)
                        img_new_2_tensor = pil2tensor(img_new_2)
                        compute_repeat = torch.zeros(blank_tensor.shape)

                        blank_tensor[
                            :,
                            int(box_f1_1[1].cpu().item()) : int(
                                box_f1_1[3].cpu().item()
                            ),
                            int(box_f1_1[0].cpu().item()) : int(
                                box_f1_1[2].cpu().item()
                            ),
                        ] += img_new_1_tensor
                        compute_repeat[
                            :,
                            int(box_f1_1[1].cpu().item()) : int(
                                box_f1_1[3].cpu().item()
                            ),
                            int(box_f1_1[0].cpu().item()) : int(
                                box_f1_1[2].cpu().item()
                            ),
                        ] += 1
                        blank_tensor[
                            :,
                            int(box_f1_2[1].cpu().item()) : int(
                                box_f1_2[3].cpu().item()
                            ),
                            int(box_f1_2[0].cpu().item()) : int(
                                box_f1_2[2].cpu().item()
                            ),
                        ] += img_new_2_tensor
                        compute_repeat[
                            :,
                            int(box_f1_2[1].cpu().item()) : int(
                                box_f1_2[3].cpu().item()
                            ),
                            int(box_f1_2[0].cpu().item()) : int(
                                box_f1_2[2].cpu().item()
                            ),
                        ] += 1
                        temp = tensor2pil(blank_tensor)

                        # Pixels covered by any instance mask get repeat count
                        # 1, then overlapping regions are averaged in place.
                        compute_repeat[
                            torch.sum(masks, dim=0, keepdim=True).repeat(3, 1, 1) > 0
                        ] = 1
                        blank_tensor[compute_repeat > 0] = torch.div(
                            blank_tensor[compute_repeat > 0],
                            compute_repeat[compute_repeat > 0],
                        )

                        blank = tensor2pil(blank_tensor)
                        temp = tensor2pil(blank_tensor)
                        blank.save(os.path.join(save_dir, "blank.jpg"))

                        img_new_2.close()

                        # Candidate person/object pair for this (i, j).
                        max_possible_dict["annotations"] = [
                            {
                                "bbox": [
                                    int(box_f1_1[0].cpu().item()),
                                    int(box_f1_1[1].cpu().item()),
                                    int(box_f1_1[2].cpu().item()),
                                    int(box_f1_1[3].cpu().item()),
                                ],
                                "category_id": valid_obj_ids[int(cls_1.cpu().item())],
                            }
                        ]
                        max_possible_dict["annotations"].append(
                            {
                                "bbox": [
                                    int(box_f1_2[0].cpu().item()),
                                    int(box_f1_2[1].cpu().item()),
                                    int(box_f1_2[2].cpu().item()),
                                    int(box_f1_2[3].cpu().item()),
                                ],
                                "category_id": valid_obj_ids[int(cls_2.cpu().item())],
                            }
                        )
                        # Add each detection to hoi["annotations"] only once.
                        if i not in save_boxes_index:
                            save_boxes_index.append(i)
                            hoi["annotations"].append(
                                {
                                    "bbox": [
                                        int(box_f1_1[0].cpu().item()),
                                        int(box_f1_1[1].cpu().item()),
                                        int(box_f1_1[2].cpu().item()),
                                        int(box_f1_1[3].cpu().item()),
                                    ],
                                    "category_id": valid_obj_ids[
                                        int(cls_1.cpu().item())
                                    ],
                                }
                            )
                        if j not in save_boxes_index:
                            save_boxes_index.append(j)
                            hoi["annotations"].append(
                                {
                                    "bbox": [
                                        int(box_f1_2[0].cpu().item()),
                                        int(box_f1_2[1].cpu().item()),
                                        int(box_f1_2[2].cpu().item()),
                                        int(box_f1_2[3].cpu().item()),
                                    ],
                                    "category_id": valid_obj_ids[
                                        int(cls_2.cpu().item())
                                    ],
                                }
                            )

                        with torch.no_grad():
                            # Score the composed pair image against all HOI
                            # text prompts with CLIP.
                            image = preprocess(blank).unsqueeze(0).to(device)
                            image_features = CLIP_model.encode_image(image)

                            # L2-normalize features. Re-normalizing the text
                            # features on later iterations is a no-op since
                            # normalization is idempotent.
                            image_features = image_features / image_features.norm(
                                dim=1, keepdim=True
                            )
                            text_features = text_features / text_features.norm(
                                dim=1, keepdim=True
                            )
                            text_features_no_object = (
                                text_features_no_object
                                / text_features_no_object.norm(dim=1, keepdim=True)
                            )

                            # Cosine similarity against the HOI prompts and the
                            # no-interaction prompts, summed per HOI class.
                            logits_per_image = image_features @ text_features.t()
                            logits_per_image_no_object = (
                                image_features @ text_features_no_object.t()
                            )
                            logits_per_image = (
                                logits_per_image + logits_per_image_no_object
                            )
                            logits_per_image_temp = logits_per_image

                            print("=====================")
                            print("mean", logits_per_image_temp.mean())
                            # Keep only the logits of HOI classes that are
                            # valid for this object category.
                            start_index, end_index = hico_object2label_index[
                                cls_2.cpu().item()
                            ]

                            logits_per_image_object_range = torch.zeros(
                                logits_per_image.shape
                            ).to(device)
                            logits_per_image_object_range[
                                :, start_index : end_index + 1
                            ] = logits_per_image[:, start_index : end_index + 1]
                            logits_per_image = (
                                logits_per_image_object_range  # (1, 600)
                            )
                            print(
                                "cls:",
                                cls_2.cpu().item(),
                                " ",
                                obj2name[cls_2.cpu().item()],
                            )

            else:
                # Fallback: no usable person/object configuration. Assign
                # placeholder annotations so every image still carries at
                # least one box and one HOI (dataset convention).
                print("其他情况，任意赋值")
                no_detect_human_num += 1
                print("filename", hoi["file_name"])
                hoi["annotations"] = [
                    {"bbox": [0, 0, 1, 1], "category_id": valid_obj_ids[0]}
                ]
                # One annotation + one placeholder HOI per detection.
                for box_i in range(boxes_f1.shape[0]):
                    hoi["annotations"].append(
                        {
                            "bbox": [
                                int(boxes_f1[box_i][0].cpu().item()),
                                int(boxes_f1[box_i][1].cpu().item()),
                                int(boxes_f1[box_i][2].cpu().item()),
                                int(boxes_f1[box_i][3].cpu().item()),
                            ],
                            "category_id": valid_obj_ids[int(cls[box_i].cpu().item())],
                        }
                    )
                    # Action index range for this object class; original HOI
                    # labels are 1-based, hence end_index + 1.
                    start_index, end_index = hico_object2label_index[
                        cls[box_i].cpu().item()
                    ]
                    hoi["hoi_annotation"].append(
                        {
                            "subject_id": 0,
                            "object_id": box_i + 1,
                            "category_id": action2index[
                                hoi_index2action[end_index + 1]
                            ],
                            "hoi_category_id": end_index + 1,
                        }
                    )
                print(hoi)
                with open(
                    os.path.join(
                        save_hoi_txt_dir, hoi["file_name"].split(".")[0] + ".txt"
                    ),
                    "w+",
                ) as f:
                    f.write(str(hoi))
                save_hoi.append(hoi)

                continue
# Step 12: report the run statistics and persist the generated annotations
# (repr-text dump, JSON dump, and the one-person/one-object file-name list).
print("一个人和物体都没有检测到的情况：", count_zero_person_zero_object)
print("只检测到一个人和一个物体的数量:", count_one_person_one_object)


with open(os.path.join(save_hoi_txt_dir_base, work_name + ".txt"), "w+") as f:
    f.write(str(save_hoi))
with open(os.path.join(save_hoi_txt_dir_base, work_name + ".json"), "w+") as file_obj:
    json.dump(save_hoi, file_obj)
    print(file_obj.name)
with open(
    os.path.join(save_hoi_txt_dir_base, "list_one_person_one_object" + ".txt"), "w+"
) as f:
    f.write(str(list_one_person_one_object))

# Report elapsed wall-clock time as whole hours and minutes.
end_time = time.time()
total_seconds = end_time - start_time
hours, _remainder = divmod(int(total_seconds), 3600)
minutes = _remainder // 60
print(f"程序运行时间：{hours} 小时 {minutes} 分钟")
