from ultralytics import YOLO
import cv2
from PIL import Image
import torch
import torch.nn.functional as F
from torchvision import transforms
import numpy as np
import clip
import json
import os
import matplotlib.pyplot as plt
from datasets.hico_text_label import hico_text_label, hico_text_label_no_object, hico_object2label_index, obj2name, hoi_index2action, action2index, hoi2otherhoi
# NOTE: HICO_train2015_00000005.jpg has a mislabeled object category in the dataset annotations.
# {"file_name": "HICO_train2015_00000001.jpg", "img_id": 1, 
# "annotations": [{"bbox": [207, 32, 426, 299], "category_id": 1}, {"bbox": [58, 97, 571, 404], "category_id": 4}], 
# "hoi_annotation": [{"subject_id": 0, "object_id": 1, "category_id": 73, "hoi_category_id": 153}, 
#                    {"subject_id": 0, "object_id": 1, "category_id": 77, "hoi_category_id": 154}, 
#                    {"subject_id": 0, "object_id": 1, "category_id": 88, "hoi_category_id": 155}, 
#                    {"subject_id": 0, "object_id": 1, "category_id": 99, "hoi_category_id": 156}]}

# {"file_name": "HICO_train2015_00000009.jpg", "img_id": 9, 
# "annotations": [{"bbox": [190, 101, 290, 305], "category_id": 1}, 
#                 {"bbox": [210, 99, 431, 335], "category_id": 8}, 
#                 {"bbox": [339, 93, 597, 406], "category_id": 1}], 
# "hoi_annotation": [{"subject_id": 0, "object_id": 1, "category_id": 53, "hoi_category_id": 571}, 
#                    {"subject_id": 2, "object_id": 1, "category_id": 53, "hoi_category_id": 571}]}
# ('ViT-bigG-14', 'laion2b_s39b_b160k'),
# model, train_transform, eval_transform = open_clip.create_model_and_transforms('ViT-bigG-14', pretrained='laion2b_s39b_b160k')



# The 80 valid HICO-DET / COCO object category ids. COCO category ids are not
# contiguous (12, 26, 29, 30, ... are skipped); this tuple maps YOLO's 0-based
# class index to the dataset's 1-based category_id via valid_obj_ids[cls].
valid_obj_ids = (1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 13,
                 14, 15, 16, 17, 18, 19, 20, 21, 22, 23,
                 24, 25, 27, 28, 31, 32, 33, 34, 35, 36,
                 37, 38, 39, 40, 41, 42, 43, 44, 46, 47,
                 48, 49, 50, 51, 52, 53, 54, 55, 56, 57,
                 58, 59, 60, 61, 62, 63, 64, 65, 67, 70,
                 72, 73, 74, 75, 76, 77, 78, 79, 80, 81,
                 82, 84, 85, 86, 87, 88, 89, 90)

# Initial similarity threshold; later in the script it is recomputed
# dynamically per human-object pair, so this value is only a seed.
thre = 0.56
json_path = "/home/jxy/datasets/gen-vlkt/hico_20160224_det/annotations/trainval_hico.json"
base_dir = "/home/jxy/datasets/gen-vlkt/hico_20160224_det/images/train2015"
save_images_dir = "./save_image/"
# Output dir for the per-image pseudo-label .txt files, named after the
# current working directory's basename.
save_hoi_txt_dir = os.path.join("./", os.path.basename(os.getcwd()))
# save_hoi_txt_dir = "/home/jxy/program/hoi2/600HOI-600HOI_no_object-add_seg_only_instance-max_adapt-dynamic_threshold_add_weight_23-black_add_correlation_hoi/save_hoi_txt/yolo_conf_0.5"

# os.makedirs(..., exist_ok=True) replaces the exists()+mkdir() pattern: it
# avoids the check-then-create race and also creates any missing parent
# directories (os.mkdir would raise FileNotFoundError for those).
os.makedirs(save_images_dir, exist_ok=True)
os.makedirs(save_hoi_txt_dir, exist_ok=True)
print(save_hoi_txt_dir)
# exit()

pil2tensor = transforms.ToTensor()  # PIL.Image -> CHW float tensor scaled to [0, 1]
tensor2pil = transforms.ToPILImage()  # inverse: CHW float tensor -> PIL.Image
# model = YOLO('path/to/best.pt')  # load a custom model

# ---- CLIP initialisation ----

# Pick the compute device: CUDA when available, otherwise fall back to CPU.
device = "cuda" if torch.cuda.is_available() else "cpu"

# Load the pretrained CLIP model together with its image preprocessing pipeline.
CLIP_model, preprocess = clip.load("ViT-B/32", device=device)

with torch.no_grad():
    # gen-vlkt's predefined HOI prompts are keyed by (verb, object) tuples,
    # e.g. (4, 4); re-key them as consecutive integers 0..599 while keeping
    # the dict's original insertion order.
    hoi_text_label = dict(enumerate(hico_text_label.values()))
    count_ = len(hoi_text_label)
    # e.g. {0: 'a photo of a person boarding an airplane',
    #       1: 'a photo of a person directing an airplane', ...}

    # Encode every HOI prompt with CLIP's text encoder.
    text = torch.cat([clip.tokenize(prompt) for prompt in hoi_text_label.values()]).to(device)
    text_features = CLIP_model.encode_text(text)
    print(text_features.shape)  # torch.Size([600, 512])

    # Same re-keying for the "no object" prompt variants; their integer keys
    # simply continue where the first dict left off.
    hoi_text_label_no_object = {
        count_ + offset: prompt
        for offset, prompt in enumerate(hico_text_label_no_object.values())
    }
    count_ += len(hoi_text_label_no_object)

    text_no_object = torch.cat(
        [clip.tokenize(prompt) for prompt in hoi_text_label_no_object.values()]
    ).to(device)
    text_features_no_object = CLIP_model.encode_text(text_no_object)
    print(text_features_no_object.shape)    # torch.Size([600, 512])


# temp_text = torch.cat([clip.tokenize(temp) for temp in temp_text_label]).to(device)
# end init CLIP


# print("Label probs:", probs)  # prints: [[0.9927937  0.00421068 0.00299572]]
no_detect_human_num = 0 # count of images where no person was detected
only_one_human_num = 0  # count of images containing exactly one person and nothing else
no_have_num = 0         # count of images with no HOI found (appears unused in the visible portion — verify)
no_human_object_num = 0 # count of images where neither a person nor an object was detected
save_hoi = []# accumulates the pseudo-annotation dict of every processed image
with open(json_path,'r',encoding='utf8') as f:
    json_data = json.load(f)
    # json_data = eval(json_data)
    # print(type(json_data))  # <class 'list'> 列表
    print(len(json_data))   # 37633
    # exit()
    count_temp = 0
    for hoi in json_data:
        # if os.path.basename(hoi["file_name"]) != "HICO_train2015_00005142.jpg":
        # #     continue
        # if os.path.basename(hoi["file_name"]) != "HICO_train2015_00000544.jpg":
        #     continue
        if os.path.basename(hoi["file_name"]) != "HICO_train2015_00001924.jpg" :
            continue


        print(hoi)  # 每张图像的训练集注释
        '''
        {'file_name': 'HICO_train2015_00000001.jpg', 'img_id': 1,
        # 下面两个都直接置空了
        'annotations': [{'bbox': [207, 32, 426, 299], 'category_id': 1}, 
                        {'bbox': [58, 97, 571, 404], 'category_id': 4}],
        'hoi_annotation': [{'subject_id': 0, 'object_id': 1, 'category_id': 73, 'hoi_category_id': 153}, 
                            {'subject_id': 0, 'object_id': 1, 'category_id': 77, 'hoi_category_id': 154}, 
                            {'subject_id': 0, 'object_id': 1, 'category_id': 88, 'hoi_category_id': 155}, 
                            {'subject_id': 0, 'object_id': 1, 'category_id': 99, 'hoi_category_id': 156}]}
        '''
        hoi['annotations'] = []
        hoi['hoi_annotation'] = []
        # Predict with the model
        im1 = Image.open(os.path.join(base_dir, hoi['file_name']))
        im1_tensor = pil2tensor(im1)
        h,w = im1.size  # (640, 458)    (640, 480)

        # 使用白色来填充背景 from：www.jb51.net
        # (alpha band as paste mask).
        
        # print(im1.size) # (640, 480)
        with torch.no_grad():
            # Load a model
            # 目标检测
            # yolov8_model = YOLO('./yolov8x.pt')  # load an official model
            yolov8_model = YOLO('/home/jxy/program/hoi2/600HOI-600HOI_no_object-add_seg_only_instance-max_adapt-dynamic_threshold_add_weight_23-black_add_correlation_hoi/yolov8x.pt')  # load an official model
            # 语义分割
            # yolov8_seg = YOLO("./yolov8x-seg.pt")
            yolov8_seg = YOLO("/home/jxy/program/hoi2/600HOI-600HOI_no_object-add_seg_only_instance-max_adapt-dynamic_threshold_add_weight_23-black_add_correlation_hoi/yolov8x-seg.pt")

            results = yolov8_model(im1, conf=0.5)  # predict on an image
            seg_results = yolov8_seg(im1)
            # results = yolov8_model.predict(im1,device="cpu")
            # seg_results = yolov8_seg.predict(im1,device="cpu")

            
            # 这个有问题 
            # results = yolov8_model(im1，device = cpu)  # predict on an image
            
            '''
            0: 480x640 1 person, 1 motorcycle, 14.8ms
            Speed: 1.2ms preprocess, 14.8ms inference, 0.5ms postprocess per image at shape (1, 3, 480, 640)
            0: 480x640 1 person, 1 motorcycle, 17.3ms
            Speed: 0.5ms preprocess, 17.3ms inference, 0.6ms postprocess per image at shape (1, 3, 480, 640)
            
            '''
            
        # print(results)
        # 检测出物体的数量？ 为啥都是1 不应该是2么 感觉这个不太对
        # print(len(results))
        # print(len(seg_results))
        # exit()
        # hoi = {}
        # 对目标检测的结果进行了一些处理和排序
        for result in results:
            # detection
            boxes_f1 = result.boxes.xyxy   # box with xyxy format, (N, 4)
            boxes_f2 = result.boxes.xywh   # box with xywh format, (N, 4)
            conf = result.boxes.conf   # confidence score, (N, 1)
            cls = result.boxes.cls    # cls, (N, 1) 要看一下是不是从0开始 好像记录里有记 
            # yolo检测的类别 是按照yolo的检测顺序
            
            cls_sort_index = torch.argsort(cls)
            # print(cls_sort_index)   # tensor([1, 0], device='cuda:0')
            # exit()

            boxes_f1 = boxes_f1[cls_sort_index]
            # print(boxes_f1.shape)   # torch.Size([2, 4])
            # print(boxes_f1)

            boxes_f2 = boxes_f2[cls_sort_index]
            # print(boxes_f2.shape)   # torch.Size([2, 4])
            # print(boxes_f2)

            conf = conf[cls_sort_index]
            # print(conf.shape)   # torch.Size([2])
            # print(conf)
            # exit()

            cls = cls[cls_sort_index]
            print(cls.shape)    # torch.Size([2])
            print(cls)  # tensor([0., 3.], device='cuda:0')
            # exit()
            
            # 有时候没法分割 所以搞了个try
            try: 
                print(seg_results[0].masks.masks.shape)
                # print(seg_results[0].masks.masks[0])
                # print(seg_results[0].masks.masks[0].max(), seg_results[0].masks.masks[0].min())
                # print(seg_results[0].masks.masks[1].max(), seg_results[0].masks.masks[1].min())
                # print(len(seg_results[0].masks.xy))
                # print(seg_results[0].masks.xy[0])
                print(seg_results[0].boxes.cls)
                masks = seg_results[0].masks.masks
                # masks = masks[cls_sort_index]
                masks_cls = seg_results[0].boxes.cls
                # exit()
            except:
                masks = torch.zeros((1, im1_tensor.shape[1], im1_tensor.shape[2])).to(device)
                masks_cls = torch.tensor([0]).to(device)
                print(masks.shape, masks_cls.shape) # torch.Size([1, 458, 640]) torch.Size([1])
                # exit()
                # print(seg_results[0].masks.masks.shape)
                # print(seg_results[0].boxes.cls)
            # exit()

            
            # mask_probs = seg_results[0].pro
            # 下面这个没有用
            if masks.shape[0] == 0:
                masks = torch.zeros((1, im1_tensor.shape[1], im1_tensor.shape[2])).to(device)
                masks_cls = torch.tensor([0]).to(device)
                # 判断这个mask是不是和原图的大小一样
            if masks.shape[1] != im1.size[1] or masks.shape[2] != im1.size[0]:
                print("resize", masks.shape, im1.size)
                masks = F.interpolate(masks.unsqueeze(0), (im1.size[1], im1.size[0]), mode="nearest").squeeze(0)
                print(masks.shape)
                assert masks.shape[1] == im1.size[1], masks.shape[2] == im1.size[0]
                # exit() 
                
            # segement
            
            # print(seg_results[0].masks.xyn.shape)
            # 这个在整体跑的时候应该可以注释掉 画出分割的结果图
            res_plotted = seg_results[0].plot()
            cv2.imwrite("result_seg.png", res_plotted)
            # cv2.imshow("result", res_plotted)
            # exit()
            if cls.shape[0] == 0: # 说明没有检测出任何人和物
                print("没有检测出任何人和物")
                no_human_object_num += 1
                print("filename", hoi["file_name"])
                # 随便赋值 主要是数据集标注的问题 没有人也给你标个人
                # 一张图像肯定会有一个交互动作
                hoi["annotations"] = [{"bbox": [0, 0, 1, 1], "category_id": valid_obj_ids[0]}, {"bbox": [0, 0, 1, 1], "category_id": valid_obj_ids[0]}]
                # hoi["hoi_annotation"] = []
                hoi["hoi_annotation"].append({"subject_id": 0, "object_id": 1, "category_id": 1, "hoi_category_id": 1})
                print(hoi)
                with open(os.path.join(save_hoi_txt_dir, hoi["file_name"].split(".")[0] + ".txt"), "w+") as f:
                    f.write(str(hoi))
                save_hoi.append(hoi)
                
                # with open("")
                continue
            person_end_index = -1 # 寻找第一个不是人框的index
            for c_i in range(cls.shape[0]):
                if cls[c_i].cpu().item() != 0:
                    person_end_index = c_i
                    break
            print("person_end_index", person_end_index)
            if person_end_index == 0: # 如果要是没有检测出人，那么只能自行构造一个。
                print("没有检测出人")
                no_detect_human_num += 1
                print("filename", hoi["file_name"])
                hoi["annotations"] = [{"bbox": [0, 0, 1, 1], "category_id": valid_obj_ids[0]}]
                # hoi["hoi_annotation"] = []
                # shape[0] 就是N N个物体 box_i 是数字 从0开始
                for box_i in range(boxes_f1.shape[0]):
                    hoi["annotations"].append({"bbox": [int(boxes_f1[box_i][0].cpu().item()), int(boxes_f1[box_i][1].cpu().item()), int(boxes_f1[box_i][2].cpu().item()), int(boxes_f1[box_i][3].cpu().item())],
                                            "category_id": valid_obj_ids[int(cls[box_i].cpu().item())]})
                    # 取出当前物体类别对应的相应的动作取值范围，因为原始标签是从1开始，所以最后end_index要+1
                    start_index, end_index = hico_object2label_index[cls[box_i].cpu().item()]
                    hoi["hoi_annotation"].append({"subject_id": 0, "object_id": box_i + 1, "category_id": action2index[hoi_index2action[end_index+1]], "hoi_category_id": end_index+1})
                print(hoi)
                with open(os.path.join(save_hoi_txt_dir, hoi["file_name"].split(".")[0] + ".txt"), "w+") as f:
                    f.write(str(hoi))
                save_hoi.append(hoi)
                
                continue
                # exit()
            if person_end_index == -1: # 全是人
                print("全是人")
                if cls.shape[0] == 1: # 只有一个人
                    print("只有一个人")
                    only_one_human_num += 1
                    print("filename", hoi["file_name"])
                    hoi["annotations"] = [{"bbox": [int(boxes_f1[0][0].cpu().item()), int(boxes_f1[0][1].cpu().item()), int(boxes_f1[0][2].cpu().item()), int(boxes_f1[0][3].cpu().item())],
                                           "category_id": valid_obj_ids[0]}]
                    # hoi["hoi_annotation"] = []
                    for box_i in range(boxes_f1.shape[0]):
                        hoi["annotations"].append({"bbox": [int(boxes_f1[box_i][0].cpu().item()), int(boxes_f1[box_i][1].cpu().item()), int(boxes_f1[box_i][2].cpu().item()), int(boxes_f1[box_i][3].cpu().item())],
                                                "category_id": valid_obj_ids[int(cls[box_i].cpu().item())]})
                        # 取出当前物体类别对应的相应的动作取值范围，因为原始标签是从1开始，所以最后end_index要+1
                        start_index, end_index = hico_object2label_index[cls[box_i].cpu().item()]
                        hoi["hoi_annotation"].append({"subject_id": 0, "object_id": box_i + 1, "category_id": action2index[hoi_index2action[end_index+1]], "hoi_category_id": end_index+1})
                    print(hoi)
                    with open(os.path.join(save_hoi_txt_dir, hoi["file_name"].split(".")[0] + ".txt"), "w+") as f:
                        f.write(str(hoi))
                    save_hoi.append(hoi)
                    # exit()
                    continue
                person_end_index = cls.shape[0]
                # exit()
            # print(person_end_index)
            # exit()

            # 如果遍历一遍，按照阈值进行筛选，可能没有符合条件的，但是原数据集中一张图片里起码有一种，所以备选就是相似度最高的那个
            max_possible_dict = {}
            max_possible_prob = 0
            # 判断有没有符合条件的
            is_have = False
            # 保存已经存入的boxes的index
            save_boxes_index = []
            # 重新定义hoi
            # hoi["hoi_annotation"] = []
            # hoi["annotations"] = []
            print(im1.size) # (640, 458)
            print(im1_tensor.shape) # torch.Size([3, 458, 640])
            
            
            # 这里就是正常情况了 即有多个人和多个物体 就是一个循环而已
            # 从第一个人开始 遍历每个人 每个人都要和其他人和其他物体进行配对 
            # 所以有两层for 这是第一层for
            for i in range(0, person_end_index):
                box_f1_1 = boxes_f1[i]  # torch.Size([4, 3]) 4个位置信息 3个物体
                cls_1 = cls[i]
                # masks_1 = masks[i]
                img_new_1 = pil2tensor(im1)
                
                print(img_new_1.shape)  # torch.Size([3, 458, 640])
                # 挑选出当前boxes下面属于该类的index
                index_list_img1 = [index_ for index_ in range(masks_cls.shape[0]) if masks_cls[index_] == cls_1]
                print(index_list_img1)  # [0]
                
                # channel repeat 3 times, H and W repeat 1 times respectively.
                if img_new_1.shape[0] == 1:
                    after_delete_mask = torch.sum(masks[index_list_img1], dim=0, keepdim=True)
                else:
                    after_delete_mask = torch.sum(masks[index_list_img1], dim=0, keepdim=True).repeat(3, 1, 1)
                
                # save_mask = 
                img_new_1[after_delete_mask <= 0] = 0.0
                img_new_1 = tensor2pil(img_new_1)
                
                # 根据box裁剪图像 按照坐标把人给扣出来 mask很难不用理解先
                img_new_1 = img_new_1.crop((int(box_f1_1[0].cpu().item()), int(box_f1_1[1].cpu().item()),
                                    int(box_f1_1[2].cpu().item()), int(box_f1_1[3].cpu().item())))
                # img_new_1.save("save_image/box1_" + str(i) + ".jpg")
                # blank = Image.new('RGB', im1.size, (255,255,255))
                # blank.save('blank.png')
                # exit()
                # 从人最后的索引开始 循环物体 让人和物体配对
                for j in range(i + 1, boxes_f1.shape[0]):
                    box_f1_2 = boxes_f1[j]
                    cls_2 = cls[j]
                    # masks_2 = masks[j]
                    blank = Image.new('RGB', im1.size, (0,0,0))
                    
                    # print(blank.size)
                    
                    img_new_2 = pil2tensor(im1)
                    print(img_new_2.shape)
                    index_list_img2 = [index_ for index_ in range(masks_cls.shape[0]) if masks_cls[index_] == cls_2]
                    # # 保存一下该物体的mask，为了方便之后进行填充
                    # save_index_list_img2 = [index_ for index_ in range(masks_cls.shape[0]) if masks_cls[index_] == cls_1]
                    # channel repeat 3 times, H and W repeat 1 times respectively.
                    # mask的逻辑很难
                    if img_new_2.shape[0] == 1:
                        after_delete_mask = torch.sum(masks[index_list_img2], dim=0, keepdim=True)
                    else:
                        after_delete_mask = torch.sum(masks[index_list_img2], dim=0, keepdim=True).repeat(3, 1, 1)
                    img_new_2[after_delete_mask <= 0] = 0.0
                    img_new_2 = tensor2pil(img_new_2) 
                    
                    # exit()
                    
                    # 物体抠出来
                    img_new_2 = img_new_2.crop((int(box_f1_2[0].cpu().item()), int(box_f1_2[1].cpu().item()),
                                    int(box_f1_2[2].cpu().item()), int(box_f1_2[3].cpu().item())))
                    # img_new_2.save("save_image/box2_" + str(j) + ".jpg")
                    # exit()
                    # img_new.save("temp.jpg")
                    # 生成一个空的tensor 把mask的信息全部粘上去
                    blank_tensor = pil2tensor(blank)
                    # blank_tensor = torch.zeros(blank_tensor.shape)
                    img_new_1_tensor = pil2tensor(img_new_1)
                    img_new_2_tensor = pil2tensor(img_new_2)
                    compute_repeat = torch.zeros(blank_tensor.shape)

                    
                    blank_tensor[:, int(box_f1_1[1].cpu().item()): int(box_f1_1[3].cpu().item()), int(box_f1_1[0].cpu().item()): int(box_f1_1[2].cpu().item())] += img_new_1_tensor
                    compute_repeat[:, int(box_f1_1[1].cpu().item()): int(box_f1_1[3].cpu().item()), int(box_f1_1[0].cpu().item()): int(box_f1_1[2].cpu().item())] += 1
                    temp = tensor2pil(blank_tensor)
                    # temp.save("save_image/temp1.jpg")
                    blank_tensor[:, int(box_f1_2[1].cpu().item()): int(box_f1_2[3].cpu().item()), int(box_f1_2[0].cpu().item()): int(box_f1_2[2].cpu().item())] += img_new_2_tensor
                    compute_repeat[:, int(box_f1_2[1].cpu().item()): int(box_f1_2[3].cpu().item()), int(box_f1_2[0].cpu().item()): int(box_f1_2[2].cpu().item())] += 1  
                    temp = tensor2pil(blank_tensor)
                    # temp.save("save_image/temp2.jpg")
                    # 只有背景的部分才会+2次，有mask的1次
                    compute_repeat[torch.sum(masks, dim=0, keepdim=True).repeat(3,1,1) > 0] = 1
                    blank_tensor[compute_repeat > 0] = torch.div(blank_tensor[compute_repeat > 0], compute_repeat[compute_repeat > 0])
                    
                    # 
                    # print("compute_repeat", compute_repeat.min(), compute_repeat.max())
                    # 重新填充检测出的实例
                    # blank_tensor[torch.sum(masks, dim=0, keepdim=True).repeat(3,1,1) > 0] = im1_tensor[torch.sum(masks, dim=0, keepdim=True).repeat(3,1,1) > 0]
                    # blank_tensor[:, int(box_f1_1[1].cpu().item()): int(box_f1_1[3].cpu().item()), int(box_f1_1[0].cpu().item()): int(box_f1_1[2].cpu().item())] = \
                    # blank_tensor[:, int(box_f1_1[1].cpu().item()): int(box_f1_1[3].cpu().item()), int(box_f1_1[0].cpu().item()): int(box_f1_1[2].cpu().item())]

                    # blank_tensor[:, int(box_f1_2[1].cpu().item()): int(box_f1_2[3].cpu().item()), int(box_f1_2[0].cpu().item()): int(box_f1_2[2].cpu().item())] = \
                    # blank_tensor[:, int(box_f1_2[1].cpu().item()): int(box_f1_2[3].cpu().item()), int(box_f1_2[0].cpu().item()): int(box_f1_2[2].cpu().item())]

                    

                    # blank_tensor[:, int(box_f1_1[1].cpu().item()): int(box_f1_1[3].cpu().item()), int(box_f1_1[0].cpu().item()): int(box_f1_1[2].cpu().item())] = \
                    # torch.div(blank_tensor[:, int(box_f1_1[1].cpu().item()): int(box_f1_1[3].cpu().item()), int(box_f1_1[0].cpu().item()): int(box_f1_1[2].cpu().item())], \
                    #           compute_repeat[:, int(box_f1_1[1].cpu().item()): int(box_f1_1[3].cpu().item()), int(box_f1_1[0].cpu().item()): int(box_f1_1[2].cpu().item())])

                    # blank_tensor[:, int(box_f1_2[1].cpu().item()): int(box_f1_2[3].cpu().item()), int(box_f1_2[0].cpu().item()): int(box_f1_2[2].cpu().item())] = \
                    # torch.div(blank_tensor[:, int(box_f1_2[1].cpu().item()): int(box_f1_2[3].cpu().item()), int(box_f1_2[0].cpu().item()): int(box_f1_2[2].cpu().item())], \
                    #           compute_repeat[:, int(box_f1_2[1].cpu().item()): int(box_f1_2[3].cpu().item()), int(box_f1_2[0].cpu().item()): int(box_f1_2[2].cpu().item())])
                    
                    # blank_tensor = torch.div(blank_tensor, compute_repeat)
                    # 再把tensor转化成图片的格式 320*480 
                    blank = tensor2pil(blank_tensor)
                    # print()
                    # blank.paste(img_new_1, (int(box_f1_1[0].cpu().item()), int(box_f1_1[1].cpu().item()), int(box_f1_1[2].cpu().item()), int(box_f1_1[3].cpu().item())))
                    # blank.paste(img_new_2, (int(box_f1_2[0].cpu().item()), int(box_f1_2[1].cpu().item()), int(box_f1_2[2].cpu().item()), int(box_f1_2[3].cpu().item())))
                    # blank.save("save_image/blank_" + str(i) + "_" + str(j) + ".jpg")
                    # exit()
                    img_new_2.close()
                    # exit()
                    
                    
                    
                    
                    
                    
                    with torch.no_grad():
                        image = preprocess(blank).unsqueeze(0).to(device)
                        image_features = CLIP_model.encode_image(image)
                        # print(image_features.shape) # torch.Size([1, 512])
                        # print(text_features.shape)  # torch.Size([600, 512])
                        # exit()
                        # image_features = self.encode_image(image)
                        # text_features = self.encode_text(text)

                        # normalized features
                        image_features = image_features / image_features.norm(dim=1, keepdim=True)
                        text_features = text_features / text_features.norm(dim=1, keepdim=True)
                        text_features_no_object = text_features_no_object / text_features_no_object.norm(dim=1, keepdim=True)

                        # cosine similarity as logits
                        # logit_scale = self.logit_scale.exp()
                        logits_per_image = image_features @ text_features.t()
                        logits_per_image_no_object = image_features @ text_features_no_object.t()
                        logits_per_image = logits_per_image + logits_per_image_no_object
                        logits_per_image_temp = logits_per_image
                        # plt.figure()
                        # plt.hist(logits_per_image_temp[0].cpu().numpy())
                        # plt.savefig("save_image/blank_" + str(i) + "_" + str(j) + "_hist.jpg")
                        # plt.close()
                        
                        
                        
                        
                        print("=====================")
                        print("mean", logits_per_image_temp.mean())
                        start_index, end_index = hico_object2label_index[cls_2.cpu().item()]
                        
                        logits_per_image_object_range = torch.zeros(logits_per_image.shape).to(device)
                        logits_per_image_object_range[:, start_index: end_index+1] = logits_per_image[:, start_index: end_index+1]
                        logits_per_image = logits_per_image_object_range    # torch.Size([1, 600])
                        
                        print("cls:", cls_2.cpu().item(), " ", obj2name[cls_2.cpu().item()])
                        # print(start_index, end_index)
                        # exit()
                        # print(logits_per_image.shape)
                        # print(logits_per_image)
                        print(logits_per_image.max(), logits_per_image.min())
                        print(logits_per_image[0, start_index: end_index])
                        
                        print(logits_per_image[logits_per_image > logits_per_image.max() - logits_per_image.max() * 0.1])
                        # print(logits_per_image[logits_per_image > logits_per_image.max() - logits_per_image.max() * 0.1].shape)
                        print(torch.where(logits_per_image > logits_per_image.max() - logits_per_image.max() * 0.1)[1])

                        thre = logits_per_image.max() - logits_per_image_temp.mean() - logits_per_image.max() * 0.23
                        print("thre:", thre)
                        # 更新一下其他hoi的值
                        max_index = logits_per_image.argmax(dim=1).cpu().item()
                        print("max_index ", max_index)
                        print("other correlation hoi ", hoi2otherhoi[max_index])
                        logits_per_image[:, hoi2otherhoi[max_index]] += logits_per_image[:, hoi2otherhoi[max_index]] * 0.07
                        print(logits_per_image[0, start_index: end_index])
                        
                        
                        if thre < 0:
                            # print(logits_per_image.max())
                            if max_possible_prob < logits_per_image.max().cpu().item():
                                max_possible_prob = logits_per_image.max().cpu().item()
                                max_possible_dict["annotations"] = [{"bbox": [int(box_f1_1[0].cpu().item()), int(box_f1_1[1].cpu().item()), int(box_f1_1[2].cpu().item()), int(box_f1_1[3].cpu().item())], "category_id": valid_obj_ids[int(cls_1.cpu().item())]}]
                                max_possible_dict["annotations"].append({"bbox": [int(box_f1_2[0].cpu().item()), int(box_f1_2[1].cpu().item()), int(box_f1_2[2].cpu().item()), int(box_f1_2[3].cpu().item())], "category_id": valid_obj_ids[int(cls_2.cpu().item())]})
                                # max_index = logits_per_image.argmax(dim=1).item()
                                # # hoi_index = hoi_index_tensor[h_i].cpu().item()
                                # max_possible_dict["hoi_annotation"] = [{"subject_id": 0, "object_id": 1, "category_id": action2index[hoi_index2action[max_index + 1]], "hoi_category_id": max_index + 1}]
                                hoi_index_tensor = torch.where(logits_per_image > logits_per_image.max() - logits_per_image.max() * 0.1)[1]
                                # print(hoi_index_tensor)
                                if hoi_index_tensor.shape[0] > 5: # 如果该人-物对儿提取的关系大于5个，那个只取前五个就行
                                    print("该人-物对的关系大于5个，取前五个")
                                    temp = logits_per_image[logits_per_image > logits_per_image.max() - logits_per_image.max() * 0.1]
                                    # print(temp)
                                    temp_sort_index = torch.argsort(-temp)
                                    # print(temp_sort_index)
                                    hoi_index_tensor = hoi_index_tensor[temp_sort_index]
                                    # print(hoi_index_tensor)
                                    hoi_index_tensor = hoi_index_tensor[:5]
                                    # print(hoi_index_tensor)
                                    # exit()
                                max_possible_dict["hoi_annotation"] = []
                                for h_i in range(hoi_index_tensor.shape[0]):
                                    # Fallback candidate: remember every HOI class that cleared the
                                    # threshold, hard-wired to subject_id 0 / object_id 1 (the
                                    # provisional best human-object pair collected above).
                                    # +1 converts the 0-based CLIP class index to the 1-based HICO hoi id.
                                    hoi_index = hoi_index_tensor[h_i].cpu().item()
                                    max_possible_dict["hoi_annotation"].append({"subject_id": 0, "object_id": 1, "category_id": action2index[hoi_index2action[hoi_index + 1]], "hoi_category_id": hoi_index + 1})

                            # resample: this pair did not pass the confidence check, try the next one
                            continue
                        else:
                            # at least one confident human-object pair exists for this image
                            is_have = True
                        # print(hoi)
                        # Register both boxes of the pair exactly once; their positions in
                        # save_boxes_index double as subject_id / object_id below.
                        if i not in save_boxes_index:
                            save_boxes_index.append(i)
                            hoi["annotations"].append({"bbox": [int(box_f1_1[0].cpu().item()), int(box_f1_1[1].cpu().item()), int(box_f1_1[2].cpu().item()), int(box_f1_1[3].cpu().item())], "category_id": valid_obj_ids[int(cls_1.cpu().item())]})
                        if j not in save_boxes_index:
                            save_boxes_index.append(j)
                            hoi["annotations"].append({"bbox": [int(box_f1_2[0].cpu().item()), int(box_f1_2[1].cpu().item()), int(box_f1_2[2].cpu().item()), int(box_f1_2[3].cpu().item())], "category_id": valid_obj_ids[int(cls_2.cpu().item())]})
                        # hoi["hoi_annotation"] = []
                        # Dynamic threshold: keep every HOI class whose logit is within 10% of
                        # this pair's maximum logit (max - max*0.1 == 0.9*max).
                        # NOTE(review): this only behaves as a "top band" when the max logit is
                        # positive; for a negative max the threshold lies above it — confirm the
                        # CLIP logits here are always positive.
                        hoi_index_tensor = torch.where(logits_per_image > logits_per_image.max() - logits_per_image.max() * 0.1)[1]
                        # print(hoi_index_tensor)
                        if hoi_index_tensor.shape[0] > 5: # if this human-object pair yields more than 5 relations, keep only the top five
                            print("该人-物对的关系大于5个，取前五个")
                            temp = logits_per_image[logits_per_image > logits_per_image.max() - logits_per_image.max() * 0.1]
                            # print(temp)
                            # argsort of the negated logits = descending order by confidence
                            temp_sort_index = torch.argsort(-temp)
                            # print(temp_sort_index)
                            hoi_index_tensor = hoi_index_tensor[temp_sort_index]
                            # print(hoi_index_tensor)
                            hoi_index_tensor = hoi_index_tensor[:5]
                            # print(hoi_index_tensor)
                            # exit()

                        # Emit one HOI triplet per surviving class; +1 converts the 0-based
                        # class index back to the 1-based HICO hoi_category_id.
                        for h_i in range(hoi_index_tensor.shape[0]):
                            hoi_index = hoi_index_tensor[h_i].cpu().item()
                            hoi["hoi_annotation"].append({"subject_id": save_boxes_index.index(i), "object_id": save_boxes_index.index(j), "category_id": action2index[hoi_index2action[hoi_index + 1]], "hoi_category_id": hoi_index + 1})
                        # for box_i in range(boxes_f1.shape[0]):
                        #     hoi["annotations"].append({"boxes": [int(boxes_f1[box_i][0].cpu().item()), int(boxes_f1[box_i][1].cpu().item()), int(boxes_f1[box_i][2].cpu().item()), int(boxes_f1[box_i][3].cpu().item())],
                        #                             "category_id": int(cls[box_i].cpu().item()) + 1})
                        #     # look up the action index range for the current object category; original labels are 1-based, so end_index needs +1 at the end
                        #     start_index, end_index = hico_object2label_index[cls[box_i].cpu().item()]
                        #     hoi["hoi_annotation"].append({"subject_id": 0, "object_id": box_i + 1, "category_id": action2index[hoi_index2action[end_index+1]], "hoi_category_id": end_index+1})
                        # print(hoi)
                        print("======================")
                        # exit()
                        # logits_per_text = logits_per_image.t()

                        # shape = [global_batch_size, global_batch_size]
                        # return logits_per_image, logits_per_text

                        # logits_per_image, logits_per_text = CLIP_model(image, text)
                        # logits_per_image_temp, logits_per_text_temp = CLIP_model(image, temp_text)
                        # print(logits_per_image_temp)
                        # print(logits_per_image.max(), logits_per_image.min())
                        # probs = logits_per_image.softmax(dim=-1).cpu()
                        # # probs = torch.sigmoid(logits_per_image).cpu()
                        # # print(probs)

                        # print(probs.argmax(dim=1).item())
                        # print(probs[0, probs.argmax(dim=1).item()])
                        # print(hoi_text_label[probs.argmax(dim=1).item()])
                        # print(torch.where(hoi_text_label > hoi_text_label))
                    # release the composited/blanked image for this pair
                    blank.close()
                    print("第", i+1, "次添加")
                    print(hoi)
                    # exit()
                img_new_1.close()
            # No confident human-object pair was found anywhere in this image:
            # fall back to the best-guess annotations accumulated in max_possible_dict.
            # NOTE(review): `is_have == False` would normally be written `not is_have`.
            if is_have == False:
                print("is_have", is_have)
                no_have_num += 1
                print("filename", hoi["file_name"])
                print()
                hoi["annotations"] = max_possible_dict["annotations"]
                hoi["hoi_annotation"] = max_possible_dict["hoi_annotation"]

                print(hoi)
                # exit()
            # Dump this image's pseudo-label dict as a per-image .txt (repr, not JSON).
            with open(os.path.join(save_hoi_txt_dir, hoi["file_name"].split(".")[0] + ".txt"), "w+") as f:
                f.write(str(hoi))
            save_hoi.append(hoi)
            # break
            # print(max_possible_dict)
            print(hoi)
            # exit()
        count_temp += 1
        # if count_temp == 100:
        #     break
        # break
# Summary of how many images fell into each failure / fallback category.
for _tag, _count in (
    ("no_detect_human_num", no_detect_human_num),
    ("only_one_human_num", only_one_human_num),
    ("no_have_num", no_have_num),
    ("no_human_object_num", no_human_object_num),
):
    print(_tag, _count)

# import json
# HH = [2, 4, 6, 8, 10]
# with open(os.path.join("./", os.path.basename(os.getcwd()) + ".txt"), "w+") as f:
#     f.write(str(save_hoi))
# filename = "./hico-det_add_seg-max_adapte-black-dynamic_threshold.json"
# with open(os.path.join("./", os.path.basename(os.getcwd()) + ".json"), 'w+') as file_obj:
#     json.dump(save_hoi, file_obj)
#     print(file_obj.name)

# Persist the accumulated pseudo-labels (save_hoi) twice: a human-readable
# repr dump (.txt) and machine-readable JSON (.json), under result_dir.
result_dir = "/home/jxy/program/hoi2/600HOI-600HOI_no_object-add_seg_only_instance-max_adapt-dynamic_threshold_add_weight_23-black_add_correlation_hoi/results"
result_name = 'yolo_conf_0.5'
# Fix: the hard-coded directory may not exist yet; without this, both opens
# below raise FileNotFoundError and the whole run's output is lost.
os.makedirs(result_dir, exist_ok=True)
# Fix: mode "w" instead of "w+" (the files are never read back here) and an
# explicit encoding so the dump is stable across locales.
with open(os.path.join(result_dir, result_name + ".txt"), "w", encoding="utf-8") as f:
    f.write(str(save_hoi))
# NOTE(review): `filename` is not used anywhere in view (the JSON path is built
# from result_dir/result_name) — kept only in case later, unseen code reads it.
filename = "./hico-det_add_seg-max_adapte-black-dynamic_threshold.json"
with open(os.path.join(result_dir, result_name + ".json"), "w", encoding="utf-8") as file_obj:
    json.dump(save_hoi, file_obj)
    print(file_obj.name)
    
    
# Each result is composed of torch.Tensor by default, 
# in which you can easily use following functionality:
# result = result.cuda()
# result = result.cpu()
# result = result.to("cpu")
# result = result.numpy()
# Render the first detection result as an annotated image and save it to disk.
# NOTE(review): assumes `results` (produced by the detector earlier in the file)
# is non-empty — confirm, otherwise this raises IndexError.
res_plotted = results[0].plot()
cv2.imwrite("result.png", res_plotted)