import cv2
import numpy as np
import torch
import torchvision.transforms.functional as F
import matplotlib.pyplot as plt
from PIL import Image
import os
from util.box_ops import box_cxcywh_to_xyxy
# BGR-style color palette for boxes/points, indexed by a remapped class id
# (see draw_box_on_img: id 0 stays 0, id 80 -> 2, all others -> 1).
colors_hp = [(0, 0, 255), (0, 255, 0), (255, 0, 0),
             (255, 0, 0),   (0, 0, 255), (255, 0, 0), 
             (0, 0, 255),   (255, 0, 0), (0, 0, 255)]
# Colors for interaction lines (index 0) and action label text (index 1).
colors_action_hp = [(255, 0, 0), (255, 0, 0),(255, 255, 0),
                    (255, 0, 0), (0, 0, 255), (255, 0, 0), 
                    (0, 0, 255),(255, 0, 0), (0, 0, 255)]
#   HOI verb labels (HICO-DET) — NOTE: keys are 1-based.
hico_verb = {1: 'adjust', 2: 'assemble', 3: 'block', 4: 'blow', 5: 'board', 6: 'break', 7: 'brush_with', 8: 'buy', 9: 'carry', 10: 'catch', 
            11: 'chase', 12: 'check', 13: 'clean', 14: 'control', 15: 'cook', 16: 'cut', 17: 'cut_with', 18: 'direct', 19: 'drag', 20: 'dribble', 
            21: 'drink_with', 22: 'drive', 23: 'dry', 24: 'eat', 25: 'eat_at', 26: 'exit', 27: 'feed', 28: 'fill', 29: 'flip', 30: 'flush', 
            31: 'fly', 32: 'greet', 33: 'grind', 34: 'groom', 35: 'herd', 36: 'hit', 37: 'hold', 38: 'hop_on', 39: 'hose', 40: 'hug', 
            41: 'hunt', 42: 'inspect', 43: 'install', 44: 'jump', 45: 'kick', 46: 'kiss', 47: 'lasso', 48: 'launch', 49: 'lick', 50: 'lie_on', 
            51: 'lift', 52: 'light', 53: 'load', 54: 'lose', 55: 'make', 56: 'milk', 57: 'move', 58: 'no_interaction', 59: 'open', 60: 'operate', 
            61: 'pack', 62: 'paint', 63: 'park', 64: 'pay', 65: 'peel', 66: 'pet', 67: 'pick', 68: 'pick_up', 69: 'point', 70: 'pour', 
            71: 'pull', 72: 'push', 73: 'race', 74: 'read', 75: 'release', 76: 'repair', 77: 'ride', 78: 'row', 79: 'run', 80: 'sail', 
            81: 'scratch', 82: 'serve', 83: 'set', 84: 'shear', 85: 'sign', 86: 'sip', 87: 'sit_at', 88: 'sit_on', 89: 'slide', 90: 'smell', 
            91: 'spin', 92: 'squeeze', 93: 'stab', 94: 'stand_on', 95: 'stand_under', 96: 'stick', 97: 'stir', 98: 'stop_at', 99: 'straddle', 100: 'swing', 
            101: 'tag', 102: 'talk_on', 103: 'teach', 104: 'text_on', 105: 'throw', 106: 'tie', 107: 'toast', 108: 'train', 109: 'turn', 110: 'type_on', 
            111: 'walk', 112: 'wash', 113: 'watch', 114: 'wave', 115: 'wear', 116: 'wield', 117: 'zip'}

# V-COCO verb labels — NOTE: keys are 0-based, unlike hico_verb above.
vcoco_verb = {0:'hold_obj', 1:'stand', 2:'sit_instr', 3:'ride_instr', 4:'walk', 5:'look_obj', 6:'hit_instr', 7:'hit_obj',
            8:'eat_obj', 9:'eat_instr', 10:'jump_instr', 11:'lay_instr', 12:'talk_on_phone_instr', 13:'carry_obj',
            14:'throw_obj', 15:'catch_obj', 16:'cut_instr', 17:'cut_obj', 18:'run', 19:'work_on_computer_instr',
            20:'ski_instr', 21:'surf_instr', 22:'skateboard_instr', 23:'smile', 24:'drink_instr', 25:'kick_obj',
            26:'point_instr', 27:'read_obj', 28:'snowboard_instr'}


def tensorboard_plt(orig_target_sizes, samples, outputs, epoch, tb_writer, args, targets, results, per_batch):
    """Visualize predictions and decoder attention heatmaps for one batch.

    For each image: crops the padded network input back to its valid region,
    resizes it to the original resolution, draws the predicted HOI triplets,
    and overlays the top-K decoder cross-attention maps. Figures are saved
    under ``args.output_img/<global image index>/``.

    NOTE(review): both ``args.topK`` (attention-map selection) and
    ``args.top_k`` (triplet drawing) are read — confirm this dual spelling
    is intentional and not a typo for a single option.

    Returns:
        tb_writer (all ``tb_writer.add_image`` calls are commented out, so
        it is currently returned unchanged).
    """
    topK = args.topK 
    plt.figure()
    plt.axis('off') 
    for num_samples in range(len(results)):
        # One output directory per image, indexed globally across batches.
        path = os.path.join(args.output_img, str(per_batch + num_samples))
        if not os.path.exists(path):
            os.makedirs(path)  
        # Boolean mask of the topK highest-scoring queries for this image.
        keep = get_topk(results[num_samples], topK)
        
        # samples.mask marks padded pixels; invert and expand to 3 channels
        # so the mask can index an HxWx3 image (assumes a NestedTensor-style
        # batch — TODO confirm).
        mask = (~samples.mask[num_samples]).unsqueeze(-1).repeat(1,1,3).cpu().numpy()
        # Count of valid (unpadded) rows in the first column == valid height.
        hs = int(mask.sum(0)[0, 0])
        
        img_ori = samples.tensors[num_samples]
        img_ori = img_ori.permute(1,2,0).cpu().numpy()
        img_ori = folat2uint8(img_ori)

        #   original image size
        h_ori, w_ori = orig_target_sizes[num_samples]
        img_ori_box = img_ori.copy()
        # Drop padded pixels, restore the 2-D layout, and resize back to the
        # original resolution.
        img_ori_box = img_ori_box[mask].reshape(hs,-1,3)
        img_ori_box = resize(img_ori_box, (h_ori, w_ori))
        plt.imshow(img_ori_box)
        if args.output_img is not None: plt.savefig(path+"/img_ori_box.png")
        plt.show()        
        
        # Draw the predicted HOI triplets (boxes + interaction lines).
        pred_img = draw_img_pred(img_ori_box.copy(), results[num_samples], top_k=args.top_k, 
                                        rel_threshold=args.rel_threshold, dataset_verb=args.dataset_file)
        plt.imshow(pred_img)
        if args.output_img is not None: plt.savefig(path+"/pred_img.png")
        plt.show()
        # tb_writer.add_image('pred_rel_relations_'+str(i), pred_img, global_step=epoch, dataformats='HWC')
        # GT_img = draw_img_gt(img_ori_box.copy(), targets[num_samples], top_k=args.top_k, 
        #                           rel_threshold=args.rel_threshold, dataset_verb=args.dataset_file)
        # tb_writer.add_image('gt_rel_relations_'+str(i), GT_img, global_step=epoch, dataformats='HWC')
        
        # Spatial size of the backbone feature map for this image.
        h, w = outputs['src'][num_samples].shape[-2:]
        #   N, 256, 35, 35
        # img = paint_heatmap(outputs['src'][num_samples], img_ori.copy(), mask, hs)
        # img = img_add(pred_img.copy(), resize(img, (h_ori, w_ori)))
        # # tb_writer.add_image('resnet_heatmap_'+str(per_batch + num_samples), img, global_step=epoch, dataformats='HWC')
        # plt.imshow(img) 
        # if args.output_img is not None: plt.savefig(path+"/resnet_heatmap.png")
        # plt.show()
        
        
        #   N, 256, 35, 35
        # img = paint_heatmap(outputs['memory'][num_samples], img_ori.copy(), mask, hs)
        # img = img_add(pred_img.copy(), resize(img, (h_ori, w_ori)))
        # # tb_writer.add_image('memory_heatmap_'+str(per_batch + num_samples), img, global_step=epoch, dataformats='HWC')
        # plt.imshow(img)
        # if args.output_img is not None: plt.savefig(path+"/memory_heatmap.png")
        # plt.show()
        
        #   N,  100, 35x35 
        # Object-decoder cross-attention of the kept queries, reshaped to the
        # feature-map grid and overlaid on the prediction image.  NOTE: the
        # .view(topK, h, w) requires `keep` to select exactly topK queries.
        img = paint_heatmap(outputs['obj_dec_attn_weights'][-1][num_samples, keep].view(topK, h, w), img_ori.copy(), mask, hs)
        img = img_add(pred_img.copy(), resize(img, (h_ori, w_ori)))
        # tb_writer.add_image('OD_dec_heatmap_'+str(per_batch + num_samples), img, global_step=epoch, dataformats='HWC')   
        plt.imshow(img)
        if args.output_img is not None: plt.savefig(path+"/OD_dec_heatmap.png")
        plt.show()
        
        # #   N,  100, 35x35 
        # img = paint_heatmap(outputs['human_dec_attn_weights'][-1][num_samples, keep].view(topK, h, w), img_ori.copy(), mask, hs)
        # img = img_add(pred_img.copy(), resize(img, (h_ori, w_ori)))
        # # tb_writer.add_image('HM_dec_heatmap_'+str(per_batch + num_samples), img, global_step=epoch, dataformats='HWC')       
        # plt.imshow(img)
        # if args.output_img is not None: plt.savefig(path+"/HM_dec_heatmap.png")
        # plt.show()
        
        #   N,  100, 35x35 
        # Interaction-decoder cross-attention, same treatment as above.
        img = paint_heatmap(outputs['interaction_dec_attn_weights'][-1][num_samples, keep].view(topK, h, w), img_ori.copy(), mask, hs)
        img = img_add(pred_img.copy(), resize(img, (h_ori, w_ori)))
        # tb_writer.add_image('HOI_dec_heatmap_'+str(per_batch + num_samples), img, global_step=epoch, dataformats='HWC')   
        plt.imshow(img)
        if args.output_img is not None: plt.savefig(path+"/HOI_dec_heatmap.png")
        plt.show()
        
        
        # tsne = TSNE(n_components=2, init='random', learning_rate='auto')
        # fig, ax = plt.subplots() 
        # #   100, 256
        # X = outputs['out_query'][-1, num_samples]
        # out_verb_logits = outputs['pred_verb_logits'][num_samples]
        # index = out_verb_logits.max(-1)
        # for i in range(out_verb_logits.shape[-1]):
        #     temp = X[index==i].reshape(-1,256)
        #     if temp.shape[0]<2:
        #         continue
        #     tsne.fit_transform(temp)  
        #     ax.scatter(tsne.embedding_[:,0],tsne.embedding_[:,1], marker='o', label=str(i))   
        # ax.legend(loc='lower right', fontsize=12, frameon=True, fancybox=True, framealpha=0.2, borderpad=0.3,
        #         ncol=3, markerfirst=True, markerscale=1, numpoints=1, handlelength=3.5, bbox_to_anchor=(2,0))
        # ax.set_title('tsne_result Scatter')
        # tb_writer.add_figure('tsne_result', fig, global_step=epoch, close=False, walltime=None)
    return tb_writer


def folat2uint8(img):
    """Min-max normalize *img* to [0, 255] and cast to uint8.

    Fix: the original divided by ``max - min`` unconditionally, producing
    NaNs (and an undefined uint8 cast) for constant images; such images now
    map to all zeros.

    (The name keeps the original's typo — should be ``float2uint8`` — for
    backward compatibility with existing callers.)
    """
    lo, hi = img.min(), img.max()
    if hi == lo:
        # Constant image: no dynamic range to stretch; avoid division by zero.
        return np.zeros(np.shape(img), np.uint8)
    img = (img - lo) / (hi - lo)
    return np.asarray(255.0 * img, np.uint8)

def bgr2rgb(img):
    """Convert a BGR tensor image to an RGB uint8 numpy array."""
    arr = np.asarray(img.detach().cpu().numpy(), np.uint8)
    # Reverse the channel axis (B,G,R -> R,G,B); the copy keeps the result
    # contiguous, as cv2.merge did in the original.
    return np.ascontiguousarray(arr[..., ::-1])


def rgb2bgr(img):
    """Convert an RGB tensor image to a BGR uint8 numpy array."""
    arr = np.asarray(img.detach().cpu().numpy(), np.uint8)
    # Swap channel order (R,G,B -> B,G,R); copy for contiguity like cv2.merge.
    return np.ascontiguousarray(arr[..., ::-1])

def paint_heatmap(heatmap, img, mask, hs, type='mean'):
    """Reduce a stack of attention maps, overlay each resulting layer on
    *img*, then crop the image to its valid (unpadded) region.

    ``type`` selects the reduction over dim 0: 'max' or 'mean' (anything
    else leaves the stack untouched, overlaying every map).
    """
    if type == 'max':
        heatmap = heatmap.max(0, keepdim=True)[0]
    elif type == 'mean':
        heatmap = heatmap.mean(0, keepdim=True)

    # Blend every remaining layer onto the image one at a time.
    for layer in heatmap:
        img = paint_heatmaps(img, layer)

    # Keep only valid pixels and restore the 2-D layout (hs valid rows).
    return img[mask].reshape(hs, -1, 3)

def paint_heatmaps(img, heatmap, alpha = 0.5):
    """Overlay one attention map onto *img*; *img* is modified in place.

    Args:
        img: HxWx3 uint8 image; it is the destination buffer of both
            cv2.addWeighted calls, so it is mutated and also returned.
        heatmap: 2-D attention map (assumed to be a torch tensor — .view /
            .detach are called on it); upsampled to the image size.
        alpha: blend weight of each overlay pass.
    """
    height, width = img.shape[0], img.shape[1]
    overlay = img.copy()
    
    # Upsample the attention map to image resolution (bilinear).
    heatmap = heatmap.view(1, 1, heatmap.shape[0], heatmap.shape[1])
    heatmap =  torch.nn.functional.interpolate(heatmap, size=(height, width), scale_factor=None, mode='bilinear', align_corners=True)
    heatmap = heatmap.squeeze(0).permute(1, 2, 0).detach().cpu().numpy()
    heatmap = folat2uint8(heatmap).reshape(height, width)
    #heatmap = (heatmap - heatmap.min()) // (heatmap.max() - heatmap.min())
    #heatmap = np.asarray(colorize(heatmap), np.uint8)
    heatmap = cv2.applyColorMap(heatmap, cv2.COLORMAP_JET)
    # applyColorMap returns BGR; reverse the channels to RGB.
    heatmap = np.concatenate([heatmap[...,2:], heatmap[...,1:2], heatmap[...,:1]], -1)

    
    cv2.rectangle(overlay, (0, 0), (width, height), (0, 0, 255), -1) # fill the overlay with blue as the heatmap base color
    cv2.addWeighted(overlay, alpha, img, 1-alpha, 0, img) # blend the blue base onto the original image (in place)
    cv2.addWeighted(heatmap, alpha, img, 1-alpha, 0, img) # blend the heatmap onto the original image (in place)
    
    # heatmap = ( (heatmap - heatmap.min()) / (heatmap.max() - heatmap.min()) ) * 255
    # heatmap = heatmap[0].detach().cpu().numpy()
    # heatmap = cv2.resize(heatmap, (width, height), interpolation=cv2.INTER_CUBIC)
    # heatmap = (heatmap - heatmap.min()) / (heatmap.max() - heatmap.min())
    # heatmap = colorize(heatmap)
    # img = img * alpha  + heatmap * (1-alpha)
    return img


def img_add(img, heatmap, alpha = 0.5):
    # Alpha-blend *heatmap* onto *img* in place (img is also the dst buffer
    # of addWeighted) and return the blended image.
    cv2.addWeighted(heatmap, alpha, img, 1-alpha, 0, img)
    return img 



def draw_img_gt(img, output, top_k, rel_threshold=0, dataset_verb='vcoco', thick=1):
    """Render ground-truth HOI triplets (boxes + interaction lines) on img.

    Rows of ``output['hois']`` are (subject_idx, object_idx, verb_id); every
    GT triplet gets a fixed score of 1.0 so the top_k/threshold filtering
    mirrors the prediction path.
    """
    triplets = []
    for row in output['hois']:
        triplets.append([int(row[0].numpy()),
                         int(row[1].numpy()),
                         int(row[2].numpy()),
                         1.0])
    # Highest score first (all equal here, so order is preserved).
    triplets.sort(key=lambda t: t[-1], reverse=True)

    pair_points, pair_verbs = [], []
    for sub_id, obj_id, verb_id, score in triplets[:top_k]:
        if score <= rel_threshold:
            continue
        sub_box = output['boxes'][sub_id]
        obj_box = output['boxes'][obj_id]
        img = draw_box_on_img(sub_box, img, output['labels'][sub_id], thick)
        img = draw_box_on_img(obj_box, img, output['labels'][obj_id], thick)

        # A (subject, object) pair is identified by its box centers, so all
        # verbs of the same pair share one interaction line.
        p1 = [int((sub_box[0] + sub_box[2]) * 1.0 / 2), int((sub_box[1] + sub_box[3]) * 1.0 / 2)]
        p2 = [int((obj_box[0] + obj_box[2]) * 1.0 / 2), int((obj_box[1] + obj_box[3]) * 1.0 / 2)]
        if [p1, p2] not in pair_points:
            pair_points.append([p1, p2])
            pair_verbs.append([])
        pair_verbs[pair_points.index([p1, p2])].append(verb_id)

    # Draw one line per interacting pair, labeled with all its verbs.
    for pair, verbs in zip(pair_points, pair_verbs):
        img = draw_line_on_img(pair, img, verbs, dataset_verb, thick)
    return img




def draw_img_pred(img, results, top_k, rel_threshold=0, dataset_verb='vcoco', thick=2):
    """Render predicted HOI triplets (boxes + interaction lines) on img.

    Each query contributes one (subject, object, best-verb, score) triplet;
    the top_k highest-scoring triplets above rel_threshold are drawn.
    """
    verb_scores = results['verb_scores']
    # Best verb (value, index) per query — hoisted out of the loop; same
    # values the original recomputed on every iteration.
    best_scores, best_verbs = verb_scores.max(-1)

    triplets = []
    for q in range(verb_scores.shape[0]):
        triplets.append([int(results['sub_ids'][q].numpy()),
                         int(results['obj_ids'][q].numpy()),
                         int(best_verbs[q].numpy()),
                         float(best_scores[q].numpy())])
    # Highest-confidence interactions first.
    triplets.sort(key=lambda t: t[-1], reverse=True)

    pair_points, pair_verbs = [], []
    for sub_id, obj_id, verb_id, score in triplets[:top_k]:
        if score <= rel_threshold:
            continue
        sub_box = results['boxes'][sub_id]
        obj_box = results['boxes'][obj_id]
        img = draw_box_on_img(sub_box, img, results['labels'][sub_id], thick)
        img = draw_box_on_img(obj_box, img, results['labels'][obj_id], thick)

        # A (subject, object) pair is identified by its box centers, so all
        # verbs of the same pair share one interaction line.
        p1 = [int((sub_box[0] + sub_box[2]) * 1.0 / 2), int((sub_box[1] + sub_box[3]) * 1.0 / 2)]
        p2 = [int((obj_box[0] + obj_box[2]) * 1.0 / 2), int((obj_box[1] + obj_box[3]) * 1.0 / 2)]
        if [p1, p2] not in pair_points:
            pair_points.append([p1, p2])
            pair_verbs.append([])
        pair_verbs[pair_points.index([p1, p2])].append(verb_id)

    # Draw one line per interacting pair, labeled with all its verbs.
    for pair, verbs in zip(pair_points, pair_verbs):
        img = draw_line_on_img(pair, img, verbs, dataset_verb, thick)
    return img


def draw_point_on_img(xy, img, class_index, thick=1):
    '''
        Draw a filled dot at (x, y) on a copy of the image.
    '''
    # Collapse class ids to a palette index: 0 stays 0, 80 -> 2, others -> 1.
    if class_index >= 1:
        class_index = 2 if class_index == 80 else 1
    canvas = img.copy()
    cx, cy = int(xy[0]), int(xy[1])
    cv2.circle(canvas, (cx, cy), thick + 1, colors_hp[class_index], -1)
    return canvas

def draw_box_on_img(box, img, class_index, thick=1):
    '''
        Draw a bounding box and its center dot on a copy of the image.
    '''
    # Collapse class ids to a palette index: 0 stays 0, 80 -> 2, others -> 1.
    if class_index >= 1:
        class_index = 2 if class_index == 80 else 1
    color = colors_hp[class_index]
    canvas = img.copy()
    x1, y1, x2, y2 = (int(v) for v in box)
    cv2.rectangle(canvas, (x1, y1), (x2, y2), color, thick)
    center = (int((x1 + x2) * 1.0 / 2), int((y1 + y2) * 1.0 / 2))
    cv2.circle(canvas, center, thick + 1, color, -1)
    return canvas

def draw_line_on_img(line, img, class_index, dataset_verb, thick=1, score=None):
    """Draw an interaction line between two points on a copy of *img* and
    label it with the verb name(s).

    Args:
        line: ``[[x1, y1], [x2, y2]]`` — subject/object box centers.
        img: image to draw on (copied; the copy is returned).
        class_index: list of verb category ids for this pair.
        dataset_verb: 'vcoco' or 'hico' — selects the verb-name table.
            NOTE(review): hico_verb keys are 1-based while predicted verb
            indices come from argmax (0-based) — verify the offset upstream.
        thick: line/text thickness.
        score: optional confidence; currently unused (kept for the
            commented-out score rendering below).

    Fix: the original signature was ``(..., dataset_verb, score, thick=1)``
    but every visible caller passes ``thick`` as the 5th positional
    argument, so the thickness landed in the unused ``score`` parameter and
    ``thick`` silently stayed 1. Reordering the parameters makes those call
    sites behave as intended.
    """
    vis_img = img.copy()
    cv2.line(vis_img, (line[0][0], line[0][1]), (line[1][0], line[1][1]),
             colors_action_hp[0], thick)
    if dataset_verb == 'vcoco':
        verb = vcoco_verb
    elif dataset_verb == 'hico':
        verb = hico_verb
    else:
        # Fail with a clear message instead of an UnboundLocalError.
        raise ValueError(f"unknown dataset_verb: {dataset_verb!r}")
    # Join all verb names for this pair into one label, e.g. "hold/look".
    action_str = '/'.join(verb[c] for c in class_index)
    font = cv2.FONT_HERSHEY_SIMPLEX
    # NOTE(review): the y-coordinate averages line[1][1] with itself, so the
    # text sits at the object's y (+20px) rather than the line midpoint —
    # original behavior kept as-is.
    vis_img = cv2.putText(vis_img, action_str,
                          (int((line[0][0] + line[1][0]) / 2),
                           int((line[1][1] + line[1][1]) / 2 + 20)),
                          font, 1, colors_action_hp[1], thick)
    # vis_img = cv2.putText(vis_img, str(score), (int((line[0][0]+line[1][0])/2), int((line[1][1]+line[1][1])/2+50)), font, 1.2, colors_action_hp[1], 3)
    return vis_img

def rescale_bboxes(out_bbox, size):
    """Convert normalized (cx, cy, w, h) boxes to absolute (x1, y1, x2, y2)
    pixel coordinates for an image of the given (width, height) *size*."""
    img_w, img_h = size
    scale = torch.tensor([img_w, img_h, img_w, img_h], dtype=torch.float32)
    return box_cxcywh_to_xyxy(out_bbox) * scale


def resize(img, size):
    """Resize a numpy image to *size* via PIL/torchvision and return it as
    a numpy array again."""
    return np.array(F.resize(Image.fromarray(img), size))

def colorize(gray_img):
    """Map a 2-D array of values in [0, 1] to an HxWx3 jet-colored array."""
    out = np.zeros(gray_img.shape + (3,))
    # Apply the jet colormap pixel by pixel over the first two dimensions.
    for y, x in np.ndindex(out.shape[0], out.shape[1]):
        out[y, x, :] = getJetColor(gray_img[y, x], 0, 1)
    return out

def getJetColor(v, vmin, vmax):
    """Return the BGR jet-colormap triple (values up to ~256) for *v*
    clamped to [vmin, vmax].

    NOTE: only the branch thresholds are scaled by ``vmax - vmin``; the
    piecewise ramp formulas themselves assume vmin=0, vmax=1, exactly as in
    the classic implementation this mirrors.
    """
    v = min(max(v, vmin), vmax)       # clamp into [vmin, vmax]
    dv = vmax - vmin
    c = np.zeros(3)
    if v < vmin + 0.125 * dv:
        c[0] = 256 * (0.5 + (v * 4))            # B: 0.5 -> 1
    elif v < vmin + 0.375 * dv:
        c[0] = 255
        c[1] = 256 * (v - 0.125) * 4            # G: 0 -> 1
    elif v < vmin + 0.625 * dv:
        c[0] = 256 * (-4 * v + 2.5)             # B: 1 -> 0
        c[1] = 255
        c[2] = 256 * (4 * (v - 0.375))          # R: 0 -> 1
    elif v < vmin + 0.875 * dv:
        c[1] = 256 * (-4 * v + 3.5)             # G: 1 -> 0
        c[2] = 255
    else:
        c[2] = 256 * (-4 * v + 4.5)             # R: 1 -> 0.5
    return c





def get_topk(results, topK):
    """Return a boolean mask selecting the topK highest-scoring queries.

    A query's score is the maximum over its verb scores
    (``results['verb_scores']``, shape (N, num_verbs)).

    Fixes two defects in the original implementation:
      * IndexError when ``topK >= N`` (the descending-sorted array was
        indexed at position topK);
      * the strict ``score > thres`` comparison could select fewer than
        topK entries when scores tie at the threshold, which breaks the
        downstream ``.view(topK, h, w)`` reshape.

    ``torch.topk`` guarantees exactly min(topK, N) selected entries.
    """
    scores = results['verb_scores'].max(-1)[0].detach().cpu()
    k = min(topK, scores.shape[0])
    keep = torch.zeros(scores.shape[0], dtype=torch.bool)
    keep[torch.topk(scores, k).indices] = True
    return keep