# 1. get result from AlphaPose and YOLOv8
import json
import os
import re

# import torch
# import torch.nn as nn
# import torch.nn.functional as F
import numpy as np
# from scipy.optimize import linear_sum_assignment
# import pocket
# from PIL import Image, ImageDraw
import cv2
# import natsort
# import matplotlib.pyplot as plt
# import matplotlib.patches as patches
# import matplotlib.patheffects as peff

# Drawing colours in OpenCV's BGR channel order (not RGB).
RED = (0, 0, 255)
GREEN = (0, 255, 0)
BLUE = (255, 0, 0)
CYAN = (255, 255, 0)
YELLOW = (0, 255, 255)
ORANGE = (0, 165, 255)
PURPLE = (255, 0, 255)
WHITE = (255, 255, 255)
BLACK = (0, 0, 0)

# Font used for all cv2.putText labels below.
DEFAULT_FONT = cv2.FONT_HERSHEY_SIMPLEX


# def precalc_values(polyCorners, polyX, polyY):
#     constant = np.zeros(len(polyX))
#     multiple = np.zeros(len(polyX))
#     i = j = polyCorners-1
#     for i in range(polyCorners):
#         if(polyY[j]==polyY[i]):
#             constant[i] = polyX[i]
#             multiple[i] = 0
#         else:
#             constant[i] = polyX[i]-(polyY[i]*polyX[j])/(polyY[j]-polyY[i])+(polyY[i]*polyX[i])/(polyY[j]-polyY[i]);
#             multiple[i] = (polyX[j]-polyX[i])/(polyY[j]-polyY[i])
#         j=i
#     return constant, multiple
# def pointInPolygon(polyCorners, polyX, polyY, x, y):
#     constant, multiple = precalc_values(polyCorners, polyX, polyY)
#     i = j = polyCorners - 1
#     oddNodes = False
#     for i in range(polyCorners):
#         if((polyY[i]<y and polyY[j]>=y) or (polyY[j]<y and polyY[i]>=y )):
#             oddNodes^=(y*multiple[i]+constant[i]<x) #Bitwise Exclusive Or
#             # if(polyX[i]+(y-polyY[i])/(polyY[j]-polyY[i])*(polyX[j]-polyX[i])<x):
#                 # oddNodes = not oddNodes

#         j = i
#     return oddNodes

def pointInRec(polyX, polyY, x, y):
    """Return True if point (x, y) lies inside the axis-aligned rectangle.

    Args:
        polyX: sequence (x_min, x_max) — horizontal extent of the rectangle.
        polyY: sequence (y_min, y_max) — vertical extent of the rectangle.
        x, y: coordinates of the point to test.

    Boundaries are inclusive. The boolean result is also usable as 0/1 when
    accumulated with ``+=`` (as done when counting hand keypoints per box).
    """
    # Chained comparisons replace the redundant if/else-return-True/False form.
    return polyX[0] <= x <= polyX[1] and polyY[0] <= y <= polyY[1]

def visualise_entire_image(image, box, hm, cost_matrix, ip, dir_name):
    """Draw matched human-object pairs on *image* and save it to disk.

    Args:
        image: BGR image (numpy array); drawn on in place.
        box: list of [det_index, xyxy ndarray, confidence, class_name] objects.
        hm: list of [pose_index, keypoints, score, xywh box] humans.
        cost_matrix: (len(box), len(hm)) array; entry [i, j] is the number of
            hand keypoints of human j that fall inside object box i.
        ip: zero-based frame index; output file is "<ip+1>_action.jpg".
        dir_name: suffix of the output directory ("action_<dir_name>").
    """
    # For each object pick the human with the most hand keypoints inside the
    # object's box; np.argmax keeps only the first occurrence on ties.
    best_human = np.argmax(cost_matrix, axis=1)
    print(best_human)
    drawn = False
    for i in range(len(best_human)):
        j = best_human[i]
        if cost_matrix[i][j] == 0:
            # No human hand keypoint touches this object box — skip the pair.
            continue
        # Human box is stored as [x, y, w, h]; convert to [x1, y1, x2, y2].
        bx_h = hm[j][3]
        bx_h = [bx_h[0], bx_h[1], bx_h[0] + bx_h[2], bx_h[1] + bx_h[3]]
        # Object box is already [x1, y1, x2, y2].
        bx_o = box[i][1]

        # Human: blue box with pose score just below the top edge.
        cv2.rectangle(image, (int(bx_h[0]), int(bx_h[1])), (int(bx_h[2]), int(bx_h[3])), BLUE, 5)
        cv2.putText(image, f"{hm[j][2]:.2f}", (int(bx_h[0]), int(bx_h[1] + 26)), DEFAULT_FONT, 1, BLUE, 3)

        # Object: orange box, red confidence and class-name labels.
        cv2.rectangle(image, (int(bx_o[0]), int(bx_o[1])), (int(bx_o[2]), int(bx_o[3])), ORANGE, 5)
        cv2.putText(image, f"{box[i][2]:.2f}", (int(bx_o[0]), int(bx_o[1] + 26)), DEFAULT_FONT, 1, RED, 3)
        cv2.putText(image, box[i][3], (int(bx_o[0]), int(bx_o[1])), DEFAULT_FONT, 1, RED, 3)

        # Red line joining the two box centres.
        cv2.line(image,
                 (int(bx_h[0] + bx_h[2]) // 2, int(bx_h[1] + bx_h[3]) // 2),
                 (int(bx_o[0] + bx_o[2]) // 2, int(bx_o[1] + bx_o[3]) // 2),
                 RED, 5)
        drawn = True

    # Write once after all pairs are drawn (the original rewrote the same file
    # on every loop iteration). Preserve the original behaviour of writing
    # nothing when no pair was drawn.
    if drawn:
        cv2.imwrite("/home/airport/HOI/POSE/YOLOV8/action_" + dir_name + "/" + str(ip + 1) + "_action.jpg", image)
    return

dir_name = "100459"
# dir_name = "165953"


def _natural_key(name):
    """Sort key ordering embedded numbers numerically ("2.jpg" < "10.jpg").

    Stdlib replacement for natsort.natsorted: the `import natsort` line is
    commented out at the top of the file, so calling it raised NameError.
    """
    return [int(tok) if tok.isdigit() else tok.lower()
            for tok in re.split(r"(\d+)", name)]


# Frame images produced by the AlphaPose demo. Only the top-level directory is
# wanted; the original os.walk loop kept overwriting im_names with the file
# list of every subdirectory it visited.
demo_dir = "/home/airport/HOI/POSE/AlphaPose/examples/demo_" + dir_name
im_names = sorted(
    (f for f in os.listdir(demo_dir) if os.path.isfile(os.path.join(demo_dir, f))),
    key=_natural_key,
)

# Pose results from /home/airport/HOI/POSE/AlphaPose/scripts/demo_inference.py.
with open("/home/airport/HOI/POSE/AlphaPose/examples/res_" + dir_name + "/alphapose-results.json", 'r') as openfile:
    hm_object = json.load(openfile)

print(type(hm_object), len(hm_object))
print('hm:', hm_object[0].keys(), type(hm_object[0]['keypoints']), len(hm_object[0]['keypoints']))

# Object classes that can be hand-carried and are therefore matched to hands.
CARRIED_CLASSES = {'suitcase', 'backpack', 'handbag', 'cell phone', 'laptop'}

for ip in range(len(im_names)):
    im = cv2.imread("/home/airport/HOI/POSE/AlphaPose/examples/demo_" + dir_name + "/" + im_names[ip])

    # Per-frame object detections from /home/airport/HOI/POSE/YOLOV8/detection.py.
    with open("/home/airport/HOI/POSE/YOLOV8/json_" + dir_name + "/results_det_" + str(ip + 1) + ".json", 'r') as openfile:
        det_object = json.load(openfile)
    print(type(det_object), len(det_object))

    # Keep only carriable objects: [det_index, xyxy box, confidence, name].
    box = []
    for k in range(len(det_object)):
        if det_object[k]['name'] in CARRIED_CLASSES:
            box.append([k,
                        np.array([det_object[k]['box']['x1'], det_object[k]['box']['y1'],
                                  det_object[k]['box']['x2'], det_object[k]['box']['y2']]),
                        det_object[k]['confidence'],
                        det_object[k]['name']])

    # People detected in this frame: [pose_index, keypoints, score, xywh box].
    # AlphaPose emits 136 keypoints (x, y, conf triples); indices 94-114 are
    # the left hand, 115-135 the right hand.
    hm = []
    for ii in range(len(hm_object)):
        if hm_object[ii]['image_id'] == (str(ip + 1) + ".jpg"):
            hm.append([ii, hm_object[ii]['keypoints'], hm_object[ii]['score'], hm_object[ii]['box']])
    print('box hands len:', len(box), len(hm))

    if len(hm) == 0 or len(box) == 0:
        continue

    # cost_matrix[i, j] = number of hand keypoints of person j inside object i.
    cost_matrix = np.zeros((len(box), len(hm)))
    for i in range(len(box)):
        rec_x = np.array([box[i][1][0], box[i][1][2]])
        rec_y = np.array([box[i][1][1], box[i][1][3]])
        for j in range(len(hm)):
            count_hand_points = 0
            for k in range(94, 136):  # hand keypoints only
                x = hm[j][1][k * 3]
                y = hm[j][1][k * 3 + 1]
                count_hand_points += pointInRec(rec_x, rec_y, x, y)
            cost_matrix[i, j] = count_hand_points
            if cost_matrix[i, j] > 0:
                print('pair:', box[i][0], box[i][2], hm[j][0], hm[j][2])

    print(cost_matrix)

    visualise_entire_image(im, box, hm, cost_matrix, ip, dir_name)







# def match_mask_by_iou(mask1, mask2):
#     """
#     Match individual objects in two object masks by Hungarian algorithm.
#     :param mask1: (B, N, K) torch.Tensor.
#     :param mask2: (B, N, K) torch.Tensor.
#     :return:
#         perm: (B, K, K) torch.Tensor, permutation for alignment.
#     """
#     # Transform soft mask to hard segmentation (one-hot)
#     n_batch, _, n_object = mask1.size()
#     segm_pred1 = mask1.argmax(-1).detach()
#     segm_pred2 = mask2.argmax(-1).detach()
#     segm_pred1 = torch.eye(n_object, dtype=torch.float32,
#                            device=segm_pred1.device)[segm_pred1]
#     segm_pred2 = torch.eye(n_object, dtype=torch.float32,
#                            device=segm_pred2.device)[segm_pred2]

#     # Match according to IoU
#     intersection = torch.einsum('bng,bnp->bgp', segm_pred1, segm_pred2)     # (B, K, K)
#     union = torch.sum(segm_pred1, dim=1).unsqueeze(-1) + torch.sum(segm_pred2, dim=1, keepdim=True) - intersection  # (B, K, K)
#     iou = intersection / union.clamp(1e-10)
#     perm = []
#     for b in range(n_batch):
#         iou_score = iou[b].cpu().numpy()
#         _, col_ind = linear_sum_assignment(iou_score, maximize=True)
#         perm.append(col_ind)
#     perm = torch.from_numpy(np.stack(perm, 0))
#     perm = torch.eye(n_object, dtype=torch.float32, device=segm_pred1.device)[perm]
#     return perm