# 1. get result from AlphaPose and YOLOv8
import json
import os
import re

import cv2
import numpy as np

# Colour constants in OpenCV's BGR channel order (NOT RGB):
# e.g. RED is (blue=0, green=0, red=255).
RED = (0, 0, 255)
GREEN = (0, 255, 0)
BLUE = (255, 0, 0)
CYAN = (255, 255, 0)
YELLOW = (0, 255, 255)
ORANGE = (0, 165, 255)
PURPLE = (255, 0, 255)
WHITE = (255, 255, 255)
BLACK = (0, 0, 0)

# Font used for all cv2.putText overlays below.
DEFAULT_FONT = cv2.FONT_HERSHEY_SIMPLEX



def pointInRec(polyX, polyY, x, y):
    """Return True if point (x, y) lies inside (or on the border of) the
    axis-aligned rectangle spanned by polyX = (xmin, xmax) and
    polyY = (ymin, ymax).

    Callers rely on the boolean result being usable in integer arithmetic
    (hand-keypoint counting below), which bool supports natively.
    """
    # Chained comparisons replace the verbose if/return True/return False form.
    return polyX[0] <= x <= polyX[1] and polyY[0] <= y <= polyY[1]

def visualise_entire_image(image, box, hm, cost_matrix, ip, dir_name):
    """Draw the best human-object pair for each object onto ``image`` and
    save the annotated frame.

    Args:
        image: BGR frame (cv2.imread result) to draw on, modified in place.
        box:   list of [det_idx, xyxy ndarray, confidence, class_name]
               object detections (built in the main loop below).
        hm:    list of [pose_idx, keypoints, score, xywh_box] human poses.
        cost_matrix: (len(box), len(hm)) counts of hand keypoints falling
               inside each object box.
        ip:    zero-based frame index; output file is named <ip+1>_action.jpg.
        dir_name: sequence id used in the output directory name.

    The annotated frame is written to action_<dir_name>/<ip+1>_action.jpg,
    but only when at least one pair was drawn (matching the original
    behaviour of writing from inside the loop).
    """
    # For each object row, the column (person) with the most hand keypoints
    # inside the object box. argmax returns the FIRST maximum on ties.
    best_person = np.argmax(cost_matrix, axis=1)
    print(best_person)

    drew_any = False
    for i, j in enumerate(best_person):
        if cost_matrix[i][j] == 0:
            continue  # no hand keypoint overlapped this object; skip it

        # AlphaPose human box is (x, y, w, h); convert to corner form.
        bh = hm[j][3]
        bx_h = [bh[0], bh[1], bh[0] + bh[2], bh[1] + bh[3]]
        bx_o = box[i][1]  # already xyxy

        # Human box + pose score.
        cv2.rectangle(image, (int(bx_h[0]), int(bx_h[1])), (int(bx_h[2]), int(bx_h[3])), BLUE, 5)
        cv2.putText(image, f"{hm[j][2]:.2f}", (int(bx_h[0]), int((bx_h[1] + 26))), DEFAULT_FONT, 1, BLUE, 3)

        # Object box + detection confidence + class name.
        cv2.rectangle(image, (int(bx_o[0]), int(bx_o[1])), (int(bx_o[2]), int(bx_o[3])), ORANGE, 5)
        cv2.putText(image, f"{box[i][2]:.2f}", (int(bx_o[0]), int((bx_o[1] + 26))), DEFAULT_FONT, 1, RED, 3)
        cv2.putText(image, box[i][3], (int(bx_o[0]), int((bx_o[1]))), DEFAULT_FONT, 1, RED, 3)

        # Line connecting the two box centres.
        cv2.line(image,
                 (int(bx_h[0] + bx_h[2]) // 2, int(bx_h[1] + bx_h[3]) // 2),
                 (int(bx_o[0] + bx_o[2]) // 2, int(bx_o[1] + bx_o[3]) // 2),
                 RED, 5)
        drew_any = True

    # Write ONCE after all pairs are drawn. The original called imwrite
    # inside the loop, re-encoding and overwriting the same file per pair;
    # the final file content is identical, and frames with no drawn pair
    # still produce no file.
    if drew_any:
        cv2.imwrite("/home/airport/HOI/POSE/YOLOV8/action_" + dir_name + "/" + str(ip + 1) + "_action.jpg", image)
    return

dir_name = "100459"
# dir_name = "165953"


def _natural_key(name):
    """Sort key ordering embedded integers numerically ('2.jpg' < '10.jpg').

    Assumes all frame names share the same text/number structure (e.g.
    '<N>.jpg'), which holds for the numbered demo frames used here.
    """
    return [int(tok) if tok.isdigit() else tok for tok in re.split(r"(\d+)", name)]


# Collect the demo frame names in natural (numeric) order. The original code
# called natsort.natsorted() without ever importing natsort, which raised a
# NameError at runtime; a stdlib natural-sort key gives the same ordering.
im_names = []
for root, dirs, files in os.walk("/home/airport/HOI/POSE/AlphaPose/examples/demo_" + dir_name):
    im_names = sorted(files, key=_natural_key)

# result from /home/airport/HOI/POSE/AlphaPose/scripts/demo_inference.py
with open("/home/airport/HOI/POSE/AlphaPose/examples/res_" + dir_name + "/alphapose-results.json", 'r') as openfile:
    hm_object = json.load(openfile)

    print(type(hm_object), len(hm_object))
    print('hm:', hm_object[0].keys(), type(hm_object[0]['keypoints']), len(hm_object[0]['keypoints']))

# Hand keypoint slots in the AlphaPose 136-point format:
# indices 94-114 are the left hand, 115-135 the right hand.
HAND_KEYPOINT_RANGE = range(94, 136)

# Object classes considered "carryable" and worth pairing with a person.
CARRYABLE_CLASSES = {"suitcase", "backpack", "handbag", "cell phone", "laptop"}

for ip in range(len(im_names)):
    im = cv2.imread("/home/airport/HOI/POSE/AlphaPose/examples/demo_" + dir_name + "/" + im_names[ip])

    # result from /home/airport/HOI/POSE/YOLOV8/detection.py
    with open("/home/airport/HOI/POSE/YOLOV8/json_" + dir_name + "/results_det_" + str(ip + 1) + ".json", 'r') as openfile:
        det_object = json.load(openfile)
        print(type(det_object), len(det_object))

    # Keep only carryable-object detections, as
    # [det_idx, xyxy box, confidence, class_name].
    box = []
    for k, det in enumerate(det_object):
        if det['name'] in CARRYABLE_CLASSES:
            box.append([k,
                        np.array([det['box']['x1'], det['box']['y1'],
                                  det['box']['x2'], det['box']['y2']]),
                        det['confidence'], det['name']])

    # Poses belonging to the current frame, as
    # [pose_idx, 136-point keypoints, score, xywh box].
    hm = []
    for ii, pose in enumerate(hm_object):
        if pose['image_id'] == (str(ip + 1) + ".jpg"):
            hm.append([ii, pose['keypoints'], pose['score'], pose['box']])
    print('box hands len:', len(box), len(hm))

    if len(hm) == 0 or len(box) == 0:
        continue  # nothing to pair in this frame

    # cost_matrix[i, j] = number of hand keypoints of person j that fall
    # inside object box i.
    cost_matrix = np.zeros((len(box), len(hm)))
    for i in range(len(box)):
        polyX = np.array([box[i][1][0], box[i][1][2]])
        polyY = np.array([box[i][1][1], box[i][1][3]])
        for j in range(len(hm)):
            kps = hm[j][1]  # flattened (x, y, confidence) triples
            count_hand_points = 0
            for k in HAND_KEYPOINT_RANGE:
                count_hand_points += pointInRec(polyX, polyY, kps[k * 3], kps[k * 3 + 1])
            cost_matrix[i, j] = count_hand_points
            if cost_matrix[i, j] > 0:
                print('pair:', box[i][0], box[i][2], hm[j][0], hm[j][2])

    print(cost_matrix)

    visualise_entire_image(im, box, hm, cost_matrix, ip, dir_name)
