# 1. get result from AlphaPose and YOLOv8
import json
# import torch
# import torch.nn as nn
# import torch.nn.functional as F
import numpy as np
# from scipy.optimize import linear_sum_assignment
# import pocket
# from PIL import Image, ImageDraw
import cv2
import os
# import natsort
# import matplotlib.pyplot as plt
# import matplotlib.patches as patches
# import matplotlib.patheffects as peff

# Colour constants in OpenCV's BGR channel order (not RGB).
RED = (0, 0, 255)
GREEN = (0, 255, 0)
BLUE = (255, 0, 0)
CYAN = (255, 255, 0)
YELLOW = (0, 255, 255)
ORANGE = (0, 165, 255)
PURPLE = (255, 0, 255)
WHITE = (255, 255, 255)
BLACK = (0, 0, 0)

# Default font for cv2.putText labels.
DEFAULT_FONT = cv2.FONT_HERSHEY_SIMPLEX


def pointInRec(polyX, polyY, x, y):
    """Return True if point (x, y) lies inside the axis-aligned rectangle.

    Args:
        polyX: pair (x_min, x_max) of the rectangle's horizontal bounds.
        polyY: pair (y_min, y_max) of the rectangle's vertical bounds.
        x, y: coordinates of the point to test; the boundary counts as inside.

    Returns:
        bool: containment result (callers also sum these as 0/1 counts).
    """
    # Chained comparisons return the boolean directly; no if/else needed.
    return polyX[0] <= x <= polyX[1] and polyY[0] <= y <= polyY[1]

def visualise_entire_image(image, box, hm, cost_matrix, current_id, ip, dir_name):
    """Draw every matched human-object pair onto ``image`` and save it once.

    Args:
        image: BGR image; drawn on in place.
        box: object detections as [orig_index, xyxy ndarray, confidence, name].
        hm: human detections as [index, keypoints, score, (x, y, w, h) box].
        cost_matrix: (n_objects, n_humans) counts of hand keypoints per pair.
        current_id: sub-directory name under ``dir_name`` for the output file.
        ip: frame id used in the output filename.
        dir_name: output root; assumed to end with a path separator — TODO confirm.
    """
    # For each object, pick the human with the most hand keypoints inside it.
    # np.argmax keeps the first occurrence on ties, so a human in a lower
    # position is not preferred.
    best_human = np.argmax(cost_matrix, axis=1)
    drew_any = False
    for i, j in enumerate(best_human):
        if cost_matrix[i][j] == 0:
            continue  # no hand-keypoint overlap: not a real match
        # Human box is stored as (x, y, w, h); convert to (x1, y1, x2, y2).
        hb = hm[j][3]
        hb = [hb[0], hb[1], hb[0] + hb[2], hb[1] + hb[3]]
        ob = box[i][1]  # already (x1, y1, x2, y2)

        # Human: blue box with the pose score just below the top edge.
        cv2.rectangle(image, (int(hb[0]), int(hb[1])), (int(hb[2]), int(hb[3])), BLUE, 5)
        cv2.putText(image, f"{hm[j][2]:.2f}", (int(hb[0]), int(hb[1] + 26)), DEFAULT_FONT, 1, BLUE, 2)

        # Object: orange box, red confidence and class-name labels.
        cv2.rectangle(image, (int(ob[0]), int(ob[1])), (int(ob[2]), int(ob[3])), ORANGE, 5)
        cv2.putText(image, f"{box[i][2]:.2f}", (int(ob[0]), int(ob[1] + 26)), DEFAULT_FONT, 1, RED, 2)
        cv2.putText(image, box[i][3], (int(ob[0]), int(ob[1])), DEFAULT_FONT, 1, RED, 2)

        # Connect the two box centres with a red line.
        cv2.line(image,
                 (int(hb[0] + hb[2]) // 2, int(hb[1] + hb[3]) // 2),
                 (int(ob[0] + ob[2]) // 2, int(ob[1] + ob[3]) // 2),
                 RED, 5)
        drew_any = True

    # Write once after drawing all pairs; the original called cv2.imwrite on
    # every loop iteration, rewriting the same file repeatedly. Skipping the
    # write when nothing matched preserves the original's behaviour of never
    # creating the file in that case.
    if drew_any:
        cv2.imwrite(f"{dir_name}{current_id}/match_action_{ip}.jpg", image)
    return

import copy
def pose_match(image, hm_object, det_object, current_id, frame_id, out_dir):
    """Pair detected objects with humans via hand-keypoint containment.

    An object and a human are matched when at least one of the human's hand
    keypoints falls inside the object's bounding box. AlphaPose's 136-point
    format is assumed: indices 94-114 are left-hand points and 115-135 are
    right-hand points, stored as flat (x, y, confidence) triples.

    Args:
        image: BGR frame; a copy is annotated and saved for visualisation.
        hm_object: AlphaPose results — dicts with 'keypoints', 'score' and a
            'box' in (x, y, w, h) format.
        det_object: YOLOv8 results — dicts with 'name', 'confidence' and a
            'box' dict keyed 'x1'/'y1'/'x2'/'y2'.
        current_id: id used to build the output sub-directory.
        frame_id: frame index used in the output filename.
        out_dir: root output directory for the visualisation.

    Returns:
        List of {'person': ..., 'object': ...} dicts. Each person dict has
        'keypoints' removed, 'name' set to 'person', and its box converted
        to (x1, y1, x2, y2).
    """
    ip = frame_id
    im = copy.deepcopy(image)

    # Non-person detections as [orig_index, xyxy ndarray, confidence, name].
    box = []
    for k in range(len(det_object)):
        if det_object[k]['name'] != 'person':
            box.append([
                k,
                np.array([det_object[k]['box']['x1'], det_object[k]['box']['y1'],
                          det_object[k]['box']['x2'], det_object[k]['box']['y2']]),
                det_object[k]['confidence'],
                det_object[k]['name'],
            ])

    # Humans as [index, 136-point keypoint list, score, (x, y, w, h) box].
    hm = [[ii, hm_object[ii]['keypoints'], hm_object[ii]['score'], hm_object[ii]['box']]
          for ii in range(len(hm_object))]

    cost_matrix = np.zeros((len(box), len(hm)))
    pairs = []

    for i in range(len(box)):
        polyX = np.array([box[i][1][0], box[i][1][2]])
        polyY = np.array([box[i][1][1], box[i][1][3]])
        for j in range(len(hm)):
            # Count hand keypoints (indices 94-135) inside the object box.
            count_hand_points = 0
            for k in range(94, 136):
                x = hm[j][1][k * 3]
                y = hm[j][1][k * 3 + 1]
                count_hand_points += pointInRec(polyX, polyY, x, y)
            cost_matrix[i, j] = count_hand_points
            if cost_matrix[i, j] > 0:
                # Deep-copy only the matched person dict (the original
                # deep-copied the entire hm_object list on every match)
                # and drop its bulky 'keypoints' entry.
                person = {key: value
                          for key, value in copy.deepcopy(hm_object[hm[j][0]]).items()
                          if key != 'keypoints'}
                person['name'] = 'person'
                # Convert the box from (x, y, w, h) to (x1, y1, x2, y2).
                person['box'][2] = person['box'][0] + person['box'][2]
                person['box'][3] = person['box'][1] + person['box'][3]
                pairs.append({
                    'person': person,
                    'object': det_object[box[i][0]],
                })

    visualise_entire_image(im, box, hm, cost_matrix, current_id, ip, out_dir)
    return pairs