import cv2
import numpy as np
import base64
import matplotlib.pyplot as plt
from PIL import Image
from io import BytesIO
import math
from numpy.lib.type_check import imag
import json
import torch
import requests
from PyQt5.QtGui import QImage, QPixmap

def cvimg_to_qpixmap(cvimg):
    """Convert a BGR OpenCV image (H x W x C uint8 array) into a QPixmap."""
    height, width, depth = cvimg.shape
    rgb = cv2.cvtColor(cvimg, cv2.COLOR_BGR2RGB)
    # bytesPerLine = width * channel count, required for non-padded rows.
    qimage = QImage(rgb.data, width, height, width * depth, QImage.Format_RGB888)
    return QPixmap.fromImage(qimage)


def bytes2cv(bytes_img):
    """Decode an encoded image (e.g. JPEG/PNG bytes) into an OpenCV array.

    Args:
        bytes_img: raw encoded image bytes.

    Returns:
        Decoded image as a numpy array (channels as stored in the file),
        or None if OpenCV cannot decode the buffer.
    """
    # np.frombuffer already yields a uint8 array; the original code copied it
    # again through bytearray/np.array for no benefit.
    img_np_arr = np.frombuffer(bytes_img, dtype=np.uint8)
    return cv2.imdecode(img_np_arr, cv2.IMREAD_UNCHANGED)


def cv2bytes(im):
    """Encode an OpenCV image as JPEG and return the encoded bytes."""
    success, encoded = cv2.imencode('.jpg', im)
    return np.array(encoded).tobytes()


def image_to_base64(image_np):
    """JPEG-encode an OpenCV image and return it as a base64 string.

    Args:
        image_np: image as a numpy array (BGR, as used by OpenCV).

    Returns:
        Base64-encoded JPEG data as a plain str.
    """
    image = cv2.imencode('.jpg', image_np)[1]
    # FIX: str(b64)[2:-1] relied on slicing the bytes repr ("b'...'");
    # decoding is the correct way to turn base64 bytes into text.
    return base64.b64encode(image).decode('ascii')


def base64_to_image(base64_code):
    """Decode a base64 string (JPEG/PNG payload) into an OpenCV BGR image.

    Args:
        base64_code: base64-encoded image data (str or bytes).

    Returns:
        Decoded BGR image as a numpy array, or None if decoding fails.
    """
    # base64 decode
    img_data = base64.b64decode(base64_code)
    # FIX: np.fromstring is deprecated for binary input; frombuffer replaces it.
    img_array = np.frombuffer(img_data, np.uint8)
    # FIX: cv2.COLOR_RGB2BGR is a cvtColor code, not an imread flag; the
    # intended behavior (3-channel BGR output) is cv2.IMREAD_COLOR.
    img = cv2.imdecode(img_array, cv2.IMREAD_COLOR)

    return img


def np_to_str(np_data):
    """Serialize a numpy array's contents to raw bytes."""
    raw = np_data.tobytes()
    return raw


def str_to_np(str_data):
    """Deserialize raw bytes (as produced by np_to_str) into a float32 array.

    Args:
        str_data: bytes containing packed little-endian float32 values.

    Returns:
        1-D writable float32 numpy array.
    """
    # FIX: np.fromstring is deprecated for binary data; np.frombuffer is the
    # supported replacement. .copy() keeps the result writable (frombuffer
    # alone returns a read-only view), matching fromstring's old behavior and
    # keeping torch.from_numpy in str_to_tensor warning-free.
    return np.frombuffer(str_data, dtype=np.float32).copy()


def tensor_to_str(tensor_data):
    """Serialize a CPU tensor's contents to raw bytes."""
    # numpy() shares memory with the tensor; tobytes() copies it out.
    return tensor_data.numpy().tobytes()


def str_to_tensor(str_data):
    """Rebuild a float32 tensor from bytes produced by tensor_to_str."""
    return torch.from_numpy(str_to_np(str_data))


def tensor_to_list(tensor_data):
    """Convert a CPU tensor to an equivalently nested Python list."""
    as_array = tensor_data.numpy()
    as_list = as_array.tolist()
    return as_list


def list_to_tensor(list_data):
    """Convert a (possibly nested) Python list of numbers to a float32 tensor."""
    # Go through numpy so the dtype is pinned to float32 regardless of input.
    return torch.from_numpy(np.array(list_data, dtype=np.float32))


def send_image(frame):
    """POST a frame as a base64-encoded JPEG to the local inference service.

    Args:
        frame: BGR image array, or None (in which case nothing is sent).

    Returns:
        The response body text, or None when frame is None.
    """
    if frame is None:
        return None
    encoded_string = image_to_base64(frame)
    url = "http://0.0.0.0:8080/"
    # BUG FIX: the original passed json.dumps(...) to the json= parameter,
    # which double-encodes the payload (the server receives a JSON *string*
    # instead of a JSON object). Pass the dict directly; requests serializes
    # it and sets the Content-Type header.
    r = requests.post(url=url, json={'image': encoded_string})
    return r.text


def vis_frame_from_str_result(str_result, frame):
    """Parse a stringified pose-estimation result and render it onto frame.

    Args:
        str_result: string repr of a list of per-person dicts, each with
            'keypoints', 'kp_score' and 'proposal_score' as nested lists.
        frame: BGR image to draw on.

    Returns:
        The rendered image produced by vis_frame.
    """
    # SECURITY: eval() executes arbitrary code; if str_result comes from an
    # external service (see send_image) and is JSON, switch to json.loads().
    result = eval(str_result)
    for human in result:
        human['keypoints'] = list_to_tensor(human['keypoints'])
        human['kp_score'] = list_to_tensor(human['kp_score'])
        human['proposal_score'] = list_to_tensor(human['proposal_score'])

    # BUG FIX: vis_frame iterates its second argument directly as a sequence
    # of per-person dicts (for human in im_res). The original wrapped the
    # list in {'result': result}, so vis_frame iterated the dict's keys and
    # crashed on human['keypoints']. Pass the list itself.
    img = vis_frame(frame, result)
    return img


def vis_frame(frame, im_res, format='coco'):
    '''
    Render pose keypoints and limbs onto a frame.

    frame: BGR frame image (numpy array)
    im_res: iterable of per-person prediction dicts; each needs 'keypoints'
            (K x 2 tensor of pixel coords) and 'kp_score' (K x 1 tensor)
            -- assumed from the indexing below; confirm against the producer
    format: 'coco' or 'mpii' keypoint layout

    return rendered image (same size as the input frame)

    raises NotImplementedError for any other format
    '''
    if format == 'coco':
        l_pair = [
            (0, 1), (0, 2), (1, 3), (2, 4),  # Head
            (5, 6), (5, 7), (7, 9), (6, 8), (8, 10),
            (17, 11), (17, 12),  # Body
            (11, 13), (12, 14), (13, 15), (14, 16)
        ]

        p_color = [(0, 255, 255), (0, 191, 255), (0, 255, 102), (0, 77, 255), (0, 255, 0),
                   # Nose, LEye, REye, LEar, REar
                   (77, 255, 255), (77, 255, 204), (77, 204, 255), (191,
                                                                    255, 77), (77, 191, 255), (191, 255, 77),
                   # LShoulder, RShoulder, LElbow, RElbow, LWrist, RWrist
                   (204, 77, 255), (77, 255, 204), (191, 77, 255), (77,
                                                                    255, 191), (127, 77, 255), (77, 255, 127),
                   (0, 255, 255)]  # LHip, RHip, LKnee, Rknee, LAnkle, RAnkle, Neck
        line_color = [(0, 215, 255), (0, 255, 204), (0, 134, 255), (0, 255, 50),
                      (77, 255, 222), (77, 196, 255), (77, 135,
                                                       255), (191, 255, 77), (77, 255, 77),
                      (77, 222, 255), (255, 156, 127),
                      (0, 127, 255), (255, 127, 77), (0, 77, 255), (255, 77, 36)]
    elif format == 'mpii':
        # BUG FIX: RED/BLUE/PURPLE were referenced but never defined anywhere
        # in this file, so the 'mpii' branch raised NameError. Define them as
        # BGR tuples (OpenCV channel order); exact shades are a review choice.
        RED = (0, 0, 255)
        BLUE = (255, 0, 0)
        PURPLE = (255, 0, 255)
        l_pair = [
            (8, 9), (11, 12), (11, 10), (2, 1), (1, 0),
            (13, 14), (14, 15), (3, 4), (4, 5),
            (8, 7), (7, 6), (6, 2), (6, 3), (8, 12), (8, 13)
        ]
        p_color = [PURPLE, BLUE, BLUE, RED, RED, BLUE, BLUE, RED,
                   RED, PURPLE, PURPLE, PURPLE, RED, RED, BLUE, BLUE]
        line_color = [PURPLE, BLUE, BLUE, RED, RED, BLUE, BLUE,
                      RED, RED, PURPLE, PURPLE, RED, RED, BLUE, BLUE]
    else:
        raise NotImplementedError

    img = frame
    height, width = img.shape[:2]
    # Draw on a half-resolution copy (faster), upscale back at the end.
    img = cv2.resize(img, (int(width / 2), int(height / 2)))
    for human in im_res:
        part_line = {}
        kp_preds = human['keypoints']
        kp_scores = human['kp_score']
        # Synthesize a 'Neck' keypoint (index 17) as the shoulder midpoint.
        kp_preds = torch.cat((kp_preds, torch.unsqueeze(
            (kp_preds[5, :] + kp_preds[6, :]) / 2, 0)))
        kp_scores = torch.cat((kp_scores, torch.unsqueeze(
            (kp_scores[5, :] + kp_scores[6, :]) / 2, 0)))
        # Draw keypoints
        for n in range(kp_scores.shape[0]):
            if kp_scores[n] <= 0.05:
                continue  # skip low-confidence keypoints
            cor_x, cor_y = int(kp_preds[n, 0]), int(kp_preds[n, 1])
            part_line[n] = (int(cor_x / 2), int(cor_y / 2))
            bg = img.copy()
            cv2.circle(bg, (int(cor_x / 2), int(cor_y / 2)), 2, p_color[n], -1)
            # Alpha-blend the dot: keypoint confidence drives opacity.
            transparency = float(max(0, min(1, kp_scores[n])))
            img = cv2.addWeighted(bg, transparency, img, 1 - transparency, 0)
        # Draw limbs
        for i, (start_p, end_p) in enumerate(l_pair):
            if start_p in part_line and end_p in part_line:
                start_xy = part_line[start_p]
                end_xy = part_line[end_p]
                bg = img.copy()

                X = (start_xy[0], end_xy[0])
                Y = (start_xy[1], end_xy[1])
                mX = np.mean(X)
                mY = np.mean(Y)
                length = ((Y[0] - Y[1]) ** 2 + (X[0] - X[1]) ** 2) ** 0.5
                angle = math.degrees(math.atan2(Y[0] - Y[1], X[0] - X[1]))
                # Limb thickness grows with the endpoints' confidence.
                stickwidth = (kp_scores[start_p] + kp_scores[end_p]) + 1
                # Draw the limb as a filled rotated ellipse between endpoints.
                polygon = cv2.ellipse2Poly((int(mX), int(mY)), (int(
                    length / 2), int(stickwidth)), int(angle), 0, 360, 1)
                cv2.fillConvexPoly(bg, polygon, line_color[i])
                # Blend with opacity proportional to mean endpoint confidence.
                transparency = float(
                    max(0, min(1, 0.5 * (kp_scores[start_p] + kp_scores[end_p]))))
                img = cv2.addWeighted(
                    bg, transparency, img, 1 - transparency, 0)
    img = cv2.resize(img, (width, height), interpolation=cv2.INTER_CUBIC)
    return img


def json_result_to_real_result(json_result):
    """Convert list-valued fields of each human dict to tensors, in place.

    Returns a view over the (mutated) dict's values.
    """
    tensor_fields = ('keypoints', 'kp_score', 'proposal_score')
    for human in json_result.values():
        for field in tensor_fields:
            human[field] = list_to_tensor(human[field])

    return json_result.values()


def real_result_to_list_result(real_result):
    """Convert tensor-valued fields of each human dict to nested lists, in place.

    Returns the same sequence with its dicts mutated.
    """
    for human in real_result:
        for field in ('keypoints', 'kp_score', 'proposal_score'):
            # .numpy().tolist() is exactly what tensor_to_list does.
            human[field] = human[field].numpy().tolist()
    return real_result


if __name__ == '__main__':
    # Smoke test: a tensor survives a bytes round-trip (shape is flattened
    # by serialization, hence the reshape before comparing).
    original = torch.ones((5, 5))
    roundtrip = str_to_tensor(tensor_to_str(original))
    print(original == roundtrip.reshape(5, 5))
