import ast
import base64
import json
from io import BytesIO

import cv2
import matplotlib.pyplot as plt
import numpy as np
import requests
import torch
from numpy.lib.type_check import imag
from PIL import Image
from torch import dtype

from fn import vis_frame


def cvimg_to_qpixmap(cvimg):
    # Convert a BGR OpenCV image (H x W x C uint8 ndarray) into a Qt QPixmap.
    # NOTE(review): QImage/QPixmap are never imported anywhere in this file —
    # calling this raises NameError unless the caller's environment provides
    # them (e.g. `from PyQt5.QtGui import QImage, QPixmap`); confirm and add
    # the proper import.
    height, width, depth = cvimg.shape
    cvimg = cv2.cvtColor(cvimg, cv2.COLOR_BGR2RGB)
    # bytesPerLine = width * depth; assumes the converted buffer is contiguous
    # with no row padding — TODO confirm for non-standard strides.
    cvimg = QImage(cvimg.data, width, height, width * depth, QImage.Format_RGB888)
    qpixmap = QPixmap.fromImage(cvimg)
    return qpixmap


def bytes2cv(bytes_img):
    """Decode an encoded image byte string (JPEG/PNG/...) into an OpenCV ndarray.

    Returns the image exactly as stored (``IMREAD_UNCHANGED`` keeps alpha and
    bit depth), or ``None`` if OpenCV cannot decode the buffer.
    """
    # The original round-tripped frombuffer -> bytearray -> np.array; the
    # buffer is already a uint8 ndarray, so one frombuffer call is enough.
    img_np_arr = np.frombuffer(bytes_img, np.uint8)
    return cv2.imdecode(img_np_arr, cv2.IMREAD_UNCHANGED)


def cv2bytes(im):
    """Encode an OpenCV image as JPEG and return the raw encoded bytes."""
    ok, encoded = cv2.imencode('.jpg', im)
    return encoded.tobytes()


def image_to_base64(image_np):
    """JPEG-encode an OpenCV image and return it as a base64 ``str``.

    The original used ``str(b64encode(...))[2:-1]`` to strip the ``b'...'``
    repr wrapper; decoding the ASCII bytes directly is the correct idiom and
    produces the identical string.
    """
    image = cv2.imencode('.jpg', image_np)[1]
    return base64.b64encode(image).decode('ascii')


def base64_to_image(base64_code):
    """Decode a base64 string back into an OpenCV image (ndarray).

    Returns ``None`` if the buffer is not a decodable image.
    """
    # base64 -> raw encoded bytes
    img_data = base64.b64decode(base64_code)
    # np.fromstring is deprecated for binary data; frombuffer is the
    # supported replacement and avoids a copy.
    img_array = np.frombuffer(img_data, np.uint8)
    # BUG FIX: the original passed cv2.COLOR_RGB2BGR (a color-conversion
    # code) where an imread flag belongs.  IMREAD_COLOR decodes to the
    # 3-channel BGR image the rest of this module expects.
    img = cv2.imdecode(img_array, cv2.IMREAD_COLOR)
    return img


def np_to_str(np_data):
    """Serialize an ndarray into its raw byte representation (C order, no header)."""
    raw = np_data.tobytes()
    return raw


def str_to_np(str_data):
    """Decode raw little-endian float32 bytes into a 1-D float32 ndarray.

    ``np.fromstring`` is deprecated for binary input (removed in NumPy 2.x);
    ``np.frombuffer`` is the supported replacement.  frombuffer returns a
    read-only view, so copy to keep the writable-array contract of the
    original (callers pass the result to ``torch.from_numpy``).
    """
    return np.frombuffer(str_data, dtype=np.float32).copy()


def tensor_to_str(tensor_data):
    """Serialize a CPU tensor into its raw bytes (via its numpy view)."""
    return tensor_data.numpy().tobytes()


def str_to_tensor(str_data):
    """Inverse of tensor_to_str: raw float32 bytes -> 1-D float32 tensor."""
    return torch.from_numpy(str_to_np(str_data))


def tensor_to_list(tensor_data):
    """Convert a CPU tensor into (possibly nested) plain Python lists."""
    return tensor_data.numpy().tolist()


def list_to_tensor(list_data):
    """Build a float32 tensor from (possibly nested) Python lists."""
    return torch.from_numpy(np.asarray(list_data, dtype=np.float32))


def vis_frame_from_str_result(str_result, frame):
    """Parse a serialized pose result and render the keypoints onto *frame*.

    The deserialization loop here duplicated ``str_result_to_real_result``
    line for line; delegate to it instead so the two stay consistent.
    Returns the visualization image produced by ``vis_frame``.
    """
    result = str_result_to_real_result(str_result)

    # vis_frame expects a dict with the source image and the per-human results.
    vis_input = {
        'imgname': frame,
        'result': result,
    }
    return vis_frame(frame, vis_input)


def send_image(frame, url="http://0.0.0.0:8080/"):
    """JPEG/base64-encode *frame* and POST it to the pose service.

    Args:
        frame: OpenCV image (BGR ndarray) or None (returns None immediately).
        url:   Service endpoint; the old hard-coded address is now the default.

    Returns:
        The response body text, or None when frame is None.
    """
    if frame is None:
        return None
    encoded_string = image_to_base64(frame)
    # BUG FIX: the original passed json.dumps(...) to the `json=` parameter,
    # so requests serialized the string AGAIN and the server received a
    # quoted JSON string instead of an object.  Pass the dict itself and let
    # `json=` do the (single) serialization.
    r = requests.post(url=url, json={'image': encoded_string})
    return r.text


def str_result_to_real_result(result):
    """Deserialize a pose-result string into per-human dicts of tensors.

    Each entry's 'keypoints', 'kp_score' and 'proposal_score' lists are
    converted to float32 tensors in place.
    """
    # SECURITY: the string comes back from a remote service; `eval` would
    # execute arbitrary code embedded in the response.  ast.literal_eval
    # parses the same Python literals (lists/dicts/numbers) without that risk.
    result = ast.literal_eval(result)
    for human in result:
        human['keypoints'] = list_to_tensor(human['keypoints'])
        human['kp_score'] = list_to_tensor(human['kp_score'])
        human['proposal_score'] = list_to_tensor(human['proposal_score'])
    return result

if __name__ == '__main__':
    # Round-trip smoke check: tensor -> raw bytes -> tensor keeps the values.
    original = torch.ones((5, 5))
    restored = str_to_tensor(tensor_to_str(original))
    print(original == restored.reshape(5, 5))