from queue import Queue
from Utils import base64_to_image, bytes2cv, image_to_base64
from SPPE.src.utils.img import im_to_torch
import socket
import threading
from yolo.darknet import Darknet
from yolo.preprocess import my_prep_image
from ImageLoader2 import ImageLoader2
import torch
from torch.autograd import Variable
import torch.nn.functional as F
import torchvision.transforms as transforms

import torch.nn as nn
import torch.utils.data
import numpy as np
from opt import opt

from dataloader import ImageLoader, DetectionLoader, DetectionProcessor, DataWriter, Mscoco, crop_from_dets
from yolo.util import write_results, dynamic_write_results
from SPPE.src.main_fast_inference import *

import cv2
import os
import sys
from tqdm import tqdm
import time
from fn import getTime, vis_frame
from threading import Thread
from pPose_nms import pose_nms, write_json

# Global AlphaPose options object shared by every function below; the
# pipeline is hard-wired to the COCO keypoint definition.
args = opt
args.dataset = 'coco'


def f1(image):
    """Preprocess a single image for the YOLO human detector.

    Args:
        image: input image, forwarded unchanged to ``my_prep_image``
            (presumably a decoded frame or a path — confirm against caller).

    Returns:
        tuple: ``(img, orig_img, im_name, im_dim_list)`` where ``img`` is the
        batched detector input tensor, ``orig_img`` and ``im_name`` are
        one-element lists (original image / the ``image`` argument), and
        ``im_dim_list`` is a ``1x4`` FloatTensor of ``(w, h, w, h)``.
    """
    inp_dim = int(opt.inp_dim)
    img_k, orig_img_k, im_dim_list_k = my_prep_image(image, inp_dim)

    # Keep the list-of-one structure so f2 can iterate per-image exactly as
    # the original batched pipeline does.
    img = [img_k]
    orig_img = [orig_img_k]
    im_name = [image]
    im_dim_list = [im_dim_list_k]

    with torch.no_grad():
        # Batch the (single) detector input.
        img = torch.cat(img)
        # (w, h) repeated to (w, h, w, h) — matches the coordinate-rescaling
        # convention used by the detector post-processing in f2.
        im_dim_list = torch.FloatTensor(im_dim_list).repeat(1, 2)

    return (img, orig_img, im_name, im_dim_list)


def f2(det_model, img, orig_img, im_name, im_dim_list):
    """Run YOLO human detection and map boxes back to original image coords.

    Args:
        det_model: Darknet detector (see ``start_det_model``); must expose
            ``net_info['height']`` and be callable with ``CUDA=True``.
        img: batched detector input tensor from ``f1``.
        orig_img: one-element list with the original image.
        im_name: one-element list with the image identifier.
        im_dim_list: Nx4 FloatTensor of (w, h, w, h) per image.

    Returns:
        ``(orig_img_k, im_name_k, boxes, scores, inps, pt1, pt2)``; the last
        five are ``None`` when no person is detected. ``inps``/``pt1``/``pt2``
        are zero tensors pre-allocated for the cropping step in ``f3``.

    NOTE(review): both ``for k`` loops return on their first iteration, so
    only image k=0 is ever processed — fine for the single-image pipeline fed
    by ``f1``, but this silently drops any extra images. If ``orig_img`` were
    empty the function would fall through and implicitly return ``None``.
    """
    det_inp_dim = int(det_model.net_info['height'])
    with torch.no_grad():
        # Human Detection
        img = img.cuda()
        prediction = det_model(img, CUDA=True)
        # NMS process; returns an int sentinel (instead of a tensor) when
        # nothing survives thresholding.
        dets = dynamic_write_results(prediction, opt.confidence,
                                     opt.num_classes, nms=True, nms_conf=opt.nms_thesh)
        if isinstance(dets, int) or dets.shape[0] == 0:
            for k in range(len(orig_img)):
                return (orig_img[k], im_name[k], None, None, None, None, None)

        dets = dets.cpu()
        # Column 0 of dets is the batch index; pick each detection's image dims.
        im_dim_list = torch.index_select(im_dim_list, 0, dets[:, 0].long())
        # Letterbox scale: the factor the image was shrunk by to fit the
        # square det_inp_dim input (min over w and h).
        scaling_factor = torch.min(
            det_inp_dim / im_dim_list, 1)[0].view(-1, 1)

        # coordinate transfer: remove letterbox padding (x on cols 1/3,
        # y on cols 2/4), then rescale to original image coordinates.
        dets[:, [1, 3]] -= (det_inp_dim - scaling_factor *
                            im_dim_list[:, 0].view(-1, 1)) / 2
        dets[:, [2, 4]] -= (det_inp_dim - scaling_factor *
                            im_dim_list[:, 1].view(-1, 1)) / 2

        dets[:, 1:5] /= scaling_factor
        # Clip each box to its image bounds.
        for j in range(dets.shape[0]):
            dets[j, [1, 3]] = torch.clamp(dets[j, [1, 3]], 0.0, im_dim_list[j, 0])
            dets[j, [2, 4]] = torch.clamp(dets[j, [2, 4]], 0.0, im_dim_list[j, 1])
        boxes = dets[:, 1:5]
        scores = dets[:, 5:6]

    for k in range(len(orig_img)):
        # Select detections belonging to image k (batch-index column).
        boxes_k = boxes[dets[:, 0] == k]
        if isinstance(boxes_k, int) or boxes_k.shape[0] == 0:
            return (orig_img[k], im_name[k], None, None, None, None, None)
        # Pre-allocate crop buffers for f3: one (3, H, W) input per box plus
        # the two crop-corner point tensors.
        inps = torch.zeros(boxes_k.size(0), 3, opt.inputResH, opt.inputResW)
        pt1 = torch.zeros(boxes_k.size(0), 2)
        pt2 = torch.zeros(boxes_k.size(0), 2)
        return (orig_img[k], im_name[k], boxes_k, scores[dets[:, 0] == k], inps, pt1, pt2)


def f3(orig_img, im_name, boxes, scores, inps, pt1, pt2):
    """Crop detected person boxes into pose-network input tensors.

    Args:
        orig_img: original image (BGR, as produced by the loader) or None.
        im_name: image identifier.
        boxes, scores: detection boxes/scores from ``f2`` (may be None/empty).
        inps, pt1, pt2: pre-allocated crop buffers from ``f2``.

    Returns:
        ``(inps, orig_img, im_name, boxes, scores, pt1, pt2)``; elements are
        ``None`` when there is no image or no detection to crop.
    """
    with torch.no_grad():
        if orig_img is None:
            # Upstream produced nothing at all.
            return (None, None, None, None, None, None, None)
        if boxes is None or boxes.nelement() == 0:
            # Image exists but no person was detected.
            return (None, orig_img, im_name, boxes, scores, None, None)
        # OpenCV images are BGR; the pose net expects RGB. Crop every box
        # into the fixed-size input buffers.
        inp = im_to_torch(cv2.cvtColor(orig_img, cv2.COLOR_BGR2RGB))
        inps, pt1, pt2 = crop_from_dets(inp, boxes, inps, pt1, pt2)

        return (inps, orig_img, im_name, boxes, scores, pt1, pt2)


def start_pose_model():
    """Construct the SPPE pose model on the GPU in eval mode and return it."""
    pose_dataset = Mscoco()
    # --fast_inference selects the lighter inference network.
    net_cls = InferenNet_fast if args.fast_inference else InferenNet
    model = net_cls(4 * 1 + 1, pose_dataset)
    model.cuda()
    model.eval()
    return model


def start_det_model():
    """Construct the YOLOv3-SPP human detector on the GPU in eval mode."""
    model = Darknet("yolo/cfg/yolov3-spp.cfg")
    model.load_weights('models/yolo/yolov3-spp.weights')
    model.net_info['height'] = opt.inp_dim
    inp_dim = int(model.net_info['height'])
    # Darknet requires the input resolution to be a multiple of 32 (> 32).
    assert inp_dim % 32 == 0
    assert inp_dim > 32
    model.cuda()
    model.eval()
    return model


def prepare_model():
    """Load and return ``(det_model, pose_model)``, both GPU-resident.

    Evaluation order (detector first, then pose net) matches the rest of
    the pipeline.
    """
    return start_det_model(), start_pose_model()


def alphapose_return_image(det_model, pose_model, image):
    """Run detection + pose estimation and return the rendered image.

    Args:
        det_model: YOLO human detector (see ``start_det_model``).
        pose_model: SPPE pose estimator (see ``start_pose_model``).
        image: input image, forwarded to ``f1``.

    Returns:
        The untouched original image when no person is detected; otherwise
        the image with the estimated poses drawn on it by ``vis_frame``.
    """
    img, orig_img, im_name, im_dim_list = f1(image)
    orig_img, im_name, boxes, scores, inps, pt1, pt2 = f2(
        det_model, img, orig_img, im_name, im_dim_list)
    inps, orig_img, im_name, boxes, scores, pt1, pt2 = f3(
        orig_img, im_name, boxes, scores, inps, pt1, pt2)

    # No detection: nothing to render.
    if boxes is None or boxes.nelement() == 0:
        return orig_img

    runtime_profile = {
        'dt': [],
        'pt': [],
        'pn': []
    }

    # Single image => a single iteration; tqdm is kept only so --profile
    # can display timing via set_description.
    im_names_desc = tqdm(range(1))
    batchSize = args.posebatch

    for i in im_names_desc:
        start_time = getTime()
        with torch.no_grad():
            ckpt_time, det_time = getTime(start_time)
            runtime_profile['dt'].append(det_time)

            # Pose estimation, chunked by batchSize to bound GPU memory.
            datalen = inps.size(0)
            num_batches = (datalen + batchSize - 1) // batchSize  # ceil div
            hm = []
            for j in range(num_batches):
                inps_j = inps[j *
                              batchSize:min((j + 1) * batchSize, datalen)].cuda()
                hm.append(pose_model(inps_j))
            hm = torch.cat(hm)
            ckpt_time, pose_time = getTime(ckpt_time)
            runtime_profile['pt'].append(pose_time)
            hm = hm.cpu()

            orig_img = np.array(orig_img, dtype=np.uint8)
            # Heatmaps -> keypoint coordinates, then pose-level NMS over
            # overlapping person proposals. (boxes cannot be None here —
            # guarded above.)
            preds_hm, preds_img, preds_scores = getPrediction(
                hm, pt1, pt2, opt.inputResH, opt.inputResW, opt.outputResH, opt.outputResW)
            result = pose_nms(boxes, scores, preds_img, preds_scores)
            result = {
                'imgname': im_name,
                'result': result
            }

            # vis_frame renders the prediction results onto the image.
            return_img = vis_frame(orig_img, result)

            ckpt_time, post_time = getTime(ckpt_time)
            runtime_profile['pn'].append(post_time)

        if args.profile:
            # TQDM
            im_names_desc.set_description(
                'det time: {dt:.3f} | pose time: {pt:.2f} | post processing: {pn:.4f}'.format(
                    dt=np.mean(runtime_profile['dt']), pt=np.mean(runtime_profile['pt']),
                    pn=np.mean(runtime_profile['pn']))
            )

    print('===========================> Finish Model Running.')
    if (args.save_img or args.save_video) and not args.vis_fast:
        print('===========================> Rendering remaining images in the queue...')
        print(
            '===========================> If this step takes too long, you can enable the --vis_fast flag to use fast rendering (real-time).')

    return return_img


def alphapose_return_coordinate_and_scores(det_model, pose_model, image):
    """Run detection + pose estimation and return the raw pose results.

    Args:
        det_model: YOLO human detector (see ``start_det_model``).
        pose_model: SPPE pose estimator (see ``start_pose_model``).
        image: input image, forwarded to ``f1``.

    Returns:
        The untouched original image when no person is detected; otherwise
        the direct output of ``pose_nms`` — NOTE(review): presumably one
        entry per person with keypoint coordinates and keypoint scores;
        confirm the exact structure against ``pPose_nms.pose_nms``.
    """
    img, orig_img, im_name, im_dim_list = f1(image)
    orig_img, im_name, boxes, scores, inps, pt1, pt2 = f2(
        det_model, img, orig_img, im_name, im_dim_list)
    inps, orig_img, im_name, boxes, scores, pt1, pt2 = f3(
        orig_img, im_name, boxes, scores, inps, pt1, pt2)

    # No detection: mirror alphapose_return_image and hand back the image.
    if boxes is None or boxes.nelement() == 0:
        return orig_img

    batchSize = args.posebatch
    with torch.no_grad():
        # Pose estimation, chunked by batchSize to bound GPU memory.
        datalen = inps.size(0)
        num_batches = (datalen + batchSize - 1) // batchSize  # ceil div
        hm = []
        for j in range(num_batches):
            inps_j = inps[j *
                          batchSize:min((j + 1) * batchSize, datalen)].cuda()
            hm.append(pose_model(inps_j))
        hm = torch.cat(hm).cpu()

        # Heatmaps -> keypoint coordinates, then pose-level NMS over
        # overlapping person proposals. (boxes cannot be None here —
        # guarded above.)
        preds_hm, preds_img, preds_scores = getPrediction(
            hm, pt1, pt2, opt.inputResH, opt.inputResW, opt.outputResH, opt.outputResW)
        result = pose_nms(boxes, scores, preds_img, preds_scores)

    return result

