import os
import platform
from threading import Thread
from queue import Queue

import cv2
import numpy as np

import torch
import torch.multiprocessing as mp

from alphapose import builder
from alphapose import SimpleTransform, SimpleTransform3DSMPL

# True iff the host platform is Windows.
ARGS_SP = platform.system() == 'Windows'


def wait_and_put(queue, item):
    """Enqueue *item* on *queue*, blocking until space is available."""
    return queue.put(item)


def wait_and_get(queue):
    """Block until an item is available on *queue* and return it."""
    item = queue.get()
    return item


def clear(queue):
    """Drain *queue* without blocking.

    The previous ``while not queue.empty(): queue.get()`` loop is race-prone:
    another consumer can drain the queue between the ``empty()`` check and the
    blocking ``get()``, hanging this call forever — and ``empty()`` is only
    approximate for multiprocessing queues anyway.  ``get_nowait`` raises
    ``queue.Empty`` (also used by ``multiprocessing.Queue``) when nothing is
    left, which is the reliable termination condition.
    """
    from queue import Empty  # same exception type is raised by mp.Queue

    while True:
        try:
            queue.get_nowait()
        except Empty:
            break


class DetectorBase:
    """Shared state for a multi-stage detection pipeline.

    Holds the stop flag and the three buffers that connect the worker stages:
        image_queue: pre-processed images awaiting object detection
        det_queue:   human detection results
        pose_queue:  post-processed cropped human images for pose estimation

    When ``args.sp`` (single-process) is set, workers are threads and the
    queues are ``queue.Queue``; otherwise workers are processes and the stop
    flag / queues are multiprocessing primitives.
    """

    def __init__(self, args, qsize):
        """Create the stop flag and the three queues.

        Args:
            args: option namespace; must at least provide the ``sp`` flag.
            qsize: capacity of ``image_queue``; ``det_queue`` and
                ``pose_queue`` get ten times that capacity.

        Raises:
            ValueError: if ``args`` is None.
        """
        if args is None:
            # The original code called exit(0) here, silently killing the
            # whole process with a *success* status on invalid input.
            # A missing option set is a programming error; fail loudly.
            raise ValueError('args must not be None')
        self.opt = args
        if self.opt.sp:
            # Single-process mode: plain flag and thread-safe queues.
            self._stopped = False
            self.image_queue = Queue(maxsize=qsize)
            self.det_queue = Queue(maxsize=10 * qsize)
            self.pose_queue = Queue(maxsize=10 * qsize)
        else:
            # Multi-process mode: shared boolean and process-safe queues.
            self._stopped = mp.Value('b', False)
            self.image_queue = mp.Queue(maxsize=qsize)
            self.det_queue = mp.Queue(maxsize=10 * qsize)
            self.pose_queue = mp.Queue(maxsize=10 * qsize)

    def read(self):
        """Block until a pose-stage result is available and return it."""
        return wait_and_get(self.pose_queue)

    @property
    def stopped(self):
        """bool: whether the pipeline has been asked to stop."""
        if self.opt.sp:
            return self._stopped
        return self._stopped.value

    def start_subprocess_worker(self, target):
        """Start *target* as a Thread (sp mode) or Process and return it.

        __init__ guarantees ``self.opt`` is not None, so only the ``sp``
        flag needs checking here (the original also re-checked for None).
        """
        worker_cls = Thread if self.opt.sp else mp.Process
        p = worker_cls(target=target, args=())
        p.start()
        return p

    def terminate(self):
        """Raise the stop flag and release pipeline resources."""
        if self.opt.sp:
            self._stopped = True
        else:
            self._stopped.value = True
        self.stop()

    def stop(self):
        """Release pipeline resources (currently: clear all queues)."""
        self.clear_queues()

    def clear_queues(self):
        """Drop any pending items from all three queues."""
        clear(self.image_queue)
        clear(self.det_queue)
        clear(self.pose_queue)

class PoseDetector(DetectorBase):
    """Three-stage image pipeline built on DetectorBase's queues.

    Stages, each run by its own worker started in ``start``:
        image_preprocess  -> image_queue
        image_detection   -> det_queue
        image_postprocess -> pose_queue
    Tuples of ``None`` are used as stop / end-of-stream sentinels between
    stages.
    """

    def __init__(self, detector, args, cfg=None, batchSize=1, queueSize=128, mode='image'):
        """Configure the pipeline.

        Args:
            detector: object providing ``image_preprocess`` and
                ``images_detection``; if None, construction returns early and
                the instance is left partially initialized (callers must not
                use such an object).
            args: option namespace forwarded to DetectorBase; the workers
                also read ``args.tracking`` and, when ``cfg`` is given,
                ``args.device``.
            cfg: model/data configuration; if None, construction returns
                before the transformation is built.
            batchSize: number of images per detection batch.
            queueSize: queue capacity passed to DetectorBase.
            mode: only 'image' is supported (see ``start``).
        """
        super().__init__(args, queueSize)
        if detector is not None:
            self.detector = detector
        else:
            # No detector supplied: abort early, instance stays partial.
            return

        self.img_list = list([])
        self.img_num = 0
        self.num_batches = 0
        self.batchSize = batchSize
        self.mode = mode

        self.cfg = cfg
        if self.cfg is not None:
            self._sigma = cfg.DATA_PRESET.SIGMA
            self._input_size = cfg.DATA_PRESET.IMAGE_SIZE
            self._output_size = cfg.DATA_PRESET.HEATMAP_SIZE
        else:
            # Without a config no transformation can be built; abort early.
            return

        self.device = args.device

        if cfg.DATA_PRESET.TYPE == 'simple':
            pose_dataset = builder.retrieve_dataset(self.cfg.DATASET.TRAIN)
            self.transformation = SimpleTransform(pose_dataset,
                                                  scale_factor=0,
                                                  input_size=self._input_size,
                                                  output_size=self._output_size,
                                                  rot=0,
                                                  sigma=self._sigma,
                                                  train=False,
                                                  add_dpg=False,
                                                  gpu_device=self.device)
        elif cfg.DATA_PRESET.TYPE == 'simple_smpl':
            # TODO: new features
            from easydict import EasyDict as edict
            dummpy_set = edict({
                'joint_pairs_17': None,
                'joint_pairs_24': None,
                'joint_pairs_29': None,
                'bbox_3d_shape': (2.2, 2.2, 2.2)
            })
            self.transformation = SimpleTransform3DSMPL(dummpy_set,
                                                        scale_factor=cfg.DATASET.SCALE_FACTOR,
                                                        color_factor=cfg.DATASET.COLOR_FACTOR,
                                                        occlusion=cfg.DATASET.OCCLUSION,
                                                        input_size=cfg.MODEL.IMAGE_SIZE,
                                                        output_size=cfg.MODEL.HEATMAP_SIZE,
                                                        depth_dim=cfg.MODEL.EXTRA.DEPTH_DIM,
                                                        bbox_3d_shape=(2.2, 2.2, 2.2),
                                                        rot=cfg.DATASET.ROT_FACTOR,
                                                        sigma=cfg.MODEL.EXTRA.SIGMA,
                                                        train=False,
                                                        add_dpg=False,
                                                        loss_type=cfg.LOSS['TYPE'])

    def start(self, frames):
        """Launch the three pipeline workers over *frames*.

        Args:
            frames: list of image file paths to process.

        Returns:
            list of the three started workers (threads or processes).

        Raises:
            ModuleNotFoundError: if ``self.mode`` is not 'image'.
        """
        self.img_list = frames
        self.img_num = len(frames)
        # compute the number of batches (round up for a partial last batch)
        leftover = 0 if self.img_num % self.batchSize == 0 else 1
        self.num_batches = self.img_num // self.batchSize + leftover
        # start a thread to pre process images for object detection
        if self.mode == 'image':
            image_preprocess_worker = self.start_subprocess_worker(self.image_preprocess)
            # start a thread to detect human in images
            image_detection_worker = self.start_subprocess_worker(self.image_detection)
            # start a thread to post process cropped human image for pose estimation
            image_postprocess_worker = self.start_subprocess_worker(self.image_postprocess)
            return [image_preprocess_worker, image_detection_worker, image_postprocess_worker]
        else:
            print("The detection mode is invalid. Please use image.")
            raise ModuleNotFoundError

    def image_preprocess(self):
        """Worker stage 1: load and batch images, feed ``image_queue``.

        Emits one ``(imgs, orig_imgs, im_names, im_dim_list)`` tuple per
        batch, or a 4-None sentinel when asked to stop mid-run.
        """
        if len(self.img_list) <= 0:
            return
        for i in range(self.num_batches):
            imgs, orig_imgs = [], []
            im_names = []
            im_dim_list = []
            for k in range(i * self.batchSize, min((i + 1) * self.batchSize, self.img_num)):
                if self.stopped:
                    # propagate the stop sentinel downstream
                    wait_and_put(self.image_queue, (None, None, None, None))
                    return
                im_name_k = self.img_list[k]

                # expected image shape like (1,3,h,w) or (3,h,w)
                img_k = self.detector.image_preprocess(im_name_k)
                if isinstance(img_k, np.ndarray):
                    img_k = torch.from_numpy(img_k)
                # add one dimension at the front for batch if image shape (3,h,w)
                if img_k.dim() == 3:
                    img_k = img_k.unsqueeze(0)
                orig_img_k = cv2.cvtColor(cv2.imread(im_name_k), cv2.COLOR_BGR2RGB)
                # scipy.misc.imread(im_name_k, mode='RGB') is deprecated
                im_dim_list_k = orig_img_k.shape[1], orig_img_k.shape[0]  # (width, height)

                imgs.append(img_k)
                orig_imgs.append(orig_img_k)
                im_names.append(os.path.basename(im_name_k))
                im_dim_list.append(im_dim_list_k)

            with torch.no_grad():
                # Human Detection
                imgs = torch.cat(imgs)
                # duplicate per-image (w, h) columns -> (w, h, w, h)
                im_dim_list = torch.FloatTensor(im_dim_list).repeat(1, 2)
                # im_dim_list_ = im_dim_list
            wait_and_put(self.image_queue, (imgs, orig_imgs, im_names, im_dim_list))

    def image_detection(self):
        """Worker stage 2: run human detection on each batch.

        Reads batches from ``image_queue`` and writes one tuple per image to
        ``det_queue``: ``(orig_img, im_name, boxes, scores, ids, inps,
        cropped_boxes)``.  A 7-None sentinel signals stop/end-of-stream;
        images with no detections carry Nones in the detection fields.
        """
        for i in range(self.num_batches):
            imgs, orig_imgs, im_names, im_dim_list = wait_and_get(self.image_queue)
            if imgs is None or self.stopped:
                wait_and_put(self.det_queue, (None, None, None, None, None, None, None))
                return

            with torch.no_grad():
                # pad useless images to fill a batch, else there will be a bug
                for pad_i in range(self.batchSize - len(imgs)):
                    imgs = torch.cat((imgs, torch.unsqueeze(imgs[0], dim=0)), 0)
                    im_dim_list = torch.cat((im_dim_list, torch.unsqueeze(im_dim_list[0], dim=0)), 0)

                dets = self.detector.images_detection(imgs, im_dim_list)
                # an int return (or an empty tensor) means nothing was found
                if isinstance(dets, int) or dets.shape[0] == 0:
                    for k in range(len(orig_imgs)):
                        wait_and_put(self.det_queue, (orig_imgs[k], im_names[k], None, None, None, None, None))
                    continue
                if isinstance(dets, np.ndarray):
                    dets = torch.from_numpy(dets)
                dets = dets.cpu()
                # det columns: 0 = index of the source image within the batch,
                # 1:5 = bbox, 5:6 = confidence, 6:7 = track id (tracking only)
                boxes = dets[:, 1:5]
                scores = dets[:, 5:6]
                if self.opt.tracking:
                    ids = dets[:, 6:7]
                else:
                    ids = torch.zeros(scores.shape)

            for k in range(len(orig_imgs)):
                # select the detections belonging to image k of this batch
                boxes_k = boxes[dets[:, 0] == k]
                if isinstance(boxes_k, int) or boxes_k.shape[0] == 0:
                    # no detections for this particular image
                    wait_and_put(self.det_queue, (orig_imgs[k], im_names[k], None, None, None, None, None))
                    continue
                # placeholder buffers, filled in by image_postprocess
                inps = torch.zeros(boxes_k.size(0), 3, *self._input_size)
                cropped_boxes = torch.zeros(boxes_k.size(0), 4)

                wait_and_put(self.det_queue, (
                    orig_imgs[k], im_names[k], boxes_k, scores[dets[:, 0] == k], ids[dets[:, 0] == k], inps,
                    cropped_boxes))

    def image_postprocess(self):
        """Worker stage 3: crop each detected person for pose estimation.

        Reads per-image tuples from ``det_queue``, fills the ``inps`` /
        ``cropped_boxes`` buffers via the transformation, and forwards
        ``(inps, orig_img, im_name, boxes, scores, ids, cropped_boxes)`` to
        ``pose_queue``.  A 7-None sentinel signals stop/end-of-stream.
        """
        for i in range(self.img_num):
            with torch.no_grad():
                (orig_img, im_name, boxes, scores, ids, inps, cropped_boxes) = wait_and_get(self.det_queue)
                if orig_img is None or self.stopped:
                    wait_and_put(self.pose_queue, (None, None, None, None, None, None, None))
                    return
                if boxes is None or boxes.nelement() == 0:
                    # nothing detected in this image; pass it through as-is
                    wait_and_put(self.pose_queue, (None, orig_img, im_name, boxes, scores, ids, None))
                    continue
                # imght = orig_img.shape[0]
                # imgwidth = orig_img.shape[1]
                for idx, box in enumerate(boxes):
                    inps[idx], cropped_box = self.transformation.test_transform(orig_img, box)
                    cropped_boxes[idx] = torch.FloatTensor(cropped_box)
                # inps, cropped_boxes = self.transformation.align_transform(orig_img, boxes)
                wait_and_put(self.pose_queue, (inps, orig_img, im_name, boxes, scores, ids, cropped_boxes))
