#!/usr/bin/env python3.9
# coding=utf-8

"""
Copyright (c) Huawei Technologies Co., Ltd. 2020-2022. All rights reserved.
Description: python api test.
Author: MindX SDK
Create: 2022
History: NA
"""

import logging
import time
from multiprocessing import JoinableQueue, Queue
import traceback
import numpy as np
import cv2
from mindx.sdk import base
from mindx.sdk.base import Image, ImageProcessor, Size, Tensor

from utils import body_post_process

logging.basicConfig(level=logging.INFO, format='%(asctime)s - %(levelname)s - %(message)s')
logger = logging.getLogger(__name__)


class BodyDetection(object):
    """Body-detection stage of the pipeline.

    Consumes decoded frames from an input queue, runs DVPP preprocessing and
    batched detection inference, post-processes the bounding boxes and fans the
    per-image results out to the recognition, re-id and tracking queues.
    """

    def __init__(self, det_model_path, batch_size, device_id, pid):
        self.det_model = base.model(det_model_path, deviceId=device_id)
        self.image_processor = ImageProcessor(device_id)  # mxBase DVPP image processor
        self.batch_size = batch_size
        self.device_id = device_id
        self.pid = pid

        # Seconds to wait on the input queue before flushing the partial
        # batch / shutting the worker down.
        self.INFER_BREAK_WAIT_TIME = 5

    def det_preprocess(self, image_ori, image_ori_height, image_ori_width):
        """Preprocess one frame for the detection model.

        Args:
            image_ori: NHWC ndarray holding one (possibly padded) frame.
            image_ori_height: true frame height in pixels.
            image_ori_width: true frame width in pixels.

        Returns:
            Tuple of (model input, NCHW float array with letterbox padding;
            original image cropped to its true size, NCHW).
        """
        decode_image = Image(image_ori[0], base.rgb)
        decode_image.set_original_size(Size(image_ori_width, image_ori_height))
        # Crop away any alignment padding beyond the true frame, then NHWC -> NCHW.
        image_ori = image_ori[:, :image_ori_height, :image_ori_width, :]
        image_ori = image_ori.transpose((0, 3, 1, 2))

        # Resize on the device via DVPP using letterbox geometry.
        decode_image.to_device(self.device_id)
        resize_tuple, pad_tuple = body_post_process.letterbox(decode_image)
        resize_conf = Size(resize_tuple[0], resize_tuple[1])
        decode_image = self.image_processor.resize(decode_image, resize_conf,
                                                   base.bilinear_similar_opencv)
        decode_image.to_host()

        # Back to ndarray; crop to the valid region and add the letterbox border
        # (constant gray fill, value 112 per channel).
        image_src = np.array(decode_image.to_tensor())  # NHWC
        image_src = image_src[:, :decode_image.original_height, :decode_image.original_width, :]
        image_src = cv2.copyMakeBorder(image_src[0], pad_tuple[0], pad_tuple[1],
                                       pad_tuple[2], pad_tuple[3],
                                       cv2.BORDER_CONSTANT, value=(112, 112, 112))
        image_src = np.expand_dims(image_src, axis=0).transpose((0, 3, 1, 2))  # NCHW
        return image_src, image_ori

    def _dispatch_batch(self, batch_array, img_ori_ndarray_list, current_num,
                        post_process_q_pro, post_process_q_pro_1, post_process_q_track):
        """Run detection on one prepared batch and push results downstream.

        ``batch_array`` has shape (batch, 1, C, H, W); only the first
        ``current_num`` entries are real samples, the rest is zero padding.
        """
        img_mxtensor = Tensor(np.squeeze(batch_array, axis=1))
        img_mxtensor.to_device(self.device_id)

        output_tensors = self.det_model.infer([img_mxtensor])  # list with 4 arrays
        for i, output_tensor in enumerate(output_tensors):
            output_tensor.to_host()
            # Drop outputs produced by the zero-padding samples.
            output_tensors[i] = np.array(output_tensor)[:current_num]

        all_bboxes = body_post_process.det_postprocess(
            output_tensors[0], batch_array[:current_num], img_ori_ndarray_list)
        logger.debug("process1_infer post process finished.")
        rs = body_post_process.get_rs_from_box(all_bboxes, img_ori_ndarray_list)
        for res in rs:
            post_process_q_pro.put(res[0])
            post_process_q_pro_1.put(res[0])
            post_process_q_track.put(res)
        logger.info("process1_infer image put into rec queue")

    def infer(self, input_tensor_q_con: JoinableQueue, post_process_q_pro: Queue,
              post_process_q_pro_1: Queue, post_process_q_track: Queue):
        """Main worker loop: batch incoming frames and run detection.

        Returns when the input queue stays empty for INFER_BREAK_WAIT_TIME
        seconds; a final partial batch is zero-padded to ``batch_size`` and
        flushed before returning.
        """
        try:
            logger.info(
                "======================================== Start body_detection ========================================")
            img_ndarray_list = []
            img_ori_ndarray_list = []

            count = 0
            while True:
                try:
                    logger.debug("body_detect_q_size: %s", input_tensor_q_con.qsize())
                    _, image, image_ori_height, image_ori_width = input_tensor_q_con.get(
                        timeout=self.INFER_BREAK_WAIT_TIME)
                except Exception:
                    if len(img_ndarray_list) > 0:
                        # Timed out with pending samples: pad the final partial
                        # batch with zeros up to the fixed model batch size.
                        logger.info("process1_infer dynamic batch branch.")
                        current_num = len(img_ndarray_list)
                        pad_shape = (self.batch_size - current_num,) + img_ndarray_list[-1].shape
                        pad_zeros = np.zeros(shape=pad_shape)
                        batch_array = np.concatenate(
                            (np.array(img_ndarray_list), pad_zeros), axis=0).astype(np.float32)
                        self._dispatch_batch(batch_array, img_ori_ndarray_list, current_num,
                                             post_process_q_pro, post_process_q_pro_1,
                                             post_process_q_track)
                        logger.info("process1_infer det model infer dynamic %d samples finished.", count)
                        img_ndarray_list = []
                        img_ori_ndarray_list = []
                    else:
                        # No input and timed out: worker is done.
                        logger.info("process1_infer pid:%s wait time out, break", self.pid)
                        logger.info("process1_infer pid:%s finished. get image cnt:%d", self.pid, count)
                        return
                else:
                    count += 1
                    image_src, image_ori = self.det_preprocess(image, image_ori_height, image_ori_width)

                    # Scale pixels to [0, 1]; keep NCHW float32 RGB layout.
                    img_ndarray = image_src.astype(np.float32) / 255.
                    img_ndarray_list.append(img_ndarray)
                    img_ori_ndarray_list.append(image_ori)

                    # Wait until a full batch has accumulated.
                    if len(img_ori_ndarray_list) < self.batch_size:
                        continue

                    self._dispatch_batch(np.array(img_ndarray_list), img_ori_ndarray_list,
                                         self.batch_size, post_process_q_pro,
                                         post_process_q_pro_1, post_process_q_track)
                    logger.info("process1_infer det model infer %d finished.", count)
                    img_ndarray_list = []
                    img_ori_ndarray_list = []
        except Exception:
            logger.exception("process1_infer failed")


class BodyRec(object):
    """Body attribute-recognition stage of the pipeline.

    Consumes body crops from a queue, normalizes them, runs the recognition
    model in fixed-size batches and applies the sigmoid post-process.
    """

    # ImageNet normalization statistics, applied after the [0, 1] rescale.
    _MEAN = np.array([0.485, 0.456, 0.406]).reshape(3, 1, 1)
    _STD = np.array([0.229, 0.224, 0.225]).reshape(3, 1, 1)

    def __init__(self, rec_model_path, batch_size, device_id, pid):
        self.rec_model = base.model(rec_model_path, deviceId=device_id)
        self.batch_size = batch_size
        self.device_id = device_id
        self.pid = pid

        # Seconds to wait on the input queue before flushing/stopping.
        self.INFER_BREAK_WAIT_TIME = 10

    def rec_preprocess(self, img):
        """Preprocess one HWC body crop for the recognition model.

        Resizes to 192x256, reverses the channel order (presumably
        BGR -> RGB for OpenCV-decoded input — confirm against caller),
        converts HWC -> CHW and standardizes with ImageNet statistics.

        Returns:
            float32 ndarray of shape (1, 3, 256, 192) (NCHW).
        """
        height = 256
        width = 192
        img = cv2.resize(img, (width, height))
        img = img[:, :, ::-1].transpose(2, 0, 1)  # reverse channels, HWC -> CHW

        # Rescale to [0, 1], then standardize.
        img = img / 255
        img = (img - self._MEAN) / self._STD

        img = img.astype(np.float32)
        return np.expand_dims(img, axis=0)  # NCHW

    def _infer_batch(self, batch_array, valid_num):
        """Run recognition on one (possibly zero-padded) batch.

        ``batch_array`` has shape (batch, 1, C, H, W); only the first
        ``valid_num`` entries are real samples. Returns the per-output
        sigmoid results for the real samples only.
        """
        img_tensor = Tensor(np.squeeze(batch_array, axis=1))
        img_tensor.to_device(self.device_id)

        output_tensors = self.rec_model.infer([img_tensor])
        sigmoid_results = []
        for i, output_tensor in enumerate(output_tensors):
            output_tensor.to_host()
            # Drop outputs produced by the zero-padding samples (keeps this
            # stage consistent with BodyDetection's dynamic branch).
            output_tensors[i] = np.array(output_tensor)[:valid_num]
            sigmoid_results.append(body_post_process.rec_postprocess(output_tensors[i]))
        return sigmoid_results

    def infer(self, post_process_q_con: Queue):
        """Main worker loop: batch incoming crops and run recognition.

        Returns when the input queue stays empty for INFER_BREAK_WAIT_TIME
        seconds; a final partial batch is zero-padded to ``batch_size`` and
        flushed before returning. ``None`` queue items are skipped.
        """
        try:
            logger.info(
                "======================================== Start pattern_rec ========================================")
            count = 0
            img_arr_list = []

            while True:
                try:
                    img_ndarray = post_process_q_con.get(timeout=self.INFER_BREAK_WAIT_TIME)
                    if img_ndarray is None:
                        continue
                except Exception:
                    if len(img_arr_list) > 0:
                        # Timed out with pending samples: zero-pad and flush.
                        logger.info("process2_pattern_rec enter dynamic branch.")
                        current_num = len(img_arr_list)
                        pad_shape = (self.batch_size - current_num,) + img_arr_list[-1].shape
                        pad_zeros = np.zeros(shape=pad_shape)
                        batch_array = np.concatenate(
                            (np.array(img_arr_list), pad_zeros), axis=0).astype(np.float32)
                        self._infer_batch(batch_array, current_num)
                        logger.debug("process2_pattern_rec dynamic infer finished infer %d samples.", count)
                        img_arr_list = []
                    else:
                        # No input and timed out: worker is done.
                        logger.info("process2_pattern_rec pid:%s wait time out, break", self.pid)
                        logger.info("BodyRec process2_pattern_rec pid:%s finished. get image cnt:%d",
                                    self.pid, count)
                        return
                else:
                    count += 1
                    img_arr_list.append(self.rec_preprocess(img_ndarray))  # HWC -> NCHW

                    # Wait until a full batch has accumulated.
                    if len(img_arr_list) < self.batch_size:
                        continue

                    self._infer_batch(np.array(img_arr_list), self.batch_size)
                    logger.info("process2_pattern_rec infer finished %d samples.", count)
                    img_arr_list = []
        except Exception:
            logger.exception("process2_pattern_rec failed")


class BodyReid(object):
    """Body re-identification stage of the pipeline.

    Consumes body crops from a queue, resizes them and runs the re-id model
    in fixed-size batches.
    """

    def __init__(self, reid_model_path, batch_size, device_id, pid):
        self.reid_model = base.model(reid_model_path, deviceId=device_id)
        self.batch_size = batch_size
        self.device_id = device_id
        self.pid = pid

        # Seconds to wait on the input queue before flushing/stopping.
        self.INFER_BREAK_WAIT_TIME = 10

    def reid_preprocess(self, img):
        """Preprocess one HWC body crop for the re-id model.

        Resizes to 128x256 and reverses the channel order (presumably
        BGR -> RGB for OpenCV-decoded input — confirm against caller).
        No mean/std normalization is applied, unlike the recognition stage.

        Returns:
            float32 ndarray of shape (1, 3, 256, 128) (NCHW).
        """
        height = 256
        width = 128
        img = cv2.resize(img, (width, height))
        img = img[:, :, ::-1].transpose(2, 0, 1)  # reverse channels, HWC -> CHW

        img = img.astype(np.float32)
        return np.expand_dims(img, axis=0)  # NCHW

    def _infer_batch(self, batch_array, valid_num):
        """Run re-id on one (possibly zero-padded) batch.

        ``batch_array`` has shape (batch, 1, C, H, W); only the first
        ``valid_num`` entries are real samples. Returns the host-side output
        arrays trimmed to the real samples.
        """
        img_tensor = Tensor(np.squeeze(batch_array, axis=1))
        img_tensor.to_device(self.device_id)

        output_tensors = self.reid_model.infer([img_tensor])
        for i, output_tensor in enumerate(output_tensors):
            output_tensor.to_host()
            # Drop outputs produced by the zero-padding samples (keeps this
            # stage consistent with BodyDetection's dynamic branch).
            output_tensors[i] = np.array(output_tensor)[:valid_num]
        return output_tensors

    def infer(self, post_process_q_con: Queue):
        """Main worker loop: batch incoming crops and run re-identification.

        Returns when the input queue stays empty for INFER_BREAK_WAIT_TIME
        seconds; a final partial batch is zero-padded to ``batch_size`` and
        flushed before returning. ``None`` queue items are skipped.
        """
        try:
            logger.info(
                "======================================== Start body_reid ========================================")
            count = 0
            img_arr_list = []

            while True:
                try:
                    img_ndarray = post_process_q_con.get(timeout=self.INFER_BREAK_WAIT_TIME)
                    if img_ndarray is None:
                        continue
                except Exception:
                    if len(img_arr_list) > 0:
                        # Timed out with pending samples: zero-pad and flush.
                        logger.info("process3_reid enter dynamic branch.")
                        current_num = len(img_arr_list)
                        pad_shape = (self.batch_size - current_num,) + img_arr_list[-1].shape
                        pad_zeros = np.zeros(shape=pad_shape)
                        batch_array = np.concatenate(
                            (np.array(img_arr_list), pad_zeros), axis=0).astype(np.float32)
                        self._infer_batch(batch_array, current_num)
                        logger.debug("process3_reid dynamic infer finished infer %d samples.", count)
                        img_arr_list = []
                    else:
                        # No input and timed out: worker is done.
                        logger.info("process3_reid pid:%s wait time out, break", self.pid)
                        logger.info("BodyReid process3_reid pid:%s finished. get image cnt:%d",
                                    self.pid, count)
                        return
                else:
                    count += 1
                    img_arr_list.append(self.reid_preprocess(img_ndarray))  # NCHW

                    # Wait until a full batch has accumulated.
                    if len(img_arr_list) < self.batch_size:
                        continue

                    self._infer_batch(np.array(img_arr_list), self.batch_size)
                    logger.info("process3_reid infer finished %d samples.", count)
                    img_arr_list = []
        except Exception:
            logger.exception("process3_reid failed")


def body_detect(det_model_path, batch_size, device_id, pid,
                input_tensor_q_con: JoinableQueue,
                post_process_q_pro: Queue,
                post_process_q_pro_1: Queue,
                post_process_q_track: Queue):
    """Process entry point: build a BodyDetection worker and run its loop."""
    worker = BodyDetection(det_model_path, batch_size, device_id, pid)
    worker.infer(input_tensor_q_con, post_process_q_pro,
                 post_process_q_pro_1, post_process_q_track)
    

def body_recognize(rec_model_path, batch_size, device_id, pid,
                   post_process_q_con: Queue):
    """Process entry point: build a BodyRec worker and run its loop."""
    worker = BodyRec(rec_model_path, batch_size, device_id, pid)
    worker.infer(post_process_q_con)


def body_reid(reid_model_path,  batch_size, device_id, pid,
              post_process_q_con: Queue):
    """Process entry point: build a BodyReid worker and run its loop."""
    worker = BodyReid(reid_model_path, batch_size, device_id, pid)
    worker.infer(post_process_q_con)
