#!/usr/bin/env python3.9
# coding=utf-8

"""
Copyright (c) Huawei Technologies Co., Ltd. 2020-2022. All rights reserved.
Description: python api test.
Author: MindX SDK
Create: 2022
History: NA
"""

import logging
import time
import traceback
from multiprocessing import Queue
from queue import Empty

import numpy as np
from sklearn import preprocessing

from mindx.sdk import base
from mindx.sdk.base import Color, Dim, Image, ImageProcessor, Size, Tensor

from utils import face_post_process, timer

logging.basicConfig(level=logging.INFO, format='%(asctime)s - %(levelname)s - %(message)s')
logger = logging.getLogger(__name__)


class PortraitDection(object):
    """Portrait (face) detection stage of the pipeline.

    Consumes decoded frames from a queue, runs batched RetinaFace detection
    on the NPU, and pushes aligned face crops to the downstream
    feature-extraction and tracking queues.
    """

    def __init__(self, det_model_path, batch_size, device_id, pid):
        """
        Args:
            det_model_path: path to the detection (RetinaFace) model file.
            batch_size: number of frames accumulated before one batched infer.
            device_id: NPU device the model and DVPP image processor run on.
            pid: worker process id, used only in log messages.
        """
        self.det_model = base.model(det_model_path, deviceId=device_id)
        self.image_processor = ImageProcessor(device_id)  # mxbase DVPP image processor
        self.batch_size = batch_size
        self.device_id = device_id
        self.pid = pid
        self.mytimer = timer.Timer(batch_size)

        # Fixed model input resolution (width x height).
        self.MODEL_INPUT_WIDTH = 960
        self.MODEL_INPUT_HEIGHT = 960
        # Seconds to wait on the input queue before flushing / stopping.
        self.INFER_BREAK_WAIT_TIME = 5
        self.USE_BENCHMARK = False

    def det_preprocess(self, image_ori, image_ori_height, image_ori_width):
        """Resize and pad one decoded frame to the model input size via DVPP.

        Args:
            image_ori: decoded frame array; image_ori[0] is wrapped into a
                DVPP Image (assumes NHWC layout with N == 1 -- TODO confirm).
            image_ori_height: valid height of the frame (the decoder may
                deliver an aligned, oversized buffer).
            image_ori_width: valid width of the frame.

        Returns:
            (image_src, scale_factor, image_ori): the resized+padded frame in
            NCHW, the resize scale factor, and the cropped original in NCHW.
        """
        decode_image = Image(image_ori[0], base.rgb)
        decode_image.set_original_size(Size(image_ori_width, image_ori_height))

        # Crop away decoder alignment padding, then NHWC -> NCHW.
        image_ori = image_ori[:, :image_ori_height, :image_ori_width, :]
        image_ori = image_ori.transpose((0, 3, 1, 2))

        # Resize and pad on the device by DVPP.
        decode_image.to_device(self.device_id)
        resize_tuple, pad_tuple, scale_factor = face_post_process.resize_factor(
            decode_image,
            resize_shape=(self.MODEL_INPUT_WIDTH, self.MODEL_INPUT_HEIGHT))
        resize_conf = Size(resize_tuple[0], resize_tuple[1])
        pad_conf = Dim(pad_tuple[2], pad_tuple[3], pad_tuple[0], pad_tuple[1])
        color_conf = Color(112, 112, 112)  # neutral gray padding
        decode_image = self.image_processor.resize(decode_image, resize_conf, base.bilinear_similar_opencv)
        decode_image = self.image_processor.padding(decode_image, pad_conf, color_conf, base.border_constant)
        decode_image.to_host()

        # Transfer to ndarray; crop to the valid region and convert to NCHW.
        image_src = np.array(decode_image.to_tensor())  # NHWC
        image_src = image_src[:, :decode_image.original_height, :decode_image.original_width, :]
        image_src = image_src.transpose((0, 3, 1, 2))  # NCHW

        return image_src, scale_factor, image_ori

    def infer(self, input_tensor_q_con: Queue, post_process_q_pro: Queue, post_process_q_track: Queue):
        """Run the detection loop until the input queue stays empty.

        Args:
            input_tensor_q_con: consumer queue, frames from the video decoder.
            post_process_q_pro: producer queue, aligned faces for feature infer.
            post_process_q_track: producer queue, (face, box, score) tuples
                for the tracking stage.
        """
        try:
            logger.info(
                "======================================== Start portrait detection infer ========================================")

            post_retina = face_post_process.RetinaFace(self.det_model, rac="net3", masks=False)
            img_ndarray_list = []
            scale_factor_list = []
            img_ori_ndarray_list = []

            count = 0
            while True:
                try:
                    logger.info("portrait_detect_q_size: %s", input_tensor_q_con.qsize())
                    _, image, image_ori_height, image_ori_width = input_tensor_q_con.get(
                        timeout=self.INFER_BREAK_WAIT_TIME)
                except Empty:
                    # Queue stayed empty for INFER_BREAK_WAIT_TIME seconds:
                    # flush a pending partial batch, otherwise stop the worker.
                    if not img_ndarray_list:
                        logger.info(f"process1_infer pid:{self.pid} wait time out, break")
                        logger.info(f"PortraitDetection process1_infer pid:{self.pid} finished. get image cnt:{count}")
                        return

                    # Pad the partial batch up to batch_size with zero frames.
                    current_num = len(img_ndarray_list)
                    pad_num = self.batch_size - current_num
                    pad_shape = (pad_num,) + img_ndarray_list[-1].shape
                    pad_zeros = np.zeros(shape=pad_shape)
                    img_ndarray_list = np.concatenate((np.array(img_ndarray_list), pad_zeros), axis=0).astype(np.float32)
                    # Reuse the last scale factor for the zero-padded slots.
                    scale_factor_list.extend([scale_factor_list[-1]] * pad_num)

                    # (1) Build the batched tensor; drop the per-frame N dim.
                    img_mxtensor = Tensor(np.squeeze(img_ndarray_list, axis=1))
                    img_mxtensor.to_device(self.device_id)
                    img_mxtensor_list = [img_mxtensor]

                    # (2) Retina model infer.
                    all_det, all_landmarks = post_retina.detect(img_mxtensor_list, scale_factor_list, 0.6)
                    logger.info(f"process1_infer det model infer {count} finished.")

                    # (3) Post process; keep only the real (non-padded) samples.
                    # NOTE(review): img_ori_ndarray_list is NOT padded here while
                    # the detections come from the padded batch -- assumed the
                    # post-process helper tolerates the length mismatch; confirm.
                    img_arr_list, boxes, landmarks, scores, _ = face_post_process.get_aligned_face_base_with_mface_all(
                        all_det, all_landmarks, img_ori_ndarray_list)
                    for i, img_arr in enumerate(img_arr_list[:current_num]):
                        post_process_q_pro.put(img_arr)
                        if img_arr is not None:
                            # CHW -> HWC for the tracking queue.
                            img_arr = img_arr.transpose(1, 2, 0)
                        post_process_q_track.put((img_arr, boxes[i], scores[i]))

                    # (4) Batch consumed, reset the accumulators.
                    img_ndarray_list = []
                    scale_factor_list = []
                    img_ori_ndarray_list = []
                else:
                    count += 1
                    image_src, scale_factor, image_ori = self.det_preprocess(image, image_ori_height, image_ori_width)

                    # Accumulate one preprocessed frame (NCHW, RGB).
                    img_ndarray_list.append(image_src.astype(np.float32))
                    scale_factor_list.append(scale_factor)
                    img_ori_ndarray_list.append(image_ori.astype(np.float32))

                    # Wait until a full batch has been accumulated.
                    if len(img_ori_ndarray_list) < self.batch_size:
                        continue

                    # Build the batched tensor; drop the per-frame N dim.
                    img_mxtensor = Tensor(np.squeeze(np.array(img_ndarray_list), axis=1))
                    img_mxtensor.to_device(self.device_id)
                    img_mxtensor_list = [img_mxtensor]

                    # Retina model infer.
                    if self.USE_BENCHMARK:
                        self.mytimer.infer_infer_and_post_start.append(time.time())
                    all_det, all_landmarks = post_retina.detect(img_mxtensor_list, scale_factor_list, 0.6)
                    logger.info(f"process1_infer det model infer {count} samples finished.")
                    img_arr_list, boxes, landmarks, scores, _ = face_post_process.get_aligned_face_base_with_mface_all(
                        all_det, all_landmarks, img_ori_ndarray_list)
                    # NOTE(review): unlike the flush branch above, faces here are
                    # pushed to the track queue without the CHW -> HWC transpose
                    # or a None check -- confirm which layout the tracker expects.
                    for i, img_arr in enumerate(img_arr_list):
                        post_process_q_pro.put(img_arr)
                        post_process_q_track.put((img_arr, boxes[i], scores[i]))

                    if self.USE_BENCHMARK:
                        self.mytimer.infer_infer_and_post_end.append(time.time())

                    # Batch consumed, reset the accumulators.
                    img_ndarray_list = []
                    scale_factor_list = []
                    img_ori_ndarray_list = []

        except Exception as e:
            # Top-level boundary for the worker process: log and exit.
            logger.error("process1_infer failed. %s", repr(e))
            traceback.print_exc()


class PortraitFeature(object):
    """Portrait (face) feature-extraction stage of the pipeline.

    Consumes aligned face crops from a queue, runs the feature model on
    batches of the original and horizontally flipped faces, and L2-normalizes
    the summed embeddings.
    """

    def __init__(self, feat_model_path, batch_size, device_id, pid):
        """
        Args:
            feat_model_path: path to the feature-extraction model file.
            batch_size: number of faces accumulated before one batched infer.
            device_id: NPU device the model runs on.
            pid: worker process id, used only in log messages.
        """
        self.feat_model = base.model(feat_model_path, deviceId=device_id)
        self.batch_size = batch_size
        self.device_id = device_id
        self.pid = pid
        self.mytimer = timer.Timer(batch_size)

        # Seconds to wait on the input queue before flushing / stopping.
        self.INFER_BREAK_WAIT_TIME = 10
        self.USE_BENCHMARK = False

    def _infer_batch(self, batch_ndarray, keep=None):
        """Run one feature-model inference and return host-side ndarrays.

        Args:
            batch_ndarray: batched input array for the feature model.
            keep: if given, truncate every output to its first `keep` rows
                (drops the zero-padded flush samples).

        Returns:
            list of np.ndarray model outputs.
        """
        mxtensor = Tensor(batch_ndarray)
        mxtensor.to_device(self.device_id)
        outputs = self.feat_model.infer([mxtensor])
        results = []
        for output_tensor in outputs:
            output_tensor.to_host()
            arr = np.array(output_tensor)
            results.append(arr if keep is None else arr[:keep])
        return results

    def infer(self, post_process_q_con: Queue):
        """Run the feature-extraction loop until the input queue stays empty.

        Args:
            post_process_q_con: consumer queue of aligned face crops;
                None entries (frames with no detected face) are skipped.
        """
        try:
            logger.info(
                "======================================== Start portrait feature extraction ========================================")

            count = 0
            img_arr_list = []
            while True:
                try:
                    img_ndarray = post_process_q_con.get(timeout=self.INFER_BREAK_WAIT_TIME)
                    if img_ndarray is None:
                        continue
                except Empty:
                    # Queue stayed empty for INFER_BREAK_WAIT_TIME seconds:
                    # flush a pending partial batch, otherwise stop the worker.
                    if not img_arr_list:
                        logger.info(f"process2_pattern_rec pid:{self.pid} wait time out, break")
                        logger.info(f"Portrait process2_pattern_rec pid:{self.pid} finished. get image cnt:{count}")
                        return

                    logger.info("process2_pattern_rec enter dynamic branch.")

                    # Pad the partial batch up to batch_size with zero faces.
                    current_num = len(img_arr_list)
                    pad_num = self.batch_size - current_num
                    pad_shape = (pad_num,) + img_arr_list[-1].shape
                    pad_zeros = np.zeros(shape=pad_shape)
                    img_arr_list = np.concatenate((np.array(img_arr_list), pad_zeros), axis=0).astype(np.float32)

                    img_ndarray_batch, img_ndarray_flip_batch = face_post_process.get_feature_batch(img_arr_list)

                    # Infer original + flipped batches; keep only the real
                    # (non-padded) rows of every output.
                    output_tensors = self._infer_batch(img_ndarray_batch, keep=current_num)
                    output_flip_tensors = self._infer_batch(img_ndarray_flip_batch, keep=current_num)

                    # Flip-augmented embedding, L2 normalized per row.
                    # NOTE(review): embedding is computed but never forwarded
                    # anywhere -- confirm whether a downstream queue is missing.
                    embedding = output_tensors[0] + output_flip_tensors[0]
                    embedding = preprocessing.normalize(embedding)

                    logger.info(f"process2_pattern_rec infer: {count} samples finished.")
                    # Batch consumed, reset the accumulator.
                    img_arr_list = []
                else:
                    img_arr_list.append(img_ndarray)
                    count += 1

                    # Wait until a full batch has been accumulated.
                    if len(img_arr_list) < self.batch_size:
                        continue

                    # Preprocess: build original + flipped batches.
                    # (Benchmark start moved after the batch-full check so the
                    # start/end timer lists stay the same length.)
                    if self.USE_BENCHMARK:
                        self.mytimer.rec_pre_start.append(time.time())
                    img_ndarray_batch, img_ndarray_flip_batch = face_post_process.get_feature_batch(
                        np.array(img_arr_list))
                    if self.USE_BENCHMARK:
                        self.mytimer.rec_pre_end.append(time.time())

                    # Feature model infer on original + flipped batches.
                    if self.USE_BENCHMARK:
                        self.mytimer.rec_infer_and_post_start.append(time.time())
                    output_tensors = self._infer_batch(img_ndarray_batch)
                    output_flip_tensors = self._infer_batch(img_ndarray_flip_batch)

                    # Flip-augmented embedding, L2 normalized per row.
                    # NOTE(review): embedding is computed but never forwarded
                    # anywhere -- confirm whether a downstream queue is missing.
                    embedding = output_tensors[0] + output_flip_tensors[0]
                    embedding = preprocessing.normalize(embedding)

                    logger.info(f"process2_pattern_rec infer: {count} samples finished.")
                    # Batch consumed, reset the accumulator.
                    img_arr_list = []
                    if self.USE_BENCHMARK:
                        self.mytimer.rec_infer_and_post_end.append(time.time())

        except Exception as e:
            # Top-level boundary for the worker process: log and exit.
            logger.error("process2_pattern_rec failed. %s", repr(e))
            traceback.print_exc()


def portrait_detect(det_model_path, batch_size, device_id, pid,
                    input_tensor_q_con: Queue,
                    post_process_q_pro: Queue,
                    post_process_q_track: Queue):
    """Process entry point: build a detector and run its infer loop."""
    detector = PortraitDection(det_model_path, batch_size, device_id, pid)
    detector.infer(input_tensor_q_con, post_process_q_pro, post_process_q_track)


def portrait_feature(reid_model_path, batch_size, device_id, pid,
                     post_process_q_con: Queue):
    """Process entry point: build a feature extractor and run its infer loop."""
    extractor = PortraitFeature(reid_model_path, batch_size, device_id, pid)
    extractor.infer(post_process_q_con)

