#!/usr/bin/env python3.9
# coding=utf-8

"""
Copyright (c) Huawei Technologies Co., Ltd. 2020-2022. All rights reserved.
Description: python api test.
Author: MindX SDK
Create: 2022
History: NA
"""

import os
import logging
import time
from multiprocessing import Process, JoinableQueue, Queue, Manager, Pool

import numpy as np
import cv2
from sklearn import preprocessing
from mindx.sdk import base
from mindx.sdk.base import Image, ImageProcessor, Size, Tensor
from mindx.sdk.base import VideoDecoder, VideoDecodeConfig, VdecCallBacker

from utils import face_post_process

logging.basicConfig(level=logging.INFO, format='%(asctime)s - %(levelname)s - %(message)s')
logger = logging.getLogger(__name__)
device_id = 0
channel_id = 0
SKIP_INTERVAL = 3
MODEL_INPUT_HEIGHT = 960
MODEL_INPUT_WIDTH = 960
VIDEO_HEIGHT = 4096
VIDEO_WIDTH = 2160
VIDEO_BREAK_FRAME = 100
MODEL_PATH = "./model/retinaface_r50_v1_960_dy_batch.om"
BATCH_SIZE = 16
INFER_WAIT_TIME = 5
INFER_BREAK_WAIT_TIME = 10
CLF_MODEL_PATH = "./model/model_dy_batch.om"
VIDEO_PATH = "./data/test.264"
JPG_PATH = "./data/img_files"


def process0_video_decode(decode_q_pro: Queue, video_stream_path: str, channelId: int):
    """
    Decode a video stream frame by frame, preprocess each frame and enqueue it.

    decode_q_pro: Queue, tuples of (frameId, resized NCHW array, scale_factor,
        original NCHW array) are put in
    video_stream_path: str, video to decode from
    channelId: int, video decode channel
    """
    logger.info("=========================== Start process0_video_decode ===========================")
    image_processor = ImageProcessor(device_id)  # initialize mxbase image_process

    def vdec_callback(decodeImage, channelId, frameId):
        """
        VideoDecoder per-frame callback: crop, resize, pad and enqueue the frame.
        """
        logger.debug(f"process0_video_decode decodeImage channelId: {channelId}, frameId: {frameId} width: {decodeImage.width}, height: {decodeImage.height}")

        # 1. Keep a copy of the original frame (as NCHW) and compute resize/pad config.
        decodeImage.to_host()
        image_ori = np.array(decodeImage.to_tensor())  # NHWC, RGB
        logger.debug(f"process0_video_decode Decode image shape: {image_ori.shape}")
        # Crop the decoder's alignment padding back to the true frame size.
        image_ori = image_ori[:, :decodeImage.original_height, :decodeImage.original_width, :]
        image_ori = image_ori.transpose((0, 3, 1, 2))  # NHWC -> NCHW
        decodeImage.to_device(device_id)
        resize_tuple, pad_tuple, scale_factor = face_post_process.resize_factor(
            decodeImage, resize_shape=(MODEL_INPUT_WIDTH, MODEL_INPUT_HEIGHT))
        resize_conf = Size(resize_tuple[0], resize_tuple[1])

        # 2. Resize on device, then bring the result back to host memory.
        decodeImage = image_processor.resize(decodeImage, resize_conf, base.huaweiu_high_order_filter)
        decodeImage.to_host()

        # 3. Pad to the model input size and enqueue original + resized arrays.
        image_src = np.array(decodeImage.to_tensor())  # NHWC
        image_src = image_src[:, :decodeImage.original_height, : decodeImage.original_width, :]
        image_src = cv2.copyMakeBorder(image_src[0], pad_tuple[0], pad_tuple[1], pad_tuple[2], pad_tuple[3],
                                       cv2.BORDER_CONSTANT, value=(112, 112, 112))
        image_src = np.expand_dims(image_src, axis=0).transpose((0, 3, 1, 2))  # NCHW
        logger.debug("process0_video_decode to be put in queue resized_image: {}, scale_factor: {}, image_ori: {}".format(image_src.shape, scale_factor, image_ori.shape))
        decode_q_pro.put((frameId, image_src, scale_factor, image_ori))

        logger.debug("process0_video_decode producer queue, elements {}".format(decode_q_pro.qsize()))

    # 1. initialize video decoder callback
    vdec_callbacker = VdecCallBacker()
    vdec_callbacker.registerVdecCallBack(vdec_callback)

    # 2. initialize video decoder config
    vdec_config = VideoDecodeConfig()
    vdec_config.skipInterval = SKIP_INTERVAL
    vdec_config.outputImageFormat = base.rgb
    vdec_config.width = VIDEO_WIDTH
    vdec_config.height = VIDEO_HEIGHT

    # 3. initialize video decode object
    video_decoder = VideoDecoder(vdec_config, vdec_callbacker, deviceId=device_id, channelId=channelId)

    # 4. Demux with PyAV and feed raw packets to the hardware VideoDecoder.
    import av  # local import: PyAV is only needed on the video-decode path
    with av.open(video_stream_path) as container:
        logger.debug("============================process0_video_decode start decode stream =========================")
        for frame_id, packet in enumerate(container.demux()):
            logger.debug(f"process0_video_decode curr frame id: {frame_id}, size: {packet.size}")
            if packet.size == 0:  # empty packet marks end of stream
                break
            try:
                video_decoder.decode(packet, frame_id)
            except Exception as err:
                # BUGFIX: extra logging args need a %s placeholder; the original
                # "msg: ", err form dropped the exception from the log output.
                logger.error("process0_video_decode decode error: %s", err)
            if frame_id == VIDEO_BREAK_FRAME:
                break
    logger.info("=============== Finish process0_video_decode ========================")
    del video_decoder  # release the decode channel explicitly


def process0_read_image(decode_q_pro: Queue):
    """
    Read image files from JPG_PATH, preprocess (decode, resize, pad) and enqueue them.

    decode_q_pro: Queue, tuples of (file name, resized NCHW array, scale_factor,
        original NCHW array) are put in
    """
    logger.info("=========================== Start process0_read_image ===========================")
    image_processor = ImageProcessor(device_id)  # initialise mxbase image_process

    # sorted(): os.listdir order is filesystem-dependent; keep processing deterministic.
    for img_file in sorted(os.listdir(JPG_PATH)):
        logger.info(f"process0_read_image {img_file}")
        decodeImage = image_processor.decode(os.path.join(JPG_PATH, img_file), base.rgb)  # NHWC
        # 1. Keep a copy of the original image (as NCHW) and compute resize/pad config.
        decodeImage.to_host()
        image_ori = np.array(decodeImage.to_tensor())  # NHWC, RGB
        logger.debug(f"process0_read_image Decode image shape: {image_ori.shape}")
        # Crop the decoder's alignment padding back to the true image size.
        image_ori = image_ori[:, :decodeImage.original_height, :decodeImage.original_width, :]
        image_ori = image_ori.transpose((0, 3, 1, 2))  # NHWC -> NCHW
        decodeImage.to_device(device_id)
        resize_tuple, pad_tuple, scale_factor = face_post_process.resize_factor(decodeImage,
                                                                                resize_shape=(
                                                                                    MODEL_INPUT_WIDTH,
                                                                                    MODEL_INPUT_HEIGHT))
        resize_conf = Size(resize_tuple[0], resize_tuple[1])

        # 2. Resize on device, then bring the result back to host memory.
        decodeImage = image_processor.resize(decodeImage, resize_conf, base.huaweiu_high_order_filter)
        decodeImage.to_host()

        # 3. Pad to the model input size and enqueue original + resized arrays.
        image_src = np.array(decodeImage.to_tensor())  # NHWC
        image_src = image_src[:, :decodeImage.original_height, : decodeImage.original_width, :]
        image_src = cv2.copyMakeBorder(image_src[0], pad_tuple[0], pad_tuple[1], pad_tuple[2], pad_tuple[3],
                                       cv2.BORDER_CONSTANT, value=(112, 112, 112))
        image_src = np.expand_dims(image_src, axis=0).transpose((0, 3, 1, 2))  # NCHW
        logger.debug("process0_read_image to be put in queue resized_image: {}, scale_factor: {}, image_ori: {}".format(
            image_src.shape, scale_factor, image_ori.shape))
        decode_q_pro.put((img_file, image_src, scale_factor, image_ori))

        logger.debug("process0_read_image producer queue, elements {}".format(decode_q_pro.qsize()))
    # CONSISTENCY: use the module logger instead of a bare print().
    logger.info("process0_read_image finished")


def process2_infer(input_tensor_q_con: Queue, post_process_q_pro: Queue, batch_size: int):
    """
    Run RetinaFace detection on preprocessed frames and forward aligned face crops.

    input_tensor_q_con: Queue, consumer, data from video decoder / image reader
    post_process_q_pro: Queue, producer, aligned faces to be put into feature infer queue
    batch_size: int, number of samples to accumulate before a batched inference
    """
    logger.info("======================================== Start process1_infer ========================================")
    model = base.model(MODEL_PATH, deviceId=device_id)

    post_retina = face_post_process.RetinaFace(model, rac="net3", masks=False)

    count = 0

    def _enqueue_faces(feature_inputs):
        # Forward only real ndarray crops; non-ndarray entries are placeholders to skip.
        for feature_input in feature_inputs:
            if not isinstance(feature_input, np.ndarray):
                continue
            feature_input = np.expand_dims(feature_input, axis=0)
            post_process_q_pro.put(feature_input)  # put HWC, BGR
            logger.debug(f"process1_infer image {count} put into feature queue, shape: {feature_input.shape}")

    func_timer = time.time()
    infer_timer = time.time()
    img_ndarray_list = []
    scale_factor_list = []
    img_ori_ndarray_list = []

    while True:
        # 1. If infer wait time out, directly infer the samples in queue without waiting for a batch.
        if time.time() - infer_timer >= INFER_WAIT_TIME:
            logger.info("process1_infer dynamic batch branch.")
            for n_sample in range(len(img_ndarray_list)):
                # (1) Initialize one sample.
                img_mxtensor = Tensor(img_ndarray_list[n_sample])
                img_mxtensor.to_device(device_id)
                img_mxtensor_list = [img_mxtensor]

                # (2) Retina model infer.
                all_det, all_landmarks = post_retina.detect(img_mxtensor_list, [scale_factor_list[n_sample]], 0.6)
                logger.info(f"process1_infer det model infer {count} finished.")

                # (3) Post process.
                feature_inputs, _, boxes, landmarks, scores = post_retina.get_aligned_face_base_with_mface_all(
                    all_det, all_landmarks, [img_ori_ndarray_list[n_sample]])
                logger.debug(f"process1_infer post process {count} finished.")

                # (4) Put aligned faces into rec infer queue.
                _enqueue_faces(feature_inputs)
            # (5) Infer finished, free lists.
            infer_timer = time.time()
            img_ndarray_list = []
            scale_factor_list = []
            img_ori_ndarray_list = []
            count = 0
        # 2. If queue size is 0, continue to the next turn until wait time out.
        if input_tensor_q_con.qsize() == 0:
            if time.time() - func_timer > INFER_BREAK_WAIT_TIME:
                logger.info("process1_infer wait time out, break.")
                break
            else:
                continue
        logger.debug(f"process1_infer queue size {input_tensor_q_con.qsize()}")
        func_timer = time.time()
        img_tuple = input_tensor_q_con.get()
        count += 1

        # 3. Read input array and transfer array type, put into tensor list
        img_ndarray = img_tuple[1].astype(np.float32)  # NCHW, RGB
        scale_factor = img_tuple[2]
        img_ori_ndarray = img_tuple[3].astype(np.float32)  # NCHW, RGB

        img_ndarray_list.append(img_ndarray)
        scale_factor_list.append(scale_factor)
        img_ori_ndarray_list.append(img_ori_ndarray)

        # 4. If accumulated samples fewer than batch_size, wait until we get enough.
        # BUGFIX: use the batch_size parameter (previously dead) instead of the
        # BATCH_SIZE global; the caller passes BATCH_SIZE, so behavior is unchanged.
        if len(img_ori_ndarray_list) < batch_size:
            continue

        # 5.1 Prepare batch input (drop the per-sample singleton batch dim).
        img_mxtensor = Tensor(np.squeeze(np.array(img_ndarray_list), axis=1))
        img_mxtensor.to_device(device_id)
        img_mxtensor_list = [img_mxtensor]

        # 5.2 Retina model infer
        all_det, all_landmarks = post_retina.detect(img_mxtensor_list, scale_factor_list, 0.6)
        logger.info(f"process1_infer det model infer {count} samples finished.")

        # 5.3 Post process
        feature_inputs, _, boxes, landmarks, scores = post_retina.get_aligned_face_base_with_mface_all(
            all_det, all_landmarks, img_ori_ndarray_list)
        logger.debug(f"process1_infer post process {count} samples finished.")

        # 5.4 Put aligned faces into rec infer queue.
        _enqueue_faces(feature_inputs)
        # 6. Clear lists and timer
        infer_timer = time.time()
        img_ndarray_list = []
        scale_factor_list = []
        img_ori_ndarray_list = []
        count = 0

    logger.info("process1_infer finished.")


def process3_1_pattern_rec(post_process_q_con: Queue):
    """
    Consume aligned face crops and compute L2-normalized face embeddings.

    post_process_q_con: Queue, consumer, aligned face crops (ndarrays with a
        leading singleton batch dim) from the detect stage
    """
    logger.info("======================================== Start process2_pattern_rec ========================================")
    model = base.model(CLF_MODEL_PATH, deviceId=device_id)

    def _run_model(img_ndarray_batch, tag):
        # Run one feature-model inference and move outputs back to host ndarrays.
        img_mxtensor = Tensor(img_ndarray_batch)
        img_mxtensor.to_device(device_id)
        output_tensors = model.infer([img_mxtensor])
        for i, output_tensor in enumerate(output_tensors):
            output_tensor.to_host()
            output_tensors[i] = np.array(output_tensor)
            logger.info("process2_pattern_rec {}result shape: {}.".format(tag, output_tensors[i].shape))
        return output_tensors

    def _extract_embedding(img_ndarray_batch, img_ndarray_flip_batch):
        # Fuse features of the original and flipped crops, then L2-normalize.
        output_tensors = _run_model(img_ndarray_batch, "")
        output_flip_tensors = _run_model(img_ndarray_flip_batch, "flip ")
        embedding = output_tensors[0] + output_flip_tensors[0]
        return preprocessing.normalize(embedding)

    func_timer = time.time()
    infer_timer = time.time()

    count = 0
    img_arr_list = []
    while True:
        # 1. If infer wait time out, infer queued samples one by one without waiting for a batch.
        if time.time() - infer_timer >= INFER_WAIT_TIME:
            logger.info("process2_pattern_rec dynamic batch branch.")
            for n_sample in range(len(img_arr_list)):
                # (1) Feature model preprocess.
                # BUGFIX: was img_arr_list[0], which re-processed the first
                # sample on every loop iteration and ignored the rest.
                img_ndarray_batch, img_ndarray_flip_batch = face_post_process.get_feature_batch(
                    img_arr_list[n_sample])

                # (2)-(3) Feature model infer and postprocess.
                embedding = _extract_embedding(img_ndarray_batch, img_ndarray_flip_batch)
                logger.info("process2_pattern_rec face classification results len: {}.".format(len(embedding)))
            # (4) Clear lists
            img_arr_list = []
            infer_timer = time.time()
            count = 0
        # 2. If queue size is 0, continue to the next turn until wait time out
        if post_process_q_con.qsize() == 0:
            if time.time() - func_timer > INFER_BREAK_WAIT_TIME:
                logger.info("process2_pattern_rec wait time out, break.")
                break
            else:
                continue
        # 3. Read input array and put it into the batch list.
        func_timer = time.time()
        logger.info(f"process2_pattern_rec get inferred image {count}.")
        img_ndarray = post_process_q_con.get()  # ndarray, leading singleton batch dim
        img_arr_list.append(img_ndarray)
        count += 1
        # 4. If accumulated samples fewer than BATCH_SIZE, wait until get enough
        if len(img_arr_list) < BATCH_SIZE:
            continue

        # 5.1 Feature model preprocess (drop the per-sample singleton batch dim).
        img_ndarray_batch, img_ndarray_flip_batch = face_post_process.get_feature_batch(
            np.squeeze(np.array(img_arr_list), axis=1))

        # 5.2-5.3 Feature model infer and postprocess.
        embedding = _extract_embedding(img_ndarray_batch, img_ndarray_flip_batch)
        # 5.4 Clear lists.
        logger.info("process2_pattern_rec face classification results len: {}.".format(len(embedding)))
        img_arr_list = []
    logger.info("process2_pattern_rec finished.")


def main_pool():
    """Launch the decode -> detect -> recognize pipeline on a process pool and time it."""
    start = time.time()
    NUM_DECODE_PROCESS = 10
    NUM_INFER_PROCESS = 4
    NUM_PATTERN_RECOG_PROCESS = 4
    manager = Manager()
    q_decode = manager.Queue()  # decode stage -> detect stage
    q_rec = manager.Queue()     # detect stage -> recognition stage
    # BUGFIX: Pool() defaults to os.cpu_count() workers; if that is fewer than
    # the total task count, consumer stages are never scheduled while producers
    # run and the pipeline stalls until timeouts fire. Size the pool so every
    # stage runs concurrently.
    pool = Pool(processes=NUM_DECODE_PROCESS + NUM_INFER_PROCESS + NUM_PATTERN_RECOG_PROCESS)

    for i in range(NUM_DECODE_PROCESS):
        # pool.apply_async(process0_video_decode, args=(q_decode, VIDEO_PATH, i))
        pool.apply_async(process0_read_image, args=(q_decode,))
    for i in range(NUM_INFER_PROCESS):
        pool.apply_async(process2_infer, args=(q_decode, q_rec, BATCH_SIZE))
    for i in range(NUM_PATTERN_RECOG_PROCESS):
        pool.apply_async(process3_1_pattern_rec, args=(q_rec,))

    pool.close()
    pool.join()

    end = time.time()
    print("total time", end - start)
    # Guard against an empty image directory (ZeroDivisionError).
    num_images = NUM_DECODE_PROCESS * len(os.listdir(JPG_PATH))
    if num_images:
        print("avg time", (end - start) / num_images)


if __name__ == "__main__":
    # Initialize the MindX SDK runtime in the parent process before spawning workers.
    base.mx_init()
    main_pool()
