#!/usr/bin/env python3.9
# coding=utf-8

"""
Copyright (c) Huawei Technologies Co., Ltd. 2020-2022. All rights reserved.
Description: python api test.
Author: MindX SDK
Create: 2022
History: NA
"""

import os
import logging
import time
from multiprocessing import Process, JoinableQueue, Queue, Manager, Pool

import numpy as np
import cv2
from sklearn import preprocessing
from mindx.sdk import base
from mindx.sdk.base import Image, ImageProcessor, Size, Tensor
from mindx.sdk.base import VideoDecoder, VideoDecodeConfig, VdecCallBacker

from utils import face_post_process

logging.basicConfig(level=logging.INFO, format='%(asctime)s - %(levelname)s - %(message)s')
logger = logging.getLogger(__name__)
device_id = 0
channel_id = 0
SKIP_INTERVAL = 3
MODEL_INPUT_HEIGHT = 960
MODEL_INPUT_WIDTH = 960
VIDEO_HEIGHT = 4096
VIDEO_WIDTH = 2160
MODEL_PATH = "./model/retinaface_r50_v1_960_dy_batch.om"
BATCH_SIZE = 64
INFER_WAIT_TIME = 1e-3
INFER_BREAK_WAIT_TIME = 10
CLF_MODEL_PATH = "./model/model_dy_batch.om"
VIDEO_PATH = "./data/test.264"


def process0_video_decode(decode_q_pro: Queue, video_stream_path: str, channelId: int):
    """Decode an H.264 stream and feed preprocessed frames into a queue.

    Demuxes the stream with PyAV, decodes each packet on the device via the
    MindX VideoDecoder, and — inside the decode callback — resizes/pads every
    frame to the detection model's input size before pushing
    ``(frame_id, resized NCHW image, scale_factor, original NCHW image)``
    tuples onto ``decode_q_pro``.

    Args:
        decode_q_pro: producer queue consumed by the inference process.
        video_stream_path: path to the raw .264 elementary stream.
        channelId: decoder channel id (camelCase kept to match the SDK API).
    """
    logger.info("=========================== Start process0_video_decode ===========================")
    image_processor = ImageProcessor(device_id)

    def vdec_callback(decodeImage, channelId, frameId):
        # Invoked by the SDK once per decoded frame.
        logger.debug(f"process0_video_decode decodeImage channelId: {channelId}, frameId: {frameId} width: {decodeImage.width}, height: {decodeImage.height}")

        # 1. Keep a copy of the original frame (cropped to the real decoded
        #    resolution, NHWC -> NCHW) for later face alignment, and compute
        #    the aspect-preserving resize + padding configuration.
        decodeImage.to_host()
        image_ori = np.array(decodeImage.to_tensor())  # NHWC, RGB
        logger.debug(f"process0_video_decode Decode image shape: {image_ori.shape}")
        image_ori = image_ori[:, :decodeImage.original_height, :decodeImage.original_width, :]
        image_ori = image_ori.transpose((0, 3, 1, 2))
        decodeImage.to_device(device_id)
        resize_tuple, pad_tuple, scale_factor = face_post_process.resize_factor(
            decodeImage, resize_shape=(MODEL_INPUT_WIDTH, MODEL_INPUT_HEIGHT))
        resize_conf = Size(resize_tuple[0], resize_tuple[1])

        # 2. Resize on device, then bring the result back to host memory.
        decodeImage = image_processor.resize(decodeImage, resize_conf, base.huaweiu_high_order_filter)
        decodeImage.to_host()

        # 3. Pad to the fixed model input size and publish both the padded
        #    and the original frame to the consumer queue.
        image_src = np.array(decodeImage.to_tensor())  # NHWC
        image_src = image_src[:, :decodeImage.original_height, : decodeImage.original_width, :]
        image_src = cv2.copyMakeBorder(image_src[0], pad_tuple[0], pad_tuple[1], pad_tuple[2], pad_tuple[3],
                                       cv2.BORDER_CONSTANT, value=(112, 112, 112))
        image_src = np.expand_dims(image_src, axis=0).transpose((0, 3, 1, 2))  # NCHW
        logger.debug("process0_video_decode to be put in queue resized_image: {}, scale_factor: {}, image_ori: {}".format(image_src.shape, scale_factor, image_ori.shape))
        decode_q_pro.put((frameId, image_src, scale_factor, image_ori))

        logger.debug("process0_video_decode producer queue, elements {}".format(decode_q_pro.qsize()))

    # 1. Register the per-frame callback with the decoder.
    vdec_callbacker = VdecCallBacker()
    vdec_callbacker.registerVdecCallBack(vdec_callback)

    # 2. Configure the hardware decoder.
    vdec_config = VideoDecodeConfig()
    vdec_config.skipInterval = SKIP_INTERVAL
    vdec_config.outputImageFormat = base.rgb
    # NOTE(review): VIDEO_WIDTH=2160 / VIDEO_HEIGHT=4096 looks like a swapped
    # 4K geometry (usually 4096x2160) — confirm against the actual stream.
    vdec_config.width = VIDEO_WIDTH
    vdec_config.height = VIDEO_HEIGHT

    # 3. Create the decoder bound to this device/channel.
    video_decoder = VideoDecoder(vdec_config, vdec_callbacker, deviceId=device_id, channelId=channelId)

    # 4. Demux with PyAV and hand each packet to the hardware decoder.
    import av
    with av.open(video_stream_path) as container:
        logger.debug("============================process0_video_decode start decode stream =========================")
        for frame_id, packet in enumerate(container.demux()):
            logger.debug(f"process0_video_decode curr frame id: {frame_id}, size: {packet.size}")
            if packet.size == 0:
                # An empty packet marks end of stream.
                break
            try:
                video_decoder.decode(packet, frame_id)
            except Exception as err:
                # Fixed: the exception was previously passed as a logging
                # %-arg with no placeholder, so its text was silently dropped.
                logger.error("process0_video_decode decode error: %s", err)
            if frame_id == 50:
                # Demo cap: only feed the first 50 packets.
                break
    logger.info("=============== Finish process0_video_decode ========================")
    del video_decoder


def process2_infer(input_tensor_q_con: Queue, post_process_q_pro: Queue, batch_size: int):
    """Run RetinaFace detection on decoded frames and queue aligned faces.

    Pulls ``(frame_id, resized NCHW image, scale_factor, original NCHW
    image)`` tuples from ``input_tensor_q_con``, runs the detection model and
    its post processing, and puts each aligned face crop (expanded to a
    leading batch dim; HWC, BGR per the downstream contract) onto
    ``post_process_q_pro``. Exits once the input queue has stayed empty for
    ``INFER_BREAK_WAIT_TIME`` seconds.

    Args:
        input_tensor_q_con: consumer queue fed by the decode process.
        post_process_q_pro: producer queue consumed by the recognition process.
        batch_size: currently unused; kept for interface compatibility.
    """
    logger.info("======================================== Start process2_infer ========================================")
    model = base.model(MODEL_PATH, deviceId=device_id)

    post_retina = face_post_process.RetinaFace(model, rac="net3", masks=False)
    wait_time = time.time()
    count = 0
    while True:
        if input_tensor_q_con.qsize() == 0:
            if time.time() - wait_time > INFER_BREAK_WAIT_TIME:
                logger.info("process2_infer wait time out, break.")
                break
            # Fixed: a bare ``continue`` here busy-spun a full CPU core while
            # idle; sleep for INFER_WAIT_TIME (previously defined but unused).
            time.sleep(INFER_WAIT_TIME)
            continue
        wait_time = time.time()
        img_tuple = input_tensor_q_con.get()

        # 1. Unpack the work item and move the padded frame to the device.
        img_ndarray = img_tuple[1].astype(np.float32)  # NCHW, RGB
        scale_factor = img_tuple[2]
        img_ori_ndarray = img_tuple[3].astype(np.float32)  # NCHW, RGB
        img_mxtensor = Tensor(img_ndarray)
        img_mxtensor.to_device(device_id)

        # 2. RetinaFace detection at a 0.6 score threshold.
        all_det, all_landmarks = post_retina.detect([img_mxtensor], [scale_factor], 0.6)
        logger.info(f"process2_infer det model infer {count} finished.")

        # 3. Align detected faces on the original-resolution frame.
        #    (Boxes/landmarks/scores are produced but unused downstream.)
        feature_inputs, _, _, _, _ = post_retina.get_aligned_face_base_with_mface_all(
            all_det, all_landmarks, [img_ori_ndarray])
        logger.debug(f"process2_infer post process {count} finished.")
        feature_input = feature_inputs[0]
        if not isinstance(feature_input, np.ndarray):
            # No usable face in this frame — nothing to forward.
            continue

        # 4. Forward the aligned face to the recognition queue.
        feature_input = np.expand_dims(feature_input, axis=0)
        post_process_q_pro.put(feature_input)  # put HWC, BGR
        logger.debug(f"process2_infer image {count} put into feature queue, shape: {feature_input.shape}")
        count += 1
    logger.info("process2_infer finished.")


def process3_1_pattern_rec(post_process_q_con: Queue):
    """Run the face feature model on aligned face crops from the queue.

    Consumes face image arrays from ``post_process_q_con``, preprocesses
    them, infers an output per image, sums the first two outputs and
    L2-normalizes the result. Exits once the queue has stayed empty for
    ``INFER_BREAK_WAIT_TIME`` seconds.

    Args:
        post_process_q_con: consumer queue fed by the detection process.
    """
    logger.info("======================================== Start process3_1_pattern_rec ========================================")
    model = base.model(CLF_MODEL_PATH, deviceId=device_id)
    # Fixed: ``count`` was previously initialized twice.
    count = 0
    wait_time = time.time()
    while True:
        if post_process_q_con.qsize() == 0:
            if time.time() - wait_time > INFER_BREAK_WAIT_TIME:
                # Fixed: this log line previously said "process2_infer".
                logger.info("process3_1_pattern_rec wait time out, break.")
                break
            # Fixed: avoid busy-spinning while idle (was a bare ``continue``).
            time.sleep(INFER_WAIT_TIME)
            continue
        wait_time = time.time()
        logger.info(f"process3_1_pattern_rec get inferred image {count}.")
        img_ndarray = post_process_q_con.get()  # ndarray face crop from process2_infer

        # 1. Feature-model preprocessing.
        img_ndarray = face_post_process.get_feature(img_ndarray)

        # 2. Per-image inference; keep the first output tensor of each run.
        output_tensors_2 = []
        for j in range(img_ndarray.shape[0]):
            img_mxtensor = Tensor(img_ndarray[j][np.newaxis, :])
            img_mxtensor.to_device(device_id)
            output_tensors = model.infer([img_mxtensor])

            for i, output_tensor in enumerate(output_tensors):
                output_tensor.to_host()
                output_tensors[i] = np.array(output_tensor)
                logger.info("process3_1_pattern_rec result shape: {}.".format(output_tensors[i].shape))

            output_tensors_2.append(output_tensors[0])

        # 3. Post-process: sum of the first two outputs, L2-normalized.
        #    Guard added: the original indexed [0] and [1] unconditionally
        #    and raised IndexError when fewer than two outputs existed.
        if len(output_tensors_2) < 2:
            logger.warning("process3_1_pattern_rec expected >= 2 outputs, got %d; skip.", len(output_tensors_2))
            count += 1
            continue
        embedding = output_tensors_2[0] + output_tensors_2[1]
        embedding = preprocessing.normalize(embedding)

        logger.info("process3_1_pattern_rec face classification results len: {}.".format(len(embedding)))
        count += 1
    logger.info("process3_1_pattern_rec finished.")


def main_pool():
    """Launch the decode → detect → recognize pipeline as a process pool.

    One worker per stage, connected by Manager queues:
    decode → q_decode → infer → q_rec → pattern recognition.

    Raises:
        Exception: re-raises the first exception any stage worker raised.
    """
    num_decode = 1
    num_infer = 1
    num_pattern_rec = 1
    total_workers = num_decode + num_infer + num_pattern_rec

    manager = Manager()
    q_decode = manager.Queue()
    q_rec = manager.Queue()
    # Fixed: Pool() defaults to cpu_count() workers; on a small machine the
    # stages could be serialized and starve each other. Size the pool so
    # every stage runs concurrently.
    pool = Pool(processes=total_workers)

    results = []
    for i in range(num_decode):
        results.append(pool.apply_async(process0_video_decode, args=(q_decode, VIDEO_PATH, i)))
    for _ in range(num_infer):
        results.append(pool.apply_async(process2_infer, args=(q_decode, q_rec, BATCH_SIZE)))
    for _ in range(num_pattern_rec):
        results.append(pool.apply_async(process3_1_pattern_rec, args=(q_rec,)))

    pool.close()
    pool.join()

    # Fixed: apply_async swallows worker exceptions unless .get() is called;
    # surface any stage failure to the caller.
    for res in results:
        res.get()


if __name__ == "__main__":
    base.mx_init()
    main_pool()
