#!/usr/bin/env python3.9
# coding=utf-8

"""
Copyright (c) Huawei Technologies Co., Ltd. 2020-2022. All rights reserved.
Description: python api test.
Author: MindX SDK
Create: 2022
History: NA
"""

import os
import logging
import time
from multiprocessing import Process, JoinableQueue, Queue, Manager, Pool
import traceback
import numpy as np
import cv2
from sklearn import preprocessing
from mindx.sdk import base
from mindx.sdk.base import Image, ImageProcessor, Size, Tensor, Dim, Color
from mindx.sdk.base import VideoDecoder, VideoDecodeConfig, VdecCallBacker

from portrait.utils import face_post_process_v2, timer

logging.basicConfig(level=logging.INFO, format='%(asctime)s - %(levelname)s - %(message)s')
logger = logging.getLogger(__name__)
# Target Ascend device and default video-decode channel.
device_id = 0
channel_id = 0
# Only decode every SKIP_INTERVAL-th frame of the video stream.
SKIP_INTERVAL = 3
# Input resolution of the RetinaFace detection model.
MODEL_INPUT_HEIGHT = 960
MODEL_INPUT_WIDTH = 960
# Expected decoded frame size. NOTE(review): 4096 height / 2160 width looks
# swapped relative to the usual 4096x2160 landscape convention — confirm.
VIDEO_HEIGHT = 4096
VIDEO_WIDTH = 2160
# Stop decoding after this many frames (test cap).
VIDEO_BREAK_FRAME = 100
# RetinaFace detection model (dynamic batch .om) and batching parameters.
MODEL_PATH = "./model/retinaface_r50_v1_960_dy_batch.om"
BATCH_SIZE = 16
INFER_WAIT_TIME = 5
# Seconds a worker waits on an empty queue before flushing its partial batch / exiting.
INFER_BREAK_WAIT_TIME = 10
# Feature (recognition) model path.
CLF_MODEL_PATH = "./model/model_dy_batch.om"
# Input data: raw H.264 stream and a directory of JPEG images.
VIDEO_PATH = "./data/test.264"
JPG_PATH = "./data/img_files"

# When True, collect per-stage timings into mytimer and print them at exit.
USE_BENCHMARK = False
mytimer = timer.Timer(BATCH_SIZE)


def process0_video_decode(decode_q_pro: Queue, video_stream_path: str, channelId: int):
    """
    Decode an H.264 stream on device and put RGB frames into the shared queue.

    decode_q_pro: Queue, tuples (frame_id, ndarray NHWC RGB, height, width) are put here
    video_stream_path: str, path of the video file/stream to decode
    channelId: int, hardware video-decode channel to use
    """
    try:
        logger.info("=========================== Start process0_video_decode ===========================")
        # NOTE(review): image_processor is never used below — presumably kept for a
        # device-context side effect of the constructor; confirm before removing.
        image_processor = ImageProcessor(device_id)  # initialize mxbase image_process

        def vdec_callback(decodeImage, channelId, frameId):
            """
            VideoDecoder callback: move the decoded frame to host memory and enqueue it.
            """
            logger.debug(
                f"process0_video_decode decodeImage channelId: {channelId}, frameId: {frameId} width: {decodeImage.width}, height: {decodeImage.height}")

            # 1. Copy decoded frame from device to host and convert to ndarray.
            decodeImage.to_host()
            image_ori = np.array(decodeImage.to_tensor())  # NHWC, RGB

            decode_q_pro.put((frameId, image_ori, decodeImage.original_height, decodeImage.original_width))

            logger.debug("process0_video_decode producer queue, elements {}".format(decode_q_pro.qsize()))

        # 1. initialize video decoder callback
        vdec_callbacker = VdecCallBacker()
        vdec_callbacker.registerVdecCallBack(vdec_callback)

        # 2. initialize video decoder config
        vdec_config = VideoDecodeConfig()
        vdec_config.skipInterval = SKIP_INTERVAL  # only decode every SKIP_INTERVAL-th frame
        vdec_config.outputImageFormat = base.rgb
        vdec_config.width = VIDEO_WIDTH
        vdec_config.height = VIDEO_HEIGHT

        # 3. initialize video decode object
        video_decoder = VideoDecoder(vdec_config, vdec_callbacker, deviceId=device_id, channelId=channelId)

        # 4. use PyAV (ffmpeg) to demux packets; VideoDecoder does the decoding
        import av
        with av.open(video_stream_path) as container:
            logger.debug(
                "============================process0_video_decode start decode stream =========================")
            for frame_id, packet in enumerate(container.demux()):
                logger.debug(f"process0_video_decode curr frame id: {frame_id}, size: {packet.size}")
                if packet.size == 0:  # empty packet marks end of stream
                    break
                try:
                    video_decoder.decode(packet, frame_id)
                except Exception as err:
                    # BUG FIX: logger.error("...: ", err) passed err as an unused
                    # positional argument, so the error text was never logged.
                    logger.error("process0_video_decode decode error: %s", repr(err))
                if frame_id == VIDEO_BREAK_FRAME:  # test cap on decoded frames
                    break
        logger.info("=============== Finish process0_video_decode ========================")
        del video_decoder
    except Exception as e:
        # Use the module logger instead of print so failures reach the log stream.
        logger.error("process0_video_decode failed. %s", repr(e))
        traceback.print_exc()

def process0_read_image(decode_q_pro: Queue, pid):
    """
    Read every image under JPG_PATH, decode it on device and enqueue the frames.

    decode_q_pro: Queue, tuples (file_name, ndarray NHWC RGB, height, width) are put here
    pid: int, worker index used only for logging
    """
    try:
        logger.info("=========================== Start process0_read_image ===========================")
        image_processor = ImageProcessor(device_id)  # initialise mxbase image_process
        count = 0

        # sorted() makes the processing order deterministic across runs/filesystems.
        for img_file in sorted(os.listdir(JPG_PATH)):
            logger.info(f"process0_read_image {img_file}")
            decodeImage = image_processor.decode(os.path.join(JPG_PATH, img_file), base.rgb)  # NHWC
            # 1. Move the decoded image to host memory and convert to ndarray.
            decodeImage.to_host()
            image_ori = np.array(decodeImage.to_tensor())  # NHWC, RGB

            decode_q_pro.put((img_file, image_ori, decodeImage.original_height, decodeImage.original_width))
            count += 1

        logger.info(f"process0_read_image pid:{pid} finished, put {count} images")
    except Exception as e:
        # Use the module logger instead of print so failures reach the log stream.
        logger.error("process0_read_image failed. %s", repr(e))
        traceback.print_exc()

def det_preprocess(image_ori, image_ori_height, image_ori_width, image_processor, device_id):
    """
    Resize and pad one decoded frame to the detection model's input size.

    Returns a tuple (image_src, scale_factor, image_ori):
        image_src    -- preprocessed NCHW array at the model input size
        scale_factor -- resize scale used later to map detections back
        image_ori    -- original frame cropped to its true size, NCHW
    """
    # Wrap the raw ndarray back into an SDK Image carrying its original size.
    decode_image = Image(image_ori[0], base.rgb)
    decode_image.set_original_size(Size(image_ori_width, image_ori_height))

    # Crop away decoder padding, then switch layout NHWC -> NCHW.
    image_ori = image_ori[:, :image_ori_height, :image_ori_width, :].transpose((0, 3, 1, 2))

    # Compute the resize / padding configuration for the model input shape.
    decode_image.to_device(device_id)
    resize_tuple, pad_tuple, scale_factor = face_post_process_v2.resize_factor(
        decode_image, resize_shape=(MODEL_INPUT_WIDTH, MODEL_INPUT_HEIGHT))
    resize_conf = Size(resize_tuple[0], resize_tuple[1])
    pad_conf = Dim(pad_tuple[2], pad_tuple[3], pad_tuple[0], pad_tuple[1])
    color_conf = Color(112, 112, 112)

    # Apply resize then constant-color padding, and bring the result to host.
    decode_image = image_processor.resize(decode_image, resize_conf, base.bilinear_similar_opencv)
    decode_image = image_processor.padding(decode_image, pad_conf, color_conf, base.border_constant)
    decode_image.to_host()

    # Convert the preprocessed image to an NCHW ndarray.
    image_src = np.array(decode_image.to_tensor())  # NHWC
    image_src = image_src[:, :decode_image.original_height, :decode_image.original_width, :]
    image_src = image_src.transpose((0, 3, 1, 2))  # NCHW

    return image_src, scale_factor, image_ori

def process1_infer(input_tensor_q_con: Queue, post_process_q_pro: Queue, batch_size: int, pid):
    """
    Consume decoded frames, run batched RetinaFace detection and forward
    aligned face crops to the recognition queue.

    input_tensor_q_con: Queue, consumer, frames from the decode/read stage
    post_process_q_pro: Queue, producer, aligned face arrays for feature infer
    batch_size: int, number of frames to accumulate before a batched infer
    pid: int, worker index used only for logging
    """
    try:
        logger.info(
            "======================================== Start process1_infer ========================================")
        image_processor = ImageProcessor(device_id)  # initialise mxbase image_process
        model = base.model(MODEL_PATH, deviceId=device_id)

        post_retina = face_post_process_v2.RetinaFace(model, rac="net3", masks=False)
        img_ndarray_list = []
        scale_factor_list = []
        img_ori_ndarray_list = []

        count = 0
        while True:
            start_time = time.time()
            # 1. Wait for data; after a long timeout flush the partial batch, then exit.
            while input_tensor_q_con.qsize() == 0:
                cur_time = time.time()
                # 1.1 wait too long
                if cur_time - start_time > INFER_BREAK_WAIT_TIME:
                    if img_ndarray_list:
                        # Flush leftover samples one at a time (dynamic batch).
                        logger.info("process1_infer dynamic batch branch.")
                        for n_sample in range(len(img_ndarray_list)):
                            # (1) Initialize one sample.
                            img_mxtensor = Tensor(img_ndarray_list[n_sample])
                            img_mxtensor.to_device(device_id)
                            img_mxtensor_list = [img_mxtensor]

                            # (2) Retina model infer.
                            all_det, all_landmarks = post_retina.detect(img_mxtensor_list,
                                                                        [scale_factor_list[n_sample]], 0.6)
                            logger.info(f"process1_infer det model infer {count} finished.")

                            # (3) Post process.
                            # NOTE(review): the whole img_ori_ndarray_list is passed even
                            # though only sample n_sample was detected — confirm this is
                            # what get_aligned_face_base_with_mface_all expects.
                            img_arr_list, boxes, landmarks, scores, _ = face_post_process_v2.get_aligned_face_base_with_mface_all(
                                all_det, all_landmarks, img_ori_ndarray_list)
                            for img_arr in img_arr_list:
                                if img_arr is not None:
                                    post_process_q_pro.put(img_arr)

                        # (4) Flush finished, reset lists and the wait timer.
                        start_time = time.time()
                        img_ndarray_list = []
                        scale_factor_list = []
                        img_ori_ndarray_list = []
                    else:
                        logger.info(f"process1_infer pid:{pid} wait time out, break")
                        logger.info(f"process1_infer pid:{pid} finished. get image cnt:{count}")
                        return
                time.sleep(0.5)

            logger.debug(f"process1_infer queue size {input_tensor_q_con.qsize()}")
            # 2. Fetch one frame and run detection preprocessing.
            img_file, image, image_ori_height, image_ori_width = input_tensor_q_con.get()
            count += 1
            image_src, scale_factor, image_ori = det_preprocess(image, image_ori_height, image_ori_width,
                                                                image_processor, device_id)

        # 3. Accumulate the preprocessed sample into the pending batch.
            img_ndarray_list.append(image_src.astype(np.float32))  # NCHW, RGB
            scale_factor_list.append(scale_factor)
            img_ori_ndarray_list.append(image_ori.astype(np.float32))  # NCHW, RGB

            # 4. Wait until a full batch is accumulated.
            # BUG FIX: compare against the batch_size parameter instead of the
            # global BATCH_SIZE so callers actually control the batch size
            # (the caller passes BATCH_SIZE, so default behavior is unchanged).
            if len(img_ori_ndarray_list) < batch_size:
                continue

            # 5.1 Prepare batch input
            img_mxtensor = Tensor(np.squeeze(np.array(img_ndarray_list), axis=1))
            img_mxtensor.to_device(device_id)
            img_mxtensor_list = [img_mxtensor]

            # 5.2 Retina model infer
            if USE_BENCHMARK:
                mytimer.infer_infer_and_post_start.append(time.time())
            all_det, all_landmarks = post_retina.detect(img_mxtensor_list, scale_factor_list, 0.6)
            logger.info(f"process1_infer det model infer {count} samples finished.")
            img_arr_list, boxes, landmarks, scores, _ = face_post_process_v2.get_aligned_face_base_with_mface_all(
                all_det, all_landmarks, img_ori_ndarray_list)
            for img_arr in img_arr_list:
                if img_arr is not None:
                    post_process_q_pro.put(img_arr)

            if USE_BENCHMARK:
                mytimer.infer_infer_and_post_end.append(time.time())

            # 6. Clear accumulated lists for the next batch.
            img_ndarray_list = []
            scale_factor_list = []
            img_ori_ndarray_list = []
    except Exception as e:
        # Use the module logger instead of print so failures reach the log stream.
        logger.error("process1_infer failed. %s", repr(e))
        traceback.print_exc()


def _rec_infer_once(model, img_ndarray_batch):
    """Run the feature model on one batch and return host-side ndarray outputs."""
    img_mxtensor = Tensor(img_ndarray_batch)
    img_mxtensor.to_device(device_id)
    output_tensors = model.infer([img_mxtensor])
    for i, output_tensor in enumerate(output_tensors):
        output_tensor.to_host()
        output_tensors[i] = np.array(output_tensor)
    return output_tensors


def process2_pattern_rec(post_process_q_con: Queue, pid):
    """
    Consume aligned face crops, batch them and run the feature (recognition)
    model. The embedding is the L2-normalized sum of the normal-pass and
    flipped-pass model outputs.

    post_process_q_con: Queue, consumer, aligned face arrays from process1_infer
    pid: int, worker index used only for logging
    """
    try:
        logger.info(
            "======================================== Start process2_pattern_rec ========================================")
        model = base.model(CLF_MODEL_PATH, deviceId=device_id)

        count = 0
        img_arr_list = []
        while True:
            start_time = time.time()
            # 1. Wait for data; after a long timeout flush the partial batch, then exit.
            while post_process_q_con.qsize() == 0:
                cur_time = time.time()
                # 1.1 wait too long
                if cur_time - start_time > INFER_BREAK_WAIT_TIME:
                    if img_arr_list:
                        # Flush leftover samples one at a time (dynamic batch).
                        logger.info("process2_pattern_rec enter dynamic branch.")
                        for n_sample in range(len(img_arr_list)):
                            # (1) Feature model preprocess.
                            img_ndarray_batch, img_ndarray_flip_batch = face_post_process_v2.get_feature_batch(
                                img_arr_list[n_sample])
                            # (2) Feature model infer (normal + flipped pass).
                            output_tensors = _rec_infer_once(model, img_ndarray_batch)
                            output_flip_tensors = _rec_infer_once(model, img_ndarray_flip_batch)

                            # (3) Feature model postprocess.
                            embedding = output_tensors[0] + output_flip_tensors[0]
                            embedding = preprocessing.normalize(embedding)

                            logger.info(f"process2_pattern_rec infer: {count} samples finished.")
                        # (4) Clear lists and reset the wait timer.
                        img_arr_list = []
                        start_time = time.time()
                    else:
                        logger.info(f"process2_pattern_rec pid:{pid} wait time out, break")
                        logger.info(f"process2_pattern_rec pid:{pid} finished. get image cnt:{count}")
                        return
                time.sleep(0.5)

            # 3. Accumulate one aligned face crop into the pending batch.
            img_ndarray = post_process_q_con.get()
            img_arr_list.append(img_ndarray)
            count += 1

            if USE_BENCHMARK:
                mytimer.rec_pre_start.append(time.time())

            # 4. Wait until a full batch is accumulated.
            if len(img_arr_list) < BATCH_SIZE:
                continue

            # 5.1 Feature model preprocess.
            img_ndarray_batch, img_ndarray_flip_batch = face_post_process_v2.get_feature_batch(np.array(img_arr_list))
            if USE_BENCHMARK:
                mytimer.rec_pre_end.append(time.time())

            # 5.2 Feature model infer (normal + flipped pass).
            if USE_BENCHMARK:
                mytimer.rec_infer_and_post_start.append(time.time())
            output_tensors = _rec_infer_once(model, img_ndarray_batch)
            output_flip_tensors = _rec_infer_once(model, img_ndarray_flip_batch)

            # 5.3 Feature model postprocess.
            embedding = output_tensors[0] + output_flip_tensors[0]
            embedding = preprocessing.normalize(embedding)
            # NOTE(review): embeddings are computed but not stored or forwarded —
            # presumably this is a throughput benchmark; confirm intent.
            # 5.4 Clear lists.
            logger.info(f"process2_pattern_rec infer: {count} samples finished.")
            img_arr_list = []
            if USE_BENCHMARK:
                mytimer.rec_infer_and_post_end.append(time.time())
    except Exception as e:
        # Use the module logger instead of print so failures reach the log stream.
        logger.error("process2_pattern_rec failed. %s", repr(e))
        traceback.print_exc()


def main_pool():
    """
    Launch the three-stage pipeline (read/decode -> detect -> recognize) on a
    process pool connected by two manager queues, then report throughput.
    """
    NUM_DECODE_PROCESS = 15
    NUM_INFER_PROCESS = 4
    NUM_PATTERN_RECOG_PROCESS = 4
    total_workers = NUM_DECODE_PROCESS + NUM_INFER_PROCESS + NUM_PATTERN_RECOG_PROCESS

    manager = Manager()
    q_decode = manager.Queue()  # stage 0 -> stage 1: decoded frames
    q_rec = manager.Queue()     # stage 1 -> stage 2: aligned face crops
    # BUG FIX: Pool() defaults to os.cpu_count() workers; if that is smaller
    # than the number of submitted tasks, the consumer stages never get a
    # worker and the pipeline stalls. Size the pool so all stages run at once.
    pool = Pool(total_workers)

    start = time.time()
    for i in range(NUM_DECODE_PROCESS):
        # Alternative video input: process0_video_decode(q_decode, VIDEO_PATH, i)
        pool.apply_async(process0_read_image, args=(q_decode, i))
    for i in range(NUM_INFER_PROCESS):
        pool.apply_async(process1_infer, args=(q_decode, q_rec, BATCH_SIZE, i))
    for i in range(NUM_PATTERN_RECOG_PROCESS):
        pool.apply_async(process2_pattern_rec, args=(q_rec, i))

    pool.close()
    pool.join()

    end = time.time()
    print("total time:", end - start)
    # Guard against an empty image directory (avoids ZeroDivisionError).
    total_images = NUM_DECODE_PROCESS * len(os.listdir(JPG_PATH))
    if total_images:
        print("avg time:", (end - start) / total_images)


if __name__ == "__main__":
    # Initialize the MindX SDK runtime before the worker processes are spawned.
    base.mx_init()
    main_pool()
    if USE_BENCHMARK:
        # Assemble the per-image timing report, then emit it in one print call.
        report = (
            f"first model preprocess per image cost time: {mytimer.average_infer_pre()}, "
            f"infer and postprocess per image cost time: {mytimer.average_infer_infer()}; "
            f"second model preprocess per image cost time: {mytimer.average_rec_pre()}, "
            f"infer and postprocess per image cost time: {mytimer.average_rec_infer()}")
        print(report)
