#!/usr/bin/env python3.9
# coding=utf-8

"""
Copyright (c) Huawei Technologies Co., Ltd. 2020-2022. All rights reserved.
Description: python api test.
Author: MindX SDK
Create: 2022
History: NA
"""

import os
import logging
import time
from multiprocessing import Process, JoinableQueue, Queue, Manager, Pool
import traceback
import numpy as np
import cv2
from mindx.sdk import base
from mindx.sdk.base import Image, ImageProcessor, Size, Tensor
from mindx.sdk.base import VideoDecoder, VideoDecodeConfig, VdecCallBacker

from utils import body_post_process

logging.basicConfig(level=logging.INFO, format='%(asctime)s - %(levelname)s - %(message)s')
logger = logging.getLogger(__name__)
# Target NPU device and default video-decode channel.
device_id = 0
channel_id = 0
# Passed to VideoDecodeConfig.skipInterval below — presumably decode every
# 3rd frame; confirm exact semantics against the SDK documentation.
SKIP_INTERVAL = 3
# NOTE(review): 4096x2160 is conventionally width x height for 4K video, so
# HEIGHT/WIDTH here look swapped — confirm against the actual input stream.
VIDEO_HEIGHT = 4096
VIDEO_WIDTH = 2160
# Stop the decode loop after this many frames (test run bound).
VIDEO_BREAK_FRAME = 100
# Detection model (OM format) and the batch size it is fed with.
MODEL_PATH = "./model/linewell_body_det_v1_bs.om"
BATCH_SIZE = 16
# Seconds to wait on an empty queue before flushing / giving up.
INFER_WAIT_TIME = 3
INFER_BREAK_WAIT_TIME = 5
# Recognition (classification) model, dynamic batch.
CLF_MODEL_PATH = "./model/linewell_body_rec_v1_dy_bs.om"
# Input video stream and alternative still-image directory.
VIDEO_PATH = "./data/test.264"
JPG_PATH = "./data/img_files"


def process0_video_decode(decode_q_pro: Queue, video_stream_path: str, channelId: int):
    """Decode a video stream on the NPU and push preprocessed frames into a queue.

    Each decoded frame is letterboxed/resized and enqueued as a tuple
    (frame_id, resized NCHW array, original NCHW array).

    Args:
        decode_q_pro: multiprocessing queue receiving the per-frame tuples.
        video_stream_path: path of the stream/file handed to PyAV for demuxing.
        channelId: hardware video-decode channel index (camelCase kept to
            match the SDK callback signature).
    """
    try:
        logger.info("=========================== Start process0_video_decode ===========================")
        image_processor = ImageProcessor(device_id)

        interpolation = base.huaweiu_high_order_filter

        def handle_message(decodeImage, channelId, frameId):
            # Invoked by the SDK for every decoded frame.
            logger.debug(
                f"process0_video_decode decodeImage channelId: {channelId}, frameId: {frameId} width: {decodeImage.width}, height: {decodeImage.height}")

            # 1. Calculate preprocess resize and padding config
            decodeImage.to_host()
            image_ori = np.array(decodeImage.to_tensor())  # NHWC, RGB
            logger.debug(f"process0_video_decode orginal image shape: {image_ori.shape}")
            # Crop decoder alignment padding back to the true frame size.
            image_ori = image_ori[:, :decodeImage.original_height, :decodeImage.original_width, :]
            image_ori = image_ori.transpose((0, 3, 1, 2))  # NCHW, RGB
            decodeImage.to_device(device_id)
            resize_tuple, pad_tuple = body_post_process.letterbox(decodeImage)
            resize_conf = Size(resize_tuple[0], resize_tuple[1])

            # 2. resize and pad
            decodeImage = image_processor.resize(decodeImage, resize_conf, interpolation)
            decodeImage.to_host()

            # 3. transfer to ndarray and put original and resized image array into queue
            image_src = np.array(decodeImage.to_tensor())  # NHWC
            image_src = image_src[:, :decodeImage.original_height, : decodeImage.original_width, :]
            # Letterbox padding with the model's neutral gray value.
            image_src = cv2.copyMakeBorder(image_src[0], pad_tuple[0], pad_tuple[1], pad_tuple[2], pad_tuple[3],
                                           cv2.BORDER_CONSTANT, value=(112, 112, 112))
            image_src = np.expand_dims(image_src, axis=0).transpose((0, 3, 1, 2))  # NCHW
            logger.debug(f"process0_video_decode resized image shape: {image_src.shape}")
            decode_q_pro.put((frameId, image_src, image_ori))

            logger.debug("process0_video_decode queue has {} images".format(decode_q_pro.qsize()))

        # 1. initialize video decoder callback
        vdec_call_backer = VdecCallBacker()
        vdec_call_backer.registerVdecCallBack(handle_message)

        # 2. initialize video decoder config
        vdec_config = VideoDecodeConfig()
        vdec_config.skipInterval = SKIP_INTERVAL
        vdec_config.outputImageFormat = base.rgb
        vdec_config.width = VIDEO_WIDTH
        vdec_config.height = VIDEO_HEIGHT

        # 3. initialize video decoder object
        video_decoder = VideoDecoder(vdec_config, vdec_call_backer, deviceId=device_id, channelId=channelId)

        # 4. use ffmpeg av and VideoDecoder to decode mp4
        import av
        with av.open(video_stream_path) as container:
            logger.debug(f"process0_video_decode start decode stream.")
            for frame_id, packet in enumerate(container.demux()):
                logger.debug(f"process0_video_decode curr frame id: {frame_id}, size: {packet.size}")
                # A zero-size packet marks end of stream.
                if packet.size == 0:
                    break
                try:
                    video_decoder.decode(packet, frame_id)
                except Exception as err:
                    # BUG FIX: the original passed `err` as a positional arg with
                    # no placeholder in the format string, which breaks at emit time.
                    logger.error("process0_video_decode decode err: %s", err)
                if frame_id == VIDEO_BREAK_FRAME:
                    break
        logger.info("================== Finish process0_video_decode ==================")
        del video_decoder
    except Exception as e:
        print("process0_video_decode failed", repr(e))
        traceback.print_exc()

def process0_read_image(decode_q_pro: Queue, pid):
    """Decode every JPEG under JPG_PATH on the NPU and enqueue preprocessed frames.

    Mirrors process0_video_decode but sources frames from still images; each
    image is enqueued as (index, resized NCHW array, original NCHW array).

    Args:
        decode_q_pro: multiprocessing queue receiving the per-image tuples.
        pid: worker index, used only for logging.
    """
    try:
        logger.info("=========================== Start process0_read_image ===========================")
        image_processor = ImageProcessor(device_id)
        interpolation = base.huaweiu_high_order_filter
        count = 0

        for img_file in os.listdir(JPG_PATH):
            logger.info(f"process0_read_image {count}th {img_file}")
            decodeImage = image_processor.decode(os.path.join(JPG_PATH, img_file), base.rgb)  # NHWC

            # 1. keep a cropped copy of the original frame (decoder buffers are padded)
            decodeImage.to_host()
            image_ori = np.array(decodeImage.to_tensor())  # NHWC, RGB
            logger.debug(f"process0_read_image orginal image shape: {image_ori.shape}")
            image_ori = image_ori[:, :decodeImage.original_height, :decodeImage.original_width, :]
            image_ori = image_ori.transpose((0, 3, 1, 2))  # NCHW, RGB
            decodeImage.to_device(device_id)

            resize_tuple, pad_tuple = body_post_process.letterbox(decodeImage)
            resize_conf = Size(resize_tuple[0], resize_tuple[1])

            # 2. resize and pad
            decodeImage = image_processor.resize(decodeImage, resize_conf, interpolation)
            decodeImage.to_host()

            # 3. transfer to ndarray and put original and resized image array into queue
            image_src = np.array(decodeImage.to_tensor())  # NHWC
            image_src = image_src[:, :decodeImage.original_height, : decodeImage.original_width, :]
            image_src = cv2.copyMakeBorder(image_src[0], pad_tuple[0], pad_tuple[1], pad_tuple[2], pad_tuple[3],
                                           cv2.BORDER_CONSTANT, value=(112, 112, 112))
            image_src = np.expand_dims(image_src, axis=0).transpose((0, 3, 1, 2))  # NCHW
            logger.debug(f"process0_read_image resized image shape: {image_src.shape}")
            decode_q_pro.put((count, image_src, image_ori))

            # BUG FIX: log message had a typo ("process0_read_imagee").
            logger.debug("process0_read_image queue has {} images".format(decode_q_pro.qsize()))
            count += 1
        logger.info(f"process0_read_image pid {pid} finished, put {count} images")
    except Exception as e:
        print("process0_read_image failed", repr(e))
        traceback.print_exc()


def process1_infer(input_tensor_q_con: JoinableQueue, post_process_q_pro: Queue, batch_size: int, pid):
    """Consume decoded frames, run batched body detection, and forward crops.

    Frames are accumulated until `batch_size` is reached, then inferred as one
    batch. If the input queue stays empty longer than INFER_BREAK_WAIT_TIME, any
    leftover partial batch is flushed sample-by-sample (dynamic-batch model);
    if nothing is buffered either, the worker returns.

    Args:
        input_tensor_q_con: queue of (frame_id, resized NCHW array, original NCHW array).
        post_process_q_pro: queue receiving per-detection crops for the recognition stage.
        batch_size: number of frames per full-batch inference.
        pid: worker index, used only for logging.
    """
    try:
        logger.info(
            "======================================== Start process1_infer ========================================")
        model = base.model(MODEL_PATH, deviceId=device_id)

        img_ndarray_list = []
        img_ori_ndarray_list = []

        count = 0
        while True:
            start_time = time.time()
            # 1. wait for data; on prolonged starvation, flush leftovers or return
            while input_tensor_q_con.qsize() == 0:
                cur_time = time.time()
                # 1.1 wait too long
                if cur_time - start_time > INFER_BREAK_WAIT_TIME:
                    # 1.1.1 partial batch buffered: infer one sample at a time
                    if img_ndarray_list:
                        logger.info("process1_infer dynamic batch branch.")
                        for n_sample in range(len(img_ndarray_list)):
                            # (1) Initialize one sample.
                            img_mxtensor = Tensor(img_ndarray_list[n_sample])
                            img_mxtensor.to_device(device_id)
                            img_mxtensor_list = [img_mxtensor]

                            # (2) Det model infer
                            output_tensors = model.infer(img_mxtensor_list)  # output is a list with 4 arrays
                            for i, output_tensor in enumerate(output_tensors):
                                output_tensor.to_host()
                                output_tensors[i] = np.array(output_tensor)

                            # (3) post process
                            all_bboxes = body_post_process.det_postprocess(output_tensors[0],
                                                                           [img_ndarray_list[n_sample]],
                                                                           [img_ori_ndarray_list[n_sample]])
                            logger.debug(f"process1_infer post process finished.")
                            rs = body_post_process.get_rs_from_box(all_bboxes,
                                                                   [img_ori_ndarray_list[n_sample]])  # rs is the output
                            for res in rs:
                                if res[0] is not None:
                                    post_process_q_pro.put(res[0])
                            logger.info(f"process1_infer image put into rec queue")
                        # (4) Infer finished, free lists
                        logger.info(f"process1_infer det model infer dynamic {count} samples finished.")
                        img_ndarray_list = []
                        img_ori_ndarray_list = []
                        start_time = time.time()
                    # 1.1.2 no input and time out
                    else:
                        logger.info(f"process1_infer pid:{pid} wait time out, break")
                        logger.info(f"process1_infer pid:{pid} finished. get image cnt:{count}")
                        return
                time.sleep(0.5)

            # 2. if queue have data, get.
            img_tuple = input_tensor_q_con.get()  # (frame_id, ndarray, ndarray_ori), NCHW, RGB
            count += 1
            logger.debug(
                f"process1_infer reading image: {count}, current element in queue: {input_tensor_q_con.qsize()}.")
            logger.debug(f"process1_infer img_ndarray shape: {img_tuple[1].shape}.")

            # 3 read input array and transfer array type, put into tensor list
            img_ndarray = img_tuple[1].astype(np.float32)  # NCHW, RGB
            img_ori_ndarray = img_tuple[2]  # NCHW, RGB
            img_ndarray = img_ndarray / 255.
            img_ndarray_list.append(img_ndarray)
            img_ori_ndarray_list.append(img_ori_ndarray)

            # 4. If batch smaller than config, wait until we have enough.
            # BUG FIX: honor the batch_size parameter (was hard-coded to the
            # BATCH_SIZE global, silently ignoring the argument).
            if len(img_ori_ndarray_list) < batch_size:
                continue

            # 5.1 Prepare batch input (drop the per-sample N=1 axis, stack to NCHW)
            img_mxtensor = Tensor(np.squeeze(np.array(img_ndarray_list), axis=1))
            img_mxtensor.to_device(device_id)
            img_mxtensor_list = [img_mxtensor]

            # 5.2 Retina model infer
            output_tensors = model.infer(img_mxtensor_list)  # output is a list with 4 arrays
            for i, output_tensor in enumerate(output_tensors):
                output_tensor.to_host()
                output_tensors[i] = np.array(output_tensor)
            logger.info(f"process1_infer det model infer {count} finished.")

            # 5.3 Post process
            all_bboxes = body_post_process.det_postprocess(output_tensors[0], img_ndarray_list, img_ori_ndarray_list)
            logger.debug(f"process1_infer post process finished.")
            rs = body_post_process.get_rs_from_box(all_bboxes, img_ori_ndarray_list)

            for res in rs:
                if res[0] is not None:
                    post_process_q_pro.put(res[0])
            logger.info(f"process1_infer image put into rec queue")

            # 6 wait for next batch, clear lists (the loop only exits via the
            # timeout `return` above; the old trailing log line was unreachable)
            img_ndarray_list = []
            img_ori_ndarray_list = []
    except Exception as e:
        print("process1_infer failed", repr(e))
        traceback.print_exc()


def process2_pattern_rec(post_process_q_con: Queue, pid):
    """Consume detection crops and run the recognition (classification) model.

    Crops are batched up to BATCH_SIZE before a full-batch inference. On
    prolonged queue starvation (> INFER_BREAK_WAIT_TIME seconds), any leftover
    samples are inferred one by one (dynamic-batch model); if nothing is
    buffered either, the worker returns.

    NOTE(review): `sigmoid_results` is computed but never forwarded or stored —
    presumably this script only measures inference throughput; confirm intent.

    Args:
        post_process_q_con: queue of per-detection image arrays from process1_infer.
        pid: worker index, used only for logging.
    """
    try:
        logger.info(
            "======================================== Start process2_pattern_rec ========================================")
        model = base.model(CLF_MODEL_PATH, deviceId=device_id)

        count = 0
        img_arr_list = []

        while True:
            start_time = time.time()
            # 1.wait for data, until wait too long, then return;
            while post_process_q_con.qsize() == 0:
                cur_time = time.time()
                # 1.1 wait too long
                if cur_time - start_time > INFER_BREAK_WAIT_TIME:
                    # 1.1.1 partial batch buffered: flush sample-by-sample
                    if len(img_arr_list) > 0:
                        logger.info("process2_pattern_rec enter dynamic branch.")
                        for n_sample in range(len(img_arr_list)):
                            img_mxtensor = Tensor(img_arr_list[n_sample])
                            img_mxtensor.to_device(device_id)
                            img_mxtensor_vec = [img_mxtensor]

                            # (1) Feature model preprocess.
                            output_tensors = model.infer(img_mxtensor_vec)

                            # (2) Feature model infer.
                            sigmoid_results = []
                            for i, output_tensor in enumerate(output_tensors):
                                output_tensor.to_host()
                                output_tensors[i] = np.array(output_tensor)
                                sigmoid_results.append(body_post_process.rec_postprocess(output_tensors[i]))
                        # (3) Clear lists
                        logger.debug(f"process2_pattern_rec dynamic infer finished infer {count} samples.")
                        start_time = time.time()
                        img_arr_list = []
                    # 1.1.2 no input and time out
                    else:
                        logger.info(f"process2_pattern_rec pid:{pid} wait time out, break")
                        logger.info(f"process2_pattern_rec pid:{pid} finished. get image cnt:{count}")
                        return
                time.sleep(0.5)

            # 2. dequeue one crop
            img_ndarray = post_process_q_con.get()
            count += 1

            # 3. rec model preprocess
            img_ndarray = body_post_process.rec_preprocess(img_ndarray)  # HWC, BGR
            img_arr_list.append(img_ndarray)

            # accumulate until a full batch is available
            if len(img_arr_list) < BATCH_SIZE:
                continue

            # drop the per-sample leading axis and stack into one batch tensor
            img_tensor = Tensor(np.squeeze(np.array(img_arr_list), axis=1))
            img_tensor.to_device(device_id)
            img_tensor_vec = [img_tensor]

            # 4. rec model infer
            output_tensors = model.infer(img_tensor_vec)
            logger.info(f"process2_pattern_rec infer finished {count} samples.")

            # 5. rec model postprocess
            sigmoid_results = []
            for i, output_tensor in enumerate(output_tensors):
                output_tensor.to_host()
                output_tensors[i] = np.array(output_tensor)
                sigmoid_results.append(body_post_process.rec_postprocess(output_tensors[i]))

            img_arr_list = []
        # unreachable: the while-loop only exits via the timeout `return` above
        logger.info("process2_pattern_rec finished.")
    except Exception as e:
        print("process2_pattern_rec failed", repr(e))
        traceback.print_exc()


def main_pool():
    """Run the three-stage pipeline (decode -> detect -> recognize) in a Pool.

    Workers communicate through two Manager queues; the function blocks until
    all workers finish, then prints total and per-image average wall time.
    """
    t_begin = time.time()

    NUM_DECODE_PROCESS = 20
    NUM_INFER_PROCESS = 4
    NUM_PATTERN_RECOG_PROCESS = 4

    manager = Manager()
    decode_queue = manager.Queue()   # decode stage -> detection stage
    rec_queue = manager.Queue()      # detection stage -> recognition stage
    worker_pool = Pool()

    # Stage 0: image readers (swap in process0_video_decode with
    # (decode_queue, VIDEO_PATH, idx) to source frames from a video stream).
    for idx in range(NUM_DECODE_PROCESS):
        worker_pool.apply_async(process0_read_image, args=(decode_queue, idx))
    # Stage 1: batched body-detection workers.
    for idx in range(NUM_INFER_PROCESS):
        worker_pool.apply_async(process1_infer, args=(decode_queue, rec_queue, BATCH_SIZE, idx))
    # Stage 2: recognition workers.
    for idx in range(NUM_PATTERN_RECOG_PROCESS):
        worker_pool.apply_async(process2_pattern_rec, args=(rec_queue, idx))

    worker_pool.close()
    worker_pool.join()

    t_end = time.time()
    print("total time", t_end - t_begin)
    print("avg time", (t_end - t_begin) / (NUM_DECODE_PROCESS * len(os.listdir(JPG_PATH))))


if __name__ == "__main__":
    # Initialize the MindX SDK runtime once in the parent before forking workers.
    base.mx_init()
    main_pool()
