#!/usr/bin/env python3.9
# coding=utf-8

"""
Copyright (c) Huawei Technologies Co., Ltd. 2020-2022. All rights reserved.
Description: python api test.
Author: MindX SDK
Create: 2022
History: NA
"""

import os
import logging
import time
from multiprocessing import Process, JoinableQueue, Queue, Manager, Pool, log_to_stderr
import traceback
import numpy as np
import cv2
from mindx.sdk import base
from mindx.sdk.base import Image, ImageProcessor, Size, Tensor
from mindx.sdk.base import VideoDecoder, VideoDecodeConfig, VdecCallBacker

from utils import car_post_process

# Log to a file so worker-process output survives; INFO level by default.
logging.basicConfig(level=logging.INFO, format='%(asctime)s - %(levelname)s - %(message)s',
                    filename="log.log", filemode='w')
logger = logging.getLogger(__name__)
# NPU device and default video-decode channel used by all worker processes.
device_id = 0
channel_id = 0
# Decode every SKIP_INTERVAL-th frame only.
SKIP_INTERVAL = 3
# NOTE(review): 4096x2160 is usually width x height — HEIGHT=4096 / WIDTH=2160
# looks swapped; confirm against the actual stream resolution.
VIDEO_HEIGHT = 4096
VIDEO_WIDTH = 2160
# Stop decoding after this many frames (demo limit).
VIDEO_BREAK_FRAME = 100
# Detection model path. NOTE(review): "car_detal" looks like a typo for
# "car_detail"; keep as-is, it must match the file on disk.
MODEL_PATH = "./model/car_detal.om"
BATCH_SIZE = 1 
# Seconds an infer process waits on an empty queue before flushing a partial
# batch / exiting.
INFER_WAIT_TIME = 5
# NOTE(review): unused in this file — possibly consumed elsewhere; verify.
INFER_BREAK_WAIT_TIME = 5
# Color-classification model path.
COL_MODEL_PATH = "./model/car_color.om"
VIDEO_PATH = "./data/test.264"
JPG_PATH = "./data/img_files"

def process0_video_decode(decode_q_pro: Queue, video_stream_path: str, channelId: int):
    """
    Decode a video stream with the hardware video decoder, letterbox every
    decoded frame to the detection model input size and push it downstream.

    Args:
        decode_q_pro: Queue, receives (frame_id, resized NCHW array, original NCHW array).
        video_stream_path: str, video to decode from.
        channelId: int, video decode channel.
    """
    try:
        logger.info("=========================== Start process0_video_decode ===========================")
        image_processor = ImageProcessor(device_id)

        interpolation = base.huaweiu_high_order_filter

        def handle_message(decodeImage, channelId, frameId):
            # Callback invoked by the decoder for every decoded frame.
            logger.debug(f"process0_video_decode decodeImage channelId: {channelId}, frameId: {frameId} width: {decodeImage.width}, height: {decodeImage.height}")

            # 1. Keep a copy of the original frame for post-processing. Crop to
            #    original_height/original_width first: the decoded buffer may be
            #    aligned to a larger stride than the real frame.
            decodeImage.to_host()
            image_ori = np.array(decodeImage.to_tensor())  # NHWC, RGB
            logger.debug(f"process0_video_decode orginal image shape: {image_ori.shape}")
            image_ori = image_ori[:, :decodeImage.original_height, :decodeImage.original_width, :]
            image_ori = image_ori.transpose((0, 3, 1, 2))  # NCHW, RGB
            decodeImage.to_device(device_id)
            # Compute letterbox resize target and padding amounts.
            resize_tuple, pad_tuple = car_post_process.letterbox(decodeImage)
            resize_conf = Size(resize_tuple[0], resize_tuple[1])

            # 2. resize on device, then move back to host for padding
            decodeImage = image_processor.resize(decodeImage, resize_conf, interpolation)
            decodeImage.to_host()

            # 3. pad to model input size, convert to NCHW ndarray and enqueue
            #    both the letterboxed and the original frame
            image_src = np.array(decodeImage.to_tensor())  # NHWC
            image_src = image_src[:, :decodeImage.original_height, : decodeImage.original_width, :]
            image_src = cv2.copyMakeBorder(image_src[0], pad_tuple[0], pad_tuple[1], pad_tuple[2], pad_tuple[3],
                                        cv2.BORDER_CONSTANT, value=(112, 112, 112))
            image_src = np.expand_dims(image_src, axis=0).transpose((0, 3, 1, 2))  # NCHW
            logger.debug(f"process0_video_decode resized image shape: {image_src.shape}")
            decode_q_pro.put((frameId, image_src, image_ori))

            logger.debug("process0_video_decode queue has {} images".format(decode_q_pro.qsize()))

        # 1. initialize video decoder callback
        vdec_call_backer = VdecCallBacker()
        vdec_call_backer.registerVdecCallBack(handle_message)

        # 2. initialize video decoder config
        vdec_config = VideoDecodeConfig()
        vdec_config.skipInterval = SKIP_INTERVAL
        vdec_config.outputImageFormat = base.rgb
        vdec_config.width = VIDEO_WIDTH
        vdec_config.height = VIDEO_HEIGHT

        # 3. initialize video decoder object
        video_decoder = VideoDecoder(vdec_config, vdec_call_backer, deviceId=device_id, channelId=channelId)

        # 4. use ffmpeg (PyAV) to demux the stream and feed packets to the decoder
        import av
        with av.open(video_stream_path) as container:
            logger.debug("process0_video_decode start decode stream.")
            for frame_id, packet in enumerate(container.demux()):
                logger.debug(f"process0_video_decode curr frame id: {frame_id}, size: {packet.size}")
                if packet.size == 0:
                    # an empty packet marks end of stream
                    break
                try:
                    video_decoder.decode(packet, frame_id)
                except Exception as err:
                    # BUG FIX: message and exception were passed as two positional
                    # args with no %s placeholder, which raises a logging format
                    # error and drops the exception text.
                    logger.error("process0_video_decode decode err: %s", err)
                if frame_id == VIDEO_BREAK_FRAME:
                    break
        logger.info("================== Finish process0_video_decode ==================")
        del video_decoder
    except Exception:
        # logger.exception records the traceback into the log file as well
        logger.exception("process0_video_decode failed")
        traceback.print_exc()


def process0_read_image(decode_q_pro: Queue, pid):
    """
    Read every jpg under JPG_PATH, decode and letterbox it to the detection
    model input size, and push (index, resized NCHW array, original NCHW array)
    into the downstream queue.

    Args:
        decode_q_pro: Queue, receives the decoded image tuples.
        pid: worker index, used only in log messages.
    """
    try:
        logger.info("=========================== Start process0_read_image ===========================")
        image_processor = ImageProcessor(device_id)
        interpolation = base.huaweiu_high_order_filter
        count = 0

        for img_file in os.listdir(JPG_PATH):
            if img_file == ".keep":
                # placeholder that only keeps the directory under version control
                continue
            logger.info(f"process0_read_image {count}th {img_file}")
            decodeImage = image_processor.decode(os.path.join(JPG_PATH, img_file), base.rgb)  # NHWC

            # 1. Keep a cropped copy of the original image for post-processing
            #    (the decoded buffer may be stride-aligned larger than the image).
            decodeImage.to_host()
            image_ori = np.array(decodeImage.to_tensor())  # NHWC, RGB
            logger.debug(f"process0_read_image original image shape: {image_ori.shape}")
            image_ori = image_ori[:, :decodeImage.original_height, :decodeImage.original_width, :]
            image_ori = image_ori.transpose((0, 3, 1, 2))  # NCHW, RGB
            decodeImage.to_device(device_id)

            # Compute letterbox resize target and padding amounts.
            resize_tuple, pad_tuple = car_post_process.letterbox(decodeImage)
            resize_conf = Size(resize_tuple[0], resize_tuple[1])

            # 2. resize on device, then move back to host for padding
            decodeImage = image_processor.resize(decodeImage, resize_conf, interpolation)
            decodeImage.to_host()

            # 3. pad to model input size, convert to NCHW and enqueue both arrays
            image_src = np.array(decodeImage.to_tensor())  # NHWC
            image_src = image_src[:, :decodeImage.original_height, : decodeImage.original_width, :]
            image_src = cv2.copyMakeBorder(image_src[0], pad_tuple[0], pad_tuple[1], pad_tuple[2], pad_tuple[3],
                                        cv2.BORDER_CONSTANT, value=(112, 112, 112))
            image_src = np.expand_dims(image_src, axis=0).transpose((0, 3, 1, 2))  # NCHW
            logger.debug(f"process0_read_image resized image shape: {image_src.shape}")
            decode_q_pro.put((count, image_src, image_ori))

            # BUG FIX: message previously read "process0_read_imagee" (typo)
            logger.debug("process0_read_image queue has {} images".format(decode_q_pro.qsize()))
            count += 1
        logger.info(f"{pid}pid process0_read_image finished, put images cnt: {count}")
    except Exception:
        # logger.exception records the traceback into the log file as well
        logger.exception(f"{pid}pid process0_read_image failed")
        traceback.print_exc()



def process1_infer(input_tensor_q_con: JoinableQueue, q_color: Queue, batch_size: int, pid):
    """
    Consume preprocessed frames from the decode queue, batch them, run the car
    detection model, post-process the detections and push the results to the
    color-recognition queue.

    If the input queue stays empty for INFER_WAIT_TIME seconds, any partially
    filled batch is inferred sample-by-sample (dynamic-batch flush); if there is
    nothing pending either, the worker exits.

    Args:
        input_tensor_q_con: queue of (frame_id, ndarray NCHW, ndarray_ori NCHW) tuples.
        q_color: queue receiving post-processed results for the next stage.
        batch_size: number of samples to accumulate before a batched infer.
        pid: worker index, used only in log messages.
    """
    try:
        logger.info("======================================== Start process1_infer ========================================")
        model = base.model(MODEL_PATH, deviceId=device_id)

        img_ndarray_list = []
        img_ori_ndarray_list = []

        count = 0
        while True:
            start_time = time.time()
            while input_tensor_q_con.qsize() == 0:
                cur_time = time.time()
                # 1. If infer wait time out, directly infer the samples in queue without waiting for a batch.
                if cur_time - start_time >= INFER_WAIT_TIME:
                    if len(img_ndarray_list) > 0:
                        logger.info(f"{pid}pid process1_infer dynamic batch branch.")
                        for n_sample in range(len(img_ndarray_list)):
                            # (1) Initialize one sample.
                            img_mxtensor = Tensor(img_ndarray_list[n_sample])
                            img_mxtensor.to_device(device_id)
                            img_mxtensor_list = [img_mxtensor]

                            # (2) Det model infer
                            output_tensors = model.infer(img_mxtensor_list)  # output is a list with 4 arrays
                            for i, output_tensor in enumerate(output_tensors):
                                output_tensor.to_host()
                                output_tensors[i] = np.array(output_tensor)

                            # (3) post process
                            all_bboxes = car_post_process.det_postprocess(output_tensors[0], [img_ndarray_list[n_sample]], [img_ori_ndarray_list[n_sample]])
                            logger.debug(f"process1_infer post process finished.")
                            rs = car_post_process.get_rs_from_box(all_bboxes, [img_ori_ndarray_list[n_sample]])  # rs is the output
                            logger.debug("get_rs_from_box fin")
                            q_color.put(rs)

                        # (4) Infer finished, free lists
                        logger.info(f"{pid}pid process1_infer det model infer dynamic {count} samples finished.")
                        img_ndarray_list = []
                        img_ori_ndarray_list = []
                        start_time = time.time()
                    else:
                        logger.info(f"{pid}pid process1_infer wait time out, break.")
                        logger.info(f"{pid}pid process1_2_infer finished. get image cnt: {count}")
                        return
                time.sleep(0.5)

            img_tuple = input_tensor_q_con.get()  # (frame_id, ndarray, ndarray_ori), NCHW, RGB
            count += 1
            logger.debug(f"process1_infer reading image: {count}, current element in queue: {input_tensor_q_con.qsize()}.")
            logger.debug(f"process1_infer img_ndarray shape: {img_tuple[1].shape}.")

            # 3. read input array, normalize to [0, 1] float32, append to batch
            img_ndarray = img_tuple[1].astype(np.float32)  # NCHW, RGB
            img_ori_ndarray = img_tuple[2]  # NCHW, RGB
            img_ndarray = img_ndarray / 255.
            img_ndarray_list.append(img_ndarray)
            img_ori_ndarray_list.append(img_ori_ndarray)

            # 4. If batch smaller than configured size, wait until we get enough.
            # BUG FIX: compared against the module global BATCH_SIZE, which made
            # the batch_size parameter dead; use the parameter instead (the call
            # site passes BATCH_SIZE, so behavior is unchanged there).
            if len(img_ndarray_list) < batch_size:
                continue

            # 5.1 Prepare batch input: squeeze the per-sample batch axis (each
            #     sample is 1xCxHxW) into one NxCxHxW tensor.
            img_mxtensor = Tensor(np.squeeze(np.array(img_ndarray_list), axis=1))
            img_mxtensor.to_device(device_id)
            img_mxtensor_list = [img_mxtensor]

            # 5.2 Det model infer
            output_tensors = model.infer(img_mxtensor_list)  # output is a list with 4 arrays
            for i, output_tensor in enumerate(output_tensors):
                output_tensor.to_host()
                output_tensors[i] = np.array(output_tensor)
            logger.info(f"{pid}pid process1_2_infer det model infer {count} finished.")

            # 5.3 Post process
            all_bboxes = car_post_process.det_postprocess(output_tensors[0], img_ndarray_list, img_ori_ndarray_list)
            logger.debug(f"{pid}pid process1_2_infer post process finished.")
            rs = car_post_process.get_rs_from_box(all_bboxes, img_ori_ndarray_list)
            logger.debug(f"{pid} pid get_rs_from_box fin")
            q_color.put(rs)

            # 6. Clear batch lists for the next round.
            img_ndarray_list = []
            img_ori_ndarray_list = []

    except Exception:
        # logger.exception records the traceback into the log file as well
        logger.exception(f"{pid}pid process1_infer failed")
        traceback.print_exc()


def process2_pattern_rec(q_color: Queue, pid):
    """
    Consume detection results from q_color, run the car color model on each
    cropped car image, and (placeholder) process the recognition output.

    If the queue stays empty for INFER_WAIT_TIME seconds, any pending images are
    inferred one by one (dynamic flush); if nothing is pending, the worker exits.

    Args:
        q_color: queue of lists of (car_image, ...) triples from process1_infer.
        pid: worker index, used only in log messages.
    """
    try:
        logger.info("======================================== Start process2_pattern_rec ========================================")
        model = base.model(COL_MODEL_PATH, deviceId=device_id)

        count = 0
        img_arr_list = []

        while True:
            start_time = time.time()
            while q_color.qsize() == 0:
                cur_time = time.time()

                # 1. if infer wait time out, directly infer the samples in queue without waiting for a batch.
                if cur_time - start_time >= INFER_WAIT_TIME:
                    if len(img_arr_list) > 0:
                        logger.info(f"{pid}pid process2_pattern_rec enter dynamic branch.")
                        for n_sample in range(len(img_arr_list)):
                            img_mxtensor = Tensor(img_arr_list[n_sample])
                            img_mxtensor.to_device(device_id)
                            img_mxtensor_vec = [img_mxtensor]

                            # (1) Feature model preprocess.
                            output_tensors = model.infer(img_mxtensor_vec)

                            # (2) Feature model infer.
                            for i, output_tensor in enumerate(output_tensors):
                                output_tensor.to_host()
                                output_tensors[i] = np.array(output_tensor)
                                ### process result
                        # (3) Clear lists
                        logger.debug(f"process2_pattern_rec dynamic infer finished infer {count} samples.")
                        start_time = time.time()
                        img_arr_list = []
                    else:
                        logger.info(f"{pid}pid process2_pattern_rec wait time out, break.")
                        logger.info(f"{pid}pid process2_pattern_rec finished. get imagecnt: {count}")
                        return
                time.sleep(0.5)

            logger.info(f"{pid} pid process2_pattern_rec get q_color size {q_color.qsize()}.")
            img_input = q_color.get()
            count += 1
            img_arr_list = [img_triple[0] for img_triple in img_input]

            # 3. rec model preprocess.
            # BUG FIX: the original popped None entries from img_arr_list while
            # iterating it with enumerate, which shifts indices, skips elements
            # and can drop the wrong item. Build a filtered list instead.
            valid_imgs = []
            tf_imgs = []
            for img in img_arr_list:
                if img is None:
                    continue
                valid_imgs.append(img)
                tf_imgs.append(np.array(car_post_process.color_preprocess(img)))
            img_arr_list = valid_imgs
            tf_imgs = np.array(tf_imgs)

            # Not enough valid crops for a full batch: keep them pending so the
            # dynamic-flush branch can still infer them on timeout.
            if len(tf_imgs) < BATCH_SIZE:
                continue

            img_tensor = Tensor(tf_imgs)
            img_tensor.to_device(device_id)
            img_tensor_vec = [img_tensor]

            # 4. rec model infer
            output_tensors = model.infer(img_tensor_vec)
            logger.info(f"process2_pattern_rec infer finished {count} samples.")

            # 5. rec model postprocess
            for i, output_tensor in enumerate(output_tensors):
                output_tensor.to_host()
                output_tensors[i] = np.array(output_tensor)

                ### process result
            img_arr_list = []
    except Exception:
        # logger.exception records the traceback into the log file as well
        logger.exception(f"{pid}pid process2_pattern_rec failed")
        traceback.print_exc()


def main_pool():
    """
    Launch the three pipeline stages (image read, detection infer, color
    recognition) as a process pool and report total / average wall time.
    """
    start = time.time()

    NUM_DECODE_PROCESS = 20
    NUM_INFER_PROCESS = 4
    NUM_PATTERN_RECOG_PROCESS = 1
    total_workers = NUM_DECODE_PROCESS + NUM_INFER_PROCESS + NUM_PATTERN_RECOG_PROCESS
    manager = Manager()
    q_decode = manager.Queue()
    q_color = manager.Queue()
    # BUG FIX: Pool() defaults to cpu_count() workers, which can be smaller than
    # the 25 long-running tasks submitted below; the stages must run
    # concurrently, so size the pool to hold all of them at once.
    pool = Pool(processes=total_workers)

    for i in range(NUM_DECODE_PROCESS):
        # pool.apply_async(process0_video_decode, args=(q_decode, VIDEO_PATH, i))
        pool.apply_async(process0_read_image, args=(q_decode, i, ))
    for i in range(NUM_INFER_PROCESS):
        pool.apply_async(process1_infer, args=(q_decode, q_color, BATCH_SIZE, i, ))
    for i in range(NUM_PATTERN_RECOG_PROCESS):
        pool.apply_async(process2_pattern_rec, args=(q_color, i, ))

    pool.close()
    pool.join()

    end = time.time()
    # Count only real images (process0_read_image skips ".keep") and guard
    # against an empty directory to avoid ZeroDivisionError.
    num_images = sum(1 for f in os.listdir(JPG_PATH) if f != ".keep")
    print("total time", end - start)
    if num_images > 0:
        print("avg time", (end - start) / (NUM_DECODE_PROCESS * num_images))


if __name__ == "__main__":
    base.mx_init()
    main_pool()
