#!/usr/bin/env python3.9
# coding=utf-8

"""
Copyright (c) Huawei Technologies Co., Ltd. 2020-2022. All rights reserved.
Description: python api test.
Author: MindX SDK
Create: 2022
History: NA
"""

import os
import logging
import time
from multiprocessing import Process, JoinableQueue, Queue, Manager, Pool

import numpy as np
import cv2
from mindx.sdk import base
from mindx.sdk.base import Image, ImageProcessor, Size, Tensor
from mindx.sdk.base import VideoDecoder, VideoDecodeConfig, VdecCallBacker

from utils import body_post_process

# Module-wide logging: timestamped INFO-level messages shared by all pipeline processes.
logging.basicConfig(level=logging.INFO, format='%(asctime)s - %(levelname)s - %(message)s')
logger = logging.getLogger(__name__)
# Ascend device (NPU) id used by every stage for decode/inference.
device_id = 0
channel_id = 0
# Decode only every SKIP_INTERVAL-th frame (fed to VideoDecodeConfig.skipInterval).
SKIP_INTERVAL = 3
# NOTE(review): 4096x2160 is the conventional 4K width x height — these two
# values look swapped (height=4096, width=2160); confirm against the input stream.
VIDEO_HEIGHT = 4096
VIDEO_WIDTH = 2160
# Body-detection model (.om, static batch size 1).
MODEL_PATH = "./model/linewell_body_det_v1_bs1.om"
BATCH_SIZE = 64
# Polling sleep interval (seconds) intended for the queue-wait loops.
INFER_WAIT_TIME = 1e-3
# Give up waiting for queue input after this many seconds of emptiness.
INFER_BREAK_WAIT_TIME = 10
# Body-recognition/classification model (.om, dynamic batch).
CLF_MODEL_PATH = "./model/linewell_body_rec_v1_dy_bs.om"
VIDEO_PATH = "./data/test.264"


def process0_video_decode(decode_q_pro: Queue, video_stream_path: str, channelId: int):
    """Decode a raw video stream on the NPU and enqueue preprocessed frames.

    Demuxes ``video_stream_path`` with PyAV, decodes each packet through the
    hardware ``VideoDecoder``, letterbox-resizes every decoded frame, and puts
    ``(frame_id, letterboxed NCHW array, original NCHW array)`` tuples onto
    ``decode_q_pro`` for the detection stage. Stops after packet 50 (demo cap)
    or at end of stream.

    Args:
        decode_q_pro: producer queue for decoded/preprocessed frame tuples.
        video_stream_path: path to the input video stream file.
        channelId: VDEC channel id for this decoder instance.
    """
    logger.info("=========================== Start process0_video_decode ===========================")
    image_processor = ImageProcessor(device_id)

    interpolation = base.huaweiu_high_order_filter

    def handle_message(decodeImage, channelId, frameId):
        # SDK callback invoked once per successfully decoded frame.
        logger.debug(f"process0_video_decode decodeImage channelId: {channelId}, frameId: {frameId} width: {decodeImage.width}, height: {decodeImage.height}")

        # 1. Copy the frame to host, crop the (possibly alignment-padded)
        #    buffer back to the original resolution and keep an NCHW copy.
        decodeImage.to_host()
        image_ori = np.array(decodeImage.to_tensor())  # NHWC, RGB
        logger.debug(f"process0_video_decode orginal image shape: {image_ori.shape}")
        image_ori = image_ori[:, :decodeImage.original_height, :decodeImage.original_width, :]
        image_ori = image_ori.transpose((0, 3, 1, 2))  # NCHW, RGB
        decodeImage.to_device(device_id)
        resize_tuple, pad_tuple = body_post_process.letterbox(decodeImage)
        resize_conf = Size(resize_tuple[0], resize_tuple[1])

        # 2. Resize on device, then bring the result back to host memory.
        decodeImage = image_processor.resize(decodeImage, resize_conf, interpolation)
        decodeImage.to_host()

        # 3. Pad to the model input shape (grey 112 borders) and enqueue both
        #    the letterboxed frame and the original frame.
        image_src = np.array(decodeImage.to_tensor())  # NHWC
        image_src = image_src[:, :decodeImage.original_height, : decodeImage.original_width, :]
        image_src = cv2.copyMakeBorder(image_src[0], pad_tuple[0], pad_tuple[1], pad_tuple[2], pad_tuple[3],
                                       cv2.BORDER_CONSTANT, value=(112, 112, 112))
        image_src = np.expand_dims(image_src, axis=0).transpose((0, 3, 1, 2))  # NCHW
        logger.debug(f"process0_video_decode resized image shape: {image_src.shape}")
        decode_q_pro.put((frameId, image_src, image_ori))

        logger.debug("process0_video_decode queue has {} images".format(decode_q_pro.qsize()))

    # 1. Register the per-frame callback with the decoder.
    vdec_call_backer = VdecCallBacker()
    vdec_call_backer.registerVdecCallBack(handle_message)

    # 2. Configure the video decoder (frame skipping, output format, geometry).
    vdec_config = VideoDecodeConfig()
    vdec_config.skipInterval = SKIP_INTERVAL
    vdec_config.outputImageFormat = base.rgb
    vdec_config.width = VIDEO_WIDTH
    vdec_config.height = VIDEO_HEIGHT

    # 3. Create the decoder bound to this device/channel.
    video_decoder = VideoDecoder(vdec_config, vdec_call_backer, deviceId=device_id, channelId=channelId)

    # 4. Demux with PyAV and hand each packet to the hardware decoder.
    import av
    with av.open(video_stream_path) as container:
        logger.debug("process0_video_decode start decode stream.")
        for frame_id, packet in enumerate(container.demux()):
            logger.debug(f"process0_video_decode curr frame id: {frame_id}, size: {packet.size}")
            if packet.size == 0:  # an empty packet marks end of stream
                break
            try:
                video_decoder.decode(packet, frame_id)
            except Exception as err:
                # Bug fix: the original message had no %s placeholder, so the
                # exception text was never rendered into the log record.
                logger.error("process0_video_decode decode err: %s", err)
            if frame_id == 50:  # demo cap: only feed the first 51 packets
                break
    logger.info("================== Finish process0_video_decode ==================")
    # Release the decoder (and its device channel) explicitly before returning.
    del video_decoder


def process2_infer(input_tensor_q_con: JoinableQueue, post_process_q_pro: Queue, batch_size: int):
    """Run body detection on decoded frames and feed results to recognition.

    Pulls ``(frame_id, letterboxed NCHW array, original NCHW array)`` tuples
    from ``input_tensor_q_con``, normalizes and runs the detection model,
    post-processes the boxes, and puts the original frame (HWC, BGR) onto
    ``post_process_q_pro``. Exits once the input queue has stayed empty for
    ``INFER_BREAK_WAIT_TIME`` seconds.

    Args:
        input_tensor_q_con: consumer queue of decoded frame tuples.
        post_process_q_pro: producer queue for the recognition stage.
        batch_size: reserved for batched inference; currently unused.
    """
    logger.info("======================================== Start process2_infer ========================================")
    model = base.model(MODEL_PATH, deviceId=device_id)

    wait_time = time.time()
    count = 0
    while True:
        if input_tensor_q_con.qsize() == 0:
            if time.time() - wait_time > INFER_BREAK_WAIT_TIME:
                logger.info("process2_infer wait time out, break.")
                break
            # Bug fix: the original `continue` busy-spun at 100% CPU while the
            # queue was empty; INFER_WAIT_TIME was defined but never used.
            time.sleep(INFER_WAIT_TIME)
            continue
        wait_time = time.time()
        img_tuple = input_tensor_q_con.get()  # (frame_id, ndarray, ndarray_ori), NCHW, RGB
        logger.debug(f"process2_infer reading image: {count}, current element in queue: {input_tensor_q_con.qsize()}.")
        logger.debug(f"process2_infer img_ndarray shape: {img_tuple[1].shape}.")

        # 1. Normalize to [0, 1] float32 and move the tensor onto the device.
        img_ndarray = img_tuple[1].astype(np.float32)  # NCHW, RGB
        img_ori_ndarray = img_tuple[2]  # NCHW, RGB
        img_ndarray = img_ndarray / 255.
        img_mxtensor = Tensor(img_ndarray)
        img_mxtensor.to_device(device_id)

        img_mxtensor_list = [img_mxtensor]
        img_ndarray_list = [img_ndarray]
        img_ori_ndarray_list = [img_ori_ndarray]

        # 2. Detection model inference; copy each output back to host as ndarray.
        output_tensors = model.infer(img_mxtensor_list)  # output is a list with 4 arrays
        for i, output_tensor in enumerate(output_tensors):
            output_tensor.to_host()
            output_tensors[i] = np.array(output_tensor)
        logger.info(f"process2_infer det model infer {count} finished.")

        # 3. Post-process detections into boxes / results.
        all_bboxes = body_post_process.det_postprocess(output_tensors[0], img_ndarray_list, img_ori_ndarray_list)
        logger.debug(f"process2_infer post process finished.")
        rs = body_post_process.get_rs_from_box(all_bboxes, img_ori_ndarray_list)

        # 4. Hand the original frame to the recognition queue (HWC, BGR —
        #    channel axis reversed from RGB, then NCHW -> HWC).
        post_process_q_pro.put(img_ori_ndarray[0, ::-1, :, :].transpose(1, 2, 0))  # put HWC, BGR
        logger.debug(f"process2_infer image {count} put into rec queue")
        count += 1
    logger.info("process2_infer finished.")


def process3_1_pattern_rec(post_process_q_con: Queue):
    """Run the recognition/classification model on frames from the det stage.

    Pulls HWC/BGR frames from ``post_process_q_con``, preprocesses them,
    runs the recognition model, and applies the sigmoid post-process to each
    output. Exits once the queue has stayed empty for
    ``INFER_BREAK_WAIT_TIME`` seconds.

    Args:
        post_process_q_con: consumer queue of frames produced by process2_infer.
    """
    logger.info("======================================== Start process3_1_pattern_rec ========================================")
    model = base.model(CLF_MODEL_PATH, deviceId=device_id)

    wait_time = time.time()
    count = 0
    while True:
        if post_process_q_con.qsize() == 0:
            if time.time() - wait_time > INFER_BREAK_WAIT_TIME:
                logger.info("process3_1_pattern_rec wait time out, break.")
                break
            # Bug fix: sleep between polls instead of busy-spinning at 100% CPU.
            time.sleep(INFER_WAIT_TIME)
            continue
        wait_time = time.time()
        logger.debug(f"process3_1_pattern_rec get inferred image {count}.")
        img_ndarray = post_process_q_con.get()

        # 1. Recognition-model preprocess; feed the array straight to Tensor.
        #    Bug fix: removed the per-frame np.save/np.load round trip through
        #    a fixed "rec_input.npy" file — it added disk I/O on every frame
        #    and would race if several recognition processes ran concurrently.
        #    ascontiguousarray guarantees a C-contiguous buffer for Tensor.
        img_ndarray = body_post_process.rec_preprocess(img_ndarray)  # HWC, BGR
        sigmoid_results = []
        img_tensor = Tensor(np.ascontiguousarray(img_ndarray))
        img_tensor.to_device(device_id)
        img_tensor_vec = [img_tensor]

        # 2. Recognition-model inference.
        output_tensors = model.infer(img_tensor_vec)
        logger.info("process3_1_pattern_rec infer finished.")

        # 3. Post-process each output with the sigmoid step.
        for i, output_tensor in enumerate(output_tensors):
            output_tensor.to_host()
            output_tensors[i] = np.array(output_tensor)
            sigmoid_results.append(body_post_process.rec_postprocess(output_tensors[i]))

        # Bug fix: the frame counter was logged but never incremented.
        count += 1

        logger.info("process3_1_pattern_rec finished.")


def main_pool():
    """Launch the decode -> detect -> recognize pipeline stages in a process pool.

    Queues come from a Manager so they can be shared with Pool workers
    (plain multiprocessing.Queue cannot be passed through apply_async).
    """
    NUM_DECODE_PROCESS = 1
    NUM_INFER_PROCESS = 1
    NUM_PATTERN_RECOG_PROCESS = 1
    manager = Manager()
    q_decode = manager.Queue()  # decode -> detection
    q_rec = manager.Queue()     # detection -> recognition
    pool = Pool()

    # Bug fix: apply_async silently swallows worker exceptions unless the
    # AsyncResult is consumed; surface them via an error callback instead.
    def _log_worker_error(err):
        logger.error("main_pool worker process failed: %s", err)

    for i in range(NUM_DECODE_PROCESS):
        pool.apply_async(process0_video_decode, args=(q_decode, VIDEO_PATH, i),
                         error_callback=_log_worker_error)
    for _ in range(NUM_INFER_PROCESS):
        pool.apply_async(process2_infer, args=(q_decode, q_rec, BATCH_SIZE),
                         error_callback=_log_worker_error)
    for _ in range(NUM_PATTERN_RECOG_PROCESS):
        pool.apply_async(process3_1_pattern_rec, args=(q_rec, ),
                         error_callback=_log_worker_error)

    # No more tasks will be submitted; wait for all workers to finish.
    pool.close()
    pool.join()


if __name__ == "__main__":
    # Initialize the MindX SDK runtime once before any device work, then run the pipeline.
    base.mx_init()
    main_pool()
