#!/usr/bin/env python3.9
# coding=utf-8

"""
Copyright (c) Huawei Technologies Co., Ltd. 2020-2022. All rights reserved.
Description: python api test.
Author: MindX SDK
Create: 2022
History: NA
"""

import os
import logging
import time
from multiprocessing import Queue, Manager, Pool
import traceback
import numpy as np
from mindx.sdk import base
from mindx.sdk.base import ImageProcessor
from mindx.sdk.base import VideoDecoder, VideoDecodeConfig, VdecCallBacker

from body import body_detect, body_recognize, body_reid
from portrait import portrait_detect, portrait_feature
from car_union import car_union_detect, car_union_color_recognize, car_union_prop_recognize, car_union_brand_recognize
from car_plate import car_plate_detect_rec
from deepsort import track_deepsort

# Module-wide logging: timestamped, level-tagged messages shared by every process.
logging.basicConfig(level=logging.INFO, format='%(asctime)s - %(levelname)s - %(message)s')
logger = logging.getLogger(__name__)

# NPU / pipeline configuration.
device_id = 0            # Ascend device id all models and decoders are bound to
channel_id = 0           # default video-decode channel (per-process channels override this)
SKIP_INTERVAL = 3        # video decoder emits every 3rd frame
BATCH_SIZE = 16          # dynamic batch size for body / portrait models
CAR_BATCH = 1            # batch size for car models
# NOTE(review): 4096x2160 is conventionally width x height — these two values
# look swapped; confirm against the camera stream before relying on them.
VIDEO_HEIGHT = 4096
VIDEO_WIDTH = 2160
VIDEO_BREAK_FRAME = 100  # optional early-stop frame id (currently disabled in video_decode)
VIDEO_PATH = "./data"
JPG_PATH = "./data/img_files"
MAX_AGE = 5              # deepsort: max frames a lost track is kept before deletion
USE_NPU = True           # run deepsort feature matching on the NPU


# Offline (.om) model paths, grouped by pipeline stage.
BODY_DET_MODEL_PATH = "./model/linewell_body_det_v1_dy_bs.om"
BODY_REC_MODEL_PATH = "./model/linewell_body_rec_v1_dy_bs.om"
BODY_REID_MODEL_PATH = "./model/linewell_body_reid_v2_dy_bs.om"

PORTRAIT_DET_MODEL_PATH = "./model/retinaface_r50_v1_960_dy_batch.om"
PORTRAIT_FEAT_MODEL_PATH = "./model/model_dy_batch.om"

CARUNION_DET_MODEL_PATH = "./model/car_detect/car_det_v2_aoe2.om"
CARUNION_REC_COLOR_MODEL_PATH = "./model/car_color.om"
CARUNION_REC_PROP_MODEL_PATH = "./model/car_prop/car_prop_aoe2.om"
CARUNION_REC_BRAND_MODEL_PATH = "./model/car_brand/car_brand_aoe2.om"

CARPLATE_DET_MODEL_PATH = "./model/carplate_det_aipp.om"
CARPLATE_REC_MODEL_PATH = "./model/inference_rec_dy_bs.om"


def video_decode(car_union_q: Queue, body_q: Queue, face_q: Queue, video_stream_path: str, channelId: int):
    """
    Decode a video stream on the NPU and fan each decoded frame out to the
    three downstream detection queues.

    car_union_q: Queue, receives (frameId, image, original_height, original_width)
    body_q: Queue, receives the same tuples for body detection
    face_q: Queue, receives the same tuples for portrait detection
    video_stream_path: str, video file / stream to decode from
    channelId: int, video decode channel id (must be unique per decoder)
    """
    try:
        logger.info("=========================== Start video_decode ===========================")

        def vdec_callback(decodeImage, channelId, frameId):
            """Per-frame decoder callback: copy the frame to host and enqueue it."""
            # Lazy %-args: the debug message is only formatted if DEBUG is enabled.
            logger.debug(
                "process0_video_decode decodeImage channelId: %s, frameId: %s width: %s, height: %s",
                channelId, frameId, decodeImage.width, decodeImage.height)

            # 1. Move the frame from device to host memory so numpy can read it.
            decodeImage.to_host()
            image_ori = np.array(decodeImage.to_tensor())  # NHWC, RGB

            # Build the tuple once; the same ndarray is shared by all three queues.
            item = (frameId, image_ori, decodeImage.original_height, decodeImage.original_width)
            car_union_q.put(item)
            body_q.put(item)
            face_q.put(item)

        # 1. initialize video decoder callback
        vdec_callbacker = VdecCallBacker()
        vdec_callbacker.registerVdecCallBack(vdec_callback)

        # 2. initialize video decoder config
        vdec_config = VideoDecodeConfig()
        vdec_config.skipInterval = SKIP_INTERVAL
        vdec_config.outputImageFormat = base.rgb
        vdec_config.width = VIDEO_WIDTH
        vdec_config.height = VIDEO_HEIGHT

        # 3. initialize video decode object
        video_decoder = VideoDecoder(vdec_config, vdec_callbacker, deviceId=device_id, channelId=channelId)

        # 4. use ffmpeg (PyAV) to demux packets and feed them to the NPU decoder.
        # Imported lazily so the module loads even where PyAV is absent.
        import av
        with av.open(video_stream_path) as container:
            logger.debug(
                "============================process0_video_decode start decode stream =========================")
            for frame_id, packet in enumerate(container.demux()):
                logger.debug("process0_video_decode curr frame id: %s, size: %s", frame_id, packet.size)
                if packet.size == 0:
                    # Empty packet marks end of stream.
                    break
                try:
                    video_decoder.decode(packet, frame_id)
                except Exception as err:
                    # BUG FIX: the original passed `err` as a %-arg with no
                    # placeholder, so the exception text was never printed.
                    logger.error("process0_video_decode decode error: %s", err)
        logger.info("=============== Finish process0_video_decode ========================")
        # Release the decoder channel explicitly before the process exits.
        del video_decoder
    except Exception:
        # Top-level worker boundary: log the full traceback instead of print().
        logger.exception("process0_video_decode failed.")


def read_image(car_union_q: Queue, body_q: Queue, face_q: Queue, pid):
    """
    Decode every .jpg under JPG_PATH on the NPU and fan each decoded image
    out to the three detection queues as (filename, image, height, width).

    car_union_q / body_q / face_q: Queue, downstream consumers
    pid: int, logical producer index, used for logging only
    """
    try:
        logger.info("=========================== Start read_image ===========================")
        image_processor = ImageProcessor(device_id)  # mxbase image decoder bound to this NPU
        count = 0

        for img_file in os.listdir(JPG_PATH):
            # Only .jpg files are fed into the pipeline.
            if not img_file.endswith(".jpg"):
                continue
            decoded_image = image_processor.decode(os.path.join(JPG_PATH, img_file), base.rgb)  # NHWC
            decoded_image.to_host()  # move from device to host memory so numpy can read it

            # Serialize once; the same ndarray is shared by all three queues.
            image_ori = np.array(decoded_image.to_tensor())  # NHWC, RGB
            item = (img_file, image_ori, decoded_image.original_height, decoded_image.original_width)
            car_union_q.put(item)
            body_q.put(item)
            face_q.put(item)
            count += 1

        logger.info("process0_read_image pid:%s finished, put %s images", pid, count)
    except Exception:
        # Top-level worker boundary: log the full traceback via the module
        # logger (the original used bare print(), bypassing log formatting).
        logger.exception("process0_read_image failed.")


if __name__ == "__main__":
    start = time.time()
    base.mx_init()  # initialize the mxbase runtime before any NPU work

    # Worker counts per pipeline stage.
    NUM_DECODE_PROCESS = 25
    NUM_DETECTION_PROCESS = 2
    NUM_PATTERN_RECOG_PROCESS = 2
    NUM_REID_PROCESS = 2
    NUM_TRACK_PROCESS = 2

    # Manager queues are picklable proxies, required because plain
    # multiprocessing.Queue cannot be passed through Pool.apply_async.
    manager = Manager()
    # Car union
    q_decode_car_union = manager.Queue()
    q_rec_car_union_color = manager.Queue()
    q_rec_car_union_prop = manager.Queue()
    q_rec_car_union_brand = manager.Queue()
    # Carplate
    q_det_rec_carplate = manager.Queue()
    # Body
    q_decode_body = manager.Queue()
    q_rec_body = manager.Queue()
    q_reid_body = manager.Queue()
    # Portrait
    q_decode_face = manager.Queue()
    q_feat_face = manager.Queue()
    # Track_deepsort
    q_track_body = manager.Queue()
    q_track_face = manager.Queue()
    q_track_car = manager.Queue()

    pool = Pool()

    # Stage 0: producers — decode images (or video) into the three decode queues.
    for i in range(NUM_DECODE_PROCESS):
        # pool.apply_async(video_decode, args=(q_decode_car_union, q_decode_body, q_decode_face, VIDEO_PATH, i))
        pool.apply_async(read_image, args=(q_decode_car_union, q_decode_body, q_decode_face, i,))

    # Stage 1: detectors — consume decoded frames, emit crops to recognizers/trackers.
    for i in range(NUM_DETECTION_PROCESS):
        pool.apply_async(body_detect, args=(os.path.realpath(BODY_DET_MODEL_PATH),
                                            BATCH_SIZE, device_id, i, q_decode_body, q_rec_body, q_reid_body, q_track_body))
        pool.apply_async(portrait_detect, args=(os.path.realpath(PORTRAIT_DET_MODEL_PATH),
                                                BATCH_SIZE, device_id, i, q_decode_face, q_feat_face, q_track_face))
        pool.apply_async(car_union_detect, args=(os.path.realpath(CARUNION_DET_MODEL_PATH),
                                                 CAR_BATCH, device_id, i,
                                                 q_decode_car_union,
                                                 q_rec_car_union_color,
                                                 q_rec_car_union_prop,
                                                 q_rec_car_union_brand,
                                                 q_det_rec_carplate,
                                                 q_track_car))

    # Stage 2: attribute recognizers — body re-id features, car color/prop/brand, plates.
    for i in range(NUM_PATTERN_RECOG_PROCESS):
        pool.apply_async(body_recognize, args=(os.path.realpath(BODY_REC_MODEL_PATH),
                                               BATCH_SIZE, device_id, i, q_rec_body,))
        pool.apply_async(car_union_color_recognize, args=(os.path.realpath(CARUNION_REC_COLOR_MODEL_PATH),
                                                          CAR_BATCH, device_id, i, q_rec_car_union_color,))
        pool.apply_async(car_union_prop_recognize, args=(os.path.realpath(CARUNION_REC_PROP_MODEL_PATH),
                                                         CAR_BATCH, device_id, i, q_rec_car_union_prop,))
        pool.apply_async(car_union_brand_recognize, args=(os.path.realpath(CARUNION_REC_BRAND_MODEL_PATH),
                                                          CAR_BATCH, device_id, i, q_rec_car_union_brand,))
        pool.apply_async(car_plate_detect_rec, args=(os.path.realpath(CARPLATE_DET_MODEL_PATH),
                                                     os.path.realpath(CARPLATE_REC_MODEL_PATH),
                                                     CAR_BATCH, device_id, i, q_det_rec_carplate))

    # Stage 3: feature extractors for re-identification.
    for i in range(NUM_REID_PROCESS):
        pool.apply_async(body_reid, args=(os.path.realpath(BODY_REID_MODEL_PATH), BATCH_SIZE,
                                          device_id, i, q_reid_body,))
        pool.apply_async(portrait_feature, args=(os.path.realpath(PORTRAIT_FEAT_MODEL_PATH),
                                                 BATCH_SIZE, device_id, i, q_feat_face,))

    # Stage 4: trackers — one deepsort instance per object stream.
    for i in range(NUM_TRACK_PROCESS):
        pool.apply_async(track_deepsort, args=(device_id, MAX_AGE, USE_NPU, q_track_body,))
        pool.apply_async(track_deepsort, args=(device_id, MAX_AGE, USE_NPU, q_track_face,))
        pool.apply_async(track_deepsort, args=(device_id, MAX_AGE, USE_NPU, q_track_car,))

    pool.close()
    pool.join()

    end = time.time()
    print("total time", end - start)
    # BUG FIX: read_image only enqueues .jpg files, so the per-image average
    # must divide by the jpg count, not every directory entry; also avoid a
    # ZeroDivisionError when the directory holds no jpgs.
    num_jpgs = sum(1 for f in os.listdir(JPG_PATH) if f.endswith(".jpg"))
    total_images = NUM_DECODE_PROCESS * num_jpgs
    if total_images:
        print("avg time", (end - start) / total_images)
    else:
        print("avg time: n/a (no .jpg files found in", JPG_PATH + ")")