#!/usr/bin/env python3.9
# coding=utf-8

"""
Copyright (c) Huawei Technologies Co., Ltd. 2020-2022. All rights reserved.
Description: python api test.
Author: MindX SDK
Create: 2022
History: NA
"""

import os
import logging
import time
from multiprocessing import Process, JoinableQueue, Queue, Manager, Pool
import traceback
import numpy as np
import cv2
from sklearn import preprocessing
from mindx.sdk import base
from mindx.sdk.base import Image, ImageProcessor, Size, Tensor, Dim, Color
from mindx.sdk.base import VideoDecoder, VideoDecodeConfig, VdecCallBacker

from utils import face_post_process_v2, timer

import sys
sys.path.append("./deep_sort_realtime-master")
from deep_sort_realtime.deepsort_tracker import DeepSort

logging.basicConfig(level=logging.INFO, format='%(asctime)s - %(levelname)s - %(message)s')
logger = logging.getLogger(__name__)
device_id = 0  # NPU device used by every pipeline stage
channel_id = 0  # default hardware video-decode channel
SKIP_INTERVAL = 3  # decode only every 3rd frame of the video stream
MODEL_INPUT_HEIGHT = 960  # detection model input height (pixels)
MODEL_INPUT_WIDTH = 960  # detection model input width (pixels)
VIDEO_HEIGHT = 4096  # NOTE(review): 4096x2160 is conventionally width x height — confirm these two are not swapped
VIDEO_WIDTH = 2160
VIDEO_BREAK_FRAME = 100  # stop decoding after this many frames (test/debug limit)
MODEL_PATH = "./model/retinaface_r50_v1_960_dy_bs.om"  # RetinaFace face-detection model (dynamic batch)
BATCH_SIZE = 1  # inference batch size for both models
INFER_WAIT_TIME = 1  # seconds; presumably a per-poll wait — currently unused in this file
INFER_BREAK_WAIT_TIME = 3  # seconds of empty queue before a consumer stage gives up
CLF_MODEL_PATH = "./model/model_dy_bs_so.om"  # face feature (recognition) model
VIDEO_PATH = "./data/test.264"  # raw H.264 stream for the video-decode path
JPG_PATH = "./data/imgs"  # directory of jpg images for the image-reading path

USE_BENCHMARK = False  # when True, per-stage timings are accumulated into mytimer
mytimer = timer.Timer(BATCH_SIZE)


def process0_video_decode(decode_q_pro: Queue, video_stream_path: str, channelId: int):
    """
    Decode a raw H.264 stream with the hardware video decoder, resize and pad
    every kept frame to the detection model input size, and enqueue
    (frameId, resized NCHW image, scale_factor, original NCHW image) tuples.

    decode_q_pro: Queue, decoded frames to be put in
    video_stream_path: str, video to decode from
    channelId: int, video decode channel
    """
    try:
        logger.info("=========================== Start process0_video_decode ===========================")
        image_processor = ImageProcessor(device_id)  # initialize mxbase image_process

        def vdec_callback(decodeImage, channelId, frameId):
            """VideoDecoder callback: preprocess one decoded frame and enqueue it."""
            # NOTE: the original placed a bare string literal (intended as a
            # docstring) after executable statements; it was a dead statement.
            if USE_BENCHMARK:
                mytimer.infer_pre_start.append(time.time())
            logger.debug(
                f"process0_video_decode decodeImage channelId: {channelId}, frameId: {frameId} width: {decodeImage.width}, height: {decodeImage.height}")

            # 1. Keep a host copy of the original frame and compute the
            #    resize/padding parameters for the model input size.
            decodeImage.to_host()
            image_ori = np.array(decodeImage.to_tensor())  # NHWC, RGB
            logger.debug(f"process0_video_decode Decode image shape: {image_ori.shape}")
            # Crop hardware alignment padding back to the true frame size.
            image_ori = image_ori[:, :decodeImage.original_height, :decodeImage.original_width, :]
            image_ori = image_ori.transpose((0, 3, 1, 2))  # NCHW
            resize_tuple, pad_tuple, scale_factor = face_post_process_v2.resize_factor(decodeImage,
                                                                                       resize_shape=(MODEL_INPUT_WIDTH,
                                                                                                     MODEL_INPUT_HEIGHT))
            resize_conf = Size(resize_tuple[0], resize_tuple[1])
            pad_conf = Dim(pad_tuple[2], pad_tuple[3], pad_tuple[0], pad_tuple[1])
            color_conf = Color(112, 112, 112)

            # 2. Aspect-preserving resize, then pad to the model input size.
            decodeImage = image_processor.resize(decodeImage, resize_conf, base.bilinear_similar_opencv)
            decodeImage = image_processor.padding(decodeImage, pad_conf, color_conf, base.border_constant)

            decodeImage.to_host()

            # 3. Transfer to ndarray and put original and resized image array into queue.
            image_src = np.array(decodeImage.to_tensor())  # NHWC
            image_src = image_src[:, :decodeImage.original_height, : decodeImage.original_width, :]
            image_src = image_src.transpose((0, 3, 1, 2))  # NCHW
            logger.debug(
                "process0_video_decode to be put in queue resized_image: {}, scale_factor: {}, image_ori: {}".format(
                    image_src.shape, scale_factor, image_ori.shape))
            decode_q_pro.put((frameId, image_src, scale_factor, image_ori))

            logger.debug("process0_video_decode producer queue, elements {}".format(decode_q_pro.qsize()))
            if USE_BENCHMARK:
                mytimer.infer_pre_end.append(time.time())

        # 1. initialize video decoder callback
        vdec_callbacker = VdecCallBacker()
        vdec_callbacker.registerVdecCallBack(vdec_callback)

        # 2. initialize video decoder config
        vdec_config = VideoDecodeConfig()
        vdec_config.skipInterval = SKIP_INTERVAL  # decode every SKIP_INTERVAL-th frame
        vdec_config.outputImageFormat = base.rgb
        vdec_config.width = VIDEO_WIDTH
        vdec_config.height = VIDEO_HEIGHT

        # 3. initialize video decode object
        video_decoder = VideoDecoder(vdec_config, vdec_callbacker, deviceId=device_id, channelId=channelId)

        # 4. use PyAV (ffmpeg) to demux packets and feed them to VideoDecoder
        import av
        with av.open(video_stream_path) as container:
            logger.debug(
                "============================process0_video_decode start decode stream =========================")
            for frame_id, packet in enumerate(container.demux()):
                logger.debug(f"process0_video_decode curr frame id: {frame_id}, size: {packet.size}")
                if packet.size == 0:  # empty packet marks end of stream
                    break
                try:
                    video_decoder.decode(packet, frame_id)
                except Exception as err:
                    # Fixed: the original passed err as an extra positional arg
                    # to a message with no placeholder, which makes the logging
                    # module raise a formatting error instead of logging.
                    logger.error("process0_video_decode decode error: %s", err)
                if frame_id == VIDEO_BREAK_FRAME:  # early stop (test limit)
                    break
        logger.info("=============== Finish process0_video_decode ========================")
        del video_decoder
    except Exception as e:
        print("process0_video_decode failed.", repr(e))
        traceback.print_exc()

def process0_read_image(decode_q_pro: Queue, pid):
    """
    Read every jpg under JPG_PATH, decode it on the device, resize and pad it
    to the detection model input size, and enqueue
    (file name, resized NCHW image, scale_factor, original NCHW image) tuples.

    decode_q_pro: Queue, preprocessed images to be put in
    pid: worker id, used only for logging
    """
    try:
        logger.info("=========================== Start process0_read_image ===========================")
        image_processor = ImageProcessor(device_id)  # initialise mxbase image_process
        count = 0

        for img_file in os.listdir(JPG_PATH):
            if USE_BENCHMARK:
                mytimer.infer_pre_start.append(time.time())
            # Fixed: the original log message had no separator between the tag
            # and the file name, producing unreadable "process0_read_imagefoo.jpg".
            logger.info(f"process0_read_image {img_file}")
            decodeImage = image_processor.decode(os.path.join(JPG_PATH, img_file), base.rgb)  # NHWC
            # 1. Keep a host copy of the original image and compute the
            #    resize/padding parameters for the model input size.
            decodeImage.to_host()
            image_ori = np.array(decodeImage.to_tensor())  # NHWC, RGB
            logger.debug(f"process0_read_image Decode image shape: {image_ori.shape}")
            # Crop hardware alignment padding back to the true image size.
            image_ori = image_ori[:, :decodeImage.original_height, :decodeImage.original_width, :]
            image_ori = image_ori.transpose((0, 3, 1, 2))  # NCHW
            # resize and pad are executed on the device
            decodeImage.to_device(device_id)
            resize_tuple, pad_tuple, scale_factor = face_post_process_v2.resize_factor(decodeImage,
                                                                                       resize_shape=(
                                                                                           MODEL_INPUT_WIDTH,
                                                                                           MODEL_INPUT_HEIGHT))
            resize_conf = Size(resize_tuple[0], resize_tuple[1])
            pad_conf = Dim(pad_tuple[2], pad_tuple[3], pad_tuple[0], pad_tuple[1])
            color_conf = Color(112, 112, 112)

            # 2. Aspect-preserving resize, then pad to the model input size.
            decodeImage = image_processor.resize(decodeImage, resize_conf, base.bilinear_similar_opencv)
            decodeImage = image_processor.padding(decodeImage, pad_conf, color_conf, base.border_constant)
            decodeImage.to_host()

            # 3. Transfer to ndarray and put original and resized image array into queue.
            image_src = np.array(decodeImage.to_tensor())  # NHWC
            image_src = image_src[:, :decodeImage.original_height, :decodeImage.original_width, :]
            image_src = image_src.transpose((0, 3, 1, 2))  # NCHW
            logger.debug(
                "process0_read_image to be put in queue resized_image: {}, scale_factor: {}, image_ori: {}".format(
                    image_src.shape, scale_factor, image_ori.shape))
            decode_q_pro.put((img_file, image_src, scale_factor, image_ori))
            count += 1

            logger.debug("process0_read_image producer queue, elements {}".format(decode_q_pro.qsize()))
            if USE_BENCHMARK:
                mytimer.infer_pre_end.append(time.time())
        logger.info(f"process0_read_image pid:{pid} finished, put {count} images")
    except Exception as e:
        print("process0_read_image failed.", repr(e))
        traceback.print_exc()

def _put_track_inputs(deepsort_q: Queue, all_det, boxes, scores, ori_images, tag: str):
    """
    Convert each original NCHW image back to an HWC ndarray and enqueue
    (image, bbox, scores) tuples for the deepsort tracker process.

    ori_images: list of NCHW ndarrays, indexed in parallel with all_det.
    tag: log prefix identifying the calling branch.
    """
    for i in range(len(all_det)):
        img_ori_mxtensor = Tensor(ori_images[i])
        img_ori_mxtensor.to_host()
        img_mxtensor_nhwc = base.transpose(img_ori_mxtensor, [0, 2, 3, 1])
        img_ndarray = np.array(img_mxtensor_nhwc).squeeze(0)  # hwc 960*960*3
        deepsort_q.put((img_ndarray, boxes[i], scores))
        logger.info("%s producer queue, elements %s", tag, deepsort_q.qsize())


def process1_infer(input_tensor_q_con: Queue, post_process_q_pro: Queue, batch_size: int, pid, deepsort_q: Queue):
    """
    Consume preprocessed frames, run RetinaFace detection in batches of
    batch_size, push aligned face crops to the recognition queue and
    (image, bbox, scores) tuples to the deepsort queue.

    input_tensor_q_con: Queue, consumer, data from video decoder / image reader
    post_process_q_pro: Queue, producer, aligned faces for feature inference
    batch_size: int, number of samples per inference batch
    pid: worker id, used only for logging
    deepsort_q: Queue, producer, tracker inputs
    """
    try:
        logger.info(
            "======================================== Start process1_infer ========================================")
        model = base.model(MODEL_PATH, deviceId=device_id)

        post_retina = face_post_process_v2.RetinaFace(model, rac="net3", masks=False)
        img_ndarray_list = []
        scale_factor_list = []
        img_ori_ndarray_list = []

        count = 0
        while True:
            start_time = time.time()
            # 1. Wait for data; if the queue stays empty longer than
            #    INFER_BREAK_WAIT_TIME, flush any partial batch one sample at a
            #    time (dynamic batch), then give up and return.
            while input_tensor_q_con.qsize() == 0:
                cur_time = time.time()
                if cur_time - start_time > INFER_BREAK_WAIT_TIME:
                    if len(img_ndarray_list) > 0:
                        logger.info("process1_infer dynamic batch branch.")
                        for n_sample in range(len(img_ndarray_list)):
                            # (1) Initialize one sample.
                            img_mxtensor = Tensor(img_ndarray_list[n_sample])
                            img_mxtensor.to_device(device_id)
                            img_mxtensor_list = [img_mxtensor]

                            # (2) Retina model infer.
                            all_det, all_landmarks = post_retina.detect(img_mxtensor_list,
                                                                        [scale_factor_list[n_sample]], 0.6)
                            logger.info(f"process1_infer det model infer {count} finished.")

                            # (3) Post process.
                            img_arr_list, boxes, landmarks, scores, nfaces = face_post_process_v2.get_aligned_face_base_with_mface_all(
                                all_det, all_landmarks, img_ori_ndarray_list)
                            for img_arr in img_arr_list:
                                if img_arr is not None:
                                    post_process_q_pro.put(img_arr)
                            # Every detection of this single sample refers to
                            # the same original image.
                            _put_track_inputs(deepsort_q, all_det, boxes, scores,
                                              [img_ori_ndarray_list[n_sample]] * len(all_det),
                                              "process1_det_post_p1")

                        # (5) Infer finished, free lists.
                        start_time = time.time()
                        img_ndarray_list = []
                        scale_factor_list = []
                        img_ori_ndarray_list = []
                    else:
                        logger.info(f"process1_infer pid:{pid} wait time out, break")
                        logger.info(f"process1_infer pid:{pid} finished. get image cnt:{count}")
                        return
                time.sleep(0.5)

            logger.debug(f"process1_infer queue size {input_tensor_q_con.qsize()}")
            img_tuple = input_tensor_q_con.get()
            count += 1

            # 3. Read input array, transfer array type, collect into batch lists.
            img_ndarray = img_tuple[1].astype(np.float32)  # NCHW, RGB
            scale_factor = img_tuple[2]
            img_ori_ndarray = img_tuple[3].astype(np.float32)  # NCHW, RGB

            img_ndarray_list.append(img_ndarray)
            scale_factor_list.append(scale_factor)
            img_ori_ndarray_list.append(img_ori_ndarray)

            # 4. If fewer samples than batch_size collected, wait for more.
            #    (Original compared against the global BATCH_SIZE, ignoring the
            #    batch_size parameter; callers pass BATCH_SIZE so this is
            #    behavior-identical.)
            if len(img_ori_ndarray_list) < batch_size:
                continue

            # 5.1 Prepare batch input.
            img_mxtensor = Tensor(np.squeeze(np.array(img_ndarray_list), axis=1))
            img_mxtensor.to_device(device_id)
            img_mxtensor_list = [img_mxtensor]

            # 5.2 Retina model infer.
            if USE_BENCHMARK:
                mytimer.infer_infer_and_post_start.append(time.time())
            all_det, all_landmarks = post_retina.detect(img_mxtensor_list, scale_factor_list, 0.6)
            logger.info(f"process1_infer det model infer {count} samples finished.")
            img_arr_list, boxes, landmarks, scores, nfaces = face_post_process_v2.get_aligned_face_base_with_mface_all(
                all_det, all_landmarks, img_ori_ndarray_list)
            for img_arr in img_arr_list:
                if img_arr is not None:
                    post_process_q_pro.put(img_arr)
            # Fixed: the original indexed img_ori_ndarray_list[n_sample] here,
            # but n_sample is undefined in this branch (NameError on the first
            # batch); detection i belongs to the i-th image of the batch.
            _put_track_inputs(deepsort_q, all_det, boxes, scores, img_ori_ndarray_list,
                              "process1_det_post_p2")

            if USE_BENCHMARK:
                mytimer.infer_infer_and_post_end.append(time.time())

            # 6. Clear batch lists for the next round.
            img_ndarray_list = []
            scale_factor_list = []
            img_ori_ndarray_list = []
    except Exception as e:
        print("process1_infer failed.", repr(e))
        traceback.print_exc()

def _infer_embedding(model, img_ndarray_batch, img_ndarray_flip_batch):
    """
    Run the feature model on a batch and on its flipped copy, sum the two
    output embeddings (test-time flip augmentation) and return the
    L2-normalised result.
    """
    outputs = []
    for batch in (img_ndarray_batch, img_ndarray_flip_batch):
        img_mxtensor = Tensor(batch)
        img_mxtensor.to_device(device_id)
        output_tensors = model.infer([img_mxtensor])
        for i, output_tensor in enumerate(output_tensors):
            output_tensor.to_host()
            output_tensors[i] = np.array(output_tensor)
        outputs.append(output_tensors[0])
    embedding = outputs[0] + outputs[1]
    return preprocessing.normalize(embedding)


def process2_pattern_rec(post_process_q_con: Queue, pid):
    """
    Consume aligned face crops, run the feature (recognition) model in batches
    of BATCH_SIZE and compute the normalised face embeddings.

    post_process_q_con: Queue, consumer, aligned face crops from process1_infer
    pid: worker id, used only for logging
    """
    try:
        logger.info(
            "======================================== Start process2_pattern_rec ========================================")
        model = base.model(CLF_MODEL_PATH, deviceId=device_id)

        count = 0
        img_arr_list = []
        while True:
            start_time = time.time()
            # 1. Wait for data; if the queue stays empty longer than
            #    INFER_BREAK_WAIT_TIME, flush any partial batch one sample at a
            #    time (dynamic batch), then give up and return.
            while post_process_q_con.qsize() == 0:
                cur_time = time.time()
                if cur_time - start_time > INFER_BREAK_WAIT_TIME:
                    if len(img_arr_list) > 0:
                        logger.info("process2_pattern_rec enter dynamic branch.")
                        for n_sample in range(len(img_arr_list)):
                            # (1) Feature model preprocess.
                            img_ndarray_batch, img_ndarray_flip_batch = face_post_process_v2.get_feature_batch(
                                img_arr_list[n_sample])
                            # (2)+(3) Feature model infer and postprocess
                            # (deduplicated into _infer_embedding).
                            embedding = _infer_embedding(model, img_ndarray_batch, img_ndarray_flip_batch)
                            logger.info(f"process2_pattern_rec infer: {count} samples finished.")
                        # (4) Clear lists.
                        img_arr_list = []
                        start_time = time.time()
                    else:
                        logger.info(f"process2_pattern_rec pid:{pid} wait time out, break")
                        logger.info(f"process2_pattern_rec pid:{pid} finished. get image cnt:{count}")
                        return
                time.sleep(0.5)

            # 3. Read input array and collect into the batch list.
            logger.info(f"process2_pattern_rec get inferred image {count}.")
            img_ndarray = post_process_q_con.get()  # aligned face ndarray from process1_infer
            img_arr_list.append(img_ndarray)
            count += 1

            if USE_BENCHMARK:
                mytimer.rec_pre_start.append(time.time())

            # 4. If fewer samples than BATCH_SIZE collected, wait for more.
            if len(img_arr_list) < BATCH_SIZE:
                continue

            # 5.1 Feature model preprocess.
            img_ndarray_batch, img_ndarray_flip_batch = face_post_process_v2.get_feature_batch(np.array(img_arr_list))
            if USE_BENCHMARK:
                mytimer.rec_pre_end.append(time.time())

            # 5.2+5.3 Feature model infer and postprocess.
            if USE_BENCHMARK:
                mytimer.rec_infer_and_post_start.append(time.time())
            embedding = _infer_embedding(model, img_ndarray_batch, img_ndarray_flip_batch)
            logger.info(f"process2_pattern_rec infer: {count} samples finished.")
            # 5.4 Clear lists.
            img_arr_list = []
            if USE_BENCHMARK:
                mytimer.rec_infer_and_post_end.append(time.time())
    except Exception as e:
        print("process2_pattern_rec failed.", repr(e))
        traceback.print_exc()

def process4_deepsort(deepsort_q: Queue, pid):
    """
    Consume (image, bbox, scores) tuples and feed them to a DeepSort tracker,
    logging the ids of confirmed tracks.

    deepsort_q: Queue, consumer, tracker inputs from process1_infer
    pid: worker id (currently unused; kept for signature symmetry with siblings)
    """
    logger.info(
        "======================================== Init process4_deepsort ========================================")
    tracker = DeepSort(max_age=5, embedder_npu=True)
    logger.info(
        "======================================== Start process4_deepsort ========================================")
    func_timer = time.time()
    count = 0
    while True:
        if deepsort_q.qsize() == 0:
            if time.time() - func_timer > INFER_BREAK_WAIT_TIME:
                logger.info("process4_deepsort wait time out, break.")
                break
            # Fixed: the original spun with a bare `continue`, burning 100% of
            # a core while the queue was empty; yield briefly instead.
            time.sleep(0.05)
            continue
        sortinput_tuple = deepsort_q.get()

        img_ndarray = sortinput_tuple[0].astype(np.float32)  # HWC frame
        bbox = sortinput_tuple[1]
        score = sortinput_tuple[2][0]
        det_class = 0  # TODO: plug in the real detection class if more than one exists

        # bbs expected to be a list of detections, each in tuples of
        # ([left, top, w, h], confidence, detection_class)
        bbs = [(bbox, score, det_class)]
        # Fixed: the original passed bbs as an extra positional argument to a
        # message with no placeholder, which raises a formatting error inside
        # the logging module instead of logging the value.
        logger.debug("raw bbs to sort: %s", bbs)
        count += 1

        if bbox is None:
            logger.info(f"empty bboxs in img count {count} , continue.")
            continue
        # frame expected np.ndarray in [H, W, C]
        tracks = tracker.update_tracks(bbs, frame=img_ndarray)
        logger.info(f"track img count {count} !")

        for track in tracks:
            logger.debug(f"raw track_mean:{track.mean}")
            if not track.is_confirmed():
                continue
            track_id = track.track_id
            logger.info(f"track_id(confirmed):{track_id} !")

        func_timer = time.time()
    logger.info("process4_deepsort finished.")


def main_pool():
    """
    Run the pipeline end to end (read images -> detect faces -> track) and
    print the total and per-image wall time.

    NOTE(review): the stages were originally launched concurrently via
    multiprocessing.Pool (see version history); they currently run
    sequentially in-process. The unused ``Pool()`` instance was removed — it
    spawned default-count worker processes that were never used nor closed.
    """
    NUM_DECODE_PROCESS = 1  # kept for the per-image average below
    manager = Manager()
    q_decode = manager.Queue()  # preprocessed images: reader -> detector
    q_rec = manager.Queue()     # aligned faces: detector -> recognizer (unconsumed here)
    q_sort = manager.Queue()    # tracker inputs: detector -> deepsort

    start = time.time()
    process0_read_image(q_decode, 0)
    process1_infer(q_decode, q_rec, BATCH_SIZE, 0, q_sort)
    process4_deepsort(q_sort, 0)
    end = time.time()

    print("total time:", end - start)
    num_images = NUM_DECODE_PROCESS * len(os.listdir(JPG_PATH))
    # Guard against an empty image directory (the original raised
    # ZeroDivisionError here when JPG_PATH contained no files).
    if num_images > 0:
        print("avg time:", (end - start) / num_images)


if __name__ == "__main__":
    base.mx_init()
    main_pool()
    if USE_BENCHMARK:
        print(
            f"first model preprocess per image cost time: {mytimer.average_infer_pre()}, "
            f"infer and postprocess per image cost time: {mytimer.average_infer_infer()}; "
            f"second model preprocess per image cost time: {mytimer.average_rec_pre()}, "
            f"infer and postprocess per image cost time: {mytimer.average_rec_infer()}")
