#!/usr/bin/env python3.9
# coding=utf-8

"""
Copyright (c) Huawei Technologies Co., Ltd. 2020-2022. All rights reserved.
Description: traffic detection inference pipeline (MindX SDK Python API).
Author: MindX SDK
Create: 2022
History: NA
"""

import logging
import time
from multiprocessing import JoinableQueue, Queue
import traceback
import numpy as np
import cv2
from mindx.sdk import base
from mindx.sdk.base import Image, ImageProcessor, Size, Tensor

from utils import body_post_process, common
from config import *
import cv2
from sklearn import preprocessing
from body import body_post_process
from car_union import car_post_process
from motor import motor_post_process
import logging.config
from sort import *
from queue import Queue as QQueue

# Load handlers/formatters from the project-level logger.conf, then fetch the
# shared "pipeline" logger used by all stages of this process.
logging.config.fileConfig("logger.conf")
logger = logging.getLogger("pipeline")


class TrafficDetection(object):
    """Detect persons, vehicles and non-motorized vehicles in video frames.

    Wraps a MindX detection model together with one SORT tracker per video
    stream, and routes each sufficiently-changed tracked object to the
    matching downstream post-process queue (body / car / motor).
    """

    def __init__(self, det_model_path, batch_size, device_id, pid):
        """Load the detection model and the dvpp image processor.

        Args:
            det_model_path: path to the offline detection model file.
            batch_size: number of frames accumulated before one inference.
            device_id: Ascend device to load the model and images onto.
            pid: caller-supplied process identifier (stored, not used here).
        """
        self.det_model = base.model(det_model_path, deviceId=device_id)
        self.image_processor = ImageProcessor(device_id)  # mxbase image processor (dvpp resize)
        self.batch_size = batch_size
        self.device_id = device_id
        self.pid = pid

        self.INFER_BREAK_WAIT_TIME = 5  # seconds; not referenced in this class — kept for callers

        # Per-video-id state: one SORT tracker and one bounded box-size history queue.
        self.sorts = {}
        self.queues = {}

    def det_preprocess(self, image_ori, image_ori_height, image_ori_width):
        """Crop, resize and letterbox one frame for the 640x640 detector input.

        Args:
            image_ori: NHWC frame array (batch of 1); may carry alignment
                padding beyond the true frame, hence the explicit crop.
            image_ori_height: true frame height in pixels.
            image_ori_width: true frame width in pixels.

        Returns:
            Tuple ``(image_src, image_ori)``: the letterboxed NCHW model
            input and the cropped original frame transposed to NCHW.
        """
        decode_image = Image(image_ori[0], base.rgb)
        decode_image.set_original_size(Size(image_ori_width, image_ori_height))
        image_ori = image_ori[:, :image_ori_height, :image_ori_width, :]
        image_ori = image_ori.transpose((0, 3, 1, 2))  # NHWC -> NCHW

        # Resize on device via dvpp, preserving aspect ratio with letterbox geometry.
        decode_image.to_device(self.device_id)
        resize_tuple, pad_tuple = body_post_process.letterbox(decode_image, (640, 640))
        resize_conf = Size(resize_tuple[0], resize_tuple[1])
        decode_image = self.image_processor.resize(decode_image, resize_conf, base.bilinear_similar_opencv)
        decode_image.to_host()

        # Back to ndarray; crop away dvpp alignment padding, then pad up to 640x640.
        # NOTE(review): original_height/width after resize presumably reflect the
        # resized valid region — confirm against the SDK's Image semantics.
        image_src = np.array(decode_image.to_tensor())  # NHWC
        image_src = image_src[:, :decode_image.original_height, :decode_image.original_width, :]
        image_src = cv2.copyMakeBorder(image_src[0], pad_tuple[0], pad_tuple[1], pad_tuple[2], pad_tuple[3],
                                       cv2.BORDER_CONSTANT, value=(112, 112, 112))
        image_src = np.expand_dims(image_src, axis=0).transpose((0, 3, 1, 2))  # NCHW
        return image_src, image_ori

    def infer(self,
              input_tensor_q_con: JoinableQueue,
              post_process_body: Queue,
              post_process_car: Queue,
              post_process_motor: Queue):
        """Consume frames forever, run batched detection + tracking, dispatch crops.

        Pulls frame dicts (keys seen here: ``video_id``, ``image``, ``height``,
        ``width``) from ``input_tensor_q_con``, accumulates ``batch_size`` of
        them, runs the detector, tracks results with SORT, and forwards each
        object whose box area changed by more than TRACK_THRESHOLD to the
        queue matching its class. Runs until the process dies or an exception
        escapes the loop.
        """
        try:
            ImageProcessor(self.device_id)  # initialize the image processor in this process
            img_ndarray_list = []
            img_ori_ndarray_list = []
            input_data_list = []
            count = 0
            while True:
                data = input_tensor_q_con.get()
                input_data_list.append(data)

                video_id = data["video_id"]
                # Lazily create the per-video tracker and size-history queue.
                if video_id not in self.sorts:
                    self.sorts[video_id] = Sort()
                sort = self.sorts[video_id]
                if video_id not in self.queues:
                    self.queues[video_id] = QQueue(maxsize=500)

                count += 1
                # Preprocess: crop + letterbox to the model input size.
                image_src, image_ori = self.det_preprocess(data["image"], data["height"], data["width"])

                # Normalize to [0, 1] float32 and accumulate the batch.
                img_ndarray = image_src.astype(np.float32)  # NCHW, RGB
                img_ori_ndarray = image_ori  # NCHW, RGB
                img_ndarray = img_ndarray / 255.
                img_ndarray_list.append(img_ndarray)
                img_ori_ndarray_list.append(img_ori_ndarray)

                # Wait until a full batch has been collected.
                if len(img_ori_ndarray_list) < self.batch_size:
                    continue

                # Build the batched input tensor (drop each image's batch dim of 1).
                img_mxtensor = Tensor(np.squeeze(np.array(img_ndarray_list), axis=1))
                img_mxtensor.to_device(self.device_id)
                img_mxtensor_list = [img_mxtensor]

                # Detection model inference.
                stime = time.time()
                output_tensors = self.det_model.infer(img_mxtensor_list)  # list of output arrays
                # logger.info("person/vehicle/non-motor detection time: %s", time.time() - stime)

                for i, output_tensor in enumerate(output_tensors):
                    output_tensor.to_host()
                    output_tensors[i] = np.array(output_tensor)

                # Post-process raw outputs into per-frame detection lists.
                # NOTE(review): body_post_process is imported twice (utils and body);
                # the `from body import ...` binding wins — verify that is intended.
                all_bboxes = body_post_process.det_all_postprocess(
                    output_tensors[0], img_ndarray_list, img_ori_ndarray_list)
                all_rs = body_post_process.get_all_rs_from_box(all_bboxes, img_ori_ndarray_list)
                for i, res in enumerate(all_rs):
                    dets = []
                    small_imgs = []
                    types = []
                    if res is None:
                        continue
                    for j, det in enumerate(res):
                        box = det[1].copy()
                        box.append(j)  # carry the detection index so SORT output maps back
                        dets.append(box)
                        small_imgs.append(det[0])
                        types.append(det[3])

                    post_data = input_data_list[i].copy()
                    idxs = []
                    if dets:
                        tracks = sort.update(np.array(dets))
                        # FIX: was `for i, track in enumerate(tracks)` — the unused
                        # index shadowed the outer frame index `i`.
                        for track in tracks:
                            idx = int(track[-2])
                            idxs.append(idx)
                            track_id = int(track[-1])
                            queue = self.queues[video_id]
                            queue_data = list(queue.queue)[::-1]  # newest entries first

                            if queue.full():
                                queue.get()

                            x1, y1, x2, y2 = dets[idx][:4]
                            w = abs(x2 - x1)
                            h = abs(y2 - y1)

                            # Look up the most recent recorded box size for this track.
                            # (Renamed from `data`, which clobbered the frame dict above.)
                            pw, ph, p_track_id = 0, 0, 0
                            for history_entry in queue_data:
                                p_track_id = list(history_entry.keys())[0]
                                if p_track_id == track_id:
                                    pw, ph = list(history_entry.values())[0]
                                    break

                            # Forward only objects whose box area changed enough.
                            # NOTE(review): `config` itself is expected to arrive via a
                            # star import (only `from config import *` is visible here) —
                            # verify TRACK_THRESHOLD actually resolves at runtime.
                            if w * h > pw * ph * config.TRACK_THRESHOLD or pw * ph > w * h * config.TRACK_THRESHOLD:
                                queue.put({track_id: (w, h)})
                                post_data["track_id"] = track_id
                                post_data["det"] = (small_imgs[idx], dets[idx][:4], 0)

                                obj_type = types[idx]  # renamed from `type` (builtin shadow)
                                if obj_type == 0:  # person
                                    post_process_body.put(post_data, block=True)
                                elif obj_type in [2, 5, 7]:  # vehicle
                                    post_process_car.put(post_data, block=True)
                                elif obj_type in [1, 3]:  # non-motorized vehicle
                                    post_process_motor.put(post_data, block=True)

                            else:
                                # Change too small: skip dispatch, re-record the previous size.
                                if p_track_id != 0:
                                    queue.put({p_track_id: (pw, ph)})

                # Reset accumulators for the next batch.
                img_ndarray_list = []
                img_ori_ndarray_list = []
                input_data_list = []

        except Exception as e:
            # Worker boundary: report through the configured logger and fall out.
            logger.error("process1_infer failed: %r", e)
            traceback.print_exc()


def traffic_detect(det_model_path,
                   batch_size,
                   device_id,
                   pid,
                   input_tensor_q_con: JoinableQueue,
                   post_process_body_track: Queue,
                   post_process_car_track: Queue,
                   post_process_motor_track: Queue,
                   model_loading: list
                   ):
    """Process entry point: build a TrafficDetection worker and run its loop.

    Appends True to ``model_loading`` once model construction finishes so
    the parent process can observe that loading completed, then blocks in
    the worker's infer loop forever.
    """
    detector = TrafficDetection(det_model_path, batch_size, device_id, pid)
    model_loading.append(True)
    detector.infer(input_tensor_q_con,
                   post_process_body_track,
                   post_process_car_track,
                   post_process_motor_track)

    
