import copy
import logging
import queue
import traceback
from multiprocessing import Queue

import cv2
import numpy as np
from mindx.sdk import base
from mindx.sdk.base import Image, Tensor, Model, Size, ImageProcessor

import utils.car_plate_det_om as predict_det
import utils.car_plate_rec_om as predict_rec

# Route all INFO+ records to log.log, truncating the file on every run.
_LOG_FORMAT = '%(asctime)s - %(levelname)s - %(message)s'
logging.basicConfig(filename="log.log", filemode='w',
                    level=logging.INFO, format=_LOG_FORMAT)
logger = logging.getLogger(__name__)


class Params(object):
    """Configuration bag for the car-plate OCR pipeline.

    Only the two model directories are required; every other option has a
    default. Each constructor argument becomes an identically named
    instance attribute.
    """

    def __init__(self, det_model_dir, rec_model_dir, 
                 use_npu=True,
                 device_id=0,
                 det_algorithm="DB",
                 det_box_type="quad",
                 det_image_shape="3, 480, 640",
                 rec_image_shape="3, 48, 320",
                 rec_batch_num=6,
                 max_text_length=25,
                 rec_char_dict_path="./utils/ppocr_keys_v1.txt",
                 use_space_char=True,
                 drop_score=0.5,
                 det_db_thresh=0.3,
                 det_db_box_thresh=0.6,
                 det_db_unclip_ratio=1.5,
                 max_batch_size=10,
                 use_dilation=True,
                 det_db_score_mode='fast',
                 benchmark=False,
                 use_mp=False,
                 use_mpinfer=True,
                 multi_thread_num=4
                 ):
        # Copy every constructor argument onto the instance verbatim so that
        # attribute names always mirror parameter names exactly.
        options = dict(locals())
        del options['self']
        for name, value in options.items():
            setattr(self, name, value)


class TextSystem(object):
    """End-to-end plate OCR: detect text boxes, crop/rectify each box,
    recognize the text, and keep only results above ``drop_score``.
    """

    def __init__(self, args):
        # args: a Params-like object; only the fields read below are required.
        self.text_detector = predict_det.TextDetector(args)
        self.text_recognizer = predict_rec.TextRecognizer(args)
        self.drop_score = args.drop_score  # min recognition confidence to keep a result
        self.benchmark = args.benchmark
        if self.benchmark:
            # Accumulated detection / recognition wall-clock time.
            self.det_time = 0
            self.rec_time = 0

    def get_rotate_crop_image(self, img, points):
        """Perspective-rectify the quadrilateral ``points`` out of ``img``.

        args:
            img: HxWxC image (numpy array).
            points: 4x2 corner coordinates, ordered top-left, top-right,
                bottom-right, bottom-left — TODO confirm against the detector's
                box ordering.
        return:
            The rectified crop, rotated 90 degrees when it is clearly taller
            than wide (height/width >= 1.5), i.e. likely vertical text.
        """
        # cv2.getPerspectiveTransform needs float32 points; coerce defensively.
        points = np.asarray(points, dtype=np.float32)
        img_crop_width = int(
            max(
                np.linalg.norm(points[0] - points[1]),
                np.linalg.norm(points[2] - points[3])))
        img_crop_height = int(
            max(
                np.linalg.norm(points[0] - points[3]),
                np.linalg.norm(points[1] - points[2])))
        pts_std = np.float32([[0, 0], [img_crop_width, 0],
                              [img_crop_width, img_crop_height],
                              [0, img_crop_height]])

        M = cv2.getPerspectiveTransform(points, pts_std)
        dst_img = cv2.warpPerspective(
            img,
            M, (img_crop_width, img_crop_height),
            borderMode=cv2.BORDER_REPLICATE,
            flags=cv2.INTER_CUBIC)
        dst_img_height, dst_img_width = dst_img.shape[0:2]
        if dst_img_height * 1.0 / dst_img_width >= 1.5:
            dst_img = np.rot90(dst_img)
        return dst_img

    def sorted_boxes(self, dt_boxes):
        """
        Sort text boxes in order from top to bottom, left to right.
        args:
            dt_boxes(array): detected text boxes with shape [N, 4, 2]
        return:
            list of boxes (each [4, 2]) in reading order
        """
        num_boxes = dt_boxes.shape[0]
        sorted_boxes = sorted(dt_boxes, key=lambda x: (x[0][1], x[0][0]))
        _boxes = list(sorted_boxes)

        # Boxes whose tops are within 10px are treated as one text line and
        # ordered left-to-right. A single forward pass is not enough when
        # three or more boxes share a line, so each box is bubbled backwards
        # until it reaches its place (upstream PaddleOCR fix).
        for i in range(num_boxes - 1):
            for j in range(i, -1, -1):
                if abs(_boxes[j + 1][0][1] - _boxes[j][0][1]) < 10 and \
                        (_boxes[j + 1][0][0] < _boxes[j][0][0]):
                    _boxes[j], _boxes[j + 1] = _boxes[j + 1], _boxes[j]
                else:
                    break
        return _boxes

    def __call__(self, decodedImg, im_rgb, image_processor, device_id):
        '''
        Run detection followed by recognition on one image.
        decodedImg : img_file --image_processor(dvpp)--> img_yuv
        im_rgb : img_rgb in cv.mat
        image_processor : npu chip dvpp id
        device_id : npu device id forwarded to the detector
        Returns (filtered_boxes, filtered_rec_results), or (None, None) when
        detection found nothing.
        '''
        # Detect plates.
        dt_boxes, elapse = self.text_detector(
            decodedImg, im_rgb, image_processor, device_id)
        if dt_boxes is None:
            return None, None
        dt_boxes = self.sorted_boxes(dt_boxes)
        # deepcopy guards the boxes returned to the caller against any
        # in-place edits during cropping.
        img_crop_list = [
            self.get_rotate_crop_image(im_rgb, copy.deepcopy(box))
            for box in dt_boxes
        ]
        if self.benchmark:
            self.det_time += elapse

        # Recognize plates — shared with the two-stage API below.
        return self.recognize_sig(dt_boxes, img_crop_list)

    def detect_sig(self, decodedImg, im_rgb, image_processor):
        """Detection-only stage.

        Returns (sorted_boxes, cropped_patches), or (None, None) when the
        detector produced no boxes.
        """
        # Detect plates.
        dt_boxes, elapse = self.text_detector(
            decodedImg, im_rgb, image_processor)
        if dt_boxes is None:
            return None, None
        dt_boxes = self.sorted_boxes(dt_boxes)
        img_crop_list = [
            self.get_rotate_crop_image(im_rgb, copy.deepcopy(box))
            for box in dt_boxes
        ]
        if self.benchmark:
            self.det_time += elapse

        return dt_boxes, img_crop_list

    def recognize_sig(self, dt_boxes, img_crop_list):
        """Recognition stage: run the recognizer on the crops and keep only
        (box, (text, score)) pairs whose score reaches ``drop_score``.
        """
        rec_res, elapse = self.text_recognizer(img_crop_list)
        filter_boxes, filter_rec_res = [], []
        for box, rec_result in zip(dt_boxes, rec_res):
            _text, score = rec_result
            if score >= self.drop_score:
                filter_boxes.append(box)
                filter_rec_res.append(rec_result)
        if self.benchmark:
            self.rec_time += elapse
        return filter_boxes, filter_rec_res


class CarPlateDetAndRec(object):
    """Queue-driven worker: pulls decoded RGB frames from a multiprocessing
    queue and runs plate detection + recognition on each until the queue
    stays empty for ``wait_threshould`` seconds.
    """

    def __init__(self, det_model_dir, rec_model_dir, batch_size, device_id, pid):
        self.args = Params(det_model_dir=det_model_dir, rec_model_dir=rec_model_dir)
        self.batch_size = batch_size
        self.device_id = device_id
        self.pid = pid  # worker id, used only in log messages
        # Seconds to block on the queue before treating it as end-of-stream.
        # (Attribute name keeps its original spelling for compatibility.)
        self.wait_threshould = 10

    def infer(self, get_arr_queue: Queue):
        """Consume images from ``get_arr_queue`` and OCR each one.

        Returns normally once no item arrives within the timeout; any other
        failure is logged with a traceback (best-effort worker semantics).
        """
        try:
            logger.info("========= car plate start det and rec %s ==================", self.pid)
            text_sys = TextSystem(self.args)
            image_processor = ImageProcessor(self.device_id)
            count = 0

            while True:
                try:
                    img = get_arr_queue.get(timeout=self.wait_threshould)
                except queue.Empty:
                    # Producer has gone quiet: treat as end-of-stream, not an error.
                    del text_sys  # release model resources promptly
                    logger.info("get queue timeout")
                    logger.info("%s car plate infer finished. get image cnt %s ", self.pid, count)
                    return

                if img is None:
                    continue
                count += 1
                dt_boxes, rec_res = text_sys(None, img, image_processor, self.device_id)
                if rec_res is None:
                    # Detector found no boxes in this frame; skip it instead of
                    # crashing on len(None) as the previous code did.
                    continue
                txts = [rec_result[0] for rec_result in rec_res]
                logger.debug("plate texts: %s", txts)
        except Exception:
            logger.error("car plate infer failed.")
            traceback.print_exc()


def car_plate_detect_rec(det_model_path, rec_model_path, batch_size, device_id, pid, input_tensor_q_con: Queue):
    """Process entry point: build a CarPlateDetAndRec worker and drain the queue."""
    worker = CarPlateDetAndRec(det_model_path, rec_model_path, batch_size, device_id, pid)
    worker.infer(input_tensor_q_con)
