# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import queue
import threading

import predict_rec_om as predict_rec
import predict_det_om as predict_det
from mindx.sdk.base import Image, Tensor, Model, Size, ImageProcessor
from mindx.sdk import base
import time
import numpy as np
import copy
import cv2
import os
import sys
import subprocess
import argparse

# Make sibling modules in this script's directory importable regardless of the
# current working directory.
# NOTE(review): this runs AFTER the predict_rec_om / predict_det_om imports
# above, so it only affects imports performed later — confirm this ordering is
# intended.
__dir__ = os.path.dirname(os.path.abspath(__file__))
sys.path.append(__dir__)


class TextSystem(object):
    """End-to-end text detection + recognition pipeline (license plates).

    Wraps an OM-model text detector and text recognizer. Recognition
    results whose confidence score falls below ``drop_score`` are dropped.
    When ``benchmark`` is enabled, cumulative detection/recognition wall
    times are accumulated in ``det_time`` / ``rec_time``.
    """

    def __init__(self, args):
        self.text_detector = predict_det.TextDetector(args)
        self.text_recognizer = predict_rec.TextRecognizer(args)
        self.drop_score = args.drop_score
        self.benchmark = args.benchmark
        if self.benchmark:
            # Cumulative seconds spent in detection / recognition.
            self.det_time = 0
            self.rec_time = 0

    def get_rotate_crop_image(self, img, points):
        """Perspective-crop the quadrilateral ``points`` out of ``img``.

        The four corners are warped onto an axis-aligned rectangle whose
        size is taken from the longer pair of opposite edges.  If the
        result is much taller than wide (ratio >= 1.5) it is rotated 90
        degrees so the text reads horizontally.

        Args:
            img: source image, H x W x 3 array (BGR).
            points: float32 array of shape (4, 2); corner order is assumed
                top-left, top-right, bottom-right, bottom-left — TODO confirm
                against the detector's output convention.

        Returns:
            The cropped (and possibly rotated) image patch.
        """
        img_crop_width = int(
            max(
                np.linalg.norm(points[0] - points[1]),
                np.linalg.norm(points[2] - points[3])))
        img_crop_height = int(
            max(
                np.linalg.norm(points[0] - points[3]),
                np.linalg.norm(points[1] - points[2])))
        pts_std = np.float32([[0, 0], [img_crop_width, 0],
                              [img_crop_width, img_crop_height],
                              [0, img_crop_height]])

        M = cv2.getPerspectiveTransform(points, pts_std)
        dst_img = cv2.warpPerspective(
            img,
            M, (img_crop_width, img_crop_height),
            borderMode=cv2.BORDER_REPLICATE,
            flags=cv2.INTER_CUBIC)
        dst_img_height, dst_img_width = dst_img.shape[0:2]
        # Rotate clearly-vertical crops so the recognizer sees horizontal text.
        if dst_img_height * 1.0 / dst_img_width >= 1.5:
            dst_img = np.rot90(dst_img)
        return dst_img

    def sorted_boxes(self, dt_boxes):
        """Sort text boxes in order from top to bottom, left to right.

        Args:
            dt_boxes: array of detected boxes, each with shape [4, 2].

        Returns:
            List of boxes sorted top-to-bottom, then left-to-right within
            (approximately) the same text line.
        """
        num_boxes = dt_boxes.shape[0]
        _boxes = sorted(dt_boxes, key=lambda x: (x[0][1], x[0][0]))

        # One adjacent-swap pass: boxes whose top edges are within 10 px are
        # treated as the same line and reordered left-to-right.
        for i in range(num_boxes - 1):
            if abs(_boxes[i + 1][0][1] - _boxes[i][0][1]) < 10 and \
                    (_boxes[i + 1][0][0] < _boxes[i][0][0]):
                _boxes[i], _boxes[i + 1] = _boxes[i + 1], _boxes[i]
        return _boxes

    def __call__(self, decodedImg, im_rgb, image_processor):
        """Run detection then recognition on one image.

        Args:
            decodedImg: DVPP-decoded device image (NV12).
            im_rgb: the same frame as a host-side BGR cv2 mat.
            image_processor: DVPP ImageProcessor bound to the target NPU.

        Returns:
            (filtered boxes, filtered (text, score) results), or
            (None, None) when detection finds nothing.
        """
        # Delegates to detect_sig/recognize_sig — previously this body was a
        # verbatim copy of both methods.
        dt_boxes, img_crop_list = self.detect_sig(
            decodedImg, im_rgb, image_processor)
        if dt_boxes is None:
            return None, None
        return self.recognize_sig(dt_boxes, img_crop_list)

    def detect_sig(self, decodedImg, im_rgb, image_processor):
        """Detect text boxes and crop each region out of ``im_rgb``.

        Returns (sorted boxes, list of cropped patches), or (None, None)
        when the detector finds nothing.
        """
        dt_boxes, elapse = self.text_detector(
            decodedImg, im_rgb, image_processor)
        if dt_boxes is None:
            return None, None
        img_crop_list = []
        dt_boxes = self.sorted_boxes(dt_boxes)
        for bno in range(len(dt_boxes)):
            # deepcopy: get_rotate_crop_image mutates nothing here, but the
            # box is kept pristine for the caller.
            tmp_box = copy.deepcopy(dt_boxes[bno])
            img_crop = self.get_rotate_crop_image(im_rgb, tmp_box)
            img_crop_list.append(img_crop)
        if self.benchmark:
            self.det_time += elapse

        return dt_boxes, img_crop_list

    def recognize_sig(self, dt_boxes, img_crop_list):
        """Recognize text in each crop and drop low-confidence results.

        Args:
            dt_boxes: boxes as returned by ``detect_sig``.
            img_crop_list: cropped patches aligned with ``dt_boxes``.

        Returns:
            (boxes, (text, score) tuples) for results whose score is at
            least ``drop_score``.
        """
        rec_res, elapse = self.text_recognizer(img_crop_list)
        filter_boxes, filter_rec_res = [], []
        for box, rec_result in zip(dt_boxes, rec_res):
            text, score = rec_result
            if score >= self.drop_score:
                filter_boxes.append(box)
                filter_rec_res.append(rec_result)
        if self.benchmark:
            self.rec_time += elapse
        return filter_boxes, filter_rec_res


def file_name(file_dir, ext='.jpg'):
    """Recursively collect paths of files under ``file_dir`` matching ``ext``.

    Args:
        file_dir: root directory to walk.
        ext: file extension to match, including the leading dot
            (default ``'.jpg'``, preserving the original behavior).

    Returns:
        List of matching file paths (os.walk order).
    """
    matches = []
    for root, _dirs, files in os.walk(file_dir):
        for fname in files:
            if os.path.splitext(fname)[1] == ext:
                matches.append(os.path.join(root, fname))
    return matches


# Shared work queues for the producer/consumer threads:
# img_arr_q carries (nv12 pixel array, original height, original width)
# tuples from send_img to the inference threads; dec_res_q carries
# (dt_boxes, img_crop_list) tuples from detector to recognizer.
img_arr_q = queue.Queue()
dec_res_q = queue.Queue()

def send_img(file_list, device_id):
    """Producer thread: DVPP-decode each image and enqueue the raw frame.

    Every queue entry is a tuple
    (host-side NV12 pixel array, original height, original width).
    """
    processor = ImageProcessor(device_id)
    for path in file_list:
        decoded = processor.decode(path, base.nv12)
        decoded.to_host()
        pixels = np.array(decoded.to_tensor())
        entry = (pixels, decoded.original_height, decoded.original_width)
        img_arr_q.put(entry)


def detector_and_recognizer(device_id, text_sys):
    """Consumer thread: run the full detect + recognize pipeline.

    Drains ``img_arr_q`` (entries produced by ``send_img``), rebuilds a
    device-side NV12 Image plus a host-side BGR mat for each frame, runs
    ``text_sys`` on them, and prints the recognized texts.
    """
    image_processor = ImageProcessor(device_id)
    while not img_arr_q.empty():
        image_ori, height, width = img_arr_q.get()
        # Reverse the last axis — presumably a channel-order flip; verify
        # against the producer's tensor layout.
        img_arr = image_ori[:, :, :, ::-1]

        decodedImg = Image(img_arr[0], base.nv12)
        decodedImg.set_original_size(Size(int(width), int(height)))
        decodedImg.to_device(device_id)

        # Reinterpret the flat NV12 buffer as an (H*3/2, W) YUV image and
        # convert to BGR for the CPU-side cropping path.
        im_yuv = img_arr.reshape(
            decodedImg.height * 3 // 2, decodedImg.width)
        im_rgb = cv2.cvtColor(im_yuv, cv2.COLOR_YUV2BGR_NV12)

        if decodedImg is None or im_rgb is None:
            continue

        dt_boxes, rec_res = text_sys(decodedImg, im_rgb, image_processor)
        # text_sys returns (None, None) when no text is detected; the
        # original code crashed on len(None) here.
        if rec_res is None:
            continue
        txts = [res[0] for res in rec_res]
        print(txts)


def detector(device_id, text_sys):
    """Detection-stage consumer thread.

    Drains ``img_arr_q``, runs text detection on each frame and forwards
    (dt_boxes, img_crop_list) pairs to ``dec_res_q``.  A final ``None``
    sentinel tells the recognizer thread to stop.
    """
    image_processor = ImageProcessor(device_id)
    while not img_arr_q.empty():
        img_arr, height, width = img_arr_q.get()

        decodedImg = Image(img_arr[0], base.nv12)
        decodedImg.set_original_size(Size(int(width), int(height)))
        decodedImg.to_device(device_id)

        # Reinterpret the flat NV12 buffer as an (H*3/2, W) YUV image and
        # convert to BGR for the CPU-side cropping path.
        im_yuv = img_arr.reshape(
            decodedImg.height * 3 // 2, decodedImg.width)
        im_rgb = cv2.cvtColor(im_yuv, cv2.COLOR_YUV2BGR_NV12)

        if decodedImg is None or im_rgb is None:
            continue

        dt_boxes, img_crop_list = text_sys.detect_sig(
            decodedImg, im_rgb, image_processor)
        # detect_sig returns (None, None) when nothing was found; do not
        # forward such entries — recognize_sig cannot handle a None list.
        if dt_boxes is not None:
            dec_res_q.put((dt_boxes, img_crop_list))

    # Sentinel: signals the recognizer thread that no more work is coming.
    dec_res_q.put(None)


def recognizer(text_sys):
    """Recognition-stage consumer thread.

    Pulls (dt_boxes, img_crop_list) tasks from ``dec_res_q`` until the
    ``None`` sentinel arrives, recognizes each crop list and prints the
    resulting texts.
    """
    while True:
        task = dec_res_q.get()
        if task is None:
            break

        dt_boxes, img_crop_list = task
        # Defensive: a detector without the skip-on-None fix may enqueue
        # (None, None) when no boxes were found.
        if dt_boxes is None:
            continue
        dt_boxes, rec_res = text_sys.recognize_sig(dt_boxes, img_crop_list)
        txts = [res[0] for res in rec_res]
        print(txts)


def main_old(args):
    """Sequential single-threaded pipeline.

    Decodes, detects and recognizes every .jpg under ./test_data/ one by
    one, printing the recognized texts and overall timing statistics.
    """
    text_sys = TextSystem(args)
    file_dir = './test_data/'  # directory containing the input images
    file_list = file_name(file_dir)

    idx = 0
    image_processor = ImageProcessor(args.device_id)
    starttime = time.time()
    for image_file in file_list:
        print(image_file)
        idx += 1
        # DVPP-decode the image and pull the NV12 tensor to the host.
        decodedImg = image_processor.decode(image_file, base.nv12)
        decoded_tensor = decodedImg.get_tensor()
        decoded_tensor.to_host()
        # Reinterpret the NV12 buffer as an (H*3/2, W) YUV image and
        # convert to BGR for the CPU-side cropping path.
        im_yuv = np.array(decoded_tensor).reshape(
            decodedImg.height * 3 // 2, decodedImg.width)
        im_rgb = cv2.cvtColor(im_yuv, cv2.COLOR_YUV2BGR_NV12)

        # Full pipeline: detection boxes + recognized text.
        dt_boxes, rec_res = text_sys(decodedImg, im_rgb, image_processor)
        # (None, None) means no text detected in this image.
        if rec_res is None:
            continue
        txts = [res[0] for res in rec_res]
        print(txts)

    total_time = time.time() - starttime
    print('total_time:', total_time)
    # Guard against an empty input directory (division by zero).
    if idx:
        print('average_time:', total_time / idx)


def main(args):
    """Multi-threaded pipeline entry point.

    Starts ``multi_thread_num`` producer threads (each decoding the whole
    file list onto ``img_arr_q``) and either combined detect+recognize
    consumer threads (``--use_mpinfer``) or a split detector/recognizer
    thread pair, then reports timing (and optional benchmark) statistics.
    """
    text_sys = TextSystem(args)
    file_dir = './test_data/'  # directory containing the input images
    file_list = file_name(file_dir)

    multi_thread_num = args.multi_thread_num
    device_id = args.device_id
    threads = []
    # Images per producer thread; each producer pushes the whole list.
    idx = len(file_list)

    starttime = time.time()

    # Producer threads.
    for _ in range(multi_thread_num):
        threads.append(
            threading.Thread(target=send_img, args=(file_list, device_id)))

    # Inference threads.
    if args.use_mpinfer:
        # Combined detect + recognize consumers, one per producer.
        for _ in range(multi_thread_num):
            threads.append(threading.Thread(target=detector_and_recognizer,
                                            args=(device_id, text_sys)))
    else:
        # Split pipeline: one detector feeding one recognizer.
        threads.append(
            threading.Thread(target=detector, args=(device_id, text_sys)))
        threads.append(
            threading.Thread(target=recognizer, args=(text_sys,)))

    for t in threads:
        t.start()

    for t in threads:
        t.join()

    total_time = time.time() - starttime
    print('total_time:', total_time)
    # Guard against an empty input directory (division by zero).
    total_images = idx * multi_thread_num
    if total_images:
        print('average_time:', total_time / total_images)

    if args.benchmark and idx:
        # NOTE(review): averages divide by images-per-producer, not by the
        # total processed (idx * multi_thread_num) — confirm intent.
        print("**********************")
        print('average_det_total_time:', text_sys.det_time / idx)
        print('average_det_pre_process_time:',
              text_sys.text_detector.pre_process_time / idx)
        print('average_det_infer_time:',
              text_sys.text_detector.infer_time / idx)
        print('average_det_postprocess_time:',
              text_sys.text_detector.postprocess_time / idx)
        print("**********************")
        print('average_rec_total_time:', text_sys.rec_time / idx)
        print('average_rec_pre_process_time:',
              text_sys.text_recognizer.pre_process_time / idx)
        print('average_rec_infer_time:',
              text_sys.text_recognizer.infer_time / idx)
        print('average_rec_postprocess_time:',
              text_sys.text_recognizer.postprocess_time / idx)
        print("**********************")


def str2bool(v):
    """Interpret the strings 'true', 't' or '1' (any case) as True."""
    return v.lower() in {"true", "t", "1"}


def init_args():
    """Build the command-line argument parser for the OCR pipeline."""
    parser = argparse.ArgumentParser()
    add = parser.add_argument

    add("--use_npu", type=str2bool, default=True)
    add("--device_id", type=int, default=0)

    # Text detector options.
    add("--det_algorithm", type=str, default='DB')
    add("--det_model_dir", type=str)
    add("--det_box_type", type=str, default='quad')
    add("--det_image_shape", type=str, default="3, 960, 1280")

    # Text recognizer options.
    add("--rec_model_dir", type=str)
    add("--rec_image_shape", type=str, default="3, 48, 320")
    add("--rec_batch_num", type=int, default=6)
    add("--max_text_length", type=int, default=25)
    add("--rec_char_dict_path", type=str, default="./ppocr_keys_v1.txt")
    add("--use_space_char", type=str2bool, default=True)
    add("--drop_score", type=float, default=0.5)

    # DB post-processing options.
    add("--det_db_thresh", type=float, default=0.3)
    add("--det_db_box_thresh", type=float, default=0.6)
    add("--det_db_unclip_ratio", type=float, default=1.5)
    add("--max_batch_size", type=int, default=10)
    add("--use_dilation", type=str2bool, default=True)
    add("--det_db_score_mode", type=str, default="fast")

    # Runtime / threading options.
    add("--benchmark", type=str2bool, default=False)
    add("--use_mp", type=str2bool, default=False)
    add("--use_mpinfer", type=str2bool, default=True)
    add("--multi_thread_num", type=int, default=4)
    return parser


def parse_args():
    """Parse command-line arguments with the shared pipeline parser."""
    return init_args().parse_args()


if __name__ == "__main__":

    args = parse_args()
    if args.use_mp or args.use_mpinfer:
        main(args)
    else:
        main_old(args)
