import copy
import sys
import time
import os
import threading
import queue
import cv2
import numpy as np
from shapely.geometry import Polygon
import pyclipper
import math
import json
from PIL import Image
import matplotlib.pyplot as plt
import paddle
# from paddle.io import Dataset, DataLoader, BatchSampler
import paddle_ocr.tools.infer.utility as utility
from paddle_ocr.ppocr.utils.logging import get_logger
from paddle_ocr.ppocr.data.imaug import DecodeImage, NormalizeImage, ToCHWImage, KeepKeys
from paddle_ocr.ppocr.postprocess import build_post_process
from paddle_ocr.tools.infer.utility import get_rotate_crop_image, get_minarea_rect_crop

logger = get_logger()


def transform(data, ops=None):
    """Run *data* through a pipeline of operators.

    Each operator is called with the current data and returns the
    transformed data, or ``None`` to signal the sample should be
    dropped, in which case the pipeline short-circuits and returns
    ``None``.
    """
    pipeline = ops if ops is not None else []
    result = data
    for step in pipeline:
        result = step(result)
        if result is None:
            return None
    return result


def create_operators(op_param_list, global_config=None):
    """
    create operators based on the config

    Args:
        op_param_list(list): a list of single-key dicts, each mapping an
            operator class name to its keyword arguments (or None).
        global_config(dict): extra keyword arguments merged into every
            operator's parameters (overriding per-operator values).

    Returns:
        list: instantiated operator objects, in config order.
    """
    assert isinstance(op_param_list, list), ('operator config should be a list')
    ops = []
    for operator in op_param_list:
        assert isinstance(operator,
                          dict) and len(operator) == 1, "yaml format error"
        op_name = list(operator)[0]
        # Copy the per-operator params so merging global_config does not
        # mutate the caller's config dict (the previous version updated
        # it in place, so repeated calls accumulated global keys).
        param = dict(operator[op_name] or {})
        if global_config is not None:
            param.update(global_config)
        # NOTE(review): eval() on a config-supplied class name executes
        # arbitrary code if the config is untrusted; kept for
        # compatibility with the yaml-driven operator registry.
        op = eval(op_name)(**param)
        ops.append(op)
    return ops


class DBPostProcess(object):
    """
    The post process for Differentiable Binarization (DB).

    Converts the probability map produced by a DB detection model into
    text boxes (quadrilaterals or polygons) plus per-box confidence
    scores, rescaled to the destination (original) image size.
    """

    def __init__(self,
                 thresh=0.3,
                 box_thresh=0.7,
                 max_candidates=1000,
                 unclip_ratio=2.0,
                 use_dilation=False,
                 score_mode="fast",
                 box_type='quad',
                 **kwargs):
        # Binarization threshold applied to the probability map.
        self.thresh = thresh
        # Minimum mean score for a candidate box to be kept.
        self.box_thresh = box_thresh
        self.max_candidates = max_candidates
        # Expansion ratio used when un-shrinking boxes (see unclip()).
        self.unclip_ratio = unclip_ratio
        # Boxes whose shorter side is below this (bitmap pixels) are dropped.
        self.min_size = 3
        # "fast": score over the bounding rectangle; "slow": over the polygon.
        self.score_mode = score_mode
        self.box_type = box_type
        assert score_mode in [
            "slow", "fast"
        ], "Score mode must be in [slow, fast] but got: {}".format(score_mode)

        # Optional 2x2 dilation kernel applied to the binary mask before
        # contour extraction (helps merge fragmented regions).
        self.dilation_kernel = None if not use_dilation else np.array(
            [[1, 1], [1, 1]])

    def polygons_from_bitmap(self, pred, _bitmap, dest_width, dest_height):
        '''
        _bitmap: single map with shape (1, H, W),
            whose values are binarized as {0, 1}
        '''

        bitmap = _bitmap
        height, width = bitmap.shape

        boxes = []
        scores = []

        contours, _ = cv2.findContours((bitmap * 255).astype(np.uint8),
                                       cv2.RETR_LIST, cv2.CHAIN_APPROX_SIMPLE)

        for contour in contours[:self.max_candidates]:
            # Approximate the contour with a coarser polygon; the tolerance
            # is 0.2% of the contour perimeter.
            epsilon = 0.002 * cv2.arcLength(contour, True)
            approx = cv2.approxPolyDP(contour, epsilon, True)
            points = approx.reshape((-1, 2))
            if points.shape[0] < 4:
                continue

            score = self.box_score_fast(pred, points.reshape(-1, 2))
            if self.box_thresh > score:
                continue

            if points.shape[0] > 2:
                box = self.unclip(points, self.unclip_ratio)
                # unclip may split the region into several polygons; such
                # ambiguous results are discarded.
                if len(box) > 1:
                    continue
            else:
                continue
            box = box.reshape(-1, 2)

            # NOTE(review): get_mini_boxes returns 4 values in this build,
            # so this 2-way unpack raises ValueError — the 'poly' box_type
            # path appears broken here; confirm before enabling it.
            _, sside = self.get_mini_boxes(box.reshape((-1, 1, 2)))
            if sside < self.min_size + 2:
                continue

            box = np.array(box)
            # Rescale from the (possibly resized) bitmap back to the
            # destination image size, clamping into bounds.
            box[:, 0] = np.clip(
                np.round(box[:, 0] / width * dest_width), 0, dest_width)
            box[:, 1] = np.clip(
                np.round(box[:, 1] / height * dest_height), 0, dest_height)
            boxes.append(box.tolist())
            scores.append(score)
        return boxes, scores

    def boxes_from_bitmap(self, pred, _bitmap, dest_width, dest_height):
        '''
        _bitmap: single map with shape (1, H, W),
                whose values are binarized as {0, 1}
        '''

        bitmap = _bitmap
        height, width = bitmap.shape

        # cv2.findContours returns 3 values on OpenCV 3.x and 2 values on
        # OpenCV 4.x; handle both layouts.
        outs = cv2.findContours((bitmap * 255).astype(np.uint8), cv2.RETR_LIST,
                                cv2.CHAIN_APPROX_SIMPLE)
        if len(outs) == 3:
            img, contours, _ = outs[0], outs[1], outs[2]
        elif len(outs) == 2:
            contours, _ = outs[0], outs[1]

        num_contours = min(len(contours), self.max_candidates)

        boxes = []
        scores = []
        # idx flags whether any kept box had width/height ratio > 0.07;
        # used at the end to decide whether to shift bottom edges.
        idx = 0
        for index in range(num_contours):
            contour = contours[index]

            if contour.shape[0] < 4:
                continue

            points, sside, aspect_ratio, max_length = self.get_mini_boxes(contour)

            if sside < self.min_size:
                continue
            points = np.array(points)

            if self.score_mode == "fast":
                score = self.box_score_fast(pred, points.reshape(-1, 2))
            else:
                score = self.box_score_slow(pred, contour)

            # NOTE(review): threshold relaxed by 0.1 relative to the
            # configured box_thresh — an empirical tweak in this build.
            if self.box_thresh > score-0.1:
                continue

            img_width = int(
                max(
                    np.linalg.norm(points[0] - points[1]),
                    np.linalg.norm(points[2] - points[3])))
            img_height = int(
                max(
                    np.linalg.norm(points[0] - points[3]),
                    np.linalg.norm(points[1] - points[2])))
            wh_ratio = img_width / img_height
            # In practice most boxes satisfy wh_ratio > 0.07, so idx is
            # almost always set and the +20 shift below rarely applies.
            if wh_ratio > 0.07:
                idx = 1

            # if max_length < 10:
            #     unclip_ratio = self.unclip_ratio + 0.012 * aspect_ratio + 0.03 * aspect_ratio ** 0.3
            # else:
            # Empirically tuned, aspect-ratio-dependent expansion ratio.
            unclip_ratio = self.unclip_ratio + 0.012 * aspect_ratio - 0.1 + 0.02 * aspect_ratio ** 0.3 + 0.02 * aspect_ratio ** 0.1

            box = self.unclip(points, unclip_ratio).reshape(-1, 1, 2)
            box, sside, _, _ = self.get_mini_boxes(box)

            box = np.array(box)
            # Empirically tuned sub-pixel shift applied to all corners.
            box += 0.25*aspect_ratio**0.1
            box[:, 0] = np.clip(
                np.round(box[:, 0] / width * dest_width), 0, dest_width)
            box[:, 1] = np.clip(
                np.round(box[:, 1] / height * dest_height), 0, dest_height)
            boxes.append(box.astype("int32"))
            scores.append(score)

        boxes = np.array(boxes, dtype="int32")
        # NOTE(review): when more than 4 boxes were found and none had
        # wh_ratio > 0.07, the two bottom corners of every box are pushed
        # down by 20 px — an empirical tweak; confirm it is intended.
        if boxes.shape[0] > 4 and idx == 0:
            boxes[:,2:,1] += 20
        return boxes, scores

    def unclip(self, box, unclip_ratio):
        """Expand *box* outward using the DB un-shrink formula: offset
        distance = area * unclip_ratio / perimeter (via pyclipper)."""
        poly = Polygon(box)
        distance = poly.area * unclip_ratio / poly.length
        offset = pyclipper.PyclipperOffset()
        offset.AddPath(box, pyclipper.JT_ROUND, pyclipper.ET_CLOSEDPOLYGON)
        expanded = np.array(offset.Execute(distance))
        return expanded

    def get_mini_boxes(self, contour):
        """Return the min-area rectangle of *contour* as 4 points ordered
        top-left, top-right, bottom-right, bottom-left, plus
        (short side, long/short aspect ratio, long side)."""
        bounding_box = cv2.minAreaRect(contour)
        # Sort corners left-to-right, then resolve top/bottom within the
        # left pair and the right pair (y grows downward).
        points = sorted(list(cv2.boxPoints(bounding_box)), key=lambda x: x[0])

        index_1, index_2, index_3, index_4 = 0, 1, 2, 3
        if points[1][1] > points[0][1]:
            index_1 = 0
            index_4 = 1
        else:
            index_1 = 1
            index_4 = 0
        if points[3][1] > points[2][1]:
            index_2 = 2
            index_3 = 3
        else:
            index_2 = 3
            index_3 = 2

        box = [
            points[index_1], points[index_2], points[index_3], points[index_4]
        ]

        max_length = max(bounding_box[1])
        min_length = min(bounding_box[1])

        # The 1e-5 floor guards the aspect ratio against division by zero.
        return box, min_length,max_length / max(min_length, 0.00001), max_length

    def box_score_fast(self, bitmap, _box):
        '''
        box_score_fast: use bbox mean score as the mean score
        '''
        h, w = bitmap.shape[:2]
        box = _box.copy()
        xmin = np.clip(np.floor(box[:, 0].min()).astype("int32"), 0, w - 1)
        xmax = np.clip(np.ceil(box[:, 0].max()).astype("int32"), 0, w - 1)
        ymin = np.clip(np.floor(box[:, 1].min()).astype("int32"), 0, h - 1)
        ymax = np.clip(np.ceil(box[:, 1].max()).astype("int32"), 0, h - 1)

        # Rasterize the box into a local mask and average pred inside it.
        mask = np.zeros((ymax - ymin + 1, xmax - xmin + 1), dtype=np.uint8)
        box[:, 0] = box[:, 0] - xmin
        box[:, 1] = box[:, 1] - ymin
        cv2.fillPoly(mask, box.reshape(1, -1, 2).astype("int32"), 1)
        return cv2.mean(bitmap[ymin:ymax + 1, xmin:xmax + 1], mask)[0]

    def box_score_slow(self, bitmap, contour):
        '''
        box_score_slow: use polygon mean score as the mean score
        '''
        h, w = bitmap.shape[:2]
        contour = contour.copy()
        contour = np.reshape(contour, (-1, 2))

        xmin = np.clip(np.min(contour[:, 0]), 0, w - 1)
        xmax = np.clip(np.max(contour[:, 0]), 0, w - 1)
        ymin = np.clip(np.min(contour[:, 1]), 0, h - 1)
        ymax = np.clip(np.max(contour[:, 1]), 0, h - 1)

        # Rasterize the exact contour (not just its bbox) into the mask.
        mask = np.zeros((ymax - ymin + 1, xmax - xmin + 1), dtype=np.uint8)

        contour[:, 0] = contour[:, 0] - xmin
        contour[:, 1] = contour[:, 1] - ymin

        cv2.fillPoly(mask, contour.reshape(1, -1, 2).astype("int32"), 1)
        return cv2.mean(bitmap[ymin:ymax + 1, xmin:xmax + 1], mask)[0]

    def __call__(self, outs_dict, shape_list):
        """Post-process a batch of DB probability maps.

        Args:
            outs_dict: dict with key 'maps' holding the model output of
                shape (N, 1, H, W), as paddle.Tensor or ndarray.
            shape_list: per-image [src_h, src_w, ratio_h, ratio_w].

        Returns:
            list of {'points': boxes} dicts, one entry per image.
        """
        pred = outs_dict['maps']
        if isinstance(pred, paddle.Tensor):
            pred = pred.numpy()
        pred = pred[:, 0, :, :]
        # Binarize the probability map.
        segmentation = pred > self.thresh

        boxes_batch = []
        for batch_index in range(pred.shape[0]):
            src_h, src_w, ratio_h, ratio_w = shape_list[batch_index]
            if self.dilation_kernel is not None:
                mask = cv2.dilate(
                    np.array(segmentation[batch_index]).astype(np.uint8),
                    self.dilation_kernel)
            else:
                mask = segmentation[batch_index]
            if self.box_type == 'poly':
                boxes, scores = self.polygons_from_bitmap(pred[batch_index],
                                                          mask, src_w, src_h)
            elif self.box_type == 'quad':
                boxes, scores = self.boxes_from_bitmap(pred[batch_index], mask,
                                                       src_w, src_h)
            else:
                raise ValueError("box_type can only be one of ['quad', 'poly']")

            boxes_batch.append({'points': boxes})
        return boxes_batch


class DetResizeForTest(object):
    """Resize op for detection inference.

    The strategy is selected from the constructor kwargs:
      * 'image_shape'    -> type 1: resize to a fixed (h, w); with
                            keep_ratio the width follows the aspect ratio.
      * 'limit_side_len' -> type 0: scale so one side respects the limit,
                            then snap both sides to multiples of 32.
      * 'resize_long'    -> type 2: scale the long side to resize_long,
                            then round both sides up to multiples of 128.
      * none of these    -> type 0 with limit_side_len=736, limit_type='min'.
    """

    def __init__(self, **kwargs):
        super(DetResizeForTest, self).__init__()
        # Defaults; overridden below depending on which kwargs are present.
        self.resize_type = 0
        self.keep_ratio = False
        if 'image_shape' in kwargs:
            self.image_shape = kwargs['image_shape']
            self.resize_type = 1
            if 'keep_ratio' in kwargs:
                self.keep_ratio = kwargs['keep_ratio']
        elif 'limit_side_len' in kwargs:
            self.limit_side_len = kwargs['limit_side_len']
            self.limit_type = kwargs.get('limit_type', 'min')
        elif 'resize_long' in kwargs:
            self.resize_type = 2
            self.resize_long = kwargs.get('resize_long', 960)
        else:
            self.limit_side_len = 736
            self.limit_type = 'min'

    def __call__(self, data):
        """Resize data['image'] in place and record
        data['shape'] = [src_h, src_w, ratio_h, ratio_w]."""
        img = data['image']
        src_h, src_w, _ = img.shape
        # Pad very small images up to at least 32x32 before resizing.
        if sum([src_h, src_w]) < 64:
            img = self.image_padding(img)

        if self.resize_type == 0:
            # img, shape = self.resize_image_type0(img)
            img, [ratio_h, ratio_w] = self.resize_image_type0(img)
        elif self.resize_type == 2:
            img, [ratio_h, ratio_w] = self.resize_image_type2(img)
        else:
            # img, shape = self.resize_image_type1(img)
            img, [ratio_h, ratio_w] = self.resize_image_type1(img)
        data['image'] = img
        data['shape'] = np.array([src_h, src_w, ratio_h, ratio_w])
        return data

    def image_padding(self, im, value=0):
        """Pad *im* with *value* so each side is at least 32 pixels."""
        h, w, c = im.shape
        im_pad = np.zeros((max(32, h), max(32, w), c), np.uint8) + value
        im_pad[:h, :w, :] = im
        return im_pad

    def resize_image_type1(self, img):
        """Resize to the fixed self.image_shape; with keep_ratio the width
        follows the aspect ratio, rounded up to a multiple of 32."""
        resize_h, resize_w = self.image_shape
        ori_h, ori_w = img.shape[:2]  # (h, w, c)
        if self.keep_ratio is True:
            resize_w = ori_w * resize_h / ori_h
            N = math.ceil(resize_w / 32)
            resize_w = N * 32
        ratio_h = float(resize_h) / ori_h
        ratio_w = float(resize_w) / ori_w
        img = cv2.resize(img, (int(resize_w), int(resize_h)))
        # return img, np.array([ori_h, ori_w])
        return img, [ratio_h, ratio_w]

    def resize_image_type0(self, img):
        """
        resize image to a size multiple of 32 which is required by the network
        args:
            img(array): array with shape [h, w, c]
        return(tuple):
            img, (ratio_h, ratio_w)
        """
        limit_side_len = self.limit_side_len
        h, w, c = img.shape

        # 'max': shrink so the longer side fits within limit_side_len.
        if self.limit_type == 'max':
            if max(h, w) > limit_side_len:
                if h > w:
                    ratio = float(limit_side_len) / h
                else:
                    ratio = float(limit_side_len) / w
            else:
                ratio = 1.
        # 'min': enlarge so the shorter side reaches limit_side_len.
        elif self.limit_type == 'min':
            if min(h, w) < limit_side_len:
                if h < w:
                    ratio = float(limit_side_len) / h
                else:
                    ratio = float(limit_side_len) / w
            else:
                ratio = 1.
        elif self.limit_type == 'resize_long':
            ratio = float(limit_side_len) / max(h, w)
        else:
            raise Exception('not support limit type, image ')
        resize_h = int(h * ratio)
        resize_w = int(w * ratio)

        # Cap the longer side at 3600 px (empirical limit in this build,
        # presumably to bound memory/latency — confirm).
        limit_side_len2 = 3600
        if max(resize_h, resize_w) > limit_side_len2:
            if resize_h > resize_w:
                resize_w = float(limit_side_len2) / resize_h * resize_w
                resize_h = limit_side_len2
            else:
                resize_h = float(limit_side_len2) / resize_w * resize_h
                resize_w = limit_side_len2

        # resize_h = max(int(round(resize_h / 32) * 32), 32)
        # resize_w = max(int(round(resize_w / 32) * 32), 32)

        # Snap both sides to multiples of 32, clamped into [32, 6400].
        resize_h = min(max(int(round(resize_h / 32) * 32), 32), 6400)
        resize_w = min(max(int(round(resize_w / 32) * 32), 32), 6400)

        try:
            # NOTE(review): returns (None, (None, None)) for degenerate
            # sizes — callers must cope with a None image.
            if int(resize_w) <= 0 or int(resize_h) <= 0:
                return None, (None, None)
            img = cv2.resize(img, (int(resize_w), int(resize_h)))
        # NOTE(review): bare except that prints and exits the whole
        # process — consider narrowing to cv2.error and raising instead.
        except:
            print(img.shape, resize_w, resize_h)
            sys.exit(0)
        ratio_h = resize_h / float(h)
        ratio_w = resize_w / float(w)
        return img, [ratio_h, ratio_w]

    def resize_image_type2(self, img):
        """Scale so the longer side equals self.resize_long, then round
        both sides up to multiples of 128."""
        h, w, _ = img.shape

        resize_w = w
        resize_h = h

        if resize_h > resize_w:
            ratio = float(self.resize_long) / resize_h
        else:
            ratio = float(self.resize_long) / resize_w

        resize_h = int(resize_h * ratio)
        resize_w = int(resize_w * ratio)

        # Round up to the 128-pixel stride expected downstream.
        max_stride = 128
        resize_h = (resize_h + max_stride - 1) // max_stride * max_stride
        resize_w = (resize_w + max_stride - 1) // max_stride * max_stride
        img = cv2.resize(img, (int(resize_w), int(resize_h)))
        ratio_h = resize_h / float(h)
        ratio_w = resize_w / float(w)

        return img, [ratio_h, ratio_w]


class TextDetector(object):
    """DB text detector wrapping a Paddle inference predictor.

    Pipeline: preprocess (resize + normalize) -> forward pass ->
    DBPostProcess -> clip/filter boxes back to the original image size.
    ``__call__`` takes ``(img_norm, flag, shape_list, ori_img)`` and
    returns ``(dt_boxes, elapse_seconds)``.
    """

    def __init__(self, args):
        # args: parsed inference namespace (see tools.infer.utility).
        self.args = args
        # Only the DB algorithm is wired up in this build.
        self.det_algorithm = "DB"

        postprocess_params = {}
        if self.det_algorithm == "DB":
            postprocess_params['name'] = 'DBPostProcess'
            postprocess_params["thresh"] = args.det_db_thresh
            postprocess_params["box_thresh"] = args.det_db_box_thresh
            postprocess_params["max_candidates"] = 1000
            postprocess_params["unclip_ratio"] = args.det_db_unclip_ratio
            postprocess_params["use_dilation"] = args.use_dilation
            postprocess_params["score_mode"] = args.det_db_score_mode
            postprocess_params["box_type"] = args.det_box_type

        self.postprocess_op = DBPostProcess(**postprocess_params)
        self.predictor, self.input_tensor, self.output_tensors, self.config = utility.create_predictor(
            args, 'det', logger)
        # Preprocessing: resize (DetResizeForTest defaults) -> normalize
        # with ImageNet mean/std -> HWC->CHW -> keep only image + shape.
        pre_process_list = [{
            'DetResizeForTest': {
                # 'limit_side_len': args.det_limit_side_len,
                # 'limit_type': args.det_limit_type,
            }
        }, {
            'NormalizeImage': {
                'std': [0.229, 0.224, 0.225],
                'mean': [0.485, 0.456, 0.406],
                'scale': '1./255.',
                'order': 'hwc'
            }
        }, {
            'ToCHWImage': None
        }, {
            'KeepKeys': {
                'keep_keys': ['image', 'shape']
            }
        }]
        self.preprocess_op = create_operators(pre_process_list)
        # Benchmarking (auto_log) is hard-disabled in this build.
        self.benchmark = False
        if self.benchmark:
            import auto_log
            pid = os.getpid()
            gpu_id = utility.get_infer_gpuid()
            self.autolog = auto_log.AutoLogger(
                model_name="det",
                model_precision=args.precision,
                batch_size=1,
                data_shape="dynamic",
                save_path=None,
                inference_config=self.config,
                pids=pid,
                process_name=None,
                gpu_ids=gpu_id if args.use_gpu else None,
                time_keys=[
                    'preprocess_time', 'inference_time', 'postprocess_time'
                ],
                warmup=2,
                logger=logger)

    def order_points_clockwise(self, pts):
        """Rotate the 4 points so that the one with the smallest x+y sum
        (top-left corner) comes first; relative order is preserved."""
        # rect = np.zeros((4, 2), dtype="float32")
        s = np.argmin(pts.sum(axis=1))
        rec = np.roll(pts, shift=-s, axis=0).astype(np.float32)
        # rect[0] = pts[np.argmin(s)]
        # rect[2] = pts[np.argmax(s)]
        # tmp = np.delete(pts, (np.argmin(s), np.argmax(s)), axis=0)
        # diff = np.diff(np.array(tmp), axis=1)
        # rect[1] = tmp[np.argmin(diff)]
        # rect[3] = tmp[np.argmax(diff)]
        return rec

    def clip_det_res(self, points, img_height, img_width):
        """Clamp each point into the image bounds [0, w-1] x [0, h-1]."""
        for pno in range(points.shape[0]):
            points[pno, 0] = int(min(max(points[pno, 0], 0), img_width - 1))
            points[pno, 1] = int(min(max(points[pno, 1], 0), img_height - 1))
        return points

    def filter_tag_det_res(self, dt_boxes, image_shape):
        """Order, clip and drop degenerate boxes (either side <= 3 px)."""
        img_height, img_width = image_shape[0:2]
        dt_boxes_new = []
        for box in dt_boxes:
            if type(box) is list:
                box = np.array(box)
            box = self.order_points_clockwise(box)
            box = self.clip_det_res(box, img_height, img_width)
            rect_width = int(np.linalg.norm(box[0] - box[1]))
            rect_height = int(np.linalg.norm(box[0] - box[3]))
            if rect_width <= 3 or rect_height <= 3:
                continue
            dt_boxes_new.append(box)
        dt_boxes = np.array(dt_boxes_new)
        return dt_boxes

    def filter_tag_det_res_only_clip(self, dt_boxes, image_shape):
        """Clip boxes into the image bounds without reordering/filtering."""
        img_height, img_width = image_shape[0:2]
        dt_boxes_new = []
        for box in dt_boxes:
            if type(box) is list:
                box = np.array(box)
            box = self.clip_det_res(box, img_height, img_width)
            dt_boxes_new.append(box)
        dt_boxes = np.array(dt_boxes_new)
        return dt_boxes

    def predict(self, data):
        """Run one forward pass + postprocess on preprocessed data.

        Args:
            data: [img (CHW array), shape_list, ori_shape] as produced
                by preprocess(); shape_list is presumably
                [src_h, src_w, ratio_h, ratio_w] — see DetResizeForTest.

        Returns:
            (dt_boxes, elapse_seconds).
        """
        st = time.time()
        if self.benchmark:
            self.autolog.times.start()

        img, shape_list, ori_shape = data

        # Add the batch dimension expected by the predictor.
        img = np.expand_dims(img, axis=0)
        shape_list = np.expand_dims(shape_list, axis=0)
        # img = img.copy()

        if self.benchmark:
            self.autolog.times.stamp()

        self.input_tensor.copy_from_cpu(img)
        self.predictor.run()
        outputs = []
        for output_tensor in self.output_tensors:
            output = output_tensor.copy_to_cpu()
            outputs.append(output)
        if self.benchmark:
            self.autolog.times.stamp()
        preds = {}
        preds['maps'] = outputs[0]

        post_result = self.postprocess_op(preds, shape_list)
        dt_boxes = post_result[0]['points']

        # Clip to the original image and drop degenerate (<=3 px) boxes.
        dt_boxes = self.filter_tag_det_res(dt_boxes, ori_shape)
        if self.benchmark:
            self.autolog.times.end(stamp=True)
        et = time.time()
        return dt_boxes, et - st

    def preprocess(self, img):
        """Apply the resize/normalize pipeline to a raw HWC image.

        Returns [img_chw, shape_list, ori_shape]; when the resize op
        yields a None image (degenerate sizes, see DetResizeForTest),
        returns [None, 0, ori_shape] instead.
        """
        ori_shape = img.shape[:2]
        data = {'image': img}

        data = transform(data, self.preprocess_op)
        img, shape_list = data
        if img is None:
            return [None, 0, ori_shape]
        # img = np.expand_dims(img, axis=0)
        # shape_list = np.expand_dims(shape_list, axis=0)
        # img = img.copy()
        # NOTE(review): debug print left enabled; prefer logger.debug.
        print(img.shape, shape_list, ori_shape)
        return [img, shape_list, ori_shape]

    def __call__(self, data):
        """Detect text boxes.

        Args:
            data: (img_norm, flag, shape_list, ori_img); flag == 0 marks
                an invalid/empty preprocessing result.

        Returns:
            (dt_boxes, elapse_seconds), or (None, 0) when flag == 0.
        """
        img_norm, flag, shape_list, ori_img = data
        if flag == 0:
            return None, 0
        # shape_list is presumably [src_h, src_w, ratio_h, ratio_w]; the
        # first two entries are the original image size — TODO confirm
        # against the caller that builds this tuple.
        ori_shape = shape_list[:2].tolist()
        # NOTE: two generations of commented-out experiments lived here that
        # recursively split images with extreme aspect ratios (tall posters /
        # wide strips) into overlapping tiles, predicted each tile, and
        # merged the resulting boxes with offsets. Both were disabled in
        # favour of the single whole-image pass below.
        dt_boxes, elapse = self.predict([img_norm, shape_list, ori_shape])
        return dt_boxes, elapse


class TextRecognizer(object):
    """CTC-based text recognizer backed by a Paddle inference predictor.

    Crops are sorted by aspect ratio before batching so that images of a
    similar width share a batch, which minimises padding waste.
    """

    def __init__(self, args):
        self.rec_image_shape = [int(v) for v in args.rec_image_shape.split(",")]
        self.rec_batch_num = args.rec_batch_num
        self.rec_algorithm = args.rec_algorithm
        postprocess_params = {
            'name': 'CTCLabelDecode',
            "character_dict_path": os.path.abspath('./paddle_ocr/ppocr/utils/ppocr_keys_v1.txt'),
            "use_space_char": args.use_space_char
        }

        self.postprocess_op = build_post_process(postprocess_params)
        self.postprocess_params = postprocess_params
        self.predictor, self.input_tensor, self.output_tensors, self.config = \
            utility.create_predictor(args, 'rec', logger)
        # Hard-coded off; flip to True to collect auto_log timing reports.
        self.benchmark = False
        if self.benchmark:
            import auto_log
            pid = os.getpid()
            gpu_id = utility.get_infer_gpuid()
            self.autolog = auto_log.AutoLogger(
                model_name="rec",
                model_precision=args.precision,
                batch_size=args.rec_batch_num,
                data_shape="dynamic",
                save_path=None,  #args.save_log_path,
                inference_config=self.config,
                pids=pid,
                process_name=None,
                gpu_ids=gpu_id if args.use_gpu else None,
                time_keys=[
                    'preprocess_time', 'inference_time', 'postprocess_time'
                ],
                warmup=0,
                logger=logger)

    def resize_norm_img(self, img, max_wh_ratio, rec_image_shape):
        """Resize *img* to height imgH preserving aspect ratio, scale to
        [-1, 1], and zero-pad on the right to width imgH * max_wh_ratio.

        Returns a float32 CHW array of shape (imgC, imgH, imgW).
        """
        imgC, imgH, imgW = rec_image_shape
        assert imgC == img.shape[2]
        # Target width is driven by the widest crop in the current batch.
        imgW = int((imgH * max_wh_ratio))
        h, w = img.shape[:2]
        ratio = w / float(h)
        if math.ceil(imgH * ratio) > imgW:
            resized_w = imgW
        else:
            resized_w = int(math.ceil(imgH * ratio))
        resized_image = cv2.resize(img, (resized_w, imgH))
        resized_image = resized_image.astype('float32')
        # HWC -> CHW, then map [0, 255] -> [-1, 1].
        resized_image = resized_image.transpose((2, 0, 1)) / 255
        resized_image -= 0.5
        resized_image /= 0.5
        padding_im = np.zeros((imgC, imgH, imgW), dtype=np.float32)
        padding_im[:, :, 0:resized_w] = resized_image
        return padding_im

    def __call__(self, img_list, batch_num, image_shape):
        """Recognize text in every crop of *img_list*.

        Args:
            img_list: list of HWC image crops.
            batch_num: inference batch size.
            image_shape: (C, H, W) base shape for this call.

        Returns:
            (rec_res, elapse): per-input [text, score] results in the
            original input order, and total elapsed seconds.
        """
        st = time.time()
        img_num = len(img_list)
        # Sorting by aspect ratio speeds up recognition: similar widths
        # batch together and need less padding.
        width_list = [img.shape[1] / float(img.shape[0]) for img in img_list]
        indices = np.argsort(np.array(width_list))
        # One fresh [text, score] slot per image; a multiplied list literal
        # would alias a single inner list across all slots.
        rec_res = [['', 0.0] for _ in range(img_num)]

        if self.benchmark:
            self.autolog.times.start()
        for beg_img_no in range(0, img_num, batch_num):
            end_img_no = min(img_num, beg_img_no + batch_num)
            norm_img_batch = []
            imgC, imgH, imgW = image_shape
            # The widest crop in the batch dictates the padded width.
            max_wh_ratio = imgW / imgH
            for ino in range(beg_img_no, end_img_no):
                h, w = img_list[indices[ino]].shape[0:2]
                max_wh_ratio = max(max_wh_ratio, w * 1.0 / h)
            for ino in range(beg_img_no, end_img_no):
                norm_img = self.resize_norm_img(img_list[indices[ino]],
                                                max_wh_ratio, image_shape)
                norm_img_batch.append(norm_img[np.newaxis, :])
            norm_img_batch = np.concatenate(norm_img_batch).copy()
            if self.benchmark:
                self.autolog.times.stamp()

            self.input_tensor.copy_from_cpu(norm_img_batch)
            self.predictor.run()
            outputs = [tensor.copy_to_cpu() for tensor in self.output_tensors]
            if self.benchmark:
                self.autolog.times.stamp()
            preds = outputs[0] if len(outputs) == 1 else outputs
            rec_result = self.postprocess_op(preds)
            # Scatter batch results back to the original (unsorted) order.
            for rno in range(len(rec_result)):
                rec_res[indices[beg_img_no + rno]] = rec_result[rno]
            if self.benchmark:
                self.autolog.times.end(stamp=True)
        return rec_res, time.time() - st


def sorted_boxes(dt_boxes, y_thresh=10):
    """
    Sort text boxes in order from top to bottom, left to right.

    args:
        dt_boxes(array): detected text boxes, each with shape [4, 2]
        y_thresh(float): boxes whose first-point y coordinates differ by
            less than this are treated as the same text line and ordered
            left-to-right (default 10, the original hard-coded value)
    return:
        sorted boxes (list of [4, 2] arrays)
    """
    # Primary sort: top edge (y), then left edge (x) of the first corner.
    _boxes = sorted(dt_boxes, key=lambda box: (box[0][1], box[0][0]))

    # Bubble adjacent boxes left-to-right when they sit on the same line
    # (their y difference is below the threshold).
    for i in range(len(_boxes) - 1):
        for j in range(i, -1, -1):
            if abs(_boxes[j + 1][0][1] - _boxes[j][0][1]) < y_thresh and \
                    (_boxes[j + 1][0][0] < _boxes[j][0][0]):
                _boxes[j], _boxes[j + 1] = _boxes[j + 1], _boxes[j]
            else:
                break
    return _boxes


# Detection preprocessing pipeline: decode raw bytes, resize for the
# detector, normalize with the standard ImageNet mean/std, transpose to
# CHW, and keep only the keys the downstream consumer reads.
ops = create_operators([
    {'DecodeImage': {'img_mode': 'BGR', 'channel_first': False}},
    {'DetResizeForTest': {}},
    {'NormalizeImage': {
        'std': [0.229, 0.224, 0.225],
        'mean': [0.485, 0.456, 0.406],
        'scale': '1./255.',
        'order': 'hwc',
    }},
    {'ToCHWImage': None},
    {'KeepKeys': {'keep_keys': ['image', 'img_path', 'flag', 'shape']}},
])

root_dir = './data/'

def producer(name, q, data):
    """Producer thread: read each image file named in *data* from root_dir,
    run the detection preprocessing ops, and put the result into queue *q*
    (blocks while the queue is full).

    On any read/decode failure a placeholder item with flag=0 is queued so
    the consumer can record the file as bad. A final ``None`` sentinel is
    queued to signal end of production.
    """
    for file_name in data:
        img_path = os.path.join(root_dir, file_name)
        # Use a distinct name for the per-file sample dict; the original
        # rebound the *data* parameter inside its own iteration loop.
        sample = {'img_path': file_name, 'flag': 1}
        try:
            with open(img_path, 'rb') as f:
                sample['image'] = f.read()
            # First op decodes the bytes; keep the decoded image around so
            # the consumer can crop from the original-resolution array.
            outs = transform(sample, ops[:1])
            ori_img = outs['image']
            outs = transform(outs, ops[1:])
            outs.append(ori_img)
        except Exception:
            # Unreadable or undecodable image: queue a placeholder marked
            # flag=0 so the consumer can skip it.
            outs = [None, file_name, 0, np.array([736, 736, 1, 1]), None]
        q.put(outs)
        print('Producer {} put {}; '.format(name, file_name))
    q.put(None)  # end-of-stream sentinel for the consumer(s)
    

def consumer(name, q):
    """Consumer thread: drain preprocessed samples from *q*, run text
    detection, bucket the detected crops by rounded aspect ratio, run
    batched recognition per bucket, and write one "path\\tjson" line per
    image to result.txt.

    Terminates when the producer's ``None`` sentinel is received; the
    sentinel is re-queued so any sibling consumers can also terminate.
    """
    args = utility.parse_args()
    args.det_model_dir = './model/det/' #'./model_rep/det/' #'./ch_PP-OCRv4_det_infer' #'./model/det/'
    args.rec_model_dir = './model/rec/' #'./model_rep/rec/' #'./ch_PP-OCRv4_rec_infer' # './model/rec/'
    args.drop_score = 0.9
    text_detector = TextDetector(args)
    text_recognizer = TextRecognizer(args)
    draw_img_save_dir = './'
    os.makedirs(draw_img_save_dir, exist_ok=True)
    if args.warmup:
        img = np.random.uniform(0, 255, [640, 640, 3]).astype(np.uint8)
        for i in range(2):
            # NOTE(review): the real detection call below feeds a
            # preprocessed [img, flag, shape_list, ori_im] list; confirm a
            # raw image is acceptable for warmup.
            res = text_detector(img)
        img = np.random.uniform(0, 255, [48, 320, 3]).astype(np.uint8)
        for i in range(2):
            # Fix: TextRecognizer.__call__ requires (img_list, batch_num,
            # image_shape); the original warmup passed only img_list and
            # raised TypeError whenever args.warmup was set.
            res = text_recognizer([img] * int(args.rec_batch_num),
                                  int(args.rec_batch_num), (3, 48, 320))
    total_time = 0
    img_crop_list_index_all = []  # per image: [ratio_bucket, index_in_bucket] per box
    dt_boxes_all = []             # per image: sorted detection boxes
    img_crop_ratio_dict = {}      # ratio_bucket -> list of crops across all images
    filter_boxes_num = 0
    dt_boxes_num = 0
    image_file_list = []
    image_file_list_bad = []
    reader_time = time.time()
    reader_cost = 0
    det_time = 0
    st = time.time()
    idx = 0
    # Keep pulling from the queue until the producer's sentinel arrives.
    while True:
        get_result = q.get()
        q.task_done()
        if get_result is None:
            # Producer has finished. Re-queue the sentinel so any other
            # consumer threads also see an end marker (at the cost of one
            # sentinel remaining in the queue).
            q.put(None)
            print("All data have been tooken out!")
            break
        img, image_file, flag, shape_list, ori_im = get_result
        logger.info("{} The predict time of {}".format(idx, image_file))
        if flag == 0:
            # Producer failed to read/decode this file; record and skip.
            image_file_list_bad.append(image_file)
            continue
        image_file_list.append(image_file)
        reader_cost += time.time() - reader_time
        dt_boxes, elapse = text_detector([img, flag, shape_list, ori_im])
        total_time += elapse
        det_time += elapse
        if dt_boxes is None:
            logger.debug("no dt_boxes found, elapsed : {}".format(elapse))
            # Append empty entries to keep the per-image lists aligned
            # with image_file_list for the final zip.
            img_crop_list_index_all.append([])
            dt_boxes_all.append([])
            continue
        else:
            logger.debug("dt_boxes num : {}, elapsed : {}".format(
                len(dt_boxes), elapse))
        logger.info(len(dt_boxes))

        img_crop_list = []

        dt_boxes = sorted_boxes(dt_boxes)
        dt_boxes_all.append(dt_boxes)
        for bno in range(len(dt_boxes)):
            tmp_box = dt_boxes[bno]
            if args.det_box_type == "quad":
                img_crop = get_rotate_crop_image(ori_im, tmp_box)
            else:
                img_crop = get_minarea_rect_crop(ori_im, tmp_box)
            img_crop_list.append(img_crop)

        # Bucket the crops by rounded width/height ratio so that a later
        # recognition pass can batch crops of similar shape together.
        img_crop_list_index = []
        for img_crop in img_crop_list:
            h, w = img_crop.shape[:2]
            gen_ratio = round(float(w) / float(h))
            ratio_resize = 1 if gen_ratio == 0 else gen_ratio
            if ratio_resize in img_crop_ratio_dict:
                img_crop_list_index.append(
                    [ratio_resize, len(img_crop_ratio_dict[ratio_resize])])
                img_crop_ratio_dict[ratio_resize].append(img_crop)
            else:
                img_crop_ratio_dict[ratio_resize] = [img_crop]
                img_crop_list_index.append([ratio_resize, 0])

        img_crop_list_index_all.append(img_crop_list_index)
        reader_time = time.time()
        idx += 1

    # Recognition pass: one batched run per aspect-ratio bucket.
    rec_dict = {}
    rec_time = 0
    for k, img_crop_list in img_crop_ratio_dict.items():
        image_shape = (3, 48, 336)
        if k == 4:
            image_shape = (3, 48, 288)
        if k == 2:
            image_shape = (3, 48, 240)
        if k < 6:
            rec_bs = 30
        else:
            # Wider crops cost more memory per sample: shrink the batch
            # size roughly in proportion to the ratio, floor at 1.
            rec_bs = min(max(int(30. / float(k) * 6), 1), len(img_crop_list))
        print('Rec:', k, len(img_crop_list), rec_bs)
        rec_res, elapse = text_recognizer(img_crop_list, rec_bs, image_shape)
        total_time += elapse
        rec_time += elapse
        rec_dict[k] = rec_res

    # Re-associate recognition results with their source image and boxes,
    # keeping only results at or above the drop_score threshold.
    save_results = []
    for img_crop_index, dt_boxes, image_file in zip(
            img_crop_list_index_all, dt_boxes_all, image_file_list):
        dt_boxes_num += len(dt_boxes)
        res_list = []
        for box_no, rec_index in enumerate(img_crop_index):
            rec_result = rec_dict[rec_index[0]][rec_index[1]]
            box = dt_boxes[box_no]
            text, score = rec_result[0], rec_result[1]
            if score >= args.drop_score:
                filter_boxes_num += 1
                res = {
                    "transcription": text,
                    "points": np.array(box).astype(np.int32).tolist(),
                    "score": score,
                }
                res_list.append(res)

        save_pred = image_file + "\t" + json.dumps(
                res_list, ensure_ascii=False) + "\n"
        save_results.append(save_pred)
    logger.info("Reader time: {}".format(reader_cost))
    logger.info("Total time: {}".format(time.time() - st))
    logger.info("rec_time: {}".format(rec_time))
    logger.info("det_time: {}".format(det_time))

    with open(os.path.join(draw_img_save_dir, "result.txt"),
              'w', encoding='utf-8') as f:
        f.writelines(save_results)


if __name__ == "__main__":

    # Collect image file names from the data directory.
    data_lines = []
    data_dir = os.path.abspath(root_dir)
    for img_name in os.listdir(data_dir):
        # Fix: the original checked img_name[-4:] against ['.jpg', 'jpeg',
        # '.png'], which also matched names ending in "jpeg" without a dot.
        if img_name.endswith(('.jpg', '.jpeg', '.png')):
            data_lines.append(img_name)

    q = queue.Queue(maxsize=5)  # bounded queue: producer blocks when 5 items are pending
    threads_list = []
    prod = threading.Thread(target=producer, args=('Producer', q, data_lines))
    threads_list.append(prod)
    cons = threading.Thread(target=consumer, args=('Consumer', q))
    threads_list.append(cons)
    st = time.time()
    for i in threads_list:
        # Daemon threads are killed when the main thread exits, whatever
        # state they are in. (.daemon replaces the deprecated setDaemon().)
        i.daemon = True
        i.start()
    for i in threads_list:
        i.join()

    # Drain anything left in the queue (including the sentinel None);
    # otherwise the final q.join() below would block forever.
    if not q.empty():
        for i in range(q.qsize()):
            q.get()
            q.task_done()  # required: every q.get() must be paired with task_done()
    print('time:', time.time() - st)
    # Blocks until every queued item has been matched by a task_done().
    q.join()

    print("Program is over!")