# OCR识别模块
import dataclasses
import os
import sys

import cv2
import fastdeploy as fd
import numpy as np

sys.path.append('..')

from tools import cos


class PosMixin:
    """Read-only rectangle accessors over a ``box`` attribute.

    ``box`` holds ``(left, top, width, height)`` in pixels.
    """

    box: tuple[int, int, int, int]

    @property
    def left(self):
        """X coordinate of the left edge."""
        return self.box[0]

    @property
    def top(self):
        """Y coordinate of the top edge."""
        return self.box[1]

    @property
    def width(self):
        """Box width in pixels."""
        return self.box[2]

    @property
    def height(self):
        """Box height in pixels."""
        return self.box[3]

    @property
    def right(self):
        """X coordinate of the right edge (left + width)."""
        return self.box[0] + self.box[2]

    @property
    def bottom(self):
        """Y coordinate of the bottom edge (top + height)."""
        return self.box[1] + self.box[3]


@dataclasses.dataclass
class OpticalLine(PosMixin):
    """One recognized text line with its bounding box and confidence scores."""
    text: str  # recognized text content
    box: tuple[int, int, int, int]  # position: (left, top, width, height)
    page_num: int | None = None  # page number

    url: str | None = None  # image URL
    line_num: int | None = None  # line index
    # chars: List[OpticalChars] | None = None  # per-character results (disabled)

    column_num: int | None = None  # index of the layout column this line belongs to

    rec_score: float | None = None  # recognition confidence
    cls_score: float | None = None  # orientation-classifier confidence
    error: int | None = 0  # non-zero marks a failed-OCR placeholder entry


# Directory where downloaded model archives are unpacked (next to this file).
current_path = os.path.join(os.path.dirname(os.path.realpath(__file__)), 'workspace')

# Remote archives with the server / mobile PP-OCR model bundles.
model_file = 'https://s-chengdu.100tifen.com/paddle/zoo/paddle_ocr.zip'
mobile_model_file = 'https://s-chengdu.100tifen.com/paddle/zoo/paddle_mobile.zip'


def init():
    """Create the model workspace and download the OCR bundle on first run.

    A no-op when the ``workspace`` directory already exists.
    """
    # NOTE(review): this checks the cwd-relative 'workspace', while
    # current_path is module-relative — confirm the process always starts
    # from this module's directory.
    if os.path.exists('workspace'):
        return
    os.mkdir('workspace')
    print('开始下载模型')
    cos.download(model_file, os.path.join('workspace', 'paddle_ocr.zip'), unzip=True)


init()


def parse_arguments():
    """Parse command-line options for the OCR service.

    :return: argparse.Namespace with model paths, device/backend selection,
        batch sizes, server port and the mobile-model switch.
    """
    import argparse

    def _str2bool(value):
        # Bug fix: the original used ``type=bool``, a classic argparse trap —
        # bool('False') is True, so ``--mobile False`` enabled mobile mode.
        # Accept the usual spellings of a boolean instead.
        if isinstance(value, bool):
            return value
        return value.strip().lower() in ('1', 'true', 't', 'yes', 'y')

    parser = argparse.ArgumentParser()
    parser.add_argument(
        "--det_model",
        required=False,
        default=os.path.join(current_path, 'det/ch/ch_PP-OCRv4_det_infer'),
        help="Path of Detection model of PPOCR."
    )
    parser.add_argument(
        "--cls_model",
        # required=True,
        default=os.path.join(current_path, 'cls/ch_ppocr_mobile_v2.0_cls_infer'),
        help="Path of Classification model of PPOCR.")
    parser.add_argument(
        "--rec_model",
        required=False,
        default=os.path.join(current_path, 'rec/ch/ch_PP-OCRv4_rec_infer/'),  # the server model can also be tried here
        help="Path of Recognization model of PPOCR.")
    parser.add_argument(
        "--rec_label_file",
        required=False,
        default=os.path.join(current_path, 'ppocr_keys_v1.txt'),
        help="Path of Recognization model of PPOCR.")
    parser.add_argument(
        "--image", type=str, required=False,
        default='/users/yuanxu/codes/walle/data/math/simplest.png',
        help="Path of test image file.")
    parser.add_argument(
        "--device",
        type=str,
        default='cpu',
        help="Type of inference device, support 'cpu' or 'gpu'.")
    parser.add_argument(
        "--device_id",
        type=int,
        default=0,
        help="Define which GPU card used to run model.")
    parser.add_argument(
        "--cls_bs",
        type=int,
        default=1,
        help="Classification model inference batch size.")
    parser.add_argument(
        "--rec_bs",
        type=int,
        default=6,
        help="Recognition model inference batch size")
    parser.add_argument(
        "--backend",
        type=str,
        default="default",
        help="Type of inference backend, support ort/trt/paddle/openvino, default 'openvino' for cpu, 'tensorrt' for gpu"
    )

    parser.add_argument(
        '--port',
        type=int,
        default=7001,
        help="Port of inference server."
    )

    parser.add_argument(
        '--mobile',
        type=_str2bool,
        nargs='?',      # allow a bare `--mobile` flag...
        const=True,     # ...which means True
        default=False,
        help="使用mobile版模型"
    )

    return parser.parse_args()


def build_option(args):
    """Build the fastdeploy RuntimeOption for each of the det/cls/rec models.

    The requested device and backend are applied uniformly to all three
    options; TensorRT-based backends additionally get dynamic input shapes
    and cache-file settings.

    :param args: parsed CLI namespace (reads device, device_id, backend,
        cls_bs, rec_bs, det_model, cls_model, rec_model).
    :return: (det_option, cls_option, rec_option) tuple.
    """
    det_option = fd.RuntimeOption()
    cls_option = fd.RuntimeOption()
    rec_option = fd.RuntimeOption()
    all_options = (det_option, cls_option, rec_option)

    device = args.device.lower()
    backend = args.backend.lower()

    if device == "gpu":
        for option in all_options:
            option.use_gpu(args.device_id)

    def _set_dynamic_shapes():
        # Shared by the trt and pptrt backends.
        # Detection side lengths should be multiples of 32; the shapes below
        # are the recommended (min, opt, max) ranges.
        det_option.set_trt_input_shape("x", [1, 3, 64, 64], [1, 3, 640, 640],
                                       [1, 3, 960, 960])
        cls_option.set_trt_input_shape("x", [1, 3, 48, 10],
                                       [args.cls_bs, 3, 48, 320],
                                       [args.cls_bs, 3, 48, 1024])
        rec_option.set_trt_input_shape("x", [1, 3, 48, 10],
                                       [args.rec_bs, 3, 48, 320],
                                       [args.rec_bs, 3, 48, 2304])

    if backend == "trt":
        assert device == "gpu", "TensorRT backend require inference on device GPU."
        for option in all_options:
            option.use_trt_backend()
        _set_dynamic_shapes()
        # Persist the built TRT engines to disk to skip rebuilding next run.
        det_option.set_trt_cache_file(args.det_model + "/det_trt_cache.trt")
        cls_option.set_trt_cache_file(args.cls_model + "/cls_trt_cache.trt")
        rec_option.set_trt_cache_file(args.rec_model + "/rec_trt_cache.trt")

    elif backend == "pptrt":
        assert device == "gpu", "Paddle-TensorRT backend require inference on device GPU."
        for option in all_options:
            option.use_paddle_infer_backend()
            option.paddle_infer_option.collect_trt_shape = True
            option.paddle_infer_option.enable_trt = True
        _set_dynamic_shapes()
        # Paddle-TRT caches per model directory.
        det_option.set_trt_cache_file(args.det_model)
        cls_option.set_trt_cache_file(args.cls_model)
        rec_option.set_trt_cache_file(args.rec_model)

    elif backend == "ort":
        for option in all_options:
            option.use_ort_backend()

    elif backend == "paddle":
        for option in all_options:
            option.use_paddle_infer_backend()

    elif backend == "openvino":
        assert device == "cpu", "OpenVINO backend require inference on device CPU."
        for option in all_options:
            option.use_openvino_backend()

    elif backend == "pplite":
        assert device == "cpu", "Paddle Lite backend require inference on device CPU."
        for option in all_options:
            option.use_lite_backend()

    return det_option, cls_option, rec_option


# Module-level initialization: parse CLI args, fetch model files if needed,
# and build the detector / classifier / recognizer pipeline once at import.
args = parse_arguments()
# args.backend = 'ort'
if args.mobile:
    print('args.mobile', '====================')
    # The mobile bundle is downloaded lazily the first time it is requested.
    if not os.path.exists('workspace/mobile'):
        cos.download(mobile_model_file, os.path.join('workspace', 'paddle_mobile.zip'), unzip=True)
    det_model_file = os.path.join(current_path, 'mobile/text_detection_module', "inference.pdmodel")
    det_params_file = os.path.join(current_path, 'mobile/text_detection_module', "inference.pdiparams")

    # The mobile bundle ships no orientation classifier.
    cls_model_file = None
    cls_params_file = None
    rec_model_file = os.path.join(current_path, 'mobile/text_recognition_module', "inference.pdmodel")
    rec_params_file = os.path.join(current_path, 'mobile/text_recognition_module', "inference.pdiparams")
    rec_label_file = os.path.join(current_path, 'mobile/ch_doc_dict.txt')  # args.rec_label_file
else:

    # Server models: paths come straight from the CLI defaults/overrides.
    det_model_file = os.path.join(args.det_model, "inference.pdmodel")
    det_params_file = os.path.join(args.det_model, "inference.pdiparams")

    cls_model_file = os.path.join(args.cls_model, "inference.pdmodel")
    cls_params_file = os.path.join(args.cls_model, "inference.pdiparams")

    rec_model_file = os.path.join(args.rec_model, "inference.pdmodel")
    rec_params_file = os.path.join(args.rec_model, "inference.pdiparams")
    rec_label_file = args.rec_label_file

det_option, cls_option, rec_option = build_option(args)

det_model = fd.vision.ocr.DBDetector(
    det_model_file, det_params_file, runtime_option=det_option)

if cls_model_file:
    cls_model = fd.vision.ocr.Classifier(
        cls_model_file, cls_params_file, runtime_option=cls_option)
else:
    cls_model = None

rec_model = fd.vision.ocr.Recognizer(
    rec_model_file, rec_params_file, rec_label_file, runtime_option=rec_option)

# Parameters settings for pre and post processing of Det/Cls/Rec Models.
# All parameters are set to default values.
det_model.preprocessor.max_side_len = 960
det_model.postprocessor.det_db_thresh = 0.3
det_model.postprocessor.det_db_box_thresh = 0.6
det_model.postprocessor.det_db_unclip_ratio = 1.5
det_model.postprocessor.det_db_score_mode = "fast"
det_model.postprocessor.use_dilation = False
if cls_model:
    cls_model.postprocessor.cls_thresh = 0.9

# Create PP-OCRv3, if cls_model is not needed, just set cls_model=None .
# cls_model = None
# NOTE(review): cls_model is constructed above but deliberately NOT passed to
# the pipeline (cls_model=None) — confirm whether the classifier should be
# wired in. Also the variable is named ppocr_v3 while PPOCRv4 is used.
ppocr_v3 = fd.vision.ocr.PPOCRv4(
    det_model=det_model, cls_model=None, rec_model=rec_model)

# Set inference batch size for cls model and rec model, the value could be -1 and 1 to positive infinity.
# When inference batch size is set to -1, it means that the inference batch size
# of the cls and rec models will be the same as the number of boxes detected by the det model.
ppocr_v3.cls_batch_size = args.cls_bs
ppocr_v3.rec_batch_size = args.rec_bs


def run_paddle_ocr(img, column_num=None, page_num=None, line_num=None, offset_left=0, offset_top=0):
    """Run PP-OCR on an image and return OpticalLine entries, top to bottom.

    :param img: image path (str) or an already-decoded image array.
    :param column_num: layout column index recorded on each line.
    :param page_num: page number recorded on each line.
    :param line_num: line index recorded on each line.
    :param offset_left: x offset added to every box (for cropped columns).
    :param offset_top: y offset added to every box (for cropped columns).
    :return: list[OpticalLine] sorted by top coordinate.
    """
    image = cv2.imread(img) if isinstance(img, str) else img
    result = ppocr_v3.predict(image)

    lines = []
    # Since 24.5.29: very small images may yield text but no detection boxes;
    # in that case treat the whole image as a single line.
    if result.text and not result.boxes:
        img_height, img_width = image.shape[:2]
        lines.append(OpticalLine(
            text=result.text[0],
            page_num=page_num,
            column_num=column_num,
            line_num=line_num,
            box=(0, 0, img_width, img_height),
            cls_score=0,
            rec_score=result.rec_scores[0],
        ))
        return lines

    for quad, text, rec_score in zip(result.boxes, result.text, result.rec_scores):
        # quad is 8 flat coords: (x,y) for left-top, right-top,
        # right-bottom, left-bottom corners, in that order.
        left = min(quad[0], quad[6])            # leftmost of the two left corners
        top = min(quad[1], quad[3])             # topmost of the two top corners
        box_width = max(quad[2], quad[4]) - left
        box_height = max(quad[5], quad[7]) - top
        lines.append(OpticalLine(
            text=text,
            page_num=page_num,
            column_num=column_num,
            line_num=line_num,
            box=(left + offset_left, top + offset_top, box_width, box_height),
            cls_score=-1,
            rec_score=rec_score,
        ))
    lines.sort(key=lambda entry: entry.box[1])  # top-to-bottom reading order
    return lines


def run_paddle_ocr_with_layout(layout, img):
    """Run OCR per layout column of *img* and merge the results.

    :param layout: dict with a 'columns' list of (x, y, w, h) boxes, or None
        to OCR the whole image in one pass.
    :param img: decoded image array (H, W[, C]) — the layout path indexes
        img.shape, so a file path is not accepted here.
    :return: list[OpticalLine]; a line with error=1 (and box=None) is
        appended for any column whose OCR raised.
    """
    EXT_OFFSET = 5  # pad each column box a few pixels so glyphs aren't clipped
    # Bug fix: the original tested `result is None` immediately after
    # `result = []`, which made the whole-image fallback unreachable; the
    # intent was clearly to fall back when no layout is supplied.
    if layout is None:
        return run_paddle_ocr(img)
    result = []
    img_height, img_width = img.shape[:2]
    for column_num, column in enumerate(layout['columns']):
        x, y, w, h = (int(v) for v in column)
        # Expand the box by EXT_OFFSET on every side, clamped to the image.
        x = max(x - EXT_OFFSET, 0)
        y = max(y - EXT_OFFSET, 0)
        w = min(w + EXT_OFFSET * 2, img_width - x)
        h = min(h + EXT_OFFSET * 2, img_height - y)
        column_img = img[y:y + h, x:x + w]

        try:
            result.extend(run_paddle_ocr(
                column_img.copy(),
                column_num=column_num,
                offset_left=x,
                offset_top=y,
            ))
        except Exception as e:
            # Best-effort: record the failure as an error line rather than
            # aborting the remaining columns.
            result.append(OpticalLine(
                text=str(e),
                page_num=None,
                column_num=column_num,
                box=None,
                error=1,
            ))
    return result


def text_predict(img, short_size):
    """Run PP-OCR and return raw (corner-points, text, score) triples.

    :param img: image convertible via np.array (e.g. a PIL image or ndarray).
    :param short_size: unused; kept for backward-compatible interface.
    :return: list of [4x2 np.ndarray of corner points, text, rec_score].
    """
    numpy_image = np.array(img)
    result = ppocr_v3.predict(numpy_image)
    data = []
    # Cleanup: the original also computed left/top/width/height and pt1..pt4
    # that were never used, and printed debug output per box — removed.
    for box, text, rec_score in zip(result.boxes, result.text, result.rec_scores):
        corners = np.array([
            [box[0], box[1]],  # left top
            [box[2], box[3]],  # right top
            [box[4], box[5]],  # right bottom
            [box[6], box[7]],  # left bottom
        ])
        data.append([corners, text, rec_score])
    return data


if __name__ == '__main__':
    # Smoke test: OCR a local sample image and dump the raw prediction.
    sample = '1.jpg'
    frame = cv2.imread(sample) if isinstance(sample, str) else sample
    output = ppocr_v3.predict(frame)
    print(output)
