import cv2
import onnxruntime
import numpy as np
import pyclipper
from shapely.geometry import Polygon
from typing import List, Dict, Optional
import os

# Resolve default model/dictionary locations relative to this file so the
# package works regardless of the caller's current working directory.
base_dir = os.path.dirname(os.path.abspath(__file__))

# Default ONNX model files (detection + recognition) and the character
# dictionary used by the CTC decoder.
det_model = os.path.join(base_dir, 'model/det_model.onnx')
rec_model = os.path.join(base_dir, 'model/rec_model.onnx')
key_path = os.path.join(base_dir, 'characters/ppocr_keys_v1.txt')

class Singleton(type):
    """Metaclass caching exactly one instance per class.

    The first call's arguments construct the instance; arguments on any
    later call are ignored and the cached instance is returned.
    """
    _instances = {}

    def __call__(cls, *args, **kwargs):
        # EAFP: the cache hit is the common case after warm-up.
        try:
            return Singleton._instances[cls]
        except KeyError:
            instance = super().__call__(*args, **kwargs)
            Singleton._instances[cls] = instance
            return instance

class TextDetector(metaclass=Singleton):
    """DB-style text detector backed by an ONNX model.

    ``__call__`` accepts one HWC BGR image or a list of them and returns,
    per image, an array of 4-point text boxes in original-image coordinates.
    """

    def __init__(self, model_path: str):
        self.thresh = 0.3           # binarization threshold on the probability map
        self.unclip_ratio = 1.6     # expansion factor used when unclipping boxes
        self.box_thresh = 0.5       # minimum mean score for a candidate box to survive
        self.max_candidates = 1000  # hard cap on contours examined per image
        self.m_l = 1920             # maximum side length fed to the model
        self.min_size = 3           # minimum short side (px) of a candidate box
        self.model_path = model_path
        # CUDA provider is tried first; onnxruntime falls back to CPU if absent.
        self.sess = onnxruntime.InferenceSession(
            self.model_path, providers=['CUDAExecutionProvider', 'CPUExecutionProvider'])

    def preprocess(self, img):
        """Normalize with ImageNet mean/std and convert HWC uint8 -> CHW float32."""
        std = np.array([0.229, 0.224, 0.225]).reshape((1, 1, 3)).astype('float32')
        mean = np.array([0.485, 0.456, 0.406]).reshape((1, 1, 3)).astype('float32')
        img = (img.astype(np.float32) / 255 - mean) / std
        return img.transpose((2, 0, 1))

    def get_mini_boxes(self, contour):
        """Return the minimum-area rectangle of ``contour`` as four corners
        ordered [top-left, top-right, bottom-right, bottom-left], plus the
        rectangle's shorter side length."""
        bounding_box = cv2.minAreaRect(contour)
        # Sort corners by x; within each left/right pair, the smaller y is on top.
        points = sorted(list(cv2.boxPoints(bounding_box)), key=lambda x: x[0])
        if points[1][1] > points[0][1]:
            index_1, index_4 = 0, 1
        else:
            index_1, index_4 = 1, 0
        if points[3][1] > points[2][1]:
            index_2, index_3 = 2, 3
        else:
            index_2, index_3 = 3, 2
        box = [points[index_1], points[index_2], points[index_3], points[index_4]]
        return box, min(bounding_box[1])

    def pre_resize(self, img):
        """Resize ``img`` so both sides are multiples of 32 (capped at ``self.m_l``).

        Returns ``(resized_img, ratio_h, ratio_w)`` where the ratios map model
        coordinates back to the original image.
        """
        h, w = img.shape[:2]
        m_l = min((max(128, h, w) // 32 + 1) * 32, self.m_l)
        if min(h, w) < 128:
            # Small images are pasted onto a gray canvas instead of being
            # rescaled, so coordinates are unchanged (ratios are 1).
            padimg = np.ones((m_l, m_l, 3), np.uint8) * 127
            padimg[:h, :w, :] = img
            return padimg, 1, 1
        if h > w:
            n_w = round(w / h * m_l) // 32 * 32
            return cv2.resize(img, (n_w, m_l)), h / m_l, w / n_w
        else:
            n_h = round(h / w * m_l) // 32 * 32
            return cv2.resize(img, (m_l, n_h)), h / n_h, w / m_l

    def unclip(self, box):
        """Expand ``box`` outward by ``area * unclip_ratio / perimeter`` (the
        DB unclip rule) using a polygon offset."""
        poly = Polygon(box)
        distance = poly.area * self.unclip_ratio / poly.length
        offset = pyclipper.PyclipperOffset()
        offset.AddPath(box, pyclipper.JT_ROUND, pyclipper.ET_CLOSEDPOLYGON)
        return np.array(offset.Execute(distance))

    def box_score_fast(self, bitmap, _box):
        """Mean of ``bitmap`` inside polygon ``_box``, computed only over the
        polygon's bounding rectangle for speed."""
        h, w = bitmap.shape[:2]
        box = _box.copy()
        xmin = np.clip(np.floor(box[:, 0].min()).astype(np.int32), 0, w - 1)
        xmax = np.clip(np.ceil(box[:, 0].max()).astype(np.int32), 0, w - 1)
        ymin = np.clip(np.floor(box[:, 1].min()).astype(np.int32), 0, h - 1)
        ymax = np.clip(np.ceil(box[:, 1].max()).astype(np.int32), 0, h - 1)
        mask = np.zeros((ymax - ymin + 1, xmax - xmin + 1), dtype=np.uint8)
        # Shift polygon into the mask's local coordinate frame.
        box[:, 0] = box[:, 0] - xmin
        box[:, 1] = box[:, 1] - ymin
        cv2.fillPoly(mask, box.reshape(1, -1, 2).astype(np.int32), 1)
        return cv2.mean(bitmap[ymin:ymax + 1, xmin:xmax + 1], mask)[0]

    def order_points_clockwise(self, pts):
        """Order 4 points as [top-left, top-right, bottom-right, bottom-left]."""
        xSorted = pts[np.argsort(pts[:, 0]), :]
        leftMost = xSorted[:2, :]
        rightMost = xSorted[2:, :]
        tl, bl = leftMost[np.argsort(leftMost[:, 1]), :]
        tr, br = rightMost[np.argsort(rightMost[:, 1]), :]
        return np.array([tl, tr, br, bl], dtype="float32")

    def clip_det_res(self, points, img_height, img_width):
        """Clamp every box corner into [0, img_width-1] x [0, img_height-1]."""
        for pno in range(points.shape[0]):
            points[pno, 0] = int(min(max(points[pno, 0], 0), img_width - 1))
            points[pno, 1] = int(min(max(points[pno, 1], 0), img_height - 1))
        return points

    def postprocess(self, pred, w_r, h_r, ori_h, ori_w):
        """Turn a probability map ``pred`` (1, H, W) into clockwise 4-point
        boxes scaled back to the original image size ``ori_h`` x ``ori_w``."""
        pred = pred[0, :, :]
        segmentation = pred > self.thresh
        mask = segmentation.astype(np.uint8) * 255
        contours, _ = cv2.findContours(
            mask, cv2.RETR_LIST, cv2.CHAIN_APPROX_SIMPLE)
        boxes = []
        for contour in contours[:self.max_candidates]:
            points, sside = self.get_mini_boxes(contour)
            if sside < self.min_size:
                continue
            points = np.array(points)
            score = self.box_score_fast(pred, points.reshape(-1, 2))
            if score < self.box_thresh:
                continue
            box = self.unclip(points).reshape(-1, 1, 2)
            box, sside = self.get_mini_boxes(box)
            if sside < self.min_size + 2:
                continue
            box = np.array(box)
            # Scale from model space back to the original image.
            box[:, 0] = box[:, 0] * w_r
            box[:, 1] = box[:, 1] * h_r
            # int32 instead of int16: coordinates of very large originals
            # could overflow int16. order_points_clockwise converts to
            # float32 afterwards either way.
            box = box.astype(np.int32)
            box = self.order_points_clockwise(box)
            box = self.clip_det_res(box, ori_h, ori_w)
            rect_width = int(np.linalg.norm(box[0] - box[1]))
            rect_height = int(np.linalg.norm(box[0] - box[3]))
            if rect_width <= 3 or rect_height <= 3:
                continue
            boxes.append(box)
        return np.array(boxes)

    def __call__(self, img):
        """Detect text in one image or a list of images.

        Returns a list with one entry per input image, each an array of
        4-point boxes (possibly empty).
        """
        if not isinstance(img, list):
            img = [img]

        batch_imgs, ratio_hs, ratio_ws, ori_hs, ori_ws = [], [], [], [], []
        for im in img:
            ori_h, ori_w = im.shape[:2]
            ori_hs.append(ori_h)
            ori_ws.append(ori_w)
            resized, ratio_h, ratio_w = self.pre_resize(im)
            batch_imgs.append(self.preprocess(resized))
            ratio_hs.append(ratio_h)
            ratio_ws.append(ratio_w)

        # NOTE(review): stacking assumes all resized images share one shape;
        # mixed shapes would yield a ragged object array — confirm callers
        # always pass same-size batches (OCRBatch pads before calling).
        batch = np.array(batch_imgs)
        if batch.ndim != 4:
            # Fix: np.ndarray has no .unsqueeze (that is a torch method);
            # add the missing batch axis with numpy instead.
            batch = np.expand_dims(batch, 0)
        preds = self.sess.run(None, {'x': batch})[0]
        result = []
        for i, pred in enumerate(preds):
            result.append(self.postprocess(
                pred, ratio_ws[i], ratio_hs[i], ori_hs[i], ori_ws[i]))
        return result

class TextRecognizer(metaclass=Singleton):
    """CTC-based text recognizer backed by an ONNX model.

    ``__call__`` takes a list of cropped text-line images (HWC BGR) and
    returns the recognized string for each crop, in input order.
    """

    def __init__(self, model_path: str, key_path: str = 'model/ppocr_keys_v1.txt', imageH: int = 48):
        self.batch_num = 16   # crops per inference batch
        self.imgC = 3         # input channels expected by the model
        self.imgH = imageH    # input height expected by the model (32 for mobile models)
        self.character_dict_path = key_path
        self.model_path = model_path
        self.character = self.gen_character()
        self.sess = onnxruntime.InferenceSession(
            self.model_path, providers=['CUDAExecutionProvider', 'CPUExecutionProvider'])

    def gen_character(self):
        """Load the character dictionary (one character per line), append a
        space, and prepend the CTC 'blank' token at index 0."""
        character_str = []
        with open(self.character_dict_path, "rb") as fin:
            for line in fin.readlines():
                # Decode each line and strip the trailing newline characters.
                character_str.append(line.decode('utf-8').strip("\n").strip("\r\n"))
        character_str.append(" ")
        return ['blank'] + character_str

    def resize_norm_img(self, img, max_wh_ratio):
        """Resize a crop to the model height, normalize to [-1, 1], and pad
        the width up to ``imgH * max_wh_ratio`` so batch members share a shape."""
        # Fix: derive the target width from the configured input height
        # instead of a hard-coded 48, so 32-px mobile models pad correctly.
        imgW = int(self.imgH * max_wh_ratio)
        h, w = img.shape[:2]
        ratio = w / float(h)
        # Preserve aspect ratio; keep at least 1 px so cv2.resize stays valid.
        resized_w = max(1, min(imgW, int(self.imgH * ratio)))
        resized_image = cv2.resize(img, (resized_w, self.imgH))
        resized_image = resized_image.astype('float32')
        # Normalize: /255 to [0, 1], then shift/scale to [-1, 1]; HWC -> CHW.
        resized_image = resized_image.transpose((2, 0, 1)) / 255
        resized_image -= 0.5
        resized_image /= 0.5
        # Right-pad with zeros to the common batch width.
        padding_im = np.zeros((self.imgC, self.imgH, imgW), dtype=np.float32)
        padding_im[:, :, 0:resized_w] = resized_image
        return padding_im

    def ctc_postprocess(self, preds):
        """Greedy CTC decoding: argmax per time step, drop blanks (index 0),
        collapse consecutive repeats, then map indices to characters."""
        text_index = preds.argmax(axis=2)
        result_list = []
        ignored_tokens = [0]  # the CTC blank token
        for batch_idx in range(len(text_index)):
            char_list = []
            for idx in range(len(text_index[batch_idx])):
                if text_index[batch_idx][idx] in ignored_tokens:
                    continue
                # CTC rule: consecutive identical symbols decode to one character.
                if idx > 0 and text_index[batch_idx][idx - 1] == text_index[batch_idx][idx]:
                    continue
                char_list.append(self.character[int(text_index[batch_idx][idx])])
            result_list.append(''.join(char_list))
        return result_list

    def __call__(self, img_list):
        """Recognize every crop in ``img_list``; results keep input order."""
        img_num = len(img_list)
        if img_num < 1:
            return []
        # Process crops in ascending aspect-ratio order so each batch needs
        # minimal width padding.
        width_list = [img.shape[1] / float(img.shape[0]) for img in img_list]
        indices = np.argsort(np.array(width_list))
        # Fix: results are plain strings, so initialize with strings (the
        # original seeded entries with [''] lists).
        rec_res = [''] * img_num
        for beg_img_no in range(0, img_num, self.batch_num):
            end_img_no = min(img_num, beg_img_no + self.batch_num)
            # The widest crop in the batch determines the padded width.
            max_wh_ratio = max(
                img_list[indices[ino]].shape[1] / img_list[indices[ino]].shape[0]
                for ino in range(beg_img_no, end_img_no))
            norm_img_batch = np.stack([self.resize_norm_img(
                img_list[indices[ino]], max_wh_ratio) for ino in range(beg_img_no, end_img_no)])
            preds = self.sess.run(None, {'x': norm_img_batch})[0]
            rec_result = self.ctc_postprocess(preds)
            # Scatter batch results back to their original positions.
            for rno in range(len(rec_result)):
                rec_res[indices[beg_img_no + rno]] = rec_result[rno]
        return rec_res

class OCRConfig:
    """Bundle of the filesystem paths an OCRBatch pipeline needs."""

    def __init__(self, det_model_path: str, rec_model_path: str, key_path: str):
        # Paths are stored verbatim; OCRBatch resolves relative paths itself.
        self.det_model_path = det_model_path
        self.rec_model_path = rec_model_path
        self.key_path = key_path

class OCRBatch:
    """End-to-end OCR pipeline: pad to a common size, detect text boxes,
    perspective-crop each box, and recognize the text in every crop."""

    def __init__(self, config: Optional[OCRConfig] = None, mobile_config: Optional[bool] = False):
        if config is None:
            # Default layout: models and keys shipped next to this package.
            package_dir = os.path.dirname(os.path.abspath(__file__))
            config = OCRConfig(
                det_model_path=os.path.join(package_dir, 'det/model.onnx'),
                rec_model_path=os.path.join(package_dir, 'rec/model.onnx'),
                key_path=os.path.join(package_dir, 'ppocr_keys_v1.txt')
            )
        else:
            # Resolve user-supplied relative paths against the working
            # directory and fail early if any file is missing.
            cwd = os.getcwd()
            if not os.path.isabs(config.det_model_path):
                config.det_model_path = os.path.abspath(os.path.join(cwd, config.det_model_path))
            if not os.path.isabs(config.rec_model_path):
                config.rec_model_path = os.path.abspath(os.path.join(cwd, config.rec_model_path))
            if not os.path.isabs(config.key_path):
                config.key_path = os.path.abspath(os.path.join(cwd, config.key_path))
            for path in [config.det_model_path, config.rec_model_path, config.key_path]:
                if not os.path.exists(path):
                    raise FileNotFoundError(f"文件不存在: {path}")

        self.text_detector = TextDetector(config.det_model_path)
        # Mobile recognition models take 32-px-high inputs; server models 48.
        if mobile_config:
            self.text_recognizer = TextRecognizer(config.rec_model_path, config.key_path, 32)
        else:
            self.text_recognizer = TextRecognizer(config.rec_model_path, config.key_path)

    def sorted_boxes(self, dt_boxes):
        """Order boxes top-to-bottom, then left-to-right within a ~10 px row."""
        num_boxes = dt_boxes.shape[0]
        _boxes = sorted(dt_boxes, key=lambda x: (x[0][1], x[0][0]))
        # Single adjacent-swap pass: boxes whose top-left y values are within
        # 10 px are treated as the same visual line and re-ordered by x.
        for i in range(num_boxes - 1):
            if abs(_boxes[i + 1][0][1] - _boxes[i][0][1]) < 10 and \
                    (_boxes[i + 1][0][0] < _boxes[i][0][0]):
                _boxes[i], _boxes[i + 1] = _boxes[i + 1], _boxes[i]
        return _boxes

    def get_rotate_crop_image(self, img, points):
        """Perspective-crop the quadrilateral ``points`` (4x2 float32,
        clockwise from top-left) out of ``img`` into an upright rectangle."""
        img_crop_width = int(max(np.linalg.norm(points[0] - points[1]),
                                 np.linalg.norm(points[2] - points[3])))
        img_crop_height = int(max(np.linalg.norm(points[0] - points[3]),
                                  np.linalg.norm(points[1] - points[2])))
        pts_std = np.float32([[0, 0], [img_crop_width, 0],
                             [img_crop_width, img_crop_height], [0, img_crop_height]])
        M = cv2.getPerspectiveTransform(points, pts_std)
        return cv2.warpPerspective(
            img, M, (img_crop_width, img_crop_height),
            borderMode=cv2.BORDER_REPLICATE, flags=cv2.INTER_CUBIC)

    def __call__(self, imgs):
        """Run detection + recognition.

        Returns a list of ``{'txt': str, 'box': 4x2 list}`` dicts per image;
        a single (non-list) input returns just that image's list.
        """
        if not isinstance(imgs, list):
            imgs = [imgs]

        if len(imgs) == 0:
            return []

        # Pad every image to the common maximum size (centered, white border)
        # so the detector can run them as one batch.
        max_height = max(im.shape[0] for im in imgs)
        max_width = max(im.shape[1] for im in imgs)
        padded_batch = []
        for im in imgs:
            height, width, _ = im.shape  # assumes 3-channel HWC input — TODO confirm
            pad_height = max_height - height
            pad_width = max_width - width
            padded_img = np.pad(
                im,
                ((pad_height // 2, pad_height - pad_height // 2),
                 (pad_width // 2, pad_width - pad_width // 2),
                 (0, 0)),
                mode='constant',
                constant_values=255)
            padded_batch.append(padded_img)
        # NOTE(review): returned boxes are in padded-image coordinates; for
        # mixed-size batches they are offset from the originals by the pad —
        # confirm whether callers expect original-image coordinates.
        imgs = padded_batch

        res_dets = self.text_detector(imgs)

        img_crop_list = []
        box_counts = []  # boxes per image, to split the flat recognition list
        for i, res_det in enumerate(res_dets):
            if res_det is None:
                # Fix: the original also appended [] to `result` here, and the
                # assembly loop below appended again for the same index,
                # duplicating empty entries. Record the count only.
                box_counts.append(0)
                continue
            dt_boxes = self.sorted_boxes(res_det)
            res_dets[i] = dt_boxes
            box_counts.append(len(dt_boxes))
            for bno in range(len(dt_boxes)):
                img_crop_list.append(self.get_rotate_crop_image(imgs[i], dt_boxes[bno]))

        rec_ress = self.text_recognizer(img_crop_list)

        # Re-associate the flat recognition results with their source images.
        result = []
        start_idx = 0
        for i, box_count in enumerate(box_counts):
            res = []
            if box_count > 0:
                cur_rec_res = rec_ress[start_idx:start_idx + box_count]
                for box, rec_result in zip(res_dets[i], cur_rec_res):
                    res.append({'txt': rec_result, 'box': box.tolist()})
                start_idx += box_count
            result.append(res)
        return result[0] if len(imgs) == 1 else result

# Module-level cache for the lazily constructed OCR pipeline.
ort_infer = None

def load_model():
    """Build the shared OCRBatch instance (mobile config) on first use and
    return the cached instance on every later call."""
    global ort_infer
    if ort_infer is not None:
        return ort_infer
    ort_infer = OCRBatch(OCRConfig(det_model, rec_model, key_path), True)
    return ort_infer

def predict(image):
    """OCR a single image; returns that image's list of {'txt', 'box'} dicts."""
    return load_model()([image])

def predict_batch(images):
    """OCR a list of images; returns one result list per image."""
    return load_model()(images)
