import cv2
import numpy as np
import math
import os.path
import string
import argparse
import onnxruntime as ort


class TextRecognizer(object):
    """CRNN-based text recognizer that runs an ONNX model via onnxruntime.

    Decoding of the raw network output into a string is delegated to
    CTCLabelDecode (defined later in this file).
    """

    def __init__(self, model_path, char_dict_path):
        """
        Args:
            model_path: path to the ONNX recognition model.
            char_dict_path: path to the character-dictionary text file
                (one character per line).
        """
        # (C, H, W) input layout expected by the network.
        self.rec_image_shape = [3, 32, 320]
        self.character_type = 'en'
        self.rec_algorithm = 'CRNN'
        self.postprocess_op = CTCLabelDecode(
            character_dict_path=char_dict_path,
            character_type=self.character_type,
            use_space_char=True
        )
        self.predictor = ort.InferenceSession(model_path)
        self.input_name = self.predictor.get_inputs()[0].name
        # None asks onnxruntime to return every model output.
        self.output_tensors = None

    def resize_norm_img(self, img, max_wh_ratio):
        """Resize ``img`` to the model input height, normalize to [-1, 1],
        and right-pad with zeros up to the model input width.

        Args:
            img: HxWxC image; C must match rec_image_shape[0].
            max_wh_ratio: width/height ratio used to widen the canvas for
                'ch' models; ignored by the fixed-width 'en' model used here.

        Returns:
            float32 array of shape (C, imgH, imgW).
        """
        imgC, imgH, imgW = self.rec_image_shape
        assert imgC == img.shape[2]
        if self.character_type == "ch":
            # Chinese models scale the canvas width with the aspect ratio;
            # the English model keeps the fixed 320-pixel width.
            imgW = int(32 * max_wh_ratio)
        h, w = img.shape[:2]
        ratio = w / float(h)
        # Preserve the aspect ratio but never exceed the canvas width.
        resized_w = min(imgW, int(math.ceil(imgH * ratio)))
        resized_image = cv2.resize(img, (resized_w, imgH))
        resized_image = resized_image.astype('float32')
        # HWC -> CHW, scale to [0, 1], then shift/scale to [-1, 1].
        resized_image = resized_image.transpose((2, 0, 1)) / 255
        resized_image -= 0.5
        resized_image /= 0.5
        padding_im = np.zeros((imgC, imgH, imgW), dtype=np.float32)
        padding_im[:, :, 0:resized_w] = resized_image
        return padding_im

    def process(self, image):
        """Run recognition on a single image.

        Args:
            image: HxWxC image as returned by cv2.imread.

        Returns:
            (text, score) for the decoded content of ``image``.
        """
        h, w = image.shape[0:2]
        wh_ratio = w * 1.0 / h

        norm_img = self.resize_norm_img(image, wh_ratio)
        # Add the batch dimension expected by the model.
        norm_img = norm_img[np.newaxis, :]

        outputs = self.predictor.run(self.output_tensors,
                                     {self.input_name: norm_img})
        preds = outputs[0]
        rec_result = self.postprocess_op(preds)
        text, score = rec_result[0]
        return text, score


class BaseRecLabelDecode(object):
    """ Convert between text-label and text-index """

    def __init__(self,
                 character_dict_path=None,
                 character_type='ch',
                 use_space_char=False):
        """
        Args:
            character_dict_path: text file with one character per line;
                required for every supported character_type.
            character_type: language key; must be one of the supported types.
            use_space_char: if True, append a space to the character set.
        """
        support_character_type = [
            'ch', 'en', 'EN_symbol', 'french', 'german', 'japan', 'korean',
            'it', 'xi', 'pu', 'ru', 'ar', 'ta', 'ug', 'fa', 'ur', 'rs', 'oc',
            'rsc', 'bg', 'uk', 'be', 'te', 'ka', 'chinese_cht', 'hi', 'mr',
            'ne', 'EN'
        ]
        assert character_type in support_character_type, "Only {} are supported now but get {}".format(
            support_character_type, character_type)

        # Sequence markers used by attention-style decoders; unused by CTC.
        self.beg_str = "sos"
        self.end_str = "eos"

        if character_type in support_character_type:
            self.character_str = ""
            assert character_dict_path is not None, "character_dict_path should not be None when character_type is {}".format(
                character_type)
            # Read as bytes and decode explicitly so the dictionary file is
            # always interpreted as UTF-8 regardless of the locale.
            with open(character_dict_path, "rb") as fin:
                lines = fin.readlines()
                for line in lines:
                    line = line.decode('utf-8').strip("\n").strip("\r\n")
                    self.character_str += line
            if use_space_char:
                self.character_str += " "
            dict_character = list(self.character_str)

        else:
            raise NotImplementedError
        self.character_type = character_type
        # Subclasses may prepend/append special tokens (e.g. CTC blank).
        dict_character = self.add_special_char(dict_character)
        self.dict = {}
        for i, char in enumerate(dict_character):
            self.dict[char] = i
        self.character = dict_character

    def add_special_char(self, dict_character):
        # Hook for subclasses; the base class adds nothing.
        return dict_character

    def decode(self, text_index, text_prob=None, is_remove_duplicate=False):
        """Convert a batch of index sequences into (text, confidence) pairs.

        Args:
            text_index: batch of per-timestep class indices.
            text_prob: optional matching per-timestep probabilities; when
                None every kept character gets confidence 1.
            is_remove_duplicate: collapse consecutive repeated indices
                (CTC greedy decoding); only used at predict time.

        Returns:
            List of (text, mean_confidence) tuples, one per batch item.
        """
        result_list = []
        ignored_tokens = self.get_ignored_tokens()
        batch_size = len(text_index)
        for batch_idx in range(batch_size):
            char_list = []
            conf_list = []
            for idx in range(len(text_index[batch_idx])):
                if text_index[batch_idx][idx] in ignored_tokens:
                    continue
                if is_remove_duplicate:
                    # Compare against the raw previous timestep, so a blank
                    # between two equal indices still separates them.
                    if idx > 0 and text_index[batch_idx][idx - 1] == text_index[
                            batch_idx][idx]:
                        continue
                char_list.append(self.character[int(text_index[batch_idx][
                    idx])])
                if text_prob is not None:
                    conf_list.append(text_prob[batch_idx][idx])
                else:
                    conf_list.append(1)
            text = ''.join(char_list)
            # Guard the all-blank case: np.mean([]) would return NaN and
            # emit a RuntimeWarning; report confidence 0 instead.
            score = float(np.mean(conf_list)) if conf_list else 0.0
            result_list.append((text, score))
        return result_list

    def get_ignored_tokens(self):
        return [0]  # for ctc blank


class CTCLabelDecode(BaseRecLabelDecode):
    """CTC decoder: greedy per-timestep argmax followed by blank removal
    and collapsing of consecutive duplicates (handled in the base class)."""

    def __init__(self,
                 character_dict_path=None,
                 character_type='ch',
                 use_space_char=False,
                 **kwargs):
        super(CTCLabelDecode, self).__init__(character_dict_path,
                                             character_type, use_space_char)

    def __call__(self, preds, label=None, *args, **kwargs):
        # Greedy decoding: pick the best class id and its probability at
        # every timestep, then let the base class turn indices into text.
        best_ids = preds.argmax(axis=2)
        best_probs = preds.max(axis=2)
        decoded = self.decode(best_ids, best_probs, is_remove_duplicate=True)
        if label is None:
            return decoded
        return decoded, self.decode(label)

    def add_special_char(self, dict_character):
        # Index 0 is reserved for the CTC blank token.
        return ['blank'] + dict_character


def parse_args():
    """Parse the command line; returns a namespace with ``image`` set."""
    arg_parser = argparse.ArgumentParser()
    arg_parser.add_argument('-i', '--image', type=str, help='input image')
    return arg_parser.parse_args()


if __name__ == "__main__":
    # Wire up the previously-unused CLI flag; fall back to the original
    # hard-coded demo image when no -i/--image is given.
    args = parse_args()
    file_path = args.image if args.image else r"C:\Users\ASUS\Desktop\1.png"
    model_path = os.path.join('ocr', 'ocr.onnx')
    char_dict_path = os.path.join('ocr', 'key.txt')
    text_recognizer = TextRecognizer(model_path, char_dict_path)
    if os.path.isfile(file_path):
        image = cv2.imread(file_path)
        # Re-tile three vertically stacked 50x150 rows into a single
        # 50x450 strip so the recognizer sees one line of text.
        sub1 = image[0:50, 0:150]
        sub2 = image[50:100, 0:150]
        sub3 = image[100:150, 0:150]
        final_matrix = np.zeros((50, 450, 3), np.uint8)
        final_matrix[0:50, 0:150] = sub1
        final_matrix[0:50, 150:300] = sub2
        final_matrix[0:50, 300:450] = sub3
        text, score = text_recognizer.process(final_matrix)
        print(text, score)
    elif os.path.isdir(file_path):
        # Directory mode: recognize every file inside, in sorted order.
        import glob
        image_list = sorted(glob.glob(os.path.join(file_path, '*.*')))
        for i, image_path in enumerate(image_list, start=1):
            image = cv2.imread(image_path)
            text, score = text_recognizer.process(image)
            print(i, text, score)
