import numpy as np
import copy
import cv2
import onnxruntime as rt
import torch
from PIL import Image
from torchvision import transforms
from torch.autograd import Variable
import params

from multiprocessing.dummy import Pool as ThreadPool
# from multiprocessing import Pool as ThreadPool
from functools import partial

def sorted_boxes(dt_boxes):
    """
    Sort text boxes in order from top to bottom, left to right.
    args:
        dt_boxes(array): detected text boxes with shape [n, 4, 2]
    return:
        list of boxes (each with shape [4, 2]) in reading order
    """
    num_boxes = dt_boxes.shape[0]
    # Primary sort by the top-left corner: y first, then x.
    # (Local renamed from `sorted_boxes`, which shadowed this function.)
    _boxes = sorted(dt_boxes, key=lambda box: (box[0][1], box[0][0]))

    # Single fix-up pass: adjacent boxes on roughly the same line
    # (vertical gap under 10px) are put strictly left-to-right.
    for i in range(num_boxes - 1):
        if abs(_boxes[i + 1][0][1] - _boxes[i][0][1]) < 10 and \
                _boxes[i + 1][0][0] < _boxes[i][0][0]:
            _boxes[i], _boxes[i + 1] = _boxes[i + 1], _boxes[i]
    return _boxes

def get_rotate_crop_image(img, points):
    """
    Crop the quadrilateral region described by `points` out of `img` and
    warp it into an axis-aligned rectangle.

    args:
        img(array): source image, H x W x C (a channel axis is required by
            the `[..., :]` crop below)
        points(float32 array): 4 corner points with shape [4, 2], ordered
            top-left, top-right, bottom-right, bottom-left — TODO confirm
            ordering against the detector's output
    return:
        dst_img(array): perspective-rectified crop; rotated 90 degrees when
            it is much taller than wide (assumed to be vertical text)
    """
    # Work on a copy so the caller's array is not mutated by the
    # origin shift below (the original modified `points` in place).
    points = points.copy()
    left = int(np.min(points[:, 0]))
    right = int(np.max(points[:, 0]))
    top = int(np.min(points[:, 1]))
    bottom = int(np.max(points[:, 1]))
    img_crop = img[top:bottom, left:right, :].copy()
    # Shift the corner coordinates into the cropped image's frame.
    points[:, 0] = points[:, 0] - left
    points[:, 1] = points[:, 1] - top
    # Target rectangle size taken from the quad's top and left edge lengths.
    img_crop_width = int(np.linalg.norm(points[0] - points[1]))
    img_crop_height = int(np.linalg.norm(points[0] - points[3]))
    pts_std = np.float32([[0, 0], [img_crop_width, 0],
                          [img_crop_width, img_crop_height],
                          [0, img_crop_height]])

    # Perspective transform mapping the (shifted) quad onto the rectangle.
    M = cv2.getPerspectiveTransform(points, pts_std)
    dst_img = cv2.warpPerspective(
        img_crop,
        M, (img_crop_width, img_crop_height),
        borderMode=cv2.BORDER_REPLICATE)
    dst_img_height, dst_img_width = dst_img.shape[0:2]
    # Tall, narrow crops are assumed to be vertical text; rotate upright.
    if dst_img_height * 1.0 / dst_img_width >= 1.5:
        dst_img = np.rot90(dst_img)
    return dst_img

class resizeNormalize(object):
    """Resize a PIL image to a fixed height, right-pad (or squeeze) to a
    fixed width, and normalize it to a tensor with values in [-1, 1]."""

    def __init__(self, size, interpolation=Image.BILINEAR):
        # size: (target_width, target_height); interpolation: PIL resample mode.
        self.size = size
        self.interpolation = interpolation

    def __call__(self, img):
        target_w, target_h = self.size
        # Scale so the height becomes target_h while keeping the aspect ratio.
        scaled_w = int(img.size[0] / (img.size[1] * 1.0 / target_h))
        img = img.resize((scaled_w, target_h), self.interpolation)
        actual_w, _ = img.size
        if actual_w <= target_w:
            # Paste left-aligned onto a white canvas
            # (single-channel canvas — assumes a mode 'L' input).
            canvas = np.full((target_h, target_w), 255, dtype='uint8')
            canvas[:, :actual_w] = np.array(img)
            img = Image.fromarray(canvas)
        else:
            # Still too wide after scaling: squeeze horizontally to fit.
            img = img.resize((target_w, target_h), self.interpolation)
        # To a [0, 1] tensor, then map to [-1, 1] in place.
        tensor = transforms.ToTensor()(img)
        return tensor.sub_(0.5).div_(0.5)

class strLabelConverter(object):
    """Convert between text strings and CTC label sequences.

    Index 0 is reserved for the CTC 'blank' symbol, so character ``c`` is
    encoded as ``alphabet.index(c) + 1``.
    """

    def __init__(self, alphabet):
        # 'ç' is appended so raw-decoding a blank (index 0 -> alphabet[-1])
        # yields a visible placeholder instead of a real alphabet character.
        self.alphabet = alphabet + 'ç'  # for `-1` index
        # char -> 1-based label index (0 is reserved for the CTC blank).
        self.dict = {char: i + 1 for i, char in enumerate(alphabet)}

    def encode(self, text, depth=0):
        """Encode a batch (or single iterable) of strings into labels.

        args:
            text: iterable of strings
            depth: unused; kept for backward compatibility
        return:
            (labels, lengths): flat IntTensor of 1-based label indices and
            an IntTensor holding each string's length
        raises:
            KeyError: if a character is not in the alphabet
        """
        lengths = []
        labels = []
        # Loop variable renamed from `str`, which shadowed the builtin.
        for item in text:
            lengths.append(len(item))
            labels.extend(self.dict[char] for char in item)
        return torch.IntTensor(labels), torch.IntTensor(lengths)

    def decode(self, t, length, raw=False):
        """Decode label tensor `t` back into string(s).

        With ``raw=False`` applies CTC collapsing: repeated labels are
        merged and blanks (0) are dropped. A one-element `length` decodes a
        single string; several elements decode a list of strings.
        """
        if length.numel() == 1:
            n = int(length[0])
            t = t[:n]
            if raw:
                return ''.join([self.alphabet[i - 1] for i in t])
            char_list = []
            for i in range(n):
                # Keep a label only if it is not blank and not a repeat of
                # the previous time step.
                if t[i] != 0 and (not (i > 0 and t[i - 1] == t[i])):
                    char_list.append(self.alphabet[t[i] - 1])
            return ''.join(char_list)
        # Batch mode: slice the flat tensor by each string's length.
        texts = []
        index = 0
        for i in range(length.numel()):
            n = int(length[i])
            texts.append(self.decode(
                t[index:index + n], torch.IntTensor([n]), raw=raw))
            index += n
        return texts

class CRNNHandle:
    """Thin wrapper around an ONNX CRNN text-recognition model."""

    def __init__(self, model_path):
        # ONNX Runtime inference session loaded from the given model file.
        self.sess = rt.InferenceSession(model_path)

    def predict(self, im):
        """Run text recognition on one PIL image; return the decoded string."""
        # Resize to height 32 keeping the aspect ratio, then normalize
        # to a [-1, 1] tensor.
        ratio = im.size[1] * 1.0 / 32
        target_w = int(im.size[0] / ratio)
        tensor = resizeNormalize((target_w, 32))(im)
        tensor = tensor.to(torch.device('cpu'))
        # Add the batch dimension expected by the model: (1, C, H, W).
        batch = np.array(tensor.view(1, *tensor.size()))
        preds = self.sess.run(["out"], {"input": batch.astype(np.float32)})
        logits = torch.Tensor(preds[0])
        # Greedy decoding: take the most likely class at each time step.
        _, indices = logits.max(2)
        indices = indices.transpose(1, 0).contiguous().view(-1)
        seq_len = Variable(torch.IntTensor([indices.size(0)]))
        converter = strLabelConverter(''.join(params.alphabet))
        return converter.decode(indices.data, seq_len.data, raw=False)

def process(box, img, crnn_handle):
    """Crop one detected text box out of `img`, run CRNN recognition on it,
    and return a result dict — or None when the box is degenerate,
    recognition fails, or the recognized text is empty.

    args:
        box(array): 4x2 corner points of a detected text region
        img(array): full source image (H x W x C)
        crnn_handle: object exposing ``predict(PIL.Image) -> str``
    """
    left = int(np.min(box[:, 0]))
    right = int(np.max(box[:, 0]))
    top = int(np.min(box[:, 1]))
    bottom = int(np.max(box[:, 1]))
    if left == right == top == bottom:
        # Skip degenerate boxes whose coordinates collapse to a single
        # point (e.g. four zeros).
        return None

    tmp_box = copy.deepcopy(box)
    partImg_array = get_rotate_crop_image(img, tmp_box.astype(np.float32))

    # angle_index = angle_handle.predict(partImg_array)  # TODO: detect crop angle

    angle_class = params.lable_map_dict[1]
    rotate_angle = params.rotae_map_dict[angle_class]

    if rotate_angle != 0:
        partImg_array = np.rot90(partImg_array, rotate_angle // 90)

    partImg = Image.fromarray(partImg_array).convert("RGB")
    partImg_ = partImg.convert('L')
    newW, newH = partImg.size
    try:
        crnn_vertical_handle = None  # TODO: handle vertical text
        if crnn_vertical_handle is not None and angle_class in ["shudao", "shuzhen"]:
            simPred = crnn_vertical_handle.predict(partImg_)
        else:
            simPred = crnn_handle.predict(partImg_)  # recognized text
    except Exception:
        # BUG FIX: the original bare `except: pass` fell through and then
        # read the never-assigned `simPred`, raising UnboundLocalError.
        return None

    if simPred.strip() != u'':
        return {'box': tuple(map(tuple, tmp_box)), 'left': left, 'right': right,
                'top': top, 'bottom': bottom, 'cx': 0, 'cy': 0,
                'text': simPred, 'w': newW, 'h': newH, 'degree': 0}
    return None

def crnnRecWithBox(img, boxes_list, visualize=False, index=None):
    """Run CRNN OCR over every detected text box in the image.

    args:
        img(array): full source image
        boxes_list: iterable of 4x2 text boxes
        visualize: unused; kept for backward compatibility
        index: unused; kept for backward compatibility
    return:
        list with one entry per box, in reading order: the dict produced by
        ``process`` (or None for skipped/empty boxes)
    """
    crnn_handle = CRNNHandle(params.crnn_model_path)

    boxes_list = sorted_boxes(np.array(boxes_list))

    # BUG FIX: the original returned the AsyncResult object from
    # `map_async` instead of the results themselves; `pool.map` blocks
    # until all boxes are processed and returns the actual list.
    pool = ThreadPool(processes=8)
    try:
        part_process = partial(process, img=img, crnn_handle=crnn_handle)
        results = pool.map(part_process, boxes_list)
    finally:
        pool.close()
        pool.join()

    return results

if __name__ == '__main__':
    # Smoke test: run the ONNX CRNN model over a small sample of an
    # annotated test set and print ground truth vs. prediction.
    crnn_model_path = '../models/ocr-lstm.onnx'
    crnn_handle = CRNNHandle(crnn_model_path)

    annotation_path = 'F:/laibo/Data/CRNN_data/AllCRNNTestList/annotation.txt'
    lexicon_path = 'F:/laibo/Data/CRNN_data/AllCRNNTestList/lexicon.txt'

    lexicon_index = {}  # lexicon line number -> text entry
    img_gt = {}         # image path -> ground-truth text
    imgs_path = []

    # Lexicon file: one entry per line; the annotation file refers to
    # entries by line number.
    with open(lexicon_path, 'r', encoding='utf-8') as lexicon_file:
        for i, line in enumerate(lexicon_file):
            lexicon_index[i] = line.strip()

    # Annotation file: "<relative image path> <lexicon line number>".
    # FIX: open with an explicit encoding instead of the locale default,
    # matching the lexicon file above.
    with open(annotation_path, 'r', encoding='utf-8') as annotation_file:
        lines = annotation_file.readlines()
        for line in lines[10:20]:  # sample a small slice only
            img_path, label = line.split(' ')
            imgs_path.append(img_path)
            label = int(label.strip())
            img_gt[img_path] = lexicon_index[label]
            print('img_path: {}, gt: {}'.format(img_path, lexicon_index[label]))

    for img_path in imgs_path:
        full_img_path = 'F:/laibo/Data/CRNN_data/AllCRNNTestList/' + img_path
        partImg_array = cv2.imread(full_img_path)
        # FIX: cv2.imread returns None for unreadable paths; skip those
        # instead of crashing inside Image.fromarray.
        if partImg_array is None:
            print('could not read image: {}'.format(full_img_path))
            continue

        partImg = Image.fromarray(partImg_array).convert("RGB")
        partImg_ = partImg.convert('L')

        simPred = crnn_handle.predict(partImg_)  # recognized text

        print('gt: {}, pred: {}'.format(img_gt[img_path], simPred))
