import os
import numpy as np
import copy
import cv2
import ocr.params as params
from PIL import Image
import torch
from torchvision import transforms
from torch.autograd import Variable
from torch import nn
# from net.ShuffleNetV2 import shufflenet_v2_x0_5

from multiprocessing.dummy import Pool as ThreadPool
from functools import partial


def sorted_boxes(dt_boxes):
    """
    Sort text boxes in reading order: top to bottom, then left to right.

    args:
        dt_boxes(array): detected text boxes with shape [N, 4, 2]
    return:
        list of boxes sorted in reading order
    """
    num_boxes = dt_boxes.shape[0]
    # Primary sort by the top-left corner: y first, then x.
    # (Renamed from `sorted_boxes`, which shadowed this function's own name.)
    _boxes = sorted(dt_boxes, key=lambda box: (box[0][1], box[0][0]))

    # One bubble pass: boxes whose tops differ by < 10 px are treated as the
    # same text line, so order those left-to-right regardless of exact y.
    for i in range(num_boxes - 1):
        if abs(_boxes[i + 1][0][1] - _boxes[i][0][1]) < 10 and \
                _boxes[i + 1][0][0] < _boxes[i][0][0]:
            _boxes[i], _boxes[i + 1] = _boxes[i + 1], _boxes[i]
    return _boxes


def get_rotate_crop_image(img, points):
    """
    Crop the quadrilateral `points` out of `img` and rectify it with a
    perspective transform.

    args:
        img(array): source image, H x W x C
        points(array): 4 corner points with shape [4, 2], ordered
            top-left, top-right, bottom-right, bottom-left
    return:
        rectified crop (array); rotated 90 degrees when it is clearly
        taller than wide (assumed to be vertical text)
    """
    img_height, img_width = img.shape[0:2]
    # Work on a float32 copy so the caller's array is never mutated
    # (the original shifted the caller's coordinates in place).
    points = np.array(points, dtype=np.float32, copy=True)
    left = int(np.min(points[:, 0]))
    right = int(np.max(points[:, 0]))
    top = int(np.min(points[:, 1]))
    bottom = int(np.max(points[:, 1]))
    img_crop = img[top:bottom, left:right, :].copy()
    # Shift corner coordinates into the cropped image's frame.
    points[:, 0] = points[:, 0] - left
    points[:, 1] = points[:, 1] - top
    img_crop_width = int(np.linalg.norm(points[0] - points[1]))
    img_crop_height = int(np.linalg.norm(points[0] - points[3]))
    pts_std = np.float32([[0, 0], [img_crop_width, 0],
                          [img_crop_width, img_crop_height],
                          [0, img_crop_height]])
    # Perspective transform maps the (possibly skewed) quad onto an
    # axis-aligned rectangle of the measured width/height.
    M = cv2.getPerspectiveTransform(points, pts_std)
    dst_img = cv2.warpPerspective(
        img_crop,
        M, (img_crop_width, img_crop_height),
        borderMode=cv2.BORDER_REPLICATE)
    dst_img_height, dst_img_width = dst_img.shape[0:2]
    # A crop much taller than wide is presumably vertical text — TODO confirm.
    if dst_img_height * 1.0 / dst_img_width >= 1.5:
        dst_img = np.rot90(dst_img)
    return dst_img


def process(image_name, box, img, crnn_handle, row_index, col_index, visualize):
    """
    Crop one text box out of `img`, normalize its orientation and run CRNN
    text recognition on it.

    args:
        image_name(str): name used to build the debug output directory
        box(array): one text box with shape [4, 2]
        img(array): full source image
        crnn_handle: recognizer exposing predict(PIL.Image) -> str
        row_index, col_index: position tags embedded in debug file names
        visualize(bool): when True, save the rotated crop for debugging
    return:
        recognized text (str), or None when the box is degenerate, the
        recognizer failed, or the prediction is empty
    """
    left = int(np.min(box[:, 0]))
    right = int(np.max(box[:, 0]))
    top = int(np.min(box[:, 1]))
    bottom = int(np.max(box[:, 1]))
    # Skip degenerate boxes (zero width or height, e.g. all-zero corners).
    if left == right or top == bottom:
        return None

    tmp_box = copy.deepcopy(box)
    partImg_array = get_rotate_crop_image(img, tmp_box.astype(np.float32))

    # Orientation is currently hard-wired to class 1; a small-image angle
    # classifier could replace this (see AangleClassHandle).
    angle_class = params.lable_map_dict[1]
    rotate_angle = params.rotae_map_dict[angle_class]

    if rotate_angle != 0:
        partImg_array = np.rot90(partImg_array, rotate_angle // 90)

    partImg = Image.fromarray(partImg_array).convert("RGB")
    partImg_ = partImg.convert('L')  # CRNN consumes grayscale

    # Default to empty so a failed prediction falls through to None instead
    # of raising NameError (the original left simPred unbound on exception).
    simPred = u''
    try:
        crnn_vertical_handle = None  # TODO: handle vertical text
        if crnn_vertical_handle is not None and angle_class in ["shudao", "shuzhen"]:
            simPred = crnn_vertical_handle.predict(partImg_)
        else:
            simPred = crnn_handle.predict(partImg_)  # recognized text
            if visualize:
                os.makedirs(params.debug_data_path + image_name, exist_ok=True)
                partImg.save(
                    params.debug_data_path + image_name + "/crnn_part_img_rotated_{}_{}_{}.jpg".format(row_index,
                                                                                                       col_index,
                                                                                                       simPred))
    except Exception:
        # Best-effort: one unreadable crop must not abort the whole page.
        pass

    if simPred.strip() != u'':
        return simPred
    return None



def crnnRecWithBox(image_name, img, box_list, crnn_handle, row_index, col_index, visualize=False):
    """
    Run CRNN OCR on the text boxes of one image region.

    args:
        image_name: name used to build debug output paths
        img(Array): source image
        box_list: text box coordinates (converted to a numpy array)
        crnn_handle: CRNN recognizer handle
        row_index, col_index: position tags for debug file names
        visualize: whether to dump the cropped box images for inspection
    return:
        whatever `process` returns for the boxes (recognized text or None)
    """
    boxes = np.array(box_list)
    return process(image_name, boxes, img, crnn_handle, row_index, col_index,
                   visualize)


class resizeNormalize(object):
    """Resize a grayscale PIL image to a fixed height, pad or squeeze it to
    a fixed width, and normalize it to a tensor in [-1, 1]."""

    def __init__(self, size, interpolation=Image.BILINEAR):
        self.size = size  # (target width, target height)
        self.interpolation = interpolation

    def __call__(self, img):
        target_w, target_h = self.size
        # Scale so the height matches target_h while keeping aspect ratio.
        ratio = img.size[1] * 1.0 / target_h
        scaled_w = int(img.size[0] / ratio)
        img = img.resize((scaled_w, target_h), self.interpolation)
        actual_w, _ = img.size
        if actual_w <= target_w:
            # Pad the right side with white to reach exactly target_w.
            canvas = np.zeros((target_h, target_w), dtype='uint8')
            canvas[:] = 255
            canvas[:, :actual_w] = np.array(img)
            img = Image.fromarray(canvas)
        else:
            # Wider than the target: squeeze it down instead of cropping.
            img = img.resize((target_w, target_h), self.interpolation)
        tensor = transforms.ToTensor()(img)
        tensor.sub_(0.5).div_(0.5)  # map [0, 1] -> [-1, 1] in place
        return tensor


class strLabelConverter(object):
    """Convert between text strings and CTC label index sequences.

    Index 0 is reserved for the CTC 'blank' symbol, so character i of the
    alphabet is encoded as i + 1.
    """

    def __init__(self, alphabet):
        # The trailing 'ç' guards the `-1` index used by raw decoding.
        self.alphabet = alphabet + 'ç'  # for `-1` index
        self.dict = {}
        for i, char in enumerate(alphabet):
            # NOTE: 0 is reserved for 'blank' required by wrap_ctc
            self.dict[char] = i + 1

    def encode(self, text, depth=0):
        """Encode a batch of strings (or a single str) into label indices.

        args:
            text: iterable of strings (a bare str also works — it iterates
                as one-character strings)
            depth: unused; kept for backward compatibility
        return:
            (IntTensor of concatenated label indices,
             IntTensor of per-string lengths)
        raises:
            KeyError: when a character is not in the alphabet
        """
        length = []
        result = []
        # Loop variable renamed from `str`, which shadowed the builtin.
        for item in text:
            length.append(len(item))
            for char in item:
                result.append(self.dict[char])
        return torch.IntTensor(result), torch.IntTensor(length)

    def decode(self, t, length, raw=False):
        """Decode label indices back into text.

        args:
            t: IntTensor of label indices (concatenated for a batch)
            length: IntTensor of per-string lengths
            raw: when True keep blanks/repeats; otherwise apply the
                standard CTC collapse (drop blanks and repeated labels)
        return:
            a single str when length has one element, else a list of str
        """
        if length.numel() == 1:
            length = length[0]
            t = t[:length]
            if raw:
                return ''.join([self.alphabet[i - 1] for i in t])
            char_list = []
            for i in range(length):
                # CTC collapse: skip blanks (0) and repeated labels.
                if t[i] != 0 and (not (i > 0 and t[i - 1] == t[i])):
                    char_list.append(self.alphabet[t[i] - 1])
            return ''.join(char_list)
        # Batch: split the flat tensor by lengths and decode each piece.
        texts = []
        index = 0
        for i in range(length.numel()):
            l = length[i]
            texts.append(self.decode(
                t[index:index + l], torch.IntTensor([l]), raw=raw))
            index += l
        return texts


class CRNNHandle:
    """Wrapper that loads a CRNN checkpoint and runs text recognition."""

    def __init__(self, model_path, net, gpu_id=None):
        """
        Initialize the pytorch model.

        :param model_path: checkpoint path (either bare parameters or a
            file holding both the graph and the parameters)
        :param net: network graph; required when model_path stores only
            the parameters
        :param gpu_id: index of the GPU to run on (CPU when None)
        """
        use_cuda = gpu_id is not None and isinstance(gpu_id, int) and torch.cuda.is_available()
        if use_cuda:
            self.device = torch.device("cuda:{}".format(gpu_id))
        else:
            self.device = torch.device("cpu")
        self.net = torch.load(model_path, map_location=self.device)

        if net is not None:
            # The graph and parameters were saved separately: load the
            # checkpoint's state dict into the supplied graph.
            net = net.to(self.device)
            try:
                # Strip DataParallel's "module." prefix from the keys.
                stripped = {k.replace("module.", ""): self.net[k] for k in self.net}
                net.load_state_dict(stripped)
            except Exception as e:
                print(e)
                net.load_state_dict(self.net)
            self.net = net
        self.net.eval()

    def predict(self, im):
        """Recognize the text in a single PIL image and return it as str."""
        gray = im.convert('L')
        # Resize so the height is 32 while preserving the aspect ratio.
        ratio = gray.size[1] * 1.0 / 32
        width = int(gray.size[0] / ratio)
        transformer = resizeNormalize((width, 32))

        tensor = transformer(gray).to(self.device)
        tensor = tensor.view(1, *tensor.size())
        tensor = Variable(tensor)
        logits = self.net(tensor)

        # Greedy decoding: take the best class per time step ...
        _, best = logits.max(2)
        best = best.transpose(1, 0).contiguous().view(-1)
        seq_len = Variable(torch.IntTensor([best.size(0)]))
        # ... then collapse the CTC sequence into a string.
        converter = strLabelConverter(''.join(params.alphabet))
        return converter.decode(best.data, seq_len.data, raw=False)


class BidirectionalLSTM(nn.Module):
    """A bidirectional LSTM followed by a per-time-step linear projection:
    [T, b, nIn] -> [T, b, nOut]."""

    def __init__(self, nIn, nHidden, nOut):
        super(BidirectionalLSTM, self).__init__()
        self.rnn = nn.LSTM(nIn, nHidden, bidirectional=True)
        # Both directions are concatenated, hence 2 * nHidden input features.
        self.embedding = nn.Linear(nHidden * 2, nOut)

    def forward(self, input):
        recurrent, _ = self.rnn(input)
        seq_len, batch, hidden = recurrent.size()
        # Fold time and batch together so one Linear handles every step.
        flat = recurrent.view(seq_len * batch, hidden)
        projected = self.embedding(flat)  # [T * b, nOut]
        return projected.view(seq_len, batch, -1)


class CRnn(nn.Module):
    """CRNN backbone: a VGG-style CNN that collapses the image height to 1,
    followed either by a 2-layer bidirectional LSTM head or by a plain
    linear classifier (selected by lstmFlag)."""

    def __init__(self, imgH, nc, nclass, nh, n_rnn=2, leakyRelu=False, lstmFlag=True):
        """
        lstmFlag: whether to add the LSTM feature layers
        """
        super(CRnn, self).__init__()
        assert imgH % 16 == 0, 'imgH has to be a multiple of 16'

        kernel_sizes = [3, 3, 3, 3, 3, 3, 2]
        paddings = [1, 1, 1, 1, 1, 1, 0]
        conv_strides = [1, 1, 1, 1, 1, 1, 1]
        channels = [64, 128, 256, 256, 512, 512, 512]
        self.lstmFlag = lstmFlag

        cnn = nn.Sequential()

        def add_conv_block(i, batchNormalization=False):
            # conv -> (optional BN) -> ReLU/LeakyReLU for layer index i.
            in_ch = nc if i == 0 else channels[i - 1]
            out_ch = channels[i]
            cnn.add_module('conv{0}'.format(i),
                           nn.Conv2d(in_ch, out_ch, kernel_sizes[i],
                                     conv_strides[i], paddings[i]))
            if batchNormalization:
                cnn.add_module('batchnorm{0}'.format(i), nn.BatchNorm2d(out_ch))
            if leakyRelu:
                cnn.add_module('relu{0}'.format(i),
                               nn.LeakyReLU(0.2, inplace=True))
            else:
                cnn.add_module('relu{0}'.format(i), nn.ReLU(True))

        add_conv_block(0)
        cnn.add_module('pooling{0}'.format(0), nn.MaxPool2d(2, 2))  # 64x16x64
        add_conv_block(1)
        cnn.add_module('pooling{0}'.format(1), nn.MaxPool2d(2, 2))  # 128x8x32
        add_conv_block(2, True)
        add_conv_block(3)
        # Halve the height but keep width resolution (stride 1, pad 1 on w).
        cnn.add_module('pooling{0}'.format(2),
                       nn.MaxPool2d((2, 2), (2, 1), (0, 1)))  # 256x4x16
        add_conv_block(4, True)
        add_conv_block(5)
        cnn.add_module('pooling{0}'.format(3),
                       nn.MaxPool2d((2, 2), (2, 1), (0, 1)))  # 512x2x16
        add_conv_block(6, True)  # 512x1x16

        self.cnn = cnn
        if self.lstmFlag:
            self.rnn = nn.Sequential(
                BidirectionalLSTM(512, nh, nh),
                BidirectionalLSTM(nh, nh, nclass))
        else:
            self.linear = nn.Linear(nh * 2, nclass)

    def forward(self, input):
        # Convolutional features; the height must have collapsed to 1.
        conv = self.cnn(input)
        b, c, h, w = conv.size()
        assert h == 1, "the height of conv must be 1"
        conv = conv.squeeze(2)
        conv = conv.permute(2, 0, 1)  # [w, b, c]

        if self.lstmFlag:
            return self.rnn(conv)

        seq_len, batch, feat = conv.size()
        flat = conv.contiguous().view(seq_len * batch, feat)
        output = self.linear(flat)  # [T * b, nOut]
        return output.view(seq_len, batch, -1)


class AangleClassHandle():
    """Wrapper that loads an angle-classification checkpoint and predicts
    the orientation-class index of a small text image."""

    def __init__(self, model_path, net, gpu_id=None):
        """
        Initialize the pytorch model.

        :param model_path: checkpoint path (either bare parameters or a
            file holding both the graph and the parameters)
        :param net: network graph; required when model_path stores only
            the parameters
        :param gpu_id: index of the GPU to run on (CPU when None)
        """
        use_cuda = gpu_id is not None and isinstance(gpu_id, int) and torch.cuda.is_available()
        if use_cuda:
            self.device = torch.device("cuda:{}".format(gpu_id))
        else:
            self.device = torch.device("cpu")
        self.net = torch.load(model_path, map_location=self.device)

        # Preprocessing: fixed 48x196 resize + ImageNet normalization.
        self.trans = transforms.Compose([
            transforms.Resize((48, 196)),
            transforms.ToTensor(),
            transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])])

        if net is not None:
            # The graph and parameters were saved separately: load the
            # checkpoint's state dict into the supplied graph.
            net = net.to(self.device)
            try:
                # Drop the 7-char "module." prefix left by DataParallel.
                stripped = {k[7:]: self.net[k] for k in self.net}
                net.load_state_dict(stripped)
            except:
                net.load_state_dict(self.net)
            self.net = net
        self.net.eval()

    def predict(self, im):
        """Predict the angle-class index for a numpy image array."""
        rgb = Image.fromarray(im).convert("RGB")
        batch = self.trans(rgb)
        batch = batch.to(self.device)
        batch = batch.view(1, *batch.size())
        batch = Variable(batch)
        logits = self.net(batch)
        probs = torch.softmax(logits, 1)
        probs = probs.cpu().detach().numpy()
        return np.argmax(probs)
