# coding=utf-8

import os


import sys
import string
from PIL import Image
import argparse
import torch
from torch.autograd import Variable
import torch.backends.cudnn as cudnn
import torch.utils.data
import torchvision.transforms as transforms
import time
from config import ConfigOpt
from model import Model
import logging
import math
import cv2
import numpy as np
logging.basicConfig(
    format='[%(asctime)s] [%(filename)s]:[line:%(lineno)d] [%(levelname)s] %(message)s', level=logging.INFO)

class CTCLabelConverter(object):
    """Translate between text labels and integer index sequences for CTC.

    Index 0 is reserved for the CTC 'blank' token; real characters map to
    indices 1..N in the order they appear in `character`.
    """

    def __init__(self, character):
        # character (str): set of the possible characters.
        chars = list(character)
        # 1-based mapping: index 0 is reserved for the CTC 'blank' token.
        self.dict = {ch: idx for idx, ch in enumerate(chars, start=1)}
        # Dummy '[blank]' token for CTCLoss sits at index 0.
        self.character = ['[blank]'] + chars

    def encode(self, text):
        """convert text-label into text-index.
        input:
            text: text labels of each image. [batch_size]

        output:
            text: concatenated text index for CTCLoss.
                    [sum(text_lengths)] = [text_index_0 + text_index_1 + ... + text_index_(n - 1)]
            length: length of each text. [batch_size]
        """
        lengths = [len(label) for label in text]
        indices = [self.dict[ch] for label in text for ch in label]
        return (torch.IntTensor(indices), torch.IntTensor(lengths))

    def decode(self, text_index, length):
        """Convert text-index back into text-labels (greedy CTC collapse)."""
        texts = []
        offset = 0
        for seg_len in length:
            segment = text_index[offset:offset + seg_len]
            pieces = []
            for pos in range(seg_len):
                cur = segment[pos]
                # Drop blanks (index 0) and collapse consecutive repeats.
                if cur != 0 and not (pos > 0 and segment[pos - 1] == cur):
                    pieces.append(self.character[cur])
            texts.append(''.join(pieces))
            offset += seg_len
        return texts


class InferResizeNormalize(object):
    """Resize a PIL image to a fixed size and normalize it to a [-1, 1] tensor."""

    def __init__(self, size, interpolation=Image.BILINEAR):
        # size: (width, height) target passed straight to PIL's resize.
        self.size = size
        self.interpolation = interpolation
        self.toTensor = transforms.ToTensor()

    def __call__(self, img):
        resized = img.resize(self.size, self.interpolation)
        tensor = self.toTensor(resized)
        # In-place rescale: ToTensor gives [0, 1]; map it to [-1, 1].
        tensor.sub_(0.5).div_(0.5)
        return tensor


class NormalizePAD(object):
    """Convert a PIL image to a [-1, 1] tensor.

    NOTE(review): the right-padding code was commented out upstream, so this
    currently returns the un-padded tensor regardless of `max_size` /
    `PAD_type`; the attributes are kept for interface compatibility.
    """

    def __init__(self, max_size, PAD_type='right'):
        self.toTensor = transforms.ToTensor()
        self.max_size = max_size
        self.max_width_half = math.floor(max_size[2] / 2)
        self.PAD_type = PAD_type

    def __call__(self, img):
        tensor = self.toTensor(img)
        # In-place rescale from [0, 1] to [-1, 1].
        tensor.sub_(0.5).div_(0.5)
        c, h, w = tensor.size()
        return tensor


class OcrRec:
    """Single-image OCR recognizer.

    Loads a trained text-recognition model once at construction and exposes
    text_rec() for per-image greedy-decoded inference.
    """

    def __init__(self, opt=None):
        self.max_length = 50
        # Use the caller-supplied config when given, else fall back to defaults.
        self.opt = opt if opt else ConfigOpt()

        self.batch_size = 1
        self.model = None
        self.converter = None
        self.load_model()

    def load_model(self):
        """Instantiate the network from self.opt and load pretrained weights.

        Moves the model to GPU when available and switches it to eval mode.
        """
        if 'CTC' in self.opt.Prediction:
            self.converter = CTCLabelConverter(self.opt.character)
        # else:
        #     self.converter = AttnLabelConverter(self.opt.character)
        self.opt.num_class = len(self.converter.character)
        if self.opt.rgb:
            self.opt.input_channel = 3
        self.model = Model(self.opt)
        print('model input parameters', self.opt.imgH, self.opt.imgW, self.opt.num_fiducial, self.opt.input_channel,
              self.opt.output_channel, self.opt.hidden_size, self.opt.num_class, self.opt.batch_max_length,
              self.opt.Transformation, self.opt.FeatureExtraction, self.opt.SequenceModeling, self.opt.Prediction)

        # Load pretrained weights, mapping to CPU when no GPU is present.
        print('loading pretrained model from %s' % self.opt.saved_model)
        if torch.cuda.is_available():
            self.model = self.model.cuda()
            self.model.load_state_dict(torch.load(self.opt.saved_model))
        else:
            self.model.load_state_dict(torch.load(self.opt.saved_model, map_location="cpu"))
        self.model.eval()

    def text_rec(self, img):
        """Recognize the text in a single image.

        The image is converted to grayscale (or RGB per config),
        contrast-stretched, resized to the configured height keeping the
        aspect ratio, normalized to [-1, 1], and decoded greedily.

        :param img: path to an image file (str) or an image as a numpy array
        :return: the recognized text (str) for the single input image
        """
        if isinstance(img, str) and os.path.isfile(img):  # accept both a path and an array
            img = Image.open(img)
        else:
            img = Image.fromarray(img)

        if self.opt.rgb:
            img = img.convert('RGB')
        else:
            img = img.convert('L')
        w, h = img.size

        # Histogram normalization: linearly stretch the pixel intensities to
        # the full [0, 255] range to improve contrast before recognition.
        arr = np.asarray(img)
        max_v, min_v = np.max(arr), np.min(arr)
        Omin, Omax = 0, 255
        if max_v > min_v:
            scale = float(Omax - Omin) / float(max_v - min_v)
            offset = Omin - scale * min_v
            arr = (scale * arr + offset).astype(np.uint8)
        else:
            # Uniform image: the original divided by zero here and produced
            # NaN garbage after the uint8 cast; keep the pixels unchanged.
            arr = arr.astype(np.uint8)
        img = Image.fromarray(arr)

        # Resize to the configured height, preserving the aspect ratio.
        ratio = w / float(h)
        resized_w = math.ceil(self.opt.imgH * ratio)
        transformer = InferResizeNormalize((resized_w, self.opt.imgH), interpolation=Image.BILINEAR)

        img = transformer(img)
        img = img.view(1, *img.size())  # add the batch dimension
        img = Variable(img)

        with torch.no_grad():
            if torch.cuda.is_available():
                img = img.cuda()
                length_for_pred = torch.cuda.IntTensor([self.opt.batch_max_length] * self.batch_size)
                text_for_pred = torch.cuda.LongTensor(self.batch_size, self.opt.batch_max_length + 1).fill_(0)
            else:
                length_for_pred = torch.IntTensor([self.opt.batch_max_length] * self.batch_size)
                text_for_pred = torch.LongTensor(self.batch_size, self.opt.batch_max_length + 1).fill_(0)
            if 'CTC' in self.opt.Prediction:
                preds = self.model(img, text_for_pred).softmax(2)
                # Greedy decoding: argmax over classes per timestep, then
                # collapse repeats/blanks in the converter.
                preds_size = torch.IntTensor([preds.size(1)] * self.batch_size)
                _, preds_index = preds.permute(1, 0, 2).max(2)
                preds_index = preds_index.transpose(1, 0).contiguous().view(-1)
                preds_str = self.converter.decode(preds_index.data, preds_size.data)
            else:
                preds = self.model(img, text_for_pred, is_train=False)
                # Greedy decoding, then cut at the end-of-sequence '[s]' token.
                _, preds_index = preds.max(2)
                preds_str = self.converter.decode(preds_index, length_for_pred)
                preds_str = [pred[:pred.find('[s]')] for pred in preds_str]
        return preds_str[0]


def conver_label(gt):
    """Normalize a ground-truth label for comparison.

    Removes all whitespace and converts fullwidth (CJK) punctuation, digits,
    and Latin letters to their ASCII halfwidth equivalents.

    Bug fix: the original fullwidth list contained 'ｗ' twice where the
    second entry should have been 'ｘ', so fullwidth 'ｘ' was never
    translated; it now maps to 'x'.

    :param gt: raw label string, possibly containing fullwidth characters
    :return: whitespace-free label with fullwidth characters converted
    """
    fullwidth = ('！（）：，．［］；％０１２３４５６７８９％－'
                 'ＡＢＣＤＥＦＧＨＩＪＫＬＭＮＯＰＱＲＳＴＵＶＷＸＹＺ'
                 'ａｂｃｄｅｆｇｈｉｊｋｌｍｎｏｐｑｒｓｔｕｖｗｘｙｚ￥')
    halfwidth = ('!():,.[];%0123456789%-'
                 'ABCDEFGHIJKLMNOPQRSTUVWXYZ'
                 'abcdefghijklmnopqrstuvwxyz¥')
    # One C-level pass replaces the per-character list scan of the original.
    table = str.maketrans(fullwidth, halfwidth)
    # ''.join(split()) removes ALL whitespace, internal and surrounding.
    gt = ''.join(gt.split())
    return gt.translate(table)

if __name__ == '__main__':

    os.environ["CUDA_VISIBLE_DEVICES"] = "1"
    # cudnn.benchmark = True
    # cudnn.deterministic = True
    opt = ConfigOpt()
    # Checkpoint to evaluate (earlier experiment paths live in VCS history).
    opt.saved_model = 'saved_models/test_DCP/mtl_best_accuracy.pth'

    opt.imgW = 320
    opt.rgb = None
    # VGG|RCNN|ResNet|MobileNetV3|DenseNet|CNN_Lite3|ConvEmbeddingGC
    opt.FeatureExtraction = 'ResNet'
    ocr_rec = OcrRec(opt=opt)

    # Path to a single image file, or to a directory of images to batch over.
    image_path = "/data1/lhw/workspace/OCR/demo_images"

    '''原始infer代码'''
    times = []
    if os.path.isfile(image_path):
        res_text = ocr_rec.text_rec(image_path)
        print(f"{image_path.split(os.path.sep)[-1]}\t{res_text}")
    elif os.path.isdir(image_path):
        for image_file in os.listdir(image_path):
            # Compare extensions case-insensitively: the original check
            # silently skipped '.JPG' / '.PNG' files.
            suffix = image_file.split('.')[-1].lower()
            if suffix not in ('jpg', 'jpeg', 'png'):
                continue
            img_path = os.path.join(image_path, image_file)
            if not os.path.isfile(img_path):
                print(f"not file {img_path}")
                continue
            print(img_path)
            start_time = time.time()
            res_text = ocr_rec.text_rec(img_path)
            times.append(time.time() - start_time)
            print(f"{image_file}\t{res_text}")
    # print('ave_time:{}'.format(np.average(times)))
    '''原始infer代码 End'''