# coding=utf-8

import os
os.environ["CUDA_VISIBLE_DEVICES"] = "3"
import sys
import string
from PIL import Image
import argparse
import torch
from torch.autograd import Variable
import torch.backends.cudnn as cudnn
import torch.utils.data
import torchvision.transforms as transforms
from utils.utils import CTCLabelConverter, AttnLabelConverter
from utils.dataset import RawDataset, AlignCollate
from config import ConfigOpt
from model import Model
import logging
import time
import numpy as np
import math


# Configure root logging: timestamp, source file/line, level, then the message.
logging.basicConfig(
    format='[%(asctime)s] [%(filename)s]:[line:%(lineno)d] [%(levelname)s] %(message)s', level=logging.INFO)


class InferResizeNormalize(object):
    """Resize a PIL image to a fixed (width, height) and normalize to [-1, 1]."""

    def __init__(self, size, interpolation=Image.BILINEAR):
        """
        :param size: target (width, height) passed straight to PIL's resize
        :param interpolation: PIL resampling filter used for the resize
        """
        self.size = size
        self.interpolation = interpolation
        self.toTensor = transforms.ToTensor()

    def __call__(self, img):
        resized = img.resize(self.size, self.interpolation)
        tensor = self.toTensor(resized)
        # ToTensor yields values in [0, 1]; shift/scale in place to [-1, 1]
        tensor.sub_(0.5).div_(0.5)
        return tensor


class NormalizePAD(object):
    """Normalize a PIL image to [-1, 1] and right-pad it to a fixed width.

    The padded region replicates the image's last column so the pad does not
    introduce a hard black edge into the recognizer's input.
    """

    def __init__(self, max_size, PAD_type='right'):
        """
        :param max_size: target output shape (channels, height, max_width)
        :param PAD_type: padding side; only right-padding is implemented
        """
        self.toTensor = transforms.ToTensor()
        self.max_size = max_size
        self.max_width_half = math.floor(max_size[2] / 2)
        self.PAD_type = PAD_type

    def __call__(self, img):
        img = self.toTensor(img)
        img.sub_(0.5).div_(0.5)  # map [0, 1] -> [-1, 1]
        c, h, w = img.size()
        Pad_img = torch.FloatTensor(*self.max_size).fill_(0)
        Pad_img[:, :, :w] = img  # place the image at the left edge
        if self.max_size[2] != w:  # replicate the last column into the pad area
            Pad_img[:, :, w:] = img[:, :, w - 1].unsqueeze(2).expand(c, h, self.max_size[2] - w)

        # BUG FIX: the original returned the un-padded `img`, silently
        # discarding Pad_img and all the padding work above; the padded,
        # fixed-width tensor is the intended result of this transform.
        return Pad_img


class OcrRec:
    """Single-image text recognizer wrapping a trained recognition Model.

    Supports both CTC and attention ('Attn') prediction heads; the head is
    selected by ``opt.Prediction`` and determines both the label converter
    and the decoding path in :meth:`text_rec`.
    """

    def __init__(self, opt=None):
        """
        :param opt: configuration object; falls back to a default ConfigOpt()
        """
        self.max_length = 25
        self.opt = ConfigOpt()
        if opt:
            self.opt = opt
        self.batch_size = 1  # inference is done one image at a time
        self.model = None
        self.converter = None
        self.load_model()

    def load_model(self):
        """Build the network, load pretrained weights and switch to eval mode."""
        if 'CTC' in self.opt.Prediction:
            self.converter = CTCLabelConverter(self.opt.character)
        else:
            self.converter = AttnLabelConverter(self.opt.character)
        self.opt.num_class = len(self.converter.character)
        if self.opt.rgb:
            self.opt.input_channel = 3
        self.model = Model(self.opt)
        print('model input parameters', self.opt.imgH, self.opt.imgW, self.opt.num_fiducial, self.opt.input_channel,
              self.opt.output_channel, self.opt.hidden_size, self.opt.num_class, self.opt.batch_max_length,
              self.opt.Transformation, self.opt.FeatureExtraction, self.opt.SequenceModeling, self.opt.Prediction)
        # Checkpoints were presumably saved from a DataParallel model
        # ("module." key prefix), so wrap before loading the state dict.
        self.model = torch.nn.DataParallel(self.model)
        if torch.cuda.is_available():
            self.model = self.model.cuda()
        print('loading pretrained model from %s' % self.opt.saved_model)
        if torch.cuda.is_available():
            self.model.load_state_dict(torch.load(self.opt.saved_model))
        else:
            # remap GPU-saved tensors onto the CPU when no GPU is present
            self.model.load_state_dict(torch.load(self.opt.saved_model, map_location="cpu"))
        self.model.eval()

        # Report the parameter count (assumes float32: 4 bytes per parameter).
        para = sum([np.prod(list(p.size())) for p in self.model.parameters()])
        print('Model params: {:4f}M'.format(para * 4 / 1024 / 1024))

    def text_rec(self, img):
        """Recognize the text in a single image.

        Resizes the image to a fixed height keeping the aspect ratio (width
        capped at ``opt.imgW``), normalizes/pads it, and greedily decodes the
        model output.

        :param img: a PIL image or a filesystem path to an image
        :return: the predicted text string
        """
        # Removed: unused `import PIL.ImageOps` and the deprecated no-op
        # `Variable(img)` wrapper (Variables merged into Tensors in torch 0.4).
        if isinstance(img, str) and os.path.isfile(img):
            img = Image.open(img).convert('RGB')
        if img.mode != 'L':
            img = img.convert('L')  # the model expects single-channel input

        # Keep the aspect ratio; NormalizePAD is intended to right-pad the
        # tensor to a fixed (1, imgH, imgW) shape.
        resized_max_w = self.opt.imgW
        transformer = NormalizePAD((1, self.opt.imgH, resized_max_w))
        w, h = img.size
        ratio = w / float(h)
        if math.ceil(self.opt.imgH * ratio) > self.opt.imgW:
            resized_w = self.opt.imgW
        else:
            resized_w = math.ceil(self.opt.imgH * ratio)
        img = img.resize((resized_w, self.opt.imgH), Image.BICUBIC)

        img = transformer(img)
        img = img.view(1, *img.size())  # add the batch dimension

        with torch.no_grad():
            if torch.cuda.is_available():
                img = img.cuda()
                length_for_pred = torch.cuda.IntTensor([self.opt.batch_max_length] * self.batch_size)
                text_for_pred = torch.cuda.LongTensor(self.batch_size, self.opt.batch_max_length + 1).fill_(0)
            else:
                length_for_pred = torch.IntTensor([self.opt.batch_max_length] * self.batch_size)
                text_for_pred = torch.LongTensor(self.batch_size, self.opt.batch_max_length + 1).fill_(0)
            if 'CTC' in self.opt.Prediction:
                preds = self.model(img, text_for_pred).softmax(2)
                # Greedy decoding: take the most probable class at every step.
                preds_size = torch.IntTensor([preds.size(1)] * self.batch_size)
                preds_prob_vals, preds_index = preds.permute(1, 0, 2).max(2)
                preds_index = preds_index.transpose(1, 0).contiguous().view(-1)
                preds_str = self.converter.decode(preds_index.data, preds_size.data)
            else:
                preds = self.model(img, text_for_pred, is_train=False)
                # Greedy decoding, then cut at the '[s]' end-of-sequence token
                # that the attention decoder appends.
                _, preds_index = preds.max(2)
                preds_str = self.converter.decode(preds_index, length_for_pred)
                preds_str = [pred[:pred.find('[s]')] for pred in preds_str]
        return preds_str[0]


def open_txt(file_name):
    """Lazily yield each line of a text file, stripped of surrounding whitespace.

    :param file_name: path to a readable text file
    :return: generator of stripped lines (blank lines yield '')
    :raises OSError: if the file cannot be opened or read

    Iterating the file object replaces the original manual readline loop,
    and the bare ``except`` that swallowed every error (printing 'No value')
    is removed so real I/O failures propagate to the caller.
    """
    with open(file_name, 'r') as f:
        for line in f:
            yield line.strip()


# Translation table mapping fullwidth (CJK presentation) punctuation, digits
# and Latin letters to their ASCII equivalents.  Built once at import time.
_POST_LABEL_TABLE = str.maketrans(
    '！（）：，．［］；％０１２３４５６７８９－'
    'ＡＢＣＤＥＦＧＨＩＪＫＬＭＮＯＰＱＲＳＴＵＶＷＸＹＺ'
    'ａｂｃｄｅｆｇｈｉｊｋｌｍｎｏｐｑｒｓｔｕｖｗｘｙｚ',
    '!():,.[];%0123456789-'
    'ABCDEFGHIJKLMNOPQRSTUVWXYZ'
    'abcdefghijklmnopqrstuvwxyz'
)


def post_label(gt):
    """Normalize a predicted label: drop all whitespace and convert fullwidth
    characters to ASCII.

    Fixes a defect in the original lookup tables: fullwidth 'ｘ' was listed as
    a duplicate 'ｗ', so 'ｘ' was never converted.  The O(len(gt) * len(table))
    nested loop is replaced by a single ``str.translate`` pass.

    :param gt: raw label string
    :return: normalized label with ASCII punctuation/digits/letters
    """
    gt = ''.join(gt.split()).strip()
    return gt.translate(_POST_LABEL_TABLE)


def string_distance(str1, str2):
    """Compute the Levenshtein (edit) distance between two strings.

    Classic O(m*n) dynamic program: insertions and deletions cost 1, a
    substitution costs 1 (0 when the characters match).

    :param str1: source string
    :param str2: target string
    :return: minimum number of edits, as a numpy scalar (float, matching the
             original's return type so callers' arithmetic is unchanged)
    """
    m, n = len(str1), len(str2)  # len() instead of the dunder __len__ calls
    distance = np.zeros((m + 1, n + 1))
    # Border: transforming to/from the empty prefix costs its length.
    distance[:, 0] = np.arange(m + 1)
    distance[0, :] = np.arange(n + 1)

    for i in range(1, m + 1):
        for j in range(1, n + 1):
            cost = 0 if str1[i - 1] == str2[j - 1] else 1
            distance[i, j] = min(distance[i - 1, j] + 1,      # deletion
                                 distance[i, j - 1] + 1,      # insertion
                                 distance[i - 1, j - 1] + cost)  # substitution
    return distance[m, n]


# Translation table mapping fullwidth (CJK presentation) punctuation, digits,
# Latin letters and the fullwidth Yen sign to ASCII/halfwidth equivalents.
_CONVER_LABEL_TABLE = str.maketrans(
    '！（）：，．［］；％０１２３４５６７８９－'
    'ＡＢＣＤＥＦＧＨＩＪＫＬＭＮＯＰＱＲＳＴＵＶＷＸＹＺ'
    'ａｂｃｄｅｆｇｈｉｊｋｌｍｎｏｐｑｒｓｔｕｖｗｘｙｚ'
    '￥',
    '!():,.[];%0123456789-'
    'ABCDEFGHIJKLMNOPQRSTUVWXYZ'
    'abcdefghijklmnopqrstuvwxyz'
    '¥'
)


def conver_label(gt):
    """Normalize a ground-truth label: drop all whitespace and convert
    fullwidth characters (including '￥') to their halfwidth equivalents.

    Fixes a defect in the original lookup tables: fullwidth 'ｘ' was listed as
    a duplicate 'ｗ', so 'ｘ' was never converted.  The per-character list
    scan is replaced by a single ``str.translate`` pass.

    :param gt: raw ground-truth string
    :return: normalized label with halfwidth punctuation/digits/letters
    """
    gt = ''.join(gt.split()).strip()
    return gt.translate(_CONVER_LABEL_TABLE)



if __name__ == '__main__':
    # Evaluation driver: depending on what `path` points at, either scores a
    # label-list file / single image, or walks an images/annotations dataset
    # directory and reports accuracy plus per-image / per-crop timings.
    # cudnn.benchmark = True
    # cudnn.deterministic = True
    # opt.num_gpu = torch.cuda.device_count()
    opt = ConfigOpt()
    # Edit
    opt.saved_model = 'saved_models/9-17_NoJYFW_YYZZ_train-AT-Addr_GSMC/mtl_best_accuracy.pth'
    


    # Edit End
    ocr_rec = OcrRec(opt=opt)
    # path = sys.argv[1]
    # path = '/data2/dtj/DTJCOMP/ppccddff/20200513营业执照-已标注未整理/lin/00405/北京市朝阳区百子湾路11号18幢1-4层北京柏林湾酒店内一层8202室.1.jpg'
    # path = '/home/lhw/workspace/mtl-text-recognition/demo_images'
    path = '/data2/lhw/YYZZ_labeled_data/NoJYFW_labels.txt'
    # path = '/data2/dtj/DTJCOMP/ppccddff/new_pp_labeled.txt'
    # path = '/data2/DTJCOMP/ppccddff/labeled_train/hcp (44)/始发改签开车后改签不予退票.3.jpg'
    # Pre_out_txt = open('saved_models/7-6_VGG_CTC_AT_MixedAR_FT/V2_NoJYFW_YYZZ_labeled_data.txt', 'w')
    # Case 1: `path` is a single file — a tab-separated label list or one image.
    char_acc = []
    if os.path.isfile(path):
        # read the image-path / ground-truth pairs from the txt list
        count_line = 0
        correct_num = 0
        # NOTE(review): crude extension check — treats any path ending in 't'
        # as a .txt label list.
        if path[-1] == 't':
            images = [line.strip() for line in open_txt(path)]
            for item in images:
                img_path = item.split('\t')[0]
                try:
                    # gt = str(item.split('\t')[0].split('/')[-1][:-6])
                    gt = str(item.split('\t')[1])
                except:
                    # NOTE(review): bare except hides malformed lines (no tab)
                    gt = None
                # image = Image.open(img_path)
                # image = image.convert("L")
                try:
                    res_text = ocr_rec.text_rec(img_path)
                    gt = conver_label(gt)
                    dis = string_distance(gt, res_text)
                    # exact match, optionally tolerating one trailing character
                    if gt == res_text.strip() or gt == res_text.strip()[:-1]:
                        correct_num += 1
                    # per-line character accuracy from the edit distance
                    char_acc.append((len(gt) - dis) / len(gt))
                    # Pre_out_txt.writelines(item + '\t' + res_text + '\t' + str(dis) + '\n')
                    
                    count_line += 1
                except:
                    # NOTE(review): bare except silently skips images that fail
                    # to load or recognize; failures are not counted at all.
                    pass
            print('ACC={}%'.format(correct_num / count_line * 100))
            print('Char_ACC={}%'.format(np.mean(np.array(char_acc)) * 100))

        else:
            # single image: ground truth is encoded in the filename
            # (strip the trailing '.N.jpg'-style suffix, 6 characters)
            gt = path.split('/')[-1][:-6]
            res_text = ocr_rec.text_rec(path)

            dis = string_distance(gt, res_text)
            print(dis)
            print(f"{path.split(os.path.sep)[-1]}\t{res_text}")
    # Case 2: `path` is a dataset directory with images/ and annotations/.
    elif os.path.isdir(path):
        image_path = os.path.join(path, 'images')
        ano_path = os.path.join(path, 'annotations')
        image_list = os.listdir(image_path)
        # Pre_txt = open('/data1/lhw/workspace/mtl-text-recognition/saved_models/TrainTicket_VGG_bilstm256_CTC/train_annotations_1PAD.txt', 'w')
        # iterate over the images one by one
        count_all = 0
        count = 0
        times = []
        rec_times = []
        for image_file in image_list:
            suffix = image_file.split('.')[-1]
            image_name = image_file.split('.')[0]
            ano_name = image_name + '.txt'
            if suffix not in ('jpg', 'jpeg', 'png'):
                continue
            img_path = os.path.join(image_path, image_file)
            ano_file = os.path.join(ano_path, ano_name)
            # skip images without a matching annotation file (and vice versa)
            if not os.path.isfile(img_path):
                print(f"not file {img_path}")
                continue
            if not os.path.isfile(ano_file):
                print(f"not file {ano_file}")
                continue
            # out_path = os.path.join(path, 'pre_out/TrainTicket_VGG_bilstm256_CTC/')
            # try:
            #     if os.path.exists(out_path):
            #         pass
            #     else:
            #         os.mkdir(out_path)
            # except:
            #     pass
            # out_file = open(os.path.join(out_path, ano_name), 'w')
            start_time = time.time()
            # load the image and convert to grayscale
            '''单个图计算时间'''
            # per-image wall-clock timing starts here
            per_image_start = time.time()
            # if isinstance(img_path, str) and os.path.isfile(img_path):
            img = Image.open(img_path)
            img = img.convert('L')
        
            # # print(img_path)
            # print(ano_file)
            # print(img)
            # read the annotation txt: one comma-separated box + label per line
            '''ID所需的配置'''
            # ids = ['name', 'sex', 'nation', 'year', 'mon', 'day', 'addr', 'id', 'org', 'life']
            for line in open_txt(ano_file):
                boxes_tmp = line.split(',')
                # print(boxes_tmp)
                '''
                    Train V1
                    xmin, ymin, xmax, ymax
                '''
                xmin, ymin, xmax, ymax, gt = int(float(boxes_tmp[0])), int(float(boxes_tmp[1])), int(
                    float(boxes_tmp[2])), int(float(boxes_tmp[3])), str(boxes_tmp[4])
                '''End'''
                '''=================================================================================================='''

                '''Normal'''
                # xmin, ymin, xmax, ymax, gt, id = int(float(boxes_tmp[0])), int(float(boxes_tmp[1])), int(
                #    float(boxes_tmp[4])), int(float(boxes_tmp[5])), str(boxes_tmp[8]), str(boxes_tmp[9])
                '''End'''
                # box = [[[xmin, ymin], [xmax, ymin], [xmax, ymax], [xmin, ymax]]]
                # img_gray_np = np.array(img)[ymin:ymax, xmin:xmax]
                
                # if id in ids:
                # if xmin-5 and ymin-5 and xmax+5 and ymax+5:
                #     box = (xmin-3, ymin-3, xmax+3, ymax+3)
                box = (xmin, ymin, xmax, ymax)
                # print('box:', box)
                # crop the labeled text region out of the page image
                img_tmp = img.crop(box=box)
                # print('img_tmp:', img_tmp)
                # img_tmp = Image.fromarray(img_gray_np)

                # time only the recognition call for this crop
                per_rec_start = time.time()
                res_text = ocr_rec.text_rec(img_tmp)
                per_rec_end = time.time()
                rec_times.append(per_rec_end-per_rec_start)


                # print(f"{gt}\t{res_text}")
                if res_text:
                    gt = gt.strip()
                    label = str(res_text)
                    # label = post_label(str(res_text))
                    if label == gt:
                        count += 1
                else:
                    label = 'None'
                # dis = string_distance(gt, label)
                out = line + ',' + label
                # out_file.writelines(out + '\n')
                # Pre_txt.writelines(out + ',' + '\n')
                count_all += 1

            per_image_end = time.time()
            times.append(per_image_end-per_image_start)
        print('ACC={}%'.format(count / count_all * 100))
        print('Predict per image cost average time is {}'.format(np.mean(times)))
        print('Predict per rec cost average time is {}'.format(np.mean(rec_times)))