# -*- encoding: utf-8 -*-
# author:lmolhw
# datetime:2021-2-1 16:30

"""
文件说明：
        使用Paddle检查测试集数据
"""

import paddle.fluid as fluid
import numpy as np
import time
import threading
import string
import argparse
import torch
from torch.autograd import Variable
import torch.backends.cudnn as cudnn
import torch.utils.data
import torchvision.transforms as transforms
import sys
import os
import cv2
from PIL import Image
import math

if __name__ == "__main__":
    # sys.path.insert(0, '/data1/lhw/workspace/OCR-30-mjh/code/Detection/DB')
    #
    # from utility import create_predictor
    # from DBNet.db_process import DBProcessTest
    # from DBNet.db_postprocess import DBPostProcess
    # from utils import sort_boxes
    os.environ["CUDA_VISIBLE_DEVICES"] = '0'

    sys.path.insert(0, '/data1/lhw/workspace/OCR-30-mjh/code/Detection/PaddleOCR')
    from utility import create_predictor
    from character import CharacterOps

    # sys.path.insert(0, '/data1/lhw/workspace/OCR-30-mjh/code/')
    # # from Detection.YOLO.predict import OCRDetector as YOLO_Detector


    # sys.path.insert(0, "/data1/lhw/workspace/OCR-30-mjh/code/OCR/V1")
    # print(sys.path)
    from config import ConfigOpt
    from model import Model
    from infer import OcrRec
    import logging
    import math
    from nltk.metrics.distance import edit_distance
#
else:
    from .utility import create_predictor
    from .DBNet.db_process import DBProcessTest
    from .DBNet.db_postprocess import DBPostProcess
    from .utils import sort_boxes

def open_txt(file_name):
    """Yield the stripped lines of *file_name* one at a time.

    Args:
        file_name: path to a text file.

    Yields:
        Each line with surrounding whitespace removed.

    Note:
        The previous version wrapped the readline loop in a bare
        ``except:``, which also swallowed ``GeneratorExit`` — closing the
        generator early would raise ``RuntimeError`` and real I/O errors
        were silently printed away. Plain iteration is both correct and
        simpler; errors now propagate to the caller.
    """
    with open(file_name, 'r') as f:
        for line in f:
            yield line.strip()


class PaddleTextRecognizer(object):
    """CRNN text recognizer backed by a Paddle inference model.

    Wraps an exported ch_ppocr recognition model: resizes/normalizes text
    crops, runs batched inference, and CTC-decodes the output into
    [text, score] pairs.
    """

    def __init__(self):
        # Paddle predictor plus its bound input/output tensors.
        self.predictor, self.input_tensor, self.output_tensors = create_predictor(
            '/data1/mjh/OCR-30-mjh/models/CRNN/Paddle/ch_ppocr_server_v1.1_rec_infer', mode="rec", pre_cache=3000,
            gpu_device=0)
        self.use_zero_copy_run = False
        self.rec_image_shape = [3, 32, 320]  # C, H, W expected by the model
        self.character_type = 'ch'
        self.rec_batch_num = 4  # crops per inference batch
        self.rec_algorithm = 'CRNN'
        # BUG FIX: max_text_length is a count, not a string ('25' -> 25).
        self.text_len = 25
        char_ops_params = {
            "character_type": self.character_type,
            "character_dict_path": '/data1/mjh/OCR-30-mjh/models/CRNN/Paddle/ppocr_keys_v1.txt',
            "use_space_char": True,
            "max_text_length": self.text_len
        }
        if self.rec_algorithm == "CRNN":
            char_ops_params['loss_type'] = 'ctc'
            self.loss_type = 'ctc'
        else:
            # BUG FIX: the exception object was created but never raised,
            # so unsupported algorithms fell through silently.
            raise NameError(f"{self.rec_algorithm} is not supported")
        self.char_ops = CharacterOps(char_ops_params)

    def resize_norm_img(self, img, max_wh_ratio):
        """Resize *img* to the model height and normalize to [-1, 1].

        Args:
            img: HxWxC uint8 image (C must match rec_image_shape[0]).
            max_wh_ratio: widest w/h ratio of the batch; currently unused
                because the fixed-width padding path is commented out.

        Returns:
            float32 array of shape (C, imgH, resized_w), values in [-1, 1].
        """
        imgC, imgH, imgW = self.rec_image_shape
        assert imgC == img.shape[2]
        h, w = img.shape[:2]
        ratio = w / float(h)
        # Keep the aspect ratio; width varies per image (no padding).
        resized_w = int(math.ceil(imgH * ratio))
        resized_image = cv2.resize(img, (resized_w, imgH))
        resized_image = resized_image.astype('float32')
        # HWC -> CHW, scale to [0, 1], then shift/scale to [-1, 1].
        resized_image = resized_image.transpose((2, 0, 1)) / 255
        resized_image -= 0.5
        resized_image /= 0.5
        return resized_image

    def __call__(self, img_list):
        """Recognize a list of text-line crops.

        Args:
            img_list: list of HxWx3 images (OpenCV BGR layout).

        Returns:
            (rec_res, predict_time) where rec_res is aligned with
            img_list and holds [text, score] pairs (['', 0.0] when nothing
            was decoded), and predict_time is total inference seconds.
        """
        print("----->")
        img_num = len(img_list)
        # Aspect ratio of every crop; sorting by it groups similar widths
        # into the same batch, which speeds up recognition.
        width_list = []
        for img in img_list:
            width_list.append(img.shape[1] / float(img.shape[0]))
        indices = np.argsort(np.array(width_list))

        # BUG FIX: `[['', 0.0]] * n` aliases one inner list at every slot;
        # a comprehension creates independent defaults.
        rec_res = [['', 0.0] for _ in range(img_num)]
        batch_num = self.rec_batch_num
        predict_time = 0
        for beg_img_no in range(0, img_num, batch_num):
            end_img_no = min(img_num, beg_img_no + batch_num)
            norm_img_batch = []
            max_wh_ratio = 0
            # Widest ratio in this batch (kept for the padding path even
            # though resize_norm_img currently ignores it).
            for ino in range(beg_img_no, end_img_no):
                h, w = img_list[indices[ino]].shape[0:2]
                wh_ratio = w * 1.0 / h
                max_wh_ratio = max(max_wh_ratio, wh_ratio)
            for ino in range(beg_img_no, end_img_no):
                if self.loss_type == "ctc":
                    norm_img = self.resize_norm_img(img_list[indices[ino]],
                                                    max_wh_ratio)
                    norm_img = norm_img[np.newaxis, :]
                    norm_img_batch.append(norm_img)

            norm_img_batch = np.concatenate(norm_img_batch, axis=0)
            norm_img_batch = norm_img_batch.copy()

            starttime = time.time()
            if self.use_zero_copy_run:
                self.input_tensor.copy_from_cpu(norm_img_batch)
                self.predictor.zero_copy_run()
            else:
                norm_img_batch = fluid.core.PaddleTensor(norm_img_batch)
                self.predictor.run([norm_img_batch])

            if self.loss_type == "ctc":
                # Output 0: decoded character ids (LoD-batched);
                # output 1: per-step class probabilities (LoD-batched).
                rec_idx_batch = self.output_tensors[0].copy_to_cpu()
                rec_idx_lod = self.output_tensors[0].lod()[0]
                predict_batch = self.output_tensors[1].copy_to_cpu()
                predict_lod = self.output_tensors[1].lod()[0]
                elapse = time.time() - starttime
                predict_time += elapse
                for rno in range(len(rec_idx_lod) - 1):
                    beg = rec_idx_lod[rno]
                    end = rec_idx_lod[rno + 1]
                    rec_idx_tmp = rec_idx_batch[beg:end, 0]
                    preds_text = self.char_ops.decode(rec_idx_tmp)
                    beg = predict_lod[rno]
                    end = predict_lod[rno + 1]
                    probs = predict_batch[beg:end, :]
                    ind = np.argmax(probs, axis=1)
                    # Last class is the CTC blank; score only non-blank steps.
                    blank = probs.shape[1]
                    valid_ind = np.where(ind != (blank - 1))[0]
                    if len(valid_ind) == 0:
                        continue
                    score = np.mean(probs[valid_ind, ind[valid_ind]])
                    # Undo the aspect-ratio sort so results align with input.
                    rec_res[indices[beg_img_no + rno]] = [preds_text, score]

        return rec_res, predict_time


# Fullwidth (CJK) characters and their ASCII equivalents, in lockstep order.
# BUG FIX: the old list had 'ｗ' twice and no 'ｘ', so fullwidth 'ｘ' was
# never converted. ('％' appearing twice is harmless: both map to '%'.)
_FULLWIDTH_TO_ASCII = str.maketrans(
    '！（）：，．［］；％０１２３４５６７８９－'
    'ＡＢＣＤＥＦＧＨＩＪＫＬＭＮＯＰＱＲＳＴＵＶＷＸＹＺ'
    'ａｂｃｄｅｆｇｈｉｊｋｌｍｎｏｐｑｒｓｔｕｖｗｘｙｚ￥',
    '!():,.[];%0123456789-'
    'ABCDEFGHIJKLMNOPQRSTUVWXYZ'
    'abcdefghijklmnopqrstuvwxyz¥')


def conver_label(gt):
    """Normalize a ground-truth label for comparison.

    Removes ALL whitespace (not just the ends) and converts fullwidth
    punctuation, digits, and Latin letters to their ASCII equivalents in a
    single translate pass.

    Args:
        gt: raw label string.

    Returns:
        The normalized label.
    """
    gt = ''.join(gt.split())
    return gt.translate(_FULLWIDTH_TO_ASCII)



if __name__ == "__main__":

    paddle_ocr = PaddleTextRecognizer()
    lists = ['new_pp_labeled.txt', 'new_train_ticket_labeled.txt', 'new_yyzz_labeled.txt', 'new_zp_labeled.txt', 'new_de_labeled.txt']
    # 选择不同票种的txt数据
    org_path = '/data2/dtj/DTJCOMP/ppccddff/'
    for txt in lists:
        path = os.path.join(org_path, txt)
        print(txt)
        # 保存都正确的结果作为测试集,与原始txt文件同名
        out_txt = open(os.path.join(org_path, 'Paddle_Checked_' + txt), 'w')
        images = [line.strip() for line in open_txt(path)]
        for item in images:
            img_path = item.split('\t')[0]
            paddle_res = ''
            try:
                # gt = str(item.split('\t')[0].split('/')[-1][:-6])
                img = cv2.imread(img_path)
                gt = str(item.split('\t')[1])
                paddle_res = paddle_ocr([img])[0][0][0]
            except:
                gt = ''

            if paddle_res and paddle_res == gt:
                out_txt.writelines(img_path + '\t' + gt + '\n')




