# -*- encoding: utf-8 -*-
# author:lmolhw
# datetime:2021-1-20 10:40

"""
文件说明：
        调用PaddleOCR-DB+CRNN对票据数据进行预标注
"""
import paddle.fluid as fluid
import numpy as np
import time
import threading
import string
import argparse
import torch
from torch.autograd import Variable
import torch.backends.cudnn as cudnn
import torch.utils.data
import torchvision.transforms as transforms
import sys
import os
from PIL import Image
import math

if __name__ == "__main__":
    sys.path.insert(0, '/data1/lhw/workspace/OCR-30-mjh/code/Detection/DB')

    from utility import create_predictor
    from DBNet.db_process import DBProcessTest
    from DBNet.db_postprocess import DBPostProcess
    from utils import sort_boxes
    os.environ["CUDA_VISIBLE_DEVICES"] = '0'

    sys.path.insert(0, '/data1/lhw/workspace/OCR-30-mjh/code/Detection/PaddleOCR')
    from utility import create_predictor
    from character import CharacterOps

    sys.path.insert(0, '/data1/lhw/workspace/OCR-30-mjh/code/')
    # from Detection.YOLO.predict import OCRDetector as YOLO_Detector


    # sys.path.insert(0, "/data1/lhw/workspace/OCR-30-mjh/code/OCR/V1")
    # print(sys.path)
    from config import ConfigOpt
    from model import Model
    from infer import OcrRec
    import logging
    import math
    from nltk.metrics.distance import edit_distance
#
else:
    from .utility import create_predictor
    from .DBNet.db_process import DBProcessTest
    from .DBNet.db_postprocess import DBPostProcess
    from .utils import sort_boxes


class OCRDetector(object):
    """DB-based text detector.

    Wraps a Paddle DB inference model: preprocess (resize/normalize), run the
    predictor, post-process the probability map into quadrilateral text boxes,
    filter degenerate boxes, and return them sorted in reading order.
    """

    def __init__(self):
        self.locker = threading.Lock()
        max_side_len = 960
        preprocess_params = {'max_side_len': max_side_len}
        # resize + normalize
        self.preprocess_op = DBProcessTest(preprocess_params)

        postprocess_params = {
            "thresh": 0.3,           # binarization threshold on the probability map
            "box_thresh": 0.5,       # minimum box score to keep
            "max_candidates": 1000,  # cap on candidate boxes per image
            "unclip_ratio": 1.6,     # box expansion ratio
        }
        self.postprocess_op = DBPostProcess(postprocess_params)
        self.use_zero_copy_run = False
        detection_model_dir = "/data1/mjh/OCR-30-mjh/models/DB/ch_ppocr_server_v1.1_det_infer"
        self.predictor, self.input_tensor, self.output_tensors = create_predictor(
            detection_model_dir,
            mode="det", pre_cache=3000,
            gpu_device=0)

    @staticmethod
    def order_points_clockwise(pts):
        """Order 4 points as top-left, top-right, bottom-right, bottom-left.

        Splits the points into the two left-most and two right-most by x,
        then orders each pair by y.
        """
        x_sorted = pts[np.argsort(pts[:, 0]), :]
        left_most = x_sorted[:2, :]
        right_most = x_sorted[2:, :]

        left_most = left_most[np.argsort(left_most[:, 1]), :]
        (tl, bl) = left_most

        right_most = right_most[np.argsort(right_most[:, 1]), :]
        (tr, br) = right_most

        return np.array([tl, tr, br, bl], dtype="float32")

    @staticmethod
    def clip_det_res(points, img_height, img_width):
        """Clamp box corner coordinates into the image bounds, in place."""
        for pno in range(points.shape[0]):
            points[pno, 0] = int(min(max(points[pno, 0], 0), img_width - 1))
            points[pno, 1] = int(min(max(points[pno, 1], 0), img_height - 1))
        return points

    def filter_tag_det_res(self, dt_boxes, image_shape):
        """Order, clip, and drop boxes narrower/shorter than 3 px."""
        img_height, img_width = image_shape[0:2]
        dt_boxes_new = []
        for box in dt_boxes:
            box = self.order_points_clockwise(box)
            box = self.clip_det_res(box, img_height, img_width)
            rect_width = int(np.linalg.norm(box[0] - box[1]))
            rect_height = int(np.linalg.norm(box[0] - box[3]))
            if rect_width <= 3 or rect_height <= 3:
                # degenerate box, almost certainly noise
                continue
            dt_boxes_new.append(box)
        return np.array(dt_boxes_new)

    def filter_tag_det_res_only_clip(self, dt_boxes, image_shape):
        """Clip boxes to the image bounds without reordering or filtering."""
        img_height, img_width = image_shape[0:2]
        dt_boxes_new = []
        for box in dt_boxes:
            dt_boxes_new.append(self.clip_det_res(box, img_height, img_width))
        return np.array(dt_boxes_new)

    def get_bbox(self, img):
        """Detect text regions in ``img``.

        Args:
            img: BGR ndarray, or a PIL image (converted to BGR here).

        Returns:
            Array of 4-point boxes sorted in reading order, or ``None`` when
            preprocessing fails.
        """
        if isinstance(img, Image.Image):
            img = np.array(img)
            # PIL gives RGB; the model expects BGR
            img = img[:, :, [2, 1, 0]]

        ori_im = img.copy()
        im, ratio_list = self.preprocess_op(img)
        if im is None:
            # BUG FIX: this path used to return the tuple (None, 0) while the
            # success path returned a single value; unify on a single None so
            # callers can test one value.
            return None
        im = im.copy()
        if self.use_zero_copy_run:
            self.input_tensor.copy_from_cpu(im)
            self.predictor.zero_copy_run()
        else:
            self.predictor.run([fluid.core.PaddleTensor(im)])
        outputs = [tensor.copy_to_cpu() for tensor in self.output_tensors]
        # map the predicted boxes back to original-image coordinates
        dt_boxes_list = self.postprocess_op({'maps': outputs[0]}, [ratio_list])
        dt_boxes = self.filter_tag_det_res(dt_boxes_list[0], ori_im.shape)
        return sort_boxes(dt_boxes)

        # return dt_boxes, elapse

def conver_label(gt):
    """Normalize a label string for comparison/storage.

    Removes all whitespace and converts fullwidth (CJK-compatibility)
    punctuation, digits, and Latin letters to their ASCII halfwidth
    equivalents (e.g. 'Ａ' -> 'A', '１' -> '1', '￥' -> '¥').

    Args:
        gt: raw label text.

    Returns:
        The normalized string.
    """
    # BUG FIX: the original mapping listed fullwidth 'ｗ' twice where the
    # second entry should have been 'ｘ', so fullwidth 'ｘ' was never
    # converted. The paired strings below keep the original order (including
    # the duplicated '％', which maps to the same '%' either way).
    fullwidth = ('！（）：，．［］；％０１２３４５６７８９％－'
                 'ＡＢＣＤＥＦＧＨＩＪＫＬＭＮＯＰＱＲＳＴＵＶＷＸＹＺ'
                 'ａｂｃｄｅｆｇｈｉｊｋｌｍｎｏｐｑｒｓｔｕｖｗｘｙｚ￥')
    halfwidth = ('!():,.[];%0123456789%-'
                 'ABCDEFGHIJKLMNOPQRSTUVWXYZ'
                 'abcdefghijklmnopqrstuvwxyz¥')
    table = str.maketrans(fullwidth, halfwidth)
    # drop every whitespace run (split + join removes spaces, tabs, newlines)
    gt = ''.join(gt.split())
    return gt.translate(table)



class PaddleTextRecognizer(object):
    """CRNN text recognizer backed by a Paddle inference model.

    Call the instance with a list of cropped text-line images (BGR ndarrays)
    to get ``(results, predict_time)`` where ``results[i]`` is
    ``[text, score]`` aligned with input image ``i``.
    """

    def __init__(self):
        self.predictor, self.input_tensor, self.output_tensors = create_predictor(
            '/data1/mjh/OCR-30-mjh/models/CRNN/Paddle/ch_ppocr_server_v1.1_rec_infer', mode="rec", pre_cache=3000,
            gpu_device=0)
        self.use_zero_copy_run = False
        self.rec_image_shape = [3, 32, 320]  # C, H, W expected by the model
        self.character_type = 'ch'
        self.rec_batch_num = 4
        self.rec_algorithm = 'CRNN'
        # NOTE(review): '25' is a string here although max_text_length is
        # usually an int — confirm CharacterOps tolerates this before changing.
        self.text_len = '25'
        char_ops_params = {
            "character_type": self.character_type,
            "character_dict_path": '/data1/mjh/OCR-30-mjh/models/CRNN/Paddle/ppocr_keys_v1.txt',
            "use_space_char": True,
            "max_text_length": self.text_len
        }
        if self.rec_algorithm == "CRNN":
            char_ops_params['loss_type'] = 'ctc'
            self.loss_type = 'ctc'
        else:
            # BUG FIX: the exception object was created but never raised,
            # silently continuing with an unconfigured loss type.
            raise NameError(f"{self.rec_algorithm} is not supported")
        self.char_ops = CharacterOps(char_ops_params)

    def resize_norm_img(self, img, max_wh_ratio):
        """Resize ``img`` to the model height (keeping aspect ratio) and
        normalize pixel values to [-1, 1].

        Returns:
            CHW float32 array.
        """
        imgC, imgH, imgW = self.rec_image_shape
        assert imgC == img.shape[2]
        h, w = img.shape[:2]
        ratio = w / float(h)
        resized_w = int(math.ceil(imgH * ratio))
        resized_image = cv2.resize(img, (resized_w, imgH))
        resized_image = resized_image.astype('float32')
        # HWC -> CHW, scale to [0, 1] then shift to [-1, 1]
        resized_image = resized_image.transpose((2, 0, 1)) / 255
        resized_image -= 0.5
        resized_image /= 0.5
        return resized_image

    def __call__(self, img_list):
        """Recognize a batch of cropped text-line images.

        Args:
            img_list: list of HWC BGR ndarrays.

        Returns:
            (rec_res, predict_time): ``rec_res[i]`` is ``[text, score]`` in
            the same order as ``img_list``; ``predict_time`` is the summed
            inference time in seconds.
        """
        img_num = len(img_list)
        # Sort by aspect ratio so each batch contains similarly shaped crops,
        # which speeds up recognition; results are written back via `indices`.
        width_list = [img.shape[1] / float(img.shape[0]) for img in img_list]
        indices = np.argsort(np.array(width_list))

        rec_res = [['', 0.0]] * img_num
        batch_num = self.rec_batch_num
        predict_time = 0
        for beg_img_no in range(0, img_num, batch_num):
            end_img_no = min(img_num, beg_img_no + batch_num)
            norm_img_batch = []
            max_wh_ratio = 0
            for ino in range(beg_img_no, end_img_no):
                h, w = img_list[indices[ino]].shape[0:2]
                max_wh_ratio = max(max_wh_ratio, w * 1.0 / h)
            for ino in range(beg_img_no, end_img_no):
                if self.loss_type == "ctc":
                    norm_img = self.resize_norm_img(img_list[indices[ino]],
                                                    max_wh_ratio)
                    norm_img_batch.append(norm_img[np.newaxis, :])

            norm_img_batch = np.concatenate(norm_img_batch, axis=0).copy()

            starttime = time.time()
            if self.use_zero_copy_run:
                self.input_tensor.copy_from_cpu(norm_img_batch)
                self.predictor.zero_copy_run()
            else:
                norm_img_batch = fluid.core.PaddleTensor(norm_img_batch)
                self.predictor.run([norm_img_batch])

            if self.loss_type == "ctc":
                rec_idx_batch = self.output_tensors[0].copy_to_cpu()
                rec_idx_lod = self.output_tensors[0].lod()[0]
                predict_batch = self.output_tensors[1].copy_to_cpu()
                predict_lod = self.output_tensors[1].lod()[0]
                predict_time += time.time() - starttime
                for rno in range(len(rec_idx_lod) - 1):
                    # the rno-th sequence of this batch lives in the LoD span
                    beg = rec_idx_lod[rno]
                    end = rec_idx_lod[rno + 1]
                    preds_text = self.char_ops.decode(rec_idx_batch[beg:end, 0])
                    beg = predict_lod[rno]
                    end = predict_lod[rno + 1]
                    probs = predict_batch[beg:end, :]
                    ind = np.argmax(probs, axis=1)
                    blank = probs.shape[1]  # last class index is the CTC blank
                    valid_ind = np.where(ind != (blank - 1))[0]
                    if len(valid_ind) == 0:
                        # all timesteps predicted blank; keep the default ['', 0.0]
                        continue
                    score = np.mean(probs[valid_ind, ind[valid_ind]])
                    rec_res[indices[beg_img_no + rno]] = [preds_text, score]

        return rec_res, predict_time


class CTCLabelConverter(object):
    """Bidirectional mapping between text labels and CTC index sequences."""

    def __init__(self, character):
        """``character``: iterable of the possible characters."""
        chars = list(character)
        # index 0 is reserved for the 'blank' token required by CTCLoss,
        # so real characters start at index 1
        self.dict = {ch: idx for idx, ch in enumerate(chars, start=1)}
        self.character = ['[blank]'] + chars  # dummy '[blank]' at index 0

    def encode(self, text):
        """Convert a batch of labels into CTC training targets.

        Args:
            text: list of label strings, one per image.

        Returns:
            (indices, lengths): a single IntTensor of all character indices
            concatenated across the batch, and an IntTensor with the length
            of each label.
        """
        lengths = [len(label) for label in text]
        indices = [self.dict[ch] for label in text for ch in label]
        return (torch.IntTensor(indices), torch.IntTensor(lengths))

    def decode(self, text_index, length):
        """Convert index sequences back to strings.

        Standard CTC collapse: drop blanks (index 0) and merge consecutive
        repeated indices within each per-image segment of ``text_index``.
        """
        texts = []
        offset = 0
        for seg_len in length:
            segment = text_index[offset:offset + seg_len]
            offset += seg_len

            decoded = []
            prev = 0
            for idx in segment:
                # emit only non-blank indices that differ from the previous one
                if idx != 0 and idx != prev:
                    decoded.append(self.character[idx])
                prev = idx
            texts.append(''.join(decoded))
        return texts

class InferResizeNormalize(object):
    """Resize a PIL image to a fixed (width, height) and normalize it to a
    tensor with values in [-1, 1]."""

    # BUG FIX: Image.ANTIALIAS was removed in Pillow 10; Image.LANCZOS is the
    # identical filter and has been available since Pillow 2.7.
    def __init__(self, size, interpolation=Image.LANCZOS):
        self.size = size                # target (width, height)
        self.interpolation = interpolation
        self.toTensor = transforms.ToTensor()

    def __call__(self, img):
        img = img.resize(self.size, self.interpolation)
        img = self.toTensor(img)        # HWC uint8 [0,255] -> CHW float [0,1]
        img.sub_(0.5).div_(0.5)         # [0,1] -> [-1,1], in place
        return img

if __name__ == "__main__":
    import cv2

    os.environ["CUDA_VISIBLE_DEVICES"] = "3"
    image_path = '/data2/ocrdata/hb20xx'
    out_path = '/data2/ocrdata/hb20xx/DB_Paddle_out_0.98'
    Out_labels = open('/data2/ocrdata/hb20xx/DB_Paddle_out_0.98/labels.txt', 'w')
    image_list = os.listdir(image_path)
    # lists = ['Android', 'Apple']
    # lists = ['北京账号201804-06', '北京账号201807-09', '北京账号201810-12']
    lists = ['2018-01-01至2018-06-30湖北', '2018-07-01至2018-12-31湖北', '2019-01-01至2019-06-30湖北', '2019-07-01至2019-12-31湖北', '2020-01-01至2020-07-31湖北']
    paddle_det = OCRDetector()
    # ocr_detector = YOLO_Detector()
    paddle_ocr = PaddleTextRecognizer()
    """
        文件结构：
            第一层：2018-01-01至2018-06-30湖北
            第二层：10101201807000083
            第三层：IMG
    """
    for item in lists:
        per_folder = os.path.join(image_path, item)

        # 判断OUT文件夹是否存在，不存在创建
        OUT_folder = os.path.join(out_path, item)
        if not os.path.exists(OUT_folder):
            os.makedirs(OUT_folder)
        Per_Folder_count = 0
        for son_file in os.listdir(per_folder):
            son_folder = os.path.join(per_folder, son_file)
            for GrandSon_file in os.listdir(son_folder):
                img_path = os.path.join(son_folder, GrandSon_file)
                img = cv2.imread(img_path)
                res = paddle_det.get_bbox(img)
                for item in res:
                    print(Per_Folder_count)
                    x1, x2, y1, y2 = int(min(item[:, 0])), int(max(item[:, 0])), int(min(item[:, 1])), int(max(item[:, 1]))
                    tmp = img[int(y1):int(y2), int(x1):int(x2)]
                    paddle_res = paddle_ocr([tmp])
                    Paddle_Pre = conver_label(paddle_res[0][0][0])
                    Paddle_Threds = paddle_res[0][0][1]
                    if Paddle_Threds > 0.98:
                        out_img_path = os.path.join(OUT_folder, str(Per_Folder_count) + '.jpg')
                        cv2.imwrite(out_img_path, tmp)
                        # print(paddle_res)
                        OUT_line = out_img_path + '\t' + Paddle_Pre + '\n'
                        # print(OUT_line)
                        Out_labels.writelines(OUT_line)
                        Per_Folder_count += 1

    # img = '/home/lhw/workspace/mtl-text-recognition/demo_images/tmp.png'
    # img = cv2.imread(img)
    # paddle_res = paddle_ocr([img])
    # print(paddle_res)
    # for image_file in image_list:
    #     print(image_file)
    #     img = cv2.imread(os.path.join(image_path, image_file))
    #     txt = open(os.path.join(out_path, image_file.replace('jpg', '') + '.txt'), 'w')
    #     # img = Image.open("/data1/lhw/workspace/OCR_Test/发票/20190225_163812.jpg")
    #
    #     paddle_res = paddle_ocr([tmp])
    #     opt = ConfigOpt()
    #     opt.saved_model = 'saved_models/12-24_VGG_Train_FT/mtl_best_accuracy.pth'
    #     opt.imgW = 640
    #     opt.rgb = None
    #     opt.FeatureExtraction = 'VGG'  # VGG|RCNN|ResNet|MobileNetV3|DenseNet|CNN_Lite3
    #     ocr_rec = OcrRec(opt=opt)
    #
    #
    #     # img_crop_list = paddle_ocr.classify(img, dt_boxes)
    #     res = paddle_det.get_bbox(img)
    #     # print(res)
    #     for item in res:
    #         # print(item)
    #         x1, x2, y1, y2 = int(min(item[:, 0])), int(max(item[:, 0])), int(min(item[:, 1])), int(max(item[:, 1]))
    #         tmp = img[int(y1):int(y2), int(x1):int(x2)]
    #         paddle_res = paddle_ocr([tmp])
    #         # print(paddle_res[0])
    #         out = ','.join([str(x1), str(x2), str(y1), str(y2), paddle_res[0][0][0]]) + '\n'
    #         txt.writelines(out)
    #         V1_res = ocr_rec.text_rec(tmp)
    #         print(V1_res)



    # 从GT TXT中读取检测坐标and Paddle识别的结果，进行对比，若结果相同，则保存Crop并保存GT
    # image_path = '/data2/ocrdata/hb7021'
    #
    # gt_path = '/data2/ocrdata/hb7021/DB_Paddle_out'
    #
    # save_path = '/data2/ocrdata/hb7021/Croped_Checked_threshold_0.9'
    # save_txt = open(os.path.join(save_path, 'labels.txt'), 'w')
    #
    # gt_list = os.listdir(gt_path)
    # count = 0
    # all_count = 0
    # for gt_name in gt_list:
    #     print(gt_name)
    #     gt_file = open(os.path.join(gt_path, gt_name.strip()))
    #     image_file = os.path.join(image_path, gt_name[:-3].strip() + 'jpg')
    #     image = cv2.imread(image_file)
    #
    #     for line in gt_file.readlines():
    #         x1, x2, y1, y2, gt = line.split(',')[0], line.split(',')[1], line.split(',')[2], line.split(',')[3], \
    #                              line.split(',')[4].strip()
    #         img = image[int(y1):int(y2), int(x1):int(x2)]
    #         try:
    #             res_text = paddle_ocr([img])
    #             gt = conver_label(gt)
    #             ratio = (int(x2) - int(x1)) / (int(y2) - int(y1))
    #             save_img_path = os.path.join(save_path, str(count) + '.jpg')
    #             # if gt == res_text and ratio > 1.5 and gt:
    #             #     save_img_path = os.path.join(save_path, str(count) + '.jpg')
    #             #     cv2.imwrite(save_img_path, img)
    #             #     save_txt.writelines(save_img_path + '\t' + gt + '\n')
    #             #     print(count, gt, res_text)
    #             if res_text[0][0][1] > 0.9 and ratio > 1.5 and gt:
    #                 save_txt.writelines(save_img_path + '\t' + gt + '\n')
    #                 cv2.imwrite(save_img_path, img)
    #                 count += 1
    #                 print(count, gt, res_text)
    #         except:
    #             pass
    #     all_count += 1
    # print(all_count)
    # bbox = np.array([[1503, 558], [1747, 621], [1730, 688], [1486, 634]])
    # x1, x2, y1, y2 = min(bbox[:, 0]), max(bbox[:, 0]), min(bbox[:, 1]), max(bbox[:, 1])
