import glob
import math
import os
import time

import cv2
import numpy as np
import torch
import yaml
from edit_distance import edit_distance
from tqdm import tqdm

from model.crnn import CRNN
from utils import ConfigDict, CTCLabel, parse_version

# Run on GPU when available; every model/tensor placement below uses this device.
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")


class Recognizer_eval():
    """Thin inference wrapper around a CRNN-style OCR model for evaluation.

    Holds the model, the CTC label codec and the recognition config, and
    provides image pre-processing (`read_process_img`) plus batched decoding
    (`process_batch`).
    """

    def __init__(self, model, ctc_label, config) -> None:
        # `config` must expose `img_width` / `img_height` (ConfigDict from utils).
        self.config_rec = config
        self.ctc_label = ctc_label
        self.ocr_model = model

    def read_process_img(self, img_path):
        """Load grayscale images and pack them into one normalized batch.

        :param img_path: list of image file paths.
        :return: float array of shape (N, H, W, 1) with values roughly in
                 [-1, 1]; images narrower than the target width after
                 aspect-preserving rescale are left-aligned and zero-padded.
        :raises FileNotFoundError: if an image cannot be read.
        """
        img_size = (self.config_rec.img_width, self.config_rec.img_height)
        x_batch = np.zeros([len(img_path), img_size[1], img_size[0], 1])
        for idx, path in enumerate(img_path):
            img = cv2.imread(path, cv2.IMREAD_GRAYSCALE)
            if img is None:
                # Fail loudly here instead of crashing later with an opaque
                # AttributeError on `.shape`.
                raise FileNotFoundError(f'cannot read image: {path}')
            h, w = img.shape
            hnew = img_size[1]
            # Scale width so the aspect ratio is kept at the target height.
            wnew = int(1.0 * hnew / h * w)
            if img_size[0] > wnew:
                img = cv2.resize(img, (wnew, hnew), interpolation=cv2.INTER_CUBIC)
                x_batch[idx, :, :wnew, 0] = (img[:, :] - 127.0) / 128.0
            else:
                # Still too wide after rescale: squash to the full target size.
                img = cv2.resize(img, img_size, interpolation=cv2.INTER_CUBIC)
                x_batch[idx, :, :, 0] = (img[:, :] - 127.0) / 128.0
        return x_batch

    def process_batch(self, x_batch, show_time=False):
        """Run the model on a pre-processed batch and CTC-decode the output.

        :param x_batch: array of shape (N, H, W, 1) from `read_process_img`.
        :param show_time: kept for interface compatibility (unused).
        :return: (decoded_text_list, forward_pass_seconds)
        """
        x_batch = torch.from_numpy(np.array(x_batch))
        x_batch = x_batch.permute(0, 3, 1, 2).float()  # NHWC -> NCHW
        x_batch = x_batch.to(device)
        batch_size = x_batch.size(0)
        start_time = time.time()
        # Inference only: no_grad avoids building the autograd graph and the
        # associated memory growth during evaluation.
        with torch.no_grad():
            pred_prob = self.ocr_model(x_batch)
        end_time = time.time()
        # Greedy decoding: per-timestep argmax, flattened for CTCLabel.decode.
        preds = pred_prob.argmax(2).view(-1).cpu().numpy()
        # Assumes pred_prob is (batch, seq_len, num_classes) — seq_len per item.
        pred_size = [pred_prob.size(1)] * batch_size
        preds_txt = self.ctc_label.decode(preds, pred_size)
        return preds_txt, end_time - start_time


def eval_char_acc(dict_file, gt, pred, tolerance_edit_distance: list, p_list, save_file=None):
    """Compute line accuracy (at several edit-distance tolerances) and 1 - NED.

    :param dict_file: path to the character dictionary, one symbol per line.
    :param gt: list of ground-truth label strings.
    :param pred: list of predicted strings (same length as `gt`).
    :param tolerance_edit_distance: list of tolerances; a line counts as
        correct for tolerance t when its edit distance is <= t.
    :param p_list: image path per sample, used as key in the detail report.
    :param save_file: optional path for a per-sample detail report.
    :return: (list of line accuracies, one per tolerance; 1 - normalized
        edit distance over all counted characters)
    """
    assert len(gt) == len(pred)
    cnt = [0] * len(tolerance_edit_distance)
    ned = 0
    total_char_num = 0
    with open(dict_file, 'r', encoding='utf-8') as f_r:
        # set: O(1) membership tests instead of an O(len(dict)) list scan
        # per ground-truth character.
        _dict = {item.rstrip('\n') for item in f_r}
    info = {}
    for i in range(len(gt)):
        # Keep only ground-truth characters the model can actually emit
        # (i.e. characters present in the dictionary).
        clean_label_without_unknown = ''.join(ch for ch in gt[i] if ch in _dict)

        # Strip every 'UNKNOWN' marker from the prediction. Removal is
        # repeated (not a single str.replace) so that markers which become
        # contiguous after a removal are stripped as well.
        pred_without_unknown = pred[i]
        index = pred_without_unknown.find('UNKNOWN')
        while index >= 0:
            pred_without_unknown = pred_without_unknown[:index] + pred_without_unknown[index + len('UNKNOWN'):]
            index = pred_without_unknown.find('UNKNOWN')

        if len(clean_label_without_unknown) != 0:
            # Case-insensitive edit distance; only these samples contribute
            # to the character total.
            ed = edit_distance(pred_without_unknown.lower(), clean_label_without_unknown.lower())[0]
            total_char_num += len(gt[i])
        else:
            ed = 0
        ned += ed
        for j, ted in enumerate(tolerance_edit_distance):
            if ed <= ted:
                cnt[j] += 1
        info[p_list[i]] = [ed, gt[i], pred[i]]
    # Sort the detail report by edit distance, best samples first.
    info = dict(sorted(info.items(), key=lambda kv: kv[1][0]))
    if save_file is not None:
        with open(save_file, 'w', encoding='utf-8') as f_w:
            for k, v in info.items():
                f_w.write('{} ed:{} gt:{} pred:{}\n'.format(k, v[0], v[1], v[2]))
    # Guard against empty input / all-out-of-dictionary labels, which used
    # to raise ZeroDivisionError.
    acc = [c * 1.0 / len(gt) for c in cnt] if gt else [0.0] * len(cnt)
    ned_avg = ned * 1.0 / total_char_num if total_char_num else 0.0
    return acc, 1 - ned_avg


def Eval(model, ctc_label, config, eval_dir, eval_gt, tolerance_edit_distance, save_file=None, batch_size=128):
    """Evaluate `model` on the dataset described by `eval_gt`.

    :param eval_gt: text file, one sample per line: "<relative_img_path> <label>"
        (the label itself may contain spaces).
    :param batch_size: inference batch size (was hard-coded at 128; the
        default keeps the old behavior).
    :param save_file: optional per-sample detail report path.
    :return: (line_acc_per_tolerance, char_acc, ms_per_image)
    """
    img_path = []
    img_label = []
    with open(eval_gt, 'r', encoding='utf-8') as f_r:
        for line in f_r:  # iterate lazily instead of readlines()
            line = line.rstrip('\n')
            if not line:
                # A blank line used to yield an empty image path and crash
                # later inside cv2.imread.
                continue
            img_path.append(os.path.join(eval_dir, line.split(' ')[0]))
            # Everything after the first space is the label.
            space_index = line.find(' ')
            img_label.append(line[space_index + 1:])
    rec = Recognizer_eval(model, ctc_label, config)
    assert len(img_label) == len(img_path)
    batch_num = math.ceil(len(img_label) / batch_size)
    sum_gt = []
    sum_pred = []
    batch_time_lst = []
    p_list = []
    for i in tqdm(range(batch_num)):
        lo, hi = i * batch_size, (i + 1) * batch_size
        x_batch = rec.read_process_img(img_path[lo:hi])
        rst, batch_time = rec.process_batch(x_batch)
        sum_gt.extend(img_label[lo:hi])
        sum_pred.extend(rst)
        batch_time_lst.append(batch_time)
        p_list.extend(img_path[lo:hi])
    line_acc, ned = eval_char_acc(config.dict_file, sum_gt, sum_pred, tolerance_edit_distance, p_list, save_file)
    if not img_label:
        # Empty dataset: report zero speed instead of ZeroDivisionError.
        return line_acc, ned, 0.0
    return line_acc, ned, sum(batch_time_lst) / len(img_label) * 1000  # ms/image


def main_eval(model_type: str, model: str, test_sets: dict, tolerance_edit_distance: list, eval_result_save_dir: str):
    """
    Evaluate one checkpoint on every test set and write summary/detail reports.

    :param model_type: one of ['origin', 'origin_light', 'light_new_v1', 'light_new_v2', 'light_new_v3', 'light_v1', 'light_v2', 'light_v3', 'cnc', ...]
    :param model: path of the .pth checkpoint to evaluate.
    :param test_sets: {set_name: (image_dir, gt_file)}.
    :param tolerance_edit_distance: edit-distance tolerances for line accuracy.
    :param eval_result_save_dir: directory for summary files; per-sample
        details go to a sibling "<dir>_detail" directory.
    :return: None
    """
    print(f'\n\n====== model type: {model_type} model: {model}, tolerance_edit_distance: {tolerance_edit_distance}')

    # Select the config file by model type.
    if model_type == 'origin':
        cfg_file = './Config/v3.yaml'
    elif 'light' in model_type:
        cfg_file = 'Config/v4.3_light.yaml'
    elif model_type == "cnc":
        cfg_file = "Config/cnc.yaml"
    else:
        raise Exception(f"Check configuration file, parameter 'model_type'={model_type} is invalid!!!")
    # NOTE: removed a debug leftover that unconditionally overwrote cfg_file
    # with "Config/cnc.yaml", which made the dispatch above dead code.
    print(f"config file: {cfg_file}")
    with open(cfg_file, encoding='utf-8') as f_cfg:  # close the config handle
        config = ConfigDict(yaml.load(f_cfg, Loader=yaml.FullLoader))

    # NOTE(review): only CRNN is imported from model.crnn in this file; the
    # CRNN_OCR* constructors below must come from elsewhere — confirm imports.
    if model_type in ['origin', 'origin_light']:
        ocr_model = CRNN_OCR(config.input_channel, config.num_class).to(device)
    elif model_type in ['light_new_v1', 'light_new_v2', 'light_new_v3']:
        ocr_model = CRNN_OCR_light(config.input_channel, config.num_class, parse_version(model_type)).to(device)
    elif model_type in ['light_v1', 'light_v2', 'light_v3']:
        ocr_model = CRNN_OCR_light_old(config.input_channel, config.num_class, parse_version(model_type)).to(device)
    elif model_type == "cnc":
        ocr_model = CRNN_OCR_for_cnc(config.input_channel, config.num_class).to(device)
    else:
        # Balanced the quotes in this message (the original left one open).
        raise Exception(f"Check configuration file, parameter 'model_type'={model_type} is invalid!!!")
    ocr_model.load_state_dict(torch.load(model, map_location=device))
    ocr_model.eval()
    ctc_label = CTCLabel(config.dict_file)

    eval_result_detail_dir = eval_result_save_dir.rstrip('/') + "_detail"
    os.makedirs(eval_result_save_dir, exist_ok=True)
    os.makedirs(eval_result_detail_dir, exist_ok=True)

    model_name = os.path.basename(model)
    for set_name, (eval_dir, eval_gt) in test_sets.items():
        print(model, set_name)
        save_file = os.path.join(eval_result_save_dir, f"{model_name}_{set_name}.txt")
        detail_file = os.path.join(eval_result_detail_dir, f"{model_name}_{set_name}.txt")
        with open(save_file, 'w', encoding='utf-8') as f:
            line_acc, char_acc, speed = Eval(ocr_model, ctc_label, config, eval_dir, eval_gt, tolerance_edit_distance,
                                             save_file=detail_file)
            info = ''
            for j, ed in enumerate(tolerance_edit_distance):
                info += f'****** tolerance_edit_distance: {ed}, eval_line_acc: {line_acc[j]}\n'
            info += f'eval_char_acc: {char_acc}\nspeed: {speed} ms/image\n'
            print(info)
            f.write(info)


def batch_run():
    """Run `main_eval` for every configured checkpoint on every test set.

    The active configuration is the single assignment of `models` /
    `test_sets` below; earlier experiment configurations are kept only as
    comments. (Previous revisions assigned both names twice, leaving the
    first assignments as dead stores.)
    """
    # model_type: ['origin', 'origin_light', 'light_new_v1', 'light_new_v2', 'light_new_v3', 'light_v1', 'light_v2', 'light_v3', ...]
    # models: {model_type: model_list}
    # Earlier experiments, kept for reference:
    # models = {'light_new_v1': ['./checkpoint_v6.3.1/Model_light_new_v1/crnn_light_new_v1_28000.pth',
    #                            './checkpoint_v6.3.1/Model_light_new_v1/crnn_light_new_v1_40000.pth']}
    # test_sets = {"real_testsets": ("./data/real_testset/test/", "./data/real_testset/test_gt.txt")}
    # test_sets = {"real_testsets": ("./data/real_testset/train/", "./data/real_testset/train_gt.txt")}
    # test_sets = {"cnc": ("/home/chenlei/data/Zhuhai/cnc_data/processed/horizontal/test", "/home/chenlei/data/Zhuhai/cnc_data/processed/horizontal/test.txt")}

    models = {"cnc": ["./checkpoint_cnc/Model_cnc/crnn_cnc_136000.pth", "./checkpoint_cnc/Model_cnc/crnn_cnc_140000.pth"]}
    test_sets = {"cnc": ("./data/cnc/sub_horizontal_test", "./data/cnc/sub_horizontal_test.txt")}
    tolerance_edit_distance = [0, 1, 2]
    eval_result_save_dir = "./checkpoint_cnc_tmp/eval_result/"
    for model_type, model_list in models.items():
        for model in model_list:
            main_eval(model_type, model, test_sets, tolerance_edit_distance, eval_result_save_dir)


def batch_run_all_model():
    """Glob every checkpoint of each configured model type and evaluate it
    on all test sets via `main_eval`."""
    # models: {model_type: model_list}; the lists are filled by globbing below.
    models = {'light_new_v1': []}

    # Synthetic news-text renders in four fonts share one ground-truth file.
    test_sets = {
        font: (f'./data/ch_news_fixed/ch_news_{font}_1-10_fixed/',
               './data/ch_news_fixed/ch_news_1-10.txt')
        for font in ('heiti', 'kaiti', 'simsum', 'youyuan')
    }
    test_sets.update({
        'img0': ('./data/img/img0', './data/img/img0.txt'),
        'img1': ('./data/img/img1', './data/img/img1.txt'),
        'img2': ('./data/img/img2', './data/img/img2.txt'),
        'imgk': ('./data/img/img_k', './data/img/img_k.txt'),
    })
    for idx in range(1, 5):
        test_sets[f"testsets{idx}"] = (f"./data/testset/testsets{idx}/",
                                       f"./data/testset/testsets{idx}.txt")
    test_sets["real_testsets"] = ("./data/real_testset/IMG_dst/", "./data/real_testset/0323.txt")

    # Collect all saved checkpoints for each model type, oldest name first.
    for model_type in models:
        models[model_type] = sorted(glob.glob(f"./checkpoint_v4.3/Model_{model_type}/*.pth"))
    print(models)

    tolerance_edit_distance = [0, 1, 2]
    eval_result_save_dir = "./checkpoint_v4.3/eval_result/"
    for model_type, checkpoints in models.items():
        for checkpoint in checkpoints:
            main_eval(model_type, checkpoint, test_sets, tolerance_edit_distance, eval_result_save_dir)


# Convert a high-version (PyTorch 1.6+) zip-format checkpoint into the legacy
# serialization format so that older PyTorch versions can load it.
def pytorch_model_high_to_low():
    """Re-save one checkpoint with `_use_new_zipfile_serialization=False`.

    Loads the model weights through the matching architecture (to validate
    the state dict) and writes "<name>_unzipped.pth" next to the original.
    """
    model = './checkpoint_cnc/Model_cnc/crnn_cnc_140000.pth'
    with open('./Config/cnc.yaml', encoding='utf-8') as f:  # close the handle
        config = ConfigDict(yaml.load(f, Loader=yaml.FullLoader))
    # NOTE(review): CRNN_OCR_for_cnc is not imported in this file (only CRNN
    # is) — confirm where it comes from.
    ocr_model = CRNN_OCR_for_cnc(config.input_channel, config.num_class).to(device)
    ocr_model.load_state_dict(torch.load(model, map_location=device))
    # splitext handles the extension robustly; the old split('.') juggling
    # depended on the exact dot layout of the whole path.
    root, ext = os.path.splitext(model)
    new_name = root + '_unzipped' + ext
    torch.save(ocr_model.state_dict(), new_name, _use_new_zipfile_serialization=False)
    print(model, '-->', new_name)


# Entry point: enable exactly one of the runners below.
if __name__ == '__main__':
    batch_run()
    # batch_run_all_model()
    # pytorch_model_high_to_low()
