import os
import sys

import numpy as np
import torch
import torch.nn as nn
import torch.optim as optim
import yaml

from utils import Balanced_dataset, ConfigDict, CTCLabel, check_path, parse_version
from model.crnn import CRNN, CRNN_OCR_for_cnc
from eval import Eval


def batch_eval(model, ctc_label, config):
    """Run evaluation over every configured test set.

    Args:
        model: the recognition network; the caller is responsible for
            switching it to eval mode before calling.
        ctc_label: CTC label codec, passed through to Eval for decoding.
        config: training/eval configuration, passed through to Eval.

    Returns:
        (eval_res, line_acc, char_acc) where eval_res maps test-set name to
        a human-readable metrics string, and line_acc / char_acc are the
        0-edit-distance line accuracy and the character accuracy of the
        LAST test set evaluated (there is currently only one).
    """
    # NOTE(review): absolute, machine-specific paths — consider moving the
    # test-set list into the YAML config instead of hard-coding it here.
    testset = [("/home/prir1005/cl/data/intelligent_assessment/output", "/home/prir1005/cl/data/intelligent_assessment/output/test_gt.txt")]
    testset_name = ["real_testsets"]
    tolerance_edit_distance = [0, 1, 2]
    eval_res = {}
    # Defaults so the return statement cannot raise NameError if the
    # test-set list is ever emptied.
    line_accs, char_acc = [0], 0
    for name, (eval_dir, eval_gt) in zip(testset_name, testset):
        line_accs, char_acc, speed = Eval(model, ctc_label, config, eval_dir, eval_gt, tolerance_edit_distance)
        # Build the report line in one pass instead of repeated '+='.
        info = "".join(f'line acc with {ed} ed: {line_accs[j]}, '
                       for j, ed in enumerate(tolerance_edit_distance))
        info += f"char acc: {char_acc}, speed:{speed} s/image"
        eval_res[name] = info
    return eval_res, line_accs[0], char_acc


def train():
    """Train a CRNN text-recognition model with CTC loss.

    The YAML config path is taken from the first CLI argument, falling back
    to './config/v1.yaml'. If config.load_model_path points at an existing
    checkpoint, training resumes and the start step is parsed from the file
    name (expected to look like '..._<step>.pth'). Metrics are logged every
    50 steps; evaluation and checkpointing happen every 2000 steps until
    global_step reaches config.stop_step.
    """
    # Load the config through a context manager so the file handle is
    # closed (the original left the open file object dangling).
    config_path = sys.argv[1] if len(sys.argv) > 1 else './config/v1.yaml'
    with open(config_path, encoding='utf-8') as cfg_file:
        config = ConfigDict(yaml.load(cfg_file, Loader=yaml.FullLoader))
    check_path(config)

    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
    model = CRNN(config.num_class).to(device)

    if config.load_model_path != '' and os.path.exists(config.load_model_path):
        print('======> load model', config.load_model_path)
        model.load_state_dict(torch.load(config.load_model_path, map_location=device))
        # Recover the step counter from a checkpoint name like 'crnn_12000.pth'.
        global_step = int(config.load_model_path.split('.')[-2].split('_')[-1])
    else:
        global_step = 0
    model.train()
    ctc_label = CTCLabel(config.dict_file)
    train_dataset = Balanced_dataset(config, ctc_label.dict_char2index)

    criterion = nn.CTCLoss(zero_infinity=True)
    optimizer = optim.Adam(model.parameters(), lr=config.learning_rate)
    # NOTE(review): these metric lists are never reset, so the printed
    # means cover the whole run, not just the last 50 steps — confirm
    # that is intended.
    loss_arr = []
    acc_arr = []
    ned_arr = []

    # 'with' guarantees the log file is flushed and closed even if the
    # training loop raises (the original only closed it on a clean exit).
    with open(os.path.join(config.train_log_dir, "log.txt"), 'a', encoding="utf-8") as f_log:
        info = f"======> Start step is: {global_step}\n"
        print(info, end='')
        f_log.write(info)

        best_line_acc = 0
        best_char_acc = 0
        while global_step < config.stop_step:
            try:
                imgs, labels = train_dataset.get_batch()
            except Exception as e:
                # Best-effort: skip a bad batch rather than abort training.
                print('Error: error generates when getting next batch data. ', e.args)
                continue
            text, length = ctc_label.encode(labels)
            imgs = imgs.to(device)
            batch_size = imgs.size(0)
            preds = model(imgs)
            pred_size = torch.IntTensor([preds.size(1)] * batch_size)

            # CTCLoss expects (seq, batch, num_class); cudnn is toggled off
            # around the loss call — presumably a cudnn-CTC stability
            # workaround, TODO confirm it is still needed.
            preds_ = preds.permute(1, 0, 2)
            torch.backends.cudnn.enabled = False
            loss = criterion(preds_, text.to(device), pred_size.to(device), length.to(device))
            torch.backends.cudnn.enabled = True

            model.zero_grad()
            loss.backward()
            optimizer.step()

            # Greedy (argmax) decode for the training-batch metrics.
            probs = preds.argmax(2).view(-1)
            preds_text = ctc_label.decode(probs.data.cpu().numpy(), pred_size.data.cpu().numpy())
            acc, ned = ctc_label.get_acc_ned(labels, preds_text)
            loss_arr.append(loss.item())
            acc_arr.append(acc)
            ned_arr.append(ned)

            global_step += 1
            if global_step % 50 == 0:
                info = f"gt:{labels[0]}      pred:{preds_text[0]}\n"
                info += f"step:{global_step}  loss:{np.mean(loss_arr)}  line_acc:{np.mean(acc_arr)}  ned:{np.mean(ned_arr)}\n"
                print(info, end='')
                f_log.write(info)
            if global_step % 2000 == 0:
                model.eval()
                eval_res, line_acc, char_acc = batch_eval(model, ctc_label, config)
                model.train()
                info = "-" * 20 + " Eval begin\n"
                info += f"step: {global_step}\n"
                for k, v in eval_res.items():
                    info += f"testset {k}: {v}\n"
                info += "-" * 20 + " Eval end\n"
                print(info, end='')
                f_log.write(info)

                # Always keep a periodic snapshot, plus dedicated copies
                # whenever a new best line/char accuracy is reached.
                model_path = os.path.join(config.save_dir, f'crnn_{global_step}.pth')
                torch.save(model.state_dict(), model_path)
                if best_line_acc < line_acc:
                    best_line_acc = line_acc
                    model_path = os.path.join(config.save_dir, f'best_line_acc_crnn_{global_step}.pth')
                    torch.save(model.state_dict(), model_path)
                if best_char_acc < char_acc:
                    best_char_acc = char_acc
                    model_path = os.path.join(config.save_dir, f'best_char_acc_crnn_{global_step}.pth')
                    torch.save(model.state_dict(), model_path)
                # NOTE(review): this reports the last path written, which may
                # be a 'best_*' checkpoint rather than the periodic one.
                info = '*' * 20 + f" Save model {model_path}\n"
                print(info, end='')
                f_log.write(info)


# Script entry point: run training when executed directly.
if __name__ == '__main__':
    train()