import os
from tqdm import tqdm
from model import *
from dataset import *
from loguru import logger
from transformers import AdamW, get_scheduler
# from apex import amp


def evaluation(test_data, model, epoch):
    """Evaluate ``model`` on ``test_data`` and log detection/correction metrics.

    A token counts as *detected* when the prediction differs from the source
    token, and as *corrected* when the prediction equals the target token.
    Recall is measured over the truly wrong tokens (src != tgt), precision
    over the tokens the model changed (pred != src).

    Args:
        test_data: sequence of dicts, each holding 'src' (misspelled) and
            'tgt' (corrected) sentence lists.
        model: spelling-correction model, called as
            ``model(src, max_length=max_len)``.
        epoch: epoch index, used only in the final log line.
    """
    model.eval()
    d_recall_numerator = 0       # wrong tokens the model also flagged
    d_recall_denominator = 0     # all wrong tokens (src != tgt)
    d_precision_numerator = 0    # flagged tokens that are truly wrong
    d_precision_denominator = 0  # all tokens the model flagged (pred != src)
    c_recall_numerator = 0       # wrong tokens the model corrected to tgt
    c_recall_denominator = 0     # all wrong tokens (src != tgt)
    c_precision_numerator = 0    # changed tokens that now match tgt
    c_precision_denominator = 0  # all tokens the model changed (pred != src)

    def _metrics():
        """Return (d_recall, d_precision, d_f1, c_recall, c_precision, c_f1)
        from the running counters; the 1e-9 terms avoid division by zero."""
        d_recall = d_recall_numerator / (d_recall_denominator + 1e-9)
        d_precision = d_precision_numerator / (d_precision_denominator + 1e-9)
        d_f1 = 2 * (d_recall * d_precision) / (d_recall + d_precision + 1e-9)
        c_recall = c_recall_numerator / (c_recall_denominator + 1e-9)
        c_precision = c_precision_numerator / (c_precision_denominator + 1e-9)
        c_f1 = 2 * (c_recall * c_precision) / (c_recall + c_precision + 1e-9)
        return d_recall, d_precision, d_f1, c_recall, c_precision, c_f1

    progress = tqdm(range(len(test_data)))
    for i in progress:
        src, tgt = test_data[i]['src'], test_data[i]['tgt']
        # Insert spaces between adjacent characters so the tokenizer cannot
        # merge multi-character numbers such as "20" into a single token.
        src_with_space = [' '.join(list(s)) for s in src]
        tgt_with_space = [' '.join(list(t)) for t in tgt]

        # [0] selects the single sequence, [1:-1] strips [CLS]/[SEP].
        src_tokens = tokenizer(src_with_space, return_tensors='pt', max_length=max_len, truncation=True)['input_ids'][0][1:-1]
        tgt_tokens = tokenizer(tgt_with_space, return_tensors='pt', max_length=max_len, truncation=True)['input_ids'][0][1:-1]

        # A CSC pair must align token-by-token; skip anomalous samples.
        if len(src_tokens) != len(tgt_tokens):
            print("第%d条数据异常" % i)
            continue

        # Only the correction logits are needed here; ``*_`` tolerates the
        # model returning extra values (the training loop unpacks three).
        correction_outputs, *_ = model(src, max_length=max_len)
        # Skip the [CLS] logit, keep len(src_tokens) positions, argmax over vocab.
        predict_tokens = correction_outputs[0][1:len(src_tokens) + 1].argmax(1).detach().cpu()

        # Detection: count the truly wrong tokens ...
        d_recall_denominator += (src_tokens != tgt_tokens).sum().item()
        # ... and how many of them the model also considered wrong.
        d_recall_numerator += (predict_tokens != src_tokens)[src_tokens != tgt_tokens].sum().item()
        # Count the tokens the model flagged as wrong ...
        d_precision_denominator += (predict_tokens != src_tokens).sum().item()
        # ... and how many of those are truly wrong.
        d_precision_numerator += (src_tokens != tgt_tokens)[predict_tokens != src_tokens].sum().item()

        # Correction: count the truly wrong tokens ...
        c_recall_denominator += (src_tokens != tgt_tokens).sum().item()
        # ... and how many of them the model predicted correctly.
        c_recall_numerator += (predict_tokens == tgt_tokens)[src_tokens != tgt_tokens].sum().item()
        # Count the tokens the model changed ...
        c_precision_denominator += (predict_tokens != src_tokens).sum().item()
        # ... and how many of those changes match the target.
        c_precision_numerator += (predict_tokens == tgt_tokens)[predict_tokens != src_tokens].sum().item()

        d_recall, d_precision, d_f1_score, c_recall, c_precision, c_f1_score = _metrics()
        progress.set_postfix({
            'd_recall': d_recall,
            'd_precision': d_precision,
            'd_f1_score': d_f1_score,
            'c_recall': c_recall,
            'c_precision': c_precision,
            'c_f1_score': c_f1_score,
        })

    # Recompute from the accumulated counters so this is well-defined even
    # when the loop body never ran (empty or fully anomalous test set);
    # the original referenced loop-local names here and could NameError.
    d_recall, d_precision, d_f1_score, c_recall, c_precision, c_f1_score = _metrics()
    logger.debug("Epoch {}, "
                 "detection recall {:.4f}, "
                 "detection precision {:.4f}, "
                 "detection f1 {:.4f}, "
                 "correction recall {:.4f}, "
                 "correction precision {:.4f}, "
                 "correction f1 {:.4f}".format(epoch,
                                               d_recall,
                                               d_precision,
                                               d_f1_score,
                                               c_recall,
                                               c_precision,
                                               c_f1_score))


if __name__=="__main__":
    """
    1. Parse command-line arguments.
    """
    import argparse
    parser = argparse.ArgumentParser(description='MDCSpell parameters')
    parser.add_argument('--lr', default=5e-5, type=float, help='学习率')
    parser.add_argument('--batch_size', default=32, type=int, help='训练的batch size')
    parser.add_argument('--epochs', default=12, type=int, help='作者没有说明，按经验取了一个')
    parser.add_argument('--pos_weight', default=30, type=float, help='处理类别不平衡，在detector loss中，给正样本的权重')
    parser.add_argument('--warmup_steps', default=2000, type=int, help='warmup steps')
    parser.add_argument('--method', default=2, type=int, help='corrector的构建方法')
    parser.add_argument('--fp16', action='store_true', default=False, help='是否使用fp16加速')
    parser.add_argument('--time', default=1, type=int, help='第几次重复实验')
    parser.add_argument('--init_dense', action='store_true', default=False, help='是否用embedding layer初始化corrector的dense layer')
    args = parser.parse_args()

    # max_len is not given by the paper's authors; chosen empirically.
    # It is defined in dataset.py and pulled in by the star import.
    print("max_len: ", max_len)
    lr = args.lr
    batch_size = args.batch_size
    epochs = args.epochs
    pos_weight = args.pos_weight
    warmup_steps = args.warmup_steps
    method = args.method
    fp16 = args.fp16
    init_dense = args.init_dense
    time = args.time

    # Log every ${log_after_step} optimizer steps.
    log_after_step = 100

    # Directory where checkpoints are saved.
    model_path = './models/split_pos{}_{}_m{}_{}_{}/'.format(pos_weight, warmup_steps, method, init_dense, time)
    os.makedirs(model_path, exist_ok=True)
    # Directory where logs are written.
    log_path = './logs/'
    os.makedirs(log_path, exist_ok=True)
    logger.add(log_path + 'train_split_pos{}_{}_m{}_{}_{}.log'.format(pos_weight, warmup_steps, method, init_dense, time), format="{time:HH:mm:ss} | {level} | {name} | {message}", level="DEBUG")

    # Record the hyper-parameters in the logger.
    logger.debug("max_len: {}".format(max_len))
    logger.debug("pos_weight: {}".format(pos_weight))
    logger.debug("lr: {}".format(lr))
    logger.debug("batch_size: {}".format(batch_size))
    logger.debug("warmup_steps: {}".format(warmup_steps))
    logger.debug("method: {}".format(method))
    logger.debug("fp16: {}".format(fp16))
    logger.debug("init_dense: {}".format(init_dense))


    """
    2. Build the dataloader.
    """
    train_data = CSCDataset()
    train_loader = DataLoader(train_data, batch_size=batch_size, collate_fn=collate_fn, shuffle=True)


    """
    3. Train.
    """

    model = MDCSpellModel(tokenizer=tokenizer, pos_weight=pos_weight).to(device)
    criterion = MDCSpellLoss(pos_weight=pos_weight)
    optimizer = AdamW(model.parameters(), lr=lr)
    if fp16:
        # apex is imported lazily so the non-fp16 path does not require it.
        # The original referenced ``amp`` while its import was commented out,
        # so passing --fp16 raised a NameError.
        from apex import amp
        model, optimizer = amp.initialize(model, optimizer, opt_level="O2")
    num_training_steps = epochs * len(train_loader)
    lr_scheduler = get_scheduler(
        "linear",
        optimizer=optimizer,
        num_warmup_steps=warmup_steps,
        num_training_steps=num_training_steps,
    )
    start_epoch = 0  # epoch to resume from
    total_step = 0   # total number of parameter updates so far

    model = model.to(device)
    total_loss = 0.  # running loss over the current logging window

    d_recall_numerator = 0       # detection recall numerator
    d_recall_denominator = 0     # detection recall denominator
    d_precision_numerator = 0    # detection precision numerator
    d_precision_denominator = 0  # detection precision denominator
    c_recall_numerator = 0       # correction recall numerator
    c_recall_denominator = 0     # correction recall denominator
    c_precision_numerator = 0    # correction precision numerator
    c_precision_denominator = 0  # correction precision denominator

    for epoch in range(start_epoch, epochs):
        model.train()
        step = 0
        for sequences, correction_targets, detection_targets, correction_inputs in tqdm(train_loader):
            optimizer.zero_grad()
            correction_targets, detection_targets = correction_targets.to(device), detection_targets.to(device)
            correction_inputs = correction_inputs.to(device)
            correction_outputs, detection_outputs, attn_masks = model(sequences, max_length=max_len)
            loss = criterion(correction_outputs, correction_targets,
                                detection_outputs, detection_targets, attn_masks)
            if fp16:
                with amp.scale_loss(loss, optimizer) as scaled_loss:
                    scaled_loss.backward()
            else:
                loss.backward()
            optimizer.step()
            lr_scheduler.step()

            step += 1
            total_step += 1

            total_loss += loss.detach().item()

            # Detection recall/precision.
            # A probability >= 0.5 marks a token as wrong, otherwise correct.
            d_predicts = detection_outputs >= 0.5
            # Wrong tokens that the network detected.
            d_recall_numerator += d_predicts[detection_targets == 1].sum().item()
            # Total number of wrong tokens.
            d_recall_denominator += (detection_targets == 1).sum().item()
            # Total number of tokens the network flagged as wrong.
            d_precision_denominator += d_predicts.sum().item()
            # Flagged tokens that are truly wrong.
            d_precision_numerator += (detection_targets[d_predicts == 1]).sum().item()

            # Correction recall/precision.
            # Map logits to token ids: (batch, seq, vocab) -> (batch, seq).
            correction_outputs = correction_outputs.argmax(2)
            # Do not score padding, [CLS] (101) or [SEP] (102) positions.
            correction_outputs[(correction_targets == 0) | (correction_targets == 101) | (correction_targets == 102)] = 0
            # Zero out [CLS]/[SEP] in the targets as well so they compare equal.
            correction_targets[(correction_targets == 101) | (correction_targets == 102)] = 0
            # True where the prediction matches the target (or nothing to predict).
            c_predicts = correction_outputs == correction_targets
            # Wrong tokens the network corrected to the right token.
            c_recall_numerator += c_predicts[detection_targets == 1].sum().item()
            # Total number of wrong tokens.
            c_recall_denominator += (detection_targets == 1).sum().item()
            # Tokens the network changed (ignoring [CLS]/[SEP] in the inputs).
            correction_inputs[(correction_inputs == 101) | (correction_inputs == 102)] = 0
            c_precision_denominator += (correction_outputs != correction_inputs).sum().item()
            # Changed tokens that were corrected to the right token.
            c_precision_numerator += c_predicts[correction_outputs != correction_inputs].sum().item()

            if total_step % log_after_step == 0:
                loss = total_loss / log_after_step
                d_recall = d_recall_numerator / (d_recall_denominator + 1e-9)
                d_precision = d_precision_numerator / (d_precision_denominator + 1e-9)
                c_recall = c_recall_numerator / (c_recall_denominator + 1e-9)
                c_precision = c_precision_numerator / (c_precision_denominator + 1e-9)

                logger.info("Epoch {}, "
                    "Step {}/{}, "
                    "Total Step {}, "
                    "loss {:.5f}, "
                    "detection recall {:.4f}, "
                    "detection precision {:.4f}, "
                    "correction recall {:.4f}, "
                    "correction precision {:.4f}".format(epoch, step, len(train_loader), total_step,
                                                        loss,
                                                        d_recall,
                                                        d_precision,
                                                        c_recall,
                                                        c_precision))

                # Reset the logging-window accumulators.
                total_loss = 0.
                d_recall_numerator = 0
                d_recall_denominator = 0
                d_precision_numerator = 0
                d_precision_denominator = 0
                c_recall_numerator = 0
                c_recall_denominator = 0
                c_precision_numerator = 0
                c_precision_denominator = 0

        torch.save({
            'model': model.state_dict(),
            'optimizer': optimizer.state_dict(),
            'epoch': epoch + 1,
            'total_step': total_step,
        }, model_path + 'epoch-{}.pt'.format(epoch+1))

        # # Evaluate on SIGHAN 13/14/15 after each epoch.
        # logger.debug("===== Eval =====")
        # logger.debug("evaluate on sighan13")
        # with open("../data/realise/test.sighan13.pkl", mode='br') as f:
        #     sighan13 = pickle.load(f)
        # evaluation(sighan13, model, epoch)
        # logger.debug("evaluate on sighan14")
        # with open("../data/realise/test.sighan14.pkl", mode='br') as f:
        #     sighan14 = pickle.load(f)
        # evaluation(sighan14, model, epoch)
        # logger.debug("evaluate on sighan15")
        # with open("../data/realise/test.sighan15.pkl", mode='br') as f:
        #     sighan15 = pickle.load(f)
        # evaluation(sighan15, model, epoch)
