import torch
import time
import os
from torch.nn import functional
from transformers import AdamW, get_cosine_schedule_with_warmup
from exp03.comodel import ComModel
from utils.logger import global_time_str
from utils import get_dict_str_in_lines

from tqdm import tqdm
from exp03.tester import test


def _checkpoint_dir(args):
    """Return the run-specific checkpoint directory if it exists, else args.model_dir.

    NOTE(review): this *falls back* rather than creating the timestamped
    directory. Save and load both go through this same rule, so they stay
    consistent with each other; creating the directory here would change
    where checkpoints land — confirm before "fixing".
    """
    run_dir = f"{args.model_dir}/{global_time_str}-{args.a}-{args.a_ww}-{args.b}-{args.b_ww}"
    return run_dir if os.path.exists(run_dir) else args.model_dir


def train(
        args,
        train_data_loaders,
        dev_data_loaders,
        test_data_loaders,
        three_golden_set_devs,
        three_golden_set_tests,
        logger
):
    """Train a ComModel, checkpoint the best dev model, and report test F1.

    Args:
        args: configuration namespace; must provide ``device``, ``lr``,
            ``train_epochs``, ``model_dir`` and ``a``/``a_ww``/``b``/``b_ww``
            (the latter four only name the checkpoint directory).
        train_data_loaders: list of training DataLoaders (one per sub-dataset).
        dev_data_loaders: list of validation DataLoaders, parallel to
            ``three_golden_set_devs``.
        test_data_loaders: list of test DataLoaders, parallel to
            ``three_golden_set_tests``.
        three_golden_set_devs: gold annotations consumed by ``test()`` —
            exact structure defined in exp03.tester, not visible here.
        three_golden_set_tests: gold annotations for the final test pass.
        logger: a logging.Logger-like object.

    Side effects: saves the best model (by summed dev triplet F1) to
    ``<checkpoint dir>/model.pth`` and logs per-epoch losses and final
    F1 scores. Returns None.
    """
    logger.info("train(): Train start!")
    model = ComModel(args).to(args.device)

    # Standard transformer fine-tuning convention: exempt biases and
    # LayerNorm weights from weight decay.
    no_decay = ["bias", "LayerNorm.weight"]
    optimizer_grouped_parameters = [
        {
            "params": [
                p for n, p in model.named_parameters() if not any(nd in n for nd in no_decay)
            ],
            "weight_decay": 1e-2,
        },
        {
            "params": [
                p for n, p in model.named_parameters() if any(nd in n for nd in no_decay)
            ],
            "weight_decay": 0.0,
        },
    ]

    adam_w_optimizer = AdamW(optimizer_grouped_parameters, lr=args.lr)

    # The scheduler is stepped once per batch, so total steps = epochs *
    # total batches; warm up over the first 10% of all steps.
    data_loader_len_total = sum(len(dl) for dl in train_data_loaders)
    scheduler = get_cosine_schedule_with_warmup(
        adam_w_optimizer,
        int(0.1 * args.train_epochs * data_loader_len_total),
        args.train_epochs * data_loader_len_total
    )

    best_t_f1_dev = [0] * len(dev_data_loaders)

    for epoch_i in range(args.train_epochs):
        epoch_i_start_time = time.time()

        for i in range(len(train_data_loaders)):
            all_epoch_i_loss = []
            all_is_on_loss, all_a_loss, all_o_loss, all_s_loss = [], [], [], []
            right_is_on, all_is_on = 0, 0  # running accuracy counters for the is_on head

            for batch_data in tqdm(train_data_loaders[i], desc=f"Train: {epoch_i}/{args.train_epochs}", delay=0.5):
                model.train()
                adam_w_optimizer.zero_grad()

                inputs_plus, label_plus = batch_data['inputs_plus'], batch_data['label_plus']

                y = model(inputs_plus, label_plus['a_s_'], label_plus['a_e_'], args)

                is_on_logits, as_p, ae_p = y['is_on_logits'], y['as_p'], y['ae_p']
                os_p, oe_p, s_logits = y['os_p'], y['oe_p'], y['s_logits']

                # Aspect span loss: mean of start/end cross-entropies.
                a_loss = (functional.cross_entropy(as_p, label_plus['a_s'], ignore_index=-1) +
                          functional.cross_entropy(ae_p, label_plus['a_e'], ignore_index=-1)) / 2
                # .detach() before storing for logging so the kept tensors
                # do not pin the autograd graph for the whole epoch.
                all_a_loss.append(a_loss.detach())

                # Opinion span loss: soft-label NLL over start/end
                # distributions, normalised by the number of non-zero start
                # labels in the batch.
                # NOTE(review): the divisor is zero if a batch carries no
                # opinion start labels — presumably the data guarantees at
                # least one; confirm against the dataset builder.
                o_loss = -torch.sum(functional.log_softmax(os_p, dim=1).reshape([-1]) * label_plus['o_s'].reshape([-1])) - \
                         torch.sum(functional.log_softmax(oe_p, dim=1).reshape([-1]) * label_plus['o_e'].reshape([-1]))
                o_loss = o_loss / (label_plus['o_s'].reshape([-1]) != 0).nonzero().size(0)
                all_o_loss.append(o_loss.detach())

                # "Is there another triplet?" classification loss.
                is_on_loss = functional.cross_entropy(is_on_logits, label_plus['is_on'], ignore_index=-1)
                all_is_on_loss.append(is_on_loss.detach())

                # Running is_on accuracy (logging only) — vectorized
                # replacement for the original per-element Python loop.
                is_on_pred = torch.argmax(is_on_logits, dim=1)
                all_is_on += is_on_pred.size(0)
                right_is_on += int((is_on_pred == label_plus['is_on']).sum().item())

                # Sentiment classification loss.
                s_loss = functional.cross_entropy(s_logits, label_plus['s'], ignore_index=-1)
                all_s_loss.append(s_loss.detach())

                # Weighted total loss; the 8 / 3.2 weights look like tuned
                # hyperparameters — TODO consider lifting them into args.
                loss = is_on_loss * 8 + a_loss * 3.2 + o_loss + s_loss
                all_epoch_i_loss.append(loss.detach())

                loss.backward()
                adam_w_optimizer.step()
                scheduler.step()

            # Log aggregate loss statistics for this loader/epoch.
            logger.info(get_dict_str_in_lines({
                "epoch_i": epoch_i,
                "aver_loss_sum": torch.sum(torch.stack(all_epoch_i_loss)).item() / len(all_epoch_i_loss),
                "is_on_loss_sum": torch.sum(torch.stack(all_is_on_loss)).item(),
                "a_loss_sum": torch.sum(torch.stack(all_a_loss)).item(),
                "o_loss_sum": torch.sum(torch.stack(all_o_loss)).item(),
                "s_loss_sum": torch.sum(torch.stack(all_s_loss)).item(),
                "time cost": float(time.time() - epoch_i_start_time),
                "is_on": f'{right_is_on}/{all_is_on}'
            }))

        # Skip validation (and best-model tracking) during the first 5%
        # of epochs — treated as a warm-up phase.
        if epoch_i < args.train_epochs * 0.05:
            continue

        current_f1_list = []
        for i in range(len(dev_data_loaders)):
            as_f, p_f, t_f = test(
                args,
                model,
                dev_data_loaders[i],
                three_golden_set_devs[i],
                logger,
                is_test=False,
            )
            current_f1_list.append(t_f)

        # Checkpoint whenever the summed triplet F1 across dev sets improves.
        if sum(current_f1_list) > sum(best_t_f1_dev):
            logger.info(f'A better model found. Now save {epoch_i}th model')
            best_model_dir_path = _checkpoint_dir(args)
            torch.save(
                model.state_dict(),
                f'{best_model_dir_path}/model.pth'
            )
            best_t_f1_dev = current_f1_list

    # Training done: evaluate on the test sets with a fresh model; the best
    # checkpoint is loaded inside test() via the model_dir argument.
    model = ComModel(args).to(args.device)
    logger.info(f"ComModel has been moved to {args.device}!")

    last_result_dir_path = _checkpoint_dir(args)

    for i in range(len(test_data_loaders)):
        mt_f, st_f, t_f1_test = test(
            args, model, test_data_loaders[i], three_golden_set_tests[i], logger,
            is_test=True,
            model_dir=f'{last_result_dir_path}/model.pth'
        )
        logger.info("BEST DEV Triplets F1:" + str(best_t_f1_dev[i]))
        logger.info("Multi Aspect Triplets F1:" + str(mt_f))
        logger.info("Single Aspect Triplets F1:" + str(st_f))
        logger.info("TEST Triplets F1:" + str(t_f1_test))
