import torch
import time
import os
from torch.nn import functional
from transformers import AdamW, get_cosine_schedule_with_warmup
from model.comodel1 import ComModel
from utils.logger import global_time_str
from utils import get_dict_str_in_lines

from tqdm import tqdm
from tester import test


def train(
        args,
        train_data_loader,
        dev_data_loader,
        test_data_loader,
        three_golden_set_dev,
        three_golden_set_test,
        logger
):
    """Train a ComModel, validate on the dev set after epoch 15, keep the
    best checkpoint by dev triplet F1, and finally evaluate on the test set.

    Args:
        args: namespace with at least ``device``, ``lr``, ``train_epochs``,
            ``model_dir`` and ``a``/``a_ww``/``b``/``b_ww`` (the latter four
            are only used to build the checkpoint directory name).
        train_data_loader / dev_data_loader / test_data_loader: iterables of
            batches shaped ``{'inputs_plus': ..., 'label_plus': ...}``.
        three_golden_set_dev / three_golden_set_test: gold triplet sets
            consumed by ``tester.test`` for F1 computation.
        logger: logger used for progress and result reporting.
    """
    logger.info("train(): Train start!")
    model = ComModel(args).to(args.device)

    # Parameters whose names contain these substrings are excluded from
    # weight decay (standard BERT-style fine-tuning practice).
    no_decay = ["bias", "LayerNorm.weight"]
    optimizer_grouped_parameters = [
        {
            "params": [
                p for n, p in model.named_parameters() if not any(nd in n for nd in no_decay)
            ],
            "weight_decay": 1e-2,
        },
        {
            "params": [
                p for n, p in model.named_parameters() if any(nd in n for nd in no_decay)
            ],
            "weight_decay": 0.0,
        },
    ]

    adam_w_optimizer = AdamW(optimizer_grouped_parameters, lr=args.lr)

    # Cosine schedule with 10% of all optimizer steps used as warmup.
    total_steps = args.train_epochs * len(train_data_loader)
    scheduler = get_cosine_schedule_with_warmup(
        adam_w_optimizer,
        int(0.1 * total_steps),
        total_steps
    )

    best_t_f1_dev = 0

    # Multi-epoch training loop.
    for epoch_i in range(args.train_epochs):
        epoch_i_start_time = time.time()
        # Accumulate plain Python floats (.item()) only, so the autograd
        # graph of every past batch is NOT kept alive for the whole epoch
        # (the original code appended graph-bearing tensors — a memory leak).
        all_epoch_i_loss = []
        all_is_on_loss, all_a_loss, all_o_loss, all_s_loss = [], [], [], []

        for batch_data in tqdm(train_data_loader, desc=f"Train: {epoch_i}/{args.train_epochs}"):
            model.train()
            adam_w_optimizer.zero_grad()

            inputs_plus, label_plus = batch_data['inputs_plus'], batch_data['label_plus']

            y = model(inputs_plus, label_plus['a_s_'], label_plus['a_e_'], args)

            is_on_logits, as_p, ae_p = y['is_on_logits'], y['as_p'], y['ae_p']
            os_p, oe_p, s_logits = y['os_p'], y['oe_p'], y['s_logits']

            # Aspect span loss: mean of start- and end-position cross entropy.
            a_loss = (functional.cross_entropy(as_p, label_plus['a_s'], ignore_index=-1) +
                      functional.cross_entropy(ae_p, label_plus['a_e'], ignore_index=-1)) / 2

            # Opinion span loss: soft-label cross entropy (several positions
            # may be marked), normalized by the number of labeled positions.
            o_loss = -torch.sum(functional.log_softmax(os_p, dim=1).reshape([-1]) * label_plus['o_s'].reshape([-1])) - \
                     torch.sum(functional.log_softmax(oe_p, dim=1).reshape([-1]) * label_plus['o_e'].reshape([-1]))
            o_loss = o_loss / (label_plus['o_s'].reshape([-1]) != 0).nonzero().size(0)

            # "Is there another opinion?" classification loss.
            is_on_loss = functional.cross_entropy(is_on_logits, label_plus['is_on'], ignore_index=-1)

            # Sentiment classification loss.
            s_loss = functional.cross_entropy(s_logits, label_plus['s'], ignore_index=-1)

            # Weighted total loss; weights follow the original training recipe.
            loss = is_on_loss * 8 + a_loss * 3.2 + o_loss + s_loss

            # Record detached scalar values for epoch-level logging.
            all_a_loss.append(a_loss.item())
            all_o_loss.append(o_loss.item())
            all_is_on_loss.append(is_on_loss.item())
            all_s_loss.append(s_loss.item())
            all_epoch_i_loss.append(loss.item())

            # Backward pass and optimization.
            loss.backward()
            adam_w_optimizer.step()
            scheduler.step()

        # Log the loss statistics of the current epoch.
        logger.info(get_dict_str_in_lines({
            "epoch_i": epoch_i,
            "aver_loss_sum": sum(all_epoch_i_loss) / len(all_epoch_i_loss),
            "is_on_loss_sum": sum(all_is_on_loss),
            "a_loss_sum": sum(all_a_loss),
            "o_loss_sum": sum(all_o_loss),
            "s_loss_sum": sum(all_s_loss),
            "time cost": float(time.time() - epoch_i_start_time)
        }))

        # Validation (and best-model tracking) only starts after epoch 15.
        if epoch_i < 15:
            continue

        as_f, p_f, t_f = test(args, model, dev_data_loader, three_golden_set_dev, logger, is_test=False)
        if t_f < best_t_f1_dev:
            continue

        # epoch_i >= 15 and dev F1 improved (or tied): save the checkpoint.
        logger.info(f'A better model found. Now save {epoch_i}th model')
        best_model_dir_path = f"{args.model_dir}/{global_time_str}-{args.a}-{args.a_ww}-{args.b}-{args.b_ww}"
        if not os.path.exists(best_model_dir_path):
            # Fall back to the plain model dir when the run-specific
            # directory was not created beforehand.
            best_model_dir_path = args.model_dir
        torch.save(
            model.state_dict(),
            f'{best_model_dir_path}/model.pth'
        )
        best_t_f1_dev = max(best_t_f1_dev, t_f)

    # Training finished — run the final test-set evaluation.
    # NOTE(review): a fresh (untrained) model is constructed here; it is
    # assumed that test() loads the saved weights via model_dir — confirm.
    model = ComModel(args).to(args.device)
    logger.info(f"ComModel has been moved to {args.device}!")

    def test_and_save_result(model_dir: str):
        # Evaluate on the test set and log all F1 figures.
        mt_f, st_f, t_f1_test = test(
            args, model, test_data_loader, three_golden_set_test, logger,
            is_test=True,
            model_dir=model_dir
        )
        logger.info("BEST DEV Triplets F1:" + str(best_t_f1_dev))
        logger.info("Multi Aspect Triplets F1:" + str(mt_f))
        logger.info("Single Aspect Triplets F1:" + str(st_f))
        logger.info("TEST Triplets F1:" + str(t_f1_test))

    last_result_dir_path = f"{args.model_dir}/{global_time_str}-{args.a}-{args.a_ww}-{args.b}-{args.b_ww}"
    if not os.path.exists(last_result_dir_path):
        last_result_dir_path = args.model_dir
    test_and_save_result(f'{last_result_dir_path}/model.pth')
