import torch
import torch.optim as optim
import torch.nn as nn
from args import args
from fastNLP import logger
from fastNLP.core.batch import DataSetIter
from fastNLP.core.sampler import BucketSampler
from fastNLP import AccuracyMetric, SpanFPreRecMetric
from fastNLP import Trainer
from models.modules import MaskedConv2d
# from models.modules import MaskedConv2d, MaskedMLP
from torch.optim.lr_scheduler import CosineAnnealingLR,ExponentialLR
import time
import numpy as np
import utils



def train_nlp(model, task_lst, writer, task_id, vocabs):
    """Train `model` on the single task `task_id` for `args.epochs` epochs.

    Builds an optimizer over the model's weight/bias parameters (plus an
    optional cosine LR scheduler), runs one epoch at a time through
    `train_nlp_epoch`, tracks how much the task's binary mask overlaps with
    its first-epoch mask, and evaluates on the test split after every epoch.

    Parameters:
        model: multi-task model; only parameters whose names contain
            'weight' or 'bias' are optimized here (score/mask params are not).
        task_lst: list of task objects providing data loaders and metrics.
        writer: tensorboard-style summary writer.
        task_id: index of the task being trained.
        vocabs: mapping task_name -> tag vocabulary (for span metrics).
    """
    total_time = time.time()
    logger.info("Start training...")

    # Collect every trainable weight/bias parameter; mask/score parameters
    # are excluded because their names contain neither 'weight' nor 'bias'.
    params_w = []
    for n, p in model.named_parameters():
        if not p.requires_grad:
            continue
        if n.find('weight') != -1 or n.find('bias') != -1:
            params_w.append(p)

    if args.optimizer == "adam":
        optimizer_w = optim.Adam(params_w, lr=args.train_weight_lr, weight_decay=args.wd)
    elif args.optimizer == "rmsprop":
        optimizer_w = optim.RMSprop(params_w, lr=args.train_weight_lr)
    else:
        optimizer_w = optim.SGD(params_w,
                                lr=args.train_weight_lr,
                                momentum=args.momentum,
                                weight_decay=args.wd)

    if args.no_scheduler:
        scheduler_w = None
    else:
        # T_max equals the number of epochs: the scheduler is meant to be
        # stepped once per epoch.
        scheduler_w = CosineAnnealingLR(optimizer_w, T_max=args.epochs)

    steps = 0
    mask_int = None  # masks captured after the first epoch; overlap baseline

    for i_epoch in range(args.epochs):
        start_time = time.time()

        logger.info("====== Train Epoch {} ======".format(i_epoch))
        mask_list, steps = train_nlp_epoch(model, task_lst, writer, i_epoch, task_id,
                                           optimizer_w, scheduler_w, vocabs, steps)
        logger.info("Epoch {} finished. Elapse: {:.3f}s.".format(
            i_epoch,
            time.time() - start_time))

        # Keep the first epoch's masks as the overlap baseline.
        if i_epoch == 0:
            mask_int = mask_list

        # mask_list[task_id] stays at its 0.0 init value if no optimizer step
        # ran this epoch; iterating a float would raise TypeError, so only
        # compute statistics when real mask dicts exist.
        if isinstance(mask_list[task_id], dict) and isinstance(mask_int[task_id], dict):
            ones_overlap = 0
            ones_m1, ones_m2 = 0, 0
            for name in mask_list[task_id]:
                m1 = mask_list[task_id][name]
                m2 = mask_int[task_id][name]
                ones_m1 += torch.sum(m1)
                ones_m2 += torch.sum(m2)
                ones_overlap += torch.sum(m1 * m2)
            logger.info(f"num_params : task: {task_id}, num: {ones_m1}")
            # Store a plain int (not a 0-dim tensor) so the CSV report and
            # later logging print a number rather than `tensor(...)`.
            args.num_params = int(ones_m1)
            # NOTE(review): this ratio peaks at 0.5 for identical masks
            # (Dice coefficient without the factor of 2) — confirm intended.
            writer.add_scalar(f"train/task_{task_id}/overlap",
                              ones_overlap / (ones_m1 + ones_m2),
                              i_epoch)

        # Log test-set accuracy for this task after every epoch.
        eval_epoch_nlp(model, task_lst, writer, task_id, i_epoch, dev=False)

    logger.info("Training finished. Elapse {:.4f} hours.".format(
        (time.time() - total_time) / 3600))

def train_nlp_epoch(model, task_lst, writer, i_epoch, task_id, optimizer_w, scheduler_w, vocabs, steps):
    """Run one training epoch over task `task_id`'s training loader.

    Iterates batches until the loader is exhausted, accumulating gradients
    over `args.accumulation_steps` batches before each optimizer step, and
    periodically logs loss/metric/LR. The LR scheduler is stepped once at
    the end of the epoch (matching `T_max=args.epochs`).

    Returns:
        (mask_list, steps): per-task list whose `task_id` slot holds the
        latest mask dict from `mask_status_nlp`, and the updated global
        step counter.
    """
    total_loss = 0
    n_tasks = len(task_lst)

    # Tasks excluded via --tasks start out "empty" so they are never trained.
    include_tasks = args.tasks
    if include_tasks is None:
        empty_task = set()
    else:
        empty_task = set(range(len(task_lst))) - set(include_tasks)

    model.train()
    model.zero_grad()

    mask_list = [0.0 for _ in range(args.num_tasks)]

    for task in task_lst:
        task.train_loader = iter(task.train_loader)

    device = args.device
    while len(empty_task) < n_tasks:
        if task_id in empty_task:
            # This task's loader is exhausted (or the task was excluded):
            # end the epoch. The original used `continue`, which spins
            # forever because nothing inside the loop ever removes
            # task_id from empty_task.
            break
        task = task_lst[task_id]
        batch = next(task.train_loader, None)
        if batch is None:
            # Loader exhausted: mark the task empty and rebuild its loader
            # for the next epoch, then fall through to the exit check above.
            empty_task.add(task_id)
            task.train_loader = DataSetIter(
                task.train_set,
                args.batch_size,
                sampler=BucketSampler(batch_size=args.batch_size),
            )
            continue

        x, y = batch
        batch_x = x["x"].to(device)
        batch_y = y["y"].to(device)
        # NOTE: on some torch versions pack_padded_sequence requires the
        # lengths tensor on CPU (lengths.cpu()) — relevant if seq_len is on GPU.

        if "seq_len" in x:
            seq_len = x["seq_len"].to(device)
            out = model(batch_x, batch_y, seq_len, task_id=task_id)
        else:
            seq_len = None
            out = model(batch_x, batch_y, task_id=task_id)
        loss, pred = out["loss"], out["pred"]
        steps += 1

        total_loss += loss.item()
        loss = loss / args.accumulation_steps
        loss.backward()

        # Build a fresh metric for this batch only. The original constructed
        # one metric per task on every batch, although only task_id's metric
        # was ever evaluated; behavior (last-batch score at print time) is
        # unchanged.
        t = task_lst[task_id]
        if utils.need_acc(t.task_name):
            metric = AccuracyMetric()
        else:
            metric = SpanFPreRecMetric(
                vocabs[t.task_name],
                encoding_type="bioes" if t.task_name == "ner" else "bio",
            )
        metric.evaluate(pred, batch_y, seq_len)

        if steps % args.accumulation_steps == 0:
            # NOTE(review): unclear whether value clipping interacts with the
            # mask/score parameters — confirm the intended clip target.
            nn.utils.clip_grad_value_(model.parameters(), 5)
            optimizer_w.step()
            optimizer_w.zero_grad()

            if steps % args.print_every == 0:
                writer.add_scalar("train_loss", total_loss / args.print_every,
                                  steps)
                score = metric.get_metric()
                metric_name = "acc" if "acc" in score else "f1"
                score = score["acc"] if "acc" in score else score["f"]
                writer.add_scalar("train_acc", score, steps)
                writer.add_scalar("train/Learning rate",
                                  optimizer_w.state_dict()['param_groups'][0]['lr'],
                                  i_epoch)
                logger.info(
                    " - Step {}: loss {}\t{}\t{}: {}".format(
                        steps,
                        total_loss / args.print_every,
                        task.task_name,
                        metric_name,
                        score,
                    )
                )
                total_loss = 0
            mask_list[task_id] = mask_status_nlp(model, task_id)

    # Step the cosine schedule once per epoch (matches T_max=args.epochs).
    # The original additionally stepped it before every optimizer step,
    # which decays the LR a full cycle within the first epoch and triggers
    # PyTorch's "scheduler.step() before optimizer.step()" warning.
    if scheduler_w is not None:
        scheduler_w.step()
    return mask_list, steps



def eval_epoch_nlp(model, task_lst, writer, task_id, i_epoch, dev=False):
    """Evaluate `model` on task `task_id` without gradient tracking.

    Parameters:
        dev: if True evaluate on the dev split, otherwise on the test split.

    Returns:
        (dev_loss, dev_acc): mean per-batch loss, and a dict mapping the
        task name to its last per-batch metric dict plus "avg" -> the mean
        of per-batch scores.
    """
    dev_loss = 0
    n_batches = 0  # replaces the duplicate `step`/`e_steps` counters
    avg_acc = 0
    dev_acc = {}
    model.eval()
    device = args.device

    with torch.no_grad():
        task = task_lst[task_id]
        data_loader = task.dev_loader if dev else task.test_loader
        for batch in data_loader:
            n_batches += 1
            x, y = batch

            batch_x = x["x"].to(device)
            batch_y = y["y"].to(device)

            if "seq_len" in x:
                seq_len = x["seq_len"].to(device)
                out = model(batch_x, batch_y, seq_len, task_id=task_id)
            else:
                seq_len = None
                out = model(batch_x, batch_y, task_id=task_id)
            loss, pred = out["loss"], out["pred"]

            dev_loss += loss.item()

            # get_metric() resets the metric, so eval_res covers this batch
            # only; avg_acc is therefore the mean of per-batch scores, not a
            # corpus-level metric.
            task.metrics[0].evaluate(pred, batch_y, seq_len)
            eval_res = task.metrics[0].get_metric()
            dev_acc[task.task_name] = eval_res
            avg_acc += eval_res["acc"] if "acc" in eval_res else eval_res["f"]

    # Guard the empty-loader case instead of raising ZeroDivisionError.
    if n_batches == 0:
        logger.warning("eval_epoch_nlp: empty data loader for task %s", task_id)
        dev_acc["avg"] = 0.0
        return 0.0, dev_acc

    avg_acc /= n_batches
    dev_acc["avg"] = avg_acc
    dev_loss = dev_loss / n_batches

    logger.info('\nTest set:task id: {:.0f}, task name: {} , Accuracy: {:.4f}\n'
                .format(task_id, task.task_name, avg_acc))

    # Write the final-epoch result to the experiment CSV.
    if i_epoch == args.epochs-1:
        utils.write_result_to_csv_dense_nlp(
            name=f"{args.exp_name}~set={args.dataset}~task={task_id}~task_name={task.task_name}",
            Accuracy=avg_acc,
            num_params=args.num_params,
        )

    return dev_loss, dev_acc

def add_sparse_term(loss, model, i_epoch):
    """Add a sparsity penalty over all 'scores' parameters to `loss`.

    The penalty is the sum of an L0-style count of positive entries and a
    positive-part L1 term (relu(x) mass), weighted by args.alpha.
    `i_epoch` is accepted for interface compatibility but unused.
    """
    regularizer = torch.tensor(0.).to(args.device)
    for pname, weight in model.named_parameters():
        if 'scores' not in pname:
            continue
        positive = torch.gt(weight, 0)
        # L0 term: number of strictly positive scores.
        regularizer += torch.sum(positive)
        # Positive-L1 term: total mass of the positive scores.
        regularizer += torch.sum(positive * weight)
    return loss + args.alpha * regularizer

def prepare_optimizers(model, idx):
    """Build optimizers for weight parameters and for task `idx`'s scores.

    Weight/bias parameters are collected only when args.train_weight_tasks
    is negative; score parameters are those whose name contains 'score' and
    whose last name component equals `idx`.

    Returns:
        (optimizer_w, optimizer_m) when args.train_weight_tasks < 0
        (optimizer_w is None if no weight params were collected),
        otherwise just optimizer_m.
    """
    params_w, params_m = [], []
    for n, p in model.named_parameters():
        if not p.requires_grad:
            continue
        split = n.split(".")
        if args.train_weight_tasks < 0:
            if split[-1] == "weight" or split[-1] == "bias":
                params_w.append(p)
        if n.find('score') != -1 and int(split[-1]) == idx:
            params_m.append(p)

    def _build(params, lr):
        # One optimizer of the configured flavor for a parameter group.
        if args.optimizer == "adam":
            return optim.Adam(params, lr=lr, weight_decay=args.wd)
        if args.optimizer == "rmsprop":
            return optim.RMSprop(params, lr=lr)
        return optim.SGD(params, lr=lr,
                         momentum=args.momentum,
                         weight_decay=args.wd)

    # The original guarded the empty-params case only on the adam branch and
    # then referenced the unbound `optimizer_w` at return (NameError); guard
    # consistently and return None instead.
    optimizer_w = _build(params_w, args.train_weight_lr) if params_w else None
    optimizer_m = _build(params_m, args.train_score_lr)

    if args.train_weight_tasks < 0:
        return optimizer_w, optimizer_m
    else:
        return optimizer_m

def mask_status_nlp(model, task_id):
    """Extract the current binary masks for task `task_id` from `model`.

    Depending on args.pruning_method:
      - 'topK': threshold each |score| tensor with its module's sparsity
        ratio via GetSubnet.
      - 'threshold': mask = (score > 0) for parameters living under a
        "score_<task_id>" sub-module.

    Returns a dict mapping score-parameter name -> mask tensor.
    """
    masks = {}
    m_sparsity = {}

    # Collect per-module sparsity targets, keyed the same way score
    # parameters are named: "<module_name>.scores.<task_index>".
    for n, m in model.named_modules():
        if hasattr(m, "sparsity"):
            for i in range(args.num_tasks):
                m_sparsity[n + ".scores" + '.' + str(i)] = m.sparsity

    if args.pruning_method == 'topK':
        for name, param in model.named_parameters():
            split = name.split(".")
            # Only "...scores.<task_id>" parameters belong to this task.
            if split[-2] in ["scores"]:
                if split[-1] == str(task_id):
                    # NOTE(review): `module_util` is never imported in this
                    # file — this branch raises NameError as written; confirm
                    # the intended import (a project module with GetSubnet).
                    mask = module_util.GetSubnet.apply(
                        param.abs(), m_sparsity[name]  # NOTE(review): key lookup may not match — verify
                    )
                    masks[name] = mask

    if args.pruning_method == 'threshold':
        para_num = 0  # currently unused

        for name, param in model.named_parameters():
            split = name.split(".")
            if split[-2] == str("score_{}".format(task_id)):
                # A strictly positive score keeps the corresponding weight.
                mask = (param > 0.).float()
                masks[name] = mask
    return masks