import time
import copy
import os

import numpy as np
import torch
import torch.nn as nn
import torch.optim as optim
import utils
from args import args
from fastNLP import logger
from fastNLP.core.batch import DataSetIter
from fastNLP.core.sampler import BucketSampler, RandomSampler
from fastNLP import AccuracyMetric, SpanFPreRecMetric
from torch.optim.lr_scheduler import CosineAnnealingLR,ExponentialLR
import models.module_util as module_util
from models.modules import BinaryStep
import nlp_trainers.trainer_utils as trainer_utils
import math




def prepare_optimizers(model, idx):
    """Build optimizers for the weight and score (mask) parameters of ``model``.

    Trainable parameters are split into two groups:
      * ``params_w`` -- ``weight``/``bias`` tensors, collected only when
        ``args.train_weight_tasks < 0`` (i.e. weights are trained too);
      * ``params_m`` -- ``score`` tensors whose trailing name component
        equals ``idx`` (the per-task mask scores of task ``idx``).

    Args:
        model: model exposing ``named_parameters()``.
        idx: task id whose score parameters should be optimized.

    Returns:
        ``(optimizer_w, optimizer_m)`` when ``args.train_weight_tasks < 0``
        (``optimizer_w`` is ``None`` if no weight params were collected),
        otherwise just ``optimizer_m``.

    Bug fix: the original left ``optimizer_w`` unbound (NameError) in the
    adam branch when ``params_w`` was empty, and crashed with an
    empty-parameter-list error in the rmsprop/sgd branches; all branches
    now guard on a non-empty ``params_w``.
    """
    params_w, params_m = [], []
    for n, p in model.named_parameters():
        if not p.requires_grad:
            continue
        split = n.split(".")
        if args.train_weight_tasks < 0:
            if split[-1] == "weight" or split[-1] == "bias":
                params_w.append(p)
        # score params are stored per task; the last name component is the task id
        if n.find('score') != -1 and int(split[-1]) == idx:
            params_m.append(p)

    optimizer_w = None  # stays None when there are no weight params to train
    if args.optimizer == "adam":
        if params_w:
            optimizer_w = optim.Adam(params_w,
                                     lr=args.train_weight_lr,  # set to 0 to freeze weights
                                     weight_decay=args.wd)
        optimizer_m = optim.Adam(params_m,
                                 lr=args.train_score_lr,
                                 weight_decay=args.wd)
    elif args.optimizer == "rmsprop":
        if params_w:
            optimizer_w = optim.RMSprop(params_w, lr=args.train_weight_lr)
        optimizer_m = optim.RMSprop(params_m, lr=args.train_score_lr)
    else:
        if params_w:
            optimizer_w = optim.SGD(params_w,
                                    lr=args.train_weight_lr,
                                    momentum=args.momentum,
                                    weight_decay=args.wd)
        optimizer_m = optim.SGD(params_m,
                                lr=args.train_score_lr,
                                momentum=args.momentum,
                                weight_decay=args.wd)

    if args.train_weight_tasks < 0:
        return optimizer_w, optimizer_m
    return optimizer_m

def prepare_optimizers_2(model, idx):
    """Variant of :func:`prepare_optimizers` with a hard-coded adam score LR.

    Identical parameter grouping: ``params_w`` holds weight/bias tensors
    (only when ``args.train_weight_tasks < 0``), ``params_m`` holds the
    score tensors of task ``idx``.  The only difference from
    ``prepare_optimizers`` is the fixed learning rate for the score
    optimizer in the adam branch (a manual override from an experiment).

    Returns:
        ``(optimizer_w, optimizer_m)`` when ``args.train_weight_tasks < 0``
        (``optimizer_w`` may be ``None``), otherwise just ``optimizer_m``.

    Bug fix: guards all branches against an empty ``params_w`` so
    ``optimizer_w`` is never unbound and empty-parameter-list optimizer
    construction is avoided.
    """
    params_w, params_m = [], []
    for n, p in model.named_parameters():
        if not p.requires_grad:
            continue
        split = n.split(".")
        if args.train_weight_tasks < 0:
            if split[-1] == "weight" or split[-1] == "bias":
                params_w.append(p)
        # score params are stored per task; the last name component is the task id
        if n.find('score') != -1 and int(split[-1]) == idx:
            params_m.append(p)

    optimizer_w = None  # stays None when there are no weight params to train
    if args.optimizer == "adam":
        if params_w:
            optimizer_w = optim.Adam(params_w,
                                     lr=args.train_weight_lr,  # set to 0 to freeze weights
                                     weight_decay=args.wd)
        optimizer_m = optim.Adam(params_m,
                                 lr=9.03685701234717e-06,  # manual override (experiment)
                                 weight_decay=args.wd)
    elif args.optimizer == "rmsprop":
        if params_w:
            optimizer_w = optim.RMSprop(params_w, lr=args.train_weight_lr)
        optimizer_m = optim.RMSprop(params_m, lr=args.train_score_lr)
    else:
        if params_w:
            optimizer_w = optim.SGD(params_w,
                                    lr=args.train_weight_lr,
                                    momentum=args.momentum,
                                    weight_decay=args.wd)
        optimizer_m = optim.SGD(params_m,
                                lr=args.train_score_lr,
                                momentum=args.momentum,
                                weight_decay=args.wd)

    if args.train_weight_tasks < 0:
        return optimizer_w, optimizer_m
    return optimizer_m

def add_sparse_term(loss, model, i_epoch):
    """Add an asymmetric L1 penalty on the mask score parameters.

    Positive scores are penalised with weight ``0.00009 * args.alpha`` and
    the magnitudes of negative scores with ``0.00009 * (1 - args.alpha)``.
    ``i_epoch`` is accepted for interface parity but unused here.
    """
    pos_reg = torch.tensor(0.).to(args.device)
    neg_reg = torch.tensor(0.).to(args.device)
    for name, param in model.named_parameters():
        if 'scores' not in name:
            continue
        # relu(x) sums the positive part; relu(-x) sums |negative part|
        pos_reg = pos_reg + torch.relu(param).sum()
        neg_reg = neg_reg + torch.relu(-param).sum()
    return (loss
            + (0.00009 * args.alpha) * pos_reg
            + (0.00009 * (1 - args.alpha)) * neg_reg)

def add_sparse_term_v1(loss, model, i_epoch):
    """Earlier sparsity-penalty variant on the mask scores.

    NOTE(review): this variant accumulates BOTH an L0-style term (count of
    positive scores) and an L1-style term (sum of positive scores) into the
    same regulariser -- presumably intentional for this experiment, but
    verify before reusing.  ``i_epoch`` is unused.
    """
    reg = torch.tensor(0.).to(args.device)
    for name, param in model.named_parameters():
        if 'scores' not in name:
            continue
        reg = reg + (param > 0).sum()        # L0 term: how many scores are positive
        reg = reg + torch.relu(param).sum()  # L1 term: total mass of positive scores
    return loss + args.alpha * reg

def add_sparse_term_v2(loss, model, i_epoch):
    """Squared (L2-style) asymmetric penalty on the mask score parameters.

    Squares of positive scores are weighted by ``0.0001 * args.alpha``;
    squares of negative-score magnitudes by ``0.0001 * (1 - args.alpha)``.
    ``i_epoch`` is accepted for interface parity but unused here.
    """
    pos_reg = torch.tensor(0.).to(args.device)
    neg_reg = torch.tensor(0.).to(args.device)
    for name, param in model.named_parameters():
        if 'scores' not in name:
            continue
        pos_reg = pos_reg + torch.relu(param).pow(2).sum()
        neg_reg = neg_reg + torch.relu(-param).pow(2).sum()
    return (loss
            + (0.0001 * args.alpha) * pos_reg
            + (0.0001 * (1 - args.alpha)) * neg_reg)

def add_sparse_term_v3(loss, model, i_epoch):
    """L1 penalty on the positive part of every score parameter (ccis variant).

    Unlike the other variants this matches the substring ``'score'`` rather
    than ``'scores'``, so it covers any score-named parameter.  ``i_epoch``
    is unused.
    """
    pos_reg = torch.tensor(0.).to(args.device)
    for name, param in model.named_parameters():
        if 'score' in name:
            pos_reg = pos_reg + torch.relu(param).sum()
    return loss + args.alpha * pos_reg

def add_sparse_term_v4(loss, model, i_epoch):
    """L2 penalty pulling every score parameter towards ``args.intercept``.

    penalty = alpha * sum_i (s_i - intercept)^2
    (the original note lists 0.02 / 0.005 / 0.001 as typical intercepts).
    ``i_epoch`` is unused.
    """
    reg = torch.tensor(0.).to(args.device)
    for name, param in model.named_parameters():
        if 'score' in name:
            reg = reg + ((param - args.intercept) ** 2).sum()
    return loss + args.alpha * reg

def add_sparse_term_v5(loss, model, epoch):
    """L1 penalty on score entries above an epoch-dependent threshold.

    The threshold drops every 10 epochs (0.09 -> 0.065 -> 0.04 -> 0.015 ->
    -0.01), so the penalty gradually covers more of the score distribution.
    """
    # pick the threshold once; it only depends on the epoch
    if epoch < 10:
        threshold = 0.09
    elif epoch < 20:
        threshold = 0.065
    elif epoch < 30:
        threshold = 0.04
    elif epoch < 40:
        threshold = 0.015
    else:
        threshold = -0.01

    reg = torch.tensor(0.).to(args.device)
    for name, param in model.named_parameters():
        if 'score' in name:
            reg = reg + ((param > threshold) * param).sum()
    return loss + args.alpha * reg

def alpha(input, i_epoch):
    """Gate the sparsity weight ``input`` on/off by an epoch schedule.

    Returns ``input`` only while ``i_epoch < args.alpha_epoch`` AND the
    epoch falls in the first ``args.alpha_epoch_2`` epochs of each
    ``args.alpha_epoch_1``-long cycle; otherwise 0.
    TODO: tune the alpha value -- earlier experiments may have relied on this.
    """
    active = (i_epoch < args.alpha_epoch
              and i_epoch % args.alpha_epoch_1 < args.alpha_epoch_2)
    return input if active else 0

def intercept(input, i_epoch):
    """Decay the intercept value ``input`` as training progresses.

    Schedule: full value before epoch 50, ``input / 20`` before epoch 100,
    ``input / 40`` before epoch 150, then 0.

    Bug fix: the original repeated ``elif i_epoch < 100`` twice, making the
    ``input / 40`` branch unreachable; the second bound is assumed to be
    150, continuing the 50-epoch steps -- TODO confirm intended schedule.
    """
    if i_epoch < 50:
        return input
    if i_epoch < 100:
        return input / 20
    if i_epoch < 150:
        return input / 40
    return 0



def print_mask_status(model, logger):
    """Log the per-task mask keep ratio and every pairwise mask overlap."""
    n_tasks = model.num_tasks
    for tid in range(n_tasks):
        ratio = model.get_mask_keep_ratio(tid)
        logger.info("keep ratio for task {}: {:.4f}".format(tid, ratio))

    # overlap for every unordered task pair (t1 < t2)
    for t1 in range(n_tasks):
        for t2 in range(t1 + 1, n_tasks):
            logger.info("overlap of {}-{}: {:.4f}".format(
                t1, t2, model.get_mask_overlap(t1, t2)))


def mask_status(model, task_id):
    """Compute the binary masks of task ``task_id`` from its score parameters.

    First records, for every module exposing a ``sparsity`` attribute, that
    sparsity under each per-task score name (``<module>.scores.<i>``).
    Then binarises each ``...scores.<task_id>`` parameter according to
    ``args.pruning_method``:
      * ``'topK'``      -- keep the top-|score| fraction given by the
                           module's recorded sparsity (via GetSubnet);
      * ``'threshold'`` -- keep entries whose score is >= 0.

    Returns:
        dict mapping score-parameter name -> mask tensor.

    Cleanup: removed the unused ``para_num`` counter and the unused
    ``unit_step = BinaryStep.apply`` binding (the active code thresholds
    at 0 directly), and merged the nested task-id checks.
    """
    masks = {}
    m_sparsity = {}

    for n, m in model.named_modules():
        if hasattr(m, "sparsity"):
            # record the same sparsity for every task's score tensor of this module
            for i in range(args.num_tasks):
                m_sparsity[n + ".scores" + '.' + str(i)] = m.sparsity

    if args.pruning_method == 'topK':
        for name, param in model.named_parameters():
            split = name.split(".")
            if split[-2] in ["scores"] and split[-1] == str(task_id):
                # NOTE(review): the original flagged this sparsity lookup as
                # possibly wrong ("有可能存在问题") -- verify the key mapping.
                mask = module_util.GetSubnet.apply(param.abs(), m_sparsity[name])
                masks[name] = mask
    if args.pruning_method == 'threshold':
        for name, param in model.named_parameters():
            split = name.split(".")
            if split[-2] in ["scores"] and split[-1] == str(task_id):
                # hard threshold at 0; a BinaryStep-based variant was used previously
                masks[name] = (param >= 0.).float()
    return masks


def train_nlp(model, task_lst, writer, vocabs):
    """Top-level multi-task NLP training loop.

    Trains ``model`` on all tasks in ``task_lst`` for ``args.epochs``
    epochs, steps the LR schedulers (when enabled), logs per-task mask
    statistics and evaluation scores each epoch, and tracks the best
    average accuracy seen so far.

    Args:
        model: multi-task model with per-task mask scores.
        task_lst: list of task objects (with ``task_name`` and data loaders).
        writer: summary writer used for scalar logging.
        vocabs: mapping ``task_name`` -> tag vocabulary (for span metrics).
    """
    total_time = time.time()
    logger.info("Start training...")
    args.num_params = [0.0 for _ in range(args.num_tasks)]
    best_acc = 0
    best_epoch = 0
    steps = 0

    # set up the weight optimizer, with optional cosine-annealing schedule
    if args.scheduler_weight:
        optimizer_w = trainer_utils.prepare_weight_optimizers_nlp(model)
        scheduler_w = CosineAnnealingLR(optimizer_w, T_max=args.epochs)
    else:
        optimizer_w = trainer_utils.prepare_weight_optimizers_nlp(model)
        scheduler_w = None

    # set up the score (mask) optimizer, with optional custom schedule
    if args.scheduler_score:
        optimizer_m = trainer_utils.prepare_score_optimizers_nlp(model)
        scheduler_m = trainer_utils.LRcheduler_wm(optimizer_m, T_max=args.lr_epochs)
    else:
        optimizer_m = trainer_utils.prepare_score_optimizers_nlp(model)
        scheduler_m = None



    # one metric per task: accuracy for classification-style tasks,
    # span F1 otherwise (bioes encoding for NER, bio for the rest)
    metrics = []
    for t in task_lst:
        if utils.need_acc(t.task_name):
            metrics.append(AccuracyMetric())
        else:
            metrics.append(
                SpanFPreRecMetric(
                    vocabs[t.task_name],
                    encoding_type="bioes" if t.task_name == "ner" else "bio",
                )
            )

    # tasks not listed in args.tasks are treated as "empty" and skipped
    include_tasks = args.tasks
    if include_tasks is None:
        empty_tasks = set()
    else:
        empty_tasks = set(range(len(task_lst))) - set(include_tasks)

    for i_epoch in range(args.epochs):
        start_time = time.time()
        logger.info("Epoch {}".format(i_epoch))

        logger.info("========== Train ==========")
        steps = train_nlp_epoch(model, task_lst, writer,i_epoch, optimizer_w, optimizer_m, metrics, empty_tasks, steps)

        # step schedulers once per epoch (per-step stepping is commented out
        # inside train_nlp_epoch)
        if scheduler_m is not None:
            scheduler_m.step()

        if scheduler_w is not None:
            scheduler_w.step()

        logger.info("Epoch {} finished. Elapse: {:.3f}s.".format(
            i_epoch,
            time.time() - start_time))

        Mean_Parameter_Num = trainer_utils.mask_status_nlp_v2(model, writer, i_epoch, task_lst)


        # log acc per epoch for all tasks
        logger.info("========== Test ==========")
        # NOTE(review): dev=False evaluates the TEST loaders even though the
        # message below says "Validation" -- confirm this is intended.
        dev_loss, dev_acc = eval_nlp_epoch(model, task_lst, writer, i_epoch, vocabs, empty_tasks, dev=False )
        eval_str = "Validation loss {}, avg acc {:.4f}%".format(
            dev_loss, dev_acc["avg"]
        )
        for task, value in dev_acc.items():
            if utils.need_acc(task) and task != "avg":
                eval_str += ", {} acc {:.4f}%".format(task, value["acc"])
            elif task != "avg":
                eval_str += ", {} f1 {:.4f}%".format(task, value["f"])
        logger.info(eval_str)

        if dev_acc["avg"] > best_acc:
            best_acc = dev_acc["avg"]
            best_epoch = i_epoch
            logger.info("Updating best acc...")

        logger.info(
            "Current best acc [{:.4f}%] occured at epoch [{}].Mean_Parameter_Num [{:.0f}]".format(
                best_acc, best_epoch, Mean_Parameter_Num
            )
        )

        #
        # logger.info(args.exp_name)
        #
        # for acc in dev_acc.items():
        #     logger.info(acc)

    logger.info("Training finished. Elapse {:.4f} hours.".format(
        (time.time() - total_time) / 3600))

def train_nlp_epoch(model, task_lst, writer, i_epoch, optimizer_w, optimizer_m, metrics, empty_tasks, steps):
    """Train one epoch, interleaving batches of all tasks in random order.

    Draws one batch per task following a shuffled task order until every
    task's loader is exhausted; accumulates gradients for
    ``args.accumulation_steps`` batches before stepping both optimizers.
    A sparsity penalty (add_sparse_term_v5) is added to every batch loss.

    Returns:
        The updated global step counter ``steps``.
    """
    total_loss = 0
    n_tasks = len(task_lst)
    # random task visiting order for this epoch
    task_seq = list(np.random.permutation(n_tasks))
    empty_task = copy.deepcopy(empty_tasks)

    model.train()
    model.zero_grad()

    # # Clear the grad on all the parameters.
    # for p in model.parameters():
    #     p.grad = None

    # logger.info(model)

    # Make a list of the parameters relavent to this task.

    device = args.device
    for task in task_lst:
        task.train_loader = iter(task.train_loader)
    # loop until every (non-skipped) task loader has been exhausted once
    while len(empty_task) < n_tasks:
        for task_id in task_seq:
            if task_id in empty_task:
                continue
            task = utils.find_task(task_id, task_lst)
            # task = task_lst[task_id]
            # optimizer_m = optimizer_mlist[task_id]  # over-complicated before;
            # scores of other task_ids are never updated anyway
            # optimizer_m.zero_grad()
            # optimizer_w.zero_grad()


            batch = next(task.train_loader, None)
            if batch is None:
                # loader exhausted: mark the task done and rebuild its loader
                # for the next epoch
                empty_task.add(task_id)
                task.train_loader = DataSetIter(
                    task.train_set,
                    args.batch_size,
                    sampler=BucketSampler(batch_size=args.batch_size),
                )
                continue
            x, y = batch
            # batch_task_id = x["task_id"].to(device)
            batch_x = x["x"].to(device)
            batch_y = y["y"].to(device)

            args.cur_task_id = task_id

            if "seq_len" in x:
                seq_len = x["seq_len"].to(device)
                # writer.add_graph(model, input_to_model=batch_x)
                out = model(batch_x, batch_y, seq_len, task_id=task_id, sparsity=args.sparsity, epoch=i_epoch)
            else:
                seq_len = None
                out = model(batch_x, batch_y, task_id=task_id, sparsity=args.sparsity, epoch=i_epoch)
            loss, pred = out["loss"], out["pred"]
            steps += 1

            # add the epoch-scheduled sparsity penalty on the mask scores
            loss = add_sparse_term_v5(loss, model, i_epoch)
            total_loss += loss.item()
            loss = loss / args.accumulation_steps
            loss.backward()

            metrics[task_id].evaluate(pred, batch_y, seq_len)
            # metrics = task.metrics[0]
            # metrics.evaluate(pred, batch_y, seq_len)

            # step and reset both optimizers every accumulation window
            if steps % args.accumulation_steps == 0:
                nn.utils.clip_grad_value_(model.parameters(), 5)
                # if i_epoch < 25:
                #     optimizer_m.step()  # todo: controls whether to hard-share

                # optimizer_m.zero_grad()
                optimizer_m.step()
                optimizer_w.step()

                optimizer_w.zero_grad()
                optimizer_m.zero_grad()
                # if scheduler_m is not None:
                #     scheduler_m.step()
                # if scheduler_w is not None:
                #     scheduler_w.step()

            if steps % args.print_every == 0:
                writer.add_scalar("train_loss", total_loss / args.print_every,
                                  steps)
                score = metrics[task_id].get_metric()
                # score = metrics.get_metric()
                metric_name = "acc" if "acc" in score else "f1"
                score = score["acc"] if "acc" in score else score["f"]
                writer.add_scalar("train_acc", score, steps)
                writer.add_scalar("train/weight Learning rate", optimizer_w.state_dict()['param_groups'][0]['lr'], i_epoch)
                writer.add_scalar("train/score Learning rate", optimizer_m.state_dict()['param_groups'][0]['lr'], i_epoch)
                logger.info(
                    " - Step {}: loss {}\t{}\t{}: {}".format(
                        steps,
                        total_loss / args.print_every,
                        task.task_name,
                        metric_name,
                        score,
                    )
                )
                # logger.info(" - Step {}: loss {}\t{}\t{}: {}".format(
                #     steps,
                #     total_loss / args.print_every,
                #     task.task_name,
                #     task.metric_key,
                #     score,
                # ))
                total_loss = 0
                # corrects, samples = 0, 0
    return steps

def eval_nlp_epoch(model, task_lst, writer, i_epoch, vocabs, empty_tasks, dev=True):
    """Evaluate the model on all (non-skipped) tasks for one epoch.

    Runs each task over its dev loader (``dev=True``) or test loader
    (``dev=False``), accumulating fresh per-task metrics, then builds a
    per-task score dict plus the average.  On the final epoch, also writes
    each task's result to a CSV via ``utils.write_result_to_csv_wm_nlp``.

    Returns:
        tuple ``(dev_loss, dev_acc)`` where ``dev_loss`` is the mean batch
        loss and ``dev_acc`` maps task_name -> metric dict, plus an
        ``"avg"`` entry with the mean accuracy/F1 over evaluated tasks.
    """
    logger.info("Evaluating...")
    dev_loss = 0
    e_steps = 0
    avg_acc = 0
    dev_acc = {}
    model.eval()
    # fresh metrics so previous epochs/training do not leak into the scores
    metrics = []
    for t in task_lst:
        if utils.need_acc(t.task_name):
            metrics.append(AccuracyMetric())
        else:
            metrics.append(
                SpanFPreRecMetric(
                    vocabs[t.task_name],
                    encoding_type="bioes" if t.task_name == "ner" else "bio",
                )
            )
    # avg_acc_pos = 0
    # avg_acc_chunk = 0
    # avg_acc_ner = 0

    device = args.device

    with torch.no_grad():
        for task_id in range(len(task_lst)):
            corrects, samples = 0, 0
            # samples = 0
            task = utils.find_task(task_id, task_lst)
            if task.task_id in empty_tasks:
                continue
            if dev:
                data_loader = task.dev_loader
            else:
                data_loader = task.test_loader
            for batch in data_loader:
                x, y = batch
                batch_x = x["x"].to(device)
                batch_y = y["y"].to(device)
                if "seq_len" in x:
                    seq_len = x["seq_len"].to(device)
                    out = model(batch_x, batch_y, seq_len, task_id=task_id, sparsity=args.sparsity, epoch=i_epoch)
                else:
                    seq_len = None
                    out = model(batch_x, batch_y, task_id=task_id, epoch=i_epoch)
                loss, pred = out["loss"], out["pred"]

                dev_loss += loss.item()
                e_steps += 1

                metrics[task_id].evaluate(pred, batch_y, seq_len)

                # also feed the task's own persistent metric object
                task.metrics[0].evaluate(pred, batch_y, seq_len)
                samples += batch_x.size(0)


                # # eval_res = task.metrics[0].get_metric()
                # # dev_acc[task.task_name] = eval_res
                # # avg_acc += eval_res["acc"] if "acc" in eval_res else eval_res[
                # #     "f"]
                #
                # if task.task_name == 'pos':
                #     avg_acc_pos += eval_res["acc"] if "acc" in eval_res else eval_res["f"]
                # elif task.task_name == 'chunk':
                #     avg_acc_chunk += eval_res["acc"] if "acc" in eval_res else eval_res["f"]
                # else:
                #     avg_acc_ner += eval_res["acc"] if "acc" in eval_res else eval_res["f"]
                #
                # if i_epoch == 20:
                #     utils.write_result_to_csv_wm_nlp(
                #         name=f"{args.exp_name}~set={args.dataset}~task={task_id}~task_name={task.task_name}",
                #         Accuracy=eval_res["acc"] if "acc" in eval_res else eval_res["f"],
                #         num_params=args.num_params[task_id],
                #     )
                #
                # # samples += batch_x.size(0)

        # collect per-task scores; "acc" for accuracy metrics, "f" for span F1
        for i in range(len(task_lst)):
            task = utils.find_task(i, task_lst)
            eval_res = metrics[i].get_metric()
            dev_acc[task.task_name] = eval_res
            avg_acc += eval_res["acc"] if "acc" in eval_res else eval_res["f"]

    avg_acc /= len(task_lst) - len(empty_tasks)
    dev_acc["avg"] = avg_acc
    dev_loss = dev_loss / e_steps

    # on the last epoch, persist the final per-task results to CSV
    if i_epoch == args.epochs - 1:
        for task_id in range(len(task_lst)):
            task = task_lst[task_id]
            utils.write_result_to_csv_wm_nlp(
                name=f"{args.exp_name}~set={args.dataset}~task={task_id}~task_name={task.task_name}",
                Accuracy=dev_acc[task.task_name]["acc"] if "acc" in dev_acc[task.task_name] else dev_acc[task.task_name]["f"],
                num_params=args.num_params[task_id],
            )

    return dev_loss, dev_acc

    # avg_acc /= len(task_lst)  # - len(self.empty_tasks)
    # dev_acc["avg_acc_pos"] = avg_acc_pos / e_steps*3
    # dev_acc["avg_acc_chunk"] = avg_acc_chunk / e_steps*3
    # dev_acc["avg_acc_ner"] = avg_acc_ner / e_steps*3
    # dev_loss = dev_loss / e_steps
    # return dev_loss, dev_acc



def prepare_optimizers_nlp(model, idx):
    """Build weight/score optimizers for the NLP setting.

    Unlike :func:`prepare_optimizers`, ALL score parameters are collected
    regardless of ``idx`` (``idx`` is kept only for interface parity), and
    weight params are matched by substring ('weight'/'bias' anywhere in
    the name) rather than by the trailing name component.

    Returns:
        tuple ``(optimizer_w, optimizer_m)``; either element is ``None``
        when the corresponding parameter group is empty.

    Bug fix: the original could hit a NameError at the return -- adam left
    ``optimizer_w`` unbound when ``params_w`` was empty, and the SGD branch
    left ``optimizer_m`` unbound when ``params_m`` was empty.  Both are now
    initialised to ``None`` and every branch guards on non-empty groups.
    """
    params_w, params_m = [], []
    for n, p in model.named_parameters():
        if not p.requires_grad:
            continue
        if args.train_weight_tasks < 0:
            if n.find('weight') != -1 or n.find('bias') != -1:
                params_w.append(p)
        if n.find('score') != -1:  # all tasks' scores, not just task idx
            params_m.append(p)

    optimizer_w = None
    optimizer_m = None
    if args.optimizer == "adam":
        if params_w:
            optimizer_w = optim.Adam(params_w,
                                     lr=args.train_weight_lr,  # set to 0 to freeze weights
                                     weight_decay=args.wd)
        if params_m:
            optimizer_m = optim.Adam(params_m,
                                     lr=args.train_score_lr,
                                     weight_decay=args.wd)
    elif args.optimizer == "rmsprop":
        if params_w:
            optimizer_w = optim.RMSprop(params_w, lr=args.train_weight_lr)
        if params_m:
            optimizer_m = optim.RMSprop(params_m, lr=args.train_score_lr)
    else:
        if params_w:
            optimizer_w = optim.SGD(params_w,
                                    lr=args.train_weight_lr,
                                    momentum=args.momentum,
                                    weight_decay=args.wd)
        if params_m:
            optimizer_m = optim.SGD(params_m,
                                    lr=args.train_score_lr,
                                    momentum=args.momentum,
                                    weight_decay=args.wd)

    return optimizer_w, optimizer_m