import time
import os

import numpy as np
import torch
import torch.nn as nn
import torch.optim as optim
import utils
from args import args
from fastNLP import logger
from fastNLP.core.batch import DataSetIter
from fastNLP.core.sampler import BucketSampler, RandomSampler
from torch.optim.lr_scheduler import CosineAnnealingLR,MultiStepLR,ExponentialLR
import models.module_util as module_util
from models.modules import BinaryStep
import math





def train_cv(model, data_loader, writer):
    """Main multi-task training loop for the CV setting.

    Builds per-task mask (score) optimizers plus an optional shared-weight
    optimizer, trains for ``args.epochs`` epochs, logs losses and mask
    overlap to ``writer``, and evaluates every task after each epoch.

    Args:
        model: multi-task network with per-task score (mask) parameters.
        data_loader: project data-loader wrapper exposing per-task loaders.
        writer: SummaryWriter-like object used for scalar logging.
    """
    total_time = time.time()
    logger.info("Start training...")

    # Best dev accuracy per task; updated in place by eval_cv_all.
    best_acc1 = [0.0 for _ in range(args.num_tasks)]

    criterion = nn.CrossEntropyLoss().to(args.device)

    optimizer_mlist = []  # one mask optimizer per task
    scheduler_mlist = []  # one LR scheduler per mask optimizer
    # NOTE(review): lr_writer is never updated (the get_lr call below is
    # commented out), so the logged mask LR is always 0.0 -- confirm intent.
    lr_writer = [0.0 for _ in range(args.num_tasks)]
    args.num_params = [0.0 for _ in range(args.num_tasks)]
    optimizer_w = None

    if args.no_scheduler:
        scheduler_w, scheduler_m = None, None
        for i in range(args.num_tasks):
            # NOTE(review): this unpacking assumes prepare_optimizers returns
            # a (weight, mask) pair, i.e. args.train_weight_tasks < 0 --
            # confirm for the no_scheduler configuration.
            optimizer_w, M = prepare_optimizers(model, i)
            optimizer_mlist.append(M)
    else:
        for i in range(args.num_tasks):
            if args.train_weight_tasks < 0:  # whether shared weights are trained too
                optimizer_w, M = prepare_optimizers(model, i)
            else:
                M = prepare_optimizers(model, i)

            optimizer_mlist.append(M)
            if args.pruning_method == 'topK':
                scheduler_m = CosineAnnealingLR(M, T_max=args.epochs)
            else:
                # Two-phase schedule: cosine for the first args.lr_epochs
                # epochs, exponential decay afterwards.
                scheduler_m = LRcheduler_wm(M, T_max_1=args.lr_epochs, T_max_2=args.epochs - args.lr_epochs)
            scheduler_mlist.append(scheduler_m)
        if optimizer_w is not None:
            # Weight training only starts at epoch args.w_epochs, so the
            # cosine horizon excludes the warm-up epochs.
            scheduler_w = CosineAnnealingLR(optimizer_w, T_max=args.epochs - args.w_epochs)
        else:
            scheduler_w = None

    for i_epoch in range(args.epochs):
        start_time = time.time()
        logger.info("Epoch {}".format(i_epoch))

        loss_list, mask_list = train_cv_epoch(model, data_loader, criterion, optimizer_mlist, optimizer_w, i_epoch)
        if args.warmup_save:
            print("warmup_save done")
            if i_epoch == 9:  # snapshot right after the 10-epoch warm-up
                torch.save(model.state_dict(), os.path.join(
                    args.save_model_path, "warmup_{}.th".format(i_epoch)))

        if i_epoch == 0:
            # Reference masks from the first epoch, used to measure overlap drift.
            mask_int = mask_list

        # BUG FIX: the old guard was `scheduler_mlist is not None`, but the
        # list is always a list; under args.no_scheduler it stays *empty*
        # and indexing it raised IndexError.  Truthiness covers both cases.
        if scheduler_mlist:
            for i in range(args.num_tasks):
                scheduler_mlist[i].step()
                # lr_writer[i] = scheduler_mlist[i].get_lr()[0]

        if scheduler_w is not None:
            if i_epoch >= args.w_epochs:  # weights are frozen before w_epochs
                scheduler_w.step()

        for i in range(args.num_tasks):
            writer.add_scalar(f"train/task_{i}/loss", loss_list[i],
                              i_epoch)
            # Plot the mask learning rate against the epoch.
            writer.add_scalar(f"lr_mask/task_{i}/lr", lr_writer[i],
                              i_epoch)

            ones_overlap = 0
            ones_m1, ones_m2 = 0, 0
            if i_epoch > 0:
                # Compare the current mask with the epoch-0 reference mask.
                for name in mask_list[i]:
                    m1 = mask_list[i][name]
                    m2 = mask_int[i][name]
                    ones_m1 += torch.sum(m1)
                    ones_m2 += torch.sum(m2)
                    ones_overlap += torch.sum(m1 * m2)
                logger.info(f"num_params : task: {i}, num: {ones_m1}")
                args.num_params[i] = ones_m1  # exported to the result CSV
                writer.add_scalar(f"train/task_{i}/overlap", ones_overlap / (ones_m1 + ones_m2),
                                  i_epoch)

        logger.info("Epoch {} finished. Elapse: {:.3f}s.".format(
            i_epoch,
            time.time() - start_time))

        # Evaluate every task; best_acc1 is updated in place, no return value.
        logger.info("========== Test ==========")
        eval_cv_all(model,
                    criterion,
                    data_loader,
                    i_epoch,
                    writer,
                    best_acc1)

    logger.info("Training finished. Elapse {:.4f} hours.".format(
        (time.time() - total_time) / 3600))


def train_cv_epoch(model, data_loader, criterion, optimizer_mlist, optimizer_w, i_epoch):
    """Run one epoch of round-robin multi-task training.

    Tasks are visited in a freshly shuffled order; a task drops out of the
    rotation once its train loader is exhausted.

    Args:
        model: multi-task network; reads ``args.cur_task_id`` to pick the mask.
        data_loader: wrapper exposing ``make_iterable()`` and
            ``get_train_loader(task_id)``.
        criterion: classification loss.
        optimizer_mlist: per-task mask optimizers.
            NOTE(review): currently unused -- the mask step is commented out
            in the original; confirm whether masks are meant to be updated here.
        optimizer_w: shared-weight optimizer, or ``None`` when weights are frozen.
        i_epoch: current epoch index (gates sparse term and weight updates).

    Returns:
        (loss_list, mask_list): last seen loss (float) per task and the
        per-task binary masks from ``mask_status``.
    """
    n_tasks = args.num_tasks
    task_seq = list(np.random.permutation(n_tasks))  # shuffle task order
    empty_task = set()
    model.zero_grad()
    model.train()

    loss_list = [0.0 for _ in range(args.num_tasks)]
    mask_list = [0.0 for _ in range(args.num_tasks)]

    data_loader.make_iterable()
    # The model reads args.cur_task_id to select the task-specific mask.
    setattr(args, "cur_task_id", None)

    steps = 0
    while len(empty_task) < n_tasks:
        for task_id in task_seq:
            if task_id in empty_task:
                continue

            train_loader = data_loader.get_train_loader(task_id)
            batch = next(train_loader, None)
            if batch is None:
                empty_task.add(task_id)
                continue

            data, target = batch
            data, target = data.to(args.device), target.to(args.device)

            # Direct the model to select params associated with current task.
            args.cur_task_id = task_id

            # BUG FIX: optimizer_w is legitimately None when the shared
            # weights are frozen (args.train_weight_tasks >= 0); the old
            # unconditional zero_grad() raised AttributeError in that case.
            if optimizer_w is not None:
                optimizer_w.zero_grad()

            output = model(data)
            loss = criterion(output, target)
            if args.pruning_method == 'threshold':
                loss = add_sparse_term_v3(loss, model, i_epoch)
            loss.backward()

            if steps % args.accumulation_steps == 0:
                # Weight updates only start after the warm-up epochs.
                if optimizer_w is not None and i_epoch >= args.w_epochs:
                    optimizer_w.step()

            mask_list[task_id] = mask_status(model, task_id)

            round_idx = steps // n_tasks  # floor division: one round = one pass over tasks
            if round_idx % args.log_interval == 0:
                logger.info(
                    f"Round:{round_idx},Task:{task_id},Loss:{loss.item():.6f}")
            steps += 1
            # Store a plain float so the loss tensor is not kept alive.
            loss_list[task_id] = loss.item()
    return loss_list, mask_list


def eval_cv_all(model, criterion, data_loader, i_epoch, writer, best_acc1):
    """Evaluate the model on the dev split of every task.

    Logs per-task loss/accuracy to ``writer``, updates ``best_acc1`` in
    place (the caller keeps it across epochs), and on the final epoch writes
    a result row to CSV.  No value is returned.

    Args:
        model: multi-task network; reads ``args.cur_task_id``.
        criterion: classification loss.
        data_loader: wrapper exposing ``get_dev_loader(task_id)``.
        i_epoch: current epoch index (for logging).
        writer: SummaryWriter-like logger.
        best_acc1: per-task best accuracies, mutated in place.
    """
    model.zero_grad()
    model.eval()

    curr_acc1 = [0.0 for _ in range(args.num_tasks)]

    for idx in range(args.num_tasks):
        with torch.no_grad():
            test_loss = 0
            correct = 0
            logit_entropy = 0.0

            args.cur_task_id = idx  # select the mask of the current task

            data_dev = data_loader.get_dev_loader(idx)

            for data, target in data_dev:
                if isinstance(data, list):  # some loaders wrap the tensor in a list
                    data = data[0]
                data, target = data.to(args.device), target.to(args.device)
                output = model(data)
                if len(output.shape) == 1:
                    output = output.unsqueeze(0)
                # Mean per-sample entropy of the predictive distribution.
                logit_entropy += (
                    -(output.softmax(dim=1) * output.log_softmax(dim=1))
                        .sum(1)
                        .mean()
                        .item()
                )
                test_loss += criterion(output, target).item()

                # get the index of the max log-probability
                pred = output.argmax(dim=1, keepdim=True)

                correct += pred.eq(target.view_as(pred)).sum().item()

            # Loss/entropy are averaged per batch; accuracy per sample.
            test_loss /= len(data_dev)
            logit_entropy /= len(data_dev)
            curr_acc1[idx] = float(correct) / len(data_dev.dataset)

            if curr_acc1[idx] > best_acc1[idx]:
                best_acc1[idx] = curr_acc1[idx]

            print(f"\nTest set:task id: {idx:.0f},  Average loss: {test_loss:.4f}, Accuracy: ({curr_acc1[idx]:.4f})")
            writer.add_scalar(f"test/task_{idx}/loss", test_loss, i_epoch)
            writer.add_scalar(f"test/task_{idx}/acc", curr_acc1[idx], i_epoch)
            # TODO(review): magic final-epoch index; should derive from
            # args.epochs - 1 instead of the hard-coded 249.
            if i_epoch == 249:
                logger.info(f"\nTest set:task id: {idx:.0f},  Average loss: {test_loss:.4f}, Accuracy: ({curr_acc1[idx]:.4f})")
                utils.write_result_to_csv_sparsesharing(
                    name=f"{args.exp_name}~set={args.dataset}~task={idx}~pruning_iter={args.pruning_iter}",
                    curr_acc1=curr_acc1[idx],
                    num_params=args.num_params[idx],
                    best_acc1=best_acc1[idx],
                )


def eval_cv_epoch(model, data_loader, criterion, epoch, writer, dev=True):
    """Evaluate every task on its dev (or test) loader and log the results.

    Args:
        model: multi-task network.
        data_loader: wrapper with ``dev_loader``/``test_loader`` attributes.
        criterion: classification loss.
        epoch: epoch index used for logging.
        writer: SummaryWriter-like logger.
        dev: use the dev loader when True, otherwise the test loader.

    Returns:
        (test_acc, test_loss) of the last evaluated task (original return
        order preserved).
    """
    model.zero_grad()
    model.eval()
    test_acc = 0.0
    test_loss = 0

    with torch.no_grad():
        # BUG FIX: args.num_tasks is an int -- the old code called
        # len(args.num_tasks), which raises TypeError.
        for task_id in range(args.num_tasks):
            # BUG FIX: accumulators are reset per task; previously they
            # leaked across tasks.
            test_loss = 0
            correct = 0
            logit_entropy = 0.0

            if dev:
                loader = data_loader.dev_loader
            else:
                loader = data_loader.test_loader  # NOTE(review): may not exist on this wrapper -- confirm

            for data, target in loader:
                if isinstance(data, list):
                    data = data[0]
                data, target = data.to(args.device), target.to(args.device)
                output = model(data)
                if len(output.shape) == 1:
                    output = output.unsqueeze(0)
                logit_entropy += (
                    -(output.softmax(dim=1) *
                      output.log_softmax(dim=1)).sum(1).mean().item())
                test_loss += criterion(output, target).item()

                # get the index of the max log-probability
                pred = output.argmax(dim=1, keepdim=True)

                correct += pred.eq(target.view_as(pred)).sum().item()

            test_loss /= len(loader)
            logit_entropy /= len(loader)
            test_acc = float(correct) / len(loader.dataset)

            logger.info(
                f"Test set: Average loss: {test_loss:.4f}, Accuracy: ({test_acc:.4f})\n"
            )

            writer.add_scalar(f"test/task_{task_id}/loss", test_loss, epoch)
            writer.add_scalar(f"test/task_{task_id}/acc", test_acc, epoch)
            writer.add_scalar(f"test/task_{task_id}/entropy", logit_entropy,
                              epoch)

            utils.write_result_to_csv(
                name=f"{args.exp_name}~set={args.dataset}~task={task_id}",
                curr_acc1=test_acc,
                save_dir=args.run_base_dir,
            )

    # BUG FIX: the return previously sat inside the task loop, so only the
    # first task was ever evaluated.
    return test_acc, test_loss

def train(model, task_lst, writer):
    """Epoch loop for the NLP task list: alternate scores/weights training,
    report mask status, and evaluate all tasks after every epoch."""
    t_start = time.time()
    logger.info("Start training...")

    for epoch_idx in range(args.epochs):
        epoch_start = time.time()
        logger.info("Epoch {}".format(epoch_idx))

        # Alternate which parameter group is trained each epoch.
        which_params = 'scores' if epoch_idx % 2 == 0 else 'weights'
        train_epoch(model, task_lst, which_params, writer)

        logger.info("Epoch {} finished. Elapse: {:.3f}s.".format(
            epoch_idx, time.time() - epoch_start))

        print_mask_status(model, logger)

        # Log accuracy for every task after this epoch.
        logger.info("========== Test ==========")
        test_loss, test_acc = eval_epoch(model, task_lst, writer, dev=False)
        logger.info(args.exp_name)
        for acc in test_acc.items():
            logger.info(acc)

    logger.info("Training finished. Elapse {:.4f} hours.".format(
        (time.time() - t_start) / 3600))

def train_epoch(model, task_lst, target_param, writer):
    """Train one epoch over the NLP tasks in randomized round-robin order.

    Tasks are drawn in a shuffled sequence; a task leaves the rotation once
    its loader is exhausted (the loader is rebuilt for the next epoch).
    Gradients are accumulated over ``args.accumulation_steps`` batches, then
    clipped and applied by both the mask and the weight optimizer.

    Args:
        model: multi-task network called as ``model(x, y, [seq_len], task_id=...)``.
        task_lst: list of task objects exposing train_loader/train_set/metrics.
        target_param: 'scores' or 'weights'.
            NOTE(review): unused inside this function -- confirm intent.
        writer: SummaryWriter-like logger.

    NOTE(review): ``prepare_optimizers(model)`` is called with one argument,
    but the definition in this file requires a task index -- as written this
    call raises TypeError; confirm which signature is intended.
    """
    total_loss = 0
    n_tasks = len(task_lst)
    task_seq = list(np.random.permutation(n_tasks))
    empty_task = set()
    model.train()
    model.zero_grad()

    # Make a list of the parameters relevant to this task.
    optimizer_w, optimizer_m = prepare_optimizers(model)

    if args.no_scheduler:
        scheduler_w, scheduler_m = None, None
    else:
        scheduler_w = CosineAnnealingLR(optimizer_w, T_max=args.epochs)
        scheduler_m = CosineAnnealingLR(optimizer_m, T_max=args.epochs)

    # Turn each loader into an iterator so batches can be drawn one at a time.
    for task in task_lst:
        task.train_loader = iter(task.train_loader)

    steps = 0
    device = args.device
    while len(empty_task) < n_tasks:
        for task_id in task_seq:
            if task_id in empty_task:
                continue

            task = task_lst[task_id]
            batch = next(task.train_loader, None)
            if batch is None:
                # Exhausted: retire this task for the rest of the epoch and
                # rebuild its loader for the next one.
                empty_task.add(task_id)
                task.train_loader = DataSetIter(
                    task.train_set,
                    args.batch_size,
                    sampler=BucketSampler(batch_size=args.batch_size),
                )
                continue
            x, y = batch

            batch_x = x["x"].to(device)
            batch_y = y["y"].to(device)

            if "seq_len" in x:
                seq_len = x["seq_len"].to(device)
                out = model(batch_x, batch_y, seq_len, task_id=task_id)
            else:
                seq_len = None
                out = model(batch_x, batch_y, task_id=task_id)
            loss, pred = out["loss"], out["pred"]
            steps += 1

            # NOTE(review): add_sparse_term is defined with an i_epoch
            # parameter; this two-argument call relies on it being optional.
            loss = add_sparse_term(loss, model)

            total_loss += loss.item()
            loss = loss / args.accumulation_steps
            loss.backward()

            metrics = task.metrics[0]
            metrics.evaluate(pred, batch_y, seq_len)

            if steps % args.accumulation_steps == 0:
                nn.utils.clip_grad_value_(model.parameters(), 5)
                optimizer_m.step()
                optimizer_m.zero_grad()
                optimizer_w.step()
                optimizer_w.zero_grad()
                # NOTE(review): schedulers are stepped once per accumulation
                # window rather than once per epoch -- confirm this is intended.
                if scheduler_m is not None:
                    scheduler_m.step()
                if scheduler_w is not None:
                    scheduler_w.step()

            if steps % args.print_every == 0:
                writer.add_scalar("train_loss", total_loss / args.print_every,
                                  steps)
                score = metrics.get_metric()
                # Fall back to F-score when the metric has no accuracy field.
                score = score["acc"] if "acc" in score else score["f"]
                writer.add_scalar("train_acc", score, steps)
                logger.info(" - Step {}: loss {}\t{}\t{}: {}".format(
                    steps,
                    total_loss / args.print_every,
                    task.task_name,
                    task.metric_key,
                    score,
                ))
                total_loss = 0

def eval_epoch(model, task_lst, writer, dev=True):
    """Evaluate every task on its dev (or test) loader.

    Args:
        model: multi-task network called as ``model(x, y, [seq_len], task_id=...)``.
        task_lst: list of task objects with dev_loader/test_loader/metrics.
        writer: unused here; kept for signature symmetry with train_epoch.
        dev: evaluate on the dev split when True, otherwise the test split.

    Returns:
        (dev_loss, dev_acc): mean loss per batch across all tasks, and a dict
        mapping task name -> last metric dict, plus an "avg" entry.
    """
    dev_loss = 0
    e_steps = 0
    avg_acc = 0
    dev_acc = {}
    model.eval()
    device = args.device

    with torch.no_grad():
        for task_id in range(len(task_lst)):
            task = task_lst[task_id]
            if dev:
                data_loader = task.dev_loader
            else:
                data_loader = task.test_loader
            for batch in data_loader:
                x, y = batch

                batch_x = x["x"].to(device)
                batch_y = y["y"].to(device)

                if "seq_len" in x:
                    seq_len = x["seq_len"].to(device)
                    out = model(batch_x, batch_y, seq_len, task_id=task_id)
                else:
                    seq_len = None
                    out = model(batch_x, batch_y, task_id=task_id)
                loss, pred = out["loss"], out["pred"]

                dev_loss += loss.item()
                e_steps += 1

                # NOTE(review): the metric is queried and added to avg_acc
                # once per *batch*, yet avg_acc is divided by the number of
                # *tasks* below -- confirm this averaging is intended.
                task.metrics[0].evaluate(pred, batch_y, seq_len)
                eval_res = task.metrics[0].get_metric()
                dev_acc[task.task_name] = eval_res
                avg_acc += eval_res["acc"] if "acc" in eval_res else eval_res[
                    "f"]

    avg_acc /= len(task_lst)
    dev_acc["avg"] = avg_acc
    dev_loss = dev_loss / e_steps
    return dev_loss, dev_acc

def prepare_optimizers(model, idx):
    """Build optimizers for the shared weights and for task ``idx``'s scores.

    Args:
        model: network whose trainable parameters split into weight/bias
            params and per-task ``score`` params (name ends in the task id).
        idx: task index selecting which score parameters to optimize.

    Returns:
        (optimizer_w, optimizer_m) when ``args.train_weight_tasks < 0``
        (``optimizer_w`` is ``None`` if no weight params were collected),
        otherwise just ``optimizer_m``.
    """
    params_w, params_m = [], []
    for n, p in model.named_parameters():
        if not p.requires_grad:
            continue
        split = n.split(".")
        if args.train_weight_tasks < 0:
            if split[-1] == "weight" or split[-1] == "bias":
                params_w.append(p)
        # Score params are registered per task; the last name component is
        # the task index.  NOTE(review): assumes every 'score' param name
        # ends in an integer -- confirm against the model definition.
        if n.find('score') != -1 and int(split[-1]) == idx:
            params_m.append(p)

    # BUG FIX: optimizer_w could be referenced while unbound (adam branch
    # with empty params_w) or constructed over an empty parameter list
    # (rmsprop/sgd branches raise on that).  Initialize to None and guard
    # every branch consistently.
    optimizer_w = None
    if args.optimizer == "adam":
        if params_w:
            optimizer_w = optim.Adam(params_w,
                                     lr=args.train_weight_lr,
                                     weight_decay=args.wd)
        optimizer_m = optim.Adam(params_m,
                                 lr=args.train_score_lr,
                                 weight_decay=args.wd)
    elif args.optimizer == "rmsprop":
        if params_w:
            optimizer_w = optim.RMSprop(params_w, lr=args.train_weight_lr)
        optimizer_m = optim.RMSprop(params_m, lr=args.train_score_lr)
    else:
        if params_w:
            optimizer_w = optim.SGD(params_w,
                                    lr=args.train_weight_lr,
                                    momentum=args.momentum,
                                    weight_decay=args.wd)
        optimizer_m = optim.SGD(params_m,
                                lr=args.train_score_lr,
                                momentum=args.momentum,
                                weight_decay=args.wd)
    if args.train_weight_tasks < 0:
        return optimizer_w, optimizer_m
    else:
        return optimizer_m

def prepare_optimizers_2(model, idx):
    """Variant of ``prepare_optimizers`` with a hard-coded Adam score LR.

    Identical parameter collection and return contract; only the adam-branch
    mask learning rate differs (a fixed experimental value instead of
    ``args.train_score_lr``).

    Returns:
        (optimizer_w, optimizer_m) when ``args.train_weight_tasks < 0``
        (``optimizer_w`` is ``None`` if no weight params were collected),
        otherwise just ``optimizer_m``.
    """
    params_w, params_m = [], []
    for n, p in model.named_parameters():
        if not p.requires_grad:
            continue
        split = n.split(".")
        if args.train_weight_tasks < 0:
            if split[-1] == "weight" or split[-1] == "bias":
                params_w.append(p)
        if n.find('score') != -1 and int(split[-1]) == idx:
            params_m.append(p)

    # BUG FIX (mirrors prepare_optimizers): optimizer_w could be referenced
    # while unbound or constructed over an empty parameter list.
    optimizer_w = None
    if args.optimizer == "adam":
        if params_w:
            optimizer_w = optim.Adam(params_w,
                                     lr=args.train_weight_lr,
                                     weight_decay=args.wd)
        optimizer_m = optim.Adam(params_m,
                                 lr=9.03685701234717e-06,  # fixed experimental LR
                                 weight_decay=args.wd)
    elif args.optimizer == "rmsprop":
        if params_w:
            optimizer_w = optim.RMSprop(params_w, lr=args.train_weight_lr)
        optimizer_m = optim.RMSprop(params_m, lr=args.train_score_lr)
    else:
        if params_w:
            optimizer_w = optim.SGD(params_w,
                                    lr=args.train_weight_lr,
                                    momentum=args.momentum,
                                    weight_decay=args.wd)
        optimizer_m = optim.SGD(params_m,
                                lr=args.train_score_lr,
                                momentum=args.momentum,
                                weight_decay=args.wd)
    if args.train_weight_tasks < 0:
        return optimizer_w, optimizer_m
    else:
        return optimizer_m

def add_sparse_term(loss, model, i_epoch=0):
    """Add an L0+L1 sparsity penalty over all score parameters to ``loss``.

    The penalty weight is scheduled by ``alpha(args.alpha, i_epoch)``.

    BUG FIX: ``i_epoch`` now defaults to 0 -- the NLP path calls
    ``add_sparse_term(loss, model)`` with two arguments, which raised a
    TypeError after this parameter was introduced.

    Args:
        loss: scalar task loss tensor.
        model: network whose 'scores' parameters are penalized.
        i_epoch: epoch index fed to the alpha schedule (default 0).

    Returns:
        The regularized loss tensor.
    """
    sparse_regularization = torch.tensor(0.).to(args.device)
    for name, param in model.named_parameters():
        if name.find('scores') != -1:
            # L0 term: count of positive scores.  NOTE(review): this count
            # is piecewise constant, so it contributes no gradient -- it
            # only shifts the reported loss value.
            sparse_regularization += torch.sum(torch.gt(param, 0))

            # L1-pos term: relu(param) summed -- this is the part that
            # actually pushes scores toward/below zero.
            pos_param = torch.gt(param, 0) * param
            sparse_regularization += torch.sum(pos_param)
    loss = loss + alpha(args.alpha, i_epoch) * sparse_regularization
    return loss

def add_sparse_term_v3(loss, model, i_epoch):
    """Add a constant-weight L1 penalty on the positive part of all score
    parameters (``args.alpha * sum(relu(scores))``).

    ``i_epoch`` is accepted for interface compatibility but does not affect
    the penalty weight in this variant.
    """
    penalty = torch.tensor(0.).to(args.device)
    for param_name, param in model.named_parameters():
        if 'scores' not in param_name:
            continue
        # relu(param): only positive scores are penalized.
        penalty = penalty + torch.sum(torch.gt(param, 0) * param)
    return loss + args.alpha * penalty

def alpha(input, i_epoch):
    """Sparsity-weight schedule.

    Returns ``input`` while the epoch is both before ``args.alpha_epoch``
    and inside the "on" window of each ``args.alpha_epoch_1``-long cycle
    (first ``args.alpha_epoch_2`` epochs of the cycle); otherwise 0.
    """
    active = (
        i_epoch < args.alpha_epoch
        and i_epoch % args.alpha_epoch_1 < args.alpha_epoch_2
    )
    return input if active else 0



def print_mask_status(model, logger):
    """Log each task's mask keep-ratio and every pairwise mask overlap."""
    n = model.num_tasks
    for tid in range(n):
        logger.info("keep ratio for task {}: {:.4f}".format(
            tid, model.get_mask_keep_ratio(tid)))

    # Pairwise overlap over all unordered task pairs.
    for first in range(n):
        for second in range(first + 1, n):
            logger.info("overlap of {}-{}: {:.4f}".format(
                first, second, model.get_mask_overlap(first, second)))



def mask_status(model, task_id):
    """Extract the current binary masks for ``task_id``'s score parameters.

    For 'topK' pruning the mask keeps the top-|score| fraction given by each
    module's ``sparsity``; for 'threshold' pruning the mask is simply
    ``score > 0``.

    Args:
        model: network with per-task 'scores' parameters and (for topK)
            modules exposing a ``sparsity`` attribute.
        task_id: task index whose masks are extracted.

    Returns:
        dict mapping score-parameter name -> binary mask tensor.
    """
    masks = {}
    m_sparsity = {}

    # Record each prunable module's sparsity under every per-task score
    # parameter name, so the topK branch can look it up by full name.
    for n, m in model.named_modules():
        if hasattr(m, "sparsity"):
            for i in range(args.num_tasks):
                m_sparsity[n + ".scores" + '.' + str(i)] = m.sparsity

    if args.pruning_method == 'topK':
        for name, param in model.named_parameters():
            split = name.split(".")
            # Only this task's score parameters (suffix == task id).
            if split[-2] in ["scores"]:
                if split[-1] == str(task_id):
                    # NOTE(review): the original flagged this sparsity lookup
                    # as possibly problematic -- confirm the name keying.
                    mask = module_util.GetSubnet.apply(
                        param.abs(), m_sparsity[name]
                    )
                    masks[name] = mask
    if args.pruning_method == 'threshold':
        para_num = 0  # NOTE(review): unused
        unit_step = BinaryStep.apply  # NOTE(review): unused; (param > 0) is used instead
        for name, param in model.named_parameters():
            split = name.split(".")
            if split[-2] in ["scores"]:
                if split[-1] == str(task_id):
                    # Hard threshold at zero: positive scores are kept.
                    mask = (param > 0.).float()
                    masks[name] = mask
    return masks

class LRcheduler_wm():
    """Two-phase LR schedule: cosine annealing for the first ``lr_epochs``
    steps, then exponential decay (gamma=0.9) for the remainder.

    The (misspelled) class name is kept as-is so existing callers work.
    """

    def __init__(self, optimizer, T_max_1, T_max_2, lr_epochs=args.lr_epochs):
        self.lr_epochs = lr_epochs
        # Phase 1: cosine down to args.lr1_min (a tunable floor).
        self.LRcheduler_1 = CosineAnnealingLR(optimizer, T_max=T_max_1, eta_min=args.lr1_min)
        # Phase 2: exponential decay.  T_max_2 is accepted for interface
        # compatibility but ExponentialLR does not use a horizon.
        self.LRcheduler_2 = ExponentialLR(optimizer, gamma=0.9)
        self.step_num = 0

    def _active_scheduler(self):
        """Scheduler for the current phase, judged before incrementing."""
        if self.step_num < self.lr_epochs:
            return self.LRcheduler_1
        return self.LRcheduler_2

    def step(self):
        scheduler = self._active_scheduler()
        self.step_num += 1
        return scheduler.step()

    def get_lr(self):
        return self._active_scheduler().get_lr()


