import time
import os
import numpy as np
import torch
import torch.nn as nn
import torch.optim as optim
import utils
from args import args
from fastNLP import logger
from torch.optim.lr_scheduler import CosineAnnealingLR,ExponentialLR
import models.module_util as module_util



def train_cv(model, data_loader, writer):
    """Top-level multi-task training loop.

    Builds one mask ("score") optimizer per task plus an optional shared
    weight optimizer, trains for ``args.epochs`` epochs, logs per-task loss,
    mask LR and mask-overlap statistics to ``writer``, and evaluates every
    task's dev set after each epoch.

    Args:
        model: multi-task network exposing per-task score parameters.
        data_loader: project loader providing per-task train/dev loaders.
        writer: TensorBoard-style SummaryWriter.
    """
    total_time = time.time()
    logger.info("Start training...")

    best_acc1 = [0.0 for _ in range(args.num_tasks)]

    criterion = nn.CrossEntropyLoss().to(args.device)

    optimizer_mlist = []
    scheduler_mlist = []
    # NOTE(review): lr_writer is never updated, so "lr_mask/task_i/lr" always
    # logs 0.0 -- confirm whether the mask LR was meant to be read from the
    # schedulers each epoch.
    lr_writer = [0.0 for _ in range(args.num_tasks)]
    args.num_params = [0.0 for _ in range(args.num_tasks)]
    optimizer_w = None
    optimizer_m = None

    if args.no_scheduler:
        scheduler_w = None
        for i in range(args.num_tasks):
            optimizer_w, M = prepare_optimizers(model, i)
            optimizer_mlist.append(M)
    else:
        for i in range(args.num_tasks):
            if args.train_weight_tasks < 0:
                optimizer_w, M = prepare_optimizers(model, i)
            else:
                M = prepare_optimizers(model, i)

            optimizer_mlist.append(M)
            if args.pruning_method == 'topK':
                scheduler_m = CosineAnnealingLR(M, T_max=args.epochs)
            else:
                scheduler_m = LRcheduler_wm(M, T_max_1=args.lr_epochs,
                                            T_max_2=args.epochs - args.lr_epochs)
            scheduler_mlist.append(scheduler_m)
        if optimizer_w is not None:
            # Weight LR anneals only over the epochs after the warm-up phase.
            scheduler_w = CosineAnnealingLR(optimizer_w,
                                            T_max=args.epochs - args.w_epochs)
        else:
            scheduler_w = None

    # Epoch-0 masks kept as the fixed reference for the overlap statistic.
    mask_int = None

    for i_epoch in range(args.epochs):
        start_time = time.time()
        logger.info("Epoch {}".format(i_epoch))

        loss_list, mask_list = train_cv_epoch(model, data_loader, criterion,
                                              optimizer_mlist, optimizer_w, i_epoch)
        if args.warmup_save:
            print("warmup_save done")
            if i_epoch == 9:
                torch.save(model.state_dict(), os.path.join(
                    args.save_model_path, "warmup_{}.th".format(i_epoch)))

        if i_epoch == 0:
            mask_int = mask_list

        # BUG FIX: the original tested `scheduler_mlist is not None`, which is
        # always true because it is a list; with --no_scheduler the list is
        # empty and `scheduler_mlist[i]` raised IndexError. Test emptiness.
        if scheduler_mlist:
            for i in range(args.num_tasks):
                scheduler_mlist[i].step()

        # The shared-weight scheduler only advances once weight training
        # actually starts (after the w_epochs warm-up).
        if scheduler_w is not None and i_epoch >= args.w_epochs:
            scheduler_w.step()

        for i in range(args.num_tasks):
            writer.add_scalar(f"train/task_{i}/loss", loss_list[i],
                              i_epoch)
            writer.add_scalar(f"lr_mask/task_{i}/lr", lr_writer[i],
                              i_epoch)

            # Overlap of the current mask with the epoch-0 mask, plus the
            # active-parameter count for this task.
            ones_overlap = 0
            ones_m1, ones_m2 = 0, 0
            if i_epoch > 0:
                for name in mask_list[i]:
                    m1 = mask_list[i][name]
                    m2 = mask_int[i][name]
                    ones_m1 += torch.sum(m1)
                    ones_m2 += torch.sum(m2)
                    ones_overlap += torch.sum(m1 * m2)
                logger.info(f"num_params : task: {i}, num: {ones_m1}")
                args.num_params[i] = ones_m1
                writer.add_scalar(f"train/task_{i}/overlap", ones_overlap / (ones_m1 + ones_m2),
                                  i_epoch)

        logger.info("Epoch {} finished. Elapse: {:.3f}s.".format(
            i_epoch,
            time.time() - start_time))

        logger.info("========== Test ==========")
        eval_cv_all(model,
                    criterion,
                    data_loader,
                    i_epoch,
                    writer,
                    best_acc1)

    logger.info("Training finished. Elapse {:.4f} hours.".format(
        (time.time() - total_time) / 3600))


def train_cv_epoch(model, data_loader, criterion, optimizer_mlist, optimizer_w, i_epoch):
    """Run one epoch of round-robin multi-task training.

    Tasks are visited in a random order each round until every task's train
    loader is exhausted. ``args.cur_task_id`` is set before each forward pass
    so the model routes to the right task head/mask.

    Args:
        model: the multi-task network.
        data_loader: project loader exposing ``get_train_loader(task_id)``.
        criterion: loss function.
        optimizer_mlist: per-task mask optimizers (one per task).
        optimizer_w: shared weight optimizer, or ``None`` when weights are
            not being trained (``args.train_weight_tasks >= 0``).
        i_epoch: current epoch index (weights only step once
            ``i_epoch >= args.w_epochs``).

    Returns:
        (loss_list, mask_list): per-task last-batch loss values (floats) and
        per-task mask dicts from :func:`mask_status`.
    """
    n_tasks = args.num_tasks
    task_seq = list(np.random.permutation(n_tasks))
    empty_task = set()
    model.zero_grad()
    model.train()

    loss_list = [0.0 for _ in range(args.num_tasks)]
    mask_list = [0.0 for _ in range(args.num_tasks)]

    data_loader.make_iterable()
    setattr(args, "cur_task_id", None)

    steps = 0
    while len(empty_task) < n_tasks:
        for task_id in task_seq:
            if task_id in empty_task:
                continue

            train_loader = data_loader.get_train_loader(task_id)
            batch = next(train_loader, None)
            if batch is None:
                empty_task.add(task_id)
                continue

            data, target = batch
            data, target = data.to(args.device), target.to(args.device)
            args.cur_task_id = task_id

            # BUG FIX: optimizer_w is legitimately None when
            # args.train_weight_tasks >= 0 (prepare_optimizers returns only the
            # mask optimizer); the original called zero_grad() unconditionally
            # and crashed with AttributeError in that configuration.
            # NOTE(review): zeroing every batch while stepping only every
            # accumulation_steps batches discards the intermediate gradients --
            # confirm whether accumulation was intended here.
            if optimizer_w is not None:
                optimizer_w.zero_grad()

            output = model(data)
            loss = criterion(output, target)
            if args.pruning_method == 'threshold':
                loss = add_sparse_term_v3(loss, model, i_epoch)
            loss.backward()

            # NOTE(review): the per-task mask optimizers in optimizer_mlist are
            # never stepped in this function -- verify score updates happen
            # elsewhere or a step is missing.
            if steps % args.accumulation_steps == 0:
                if optimizer_w is not None and i_epoch >= args.w_epochs:
                    optimizer_w.step()

            mask_list[task_id] = mask_status(model, task_id)

            round_idx = steps // n_tasks
            if round_idx % args.log_interval == 0:
                logger.info(
                    f"Round:{round_idx},Task:{task_id},Loss:{loss.item():.6f}")
            steps += 1
            # Store a detached float: keeping the loss tensor would pin the
            # whole autograd graph of the last batch until the next epoch.
            loss_list[task_id] = loss.item()
    return loss_list, mask_list


def eval_cv_all(model, criterion, data_loader, i_epoch, writer, best_acc1):
    """Evaluate every task's dev set; log loss/accuracy and track best acc.

    Updates ``best_acc1`` in place and, on epoch 249, writes final results to
    the experiment CSV via ``utils.write_result_to_csv_sparsesharing``.
    """
    model.zero_grad()
    model.eval()

    if args.num_tasks:
        curr_acc1 = [0.0 for _ in range(args.num_tasks)]

    with torch.no_grad():
        for idx in range(args.num_tasks):
            total_loss = 0
            n_correct = 0
            entropy_sum = 0.0

            args.cur_task_id = idx
            dev_loader = data_loader.get_dev_loader(idx)

            for data, target in dev_loader:
                if isinstance(data, list):
                    data = data[0]
                data = data.to(args.device)
                target = target.to(args.device)
                output = model(data)
                if output.dim() == 1:
                    output = output.unsqueeze(0)
                # Mean predictive entropy of the batch.
                probs = output.softmax(dim=1)
                log_probs = output.log_softmax(dim=1)
                entropy_sum += -(probs * log_probs).sum(1).mean().item()
                total_loss += criterion(output, target).item()

                pred = output.argmax(dim=1, keepdim=True)
                n_correct += pred.eq(target.view_as(pred)).sum().item()

            # Averages are over batches; accuracy is over samples.
            n_batches = len(dev_loader)
            test_loss = total_loss / n_batches
            logit_entropy = entropy_sum / n_batches
            curr_acc1[idx] = float(n_correct) / len(dev_loader.dataset)

            if curr_acc1[idx] > best_acc1[idx]:
                best_acc1[idx] = curr_acc1[idx]

            print(f"\nTest set:task id: {idx:.0f},  Average loss: {test_loss:.4f}, Accuracy: ({curr_acc1[idx]:.4f})")
            writer.add_scalar(f"test/task_{idx}/loss", test_loss, i_epoch)
            writer.add_scalar(f"test/task_{idx}/acc", curr_acc1[idx], i_epoch)
            if i_epoch == 249:
                logger.info(f"\nTest set:task id: {idx:.0f},  Average loss: {test_loss:.4f}, Accuracy: ({curr_acc1[idx]:.4f})")
                utils.write_result_to_csv_sparsesharing(
                    name=f"{args.exp_name}~set={args.dataset}~task={idx}~pruning_iter={args.pruning_iter}",
                    curr_acc1=curr_acc1[idx],
                    num_params=args.num_params[idx],
                    best_acc1=best_acc1[idx],
                )


def prepare_optimizers(model, idx):
    """Build the optimizers for task ``idx``.

    Collects the shared weight/bias parameters (only when
    ``args.train_weight_tasks < 0``) and the score ("mask") parameters whose
    name contains ``score`` and ends with the task index.

    Args:
        model: network whose named parameters are partitioned.
        idx: task index selecting the score parameters.

    Returns:
        ``(optimizer_w, optimizer_m)`` when ``args.train_weight_tasks < 0``
        (``optimizer_w`` may be ``None`` if no weight params were collected),
        otherwise just ``optimizer_m``.
    """
    params_w, params_m = [], []
    for n, p in model.named_parameters():
        if not p.requires_grad:
            continue
        split = n.split(".")
        if args.train_weight_tasks < 0:
            if split[-1] == "weight" or split[-1] == "bias":
                params_w.append(p)
        # Score tensors are registered as "...score....<task_idx>".
        if n.find('score') != -1 and int(split[-1]) == idx:
            params_m.append(p)

    # BUG FIX: only the adam branch guarded against an empty params_w; the
    # rmsprop/sgd branches built an optimizer over an empty parameter list
    # (ValueError), and adam with empty params_w left optimizer_w unbound on
    # the `train_weight_tasks < 0` return path (NameError). Initialize to None
    # and guard uniformly; train_cv already handles optimizer_w being None.
    optimizer_w = None
    if args.optimizer == "adam":
        if params_w:
            optimizer_w = optim.Adam(params_w,
                                     lr=args.train_weight_lr,  # set this to 0 (per original note)
                                     weight_decay=args.wd)
        optimizer_m = optim.Adam(params_m,
                                 lr=args.train_score_lr,
                                 weight_decay=args.wd)
    elif args.optimizer == "rmsprop":
        if params_w:
            optimizer_w = optim.RMSprop(params_w, lr=args.train_weight_lr)
        optimizer_m = optim.RMSprop(params_m, lr=args.train_score_lr)
    else:
        if params_w:
            optimizer_w = optim.SGD(params_w,
                                    lr=args.train_weight_lr,
                                    momentum=args.momentum,
                                    weight_decay=args.wd)
        optimizer_m = optim.SGD(params_m,
                                lr=args.train_score_lr,
                                momentum=args.momentum,
                                weight_decay=args.wd)
    if args.train_weight_tasks < 0:
        return optimizer_w, optimizer_m
    else:
        return optimizer_m


def add_sparse_term_v3(loss, model, i_epoch):
    """Add an L1-style penalty on the positive part of all score parameters.

    Only entries of ``*scores*`` tensors that are > 0 contribute, weighted by
    ``args.alpha``. ``i_epoch`` is accepted for interface compatibility but
    unused.
    """
    penalty = torch.tensor(0.).to(args.device)
    for param_name, param in model.named_parameters():
        if 'scores' in param_name:
            positive_part = torch.gt(param, 0) * param
            penalty = penalty + positive_part.sum()
    return loss + args.alpha * penalty


def mask_status(model, task_id):
    """Return the current binary masks for ``task_id``'s score parameters.

    For 'topK' pruning the mask comes from ``GetSubnet`` applied to the
    absolute scores at the module's target sparsity; for 'threshold' pruning
    it is simply ``score > 0``. Any other pruning method yields an empty dict.
    """
    masks = {}
    sparsity_by_name = {}

    # Map every task's score-tensor name to its module's target sparsity.
    for mod_name, mod in model.named_modules():
        if hasattr(mod, "sparsity"):
            for t in range(args.num_tasks):
                sparsity_by_name[f"{mod_name}.scores.{t}"] = mod.sparsity

    for name, param in model.named_parameters():
        parts = name.split(".")
        if parts[-2] != "scores" or parts[-1] != str(task_id):
            continue
        if args.pruning_method == 'topK':
            masks[name] = module_util.GetSubnet.apply(
                param.abs(), sparsity_by_name[name]
            )
        elif args.pruning_method == 'threshold':
            masks[name] = (param > 0.).float()
    return masks

class LRcheduler_wm():
    """Two-phase LR schedule for mask optimizers.

    Cosine annealing (down to ``args.lr1_min``) drives the first
    ``lr_epochs`` steps; exponential decay (gamma=0.9) takes over afterwards.
    ``T_max_2`` is accepted for interface compatibility but unused, since
    ExponentialLR has no horizon.
    """

    def __init__(self, optimizer, T_max_1, T_max_2, lr_epochs=args.lr_epochs):
        self.lr_epochs = lr_epochs
        self.LRcheduler_1 = CosineAnnealingLR(optimizer, T_max=T_max_1,
                                              eta_min=args.lr1_min)
        self.LRcheduler_2 = ExponentialLR(optimizer, gamma=0.9)
        self.step_num = 0

    def _active(self):
        # Phase 1 while fewer than lr_epochs steps have been taken.
        if self.step_num < self.lr_epochs:
            return self.LRcheduler_1
        return self.LRcheduler_2

    def step(self):
        scheduler = self._active()
        self.step_num += 1
        return scheduler.step()

    def get_lr(self):
        return self._active().get_lr()