import time
import numpy as np
import torch
import torch.nn as nn
import utils
from args import args
from fastNLP import logger
from torch.optim.lr_scheduler import CosineAnnealingLR
import trainers.trainer_utils as trainer_utils



def train_cv(model, data_loader, writer):
    """Train `model` on all tasks for `args.epochs` epochs.

    Builds one mask optimizer per task (and, depending on flags, one shared
    weight optimizer), steps the LR schedulers once per epoch, logs losses /
    learning rates / mask overlap to `writer`, and evaluates every task's dev
    set after each epoch.

    Args:
        model: the multi-task network to train.
        data_loader: project loader exposing per-task train/dev iterators.
        writer: a TensorBoard-style SummaryWriter.
    """
    total_time = time.time()
    logger.info("Start training...")
    best_acc1 = [0.0 for _ in range(args.num_tasks)]

    criterion = nn.CrossEntropyLoss().to(args.device)

    optimizer_mlist = []
    scheduler_mlist = []
    lr_writer = [0.0 for _ in range(args.num_tasks)]
    args.num_params = [0.0 for _ in range(args.num_tasks)]
    optimizer_w = None
    optimizer_m = None

    if args.no_scheduler:
        # No schedulers at all: only build the per-task mask optimizers.
        scheduler_w, scheduler_m = None, None
        for i in range(args.num_tasks):
            optimizer_w, M = trainer_utils.prepare_optimizers(model, i)
            optimizer_mlist.append(M)
    else:
        for i in range(args.num_tasks):
            # When train_weight_tasks < 0 the helper also returns a weight
            # optimizer; otherwise only the per-task mask optimizer exists.
            if args.train_weight_tasks < 0:
                optimizer_w, M = trainer_utils.prepare_optimizers(model, i)
            else:
                M = trainer_utils.prepare_optimizers(model, i)

            optimizer_mlist.append(M)
            if args.pruning_method == 'topK':
                scheduler_m = CosineAnnealingLR(M, T_max=args.epochs)
            else:
                scheduler_m = trainer_utils.LRcheduler_wm(M, T_max_1=args.lr_epochs)
            scheduler_mlist.append(scheduler_m)
        if optimizer_w is not None:
            # Weight updates only start after the warm-up epochs, so the
            # cosine horizon excludes them.
            scheduler_w = CosineAnnealingLR(optimizer_w, T_max=args.epochs - args.w_epochs)
        else:
            scheduler_w = None

    mask_int = None  # epoch-0 mask snapshot, baseline for the overlap metric
    for i_epoch in range(args.epochs):
        start_time = time.time()
        logger.info("Epoch {}".format(i_epoch))

        loss_list, mask_list = train_cv_epoch(model, data_loader, criterion,
                                              optimizer_mlist, optimizer_w, i_epoch)

        if i_epoch == 0:
            mask_int = mask_list

        # BUG FIX: with args.no_scheduler the list is empty; the previous
        # `is not None` check still entered the loop and raised IndexError.
        if scheduler_mlist:
            for i in range(args.num_tasks):
                scheduler_mlist[i].step()
                # NOTE(review): get_lr() is deprecated on newer PyTorch in
                # favour of get_last_lr(); kept because the custom
                # LRcheduler_wm may only implement get_lr — confirm.
                lr_writer[i] = scheduler_mlist[i].get_lr()[0]

        if scheduler_w is not None and i_epoch >= args.w_epochs:
            scheduler_w.step()

        for i in range(args.num_tasks):
            writer.add_scalar(f"train/task_{i}/loss", loss_list[i],
                              i_epoch)
            writer.add_scalar(f"lr/task_{i}/lr", lr_writer[i],
                              i_epoch)

            # Compare current masks against the epoch-0 snapshot to track how
            # much each task's active-parameter set still overlaps with it.
            ones_overlap = 0
            ones_m1, ones_m2 = 0, 0
            for name in mask_list[i]:
                m1 = mask_list[i][name]
                m2 = mask_int[i][name]
                ones_m1 += torch.sum(m1)
                ones_m2 += torch.sum(m2)
                ones_overlap += torch.sum(m1 * m2)
            logger.info(f"num_params : task: {i}, num: {ones_m1}")
            args.num_params[i] = ones_m1
            writer.add_scalar(f"train/task_{i}/overlap", ones_overlap / (ones_m1 + ones_m2),
                              i_epoch)

        logger.info("Epoch {} finished. Elapse: {:.3f}s.".format(
            i_epoch,
            time.time() - start_time))

        # log acc per epoch for all tasks
        logger.info("========== Test ==========")
        eval_cv_all(model,
                    criterion,
                    data_loader,
                    i_epoch,
                    writer,
                    best_acc1)

    logger.info("Training finished. Elapse {:.4f} hours.".format(
        (time.time() - total_time) / 3600))


def train_cv_epoch(model, data_loader, criterion, optimizer_mlist, optimizer_w, i_epoch):
    """Run one epoch of round-robin multi-task training.

    Tasks are visited in a random order, one batch per task per round, until
    every task's train iterator is exhausted.

    Args:
        model: network to train; `args.cur_task_id` is set before each forward.
        data_loader: project loader with per-task train iterators.
        criterion: loss function.
        optimizer_mlist: one mask optimizer per task.
        optimizer_w: shared weight optimizer, or None when weights are frozen.
        i_epoch: current epoch index (gates weight updates and sparsity term).

    Returns:
        (loss_list, mask_list): last batch loss (float) and last mask snapshot
        (from trainer_utils.mask_status) per task.
    """
    n_tasks = args.num_tasks
    task_seq = list(np.random.permutation(n_tasks))
    empty_task = set()
    model.zero_grad()
    model.train()

    loss_list = [0.0 for _ in range(args.num_tasks)]
    mask_list = [0.0 for _ in range(args.num_tasks)]

    data_loader.make_iterable()
    setattr(args, "cur_task_id", None)

    steps = 0
    while len(empty_task) < n_tasks:
        for task_id in task_seq:
            if task_id in empty_task:
                continue

            train_loader = data_loader.get_train_loader(task_id)
            batch = next(train_loader, None)
            if batch is None:
                # This task's iterator is exhausted for the epoch.
                empty_task.add(task_id)
                continue

            data, target = batch
            data, target = data.to(args.device), target.to(args.device)
            args.cur_task_id = task_id

            optimizer_m = optimizer_mlist[task_id]
            optimizer_m.zero_grad()
            # BUG FIX: optimizer_w is None when the shared weights are frozen
            # (args.train_weight_tasks >= 0); the unguarded zero_grad() here
            # crashed with AttributeError while step() below was guarded.
            if optimizer_w is not None:
                optimizer_w.zero_grad()

            output = model(data)
            loss = criterion(output, target)
            if args.pruning_method == 'threshold':
                loss = trainer_utils.add_sparse_term_v4(loss, model, i_epoch)
            loss.backward()

            if steps % args.accumulation_steps == 0:
                optimizer_m.step()
                # Shared weights only update after the warm-up epochs.
                if optimizer_w is not None and i_epoch >= args.w_epochs:
                    optimizer_w.step()

            mask_list[task_id] = trainer_utils.mask_status(model, task_id)

            round_idx = steps // n_tasks
            if round_idx % args.log_interval == 0:
                logger.info(
                    f"Round:{round_idx},Task:{task_id},Loss:{loss.item():.6f}")
            steps += 1
            # BUG FIX: store a plain float, not the loss tensor — keeping the
            # tensor retained each task's full autograd graph until the
            # caller finished logging.
            loss_list[task_id] = loss.item()
    return loss_list, mask_list



def eval_cv_all(model, criterion, data_loader, i_epoch, writer, best_acc1):
    """Evaluate every task's dev set; log metrics and update bests in place.

    Args:
        model: network to evaluate (switched to eval mode here).
        criterion: loss used for the reported average test loss.
        data_loader: project loader exposing get_dev_loader(task_id).
        i_epoch: current epoch index, used as the logging step.
        writer: TensorBoard-style SummaryWriter.
        best_acc1: per-task best accuracies; mutated in place.
    """
    model.zero_grad()
    model.eval()

    curr_acc1 = [0.0 for _ in range(args.num_tasks)]

    for idx in range(args.num_tasks):
        with torch.no_grad():

            test_loss = 0
            correct = 0
            logit_entropy = 0.0

            args.cur_task_id = idx
            data_dev = data_loader.get_dev_loader(idx)

            for data, target in data_dev:
                # Some loaders wrap the input batch in a list; unwrap it.
                if isinstance(data, list):
                    data = data[0]
                data, target = data.to(args.device), target.to(args.device)
                output = model(data)
                if len(output.shape) == 1:
                    output = output.unsqueeze(0)
                # Mean per-sample entropy of the predictive distribution.
                logit_entropy += (
                    -(output.softmax(dim=1) * output.log_softmax(dim=1))
                        .sum(1)
                        .mean()
                        .item()
                )
                test_loss += criterion(output, target).item()

                pred = output.argmax(dim=1, keepdim=True)
                correct += pred.eq(target.view_as(pred)).sum().item()

            # Loss/entropy are averaged over batches, accuracy over samples.
            test_loss /= len(data_dev)
            logit_entropy /= len(data_dev)
            curr_acc1[idx] = float(correct) / len(data_dev.dataset)

            if curr_acc1[idx] > best_acc1[idx]:
                best_acc1[idx] = curr_acc1[idx]

            print(f"\nTest set:task id: {idx:.0f},  Average loss: {test_loss:.4f}, Accuracy: ({curr_acc1[idx]:.4f})")
            writer.add_scalar(f"test/task_{idx}/loss", test_loss, i_epoch)
            writer.add_scalar(f"test/task_{idx}/acc", curr_acc1[idx], i_epoch)
            # BUG FIX: results were hard-coded to epoch 249, so the CSV was
            # silently never written unless args.epochs happened to be 250.
            if i_epoch == args.epochs - 1:
                logger.info(f"\nTest set:task id: {idx:.0f},  Average loss: {test_loss:.4f}, Accuracy: ({curr_acc1[idx]:.4f})")
                utils.write_result_to_csv(
                    name=f"{args.exp_name}~set={args.dataset}~task={idx}~intercept={args.intercept}~alpha={args.alpha}",
                    curr_acc1=curr_acc1[idx],
                    num_params=args.num_params[idx],
                    best_acc1=best_acc1[idx],
                )