import numpy as np
import torch
from torch import nn
from torch.nn import functional as F
from typing import Callable, TypeVar
from learners.baseline import Learner as BaselineLearner
from models.multi_net import EaseNet
from utils.toolkit import tensor2numpy
from tqdm import tqdm
import logging
import copy

# DataLoader worker count (not referenced in this chunk; presumably consumed
# by loader construction elsewhere in the project — TODO confirm).
num_workers = 8

# Type variable bound to EaseNet so the learner (and its subclasses) can be
# parameterized with more specific network types while keeping precise typing.
T_EaseNet = TypeVar("T_EaseNet", bound=EaseNet)


class Learner(BaselineLearner[T_EaseNet]):
    """EASE-style incremental learner.

    Trains with the current task's adapter only (``mode="CUR"``) and
    evaluates with all adapters (``mode="ALL"``); after each task the fc
    head is rebuilt from class prototypes (see ``replace_fc``).
    """

    _network: T_EaseNet       # the expandable network being trained
    _old_network: T_EaseNet   # previous-task network snapshot

    def __init__(self, args, data_manager, model_func: Callable[..., T_EaseNet] = EaseNet):
        """Read EASE hyper-parameters from ``args`` and set forward modes.

        Args:
            args: configuration mapping; must provide "adapter_num",
                "use_diagonal", "recalc_sim", "alpha" and "beta" (plus the
                keys read later by training: epochs/lr/early_stop).
            data_manager: dataset/task provider, passed to the base learner.
            model_func: network factory, defaults to :class:`EaseNet`.
        """
        super().__init__(args, data_manager, model_func)

        self.adapter_num = args["adapter_num"]
        self.use_diagonal = args["use_diagonal"]
        self.recalc_sim = args["recalc_sim"]
        self.alpha = args["alpha"]  # forward_reweight is divide by _cur_task
        self.beta = args["beta"]

        # Train sees the current adapter only, test sees all adapters —
        # presumably what "CUR"/"ALL" select inside EaseNet.forward; confirm there.
        self.forward_train = lambda model, inputs: model(inputs, mode="CUR")
        self.forward_test = lambda model, inputs: model(inputs, mode="ALL")
        # TODO extract_feats and extract_token, sth wrong
        self.extract_token = lambda inputs: self._network.extract_token(inputs)

    def _update_fc(self, nb_classes):
        """Grow the classification head to cover all classes seen so far.

        NOTE(review): the ``nb_classes`` argument is ignored; the head is
        always grown to ``self._total_classes`` — confirm callers always pass
        that same value.
        """
        self._network.update_fc(self._total_classes)

    def _train_task(self, train_loader, test_loader):
        """Run one task's optimization.

        The first task and incremental tasks use distinct epoch/lr settings
        and distinct training routines; both are selected here and the
        chosen routine is invoked with a fresh optimizer/scheduler pair.
        """
        is_first_task = self._cur_task == 0
        if is_first_task:
            epochs, lr = self.args["init_epochs"], self.args["init_lr"]
        else:
            epochs, lr = self.args["inc_epochs"], self.args["inc_lr"]

        optimizer = self.get_optimizer(lr=lr)
        scheduler = self.get_scheduler(optimizer, epochs)

        # Use clean multi-GPU setup
        self._setup_multi_gpu_training()

        trainer = self._init_train if is_first_task else self._inc_train
        criterion = nn.CrossEntropyLoss()
        trainer(
            epochs,
            train_loader,
            test_loader,
            optimizer=optimizer,
            scheduler=scheduler,
            criterion=criterion,
        )

    def _inc_train(
        self, epochs, train_loader, test_loader, optimizer, scheduler, criterion
    ):
        """Train the classifier on an incremental (non-first) task.

        Targets are shifted into the current task's local label space
        (global label minus ``self._known_classes``); labels from earlier
        tasks map to -1, which ``criterion`` would reject — incremental
        loaders are expected to yield only current-task samples.

        Tracks the best test accuracy per task and, when
        ``args["early_stop"]`` is set, restores the best checkpoint at the
        end.

        NOTE(review): despite the original docstring, only the plain
        cross-entropy ``criterion`` is applied here — no distillation or
        classifier-alignment loss term exists in this loop.
        """
        prog_bar = tqdm(range(epochs))
        test_acc = 0.0
        info = ""  # stays defined for the final logging call even if epochs == 0

        for epoch in prog_bar:
            self._network.train()
            losses = 0.0
            correct, total = 0, 0

            for _, inputs, targets in train_loader:
                inputs, targets = inputs.to(self._device), targets.to(self._device)
                # Shift global labels into the current task's local space;
                # old-class labels become -1.
                aux_targets = targets.clone()
                aux_targets = torch.where(
                    aux_targets - self._known_classes >= 0,
                    aux_targets - self._known_classes,
                    -1,
                )

                output = self.forward_train(self._network, inputs)
                logits = output["logits"]

                loss = criterion(logits, aux_targets)

                optimizer.zero_grad()
                loss.backward()
                optimizer.step()
                losses += loss.item()

                _, preds = torch.max(logits, dim=1)
                correct += preds.eq(aux_targets.expand_as(preds)).cpu().sum()
                total += len(aux_targets)

            if scheduler:
                scheduler.step()

            # Evaluate every `interval` epochs and on the final epoch;
            # otherwise carry the previous measurement forward.
            if ((epoch + 1) % self.interval == 0 and epoch > 0) or epoch == epochs - 1:
                test_acc = self._compute_accuracy(self._network, test_loader)

            train_acc = np.around(tensor2numpy(correct) * 100 / total, decimals=2)
            info = "Task {}, Epoch {}/{} => Loss {:.3f}, TrAcc {:.2f}, TeAcc {:.2f}".format(
                self._cur_task,
                epoch + 1,
                epochs,
                losses / len(train_loader),
                train_acc,
                test_acc,
            )
            prog_bar.set_description(info)

            # Keep the best-so-far checkpoint for this task.
            if test_acc >= self.best_acc_cur:
                self.best_acc_cur = test_acc
                self.best_acc[self._cur_task] = self.best_acc_cur
                self.best_epoch[self._cur_task] = epoch
                self.best_model = copy.deepcopy(self._network.state_dict())

        logging.info(info)
        report_str = (
            f"Task {self._cur_task} => "
            + f"Best accuracy: {self.best_acc_cur:.2f}[{self.best_epoch[self._cur_task]}], "
            + f"Average accuracy: {np.mean(self.best_acc):.2f}"
        )
        logging.info(report_str)
        if self.args["early_stop"]:
            self._network.load_state_dict(self.best_model)

    def after_train(self):
        """Post-task hook: undo multi-GPU wrapping, then rebuild the
        prototype-based fc head from the protonet training loader."""
        # Clean multi-GPU teardown
        self._teardown_multi_gpu_training()

        self.replace_fc(self.train_loader_for_protonet)

    # (proxy_fc = cls * dim)
    def replace_fc(self, train_loader):
        """Overwrite fc weights for the current task with class prototypes.

        For every adapter index up to the current task, features of the
        current-task data are extracted with that adapter and averaged per
        class; each class mean overwrites the matching (class, adapter)
        slice of ``fc.weight``. Unless ``use_diagonal`` is set, the
        remaining off-diagonal blocks of old classes are then reconstructed
        via similarity transfer (``solve_sim_reset`` / ``solve_similarity``).

        Args:
            train_loader: loader over the current task's data, yielding
                ``(idx, data, label)`` batches.
        """
        model = self._network.eval()

        with torch.no_grad():
            # Replace the prototype block for each adapter of the current task.
            for index in range(self._cur_task + 1):
                # With use_diagonal, keep only the diagonal feature:
                # index == self._cur_task is the newest adapter. (index == -1
                # would denote the initial PTM, but this loop never yields it,
                # so that clause is vestigial — kept for fidelity.)
                if self.use_diagonal and index != -1 and index != self._cur_task:
                    continue

                embedding_list, label_list = [], []
                for _, data, label in train_loader:
                    data = data.to(self._device)
                    label = label.to(self._device)
                    embedding = model.backbone.forward_proto(data, adapt_index=index)
                    embedding_list.append(embedding.cpu())
                    label_list.append(label.cpu())

                embedding_list = torch.cat(embedding_list, dim=0)
                label_list = torch.cat(label_list, dim=0)

                class_list = np.unique(self.train_dataset_for_protonet.labels)
                for class_index in class_list:
                    data_index = (label_list == class_index).nonzero().squeeze(-1)
                    # Mean embedding of this class under adapter `index`.
                    proto = embedding_list[data_index].mean(0)
                    model.fc.weight.data[
                        class_index,
                        index
                        * self._network.out_dim : (index + 1)
                        * self._network.out_dim,
                    ] = proto

        if self.use_diagonal:
            return
        if self.recalc_sim:
            self.solve_sim_reset()
        else:
            self.solve_similarity()

    def get_A_B_Ahat(self, task_id):
        """Slice the fc weight matrix into the three blocks used by the
        similarity-transfer step, all returned on CPU.

        Returns:
            A:     W(Ti) — new-class rows under task ``task_id``'s adapter.
            B:     W(TT) — new-class rows under the newest adapter.
            A_hat: W(ii) — task ``task_id``'s own rows under its adapter.
        """
        dim = self._network.out_dim
        weight = self._network.fc.weight.data

        col_lo = task_id * dim
        col_hi = col_lo + dim
        cls_lo, cls_hi = self.get_cls_range(task_id)

        # W(Ti)  i is the i-th task index, T is the cur task index, W is a T*T matrix
        block_a = weight[self._known_classes :, col_lo:col_hi]
        # W(TT)
        block_b = weight[self._known_classes :, -dim:]
        # W(ii)
        block_a_hat = weight[cls_lo:cls_hi, col_lo:col_hi]

        return block_a.cpu(), block_b.cpu(), block_a_hat.cpu()

    def solve_similarity(self):
        """Synthesize old-class weights for the newest adapter's fc block.

        For each previous task, the old classes' rows under the newest
        adapter are set to a similarity-weighted combination of the new
        classes' rows: ``softmax(cos(A_hat, A), dim=1) @ B``.

        The pairwise cosine similarity and the weighted sum are computed
        with vectorized tensor ops instead of the original O(n^2) Python
        double loops — same result, computed at C speed.
        """
        for task_id in range(self._cur_task):
            start_cls, end_cls = self.get_cls_range(task_id=task_id)

            A, B, A_hat = self.get_A_B_Ahat(task_id=task_id)

            # Pairwise cosine similarity between old-class prototypes (A_hat)
            # and new-class prototypes (A): shape (len(A_hat), len(A)).
            similarity = F.cosine_similarity(
                A_hat.unsqueeze(1), A.unsqueeze(0), dim=2
            )

            # softmax the similarity, it will be failed if not use it
            similarity = F.softmax(similarity, dim=1)

            # Weighted combination of the new classes' newest-adapter rows.
            B_hat = similarity @ B

            # B_hat(old_cls2)
            self._network.fc.weight.data[
                start_cls:end_cls, -self._network.out_dim :
            ] = B_hat.to(self._device)

    def solve_sim_reset(self):
        """Recompute every off-diagonal fc block belonging to old classes.

        Unlike :meth:`solve_similarity`, which only fills the newest
        adapter's block, this variant re-solves each (old task, newer
        adapter) block: similarity is measured against the classes learned
        after the adapter ``dim_id`` was introduced, and their rows are
        combined with the softmaxed weights.

        The similarity matrix and weighted sum are vectorized (replacing
        the original O(n^2) Python double loops), and per-task invariants
        are hoisted out of the inner loop.
        """
        out_dim = self._network.out_dim
        weight = self._network.fc.weight.data

        for task_id in range(self._cur_task):
            # Rows and adapter columns belonging to the old task `task_id`.
            start_cls, end_cls = self.get_cls_range(task_id=task_id)
            start_dim_old = task_id * out_dim
            end_dim_old = (task_id + 1) * out_dim

            for dim_id in range(task_id + 1, self._cur_task + 1):
                start_dim = dim_id * out_dim
                end_dim = (dim_id + 1) * out_dim

                # Classes learned from the task that introduced adapter
                # `dim_id` onwards — the "new" classes for this block.
                start_cls_old = self.init_cls + (dim_id - 1) * self.inc_cls
                end_cls_old = self._total_classes

                A = weight[start_cls_old:end_cls_old, start_dim_old:end_dim_old].cpu()
                B = weight[start_cls_old:end_cls_old, start_dim:end_dim].cpu()
                A_hat = weight[start_cls:end_cls, start_dim_old:end_dim_old].cpu()

                # Pairwise cosine similarity, shape (len(A_hat), len(A)).
                similarity = F.cosine_similarity(
                    A_hat.unsqueeze(1), A.unsqueeze(0), dim=2
                )

                # softmax the similarity, it will be failed if not use it
                similarity = F.softmax(similarity, dim=1)  # dim=1, not dim=0

                # Weighted combination of the new classes' rows under `dim_id`.
                B_hat = similarity @ B

                # B_hat(old_cls2)
                weight[start_cls:end_cls, start_dim:end_dim] = B_hat.to(self._device)
