import logging
import copy
import torch
from torch import nn, optim
from torch.nn import functional as F
from torch.distributions.multivariate_normal import MultivariateNormal
from typing import Callable
import numpy as np

from learners.baseline import Learner as BaselineLearner
from models.single_net import SSIAT

# NOTE(review): presumably the DataLoader worker-process count; it is not
# referenced anywhere in this file — confirm against callers before removing.
num_workers = 8


class Learner(BaselineLearner):
    """Incremental learner with semantically-shifted classifier alignment.

    After training on each incremental task, the stored old-class feature
    prototypes are compensated for semantic drift — estimated from the
    displacement of the current task's features before vs. after training —
    and the classifier head is then finetuned on Gaussian samples drawn
    from the per-class feature statistics.
    """

    # Features of the current task's training data, extracted with the
    # backbone before (old) and after (new) training on that task.
    train_embeddings_old: torch.Tensor
    train_embeddings_new: torch.Tensor

    def __init__(self, args, model_func: Callable = SSIAT):
        super().__init__(args, model_func)

        self.ca_epochs = args["ca_epochs"]  # epochs of CA finetuning
        self.ca_lr = args["ca_lr"]  # learning rate for CA finetuning
        # Forward pass used during classifier alignment; ca=True routes the
        # sampled feature vectors through the model's CA path.
        self.ca_forward = lambda model, inputs: model(inputs, ca=True)

    def before_train(self):
        """Snapshot current-task features before the backbone is updated."""
        super().before_train()
        if self._cur_task > 0:
            self._network.to(self._device)
            self.train_embeddings_old, _ = self._extract_tokens(
                self.train_loader
            )

    def after_train(self):
        """Compensate old prototypes for drift, then align the classifier."""
        # Clean multi-GPU teardown
        self._teardown_multi_gpu_training()

        if self._cur_task > 0:
            # Semantically shifted classifier alignment: estimate how far the
            # old-class prototypes drifted by comparing current-task features
            # before vs. after training on this task.
            self.train_embeddings_new, _ = self._extract_tokens(
                self.train_loader
            )
            old_class_mean = self._class_means[: self._known_classes]
            gap = self.displacement(
                self.train_embeddings_old,
                self.train_embeddings_new,
                old_class_mean,
                4.0,
            )
            # BUGFIX: a debug leftover (`gap = 0`) discarded the displacement
            # computed above, silently disabling drift compensation. Apply
            # the estimated gap to the stored prototypes instead.
            gap = torch.as_tensor(
                gap, dtype=old_class_mean.dtype, device=old_class_mean.device
            )
            old_class_mean += gap
            self._class_means[: self._known_classes] = old_class_mean

        self._compute_class_mean(self.data_manager)

        task_size = self.data_manager.get_task_size(self._cur_task)
        if self._cur_task > 0 and self.ca_epochs > 0:
            print("\n" + "=" * 50)
            print("Classifier Alignment Finetuning")
            print("=" * 50 + "\n")
            self._train_clf_alignment(
                task_size,
            )

    def _train_clf_alignment(self, task_size):
        """Finetune the classifier head on features sampled from per-class
        Gaussians (``self._class_means`` / ``self._class_covs``).

        Args:
            task_size: number of classes per task; used to derive each
                class's task id for a task-age-dependent mean scaling.
        """
        for p in self._network.fc.parameters():
            p.requires_grad = True

        param_list = [
            p for p in self._network.fc.parameters() if p.requires_grad
        ]
        network_params = [
            {
                "params": param_list,
                "lr": self.ca_lr,
                "weight_decay": self.weight_decay,
            }
        ]

        optimizer = optim.SGD(
            network_params,
            lr=self.ca_lr,
            momentum=0.9,
            weight_decay=self.weight_decay,
        )
        scheduler = optim.lr_scheduler.CosineAnnealingLR(
            optimizer=optimizer, T_max=self.ca_epochs
        )

        # Only the fc head is trained; the rest of the network stays in
        # eval mode.
        self._network.eval()
        self._network.to(self._device)
        # Clean multi-GPU setup for classifier alignment
        self._setup_multi_gpu_training()

        for ep in range(self.ca_epochs):
            losses = 0.0

            sampled_data = []
            sampled_label = []
            num_sampled_pcls = 256  # feature vectors sampled per class

            for c_id in range(self._total_classes):
                t_id = c_id // task_size
                # Older tasks get a smaller mean scaling (range 0.9 .. 1.0).
                decay = (t_id + 1) / (self._cur_task + 1) * 0.1
                cls_mean = self._class_means[c_id].clone().cpu() * (
                    0.9 + decay
                )
                cls_cov = self._class_covs[c_id].cpu()

                # FIXME: the precision matrix becomes NaN when built on GPU
                # but is fine on CPU — hence the explicit .cpu() above.
                m = MultivariateNormal(cls_mean, cls_cov)
                sampled_data_single = m.sample(
                    sample_shape=torch.Size((num_sampled_pcls,))
                )
                sampled_data.append(sampled_data_single)
                sampled_label.extend([c_id] * num_sampled_pcls)

            inputs = torch.cat(sampled_data).float().to(self._device)
            targets = torch.tensor(sampled_label).long().to(self._device)
            # Shuffle so each mini-batch mixes classes.
            sf_indexes = torch.randperm(inputs.size(0))
            inputs = inputs[sf_indexes]
            targets = targets[sf_indexes]

            for _iter in range(self._total_classes):
                inp = inputs[
                    _iter * num_sampled_pcls : (_iter + 1) * num_sampled_pcls
                ]
                tgt = targets[
                    _iter * num_sampled_pcls : (_iter + 1) * num_sampled_pcls
                ]
                outputs = self.ca_forward(self._network, inp)
                logits = outputs["logits"] * self.args["scale"]
                loss = F.cross_entropy(logits[:, : self._total_classes], tgt)

                optimizer.zero_grad()
                loss.backward()
                optimizer.step()
                losses += loss.item()

            scheduler.step()
            test_acc = self._compute_accuracy(self._network, self.test_loader)
            info = "CA Task {} => Loss {:.3f}, Test_accy {:.3f}".format(
                self._cur_task, losses / self._total_classes, test_acc
            )
            logging.info(info)
            # Track the best checkpoint across CA epochs (for early stop).
            if test_acc >= self.best_acc_cur:
                self.best_acc_cur = test_acc
                self.best_acc[self._cur_task] = self.best_acc_cur
                self.best_epoch[self._cur_task] = ep
                self.best_model = copy.deepcopy(self._network.state_dict())

        report_str = (
            f"Task {self._cur_task} => Best accuracy: {self.best_acc_cur}[{self.best_epoch[self._cur_task]}],"
            + f" Average accuracy: {np.mean(self.best_acc)}"
        )
        logging.info(report_str)
        if self.args["early_stop"]:
            self._network.load_state_dict(self.best_model)

    def displacement(self, Y1, Y2, embedding_old, sigma):
        """Estimate the semantic drift of old-class prototypes.

        For each old prototype, the per-sample feature displacement
        ``Y2 - Y1`` is averaged with Gaussian-kernel weights derived from
        the distance between the prototype and the pre-training features.

        Args:
            Y1: current-task features before training, shape (N, D).
            Y2: current-task features after training, shape (N, D).
            embedding_old: old-class prototypes, shape (C, D).
            sigma: bandwidth of the Gaussian kernel.

        Returns:
            np.ndarray of shape (C, D): estimated drift per prototype.
        """
        Y1, Y2 = Y1.cpu(), Y2.cpu()
        DY = Y2 - Y1
        # Pairwise squared distances prototypes x features: shape (C, N).
        distance = np.sum(
            (
                np.tile(Y1[None, :, :], [embedding_old.shape[0], 1, 1])
                - np.tile(embedding_old[:, None, :], [1, Y1.shape[0], 1])
            )
            ** 2,
            axis=2,
        )
        # Row-normalized Gaussian kernel weights (epsilon avoids div-by-0).
        W = np.exp(-distance / (2 * sigma**2)) + 1e-5
        W_norm = W / np.tile(np.sum(W, axis=1)[:, None], [1, W.shape[1]])
        # Kernel-weighted average displacement per prototype: shape (C, D).
        displacement = np.sum(
            np.tile(W_norm[:, :, None], [1, 1, DY.shape[1]])
            * np.tile(DY[None, :, :], [W.shape[0], 1, 1]),
            axis=1,
        )
        return displacement
