import copy
import logging
import numpy as np
import torch
from torch import optim
from torch.distributions.multivariate_normal import MultivariateNormal
from torch.nn import functional as F

from .sdm import Learner as SDMLearner
from models.single_net import StandardNet


class Learner(SDMLearner):
    """Incremental learner with SLCA-style Classifier Alignment (CA).

    After the representation training of each task, the classifier head is
    finetuned on pseudo-features drawn from per-class Gaussian statistics
    (mean and covariance), optionally with per-task logit normalization.
    The aligned head is kept only transiently: it is backed up before
    alignment and recalled in ``after_task``.
    """

    _network: StandardNet
    _old_network: StandardNet

    def __init__(self, args, model_func=StandardNet):
        """
        Args:
            args: config mapping; this subclass reads "ca_lr", "ca_epochs",
                "ca_with_logit_norm" (``None`` disables logit normalization),
                "fc_temperture" and "early_stop".
            model_func: network factory forwarded to the base learner.
        """
        # Modern super() call, consistent with after_task() below.
        super().__init__(args, model_func)
        self.ca_lr = args["ca_lr"]
        self.ca_epochs = args["ca_epochs"]
        # TODO SLCA logit norm setting and usage
        self.logit_norm = self.args["ca_with_logit_norm"]

    def _update_fc(self, nb_classes):
        """Grow the classifier head to `nb_classes` outputs, freezing old units."""
        self._network.update_fc(
            nb_classes,
            freeze_old=True,
            fc_kwargs={
                # NOTE: "fc_temperture" (sic) is the key spelling the config uses.
                "fc_temperture": self.args["fc_temperture"],
            },
        )

    def after_train(self):
        """Post-training hook: snapshot the fc head, refresh class statistics,
        and — from the second task onward — run classifier-alignment finetuning.
        """
        # Clean multi-GPU teardown
        self._teardown_multi_gpu_training()

        # Back up the pre-alignment classifier so after_task() can restore it.
        self._network.fc.backup()
        self._compute_class_mean(self.data_manager)
        task_size = self.data_manager.get_task_size(self._cur_task)

        if self._cur_task > 0 and self.ca_epochs > 0:
            banner = "=" * 50
            # Routed through logging (not print) so the banner reaches the
            # same sinks as the per-epoch CA logs emitted below.
            logging.info("\n%s\nClassifier Alignment Finetuning\n%s\n", banner, banner)
            self._train_clf_alignment(task_size)

    def after_task(self):
        super().after_task()

        # Restore the fc state captured by backup() in after_train().
        self._network.fc.recall()

    def _train_clf_alignment(self, task_size):
        """Finetune the classifier on Gaussian-sampled pseudo-features.

        For every class seen so far, draws ``num_sampled_pcls`` feature
        vectors from a multivariate normal built from the stored class mean
        (slightly rescaled per task) and covariance, then trains only the fc
        parameters with SGD + cosine annealing for ``ca_epochs`` epochs.
        Keeps the best-test-accuracy checkpoint and, when "early_stop" is
        set, reloads it at the end.

        Args:
            task_size: number of classes per task; maps a class id to its
                task id for the mean rescaling.
        """
        for p in self._network.fc.parameters():
            p.requires_grad = True

        crct_num = self._total_classes
        param_list = [
            p for p in self._network.fc.parameters() if p.requires_grad
        ]
        network_params = [
            {
                "params": param_list,
                "lr": self.ca_lr,
                "weight_decay": self.weight_decay,
            }
        ]

        optimizer = optim.SGD(
            network_params,
            lr=self.ca_lr,
            momentum=0.9,
            weight_decay=self.weight_decay,
        )
        scheduler = optim.lr_scheduler.CosineAnnealingLR(
            optimizer=optimizer, T_max=self.ca_epochs
        )

        self._network.to(self._device)
        # eval() freezes batch-norm/dropout behavior; gradients still flow
        # to the fc parameters enabled above.
        self._network.eval()
        for ep in range(self.ca_epochs):
            losses = 0.0

            sampled_data = []
            sampled_label = []
            num_sampled_pcls = 256

            for c_id in range(crct_num):
                t_id = c_id // task_size
                # Rescale class means by a factor in (0.9, 1.0]: the current
                # task gets 1.0, older tasks progressively less.
                decay = (t_id + 1) / (self._cur_task + 1) * 0.1
                cls_mean = torch.tensor(
                    self._class_means[c_id], dtype=torch.float64
                ).to(self._device) * (
                    0.9 + decay
                )
                cls_cov = self._class_covs[c_id].to(self._device)

                m = MultivariateNormal(cls_mean.float(), cls_cov.float())
                sampled_data_single = m.sample(
                    sample_shape=torch.Size((num_sampled_pcls,))
                )
                sampled_data.append(sampled_data_single)
                sampled_label.extend([c_id] * num_sampled_pcls)

            sampled_data = torch.cat(sampled_data).float().to(self._device)
            sampled_label = torch.tensor(sampled_label).long().to(self._device)

            inputs = sampled_data
            targets = sampled_label

            # Shuffle so each mini-batch mixes classes.
            sf_indexes = torch.randperm(inputs.size(0))
            inputs = inputs[sf_indexes]
            targets = targets[sf_indexes]

            # One mini-batch of num_sampled_pcls samples per seen class.
            for _iter in range(crct_num):
                inp = inputs[
                    _iter * num_sampled_pcls : (_iter + 1) * num_sampled_pcls
                ]
                tgt = targets[
                    _iter * num_sampled_pcls : (_iter + 1) * num_sampled_pcls
                ]
                outputs = self._network(inp, ca=True)
                logits = outputs["logits"]

                if self.logit_norm is not None:
                    # Per-task logit normalization: divide logits by the mean
                    # of the per-task L2 norms, then by the logit_norm factor.
                    per_task_norm = []
                    prev_t_size = 0
                    cur_t_size = 0
                    for _ti in range(self._cur_task + 1):
                        cur_t_size += self.task_sizes[_ti]
                        temp_norm = (
                            torch.norm(
                                logits[:, prev_t_size:cur_t_size],
                                p=2,
                                dim=-1,
                                keepdim=True,
                            )
                            + 1e-7  # guard against division by zero
                        )
                        per_task_norm.append(temp_norm)
                        prev_t_size += self.task_sizes[_ti]
                    per_task_norm = torch.cat(per_task_norm, dim=-1)
                    norms = per_task_norm.mean(dim=-1, keepdim=True)

                    decoupled_logits = (
                        torch.div(logits[:, :crct_num], norms) / self.logit_norm
                    )
                    loss = F.cross_entropy(decoupled_logits, tgt)
                else:
                    loss = F.cross_entropy(logits[:, :crct_num], tgt)

                optimizer.zero_grad()
                loss.backward()
                optimizer.step()
                losses += loss.item()
            scheduler.step()

            test_acc = self._compute_accuracy(self._network, self.test_loader)
            info = "CA Task {} => Loss {:.3f}, Test_accy {:.3f}".format(
                self._cur_task, losses / self._total_classes, test_acc
            )
            logging.info(info)
            # Track the best checkpoint across CA epochs.
            # NOTE(review): best_acc_cur/best_acc/best_epoch are presumably
            # initialized/reset by the SDMLearner base class — verify there.
            if test_acc >= self.best_acc_cur:
                self.best_acc_cur = test_acc
                self.best_acc[self._cur_task] = self.best_acc_cur
                self.best_epoch[self._cur_task] = ep
                self.best_model = copy.deepcopy(self._network.state_dict())

        report_str = (
            f"Task {self._cur_task} => Best accuracy: {self.best_acc_cur}[{self.best_epoch[self._cur_task]}],"
            + f" Average accuracy: {np.mean(self.best_acc)}"
        )
        logging.info(report_str)
        if self.args["early_stop"]:
            self._network.load_state_dict(self.best_model)
