import copy
import logging
import numpy as np
import torch
from torch import optim
from torch import nn
from torch.nn import functional as F

from .sdm import Learner as SDMLearner
from models.single_net import StandardNet


class Learner(SDMLearner):
    """SDM learner with SLCA-style classifier-alignment (CA) finetuning.

    After each incremental task finishes, the classifier head is
    finetuned on class-mean prototypes that are blended with the most
    similar patch embeddings extracted from the current training batch.
    """

    _network: StandardNet
    _old_network: StandardNet

    def __init__(self, args):
        super().__init__(args)
        # Classifier-alignment hyperparameters.
        self.ca_lr = args["ca_lr"]
        self.ca_epochs = args["ca_epochs"]
        # TODO SLCA logit norm setting and usage
        self.logit_norm = self.args["ca_with_logit_norm"]

    def after_train(self):
        """Post-task hook: compute class means, then run CA finetuning.

        CA finetuning is skipped for the very first task and when
        ``ca_epochs`` is zero.
        """
        # TODO multi-gpu setting
        if len(self._multiple_gpus) > 1:
            self._network = self._network.module  # type: ignore

        self._compute_class_mean(self.data_manager)
        task_size = self.data_manager.get_task_size(self._cur_task)

        if self._cur_task > 0 and self.ca_epochs > 0:
            print("\n" + "=" * 50)
            print("Classifier Alignment Finetuning")
            print("=" * 50 + "\n")
            self._train_clf_alignment(task_size)

    def _train_clf_alignment(self, task_size):
        """Finetune the classification head with the alignment loss.

        Only ``self._network.fc`` is trained; the backbone stays frozen
        (features are extracted under ``torch.no_grad`` inside
        :meth:`align_loss`). The checkpoint with the best test accuracy
        observed during the run is restored at the end.

        Args:
            task_size: size of the current task (currently unused here;
                kept for interface compatibility with callers).
        """
        for p in self._network.fc.parameters():
            p.requires_grad = True
        # Previously re-read from args; self.ca_epochs holds the same value.
        run_epochs = self.ca_epochs
        param_list = [
            p for p in self._network.fc.parameters() if p.requires_grad
        ]

        optimizer = optim.SGD(
            [
                {
                    "params": param_list,
                    "lr": self.ca_lr,
                    "weight_decay": self.weight_decay,
                }
            ],
            lr=self.ca_lr,
            momentum=0.9,
            weight_decay=self.weight_decay,
        )
        scheduler = optim.lr_scheduler.CosineAnnealingLR(
            optimizer=optimizer, T_max=run_epochs
        )

        self._network.to(self._device)
        # eval() keeps BN/dropout frozen while only the head is trained.
        self._network.eval()
        for epoch in range(run_epochs):
            losses = 0.0

            for _, inputs, targets in self.train_loader:
                inputs = inputs.to(self._device)
                targets = targets.to(self._device)

                loss = self.align_loss(
                    inputs,
                    targets,
                    self._class_means,
                    alpha=self.args["align_alpha"],
                )

                optimizer.zero_grad()
                loss.backward()
                optimizer.step()
                losses += loss.item()
            scheduler.step()

            test_acc = self._compute_accuracy(self._network, self.test_loader)
            # NOTE(review): the loss is averaged over _total_classes rather
            # than the number of batches — confirm this is intended.
            info = "CA Task {} => Loss {:.3f}, Test_accy {:.3f}".format(
                self._cur_task, losses / self._total_classes, test_acc
            )
            logging.info(info)
            if test_acc >= self.best_acc_cur:
                self.best_acc_cur = test_acc
                self.best_acc[self._cur_task] = self.best_acc_cur
                self.best_epoch[self._cur_task] = epoch
                self.best_model = copy.deepcopy(self._network.state_dict())

        report_str = f"Task {self._cur_task} => Best accuracy: {self.best_acc_cur}[{self.best_epoch[self._cur_task]}], Average accuracy: {np.mean(self.best_acc)}"
        logging.info(report_str)
        self._network.load_state_dict(self.best_model)

    def align_loss(self, inputs, targets, class_means: np.ndarray, alpha=0.6):
        """Cross-entropy loss on blended class-prototype features.

        Samples random class prototypes (class means), blends each with a
        softmax-weighted average of its top-20 most similar patch
        embeddings from the current batch, and classifies the blend with
        the head.

        Args:
            inputs: batch of inputs, shape ``[B, ...]``.
            targets: labels for ``inputs`` (unused — supervision comes
                from the sampled prototype classes instead).
            class_means: per-class mean features,
                ``[num_classes, feat_dim]``.
            alpha: prototype weight in the blend; ``1 - alpha`` goes to
                the retrieved patch embeddings.

        Returns:
            Scalar cross-entropy loss over the sampled prototypes, with
            logits scaled by ``args["scale"]``.
        """
        # Backbone is frozen: only the head receives gradients.
        with torch.no_grad():
            feats_new = self._network.extract_feats(inputs)

        # Number of prototypes to draw; fall back to 32 for a degenerate
        # batch of size <= 1.
        length = inputs.size(0) if inputs.size(0) > 1 else 32
        indices = torch.randint(low=0, high=self._total_classes, size=(length,))
        # Sample `length` prototypes (class means) from all seen classes.
        proto_aug = torch.FloatTensor(class_means[indices]).to(feats_new.device)
        targets_aug = indices.long().to(feats_new.device)

        # Patch embeddings flattened to [K, D]. Assumes feats_new is
        # (batch, tokens, dim) with token 0 being CLS — TODO confirm
        # against StandardNet.extract_feats.
        pat_embs = feats_new[:, 1:, :].reshape(-1, feats_new.size(-1))
        assert proto_aug.shape[-1] == pat_embs.shape[-1], (
            proto_aug.shape,
            pat_embs.shape,
        )
        assert proto_aug.dim() == 2 and pat_embs.dim() == 2, (
            proto_aug.dim(),
            pat_embs.dim(),
        )
        # Cosine similarity between prototypes [B, D] and patches [K, D].
        sim = torch.einsum(
            "BD,KD->BK",
            F.normalize(proto_aug, p=2, dim=-1),
            F.normalize(pat_embs, p=2, dim=-1),
        )
        # [B, topk] — top-k count is hard-coded to 20.
        topk_s, topk_idx = torch.topk(sim, k=20, dim=-1)
        score = F.softmax(topk_s, dim=-1)
        # Advanced indexing replaces the original repeat+gather: identical
        # result without materializing a [B, K, D] copy of all patches.
        select_pats = pat_embs[topk_idx]  # [B, topk, D]
        proto_aug = alpha * proto_aug + (1 - alpha) * torch.einsum(
            "BK,BKD->BD",
            score,
            select_pats,
        )
        logits = self._network(proto_aug, ca=True)["logits"]
        return F.cross_entropy(logits * self.args["scale"], targets_aug)
