import copy
import logging
import math
import numpy as np
import torch
from torch import nn, optim
from torch.nn import functional as F
from tqdm import tqdm

from .baseline import Learner as BaselineLearner
from utils.toolkit import tensor2numpy
from models.single_net import BaseSingle

# DataLoader worker-process count. Not referenced anywhere in this chunk;
# presumably consumed by data-loading code elsewhere in the package — TODO confirm.
num_workers = 8


class Learner(BaselineLearner):
    """Incremental learner extending the baseline with:

    1. a patch-level distillation loss against the frozen old network
       during task training (``dis_patch_loss``), and
    2. a prototype-based classifier-alignment finetuning stage run after
       each task (``_train_clf_alignment`` / ``align_loss``).
    """

    def __init__(self, args, data_manager, model_func=BaseSingle):
        super().__init__(args, data_manager, model_func)
        # Classifier-alignment finetuning hyper-parameters.
        self.ca_lr = args["ca_lr"]
        self.ca_epochs = args["ca_epochs"]

        # Loss-weighting hyper-parameters.
        self.distill_alpha = args["distill_alpha"]
        self.align_alpha = args["align_alpha"]

    def _inc_train(
        self, epochs, train_loader, test_loader, optimizer, scheduler, criterion
    ):
        """Full classifier training on the current task.

        Combines the task classification loss with the patch-level
        distillation loss; tracks and optionally restores the best
        checkpoint (``early_stop``).
        """
        prog_bar = tqdm(range(epochs))
        test_acc = 0.0

        for epoch in prog_bar:
            self._network.train()
            losses = 0.0
            losses_dis = 0.0
            correct, total = 0, 0

            for _, inputs, targets in train_loader:
                inputs = inputs.to(self._device)
                targets = targets.to(self._device)
                # Remap absolute labels to task-local labels; samples of
                # previously-seen classes become -1 (expected to be
                # ignored by `criterion` — e.g. ignore_index=-1).
                aux_targets = targets.clone()
                aux_targets = torch.where(
                    aux_targets - self._known_classes >= 0,
                    aux_targets - self._known_classes,
                    -1,
                )

                output = self._network(inputs)
                # Only the logits of the new task's classes are trained here.
                logits = output["logits"][:, self._known_classes :]

                loss_cls = criterion(logits, aux_targets)
                loss_dis = self.dis_patch_loss(inputs)
                # Use the hyper-parameter cached in __init__ instead of
                # re-reading self.args (same value, consistent access).
                loss = loss_cls + loss_dis * self.distill_alpha

                optimizer.zero_grad()
                loss.backward()
                optimizer.step()
                losses += loss.item()
                losses_dis += loss_dis.item()

                _, preds = torch.max(logits, dim=1)
                correct += preds.eq(aux_targets).cpu().sum()
                total += len(aux_targets)

            if scheduler:
                scheduler.step()

            # Evaluate every `interval` epochs and on the final epoch;
            # epoch 0 is never evaluated. Otherwise keep the previous
            # test accuracy. (Equivalent to the original mixed and/or
            # conditional, spelled out explicitly.)
            if epoch > 0 and (
                epoch % self.interval == 0 or epoch == epochs - 1
            ):
                test_acc = self._compute_accuracy(self._network, test_loader)

            train_acc = np.around(
                tensor2numpy(correct) * 100 / total, decimals=2
            )
            info = "Task {}, Epoch {}/{} => Loss {:.3f}|{:.3f}, TrAcc {:.2f}, TeAcc {:.2f}".format(
                self._cur_task,
                epoch + 1,
                epochs,
                losses / len(train_loader),
                losses_dis / len(train_loader),
                train_acc,
                test_acc,
            )
            prog_bar.set_description(info)

            # Keep the best checkpoint (by test accuracy) for this task.
            if test_acc >= self.best_acc_cur:
                self.best_acc_cur = test_acc
                self.best_acc[self._cur_task] = self.best_acc_cur
                self.best_epoch[self._cur_task] = epoch
                self.best_model = copy.deepcopy(self._network.state_dict())

            logging.info(info)
        report_str = (
            f"Task {self._cur_task} => "
            + f"Best accuracy: {self.best_acc_cur:.2f}[{self.best_epoch[self._cur_task]}], "
            + f"Average accuracy: {np.mean(self.best_acc):.2f}"
        )
        logging.info(report_str)
        if self.args["early_stop"]:
            self._network.load_state_dict(self.best_model)

    def after_train(self):
        """Post-task hook: compute class means, then (from task 1 on)
        run the classifier-alignment finetuning stage."""
        # Clean multi-GPU teardown
        self._teardown_multi_gpu_training()

        self._compute_class_mean(self.data_manager)
        task_size = self.data_manager.get_task_size(self._cur_task)

        if self._cur_task > 0 and self.ca_epochs > 0:
            print("\n" + "=" * 50)
            print("Classifier Alignment Finetuning")
            print("=" * 50 + "\n")
            self._train_clf_alignment(
                task_size,
            )

    def _train_clf_alignment(self, task_size):
        """Finetune only the classifier head using the alignment loss.

        Args:
            task_size: number of classes in the current task. Currently
                unused in the body; kept for interface compatibility.
        """
        # Only the classifier head is optimized during alignment.
        for p in self._network.fc.parameters():
            p.requires_grad = True

        param_list = [
            p for p in self._network.fc.parameters() if p.requires_grad
        ]
        network_params = [
            {
                "params": param_list,
                "lr": self.ca_lr,
                "weight_decay": self.weight_decay,
            }
        ]

        optimizer = optim.SGD(
            network_params,
            lr=self.ca_lr,
            momentum=0.9,
            weight_decay=self.weight_decay,
        )
        scheduler = optim.lr_scheduler.CosineAnnealingLR(
            optimizer=optimizer, T_max=self.ca_epochs
        )

        self._network.to(self._device)
        # Clean multi-GPU setup for classifier alignment
        self._setup_multi_gpu_training()

        # eval() freezes normalization/dropout behavior; gradients still
        # flow through the classifier head being finetuned.
        self._network.eval()
        for epoch in range(self.ca_epochs):
            losses = 0.0

            for _, inputs, targets in self.train_loader:
                inputs = inputs.to(self._device)
                targets = targets.to(self._device)

                loss = self.align_loss(
                    inputs,
                    targets,
                    self._class_means,
                    alpha=self.align_alpha,
                )

                optimizer.zero_grad()
                loss.backward()
                optimizer.step()
                losses += loss.item()
            scheduler.step()

            test_acc = self._compute_accuracy(self._network, self.test_loader)
            # FIX: average over the number of batches, matching
            # _inc_train. The original divided by self._total_classes,
            # which mislabeled the reported "Loss" value.
            info = "CA Task {} => Loss {:.3f}, Test_accy {:.3f}".format(
                self._cur_task, losses / len(self.train_loader), test_acc
            )
            logging.info(info)
            if test_acc >= self.best_acc_cur:
                self.best_acc_cur = test_acc
                self.best_acc[self._cur_task] = self.best_acc_cur
                self.best_epoch[self._cur_task] = epoch
                self.best_model = copy.deepcopy(self._network.state_dict())

        report_str = f"Task {self._cur_task} => Best accuracy: {self.best_acc_cur}[{self.best_epoch[self._cur_task]}], Average accuracy: {np.mean(self.best_acc)}"
        logging.info(report_str)
        if self.args["early_stop"]:
            self._network.load_state_dict(self.best_model)

    def dis_patch_loss(self, inputs):
        """Patch-level distillation loss between new and old networks.

        The indexing below implies extract_feats returns [B, L + 1, D]
        with the CLS token at position 0 — TODO confirm against the
        backbone. Each patch's old-vs-new feature drift (L2 distance of
        normalized features) is weighted by a score derived from its
        cosine similarity to the new CLS token (via `_prob`), then
        averaged.
        """
        with torch.no_grad():
            feats_old = self._old_network.extract_feats(inputs)
        feats_new = self._network.extract_feats(inputs)
        cls_token = feats_new[:, 0, :]
        # Cosine similarity of each patch token to the CLS token: [B, L]
        cos_score = torch.einsum(
            "BD,BLD->BL",
            F.normalize(cls_token, p=2, dim=-1),
            F.normalize(feats_new[:, 1:, :], p=2, dim=-1),
        )
        # Negated before _prob: patches LESS similar to CLS get HIGHER
        # weight (see _prob: monotone increasing on [-1, 1]).
        score = self._prob(-cos_score)

        # Per-patch drift between normalized new and old features: [B, L]
        diff = F.normalize(feats_new[:, 1:, :], p=2, dim=-1) - F.normalize(
            feats_old[:, 1:, :], p=2, dim=-1
        )
        diff = torch.norm(diff, p=2, dim=-1)
        distill_loss = torch.mean(score * diff)

        return distill_loss

    def align_loss(self, inputs, targets, class_means, alpha=0.9, topk=20):
        """Classifier-alignment loss on augmented class prototypes.

        Samples class-mean prototypes, refines each one by mixing in the
        `topk` most similar patch embeddings from the current batch, and
        applies cross-entropy on the classifier output.

        Args:
            inputs: batch of images; only their patch embeddings are used.
            targets: batch labels. NOTE(review): unused — the loss
                targets are the sampled prototype labels instead.
            class_means: per-class prototype means, indexable by class id.
            alpha: mixing weight for the original prototype (1 - alpha
                goes to the patch-embedding aggregate).
            topk: number of most-similar patch embeddings aggregated per
                prototype (was a hard-coded 20; default preserves the
                original behavior).
        """
        with torch.no_grad():
            feats_new = self._network.extract_feats(inputs)

        # Number of prototypes to sample; fall back to 32 for a
        # degenerate batch of size <= 1.
        length = inputs.size(0) if inputs.size(0) > 1 else 32
        # Sample prototypes uniformly over ALL seen classes.
        indices = torch.randint(low=0, high=self._total_classes, size=(length,))
        proto_aug = class_means[indices].to(feats_new.device)
        proto_aug = proto_aug.to(torch.float32)
        targets_aug = indices.long().to(feats_new.device)

        # Flatten the batch's patch tokens (drop CLS at index 0): [B*L, D]
        pat_embs = feats_new[:, 1:, :].reshape(-1, feats_new.size(-1))
        assert proto_aug.shape[-1] == pat_embs.shape[-1], (
            proto_aug.shape,
            pat_embs.shape,
        )
        assert proto_aug.dim() == 2 and pat_embs.dim() == 2, (
            proto_aug.dim(),
            pat_embs.dim(),
        )
        # Cosine similarity of every prototype to every patch: [B, B*L]
        sim = torch.einsum(
            "BD,KD->BK",
            F.normalize(proto_aug, p=2, dim=-1),
            F.normalize(pat_embs, p=2, dim=-1),
        )
        # Softmax-weighted aggregate of each prototype's topk patches.
        topk_s, topk_idx = torch.topk(sim, k=topk, dim=-1)
        score = F.softmax(topk_s, dim=-1)
        select_pats = pat_embs[None, ...].repeat(proto_aug.size(0), 1, 1)
        select_pats = select_pats.gather(
            dim=1,
            index=topk_idx[..., None].expand(-1, -1, select_pats.size(-1)),
        )
        proto_aug = alpha * proto_aug + (1 - alpha) * torch.einsum(
            "BK,BKD->BD",
            score,
            select_pats,
        )
        logits = self._network(proto_aug, ca=True)["logits"]
        loss = F.cross_entropy(logits * self.args["scale"], targets_aug)
        return loss

    @staticmethod
    def _prob(x):
        """Map a cosine value in [-1, 1] to a weight in [0, 1].

        Computes (pi - arccos(x)) / pi, which increases monotonically
        from 0 at x = -1 to 1 at x = 1.

        Args:
            x: tensor of cosine values in [-1, 1] (any shape).
        """
        pi = torch.tensor(math.pi)
        result = (pi - torch.acos(x)) / pi
        return result
