# import copy
# import logging
# import math
# import numpy as np
# import torch
# from torch import nn, optim
# from torch.nn import functional as F
# from tqdm import tqdm

# from .baseline import Learner as BaselineLearner
# from utils.toolkit import tensor2numpy
# from models.single_net import BaseSingle
import math

import torch
from torch.nn import functional as F

from models.single_net import BaseSingle

from .baseline import Learner as BaselineLearner

num_workers = 8


class Learner(BaselineLearner):
    """Incremental learner that extends the baseline trainer with a
    patch-token distillation loss between the frozen old network and the
    current network. Also reads the classifier-alignment hyper-parameter
    used by the (currently disabled) alignment finetuning stage.
    """

    def __init__(self, args, data_manager, model_func=BaseSingle):
        super().__init__(args, data_manager, model_func)
        # Forward helper for classifier alignment: pass pre-extracted
        # features straight through the classifier head (ca=True).
        self.ca_forward = lambda model, inputs: model(inputs, ca=True)
        # Loss hyper-parameters.
        self.distill_alpha = args["distill_alpha"]  # weight of the distillation term
        self.align_alpha = args["align_alpha"]  # proto/patch mixing weight for alignment

    def _inc_train_batch(self, batch, batch_idx, criterion, strategy=None):
        """Full classifier training step.

        Runs the baseline training step, then adds the weighted
        patch-distillation loss to ``out["loss"]`` and records its raw
        (unweighted) value under ``o_metrics["loss_metric"]``.
        """
        out, o_metrics = super()._inc_train_batch(
            batch, batch_idx, criterion, strategy=strategy
        )

        _, inputs, _ = batch
        inputs = inputs.to(self._device)
        loss_dis = self.dis_patch_loss(inputs)
        out["loss"] += loss_dis * self.distill_alpha
        o_metrics["loss_metric"]["distill_loss"] = loss_dis.item()
        return out, o_metrics

    def dis_patch_loss(self, inputs):
        """Patch-token distillation loss between old and new backbones.

        Each patch token's drift — the L2 distance between its normalized
        new and old embeddings — is weighted by an angular score derived
        from the new CLS-token/patch cosine similarity, so patches that
        are dissimilar to the CLS token contribute more.

        Returns a scalar tensor (mean over batch and patches).
        """
        # feats_*: assumed [B, L + 1, D] with the CLS token at index 0 and
        # patch tokens at 1..L — TODO confirm against extract_feats().
        with torch.no_grad():
            feats_old = self._old_network.extract_feats(inputs)
        feats_new = self._network.extract_feats(inputs)

        cls_token = feats_new[:, 0, :]
        # [B, L]: cosine similarity between the CLS token and each patch.
        cos_score = torch.einsum(
            "BD,BLD->BL",
            F.normalize(cls_token, p=2, dim=-1),
            F.normalize(feats_new[:, 1:, :], p=2, dim=-1),
        )
        # Negation makes patches *dissimilar* to the CLS token score high.
        score = self._prob(-cos_score)

        # [B, L]: per-patch L2 drift between normalized new/old embeddings.
        diff = F.normalize(feats_new[:, 1:, :], p=2, dim=-1) - F.normalize(
            feats_old[:, 1:, :], p=2, dim=-1
        )
        diff = torch.norm(diff, p=2, dim=-1)

        # Score-weighted mean of the drift (not a squared error).
        distill_loss = torch.mean(score * diff)

        return distill_loss

    @staticmethod
    def _prob(x):
        """Map a cosine value to a normalized angular score in [0, 1].

        Computes ``(pi - arccos(x)) / pi``: 1 when ``x == 1`` (angle 0)
        and 0 when ``x == -1`` (angle pi).

        Args:
            x: tensor of cosine values, nominally in [-1, 1]; any shape.

        Returns:
            Tensor of the same shape with values in [0, 1].
        """
        # Clamp guards against floating-point drift marginally outside
        # [-1, 1] after normalization, which would make acos return NaN.
        x = torch.clamp(x, -1.0, 1.0)
        # math.pi is a plain float; no need to allocate a CPU tensor here.
        return (math.pi - torch.acos(x)) / math.pi

    @property
    def loss_family(self):
        """Names of the loss components this learner reports."""
        return ["cross_entropy", "distill_loss"]

    # def after_train(self):
    #     # Clean multi-GPU teardown
    #     self._teardown_multi_gpu_training()

    #     self._compute_class_mean(self.data_manager)
    #     task_size = self.data_manager.get_task_size(self._cur_task)

    #     if self._cur_task > 0 and self.ca_epochs > 0:
    #         print("\n" + "=" * 60)
    #         print("Classifier Alignment Finetuning")
    #         print("=" * 60 + "\n")
    #         self._train_clf_alignment(
    #             task_size,
    #         )

    # def _train_clf_alignment(self, task_size):
    #     for p in self._network.fc.parameters():
    #         p.requires_grad = True

    #     # crct_num = self._total_classes
    #     param_list = [p for p in self._network.fc.parameters() if p.requires_grad]
    #     network_params = [
    #         {
    #             "params": param_list,
    #             "lr": self.ca_lr,
    #             "weight_decay": self.weight_decay,
    #         }
    #     ]

    #     optimizer = optim.SGD(
    #         network_params,
    #         lr=self.ca_lr,
    #         momentum=0.9,
    #         weight_decay=self.weight_decay,
    #     )
    #     scheduler = optim.lr_scheduler.CosineAnnealingLR(
    #         optimizer=optimizer, T_max=self.ca_epochs
    #     )

    #     self._network.to(self._device)
    #     # Clean multi-GPU setup for classifier alignment
    #     self._setup_multi_gpu_training()

    #     self._network.eval()
    #     for epoch in range(self.ca_epochs):
    #         losses = 0.0

    #         for _, (_, inputs, targets) in enumerate(self.train_loader):
    #             loss = 0.0
    #             inputs, targets = inputs.to(self._device), targets.to(self._device)

    #             loss_align = self.align_loss(
    #                 inputs,
    #                 targets,
    #                 self._class_means,
    #                 alpha=self.align_alpha,
    #             )
    #             loss = loss_align

    #             optimizer.zero_grad()
    #             loss.backward()
    #             optimizer.step()
    #             losses += loss.item()
    #         scheduler.step()

    #         test_acc = self._compute_accuracy(self._network, self.test_loader)
    #         info = "CA Task {} => Loss {:.3f}, Test_accy {:.3f}".format(
    #             self._cur_task, losses / self._total_classes, test_acc
    #         )
    #         logging.info(info)
    #         if test_acc >= self.best_acc_cur:
    #             self.best_acc_cur = test_acc
    #             self.best_acc[self._cur_task] = self.best_acc_cur
    #             self.best_epoch[self._cur_task] = epoch
    #             self.best_model = copy.deepcopy(self._network.state_dict())

    #     report_str = f"Task {self._cur_task} => Best accuracy: {self.best_acc_cur}[{self.best_epoch[self._cur_task]}], Average accuracy: {np.mean(self.best_acc)}"
    #     logging.info(report_str)
    #     if self.args["early_stop"]:
    #         self._network.load_state_dict(self.best_model)

    # def align_loss(self, inputs, targets, class_means, alpha=0.9):
    #     with torch.no_grad():
    #         feats_new = self._network.extract_feats(inputs)

    #     length = inputs.size(0) if inputs.size(0) > 1 else 32
    #     indices = torch.randint(low=0, high=self._total_classes, size=(length,))
    #     # sample B protos from old classes
    #     proto_aug = class_means[indices].to(feats_new.device)
    #     proto_aug = proto_aug.to(torch.float32)
    #     targets_aug = indices.long().to(feats_new.device)

    #     pat_embs = feats_new[:, 1:, :].reshape(-1, feats_new.size(-1))
    #     assert proto_aug.shape[-1] == pat_embs.shape[-1], (
    #         proto_aug.shape,
    #         pat_embs.shape,
    #     )
    #     assert proto_aug.dim() == 2 and pat_embs.dim() == 2, (
    #         proto_aug.dim(),
    #         pat_embs.dim(),
    #     )
    #     sim = torch.einsum(
    #         "BD,KD->BK",
    #         F.normalize(proto_aug, p=2, dim=-1),
    #         F.normalize(pat_embs, p=2, dim=-1),
    #     )
    #     # [B, topk]
    #     topk_s, topk_idx = torch.topk(sim, k=20, dim=-1)
    #     score = nn.functional.softmax(topk_s, dim=-1)
    #     # [B, K, D]
    #     select_pats = pat_embs[None, ...].repeat(proto_aug.size(0), 1, 1)
    #     select_pats = select_pats.gather(
    #         dim=1,
    #         index=topk_idx[..., None].expand(-1, -1, select_pats.size(-1)),
    #     )
    #     proto_aug = alpha * proto_aug + (1 - alpha) * torch.einsum(
    #         "BK,BKD->BD",
    #         score,
    #         select_pats,
    #     )
    #     logits = self._network(proto_aug, ca=True)["logits"]
    #     loss = F.cross_entropy(logits * self.args["scale"], targets_aug)
    #     return loss
