import torch
from typing import Callable
from learners.multi_base_ca import Learner as BaselineLearner

# from learners.baseline import ClsAlign
from models.multi_net import BaseMulti

# DataLoader worker-process count — NOTE(review): not referenced in this
# chunk; presumably consumed by dataloader setup elsewhere — confirm.
num_workers = 8


class Learner(
    BaselineLearner,
):
    """Incremental learner that augments each training batch with pseudo-
    features sampled from stored per-class Gaussian distributions
    (``self.GD``) of previously learned classes, to mitigate forgetting.
    """

    _network: BaseMulti

    def __init__(self, args, model_func: Callable = BaseMulti):
        super().__init__(args, model_func)

    def _inc_train_batch(self, batch, batch_idx, criterion):
        """Handle a single batch of incremental training.

        Computes the classification loss jointly on the current batch and on
        features sampled from the Gaussian distribution of every previously
        seen class (Gaussian-replay). On the first task (no known classes)
        it falls back to plain classification on the batch — the previous
        implementation crashed there on ``torch.cat([])``.

        Args:
            batch: ``(indices, inputs, targets)`` tuple from the dataloader.
            batch_idx: Unused; kept for interface compatibility.
            criterion: Classification loss, e.g. cross-entropy.

        Returns:
            Tuple ``(out, o_metrics)``: ``out`` carries the loss and logits;
            ``o_metrics`` carries accuracy and loss statistics for logging.
        """
        del batch_idx
        _, inputs, targets = batch
        inputs, targets = inputs.to(self._device), targets.to(self._device)
        aux_targets = targets.clone()
        # NOTE(review): forward_test (not forward_train) is used here in the
        # original code — looks deliberate (frozen-feature forward?); confirm.
        output = self.forward_test(self._network, inputs)
        logits = output["logits"]

        if self._known_classes > 0:
            # Gaussian sample loss: draw `num_sampled_pcls` pseudo-features
            # per old class, shuffle, and cap at `nb_samples` total.
            num_sampled_pcls = inputs.size(0)
            nb_samples = num_sampled_pcls * 20
            sampled_data = []
            sampled_label = []
            for c_id in range(self._known_classes):
                m = self.GD[c_id]
                sampled_data.append(
                    m.sample(sample_shape=torch.Size((num_sampled_pcls,)))
                )
                sampled_label.extend([c_id] * num_sampled_pcls)
            sampled_data = torch.cat(sampled_data).float().to(self._device)
            sampled_label = (
                torch.tensor(sampled_label).long().to(self._device)
            )

            sf_indexes = torch.randperm(sampled_data.size(0))
            inputs_gau = sampled_data[sf_indexes][:nb_samples]
            targets_gau = sampled_label[sf_indexes][:nb_samples]
            # `[:, None]` inserts a length-1 dim — presumably the
            # (batch, 1, feat) layout ca_forward expects; TODO confirm.
            logits_gau = self.ca_forward(
                self._network, inputs_gau[:, None]
            )["logits"]
            logits_all = torch.cat([logits, logits_gau], dim=0)
            aux_targets_all = torch.cat([aux_targets, targets_gau], dim=0)
        else:
            # First task: nothing to replay; train on the batch alone.
            logits_all = logits
            aux_targets_all = aux_targets

        loss = criterion(logits_all, aux_targets_all)

        # Batch accuracy is measured on the real samples only (not the
        # Gaussian replays).
        preds = logits.argmax(dim=1)
        correct = preds.eq(aux_targets).cpu().sum().item()
        total = len(aux_targets)

        out = {
            "loss": loss,
            "logits": output["logits"],
        }
        o_metrics = {
            "acc_metric": {"train_acc": {"correct": correct, "total": total}},
            "loss_metric": {"cross_entropy": loss.item()},
        }

        return out, o_metrics
