import torch
import torch.nn as nn
from learners.ssiat import Learner as BaselineLearner


class Learner(BaselineLearner):
    """SSIAT variant that trains the classifier jointly with Gaussian
    feature replay of previously learned classes.

    Classifier alignment is folded into the incremental training loop
    itself, so the separate CA phase of the baseline must be disabled
    (see the check in :meth:`after_train`).
    """

    def get_optimizer(self, lr):
        """Unfreeze the classification head, then build the baseline optimizer.

        Args:
            lr: learning rate forwarded to the parent optimizer factory.

        Returns:
            The optimizer created by ``BaselineLearner.get_optimizer``.
        """
        # The fc head must stay trainable: old-class logits are supervised
        # through Gaussian replay on every incremental batch.
        for p in self._network.fc.parameters():
            p.requires_grad = True
        return super().get_optimizer(lr)

    def _inc_train_batch(self, batch, batch_idx, criterion):
        """Handle a single batch of incremental training.

        Combines the classification loss on the live batch with a replay
        loss computed on feature vectors sampled from the stored per-class
        Gaussians (``self.GD``) of previously learned classes.

        Args:
            batch: tuple ``(_, inputs, targets)`` from the train loader.
            batch_idx: unused; kept for interface compatibility.
            criterion: loss applied to the concatenated logits/targets.

        Returns:
            ``(out, o_metrics)`` where ``out`` carries the loss and the
            live-batch logits, and ``o_metrics`` carries scalar metrics.
        """
        del batch_idx  # required by the interface, not used here

        # --- standard image-classification forward on the live batch ---
        _, inputs, targets = batch
        inputs, targets = inputs.to(self._device), targets.to(self._device)
        aux_targets = targets.clone()
        output = self.forward_train(self._network, inputs)
        logits = output["logits"]

        # --- Gaussian feature replay of old classes ---
        # Draw one live-batch worth of feature vectors per previously seen
        # class from its stored Gaussian and score them with the head.
        num_sampled_pcls = inputs.size(0)
        if self._known_classes > 0:
            sampled_data = []
            sampled_label = []
            for c_id in range(self._known_classes):
                m = self.GD[c_id]
                sampled_data.append(
                    m.sample(sample_shape=torch.Size((num_sampled_pcls,)))
                )
                sampled_label.extend([c_id] * num_sampled_pcls)
            sampled_data = torch.cat(sampled_data).float().to(self._device)
            sampled_label = torch.tensor(sampled_label).long().to(self._device)

            # Shuffle the replayed features.  The [:nb_samples] slice is a
            # full take (nb_samples equals the total drawn); it only bounds
            # the take if the two quantities ever diverge.
            nb_samples = num_sampled_pcls * self._known_classes
            sf_indexes = torch.randperm(sampled_data.size(0))
            inputs_gau = sampled_data[sf_indexes][:nb_samples]
            targets_gau = sampled_label[sf_indexes][:nb_samples]
            logits_gau = self.ca_forward(self._network, inputs_gau)["logits"]

            # Joint loss over live and replayed logits.
            logits_all = torch.cat([logits, logits_gau], dim=0)
            aux_targets_all = torch.cat([aux_targets, targets_gau], dim=0)
        else:
            # First task: nothing to replay (torch.cat on an empty list
            # would raise), so train on the live batch alone.
            logits_all = logits
            aux_targets_all = aux_targets
        loss = criterion(logits_all, aux_targets_all)

        # Accuracy is reported on the live batch only, not on replay.
        _, preds = torch.max(logits, dim=1)
        correct = preds.eq(aux_targets.expand_as(preds)).cpu().sum().item()
        total = len(aux_targets)

        out = {
            "loss": loss,
            "logits": output["logits"],
        }
        o_metrics = {
            "acc_metric": {"train_acc": {"correct": correct, "total": total}},
            "loss_metric": {"cross_entropy": loss.item()},
        }

        return out, o_metrics

    def after_train(self):
        """Post-task housekeeping: teardown, mean-drift correction, stats.

        After each task this (1) tears down multi-GPU training, (2) shifts
        the stored old-class means by the semantic drift estimated between
        old and new embeddings of the training data, and (3) refreshes the
        class means and per-class Gaussian distributions used for replay.
        """
        # Clean multi-GPU teardown
        self._teardown_multi_gpu_training()

        if self._cur_task > 0:
            # Semantically shifted classifier alignment: estimate how the
            # backbone's representation drifted for the old classes and
            # move their stored means by the estimated displacement.
            self.train_embeddings_new, _ = self._extract_tokens(
                self.train_loader
            )
            old_class_mean = self._class_means[: self._known_classes]
            gap = self.displacement(
                self.train_embeddings_old,
                self.train_embeddings_new,
                old_class_mean,
                4.0,  # kernel bandwidth of the displacement estimate — TODO confirm
            )
            old_class_mean += gap
            self._class_means[: self._known_classes] = old_class_mean

        self._compute_class_mean(self.data_manager)
        self._compute_class_distribution(update=True)

        # CA is folded into the training loop, so the separate CA phase
        # must be disabled.  Explicit raise (not `assert`) so the check
        # survives `python -O`; same exception type as before.
        if self.ca_epochs > 0:
            raise AssertionError("CA epochs should be zero")
