from abc import ABC, abstractmethod
import torch
from typing import Any


class TrainingStrategy(ABC):
    """Interface that all batch-level training strategies implement.

    A strategy consumes one batch, runs the model through ``forward_fn``,
    and reports both the forward results and the logging metrics.
    """

    @abstractmethod
    def process_batch(
        self, batch, batch_idx, criterion, network, device, known_classes, forward_fn
    ) -> Any:
        """Run one training step on ``batch`` and return (outputs, metrics)."""
        ...

    @abstractmethod
    def get_name(self) -> str:
        """Human-readable identifier for this strategy, used in logs."""
        ...


class StandardTraining(TrainingStrategy):
    """Standard cross-entropy training restricted to the current task's classes.

    Absolute labels are shifted by ``known_classes`` so the loss and accuracy
    are computed only over the logits of the newly introduced classes.
    """

    def process_batch(
        self, batch, batch_idx, criterion, network, device, known_classes, forward_fn
    ):
        """Run one training step.

        Args:
            batch: Tuple ``(index, inputs, targets)``; the first item is unused.
            batch_idx: Unused; kept for interface compatibility.
            criterion: Loss function called as ``criterion(logits, targets)``.
            network: Model, invoked via ``forward_fn``.
            device: Device the batch tensors are moved to.
            known_classes: Count of classes from previous tasks; labels are
                remapped to ``target - known_classes``.
            forward_fn: Callable ``(network, inputs) -> {"logits": Tensor}``.

        Returns:
            Tuple ``(outputs, metrics)``: forward results with ``loss`` and the
            full ``logits``, plus accuracy/loss metric dicts for logging.
        """
        del batch_idx
        _, inputs, targets = batch
        inputs, targets = inputs.to(device), targets.to(device)
        # Remap absolute labels to task-local ones. torch.where allocates a new
        # tensor, so no defensive clone of `targets` is needed. Samples from
        # previously known classes would map to -1 and trip the assert below.
        aux_targets = torch.where(
            targets - known_classes >= 0,
            targets - known_classes,
            -1,
        )
        assert aux_targets.min() >= 0, "aux_targets should be non-negative"

        output = forward_fn(network, inputs)
        # Only the logits belonging to the current task's classes are scored.
        logits = output["logits"][:, known_classes:]
        loss = criterion(logits, aux_targets)

        _, preds = torch.max(logits, dim=1)
        # preds and aux_targets already share the same shape; compare directly
        # (the previous expand_as was a no-op).
        correct = preds.eq(aux_targets).cpu().sum().item()
        total = len(aux_targets)

        return {
            "loss": loss,
            "logits": output["logits"],
        }, {
            "acc_metric": {"train_acc": {"correct": correct, "total": total}},
            "loss_metric": {"cross_entropy": loss.item()},
        }

    def get_name(self) -> str:
        """Return strategy name for logging."""
        return "standard"


class MixupTraining(TrainingStrategy):
    """Mixup training strategy on the current task's classes.

    Blends random pairs of inputs with a Beta-sampled weight and trains on the
    lam-weighted combination of both labels' losses.
    """

    def __init__(self, alpha: float = 0.3):
        # Beta-distribution concentration; alpha <= 0 disables mixing (lam = 1).
        self.alpha = alpha

    def process_batch(
        self, batch, batch_idx, criterion, network, device, known_classes, forward_fn
    ):
        """Run one mixup training step.

        Args:
            batch: Tuple ``(index, inputs, targets)``; the first item is unused.
            batch_idx: Unused; kept for interface compatibility.
            criterion: Loss function called as ``criterion(logits, targets)``.
            network: Model, invoked via ``forward_fn``.
            device: Device the batch tensors are moved to.
            known_classes: Count of classes from previous tasks; labels are
                remapped to ``target - known_classes``.
            forward_fn: Callable ``(network, inputs) -> {"logits": Tensor}``.

        Returns:
            Tuple ``(outputs, metrics)`` mirroring StandardTraining.
        """
        del batch_idx
        _, inputs, targets = batch
        inputs, targets = inputs.to(device), targets.to(device)
        # Remap absolute labels to task-local ones (torch.where returns a new
        # tensor, so no clone is needed); old-class samples trip the assert.
        aux_targets = torch.where(
            targets - known_classes >= 0,
            targets - known_classes,
            -1,
        )
        assert aux_targets.min() >= 0, "aux_targets should be non-negative"

        mix_inputs, (y_a, y_b, lam) = self._mixup_data(inputs, aux_targets)

        output = forward_fn(network, mix_inputs)
        logits = output["logits"][:, known_classes:]
        loss = self._mixup_criterion(criterion, logits, y_a, y_b, lam)

        _, preds = torch.max(logits, dim=1)
        # Accuracy is the lam-weighted agreement with both mixed label sets.
        correct = (
            lam * preds.eq(y_a).cpu().sum().float()
            + (1 - lam) * preds.eq(y_b).cpu().sum().float()
        ).item()
        total = len(y_a)

        return {
            "loss": loss,
            "logits": output["logits"],
        }, {
            "acc_metric": {"train_acc": {"correct": correct, "total": total}},
            "loss_metric": {"cross_entropy": loss.item()},
        }

    def _mixup_criterion(self, criterion, pred, y_a, y_b, lam):
        """Lam-weighted sum of the losses against both label sets."""
        return lam * criterion(pred, y_a) + (1 - lam) * criterion(pred, y_b)

    def _mixup_data(self, x, y):
        """Returns mixed inputs, pairs of targets, and lambda."""
        if self.alpha > 0:
            lam = torch.distributions.Beta(self.alpha, self.alpha).sample()
        else:
            lam = 1

        batch_size = x.size(0)
        # Create the permutation on the batch's device so indexing works on
        # GPU as well (consistent with ClassAugTraining._mixup_data).
        index = torch.randperm(batch_size, device=x.device)

        mixed_x = lam * x + (1 - lam) * x[index, :]
        y_a, y_b = y, y[index]
        return mixed_x, (y_a, y_b, lam)

    def get_name(self) -> str:
        """Return strategy name for logging."""
        return f"mixup(α={self.alpha})"


class ClassAugTraining(TrainingStrategy):
    """Class-augmentation training strategy.

    Synthesizes extra "virtual" classes by blending pairs of samples that carry
    different labels; each unordered label pair maps to a unique synthetic
    class id (see ``generate_label``), and the batch is extended with the
    blended samples before the forward pass.
    """

    def __init__(self, alpha: float = 20, mix_times=4, cls_per=20):
        self.alpha = alpha          # Beta concentration for the mixing weight
        self.mix_times = mix_times  # number of random-permutation mixing passes
        self.cls_per = cls_per      # number of real classes in the current task

    def process_batch(
        self, batch, batch_idx, criterion, network, device, known_classes, forward_fn
    ):
        """Run one class-augmented training step.

        Args:
            batch: Tuple ``(index, inputs, targets)``; the first item is unused.
            batch_idx: Unused; kept for interface compatibility.
            criterion: Loss function called as ``criterion(logits, targets)``.
            network: Model, invoked via ``forward_fn``.
            device: Device the batch tensors are moved to.
            known_classes: Count of classes from previous tasks; labels are
                remapped to ``target - known_classes``.
            forward_fn: Callable ``(network, inputs) -> {"logits": Tensor}``.

        Returns:
            Tuple ``(outputs, metrics)``; accuracy is measured only on the
            original (non-synthetic) samples of the batch.
        """
        del batch_idx
        _, inputs, targets = batch
        batch_size = inputs.size(0)
        inputs, targets = inputs.to(device), targets.to(device)
        # Remap absolute labels to task-local ones (torch.where returns a new
        # tensor, so no clone is needed); old-class samples trip the assert.
        aux_targets = torch.where(
            targets - known_classes >= 0,
            targets - known_classes,
            -1,
        )
        assert aux_targets.min() >= 0, "aux_targets should be non-negative"

        mix_inputs, mix_tgt = self._mixup_data(inputs, aux_targets)

        output = forward_fn(network, mix_inputs)
        logits = output["logits"][:, known_classes:]
        loss = criterion(logits, mix_tgt)

        _, preds = torch.max(logits, dim=1)

        # Accuracy on the first `batch_size` rows only — the synthetic samples
        # appended by _mixup_data are excluded from the reported metric.
        correct = (
            preds[:batch_size].eq(mix_tgt[:batch_size]).cpu().sum().float()
        ).item()
        total = batch_size

        return {
            "loss": loss,
            "logits": output["logits"],
        }, {
            "acc_metric": {"train_acc": {"correct": correct, "total": total}},
            "loss_metric": {"cross_entropy": loss.item()},
        }

    def get_name(self) -> str:
        """Return strategy name for logging."""
        return f"ClassAug(α={self.alpha}, times={self.mix_times}, cls_per={self.cls_per})"

    def _mixup_data(self, x, y):  # mixup based
        """Append blended samples labelled with synthetic pair-class ids.

        For each of ``mix_times`` random permutations, every sample whose
        label differs from its permuted partner is blended (lam clamped to
        [0.4, 0.6]) and labelled via ``generate_label``.
        """
        batch_size = x.size(0)
        mix_data = []
        mix_target = []
        for _ in range(self.mix_times):
            index = torch.randperm(batch_size).to(x.device)
            # Materialize the permuted views once per pass instead of
            # re-indexing x[index] / y[index] for every element.
            x_perm, y_perm = x[index], y[index]
            for i in range(batch_size):
                if y[i] != y_perm[i]:
                    new_label = self.generate_label(
                        y[i].item(), y_perm[i].item(), self.cls_per
                    )
                    lam = torch.distributions.Beta(self.alpha, self.alpha).sample()
                    # Keep the blend close to 50/50 so the synthetic class is
                    # distinguishable from both of its parent classes.
                    if lam < 0.4 or lam > 0.6:
                        lam = 0.5
                    mix_data.append(lam * x[i] + (1 - lam) * x_perm[i])
                    mix_target.append(new_label)

        new_target = torch.Tensor(mix_target)
        y = torch.cat((y, new_target.to(y.device).long()), 0)
        # Single stack + concat instead of growing x one row at a time (O(n^2)).
        if mix_data:
            x = torch.cat((x, torch.stack(mix_data)), 0)
        return x, y

    @staticmethod
    def generate_label(y_a, y_b, cls_per):
        """Map an unordered pair of distinct class ids to a unique synthetic id.

        Pairs ``(a, b)`` with ``a < b`` are enumerated in lexicographic order
        over the upper triangle of the ``cls_per x cls_per`` grid, then offset
        by ``cls_per`` so synthetic ids follow the real class ids.
        """
        assert y_a != y_b
        if y_a > y_b:
            y_a, y_b = y_b, y_a
        # (2*cls_per - y_a - 1) * y_a is always even, so integer division is
        # exact and keeps the label an int (the old `/ 2` produced a float).
        label_index = ((2 * cls_per - y_a - 1) * y_a) // 2 + (y_b - y_a) - 1
        return label_index + cls_per
