import torch
from learners.multi_base_ca import Learner as BaselineLearner
from models.multi_net import MultiSSL
import torch.nn.functional as F
from typing import Dict, Any

# Number of DataLoader worker processes.
# NOTE(review): not referenced anywhere in this chunk — presumably consumed by
# data-loading code elsewhere in the project; confirm before removing.
num_workers = 8


class Learner(BaselineLearner):
    """Incremental learner that extends the baseline with a supervised
    contrastive auxiliary loss over per-task SSL projections.

    Per batch it combines (a) cross-entropy on the current task's logits and
    (b) a SupCon-style loss contrasting the latest task's projected features
    (anchors) against the earlier tasks' projections.
    """

    _network: MultiSSL

    def __init__(self, args, model_func=MultiSSL):
        super().__init__(args, model_func)

    def _initialize_meters(self):
        """Register this learner's loss meters on top of the baseline's."""
        super()._initialize_meters()
        self.loss_meter.add(self.loss_family)

    @property
    def loss_family(self):
        """Names of the per-batch losses tracked by ``loss_meter``."""
        return ["cross_entropy", "contrastive"]

    def _inc_train_batch(self, batch, batch_idx, criterion):
        """Handle a single batch of incremental training.

        Args:
            batch: tuple of (indices, inputs, targets); indices are unused.
            batch_idx: unused.
            criterion: classification loss over the new-task logits. Samples
                from previously-known classes receive label -1
                (NOTE(review): presumably ``ignore_index=-1`` — confirm).

        Returns:
            Tuple of (out, o_metrics): ``out`` holds the total loss and full
            logits; ``o_metrics`` holds accuracy and per-loss scalars.
        """
        del batch_idx
        _, inputs, targets = batch
        inputs, targets = inputs.to(self._device), targets.to(self._device)
        # Remap labels to the current task's label space; samples belonging
        # to previously-known classes are marked with -1.
        aux_targets = torch.where(
            targets - self._known_classes >= 0,
            targets - self._known_classes,
            -1,
        )
        output = self.forward_train(self._network, inputs)
        # Score only the classes introduced by the current task.
        logits = output["logits"][:, self._known_classes :]
        loss = criterion(logits, aux_targets)
        preds = torch.argmax(logits, dim=1)
        correct = preds.eq(aux_targets).cpu().sum().item()
        total = len(aux_targets)

        out = {
            "loss": loss,
            "logits": output["logits"],
        }
        o_metrics: Dict[str, Any] = {
            "acc_metric": {"train_acc": {"correct": correct, "total": total}},
            "loss_metric": {"cross_entropy": loss.item()},
        }

        # Per-task projected features: [B, T, D].
        proj_feats = self._network.extract_token_ssl(inputs, proj_mode="all")
        num_tasks = proj_feats.size(1)
        per_task = torch.unbind(proj_feats, dim=1)
        # The current (last) task's projection is the anchor view; all
        # earlier tasks' projections form the contrast set.
        anchor_feats = per_task[-1]
        anchor_tgts = aux_targets[:, None]
        contrast_feats = torch.cat(per_task[:-1], dim=0)
        contrast_tgts = (
            aux_targets[None, :, None].repeat(num_tasks - 1, 1, 1).reshape(-1, 1)
        )
        # NOTE(review): old-class samples keep label -1 here, so they act as
        # mutual positives inside the contrastive loss — confirm intended.
        loss_contra = self.supcon_loss(
            (anchor_feats, anchor_tgts),
            (contrast_feats, contrast_tgts),
            temperature=self.args["cl_temperature"],
            base_temperature=self.args["cl_base_temperature"],
        )

        # Out-of-place add: avoids mutating the criterion's output tensor,
        # which an in-place += could do and trip autograd's version check.
        out["loss"] = out["loss"] + loss_contra
        o_metrics["loss_metric"]["contrastive"] = loss_contra.item()

        return out, o_metrics

    def supcon_loss(
        self,
        anchor_pair,
        contrast_pair,
        temperature=0.07,
        base_temperature=0.07,
    ):
        """Supervised contrastive loss between anchors and contrast views.

        Follows the SupCon formulation (Khosla et al., 2020): each anchor is
        pulled toward all same-label candidates and pushed from the rest.

        Args:
            anchor_pair: tuple of (features [B, D], targets [B, 1]).
            contrast_pair: tuple of (features [M, D], targets [M, 1]); the
                candidate set is anchors followed by contrasts.
            temperature: softmax temperature for the similarity logits.
            base_temperature: reference temperature used to scale the loss.

        Returns:
            Scalar loss tensor (mean over anchors).
        """
        anchor_feats = F.normalize(anchor_pair[0], p=2, dim=-1)
        contrast_feats = F.normalize(contrast_pair[0], p=2, dim=-1)
        anchor_tgts = anchor_pair[1]
        contrast_tgts = contrast_pair[1]

        batch_size = anchor_feats.size(0)
        device = anchor_feats.device

        # Positive-pair mask, [B, B + M]: 1 where labels match (broadcast of
        # [B, 1] against [1, B + M]).
        all_tgts = torch.cat((anchor_tgts, contrast_tgts), dim=0).to(torch.long)
        mask = torch.eq(anchor_tgts, all_tgts.T).float().to(device)

        # Temperature-scaled cosine similarities between each anchor and all
        # candidates; subtract the per-row max for numerical stability.
        all_feats = torch.cat((anchor_feats, contrast_feats), dim=0)
        anchor_dot_all = torch.div(torch.matmul(anchor_feats, all_feats.T), temperature)
        logits_max, _ = torch.max(anchor_dot_all, dim=1, keepdim=True)
        logits = anchor_dot_all - logits_max.detach()

        # Exclude self-contrast: the first ``batch_size`` columns of the
        # candidate set are the anchors themselves, so zero the diagonal.
        logits_mask = torch.scatter(
            torch.ones_like(mask),
            1,
            torch.arange(batch_size).view(-1, 1).to(device),
            0,
        )
        mask = mask * logits_mask

        # Log-softmax over the non-self candidates.
        exp_logits = torch.exp(logits) * logits_mask
        log_prob = logits - torch.log(exp_logits.sum(1, keepdim=True))

        # Mean log-probability over each anchor's positives; anchors with no
        # positives divide by 1 and contribute 0 (their mask row sums to 0).
        mask_pos_pairs = mask.sum(dim=1)
        mask_pos_pairs = torch.where(mask_pos_pairs < 1e-6, 1, mask_pos_pairs)
        mean_log_prob_pos = (mask * log_prob).sum(dim=1) / mask_pos_pairs

        # Temperature-ratio scaling as in the SupCon paper, averaged over B.
        loss = -(temperature / base_temperature) * mean_log_prob_pos
        return loss.mean()
