from abc import abstractmethod
import torch
from torch import nn
# from lib.similarity import SIMILARITY
from torch.nn import functional as F


class BaseLoss(nn.Module):
    """Base class for episodic (few-shot) losses.

    Caches the per-split episode geometry (n_way / n_shot / n_query and
    batch size) from the config so subclasses can look them up by split
    name instead of re-reading cfg on every call.
    """

    # Config splits whose episode parameters are cached in __init__.
    _SPLITS = ('TRAIN', 'TEST', 'VALIDATE')

    def __init__(self, cfg):
        super().__init__()
        self.cfg = cfg

        # One dict per parameter, keyed by split name. cfg[split] must expose
        # N_WAY / N_SHOT / N_QUERY / BATCH_SIZE as attributes (yacs-style
        # CfgNode supports both item and attribute access, which the original
        # code mixed; subscript access is used consistently here).
        self.n_way = {s: cfg[s].N_WAY for s in self._SPLITS}
        self.n_shot = {s: cfg[s].N_SHOT for s in self._SPLITS}
        self.n_query = {s: cfg[s].N_QUERY for s in self._SPLITS}
        self.batch_size = {s: cfg[s].BATCH_SIZE for s in self._SPLITS}

    def episodic_loss(self, logits_q, labels, type):
        """Cross-entropy over query logits with targets rebuilt from episode shape.

        Args:
            logits_q: query logits, shape (batch_size * n_way * n_query, n_way)
                — presumably; inferred from the target construction below.
            labels: any tensor on the target device. Only its device is read;
                the class ids themselves are regenerated as 0..n_way-1.
            type: split name, one of 'TRAIN' / 'TEST' / 'VALIDATE'.
                (NOTE: shadows the builtin `type`; kept for caller compatibility.)

        Returns:
            Scalar cross-entropy loss tensor.
        """
        batch_size = self.batch_size[type]
        n_way = self.n_way[type]
        n_query = self.n_query[type]

        # Build [0..n_way-1], each id repeated n_query times per episode,
        # for every episode in the batch, then flatten:
        # (n_way,) -> (1, n_way, 1) -> (batch, n_way, n_query) -> (batch*n_way*n_query,)
        targets = torch.arange(n_way, dtype=torch.int64, device=labels.device)
        targets = targets.view(1, n_way, 1).expand(batch_size, n_way, n_query)
        return F.cross_entropy(logits_q, targets.flatten())

    def cross_entropy(self, logits_q, labels):
        """Plain cross-entropy between logits and integer class labels."""
        return F.cross_entropy(logits_q, labels)

    @abstractmethod
    def forward(self, data_f, labels, type):
        """Compute the loss for one batch; must be implemented by subclasses."""
        pass
