import torch
from torch import nn
from torch.nn import functional as F

# from lib.algorithms.loss import LOSS
from lib.similarity import euclid_dist
from lib.base import BaseLoss


# @LOSS.register('protonet_loss')
class ProtoNetLoss(BaseLoss):
    """Prototypical-network episodic cross-entropy loss.

    Each query embedding's (scaled) similarity to every class prototype is
    treated as the logit vector of an ``n_way``-way classification problem,
    and standard cross-entropy is applied over all queries in the batch.
    """

    # Softmax temperature applied to the similarity logits (dividing by 0.5
    # doubles the logits, i.e. sharpens the softmax).
    TEMPERATURE = 0.5

    def __init__(self, cfg):
        super().__init__(cfg)
        self.cfg = cfg

        # Similarity function between prototypes and queries.
        # NOTE(review): F.cross_entropy in forward() uses these scores
        # directly as logits, so this helper must return values where LARGER
        # means MORE similar (e.g. negated Euclidean distance). The original
        # author's comment ("a negative sign tricked me for a long time")
        # suggests the sign is handled inside euclid_dist — confirm against
        # lib.similarity.euclid_dist.
        self.similarity = euclid_dist

    def forward(self, data_f, labels, type):
        """Compute the mean episodic cross-entropy loss.

        Args:
            data_f: batched episode features, assumed to be laid out as
                ``(batch_size, n_way + n_way * n_query, dim)`` with the
                ``n_way`` class prototypes first, then the query features
                grouped per class (``n_query`` consecutive rows per class)
                — TODO confirm layout against the episode sampler.
            labels: only consulted for its ``.device``; the episodic targets
                are rebuilt from ``arange(n_way)`` below.
            type: split key indexing the per-split episode-shape attributes
                (``batch_size`` / ``n_way`` / ``n_query``) on ``BaseLoss``.
                (Shadows the builtin ``type``; kept for caller compatibility.)

        Returns:
            Scalar tensor: the loss averaged over every query of every
            episode in the batch (cross_entropy's default 'mean' reduction).
        """
        batch_size = self.batch_size[type]
        n_way = self.n_way[type]
        n_query = self.n_query[type]

        # Split prototypes from queries along the sample axis.
        support_sets_f = data_f[:, :n_way, :]  # (batch, n_way, dim)
        query_sets_f = data_f[:, n_way:, :]    # (batch, n_way * n_query, dim)

        # Per-episode query-vs-prototype scores, e.g. (batch, n_way * n_query,
        # n_way), flattened to (batch * n_way * n_query, n_way) logits.
        sim_matrix = self.similarity(support_sets_f, query_sets_f)
        sim_matrix = sim_matrix.flatten(start_dim=0, end_dim=1)

        # Build targets [0..0, 1..1, ..., n_way-1..] directly on the target
        # device (avoids a CPU allocation + copy); class c owns n_query
        # consecutive query rows. arange with int args is already int64,
        # which is what cross_entropy expects.
        targets = torch.arange(n_way, device=labels.device)
        targets = targets.view(1, n_way, 1).expand(batch_size, n_way, n_query)
        targets = targets.reshape(-1)

        # NOTE(review): larger logit must mean "more similar" here — if
        # euclid_dist returns raw (positive) distances the sign must be
        # flipped before this call; see the note in __init__.
        loss = F.cross_entropy(sim_matrix / self.TEMPERATURE, targets)

        return loss
