# third-party imports
import numpy as np
import torch
import torch.nn as nn


class ContrastiveLoss(nn.Module):
    """
    Calculate the loss as presented in the SURFMNet paper.

    The total loss combines three weighted terms:
      * an identity penalty pushing both functional maps toward the identity,
      * a norm penalty pushing spectral feature columns toward unit norm,
      * a pattern penalty promoting a uniform energy distribution of each
        feature across the eigenbasis.
    """
    def __init__(self, w_id=1e3, w_norm=1, w_pattern=1):
        """Init SURFMNetLoss

        Keyword Arguments:
            w_id {float} -- weight of the identity penalty (default: 1e3)
            w_norm {float} -- weight of the norm penalty (default: 1)
            w_pattern {float} -- weight of the pattern penalty (default: 1)
        """
        super().__init__()
        self.w_id = w_id
        self.w_pattern = w_pattern # to promote distribution of feature in spectral domain in a pre-defined pattern
        self.w_norm = w_norm # to guide computed feature in spatial space towards a normalized pattern in spectral domain
        self.SmoothL1Loss = nn.SmoothL1Loss(reduction='mean')
        # Instantiated once here rather than on every forward call.
        self.frobenius_loss = FrobeniusLoss()

    def forward(self, C1, C2, feat_1, feat_2, evecs_trans_1, evecs_trans_2, device):
        """Compute soft error loss

        Arguments:
            C1 {torch.Tensor} -- matrix representation of functional correspondence.
                                Shape: batch_size x num-eigenvectors x num-eigenvectors.
            C2 {torch.Tensor} -- matrix representation of functional correspondence.
                                Shape: batch_size x num-eigenvectors x num-eigenvectors.
            feat_1 {Torch.Tensor} -- learned feature 1. Shape: batch-size x num-vertices x num-features
            feat_2 {Torch.Tensor} -- learned feature 2. Shape: batch-size x num-vertices x num-features
            evecs_trans_1: {Torch.Tensor} -- inverse eigen vectors decomposition of shape 1. defined as evecs_x.t() @ mass_matrix.
                                             Shape: batch-size x num-eigenvectors x num-vertices
            evecs_trans_2: {Torch.Tensor} -- inverse eigen vectors decomposition of shape 2. defined as evecs_y.t() @ mass_matrix.
                                             Shape: batch-size x num-eigenvectors x num-vertices
            device {Torch.device} -- device used (cpu or gpu)
        Returns:
            tuple -- (total loss, identity penalty, norm loss, pattern loss),
                     each a scalar torch.Tensor
        """
        # Identity target, created directly on the target device; expand()
        # broadcasts over the batch without copying (it is only read below).
        eye = torch.eye(C1.size(1), C1.size(2), device=device).unsqueeze(0)
        eye_batch = eye.expand(C1.size(0), -1, -1)

        # Identity penalty: both functional maps should be near-identity.
        identity_penalty = self.frobenius_loss(C1, eye_batch) + self.frobenius_loss(C2, eye_batch)
        identity_penalty *= self.w_id

        # Spectral representation of the learned features
        F_hat = torch.bmm(evecs_trans_1, feat_1) # (B,K,D)
        G_hat = torch.bmm(evecs_trans_2, feat_2) # (B,K,D)

        # Normalization losses: each spectral feature column toward unit norm.
        F_hat_norm = torch.linalg.norm(F_hat, dim=-2) # (B,D)
        G_hat_norm = torch.linalg.norm(G_hat, dim=-2) # (B,D)

        # ones_like/zeros_like already inherit device and dtype from their input.
        ones_batch = torch.ones_like(F_hat_norm)
        loss_norm = self.SmoothL1Loss(F_hat_norm, ones_batch) + self.SmoothL1Loss(G_hat_norm, ones_batch)
        loss_norm *= self.w_norm

        # Squared L1 loss - currently the desired pattern is uniform distribution in frequency domain, that is, all entries in spectral domain equal to each other
        F_hat_squared = F_hat ** 2
        G_hat_squared = G_hat ** 2
        F_hat_squared_mean = torch.mean(F_hat_squared, dim=-2, keepdim=True)
        G_hat_squared_mean = torch.mean(G_hat_squared, dim=-2, keepdim=True)
        zeros_batch = torch.zeros_like(F_hat_squared)
        loss_pattern = self.SmoothL1Loss(F_hat_squared - F_hat_squared_mean, zeros_batch) \
            + self.SmoothL1Loss(G_hat_squared - G_hat_squared_mean, zeros_batch)
        loss_pattern *= self.w_pattern

        # Weighted sum of the three terms
        loss = identity_penalty + loss_norm + loss_pattern

        return loss, identity_penalty, loss_norm, loss_pattern


class FrobeniusLoss(nn.Module):
    """Mean squared Frobenius distance between batches of matrices.

    Given two tensors of shape (batch, rows, cols), computes the squared
    Frobenius norm of (a - b) per batch item and averages over the batch.
    """
    def __init__(self):
        super().__init__()

    def forward(self, a, b):
        """Return mean_b ||a_b - b_b||_F^2 as a scalar tensor.

        Arguments:
            a {torch.Tensor} -- shape (batch, rows, cols)
            b {torch.Tensor} -- same shape as ``a`` (or broadcastable to it)
        """
        # ``dim`` is the canonical PyTorch keyword; ``axis`` is only a
        # NumPy-compatibility alias.
        loss = torch.sum((a - b) ** 2, dim=(1, 2))
        return torch.mean(loss)