import torch
import torch.nn as nn


# loss function with rel/abs Lp loss
class LpLoss:
    """Relative / absolute Lp-norm loss between predicted and target fields.

    Each example (row of the leading batch dimension) is flattened and the
    Lp norm is taken over all remaining elements.

    Args:
        d: spatial dimension of the underlying mesh; used only by ``abs``
            to scale the norm by the uniform mesh spacing ``h ** (d / p)``.
        p: order of the Lp norm (must be positive).
        size_average: if True (and ``reduction`` is True), average the
            per-example losses over the batch; otherwise sum them.
        reduction: if False, return the unreduced per-example loss tensor.

    Raises:
        ValueError: if ``d`` or ``p`` is not positive.
    """

    def __init__(self, d=2, p=2, size_average=True, reduction=True):
        super().__init__()

        # Dimension and Lp-norm order must be positive. Raise instead of
        # assert so the check survives `python -O` (asserts are stripped).
        if d <= 0 or p <= 0:
            raise ValueError("d and p must be positive")

        self.d = d
        self.p = p
        self.reduction = reduction
        self.size_average = size_average

    def _reduce(self, per_example):
        # Apply the configured reduction: mean / sum / no reduction.
        if self.reduction:
            if self.size_average:
                return torch.mean(per_example)
            return torch.sum(per_example)
        return per_example

    def abs(self, x, y):
        """Absolute Lp loss scaled by the uniform mesh spacing.

        Assumes a uniform mesh with ``x.size(1)`` grid points per
        dimension (so at least 2 points along dim 1 — TODO confirm with
        callers), giving spacing ``h = 1 / (n - 1)``.
        """
        num_examples = x.size(0)

        # Assume uniform mesh
        h = 1.0 / (x.size(1) - 1.0)

        # reshape (not view) so non-contiguous inputs (slices, transposes)
        # are accepted, consistent with rel().
        all_norms = (h ** (self.d / self.p)) * torch.norm(
            x.reshape(num_examples, -1) - y.reshape(num_examples, -1), self.p, 1
        )

        return self._reduce(all_norms)

    def rel(self, x, y, eps=0.0):
        """Relative Lp loss: ``||x - y||_p / (||y||_p + eps)`` per example.

        Args:
            eps: optional stabilizer added to the *denominator* to avoid
                division by zero. Defaults to 0.0, preserving the original
                behavior for existing callers.
        """
        num_examples = x.size(0)

        diff_norms = torch.norm(
            x.reshape(num_examples, -1) - y.reshape(num_examples, -1), self.p, 1
        )
        y_norms = torch.norm(y.reshape(num_examples, -1), self.p, 1)

        return self._reduce(diff_norms / (y_norms + eps))

    def rel_batch(self, x, y, batch, num_graphs):
        """Mean relative Lp loss over the graphs of a node-batched tensor.

        Args:
            batch: per-node graph-index tensor (PyG-style batching —
                presumably; verify against caller).
            num_graphs: number of graphs present in ``batch``.

        Raises:
            ValueError: if the loss for any graph is NaN.
        """
        loss = torch.tensor(0.0, dtype=x.dtype, device=x.device)
        for i in range(num_graphs):
            mask = batch == i
            # Stabilize the denominator only. The previous code added 1e-8
            # to y itself, which also perturbed the numerator ||x - (y+eps)||
            # and slightly biased the loss.
            rel_loss = self.rel(x[mask][None,], y[mask][None,], eps=1e-8)
            if torch.isnan(rel_loss).any():
                raise ValueError(f"NaN detected in rel_loss for graph {i}")
            loss = loss + rel_loss
        loss /= num_graphs
        return loss

    def __call__(self, x, y, batch=None, num_graphs=None):
        """Dispatch: plain relative loss, or per-graph mean when ``batch`` is given."""
        if batch is None:
            return self.rel(x, y)
        return self.rel_batch(x, y, batch, num_graphs=num_graphs)
