import torch
import torch.nn as nn
from torch_geometric.nn.pool import global_add_pool


# loss function with rel/abs Lp loss
class LpLoss(object):
    """Relative / absolute Lp loss.

    Computes Lp-norm based losses between a prediction ``x`` and a target
    ``y``, either per-sample on dense tensors (``abs`` / ``rel``) or
    per-graph on PyG-style node features (``rel_batch``).

    Args:
        d: spatial dimension of the underlying mesh (used by ``abs``).
        p: order of the Lp norm.
        size_average: if reducing, take the mean over samples (else the sum).
        reduction: if True, reduce the per-sample losses to a scalar.
    """

    def __init__(self, d=2, p=2, size_average=True, reduction=True):
        super(LpLoss, self).__init__()

        # Dimension and Lp-norm order must both be positive.
        assert d > 0 and p > 0

        self.d = d
        self.p = p
        self.reduction = reduction
        self.size_average = size_average

    def abs(self, x, y):
        """Absolute Lp loss on a uniform mesh.

        Assumes ``x``/``y`` have shape ``(batch, n, ...)`` where ``n`` is the
        number of mesh points per dimension -- TODO confirm against callers.
        """
        num_examples = x.size()[0]

        # Assume a uniform mesh: h is the grid spacing for n points on [0, 1].
        h = 1.0 / (x.size()[1] - 1.0)

        # Scale by h**(d/p) so the discrete norm approximates the continuous
        # Lp norm over the d-dimensional domain.
        all_norms = (h ** (self.d / self.p)) * torch.norm(
            x.view(num_examples, -1) - y.view(num_examples, -1), self.p, 1
        )

        if self.reduction:
            if self.size_average:
                return torch.mean(all_norms)
            else:
                return torch.sum(all_norms)

        return all_norms

    def rel(self, x, y, dim=None):
        """Relative Lp loss ``||x - y||_p / ||y||_p`` along ``dim``.

        Returns 0.0 for empty inputs. NOTE(review): the denominator is not
        regularised here; callers must ensure ``y`` has nonzero norm (see
        ``rel_batch_data``, which adds 1e-8 to its targets for this reason).
        """
        if x.numel() == 0 or y.numel() == 0:
            return torch.tensor(0.0, device=x.device)

        diff_norms = torch.norm(x - y, self.p, dim=dim)

        y_norms = torch.norm(y, self.p, dim=dim)

        rel_l2 = diff_norms / y_norms

        if self.reduction:
            if self.size_average:
                return torch.mean(rel_l2)
            else:
                return torch.sum(rel_l2)

        return rel_l2

    def rel_batch(self, x, y, batch, mask=None):
        """Per-graph relative L2 loss for PyG node features.

        ``batch`` maps each node to its graph index; ``global_add_pool`` sums
        squared entries per graph, and the 1e-8 inside the square roots
        regularises both norms against empty/zero graphs. ``mask`` optionally
        restricts the loss to a subset of graphs.
        """
        diff = x - y

        # Per-graph L2 norms (summed over nodes, per feature channel).
        batch_diff_norm = torch.sqrt(global_add_pool(torch.pow(diff, 2), batch) + 1e-8)

        y_norms = torch.sqrt(global_add_pool(torch.pow(y, 2), batch) + 1e-8)

        if mask is not None:
            rel_l2 = batch_diff_norm[mask] / y_norms[mask]
        else:
            rel_l2 = batch_diff_norm / y_norms

        # All graphs masked out: nothing to reduce.
        if rel_l2.numel() == 0:
            return torch.tensor(0.0, device=x.device)

        if self.reduction:
            if self.size_average:
                return torch.mean(rel_l2)
            else:
                return torch.sum(rel_l2)

        return rel_l2

    def rel_batch_data(self, batch_data, pred_vel, pred_press, pred_cd):
        """Sum of per-sample relative losses over a heterogeneous PyG batch.

        For each graph, exactly one of the flags ``mask_vel`` / ``mask_press``
        / ``mask_cd`` selects which target it is scored against -- presumably
        set by the data pipeline; verify against callers.
        """
        # Accumulate on the predictions' device instead of hard-coding
        # .cuda(), so the loss also works on CPU-only machines and on
        # non-default GPUs.
        batch_loss = torch.zeros((), dtype=torch.float32, device=pred_vel.device)
        for i in range(batch_data.num_graphs):
            mask = i == batch_data.batch
            gt_vel = batch_data.norm_velocity[mask]
            gt_press = batch_data.norm_pressure[mask]
            gt_cd = batch_data.cd_data

            # 1e-8 is added to the targets so the denominator in rel()
            # is never exactly zero.
            if batch_data.mask_vel[i]:
                sample_pred_vel = pred_vel[mask]
                batch_loss += self.rel(sample_pred_vel[None,], gt_vel[None,] + 1e-8)

            elif batch_data.mask_press[i]:
                sample_pred_press = pred_press[mask]
                batch_loss += self.rel(sample_pred_press[None,], gt_press[None,] + 1e-8)

            elif batch_data.mask_cd[i]:
                sample_pred_cd = pred_cd[i]
                batch_loss += self.rel(sample_pred_cd[None,], gt_cd[i][None,] + 1e-8)

        return batch_loss

    def __call__(self, x, y, batch=None, mask=None, dim=None):
        """Dispatch: dense relative loss, or per-graph loss when ``batch`` is given."""
        if batch is None:
            return self.rel(x, y, dim)
        else:
            return self.rel_batch(x, y, batch=batch, mask=mask)
