import torch
import torch.nn as nn
from utilscam import grid_sample_wrapper, mesh_grid,  batch_indexing, softmax, timer
from mlp import Conv1dNormRelu, Conv2dNormRelu

class FusionAwareInterp(nn.Module):
    """Scatter sparse per-point 3D features onto a dense 2D feature grid.

    Each pixel gathers its k nearest projected points (indices are
    precomputed and passed in as ``knn_indices``); their 3D features are
    weighted by a small score network conditioned on the 2D offset
    between pixel and point, summed, and refined by a final conv.
    """

    def __init__(self, n_channels_3d, k=1, norm=None):
        super().__init__()
        self.k = k
        self.out_conv = Conv2dNormRelu(n_channels_3d, n_channels_3d, norm=norm)
        # Score input has 3 channels: [dx, dy, ||(dx, dy)||_2].
        self.score_net = nn.Sequential(
            Conv2dNormRelu(3, 16),
            Conv2dNormRelu(16, n_channels_3d, act='sigmoid'),
        )

    def forward(self, uv, feat_2d, feat_3d, knn_indices):
        """
        :param uv: projected 2D coords of the points, [B, 2, N]
        :param feat_2d: dense image features, [B, C2d, H, W] (shape only is used)
        :param feat_3d: per-point features, [B, C3d, N]
        :param knn_indices: per-pixel nearest-point indices, [B, HW, k]
        :return: interpolated dense 3D features, [B, C3d, H, W]
        """
        batch_size, _, img_h, img_w = feat_2d.shape
        c3d = feat_3d.shape[1]

        # Dense pixel coordinate grid, flattened to [B, 2, HW].
        pixel_grid = mesh_grid(batch_size, img_h, img_w, uv.device)
        pixel_grid = pixel_grid.reshape([batch_size, 2, -1])

        # Gather coords and features of each pixel's k nearest points in one pass.
        gathered = batch_indexing(torch.cat([uv, feat_3d], dim=1), knn_indices)
        nn_uv, nn_feat = torch.split(gathered, [2, c3d], dim=1)  # [B,2,HW,k], [B,C3d,HW,k]

        # 2D displacement from pixel to neighbor, plus its Euclidean length.
        offset = nn_uv - pixel_grid[..., None]                        # [B, 2, HW, k]
        offset_len = torch.linalg.norm(offset, dim=1, keepdim=True)   # [B, 1, HW, k]

        # Per-channel sigmoid gates from the offset geometry.
        gate = self.score_net(torch.cat([offset, offset_len], dim=1))  # [B, C3d, HW, k]

        # Gated sum over the k neighbors, reshaped back to the image plane.
        fused = (gate * nn_feat).sum(dim=-1)
        fused = fused.reshape(batch_size, -1, img_h, img_w)
        return self.out_conv(fused)



class SKFusion(nn.Module):
    """Selective-kernel style fusion of a 2D and a 3D feature stream.

    Both streams are first aligned to ``out_channels``, then a squeeze-
    excitation-like branch predicts a pair of per-channel weights that
    softly select between the two streams.

    :param in_channels_2d: channel count of the 2D input stream
    :param in_channels_3d: channel count of the 3D input stream
    :param out_channels: common channel count after alignment
    :param feat_format: 'nchw' for [B, C, H, W] inputs, 'ncm' for [B, C, M]
    :param norm: normalization passed to the alignment convs
    :param reduction: bottleneck ratio of the weight-predicting MLP
    :raises ValueError: if ``feat_format`` is not 'nchw' or 'ncm'
    """

    def __init__(self, in_channels_2d, in_channels_3d, out_channels, feat_format, norm=None, reduction=1):
        super().__init__()

        if feat_format == 'nchw':
            self.align1 = Conv2dNormRelu(in_channels_2d, out_channels, norm=norm)
            self.align2 = Conv2dNormRelu(in_channels_3d, out_channels, norm=norm)
            self.avg_pool = nn.AdaptiveAvgPool2d(1)
        elif feat_format == 'ncm':
            self.align1 = Conv1dNormRelu(in_channels_2d, out_channels, norm=norm)
            self.align2 = Conv1dNormRelu(in_channels_3d, out_channels, norm=norm)
            self.avg_pool = nn.AdaptiveAvgPool1d(1)
        else:
            # Fail loudly with the offending value instead of a bare ValueError.
            raise ValueError(f"feat_format must be 'nchw' or 'ncm', got {feat_format!r}")

        # Squeeze: bottleneck MLP over globally pooled features.
        self.fc_mid = nn.Sequential(
            nn.Linear(out_channels, out_channels // reduction, bias=False),
            nn.ReLU(inplace=True),
        )
        # Excite: predict two gates per channel (one per stream).
        self.fc_out = nn.Sequential(
            nn.Linear(out_channels // reduction, out_channels * 2, bias=False),
            nn.Sigmoid(),
        )

    def forward(self, feat_2d, feat_3d):
        """Fuse the two aligned streams with predicted per-channel weights.

        :param feat_2d: 2D stream, [B, C2d, H, W] or [B, C2d, M] per feat_format
        :param feat_3d: 3D stream, same spatial layout as ``feat_2d``
        :return: fused features, [B, out_channels, ...] matching the input layout
        """
        bs = feat_2d.shape[0]

        feat_2d = self.align1(feat_2d)
        feat_3d = self.align2(feat_3d)

        # Global context of the summed streams -> per-channel pair of weights.
        weight = self.avg_pool(feat_2d + feat_3d).reshape([bs, -1])  # [bs, C]
        weight = self.fc_mid(weight)  # [bs, C / r]
        weight = self.fc_out(weight).reshape([bs, -1, 2])  # [bs, C, 2]
        # Softmax over the stream axis makes the two gates compete (sum to 1).
        weight = softmax(weight, dim=-1)
        w1, w2 = weight[..., 0], weight[..., 1]  # [bs, C]

        # Broadcast the gates over the spatial dims of either layout.
        if len(feat_2d.shape) == 4:
            w1 = w1.reshape([bs, -1, 1, 1])
            w2 = w2.reshape([bs, -1, 1, 1])
        else:
            w1 = w1.reshape([bs, -1, 1])
            w2 = w2.reshape([bs, -1, 1])

        return feat_2d * w1 + feat_3d * w2