import torch
import torch.nn as nn
import torch.nn.functional as F
from mmcv.ops import DeformConv2d

class Conv_BN_ReLU(nn.Module):
    """A 3x3 (by default) convolution followed by batch norm and in-place ReLU.

    NOTE(review): the parameter name ``output_channles`` is a typo for
    ``output_channels``; it is preserved so keyword callers keep working.
    """

    def __init__(self, input_channels, output_channles,
                 kernel_size=3, stride=1, padding=1,
                 bias=True):
        super().__init__()
        conv = nn.Conv2d(input_channels, output_channles,
                         kernel_size=kernel_size, stride=stride,
                         padding=padding, bias=bias)
        norm = nn.BatchNorm2d(output_channles)
        act = nn.ReLU(inplace=True)
        # Attribute name "CBR" is kept so existing checkpoints still load.
        self.CBR = nn.Sequential(conv, norm, act)

    def forward(self, x):
        """Apply conv -> batch norm -> ReLU to ``x`` and return the result."""
        out = self.CBR(x)
        return out
    
class MotionAwareSpatialChannelAttension(nn.Module):
    """Global-context style attention over a feature map.

    A per-pixel softmax mask pools the input into a single [N, C, 1, 1]
    context vector, which is transformed into per-channel attention weights
    that rescale the feature map before a final 3x3 projection.

    NOTE(review): "Attension" in the class name is a typo for "Attention";
    the public name is kept unchanged for callers.
    """

    def __init__(self, input_channels, output_channels):
        super().__init__()

        # Pre-processing conv keeps the channel count unchanged.
        self.input_layer = Conv_BN_ReLU(input_channels, input_channels,
                                        kernel_size=3, stride=1, padding=1,
                                        bias=True)

        ## For spatial_pool: 1x1 conv produces one attention logit per pixel,
        ## normalized over the flattened spatial dimension (dim=2 of [N, 1, HW]).
        self.conv_mask = nn.Conv2d(input_channels, 1, kernel_size=1, stride=1, bias=False)
        self.softmax = nn.Softmax(dim=2)

        ## For attention_weight: channel bottleneck-free transform of the
        ## pooled context (LayerNorm operates on the [C, 1, 1] context shape).
        self.channel_mul_conv = nn.Sequential(
            nn.Conv2d(input_channels, input_channels, kernel_size=(1, 1), stride=(1, 1), bias=False),
            nn.LayerNorm([input_channels, 1, 1]),
            nn.ReLU(inplace=True),
            nn.Conv2d(input_channels, input_channels, kernel_size=(1, 1), stride=(1, 1), bias=False),
        )

        ## For output channels adjust (norm/activation intentionally disabled).
        self.output_layer = nn.Sequential(
            nn.Conv2d(input_channels, output_channels, kernel_size=3, stride=1, padding=1, bias=False),
            # nn.BatchNorm2d(output_channels),
            # nn.ReLU(inplace=True),
        )

    def spatial_pool(self, depth_feature):
        """Softmax-pool ``depth_feature`` [N, C, H, W] into [N, C, 1, 1]."""
        batch, channel, height, width = depth_feature.shape
        hw = height * width

        # Per-pixel logits -> probability mask over all spatial positions.
        mask = self.conv_mask(depth_feature).view(batch, 1, hw)  # [N, 1, HW]
        mask = self.softmax(mask).view(batch, hw, 1)             # [N, HW, 1]

        # Mask-weighted sum of pixel features:
        # [N, C, HW] x [N, HW, 1] -> [N, C, 1], reshaped to [N, C, 1, 1].
        flat = depth_feature.view(batch, channel, hw)
        context = torch.bmm(flat, mask).view(batch, channel, 1, 1)
        return context

    def forward(self, x):
        """Rescale ``x`` by learned channel attention, then project channels."""
        features = self.input_layer(x)

        # [N, C, 1, 1] context -> sigmoid-gated per-channel weights.
        context = self.spatial_pool(features)
        gate = torch.sigmoid(self.channel_mul_conv(context))

        out = self.output_layer(features * gate)
        return out

class FeatureWarper(nn.Module):
    """Warp an image feature map with deformable-conv offsets predicted
    from event features.

    Event features are passed through a motion-aware attention block, the
    result is mapped to per-group deformable sampling offsets (bounded to
    ``[-max_offset, max_offset]`` pixels via a sigmoid), and those offsets
    drive a ``DeformConv2d`` over the image features.
    """

    def __init__(self, img_feature_channels, ev_feature_channels):
        super().__init__()
        deform_kernel_size = 3
        deform_groups = 8
        assert img_feature_channels % deform_groups == 0

        # Predicted offsets are squashed into [-max_offset, max_offset] px.
        self.max_offset = 24

        ## 1. feature warping
        self.motion_extractor = MotionAwareSpatialChannelAttension(ev_feature_channels, ev_feature_channels)

        # DeformConv2d expects groups * k * k sampling points with 2 coords
        # (x, y) each; the predictor widens to 2x that before projecting down.
        offset_channels = deform_groups * deform_kernel_size * deform_kernel_size * 2
        self.deformable_conv_offset_predictor = nn.Sequential(
            nn.Conv2d(ev_feature_channels, offset_channels * 2,
                      kernel_size=3, stride=1, padding=1),
            nn.GELU(),
            nn.Conv2d(offset_channels * 2, offset_channels,
                      kernel_size=3, stride=1, padding=1),
        )

        self.deformable_conv = DeformConv2d(img_feature_channels, img_feature_channels,
                                            kernel_size=deform_kernel_size, stride=1, padding=deform_kernel_size//2,
                                            deform_groups=deform_groups)

    def forward(self, feature, event_feature):
        """Return ``feature`` warped by offsets predicted from ``event_feature``.

        Args:
            feature: image feature map, [N, img_feature_channels, H, W].
            event_feature: event feature map, [N, ev_feature_channels, H, W]
                (spatial size must match ``feature`` — TODO confirm callers).
        """
        ## 1. feature warping
        motion_inform = self.motion_extractor(event_feature)
        offsets = self.deformable_conv_offset_predictor(motion_inform)
        # torch.sigmoid replaces the deprecated F.sigmoid; maps raw offsets
        # into (-max_offset, max_offset).
        offsets = (torch.sigmoid(offsets) * 2 - 1) * self.max_offset

        feature_warped = self.deformable_conv(feature, offsets)

        return feature_warped
    