import torch
import torch.nn as nn
import torch.nn.functional as F
import random
from typing import List
class SE_Block(nn.Module):
    """Channel-attention gate in the Squeeze-and-Excitation style.

    Global-average-pools the input to 1x1, passes it through a 1x1 conv
    (no channel reduction) and a sigmoid, then scales the input
    channel-wise by the resulting gate.
    """

    def __init__(self, c):
        super().__init__()
        # Keep the submodule under `att` so state-dict keys stay stable.
        gate_layers = [
            nn.AdaptiveAvgPool2d(1),
            nn.Conv2d(c, c, kernel_size=1, stride=1),
            nn.Sigmoid(),
        ]
        self.att = nn.Sequential(*gate_layers)

    def forward(self, x):
        # Per-channel gate in (0, 1), broadcast over H and W.
        gate = self.att(x)
        return x * gate
import torch
from torch import nn
from ..builder import FUSION_LAYERS

@FUSION_LAYERS.register_module()
class GlobalAlign(nn.Module):
    """Fuse image and lidar BEV features with a learned alignment branch.

    The module (a) concatenates the two BEV maps and fuses them with a
    conv block, and (b) predicts a per-pixel offset from a (randomly
    shifted, during training) image BEV plus the lidar BEV; the offset
    drives a grid-sample over the lidar BEV whose result gates the lidar
    features before a final 3x3 conv.

    Args:
        in_channels (List[int]): channel count of each modality input,
            e.g. [80, 256]; index 0 is the image BEV, index 1 the lidar
            BEV.
        out_channels (int): channel count of the fused output.
        se (bool): if True, apply an SE_Block channel gate to the
            deformed lidar feature before returning it.
    """

    def __init__(self,
                 in_channels: List[int],
                 out_channels: int,
                 se: bool = False) -> None:
        super().__init__()
        self.in_channels = in_channels
        self.out_channels = out_channels
        total_in = sum(in_channels)

        # Optional channel-attention gate on the deformed feature, which
        # has `out_channels` channels after `deform_conv` (previously
        # sized with in_channels[1], which only worked when the two were
        # equal).
        self.se = SE_Block(out_channels) if se else None

        # Multimodal fusion conv (ConvFuser-like): input channels are
        # the sum of all modality channels.
        self.conv = nn.Sequential(
            nn.Conv2d(total_in, out_channels, 3, padding=1, bias=False),
            nn.BatchNorm2d(out_channels),
            nn.ReLU(True),
        )

        # Predicts a 2-channel (x, y) offset map from the concatenated
        # modalities.
        self.offset_conv = nn.Conv2d(total_in, 2, kernel_size=3,
                                     stride=1, padding=1)

        # Final conv on the gated lidar feature. Its input is
        # `lidar_bev * deform_weight`, which has in_channels[1] channels
        # (previously hard-coded to out_channels, crashing whenever
        # in_channels[1] != out_channels; identical when they are equal).
        self.deform_conv = nn.Conv2d(in_channels[1], out_channels,
                                     kernel_size=3, stride=1, padding=1)

    def forward(self, inputs: List[torch.Tensor]) -> tuple:
        """Fuse the two BEV feature maps.

        Args:
            inputs (List[torch.Tensor]):
                inputs[0] - image BEV feature, shape [B, C_img, H, W]
                inputs[1] - lidar BEV feature, shape [B, C_pts, H, W]

        Returns:
            tuple: ``(deformed_feature, mm_bev)``; both are
            [B, out_channels, H, W]. ``mm_bev`` is the plain concat-conv
            fusion, ``deformed_feature`` the aligned lidar branch.
        """
        img_bev = inputs[0]      # [B, C_img, H, W]
        lidar_bev = inputs[1]    # [B, C_pts, H, W]

        # Straight concat-and-conv fusion of both modalities.
        cat_bev = torch.cat([img_bev, lidar_bev], dim=1)  # [B, C_img+C_pts, H, W]
        mm_bev = self.conv(cat_bev)                       # [B, out_channels, H, W]

        # Random shift during training (simulates misalignment); fixed
        # at test time.
        if self.training:
            shift_x = random.randint(0, 5)
            shift_y = random.randint(0, 5)
        else:
            shift_x = 0
            # NOTE(review): a fixed 9-pixel vertical shift at inference
            # is suspicious given training shifts lie in [0, 5] —
            # confirm this is not a typo for 0.
            shift_y = 9

        # Shift the image BEV: shift_x moves along W (dim 3), shift_y
        # along H (dim 2); torch.roll wraps around.
        shifted_img_bev = torch.roll(img_bev, shifts=(shift_x, shift_y),
                                     dims=(3, 2))

        # Predict a per-pixel (x, y) offset from the shifted image plus
        # the lidar BEV, laid out [B, H, W, 2] as grid_sample expects.
        offset = self.offset_conv(
            torch.cat([shifted_img_bev, lidar_bev], dim=1))  # [B, 2, H, W]
        offset = offset.permute(0, 2, 3, 1)                  # [B, H, W, 2]

        # NOTE(review): grid_sample expects a sampling grid normalized
        # to [-1, 1]; the raw conv output is used directly here, so this
        # acts as a learned gating signal rather than a true deformable
        # sample. Preserved as-is to keep trained behavior.
        deform_weight = F.grid_sample(lidar_bev, offset, align_corners=True)
        deformed_feature = self.deform_conv(lidar_bev * deform_weight)

        # Optional SE channel gate.
        if self.se is not None:
            deformed_feature = self.se(deformed_feature)

        return deformed_feature, mm_bev

    def calculate_loss(self, deformed_feature, mm_bev):
        """MSE alignment loss between the deformed lidar branch output
        and the fused multimodal BEV."""
        loss_fn = nn.MSELoss()
        return loss_fn(deformed_feature, mm_bev)