from torch import nn


class SpatioTemporalJointAttention(nn.Module):
    """Joint spatial-temporal attention for frame-stacked video features.

    Expects input of shape (N*T, C, H, W), where each group of
    T == ``n_segment`` consecutive batch entries belongs to one clip.
    The input is gated by two sigmoid masks in (0, 1):

    * a spatial mask — one value per pixel per frame, shared over channels;
    * a temporal mask — one value per (clip, channel, frame), broadcast
      over the spatial dimensions.

    Args:
        in_c: number of input channels C.
        n_segment: number of frames T stacked into the batch dimension.
        reduction: channel reduction ratio for the temporal bottleneck.
    """

    def __init__(self, in_c, n_segment, reduction=8):
        super().__init__()
        self.n_segment = n_segment

        # Spatial attention: a single sigmoid-gated map per frame.
        self.spatial_att = nn.Sequential(
            nn.Conv2d(in_c, 1, kernel_size=3, padding=1),
            nn.Sigmoid(),
        )

        # Temporal attention: consumes spatially pooled features of shape
        # [N, C, T]; the 3-wide conv mixes information across adjacent frames.
        # FIX: the first conv must take C input channels. The previous version
        # used AdaptiveAvgPool2d(1) + Conv1d(1, ...), which collapsed the time
        # axis and crashed in forward() for any n_segment > 1.
        hidden = max(in_c // reduction, 1)  # guard against in_c < reduction
        self.temporal_att = nn.Sequential(
            nn.Conv1d(in_c, hidden, kernel_size=3, padding=1),
            nn.ReLU(),
            nn.Conv1d(hidden, in_c, kernel_size=1),
            nn.Sigmoid(),
        )

    def forward(self, x):
        """Apply joint spatial and temporal attention gates to ``x``.

        Args:
            x: tensor of shape (N*T, C, H, W) with T == ``self.n_segment``.

        Returns:
            Tensor of the same shape as ``x``.

        Raises:
            ValueError: if the batch dimension is not divisible by
                ``self.n_segment``.
        """
        nt, c, h, w = x.size()
        t = self.n_segment
        if nt % t != 0:
            raise ValueError(
                f"batch dimension {nt} is not divisible by n_segment={t}"
            )
        n_batch = nt // t  # true clip batch size N

        # Spatial mask: one gate per pixel per frame.
        spatial_mask = self.spatial_att(x)  # [N*T, 1, H, W]

        # Global spatial average pooling, then lay channels over time so
        # Conv1d convolves along the T axis.
        pooled = x.view(n_batch, t, c, h, w).mean(dim=(3, 4))  # [N, T, C]
        pooled = pooled.permute(0, 2, 1)  # [N, C, T]

        # Temporal mask: one gate per (clip, channel, frame), broadcast
        # over H and W via the trailing singleton dims.
        temporal_mask = self.temporal_att(pooled)  # [N, C, T]
        temporal_mask = temporal_mask.permute(0, 2, 1).reshape(nt, c, 1, 1)

        return x * spatial_mask * temporal_mask