import torch
import torch.nn as nn
import torch.nn.functional as F

class SpatialTemporalAttentionNoBatch(nn.Module):
    """Temporal self-attention over a stack of frames, with no batch dim.

    Input and output are shaped ``[F, C, H, W]`` (frames, channels, height,
    width). Each spatial location (h, w) attends across the F frames
    independently; the spatial dims are folded into the batch dim of the
    batched matmuls, so output shape equals input shape.

    Args:
        channels:   number of feature channels C; input must match.
        max_frames: length of the learnable positional embedding. Longer
                    clips are handled by a cached, non-learnable random
                    extension (see ``forward``).
    """

    def __init__(self, channels, max_frames=100):
        super(SpatialTemporalAttentionNoBatch, self).__init__()
        self.channels = channels
        # Dot-product attention scale: sqrt of the feature dimension.
        self.scale = channels ** 0.5
        self.max_frames = max_frames

        # Learnable positional embedding for up to max_frames frames,
        # shaped [1, C, max_frames]; sliced (or extended) per forward pass.
        self.pos_embed = nn.Parameter(torch.randn(1, channels, max_frames))

        # Depthwise (groups=channels) 1x1 convs: independent per-channel
        # linear projections along the temporal axis for Q, K, V.
        self.query_conv = nn.Conv1d(channels, channels, kernel_size=1, groups=channels)
        self.key_conv   = nn.Conv1d(channels, channels, kernel_size=1, groups=channels)
        self.value_conv = nn.Conv1d(channels, channels, kernel_size=1, groups=channels)

        # Cache for positional encodings beyond max_frames. The original
        # implementation drew NEW random extra encodings on every forward
        # call, so the same frame index got a different encoding each time
        # (non-deterministic outputs for identical inputs). We cache the
        # extension instead; like the original, it carries no gradient.
        self._extra_pos = None

    def forward(self, x):
        F_, C, H, W = x.shape
        if C != self.channels:
            # Fail early with a clear message instead of an opaque conv error.
            raise ValueError(f"expected {self.channels} channels, got {C}")

        base_len = self.pos_embed.size(2)
        if F_ > base_len:
            extra_len = F_ - base_len
            # Reuse (and grow only when needed) the cached extension so that
            # repeated forward passes see consistent positional encodings.
            if self._extra_pos is None or self._extra_pos.size(2) < extra_len:
                self._extra_pos = torch.randn(
                    1, self.channels, extra_len, device=x.device, dtype=x.dtype
                )
            extra = self._extra_pos[:, :, :extra_len].to(device=x.device, dtype=x.dtype)
            pos = torch.cat([self.pos_embed, extra], dim=2)
        else:
            pos = self.pos_embed[:, :, :F_]  # [1, C, F]

        # Broadcast the positional encoding over the spatial dims:
        # [1, C, F] -> [F, C, 1, 1] (view only inserts size-1 dims, so it is
        # legal on the permuted, non-contiguous tensor).
        pos = pos.permute(2, 1, 0).view(F_, C, 1, 1)
        x = x + pos

        # Fold spatial positions into the batch dim: [F, C, H, W] -> [H*W, C, F].
        x = x.permute(2, 3, 1, 0).contiguous()
        x = x.view(H * W, C, F_)

        Q = self.query_conv(x)
        K = self.key_conv(x)
        V = self.value_conv(x)

        # Scaled dot-product attention across the temporal axis: [H*W, F, F].
        attn_scores = torch.bmm(Q.transpose(1, 2), K) / self.scale
        attn_weights = F.softmax(attn_scores, dim=-1)

        out = torch.bmm(attn_weights, V.transpose(1, 2))  # [H*W, F, C]
        out = out.transpose(1, 2).contiguous().view(H, W, C, F_)
        out = out.permute(3, 2, 0, 1).contiguous()  # back to [F, C, H, W]

        return out





if __name__ == '__main__':
    # Smoke test for SpatialTemporalAttentionNoBatch.
    #
    # NOTE: forward() interprets the input as [F, C, H, W]; the leading dim
    # is the number of frames, NOT a batch dim. The previous names
    # (batch_size / seq_len / embed_dim) described the dims incorrectly.
    num_frames = 64   # temporal length F
    channels = 32     # feature channels C; must match the layer's `channels`
    spatial = 4       # height and width of each frame

    # Random clip shaped [F, C, H, W] = (64, 32, 4, 4).
    x = torch.rand(num_frames, channels, spatial, spatial)

    # The layer's channel count must equal the input's channel dim.
    self_attention_layer = SpatialTemporalAttentionNoBatch(channels)

    # Forward pass: attention-weighted output, same shape as the input.
    output = self_attention_layer(x)

    # Expected: torch.Size([64, 32, 4, 4]) — the layer preserves the shape.
    print("输出张量形状:", output.shape)
    print(output)
