import torch
import torch.nn as nn
import torch.nn.functional as F

class WindowAttention(nn.Module):
    """Sliding-window self-attention over the time axis.

    The input is split into overlapping windows of length ``window_size``
    (stride 1). Self-attention runs jointly over all ``S * window_size``
    tokens of each window, and the overlapping window outputs are merged
    back onto the full time axis with a learnable, softmax-normalized
    per-offset weight (a learnable moving average).

    Args:
        dim: Embedding dimension ``D`` of each token.
        num_heads: Number of attention heads.
        window_size: Length ``W`` of each sliding window along time.

    Shape:
        Input ``x``: ``[S, T, D]``; output: ``[S, T, D]``.
        (``S`` is presumably a spatial/series axis and ``T`` time — the
        attention mixes tokens across both within a window.)
    """

    def __init__(self, dim, num_heads=8, window_size=7):
        super().__init__()
        self.window_size = window_size
        self.attn = nn.MultiheadAttention(embed_dim=dim, num_heads=num_heads, batch_first=True)
        # Learnable per-offset weights for overlap aggregation;
        # initialized uniform, normalized with softmax in forward().
        self.alpha = nn.Parameter(torch.ones(window_size))

    def forward(self, x):
        """Apply windowed attention and merge overlaps.

        Args:
            x: Tensor of shape ``[S, T, D]``.

        Returns:
            Tensor of shape ``[S, T, D]``.

        Raises:
            ValueError: If ``T < window_size`` (no full window fits).
        """
        S, T, D = x.shape
        W = self.window_size
        if T < W:
            raise ValueError(f"sequence length T={T} must be >= window_size={W}")

        # torch.Tensor.unfold appends the window dimension LAST:
        # x_win is [S, num_windows, D, W], NOT [S, num_windows, W, D].
        x_win = x.unfold(dimension=1, size=W, step=1)
        num_windows = x_win.shape[1]

        # Reorder to [num_windows, S, W, D] and flatten S*W into one joint
        # sequence per window. BUG FIX: the original used permute(1, 0, 2, 3),
        # which (given unfold's real layout) interleaved the feature and
        # window axes before attention.
        x_win = x_win.permute(1, 0, 3, 2).reshape(num_windows, S * W, D)

        # Self-attention within each window (batch dim = windows).
        out, _ = self.attn(x_win, x_win, x_win)  # [num_windows, S*W, D]
        out = out.view(num_windows, S, W, D)

        # Learnable overlap-add: softmax over the W in-window offsets, so a
        # fully covered (interior) time step accumulates total weight 1.
        weights = F.softmax(self.alpha, dim=0).view(1, W, 1)  # [1, W, 1]

        out_full = torch.zeros(S, T, D, device=x.device, dtype=out.dtype)
        count = torch.zeros(S, T, 1, device=x.device, dtype=out.dtype)
        for i in range(num_windows):
            # [S, W, D] * [1, W, 1] -> [S, W, D]
            out_full[:, i:i + W, :] += out[i] * weights
            count[:, i:i + W, :] += weights

        # Normalize by the accumulated weight. Edge positions are covered by
        # fewer than W windows (count < 1); dividing by the true count —
        # instead of the original clamp(min=1.0), which left edges
        # under-weighted — renormalizes them properly. The epsilon only
        # guards against division by zero.
        out_full = out_full / count.clamp(min=1e-6)
        return out_full  # [S, T, D]