import torch
import torch.nn as nn
import torch.nn.functional as F

class SingleHeadSelfAttention(nn.Module):
    def __init__(self, embed_size):
        """
        初始化单头自注意力机制
        :param embed_size: 词嵌入的维度
        """
        super(SingleHeadSelfAttention, self).__init__()
        self.embed_size = embed_size

        # 定义查询、键和值矩阵
        self.query = nn.Linear(embed_size, embed_size)
        self.key = nn.Linear(embed_size, embed_size)
        self.value = nn.Linear(embed_size, embed_size)
        self.softmax = nn.Softmax(dim=-1)

        # 定义残差连接和层归一化
        self.layer_norm = nn.LayerNorm(embed_size)
        self.fc = nn.Linear(embed_size, embed_size)

    def forward(self, x, mask=None):
        """
        前向传播
        :param x: 输入张量，形状为 (N, L, embed_size)
        :param mask: 掩码张量，形状为 (N, 1, L)
        :return: 输出张量，形状为 (N, L, embed_size)
        """
        N, L, _ = x.shape

        # 计算查询、键和值
        Q = self.query(x)  # (N, L, embed_size)
        K = self.key(x)    # (N, L, embed_size)
        V = self.value(x)  # (N, L, embed_size)

        # 计算注意力权重
        attention_scores = torch.bmm(Q, K.transpose(1, 2)) / (self.embed_size ** 0.5)  # (N, L, L)

        if mask is not None:
            attention_scores = attention_scores.masked_fill(mask == 0, float("-1e20"))

        attention_weights = self.softmax(attention_scores)  # (N, L, L)

        # 计算上下文向量
        out = torch.bmm(attention_weights, V)  # (N, L, embed_size)

        # 前馈网络
        out = self.layer_norm(out)# 层归一化
        out = self.fc(out)  # (N, L, embed_size)
        out = F.relu(out) 
        out = self.layer_norm(out)

        # 残差连接
        out = out + x  # (N, L, embed_size)

        return out

class GroupedAttention(nn.Module):
    """Splits the embedding dimension into fixed-size groups and runs an
    independent attention module over each group, concatenating the results.

    Fixes over the previous revision:
      * each group now gets its own attention module — the old ModuleList
        held the SAME instance for every group, silently sharing weights;
      * padding is only applied when ``embed_size % group_size != 0`` — the
        old code padded by a full ``group_size`` in the divisible case,
        which made the subsequent ``view`` fail;
      * the remainder group is sliced with ``self.remainder`` instead of a
        hard-coded ``9``;
      * the mask is passed through unchanged: a ``(N, 1, L)`` mask
        broadcasts against every group's ``(N, L, L)`` scores, so the old
        repeat/unbind copies (and the TypeError-raising tuple slicing for
        the remainder group) are unnecessary;
      * the output is concatenated directly on the feature dimension — the
        old transpose+view after a dim=2 concat scrambled the sequence and
        feature axes;
      * the documented ``attention_module`` argument is now honored.
    """

    def __init__(self, embed_size, group_size=10, attention_module=None):
        """
        :param embed_size: dimensionality of the token embeddings
        :param group_size: embedding size handled by each group
        :param attention_module: module class to instantiate per group
            (called as ``attention_module(dim)``); defaults to
            SingleHeadSelfAttention
        """
        super().__init__()
        self.group_size = group_size
        self.num_groups = embed_size // group_size   # full groups (floor)
        self.remainder = embed_size % group_size     # leftover dimensions

        attn_cls = attention_module if attention_module is not None else SingleHeadSelfAttention

        # One INDEPENDENT attention module per full group.
        self.attentions = nn.ModuleList(
            [attn_cls(group_size) for _ in range(self.num_groups)]
        )

        # Extra module for the leftover dimensions, if any.
        self.extra_attention = attn_cls(self.remainder) if self.remainder > 0 else None

    def forward(self, x, mask=None):
        """
        :param x: input tensor of shape (N, L, embed_size)
        :param mask: optional mask of shape (N, 1, L); broadcasts against
            each group's (N, L, L) attention scores
        :return: tensor of shape (N, L, embed_size)
        """
        N, L, embed_size = x.shape

        if self.remainder > 0:
            # Zero-pad the feature dimension up to a whole number of groups.
            x = F.pad(x, (0, self.group_size - self.remainder), "constant", 0)
            total_groups = self.num_groups + 1
        else:
            total_groups = self.num_groups

        # (N, L, total_groups, group_size) -> (N, total_groups, L, group_size)
        x = x.view(N, L, total_groups, self.group_size).transpose(1, 2)
        groups = torch.unbind(x, dim=1)  # total_groups tensors of (N, L, group_size)

        outputs = []
        for i in range(self.num_groups):
            outputs.append(self.attentions[i](groups[i], mask))  # (N, L, group_size)

        if self.remainder > 0:
            # Drop the zero padding before attending over the leftover dims.
            rem = groups[self.num_groups][:, :, :self.remainder]
            outputs.append(self.extra_attention(rem, mask))  # (N, L, remainder)

        # Concatenate on the feature dimension: (N, L, embed_size).
        out = torch.cat(outputs, dim=2)

        # Defensive truncation to the caller's embedding size.
        return out[:, :, :embed_size]


class StackedSelfAttention(nn.Module):
    """A stack of SingleHeadSelfAttention layers with a ReLU between layers
    and a final LayerNorm.

    Fix: the previous revision ran ``x = x + x`` after every layer, which
    was labelled a residual connection but merely doubled the activations
    (scaling the output by 2**num_layers). SingleHeadSelfAttention already
    adds its input as a residual internally, so no extra addition is needed
    here.
    """

    def __init__(self, embed_size, num_layers):
        """
        :param embed_size: dimensionality of the token embeddings
        :param num_layers: number of stacked attention layers
        """
        super().__init__()
        self.layers = nn.ModuleList(
            [SingleHeadSelfAttention(embed_size) for _ in range(num_layers)]
        )
        self.activation = nn.ReLU()         # non-linearity between layers
        self.layer_norm = nn.LayerNorm(embed_size)  # final normalization

    def forward(self, x, mask=None):
        """
        :param x: input tensor of shape (N, L, embed_size)
        :param mask: optional mask of shape (N, 1, L), forwarded to each layer
        :return: tensor of shape (N, L, embed_size)
        """
        for layer in self.layers:
            # Each layer applies its own residual connection internally.
            x = self.activation(layer(x, mask))
        return self.layer_norm(x)
    
# Example usage
if __name__ == "__main__":
    # Smoke test: push one random batch through the single-head layer.
    embed_size = 512
    seq_length = 100
    batch_size = 16

    # Random input and an all-ones mask (i.e. nothing is masked out).
    x = torch.randn(batch_size, seq_length, embed_size)
    mask = torch.ones(batch_size, 1, seq_length)

    self_attn = SingleHeadSelfAttention(embed_size)
    output = self_attn(x, mask)

    # Expected: (batch_size, seq_length, embed_size)
    print(output.shape)