import torch
import torch.nn as nn
import torch.nn.functional as F
import math

class MultiHeadAttention(nn.Module):
    """Multi-head self-attention block with residual connection and LayerNorm.

    The feature dimension defaults to 96 to match the GRU output dimension
    used upstream (per the original inline notes), but is now configurable.

    Args:
        config: kept for interface compatibility; not read by this module.
        num_heads: number of attention heads; must evenly divide hidden_dim.
        hidden_dim: input/output feature dimension (default 96).
        dropout: dropout probability applied to attention weights and the
            projected output (default 0.2, matching the original).

    Raises:
        ValueError: if hidden_dim is not divisible by num_heads.
    """

    def __init__(self, config, num_heads=4, hidden_dim=96, dropout=0.2):
        super().__init__()
        if hidden_dim % num_heads != 0:
            raise ValueError(
                f"hidden_dim ({hidden_dim}) must be divisible by "
                f"num_heads ({num_heads})"
            )
        self.hidden_dim = hidden_dim
        self.num_heads = num_heads
        self.head_dim = hidden_dim // num_heads

        # Separate Q/K/V projections; in/out dims equal so the residual
        # connection in forward() is shape-compatible.
        self.query = nn.Linear(hidden_dim, hidden_dim)
        self.key = nn.Linear(hidden_dim, hidden_dim)
        self.value = nn.Linear(hidden_dim, hidden_dim)

        self.proj = nn.Linear(hidden_dim, hidden_dim)
        self.dropout = nn.Dropout(dropout)
        self.layer_norm = nn.LayerNorm(hidden_dim)

    def forward(self, x, seq_len=None):
        """Apply multi-head self-attention to x.

        Args:
            x: tensor of shape (batch, seq_len, hidden_dim).
            seq_len: optional explicit sequence length; inferred from
                x.size(1) when None (kept as a positional parameter for
                backward compatibility with existing callers).

        Returns:
            Tensor of shape (batch, seq_len, hidden_dim): LayerNorm of
            (attention output + residual input).
        """
        batch_size = x.size(0)
        if seq_len is None:
            seq_len = x.size(1)

        # Project and reshape to (batch, heads, seq, head_dim).
        q = self.query(x).view(batch_size, seq_len, self.num_heads, self.head_dim).transpose(1, 2)
        k = self.key(x).view(batch_size, seq_len, self.num_heads, self.head_dim).transpose(1, 2)
        v = self.value(x).view(batch_size, seq_len, self.num_heads, self.head_dim).transpose(1, 2)

        # Scaled dot-product attention.
        scores = torch.matmul(q, k.transpose(-2, -1)) / math.sqrt(self.head_dim)
        attn = F.softmax(scores, dim=-1)
        attn = self.dropout(attn)

        # Merge heads back to (batch, seq, hidden_dim) and project.
        out = torch.matmul(attn, v)
        out = out.transpose(1, 2).contiguous().view(batch_size, seq_len, self.hidden_dim)
        out = self.proj(out)
        out = self.dropout(out)
        # Residual connection followed by layer normalization.
        out = self.layer_norm(out + x)

        return out