import torch
import torch.nn as nn
import math

# 把x转一下
# Pairwise rotation helper for RoPE.
def rotate_half(x):
    """Rotate adjacent pairs of the last dimension.

    [x1, x2, x3, x4, ...] -> [-x2, x1, -x4, x3, ...]

    Assumes the last dimension of ``x`` has even size (elements are paired).
    """
    xa = x[..., 0::2]  # even-indexed elements: [x1, x3, ...]
    xb = x[..., 1::2]  # odd-indexed elements:  [x2, x4, ...]
    # empty_like preserves dtype and device; the original torch.zeros(x.shape)
    # always produced a CPU float32 tensor, breaking CUDA/float64/half inputs.
    # Every position is written below, so no zero-init is needed.
    out = torch.empty_like(x)
    out[..., 0::2] = -xb
    out[..., 1::2] = xa
    return out


# Rotary position embedding (RoPE).
def rote(x):
    """Apply rotary position embedding to ``x``.

    Args:
        x: tensor of shape [batch_size, seq_len, dim]; ``dim`` must be even.

    Returns:
        Tensor of the same shape with position-dependent rotation applied.
    """
    # Bug fix: derive dim from the input instead of reading a module-level
    # global `dim`, which only worked when the global happened to match.
    dim = x.shape[-1]
    seq_len = x.shape[1]
    # 1. Frequency factors theta_i = 10000^(-2i/dim), shape [dim/2].
    #    Built with x's dtype/device so CUDA / non-float32 inputs work.
    theta = 10000.0 ** (-torch.arange(0, dim, 2, dtype=x.dtype, device=x.device) / dim)
    # 2. Positions m = 0..seq_len-1, shape [seq_len, 1].
    pos = torch.arange(seq_len, dtype=x.dtype, device=x.device).unsqueeze(-1)
    # m * theta, shape [seq_len, dim/2].
    freqs = pos * theta
    # repeat_interleave duplicates each entry: [a, b] -> [a, a, b, b],
    # so cos/sin line up with the (x1, x2), (x3, x4), ... pairs. Shape [seq_len, dim].
    cos = torch.cos(freqs).repeat_interleave(2, dim=-1)
    sin = torch.sin(freqs).repeat_interleave(2, dim=-1)
    # Standard RoPE formula; broadcasts over the batch dimension.
    return x * cos + rotate_half(x) * sin

class SingleHeadAttention(nn.Module):
    """Single-head self-attention with rotary position embeddings on Q and K."""

    def __init__(self, dim):
        super().__init__()
        # Independent query / key / value projections, each dim -> dim.
        self.Q = nn.Linear(dim, dim)
        self.K = nn.Linear(dim, dim)
        self.V = nn.Linear(dim, dim)

    def forward(self, x):
        # x: [batch_size, seq_len, dim]
        d = x.shape[-1]
        # Project, then rotate queries and keys by position (values are not rotated).
        queries = rote(self.Q(x))           # [batch_size, seq_len, dim]
        keys = rote(self.K(x))              # [batch_size, seq_len, dim]
        values = self.V(x)                  # [batch_size, seq_len, dim]
        # Scaled dot-product attention scores: [batch_size, seq_len, seq_len].
        logits = torch.matmul(queries, keys.transpose(-2, -1)) / math.sqrt(d)
        weights = logits.softmax(dim=-1)
        # Weighted sum of values: [batch_size, seq_len, dim].
        return torch.matmul(weights, values)



# Usage example.
# Guarded so that importing this module does not build a model, run a
# forward pass, and print (the original executed all of this on import).
if __name__ == "__main__":
    dim = 64        # embedding dimension
    seq_len = 10    # sequence length
    batch_size = 2
    # Build the model.
    model = SingleHeadAttention(dim)
    # Dummy input.
    x = torch.randn(batch_size, seq_len, dim)
    # Forward pass.
    output = model(x)
    print("输出形状:", output.shape)  # torch.Size([2, 10, 64])
