import torch
import torch.nn as nn
import math

class RotaryEmbedding(torch.nn.Module):
    """Rotary position embedding (RoPE) cos/sin cache.

    Precomputes cos/sin tables for positions ``[0, max_position_embeddings)``
    and serves slices of them in :meth:`forward`. Uses the GPT-NeoX/LLaMA
    "half rotation" layout (frequencies concatenated, not interleaved),
    which pairs with :func:`rotate_half` / :func:`apply_rotary_pos_emb`.
    """

    def __init__(self, dim, max_position_embeddings=2048, base=10000, device=None):
        """
        Args:
            dim: head dimension (size of the last axis of q/k). Assumed even.
            max_position_embeddings: initial cache length.
            base: RoPE frequency base (theta_i = base^(-2i/dim)).
            device: optional device for the frequency buffer.
        """
        super().__init__()
        # Inverse frequencies for each pair of hidden dims: base^(-2i/dim).
        inv_freq = 1.0 / (base ** (torch.arange(0, dim, 2).float().to(device) / dim))
        self.register_buffer("inv_freq", inv_freq)

        # Build the cache eagerly so `torch.jit.trace` sees concrete tensors.
        self.max_seq_len_cached = max_position_embeddings
        self._build_cache(
            self.max_seq_len_cached, self.inv_freq.device, self.inv_freq.dtype
        )

    def _build_cache(self, seq_len, device, dtype):
        """(Re)compute cos/sin tables for positions [0, seq_len), shape [1,1,seq_len,dim]."""
        t = torch.arange(seq_len, device=device, dtype=self.inv_freq.dtype)
        freqs = torch.einsum('i,j->ij', t, self.inv_freq)
        # Different from paper, but it uses a different permutation in order to obtain the same calculation
        emb = torch.cat((freqs, freqs), dim=-1)
        self.cos_cached = emb.cos()[None, None, :, :].to(dtype=dtype)
        self.sin_cached = emb.sin()[None, None, :, :].to(dtype=dtype)

    def forward(self, x, seq_len=None):
        # x: [bs, num_attention_heads, seq_len, head_size]
        # Bug fix: the original compared `seq_len > self.max_seq_len_cached`
        # directly, raising TypeError when the documented default of None was
        # used. Derive the length from x in that case.
        if seq_len is None:
            seq_len = x.shape[-2]
        # Grow the cache on demand; unlikely after __init__, kept just in case.
        if seq_len > self.max_seq_len_cached:
            self.max_seq_len_cached = seq_len
            self._build_cache(seq_len, x.device, x.dtype)
        return (
            self.cos_cached[:, :, :seq_len, ...].to(dtype=x.dtype, device=x.device),
            self.sin_cached[:, :, :seq_len, ...].to(dtype=x.dtype, device=x.device),
        )


def rotate_half(x):
    """Rotate the last dimension by half: (a, b) -> (-b, a) over the two halves."""
    half = x.shape[-1] // 2
    lower = x[..., :half]
    upper = x[..., half:]
    return torch.cat((-upper, lower), dim=-1)


def apply_rotary_pos_emb(q, k, cos, sin, offset: int = 0):
    """Apply rotary position embedding to query and key tensors.

    Slices the cos/sin caches to positions [offset, offset + seq_len) — the
    offset lets a decoded suffix be rotated relative to an existing KV cache —
    then rotates each tensor as x*cos + rotate_half(x)*sin.
    """
    end = offset + q.shape[-2]
    cos = cos[..., offset:end, :]
    sin = sin[..., offset:end, :]
    rotated = [(t * cos) + (rotate_half(t) * sin) for t in (q, k)]
    return rotated[0], rotated[1]


if __name__ == "__main__":
    print("=== Rotary Position Embedding 测试 ===")

    # Test parameters.
    batch_size = 32
    seq_len = 15
    d_model = 512
    h = 8
    d_k = d_model // h  # 64

    # Create random test data.
    q = torch.randn(batch_size, h, seq_len, d_k)
    k = torch.randn(batch_size, h, seq_len, d_k)
    v = torch.randn(batch_size, h, seq_len, d_k)

    print(f"输入shapes:")
    print(f"  q: {q.shape}")  # [32, 8, 15, 64]
    print(f"  k: {k.shape}")  # [32, 8, 15, 64]
    print(f"  v: {v.shape}")  # [32, 8, 15, 64]

    # Create the rotary embedding module (per-head dim, not d_model).
    rotary_emb = RotaryEmbedding(d_k)
    print(f"\nRotary Embedding配置:")
    print(f"  维度: {d_k}")
    print(f"  最大序列长度: {rotary_emb.max_seq_len_cached}")
    print(f"  逆频率shape: {rotary_emb.inv_freq.shape}")

    # Fetch cos/sin tables (only v's shape/dtype/device are used here).
    cos, sin = rotary_emb(v, seq_len)
    print(f"\ncos/sin shapes:")
    print(f"  cos: {cos.shape}")  # [1, 1, 15, 64]
    print(f"  sin: {sin.shape}")  # [1, 1, 15, 64]

    # Apply rotary position embedding to q and k.
    query_states, key_states = apply_rotary_pos_emb(q, k, cos, sin)
    print(f"\n应用RoPE后的shapes:")
    print(f"  query_states: {query_states.shape}")  # [32, 8, 15, 64]
    print(f"  key_states: {key_states.shape}")    # [32, 8, 15, 64]

    # Verify rotate_half on a small hand-checkable tensor.
    print(f"\n=== rotate_half函数测试 ===")
    test_tensor = torch.tensor([[1, 2, 3, 4], [5, 6, 7, 8]], dtype=torch.float)
    rotated = rotate_half(test_tensor)
    print(f"原始: {test_tensor}")
    print(f"旋转后: {rotated}")
    print(f"预期: [[-3, -4, 1, 2], [-7, -8, 5, 6]]")

    # Verify RoPE preserves tensor shapes.
    print(f"\n=== 形状验证 ===")
    print(f"q输入输出shape一致: {q.shape == query_states.shape}")
    print(f"k输入输出shape一致: {k.shape == key_states.shape}")

    # Exercise different sequence lengths against the same cached tables.
    print(f"\n=== 不同序列长度测试 ===")
    for test_seq_len in [5, 10, 20]:
        test_q = torch.randn(2, 4, test_seq_len, d_k)
        test_k = torch.randn(2, 4, test_seq_len, d_k)
        test_cos, test_sin = rotary_emb(test_q, test_seq_len)
        test_q_rot, test_k_rot = apply_rotary_pos_emb(test_q, test_k, test_cos, test_sin)
        print(f"序列长度 {test_seq_len}: {test_q.shape} → {test_q_rot.shape}")

    # Check that distinct positions produce distinct encodings.
    print(f"\n=== 周期性验证 ===")
    # RoPE should be sensitive to relative position.
    # NOTE(review): pos_0/pos_1 are all-zero tensors, so both encodings come
    # out as zeros (x*cos + rotate_half(x)*sin is 0 for x=0); only the shapes
    # printed below are meaningful here.
    pos_0 = torch.zeros(1, 1, 1, d_k)
    pos_1 = torch.zeros(1, 1, 1, d_k)
    cos_test, sin_test = rotary_emb(pos_0, 2)

    # Encodings at position 0 and position 1 (selected by slicing cos/sin).
    pos_0_encoded, _ = apply_rotary_pos_emb(pos_0, pos_0, cos_test[:,:,:1], sin_test[:,:,:1])
    pos_1_encoded, _ = apply_rotary_pos_emb(pos_1, pos_1, cos_test[:,:,1:2], sin_test[:,:,1:2])

    print(f"位置0编码: {pos_0_encoded.shape}")
    print(f"位置1编码: {pos_1_encoded.shape}")

    print(f"\n=== 测试完成 ===")
    print(f"RoPE特点:")
    print(f"- 相对位置编码，具有位置不变性")
    print(f"- 通过旋转变换实现位置信息注入")
    print(f"- 序列长度可扩展性好")
    print(f"- 在LLaMA等现代模型中广泛使用")