import torch
import triton
import triton.language as tl
from typing import Tuple


# grid: (bsz * seq_len, 1, 1) -- one program per (batch, position) row.
@triton.jit
def apply_rope_kernel(
    q_ptr,
    k_ptr,
    cos_ptr,
    sin_ptr,
    seq_len,
    stride_qr,
    stride_qh,
    stride_qd,
    stride_kr,
    stride_kh,
    stride_kd,
    stride_cr,
    stride_cd,
    stride_sr,
    stride_sd,
    num_q_heads,
    num_kv_heads,
    head_dim: tl.constexpr,
    pad_num_q_heads: tl.constexpr,
    pad_num_kv_heads: tl.constexpr,
):
    # In-place rotary position embedding ("rotate-half" split: first half /
    # second half of head_dim) applied to q and k. Each program processes
    # every head of one (batch, position) row.
    # Expects q/k in (bsz, seq_len, num_heads, head_dim) layout so that a
    # flat row index `pid` times the dim-1 stride addresses the row
    # (requires a contiguous tensor; the caller enforces this).
    pid = tl.program_id(0)
    batch_id = pid // seq_len
    seq_id = pid % seq_len

    q_ptr += pid * stride_qr
    k_ptr += pid * stride_kr
    # NOTE(review): this indexing assumes cos/sin carry a full batch
    # dimension (cos_bsz == bsz). If cos_bsz == 1 while bsz > 1, any
    # batch_id > 0 offsets past the end of cos/sin -- the caller must
    # broadcast/materialize cos and sin to bsz first. Verify at call site.
    cos_ptr += (batch_id * seq_len + seq_id) * stride_cr
    sin_ptr += (batch_id * seq_len + seq_id) * stride_sr

    # Only the first half of cos/sin is loaded: the second half duplicates
    # the first, so one load serves both halves of the rotation.
    # tl.arange requires head_dim // 2 to be a power of two.
    cos_offs = tl.arange(0, head_dim // 2)
    cos = tl.load(cos_ptr + cos_offs * stride_cd)
    sin = tl.load(sin_ptr + cos_offs * stride_sd)

    # First-half offsets for all (padded) heads; mask off the padding rows
    # introduced by rounding the head counts up to a power of two.
    q1_offs = (
        tl.arange(0, pad_num_q_heads)[:, None] * stride_qh
        + tl.arange(0, head_dim // 2)[None, :] * stride_qd
    )
    q1_mask = tl.arange(0, pad_num_q_heads)[:, None] < num_q_heads
    k1_offs = (
        tl.arange(0, pad_num_kv_heads)[:, None] * stride_kh
        + tl.arange(0, head_dim // 2)[None, :] * stride_kd
    )
    k1_mask = tl.arange(0, pad_num_kv_heads)[:, None] < num_kv_heads
    # Compute in cos's dtype for precision, cast back to q/k dtype on store.
    q1 = tl.load(q_ptr + q1_offs, mask=q1_mask, other=0.0).to(cos.dtype)
    k1 = tl.load(k_ptr + k1_offs, mask=k1_mask, other=0.0).to(cos.dtype)

    # Second half lives head_dim // 2 elements after the first half
    # (contiguous last dim, so no extra stride factor needed).
    q2_offs = q1_offs + head_dim // 2
    q2_mask = q1_mask
    k2_offs = k1_offs + head_dim // 2
    k2_mask = k1_mask
    q2 = tl.load(q_ptr + q2_offs, mask=q2_mask, other=0.0).to(cos.dtype)
    k2 = tl.load(k_ptr + k2_offs, mask=k2_mask, other=0.0).to(cos.dtype)

    qk_dtype = q1.dtype

    # q_embd = [q_embd1, q_embd2] = [q1, q2] * [cos, cos] + [-q2, q1] * [sin, sin]
    # k_embd = [k_embd1, k_embd2] = [k1, k2] * [cos, cos] + [-k2, k1] * [sin, sin]
    q_embd1 = q1 * cos - q2 * sin
    q_embd2 = q2 * cos + q1 * sin
    k_embd1 = k1 * cos - k2 * sin
    k_embd2 = k2 * cos + k1 * sin
    # Stores overwrite q/k in place; masks keep padded head rows untouched.
    tl.store(q_ptr + q1_offs, q_embd1.to(qk_dtype), mask=q1_mask)
    tl.store(q_ptr + q2_offs, q_embd2.to(qk_dtype), mask=q2_mask)
    tl.store(k_ptr + k1_offs, k_embd1.to(qk_dtype), mask=k1_mask)
    tl.store(k_ptr + k2_offs, k_embd2.to(qk_dtype), mask=k2_mask)


def apply_rope(
    query_states: torch.Tensor,
    key_states: torch.Tensor,
    cos: torch.Tensor,
    sin: torch.Tensor,
) -> Tuple[torch.Tensor, torch.Tensor]:
    """Apply rotary position embedding to query and key with a Triton kernel.

    Args:
        query_states (torch.Tensor): (bsz, num_q_heads, seq_len, head_dim)
        key_states (torch.Tensor): (bsz, num_kv_heads, seq_len, head_dim)
        cos (torch.Tensor): (cos_bsz, seq_len, head_dim) (cos_bsz = bsz or 1)
        sin (torch.Tensor): (cos_bsz, seq_len, head_dim)

    Returns:
        Tuple[torch.Tensor, torch.Tensor]: (q_embd, k_embd), same shapes as
        the inputs.

    Raises:
        ValueError: if head_dim is not a power of two (required by the
            kernel's tl.arange extents).
    """
    # (bsz, num_heads, seq_len, head_dim) -> (bsz, seq_len, num_heads, head_dim)
    # so one kernel program handles all heads of a single (batch, pos) row.
    query_states = query_states.transpose(1, 2).contiguous()
    key_states = key_states.transpose(1, 2).contiguous()

    bsz, seq_len, num_q_heads, head_dim = query_states.shape
    _, _, num_kv_heads, _ = key_states.shape

    # Fail early with a clear message: the kernel uses
    # tl.arange(0, head_dim // 2), which requires a power-of-two extent.
    if head_dim < 2 or head_dim & (head_dim - 1):
        raise ValueError(f"head_dim must be a power of two >= 2, got {head_dim}")

    # Bug fix: the kernel indexes cos/sin by (batch_id * seq_len + seq_id),
    # which assumes a full batch dimension. With cos_bsz == 1 and bsz > 1 the
    # previous code read out of bounds; materialize the broadcast instead.
    if cos.shape[0] != bsz:
        cos = cos.expand(bsz, -1, -1)
        sin = sin.expand(bsz, -1, -1)
    cos, sin = cos.contiguous(), sin.contiguous()

    # Head counts are rounded up to powers of two; the kernel masks the pad.
    pad_num_q_heads = triton.next_power_of_2(num_q_heads)
    pad_num_kv_heads = triton.next_power_of_2(num_kv_heads)

    num_rows = bsz * seq_len

    apply_rope_kernel[(num_rows,)](
        q_ptr=query_states,
        k_ptr=key_states,
        cos_ptr=cos,
        sin_ptr=sin,
        stride_qr=query_states.stride(1),
        stride_qh=query_states.stride(2),
        stride_qd=query_states.stride(3),
        stride_kr=key_states.stride(1),
        stride_kh=key_states.stride(2),
        stride_kd=key_states.stride(3),
        stride_cr=cos.stride(1),
        stride_cd=cos.stride(2),
        stride_sr=sin.stride(1),
        stride_sd=sin.stride(2),
        seq_len=seq_len,
        num_q_heads=num_q_heads,
        num_kv_heads=num_kv_heads,
        head_dim=head_dim,
        pad_num_q_heads=pad_num_q_heads,
        pad_num_kv_heads=pad_num_kv_heads,
    )

    # (bsz, seq_len, num_heads, head_dim) -> (bsz, num_heads, seq_len, head_dim)
    query_states = query_states.transpose(1, 2).contiguous()
    key_states = key_states.transpose(1, 2).contiguous()
    return query_states, key_states
