"""
Triton优化的Attention实现
基于Flash Attention的思想
"""

import math
import torch
import torch.nn as nn
import triton
import triton.language as tl

from .utils import TritonModuleBase, make_contiguous


@triton.jit
def attention_forward_kernel(
    Q, K, V, Out,
    softmax_scale,
    stride_qz, stride_qh, stride_qm, stride_qk,
    stride_kz, stride_kh, stride_kn, stride_kk,
    stride_vz, stride_vh, stride_vn, stride_vk,
    stride_oz, stride_oh, stride_om, stride_ok,
    Z, H, M, N, HEAD_DIM,
    BLOCK_M: tl.constexpr,
    BLOCK_N: tl.constexpr,
    BLOCK_K: tl.constexpr,
):
    """
    Flash-Attention-style fused attention forward kernel.

    Q: (batch, heads, seq_len, head_dim)
    K: (batch, heads, seq_len, head_dim)
    V: (batch, heads, seq_len, head_dim)

    One program handles BLOCK_M query rows of one (batch, head) pair and
    streams over all key/value blocks, maintaining the online-softmax
    recurrence (running row max `m_i`, running denominator `l_i`, and an
    *unnormalized* accumulator `acc` that is divided by `l_i` once at the
    end).

    NOTE: a causal mask is ALWAYS applied; BLOCK_K must equal HEAD_DIM
    (the launcher guarantees this).
    """
    start_m = tl.program_id(0)
    off_hz = tl.program_id(1)
    off_z = off_hz // H
    off_h = off_hz % H

    # Offsets for this program's query rows / key columns / head dim.
    offs_m = start_m * BLOCK_M + tl.arange(0, BLOCK_M)
    offs_n = tl.arange(0, BLOCK_N)
    offs_k = tl.arange(0, BLOCK_K)

    # Base pointers for this (batch, head) slice.
    q_ptrs = Q + off_z * stride_qz + off_h * stride_qh + \
             (offs_m[:, None] * stride_qm + offs_k[None, :] * stride_qk)
    k_ptrs = K + off_z * stride_kz + off_h * stride_kh + \
             (offs_n[None, :] * stride_kn + offs_k[:, None] * stride_kk)
    v_ptrs = V + off_z * stride_vz + off_h * stride_vh + \
             (offs_n[:, None] * stride_vn + offs_k[None, :] * stride_vk)

    # Q stays resident for the whole loop; out-of-range rows load as 0.
    q = tl.load(q_ptrs, mask=offs_m[:, None] < M, other=0.0)

    # Online-softmax state (Flash Attention):
    #   acc — unnormalized weighted sum of V rows, rescaled as the max moves
    #   l_i — running softmax denominator
    #   m_i — running row-wise max of the scaled scores
    acc = tl.zeros([BLOCK_M, BLOCK_K], dtype=tl.float32)
    l_i = tl.zeros([BLOCK_M], dtype=tl.float32)
    m_i = tl.full([BLOCK_M], -float('inf'), dtype=tl.float32)

    # Stream over K/V blocks.
    for start_n in range(0, N, BLOCK_N):
        start_n = tl.multiple_of(start_n, BLOCK_N)

        k = tl.load(k_ptrs + start_n * stride_kn,
                   mask=(offs_n[None, :] + start_n < N), other=0.0)
        v = tl.load(v_ptrs + start_n * stride_vn,
                   mask=(offs_n[:, None] + start_n < N), other=0.0)

        # Scaled attention scores for this block: (BLOCK_M, BLOCK_N).
        qk = tl.dot(q, k)
        qk *= softmax_scale

        # Causal mask (required for GPT-2): query position must be >= key position.
        causal_mask = offs_m[:, None] >= (start_n + offs_n[None, :])
        qk = tl.where(causal_mask, qk, -1e9)

        # Online softmax update. alpha rescales the previous partial sums
        # from the old max to the new max.
        m_ij = tl.max(qk, axis=1)
        m_i_new = tl.maximum(m_i, m_ij)
        alpha = tl.exp(m_i - m_i_new)

        p = tl.exp(qk - m_i_new[:, None])
        l_i = l_i * alpha + tl.sum(p, axis=1)

        # Rescale the old accumulator and add this block's contribution.
        p = p.to(v.dtype)  # match V's dtype for the dot
        acc = acc * alpha[:, None] + tl.dot(p, v)

        m_i = m_i_new

    # Final normalization (guard against fully-masked / degenerate rows).
    l_i = tl.maximum(l_i, 1e-8)
    acc = acc / l_i[:, None]

    # Write back the output block.
    out_ptrs = Out + off_z * stride_oz + off_h * stride_oh + \
               (offs_m[:, None] * stride_om + offs_k[None, :] * stride_ok)
    tl.store(out_ptrs, acc, mask=offs_m[:, None] < M)


def triton_attention(q, k, v, causal=False):
    """
    Attention computed by the Triton flash-attention kernel.

    Args:
        q, k, v: tensors of shape (batch, heads, seq_len, head_dim).
            All three MUST share the same shape; differing key/value
            sequence lengths (e.g. a KV cache) are not supported by the
            kernel's causal-mask indexing.
        causal: kept for API compatibility. NOTE: the underlying kernel
            always applies a causal mask, so ``causal=False`` is not honored.

    Returns:
        Tensor of the same shape (and dtype/device) as ``q``.

    Raises:
        ValueError: if the shapes are inconsistent, or if head_dim is not a
            power of two (a ``tl.arange`` requirement). Callers wrap this in
            try/except and fall back to the PyTorch implementation.
    """
    # Validate up front: the original code silently produced wrong results
    # when k/v were longer than q (KV cache) or head_dim was unsupported.
    if q.shape != k.shape or q.shape != v.shape:
        raise ValueError(
            f"triton_attention requires q, k, v with identical shapes, "
            f"got {tuple(q.shape)}, {tuple(k.shape)}, {tuple(v.shape)}"
        )

    batch, heads, seq_len, head_dim = q.shape

    if head_dim <= 0 or head_dim & (head_dim - 1) != 0:
        raise ValueError(
            f"triton_attention requires head_dim to be a power of two, got {head_dim}"
        )

    # Softmax scaling factor 1/sqrt(d).
    softmax_scale = 1.0 / math.sqrt(head_dim)

    # Allocate output.
    out = torch.empty_like(q)

    # Block sizes.
    BLOCK_M = 64
    BLOCK_N = 64
    BLOCK_K = head_dim  # must equal head_dim so the kernel avoids K-dim tiling

    # One program per (query block, batch*head).
    grid = (triton.cdiv(seq_len, BLOCK_M), batch * heads)

    attention_forward_kernel[grid](
        q, k, v, out,
        softmax_scale,
        q.stride(0), q.stride(1), q.stride(2), q.stride(3),
        k.stride(0), k.stride(1), k.stride(2), k.stride(3),
        v.stride(0), v.stride(1), v.stride(2), v.stride(3),
        out.stride(0), out.stride(1), out.stride(2), out.stride(3),
        batch, heads, seq_len, seq_len, head_dim,  # M == N (enforced above), last arg is HEAD_DIM
        BLOCK_M=BLOCK_M, BLOCK_N=BLOCK_N, BLOCK_K=BLOCK_K,
    )

    return out


class TritonGPT2Attention(TritonModuleBase):
    """GPT-2 self-attention with an optional Triton (Flash-Attention-style) fast path.

    The Triton path is used only when it can reproduce the reference
    semantics exactly: CUDA input, no additive attention mask, no head mask,
    eval mode (the fused kernel skips attention dropout), no KV cache
    (query and key lengths must match for the kernel's causal mask), and
    attention weights not requested. Otherwise the PyTorch implementation
    in ``_attn_pytorch`` is used.
    """

    def __init__(self, config):
        super().__init__()
        self.config = config

        max_positions = config.max_position_embeddings
        # Lower-triangular causal mask, used by the PyTorch fallback.
        self.register_buffer(
            "bias",
            torch.tril(torch.ones((max_positions, max_positions), dtype=torch.uint8)).view(
                1, 1, max_positions, max_positions
            ),
        )
        self.register_buffer("masked_bias", torch.tensor(-1e4))

        self.embed_dim = config.hidden_size
        self.num_heads = config.num_attention_heads
        self.head_dim = self.embed_dim // self.num_heads
        self.split_size = self.embed_dim  # size of each of Q, K, V after c_attn

        if self.head_dim * self.num_heads != self.embed_dim:
            raise ValueError(
                f"`embed_dim` must be divisible by num_heads (got `embed_dim`: {self.embed_dim} and `num_heads`: {self.num_heads})."
            )

        self.scale_attn_weights = config.scale_attn_weights

        # Projection layers (fused QKV projection + output projection).
        self.c_attn = nn.Linear(self.embed_dim, 3 * self.embed_dim)
        self.c_proj = nn.Linear(self.embed_dim, self.embed_dim)

        # Dropout (attn_dropout applies only on the PyTorch path).
        self.attn_dropout = nn.Dropout(config.attn_pdrop)
        self.resid_dropout = nn.Dropout(config.resid_pdrop)

    def _split_heads(self, tensor, num_heads, attn_head_size):
        """
        Splits hidden_size dim into attn_head_size and num_heads.

        (batch, seq, hidden) -> (batch, heads, seq, head_dim)
        """
        new_shape = tensor.size()[:-1] + (num_heads, attn_head_size)
        tensor = tensor.view(new_shape)
        return tensor.permute(0, 2, 1, 3)  # (batch, head, seq_length, head_features)

    def _merge_heads(self, tensor, num_heads, attn_head_size):
        """
        Merges attn_head_size dim and num_attn_heads dim into hidden_size.

        (batch, heads, seq, head_dim) -> (batch, seq, hidden)
        """
        tensor = tensor.permute(0, 2, 1, 3).contiguous()
        new_shape = tensor.size()[:-2] + (num_heads * attn_head_size,)
        return tensor.view(new_shape)

    def forward(
        self,
        hidden_states,
        attention_mask=None,
        layer_past=None,
        past_key_values=None,  # new-style parameter name
        cache_position=None,   # accepted for API compatibility; unused here
        use_cache=False,
        head_mask=None,
        output_attentions=False,
    ):
        """Compute self-attention over ``hidden_states``.

        Returns ``(attn_output, present)`` plus a trailing ``None`` when
        ``output_attentions`` is set — attention weights are never
        materialized for return by this implementation.
        """
        # Fused QKV projection, then split into the three tensors.
        query, key, value = self.c_attn(hidden_states).split(self.split_size, dim=2)

        query = self._split_heads(query, self.num_heads, self.head_dim)
        key = self._split_heads(key, self.num_heads, self.head_dim)
        value = self._split_heads(value, self.num_heads, self.head_dim)

        # Reconcile old-style `layer_past` with new-style `past_key_values`.
        # Use an explicit None check: `or` would mis-handle falsy-but-valid
        # values (e.g. an empty Cache object defining __len__/__bool__).
        if past_key_values is None:
            past_key_values = layer_past
        if past_key_values is not None:
            # Safely unpack past_key_values across the formats we support.
            if isinstance(past_key_values, (list, tuple)) and len(past_key_values) == 2:
                past_key, past_value = past_key_values
                key = torch.cat((past_key, key), dim=-2)
                value = torch.cat((past_value, value), dim=-2)
            elif isinstance(past_key_values, (list, tuple)) and len(past_key_values) > 2:
                # If past_key_values carries extra elements, use the first two.
                past_key, past_value = past_key_values[0], past_key_values[1]
                key = torch.cat((past_key, key), dim=-2)
                value = torch.cat((past_value, value), dim=-2)
            # DynamicCache-style objects (transformers 4.x inference feature)
            # are intentionally left untouched — not needed for training.

        if use_cache is True:
            present = (key, value)
        else:
            present = None

        # Decide whether the Triton fast path can be used. It must be
        # skipped whenever it would diverge from the reference semantics:
        #   - attention_mask / head_mask: not supported by the kernel
        #   - training: the kernel skips attn_dropout
        #   - query len != key len (KV cache): kernel causal mask misaligns
        use_triton = (
            self.is_triton_enabled
            and hidden_states.is_cuda
            and not output_attentions
            and attention_mask is None
            and head_mask is None
            and not self.training
            and query.size(-2) == key.size(-2)
        )

        if use_triton:
            # The kernel requires contiguous inputs.
            query = make_contiguous(query)
            key = make_contiguous(key)
            value = make_contiguous(value)

            try:
                attn_output = triton_attention(query, key, value, causal=True)
            except Exception as e:
                # Fall back to the PyTorch implementation on any kernel failure.
                print(f"Triton attention failed: {e}, falling back to PyTorch")
                attn_output = self._attn_pytorch(query, key, value, attention_mask, head_mask)
        else:
            attn_output = self._attn_pytorch(query, key, value, attention_mask, head_mask)

        attn_output = self._merge_heads(attn_output, self.num_heads, self.head_dim)
        attn_output = self.c_proj(attn_output)
        attn_output = self.resid_dropout(attn_output)

        outputs = (attn_output, present)
        if output_attentions:
            outputs += (None,)  # attention weights are never returned by this module

        return outputs

    def _attn_pytorch(self, query, key, value, attention_mask=None, head_mask=None):
        """Reference PyTorch attention (fallback path).

        Applies causal masking via the precomputed `bias` buffer, an optional
        additive `attention_mask`, softmax, attention dropout, and an
        optional multiplicative `head_mask`.
        """
        attn_weights = torch.matmul(query, key.transpose(-1, -2))

        if self.scale_attn_weights:
            attn_weights = attn_weights / math.sqrt(self.head_dim)

        # Causal mask: align the query window to the end of the key window
        # so cached (past) keys remain fully visible.
        query_length, key_length = query.size(-2), key.size(-2)
        causal_mask = self.bias[:, :, key_length - query_length : key_length, :key_length].bool()
        mask_value = torch.finfo(attn_weights.dtype).min
        mask_value = torch.full([], mask_value, dtype=attn_weights.dtype, device=attn_weights.device)
        attn_weights = torch.where(causal_mask, attn_weights, mask_value)

        if attention_mask is not None:
            attn_weights = attn_weights + attention_mask

        attn_weights = nn.functional.softmax(attn_weights, dim=-1)
        attn_weights = attn_weights.type(value.dtype)
        attn_weights = self.attn_dropout(attn_weights)

        if head_mask is not None:
            attn_weights = attn_weights * head_mask

        attn_output = torch.matmul(attn_weights, value)

        return attn_output
