"""
Triton优化的LayerNorm实现
"""

import torch
import torch.nn as nn
import triton
import triton.language as tl

from .utils import TritonModuleBase, make_contiguous

# What the @triton.jit decorator does — the decorated function is:
#   1. analyzed for argument types and shapes,
#   2. lowered to GPU kernel code,
#   3. compiled to a device-executable binary, and
#   4. launched in parallel on the GPU.
@triton.jit
def layer_norm_forward_kernel(
    x_ptr,  # pointer to the input matrix (row-major)
    y_ptr,  # pointer to the output matrix (same layout as input)
    weight_ptr,  # pointer to the affine scale (gamma), length N
    bias_ptr,  # pointer to the affine shift (beta), length N
    mean_ptr,  # per-row mean, saved for the backward pass (may be None)
    rstd_ptr,  # per-row reciprocal std, saved for the backward pass (may be None)
    stride,  # elements between the starts of consecutive rows
    N,  # size of the normalized dimension
    eps,  # epsilon added to the variance for numerical stability
    BLOCK_SIZE: tl.constexpr,
):
    """LayerNorm forward kernel: one program instance normalizes one row.

    The whole row is loaded in a single block, so BLOCK_SIZE must be >= N.
    Statistics are computed in float32 regardless of the input dtype; the
    store casts the result back to the element type of y_ptr.
    """
    # Each program along axis 0 handles one row of the (M, N) input.
    row = tl.program_id(0)
    
    # Advance both pointers to the start of this row.
    # NOTE(review): the same stride is used for input and output — assumes
    # y has the same row layout as x (true for the empty_like output below).
    x_ptr += row * stride
    y_ptr += row * stride
    
    # Load the row; out-of-range lanes read 0.0 so they do not perturb
    # the sums (the masked store below discards them anyway).
    cols = tl.arange(0, BLOCK_SIZE)
    mask = cols < N
    x = tl.load(x_ptr + cols, mask=mask, other=0.0).to(tl.float32)
    
    # Row mean (the zero-filled masked lanes contribute nothing).
    mean = tl.sum(x, axis=0) / N
    
    # Biased variance and reciprocal standard deviation.
    # NOTE(review): masked lanes hold -mean after centering, but they are
    # excluded from the variance only because x was zero-filled; this is the
    # standard Triton LayerNorm formulation and matches nn.LayerNorm.
    x_centered = x - mean
    var = tl.sum(x_centered * x_centered, axis=0) / N
    rstd = 1.0 / tl.sqrt(var + eps)
    
    # Normalize.
    x_normalized = x_centered * rstd
    
    # Affine parameters, promoted to float32 to match the working precision.
    weight = tl.load(weight_ptr + cols, mask=mask, other=0.0).to(tl.float32)
    bias = tl.load(bias_ptr + cols, mask=mask, other=0.0).to(tl.float32)
    
    # y = x_hat * gamma + beta
    y = x_normalized * weight + bias
    
    # Write the normalized row back.
    tl.store(y_ptr + cols, y, mask=mask)
    
    # Save the per-row statistics for the backward pass. These None checks
    # are resolved when the kernel is specialized (passing None compiles a
    # variant without the stores), not at runtime.
    if mean_ptr is not None:
        tl.store(mean_ptr + row, mean)
    if rstd_ptr is not None:
        tl.store(rstd_ptr + row, rstd)


@triton.jit
def layer_norm_backward_kernel(
    dy_ptr,  # pointer to the upstream gradient dL/dy (row-major)
    x_ptr,  # pointer to the input saved by the forward pass
    weight_ptr,  # pointer to the affine scale (gamma), length N
    mean_ptr,  # per-row mean saved by the forward pass
    rstd_ptr,  # per-row reciprocal std saved by the forward pass
    dx_ptr,  # output: gradient w.r.t. the input
    dweight_ptr,  # output: gradient w.r.t. weight — must be zero-initialized
    dbias_ptr,  # output: gradient w.r.t. bias — must be zero-initialized
    stride,  # elements between the starts of consecutive rows
    N,  # size of the normalized dimension
    BLOCK_SIZE: tl.constexpr,
):
    """LayerNorm backward kernel: one program instance handles one row.

    Computes dx analytically from the saved statistics and accumulates this
    row's contribution to dweight/dbias with atomic adds, since every row
    writes into the same N-length parameter-gradient buffers. The caller
    must therefore pass zero-initialized dweight/dbias tensors.
    """
    row = tl.program_id(0)
    
    # Advance the row-wise pointers to this program's row.
    dy_ptr += row * stride
    x_ptr += row * stride
    dx_ptr += row * stride
    
    # Load the row; out-of-range lanes read 0.0 and contribute nothing.
    cols = tl.arange(0, BLOCK_SIZE)
    mask = cols < N
    
    dy = tl.load(dy_ptr + cols, mask=mask, other=0.0).to(tl.float32)
    x = tl.load(x_ptr + cols, mask=mask, other=0.0).to(tl.float32)
    weight = tl.load(weight_ptr + cols, mask=mask, other=0.0).to(tl.float32)
    mean = tl.load(mean_ptr + row)
    rstd = tl.load(rstd_ptr + row)
    
    # Recompute the normalized activations from the saved statistics.
    x_centered = x - mean
    x_normalized = x_centered * rstd
    
    # dx = (w*dy - mean(w*dy) - x_hat * mean(w*dy * x_hat)) * rstd
    # Hoist w*dy: the original recomputed it three times.
    wdy = dy * weight
    dx = (wdy - tl.sum(wdy, axis=0) / N -
          x_normalized * tl.sum(wdy * x_normalized, axis=0) / N) * rstd
    
    tl.store(dx_ptr + cols, dx, mask=mask)
    
    # Parameter gradients: dweight = sum_rows(dy * x_hat), dbias = sum_rows(dy).
    # The original computed these per-row values and discarded them, leaving
    # dweight_ptr/dbias_ptr never written; accumulate them atomically instead
    # (rows race on the same buffers, hence atomic_add).
    tl.atomic_add(dweight_ptr + cols, dy * x_normalized, mask=mask)
    tl.atomic_add(dbias_ptr + cols, dy, mask=mask)

class TritonLayerNorm(TritonModuleBase):
    """LayerNorm with a Triton-accelerated forward path.

    Falls back to ``nn.LayerNorm`` when Triton is disabled, the input is on
    CPU, or gradients must be tracked — the Triton kernel launch is invisible
    to autograd, so using it under grad would silently drop all gradients.
    Both paths share the same parameter tensors.
    """
    
    def __init__(self, normalized_shape, eps=1e-5):
        """
        Args:
            normalized_shape: int or tuple — trailing dimensions to normalize,
                as accepted by ``nn.LayerNorm``.
            eps: epsilon added to the variance for numerical stability.
        """
        super().__init__()
        self.normalized_shape = normalized_shape
        self.eps = eps
        
        # Affine parameters (gamma / beta).
        self.weight = nn.Parameter(torch.ones(normalized_shape))
        self.bias = nn.Parameter(torch.zeros(normalized_shape))
        
        # PyTorch fallback sharing the same Parameter objects, so both
        # paths stay consistent as the parameters are updated.
        self.fallback = nn.LayerNorm(normalized_shape, eps=eps)
        self.fallback.weight = self.weight
        self.fallback.bias = self.bias
    
    def forward(self, x):
        """Apply LayerNorm over the trailing ``normalized_shape`` dims of x."""
        # Use the PyTorch path when Triton is unavailable, the tensor is not
        # on GPU, or autograd needs this op (no backward is wired up for the
        # Triton kernel — running it under grad would break training).
        if (not self.is_triton_enabled or not x.is_cuda
                or (torch.is_grad_enabled() and x.requires_grad)):
            return self.fallback(x)
        
        # The kernel assumes row-contiguous memory.
        x = make_contiguous(x)
        
        # Flatten to (M, N). numel() of the weight gives N whether
        # normalized_shape is an int or a tuple (the original
        # view(-1, normalized_shape) raised for tuple shapes).
        orig_shape = x.shape
        N = self.weight.numel()
        x = x.view(-1, N)
        M = x.shape[0]
        
        # Output plus float32 per-row statistics for a later backward pass.
        y = torch.empty_like(x)
        mean = torch.empty(M, dtype=torch.float32, device=x.device)
        rstd = torch.empty(M, dtype=torch.float32, device=x.device)
        
        # One program per row; the block must cover the whole row.
        BLOCK_SIZE = triton.next_power_of_2(N)
        
        grid = (M,)
        layer_norm_forward_kernel[grid](
            x, y, self.weight, self.bias, mean, rstd,
            x.stride(0), N, self.eps,
            BLOCK_SIZE=BLOCK_SIZE,
        )
        
        return y.view(orig_shape)

