import triton
import triton.language as tl
import torch
import torch_npu
import math

# Signed-int8 quantization clamp range (constants inherited from the original code).
QUANTMAX = 127
QUANTMIN = -128

# RMS-normalize one row, apply the gamma/beta affine transform, quantize, and
# store the clamped result as float32 (the host side converts to int8 afterwards,
# see rms_norm_quant_triton).
# Grid: one program instance per row; each instance walks the row twice in
# BLOCK_SIZE-wide chunks — first to reduce the sum of squares, then to
# normalize / quantize / store.
@triton.jit
def rms_norm_quant_calc(
    input_ptr,      # *float32 (N, H): row-major input activations
    gamma_ptr,      # *float32 (H,): RMSNorm scale weights
    beta_ptr,       # *float32 (H,): RMSNorm shift weights
    output_ptr,     # *float32 (N, H): quantized values stored as float; host casts to int8
    quant_scale_f,  # float32 scalar: reciprocal quant scale (1 / quant_scale)
    quant_offset_f, # float32 scalar: quantization zero-point offset
    QUANTMAX: tl.constexpr,  # upper clamp bound of the quant range (e.g. 127)
    QUANTMIN: tl.constexpr,  # lower clamp bound of the quant range (e.g. -128)
    N: tl.constexpr, # sequence length (rows)
    H: tl.constexpr, # hidden_size (columns)
    EPS: tl.constexpr,  # epsilon added to the mean square for numerical stability
    BLOCK_SIZE: tl.constexpr,  # columns processed per inner-loop iteration
):
    # Each program instance (block) owns exactly one row.
    row_idx = tl.program_id(0)
    if row_idx >= N:
        return

    # Pass 1: reduce the row's sum of squares, one BLOCK_SIZE chunk at a time.
    sum_square = 0.0
    num_blocks = tl.cdiv(H, BLOCK_SIZE)
    for i in range(0, num_blocks):
        col_offsets = tl.arange(0, BLOCK_SIZE)
        col_mask = i * BLOCK_SIZE + col_offsets < H  # guard the ragged tail chunk
        x = tl.load(input_ptr + row_idx * H + i * BLOCK_SIZE + col_offsets, mask=col_mask, other=0.0)

        x_square = x * x
        block_square = tl.sum(x_square, axis=0)
        sum_square += block_square

    # inv_rms = 1 / sqrt(mean(x^2) + eps)
    mean_square = sum_square / H
    inv_rms = 1.0 / tl.sqrt(mean_square + EPS)

    # Pass 2: normalize, apply gamma/beta, quantize, clamp, and store each chunk.
    for i in range(0, num_blocks):
        col_offsets = tl.arange(0, BLOCK_SIZE)
        col_mask = i * BLOCK_SIZE + col_offsets < H
        x = tl.load(input_ptr + row_idx * H + i * BLOCK_SIZE + col_offsets, mask=col_mask, other=0.0)
        gamma = tl.load(gamma_ptr + i * BLOCK_SIZE + col_offsets, mask=col_mask, other=0.0)
        beta = tl.load(beta_ptr + i * BLOCK_SIZE + col_offsets, mask=col_mask, other=0.0)
        
        norm = x * inv_rms * gamma + beta
        quant_float = norm * quant_scale_f + quant_offset_f

        # Round half away from zero (ties move away from 0).
        # NOTE(review): the PyTorch golden uses torch.round, which rounds half
        # to even — values exactly on a .5 boundary may differ by one.
        is_positive = quant_float >= 0.0
        rounded_float = tl.where(
            is_positive,
            tl.floor(quant_float + 0.5),
            tl.ceil(quant_float - 0.5)
        )
        
        # Clamp to [QUANTMIN, QUANTMAX].
        quant_clamped_float = tl.where(
            rounded_float < QUANTMIN,
            float(QUANTMIN),
            tl.where(rounded_float > QUANTMAX, float(QUANTMAX), rounded_float)
        )
        
        # Store as float; the PyTorch side converts to int8.
        tl.store(output_ptr + row_idx * H + i * BLOCK_SIZE + col_offsets, quant_clamped_float, mask=col_mask)


def rms_norm_quant_triton(input_tensor: torch.Tensor, gamma: torch.Tensor, beta: torch.Tensor,
                          quant_scale: torch.Tensor, quant_offset: torch.Tensor, epsilon: float = 1e-6,
                          hidden_size: int = None, quant_max: int = QUANTMAX, quant_min: int = QUANTMIN) -> torch.Tensor:
    """
    Triton implementation of RMS-norm + quantization, equivalent to the
    PyTorch reference rms_norm_quant_pytorch.

    :param input_tensor: input (N, hidden_size), float16/float32
    :param gamma: scale weights (hidden_size,), float-like
    :param beta: shift weights (hidden_size,), float-like
    :param quant_scale: quantization scale (1,) tensor; the kernel multiplies
        by its reciprocal
    :param quant_offset: quantization zero-point (1,) tensor, int8/float32
    :param epsilon: EPS added to the mean square for numerical stability
    :param hidden_size: if None, inferred from input_tensor's last dimension
    :param quant_max: upper clamp bound of the quant range (default 127)
    :param quant_min: lower clamp bound of the quant range (default -128)
    :return: output (N, hidden_size), int8, on the NPU device
    """
    if hidden_size is None:
        hidden_size = input_tensor.shape[-1]
    N = input_tensor.shape[0]

    # Promote to float32 and make contiguous so the kernel's flat pointer
    # arithmetic (row_idx * H + col) is valid.
    input_float = input_tensor.float().contiguous().npu()
    gamma_float = gamma.float().contiguous().npu()
    beta_float = beta.float().contiguous().npu()

    # Scalars handed to the kernel: reciprocal scale and zero-point offset.
    scale_val = 1.0 / quant_scale.item()
    offset_val = quant_offset.item()

    # Kernel writes quantized values as float32; converted to int8 below.
    output_float = torch.empty((N, hidden_size), dtype=torch.float32, device='npu')

    # Grid: one program instance per row.
    grid = (N,)

    # BUG FIX: the launch previously passed the module-level QUANTMAX/QUANTMIN
    # constants, silently ignoring the quant_max/quant_min arguments.
    rms_norm_quant_calc[grid](
        input_float, gamma_float, beta_float, output_float,
        quant_scale_f=scale_val,
        quant_offset_f=offset_val,
        QUANTMAX=quant_max, QUANTMIN=quant_min,
        N=N, H=hidden_size, EPS=epsilon,
        BLOCK_SIZE=1024,  # tunable; e.g. 512 may suit some hidden sizes better
    )
    # Values are already rounded and clamped in-kernel, so the cast is exact.
    output_int8 = output_float.to(torch.int8)
    return output_int8

def rms_norm_quant_pytorch(input_tensor, gamma, beta, quant_scale, quant_offset, epsilon, hidden_size,
                           quant_max=127, quant_min=-128):
    """Golden (pure PyTorch) RMS-norm + quantization reference.

    :param input_tensor: (N, hidden_size) activations, any float dtype
    :param gamma: (hidden_size,) scale weights
    :param beta: (hidden_size,) shift weights
    :param quant_scale: (1,) tensor; values are divided by it before rounding
    :param quant_offset: (1,) tensor; quantization zero-point
    :param epsilon: stability epsilon added to the mean square
    :param hidden_size: normalization width (divisor of the squared sum)
    :param quant_max: upper clamp bound (default 127, matching QUANTMAX)
    :param quant_min: lower clamp bound (default -128, matching QUANTMIN)
    :return: (N, hidden_size) int8 tensor
    """
    scale = 1.0 / quant_scale.item()
    offset = quant_offset.item()
    x = input_tensor.float()
    weight = gamma.float()
    # `keepdim` (not NumPy's `keepdims`) is the documented torch.sum keyword.
    square_sum = torch.sum(torch.square(x), dim=-1, keepdim=True)
    factor = 1.0 / torch.sqrt(square_sum / hidden_size + epsilon)
    output = x * factor * weight
    output = (output + beta.float()) * scale + offset
    output = torch.round(output)  # NB: rounds half to even
    return torch.clamp(output, quant_min, quant_max).to(torch.int8)

# 示例测试（与原 PyTorch 黄金实现比较）
# Example test: compare the Triton kernel against the PyTorch golden output.
if __name__ == "__main__":
    # Fixed seed for reproducible comparison.
    torch.manual_seed(42)

    # Problem sizes mirroring the original example.
    N = 1024
    hidden_size = 7168
    input_tensor = torch.randn(N, hidden_size, dtype=torch.float16, device='npu')
    gamma = torch.randn(hidden_size, dtype=torch.float16, device='npu')
    beta = torch.randn(hidden_size, dtype=torch.float16, device='npu')
    quant_scale = torch.tensor([0.01], device='npu')  # example scale
    quant_offset = torch.tensor([0.0], device='npu')
    epsilon = 1e-6

    # Golden PyTorch output.
    pytorch_out = rms_norm_quant_pytorch(input_tensor, gamma, beta, quant_scale, quant_offset, epsilon, hidden_size)

    # Triton output.
    triton_out = rms_norm_quant_triton(input_tensor, gamma, beta, quant_scale, quant_offset, epsilon, hidden_size)

    print(f"Triton 输出形状: {triton_out.shape}")
    # The kernel rounds half away from zero while torch.round rounds half to
    # even, so off-by-one disagreements at exact .5 boundaries are expected.
    # BUG FIX: the previous atol=1e-2 demanded exact integer equality despite
    # the comment claiming tolerance; atol=1.0 actually tolerates off-by-one.
    is_match = torch.allclose(pytorch_out.float(), triton_out.float(), atol=1.0)
    print(f"与 PyTorch 匹配: {is_match}")
    if not is_match:
        mismatch_count = (pytorch_out != triton_out).sum().item()
        print(f"不匹配数量: {mismatch_count} / {pytorch_out.numel()} ({mismatch_count / pytorch_out.numel() * 100:.4f}%)")