import triton
import torch
# import torch_npu
import triton.language as tl
import numpy as np
import random

DEVICE = "npu"

def process_deq_scale(deq_scale: torch.Tensor) -> torch.Tensor:
    """Reinterpret the raw bits of a float32 deq-scale tensor as int32, widened to int64.

    The quantized-matmul path expects the dequant scale packed as an integer
    whose bit pattern equals the original float32 value; the result is a 1-D
    int64 tensor regardless of the input shape.
    """
    # np.frombuffer over the serialized bytes yields a read-only int32 view of
    # the same bit pattern; copy so torch.from_numpy gets a writable array.
    bits = np.frombuffer(deq_scale.numpy().tobytes(), dtype=np.int32)
    return torch.from_numpy(bits.copy()).to(torch.int64)

@triton.jit
def matmul_bias_scale(
    a_ptr, b_ptr, c_ptr, bias_ptr, de_scale_ptr,
    M, N, K,
    stride_am, stride_ak,
    stride_bk, stride_bn,
    stride_cm, stride_cn,
    BLOCK_SIZE_M: tl.constexpr,
    BLOCK_SIZE_N: tl.constexpr,
    BLOCK_SIZE_K: tl.constexpr,
    GROUP_SIZE_M: tl.constexpr,
):
    """Compute C = (A @ B + bias) * de_scale with grouped program ordering.

    Shapes: A (M, K), B (K, N), C (M, N); bias and de_scale are per-column
    (N,) vectors. bias is added to the fp32 accumulator BEFORE scaling,
    matching the reference (mm + bias) * de_scale ordering in the test driver.
    """
    pid = tl.program_id(axis=0)
    num_pid_m = tl.cdiv(M, BLOCK_SIZE_M)
    num_pid_n = tl.cdiv(N, BLOCK_SIZE_N)
    # Map the linear pid onto a (pid_m, pid_n) tile using grouped ordering
    # (same scheme as the Triton matmul tutorial) for better cache reuse.
    num_pid_in_group = GROUP_SIZE_M * num_pid_n
    group_id = pid // num_pid_in_group
    first_pid_m = group_id * GROUP_SIZE_M
    group_size_m = min(num_pid_m - first_pid_m, GROUP_SIZE_M)
    pid_m = first_pid_m + ((pid % num_pid_in_group) % group_size_m)
    pid_n = (pid % num_pid_in_group) // group_size_m

    offsets_m = pid_m * BLOCK_SIZE_M + tl.arange(0, BLOCK_SIZE_M)
    offsets_n = pid_n * BLOCK_SIZE_N + tl.arange(0, BLOCK_SIZE_N)
    offsets_k = tl.arange(0, BLOCK_SIZE_K)
    a_ptrs_base = a_ptr + (offsets_m[:, None] * stride_am + offsets_k[None, :] * stride_ak)
    b_ptrs_base = b_ptr + (offsets_k[:, None] * stride_bk + offsets_n[None, :] * stride_bn)
    msk_m = offsets_m < M
    msk_n = offsets_n < N

    accumulator = tl.zeros((BLOCK_SIZE_M, BLOCK_SIZE_N), dtype=tl.float32)
    num_blocks = tl.cdiv(K, BLOCK_SIZE_K)
    for k in range(0, num_blocks):
        a_ptrs = a_ptrs_base + k * BLOCK_SIZE_K * stride_ak
        b_ptrs = b_ptrs_base + k * BLOCK_SIZE_K * stride_bk
        # K-tail mask for the last partial block. BUG FIX: the original used
        # the Python `and` operator between tensor masks; `and` short-circuits
        # on truthiness and is not a valid elementwise op on Triton tensors —
        # the elementwise `&` must be used instead.
        msk_k = offsets_k < K - k * BLOCK_SIZE_K
        a = tl.load(a_ptrs, mask=msk_m[:, None] & msk_k[None, :], other=0.0)
        b = tl.load(b_ptrs, mask=msk_k[:, None] & msk_n[None, :], other=0.0)
        accumulator += tl.dot(a, b)

    # Per-column epilogue: add bias in fp32, then apply the dequant scale.
    bias = tl.load(bias_ptr + offsets_n, mask=msk_n, other=0.0)
    de_scale = tl.load(de_scale_ptr + offsets_n, mask=msk_n, other=1.0)
    accumulator = (accumulator + tl.cast(bias, tl.float32)) * de_scale

    c_ptrs = c_ptr + offsets_m[:, None] * stride_cm + offsets_n[None, :] * stride_cn
    c_mask = msk_m[:, None] & msk_n[None, :]
    tl.store(c_ptrs, accumulator, mask=c_mask)


@triton.jit
def matmul_kernel(
    A_PTR,
    B_PTR,
    C_PTR,
    bias_ptr, de_scale_ptr,
    M,
    N,
    K,
    stride_am,
    stride_ak,
    stride_bk,
    stride_bn,
    stride_cm,
    stride_cn,
    BLOCK_M: tl.constexpr,
    BLOCK_N: tl.constexpr,
    BLOCK_K: tl.constexpr,
):
    """Triton matmul kernel with epilogue: C = (A @ B + bias) * de_scale.

    - A: (M, K)
    - B: (K, N)
    - C: (M, N)
    bias and de_scale are per-column (N,) vectors; bias is added to the fp32
    accumulator before the per-column scale is applied.
    """
    # Program IDs: 2-D launch grid, one program per (BLOCK_M, BLOCK_N) tile.
    pid_m = tl.program_id(0)
    pid_n = tl.program_id(1)

    # Row/column offsets of this tile.
    offs_m = pid_m * BLOCK_M + tl.arange(0, BLOCK_M)
    offs_n = pid_n * BLOCK_N + tl.arange(0, BLOCK_N)
    offs_k = tl.arange(0, BLOCK_K)

    # Pointers to the first K-slab of A and B for this tile.
    a_ptr = A_PTR + (offs_m[:, None] * stride_am + offs_k[None, :] * stride_ak)
    b_ptr = B_PTR + (offs_k[:, None] * stride_bk + offs_n[None, :] * stride_bn)

    # fp32 accumulator.
    acc = tl.zeros((BLOCK_M, BLOCK_N), dtype=tl.float32)

    # March over K in BLOCK_K steps.
    lo = 0
    hi = tl.cdiv(K, BLOCK_K) * BLOCK_K
    for start in range(lo, hi, BLOCK_K):
        # BUG FIX: the K mask must account for the current block offset
        # (`start`). The original compared the unshifted offs_k against K, so
        # the final partial block read past the end of A/B whenever
        # K % BLOCK_K != 0.
        mask_a = (offs_m[:, None] < M) & (offs_k[None, :] + start < K)
        mask_b = (offs_k[:, None] + start < K) & (offs_n[None, :] < N)
        a = tl.load(a_ptr, mask=mask_a, other=0.0)
        b = tl.load(b_ptr, mask=mask_b, other=0.0)

        acc += tl.dot(a, b)

        # Advance both pointers one K-slab.
        a_ptr += BLOCK_K * stride_ak
        b_ptr += BLOCK_K * stride_bk

    # Epilogue: add per-column bias in fp32, then apply the per-column scale.
    bias = tl.load(bias_ptr + offs_n, mask=offs_n < N, other=0.0)
    de_scale = tl.load(de_scale_ptr + offs_n, mask=offs_n < N, other=1.0)
    acc = (acc + tl.cast(bias, tl.float32)) * de_scale

    # Masked store of the output tile.
    c_ptr = C_PTR + (offs_m[:, None] * stride_cm + offs_n[None, :] * stride_cn)
    mask_c = (offs_m[:, None] < M) & (offs_n[None, :] < N)
    tl.store(c_ptr, acc, mask=mask_c)

if __name__ == "__main__":
    from test_rmsnorm import rms_norm_quant_triton, rms_norm_quant_pytorch

    # Fix all RNG seeds once for reproducibility (the original seeded torch twice).
    torch.manual_seed(42)
    random.seed(42)
    np.random.seed(42)

    M = 1024
    N = 2112
    K = 7168

    input_tensor = torch.from_numpy(np.random.uniform(-2.0, 2.0, size=(M, K))).to(torch.float16)
    gamma = torch.from_numpy(np.random.uniform(-1.0, 1.0, size=(K))).to(torch.float16)
    beta = torch.from_numpy(np.random.randint(-2, 2, (K)).astype(np.float16)).to(torch.float16)
    # quant scale 1.0 / offset 0 -> the quant stage is effectively a pass-through.
    quant_scale1 = torch.tensor([1.0], dtype=torch.float16)
    quant_offset1 = torch.tensor([0], dtype=torch.int8)
    epsilon = 1e-6
    pytorch_out = rms_norm_quant_pytorch(input_tensor, gamma, beta, quant_scale1, quant_offset1, epsilon, K)

    # BUG FIX: the weight must be (K, N) so that (M, K) @ (K, N) -> (M, N).
    # The original created it as (N, K), which makes torch.matmul below raise a
    # shape-mismatch error (and the strides passed to the kernel only describe
    # a (K, N) layout).
    wdqkv = torch.from_numpy(np.random.uniform(-2.0, 2.0, size=(K, N))).to(torch.int8)
    bias1 = torch.from_numpy(np.random.randint(-10, 10, (1, N)).astype(np.int32)).to(torch.int32)
    de_scale1 = torch.rand((N), dtype=torch.float32) / 1000

    a_cpu = pytorch_out.clone().cpu()
    a_npu = pytorch_out.clone().npu()
    b_cpu = wdqkv.clone().cpu()

    # CPU reference. Note the dtype round-trip: float32 -> int32 (+bias) ->
    # float32 (*scale) -> float16, mirroring the kernel's epilogue ordering.
    mm1_out = torch.matmul(a_cpu.to(torch.float32), b_cpu.to(torch.float32))
    mm1_out = mm1_out.to(torch.int32) + bias1
    mm1_out = (mm1_out.to(torch.float32) * de_scale1).to(torch.float16)

    TILE_SIZE_M = 64
    TILE_SIZE_N = 64
    TILE_SIZE_K = 64
    GROUP_SIZE_M = 8

    triton_float32 = torch.empty((M, N), dtype=torch.float32, device=DEVICE)
    # 1-D grid: one program per (TILE_M, TILE_N) output tile, grouped ordering.
    grid = (triton.cdiv(M, TILE_SIZE_M) * triton.cdiv(N, TILE_SIZE_N), )
    b_weight = wdqkv.clone()
    matmul_bias_scale[grid](
        a_npu.to(torch.float32), b_weight.to(torch.float32).npu(), triton_float32,
        bias1.npu(), de_scale1.npu(),
        M, N, K,
        stride_am=a_npu.stride(0), stride_ak=a_npu.stride(1),
        stride_bk=b_weight.stride(0), stride_bn=b_weight.stride(1),
        stride_cm=triton_float32.stride(0), stride_cn=triton_float32.stride(1),
        BLOCK_SIZE_M=TILE_SIZE_M,
        BLOCK_SIZE_N=TILE_SIZE_N,
        BLOCK_SIZE_K=TILE_SIZE_K,
        GROUP_SIZE_M=GROUP_SIZE_M
    )
    c_triton_cpu = triton_float32.cpu().clone().to(torch.float16)

    atol = 1e-3  # float tolerance
    is_match = torch.allclose(mm1_out.float(), c_triton_cpu.float(), atol=atol)
    print(f"与 PyTorch 匹配: {is_match}")
    if not is_match:
        print("=== 详细不匹配分析 ===")
        diff = torch.abs(mm1_out.float() - c_triton_cpu.float())
        mismatch_mask = diff > atol
        mismatch_count = mismatch_mask.sum().item()
        print(f"总不匹配数量: {mismatch_count} / {mm1_out.numel()} ({mismatch_count / mm1_out.numel() * 100:.4f}%)")
        # BUG FIX: compute row/col from the flat argmax index; the original
        # printed the literal text "// N=..." instead of the row index.
        flat_idx = torch.argmax(diff).item()
        print(f"最大差异: {diff.max().item():.6f} (位置: {flat_idx // N}, {flat_idx % N})")
        print(f"平均差异 (不匹配位置): {diff[mismatch_mask].mean().item():.6f}")

        # Show details for the first 20 mismatching positions.
        mismatch_indices = torch.nonzero(mismatch_mask, as_tuple=False)
        print(f"前20个不匹配位置详情 (i, j, PyTorch值, Triton值, 差异):")
        for idx in range(min(20, len(mismatch_indices))):
            i, j = mismatch_indices[idx]
            pyt_val = mm1_out[i, j].item()
            tri_val = c_triton_cpu[i, j].item()
            diff_val = abs(pyt_val - tri_val)
            print(f"  位置 [{i}, {j}]: PyTorch={pyt_val:.6f}, Triton={tri_val:.6f}, 差异={diff_val:.6f}")

        if mismatch_count > 100:
            print("... (省略更多不匹配，建议检查特定行/列模式)")
    else:
        print("测试通过！所有元素匹配（全常量数据）")
        print(f"PyTorch 输出样本 (前2x2): {mm1_out[:2, :2]}")
        print(f"Triton 输出样本 (前2x2): {c_triton_cpu[:2, :2]}")