import torch 
import torch_npu
import triton
import numpy as np
import triton.language as tl
from triton.runtime.driver import driver
from mlapo_torch import QUANTMAX, QUANTMIN


@triton.jit
def rms_norm_quant(
    input_ptr, # float16 / bfloat16; (N, H) row-major input activations
    gamma_ptr, # float16 / bfloat16; (H,) RMSNorm scale
    beta_ptr, # float16 / bfloat16; (H,) shift added after the norm
#    quant_scale_f, 
#    quant_offset_f, 
    quant_scale_ptr, # float16 / bfloat16; scalar quantization scale
    quant_offset_ptr, # int8; scalar quantization zero-point
    output_ptr, # int8; (N, H) quantized output
    QUANTMAX: tl.constexpr,  # upper clamp bound of the quantized range
    QUANTMIN: tl.constexpr,  # lower clamp bound of the quantized range
    N: tl.constexpr, 
    H: tl.constexpr,
    EPS: tl.constexpr,
    BLOCK_SIZE: tl.constexpr,
):
    """Fused RMSNorm + per-tensor int8 quantization.

    For each row x of the (N, H) input this computes
        y   = (x / sqrt(mean(x^2) + EPS)) * gamma + beta
        out = clamp(round(y / quant_scale + quant_offset),
                    QUANTMIN, QUANTMAX)
    and stores `out` as int8.  Rows are distributed over programs with a
    stride of `program_num`; each row is processed in BLOCK_SIZE chunks
    (pass 1 accumulates the sum of squares, pass 2 normalizes, applies the
    affine transform and quantizes).
    """
    program_idx = tl.program_id(axis = 0)
    program_num = tl.num_programs(axis = 0)

    # input_type = input_ptr.type.element_ty
    # Accumulate in fp32 regardless of the half-precision input dtype.
    cal_type = tl.float32
    
    quant_scale_f = tl.load(quant_scale_ptr, True).cast(cal_type)
    # Pre-invert the scale so the inner loop multiplies instead of divides.
    quant_scale_f = 1.0 / quant_scale_f
    quant_offset_f = tl.load(quant_offset_ptr, True).cast(cal_type)
    
    for row_idx in tl.range(program_idx, N, program_num): 
        
        square_sum = 0.0
        
        input_row = input_ptr + H * row_idx
        gamma_row = gamma_ptr
        beta_row = beta_ptr
        output_row = output_ptr + H * row_idx

        # Pass 1: accumulate sum(x^2) over the row, BLOCK_SIZE at a time.
        block_num = tl.cdiv(H, BLOCK_SIZE)
        for block_idx in tl.range(0, block_num): 
            start = block_idx * BLOCK_SIZE
            offsets = start + tl.arange(0, BLOCK_SIZE)
            mask = offsets < H
            block_input = tl.load(input_row + offsets, mask = mask).cast(cal_type)
            square_block = block_input * block_input
            square_block_sum = tl.sum(square_block)
            square_sum += square_block_sum

        square_sum_mean = square_sum / H
        denominator = tl.sqrt(square_sum_mean + EPS)
        factor = 1.0 / denominator

        # Pass 2: normalize, affine-transform, quantize and store.
        for block_idx in tl.range(0, block_num): 
            start = block_idx * BLOCK_SIZE
            offsets = start + tl.arange(0, BLOCK_SIZE)
            mask = offsets < H
            block_input = tl.load(input_row + offsets, mask = mask).cast(cal_type)
            block_gamma = tl.load(gamma_row + offsets, mask = mask).cast(cal_type)
            block_rms_norm = block_input * block_gamma * factor
            block_beta = tl.load(beta_row + offsets, mask = mask).cast(cal_type)
            block_output = (block_rms_norm + block_beta) * quant_scale_f + quant_offset_f
#            tl.store(output_row + offsets, block_output, mask = mask)
            
            # Round half away from zero (symmetric rounding about 0).
            block_output_rounded = tl.where(
                block_output >= 0.0, 
                tl.floor(block_output + 0.5), 
                tl.ceil(block_output - 0.5)
            )
            # block_output_rounded = block_output

            block_output_clamp = tl.clamp(block_output_rounded, float(QUANTMIN), float(QUANTMAX))
            block_output_quant = tl.cast(block_output_clamp, tl.int8)
            tl.store(output_row + offsets, block_output_quant, mask = mask)



# 新写
@triton.jit
def matmul_bias_scale(
    a_ptr, # int8
    b_ptr, # int8/bf16
    bias_ptr, # int32
    de_scale_ptr,   # float32
    c_ptr, # int32 -> float32
    # 
    M, 
    N, 
    K,
    # 
    stride_a0, 
    stride_a1,  # == 1
    stride_b0, 
    stride_b1,  # == 1
    stride_c0, 
    stride_c1,  # == 1
    # 
    # transpose_a : tl.constexpr, 
    transpose_b : tl.constexpr, 
    BLOCK_SIZE_M: tl.constexpr,
    BLOCK_SIZE_N: tl.constexpr,
    BLOCK_SIZE_K: tl.constexpr,
    GROUP_SIZE_M: tl.constexpr,
):
    # a int8, b int8; a bf16, b bf16;
    a_type = a_ptr.dtype.element_ty
    b_type = b_ptr.dtype.element_ty
    acc_type = tl.int32 if b_type == tl.int8 else tl.float32
    # mm_type = tl.int8 if b_type == tl.int8 else tl.float32
    # ab_ne_type = tl.float32 # only: a fp16, b bf16
    #
    cal_type = tl.float32
    out_type = c_ptr.dtype.element_ty

    stride_am = stride_a0
    stride_ak = stride_a1

    stride_bk = stride_b0
    stride_bn = stride_b1
    if transpose_b: 
        stride_bk = stride_b1
        stride_bn = stride_b0
    
    stride_cm = stride_c0
    stride_cn = stride_c1

    program_idx = tl.program_id(axis = 0)
    program_num = tl.num_programs(axis = 0 )
    num_block_m = tl.cdiv(M, BLOCK_SIZE_M)
    num_block_n = tl.cdiv(N, BLOCK_SIZE_N)
    num_block_in_group = GROUP_SIZE_M * num_block_n

    num_block = num_block_m * num_block_n

    for block_idx in tl.range(program_idx, num_block, program_num): 

        group_idx = block_idx // num_block_in_group
        cur_group_first_block_row_idx = group_idx * GROUP_SIZE_M
        group_size_m_actual = min(num_block_m - cur_group_first_block_row_idx, GROUP_SIZE_M)
        block_idx_in_group = block_idx % num_block_in_group

        block_idx_m = cur_group_first_block_row_idx + block_idx_in_group % group_size_m_actual
        block_idx_n = block_idx_in_group // group_size_m_actual

        offs_m = block_idx_m * BLOCK_SIZE_M + tl.arange(0, BLOCK_SIZE_M)
        offs_n = block_idx_n * BLOCK_SIZE_N + tl.arange(0, BLOCK_SIZE_N)
        offs_k = tl.arange(0, BLOCK_SIZE_K)

        offs_a_2d = offs_k[None, :] * stride_ak + offs_m[:, None] * stride_am
        a_ptrs_base = a_ptr + offs_a_2d
        '''
        FIXME tl.where要求两个选项shape相同
        '''
        # offs_b_2d = tl.where(
        #     transpose_b, 
        #     offs_n[:, None] * stride_bn + offs_k[None, :] * stride_bk, # DN
        #     offs_n[None, :] * stride_bn + offs_k[:, None] * stride_bk  # ND
        # )
        offs_b_2d = offs_n[None, :] * stride_bn + offs_k[:, None] * stride_bk
        if transpose_b: 
            offs_b_2d = offs_n[:, None] * stride_bn + offs_k[None, :] * stride_bk
        
        b_ptrs_base = b_ptr + offs_b_2d

        mask_m = offs_m < M
        mask_n = offs_n < N

        # accumulator = tl.zeros( (BLOCK_SIZE_M, BLOCK_SIZE_N), dtype = tl.int32 )
        accumulator = tl.zeros( (BLOCK_SIZE_M, BLOCK_SIZE_N), dtype = acc_type )
        num_block_k = tl.cdiv(K, BLOCK_SIZE_K)

        for block_idx_k in tl.range(0, num_block_k): 

            a_ptrs = a_ptrs_base + block_idx_k * BLOCK_SIZE_K * stride_ak
            b_ptrs = b_ptrs_base + block_idx_k * BLOCK_SIZE_K * stride_bk

            offs_k_move = block_idx_k * BLOCK_SIZE_K + tl.arange(0, BLOCK_SIZE_K)
            mask_k = offs_k_move < K

            mask_a = mask_k[None, :] and mask_m[:, None]  # ND

            # mask_b = tl.where(
            #     transpose_b, 
            #     mask_n[:, None] and mask_k[None, :], # DN 
            #     mask_n[None, :] and mask_k[:, None]  # ND
            # )
            mask_b = mask_n[None, :] and mask_k[:, None]
            if transpose_b: 
                mask_b = mask_n[:, None] and mask_k[None, :]

            a = tl.load(
                a_ptrs, 
                mask = mask_a, 
                other = 0.0, 
            )#.cast(ab_ne_type)

            '''
            "and" "not" not supported
            tl.where condition fail? 
            '''

            # if a_type == b_type: 
            #     a = a
            # else: 
            #     a = a.cast(ab_ne_type)

            # a = tl.where(
            #     a_type == b_type, 
            #     a, 
            #     a.cast(ab_ne_type)
            # )    

            # if a_type == tl.float16: 
            #     a = a.cast(ab_ne_type)
            
            b_load = tl.load(
                b_ptrs, 
                mask = mask_b, 
                other = 0.0, 
            )#.cast(ab_ne_type)

            # if a_type == b_type: 
            #     b_load = b_load
            # else: 
            #     b_load = b_load.cast(ab_ne_type)

            # b_load = tl.where(
            #     a_type == b_type, 
            #     b_load, 
            #     b_load.cast(ab_ne_type)
            # )

            # if a_type == tl.float16: 
            #     b_load = b_load.cast(ab_ne_type)

            # if not a_type == b_type:  
            #     a = a.cast(ab_ne_type)
            #     b_load = b_load.cast(ab_ne_type)
            

            # b = tl.where(
            #     transpose_b, 
            #     b_load.trans(), 
            #     b_load
            # )
            b = b_load
            if transpose_b: 
                b = b_load.trans()
            accumulator = tl.dot(a, b, accumulator)
        
        # 
        # if not cal_type == acc_type: 
        #     accumulator = accumulator.cast(cal_type)
        # accumulator = tl.where(
        #     cal_type == acc_type, 
        #     accumulator, 
        #     accumulator.cast(cal_type)
        # )
        if cal_type == acc_type: 
            accumulator = accumulator
        else: 
            accumulator = accumulator.cast(cal_type)

        offs_bias_2d = offs_n[None, :]
        mask_bias = mask_n[None, :]
        bias = tl.load(
            bias_ptr + offs_bias_2d, 
            mask = mask_bias, 
            other = 0.0).cast(cal_type)
        accumulator_bias = accumulator + bias

        offs_de_scale = offs_n
        mask_de_scale = mask_n
        de_scale = tl.load(
            de_scale_ptr + offs_de_scale, 
            mask = mask_de_scale, 
            other = 0.0
        ).cast(cal_type)
        accumulator_de_scale = accumulator_bias * de_scale
        #
        
        offs_c_2d = offs_n[None, :] * stride_cn + offs_m[:, None] * stride_cm
        c_ptrs_base = c_ptr + offs_c_2d

        mask_c = mask_n[None, :] and mask_m[:, None]

        tl.store(
            c_ptrs_base, 
            accumulator_de_scale.cast(out_type), 
            mask = mask_c
        )


@triton.jit
def rms_norm_rope_rac_cal_kernel(
    kv_input,  # (tokens, hidden_size_kv) kv rows to process
    gamma,  # (hidden_size_rms_kv,) RMSNorm scale for the non-rope part
    cos,  # (tokens, hidden_size_rope_k) RoPE cosine table
    sin,  # (tokens, hidden_size_rope_k) RoPE sine table
    slot_mapping,  # (tokens,) destination row in kv_cache per token
    kv_cache,  # output cache; rows of width hidden_size_kv
    tokens : tl.constexpr, 
    hidden_size_kv : tl.constexpr, 
    hidden_size_rope_k : tl.constexpr, 
    epsilon : tl.constexpr, 
    BLOCK_SIZE : tl.constexpr
):
    """Fused RMSNorm + RoPE + reshape-and-cache for the kv rows.

    For each token row this kernel:
      * looks up the destination slot from slot_mapping,
      * RMS-normalizes the leading (hidden_size_kv - hidden_size_rope_k)
        elements with `gamma` and writes them to kv_cache[slot],
      * applies rotate-half RoPE to the trailing hidden_size_rope_k
        elements (left' = cos*left - sin*right, right' = cos*right +
        sin*left) using the per-token cos/sin rows, writing the result to
        the same kv_cache row.
    """
    program_idx = tl.program_id(axis = 0)
    program_num = tl.num_programs(axis = 0)
    
    output_type = kv_cache.dtype.element_ty
    # Compute in fp32; cast back to the cache dtype only on store.
    cal_type = tl.float32
    
    hidden_size_rms_kv = hidden_size_kv - hidden_size_rope_k
    
    for row_idx in tl.range(program_idx, tokens, program_num): 
        ## slot pos
        # row_idx < tokens always holds inside this loop, so the mask is
        # effectively always true; it only guards the scalar load form.
        slot_offset = row_idx
        slot_mask = slot_offset < tokens
        slot = tl.load(slot_mapping + slot_offset, mask = slot_mask)

        ## rms_norm
        square_sum = 0.0

        input_rms_ptr = kv_input + row_idx * hidden_size_kv
        gamma_ptr = gamma
        output_rms_ptr = kv_cache + slot * hidden_size_kv

        # Pass 1: accumulate sum(x^2) over the non-rope prefix.
        rms_block_num = tl.cdiv(hidden_size_rms_kv, BLOCK_SIZE)
        for rms_block_idx in tl.range(0, rms_block_num): 
            start = rms_block_idx * BLOCK_SIZE
            offsets = start + tl.arange(0, BLOCK_SIZE)
            mask = offsets < hidden_size_rms_kv
            block = tl.load(input_rms_ptr + offsets, mask = mask).cast(cal_type)
            # block = tl.cast(block, cal_type)
            square_block = block * block
            square_block_sum = tl.sum(square_block)
            square_sum += square_block_sum

        square_sum_mean = square_sum / hidden_size_rms_kv
        denominator = tl.sqrt(square_sum_mean + epsilon)
        factor = 1.0 / denominator

        # Pass 2: normalize, scale by gamma and store into the cache row.
        for rms_block_idx in tl.range(0, rms_block_num): 
            start = rms_block_idx * BLOCK_SIZE
            offsets = start + tl.arange(0, BLOCK_SIZE)
            mask = offsets < hidden_size_rms_kv
            block_input = tl.load(input_rms_ptr + offsets, mask = mask).cast(cal_type)
            # block_input = tl.cast(block_input, cal_type)
            block_gamma = tl.load(gamma_ptr + offsets, mask = mask).cast(cal_type)
            # block_gamma = tl.cast(block_gamma, cal_type)
            block_output = (block_input * block_gamma * factor)#.cast(output_type)
            block_output = tl.cast(block_output, output_type)
            tl.store(output_rms_ptr + offsets, block_output, mask = mask)
        
        ## rope

        # The rope section starts right after the RMSNorm prefix, both in
        # the input row and in the destination cache row.
        input_rope_ptr  = row_idx * hidden_size_kv + hidden_size_rms_kv + kv_input
        output_rope_ptr = slot    * hidden_size_kv + hidden_size_rms_kv + kv_cache
        cos_ptr         = row_idx * hidden_size_rope_k + cos
        sin_ptr         = row_idx * hidden_size_rope_k + sin
        
        # Rotate-half pairing: element i pairs with element i + half.
        hidden_size_rope_k_half = hidden_size_rope_k // 2
        rope_block_num_half = tl.cdiv(hidden_size_rope_k_half, BLOCK_SIZE)

        for rope_block_idx in tl.range(0, rope_block_num_half): 
            input_rope_start = rope_block_idx * BLOCK_SIZE
            offsets_left = input_rope_start + tl.arange(0, BLOCK_SIZE)
            offsets_right= input_rope_start + tl.arange(0, BLOCK_SIZE) + hidden_size_rope_k_half

            mask_left = offsets_left < hidden_size_rope_k_half
            mask_right = offsets_right < hidden_size_rope_k 

            block_input_left  = tl.load(input_rope_ptr + offsets_left,  mask = mask_left).cast(cal_type)
            block_input_right = tl.load(input_rope_ptr + offsets_right, mask = mask_right).cast(cal_type)
            # block_input_left = tl.cast(block_input_left, cal_type)
            # block_input_right = tl.cast(block_input_right, cal_type)
#            block_input_left  = tl.load(input_rope_ptr + offsets_left,  mask = mask_left)
#            block_input_right = tl.load(input_rope_ptr + offsets_right, mask = mask_right)

            # left' = cos*left - sin*right
            block_cos_left = tl.load(cos_ptr + offsets_left, mask = mask_left).cast(cal_type)
            block_sin_left = tl.load(sin_ptr + offsets_left, mask = mask_left).cast(cal_type)
            # block_cos_left = tl.cast(block_cos_left, cal_type)
            # block_sin_left = tl.cast(block_sin_left, cal_type)
            block_output_left = (block_cos_left * block_input_left - block_sin_left * block_input_right)#.cast(output_type)
            block_output_left = tl.cast(block_output_left, output_type)
            tl.store(output_rope_ptr + offsets_left, block_output_left, mask = mask_left)

            # right' = cos*right + sin*left
            block_cos_right = tl.load(cos_ptr + offsets_right, mask = mask_right).cast(cal_type)
            block_sin_right = tl.load(sin_ptr + offsets_right, mask = mask_right).cast(cal_type)
            # block_cos_right = tl.cast(block_cos_right, cal_type)
            # block_sin_right = tl.cast(block_sin_right, cal_type)
            block_output_right = (block_cos_right * block_input_right + block_sin_right * block_input_left)#.cast(output_type)
            block_output_right = tl.cast(block_output_right, output_type)
            tl.store(output_rope_ptr + offsets_right, block_output_right, mask = mask_right)


@triton.jit
def matmul_ein_sum_triton(
    a_ptr, # (M, B, K) 读-> (B, M, K)
    b_ptr, # (B, K, N)
    c_ptr, # (M, B, N) 写-> (B, M, N)
    #
    B, # head_num_q
    M, # tokens 
    N, # hidden_size_wuk_head
    K, # hidden_size_nope_q
    stride_a0, 
    stride_a1, 
    stride_a2, 
    stride_b0, 
    stride_b1,
    stride_b2, 
    stride_c0, 
    stride_c1, 
    stride_c2, 
    # 
    transpose_b : tl.constexpr, 
    BLOCK_SIZE_M : tl.constexpr, 
    BLOCK_SIZE_N : tl.constexpr, 
    BLOCK_SIZE_K : tl.constexpr, 
    GROUP_SIZE_M : tl.constexpr,
):
    cal_type = tl.float32
    output_type = c_ptr.dtype.element_ty

    stride_a_B = stride_a1
    stride_a_M = stride_a0
    stride_a_K = stride_a2
    
    stride_b_B = stride_b0
    stride_b_K = stride_b1
    stride_b_N = stride_b2
    if transpose_b: 
      stride_b_K = stride_b2
      stride_b_N = stride_b1
      
    stride_c_B = stride_c1
    stride_c_M = stride_c0
    stride_c_N = stride_c2
  
    program_idx = tl.program_id(axis = 0)
    program_num = tl.num_programs(axis = 0)
    
    num_block_m = tl.cdiv(M, BLOCK_SIZE_M)
    num_block_n = tl.cdiv(N, BLOCK_SIZE_N)
    num_block_in_group = GROUP_SIZE_M * num_block_n
    
    '''
    # groupedmatmul分核框架

    start_program_idx = 0 # 每个group中第一个block对应的program_idx（这个很好维护）
    start_block_idx = 0 # 每个group中当前program负责的第一个block_idx(未必存在)。用start_program_idx计算start_block_idx（对于每个program，主要是维护这个）
    
    for group_idx in range(0, B): 

        start_block_idx = program_idx - start_program_idx
        if start_block_idx < 0: 
            start_block_idx += program_num

        num_block_in_group = xx
        
        for block_idx in range(start_block_idx, num_block_in_group, program_num): 
            pass

        start_program_idx = (start_program_idx + num_block_in_batch) % program_num
    '''

    start_program_idx = 0  # 每个batch中第一个block对应的program_idx
    start_block_idx = 0  # 每个batch中当前program负责的第一个block_idx(未必存在)。用start_program_idx计算start_block_idx
    num_block_in_batch = num_block_m * num_block_n

    for batch_idx in tl.range(0, B): 
        
        start_block_idx = program_idx - start_program_idx
        if start_block_idx < 0: 
            start_block_idx += program_num

        
        a_ptr_batch = a_ptr + batch_idx * stride_a_B
        b_ptr_batch = b_ptr + batch_idx * stride_b_B
        c_ptr_batch = c_ptr + batch_idx * stride_c_B
        
        for block_idx in tl.range(start_block_idx, num_block_in_batch, program_num): 
        
            group_idx = block_idx // num_block_in_group
            cur_group_first_block_row_idx = group_idx * GROUP_SIZE_M
            group_size_m_actual = min(num_block_m - cur_group_first_block_row_idx, GROUP_SIZE_M)
            block_idx_in_group = block_idx % num_block_in_group
    
            block_idx_m = cur_group_first_block_row_idx + block_idx_in_group % group_size_m_actual
            block_idx_n = block_idx_in_group // group_size_m_actual
            
            offs_m = block_idx_m * BLOCK_SIZE_M + tl.arange(0, BLOCK_SIZE_M)
            offs_n = block_idx_n * BLOCK_SIZE_N + tl.arange(0, BLOCK_SIZE_N)
            offs_k = tl.arange(0, BLOCK_SIZE_K)
            
            offs_a_2d = offs_k[None, :] * stride_a_K + offs_m[:, None] * stride_a_M  # ND 
            a_ptrs_base = a_ptr_batch + offs_a_2d
            
            # offs_b_2d = tl.where(
            #     transpose_b, 
            #     offs_n[:, None] * stride_b_N + offs_k[None, :] * stride_b_K, # DN
            #     offs_n[None, :] * stride_b_N + offs_k[:, None] * stride_b_K  # ND
            # )
            offs_b_2d = offs_n[None, :] * stride_b_N + offs_k[:, None] * stride_b_K
            if transpose_b: 
                offs_b_2d = offs_n[:, None] * stride_b_N + offs_k[None, :] * stride_b_K

            b_ptrs_base = b_ptr_batch + offs_b_2d
            
            mask_m = offs_m < M
            mask_n = offs_n < N
            
            accumulator = tl.zeros( (BLOCK_SIZE_M, BLOCK_SIZE_N), dtype = cal_type )
            num_block_k = tl.cdiv(K, BLOCK_SIZE_K)
            
            for block_idx_k in tl.range(0, num_block_k): 
            
                a_ptrs = a_ptrs_base + block_idx_k * BLOCK_SIZE_K * stride_a_K
                b_ptrs = b_ptrs_base + block_idx_k * BLOCK_SIZE_K * stride_b_K
                
                offs_k_move = block_idx_k * BLOCK_SIZE_K + tl.arange(0, BLOCK_SIZE_K)
                mask_k = offs_k_move < K
                
                mask_a = mask_k[None, :] and mask_m[:, None]  # ND
                
                # mask_b = tl.where(
                #     transpose_b, 
                #     mask_n[:, None] and mask_k[None, :], # DN 
                #     mask_n[None, :] and mask_k[:, None]  # ND
                # )
                mask_b = mask_n[None, :] and mask_k[:, None]
                if transpose_b: 
                    mask_b = mask_n[:, None] and mask_k[None, :]
                
                a = tl.load(a_ptrs, mask = mask_a, other = 0.0)#.cast(cal_type)
                b_load = tl.load(b_ptrs, mask = mask_b, other = 0.0)#.cast(cal_type)
                # b = tl.where(
                #     transpose_b, 
                #     b_load.trans(), 
                #     b_load
                # )
                b = b_load
                if transpose_b: 
                    b = b_load.trans()
                
                accumulator = tl.dot(a, b, accumulator)
            
            offs_c_2d = offs_n[None, :] * stride_c_N + offs_m[:, None] * stride_c_M
            c_ptrs_base = c_ptr_batch + offs_c_2d
            
            mask_c = mask_n[None, :] and mask_m[:, None]
            
            accumulator = tl.cast(accumulator, output_type)

            tl.store(c_ptrs_base, accumulator, mask = mask_c)
            
        start_program_idx = (start_program_idx + num_block_in_batch) % program_num



@triton.jit
def pad_head_rope_x(
    q_rope,  # input, [tokens, head_num_q, hidden_size_rope_q_head]
    cos,  # [tokens, hidden_size_rope_q_head] cosine table
    sin,  # [tokens, hidden_size_rope_q_head] sine table
    q_rope_out,  # output, same layout as q_rope
    tokens : tl.constexpr, 
    head_num_q : tl.constexpr, 
    hidden_size_rope_q_head : tl.constexpr, 
    BLOCK_SIZE_TOKENS : tl.constexpr, 
    BLOCK_SIZE_HEAD : tl.constexpr,
):
    '''
    Rotate-half RoPE over the q heads.

    q_rope, q_rope_out : [tokens, head_num_q, hidden_size_rope_q_head]
    cos, sin : [tokens, hidden_size_rope_q_head]
    One task applies RoPE to a block of BLOCK_SIZE_TOKENS tokens for a
    single head; tasks are distributed over programs in a strided
    fashion.  Computes left' = cos*left - sin*right and
    right' = cos*right + sin*left, where element i of a head pairs with
    element i + hidden_size_rope_q_head // 2.
    '''
    program_idx = tl.program_id(axis = 0)
    program_num = tl.num_programs(axis = 0)
    hidden_size_token = hidden_size_rope_q_head * head_num_q
    # Compute in fp32; cast to the output dtype only on store.
    cal_type = tl.float32
    output_type = q_rope_out.dtype.element_ty
    
    # One task = (token block, head) pair.
    task_num_tokens = tl.cdiv(tokens, BLOCK_SIZE_TOKENS)
    task_num = task_num_tokens * head_num_q

    for task_idx in tl.range(program_idx, task_num, program_num): 
        
        task_idx_tokens = task_idx // head_num_q
        task_idx_head = task_idx % head_num_q

        hidden_size_rope_q_head_half = hidden_size_rope_q_head // 2
        block_num_head_half = tl.cdiv(hidden_size_rope_q_head_half, BLOCK_SIZE_HEAD)

        offs_tokens = task_idx_tokens * BLOCK_SIZE_TOKENS + tl.arange(0, BLOCK_SIZE_TOKENS)
        mask_tokens = offs_tokens < tokens

        # Base offsets for the two halves of this head, and the matching
        # per-token positions in the cos/sin rows.
        range_block_size_head = tl.arange(0, BLOCK_SIZE_HEAD)
        start_cur_head = task_idx_head * hidden_size_rope_q_head
        offs_base_head_half_1 = range_block_size_head + start_cur_head
        offs_base_head_half_2 = range_block_size_head + start_cur_head + hidden_size_rope_q_head_half
        offs_base_cos_sin_dim_half_1 = range_block_size_head
        offs_base_cos_sin_dim_half_2 = range_block_size_head + hidden_size_rope_q_head_half

        # 2-D (token, element) offsets; innermost stride of all tensors
        # is 1 (hence the explicit "* 1").
        offs_in_out_half_1_2d = offs_tokens[:, None] * hidden_size_token + offs_base_head_half_1[None, :] * 1
        offs_in_out_half_2_2d = offs_tokens[:, None] * hidden_size_token + offs_base_head_half_2[None, :] * 1
        offs_cos_sin_half_1_2d = offs_tokens[:, None] * hidden_size_rope_q_head + offs_base_cos_sin_dim_half_1[None, :] * 1
        offs_cos_sin_half_2_2d = offs_tokens[:, None] * hidden_size_rope_q_head + offs_base_cos_sin_dim_half_2[None, :] * 1

        q_ptrs_base_half_1 = q_rope + offs_in_out_half_1_2d
        q_ptrs_base_half_2 = q_rope + offs_in_out_half_2_2d
        out_ptrs_base_half_1 = q_rope_out + offs_in_out_half_1_2d
        out_ptrs_base_half_2 = q_rope_out + offs_in_out_half_2_2d
        cos_ptrs_base_half_1 = cos + offs_cos_sin_half_1_2d
        cos_ptrs_base_half_2 = cos + offs_cos_sin_half_2_2d
        sin_ptrs_base_half_1 = sin + offs_cos_sin_half_1_2d
        sin_ptrs_base_half_2 = sin + offs_cos_sin_half_2_2d

        # Walk the half-head dimension in BLOCK_SIZE_HEAD steps.
        for block_idx in tl.range(0, block_num_head_half): 
            
            move = block_idx * BLOCK_SIZE_HEAD * 1
            q_ptrs_half_1 = q_ptrs_base_half_1 + move
            q_ptrs_half_2 = q_ptrs_base_half_2 + move
            out_ptrs_half_1 = out_ptrs_base_half_1 + move
            out_ptrs_half_2 = out_ptrs_base_half_2 + move
            cos_ptrs_half_1 = cos_ptrs_base_half_1 + move
            cos_ptrs_half_2 = cos_ptrs_base_half_2 + move
            sin_ptrs_half_1 = sin_ptrs_base_half_1 + move
            sin_ptrs_half_2 = sin_ptrs_base_half_2 + move

            offs_head_half_1 = offs_base_head_half_1 + move
            offs_head_half_2 = offs_base_head_half_2 + move
            offs_cos_sin_dim_half_1 = offs_base_cos_sin_dim_half_1 + move
            offs_cos_sin_dim_half_2 = offs_base_cos_sin_dim_half_2 + move

            # Keep each half's accesses within its own half of the head.
            mask_head_half_1 = offs_head_half_1 < start_cur_head + hidden_size_rope_q_head_half
            mask_head_half_2 = offs_head_half_2 < start_cur_head + hidden_size_rope_q_head
            mask_cos_sin_dim_half_1 = offs_cos_sin_dim_half_1 < hidden_size_rope_q_head_half
            mask_cos_sin_dim_half_2 = offs_cos_sin_dim_half_2 < hidden_size_rope_q_head

            mask_q_half_1 = mask_tokens[:, None] and mask_head_half_1[None, :]
            mask_q_half_2 = mask_tokens[:, None] and mask_head_half_2[None, :]
            mask_cos_sin_half_1 = mask_tokens[:, None] and mask_cos_sin_dim_half_1[None, :]
            mask_cos_sin_half_2 = mask_tokens[:, None] and mask_cos_sin_dim_half_2[None, :]

            q_left = tl.load(q_ptrs_half_1, mask = mask_q_half_1).cast(cal_type)
            q_right = tl.load(q_ptrs_half_2, mask = mask_q_half_2).cast(cal_type)
            
            # left' = cos*left - sin*right
            cos_left = tl.load(cos_ptrs_half_1, mask = mask_cos_sin_half_1).cast(cal_type)
            sin_left = tl.load(sin_ptrs_half_1, mask = mask_cos_sin_half_1).cast(cal_type)
            out_left = (cos_left * q_left - sin_left * q_right).cast(output_type)
            tl.store(out_ptrs_half_1, out_left, mask = mask_q_half_1)

            # right' = cos*right + sin*left
            cos_right = tl.load(cos_ptrs_half_2, mask = mask_cos_sin_half_2).cast(cal_type)
            sin_right = tl.load(sin_ptrs_half_2, mask = mask_cos_sin_half_2).cast(cal_type)
            out_right = (cos_right * q_right + sin_right * q_left).cast(output_type)
            tl.store(out_ptrs_half_2, out_right, mask = mask_q_half_2)




def mlapo_triton(
    input : torch.Tensor, 
    gamma1 : torch.Tensor, 
    beta1 : torch.Tensor, 
    quant_scale1 : torch.Tensor,
    quant_offset1 : torch.Tensor, 
    wdqkv : torch.Tensor, 
    de_scale1 : torch.Tensor, 
    bias1 : torch.Tensor,
    # 
    gamma2 : torch.Tensor, 
    beta2 : torch.Tensor, 
    quant_scale2 : torch.Tensor, 
    quant_offset2 : torch.Tensor,
    wuq : torch.Tensor, 
    de_scale2 : torch.Tensor, 
    bias2 : torch.Tensor, 
    # 
    gamma3 : torch.Tensor,
    cos1 : torch.Tensor, 
    sin1 : torch.Tensor, 
    cos2 : torch.Tensor, 
    sin2 : torch.Tensor, 
    wuk : torch.Tensor, 
    kv_cache : torch.Tensor,
    slot_mapping : torch.Tensor, 
    # 
    hidden_size_wdq : int = 1536, 
    hidden_size_rope_q_head : int = 64, 
    hidden_size_rope_k : int = 64, 
    epsilon : float = 1e-5,  
    transpose_wdqkv : bool = True, 
    transpose_wuq : bool = True, 
    transpose_wuk : bool = False,  
    cache_mode : int = 0, 
) -> list:

    # from triton.runtime.driver import driver
    # npu hardware params from trion
    # class: self.backend: str, self.arch: str, self.warp_size: int
    target = driver.active.get_current_target() 
    # int
    device = driver.active.get_current_device()
    # dict: {"max_shared_mem": int, "num_aicore": int, "num_vectorcore": int}
    prop = driver.active.utils.get_device_properties(device)

    max_shared_mem = prop["max_shared_mem"]
    num_cube_core = prop["num_aicore"]
    num_vector_core = prop["num_vectorcore"]
    # print(f"\nRunning triton on {target.backend} {target.arch}. ")
    # print(device)
    # print(f"Max shared memory = {max_shared_mem}, AICore num = {num_cube_core}, VectorCore num = {num_vector_core}. \n")

    tokens = input.shape[0]
    head_num_q = wuk.shape[0]
    data_type = input.dtype

    # kernel参数获取
    hidden_size = input.shape[-1]
    
    # META
    DEVICE = input.device
    grid_cube = (num_cube_core, )
    grid_vec = (num_vector_core, )

    TILE_SIZE_RMS_NORM_QUANT = 1024
    # RMSNorm + Quant 处理输入 tokens
    wdqkv_type = wdqkv.dtype
    rms_norm_quant_out1 = None
    # 当input为bf16时，允许wdqkv或wuq为bf16，此时跳过rmsnormquant
    if wdqkv_type == torch.bfloat16: 
        rms_norm_quant_out1 = input
    else: 

        rms_norm_quant_out1 = torch.empty((tokens, hidden_size), dtype = torch.int8, device = DEVICE)
        
        
        rms_norm_quant[grid_vec](
            input, 
            gamma1, 
            beta1, 
            quant_scale1, 
            quant_offset1, 
            rms_norm_quant_out1, 
            QUANTMAX = QUANTMAX, 
            QUANTMIN = QUANTMIN, 
            N = tokens, 
            H = hidden_size, 
            EPS = epsilon, 
            BLOCK_SIZE = TILE_SIZE_RMS_NORM_QUANT
        )
    # return rms_norm_quant_out1.float()
    

    # Matmul bias DeQuant 对 RMSNormQuant处理的tokens降维

    BLOCK_SIZE_M = 128
    BLOCK_SIZE_N = 128
    BLOCK_SIZE_K = 256
    GROUP_SIZE_M = 8

    M_mm1 = tokens#rms_norm_quant_out1.shape[0]
    N_mm1 = wdqkv.shape[1] if not transpose_wdqkv else wdqkv.shape[0]
    K_mm1 = hidden_size#rms_norm_quant_out1.shape[1]

    mm1_out = torch.empty((M_mm1, N_mm1), dtype = data_type, device = DEVICE)

    matmul_bias_scale[grid_cube](
        rms_norm_quant_out1, 
        wdqkv, 
        bias1, 
        de_scale1, 
        mm1_out,
        M_mm1, 
        N_mm1, 
        K_mm1, 
        stride_a0 = rms_norm_quant_out1.stride(0), 
        stride_a1 = rms_norm_quant_out1.stride(1),
        stride_b0 = wdqkv.stride(0), 
        stride_b1 = wdqkv.stride(1),
        stride_c0 = mm1_out.stride(0), 
        stride_c1 = mm1_out.stride(1), 
        transpose_b = transpose_wdqkv, 
        BLOCK_SIZE_M = BLOCK_SIZE_M,
        BLOCK_SIZE_N = BLOCK_SIZE_N,
        BLOCK_SIZE_K = BLOCK_SIZE_K,
        GROUP_SIZE_M = GROUP_SIZE_M,
    )
    # return [mm1_out]


    # Split 经过RMSNormQuant处理后降维的tokens [2112(qkv)] -> [576(kv), 1536 (q)]

    hidden_size_wdkv = mm1_out.shape[-1] - hidden_size_wdq
    mm1_split1, mm1_split2 = torch.split(
        mm1_out, 
        [hidden_size_wdkv, hidden_size_wdq], 
        dim = 1
    )

    '''
    注意这里split的数据，后面搬运到NPU的时候都调用contiguous()使得数据连续，
    而不只是在python中通过修改stride获取每个数据
    '''

    # RMSNorm + Quant 处理经过 RMSNormQuant 处理后再降维后 tokens 中的 q 部分
    wuq_type = wuq.dtype
    rms_norm_quant_out2 = None
    mm1_split2_contiguous = mm1_split2.contiguous()
    if wuq_type == torch.bfloat16: 
        rms_norm_quant_out2 = mm1_split2_contiguous
    else: 
        rms_norm_quant_out2 = torch.empty((tokens, hidden_size_wdq), dtype = torch.int8, device = DEVICE)

        rms_norm_quant[grid_vec](
            mm1_split2_contiguous, 
            gamma2, 
            beta2,
            quant_scale2, 
            quant_offset2, 
            rms_norm_quant_out2,
            QUANTMAX = QUANTMAX, 
            QUANTMIN = QUANTMIN,
            N = tokens, 
            H = hidden_size_wdq, 
            EPS = epsilon,
            BLOCK_SIZE = TILE_SIZE_RMS_NORM_QUANT,
        )
    # return rms_norm_quant_out2.float()

    # MatMul bias DeQuant  对 经过 RMSNormQuant 处理后降维后 tokens 中的 q 部分做 RMSNormQuant 后 升维，总维度为 head_num_q * wuq.shape[-1]

    M_mm2 = tokens#rms_norm_quant_out2.shape[0]
    N_mm2 = wuq.shape[1] if not transpose_wuq else wuq.shape[0]
    K_mm2 = hidden_size_wdq#rms_norm_quant_out2.shape[1]

    mm2_out = torch.empty((M_mm2, N_mm2), dtype = data_type, device = DEVICE)
    
    matmul_bias_scale[grid_cube](
        rms_norm_quant_out2, 
        wuq, 
        bias2, 
        de_scale2,
        mm2_out,
        M_mm2, 
        N_mm2, 
        K_mm2,
        stride_a0 = rms_norm_quant_out2.stride(0), 
        stride_a1 = rms_norm_quant_out2.stride(1),
        stride_b0 = wuq.stride(0), 
        stride_b1 = wuq.stride(1),
        stride_c0 = mm2_out.stride(0), 
        stride_c1 = mm2_out.stride(1),
        transpose_b = transpose_wuq, 
        BLOCK_SIZE_M = BLOCK_SIZE_M,
        BLOCK_SIZE_N = BLOCK_SIZE_N,
        BLOCK_SIZE_K = BLOCK_SIZE_K,
        GROUP_SIZE_M = GROUP_SIZE_M,
    )
    # return [mm2_out]

    # split 经过 RMSNormQuant 处理后降维后 tokens 中的 kv 部分，分别做 RMSNorm 和 RoPE，最后 Reshape Cache 处理
        
#    grid_rms_rope = (40, )
    BLOCK_SIZE_RMS_ROPE = 128
    rms_norm_rope_rac_cal_kernel[grid_vec](
        mm1_split1.contiguous(), 
        gamma3, 
        cos1, 
        sin1, 
        slot_mapping, 
        kv_cache, 
        tokens, 
        mm1_split1.shape[-1], 
        hidden_size_rope_k, 
        epsilon,
        BLOCK_SIZE_RMS_ROPE
    )
    
    # return [mm2_out, kv_cache]

    # Split 经过 RMSNormQuant 处理后降维后 tokens 中的 q部分做 RMSNormQuant 后升维的单头，[192 q] -> [128 (q_nope), 64 (q_rope)]
    
    hidden_size_wuq_head = mm2_out.shape[-1] // head_num_q
    mm2_out = mm2_out.reshape(tokens, head_num_q, hidden_size_wuq_head)
    hidden_size_nope_q_head = hidden_size_wuq_head - hidden_size_rope_q_head
    mm2_split1, mm2_split2 = torch.split(
        mm2_out, 
        [hidden_size_nope_q_head, hidden_size_rope_q_head], 
        dim=2
    )

    # MatmulEinSum，对 Q 切分后的单头中的 NoPE 部分升维，输出 [tokens, head_num_q, hidden_size_wuk_head]
    
    hidden_size_wuk_head = wuk.shape[2] if not transpose_wuk else wuk.shape[1]
    matmul_ein_sum_res = torch.zeros( (tokens, head_num_q, hidden_size_wuk_head), dtype = data_type, device = DEVICE ) # 
    mm2_split1 = mm2_split1.contiguous()
    matmul_ein_sum_triton[grid_cube](
        mm2_split1, 
        wuk, 
        matmul_ein_sum_res, 
        # 
        head_num_q, 
        tokens, 
        hidden_size_wuk_head, 
        hidden_size_nope_q_head, 
        mm2_split1.stride(0), 
        mm2_split1.stride(1), 
        mm2_split1.stride(2), 
        wuk.stride(0), 
        wuk.stride(1), 
        wuk.stride(2), 
        matmul_ein_sum_res.stride(0), 
        matmul_ein_sum_res.stride(1), 
        matmul_ein_sum_res.stride(2), 
        # 
        transpose_b = transpose_wuk, 
        BLOCK_SIZE_M = BLOCK_SIZE_M, 
        BLOCK_SIZE_N = BLOCK_SIZE_N, 
        BLOCK_SIZE_K = BLOCK_SIZE_K, 
        GROUP_SIZE_M = GROUP_SIZE_M, 
    )
    # return [matmul_ein_sum_res]
    ## q rope 部分，输出 [tokens, head_num_q, hidden_size_rope_q_head]

    rope_res = torch.empty_like(mm2_split2, dtype = data_type, device = DEVICE)
    BLOCK_SIZE_TOKENS = 10
    BLOCK_SIZE_HEAD = 64
    pad_head_rope_x[grid_vec](
        mm2_split2.contiguous(), 
        cos2, 
        sin2, 
        rope_res, 
        tokens, 
        head_num_q, 
        hidden_size_rope_q_head, 
        BLOCK_SIZE_TOKENS, 
        BLOCK_SIZE_HEAD
    )
    # return [kv_cache, rope_res]

    q_out = torch.cat((matmul_ein_sum_res, rope_res), dim = 2)

    # 根据 cache_mode 返回输出

    if cache_mode == 1:
        ## 待确认
        return [
            q_out[..., 0:512], 
            kv_cache[..., 0:512],
            q_out[..., 512:576], 
            kv_cache[..., 512:576]
        ]
    else:
        return [q_out, kv_cache]