import torch
import torch_npu
import triton
import triton.language as tl
from mlapo_torch import process_deq_scale, QUANTMAX, QUANTMIN, MLAPO

# Default value for MLAPrepareOperation's `epsilon` parameter — presumably a
# numerical-stability term for a normalization step (the class carries an
# `hidden_size_rms_kv` dimension); confirm against the consuming kernel.
EPS = 1e-6
# Default torch device string for this module; "npu" relies on the
# torch_npu plugin imported at the top of the file being installed.
DEVICE = "npu"

class MLAPrepareOperation:
    """Configuration holder for the MLA prepare (preprocessing) operation.

    The constructor performs no computation: every argument is stored on the
    instance unchanged, under the same name.  The parameter names mirror MLA
    weight/activation dimensions (wdqkv, wdq/wdkv, rope/nope, wuq/wuk heads,
    output q/kv) — presumably the shapes consumed by the MLAPO kernel;
    confirm their exact meaning against the kernel implementation.
    """

    def __init__(
        self,
        tokens,
        head_num,
        block_size,
        block_num,
        hidden_size,
        hidden_size_wdqkv,
        hidden_size_wdq,
        hidden_size_wdkv,
        hidden_size_rope_k,
        hidden_size_rms_kv,
        hidden_size_wuq_head,
        hidden_size_rope_q_head,
        hidden_size_nope_q_head,
        hidden_size_wuk_head,
        hidden_size_output_kv,
        hidden_size_output_q,
        epsilon = EPS,
        cache_mode = 0,
        device = DEVICE,
        data_type = torch.float16
    ):
        """Capture every constructor argument as a same-named attribute.

        Equivalent to writing ``self.tokens = tokens``, ``self.head_num =
        head_num``, ... for each parameter in declaration order.
        """
        # At function entry, locals() holds exactly `self` plus the
        # parameters, in declaration order; copy each onto the instance.
        for attr_name, attr_value in locals().items():
            if attr_name != "self":
                setattr(self, attr_name, attr_value)
