# Defined in xlite/csrc/_C.cpp

from typing import List, Tuple

import torch
class Runtime:
    """Stub for the native runtime object (implemented in xlite/csrc/_C.cpp).

    Appears to own per-device execution state: `devid` selects the device,
    and `tp_size`/`dp_size` presumably describe the tensor-/data-parallel
    layout — semantics live on the C++ side, confirm there.
    """

    def __init__(
        self,
        devid: int,
        size: int = 0,
        rank: int = 0,
        tp_size: int = 1,
        dp_size: int = 1) -> None: ...

    # NOTE(review): `util` looks like a utilization ratio — confirm range in _C.cpp.
    def update_core_num(self, util: float) -> None: ...
    # Returns an int per the annotation; presumably the resulting pool size or a handle.
    def init_tensor_pool(self, size: int) -> int: ...

class ModelConfig:
    """Stub for the native model hyper-parameter container (see _C.cpp).

    Field types are not declared by the binding (`...` placeholders); the
    names suggest a DeepSeek/MLA-style MoE transformer configuration —
    confirm actual types and semantics against the C++ definition.
    """

    # --- core transformer shape ---
    vocab_size: ...
    hidden_size: ...
    n_layers: ...
    # --- attention ---
    attn_type: ...           # presumably AttnMHA or AttnMLA (module-level constants)
    n_heads: ...
    n_kv_heads: ...
    head_dim: ...
    nope_head_dim: ...       # NOTE(review): nope/rope split suggests partial-RoPE (MLA) — confirm
    rope_head_dim: ...
    v_head_dim: ...
    q_lora_rank: ...         # presumably low-rank projection ranks used by MLA — confirm
    kv_lora_rank: ...
    norm_eps: ...
    rope_theta: ...
    softmax_scale: ...
    # --- MoE layout ---
    n_dense_layers: ...
    n_routed_experts: ...
    n_shared_experts: ...
    n_expert_groups: ...
    n_limited_groups: ...
    n_act_experts: ...
    intermediate_size: ...
    moe_intermediate_size: ...
    route_scale: ...
    # --- parallelism defaults ---
    def_tp_size: ...
    def_dp_size: ...
    moe_ep_size: ...
    moe_tp_size: ...
    # --- runtime limits / layout flags ---
    max_seq_len: ...
    max_batch_size: ...
    max_m: ...
    block_size: ...          # presumably the paged-KV-cache block size — confirm
    weight_nz: ...
    qkv_bias: ...
    qk_norm: ...

    def __init__(self) -> None: ...

class ModelAttnMeta:
    """Stub for per-batch attention metadata accepted by Model.forward*.

    Field types are undeclared in the binding; names suggest per-request
    sequence bookkeeping for paged-KV attention — confirm in _C.cpp.
    """

    lens: ...          # presumably current sequence lengths per request
    cached_lens: ...   # presumably lengths already resident in the KV cache
    is_prefills: ...   # presumably per-request prefill vs. decode flags
    block_tables: ...  # presumably paged-KV block tables

    def __init__(self) -> None: ...

class AttnMeta:
    """Stub for extended attention metadata accepted by Model.forward*.

    Superset of ModelAttnMeta's fields plus CPU-side tables, slot mapping
    and positions; field types are undeclared in the binding — confirm
    semantics in _C.cpp.
    """

    lens: ...              # presumably current sequence lengths per request
    cached_lens: ...       # presumably lengths already resident in the KV cache
    is_prefills: ...       # presumably per-request prefill vs. decode flags
    block_tables_cpu: ...  # presumably a host-side copy of the block tables
    block_tables: ...      # presumably device-side paged-KV block tables
    slot_mapping: ...      # presumably token -> KV-cache-slot mapping
    positions: ...         # presumably per-token position ids (e.g. for RoPE)

    def __init__(self) -> None: ...

# Attention-implementation selector constants exported by the extension.
# Values and types are opaque here (defined in _C.cpp); the names suggest
# multi-head attention vs. multi-head latent attention, presumably used as
# the `attn_type` field of ModelConfig — confirm.
AttnMHA: ...
AttnMLA: ...

class Model:
    """Stub for the native transformer model (weights + forward passes).

    The attribute names mirror weight tensors for the embedding/output head,
    MHA and MLA attention variants, dense MLP, and MoE (shared/routed
    experts); their types are undeclared in the binding. All methods execute
    natively — the per-method notes below are inferred from names and
    signatures, confirm against _C.cpp.
    """

    # embedding / final norm / output head
    embed: ...
    norm: ...
    head: ...
    # attention (common to both variants)
    attn_norm: ...
    attn_out: ...
    # MHA weights
    mha_qkv: ...
    mha_qkv_bias: ...
    mha_q_norm: ...
    mha_k_norm: ...
    # MLA weights (low-rank a/b projections for q and kv — presumably)
    mla_q_a: ...
    mla_q_b: ...
    mla_q_norm: ...
    mla_kv_a: ...
    mla_kv_b: ...
    mla_kv_norm: ...
    # dense MLP
    mlp_norm: ...
    mlp_up_gate: ...
    mlp_down: ...
    # MoE router and experts (se = shared expert, re = routed expert — presumably)
    gate: ...
    gate_bias: ...
    se_up_gate: ...
    se_down: ...
    re_up_gate: ...
    re_up_gate_scale: ...  # *_scale fields suggest quantized routed-expert weights — confirm
    re_down: ...
    re_down_scale: ...

    def __init__(self) -> None: ...
    # Bind this instance to a config (and rank, for multi-device runs).
    def init(self, config: ModelConfig, rank: int = 0) -> None: ...

    # Run the transformer over `input`, writing results into `output` in
    # place (returns None). `kv_cache` is one (k, v) tensor pair per layer;
    # `freqs_cis` carries rotary-embedding tables; `curr_stream` is
    # presumably a device-stream handle — confirm in _C.cpp.
    def forward(
        self,
        rt: Runtime,
        input: torch.Tensor,
        attn_meta: ModelAttnMeta | AttnMeta,
        kv_cache: List[Tuple[torch.Tensor, torch.Tensor]],
        freqs_cis: torch.Tensor,
        output: torch.Tensor,
        curr_stream: int) -> None: ...

    # Project hidden states in `input` through the output head, writing
    # logits into `output` in place — presumably; confirm in _C.cpp.
    def compute_logits(
        self,
        rt: Runtime,
        input: torch.Tensor,
        output: torch.Tensor,
        curr_stream: int) -> None: ...

    # Same parameter contract as forward(); the name suggests a fused
    # forward + logits pass with `output` receiving logits — confirm.
    def forward_and_get_logits(
        self,
        rt: Runtime,
        input: torch.Tensor,
        attn_meta: ModelAttnMeta | AttnMeta,
        kv_cache: List[Tuple[torch.Tensor, torch.Tensor]],
        freqs_cis: torch.Tensor,
        output: torch.Tensor,
        curr_stream: int) -> None: ...

    # Variant of forward() whose `input` is presumably pre-computed
    # embeddings rather than token ids — confirm in _C.cpp.
    def forward_with_inputs_embeds(
        self,
        rt: Runtime,
        input: torch.Tensor,
        attn_meta: ModelAttnMeta | AttnMeta,
        kv_cache: List[Tuple[torch.Tensor, torch.Tensor]],
        freqs_cis: torch.Tensor,
        output: torch.Tensor,
        curr_stream: int) -> None: ...

    def get_tensor_pool_size(self) -> int: ...

class CoreAssigner:
    """Stub for the native core-assignment helper.

    Presumably partitions compute cores between prefill and decode work
    according to `ratio`; the exact policy lives in _C.cpp — confirm there.
    """

    def __init__(self, ratio: float) -> None: ...
    # Returns a float — presumably the core share granted for this phase.
    def assign_core(self, is_decode: bool) -> float: ...
    def release_core(self, is_decode: bool) -> None: ...
