import torch
from demo2 import (
    DeepseekMLA_Windows, PretrainedConfig, MockModelExtraConfig,
    get_tensor_model_parallel_world_size, get_tensor_model_parallel_rank,
    get_o_proj_tp_group, get_o_proj_dp_group, get_npu_device_count,
    yarn_get_mscale
)

"""
原代码 _forward_prefill 的核心是处理完整输入序列并复用 KV 缓存，此 Demo 简化为 Windows 单卡环境，模拟 KV 缓存的初始化、更新和读取，保留核心流程。
KV 缓存简化：用 PyTorch 张量模拟分页缓存，避免 NPU 的PA_NZ（分页 + 非零存储）逻辑；
缓存更新：用索引赋值替代循环，提升 Windows 环境下的效率；
缓存读取：简化为按块拼接，避免原代码的分布式缓存索引逻辑，适配单卡。
"""

# -------------------------- 扩展Demo2，添加KV缓存逻辑（Windows适配） --------------------------
class DeepseekMLA_Prefill_Windows(DeepseekMLA_Windows):
    """Demo2's MLA module extended with a simplified paged KV cache.

    Mirrors the original ``_forward_prefill`` flow (process the full input
    sequence, populate and then reuse the KV cache) on a single Windows
    device: plain tensors stand in for the NPU's paged PA_NZ storage, the
    cache update is one batched index assignment, and the cache read is a
    simple per-block concatenation.
    """

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        # Paged-cache configuration: tokens stored per cache block.
        self.cache_block_size = 64
        # Lazily allocated storage: (k_cache, v_cache), each shaped
        # (num_blocks, block_size, num_heads, head_dim).
        self.kv_cache = None

    def init_kv_cache(self, num_blocks: int):
        """Allocate a zeroed paged KV cache on the module's device/dtype.

        Args:
            num_blocks: number of fixed-size cache blocks to allocate.
        """
        param = next(self.parameters())
        device, dtype = param.device, param.dtype
        # K cache stores the full per-head key assembled in forward_prefill
        # (cat of the non-RoPE and RoPE parts), so its last dim is
        # qk_nope_head_dim + qk_rope_head_dim — NOT kv_lora_rank-based.
        k_cache = torch.zeros(
            (num_blocks, self.cache_block_size, self.num_local_heads,
             self.qk_nope_head_dim + self.qk_rope_head_dim),
            dtype=dtype, device=device,
        )
        # V cache: (num_blocks, block_size, num_heads, v_head_dim).
        v_cache = torch.zeros(
            (num_blocks, self.cache_block_size, self.num_local_heads, self.v_head_dim),
            dtype=dtype, device=device,
        )
        self.kv_cache = (k_cache, v_cache)

    def update_kv_cache(self, slot_mapping: torch.Tensor, k: torch.Tensor, v: torch.Tensor):
        """Scatter per-token K/V into the paged cache.

        Args:
            slot_mapping: (seq_len,) global cache slot index of each token.
            k: (seq_len, num_heads, k_dim) keys to store.
            v: (seq_len, num_heads, v_dim) values to store.

        Raises:
            ValueError: if the cache is uninitialized, or a slot index falls
                outside the allocated cache capacity.
        """
        if self.kv_cache is None:
            raise ValueError("KV缓存未初始化，请先调用init_kv_cache")
        k_cache, v_cache = self.kv_cache
        block_size = self.cache_block_size

        # Empty update is a no-op (also avoids .max() on an empty tensor).
        if slot_mapping.numel() == 0:
            return

        # Fail fast with a clear message instead of an opaque IndexError
        # (or a silent device-side assert on accelerators).
        capacity = k_cache.shape[0] * block_size
        max_slot = int(slot_mapping.max().item())
        if max_slot >= capacity:
            raise ValueError(
                f"slot_mapping max {max_slot} exceeds cache capacity {capacity}"
            )

        # Split each global slot into (block index, offset within block).
        block_idx = slot_mapping // block_size
        token_idx_in_block = slot_mapping % block_size

        # Batched scatter via advanced indexing — no Python loop.
        k_cache[block_idx, token_idx_in_block] = k
        v_cache[block_idx, token_idx_in_block] = v

    def forward_prefill(self, hidden_states: torch.Tensor, positions: torch.Tensor, slot_mapping: torch.Tensor):
        """Prefill forward pass with KV-cache population and reuse.

        Args:
            hidden_states: (seq_len, hidden_size) input activations.
            positions: (seq_len,) absolute token positions for RoPE.
            slot_mapping: (seq_len,) cache slot of each token. The simplified
                cache-read step below additionally assumes slots are the
                contiguous range 0..seq_len-1.

        Returns:
            Tuple of the (seq_len, hidden_size) attention output and the
            (k_cache, v_cache) cache tuple.
        """
        seq_len = hidden_states.shape[0]
        device = hidden_states.device

        # ---- 1. Lazily allocate the cache, sized to cover the highest
        # referenced slot (equivalent to ceil(seq_len / block_size) for the
        # common contiguous mapping, but also correct for sparse mappings).
        if self.kv_cache is None:
            max_slot = int(slot_mapping.max().item()) if slot_mapping.numel() else 0
            needed = max(max_slot + 1, seq_len)
            num_blocks = (needed + self.cache_block_size - 1) // self.cache_block_size
            self.init_kv_cache(num_blocks)
        k_cache, v_cache = self.kv_cache

        # ---- 2. Q/KV projections and RoPE (same flow as Demo2) ----
        # Low-rank Q path: down-projection -> norm -> up-projection.
        q_lowrank, _ = self.q_a_proj(hidden_states)
        q, _ = self.q_a_layernorm(q_lowrank)
        q, _ = self.q_b_proj(q)
        q = q.view(-1, self.num_local_heads, self.qk_head_dim)
        q_nope, q_pe = torch.split(q, [self.qk_nope_head_dim, self.qk_rope_head_dim], dim=-1)

        # Low-rank KV path: joint projection splits into the latent KV and
        # the shared (MQA-style) RoPE key component.
        kv, _ = self.kv_a_proj_with_mqa(hidden_states)
        kv_a, k_pe_raw = torch.split(kv, [self.kv_lora_rank, self.qk_rope_head_dim], dim=-1)
        kv_a, _ = self.kv_a_layernorm(kv_a)

        # Rotary embedding applied to the positional (PE) parts only.
        cos, sin = self.rotary_emb.get_cos_sin(positions)
        q_pe_rotated = self._apply_rope(q_pe, cos, sin, positions)
        q = torch.cat([q_nope, q_pe_rotated], dim=-1)
        k_pe_rotated = self._apply_rope(k_pe_raw.view(-1, 1, self.qk_rope_head_dim), cos, sin, positions).squeeze(1)

        # Up-project the latent KV and assemble the full per-head key.
        kv_b, _ = self.kv_b_proj(kv_a)
        kv_b = kv_b.view(-1, self.num_local_heads, self.qk_nope_head_dim + self.v_head_dim)
        k_nope, v = torch.split(kv_b, [self.qk_nope_head_dim, self.v_head_dim], dim=-1)
        k = torch.cat([k_nope, k_pe_rotated.unsqueeze(1).repeat(1, self.num_local_heads, 1)], dim=-1)

        # ---- 3. Write this sequence's K/V into the paged cache ----
        self.update_kv_cache(slot_mapping, k, v)

        # ---- 4. Read K/V back from the cache (simplified: assumes the
        # contiguous slot layout noted in the docstring; a real engine would
        # gather via a block table) ----
        used_blocks = int(slot_mapping.max().item()) // self.cache_block_size + 1
        cached_k = []
        cached_v = []
        for b in range(used_blocks):
            # Only the tokens actually written into this block are valid.
            block_token_count = min(self.cache_block_size, seq_len - b * self.cache_block_size)
            cached_k.append(k_cache[b, :block_token_count])
            cached_v.append(v_cache[b, :block_token_count])
        cached_k = torch.cat(cached_k, dim=0)  # (seq_len, num_heads, k_dim)
        cached_v = torch.cat(cached_v, dim=0)  # (seq_len, num_heads, v_dim)

        # ---- 5. Causal attention over the cached K/V, then output proj ----
        attn_scores = torch.einsum("shd,thd->hst", q, cached_k) * self.scale
        mask = torch.tril(torch.ones(seq_len, seq_len, device=device, dtype=torch.bool))
        attn_scores = attn_scores.masked_fill(~mask, float("-inf"))
        attn_weights = torch.softmax(attn_scores, dim=-1)
        attn_output = torch.einsum("hst,thd->shd", attn_weights, cached_v)

        attn_output = attn_output.reshape(-1, self.num_local_heads * self.v_head_dim)
        output, _ = self.o_proj(attn_output)

        return output, self.kv_cache


# -------------------------- Standalone verification (single-device Windows) --------------------------
if __name__ == "__main__":
    # Build the prefill module with a small head configuration.
    cfg = PretrainedConfig(rms_norm_eps=1e-6)
    model = DeepseekMLA_Prefill_Windows(
        config=cfg,
        hidden_size=4096,
        num_heads=32,
        qk_nope_head_dim=48,
        qk_rope_head_dim=16,
        v_head_dim=64,
        q_lora_rank=64,
        kv_lora_rank=64,
        rope_scaling={"factor": 2.0},
    ).to(torch.float32)

    # Small prefill workload: 100 tokens mapped to contiguous cache slots.
    seq_len = 100
    hidden_states = torch.randn(seq_len, 4096, dtype=torch.float32)
    token_positions = torch.arange(seq_len, dtype=torch.int64)  # positions 0..99
    slots = torch.arange(seq_len, dtype=torch.int64)  # slot mapping 0..99

    # Run prefill (initializes and fills the KV cache as a side effect).
    with torch.no_grad():
        prefill_output, kv_cache = model.forward_prefill(hidden_states, token_positions, slots)

    # Report shapes and confirm the cache was actually written.
    print("=== Windows Prefill阶段验证 ===")
    print(f"Prefill输入形状: {hidden_states.shape} → (seq_len, hidden_size)")
    print(f"Prefill输出形状: {prefill_output.shape} → (seq_len, hidden_size)")
    print(f"KV缓存 - K形状: {kv_cache[0].shape} → (num_blocks, block_size, num_heads, k_dim)")
    print(f"KV缓存 - V形状: {kv_cache[1].shape} → (num_blocks, block_size, num_heads, v_dim)")
    nonzero_k = torch.count_nonzero(kv_cache[0])
    print(f"K缓存非零元素数: {nonzero_k} → 预期>0（缓存已更新）")