
from typing import Dict


from einops import rearrange





import torch
import torch.nn as nn

class StackProjector(nn.Module):
    """Stack consecutive feature frames and linearly project them.

    A window of ``stack_frames`` frames slides over the time axis with step
    ``stride``; each window is concatenated along the feature axis and fed
    through a linear layer. Tail frames that do not fill a full window are
    dropped.
    """

    def __init__(self, input_dim=80, stack_frames=3, stride=2, proj_dim=80):
        super().__init__()
        self.input_dim = input_dim
        self.stack_frames = stack_frames
        self.stride = stride
        self.linear = nn.Linear(input_dim * stack_frames, proj_dim)

    def forward(self, x, x_lengths):
        """
        Args:
            x: (B, T, F) features with F == input_dim.
            x_lengths: (B,) valid frame count per sequence.

        Returns:
            out: (B, T_out, proj_dim) projected stacked windows.
            out_lengths: (B,) valid output length per sequence.
        """
        B, T, F = x.size()
        assert F == self.input_dim, f"Expect input_dim={self.input_dim}, got {F}"

        # Per-sample output lengths; sequences shorter than one window get 0.
        out_lengths = ((x_lengths - self.stack_frames) // self.stride + 1).clamp(min=0)

        # Number of full windows available; drop the tail shorter than stack_frames.
        max_start = T - self.stack_frames
        if max_start < 0:
            # Bug fix: this path used to return a bare tensor, while callers
            # (e.g. FullStackProjector) unpack ``out, out_lengths``.
            out = torch.zeros(B, 0, self.linear.out_features, device=x.device, dtype=x.dtype)
            return out, out_lengths

        num_steps = max_start // self.stride + 1

        # Window indices: [[0,1,2], [2,3,4], [4,5,6], ...]
        idx = torch.arange(self.stack_frames, device=x.device) \
              + torch.arange(num_steps, device=x.device).unsqueeze(1) * self.stride
        # idx: (num_steps, stack_frames)

        # Gather the frames of each window and flatten them.
        x_stacked = x[:, idx, :]                          # (B, num_steps, stack_frames, F)
        x_stacked = x_stacked.reshape(B, num_steps, -1)   # (B, num_steps, stack_frames*F)

        # Linear projection.
        out = self.linear(x_stacked)                      # (B, num_steps, proj_dim)

        return out, out_lengths
class FullStackProjector(nn.Module):
    """Chain two stack projectors: the first one's output feeds the second."""

    def __init__(self, stack_projector1, stack_projector2):
        super().__init__()
        self.sp1 = stack_projector1
        self.sp2 = stack_projector2

    def forward(self, x, lengths):
        # Apply both projectors in sequence, threading the lengths through.
        hidden, hidden_lengths = self.sp1(x, lengths)
        out, out_lengths = self.sp2(hidden, hidden_lengths)
        return out, out_lengths


import torch.nn as nn

class SimpleStackProjector6x(nn.Module):
    """Stack frames in groups of 6, mimicking a two-layer conv subsampler.

    The index construction mirrors two stacked "convolutions": the first with
    kernel 3 / stride 2 over raw frames, the second with kernel 5 / stride 3
    over the first layer's windows. From each second-layer window only the
    first 6 underlying frames are kept (later taps repeat earlier frames).
    The module has no learned parameters.
    """

    def __init__(self, input_dim=80):
        super().__init__()
        self.input_dim = input_dim
        self.stack_frames = 6    # frames concatenated per output step

    def forward(self, x):
        """
        Args:
            x: (B, T, F) features with F == input_dim.

        Returns:
            (B, T_out, 6 * input_dim) stacked frames; T_out is 0 when the
            input is too short for a single window.
        """
        B, T, F = x.size()
        assert F == self.input_dim, f"Expect input_dim={self.input_dim}, got {F}"

        # ---- Step 1: length arithmetic of the two simulated conv layers.
        L1 = ((T - 3) // 2 + 1)          # after kernel=3, stride=2
        max_len = ((L1 - 5) // 3 + 1)    # after kernel=5, stride=3
        if max_len <= 0:
            # Bug fix: the old code read ``self.linear.out_features`` here,
            # but the linear layer was removed; return an empty tensor with
            # the stacked feature width instead.
            return torch.zeros(B, 0, self.input_dim * self.stack_frames,
                               device=x.device, dtype=x.dtype)

        # ---- Step 2: build indices aligned with the two conv layers' taps.
        # Layer-1 window indices (stack_frames=3, stride=2).
        idx1 = torch.arange(T, device=x.device)
        idx1 = idx1.unfold(0, 3, 2)   # (L1, 3)

        # Layer 2 takes 5 of layer-1's windows with stride 3.
        idx2 = torch.arange(idx1.size(0), device=x.device)
        idx2 = idx2.unfold(0, 5, 3)   # (max_len, 5)

        # Combined frame indices per output step.
        idx = idx1[idx2]                        # (max_len, 5, 3)
        idx = idx.reshape(idx.size(0), -1)      # (max_len, 15)
        # Overlapping taps duplicate frames; keep the first 6 distinct-enough frames.
        idx = idx[:, :6]                        # (max_len, 6)

        # ---- Step 3: gather and flatten the stacked frames.
        x_stacked = x[:, idx, :]                # (B, max_len, 6, F)
        x_stacked = x_stacked.reshape(B, max_len, -1)

        return x_stacked


from vector_quantize_pytorch.random_projection_quantizer import RandomProjectionQuantizer
import torch.nn.functional as F

def get_mask_subset_prob(mask, prob=0.45, min_mask=2):
    """Randomly select ~``prob`` of the True positions in ``mask``.

    Args:
        mask: (B, T) bool tensor; True marks valid positions.
        prob: target fraction of valid positions to select.
        min_mask: lower bound on the per-row selection count.

    Returns:
        subset_mask: (B, T) selected subset (True = selected).
        dropped_mask: (B, T) positions True in ``mask`` but not selected.
    """
    batch, seq, device = *mask.shape, mask.device
    num_to_mask = (mask.sum(dim=-1, keepdim=True) * prob).clamp(min=min_mask)

    logits = torch.rand((batch, seq), device=device)
    # Padding gets -1 so it sorts ahead of every valid position.
    logits = logits.masked_fill(~mask, -1)

    # Bug fix: a single argsort gives the inverse permutation (index of the
    # i-th smallest), which biases the selection whenever padding exists.
    # argsort twice to obtain each position's rank (0 = smallest), matching
    # the reference SoundStorm/BestRQ implementation.
    randperm = logits.argsort(dim=-1).argsort(dim=-1).float()
    num_padding = (~mask).sum(dim=-1, keepdim=True)
    randperm -= num_padding  # valid positions now ranked 0..num_valid-1

    subset_mask = randperm < num_to_mask
    subset_mask.masked_fill_(~mask, False)

    # Positions that were valid but not chosen.
    dropped_mask = mask & ~subset_mask

    return subset_mask, dropped_mask


import torch

import torch

import torch


def get_bert_mask(seq_mask, prob=0.12, span_len=7):
    """BERT-style contiguous span masking over valid positions.

    Args:
        seq_mask: (B, T) bool; True for the first N valid positions, False for padding.
        prob: probability that each valid position starts a masked span.
        span_len: length of each contiguous masked span.

    Returns:
        bert_mask:   (B, T) ``seq_mask`` with the chosen positions set to False.
        choice_mask: (B, T) the positions chosen for masking.
    """
    B, T = seq_mask.shape
    device = seq_mask.device

    # ---- Step 1: Bernoulli draw of span start points within the valid region.
    start_mask = (torch.rand((B, T), device=device) < prob) & seq_mask

    # ---- Step 2: extend each start point into a span of span_len positions.
    choice_mask = torch.zeros((B, T), dtype=torch.bool, device=device)
    for shift in range(span_len):
        shifted = torch.roll(start_mask, shifts=shift, dims=1)
        # Bug fix: torch.roll wraps tail elements around to the *front* of the
        # row, so the wrapped entries sit in the first ``shift`` positions.
        # The old code zeroed the last ``shift`` positions instead, letting
        # wrapped span fragments leak into the head of each row.
        shifted &= torch.cat(
            [torch.zeros((B, shift), dtype=torch.bool, device=device),
             torch.ones((B, T - shift), dtype=torch.bool, device=device)], dim=1
        )
        choice_mask |= shifted

    # ---- Step 3: restrict to the valid region.
    choice_mask &= seq_mask

    # ---- Step 4: guarantee at least one masked span per non-empty row.
    for b in range(B):
        if choice_mask[b].sum() == 0 and seq_mask[b].any():
            valid_idx = torch.nonzero(seq_mask[b], as_tuple=False).squeeze(1)
            if len(valid_idx) > 0:
                start = valid_idx[torch.randint(0, len(valid_idx), (1,))]
                end = min(start + span_len, T)
                choice_mask[b, start:end] = True

    # ---- Step 5: guarantee at least one UNmasked valid position per row.
    for b in range(B):
        non_mask_idx = torch.nonzero(seq_mask[b] & ~choice_mask[b], as_tuple=False).squeeze(1)
        if len(non_mask_idx) == 0:
            # Every valid position got masked; un-mask a random half of them.
            valid_idx = torch.nonzero(seq_mask[b], as_tuple=False).squeeze(1)
            assert len(valid_idx) >= 2, f"Seq_mask must have at least 2 valid positions for batch {b}"

            half_valid_idx = valid_idx[torch.randperm(len(valid_idx))[:len(valid_idx) // 2]]
            choice_mask[b, half_valid_idx] = False

    # ---- Step 6: final mask with the chosen positions removed.
    bert_mask = seq_mask & ~choice_mask

    return bert_mask, choice_mask


def make_seq_mask(feat_lengths, max_len=None):
    """Build a boolean padding mask from per-sequence lengths.

    Args:
        feat_lengths: (batch,) number of valid frames per sequence.
        max_len: optional mask width; defaults to ``feat_lengths.max()``.

    Returns:
        (batch, max_len) bool tensor: True = valid frame, False = padding.
    """
    if max_len is None:
        max_len = int(feat_lengths.max())

    # Broadcast (1, max_len) positions against (batch, 1) lengths.
    positions = torch.arange(max_len, device=feat_lengths.device)
    return positions.unsqueeze(0) < feat_lengths.unsqueeze(1)



class BestRQ(nn.Module):
    """BEST-RQ style self-supervised pre-training wrapper.

    A frozen random-projection quantizer converts input features into
    discrete targets; the conformer encoder sees the features with
    BERT-style span masking and is trained to predict the quantizer labels
    at the masked positions.
    """

    def __init__(
        self,
        conformer: torch.nn.Module,
        codebook_size: int = 1024,
        codebook_dim: int = 16,
    ):
        super().__init__()

        # Frozen label pipeline: stack 6 frames (80 -> 480 dims), normalize,
        # then quantize against a fixed random-projection codebook.
        self.rpq = nn.Sequential(
            SimpleStackProjector6x(80),
            nn.LayerNorm(480, elementwise_affine=True),
            RandomProjectionQuantizer(
                dim=480,
                codebook_size=codebook_size,
                codebook_dim=codebook_dim,
                norm=False
            )
        )
        # Bug fix: ``self.rpq.requires_grad = False`` was a plain attribute
        # write on the Module and froze nothing; ``requires_grad_`` actually
        # disables gradients on every parameter (e.g. the LayerNorm weights).
        self.rpq.requires_grad_(False)
        self.conformer = conformer
        # Conformer output width; presumably matches the encoder config — TODO confirm.
        self.embed_dim = 384

        # Label value ignored by the cross-entropy loss.
        self.pad_id = -100

        self.to_logits = nn.Sequential(
            nn.LayerNorm(self.embed_dim),
            nn.Linear(self.embed_dim, codebook_size)
        )

    def forward(
        self,
        batch_dict: Dict,
        device: torch.device,
    ):
        """Compute the masked-prediction loss for one batch.

        Args:
            batch_dict: expects 'feats' (B, T, 80) and 'feats_lengths' (B,).
            device: device the batch tensors are moved to.

        Returns:
            Dict with 'loss', 'acc', 'total_valid', 'unique_pred_count',
            and 'prediction_diversity_ratio'.
        """
        x = batch_dict['feats'].to(device)
        x_lengths = batch_dict['feats_lengths'].to(device)
        # Discrete targets from the frozen quantizer, one per label frame.
        labels = self.rpq(x)

        feat_seq_mask = make_seq_mask(x_lengths)
        # Downsample the frame mask to the label rate (offset 10, step 6);
        # assumed to align with the quantizer's 6x stacking — TODO confirm.
        feat_seq_mask = feat_seq_mask[:, 10::6]

        mask_with_bert_mask, dropped_mask = get_bert_mask(feat_seq_mask)

        features, mask = self.conformer(
            x,
            x_lengths,
            mask_with_bert_mask,
        )
        logits = self.to_logits(features)
        logits = rearrange(logits, "b n c -> b c n")

        # Only the masked (dropped) positions contribute to the loss.
        masked_labels = labels.masked_fill(~dropped_mask, self.pad_id)
        loss = F.cross_entropy(logits, masked_labels, ignore_index=self.pad_id)

        # ---- Diagnostics: accuracy, valid-label count, prediction diversity.
        with torch.no_grad():
            preds = logits.argmax(dim=1)  # (b, n)
            valid_mask = masked_labels != self.pad_id  # only non-pad positions

            # Prediction accuracy on masked positions.
            correct = (preds == masked_labels) & valid_mask
            if valid_mask.sum() > 0:
                acc = correct.sum().float() / valid_mask.sum().float()
            else:
                acc = torch.tensor(0.0, device=logits.device)

            # Total number of scored label positions.
            total_valid = valid_mask.sum().item()

            # Number of distinct predicted codes; a low ratio signals codebook collapse.
            unique_pred_count = preds[valid_mask].unique().numel()
            prediction_diversity_ratio = unique_pred_count / total_valid if total_valid > 0 else 0.0

        return {
            "loss": loss,
            "acc": acc,
            "total_valid": total_valid,
            "unique_pred_count": unique_pred_count,
            "prediction_diversity_ratio": prediction_diversity_ratio
        }



if __name__ == '__main__':
    # Manual smoke test of the masking helpers.
    root_dir = "../../"
    config_path = f"{root_dir}/conf/train_2025_9_10_xlgeng.yaml"
    lengths = torch.tensor([50, 32, 30, 40])
    seq_mask = make_seq_mask(lengths)
    print(seq_mask )
    subset_mask, dropped_mask = get_mask_subset_prob(seq_mask, 0.8)
    print(subset_mask)
    print(dropped_mask)
    bert_mask, choice_mask = get_bert_mask(seq_mask)
    print(bert_mask)
    print(choice_mask)
